# === src/plugins/time_zone_db/executor.py (from AmeyKamat/MEERA, MIT license) ===
import os
from datetime import datetime
import requests


class TimeZoneDBPlugin:
def __init__(self, config):
super(TimeZoneDBPlugin, self).__init__()
self.config = config

    def execute(self, context):
entities = context.nlp_analysis.entities
if "location" in entities:
latitude = entities["location"]["latitude"]
longitude = entities["location"]["longitude"]
elif "self-location" in entities:
latitude = entities["self-location"]["latitude"]
longitude = entities["self-location"]["longitude"]
else:
return {'status': 'need-location'}
key_variable = self.config['key_variable']
key = os.environ[key_variable]
response = requests.get(self.config["time_url"], params={
'key': key,
'format': 'json',
'by': 'position',
'lat': latitude,
'lng': longitude
}).json()
received_date_format = self.config["received_date_format"]
required_date_format = self.config["required_date_format"]
result = {}
result["datetime"] = datetime.strptime(
response["formatted"],
received_date_format).strftime(required_date_format)
if entities.get("location") is not None:
location = entities["location"]["location"]
else:
location = None
result["location"] = location if 'location' in entities else response["zoneName"]
response = {
'result': result,
'status': 'success'
}
return response
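

# A minimal usage sketch (added for illustration; not part of the plugin).
# The config values and the fake context below are assumptions: the plugin
# only requires the keys read in execute(), and the real TimeZoneDB URL,
# date formats, and API key variable name may differ.
if __name__ == "__main__":
    from types import SimpleNamespace

    fake_config = {
        "key_variable": "TIMEZONEDB_API_KEY",  # assumed env var name
        "time_url": "http://api.timezonedb.com/v2.1/get-time-zone",  # assumed
        "received_date_format": "%Y-%m-%d %H:%M:%S",  # assumed
        "required_date_format": "%H:%M on %d %B %Y",  # assumed
    }
    fake_context = SimpleNamespace(nlp_analysis=SimpleNamespace(entities={
        "location": {"latitude": 48.85, "longitude": 2.35,
                     "location": "Paris"}
    }))
    # Requires the API key in the environment and network access.
    print(TimeZoneDBPlugin(fake_config).execute(fake_context))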


# === baselines/profiling/profile_main.py (from Worm4047/TVR, MIT license) ===
"""
Profile the time needed for retrieval.
We consider retrieval in a corpus of 1M videos, 1K videos are added, 10K queries are retrieved.
Calculate the time needed for adding 1K videos, and performing retrieval for 10K queries.
1, Data Loading time is ignored, consider it is hidden by computation time.
2, Sort time is ignored, since it is the similar among the methods.
"""

import os
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import pprint
from tqdm import tqdm, trange
from baselines.crossmodal_moment_localization.model_xml import XML, xml_base_config
from baselines.mixture_embedding_experts.model import MEE, mee_base_cfg
from baselines.clip_alignment_with_language.model import CALWithSub, cal_base_cfg
from baselines.excl.model import EXCL, excl_base_cfg
from utils.basic_utils import save_json
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(format="%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO)


def mask_logits(target, mask):
return target * mask + (1 - mask) * (-1e10)


class ProfileBase(object):
N_NewQuery = 1e4
N_NewVideo = 1e3
N_Videos = 1e6
AvgVideoLength = 100
ClipLength = 5
AvgClipPerVideo = int(AvgVideoLength / ClipLength) # max_ctx_l
AvgWordInQuery = 15
# estimated by
# scales=[2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], => max_proposal = 14
AvgProposalPerVideo = 170
MaxClipPerProposal = 14 # pad to this length
AvgClipPerProposal = 7 # 6.88
VideoFeatureDim = 3074 # 1024 + 2048 + 2 (TEF)
SubFeatureDim = 770
QueryFeatureDim = 768
HiddenSize = 256
N_Runs = 5 # Get the average time
def __init__(self, device=torch.device("cuda:0"), ctx_batch_size=400, query_batch_size=100):
self.device = device
self.ctx_batch_size = ctx_batch_size
self.query_batch_size = query_batch_size
self.model_config = self.get_model_config()
print(self.model_config)
self.model = self.get_model()
def get_model(self):
return None
def get_model_config(self):
return None
def set_ctx_batch_size(self, batch_size):
self.ctx_batch_size = batch_size
def set_query_batch_size(self, batch_size):
self.query_batch_size = batch_size
def cast_dict_inputs_to_device(self, dict_inputs, device):
return {k: v.to(device) for k, v in dict_inputs.items()}
def get_fake_ctx_raw_input_st_ed(self, no_tef=False):
return dict(
video_feat=torch.FloatTensor(self.ctx_batch_size, self.model_config.max_ctx_l,
self.VideoFeatureDim - 2*no_tef),
sub_feat=torch.FloatTensor(self.ctx_batch_size, self.model_config.max_ctx_l, self.SubFeatureDim - 2*no_tef),
ctx_mask=torch.FloatTensor(self.ctx_batch_size, self.model_config.max_ctx_l),
)
def get_fake_raw_query(self):
return dict(
query_feat=torch.FloatTensor(self.query_batch_size, self.AvgWordInQuery, self.QueryFeatureDim),
query_mask=torch.ones(self.query_batch_size, self.AvgWordInQuery)
)
"""
from baselines.profiling.profile_main import ProfileXML
profile_xml = ProfileXML(ctx_batch_size=400, query_batch_size=100)
profile_xml.get_ctx_encoding_time()
"""
class ProfileXML(ProfileBase):
def get_model_config(self):
xml_base_config["ctx_mode"] = "video_sub_tef"
xml_base_config["merge_two_stream"] = True
xml_base_config["cross_att"] = True
xml_base_config["max_ctx_l"] = self.AvgClipPerVideo
xml_base_config["visual_input_size"] = self.VideoFeatureDim
xml_base_config["query_input_size"] = self.QueryFeatureDim
xml_base_config["sub_input_size"] = self.SubFeatureDim
xml_base_config["hidden_size"] = self.HiddenSize
return xml_base_config
def get_model(self):
model = XML(self.model_config)
model.to(self.device)
model.eval()
return model
def get_fake_encoded_ctx(self):
return dict(
ctx_feat=torch.FloatTensor(self.ctx_batch_size, self.model_config.max_ctx_l, self.HiddenSize),
ctx_mask=torch.FloatTensor(self.ctx_batch_size, self.model_config.max_ctx_l),
)
def get_fake_encoded_query(self):
return dict(query_feat=torch.FloatTensor(self.ctx_batch_size, self.HiddenSize))
def _get_ctx_encoding_time(self, video_feat, sub_feat, ctx_mask):
"""Considered two modalities"""
torch.cuda.synchronize()
st_time = time.time()
self.model.cross_encode_context(video_feat, ctx_mask, sub_feat, ctx_mask)
torch.cuda.synchronize()
return time.time() - st_time
def get_ctx_encoding_time(self):
with torch.no_grad():
fake_ctx_inputs = self.cast_dict_inputs_to_device(self.get_fake_ctx_raw_input_st_ed(), self.device)
raw_video = fake_ctx_inputs["video_feat"]
raw_sub = fake_ctx_inputs["sub_feat"]
ctx_mask = fake_ctx_inputs["ctx_mask"]
times = []
for _ in trange(self.N_Runs):
times += [self._get_ctx_encoding_time(raw_video, raw_sub, ctx_mask)]
times = torch.FloatTensor(times)
return dict(avg=float(times.mean()), std=float(times.std()))
def _get_query_encoding_time(self, raw_query, query_mask):
"""Considered two modalities"""
torch.cuda.synchronize()
st_time = time.time()
encoded_query = self.model.encode_input(raw_query, query_mask,
self.model.query_input_proj,
self.model.query_encoder,
self.model.query_pos_embed) # (N, Lq, D)
# video level
video_query, sub_query = \
self.model.get_modularized_queries(encoded_query, query_mask, return_modular_att=False)
# st ed
video_query = self.model.video_query_linear(video_query)
sub_query = self.model.sub_query_linear(sub_query)
torch.cuda.synchronize()
return time.time() - st_time
def get_query_encoding_time(self):
with torch.no_grad():
query_inputs = self.cast_dict_inputs_to_device(self.get_fake_raw_query(), self.device)
raw_query = query_inputs["query_feat"]
query_mask = query_inputs["query_mask"]
times = []
for _ in trange(self.N_Runs):
times += [self._get_query_encoding_time(raw_query, query_mask)]
times = torch.FloatTensor(times)
return dict(avg=float(times.mean()), std=float(times.std()))
def _get_retrieval_time(self, encoded_video_query, encoded_video, ctx_mask):
"""Consider the queries are encoded, Calculate in a single modality then multiply by 2."""
torch.cuda.synchronize()
st_time = time.time()
self.model.get_video_level_scores(encoded_video_query, encoded_video, ctx_mask)
torch.cuda.synchronize()
return (time.time() - st_time) * 2
def get_retrieval_time(self):
with torch.no_grad():
encoded_query = self.cast_dict_inputs_to_device(self.get_fake_encoded_query(), self.device)["query_feat"]
fake_ctx_inputs = self.cast_dict_inputs_to_device(self.get_fake_encoded_ctx(), self.device)
encoded_ctx = fake_ctx_inputs["ctx_feat"]
ctx_mask = fake_ctx_inputs["ctx_mask"]
times = []
for _ in trange(self.N_Runs):
times += [self._get_retrieval_time(encoded_query, encoded_ctx, ctx_mask)]
            times = torch.FloatTensor(times)
return dict(avg=float(times.mean()), std=float(times.std()))
def _get_span_prediction_time(self, query_feat, ctx_feat, ctx_mask):
"""Considered two modalities"""
torch.cuda.synchronize()
st_time = time.time()
similarity = torch.einsum("md,nld->mnl", query_feat, ctx_feat)
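        # Note (added comment): averaging the tensor with itself below is a
        # numerical no-op; it stands in for averaging the video- and
        # sub-stream similarities, which share one fake tensor when profiling.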
similarity = (similarity + similarity) / 2 # (Nq, Nv, L) from query to all videos.
n_q, n_c, l = similarity.shape
similarity = similarity.view(n_q * n_c, 1, l)
st_prob = self.model.merged_st_predictor(similarity).view(n_q, n_c, l) # (Nq, Nv, L)
ed_prob = self.model.merged_ed_predictor(similarity).view(n_q, n_c, l) # (Nq, Nv, L)
st_prob = mask_logits(st_prob, ctx_mask) # (N, L)
ed_prob = mask_logits(ed_prob, ctx_mask)
torch.cuda.synchronize()
return time.time() - st_time
def get_span_prediction_time(self):
with torch.no_grad():
encoded_query = self.cast_dict_inputs_to_device(self.get_fake_encoded_query(), self.device)["query_feat"]
fake_ctx_inputs = self.cast_dict_inputs_to_device(self.get_fake_encoded_ctx(), self.device)
encoded_ctx = fake_ctx_inputs["ctx_feat"]
ctx_mask = fake_ctx_inputs["ctx_mask"]
times = []
for _ in trange(self.N_Runs):
times += [self._get_span_prediction_time(encoded_query, encoded_ctx, ctx_mask)]
times = torch.FloatTensor(times)
return dict(avg=float(times.mean()), std=float(times.std()))
"""
from baselines.profiling.profile_main import ProfileMEE
profile_mee = ProfileMEE(ctx_batch_size=400, query_batch_size=100)
profile_mee.get_ctx_encoding_time()
"""
class ProfileMEE(ProfileBase):
def get_model_config(self):
mee_base_cfg["ctx_mode"] = "video_sub"
mee_base_cfg["text_input_size"] = self.QueryFeatureDim
mee_base_cfg["vid_input_size"] = self.VideoFeatureDim
mee_base_cfg["output_size"] = self.HiddenSize
return mee_base_cfg
def get_model(self):
model = MEE(self.model_config)
model.to(self.device)
model.eval()
return model
def get_fake_raw_ctx(self):
return dict(
vid_feat=torch.FloatTensor(self.ctx_batch_size, self.VideoFeatureDim),
sub_feat=torch.FloatTensor(self.ctx_batch_size, self.QueryFeatureDim)
)
def get_fake_encoded_ctx_query(self):
return dict(
ctx_feat=torch.FloatTensor(self.ctx_batch_size, self.HiddenSize),
query_feat=torch.FloatTensor(self.ctx_batch_size, self.HiddenSize)
)
def _get_ctx_encoding_time(self, vid_feat, sub_feat):
torch.cuda.synchronize()
st_time = time.time()
self.model.video_gu(vid_feat)
self.model.sub_gu(sub_feat)
torch.cuda.synchronize()
return time.time() - st_time
def get_ctx_encoding_time(self):
feat_dict = self.cast_dict_inputs_to_device(self.get_fake_raw_ctx(), self.device)
with torch.no_grad():
times = []
for _ in trange(self.N_Runs):
times += [self._get_ctx_encoding_time(**feat_dict)]
times = torch.FloatTensor(times)
return dict(avg=float(times.mean()), std=float(times.std()))
def _get_query_encoding_time(self, query_feat):
"""Considered 2 modalities"""
torch.cuda.synchronize()
st_time = time.time()
pooled_query = self.model.query_pooling(query_feat) # (N, Dt)
video_query = self.model.video_query_gu(pooled_query)
sub_query = self.model.sub_query_gu(pooled_query)
stream_weights = self.model.moe_fc(pooled_query) # (N, 2)
torch.cuda.synchronize()
return time.time() - st_time
def get_query_encoding_time(self):
raw_query = self.cast_dict_inputs_to_device(self.get_fake_raw_query(), self.device)["query_feat"]
with torch.no_grad():
times = []
for _ in trange(self.N_Runs):
times += [self._get_query_encoding_time(raw_query)]
times = torch.FloatTensor(times)
return dict(avg=float(times.mean()), std=float(times.std()))
def _get_retrieval_time(self, encoded_query, encoded_ctx):
"""Considered 2 modalities"""
torch.cuda.synchronize()
st_time = time.time()
torch.einsum("md,nd->mn", encoded_query, encoded_ctx) # (N, N)
torch.cuda.synchronize()
return (time.time() - st_time) * 2
def get_retrieval_time(self):
model_inputs = self.cast_dict_inputs_to_device(self.get_fake_encoded_ctx_query(), self.device)
encoded_query = model_inputs["ctx_feat"]
encoded_ctx = model_inputs["query_feat"]
with torch.no_grad():
times = []
for _ in trange(self.N_Runs):
times += [self._get_retrieval_time(encoded_query, encoded_ctx)]
times = torch.FloatTensor(times)
return dict(avg=float(times.mean()), std=float(times.std()))
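

# Usage sketch added for symmetry with the examples above (class and method
# names are from this module; batch sizes are illustrative):
"""
from baselines.profiling.profile_main import ProfileCAL
profile_cal = ProfileCAL(ctx_batch_size=400, query_batch_size=100)
profile_cal.get_ctx_encoding_time(model_name="cal")
"""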
class ProfileCAL(ProfileBase):
def get_model_config(self):
cal_base_cfg["ctx_mode"] = "video_sub"
cal_base_cfg["embedding_size"] = self.QueryFeatureDim
cal_base_cfg["visual_input_size"] = self.VideoFeatureDim * 2
cal_base_cfg["textual_input_size"] = self.SubFeatureDim * 2
cal_base_cfg["output_size"] = self.HiddenSize
return cal_base_cfg
def get_model(self):
model = CALWithSub(self.model_config)
model.to(self.device)
model.eval()
return model
def get_fake_raw_ctx(self, model_name="cal"):
"""The features are `*2` since they use both global and local features"""
return dict(
sub_feat=torch.FloatTensor(self.ctx_batch_size, self.AvgProposalPerVideo,
self.AvgClipPerProposal, self.SubFeatureDim * 2),
vid_feat=torch.FloatTensor(self.ctx_batch_size, self.AvgProposalPerVideo,
self.AvgClipPerProposal, self.VideoFeatureDim * 2))
def _get_ctx_encoding_time(self, sub_feat, vid_feat, model_name="cal"):
if model_name == "mcn":
sub_feat = sub_feat.sum(2)
vid_feat = vid_feat.sum(2)
torch.cuda.synchronize()
st_time = time.time()
self.model.moment_encoder(vid_feat, module_name="video")
self.model.moment_encoder(sub_feat, module_name="sub")
torch.cuda.synchronize()
return time.time() - st_time
def get_ctx_encoding_time(self, model_name="cal"):
"""model_name: str, `cal` or `mcn`"""
feat_dict = self.cast_dict_inputs_to_device(
self.get_fake_raw_ctx(model_name=model_name), self.device)
feat_dict["model_name"] = model_name
with torch.no_grad():
times = []
for _ in trange(self.N_Runs):
times += [self._get_ctx_encoding_time(**feat_dict)]
times = torch.FloatTensor(times)
return dict(avg=float(times.mean()), std=float(times.std()))
def _get_query_encoding_time(self, query_feat, query_mask):
torch.cuda.synchronize()
st_time = time.time()
self.model.query_encoder(query_feat, query_mask)
torch.cuda.synchronize()
return time.time() - st_time
def get_query_encoding_time(self):
feat_dict = self.cast_dict_inputs_to_device(self.get_fake_raw_query(), self.device)
with torch.no_grad():
times = []
for _ in trange(self.N_Runs):
times += [self._get_query_encoding_time(**feat_dict)]
times = torch.FloatTensor(times)
return dict(avg=float(times.mean()), std=float(times.std()))
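

# Usage sketch added for symmetry with the examples above (class and method
# names are from this module; batch sizes are illustrative):
"""
from baselines.profiling.profile_main import ProfileExCL
profile_excl = ProfileExCL(ctx_batch_size=400, query_batch_size=400)
profile_excl.get_prediction_time()
"""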
class ProfileExCL(ProfileBase):
def get_model_config(self):
excl_base_cfg["ctx_mode"] = "video_sub"
excl_base_cfg["query_input_size"] = self.QueryFeatureDim
excl_base_cfg["visual_input_size"] = self.VideoFeatureDim
excl_base_cfg["sub_input_size"] = self.SubFeatureDim
excl_base_cfg["output_size"] = self.HiddenSize
return excl_base_cfg
def get_model(self):
model = EXCL(self.model_config)
model.to(self.device)
model.eval()
return model
def get_fake_raw_input(self):
"""The features are `*2` since they use both global and local features"""
return dict(
query_feat=torch.FloatTensor(self.ctx_batch_size, self.AvgWordInQuery, self.QueryFeatureDim),
query_mask=torch.ones((self.ctx_batch_size, self.AvgWordInQuery)),
sub_feat=torch.FloatTensor(self.ctx_batch_size, self.AvgClipPerVideo, self.SubFeatureDim),
sub_mask=torch.ones(self.ctx_batch_size, self.AvgClipPerVideo),
video_feat=torch.FloatTensor(self.ctx_batch_size, self.AvgClipPerVideo, self.VideoFeatureDim),
video_mask=torch.ones(self.ctx_batch_size, self.AvgClipPerVideo),
tef_feat=torch.FloatTensor(self.ctx_batch_size, self.AvgClipPerVideo, 2),
tef_mask=torch.ones(self.ctx_batch_size, self.AvgClipPerVideo),
st_ed_indices=torch.ones(2, 2), # not used.
)
def _get_prediction_time(self, input_dict):
torch.cuda.synchronize()
st_time = time.time()
self.model(**input_dict)
torch.cuda.synchronize()
return time.time() - st_time
def get_prediction_time(self):
"""model_name: str, `cal` or `mcn`"""
feat_dict = self.cast_dict_inputs_to_device(
self.get_fake_raw_input(), self.device)
feat_dict["is_training"] = False
with torch.no_grad():
times = []
for _ in trange(self.N_Runs):
times += [self._get_prediction_time(feat_dict)]
times = torch.FloatTensor(times)
return dict(avg=float(times.mean()), std=float(times.std()))


if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, help="")
parser.add_argument("--ctx_batch_size", type=int, default=400)
parser.add_argument("--query_batch_size", type=int, default=100)
parser.add_argument("--save_dir", type=str, default="baselines/profiling/cache")
args = parser.parse_args()
model = args.model
query_batch_size = args.query_batch_size
ctx_batch_size = args.ctx_batch_size
if model == "mee":
profile_mee = ProfileMEE(ctx_batch_size=ctx_batch_size, query_batch_size=query_batch_size)
# use the 2nd one to report time
profile_mee.get_ctx_encoding_time()
ctx_enc_time = profile_mee.get_ctx_encoding_time()
query_enc_time = profile_mee.get_query_encoding_time()
elif model == "cal":
profile_cal = ProfileCAL(ctx_batch_size=ctx_batch_size, query_batch_size=query_batch_size)
# use the 2nd one to report time
profile_cal.get_ctx_encoding_time()
ctx_enc_time = profile_cal.get_ctx_encoding_time(model_name="cal")
query_enc_time = profile_cal.get_query_encoding_time()
elif model == "mcn":
profile_cal = ProfileCAL(ctx_batch_size=ctx_batch_size, query_batch_size=query_batch_size)
# use the 2nd one to report time
profile_cal.get_ctx_encoding_time()
ctx_enc_time = profile_cal.get_ctx_encoding_time(model_name="mcn")
query_enc_time = profile_cal.get_query_encoding_time()
elif model == "xml":
profile_xml = ProfileXML(ctx_batch_size=ctx_batch_size, query_batch_size=query_batch_size)
# use the 2nd one to report time
profile_xml.get_ctx_encoding_time()
ctx_enc_time = profile_xml.get_ctx_encoding_time()
query_enc_time = profile_xml.get_query_encoding_time()
elif model == "excl":
profile_excl = ProfileExCL(ctx_batch_size=ctx_batch_size, query_batch_size=ctx_batch_size)
# use the 2nd one to report time
profile_excl.get_prediction_time()
ctx_enc_time = profile_excl.get_prediction_time()
query_enc_time = 0
# Calculate the total time as ctx_enc_time * (100 * 1M / ctx_batch_size)
else:
raise NotImplementedError
save_path = os.path.join(args.save_dir, "{}_profile_main.json".format(model))
n_videos = ProfileBase.N_Videos
res = dict(
ctx_enc_time=ctx_enc_time,
ctx_enc_avg_time_all_videos=ctx_enc_time["avg"] * n_videos / ctx_batch_size,
query_enc_time=query_enc_time,
n_videos=n_videos,
ctx_batch_size=ctx_batch_size,
query_batch_size=query_batch_size,
model=model
)
save_json(res, save_path, save_pretty=True)
pprint.pprint(res)


# === python/eskapade/tutorials/esk501_fix_pandas_dataframe.py (from mbaak/Eskapade, Apache-2.0 license) ===
"""Project: Eskapade - A python-based package for data analysis.
Macro: esk501_fix_pandas_dataframe
Created: 2017/04/26
Description:
Macro illustrates how to call FixPandasDataFrame link that gives columns
consistent names and datatypes.
Default settings perform the following clean-up steps on an
input dataframe:
- Fix all column names. Eg. remove punctuation and strange characters,
and convert spaces to underscores.
- Check for various possible nans in the dataset, then make all nans
consistent by turning them into numpy.nan (= float)
- Per column, assess dynamically the most consistent datatype (ignoring
all nans in that column). Eg. bool, int, float, datetime64, string.
- Per column, make the data types of all rows consistent, by using the
identified (or imposed) data type (by default ignoring all nans)
Authors:
KPMG Advanced Analytics & Big Data team, Amstelveen, The Netherlands
Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""

import tempfile
from eskapade import ConfigObject, Chain
from eskapade import core_ops, analysis, data_quality
from eskapade import process_manager
from eskapade.logger import Logger

logger = Logger()
logger.debug('Now parsing configuration file esk501_fix_pandas_dataframe')

#########################################################################################
# --- minimal analysis information
settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'esk501_fix_pandas_dataframe'
settings['version'] = 0

#########################################################################################
# --- Analysis values, settings, helper functions, configuration flags.

# dummy dataframe filled with inconsistent data types per column
tmp = b"""A,B,C,D,E,F,G,H
True,foo,1.0,1,1,1,a,a
False,bar,2.0,2,2,2.5,b,b
nan,3,bal,3,bla,bar,c,1
,nan,NaN,NaN,nan,nan,d,2
,,,,,,,3
1,2,,,,,,,6
"""

f = tempfile.NamedTemporaryFile(delete=False)
f.write(tmp)
f.close()
# file is not immediately deleted because we used delete=False
# used below with f.name

#########################################################################################
# --- now set up the chains and links based on configuration flags

ch = Chain('DataPrep')
# --- 0. pandas read_csv has multiple settings to help reading in of buggy csv's.
# o The option error_bad_lines=False skips lines with too few or too many values
# o The option encoding='latin1' interprets most non-standard characters
read_data = analysis.ReadToDf(key='vrh',
reader='csv',
path=f.name,
error_bad_lines=False,
encoding='latin1')
ch.add(read_data)

# --- 1. standard setting:
# o convert all nans to np.nan (= float)
# o convert all rows in a column to most occuring datatype in that column
fixer = data_quality.FixPandasDataFrame(name='fixer1')
fixer.read_key = 'vrh'
fixer.store_key = 'vrh_fix1'
ch.add(fixer)

# --- 2. force certain columns to specified datatype
fixer = data_quality.FixPandasDataFrame(name='fixer2')
fixer.read_key = 'vrh'
fixer.store_key = 'vrh_fix2'
fixer.var_dtype = {'B': int, 'C': str}
ch.add(fixer)

# --- 3. convert all nans to data type consistent with rest of column
fixer = data_quality.FixPandasDataFrame(name='fixer3')
fixer.read_key = 'vrh'
fixer.store_key = 'vrh_fix3'
fixer.convert_inconsistent_nans = True
# set a specific nan (GREPME) for a given column (G)
fixer.var_nan = {'G': 'GREPME'}
ch.add(fixer)

# --- 4. compare results
pds = core_ops.PrintDs(name='pds2')
pds.keys = ['vrh', 'vrh_fix1', 'vrh_fix2', 'vrh_fix3']
ch.add(pds)

# --- 5. write out fixed dataframe - turned off in this example
# The dataframe will be saved with the numpy writer which will
# restore the dtypes when reloading the dataframe
writedata = analysis.WriteFromDf(name='writer',
key='vrh_fix1',
path='tmp.npz',
)
# ch.add(writedata)

#########################################################################################

logger.debug('Done parsing configuration file esk501_fix_pandas_dataframe')


# === microdf/inequality.py (from Peter-Metz/microdf, MIT license) ===
import numpy as np
import microdf as mdf


def gini(df, col, w=None, negatives=None):
    """Calculates Gini index.

    :param df: DataFrame.
:param col: Name of column in df representing value.
:param w: Column representing weight in df.
:param negatives: An optional string indicating how to treat negative
values of x:
'zero' replaces negative values with zeroes.
'shift' subtracts the minimum value from all values of x,
when this minimum is negative. That is, it adds the absolute
minimum value.
Defaults to None, which leaves negative values as they are.
:returns: A float, the Gini index.
"""
# Requires float numpy arrays (not pandas Series or lists) to work.
x = np.array(df[col]).astype("float")
if negatives == "zero":
x[x < 0] = 0
if negatives == "shift" and np.amin(x) < 0:
x -= np.amin(x)
if w is not None:
w = np.array(df[w]).astype("float")
sorted_indices = np.argsort(x)
sorted_x = x[sorted_indices]
sorted_w = w[sorted_indices]
cumw = np.cumsum(sorted_w)
cumxw = np.cumsum(sorted_x * sorted_w)
return np.sum(cumxw[1:] * cumw[:-1] - cumxw[:-1] * cumw[1:]) / (
cumxw[-1] * cumw[-1]
)
else:
sorted_x = np.sort(x)
n = len(x)
cumxw = np.cumsum(sorted_x)
# The above formula, with all weights equal to 1 simplifies to:
return (n + 1 - 2 * np.sum(cumxw) / cumxw[-1]) / n


def top_x_pct_share(df, col, top_x_pct, w=None):
    """Calculates top x% share.

    :param df: DataFrame.
:param col: Name of column in df representing value.
:param top_x_pct: Decimal between 0 and 1 of the top %, e.g. 0.1, 0.001.
:param w: Column representing weight in df.
:returns: The share of w-weighted val held by the top x%.
"""
threshold = mdf.weighted_quantile(df, col, w, 1 - top_x_pct)
top_x_pct_sum = mdf.weighted_sum(df[df[col] >= threshold], col, w)
total_sum = mdf.weighted_sum(df, col, w)
return top_x_pct_sum / total_sum


def bottom_x_pct_share(df, col, bottom_x_pct, w=None):
    """Calculates bottom x% share.

    :param df: DataFrame.
:param col: Name of column in df representing value.
:param bottom_x_pct: Decimal between 0 and 1 of the top %, e.g. 0.1, 0.001.
:param w: Column representing weight in df.
:returns: The share of w-weighted val held by the bottom x%.
"""
    return 1 - top_x_pct_share(df, col, 1 - bottom_x_pct, w)


def bottom_50_pct_share(df, col, w=None):
    """Calculates bottom 50% share.

    :param df: DataFrame.
:param col: Name of column in df representing value.
:param w: Column representing weight in df.
:returns: The share of w-weighted val held by the bottom 50%.
"""
return bottom_x_pct_share(df, col, 0.5, w)


def top_50_pct_share(df, col, w=None):
    """Calculates top 50% share.

    :param df: DataFrame.
:param col: Name of column in df representing value.
:param w: Column representing weight in df.
:returns: The share of w-weighted val held by the top 50%.
"""
return top_x_pct_share(df, col, 0.5, w)


def top_10_pct_share(df, col, w=None):
    """Calculates top 10% share.

    :param df: DataFrame.
:param col: Name of column in df representing value.
:param w: Column representing weight in df.
:returns: The share of w-weighted val held by the top 10%.
"""
return top_x_pct_share(df, col, 0.1, w)


def top_1_pct_share(df, col, w=None):
    """Calculates top 1% share.

    :param df: DataFrame.
:param col: Name of column in df representing value.
:param w: Column representing weight in df.
:returns: The share of w-weighted val held by the top 1%.
"""
return top_x_pct_share(df, col, 0.01, w)


def top_0_1_pct_share(df, col, w=None):
    """Calculates top 0.1% share.

    :param df: DataFrame.
:param col: Name of column in df representing value.
:param w: Column representing weight in df.
:returns: The share of w-weighted val held by the top 0.1%.
"""
return top_x_pct_share(df, col, 0.001, w)


def t10_b50(df, col, w=None):
    """Calculates ratio between the top 10% and bottom 50% shares.

    :param df: DataFrame.
:param col: Name of column in df representing value.
:param w: Column representing weight in df.
:returns: The share of w-weighted val held by the top 10% divided by
the share of w-weighted val held by the bottom 50%.
"""
return top_10_pct_share(df, col, w) / bottom_50_pct_share(df, col, w)
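

# A small self-check added for illustration (not part of the original
# module): equal values give a Gini index of 0, and the pair [0, 1] gives
# 0.5, with or without weights.
if __name__ == "__main__":
    import pandas as pd

    equal = pd.DataFrame({"x": [1.0, 1.0, 1.0, 1.0]})
    print(gini(equal, "x"))        # 0.0: perfect equality
    pair = pd.DataFrame({"x": [0.0, 1.0], "w": [1.0, 1.0]})
    print(gini(pair, "x"))         # 0.5, unweighted
    print(gini(pair, "x", w="w"))  # 0.5, equal weights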


# === web/adoption_stories/adopteeStories/models.py (from CuriousG102/Chinese-Adoption, MIT license) ===
from django.db import models
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from embed_video.fields import EmbedYoutubeField, EmbedSoundcloudField
from .custom_model_fields import RestrictedImageField
from .default_settings import ADOPTEE_STORIES_CONFIG as config


class NamesToStringMixin():
NAME_ATTRIBUTES = ['english_name', 'pinyin_name', 'chinese_name']
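
    # The property below joins whichever of the three names are set, e.g. an
    # english_name "Alice" with chinese_name "艾丽丝" (illustrative values)
    # yields "Alice 艾丽丝".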
@property
def name(self):
s = []
for name in self.NAME_ATTRIBUTES:
name_string = getattr(self, name, None)
if name_string:
s.append(name_string)
return ' '.join(s)


class Adoptee(models.Model, NamesToStringMixin):
    # english_name must have a value || (pinyin_name && chinese_name)
    # must have a value; enforced at the form level
english_name = models.CharField(max_length=150, null=True, blank=True,
# Translators: Name of a field in the admin page
db_index=True, verbose_name=_('English Name'))
pinyin_name = models.CharField(max_length=150, null=True, blank=True,
# Translators: Name of a field in the admin page
db_index=True, verbose_name=_('Pinyin Name'))
chinese_name = models.CharField(max_length=50, null=True, blank=True,
# Translators: Name of a field in the admin page
db_index=True, verbose_name=_('Chinese Name'))
photo_front_story = RestrictedImageField(maximum_size=config['PHOTO_FRONT_STORY_MAX_SIZE'],
required_width=config['PHOTO_FRONT_STORY_WIDTH'],
required_height=config['PHOTO_FRONT_STORY_HEIGHT'],
required_formats=config['FORMATS'],
null=True, blank=True,
# Translators: Name of a field in the admin page
verbose_name=_('Photo Front Story'))
# Translators: Name of a field in the admin page
front_story = models.ForeignKey('StoryTeller', null=True, verbose_name=_('Front Story'), blank=True,
limit_choices_to={'approved': True})
# Translators: Name of a field in the admin page
created = models.DateTimeField(auto_now_add=True, verbose_name=_('Created At'))
# Translators: Name of a field in the admin page
updated = models.DateTimeField(auto_now=True, verbose_name=_('Updated At'))
class Meta:
ordering = ['-created']
# Translators: Name of a field in the admin page
verbose_name = _('Adoptee')
# Translators: Name of a field in the admin page
verbose_name_plural = _('Adoptees')
def __str__(self):
string = ' '.join([force_text(self._meta.verbose_name), self.name])
return string


class MultimediaItem(models.Model):
    # english_caption || chinese_caption must have a value; enforced at the
    # form level
english_caption = models.CharField(max_length=200, null=True, blank=True,
# Translators: Name of a field in the admin page
verbose_name=_('English Caption'))
chinese_caption = models.CharField(max_length=200, null=True, blank=True,
# Translators: Name of a field in the admin page
verbose_name=_('Chinese Caption'))
# Translators: Name of a field in the admin page
approved = models.BooleanField(default=False, verbose_name=_('Approved'))
# Translators: Name of a field in the admin page
story_teller = models.ForeignKey('StoryTeller', null=True, verbose_name=_('Story Teller'))
# Translators: Name of a field in the admin pages
created = models.DateTimeField(auto_now_add=True, verbose_name=_('Created At'))
# Translators: Name of a field in the admin page
updated = models.DateTimeField(auto_now=True, verbose_name=_('Updated At'))
class Meta:
verbose_name = _('Multimedia Item')
abstract = True
ordering = ['-created']
def __str__(self):
return ' '.join([force_text(self._meta.verbose_name), self.story_teller.name, force_text(self.created)])


class Audio(MultimediaItem):
# Translators: name of field in the admin page
audio = EmbedSoundcloudField(verbose_name=_('Audio Soundcloud Embed'))
class Meta(MultimediaItem.Meta):
abstract = False
# Translators: Name of a field in the admin page
verbose_name = _('Audio item')
# Translators: Name of a field in the admin page
verbose_name_plural = _('Audio items')


class Video(MultimediaItem):
# Translators: name of field in the admin page
video = EmbedYoutubeField(verbose_name=_('Video Youtube Embed'))
class Meta(MultimediaItem.Meta):
abstract = False
# Translators: Name of a field in the admin page
verbose_name = _('Video item')
# Translators: Name of a field in the admin page
verbose_name_plural = _('Video items')


class Photo(MultimediaItem):
# file size and type checking added on form level
# Translators: Name of a field in the admin page
photo_file = models.ImageField(verbose_name=_('Photo File'))
class Meta(MultimediaItem.Meta):
abstract = False
# Translators: Name of a field in the admin page
verbose_name = _('Photo')
# Translators: Name of a field in the admin page
verbose_name_plural = _('Photos')


class RelationshipCategory(models.Model, NamesToStringMixin):
    # At creation, english_name or chinese_name must have a value. To be
    # published, both must have a value; until then, stories with an
    # untranslated category should only show up on the English/Chinese side.
# Translators: Name of a field in the admin page
english_name = models.CharField(max_length=30, null=True, verbose_name=_('English Name'),
blank=True)
# Translators: Name of a field in the admin page
chinese_name = models.CharField(max_length=30, null=True, verbose_name=_('Chinese Name'),
blank=True)
# Translators: Name of a field in the admin page
approved = models.BooleanField(default=False, verbose_name=_('Approved'))
# Translators: Name of a field in the admin page
created = models.DateTimeField(auto_now_add=True,
verbose_name=_('Created At'))
# Translators: Name of a field in the admin page
updated = models.DateTimeField(auto_now=True,
verbose_name=_('Updated At'))
# Translators: Label for the number determining the order of the relationship category for admins
order = models.IntegerField(null=True, blank=True, verbose_name=_('Position of relationship category'))
class Meta:
ordering = ['order']
# Translators: Name of a field in the admin page
verbose_name = _('Relationship Category')
# Translators: Name of a field in the admin page
verbose_name_plural = _('Relationship Categories')
def __str__(self):
string = ' '.join([force_text(self._meta.verbose_name), self.name])
return string


class StoryTeller(models.Model, NamesToStringMixin):
relationship_to_story = models.ForeignKey('RelationshipCategory',
# Translators: Name of a field in the admin page
verbose_name=_('Relationship to Story'))
# One version of story text because I don't want adoptee's stories to be different between who is viewing it
# Translators: Name of a field in the admin page
story_text = models.TextField(verbose_name=_('Story Text'))
# Translators: Name of a field in the admin page
email = models.EmailField(verbose_name=_('Email'))
# Translators: Name of a field in the admin page
approved = models.BooleanField(default=False, verbose_name=_('Approved'))
related_adoptee = models.ForeignKey('Adoptee', related_name='stories',
# Translators: Name of a field in the admin page
verbose_name=_('Related Adoptee'))
# english_name must have a value || (pinyin_name && chinese_name)
# must have a value implemented form level
english_name = models.CharField(max_length=150, null=True,
# Translators: Name of a field in the admin page
verbose_name=_('English Name'),
blank=True)
chinese_name = models.CharField(max_length=50, null=True,
# Translators: Name of a field in the admin page
verbose_name=_('Chinese Name'),
blank=True)
pinyin_name = models.CharField(max_length=150, null=True,
# Translators: Name of a field in the admin page
verbose_name=_('Pinyin Name'),
blank=True)
created = models.DateTimeField(auto_now_add=True,
# Translators: Name of a field in the admin page
verbose_name=_('Created At'))
updated = models.DateTimeField(auto_now=True,
# Translators: Name of a field in the admin page
verbose_name=_('Updated At'))
class Meta:
ordering = ['-updated', '-created']
# Translators: Name of a field in the admin page
verbose_name = _('Story Teller')
# Translators: Name of a field in the admin page
verbose_name_plural = _('Story Tellers')
def __str__(self):
string = ' '.join([force_text(self._meta.verbose_name), self.name])
return string


class AboutPerson(models.Model, NamesToStringMixin):
photo = RestrictedImageField(maximum_size=config['PHOTO_FRONT_STORY_MAX_SIZE'],
required_height=config['ABOUT_PHOTO_HEIGHT'],
required_width=config['ABOUT_PHOTO_WIDTH'],
required_formats=config['FORMATS'],
verbose_name=_('Picture of person on about page'))
english_caption = models.CharField(max_length=200, null=True, blank=True,
# Translators: Name of a field in the admin page
verbose_name=_('English Caption'))
chinese_caption = models.CharField(max_length=200, null=True, blank=True,
# Translators: Name of a field in the admin page
verbose_name=_('Chinese Caption'))
about_text_english = models.TextField(verbose_name=_('About text for that person in English.'),
help_text=_('Should include paragraph markup:'
'e.g. <p>This is a paragraph</p>'
'<p>This is a different paragraph</p>'),
null=True, blank=True)
about_text_chinese = models.TextField(verbose_name=_('About text for that person in Chinese.'),
help_text=_('Should include paragraph markup:'
'e.g. <p>This is a paragraph</p>'
'<p>This is a different paragraph</p>'),
null=True, blank=True)
published = models.BooleanField(verbose_name=_('Published status'))
english_name = models.CharField(max_length=150, null=True,
# Translators: Name of a field in the admin page
verbose_name=_('English Name'),
blank=True)
chinese_name = models.CharField(max_length=50, null=True,
# Translators: Name of a field in the admin page
verbose_name=_('Chinese Name'),
blank=True)
pinyin_name = models.CharField(max_length=150, null=True,
# Translators: Name of a field in the admin page
verbose_name=_('Pinyin Name'),
blank=True)
order = models.IntegerField(verbose_name=_('Position of person in about page'))
class Meta:
ordering = ['order']
verbose_name = _('About Person')
verbose_name_plural = _('About People')
def __str__(self):
string = ' '.join([force_text(self._meta.verbose_name), self.name])
return string


# === core/utils/gpu_check.py (from andregri/keras-segmentation, Apache-2.0 license) ===
import tensorflow as tf


def check_gpu():
n_gpus = len(tf.config.experimental.list_physical_devices('GPU'))
print("Num GPUs Available: ", n_gpus)


check_gpu()


# === pinakes/common/auth/keycloak_django/tests/test_permission_checks.py (from Alex-Izquierdo/pinakes, Apache-2.0 license) ===
from unittest import mock
from pinakes.common.auth.keycloak.models import (
AuthzPermission,
AuthzResource,
)
from pinakes.common.auth.keycloak_django.permissions import (
check_wildcard_permission,
check_resource_permission,
check_object_permission,
get_permitted_resources,
)
@mock.patch("pinakes.common.auth.keycloak_django.permissions.get_authz_client")
def test_check_wildcard_permission(get_authz_client):
client = get_authz_client.return_value
client.check_permissions.return_value = True
result = check_wildcard_permission("myresource", "read", mock.Mock())
assert result is True
client.check_permissions.assert_called_once_with(
AuthzPermission("myresource:all", "myresource:read")
)
@mock.patch("pinakes.common.auth.keycloak_django.permissions.get_authz_client")
def test_check_resource_permission(get_authz_client):
client = get_authz_client.return_value
client.check_permissions.return_value = True
result = check_resource_permission(
"myresource",
"myresource:1",
"read",
mock.Mock(),
)
assert result is True
client.check_permissions.assert_called_once_with(
[
AuthzPermission("myresource:all", "myresource:read"),
AuthzPermission("myresource:1", "myresource:read"),
]
)


@mock.patch(
"pinakes.common.auth.keycloak_django."
"permissions.check_wildcard_permission",
return_value=False,
)
@mock.patch(
"pinakes.common.auth.keycloak_django"
".permissions.check_resource_permission",
return_value=True,
)
def test_check_object_permission_exists(
check_resource_permission, check_wildcard_permission
):
obj = mock.Mock()
obj.keycloak_id = "598802c2-6266-40f0-9558-142e2cb0d98e"
obj.keycloak_type.return_value = "myresource"
obj.keycloak_name.return_value = "myresource:1"
request = mock.Mock()
assert check_object_permission(obj, "read", request) is True
check_resource_permission.assert_called_once_with(
"myresource", "myresource:1", "read", request
)
check_wildcard_permission.assert_not_called()


@mock.patch(
"pinakes.common.auth.keycloak_django"
".permissions.check_wildcard_permission",
return_value=True,
)
@mock.patch(
"pinakes.common.auth.keycloak_django"
".permissions.check_resource_permission",
return_value=False,
)
def test_check_object_permission_not_exists(
check_resource_permission, check_wildcard_permission
):
obj = mock.Mock()
obj.keycloak_id = None
obj.keycloak_type.return_value = "myresource"
request = mock.Mock()
assert check_object_permission(obj, "read", request) is True
check_wildcard_permission.assert_called_once_with(
"myresource", "read", request
)
check_resource_permission.assert_not_called()
@mock.patch("pinakes.common.auth.keycloak_django.permissions.get_authz_client")
def test_get_permitted_resources_empty(get_authz_client):
client = get_authz_client.return_value
client.get_permissions.return_value = []
result = get_permitted_resources("myresource", "read", mock.Mock())
assert result.is_wildcard is False
assert result.items == []
client.get_permissions.assert_called_once_with(
AuthzPermission(scope="myresource:read")
)
@mock.patch("pinakes.common.auth.keycloak_django.permissions.get_authz_client")
def test_get_permitted_resources_wildcard(get_authz_client):
client = get_authz_client.return_value
client.get_permissions.return_value = [
AuthzResource(rsid="0", rsname="myresource:all"),
AuthzResource(rsid="1", rsname="myresource:1"),
]
result = get_permitted_resources("myresource", "read", mock.Mock())
assert result.is_wildcard is True
assert result.items == ["1"]
client.get_permissions.assert_called_once_with(
AuthzPermission(scope="myresource:read")
)
@mock.patch("pinakes.common.auth.keycloak_django.permissions.get_authz_client")
def test_get_permitted_resources(get_authz_client):
client = get_authz_client.return_value
client.get_permissions.return_value = [
AuthzResource(rsid="1", rsname="myresource:1"),
AuthzResource(rsid="2", rsname="myresource:2"),
]
result = get_permitted_resources("myresource", "read", mock.Mock())
assert result.is_wildcard is False
assert result.items == ["1", "2"]
client.get_permissions.assert_called_once_with(
AuthzPermission(scope="myresource:read")
)


# === apps/misc/context_processors.py (from Thom03/edms-django, MIT license) ===
from django.utils.translation import get_language


def django_settings(request):
return {
"LANGUAGE": get_language(),
}
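
# Usage note (added): a context processor like this is enabled by listing its
# dotted path (presumably "apps.misc.context_processors.django_settings") in
# TEMPLATES[0]["OPTIONS"]["context_processors"] in settings.py, which makes
# {{ LANGUAGE }} available in every template.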


# === server.py (from Arztpraxis/uno-server, MIT license) ===
from json import JSONEncoder
from random import choice, sample
from threading import Thread, Timer, Lock
from types import SimpleNamespace
from os import execv
from sys import argv, executable
from optparse import OptionParser
from shlex import split

# Networking
from wsgiref.simple_server import make_server
from ws4py.server.wsgirefserver import WebSocketWSGIRequestHandler
from highway import ServerWSGIApplication, WSGIServer
from highway import Server, Route

# Utilities built into highway
from highway import logging
from highway.utils import capture_trace

# Configuration utility
from Meh import Config, Option, ExceptionInConfigError

from cards import ALL_CARDS, REGULAR_CARDS
from cards import ROTATE, BLOCK, TAKE_TWO, TAKE_FOUR, PICK_COLOR
from cards import CardEncoder, can_play
from utils import broadcast

taken_names = []
lobbies = {}

CHEAT_PARSER = OptionParser()
CHEAT_PARSER.add_option("-f", "--face", action="store", type="int",
dest="face", default=0)
CHEAT_PARSER.add_option("-c", "--color", action="store", type="int",
dest="color", default=None)
CHEAT_PARSER.add_option("-a", "--amount", action="store", type="int",
dest="amount", default=1)
CHEAT_PARSER.add_option("-p", "--player", action="store", type="string",
dest="player", default=None)


def broadcast_to_resting(data, route, json_encoder=None):
for user in [user for user in server.manager.websockets if not user.lobby]:
user.send(data, route, json_encoder=json_encoder)


# Meant to be called from the REPL to troll
def give_cards(count, player_name):
player = find_player(player_name)
if player != None:
        # Delegate to the running game; Lobby itself defines no give_cards
        # (see Uno.give_cards). Only valid while a game is in progress.
        player.lobby.game.give_cards(count, player)


def find_player(player_name):
for lobby in lobbies:
for player in lobbies[lobby].players:
if player_name == player.name:
return player
return None


class LobbyEncoder(JSONEncoder):
def default(self, obj):
if isinstance(obj, Lobby):
return {
"host" : obj.host.name,
"playerCount" : obj.player_count,
"playing" : obj.playing
}
return JSONEncoder.default(self, obj)


class UserEncoder(JSONEncoder):
def default(self, obj):
if isinstance(obj, User):
return obj.name
return JSONEncoder.default(self, obj)
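

# Note (added): these encoders are passed to highway's send()/broadcast()
# via the json_encoder= keyword throughout this module, so Lobby and User
# objects serialize to JSON-safe values.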


class Game:
def __init__(self, lobby, debug=False):
self.lobby = lobby
self.debug = debug
def player_leave(self, player):
pass
def stop(self):
pass


class Uno(Game):
LEFT = 1
RIGHT = 2
def __init__(self, lobby, turn_time=20.0, debug=False):
super().__init__(lobby, debug=debug)
self._draw_card_stack = []
# First card on the stack is never a special card
self.card_stack = [choice(REGULAR_CARDS)]
self.direction = Uno.RIGHT
self.turn_time = turn_time
for player in lobby.players:
player.games.uno = SimpleNamespace()
player.games.uno.turn_over = True
player.games.uno.has_drawn_card = False
player.games.uno.cards = sample(ALL_CARDS, 7)
player.send(player.games.uno.cards, "uno_give_card",
json_encoder=CardEncoder)
self.playing_player = lobby.players[0]
self.playing_player.games.uno.turn_over = False
# Prevent race conditions if player draws or plays too quickly in
# succession
self.play_card_lock = Lock()
self.draw_card_lock = Lock()
self.turn_timer = None
# Send the first card on the stack to all players
broadcast(self.card_stack[0], "uno_card_stack", lobby.players,
json_encoder=CardEncoder)
# Send whos turn it is to all players
broadcast(self.playing_player, "uno_turn", lobby.players,
json_encoder=UserEncoder)
self.reset_turn_timer()
def reset_turn_timer(self):
if self.turn_timer != None:
self.turn_timer.cancel()
self.turn_timer = Timer(self.turn_time,
lambda: self.end_turn(time_expired=True))
self.turn_timer.start()
@property
def draw_card_stack(self):
# Introduce a bit of randomness (does not respect card frequency)
while len(self._draw_card_stack) < 30:
self._draw_card_stack.append(choice(ALL_CARDS))
return self._draw_card_stack
def draw_card_from_stack(self):
# Fetch from property to keep the stack filled
card = self.draw_card_stack[0]
# Delete reference from stack list
del self._draw_card_stack[0]
return card
# For random cards
def give_cards(self, count, player):
cards = []
for i in range(count):
cards.append(self.draw_card_from_stack())
# Save cards to player deck server-side
player.games.uno.cards += cards
# Send client cards
player.send(cards, "uno_give_card", json_encoder=CardEncoder)
# For specific cards (cheating mainly)
def give_card(self, face, color, player):
for card in ALL_CARDS:
if card.face == face and card.color == color:
# Save card to player deck server-side
player.games.uno.cards.append(card)
# Send the card to client
player.send([card], "uno_give_card",
json_encoder=CardEncoder)
return True
return False
def change_direction(self):
if self.direction == Uno.LEFT:
self.direction = Uno.RIGHT
elif self.direction == Uno.RIGHT:
self.direction = Uno.LEFT
else:
# Unexpected direction -> Direction is right
self.direction = Uno.RIGHT
broadcast(self.direction, "uno_direction", self.lobby.players)
if self.debug:
logging.info("Direction changed to '%s'" %
("left" if self.direction == Uno.LEFT else "right"))
def get_next_player(self, player_inc=1):
players = self.lobby.players
player_index = players.index(self.playing_player)
if self.direction == Uno.LEFT:
next_player_overflowing_index = player_index - player_inc
elif self.direction == Uno.RIGHT:
next_player_overflowing_index = player_index + player_inc
else:
# Unexpected direction?
# Repeating turn
next_player_overflowing_index = player_index
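        # Added note: the modulo wraps the index around the table. With four
        # players and direction RIGHT, index 3 advances to (3 + 1) % 4 = 0,
        # i.e. back to the first player in the list.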
return players[next_player_overflowing_index % len(players)]
@property
def next_player(self):
return self.get_next_player()
def end_turn(self, player_inc=1, time_expired=False):
next_player = self.get_next_player(player_inc)
if self.debug:
if time_expired:
logging.info("Turn time of '%i' expired" % self.turn_time)
logging.info("Next player: '%s'" % next_player)
self.playing_player.games.uno.turn_over = True
self.playing_player.games.uno.has_drawn_card = False
next_player.games.uno.turn_over = False
self.playing_player = next_player
self.reset_turn_timer()
broadcast(self.playing_player, "uno_turn", self.lobby.players,
json_encoder=UserEncoder)
def play_card(self, card_id, player):
self.play_card_lock.acquire()
successful = False
# If it's the turn of the player who wants to play a card
if player == self.playing_player:
# Is the card_id valid?
if card_id in range(len(player.games.uno.cards)):
# Acquire the card
card = player.games.uno.cards[card_id]
if self.debug:
logging.info("'%s' played: %s" % (player, card))
logging.info("Cards of '%s': %s" % (player,
player.games.uno.cards))
# Does the played card fit on top of the card stack?
if self.card_stack[-1].can_play(card):
self.card_stack.append(card)
# Send the played card to all players
broadcast(card, "uno_card_stack", self.lobby.players,
json_encoder=CardEncoder)
# Change direction
if card.face == ROTATE:
                        # Only two players -> Next turn, same player
if len(self.lobby.players) != 2:
self.change_direction()
self.end_turn()
# Turn goes on if only two players are playing
# Player can draw another card if needed
else:
player.games.uno.has_drawn_card = False
# Skip player
elif card.face == BLOCK:
if len(self.lobby.players) != 2:
self.end_turn(player_inc=2)
# Turn goes on if only two players are playing
# Player can draw another card if needed
else:
player.games.uno.has_drawn_card = False
# Take two cards
elif card.face == TAKE_TWO:
self.give_cards(2, self.next_player)
self.end_turn()
# Take four cards
elif card.face == TAKE_FOUR:
self.give_cards(4, self.next_player)
# Turn does not end
# End turn only if the card does not require another card
elif card.face != PICK_COLOR:
self.end_turn()
# Remove the card from the players deck
del player.games.uno.cards[card_id]
broadcast({
"player" : player,
"count" : len(player.games.uno.cards)
}, "uno_card_count",
self.lobby.players, exclude=player,
json_encoder=UserEncoder)
successful = True
else:
if self.debug:
logging.warning("Card does not fit on top of stack. "
"Is the client desynchronized? (player: '%s')" %
player)
# If player has no cards left
if len(player.games.uno.cards) == 0:
broadcast(player.name, "uno_win", self.lobby.players)
self.lobby.stop()
self.play_card_lock.release()
player.send(successful, "uno_play_card")
def draw_card(self, player):
self.draw_card_lock.acquire()
successful = False
# If it's the turn of player who wants to play a card and
# he hasn't drawn a card this turn yet and
# he has no card that fits the top of the stack
if player == self.playing_player and \
not player.games.uno.has_drawn_card and \
not can_play(player.games.uno.cards, self.card_stack[-1]):
# Give player 1 card
self.give_cards(1, player)
# Can he play now? (could be optimized)
# No -> End turn
if not can_play(player.games.uno.cards, self.card_stack[-1]):
self.end_turn()
# Yes -> Can't draw any more cards
else:
player.games.uno.has_drawn_card = True
successful = True
if self.debug:
logging.info("Player '%s' drew card '%s'" % (player,
player.games.uno.cards[-1]))
self.draw_card_lock.release()
player.send(successful, "uno_draw_card")
    # If client desynchronizes -> Should never happen but ¯\_(ツ)_/¯
def sync(self, player):
player.send(player.games.uno.cards, "uno_sync",
json_encoder=CardEncoder)
def stop(self):
self.turn_timer.cancel()
for player in self.lobby.players:
player.games.uno = None
def player_leave(self, player):
if player == self.playing_player:
self.end_turn(player_inc=1)
class Lobby:
def __init__(self, name, host):
self.name = name
self.host = host
self.players = [host]
self.playing = False
self.game = None
if config.lobby_debug:
logging.info("Lobby '%s' created by '%s'" % (self, self.host))
@property
def player_count(self):
return len(self.players)
def join(self, player):
successful = False
if player not in self.players and not self.playing:
player.lobby = self
# Announce new player
broadcast(player.name, "lobby_user_join", self.players)
self.players.append(player)
# Players currently in lobby (including you)
player.send(self.players, "lobby_players",
json_encoder=UserEncoder)
# If host has changed since lobby_list
player.send(self.host.name, "lobby_host")
successful = True
if config.lobby_debug:
logging.info("Player '%s' joined lobby '%s'" % (player,
self))
player.send(successful, "lobby_join")
def leave(self, player):
successful = False
if player in self.players:
# If game is currently being played
if self.playing:
# Just to be sure
if self.game != None:
# Invoke player leave hook *before* removing player from
# self.players
self.game.player_leave(player)
# Leave doesn't block, this could kick an unrelated player by
# accident
player_index = self.players.index(player)
player.lobby = None
del self.players[player_index]
# Broadcast that a player has left
broadcast(player_index, "lobby_user_leave", self.players)
successful = True
if self.playing:
            # Game stops when only one player (or none) is left
if self.player_count <= 1:
lobbies[self.name].stop()
if config.lobby_debug:
logging.info("Too few players in '%s'. Stopping game..." %
self)
# No players left in lobby -> delete Lobby
if self.player_count == 0:
lobbies[self.name].stop()
del lobbies[self.name]
if config.lobby_debug:
logging.info("Lobby '%s' is empty. Deleting..." %
self)
# Still players left
# Host left -> Random player becomes host
elif player == self.host:
self.host = choice(self.players)
broadcast(self.host.name, "lobby_host", self.players)
if config.lobby_debug:
logging.info("Lobby '%s' has new host '%s'" % (self,
self.host))
if config.lobby_debug:
logging.info("Player '%s' left lobby '%s'" % (player,
self))
player.send(successful, "lobby_leave")
def kick(self, player_to_be_kicked, issuing_player):
successful = False
if issuing_player == self.host:
for player in self.players:
if player.name == player_to_be_kicked:
self.leave(player)
successful = True
break
issuing_player.send(successful, "lobby_kick")
def start(self, player):
successful = False
if player == self.host and not self.playing and self.player_count >= 2:
self.playing = True
broadcast(True, "lobby_playing", self.players)
            # Game possibly replaceable in the future
self.game = Uno(self, debug=config.game_debug)
successful = True
if config.lobby_debug:
logging.info("Game '%s' started in lobby '%s'" % (self.game,
self))
player.send(successful, "lobby_start")
def stop(self, player=None):
if player != None:
successful = False
if player == self.host and self.playing:
self._stop()
successful = True
player.send(successful, "lobby_stop")
else:
self._stop()
def _stop(self):
if self.playing:
self.playing = False
# "Deallocation" and user games namespace cleanup
self.game.stop()
self.game = None
broadcast(False, "lobby_playing", self.players)
if config.lobby_debug:
logging.info("Lobby '%s' stopped" % self)
def chat_message_received(self, message, player):
successful = True
forward_message = True
if message.startswith("/debug"):
forward_message = False
if player.in_game(Uno):
options, args = CHEAT_PARSER.parse_args(
split(message[len("/debug") + 1:]))
if options.player == None:
player_ = player
else:
player_ = find_player(options.player)
if player_ == None:
successful = False
else:
for _ in range(options.amount):
self.game.give_card(options.face,
options.color, player_)
if forward_message:
broadcast({"player" : player, "message" : message},
"lobby_chat_message", self.players, json_encoder=UserEncoder)
# Nothing can go wrong (yet)
player.send(successful, "lobby_chat")
def __eq__(self, other):
return type(other) is Lobby and other.name == self.name
def __str__(self):
return self.name
class User(Server):
def __init__(self, sock, routes, debug=False):
super().__init__(sock, routes, debug=debug)
self.name = None
self.lobby = None
self.wins = 0
self.games = SimpleNamespace()
@property
def logged_in(self):
return self.name != None
def in_game(self, game):
if self.lobby != None:
return type(self.lobby.game) is game
return False
def closed(self, code, reason):
# Leave the lobby
if self.lobby != None:
self.lobby.leave(self)
# Free up taken user name
if self.name != None:
del taken_names[taken_names.index(self.name)]
if type(reason) is bytes:
reason = reason.decode()
if self.logged_in:
logging.info("User '%s' disconnected ('%s': %d)" % (self.name,
reason, code))
else:
logging.info("Unauthenticated user disconnected. ('%s': '%d')" % (
reason, code))
def __str__(self):
return self.name if self.name else ""
class Login(Route):
def run(self, data, handler):
successful = False
if type(data) is str:
if not data in taken_names:
taken_names.append(data)
handler.name = data
successful = True
handler.send(successful, "login")
class LobbyList(Route):
def run(self, data, handler):
handler.send(lobbies, "lobby_list", json_encoder=LobbyEncoder)
class LobbyCreate(Route):
def run(self, data, handler):
successful = False
if handler.logged_in:
if type(data) is str and len(data) > 0:
# If already in a lobby leave
if handler.lobby:
handler.lobby.leave(handler)
handler.lobby = None
# If lobby name not taken
if not data in lobbies:
lobby = Lobby(data, handler)
lobbies[data] = lobby
handler.lobby = lobby
successful = True
handler.send(successful, "lobby_create")
"""
All routes that wrap an instance method only implement
parameter and state checking. Logic specific to the class is
always handled in the class. This includes reporting errors
to the user and state corrections.
Linear state progression (A -> B -> C) is preferred, only the last
state has to be checked this way. Every state should have a
default value indicating that it has not been reached. If that's
impossible for some good reason the use of helper functions is
encouraged.
If a certain function signature is required the data is validated
before executing *any* further logic. (Variable definitions are allowed)
The first line in routes that return success or failure is always the
definition of *successful* with an appropriate default value.
If failure is the only possible outcome outside of the instance method call,
successful should not be defined and return statements must be used to
speed up cancellation.
"""
class LobbyJoin(Route):
    def run(self, data, handler):
        if type(data) is str and handler.logged_in:
            # Lobby.join reports success itself; only attempt it when the
            # player is not already in a lobby
            if handler.lobby == None and data in lobbies:
                lobbies[data].join(handler)
                return
        handler.send(False, "lobby_join")
class LobbyLeave(Route):
def run(self, data, handler):
if handler.lobby:
handler.lobby.leave(handler)
return
handler.send(False, "lobby_leave")
class LobbyStart(Route):
def run(self, data, handler):
if handler.lobby:
handler.lobby.start(handler)
return
handler.send(False, "lobby_start")
class LobbyKick(Route):
def run(self, data, handler):
if type(data) is str:
if handler.lobby:
handler.lobby.kick(data, handler)
return
handler.send(False, "lobby_kick")
class LobbyChat(Route):
def run(self, data, handler):
if type(data) is str:
if handler.lobby:
handler.lobby.chat_message_received(data, handler)
return
handler.send(False, "lobby_chat")
class UnoPlayCard(Route):
def run(self, data, handler):
if handler.in_game(Uno):
if type(data) is int:
handler.lobby.game.play_card(data, handler)
return
handler.send(False, "uno_play_card")
class UnoDrawCard(Route):
def run(self, data, handler):
if handler.in_game(Uno):
handler.lobby.game.draw_card(handler)
return
handler.send(False, "uno_draw_card")
class UnoSync(Route):
def run(self, data, handler):
if handler.in_game(Uno):
handler.lobby.game.sync(handler)
return
handler.send(False, "uno_sync")
class REPL(Thread):
def __init__(self):
super().__init__()
self.daemon = True
def run(self):
logging.header("REPL started. Type in Python code to introspect. "
"(^D to restart)")
while True:
try:
exec(input(""))
except Exception as e:
if type(e) is EOFError:
execv(executable, ["python3"] + argv)
else:
capture_trace()
config = Config()
config.add(Option("address", "127.0.0.1"))
config.add(Option("port", 8500, validator=lambda port: type(port) is int))
config.add(Option("network_debug", False, validator=lambda debug: type(debug) is bool))
config.add(Option("game_debug", False, validator=lambda debug: type(debug) is bool))
config.add(Option("lobby_debug", False, validator=lambda debug: type(debug) is bool))
config.add(Option("repl", False, validator=lambda repl: type(repl) is bool))
CONFIG_PATH = "uno.cfg"
try:
config = config.load(CONFIG_PATH)
except (IOError, ExceptionInConfigError):
config.dump(CONFIG_PATH)
config = config.load(CONFIG_PATH)
server = make_server(config.address, config.port,
server_class=WSGIServer, handler_class=WebSocketWSGIRequestHandler,
app=ServerWSGIApplication(User, routes={
"login" : Login(),
"lobby_list" : LobbyList(),
"lobby_create" : LobbyCreate(),
"lobby_join" : LobbyJoin(),
"lobby_start" : LobbyStart(),
"lobby_leave" : LobbyLeave(),
"lobby_kick" : LobbyKick(),
"lobby_chat" : LobbyChat(),
"uno_play_card" : UnoPlayCard(),
"uno_draw_card" : UnoDrawCard(),
"uno_sync" : UnoSync()
}, debug=config.network_debug))
server.initialize_websockets_manager()
if config.repl:
logging.warning("Toggle the 'repl' flag before deploying!")
repl = REPL()
repl.start()
try:
server.serve_forever()
except KeyboardInterrupt:
server.server_close() | 25.742857 | 87 | 0.700429 | 16,538 | 0.797896 | 0 | 0 | 412 | 0.019877 | 0 | 0 | 4,885 | 0.235683 |
765ea91849f86020b6b972615c1e086ac4542de7 | 3,209 | py | Python | interface/seed.py | matthewruttley/Bucketerer | 828ee949b1f53b8432cfe7cf7cae0f6eec7d677f | [
"MIT"
] | null | null | null | interface/seed.py | matthewruttley/Bucketerer | 828ee949b1f53b8432cfe7cf7cae0f6eec7d677f | [
"MIT"
] | null | null | null | interface/seed.py | matthewruttley/Bucketerer | 828ee949b1f53b8432cfe7cf7cae0f6eec7d677f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#Creates an ad bucket with a seed term/domain
from pymongo import MongoClient
from similarity import find_by_html, find_by_similarsites, create_connection, tokenize_clean
from tabulate import tabulate
verbose = False
def get_rank(c, domain):
"""Gets the alexa rank for a domain. Returns False if not found"""
entry = c['domains'].find_one({'domain':domain.replace(".", "#")}, {'alexa.rank.latest':1})
try:
return entry['alexa']['rank']['latest']
except Exception:
return False
def create_bucket_from_domain(seed_domain, total_sites=False):
"""Creates a an ad bucket according to the parameters."""
c = create_connection()
#get some similar sites
sites_by_similar = find_by_similarsites(c, starter_site=seed_domain)
sites_by_html = find_by_html(c, starter_site=seed_domain)
#merge, get rank and sort descending
all_sites = []
for repository in [sites_by_html, sites_by_similar]:
for site in repository:
domain = site['url'] if 'url' in site else site['domain']
rank = get_rank(c, domain)
if rank:
if domain != seed_domain:
all_sites.append([domain, rank])
all_sites = sorted(all_sites, key=lambda x: x[1])
if verbose:
        print(tabulate(all_sites[:15 if not total_sites else total_sites]))
else:
return all_sites
def create_bucket_with_raw_keywords(keywords, total_sites=False):
"""finds sites with the keywords in their description. ranks by alexa"""
c = create_connection()
keywords = tokenize_clean(keywords)
results = []
for keyword in keywords:
matches = c['domains'].find({'$text':{'$search':keyword}}, {'domain':1, 'alexa.rank.latest':1})
for match in matches:
results.append([match['domain'].replace('#', '.'), match['alexa']['rank']['latest']])
results = sorted(results, key=lambda x: x[1])
if verbose:
        print(tabulate(results[:15 if not total_sites else total_sites]))
else:
return results
def create_bucket_from_keywords(search_string, total_sites=False):
"""Given some input text, this creates a bucket"""
c = create_connection()
#get some similar sites
sites_by_html = find_by_html(c, starter_text=search_string)
if verbose: print "Found {0} sites by meta desc".format(len(sites_by_html))
#find similar sites to the top 10% (random number?)
all_sites = []
ten_pc = int(len(sites_by_html) / 10)
for site in sites_by_html[:ten_pc]:
domain = site['url'] if 'url' in site else site['domain']
sites_by_similar = find_by_similarsites(c, starter_site=domain)
ten_pc = int(len(sites_by_similar) / 10) #take top 10%
if verbose: print "Adding {0} sites by similarsites".format(ten_pc)
all_sites += sites_by_similar[:ten_pc]
#prepend original sites_by_html to all_sites
all_sites = sites_by_html + all_sites
if verbose: print "Total sites: {0}".format(len(all_sites))
#get rank and sort descending
ranked_sites = []
for site in all_sites:
domain = site['url'] if 'url' in site else site['domain']
rank = get_rank(c, domain)
if rank:
ranked_sites.append([domain, rank])
ranked_sites = sorted(ranked_sites, key=lambda x: x[1])
if verbose:
        print(tabulate(ranked_sites[:15 if not total_sites else total_sites]))
else:
return ranked_sites
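# Added usage sketch (not part of the original module): assumes a reachable
# MongoDB behind create_connection with a populated 'domains' collection;
# the seed domain and keyword string below are hypothetical values.
def example_bucket_run():
    by_domain = create_bucket_from_domain("example.com")
    by_keywords = create_bucket_from_keywords("premier league football")
    return by_domain, by_keywords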
| 28.149123 | 97 | 0.722655 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 816 | 0.254285 |
766084a20156128ac28bfcc2ddc378e26abbd081 | 1,461 | py | Python | functional_tests/tests.py | BFH-E1D-2015-2016/iotdemo | 3440517401b8ef45cc1d3fa8c455671e743d706b | [
"MIT"
] | null | null | null | functional_tests/tests.py | BFH-E1D-2015-2016/iotdemo | 3440517401b8ef45cc1d3fa8c455671e743d706b | [
"MIT"
] | 1 | 2019-11-06T08:24:26.000Z | 2019-11-06T08:24:26.000Z | functional_tests/tests.py | BFH-E1D-2015-2016/iotdemo | 3440517401b8ef45cc1d3fa8c455671e743d706b | [
"MIT"
] | 1 | 2019-03-27T14:48:36.000Z | 2019-03-27T14:48:36.000Z | from django.test import LiveServerTestCase
from selenium import webdriver
def populate_db():
from lorawan.tests import populate_db_with_devices
populate_db_with_devices(["Devices 1", "Devices 2"])
class NewVisitorTest(LiveServerTestCase):
def setUp(self):
import os
is_travis = 'TRAVIS' in os.environ
if(is_travis):
self.browser = webdriver.PhantomJS()
else:
self.browser = webdriver.Firefox()
def tearDown(self):
self.browser.quit()
def test_new_visitor(self):
populate_db()
        # John is a relatively new employee at Teleski SA. Today, he has heard about IOTDemo, the monitoring solution used to
        # gather all sorts of information, measurements and statuses at Teleski SA.
# He open the browser and check its homepage
self.browser.get(self.live_server_url + "/")
# He notices the page title and header mention "IOT Monitoring System"
self.assertIn("IOT Monitoring System", self.browser.title)
self.assertIn("IOT Monitoring System", self.browser.find_element_by_tag_name("h1").text)
        # He found a map (assumed check: the page source mentions a map)
        self.assertIn("map", self.browser.page_source.lower())
# He found a list of devices with a status
table = self.browser.find_element_by_id("devices_list")
first_row = table.find_element_by_tag_name('tbody tr')
self.assertIn("Devices 1", first_row.text)
self.assertIn("OK", first_row.text)
| 28.647059 | 123 | 0.670773 | 1,247 | 0.853525 | 0 | 0 | 0 | 0 | 0 | 0 | 480 | 0.328542 |
76611209864a777f582d19c43d076245dd3008ea | 1,568 | py | Python | imagepy/menus/Plugins/Manager/console_wgt.py | cycleuser/imagepy | 5dc1a9a8137280c5215287392ba1b23d368bd7e9 | [
"BSD-4-Clause"
] | 1 | 2020-06-17T05:16:46.000Z | 2020-06-17T05:16:46.000Z | imagepy/menus/Plugins/Manager/console_wgt.py | cycleuser/imagepy | 5dc1a9a8137280c5215287392ba1b23d368bd7e9 | [
"BSD-4-Clause"
] | null | null | null | imagepy/menus/Plugins/Manager/console_wgt.py | cycleuser/imagepy | 5dc1a9a8137280c5215287392ba1b23d368bd7e9 | [
"BSD-4-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import wx
from wx.py.shell import Shell
import scipy.ndimage as ndimg
import numpy as np
# from imagepy import IPy
from imagepy.core.engine import Free
from sciapp import Source
cmds = {'app':'app', 'np':np, 'ndimg':ndimg, 'update':None, 'get_img':None}
class Macros(dict):
def __init__(self):
for i in Source.manager('plugin').names():
if not isinstance(i, str) or i == 'Command Line':
#print(PluginsManager.plgs[i])
continue
            name = ''.join(list(filter(str.isalnum, i)))
            # Dynamically attach a run_<Name>() method that starts the plugin
            exec("self.run_%s = lambda para=None, plg=Source.manager('plugin').get(i):plg().start(cmds['app'], para)"%name)
class Plugin(wx.Panel):
title = 'Command Line'
single = None
def __init__(self, parent):
wx.Panel.__init__ ( self, parent, id = wx.ID_ANY,
pos = wx.DefaultPosition, size = wx.Size( 500,300 ),
style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
cmds['app'] = parent
cmds['get_img'] = lambda name=None, app=self: self.app.get_img()
cmds['update'] = lambda app=self: self.app.get_img().update()
shell = Shell(self, locals=cmds)
bSizer = wx.BoxSizer( wx.VERTICAL )
bSizer.Add( shell, 1, wx.EXPAND|wx.ALL, 5 )
self.SetSizer(bSizer)
cmds['plgs'] = Macros()
        shell.run('# plgs.run_name() to call an ImagePy plugin.\n')
        shell.run('# app is available here, and get_img() to get the current ImagePlus, update() to redraw.\n') | 41.263158 | 123 | 0.596301 | 1,285 | 0.819515 | 0 | 0 | 0 | 0 | 0 | 0 | 420 | 0.267857 |
7661b5a5e9e423f4f23155f54765fa1febca32f0 | 2,002 | py | Python | Support/Fuego/Pythia/pythia-0.5/packages/pyre/pyre/applications/Application.py | balos1/PelePhysics | d01190cc7b0eaad4ec96fac573034ccb485f0e9f | [
"BSD-3-Clause-LBNL"
] | 31 | 2018-11-21T01:49:06.000Z | 2022-03-30T03:41:43.000Z | Support/Fuego/Pythia/pythia-0.5/packages/pyre/pyre/applications/Application.py | balos1/PelePhysics | d01190cc7b0eaad4ec96fac573034ccb485f0e9f | [
"BSD-3-Clause-LBNL"
] | 123 | 2019-03-12T22:27:29.000Z | 2022-03-29T17:00:04.000Z | Support/Fuego/Pythia/pythia-0.5/packages/pyre/pyre/applications/Application.py | sundials-codes/PelePhysics | 5624f83a04f43aa95288be9d8a7bb372a4adefe6 | [
"BSD-3-Clause-LBNL"
] | 32 | 2018-11-05T11:51:59.000Z | 2022-03-29T13:09:32.000Z | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2003 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from __future__ import absolute_import, print_function
from pyre.components.Component import Component
class Application(Component):
def main(self, *args, **kwds):
self.defaults()
registry = self.prime()
self.configure(registry)
self.init()
self.execute(*args, **kwds)
self.fini()
return
def prime(self):
parser = self.inventory.commandlineParser
self._registry.orphans += parser.parse(root=self._registry)
return self._registry
def execute(self, *args, **kwds):
if self._registry.help:
self.help()
elif self._registry.unknown:
self.usage()
else:
self.run(*args, **kwds)
return
def help(self):
return
def usage(self):
print("unkown arguments:", self._registry.unknown)
return
def __init__(self, name):
Component.__init__(self, name, "application")
import sys
self.filename = sys.argv[0]
self._registry = self._createRegistry()
return
def _createRegistry(self):
from .ConfigurationRegistry import ConfigurationRegistry
return ConfigurationRegistry(self.name)
class Inventory(Component.Inventory):
import pyre.facilities
import pyre.journal
from .CommandlineParser import CommandlineParser
inventory = [
pyre.journal.journal(),
pyre.facilities.facility(
"commandlineParser", default=CommandlineParser()
),
]
# version
__id__ = "$Id$"
# End of file
| 22.244444 | 80 | 0.539461 | 1,474 | 0.736264 | 0 | 0 | 0 | 0 | 0 | 0 | 444 | 0.221778 |
7661be5bdbfa6ef0baed25f569d43c16bca82340 | 7,393 | py | Python | cars/models.py | zhangpotato/kx_backmanage | 5a484c19616a77a75698f63b8fa220ee027c0c17 | [
"Apache-2.0"
] | null | null | null | cars/models.py | zhangpotato/kx_backmanage | 5a484c19616a77a75698f63b8fa220ee027c0c17 | [
"Apache-2.0"
] | null | null | null | cars/models.py | zhangpotato/kx_backmanage | 5a484c19616a77a75698f63b8fa220ee027c0c17 | [
"Apache-2.0"
] | null | null | null | from django.db import models
# Create your models here.
class Car_basic(models.Model):
car_number = models.CharField(max_length=10, null=True, blank=True)
car_color = models.CharField(max_length=10, null=True, blank=True)
register_date = models.DateTimeField(null=True, blank=True)
user_name = models.CharField(max_length=20, null=True, blank=True)
traffic_number = models.CharField(max_length=30, null=True, blank=True)
# traffic_area = models.CharField(max_length=50, null=True, blank=True)
release_time = models.DateTimeField(null=True, blank=True)
car_type = models.CharField(max_length=10, null=True, blank=True)
manufacture_name = models.CharField(max_length=20, null=True, blank=True)
manufacture_type = models.CharField(max_length=50, null=True, blank=True)
manufacture_date = models.DateTimeField(null=True, blank=True)
    # goods_type = models.CharField(max_length=10, null=True, blank=True)  # domestic / imported
vin_number = models.CharField(max_length=50, null=True, blank=True)
total_number = models.CharField(max_length=50, null=True, blank=True)
# car_size = models.CharField(max_length=50, null=True, blank=True)
# packing_size = models.CharField(max_length=50, null=True, blank=True)
# total_number = models.CharField(max_length=50, null=True, blank=True)
curb_weight = models.CharField(max_length=50, null=True, blank=True)
traction_weight = models.CharField(max_length=50, null=True, blank=True)
sure_weight = models.CharField(max_length=50, null=True, blank=True)
sure_people_number = models.CharField(max_length=50, null=True, blank=True)
engine_type = models.CharField(max_length=50, null=True, blank=True)
engine_number = models.CharField(max_length=50, null=True, blank=True)
engine_displacement = models.CharField(max_length=50, null=True, blank=True)
engine_power = models.CharField(max_length=50, null=True, blank=True)
displacement_standard = models.CharField(max_length=50, null=True, blank=True)
battery_type = models.CharField(max_length=50, null=True, blank=True)
motor_model = models.CharField(max_length=50, null=True, blank=True)
motor_power = models.CharField(max_length=50, null=True, blank=True)
power_type = models.CharField(max_length=20, null=True, blank=True)
axle_number = models.CharField(max_length=10, null=True, blank=True)
detection_year_date = models.DateTimeField(null=True, blank=True)
year_dated = models.DateTimeField(null=True, blank=True)
# wheelbase = models.CharField(max_length=10, null=True, blank=True)
# tires_number = models.CharField(max_length=10, null=True, blank=True)
# braking_system = models.CharField(max_length=10, null=True, blank=True)
# brak_form = models.CharField(max_length=20, null=True, blank=True)
# abs_boolean = models.CharField(max_length=10, null=True, blank=True)
# transmission_form = models.CharField(max_length=10, null=True, blank=True)
# slow_machine = models.CharField(max_length=10, null=True, blank=True)
# air_system = models.CharField(max_length=10, null=True, blank=True)
# gps_boolean = models.CharField(max_length=10, null=True, blank=True)
class Meta:
db_table = 'car_basic'
verbose_name = 'car_basic'
class Car_detection(models.Model):
car_number = models.CharField(max_length=10, null=True, blank=True)
detection_name = models.CharField(max_length=30, null=True, blank=True)
detection_date = models.DateTimeField(null=True, blank=True)
report_id = models.CharField(max_length=30, null=True, blank=True)
detection_validity = models.DateTimeField(null=True, blank=True)
register_name = models.CharField(max_length=10, null=True, blank=True)
other = models.CharField(max_length=100, null=True, blank=True)
class Meta:
db_table = 'car_detection'
verbose_name = 'car_detection'
class Car_repair(models.Model):
car_number = models.CharField(max_length=10, null=True, blank=True)
total_km = models.DecimalField(max_digits=12, decimal_places=3, null=True, blank=True)
repair_date = models.DateTimeField(null=True, blank=True)
repair_type = models.CharField(max_length=10, null=True, blank=True)
repair_content = models.CharField(max_length=50, null=True, blank=True)
repair_name = models.CharField(max_length=30, null=True, blank=True)
other = models.CharField(max_length=100, null=True, blank=True)
register_name = models.CharField(max_length=10, null=True, blank=True)
card_number = models.CharField(max_length=20, null=True, blank=True)
class Meta:
db_table = 'car_repair'
verbose_name = 'car_repair'
class Car_replace(models.Model):
car_number = models.CharField(max_length=10, null=True, blank=True)
replace_name = models.CharField(max_length=20, null=True, blank=True)
replace_date = models.DateTimeField(null=True, blank=True)
produce_name = models.CharField(max_length=20, null=True, blank=True)
replace_type = models.CharField(max_length=20, null=True, blank=True)
produce_id = models.CharField(max_length=20, null=True, blank=True)
repair_company = models.CharField(max_length=20, null=True, blank=True)
register_name = models.CharField(max_length=20, null=True, blank=True)
pass_id = models.CharField(max_length=100, null=True, blank=True)
class Meta:
db_table = 'car_replace'
verbose_name = 'car_replace'
class Car_change(models.Model):
car_number = models.CharField(max_length=10, null=True, blank=True)
change_content = models.CharField(max_length=100, null=True, blank=True)
change_date = models.DateTimeField(null=True, blank=True)
change_reason = models.CharField(max_length=20, null=True, blank=True)
register_name = models.CharField(max_length=20, null=True, blank=True)
other = models.CharField(max_length=100, null=True, blank=True)
class Meta:
db_table = 'car_change'
verbose_name = 'car_change'
class Car_km(models.Model):
car_number = models.CharField(max_length=10, null=True, blank=True)
start_date = models.DateTimeField(null=True, blank=True)
end_date = models.DateTimeField(null=True, blank=True)
start_address = models.CharField(max_length=100, null=True, blank=True)
end_address = models.CharField(max_length=100, null=True, blank=True)
km = models.DecimalField(max_digits=12, decimal_places=3, null=True, blank=True)
register_name = models.CharField(max_length=10, null=True, blank=True)
other = models.CharField(max_length=100, null=True, blank=True)
class Meta:
db_table = 'car_km'
verbose_name = 'car_km'
class Car_traffic(models.Model):
car_number = models.CharField(max_length=10, null=True, blank=True)
traffic_location = models.CharField(max_length=30, null=True, blank=True)
traffic_date = models.DateTimeField(null=True, blank=True)
traffic_type = models.CharField(max_length=10, null=True, blank=True)
traffic_responsibility = models.CharField(max_length=30, null=True, blank=True)
register_name = models.CharField(max_length=30, null=True, blank=True)
car_situation = models.CharField(max_length=100, null=True, blank=True)
other = models.CharField(max_length=100, null=True, blank=True)
class Meta:
db_table = 'car_traffic'
verbose_name = 'car_traffic'
| 52.06338 | 90 | 0.739889 | 7,324 | 0.989596 | 0 | 0 | 0 | 0 | 0 | 0 | 1,196 | 0.1616 |
76638422cf9ebc3c0f5b9d7f1f24d4651283c521 | 1,250 | py | Python | pytorch_binding/setup.py | rickyHong/Warp-ctc-repl | 9a2f718ac03588a2d91ffbce57e06350e0fe4fee | [
"Apache-2.0"
] | null | null | null | pytorch_binding/setup.py | rickyHong/Warp-ctc-repl | 9a2f718ac03588a2d91ffbce57e06350e0fe4fee | [
"Apache-2.0"
] | null | null | null | pytorch_binding/setup.py | rickyHong/Warp-ctc-repl | 9a2f718ac03588a2d91ffbce57e06350e0fe4fee | [
"Apache-2.0"
] | null | null | null | # build.py
import os
import platform
import sys
from distutils.core import setup
from torch.utils.ffi import create_extension
extra_compile_args = ['-std=c++11', '-fPIC']
warp_ctc_path = "../build"
if platform.system() == 'Darwin':
lib_ext = ".dylib"
else:
lib_ext = ".so"
if "WARP_CTC_PATH" in os.environ:
warp_ctc_path = os.environ["WARP_CTC_PATH"]
if not os.path.exists(os.path.join(warp_ctc_path, "libwarpctc" + lib_ext)):
print(("Could not find libwarpctc.so in {}.\n"
"Build warp-ctc and set WARP_CTC_PATH to the location of"
" libwarpctc.so (default is '../build')").format(warp_ctc_path))
sys.exit(1)
include_dirs = [os.path.realpath('../include')]
ffi = create_extension(
name='warp_ctc',
language='c++',
headers=['src/binding.h'],
sources=['src/binding.cpp'],
with_cuda=True,
include_dirs=include_dirs,
library_dirs=[os.path.realpath(warp_ctc_path)],
runtime_library_dirs=[os.path.realpath(warp_ctc_path)],
libraries=['warpctc'],
extra_compile_args=extra_compile_args)
ffi = ffi.distutils_extension()
ffi.name = 'warpctc_pytorch._warp_ctc'
setup(
name="warpctc_pytorch",
version="0.1",
packages=["warpctc_pytorch"],
ext_modules=[ffi],
)
| 27.777778 | 75 | 0.6872 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 372 | 0.2976 |
76664b71c48bb37a8d1d4e8a21d159c35c4c4f30 | 5,275 | py | Python | ddganAE/utils.py | Zeff020/Adversarial_ROM | 8c9e7ff86250e9370e5fdd2018f9ad04ded5f122 | [
"MIT"
] | 1 | 2021-12-27T06:14:32.000Z | 2021-12-27T06:14:32.000Z | ddganAE/utils.py | Zeff020/Adversarial_ROM | 8c9e7ff86250e9370e5fdd2018f9ad04ded5f122 | [
"MIT"
] | null | null | null | ddganAE/utils.py | Zeff020/Adversarial_ROM | 8c9e7ff86250e9370e5fdd2018f9ad04ded5f122 | [
"MIT"
] | 3 | 2021-08-05T11:17:37.000Z | 2021-09-02T02:37:44.000Z | """
General utilities for package
"""
import numpy as np
import keras.backend as K
from keras.losses import mse
__author__ = "Zef Wolffs"
__credits__ = ["Claire Heaney"]
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Zef Wolffs"
__email__ = "zefwolffs@gmail.com"
__status__ = "Development"
def calc_pod(snapshots, nPOD=-2, cumulative_tol=0.99, R=None):
"""
Calculate POD coefficients and basis functions
    Args:
        snapshots (list of ndarrays): List of arrays with subgrid
                                      snapshots. shape:
                                      (n_grids, n_nodes*n_scalar,
                                      n_timelevels)
        nPOD (int): Number of POD basis functions to retain
        cumulative_tol (float): Target cumulative-energy fraction (computed
                                but not currently used for truncation)
        R (ndarray, optional): Precomputed basis functions; skips the
                               eigendecomposition when given

    Returns:
        tuple: POD coefficients per subgrid (list of ndarrays), the basis
        functions R (ndarray), and the singular values s (None when R was
        supplied)
    """
    # Stack the subgrid snapshot matrices side by side into a single matrix
out = np.zeros((snapshots[0].shape[0],
len(snapshots)*snapshots[0].shape[-1]))
for i, coeff in enumerate(snapshots):
out[:, i*snapshots[0].shape[-1]:(i+1)*snapshots[0].shape[-1]] = coeff
s = None
if R is None:
snapshots_matrix = out
nrows, ncols = snapshots_matrix.shape
if nrows > ncols/4:
SSmatrix = np.dot(snapshots_matrix.T, snapshots_matrix)
else:
SSmatrix = np.dot(snapshots_matrix, snapshots_matrix.T)
            print('WARNING - CHECK HOW THE BASIS FUNCTIONS ARE CALCULATED '
                  'WITH THIS METHOD')
print('SSmatrix', SSmatrix.shape)
eigvalues, v = np.linalg.eigh(SSmatrix)
eigvalues = eigvalues[::-1]
        # get rid of small negative eigenvalues (there shouldn't be any, as
        # the eigenvalues of the positive semi-definite matrix A^T A are
        # non-negative, but very small negative values do sometimes appear
        # numerically)
eigvalues[eigvalues < 0] = 0
s = np.sqrt(eigvalues)
# print('s values', s_values[0:20])
cumulative_info = np.zeros(len(eigvalues))
for j in range(len(eigvalues)):
if j == 0:
cumulative_info[j] = eigvalues[j]
else:
cumulative_info[j] = cumulative_info[j-1] + eigvalues[j]
cumulative_info = cumulative_info / cumulative_info[-1]
nAll = len(eigvalues)
basis_functions = np.zeros((out.shape[0], nPOD))
for j in reversed(range(nAll-nPOD, nAll)):
Av = np.dot(snapshots_matrix, v[:, j])
basis_functions[:, nAll-j-1] = Av/np.linalg.norm(Av)
R = basis_functions
coeffs = []
for iGrid in range(len(snapshots)):
snapshots_per_grid = \
out[:, iGrid*snapshots[0].shape[-1]:(iGrid+1) *
snapshots[0].shape[-1]]
coeffs.append(np.dot(R.T, snapshots_per_grid))
return coeffs, R, s
def reconstruct_pod(coeffs, R):
"""
    Reconstruct grid from POD coefficients and transformation matrix R.
Args:
coeffs (np.array): POD coefficients
R (np.array): Transformation matrix R
Returns:
np.array: Reconstructed grid
"""
return R @ coeffs
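# Added usage sketch (not part of the original module): round-trips synthetic
# snapshot data through calc_pod/reconstruct_pod. The grid, node and timestep
# counts below are arbitrary illustration values.
def _pod_roundtrip_example(n_grids=2, n_nodes=50, n_times=20, n_pod=10):
    rng = np.random.default_rng(0)
    snapshots = [rng.standard_normal((n_nodes, n_times))
                 for _ in range(n_grids)]
    coeffs, R, s = calc_pod(snapshots, nPOD=n_pod)
    # Project the first subgrid back to physical space with the shared basis
    approx = reconstruct_pod(coeffs[0], R)
    return np.linalg.norm(approx - snapshots[0])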
class mse_weighted:
"""
Custom weighted mean squared error loss
"""
def __init__(self) -> None:
"""
        Constructor; a name is required for TensorFlow custom losses. Since
        the weights are only known after compiling, they need to be attributes
"""
self.weights = None
self.__name__ = "mse_weighted"
def __call__(self, y_true, y_pred):
"""
Tensorflow loss needs to be callable.
Args:
y_true (np.array or tf.tensor): True values
y_pred (np.array or tf.tensor): Predicted values
Returns:
float: Weighted MSE loss
"""
return K.mean(K.square(y_pred*self.weights - y_true*self.weights),
axis=-1)
class mse_PI:
"""
Mean squared error loss class.
"""
def __init__(self, dx=None, dy=None):
        # Grid spacings used by the finite-difference continuity term
        self.dx = dx
        self.dy = dy
self.__name__ = "mse_PI"
def __call__(self, y_true, y_pred):
"""
Call the class, calculate the physics informed MSE loss
Args:
y_true (np.array): True values
y_pred (np.array): Predictions by model
Raises:
ValueError: Raises error if intervals dx and dy are not set
Returns:
float: Physics informed loss value
"""
if self.dx is None or self.dy is None:
raise ValueError("First set dx and dy")
# cty is the value of the continuity equation
cty = 0
# keep a count such that we can average later
count = 0
for k in range(y_pred.shape[0]):
# K is the grid in the batch
for i in range(1, y_pred.shape[1]-1):
# index in x direction
for j in range(1, y_pred.shape[2]-1):
# index in y direction
cty += (y_pred[k, i+1, j, 0] - y_pred[k, i-1, j, 0]) / \
(2*self.dx) + \
(y_pred[k, i, j+1, 1] - y_pred[k, i, j-1, 1]) / \
(2*self.dy)
count += 1
cty = cty/count
return K.mean(mse(y_true, y_pred)) + abs(cty)
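# Added usage sketch (not part of the original module): evaluate the
# physics-informed loss on a constant, hence divergence-free, dummy velocity
# field. The grid shape and dx/dy spacings are illustrative values.
def _mse_pi_example():
    loss = mse_PI(dx=1.0, dy=1.0)
    y_true = np.zeros((1, 4, 4, 2))
    y_pred = np.ones((1, 4, 4, 2))
    # The continuity term is zero for a constant field, so this is plain MSE
    return loss(y_true, y_pred)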
| 28.825137 | 80 | 0.561896 | 2,198 | 0.416682 | 0 | 0 | 0 | 0 | 0 | 0 | 2,179 | 0.413081 |
766833811c4404edc21132096dd4af5a0802f1ce | 8,923 | py | Python | Preprocess/mosaic_utils.py | monchhichizzq/CenterNet | 718bfbfa1940a8b068ab359aaca6737c3c173ad0 | [
"MIT"
] | null | null | null | Preprocess/mosaic_utils.py | monchhichizzq/CenterNet | 718bfbfa1940a8b068ab359aaca6737c3c173ad0 | [
"MIT"
] | null | null | null | Preprocess/mosaic_utils.py | monchhichizzq/CenterNet | 718bfbfa1940a8b068ab359aaca6737c3c173ad0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Time : 2020/12/28 23:33
# @Author : Zeqi@@
# @FileName: mosaic_utils.py
# @Software: PyCharm
import os
import cv2
from PIL import Image
import numpy as np
def merge_bboxes(bboxes, cutx, cuty):
    # Clip each tile's boxes to the mosaic cut lines (cutx, cuty) and drop
    # boxes that end up thinner than 5 pixels on a side
    merge_bbox = []
for i in range(len(bboxes)):
for box in bboxes[i]:
tmp_box = []
x1, y1, x2, y2 = box[0], box[1], box[2], box[3]
if i == 0:
if y1 > cuty or x1 > cutx:
continue
if y2 >= cuty and y1 <= cuty:
y2 = cuty
if y2 - y1 < 5:
continue
if x2 >= cutx and x1 <= cutx:
x2 = cutx
if x2 - x1 < 5:
continue
if i == 1:
if y2 < cuty or x1 > cutx:
continue
if y2 >= cuty and y1 <= cuty:
y1 = cuty
if y2 - y1 < 5:
continue
if x2 >= cutx and x1 <= cutx:
x2 = cutx
if x2 - x1 < 5:
continue
if i == 2:
if y2 < cuty or x2 < cutx:
continue
if y2 >= cuty and y1 <= cuty:
y1 = cuty
if y2 - y1 < 5:
continue
if x2 >= cutx and x1 <= cutx:
x1 = cutx
if x2 - x1 < 5:
continue
if i == 3:
if y1 > cuty or x2 < cutx:
continue
if y2 >= cuty and y1 <= cuty:
y2 = cuty
if y2 - y1 < 5:
continue
if x2 >= cutx and x1 <= cutx:
x1 = cutx
if x2 - x1 < 5:
continue
tmp_box.append(x1)
tmp_box.append(y1)
tmp_box.append(x2)
tmp_box.append(y2)
tmp_box.append(box[-1])
merge_bbox.append(tmp_box)
return merge_bbox
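# Added sketch (illustrative values only): boxes from the four tiles of a
# 100x100 mosaic cut at (50, 50); box format is [x1, y1, x2, y2, class_id].
def _merge_bboxes_example():
    tile_boxes = [[[10, 10, 40, 40, 0]],   # top-left tile
                  [[10, 60, 40, 90, 1]],   # bottom-left tile
                  [[60, 60, 90, 90, 2]],   # bottom-right tile
                  [[60, 10, 90, 40, 0]]]   # top-right tile
    return merge_bboxes(tile_boxes, cutx=50, cuty=50)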
class Data_augmentation_with_Mosaic():
def __init__(self,
four_annotation_lines,
input_shape,
max_boxes=100,
hue=.1,
sat=1.5,
val=1.5,
visual=True):
self.four_annotation_lines = four_annotation_lines
self.h, self.w = input_shape
self.min_offset_x = 0.4
self.min_offset_y = 0.4
        self.scale_low = 1 - min(self.min_offset_x, self.min_offset_y) - 0.3  # modified
        self.scale_high = self.scale_low + 0.2 + 0.3  # modified
# print(self.scale_low, self.scale_high)
self.max_boxes = max_boxes
self.hue = hue
self.sat = sat
self.val = val
self.place_x = [0, 0, int(self.w * self.min_offset_x), int(self.w * self.min_offset_x)]
self.place_y = [0, int(self.h * self.min_offset_y), int(self.h * self.min_offset_y), 0]
self.visual = visual
def main(self):
image_datas = []
box_datas = []
index = 0
for annotation_line in self.four_annotation_lines:
            # Split each annotation line into its fields
line_content = annotation_line.split()
            # Open the image
image = Image.open(line_content[0])
image = image.convert("RGB")
            # Get the image size
iw, ih = image.size
            # Parse the bounding boxes
box = np.array([np.array(list(map(int, box.split(',')))) for box in line_content[1:]])
            # Randomly flip the image
flip = np.random.rand() < .5
if flip and len(box) > 0:
image = image.transpose(Image.FLIP_LEFT_RIGHT)
box[:, [0, 2]] = iw - box[:, [2, 0]]
            # Rescale the input image
new_ar = self.w / self.h
scale = np.random.uniform(self.scale_low, self.scale_high)
if new_ar < 1:
nh = int(scale * self.h)
nw = int(nh * new_ar)
else:
nw = int(scale * self.w)
nh = int(nw / new_ar)
image = image.resize((nw, nh), Image.BICUBIC)
            # Apply HSV colour-space jitter
hue = np.random.uniform(-self.hue, self.hue)
sat = np.random.uniform(1, self.sat) if np.random.uniform() < .5 else 1 / np.random.uniform(1, self.sat)
val = np.random.uniform(1, self.val) if np.random.uniform() < .5 else 1 / np.random.uniform(1, self.val)
x = cv2.cvtColor(np.array(image, np.float32) / 255, cv2.COLOR_RGB2HSV)
x[..., 0] += hue * 360
x[..., 0][x[..., 0] > 1] -= 1
x[..., 0][x[..., 0] < 0] += 1
x[..., 1] *= sat
x[..., 2] *= val
x[x[:, :, 0] > 360, 0] = 360
x[:, :, 1:][x[:, :, 1:] > 1] = 1
x[x < 0] = 0
image = cv2.cvtColor(x, cv2.COLOR_HSV2RGB) # numpy array, 0 to 1
image = Image.fromarray((image * 255).astype(np.uint8))
            # Paste the image at its quadrant position (one of the four tiles)
dx = self.place_x[index]
dy = self.place_y[index]
new_image = Image.new('RGB', (self.w, self.h), (128, 128, 128))
new_image.paste(image, (dx, dy))
image_data = np.array(new_image) / 255
if self.visual:
image_ = np.array(image_data * 255., dtype=np.uint8)
# print(np.shape(image_))
image_ = cv2.cvtColor(image_, cv2.COLOR_RGB2BGR)
cv2.imshow('Image', image_)
cv2.waitKey(100)
index = index + 1
box_data = []
            # Rescale the boxes to the resized, shifted image and clip them
if len(box) > 0:
np.random.shuffle(box)
box[:, [0, 2]] = box[:, [0, 2]] * nw / iw + dx
box[:, [1, 3]] = box[:, [1, 3]] * nh / ih + dy
box[:, 0:2][box[:, 0:2] < 0] = 0
box[:, 2][box[:, 2] > self.w] = self.w
box[:, 3][box[:, 3] > self.h] = self.h
box_w = box[:, 2] - box[:, 0]
box_h = box[:, 3] - box[:, 1]
box = box[np.logical_and(box_w > 1, box_h > 1)]
box_data = np.zeros((len(box), 5))
box_data[:len(box)] = box
image_datas.append(image_data)
box_datas.append(box_data)
        # Cut the four tiles and stitch them into a single mosaic image
cutx = np.random.randint(int(self.w * self.min_offset_x), int(self.w * (1 - self.min_offset_x)))
cuty = np.random.randint(int(self.h * self.min_offset_y), int(self.h * (1 - self.min_offset_y)))
new_image = np.zeros([self.h, self.w, 3])
# print("\n mosaic")
# print('image datas',np.shape(image_datas), cutx, cuty)
new_image[:cuty, :cutx, :] = image_datas[0][:cuty, :cutx, :]
new_image[cuty:, :cutx, :] = image_datas[1][cuty:, :cutx, :]
new_image[cuty:, cutx:, :] = image_datas[2][cuty:, cutx:, :]
new_image[:cuty, cutx:, :] = image_datas[3][:cuty, cutx:, :]
        # Merge the boxes across the tile boundaries
new_boxes = merge_bboxes(box_datas, cutx, cuty)
        # Pad the box list out to max_boxes
box_data = np.zeros((self.max_boxes, 5))
if len(new_boxes) > 0:
if len(new_boxes) > self.max_boxes: new_boxes = new_boxes[:self.max_boxes]
box_data[:len(new_boxes)] = new_boxes
if self.visual:
# print(new_image.shape, np.max(new_image), np.min(new_image))
new_image = np.array(new_image * 255., dtype=np.uint8)
# print(new_image.shape, np.max(new_image), np.min(new_image))
for box in box_data:
box = [int(b) for b in box]
cv2.rectangle(new_image, (box[0], box[1]), (box[2], box[3]), color=(255, 255, 255), thickness=1)
new_image = cv2.cvtColor(new_image, cv2.COLOR_RGB2BGR)
cv2.imshow('Image', new_image)
cv2.waitKey(1000)
return new_image, box_data
if __name__ == "__main__":
annotation_path = '../Preparation/data_txt'
annotation_lines = open(os.path.join(annotation_path, 'kitti_ssd_obj_test.txt')).readlines()
# print(len(annotation_lines))
four_annotation_lines = []
for i, line in enumerate(annotation_lines):
four_annotation_lines.append(line)
if (i+1) % 4==0:
mosaic_aug = Data_augmentation_with_Mosaic(four_annotation_lines,
input_shape=(320, 960),
max_boxes=100,
hue=.1,
sat=1.5,
val=1.5,
visual=True)
image_data, box_data = mosaic_aug.main()
four_annotation_lines = []
| 35.268775 | 116 | 0.455004 | 6,012 | 0.658561 | 0 | 0 | 0 | 0 | 0 | 0 | 864 | 0.094643 |
7668a391529415983ac3244d55ce03076f5a3acc | 146 | py | Python | starter_code/api_keys.py | tstenner1/python-api-challenge | dfc0642da6831d80664514d0705865f01e5c6c5a | [
"ADSL"
] | null | null | null | starter_code/api_keys.py | tstenner1/python-api-challenge | dfc0642da6831d80664514d0705865f01e5c6c5a | [
"ADSL"
] | null | null | null | starter_code/api_keys.py | tstenner1/python-api-challenge | dfc0642da6831d80664514d0705865f01e5c6c5a | [
"ADSL"
] | null | null | null | # OpenWeatherMap API Key
weather_api_key = "f4695ec49ac558195fc591f0d450c34c"
# Google API Key
g_key = "AIzaSyAyIq5hhFN-Y0M16Ltie3YuwpDiKWx8tCk"
| 24.333333 | 52 | 0.835616 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 115 | 0.787671 |
76697ffea65a3048f61ff82f93d0bee2b75272f6 | 1,535 | py | Python | zoopla_history.py | chaudhary-amit/zoopla-scraper | 9486a549bbd245493cdf5ec757fac9b003e9201f | [
"MIT"
] | 1 | 2022-01-13T16:49:57.000Z | 2022-01-13T16:49:57.000Z | zoopla_history.py | chaudhary-amit/zoopla-scraper | 9486a549bbd245493cdf5ec757fac9b003e9201f | [
"MIT"
] | null | null | null | zoopla_history.py | chaudhary-amit/zoopla-scraper | 9486a549bbd245493cdf5ec757fac9b003e9201f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 26 16:48:20 2017
@author: AC
"""
import logging
from lxml import html
import requests
import zoopla_rq
logger = logging.getLogger()
class HomeListing():
def __init__(self, config, parent_id, history_id, survey_id):
self.config = config
self.parent_id = parent_id
self.history_id = history_id
self.survey_id = survey_id
self.time = None
self.price = None
#add other attributes
def get_property_history_info(self):
property_history_url = (self.config.URL_PROPERTY_ROOT
                                + str(self.history_id))  # assumption: history_id is the listing id (no room_id attribute exists)
response = zoopla_rq.rq_request_with_repeats(self.config,
property_history_url)
if response is not None:
page = response.text
tree = html.fromstring(page)
self.__get_history_info_from_tree(tree)
return True
else:
return False
def __get_time(self, tree):
temp = tree.xpath('//p[@id="historic-listing-title"]/strong/text()')
time = temp[0].strip()
time = time.replace('\n', '')
self.time = time
def __get_price(self, tree):
temp = tree.xpath('//strong[@class="buyers"]/text()')
price = temp[0].strip()
self.price = price
def __get_history_info_from_tree(self, tree):
self.__get_time(tree)
self.__get_price(tree)
| 28.425926 | 76 | 0.566775 | 1,334 | 0.869055 | 0 | 0 | 0 | 0 | 0 | 0 | 190 | 0.123779 |
766aab70e53fbf30d766b2eb2c674ed0d93615eb | 604 | py | Python | cavoke_server/tasks.py | cavoke-project/cavoke_server | 5d2e39ca760e140ab9a8b0c50b45a6d4b22e21b5 | [
"MIT"
] | null | null | null | cavoke_server/tasks.py | cavoke-project/cavoke_server | 5d2e39ca760e140ab9a8b0c50b45a6d4b22e21b5 | [
"MIT"
] | 1 | 2020-01-06T17:47:03.000Z | 2020-01-06T17:47:03.000Z | cavoke_server/tasks.py | cavoke-project/cavoke-server | 5d2e39ca760e140ab9a8b0c50b45a6d4b22e21b5 | [
"MIT"
] | null | null | null | # from celery.schedules import crontab
# from celery.task import periodic_task
# from django.utils import timezone
# from cavoke_app.models import GameSession
#
#
# @periodic_task(run_every=crontab(minute='*/1'))
# def delete_old_foos():
# # Query all the foos in our database
# gss = GameSession.objects.all()
#
# # Iterate through them
# for gs in gss:
#
#         # If the session has expired (expiresOn is in the past), delete it
# if gs.expiresOn < timezone.now():
# gs.delete()
# # log deletion
# return "completed deleting foos at {}".format(timezone.now())
| 30.2 | 67 | 0.655629 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 585 | 0.968543 |
766ae60371b09b86c76e076b90ac87115e738f34 | 830 | py | Python | src/stock/lib/daily.py | callim105/alphascan | a8eea45b3560a369b186c9971b01351a7a900d13 | [
"MIT"
] | 1 | 2021-12-03T01:30:12.000Z | 2021-12-03T01:30:12.000Z | src/stock/lib/daily.py | callim105/alphascan | a8eea45b3560a369b186c9971b01351a7a900d13 | [
"MIT"
] | null | null | null | src/stock/lib/daily.py | callim105/alphascan | a8eea45b3560a369b186c9971b01351a7a900d13 | [
"MIT"
] | null | null | null | import requests
import pandas as pd
from util.api_util import AV_API_KEY
def daily_adjusted_ohlc(symbol):
    """Fetch the full daily adjusted OHLC history for `symbol` from Alpha Vantage."""
url = f"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol={symbol}&outputsize=full&apikey={AV_API_KEY}"
r = requests.get(url)
data = r.json()
df = pd.DataFrame(data[f"Time Series (Daily)"]).T
df = df.rename(
columns={
'1. open': 'open',
'2. high': 'high',
'3. low': 'low',
'4. close': 'close',
'5. adjusted close': 'adj_close',
'6. volume': 'volume',
'7. dividend amount': 'dividend',
'8. split coefficient': 'split_coefficient'
})
df.index = pd.to_datetime(df.index)
df[df.columns] = df[df.columns].astype("float64")
df['symbol'] = symbol
return df | 34.583333 | 134 | 0.584337 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 343 | 0.413253 |
766b5f11549c7cb69fb9753e27e954a3ff27f37d | 2,047 | py | Python | FindingUsefulPoints/CodeThatClassifiesPoints.py | PaoloMura/VoronoiAR | 373d2d4858b7cd1a6ae95692ffaab382542bca4a | [
"MIT"
] | null | null | null | FindingUsefulPoints/CodeThatClassifiesPoints.py | PaoloMura/VoronoiAR | 373d2d4858b7cd1a6ae95692ffaab382542bca4a | [
"MIT"
] | null | null | null | FindingUsefulPoints/CodeThatClassifiesPoints.py | PaoloMura/VoronoiAR | 373d2d4858b7cd1a6ae95692ffaab382542bca4a | [
"MIT"
] | null | null | null | from Point import *
'''
A function that compares the distance of every other point to a cast point
with the distance from that cast point to the point it was cast from, and
returns either a point closer to the cast than the original one, or the cast
itself if the original was already the closest
'''
def Searching(_cast, _distance, _points):
for _point in _points:
if _point - _cast < _distance:
return _point
return _cast
# Just a handy function that writes all the points from an array into a given file
def WriteOn(file, array):
for point in array:
file.write(str(point.index) + ") " + str(point.x) + "," + str(point.y) + "," + str(point.z) + "\n" )
file = open("Coordinates.txt", "r")
# Reading the corners (the first 4 points in the file) and framing them
Corners = []
for i in range(4):
Corners.append(Point.Read(file.readline(), -1))
frame = Frame.Create(Corners)
# Reading all the coordinates and creating our indexed points
Points = []
index = 0
for line in file:
Points.append(Point.Read(line, index))
index += 1
file.close()
# Lists that will come in handy later ;)
Less = []
Ful = []
'''
This little loop casts each existing point onto the frame and then checks
whether that point is the closest one to its cast. If it is, it is
stored in 'Ful' (from useful)
'''
for point in Points:
cast = point.CastOnto(frame)
distance = cast - point
candidate = Searching(cast, distance, Points)
if (candidate.index == cast.index):
candidate = point
if (candidate not in Ful):
Ful.append(candidate)
# Less (from useless) gets all the points that were not in Ful appended to it
Less = [item for item in Points if item not in Ful]
"""
These were used for debugging
print(len(Points))
print(len(Ful))
print(len(Less))
"""
# Two txt files are created and our results are saved in their appropriately named files
less = open("outUseless.txt", "w")
ful = open("outUseful.txt", "w")
WriteOn(less, Less)
WriteOn(ful, Ful)
less.close()
ful.close()
# Fin
| 26.24359 | 109 | 0.686859 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,039 | 0.507572 |
767007c2e137e066c7c342a0aed20d260bc5e9d2 | 3,091 | py | Python | label_studio/ml/examples/object_detection.py | zoumt1633/label-studio | 324d542b49b42cac5c9a3e23373b9febaf9a426e | [
"Apache-2.0"
] | null | null | null | label_studio/ml/examples/object_detection.py | zoumt1633/label-studio | 324d542b49b42cac5c9a3e23373b9febaf9a426e | [
"Apache-2.0"
] | null | null | null | label_studio/ml/examples/object_detection.py | zoumt1633/label-studio | 324d542b49b42cac5c9a3e23373b9febaf9a426e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# @Time : 2020/7/12 14:19
# @Author : zoumaotai
# @Email : zoumaotai@ailongma.com
# @File : object_detection.py
# @Software: PyCharm
import random
import urllib
from gluoncv import model_zoo, data
from label_studio.ml import LabelStudioMLBase
import mxnet as mx
class ObjectDetectionModel(LabelStudioMLBase):
def __init__(self, **kwargs):
super(ObjectDetectionModel, self).__init__(**kwargs)
from_name, schema = list(self.parsed_label_config.items())[0]
self.from_name = from_name
self.to_name = schema['to_name'][0]
self.labels = schema['labels']
self.net = model_zoo.get_model('faster_rcnn_resnet50_v1b_voc', pretrained=True)
def predict(self, tasks, **kwargs):
results = []
for task in tasks:
image_url = task.get('data').get('image')
image_url = f'http://localhost:8080{image_url}' if not image_url.startswith('http') else image_url
urllib.request.urlretrieve(image_url, "test.jpg")
src = mx.img.imread('test.jpg')
org_h, org_w, _ = src.shape
x, orig_img = data.transforms.presets.rcnn.load_test("test.jpg")
h, w, _ = orig_img.shape
ratio_h = org_h/h
ratio_w = org_w/w
            print('h scale ratio', ratio_h)
            print('w scale ratio', ratio_w)
box_ids, scores, bboxes = self.net(x)
result_list = []
for bbox, box_id, score in zip(bboxes[0].asnumpy().tolist(), box_ids[0].asnumpy().tolist(),
scores[0].asnumpy().tolist()):
if bbox[0] == -1:
break
label = self.net.classes[int(box_id[0])]
score = score[0]
x = bbox[0] * ratio_w * 100 / org_w
y = bbox[1] * ratio_h * 100 / org_h
height = (bbox[3] - bbox[1]) * ratio_h * 100 / org_h
width = (bbox[2] - bbox[0]) * ratio_w * 100 / org_w
if score > 0.8:
result_list.append(
{
"from_name": "label",
"id": "t5sp3TyXPo",
"source": "$image",
"to_name": "image",
"type": "rectanglelabels",
"value": {
"height": height, # 高度占比
"rectanglelabels": [
label
],
"rotation": 0,
"width": width, # 宽度占比
"x": x,
"y": y
}
})
results.append({
"result": result_list,
'score': 0.9
})
print(results)
return results
def fit(self, completions, workdir=None, **kwargs):
return {'random': random.randint(1, 10)}
| 38.6375 | 110 | 0.461663 | 2,823 | 0.903939 | 0 | 0 | 0 | 0 | 0 | 0 | 516 | 0.165226 |
7671557048679bb49752d923c9406604fe45cfb7 | 4,295 | py | Python | ee/cli/plugins/sync.py | quimica/easyengine | 07056759922a6e05949ca4b56ee94df6b2e580de | [
"MIT"
] | null | null | null | ee/cli/plugins/sync.py | quimica/easyengine | 07056759922a6e05949ca4b56ee94df6b2e580de | [
"MIT"
] | null | null | null | ee/cli/plugins/sync.py | quimica/easyengine | 07056759922a6e05949ca4b56ee94df6b2e580de | [
"MIT"
] | null | null | null | from cement.core.controller import CementBaseController, expose
from cement.core import handler, hook
from ee.core.fileutils import EEFileUtils
from ee.cli.plugins.sitedb import *
from ee.core.mysql import *
from ee.core.logging import Log
def ee_sync_hook(app):
# do something with the ``app`` object here.
pass
class EESyncController(CementBaseController):
class Meta:
label = 'sync'
stacked_on = 'base'
stacked_type = 'nested'
description = 'synchronize EasyEngine database'
@expose(hide=True)
def default(self):
self.sync()
@expose(hide=True)
def sync(self):
"""
1. reads database information from wp/ee-config.php
2. updates records into ee database accordingly.
"""
Log.info(self, "Synchronizing ee database, please wait...")
sites = getAllsites(self)
if not sites:
pass
for site in sites:
if site.site_type in ['mysql', 'wp', 'wpsubdir', 'wpsubdomain']:
ee_site_webroot = site.site_path
# Read config files
configfiles = glob.glob(ee_site_webroot + '/*-config.php')
#search for wp-config.php inside htdocs/www/
if not configfiles:
Log.debug(self, "Config files not found in {0}/ "
.format(ee_site_webroot))
if site.site_type != 'mysql':
Log.debug(self, "Searching wp-config.php in {0}/htdocs/www/ "
.format(ee_site_webroot))
configfiles = glob.glob(ee_site_webroot + '/htdocs/www/wp-config.php')
if configfiles:
if EEFileUtils.isexist(self, configfiles[0]):
ee_db_name = (EEFileUtils.grep(self, configfiles[0],
'DB_NAME').split(',')[1]
.split(')')[0].strip().replace('\'', ''))
ee_db_user = (EEFileUtils.grep(self, configfiles[0],
'DB_USER').split(',')[1]
.split(')')[0].strip().replace('\'', ''))
ee_db_pass = (EEFileUtils.grep(self, configfiles[0],
'DB_PASSWORD').split(',')[1]
.split(')')[0].strip().replace('\'', ''))
ee_db_host = (EEFileUtils.grep(self, configfiles[0],
'DB_HOST').split(',')[1]
.split(')')[0].strip().replace('\'', ''))
                    # Check if the database really exists
try:
if not EEMysql.check_db_exists(self, ee_db_name):
                            # Mark it as deleted if it does not exist
ee_db_name = 'deleted'
ee_db_user = 'deleted'
ee_db_pass = 'deleted'
except StatementExcecutionError as e:
Log.debug(self, str(e))
except Exception as e:
Log.debug(self, str(e))
if site.db_name != ee_db_name:
# update records if any mismatch found
Log.debug(self, "Updating ee db record for {0}"
.format(site.sitename))
updateSiteInfo(self, site.sitename,
db_name=ee_db_name,
db_user=ee_db_user,
db_password=ee_db_pass,
db_host=ee_db_host)
else:
Log.debug(self, "Config files not found for {0} "
.format(site.sitename))
def load(app):
# register the plugin class.. this only happens if the plugin is enabled
handler.register(EESyncController)
# register a hook (function) to run after arguments are parsed.
hook.register('post_argument_parsing', ee_sync_hook)
| 44.739583 | 94 | 0.469849 | 3,711 | 0.864028 | 0 | 0 | 3,499 | 0.814668 | 0 | 0 | 936 | 0.217928 |
7672b96f23df1eb333154249a0fe4fcc3c65df84 | 7,534 | py | Python | nempy/spot_markert_backend/variable_ids.py | hy3440/nempy | ffc6c3e1a0becde8cbf6ba56d5885768dc1c0a37 | [
"BSD-3-Clause"
] | 24 | 2020-05-16T11:46:25.000Z | 2022-03-29T22:25:09.000Z | nempy/spot_markert_backend/variable_ids.py | hy3440/nempy | ffc6c3e1a0becde8cbf6ba56d5885768dc1c0a37 | [
"BSD-3-Clause"
] | 6 | 2020-11-17T22:37:35.000Z | 2022-03-03T00:11:08.000Z | nempy/spot_markert_backend/variable_ids.py | hy3440/nempy | ffc6c3e1a0becde8cbf6ba56d5885768dc1c0a37 | [
"BSD-3-Clause"
] | 12 | 2020-04-30T09:42:22.000Z | 2022-03-06T23:45:08.000Z | import pandas as pd
import numpy as np
from nempy.help_functions import helper_functions as hf
def bids(volume_bids, unit_info, next_variable_id):
"""Create decision variables that correspond to unit bids, for use in the linear program.
    This function defines the needed parameters for each variable, with a lower bound equal to zero, an upper bound
    equal to the bid volume, and a variable type of continuous. There is no limit on the number of bid bands: each
    column in the volume_bids DataFrame other than unit and service is treated as a bid band. Volume bids should be
    positive, numeric values only.
Examples
--------
>>> import pandas
A set of capacity bids.
>>> volume_bids = pd.DataFrame({
... 'unit': ['A', 'B'],
... '1': [10.0, 50.0],
... '2': [20.0, 30.0]})
The locations of the units.
>>> unit_info = pd.DataFrame({
... 'unit': ['A', 'B'],
... 'region': ['NSW', 'X'],
... 'dispatch_type': ['generator', 'load']})
>>> next_variable_id = 0
Create the decision variables and their mapping into constraints.
>>> decision_variables, unit_level_constraint_map, regional_constraint_map = bids(
... volume_bids, unit_info, next_variable_id)
>>> print(decision_variables)
unit capacity_band service variable_id lower_bound upper_bound type
0 A 1 energy 0 0.0 10.0 continuous
1 A 2 energy 1 0.0 20.0 continuous
2 B 1 energy 2 0.0 50.0 continuous
3 B 2 energy 3 0.0 30.0 continuous
>>> print(unit_level_constraint_map)
variable_id unit service coefficient
0 0 A energy 1.0
1 1 A energy 1.0
2 2 B energy 1.0
3 3 B energy 1.0
>>> print(regional_constraint_map)
variable_id region service coefficient
0 0 NSW energy 1.0
1 1 NSW energy 1.0
2 2 X energy -1.0
3 3 X energy -1.0
Parameters
----------
volume_bids : pd.DataFrame
Bids by unit, in MW, can contain up to n bid bands.
======== ===============================================================
Columns: Description:
unit unique identifier of a dispatch unit (as `str`)
service the service being provided, optional, if missing energy assumed
(as `str`)
1 bid volume in the 1st band, in MW (as `float`)
2 bid volume in the 2nd band, in MW (as `float`)
n bid volume in the nth band, in MW (as `float`)
======== ===============================================================
unit_info : pd.DataFrame
The region each unit is located in.
======== ======================================================
Columns: Description:
unit unique identifier of a dispatch unit (as `str`)
region unique identifier of a market region (as `str`)
======== ======================================================
next_variable_id : int
The next integer to start using for variables ids.
Returns
-------
decision_variables : pd.DataFrame
============= ===============================================================
Columns: Description:
unit unique identifier of a dispatch unit (as `str`)
capacity_band the bid band of the variable (as `str`)
variable_id the id of the variable (as `int`)
lower_bound the lower bound of the variable, is zero for bids (as `np.float64`)
upper_bound the upper bound of the variable, the volume bid (as `np.float64`)
type the type of variable, is continuous for bids (as `str`)
============= ===============================================================
unit_level_constraint_map : pd.DataFrame
============= =============================================================================
Columns: Description:
variable_id the id of the variable (as `np.int64`)
unit the unit level constraints the variable should map to (as `str`)
service the service type of the constraints the variables should map to (as `str`)
        coefficient   the coefficient of the variable in the constraint, 1.0 for bids (as `np.float64`)
============= =============================================================================
regional_constraint_map : pd.DataFrame
============= =============================================================================
Columns: Description:
variable_id the id of the variable (as `np.int64`)
region the regional constraints the variable should map to (as `str`)
service the service type of the constraints the variables should map to (as `str`)
        coefficient   the coefficient of the variable in the constraint, 1.0 for generators and -1.0 for loads in the energy service (as `np.float64`)
============= =============================================================================
"""
# If no service column is provided assume bids are for energy.
if 'service' not in volume_bids.columns:
volume_bids['service'] = 'energy'
# Get a list of all the columns that contain volume bids.
bid_bands = [col for col in volume_bids.columns if col not in ['unit', 'service']]
    # Reshape the table so each bid band is on its own row.
decision_variables = hf.stack_columns(volume_bids, cols_to_keep=['unit', 'service'],
cols_to_stack=bid_bands, type_name='capacity_band', value_name='upper_bound')
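    # Drop bid bands with effectively zero volume; the 0.0001 MW tolerance guards against floating point noise.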
decision_variables = decision_variables[decision_variables['upper_bound'] >= 0.0001]
# Group units together in the decision variable table.
decision_variables = decision_variables.sort_values(['unit', 'capacity_band'])
# Create a unique identifier for each decision variable.
decision_variables = hf.save_index(decision_variables, 'variable_id', next_variable_id)
# The lower bound of bidding decision variables will always be zero.
decision_variables['lower_bound'] = 0.0
decision_variables['type'] = 'continuous'
constraint_map = decision_variables.loc[:, ['variable_id', 'unit', 'service']]
constraint_map = pd.merge(constraint_map, unit_info.loc[:, ['unit', 'region', 'dispatch_type']], 'inner', on='unit')
regional_constraint_map = constraint_map.loc[:, ['variable_id', 'region', 'service', 'dispatch_type']]
regional_constraint_map['coefficient'] = np.where((regional_constraint_map['dispatch_type'] == 'load') &
(regional_constraint_map['service'] == 'energy'), -1.0, 1.0)
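    # Loads enter the regional energy balance with coefficient -1.0; generators (and non-energy services) with +1.0.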
regional_constraint_map = regional_constraint_map.drop('dispatch_type', axis=1)
unit_level_constraint_map = constraint_map.loc[:, ['variable_id', 'unit', 'service']]
unit_level_constraint_map['coefficient'] = 1.0
decision_variables = \
decision_variables.loc[:, ['unit', 'capacity_band', 'service', 'variable_id', 'lower_bound', 'upper_bound',
'type']]
return decision_variables, unit_level_constraint_map, regional_constraint_map
| 48.922078 | 120 | 0.541147 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,037 | 0.801301 |
7673a924d37892716bfea072dd29ec7597bd35da | 3,422 | py | Python | numba/cuda/tests/cudapy/test_random.py | blair1306/numba | 3b9647d17d653abac15363da604eeb804dbdd15a | [
"BSD-2-Clause"
] | 76 | 2020-07-06T14:44:05.000Z | 2022-02-14T15:30:21.000Z | numba/cuda/tests/cudapy/test_random.py | blair1306/numba | 3b9647d17d653abac15363da604eeb804dbdd15a | [
"BSD-2-Clause"
] | 11 | 2020-08-09T02:30:14.000Z | 2022-03-12T00:50:14.000Z | numba/cuda/tests/cudapy/test_random.py | blair1306/numba | 3b9647d17d653abac15363da604eeb804dbdd15a | [
"BSD-2-Clause"
] | 11 | 2020-07-12T16:18:07.000Z | 2022-02-05T16:48:35.000Z | import math
import numpy as np
from numba import cuda, float32
from numba.cuda.testing import unittest
import numba.cuda.random
from numba.cuda.testing import skip_on_cudasim, CUDATestCase
from numba.cuda.random import \
xoroshiro128p_uniform_float32, xoroshiro128p_normal_float32, \
xoroshiro128p_uniform_float64, xoroshiro128p_normal_float64
from numba.core import config
# Distributions
UNIFORM = 1
NORMAL = 2
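# Flags telling the kernels which distribution to sample; passed in as a plain kernel argument.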
@cuda.jit
def rng_kernel_float32(states, out, count, distribution):
thread_id = cuda.grid(1)
for i in range(count):
if distribution == UNIFORM:
out[thread_id * count + i] = xoroshiro128p_uniform_float32(states, thread_id)
elif distribution == NORMAL:
out[thread_id * count + i] = xoroshiro128p_normal_float32(states, thread_id)
@cuda.jit
def rng_kernel_float64(states, out, count, distribution):
thread_id = cuda.grid(1)
for i in range(count):
if distribution == UNIFORM:
out[thread_id * count + i] = xoroshiro128p_uniform_float64(states, thread_id)
elif distribution == NORMAL:
out[thread_id * count + i] = xoroshiro128p_normal_float64(states, thread_id)
class TestCudaRandomXoroshiro128p(CUDATestCase):
def test_create(self):
states = cuda.random.create_xoroshiro128p_states(10, seed=1)
s = states.copy_to_host()
self.assertEqual(len(np.unique(s)), 10)
def test_create_subsequence_start(self):
states = cuda.random.create_xoroshiro128p_states(10, seed=1)
s1 = states.copy_to_host()
states = cuda.random.create_xoroshiro128p_states(10, seed=1,
subsequence_start=3)
s2 = states.copy_to_host()
# Starting seeds should match up with offset of 3
np.testing.assert_array_equal(s1[3:], s2[:-3])
def test_create_stream(self):
stream = cuda.stream()
states = cuda.random.create_xoroshiro128p_states(10, seed=1, stream=stream)
s = states.copy_to_host()
self.assertEqual(len(np.unique(s)), 10)
def check_uniform(self, kernel_func, dtype):
states = cuda.random.create_xoroshiro128p_states(32 * 2, seed=1)
        out = np.zeros(2 * 32 * 32, dtype=dtype)
kernel_func[2, 32](states, out, 32, UNIFORM)
self.assertAlmostEqual(out.min(), 0.0, delta=1e-3)
self.assertAlmostEqual(out.max(), 1.0, delta=1e-3)
self.assertAlmostEqual(out.mean(), 0.5, delta=1.5e-2)
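        # For U(0,1) the standard deviation is sqrt(1/12) = 1/(2*sqrt(3)) ~= 0.2887, checked below.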
self.assertAlmostEqual(out.std(), 1.0/(2*math.sqrt(3)), delta=6e-3)
def test_uniform_float32(self):
self.check_uniform(rng_kernel_float32, np.float32)
@skip_on_cudasim('skip test for speed under cudasim')
def test_uniform_float64(self):
self.check_uniform(rng_kernel_float64, np.float64)
def check_normal(self, kernel_func, dtype):
states = cuda.random.create_xoroshiro128p_states(32 * 2, seed=1)
out = np.zeros(2 * 32 * 32, dtype=dtype)
kernel_func[2, 32](states, out, 32, NORMAL)
self.assertAlmostEqual(out.mean(), 0.0, delta=4e-3)
self.assertAlmostEqual(out.std(), 1.0, delta=2e-3)
def test_normal_float32(self):
self.check_normal(rng_kernel_float32, np.float32)
@skip_on_cudasim('skip test for speed under cudasim')
def test_normal_float64(self):
self.check_normal(rng_kernel_float64, np.float64)
if __name__ == '__main__':
unittest.main()
| 33.881188 | 89 | 0.688778 | 2,187 | 0.6391 | 0 | 0 | 1,046 | 0.305669 | 0 | 0 | 144 | 0.042081 |
767403c68c6963f4ac7af7065b349de0b68aecd1 | 4,307 | py | Python | reviewboard/scmtools/sshutils.py | smorley/reviewboard | 39dd1166fdec19d4fbced965b42a3a23a3b6b956 | [
"MIT"
] | 1 | 2019-01-16T11:59:40.000Z | 2019-01-16T11:59:40.000Z | reviewboard/scmtools/sshutils.py | smorley/reviewboard | 39dd1166fdec19d4fbced965b42a3a23a3b6b956 | [
"MIT"
] | null | null | null | reviewboard/scmtools/sshutils.py | smorley/reviewboard | 39dd1166fdec19d4fbced965b42a3a23a3b6b956 | [
"MIT"
] | null | null | null | import os
import urlparse
from django.utils.translation import ugettext_lazy as _
import paramiko
from reviewboard.scmtools.errors import AuthenticationError, \
BadHostKeyError, SCMError, \
UnknownHostKeyError
# A list of known SSH URL schemes.
ssh_uri_schemes = ["ssh", "sftp"]
urlparse.uses_netloc.extend(ssh_uri_schemes)
class RaiseUnknownHostKeyPolicy(paramiko.MissingHostKeyPolicy):
"""A Paramiko policy that raises UnknownHostKeyError for missing keys."""
def missing_host_key(self, client, hostname, key):
raise UnknownHostKeyError(hostname, key)
def humanize_key(key):
"""Returns a human-readable key as a series of hex characters."""
return ':'.join(["%02x" % ord(c) for c in key.get_fingerprint()])
def get_host_keys_filename():
"""Returns the URL to the known host keys file."""
return os.path.expanduser('~/.ssh/known_hosts')
def is_ssh_uri(url):
"""Returns whether or not a URL represents an SSH connection."""
return urlparse.urlparse(url)[0] in ssh_uri_schemes
def get_ssh_client():
"""Returns a new paramiko.SSHClient with all known host keys added."""
client = paramiko.SSHClient()
filename = get_host_keys_filename()
if os.path.exists(filename):
client.load_host_keys(filename)
return client
def add_host_key(hostname, key):
"""Adds a host key to the known hosts file."""
dirname = os.path.dirname(get_host_keys_filename())
if not os.path.exists(dirname):
# Make sure the .ssh directory exists.
try:
os.mkdir(dirname, 0700)
except OSError, e:
raise IOError(_("Unable to create directory %(dirname)s, which is "
"needed for the SSH host keys. Create this "
"directory, set the web server's user as the "
"the owner, and make it writable only by that "
"user.") % {
'dirname': dirname,
})
    filename = get_host_keys_filename()
    try:
        fp = open(filename, 'a')
        fp.write('%s %s %s\n' % (hostname, key.get_name(), key.get_base64()))
        fp.close()
    except IOError, e:
        raise IOError(
            _('Unable to write host keys file %(filename)s: %(error)s') % {
                'filename': filename,
                'error': e,
            })
def replace_host_key(hostname, old_key, new_key):
"""
Replaces a host key in the known hosts file with another.
This is used for replacing host keys that have changed.
"""
filename = get_host_keys_filename()
if not os.path.exists(filename):
add_host_key(hostname, new_key)
return
try:
fp = open(filename, 'r')
lines = fp.readlines()
fp.close()
old_key_base64 = old_key.get_base64()
except IOError, e:
raise IOError(
_('Unable to read host keys file %(filename)s: %(error)s') % {
'filename': filename,
'error': e,
})
try:
fp = open(filename, 'w')
for line in lines:
parts = line.strip().split(" ")
if parts[-1] == old_key_base64:
parts[-1] = new_key.get_base64()
fp.write(' '.join(parts) + '\n')
fp.close()
except IOError, e:
raise IOError(
_('Unable to write host keys file %(filename)s: %(error)s') % {
'filename': filename,
'error': e,
})
def check_host(hostname, username=None, password=None):
"""
Checks if we can connect to a host with a known key.
This will raise an exception if we cannot connect to the host. The
exception will be one of BadHostKeyError, UnknownHostKeyError, or
SCMError.
"""
client = get_ssh_client()
client.set_missing_host_key_policy(RaiseUnknownHostKeyPolicy())
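    # With this policy, a host key missing from known_hosts raises UnknownHostKeyError instead of being auto-accepted.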
try:
client.connect(hostname, username=username, password=password)
except paramiko.BadHostKeyException, e:
raise BadHostKeyError(e.hostname, e.key, e.expected_key)
except paramiko.AuthenticationException, e:
raise AuthenticationError()
except paramiko.SSHException, e:
raise SCMError(unicode(e))
| 30.118881 | 79 | 0.602972 | 245 | 0.056884 | 0 | 0 | 0 | 0 | 0 | 0 | 1,291 | 0.299745 |
7676eaebbf4a10d110f2aa934b65cc618ec30ec1 | 10,056 | py | Python | extract_data.py | dsrincon/bitcoin_network_analysis | 897cdf6c2ad02a5790f4fb1b16382c4e4b31b0d2 | [
"MIT"
] | null | null | null | extract_data.py | dsrincon/bitcoin_network_analysis | 897cdf6c2ad02a5790f4fb1b16382c4e4b31b0d2 | [
"MIT"
] | null | null | null | extract_data.py | dsrincon/bitcoin_network_analysis | 897cdf6c2ad02a5790f4fb1b16382c4e4b31b0d2 | [
"MIT"
] | 1 | 2021-03-25T20:00:12.000Z | 2021-03-25T20:00:12.000Z | # -*- coding: utf-8 -*-
# Functions and Script to extract data
import blocksci
import pandas as pd
import numpy as np
import networkx as nx
import multiprocessing as mp
import itertools
import random
import time
import string
import pickle
import csv
import gc
import os, sys
from functools import partial
#***********CLASSES AND FUNTIONS***********
# Class that creates a blockchain a blockchain partition (dictionary) given data range and partition type (blocks,days,weeks)
class BchainPartition():
def __init__(self,chain,start_timestamp,end_timestamp,ptype='blocks',sample_size=10):
blocks=chain.range(start=start_timestamp,end=end_timestamp)
self.block_h=blocks.height
print('Start_block: {}'.format(self.block_h[0]))
print('End_block: {}'.format(self.block_h[-1]))
        if sample_size>0: # Sample block heights at random from the given range
sample_list=list(np.random.choice(self.block_h,sample_size))
sample_blocks=[chain[ix_b] for ix_b in sample_list]
txs=[b.txes for b in sample_blocks]
self.partition={h:[t for t in t_l] for h,t_l in zip(sample_list,txs)}
self.no_parts=len(sample_blocks)
else:
if ptype=='blocks':
self.partition={b.height:[tx for tx in b.txes] for b in blocks}
self.no_parts=np.int32(len(blocks))
print('Number of Blocks: {} '.format(len(blocks)))
print('Highest block height: {}'.format(blocks[-1].height))
                print('Number of Transactions: {} '.format(sum(len(t_l) for t_l in self.partition.values())))
# ***TODO: Create partition for other types of partitions (use tx.block_time)
# Function that takes blockchain partition and outputs pandas data frame with features
# for the graph defined by each split in the partition
def partition_data(chainpartiton,directory,filename):
# Dictionary with partition
partition=chainpartiton.partition
partindex=partition.keys()
parts=partition.values()
data_tuples=[]
graphs=[]
print('Number of parts: {}'.format(len(partindex)))
tuples=[(index,part) for index,part in zip(partindex,parts)]
no_parts=len(tuples)
processed=0
for t in tuples:
data_i,columns_i,graph_i=graph_features(t,slice_type='blocks')
with open(filename,'a') as f:
writer = csv.writer(f, delimiter=',')
if len(data_tuples)==0: # Write column names on first pass
writer.writerow(columns_i)
writer.writerow(data_i)
# Save graph
nx.write_gpickle(graph_i,directory+str(graph_i.graph['graph_id'])+'.gpickle')
data_tuples.append((data_i,columns_i))
graphs.append(graph_i)
processed+=1
progress=(processed/no_parts)*100
#sys.stdout.write("Download progress: %d%% \r" % (progress) )
sys.stdout.write("Download progress: {:07.4f} \r".format(progress) )
sys.stdout.flush()
'''
chunksize=len(tuples)%ncpu
with mp.Pool(processes=ncpu) as pool:
data_tuples=pool.map(graph_features,tuples,chunksize)
'''
    columns=data_tuples[0][1] #This value is being re-written. This design choice is to maintain consistency with columns.
data=[i for i,j in data_tuples]
data=np.array(data)
df=pd.DataFrame(data=data[:,:],columns=columns)
return (df,graphs)
# Function that receives a chain part (list of transactions), generates transaction graph and calculates statistics
def graph_features(chain_part_tuple,slice_type='blocks'):
index=chain_part_tuple[0]
chain_part=chain_part_tuple[1]
block_height=chain_part[-1].block_height
graph=block_graph(chain_part,index,slice_type)
nx.info(graph)
nodes=graph.nodes(data=True)
edges=graph.edges(data=True)
data=[index]
columns=['block_height']
# Number of Nodes
no_nodes=nx.number_of_nodes(graph)
data.append(no_nodes)
columns.append('no_nodes')
# Number of Edges (address to address transactions)
no_edges=nx.number_of_edges(graph)
data.append(no_edges)
columns.append('no_edges')
# Total value transacted
total_value=np.sum(np.array([a['value'] for n1,n2,a in edges]))
data.append(total_value)
columns.append('value_transacted')
# Total Density
density=nx.density(graph)
data.append(density)
columns.append('total_density')
# Nodes with self loops nx.loops nodes_with_selfloops(G) nodes_with_selfloops(G)
nodes_self=nx.number_of_selfloops(graph)
data.append(nodes_self)
columns.append('nodes_self')
# Value of self loops nodes_with_selfloops(G)
values=np.array([a['value'] for n1,n2,a in nx.selfloop_edges(graph,data=True)])
selfloop_value=np.sum(values)
data.append(selfloop_value)
columns.append('selfloop_value')
# Number of transactions to old addresses
old_nodes=[n for n,a in nodes if a['block_created']<block_height]
edges_to_old=graph.in_edges(old_nodes,data=True)
data.append(len(edges_to_old))
columns.append('old_nodes_in')
# Ratio of transactions to old addresses to total transactions
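    # (+1 in the denominator guards against division by zero when a part contains no edges)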
ratio_oldin_totalin=len(edges_to_old)/(no_edges+1)
data.append(ratio_oldin_totalin)
columns.append('ratio_oldin_totalin')
# Value of transactions to old addresses
value_to_old=[a['value'] for n1,n2,a in edges_to_old]
data.append(np.sum(np.array(value_to_old)))
columns.append('value_to_old')
# Old address density
old_graph=nx.induced_subgraph(graph,old_nodes)
old_density=nx.density(old_graph)
data.append(old_density)
columns.append('old_density')
# ***TODO*** (Aggregated graph analysis)
# Accumulated reuse
# Dominance (Agg graph or new vs. old dominance)
#https://networkx.github.io/documentation/stable/reference/algorithms/dominance.html
    # Common ancestors (as with dominance the address ancestor path should be proportional
#to the blockchain lenght if address reuse is minimal)
#***********
#print('{} Processed'.format(index))
return (data,columns,graph)
# Function that creates transaction graph for a given number transactions
def block_graph(txs,index,slice_type):
# Create graph and process
graph = nx.MultiDiGraph(graph_id=index,slice_type=slice_type)
nodes=[]
edges=[]
# Extract transactions information
init_block=txs[0].block.height
txs_dic={tx.index:tx for tx in txs}
txs_ix=list(txs_dic.keys())
txs_ix.sort()
start_ix=txs_ix[0]
end_ix=txs_ix[-1]
# Generate edges to input to graph
# TODO:Re-write for pre-process: See last answ with qeues https://stackoverflow.com/questions/33107019/multiple-threads-writing-to-the-same-csv-in-python
'''
with mp.Pool(processes=ncpu) as pool:
edges=pool.map(extract_nodes_edges,txs,chunksize)
'''
for tx in txs:
edges_i,nodes_i=extract_nodes_edges(tx)
nodes.append(nodes_i)
edges.append(edges_i)
nodes=list(itertools.chain.from_iterable(nodes))
edges=list(itertools.chain.from_iterable(edges))
# Input to graph
graph.add_nodes_from(nodes)
graph.add_edges_from(edges)
#print('Generated Graph for Block starting at:{}'.format(init_block))
return graph
# Function that receives a transaction and generates nodes and edges from addresses in transaction
def extract_nodes_edges(transaction):
# Initialize values and get info from transaction
edges=[]
output_value=transaction.output_value
block_height=transaction.block_height
tx_id=transaction.index
# Get inputs, types and values
inputs=transaction.inputs.address
input_val=transaction.inputs.value
input_nodes=[(inp.address_num,{'raw_type':inp.raw_type,'block_created':inp.first_tx.block.height})for inp in inputs]
# Get outputs and types
outputs=transaction.outputs.address
output_nodes=[(out.address_num,{'raw_type':out.raw_type,'block_created':out.first_tx.block.height})for out in outputs]
# ****TODO: Add address balance as attribute to node****
# Create nodes
nodes=input_nodes+output_nodes
# Create edges (NetworkX will automatically create nodes when given edges)
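    # Each input's value is split evenly across all outputs; this is a simple attribution heuristic,
    # since the true input-to-output flow inside a transaction is unknown.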
for i in range(len(inputs)):
value=input_val[i]
prop_value=value/len(outputs)
for o in range(len(outputs)):
edge=(inputs[i].address_num,outputs[o].address_num,{'value':prop_value,'tx_id':block_height})
edges.append(edge)
return edges,nodes
#***********SCRIPT***********
# Point to parsed blockchain data
ncpu=mp.cpu_count()
chain = blocksci.Blockchain("/home/ubuntu/bitcoin")
types=blocksci.address_type.types
total_blocks=chain.blocks
print('Total Blocks up to {}: {} '.format(total_blocks[-1].time,len(total_blocks)))
#---SCRIPT: generates data for graphs in each part of the partition
# Create directories and files to store graphs and dataframe
# Generate an extraction ID (Each id has random id)
extraction_id = ''.join([random.choice(string.ascii_letters + string.digits) for n in range(6)])
print('Extraction id: {}'.format(extraction_id))
#---Save Dataframes
# Create directory and save
start='2010-02-01 00:00:00'
end='2018-02-01 11:59:59'
blocks=chain.range(start=start,end=end)
sample_size=35000
start_c=start
start_c=start_c.replace('-','_').replace(' ','_').replace(':','_')
end_c=end
end_c=end_c.replace('-','_').replace(' ','_').replace(':','_')
directory='extractions/'+extraction_id+'-'+str(sample_size)+'-blocks-'+start_c+'-'+end_c+'/graphs'+'/'
if not os.path.exists(directory):
os.makedirs(directory)
# Create Filename and save
filename='extractions/'+extraction_id+'-'+str(sample_size)+'-blocks-'+start_c+'-'+end_c+'/'+extraction_id+'-'+str(sample_size)+'-blocks-'+start_c+'-'+end_c+'.csv'
start_time=time.time()
partition=BchainPartition(chain,start,end,sample_size=sample_size)
df,graphs=partition_data(partition,directory,filename)
df.head()
end_time=time.time()
print('Time taken={}'.format(end_time-start_time))
print('\n***EXTRACTION COMPLETED SUCCESSFULLY***')
| 30.380665 | 162 | 0.699682 | 1,154 | 0.114723 | 0 | 0 | 0 | 0 | 0 | 0 | 3,697 | 0.367532 |
767a24e4966929593e14aec0be63ad350928e7b1 | 1,616 | py | Python | egs/wmt14_en_de/nlp1/local/generate_stand_vocab.py | didichuxing/delta | 31dfebc8f20b7cb282b62f291ff25a87e403cc86 | [
"Apache-2.0"
] | 1,442 | 2019-07-09T07:34:28.000Z | 2020-11-15T09:52:09.000Z | egs/wmt14_en_de/nlp1/local/generate_stand_vocab.py | didichuxing/delta | 31dfebc8f20b7cb282b62f291ff25a87e403cc86 | [
"Apache-2.0"
] | 93 | 2019-07-22T09:20:20.000Z | 2020-11-13T01:59:30.000Z | egs/wmt14_en_de/nlp1/local/generate_stand_vocab.py | didichuxing/delta | 31dfebc8f20b7cb282b62f291ff25a87e403cc86 | [
"Apache-2.0"
] | 296 | 2019-07-09T07:35:28.000Z | 2020-11-16T02:27:51.000Z | # Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import sys
from absl import logging
def generate_stand_vocab(old_vocab, new_vocab):
vocab_file = open(new_vocab, 'w')
vocab_file.write('<pad>' + '\t' + '0' + '\n')
vocab_file.write('<s>' + '\t' + '1' + '\n')
vocab_file.write('</s>' + '\t' + '2' + '\n')
vocab_file.write('<unk>' + '\t' + '3' + '\n')
vocab_file.write('<sos>' + '\t' + '4' + '\n')
vocab_file.write('<eos>' + '\t' + '5' + '\n')
idx = 6
with open(old_vocab, 'r') as f:
for i, line in enumerate(f.readlines()):
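      # The first three lines of the old vocab are assumed to hold its own special tokens, so they are skipped below.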
if i > 2:
vocab_file.write(line.strip() + '\t' +
str(idx) + '\n')
if __name__ == '__main__':
logging.set_verbosity(logging.INFO)
if len(sys.argv) != 3:
logging.error("Usage python {} old_vocab new_vocab".format(sys.argv[0]))
sys.exit(-1)
old_vocab = sys.argv[1]
new_vocab = sys.argv[2]
generate_stand_vocab(old_vocab, new_vocab)
| 34.382979 | 80 | 0.618193 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 880 | 0.544554 |
767a289d0241b57b9fea319169e096af9bc0394e | 6,524 | py | Python | Preprocessing/Crawler/Crawler.py | sambacha/pyblock | f8f207de36a2f91dfe5f61681eba0e371cb0c552 | [
"MIT"
] | null | null | null | Preprocessing/Crawler/Crawler.py | sambacha/pyblock | f8f207de36a2f91dfe5f61681eba0e371cb0c552 | [
"MIT"
] | null | null | null | Preprocessing/Crawler/Crawler.py | sambacha/pyblock | f8f207de36a2f91dfe5f61681eba0e371cb0c552 | [
"MIT"
] | null | null | null | """A client to interact with node and to save data to mongo."""
from pymongo import MongoClient
import crawler_util
import requests
import json
import sys
import os
import logging
import time
import tqdm
sys.path.append(os.path.realpath(os.path.dirname(__file__)))
DIR = "/mnt/c/data/db"
LOGFIL = "crawler.log"
if "BLOCKCHAIN_ANALYSIS_LOGS" in os.environ:
LOGFIL = "{}/{}".format(os.environ["BLOCKCHAIN_ANALYSIS_LOGS"], LOGFIL)
crawler_util.refresh_logger(LOGFIL)
logging.basicConfig(filename=LOGFIL, level=logging.DEBUG)
logging.getLogger("urllib3").setLevel(logging.WARNING)
class Crawler(object):
def __init__(
self, start=True, rpc_port=8545, host="http://localhost", delay=0.0001
):
"""Initialize the Crawler."""
print("Starting Crawler")
self.url = "{}:{}".format(host, rpc_port)
self.headers = {"content-type": "application/json"}
# Initializes to default host/port = localhost/27017
self.mongo_client = crawler_util.initMongo(MongoClient())
# The max block number that is in mongo
self.max_block_mongo = None
# The max block number in the public blockchain
self.max_block_geth = None
# Record errors for inserting block data into mongo
self.insertion_errors = list()
# Make a stack of block numbers that are in mongo
self.block_queue = crawler_util.makeBlockQueue(self.mongo_client)
# The delay between requests to geth
self.delay = delay
self.session = requests.Session()
if start:
self.max_block_mongo = self.highestBlockMongo()
self.max_block_geth = 4369999 # self.highestBlockEth()
self.run()
def _rpcRequest(self, method, params, key):
"""Make an RPC request to geth on port 8545."""
payload = {"method": method, "params": params, "jsonrpc": "2.0", "id": 0}
"""time.sleep(self.delay)"""
data = json.dumps(payload)
res = self.session.post(self.url, data, stream=True).json()
return res[key]
def getBlock(self, n):
"""Get a specific block from the blockchain and filter the data."""
data = self._rpcRequest("eth_getBlockByNumber", [n, True], "result")
block = crawler_util.decodeBlock(data)
uncleHash = data["hash"]
if data["uncles"]:
uncles = self.retrieveUncles(uncleHash, block["number"])
block["uncles"] = uncles
return block
def getMiner(self, n):
miner = self._rpcRequest("eth_getBlockByNumber", [hex(n), False], "result")[
"miner"
]
crawler_util.insertMiner(self.mongo_client, n, miner)
def retrieveUncles(self, blockHash, height):
uncleCountHex = self._rpcRequest(
"eth_getUncleCountByBlockHash", [blockHash], "result"
)
uncleCount = int(uncleCountHex, 16)
uncles = []
for i in range(uncleCount):
uncleBlock = self._rpcRequest(
"eth_getUncleByBlockHashAndIndex", [blockHash, hex(i)], "result"
)
newBlock = {
"miner": uncleBlock["miner"],
"reward": (8 - (height - int(uncleBlock["number"], 16))) / 8 * 5,
}
uncles.append(newBlock)
return uncles
def highestBlockEth(self):
"""Find the highest numbered block in geth."""
num_hex = self._rpcRequest("eth_blockNumber", [], "result")
return int(num_hex, 16)
def saveBlock(self, block):
"""Insert a given parsed block into mongo."""
e = crawler_util.insertMongo(self.mongo_client, block)
if e:
self.insertion_errors.append(e)
def saveMiner(self, block):
"""Insert a given parsed block into mongo."""
e = crawler_util.insertMiner(self.mongo_client, block)
if e:
self.insertion_errors.append(e)
def highestBlockMongo(self):
"""Find the highest numbered block in the mongo database."""
highest_block = crawler_util.highestBlock(self.mongo_client)
logging.info("Highest block found in mongodb:{}".format(highest_block))
return highest_block
def add_block(self, n):
"""Add a block to mongo."""
b = self.getBlock(n)
if b:
self.saveBlock(b)
time.sleep(0.001)
else:
self.saveBlock({"number": n, "transactions": []})
def add_miner(self, n):
"""Add a block to mongo."""
self.getMiner(n)
# else:
# self.saveBlock({"number": n, "transactions": []})
def run(self):
"""
Run the process.
Iterate through the blockchain on geth and fill up mongodb
with block data.
"""
print("Processing geth blockchain:")
print("Highest block found as: {}".format(self.max_block_geth))
print("Number of blocks to process: {}".format(len(self.block_queue)))
# Make sure the database isn't missing any blocks up to this point
logging.debug("Verifying that mongo isn't missing any blocks...")
self.max_block_mongo = 1
if len(self.block_queue) > 0:
print("Looking for missing blocks...")
self.max_block_mongo = self.block_queue.pop()
for n in tqdm.tqdm(range(1, self.max_block_mongo)):
if len(self.block_queue) == 0:
# If we have reached the max index of the queue,
# break the loop
break
else:
# -If a block with number = current index is not in
# the queue, add it to mongo.
# -If the lowest block number in the queue (_n) is
# not the current running index (n), then _n > n
# and we must add block n to mongo. After doing so,
# we will add _n back to the queue.
_n = self.block_queue.popleft()
if n != _n:
self.add_block(n)
self.block_queue.appendleft(_n)
logging.info("Added block {}".format(n))
# Get all new blocks
print("Processing remainder of the blockchain...")
for n in tqdm.tqdm(range(self.max_block_mongo, self.max_block_geth)):
self.add_block(hex(n))
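            # geth's JSON-RPC expects block numbers encoded as hex quantity strings, hence hex(n).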
# for n in tqdm.tqdm(range(3826871, self.max_block_geth)):
# self.add_miner(n)
print("Done!\n")
| 36.244444 | 84 | 0.591355 | 5,937 | 0.910025 | 0 | 0 | 0 | 0 | 0 | 0 | 2,201 | 0.33737 |
767abb3e3d12d3c5ac07f73a70a0a54a78206ee1 | 302 | py | Python | packages/models-library/src/models_library/__init__.py | elisabettai/osparc-simcore | ad7b6e05111b50fe95e49306a992170490a7247f | [
"MIT"
] | null | null | null | packages/models-library/src/models_library/__init__.py | elisabettai/osparc-simcore | ad7b6e05111b50fe95e49306a992170490a7247f | [
"MIT"
] | 55 | 2018-05-15T09:47:00.000Z | 2022-03-31T06:56:50.000Z | packages/models-library/src/models_library/__init__.py | mrnicegyu11/osparc-simcore | b6fa6c245dbfbc18cc74a387111a52de9b05d1f4 | [
"MIT"
] | 1 | 2020-04-22T15:06:58.000Z | 2020-04-22T15:06:58.000Z | """ osparc's service models library
"""
#
# NOTE:
# - "examples" = [ ...] keyword and NOT "example". See https://json-schema.org/understanding-json-schema/reference/generic.html#annotations
#
import pkg_resources
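# Resolve the package version from the installed distribution's metadata at import time.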
__version__: str = pkg_resources.get_distribution("simcore-models-library").version
| 25.166667 | 141 | 0.738411 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 214 | 0.708609 |
767bc9780baf150a4175689b7f218ae850fdbe0b | 257 | py | Python | chords/more/gevent_task.py | omergertel/chords | 1f5dd50f55f21ee32b8a4d7b808687d8ed1400c2 | [
"MIT"
] | null | null | null | chords/more/gevent_task.py | omergertel/chords | 1f5dd50f55f21ee32b8a4d7b808687d8ed1400c2 | [
"MIT"
] | null | null | null | chords/more/gevent_task.py | omergertel/chords | 1f5dd50f55f21ee32b8a4d7b808687d8ed1400c2 | [
"MIT"
] | null | null | null | import functools
from gevent import spawn
from ..task import Task
class GeventTask(Task):
"""
Task that spawns a greenlet
"""
def start(self, *args, **kwargs):
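        # Run the base Task.start in a fresh greenlet and return the Greenlet handle immediately.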
return spawn(functools.partial(Task.start, self, *args, **kwargs))
| 21.416667 | 74 | 0.653696 | 184 | 0.715953 | 0 | 0 | 0 | 0 | 0 | 0 | 43 | 0.167315 |
767cbca3d4897fcfab64d04f9846298a51e53364 | 1,994 | py | Python | tst/drivers/uart/uart_suite.py | ivankravets/pumbaa | 2a1869cc204e3128516ed6fa9f89529aedec1702 | [
"MIT"
] | 69 | 2016-09-04T18:36:18.000Z | 2021-07-04T21:51:54.000Z | tst/drivers/uart/uart_suite.py | ivankravets/pumbaa | 2a1869cc204e3128516ed6fa9f89529aedec1702 | [
"MIT"
] | 42 | 2016-09-02T20:10:19.000Z | 2020-07-01T05:54:01.000Z | tst/drivers/uart/uart_suite.py | ivankravets/pumbaa | 2a1869cc204e3128516ed6fa9f89529aedec1702 | [
"MIT"
] | 11 | 2016-09-29T14:33:23.000Z | 2021-02-28T19:30:49.000Z | #
# @section License
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2017, Erik Moqvist
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# This file is part of the Pumbaa project.
#
from drivers import Uart
import select
def test_print():
print(Uart)
uart = Uart(1)
print(uart)
def test_write():
uart = Uart(1)
uart.start()
assert uart.write(b'1234') == 4
assert uart.write(b'5678', 1) == 1
uart.stop()
def test_read():
uart = Uart(1)
uart.start()
poll = select.poll()
poll.register(uart)
print('polling with 5 seconds timeout')
if poll.poll(5) == []:
print('poll timeout')
buf = bytearray(8)
assert uart.read_into(buf, 1) == 1
print('read:', buf)
print('reading one byte from UART 1')
buf = uart.read(1)
assert len(buf) == 1
print('read:', buf)
uart.stop()
TESTCASES = [
(test_print, "test_print"),
(test_write, "test_write"),
(test_read, "test_read")
]
| 25.240506 | 69 | 0.691575 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,307 | 0.655466 |
767e0dd0e7903c31af372aae1bd3170fe9248c58 | 2,035 | py | Python | elasticapm/instrumentation/packages/tornado_httplib_response.py | laerteallan/apm-agent-python | 62e494fa032e69bf4999cbc0efb30b80093ad5ba | [
"BSD-3-Clause"
] | 2 | 2019-02-15T20:23:39.000Z | 2019-02-15T20:26:06.000Z | elasticapm/instrumentation/packages/tornado_httplib_response.py | laerteallan/apm-agent-python | 62e494fa032e69bf4999cbc0efb30b80093ad5ba | [
"BSD-3-Clause"
] | null | null | null | elasticapm/instrumentation/packages/tornado_httplib_response.py | laerteallan/apm-agent-python | 62e494fa032e69bf4999cbc0efb30b80093ad5ba | [
"BSD-3-Clause"
] | null | null | null | from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule
from elasticapm.traces import capture_span
from elasticapm.utils import default_ports
from elasticapm.utils.compat import urlparse
def get_host_from_url(url):
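    # Build a "host[:port]" label, omitting the port when it matches the scheme's default.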
parsed_url = urlparse.urlparse(url)
host = parsed_url.hostname or " "
if parsed_url.port and default_ports.get(parsed_url.scheme) != parsed_url.port:
host += ":" + str(parsed_url.port)
return host
class HttpClientTornadoInstrumentation(AbstractInstrumentedModule):
name = "tornado"
instrument_list = [("tornado.httpclient", "HTTPResponse")]
def call(self, module, method, wrapped, instance, args, kwargs):
http_request_proxy = args[0]
url = http_request_proxy.url
duration = kwargs.get('request_time', 0)
start_time = kwargs.get('start_time', 0)
print("Inicio da requisicao")
signature = "{} {}".format(http_request_proxy.method.upper(), get_host_from_url(http_request_proxy.url))
print("start time tornado")
with capture_span(signature, "ext.http.tornado", {"url": url}, leaf=True, start_time=start_time,
duration=duration):
print("Tornado test")
teste = wrapped(*args, **kwargs)
return teste
# return wrapped(*args, **kwargs)
# http_request = kwargs.get("url", None)
# kwargs__http = vars(http_request)
# del kwargs__http['_body']
# del kwargs__http['_headers']
# del kwargs__http['_body_producer']
# del kwargs__http['_streaming_callback']
# del kwargs__http['_header_callback']
# del kwargs__http['_prepare_curl_callback']
# del kwargs__http['start_time']
# url = http_request.url
# signature = "{} {}".format(http_request.method.upper(), get_host_from_url(http_request.url))
#
# with capture_span(signature, "ext.http.tornado", {"url": url}, leaf=True):
# return wrapped(*args, **kwargs__http)
| 38.396226 | 112 | 0.660934 | 1,567 | 0.770025 | 0 | 0 | 0 | 0 | 0 | 0 | 755 | 0.371007 |
76803418e7c483c10d751d87b7054156573f1939 | 586 | py | Python | src/waldur_ansible/python_management/migrations/0004_removed_virtual_env_field_from_global_requests.py | opennode/waldur-ansible | c81c5f0491be02fa9a55a6d5bf9d845750fd1ba9 | [
"MIT"
] | 1 | 2017-09-05T08:09:47.000Z | 2017-09-05T08:09:47.000Z | src/waldur_ansible/python_management/migrations/0004_removed_virtual_env_field_from_global_requests.py | opennode/waldur-ansible | c81c5f0491be02fa9a55a6d5bf9d845750fd1ba9 | [
"MIT"
] | null | null | null | src/waldur_ansible/python_management/migrations/0004_removed_virtual_env_field_from_global_requests.py | opennode/waldur-ansible | c81c5f0491be02fa9a55a6d5bf9d845750fd1ba9 | [
"MIT"
] | 3 | 2017-09-24T03:13:19.000Z | 2018-08-12T07:44:38.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-03-27 14:01
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('python_management', '0003_added_unique_constraint'),
]
operations = [
migrations.RemoveField(
model_name='pythonmanagementdeleterequest',
name='virtual_env_name',
),
migrations.RemoveField(
model_name='pythonmanagementfindvirtualenvsrequest',
name='virtual_env_name',
),
]
| 24.416667 | 64 | 0.645051 | 436 | 0.744027 | 0 | 0 | 0 | 0 | 0 | 0 | 227 | 0.387372 |
76803f5a83658d39e862a3ea11083cca906b0016 | 3,369 | py | Python | pymain.py | Farhad-Mrkm/NCE_ICC-2022 | 998db82536c2077dbdb157aa21af8c6b84957761 | [
"MIT"
] | 1 | 2022-01-18T02:08:30.000Z | 2022-01-18T02:08:30.000Z | pymain.py | Farhad-Mrkm/NCE_ICC-2022 | 998db82536c2077dbdb157aa21af8c6b84957761 | [
"MIT"
] | null | null | null | pymain.py | Farhad-Mrkm/NCE_ICC-2022 | 998db82536c2077dbdb157aa21af8c6b84957761 | [
"MIT"
] | null | null | null | # author__Farhad_Mirkarimi-*- coding: utf-8 -*-
import os
import h5py
import glob
import numpy as np
import matplotlib.pyplot as plt
from numpy import mean
from numpy import std
import torch
import torch.nn as nn
from tqdm.auto import tqdm, trange
from numpy.random import default_rng
import torch.nn.functional as F
import argparse
import gc
gc.collect()
print(np.version.version)
from all_params import all_params
from joint_training import joint_training
################ parsing input args###################
parser = argparse.ArgumentParser(description='provide arguments for neural capacity estimation')
#parser.add_argument('--SNR', type=int, default=[10], help='Signal to noise(unit)')
parser.add_argument('--SNR',nargs='+',type=int)
parser.add_argument('--init_epoch', type=int, default=100, help='First round epoch')
parser.add_argument('--max_epoch', type=int, default=3000, help='joint training epoch')
parser.add_argument('--seed_size', type=int, default=2, help='seed size for discrete inputs')
parser.add_argument('--batch_size', type=int, default=256, help='batch size')
parser.add_argument('--hidden_dim_critic', type=int, default=256, help='hidden dim for mi_est net')
parser.add_argument('--hidden_dim_nit', type=int, default=256, help='hidden_dim for nit net')
parser.add_argument('--dim', type=int, default=1, help='dimension for mi_est net')
parser.add_argument('--dim_nit', type=int, default=1, help='dimension for NIT net')
parser.add_argument('--layer_mi', type=int, default=4, help='layer number for mi_est net')
parser.add_argument('--layer_nit', type=int, default=4, help='layer number for nit_net')
parser.add_argument('--lr_rate_nit', type=float, default=.0001, help='training lr')
parser.add_argument('--lr_rate_mi_est', type=float, default=.00001, help='training lr')
parser.add_argument('--type_channel', type=str, default='conts_awgn', help='channel name')
parser.add_argument('--estimator', type=str, default='mine', help='estimator type')
parser.add_argument('--activation', type=str, default='relu', help='activation function')
parser.add_argument('--peak', type=float, default=None, help='peak_amplitude constraint')
parser.add_argument('--positive', type=float, default=None, help='positivity of input')
#parser.add_argument('--verbose', dest='verbose', action='store_true')
#parser.set_defaults(verbose=False)
args = parser.parse_args()
######################################################3
nit_params,critic_params=all_params(dim=args.dim,layers_critic=args.layer_mi,embed_dim=32,hidden_dim_critic=256,activation_F1='relu',lr_critic=.0001,dim_NIT=args.dim_nit,layers_NIT=args.layer_nit,hidden_dim_NIT=256,t_x_power=1,lr_NIT=.0001,channel_type=args.type_channel,peak_amp=args.peak,positive=args.positive)
batch_x0,cap= joint_training(typeinp=args.type_channel,nit_params=nit_params,critic_params=critic_params,SNR=args.SNR,estimator=args.estimator,init_epoch=args.init_epoch,max_epoch=args.max_epoch,itr_every_nit=2,itr_every_mi=5,batch_size=args.batch_size,seed_size=args.seed_size)
| 63.566038 | 313 | 0.688038 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,057 | 0.313743 |
76806ed1e58231dbb9592dc36161e9c8ee832dc4 | 3,245 | py | Python | libpySat/pySatTransformPolarMotion.py | grzeskokbol/pySatTools | 7518ce670866bfebd4eb3d2f390e885d83c2e9c9 | [
"MIT"
] | null | null | null | libpySat/pySatTransformPolarMotion.py | grzeskokbol/pySatTools | 7518ce670866bfebd4eb3d2f390e885d83c2e9c9 | [
"MIT"
] | null | null | null | libpySat/pySatTransformPolarMotion.py | grzeskokbol/pySatTools | 7518ce670866bfebd4eb3d2f390e885d83c2e9c9 | [
"MIT"
] | null | null | null |
import datetime
import numpy as np
import libpySat as pySat
from astropy import _erfa as erfa
from scipy.misc import derivative
from scipy import interpolate
class TransformPolarMotion:
def __init__(self,fxp,fyp):
self.fxp=fxp
self.fyp=fyp
self.epochSave = datetime.datetime.now()
self.rotSave = np.matrix(([0, 0, 0], [0, 0, 0], [0, 0, 0]), dtype=float)
self.sprime=0.0
def __getPolarMotion(self, epoch: datetime.datetime):
"""
:param epoch:
        :return: polar motion: x,y [arcsec]
"""
mjd=pySat.UTC2MJD(epoch)
return self.fxp(mjd),self.fyp(mjd)
def __getPolarMotionDot(self, epoch: datetime.datetime):
"""
:param epoch:
        :return: polar motion rates: x,y [arcsec/day]
"""
mjd=pySat.UTC2MJD(epoch)
xpdot=derivative(self.fxp,mjd,dx=1e-3,n=1)
ypdot = derivative(self.fyp, mjd, dx=1e-3,n=1)
return xpdot,ypdot
def getMatrix_PolarMotion(self,epoch:datetime.datetime):
"""
Get the polar motion matrix. Relates ITRF to TIRS.
:param epoch:
:return:
"""
if (epoch !=self.epochSave):
xp,yp = self.__getPolarMotion(epoch)
# TODO: Implementation of tidal and libration terms for polar motion...
xp*=np.pi/180.0/3600.0
yp*=np.pi/180.0/3600.0
sp= self.__getTIO(epoch)
#print(xp,yp,sp)
rxy= np.matmul(pySat.RotationMatrix3DY(xp),pySat.RotationMatrix3DX(yp))
rs=pySat.RotationMatrix3DZ(-sp)
self.rotSave=np.matmul(rs,rxy)
self.epochSave = epoch
return self.rotSave
else:
return self.rotSave
def __getTIO(self, epoch:datetime.datetime ):
"""
Gets the Terrestrial Intermediate Origin (TIO) locator s'
Terrestrial Intermediate Ref Sys (TIRS) defined by TIO and CIP.
TIRS related to to CIRS by Earth Rotation Angle
:param epoch:
:return:
"""
mjd = pySat.pySatTime.UTC2MJD(epoch)
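        # erfa.sp00 returns the TIO locator s' in radians; the epoch is passed as a two-part Julian Date (2400000.5 + MJD).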
self.sprime=erfa.sp00(2400000.5,mjd)
return self.sprime
def getMatrix_PolarMotionDot(self,epoch:datetime.datetime):
"""
Get the polar motion matrix. Relates ITRF to TIRS.
:param epoch:
:return:
"""
# TODO: Implementation of tidal and libration terms for polar motion...
xp, yp = self.__getPolarMotion(epoch)
xpDot,ypDot = self.__getPolarMotionDot(epoch)
xp *= np.pi / 180.0 / 3600.0
yp *= np.pi / 180.0 / 3600.0
xpDot*=np.pi/180.0/3600.0
ypDot*=np.pi/180.0/3600.0
spDot = -47.0 / 1.0e6 / 3600.0 / 180.0 * np.pi / 86400.0 / 36525.0
sp = self.__getTIO(epoch)
print('Pmotion dot:',xpDot,ypDot,spDot)
rxy= np.matmul(pySat.RotationMatrix3DY(xp),pySat.RotationMatrix3DX(yp))
rxyDot = np.matmul(xpDot* pySat.RotationMatrix3DY(xp), pySat.RotationMatrix3DX(yp)) \
+np.matmul( pySat.RotationMatrix3DY(xp),ypDot* pySat.RotationMatrix3DX(yp))
rs=pySat.RotationMatrix3DZ(-sp)
rsDot=-spDot*pySat.RotationMatrix3DZ(-sp)
return np.matmul(rsDot,rxy) + np.matmul(rs,rxyDot)
| 32.777778 | 93 | 0.600616 | 3,084 | 0.950385 | 0 | 0 | 0 | 0 | 0 | 0 | 808 | 0.248998 |
7683107d396e18c771d9adf914e31a75696e11b6 | 2,086 | py | Python | graphtheory/dominatingsets/tests/test_dsetus.py | mashal02/graphs-dict | 39917d8a7f3bdcd5d95f3549ca054d16ba535e90 | [
"BSD-3-Clause"
] | 36 | 2015-09-20T20:55:39.000Z | 2021-09-20T05:49:03.000Z | graphtheory/dominatingsets/tests/test_dsetus.py | mashal02/graphs-dict | 39917d8a7f3bdcd5d95f3549ca054d16ba535e90 | [
"BSD-3-Clause"
] | 6 | 2016-03-25T21:41:46.000Z | 2020-02-12T03:18:59.000Z | graphtheory/dominatingsets/tests/test_dsetus.py | mashal02/graphs-dict | 39917d8a7f3bdcd5d95f3549ca054d16ba535e90 | [
"BSD-3-Clause"
] | 9 | 2016-09-12T07:57:27.000Z | 2022-03-21T16:15:39.000Z | #!/usr/bin/python
import unittest
from graphtheory.structures.edges import Edge
from graphtheory.structures.graphs import Graph
from graphtheory.dominatingsets.dsetus import UnorderedSequentialDominatingSet
# 0 --- 1 --- 2 not bipartite (triangles are present)
# | / | |
# | / | | maximum iset: (0, 4, 2)
# | / | | minimum dset: (3, 2), (3, 5), (1, 5), (1, 4), (1, 2), (0, 5)
# 3 --- 4 --- 5
class TestDominatingSet(unittest.TestCase):
def setUp(self):
self.N = 6
self.G = Graph(self.N)
self.nodes = range(self.N)
self.edges = [
Edge(0, 1), Edge(0, 3), Edge(1, 3), Edge(1, 4),
Edge(1, 2), Edge(2, 5), Edge(3, 4), Edge(4, 5)]
for node in self.nodes:
self.G.add_node(node)
for edge in self.edges:
self.G.add_edge(edge)
#self.G.show()
def test_dominating_set(self):
algorithm = UnorderedSequentialDominatingSet(self.G)
algorithm.run()
used = set(algorithm.dominating_set)
for node in algorithm.dominating_set:
used.update(self.G.iteradjacent(node))
self.assertEqual(len(used), self.N)
self.assertEqual(algorithm.cardinality, len(algorithm.dominating_set))
self.assertEqual(algorithm.cardinality, 3) # best = 2
#print ( algorithm.dominating_set )
def test_dominating_set_source(self):
algorithm = UnorderedSequentialDominatingSet(self.G)
algorithm.run(1)
used = set(algorithm.dominating_set)
for node in algorithm.dominating_set:
used.update(self.G.iteradjacent(node))
self.assertEqual(len(used), self.N)
self.assertEqual(algorithm.cardinality, len(algorithm.dominating_set))
self.assertEqual(algorithm.cardinality, 2) # best = 2
#print ( algorithm.dominating_set )
def test_exceptions(self):
self.assertRaises(ValueError, UnorderedSequentialDominatingSet,
Graph(5, directed=True))
def tearDown(self): pass
if __name__ == "__main__":
unittest.main()
# EOF
| 33.645161 | 78 | 0.626558 | 1,616 | 0.774688 | 0 | 0 | 0 | 0 | 0 | 0 | 334 | 0.160115 |
76838a7af2293c66cd15e86e536b35b9a382902f | 7,857 | py | Python | wet_chicken_discrete/baseline_policy.py | Philipp238/Safe-Policy-Improvement-Approaches-on-Discrete-Markov-Decision-Processes | d0eb7281c5151e043547d5b7379144deea0bbe03 | [
"MIT"
] | null | null | null | wet_chicken_discrete/baseline_policy.py | Philipp238/Safe-Policy-Improvement-Approaches-on-Discrete-Markov-Decision-Processes | d0eb7281c5151e043547d5b7379144deea0bbe03 | [
"MIT"
] | null | null | null | wet_chicken_discrete/baseline_policy.py | Philipp238/Safe-Policy-Improvement-Approaches-on-Discrete-Markov-Decision-Processes | d0eb7281c5151e043547d5b7379144deea0bbe03 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
class WetChickenBaselinePolicy:
def __init__(self, env, gamma, method='heuristic', epsilon=0.1, convergence=0.1, learning_rate=0.1, max_nb_it=999,
order_epsilon=3, order_learning_rate=3):
self.env = env
self.gamma = gamma
self.nb_states = env.width * env.length
self.nb_actions = 5
self.pi = np.ones((self.nb_states, self.nb_actions)) / self.nb_actions
self.epsilon = epsilon
self.convergence = convergence
self.learning_rate = learning_rate
self.method = method
self.max_nb_it = max_nb_it
self.order_epsilon = order_epsilon
self.order_learning_rate = order_learning_rate
self.compute_baseline()
def compute_baseline(self):
if self.method == 'fixed_learning':
old_q = np.zeros((self.nb_states, self.nb_actions))
q = np.ones((self.nb_states, self.nb_actions)) * 1 / (1 - self.gamma) * 4 # Optimistic initialisation
nb_it = 0
state = self.env.get_state_int()
# while (np.linalg.norm(old_q - q) > self.convergence) and nb_it < 999:
while nb_it < self.max_nb_it:
action = np.random.choice(self.pi.shape[1], p=self.pi[state])
state, reward, next_state = self.env.step(action)
old_q = q.copy()
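                # Standard Q-learning update: Q(s,a) += lr * (r + gamma * max_a' Q(s',a') - Q(s,a)).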
q[state, action] += self.learning_rate * (
reward + self.gamma * np.max(q[next_state, :]) - q[state, action])
self.pi = self.epsilon * np.ones((self.nb_states, self.nb_actions)) / 5
for s in range(self.nb_states):
self.pi[s, np.argmax(q[s, :])] += 1 - self.epsilon
nb_it += 1
elif self.method == 'variable_learning':
old_q = np.zeros((self.nb_states, self.nb_actions))
q = np.ones((self.nb_states, self.nb_actions)) * 1 / (1 - self.gamma) * 4 # Optimistic initialisation
nb_it = 0
state = self.env.get_state_int()
# while (np.linalg.norm(old_q - q) > self.convergence) and nb_it < 999:
while nb_it < self.max_nb_it:
nb_it += 1
epsilon = self.epsilon * 1 / nb_it ** (1 / self.order_epsilon)
learning_rate = self.learning_rate * 1 / nb_it ** (1 / self.order_learning_rate)
action = np.random.choice(self.pi.shape[1], p=self.pi[state])
state, reward, next_state = self.env.step(action)
old_q = q.copy()
q[state, action] += learning_rate * (
reward + self.gamma * np.max(q[next_state, :]) - q[state, action])
self.pi = epsilon * np.ones((self.nb_states, self.nb_actions)) / 5
for s in range(self.nb_states):
self.pi[s, np.argmax(q[s, :])] += 1 - epsilon
elif self.method == 'state_count_dependent_variable':
old_q = np.zeros((self.nb_states, self.nb_actions))
q = np.ones((self.nb_states, self.nb_actions)) * 1 / (1 - self.gamma) * 4 # Optimistic initialisation
nb_it = 0
state = self.env.get_state_int()
# while (np.linalg.norm(old_q - q) > self.convergence) and nb_it < 999:
count_state_action = np.zeros((self.nb_states, self.nb_actions))
while nb_it < self.max_nb_it:
nb_it += 1
epsilon = self.epsilon * 1 / nb_it ** (1 / self.order_epsilon)
action = np.random.choice(self.pi.shape[1], p=self.pi[state])
count_state_action[state, action] += 1
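                # The learning rate decays with the visit count of this particular (state, action) pair.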
learning_rate = self.learning_rate * 1 / count_state_action[state, action] ** (
1 / self.order_learning_rate)
state, reward, next_state = self.env.step(action)
old_q = q.copy()
q[state, action] += learning_rate * (
reward + self.gamma * np.max(q[next_state, :]) - q[state, action])
self.pi = epsilon * np.ones((self.nb_states, self.nb_actions)) / 5
for s in range(self.nb_states):
self.pi[s, np.argmax(q[s, :])] += 1 - epsilon
elif self.method == 'heuristic':
# Try to get to in the middle of the river and then paddle as strong as possible against the stream
# I.e. try to get to state (2,2), as a number 12, and then choose action 2
pi = np.zeros((self.nb_states, self.nb_actions))
for state in range(self.nb_states):
for action in range(self.nb_actions):
x, y = int(state / self.nb_actions), state % self.nb_actions
if x > 2:
pi[state, 2] = 1 # We are too close to the waterfall ==> paddle as strong as possible
elif y < 2:
pi[state, 4] = 1 # We are not in immediate danger, but too close to the left ==> go right
elif y > 2:
pi[state, 3] = 1 # We are not in immediate danger, but too close to the right ==> go left
elif x == 2:
pi[state, 2] = 1 # We are perfect now, try to keep the position by paddling as strong as poss
elif x == 1:
pi[state, 1] = 1 # Close to perfect, just paddle a bit
else:
pi[state, 0] = 1 # Right lane, but too high up, just drift with the river
self.pi = (1 - self.epsilon) * pi + self.epsilon * self.pi
else:
print(
                f'Method {self.method} is not available. Acceptable methods are: \'fixed_learning\', \'variable_learning\', \'state_count_dependent_variable\' and \'heuristic\'.')
class ContinuousWetChickenHeuristic:
def __init__(self, epsilon):
self.epsilon = epsilon
def pi(self, state):
x, y = state[0], state[1]
pi = np.zeros(5)
if x > 2.5:
pi[2] = 1 # We are too close to the waterfall ==> paddle as strong as possible
elif y < 2:
pi[4] = 1 # We are not in immediate danger, but too close to the left ==> go right
elif y > 3:
pi[3] = 1 # We are not in immediate danger, but too close to the right ==> go left
elif x > 2:
pi[2] = 1 # We are perfect now, try to keep the position by paddling as strong as poss
elif x > 1:
pi[1] = 1 # Close to perfect, just paddle a bit
else:
pi[0] = 1 # Right lane, but too high up, just drift with the river
pi = (1 - self.epsilon) * pi + self.epsilon * 1/5
return pi
| 55.330986 | 141 | 0.543846 | 7,812 | 0.994273 | 0 | 0 | 0 | 0 | 0 | 0 | 1,559 | 0.198422 |
768603f80762916b71f5b82d5dc5b04a5977e674 | 136 | py | Python | timeflow/__init__.py | zach-nervana/TimeFlow | c1ea2e1d2583907ab7775f189541af87f8de9d53 | [
"MIT"
] | 76 | 2016-11-22T14:09:10.000Z | 2020-09-23T19:02:19.000Z | timeflow/__init__.py | genesiscrew/TensorFlow-Predictor | d129172b064d9e73e9118ac7164eb826a1263100 | [
"MIT"
] | 1 | 2017-08-17T18:43:01.000Z | 2017-08-17T18:43:01.000Z | timeflow/__init__.py | genesiscrew/TensorFlow-Predictor | d129172b064d9e73e9118ac7164eb826a1263100 | [
"MIT"
] | 26 | 2017-03-09T02:41:07.000Z | 2021-11-17T09:46:52.000Z | import layers as layers
import placeholders as placeholders
import trainer as trainer
import features as features
import utils as utils
| 22.666667 | 35 | 0.852941 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7687001bee9e147d41cd9ca528d4c1347da72a03 | 1,459 | py | Python | plot_graphs.py | bsciolla/merks | 500569fb1222726238c3711826ff3d8f8ee8ac28 | [
"Apache-2.0"
] | 1 | 2018-07-27T08:31:55.000Z | 2018-07-27T08:31:55.000Z | plot_graphs.py | bsciolla/merks | 500569fb1222726238c3711826ff3d8f8ee8ac28 | [
"Apache-2.0"
] | null | null | null | plot_graphs.py | bsciolla/merks | 500569fb1222726238c3711826ff3d8f8ee8ac28 | [
"Apache-2.0"
] | null | null | null |
import matplotlib.pyplot as plt
import networkx as nx
import numpy
plt.ion()
def plot_neural_network(mek):
G = nx.DiGraph(numpy.transpose(mek.nn.links))
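    # Label each node with its neuron description plus "#<index>" so the plot stays readable.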
mylabels = dict(zip(range(len(mek.nn.neurons)),
[to_string(i)+'\n#'
+ str(ix)+'' for (ix, i) in enumerate(mek.nn.neurons)]))
G = nx.relabel_nodes(G, mylabels)
pos = nx.layout.spring_layout(G, k=2)
epos = [(u, v) for (u, v, d) in G.edges(data=True) if d['weight'] > 0.5]
eneg = [(u, v) for (u, v, d) in G.edges(data=True) if d['weight'] <= -0.5]
arrowsize = 50
colorspos = numpy.arange(len(epos))/5.0+4.0*len(epos)/5.0
colorsneg = numpy.arange(len(eneg))/5.0+4.0*len(eneg)/5.0
nx.draw_networkx_edges(G, pos, edgelist=epos, edge_color=colorspos,
width=3, arrowsize=arrowsize, alpha=1, arrowstyle='->', edge_cmap=plt.cm.Blues)
nx.draw_networkx_edges(G, pos, edgelist=eneg,
width=2, arrowsize=arrowsize, alpha=1, edge_color=colorsneg, arrowstyle='->', edge_cmap=plt.cm.Reds)
nodes = nx.draw_networkx_nodes(
G, pos, node_size=1500, node_color='gray', alpha=1)
nx.draw_networkx_labels(G, pos, font_size=10,
font_family='sans-serif', font_weight='bold')
ax = plt.gca()
ax.set_axis_off()
plt.show()
def to_string(name):
out = ""
for i in name:
out = out + str(i)
return(out)
| 33.930233 | 127 | 0.594243 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 63 | 0.04318 |
76879dfbcaa68b4faac6a435eae8b80ca5c3ab47 | 2,071 | py | Python | Logistic-Regression/code.py | VaidikTrivedi/ga-learner-dsb-repo | 4e31ce32d04f57744dd01ae53a2be57d76d5cab1 | [
"MIT"
] | null | null | null | Logistic-Regression/code.py | VaidikTrivedi/ga-learner-dsb-repo | 4e31ce32d04f57744dd01ae53a2be57d76d5cab1 | [
"MIT"
] | null | null | null | Logistic-Regression/code.py | VaidikTrivedi/ga-learner-dsb-repo | 4e31ce32d04f57744dd01ae53a2be57d76d5cab1 | [
"MIT"
] | null | null | null | # --------------
import pandas as pd
from sklearn.model_selection import train_test_split
df = pd.read_csv(path)
print(df.head(5))
X = df.drop('insuranceclaim', axis=1)
#print(X.head(5))
y = df['insuranceclaim']
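# Hold out 20% of the rows for evaluation; random_state fixes the split for reproducibility.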
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=6)
print(X_train.head(5), "\n", y_train.head(5))
# --------------
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
# Code starts here
ax = sns.boxplot(x=X_train['bmi'])
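# 95th percentile of BMI in the training set, a reference point for spotting outliers.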
q_value = np.quantile(X_train['bmi'], .95)
print(q_value)
plt.show()
print(y_train.value_counts())
# Code ends here
# --------------
# Code starts here
import seaborn as sns
relation = X_train.corr()
print(relation)
sns.pairplot(X_train)
plt.show()
# Code ends here
# --------------
import seaborn as sns
import matplotlib.pyplot as plt
# Code starts here
cols = ['children', 'sex', 'region', 'smoker']
fig, axes = plt.subplots(nrows=2, ncols=2)
for i in range(0,2):
for j in range(0,2):
col = cols[i*2+j]
sns.countplot(x=X_train[col], hue=y_train, ax=axes[i,j])
plt.show()
# Code ends here
# --------------
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
# parameters for grid search
parameters = {'C':[0.1,0.5,1,5]}
# Code starts here
lr = LogisticRegression(random_state=9)
grid = GridSearchCV(estimator=lr, param_grid=parameters)
grid.fit(X_train, y_train)
y_pred = grid.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy: ", accuracy)
# Code ends here
# --------------
from sklearn.metrics import roc_auc_score
from sklearn import metrics
# Code starts here
score = roc_auc_score(y_test, y_pred)
print(score)
y_pred_proba = grid.predict_proba(X_test)[:,1]
#print(y_pred_proba)
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)  # ROC curves need probability scores, not hard class labels
#print(fpr, tpr)
roc_auc = roc_auc_score(y_test, y_pred_proba)
print(roc_auc)
plt.plot(fpr, tpr, label="Logistic model, auc="+str(roc_auc))
plt.legend(loc='lower right')
plt.show()
# Code ends here
| 21.572917 | 89 | 0.700145 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 461 | 0.222598 |
7687b35a200e00e3a680b96e15c14d349cc8f6ec | 586 | py | Python | tests/test_sink.py | LauWien/smooth | 3d2ee96e3c2b2f9d5d805da1a920748f2dbbd538 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 5 | 2019-10-15T15:56:35.000Z | 2021-02-04T10:11:31.000Z | tests/test_sink.py | LauWien/smooth | 3d2ee96e3c2b2f9d5d805da1a920748f2dbbd538 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 121 | 2020-01-06T14:32:30.000Z | 2021-09-23T11:26:11.000Z | tests/test_sink.py | LauWien/smooth | 3d2ee96e3c2b2f9d5d805da1a920748f2dbbd538 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 6 | 2019-10-21T08:36:05.000Z | 2021-03-26T10:37:17.000Z | from smooth.components.component_sink import Sink
import oemof.solph as solph
def test_init():
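    # A Sink built from an empty config should fall back to the component defaults.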
s = Sink({})
assert hasattr(s, "input_max")
assert hasattr(s, "bus_in")
assert s.commodity_costs == 0
s = Sink({"name": "foo"})
assert s.name == "foo"
def test_add_to_oemof_model():
s = Sink({"bus_in": "foo"})
oemof_model = solph.EnergySystem()
component = s.add_to_oemof_model({"foo": solph.Bus(label="foo")}, oemof_model)
assert type(component) == solph.network.Sink
assert len(component.inputs) == 1
assert len(component.outputs) == 0
| 26.636364 | 82 | 0.662116 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 58 | 0.098976 |
76893a63b5463519de757a813d249bfa6abfb31f | 12,814 | py | Python | Forex/KalmanFilterPairsTrading.py | pvkraju80/Financial-Algorithms | d1847f52e38cf015c555cf789cd6627aaf1e2046 | [
"MIT"
] | 6 | 2020-12-10T07:22:22.000Z | 2022-01-23T17:57:41.000Z | Forex/KalmanFilterPairsTrading.py | pvkraju80/Financial-Algorithms | d1847f52e38cf015c555cf789cd6627aaf1e2046 | [
"MIT"
] | null | null | null | Forex/KalmanFilterPairsTrading.py | pvkraju80/Financial-Algorithms | d1847f52e38cf015c555cf789cd6627aaf1e2046 | [
"MIT"
] | 6 | 2020-12-10T07:22:38.000Z | 2022-02-23T08:10:05.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 8 17:50:11 2019
@author: ArmelFabrice
"""
##### import the necessary modules and set chart style####
import numpy as np
import pandas as pd
from datetime import datetime
import matplotlib as mpl
mpl.style.use('bmh')
import matplotlib.pylab as plt
import statsmodels.api as sm
from math import sqrt
import warnings
warnings.filterwarnings("ignore")
def dateparse2(s):
string = s[:2]+'/' + s[3:5] + '/' + s[6:10] + ' '
    string += s[11:16]
    return datetime.strptime(string, '%d/%m/%Y %H:%M')
pwd = '../Data/'
# Ticker stock market prices
currs = ['EURUSD','AUDUSD','GBPUSD','NZDUSD','USDCHF','USDCAD','USDJPY',#Majors
'AUDCAD','CADCHF','CADJPY','CHFJPY','EURAUD','EURCAD','EURCHF',
'EURGBP','EURJPY','GBPCHF','GBPJPY','NZDJPY']
#0.00006 of commission
#Plus FXCM Transaction costs
trans_costs_list = [0.00021, 0.00025, 0.00027, 0.00029, 0.00024, 0.00032, 0.00022,
0.00038, 0.00043, 0.00033, 0.00036, 0.00038, 0.00039, 0.00033,
0.00033, 0.00029, 0.0004, 0.00037, 0.00032]
df0 = pd.DataFrame()
df00 = pd.DataFrame()
for ticker, trans_costs in zip(currs, trans_costs_list):
path = pwd + '%s'%ticker+'1H'+'.csv'
dftemp1 = pd.read_csv(path)
dftemp1['Date'] = dftemp1['Local time']
dftemp1['Date'] = dftemp1['Date'].apply(lambda x: dateparse2(x))
dftemp1.index = dftemp1['Date']
dftemp = dftemp1.resample('12H').agg({'Open': 'first', 'High': 'max', 'Low': 'min','Close': 'last', 'Volume': 'sum'})
dftemp = dftemp[dftemp.index.dayofweek < 5]
df0[ticker] = dftemp['Close']
df00[ticker] = [trans_costs/2]*len(df0)
df00.index = df0.index
df0 = df0[df0.index.dayofweek < 5]
df00 = df00[df00.index.dayofweek < 5]
def func(comp, param, trans_costs, critical_level, error_level):
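    # Screen one rolling window for cointegrated pairs, then backtest each pair with a Kalman filter.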
dfa = df0[0:comp+param]
dfb = df00[0:comp+param]
#NOTE CRITICAL LEVEL HAS BEEN SET TO 5% FOR COINTEGRATION TEST
def find_cointegrated_pairs(dataframe, critical_level = 0.05):
n = dataframe.shape[1] # the length of dateframe
pvalue_matrix = np.ones((n, n)) # initialize the matrix of p
keys = dataframe.columns # get the column names
pairs = [] # initilize the list for cointegration
for i in range(n):
for j in range(i+1, n): # for j bigger than i
stock1 = dataframe[keys[i]] # obtain the price of "stock1"
stock2 = dataframe[keys[j]]# obtain the price of "stock2"
result = sm.tsa.stattools.coint(stock1, stock2) # get conintegration
pvalue = result[1] # get the pvalue
pvalue_matrix[i, j] = pvalue
if pvalue < critical_level: # if p-value less than the critical level
pairs.append((keys[i], keys[j], pvalue)) # record the contract with that p-value
return pvalue_matrix, pairs
#set up the split point for our "training data" on which to perform the co-integration test (the remaining data will be fed to our backtest function)
split = param
#run our dataframe (up to the split point) of ticker price data through our co-integration function and store results
pvalue_matrix,pairs = find_cointegrated_pairs(dfa[:-split], critical_level)
def half_life(spread):
spread_lag = spread.shift(1)
spread_lag.iloc[0] = spread_lag.iloc[1]
spread_ret = spread - spread_lag
spread_ret.iloc[0] = spread_ret.iloc[1]
spread_lag2 = sm.add_constant(spread_lag)
model = sm.OLS(spread_ret,spread_lag2)
res = model.fit()
halflife = int(round(-np.log(2) / res.params[1],0))
if halflife <= 0:
halflife = 1
return halflife
def backtest(dfa, dfb, param, s1, s2, error_level, trans_costs=False):
#############################################################
# INPUT:
# DataFrame of prices
# s1: the symbol of contract one
# s2: the symbol of contract two
# x: the price series of contract one
# y: the price series of contract two
# OUTPUT:
# df1['cum rets']: cumulative returns in pandas data frame
# sharpe: Sharpe ratio
# CAGR: Compound Annual Growth Rate
#Kalman filter params
delta = 1e-4
wt = delta / (1 - delta) * np.eye(2)
vt = 1e-3
theta = np.zeros(2)
C = np.zeros((2, 2))
R = None
# n = 4
# s1 = pairs[n][0]
# s2 = pairs[n][1]
x = list(dfa[s1])
y = list(dfa[s2])
ts_x = list(dfb[s1])
ts_y = list(dfb[s2])
#X = sm.add_constant(x)
# hrs = [0]*len(df)
Fs = [0]*len(dfa)
Rs = [0]*len(dfa)
ets = [0]*len(dfa)
Qts = [0]*len(dfa)
sqrt_Qts = [0]*len(dfa)
thetas = [0]*len(dfa)
Ats = [0]*len(dfa)
Cs = [0]*len(dfa)
for i in range(len(dfa)-param-10, len(dfa)):
# res = sm.OLS(y[i-split+1:i+1],X[i-split+1:i+1]).fit()
# hr[i] = res.params[1]
F = np.asarray([x[i], 1.0]).reshape((1, 2))
Fs[i] = F
if R is not None:
R = C + wt
else:
R = np.zeros((2, 2))
Rs[i] = R
# Calculate the Kalman Filter update
# ----------------------------------
# Calculate prediction of new observation
# as well as forecast error of that prediction
yhat = F.dot(theta)
et = y[i] - yhat
ets[i] = et[0]
# Q_t is the variance of the prediction of
# observations and hence \sqrt{Q_t} is the
# standard deviation of the predictions
Qt = F.dot(R).dot(F.T) + vt
sqrt_Qt = np.sqrt(Qt)[0][0]
Qts[i] = Qt
sqrt_Qts[i] = sqrt_Qt
# The posterior value of the states \theta_t is
# distributed as a multivariate Gaussian with mean
# m_t and variance-covariance C_t
At = R.dot(F.T) / Qt
theta = theta + At.flatten() * et
C = R - At * F.dot(R)
thetas[i] = theta[0]
Ats[i] = At
Cs[i] = C
dfa['et'] = ets
# df['et'] = df['et'].rolling(5).mean()
dfa['sqrt_Qt'] = sqrt_Qts
dfa['theta'] = thetas
# run regression (including Kalman Filter) to find hedge ratio and then create spread series
df1 = pd.DataFrame({'y':y,'x':x, 'ts_x':ts_x, 'ts_y':ts_y, 'et':dfa['et'], 'sqrt_Qt':dfa['sqrt_Qt'], 'theta':dfa['theta']})[-param:]
# df1[['et','sqrt_Qt']].plot()
# df1['spread0'] = df1['y'] - df1['theta']*df1['x']
# # calculate half life
# halflife = half_life(df1['spread0'])
# # calculate z-score with window = half life period
# meanSpread = df1.spread0.rolling(window=halflife).mean()
# stdSpread = df1.spread0.rolling(window=halflife).std()
# df1['zScore'] = (df1.spread0-meanSpread)/stdSpread
# #############################################################
# # Trading logic
# entryZscore = 0.8
# exitZscore = 0.2
#set up num units long
# df1['long entry'] = ((df1.zScore < - entryZscore) & (df1.zScore.shift(1) > - entryZscore))
# df1['long exit'] = ((df1.zScore > - exitZscore) & (df1.zScore.shift(1) < - exitZscore))
threshold = error_level * 0.001
df1['long entry'] = ((df1.et < -threshold) & (df1.et.shift(1) > -threshold))
df1['long exit'] = ((df1.et > 0) & (df1.et.shift(1) < 0))
df1['num units long'] = np.nan
df1.loc[df1['long entry'],'num units long'] = 1
df1.loc[df1['long exit'],'num units long'] = 0
        df1.loc[df1.index[0], 'num units long'] = 0
df1['num units long'] = df1['num units long'].fillna(method='pad')
#set up num units short
# df1['short entry'] = ((df1.zScore > entryZscore) & (df1.zScore.shift(1) < entryZscore))
# df1['short exit'] = ((df1.zScore < exitZscore) & (df1.zScore.shift(1) > exitZscore))
df1['short entry'] = ((df1.et > threshold) & (df1.et.shift(1) < threshold))
df1['short exit'] = ((df1.et < 0) & (df1.et.shift(1) > 0))
df1.loc[df1['short entry'],'num units short'] = -1
df1.loc[df1['short exit'],'num units short'] = 0
        df1.loc[df1.index[0], 'num units short'] = 0
df1['num units short'] = df1['num units short'].fillna(method='pad')
df1['numUnits'] = df1['num units long'] + df1['num units short']
df1['signals'] = df1['numUnits'].diff()
#df1['signals'].iloc[0] = df1['numUnits'].iloc[0]
df1['yfrets'] = df1['y'].pct_change().shift(-1)
df1['xfrets'] = df1['x'].pct_change().shift(-1)
if trans_costs == True:
df1['spread'] = (df1['y']*(1+df1['signals']*df1['ts_y'])) - (df1['theta']*(df1['x']*(1-df1['signals']*df1['ts_x'])))
#df1['spread'] = (df1['y']*(1+df1['signals']*0.0001))
else:
df1['spread'] = df1['y'] - df1['theta']*df1['x']
#df1['spread'] = df1['y']
df1['spread pct ch'] = (df1['spread'] - df1['spread'].shift(1)) / ((df1['x'] * abs(df1['theta'])) + df1['y'])
#df1['spread pct ch'] = (df1['spread'] - df1['spread'].shift(1)) / df1['spread'].shift(1)
df1['port rets'] = df1['spread pct ch']*df1['numUnits'].shift(1)
df1['cum rets'] = df1['port rets'].cumsum()
df1['cum rets'] = df1['cum rets'] + 1
#df1 = df1.dropna()
##############################################################
        # Float division never raises ZeroDivisionError; guard against a zero std explicitly.
        port_std = df1['port rets'].std()
        sharpe = (df1['port rets'].mean() / port_std) * sqrt(252*2) if port_std else 0.0
##############################################################
start_val = 1
end_val = df1['cum rets'].iloc[-1]
# print(len(df1[df1['long entry']==True])+len(df1[df1['short entry']==True]))
# print(end_val)
start_date = df1.iloc[0].name
end_date = df1.iloc[-1].name
days = (end_date - start_date).days
CAGR = round(((float(end_val) / float(start_val)) ** (252.0/days)) - 1,4)
df1[s1+ " "+s2] = df1['cum rets']
return df1[s1+" "+s2], sharpe, CAGR
results = []
for pair in pairs:
        # Pass error_level before trans_costs to match backtest's signature (the arguments were swapped).
        rets, sharpe, CAGR = backtest(dfa, dfb, param, pair[0], pair[1], error_level, trans_costs)
results.append(rets)
#print("The pair {} and {} produced a Sharpe Ratio of {} and a CAGR of {}".format(pair[0],pair[1],round(sharpe,2),round(CAGR,4)))
#rets.plot(figsize=(20,15),legend=True)
#concatenate together the individual equity curves into a single DataFrame
try:
results_df = pd.concat(results,axis=1).dropna()
#equally weight each equity curve by dividing each by the number of pairs held in the DataFrame
results_df /= len(results_df.columns)
#sum up the equally weighted equity curves to get our final equity curve
final_res = results_df.sum(axis=1)
#plot the chart of our final equity curve
plt.figure()
final_res.plot(figsize=(20,15))
plt.title('Between {} and {}'.format(str(results_df.index[0])[:10],str(results_df.index[-1])[:10]))
plt.xlabel('Date')
plt.ylabel('Returns')
#calculate and print our some final stats for our combined equity curve
        # Float division never raises ZeroDivisionError; guard against a zero std explicitly.
        ret_std = final_res.pct_change().std()
        sharpe = (final_res.pct_change().mean() / ret_std) * sqrt(252*2) if ret_std else 0.0
start_val = 1
end_val = final_res.iloc[-1]
start_date = final_res.index[0]
end_date = final_res.index[-1]
days = (end_date - start_date).days
CAGR = round(((float(end_val) / float(start_val)) ** (252.0/days)) - 1,4)
print("Sharpe Ratio is {} and CAGR is {}".format(round(sharpe,2),round(CAGR,4)))
except ValueError:
# return "No result"
print('No pair found')
indexes = df0.index
param = 500
compini = 1000
comps = [i for i in range(compini,len(df0),param)]
for comp in comps:
print('')
print('Between {} and {}'.format(str(indexes[comp])[:10],str(indexes[min(comp+param,len(indexes)-1)])[:10]))
func(comp, param, trans_costs = True, critical_level = 0.15, error_level = 5)
| 38.250746 | 153 | 0.53184 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,938 | 0.38536 |
7689a6e4d7295be853f74c7a7da78f85d2aecd55 | 1,592 | py | Python | gym/tests/webserver.py | intrig-unicamp/gym | 044be41236502ebde64427c3e1fff53bfff34aa2 | [
"Apache-2.0"
] | 8 | 2019-02-13T17:17:50.000Z | 2021-08-17T12:32:42.000Z | gym/tests/webserver.py | intrig-unicamp/gym | 044be41236502ebde64427c3e1fff53bfff34aa2 | [
"Apache-2.0"
] | 2 | 2017-09-18T03:06:29.000Z | 2019-07-30T23:59:34.000Z | gym/tests/webserver.py | intrig-unicamp/gym | 044be41236502ebde64427c3e1fff53bfff34aa2 | [
"Apache-2.0"
] | 2 | 2018-07-03T11:03:28.000Z | 2020-03-12T17:20:11.000Z | import sys
import json
import asyncio
import aiohttp
from aiohttp import web
from urllib.parse import urlparse
class SimpleWebServer:
def __init__(self):
self.loop = asyncio.get_event_loop()
self.app = web.Application(loop=self.loop)
async def check_ack(self):
        await asyncio.sleep(1)
print("Closing WebServer")
def save_post(self, data):
print("Saving post data into vnf-br.json")
filename = "./vnf-br.json"
with open(filename, 'w') as outfile:
json.dump(data, outfile, indent=4, sort_keys=True)
        return True
async def post(self, request):
from_id = request.match_info['id']
print("Received msg from id", from_id)
reply = web.HTTPOk(text="Ack")
try:
raw_data = await request.read()
payload = raw_data.decode(encoding='UTF-8')
data = json.loads(payload)
self.save_post(data)
except Exception as e:
print("Could not save data to file")
print(e)
reply = web.HTTPBadRequest()
finally:
self.app.loop.create_task(self.check_ack())
return reply
def run(self, url):
self.app.add_routes([web.route("POST", "/{id}", self.post)])
url_parsed = urlparse(url)
host, port = url_parsed.hostname, url_parsed.port
print("Waiting for Player VNF-BR")
web.run_app(self.app, host=host, port=port)
if __name__ == "__main__":
app = SimpleWebServer()
app.run("http://127.0.0.1:7879") | 30.037736 | 68 | 0.597362 | 1,385 | 0.869975 | 0 | 0 | 0 | 0 | 678 | 0.425879 | 212 | 0.133166 |
768d0d745dc0784b2a3c714af9a252bf04f94f52 | 358 | py | Python | fundamentos/exer027.py | edelvandro/Python | 152685590af873bf63fcc5a29cf3528e4cc31a3e | [
"MIT"
] | 1 | 2020-04-14T14:43:59.000Z | 2020-04-14T14:43:59.000Z | fundamentos/exer027.py | edelvandro/Python | 152685590af873bf63fcc5a29cf3528e4cc31a3e | [
"MIT"
] | null | null | null | fundamentos/exer027.py | edelvandro/Python | 152685590af873bf63fcc5a29cf3528e4cc31a3e | [
"MIT"
] | null | null | null | '''
Write a program that reads a person's full name,
then shows the first and the last name separately.
'''
entrada = str(input('Enter a full name: ')).strip()
print('Hello {}'.format(entrada))
nome = entrada.split()
print('Your first name is: {}'.format(nome[0]))
print('Your last name is: {}'.format(nome[-1]))
| 32.545455 | 66 | 0.678771 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 224 | 0.615385 |
768ea2d8cf6d6a06597ecbe6151b681589c1a073 | 526 | py | Python | convert_to_neglog.py | yashkhem1/chemprop | ddfaf7c0d1ae0c78a3dc6a3be1071d68a7ff6544 | [
"MIT"
] | null | null | null | convert_to_neglog.py | yashkhem1/chemprop | ddfaf7c0d1ae0c78a3dc6a3be1071d68a7ff6544 | [
"MIT"
] | null | null | null | convert_to_neglog.py | yashkhem1/chemprop | ddfaf7c0d1ae0c78a3dc6a3be1071d68a7ff6544 | [
"MIT"
] | null | null | null | import csv
import numpy as np
import os
infile = "dopamine_processed.csv"
outfile = "dopamine.csv"
if os.path.exists(outfile):
os.remove(outfile)
# Stream the input CSV, skip the header row, and write -log10 of each value to the output.
with open(infile, 'r', newline='') as f, open(outfile, 'a', newline='') as w:
    reader = csv.reader(f, delimiter=',')
    writer = csv.writer(w, delimiter=',')
    next(reader)  # skip the header row
    for row in reader:
        writer.writerow([row[0], str(-1*np.log10(float(row[1])))])
| 25.047619 | 70 | 0.539924 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 54 | 0.102662 |
768f6ea4edae64dc3c5e980c6abcd8369f91dd60 | 1,057 | py | Python | DummyIntermediateDevices/connectiontable_dummy_intermediate.py | fretchen/synqs_devices | ec4bb24639e9d0d4cb707e1c2a6296a47dfe9f02 | [
"MIT"
] | null | null | null | DummyIntermediateDevices/connectiontable_dummy_intermediate.py | fretchen/synqs_devices | ec4bb24639e9d0d4cb707e1c2a6296a47dfe9f02 | [
"MIT"
] | 4 | 2020-04-06T14:20:58.000Z | 2020-04-17T10:47:11.000Z | DummyIntermediateDevices/connectiontable_dummy_intermediate.py | fretchen/synqs_devices | ec4bb24639e9d0d4cb707e1c2a6296a47dfe9f02 | [
"MIT"
] | 2 | 2020-04-10T08:56:28.000Z | 2020-09-06T20:08:29.000Z | """The sample file to be run in runmanager.
This is the minimal sample that you can load from runmanager to see if your
code is working properly.
"""
from labscript import *
# from user_devices.dummy_device.labscript_devices import DummyDevice
from labscript_devices.DummyPseudoclock.labscript_devices import DummyPseudoclock
from user_devices.DummyIntermediateDevices.labscript_devices import (
DummyIntermediateDevice,
)
DummyPseudoclock("dummy_pseudoclock")
ClockLine(
name="dummy_clockline",
pseudoclock=dummy_pseudoclock.pseudoclock,
connection="flag 0",
)
# DummyDevice(name="dummy_device_0", parent_device=dummy_clockline)
DummyIntermediateDevice(name="dummy_intermediate_device", parent_device=dummy_clockline)
AnalogOut(name="AO1", parent_device=dummy_intermediate_device, connection="ao0")
AnalogOut(name="AO2", parent_device=dummy_intermediate_device, connection="ao1")
# DigitalOut(name="dummy_DO1", parent_device=dummy_intermediate_device, connection="dummy_connection")
if __name__ == "__main__":
start()
stop(1)
| 34.096774 | 102 | 0.811731 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 489 | 0.46263 |
768fe188493a5880a64b1d123272dc48ba2a7f08 | 1,227 | py | Python | src/autoencoder/dataset.py | fabiobedeschi/datepalm | ae240c5a6666d5d3b3ed91095c54175e9bd171bb | [
"MIT"
] | null | null | null | src/autoencoder/dataset.py | fabiobedeschi/datepalm | ae240c5a6666d5d3b3ed91095c54175e9bd171bb | [
"MIT"
] | null | null | null | src/autoencoder/dataset.py | fabiobedeschi/datepalm | ae240c5a6666d5d3b3ed91095c54175e9bd171bb | [
"MIT"
] | null | null | null | from keras_preprocessing.image import ImageDataGenerator
from src.config import IMG_SIZE
from src.autoencoder.params import BATCH_SIZE
def load_train_dataset(train_dir: str, batch_size=BATCH_SIZE, img_size=IMG_SIZE, val_split=0.2):
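    # Build train/validation generators that yield (image, image) pairs (class_mode='input') for autoencoder training.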
train_datagen = ImageDataGenerator(
rescale=1. / 255,
validation_split=val_split
)
# Train data
train_generator = train_datagen.flow_from_directory(
directory=train_dir,
target_size=(img_size, img_size),
class_mode='input',
batch_size=batch_size,
subset='training'
)
# Validation data
valid_generator = train_datagen.flow_from_directory(
directory=train_dir,
target_size=(img_size, img_size),
class_mode='input',
batch_size=batch_size,
subset='validation'
)
return train_generator, valid_generator
def load_test_dataset(test_dir: str, batch_size=BATCH_SIZE, img_size=IMG_SIZE):
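    # Build an unshuffled test generator yielding (image, image) pairs for evaluation.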
test_generator = ImageDataGenerator(
rescale=1. / 255
).flow_from_directory(
directory=test_dir,
target_size=(img_size, img_size),
class_mode='input',
batch_size=batch_size,
shuffle=False
)
return test_generator
| 26.673913 | 96 | 0.691932 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 72 | 0.05868 |
7691a86ea40534f4131045fa474a445f4a5556fb | 1,585 | py | Python | test/knxip_tests/hpai_test.py | iligiddi/xknx | c450d5934c8ddc608741229a7d14168013c3684c | [
"MIT"
] | 179 | 2016-12-29T00:24:47.000Z | 2022-03-11T23:46:06.000Z | test/knxip_tests/hpai_test.py | iligiddi/xknx | c450d5934c8ddc608741229a7d14168013c3684c | [
"MIT"
] | 820 | 2016-12-25T22:45:12.000Z | 2022-03-31T10:18:25.000Z | test/knxip_tests/hpai_test.py | magenbrot/xknx | dc012c8b53606b591d4ba3c006700576149c268e | [
"MIT"
] | 111 | 2016-12-30T00:02:13.000Z | 2022-03-20T15:18:33.000Z | """Unit test for KNX/IP HPAI objects."""
import pytest
from xknx.exceptions import ConversionError, CouldNotParseKNXIP
from xknx.knxip import HPAI
class TestKNXIPHPAI:
"""Test class for KNX/IP HPAI objects."""
def test_hpai(self):
"""Test parsing and streaming HPAI KNX/IP fragment."""
raw = (0x08, 0x01, 0xC0, 0xA8, 0x2A, 0x01, 0x84, 0x95)
hpai = HPAI()
assert hpai.from_knx(raw) == 8
assert hpai.ip_addr == "192.168.42.1"
assert hpai.port == 33941
hpai2 = HPAI(ip_addr="192.168.42.1", port=33941)
assert hpai2.to_knx() == list(raw)
def test_from_knx_wrong_input1(self):
"""Test parsing of wrong HPAI KNX/IP packet (wrong length)."""
raw = (0x08, 0x01, 0xC0, 0xA8, 0x2A)
with pytest.raises(CouldNotParseKNXIP):
HPAI().from_knx(raw)
def test_from_knx_wrong_input2(self):
"""Test parsing of wrong HPAI KNX/IP packet (wrong length byte)."""
raw = (0x09, 0x01, 0xC0, 0xA8, 0x2A, 0x01, 0x84, 0x95)
with pytest.raises(CouldNotParseKNXIP):
HPAI().from_knx(raw)
def test_from_knx_wrong_input3(self):
"""Test parsing of wrong HPAI KNX/IP packet (wrong HPAI type)."""
raw = (0x08, 0x02, 0xC0, 0xA8, 0x2A, 0x01, 0x84, 0x95)
with pytest.raises(CouldNotParseKNXIP):
HPAI().from_knx(raw)
def test_to_knx_wrong_ip(self):
"""Test serializing HPAI to KNV/IP with wrong ip type."""
hpai = HPAI(ip_addr=127001)
with pytest.raises(ConversionError):
hpai.to_knx()
| 35.222222 | 75 | 0.628391 | 1,435 | 0.905363 | 0 | 0 | 0 | 0 | 0 | 0 | 414 | 0.261199 |
7691b87c25055ca65fe6a4859ce2615cd80362d1 | 815 | py | Python | cloudmesh/sign/api.py | cloudmesh/cloudmesh.sign | 84ddaf3b9f29e62f87db35d5a06357a51906db12 | [
"Apache-2.0"
] | null | null | null | cloudmesh/sign/api.py | cloudmesh/cloudmesh.sign | 84ddaf3b9f29e62f87db35d5a06357a51906db12 | [
"Apache-2.0"
] | 10 | 2017-04-30T00:43:05.000Z | 2017-04-30T13:19:27.000Z | cloudmesh/sign/api.py | cloudmesh/extstreet | 84ddaf3b9f29e62f87db35d5a06357a51906db12 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
#import cv2
class Sign(object):
def __init__(self):
# not your location will not work easily / is not writeable
self.classifier = '/street-signal/classifier/stopsign_classifier.xml'
def hello(self, msg):
print ("Hello Sign", msg)
def detect(self, image):
'''
stop_cascade = cv2.CascadeClassifier(self.classifier)
test = cv2.imread(image)
gray = cv2.cvtColor(test,cv2.COLOR_BGR2GRAY)
stops = stop_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=2)
for (x,y,w,h) in stops:
cv2.rectangle(test,(x,y),(x+w,y+h),(255,0,0),2)
return test
'''
return image # remove once you fix the above
def cp(self, image, server):
return image
| 29.107143 | 84 | 0.623313 | 762 | 0.934969 | 0 | 0 | 0 | 0 | 0 | 0 | 524 | 0.642945 |
76958da8ea27dd73e495fa167896e0a6d0b9ab84 | 2,457 | py | Python | profrage/generate/utils.py | federicoVS/ProFraGe | d8a653efae80af51e9a99f01268815c99fe62f84 | [
"MIT"
] | null | null | null | profrage/generate/utils.py | federicoVS/ProFraGe | d8a653efae80af51e9a99f01268815c99fe62f84 | [
"MIT"
] | null | null | null | profrage/generate/utils.py | federicoVS/ProFraGe | d8a653efae80af51e9a99f01268815c99fe62f84 | [
"MIT"
] | null | null | null | import torch
def reparametrize(mu, log_var, device):
"""
Reparametrize based on input mean and log variance
Parameters
----------
mu : torch.tensor
The mean.
log_var : torch.tensor
The log variance.
device : str
The device on which to put the data.
Returns
-------
z : torch.tensor
The reparametrized value.
"""
sigma = torch.exp(0.5*log_var)
    # Sample standard-normal noise (randn_like, not uniform rand_like) for the reparametrization trick.
    epsilon = torch.randn_like(sigma)
z = mu + epsilon*sigma
return z.to(device)
def adj_to_seq(adj, device='cpu'):
"""
Convert a dense adjacency matrix into a sequence.
Parameters
----------
adj : torch.Tensor
The dense adjacency tensor.
device : str, optional
The device onto which to put the data. The default is 'cpu'.
Returns
-------
adj_seq : torch.Tensor
The sequence representing the input adjacency tensor.
"""
B, N = adj.shape[0], adj.shape[1]
adj_seq = torch.zeros(B,int(((N-1)*N)/2)).to(device)
    for b in range(B):
        for i in range(1, N):
            for j in range(i):
                # Flatten the strictly-lower-triangular entries row by row
                # (the original i+j index collided for different (i, j) pairs).
                adj_seq[b, i*(i-1)//2 + j] = adj[b, i, j]
return adj_seq
def seq_to_adj(adj_seq, device='cpu'):
"""
Convert an adjacency sequence to its corresponding dense representation.
Parameters
----------
adj_seq : torch.Tensor
The sequence adjacency.
device : str, optional
The device onto which to put the data. The default is 'cpu'.
Returns
-------
adj : torch.Tensor
The dense representation of the input sequence.
"""
    B, n = adj_seq.shape[0], adj_seq.shape[1]
    # Recover the matrix size N from the sequence length n = N*(N-1)/2.
    N = int((1 + (1 + 8*n)**0.5) / 2)
    adj = torch.zeros(B, N, N).to(device)
    for b in range(B):
        for i in range(1, N):
            for j in range(i):
                adj[b, i, j] = adj[b, j, i] = adj_seq[b, i*(i-1)//2 + j]
return adj
def clipping_dist(delta):
"""
Returns the average distance between residues i and j, based on experimental data.
Parameters
----------
delta : int
The delta between residue indexes i and j.
Returns
-------
float
The average distance.
"""
    # Experimentally observed average residue distances, keyed by the index delta |i - j|.
    avg_dists = {1: 4, 2: 6, 3: 7.5, 4: 8.5, 5: 10, 6: 10.5, 7: 11, 8: 12}
    return avg_dists.get(delta, 12.5)
7696fdf8e67e695e5aa8e8399f3b07acfb6c071a | 9,923 | py | Python | neighbor_app/views.py | LawiOtieno/neighborhood-alert | 883baa5a636129ac430631bd45b59a613f30bc8f | [
"MIT"
] | null | null | null | neighbor_app/views.py | LawiOtieno/neighborhood-alert | 883baa5a636129ac430631bd45b59a613f30bc8f | [
"MIT"
] | null | null | null | neighbor_app/views.py | LawiOtieno/neighborhood-alert | 883baa5a636129ac430631bd45b59a613f30bc8f | [
"MIT"
] | null | null | null | from django.shortcuts import render,redirect,get_object_or_404,HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate
from .forms import *
from .models import NeighborHood,Profile,Post,Business
from django.contrib.sites.shortcuts import get_current_site
from django.utils.encoding import force_text
from django.contrib.auth.models import User
from django.db import IntegrityError
from django.utils.http import urlsafe_base64_decode
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from django.template.loader import render_to_string
from .tokens import account_activation_token
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm
from django.contrib import messages
from django.contrib.auth import login as auth_login
from django.core.mail import EmailMessage
# Create your views here.
###################### Landing
@login_required(login_url='/accounts/login/')
# @login_required
def index(request):
current_user = request.user
neighborhoods = NeighborHood.objects.all().order_by('-created_at')
return render(request, 'index.html',{'current_user':current_user, 'neighborhoods':neighborhoods})
###################### Login
def login(request):
if request.method == 'POST':
form = AuthenticationForm(request=request, data=request.POST)
if form.is_valid():
username = form.cleaned_data.get('username')
password = form.cleaned_data.get('password')
user = authenticate(username=username, password=password)
if user is not None:
auth_login(request, user)
messages.info(request, f"You are now logged in as {username}")
return redirect('home')
else:
messages.error(request, "Invalid username or password.")
else:
messages.error(request, "Invalid username or password.")
form = AuthenticationForm()
return render(request = request,template_name = "registration/login.html",context={"form":form})
###################### Account Activation
###################### Neighborhood
@login_required(login_url='/accounts/login/')
# @login_required
def create_neighborhood(request):
if request.method == 'POST':
add_neighborhood_form = CreateNeighborHoodForm(request.POST, request.FILES)
if add_neighborhood_form.is_valid():
neighborhood = add_neighborhood_form.save(commit=False)
neighborhood.admin = request.user.profile
neighborhood.save()
return redirect('home')
else:
add_neighborhood_form = CreateNeighborHoodForm()
return render(request, 'create_neighbor.html', {'add_neighborhood_form': add_neighborhood_form})
@login_required(login_url='/accounts/login/')
# @login_required
def neighborhood(request, neighborhood_id):
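    # Show a neighborhood's detail page with its members, businesses and posts.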
current_user = request.user
neighborhood = NeighborHood.objects.get(id=neighborhood_id)
business = Business.objects.filter(neighborhood=neighborhood)
users = Profile.objects.filter(neighborhood=neighborhood)
posts = Post.objects.filter(neighborhood=neighborhood)
return render(request, 'neighbor.html', {'users':users,'current_user':current_user, 'neighborhood':neighborhood,'business':business,'posts':posts})
@login_required(login_url='/accounts/login/')
# @login_required
def update_neighborhood(request, neighborhood_id):
neighborhood = NeighborHood.objects.get(pk=neighborhood_id)
if request.method == 'POST':
update_neighborhood_form = UpdateNeighborhoodForm(request.POST,request.FILES, instance=neighborhood)
if update_neighborhood_form.is_valid():
update_neighborhood_form.save()
messages.success(request, f'Post updated!')
return redirect('home')
else:
update_neighborhood_form = UpdateNeighborhoodForm(instance=neighborhood)
return render(request, 'update_neighbor.html', {"update_neighborhood_form":update_neighborhood_form})
@login_required(login_url='/accounts/login/')
# @login_required
def delete_neighborhood(request,neighborhood_id):
current_user = request.user
neighborhood = NeighborHood.objects.get(pk=neighborhood_id)
if neighborhood:
neighborhood.delete_neighborhood()
return redirect('home')
@login_required(login_url='/accounts/login/')
# @login_required
def search(request):
if 'name' in request.GET and request.GET["name"]:
search_term = request.GET.get("name")
searched_businesses = Business.search_businesses(search_term)
message = f"{search_term}"
return render(request,'search.html', {"message":message,"businesses":searched_businesses})
else:
message = "You haven't searched for any term"
return render(request,'search.html',{"message":message})
@login_required(login_url='/accounts/login/')
# @login_required
def choose_neighborhood(request, neighborhood_id):
neighborhood = get_object_or_404(NeighborHood, id=neighborhood_id)
request.user.profile.neighborhood = neighborhood
request.user.profile.save()
return redirect('home')
def get_neighborhood_users(request, neighborhood_id):
neighborhood = NeighborHood.objects.get(id=neighborhood_id)
users = Profile.objects.filter(neighborhood=neighborhood)
return render(request, 'neighborhood_users.html', {'users': users})
@login_required(login_url='/accounts/login/')
# @login_required
def leave_neighborhood(request, neighborhood_id):
neighborhood = get_object_or_404(NeighborHood, id=neighborhood_id)
request.user.profile.neighborhood = None
request.user.profile.save()
return redirect('home')
@login_required(login_url='/accounts/login/')
# @login_required
def create_business(request,neighborhood_id):
neighborhood = NeighborHood.objects.get(id=neighborhood_id)
if request.method == 'POST':
add_business_form = CreateBusinessForm(request.POST, request.FILES)
if add_business_form.is_valid():
business = add_business_form.save(commit=False)
business.neighborhood =neighborhood
business.user = request.user
business.save()
return redirect('neighborhood', neighborhood.id)
else:
add_business_form = CreateBusinessForm()
return render(request, 'create_business.html', {'add_business_form': add_business_form,'neighborhood':neighborhood})
@login_required(login_url='/accounts/login/')
# @login_required
def delete_business(request,business_id):
current_user = request.user
business = Business.objects.get(pk=business_id)
if business:
business.delete_business()
return redirect('home')
@login_required(login_url='/accounts/login/')
# @login_required
def update_business(request, business_id):
business = Business.objects.get(pk=business_id)
if request.method == 'POST':
update_business_form = UpdateBusinessForm(request.POST,request.FILES, instance=business)
if update_business_form.is_valid():
update_business_form.save()
messages.success(request, f'Business updated!')
return redirect('home')
else:
update_business_form = UpdateBusinessForm(instance=business)
return render(request, 'update_business.html', {"update_business_form":update_business_form})
@login_required(login_url='/accounts/login/')
# @login_required
def create_post(request, neighborhood_id):
neighborhood = NeighborHood.objects.get(id=neighborhood_id)
if request.method == 'POST':
add_post_form = CreatePostForm(request.POST,request.FILES)
if add_post_form.is_valid():
post = add_post_form.save(commit=False)
post.neighborhood = neighborhood
post.user = request.user
post.save()
return redirect('neighborhood', neighborhood.id)
else:
add_post_form = CreatePostForm()
return render(request, 'create_post.html', {'add_post_form': add_post_form,'neighborhood':neighborhood})
@login_required(login_url='/accounts/login/')
# @login_required
def delete_post(request,post_id):
current_user = request.user
post = Post.objects.get(pk=post_id)
if post:
post.delete_post()
return redirect('home')
@login_required(login_url='/accounts/login/')
# @login_required
def update_post(request, post_id):
post = Post.objects.get(pk=post_id)
if request.method == 'POST':
update_post_form = UpdatePostForm(request.POST,request.FILES, instance=post)
if update_post_form.is_valid():
update_post_form.save()
messages.success(request, f'Post updated!')
return redirect('home')
else:
update_post_form = UpdatePostForm(instance=post)
return render(request, 'update_post.html', {"update_post_form":update_post_form})
@login_required(login_url='/accounts/login/')
# @login_required
def profile(request):
current_user = request.user
user_posts = Post.objects.filter(user_id = current_user.id).all()
return render(request,'profile/profile.html',{'user_posts':user_posts,"current_user":current_user})
@login_required(login_url='/accounts/login/')
# @login_required
def update_profile(request):
if request.method == 'POST':
user_form = UpdateUser(request.POST,instance=request.user)
profile_form = UpdateProfile(request.POST,request.FILES,instance=request.user.profile)
if user_form.is_valid() and profile_form.is_valid():
user_form.save()
profile_form.save()
messages.success(request,'Your Profile account has been updated successfully')
return redirect('profile')
else:
user_form = UpdateUser(instance=request.user)
profile_form = UpdateProfile(instance=request.user.profile)
params = {
'user_form':user_form,
'profile_form':profile_form
}
return render(request,'profile/update.html',params)
@login_required(login_url='/accounts/login/')
# @login_required
def users_profile(request,pk):
user = User.objects.get(pk = pk)
user_posts = Post.objects.filter(user_id = user.id).all()
current_user = request.user
return render(request,'profile/users_profile.html',{'user_posts':user_posts,"user":user,"current_user":current_user})
| 35.439286 | 149 | 0.75713 | 0 | 0 | 0 | 0 | 7,750 | 0.781014 | 0 | 0 | 1,861 | 0.187544 |
7697f0bf45c2a2e873665b3ec989e0158b6b5db8 | 8,233 | py | Python | declare_qtquick/application.py | likianta/declare-qtquick | 93c2ce49d841ccdeb0272085c5f731139927f0d7 | [
"MIT"
] | 3 | 2021-11-02T03:45:27.000Z | 2022-03-27T05:33:36.000Z | declare_qtquick/application.py | likianta/declare-qtquick | 93c2ce49d841ccdeb0272085c5f731139927f0d7 | [
"MIT"
] | null | null | null | declare_qtquick/application.py | likianta/declare-qtquick | 93c2ce49d841ccdeb0272085c5f731139927f0d7 | [
"MIT"
] | null | null | null | import os.path as xpath
from PySide6.QtCore import QObject
from PySide6.QtQml import QQmlApplicationEngine
from PySide6.QtQml import QQmlContext
from PySide6.QtWidgets import QApplication
from .typehint import Dict
from .typehint import TPath
class _Application(QApplication):
engine: QQmlApplicationEngine
root: QQmlContext
# the holder is made for preventing the objects which were registered to
# qml side from being recycled by python garbage collector incorrectly.
__pyobj_holder: Dict[str, QObject]
def __init__(self, app_name='Declare QtQuick Demo', **kwargs):
"""
Args:
app_name: str
Set application name now or later by calling `Application
.set_app_name(...)`.
kwargs:
organization: str['lib.declare_pyside']
Set an organization name, to avoid error info when we use
`QtQuick.Dialogs.FileDialog`.
Note: if no organization name set, the error message shows:
QML Settings: The following application identifiers
have not been set: QVector("organizationName",
"organizationDomain")
theme_dir: str[`../theme`]
You can pass a custom dir which includes themes. Otherwise
declare-qml will use its built-in themes (the widely used
and maintained is 'LightClean' theme).
The theme dir structure should be like this:
D:/workspace/xxx
|= custom_theme # pass this to kwargs['theme_dir']
|= MaterialTheme # use big camel case
|- MText.qml
|- MRectangle.qml
|- ...
|- qmldir # don't forget this file
|= OceanTheme
|= BootstrapTheme
|= ...
"""
super().__init__()
self.setApplicationName(app_name)
self.setOrganizationName(kwargs.get(
'organization', 'lib.declare_pyside'
))
self.engine = QQmlApplicationEngine()
self.root = self.engine.rootContext()
self.__pyobj_holder = {}
self._fine_tune()
def set_app_name(self, name: str):
# just made a consistent snake-case function alias for external caller,
# especially for who imports a global instance `app` from this module.
self.setApplicationName(name)
def _fine_tune(self):
# set font 'microsoft yahei ui' if platform is windows
from platform import system
if system() == 'Windows':
self.setFont('Microsoft YaHei')
def add_import_entrance(self, qmldir: str):
"""
Args:
qmldir: The absolute path of directory, this dir should include at
least one component library subfolder (the subfolder's first
                letter should be capitalized), the subfolder should include a
file named 'qmldir' (no suffix with it).
See examples:
../qmlside/qlogger (includes 'LKQmlSide')
../../theme (includes 'LightClean')
"""
self.engine.addImportPath(qmldir)
def register_pyobj(self, obj: QObject, name=''):
"""
Register Python object to QML root context. Then we can use it as a
global registered property across all QML files.
Notes:
This object must inherit from `PySide6.QtCore.QObject`.
            This method should be used before `self.start` running.
`declare-qml` will register 'PySide' as a built-in object, please
do not use 'PySide' for your custom object, or it may erase all
built-in features.
Args:
obj:
name: Object name. Suggest using capital camel case, and prefixed
with 'Py', for example, 'PySide', 'PyHandler', 'PyHook', etc.
If name not defined, we'll use object's class name instead.
"""
name = name or obj.__class__.__name__
self.root.setContextProperty(name, obj)
self.__pyobj_holder[name] = obj
def start(self, qmlfile: TPath):
"""
Args:
qmlfile: Pass a '.qml' file to launch the application.
Usually the filename is 'Main.qml' or 'Homepage.qml', the name
case is not sensitive (you can also use lower case if you like).
Make sure the file content accords with QML syntax and the root
item should be Window.
Notice:
It seems that `self.engine` cannot recognize abspath format, when
we use:
self.engine.load(<an_abspath_of_qmlfile>)
It will raise a network error:
                QQmlApplicationEngine failed to load component.
We should add a prefix 'file:///' to it to resolve this problem.
By the way a relative path is always safe to use.
"""
if xpath.isabs(qmlfile): qmlfile = 'file:///' + qmlfile
self.engine.load(qmlfile)
self.exec()
# note: do not use `sys.exit(self.exec())` here, that will cause
# `~.py_side.pycomm.PySide` be released in advance. then qml will
# alert 'cannot call from null' in its destroying stage.
class Application:
# _appcore: _Application
def __init__(self, app_name='', **kwargs):
if app_name:
app.setApplicationName(app_name)
if x := kwargs.get('organization'):
app.setOrganizationName(x)
if x := kwargs.get('theme_dir'):
app.add_import_entrance(x)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# noinspection PyUnusedLocal
# file = self.build(
# os.getcwd() + '/' + '__declare_qtquick_autogen__' + '.qml'
# )
# self.start(file)
pass
@staticmethod
def build(output_file):
from .builder import build_component
from .control import id_mgr
from .control import id_gen
id_mgr.finalized()
qml = build_component(id_mgr.get_component(id_gen.root_id), level=0)
with open(output_file, 'w', encoding='utf-8') as f:
from textwrap import dedent, indent
# # f.write(qml)
# TEST
from .pyside import pyside
def _ensoul(qobj):
from .black_magic import proxy
proxy.build(qobj)
pyside.register(_ensoul)
qml = dedent('''
import QtQuick
import QtQuick.Controls
''').lstrip() + qml
qml = qml[:-2] + indent(dedent('''
Component.onCompleted: {
console.log('get started by declare-qtquick engine.')
pyside.call('_ensoul', this)
}
''').rstrip(), ' ') + qml[-2:]
f.write(qml)
return output_file
def start(self, qmlfile: TPath = None):
if not qmlfile:
from os import getcwd
qmlfile = getcwd() + '/' + '__declare_qtquick_autogen__' + '.qml'
self.build(qmlfile)
app.start(qmlfile)
def debug(self, qmlfile: TPath = None):
if not qmlfile:
from os import getcwd
qmlfile = getcwd() + '/' + '__declare_qtquick_autogen__' + '.qml'
self.build(qmlfile)
from .qmlside import hot_loader
hot_loader.start(qmlfile)
# function alias for compatibility.
launch = run = open = exec_ = start
# https://ux.stackexchange.com/questions/106001/do-we-open-or-launch-or
# -startapps+&cd=1&hl=zh-CN&ct=clnk&gl=sg
app = _Application()
| 37.593607 | 80 | 0.54986 | 7,959 | 0.966719 | 0 | 0 | 1,182 | 0.143569 | 0 | 0 | 4,919 | 0.597474 |
76996d0c6fee1f7994689e5e99ec874d21d739eb | 1,259 | py | Python | src/main.py | mdsanima-dev/mdsanima-rt-go | 6032efb8f9fd3dfeb1640fba47b0b8b7759d4572 | [
"Apache-2.0"
] | 2 | 2021-12-09T10:22:27.000Z | 2022-02-16T19:23:46.000Z | src/main.py | mdsanima-dev/mdsanima-rt-go | 6032efb8f9fd3dfeb1640fba47b0b8b7759d4572 | [
"Apache-2.0"
] | 60 | 2021-08-13T17:24:35.000Z | 2021-08-25T16:25:59.000Z | src/main.py | mdsanima-dev/mdsanima-rt-go | 6032efb8f9fd3dfeb1640fba47b0b8b7759d4572 | [
"Apache-2.0"
] | 1 | 2021-08-20T00:48:28.000Z | 2021-08-20T00:48:28.000Z | """
Main application MDSANIMA RT GO
"""
import kivy
from __init__ import __version__
kivy.require('2.0.0')
from kivy.uix.screenmanager import ScreenManager
from kivymd.app import MDApp
from plyer import notification
from __init__ import resource_path
from config.image import get_images
from config.setting import check_platform, theme_kivy
from libs.screen.calculation import MDSRTGO_scr_2
from libs.screen.info import MDSRTGO_scr_3
from libs.screen.welcome import MDSRTGO_scr_1
class MDSRTGO_main(MDApp):
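    # KivyMD app: applies the theme, fires a desktop notification on startup and wires three screens.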
title = 'MDSANIMA RT GO v' + __version__
def build(self):
theme_kivy(self, 'Orange', 'Blue', 'Dark')
img = get_images()
self.icon = resource_path(img[0])
notification_icon = check_platform()
notification.notify(
title='MDSANIMA RT GO',
message='You have a 2 messages and 10 new issues',
app_name='MDSANIMA RT GO',
app_icon=resource_path(notification_icon),
timeout=10
)
sm = ScreenManager()
sm.add_widget(MDSRTGO_scr_1(name='scr_1'))
sm.add_widget(MDSRTGO_scr_2(name='scr_2'))
sm.add_widget(MDSRTGO_scr_3(name='scr_3'))
return sm
if __name__ == '__main__':
MDSRTGO_main().run()
| 24.686275 | 62 | 0.68467 | 716 | 0.568705 | 0 | 0 | 0 | 0 | 0 | 0 | 188 | 0.149325 |
769a048a0a36fac60d8155d390abc6c865fbbb32 | 1,188 | py | Python | puzzler/puzzles/polysticks123.py | tiwo/puzzler | 7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e | [
"Intel"
] | null | null | null | puzzler/puzzles/polysticks123.py | tiwo/puzzler | 7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e | [
"Intel"
] | null | null | null | puzzler/puzzles/polysticks123.py | tiwo/puzzler | 7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e | [
"Intel"
] | 1 | 2022-01-02T16:54:14.000Z | 2022-01-02T16:54:14.000Z | #!/usr/bin/env python
# $Id$
# Author: David Goodger <goodger@python.org>
# Copyright: (C) 1998-2015 by David J. Goodger
# License: GPL 2 (see __init__.py)
"""
Concrete polystick (orders 1 through 3) puzzles.
"""
from puzzler import coordsys
from puzzler.puzzles.polysticks import Polysticks123
class Polysticks123_4x4ClippedCorners1(Polysticks123):
"""
21 solutions
"""
width = 4
height = 4
holes = set(((0,0,0), (0,0,1), (2,3,0), (3,2,1)))
"""
no solutions:
holes = set(((1,1,0), (1,1,1), (1,2,0), (2,1,1)))
holes = set(((0,1,1), (1,0,0), (1,3,0), (3,1,1)))
"""
def coordinates(self):
for coord in self.coordinates_bordered(self.width, self.height):
if coord not in self.holes:
yield coord
def customize_piece_data(self):
self.piece_data['L3'][-1]['flips'] = None
self.piece_data['L3'][-1]['rotations'] = (0, 1)
class Polysticks123_4x4ClippedCorners2(Polysticks123_4x4ClippedCorners1):
"""
132 solutions
"""
holes = set(((0,3,0), (0,2,1), (2,3,0), (3,2,1)))
def customize_piece_data(self):
self.piece_data['L3'][-1]['flips'] = None
| 21.6 | 73 | 0.590067 | 884 | 0.744108 | 163 | 0.137205 | 0 | 0 | 0 | 0 | 444 | 0.373737 |
769acd1960ba73c348d3e0c4296d92a024bce7eb | 2,076 | py | Python | src/scoring/split_by_target.py | annacarbery/VS_ECFP | 20d464ecf3c0270957885b499748d1e2231d7ae8 | [
"MIT"
] | null | null | null | src/scoring/split_by_target.py | annacarbery/VS_ECFP | 20d464ecf3c0270957885b499748d1e2231d7ae8 | [
"MIT"
] | null | null | null | src/scoring/split_by_target.py | annacarbery/VS_ECFP | 20d464ecf3c0270957885b499748d1e2231d7ae8 | [
"MIT"
] | null | null | null | from sklearn import tree
import os
import random
import numpy as np
import json
DATA_DIR = '/Users/tyt15771/Documents/VS_ECFP/data/input/'
data_train = []
class_train = []
data_test = []
class_test = []
hits = os.listdir(f'{DATA_DIR}/hit')
random.shuffle(hits)
miss = os.listdir(f'{DATA_DIR}/miss')
random.shuffle(miss)
hits_train, hits_test = [i for i in hits if 'DCLRE1AA' not in i], [i for i in hits if '_DCLRE1AA' in i]
miss_train, miss_test = [i for i in miss if 'DCLRE1AA' not in i], [i for i in miss if '_DCLRE1AA' in i]
miss_train = miss_train[:len(hits_train)]
print(len(hits_train), len(miss_train), len(hits_test), len(miss_test))
for pair in hits_train:
array = np.load(f'{DATA_DIR}/hit/{pair}/array.npy')
data_train.append([int(array[0][i]+array[1][i]) for i in range(len(array[0]))])
# data_train.append([list(array[0]), list(array[1])])
class_train.append(1)
print(len(data_train))
for pair in miss_train:
array = np.load(f'{DATA_DIR}/miss/{pair}/array.npy')
data_train.append([int(array[0][i]+array[1][i]) for i in range(len(array[0]))])
# data_train.append([list(array[0]), list(array[1])])
class_train.append(0)
print(len(data_train))
for pair in hits_test:
array = np.load(f'{DATA_DIR}/hit/{pair}/array.npy')
data_test.append([int(array[0][i]+array[1][i]) for i in range(len(array[0]))])
class_test.append(1)
print(len(data_test))
for pair in miss_test:
array = np.load(f'{DATA_DIR}/miss/{pair}/array.npy')
data_test.append([int(array[0][i]+array[1][i]) for i in range(len(array[0]))])
class_test.append(0)
print(len(data_test))
json.dump(data_train, open('/Users/tyt15771/Documents/VS_ECFP/src/scoring/target_test_set/data_train.json', 'w'))
json.dump(class_train, open('/Users/tyt15771/Documents/VS_ECFP/src/scoring/target_test_set/class_train.json', 'w'))
json.dump(data_test, open('/Users/tyt15771/Documents/VS_ECFP/src/scoring/target_test_set/data_test.json', 'w'))
json.dump(class_test, open('/Users/tyt15771/Documents/VS_ECFP/src/scoring/target_test_set/class_test.json', 'w'))
| 34.032787 | 115 | 0.707611 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 696 | 0.33526 |
769ada13f8c3423b8e779440f35e0b945f5e4443 | 989 | py | Python | AsRoot/app/views.py | erin-hughes/qradar-sample-apps | 13e05e37dbd2c2cbf52220fbd8508b35c4091523 | [
"Apache-2.0"
] | 8 | 2020-09-09T10:11:28.000Z | 2021-11-21T12:56:35.000Z | AsRoot/app/views.py | erin-hughes/qradar-sample-apps | 13e05e37dbd2c2cbf52220fbd8508b35c4091523 | [
"Apache-2.0"
] | 2 | 2021-03-04T13:25:29.000Z | 2022-01-10T13:33:53.000Z | AsRoot/app/views.py | erin-hughes/qradar-sample-apps | 13e05e37dbd2c2cbf52220fbd8508b35c4091523 | [
"Apache-2.0"
] | 13 | 2020-09-18T12:33:40.000Z | 2022-03-04T12:16:08.000Z | # Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Blueprint
# pylint: disable=invalid-name
viewsbp = Blueprint('viewsbp', __name__, url_prefix='/')
# Simple endpoint that displays the contents of sudoers
@viewsbp.route('/index')
def index():
with open('/opt/app-root/sudoers', 'r') as file:
text_formatted = " "
for line in file:
text_formatted += line + "</br>"
file.close()
return text_formatted
| 32.966667 | 74 | 0.718908 | 0 | 0 | 0 | 0 | 233 | 0.235592 | 0 | 0 | 707 | 0.714863 |
769cd151bfff35e599b6b4d6a430fabc5c061bb9 | 56 | py | Python | web/code/mmg/jobtrak/util/__init__.py | 559Labs/JobTrak | 5b118248e9b6e62f479a335b5a23b7062b6f2368 | [
"Apache-2.0"
] | 1 | 2015-01-27T00:41:31.000Z | 2015-01-27T00:41:31.000Z | web/code/mmg/jobtrak/util/__init__.py | andrewmarconi/JobTrak | 5b118248e9b6e62f479a335b5a23b7062b6f2368 | [
"Apache-2.0"
] | 118 | 2015-01-26T14:02:52.000Z | 2015-01-29T18:35:07.000Z | web/code/mmg/jobtrak/util/__init__.py | MarconiMediaGroup/JobTrak | 5b118248e9b6e62f479a335b5a23b7062b6f2368 | [
"Apache-2.0"
] | null | null | null | default_app_config = 'mmg.jobtrak.util.apps.LocalConfig' | 56 | 56 | 0.839286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.625 |
769e263d23fa479291700d7aa03a51789709cb77 | 2,471 | py | Python | timemachines/skaters/nproph/nprophetiskaterfactory.py | iklasky/timemachines | 1820fa9453d31d4daaeff75274a935c7455febe3 | [
"MIT"
] | 253 | 2021-01-08T17:33:30.000Z | 2022-03-21T17:32:36.000Z | timemachines/skaters/nproph/nprophetiskaterfactory.py | iklasky/timemachines | 1820fa9453d31d4daaeff75274a935c7455febe3 | [
"MIT"
] | 65 | 2021-01-20T16:43:35.000Z | 2022-03-30T19:07:22.000Z | timemachines/skaters/nproph/nprophetiskaterfactory.py | iklasky/timemachines | 1820fa9453d31d4daaeff75274a935c7455febe3 | [
"MIT"
] | 28 | 2021-02-04T14:58:30.000Z | 2022-01-17T04:35:17.000Z | from timemachines.skaters.nproph.nprophetinclusion import using_neuralprophet, NeuralProphet
if using_neuralprophet:
import pandas as pd
from typing import List, Tuple, Any
from timemachines.skatertools.utilities.conventions import wrap
from timemachines.skaters.nproph.nprophparams import NPROPHET_MODEL, NPROPHET_META
def nprophet_iskater_factory(y: [[float]], k: int, a: List = None, t: List = None, e=None, freq: str = None, n_max=1000,
recursive: bool = False, model_params: dict = None, return_forecast=True):
# For now we keep it simple. Will add to this over time
y0s = [wrap(yi)[0] for yi in y]
x, x_std, forecast,m = nprophet_fit_and_predict_simple(y=y0s,k=k,freq=freq,model_params=model_params)
return (x, x_std, forecast, m) if return_forecast else (x, x_std)
def nprophet_fit_and_predict_simple(y: [float], k: int, freq: str = None, model_params: dict = None) -> Tuple[
List, List, Any, Any]:
""" Simpler wrapper for offlinetesting - univariate only """
assert isinstance(y[0],float)
freq = freq or NPROPHET_META['freq']
used_params = NPROPHET_MODEL
used_params.update({'n_forecasts':k})
if model_params:
used_params.update(model_params)
if len(y)<used_params['n_lags']:
x = [wrap(y)[0]]*k
x_std = [1.0]*k
return x, x_std, None, None
else:
model = NeuralProphet(**used_params)
df = pd.DataFrame(columns=['y'], data=y)
df['ds'] = pd.date_range(start='2021-01-01', periods=len(y), freq=freq)
metrics = model.fit(df, freq=freq, epochs=40, progress_bar=False)
future = model.make_future_dataframe(df)
forecast = model.predict(future)
x = [ forecast['yhat'+str(j+1)].values[-k+j] for j in range(k) ]
x_std = [1.0]*k
return x, x_std, forecast, model
if __name__ == '__main__':
    assert using_neuralprophet, 'pip install neuralprophet'
from timemachines.skatertools.data.real import hospital
k = 3
n = 500
y = hospital(n=n)[-200:]
x, x_std, forecast, m = nprophet_iskater_factory(y=y, k=k)
print(x)
assert len(x) == k
x1, x_std1, forecast1, m1 = nprophet_fit_and_predict_simple(y=y, k=k)
if True:
m.plot(forecast)
m1.plot(forecast1)
import matplotlib.pyplot as plt
plt.show() | 43.350877 | 124 | 0.630514 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 204 | 0.082558 |
769ea6168b2afa78cac264bb8c29d434c439de34 | 1,165 | py | Python | cride/teachers/serializers/skills.py | alexhernandez-git/cride-frontend | 3c7e7b2ab374efb33b798d959e473a943b6df03c | [
"MIT"
] | null | null | null | cride/teachers/serializers/skills.py | alexhernandez-git/cride-frontend | 3c7e7b2ab374efb33b798d959e473a943b6df03c | [
"MIT"
] | 11 | 2020-02-25T07:34:21.000Z | 2022-01-13T02:15:57.000Z | cride/teachers/serializers/skills.py | alexhernandez-git/cride-frontend | 3c7e7b2ab374efb33b798d959e473a943b6df03c | [
"MIT"
] | null | null | null | """Teacher serializer."""
# Django REST Framework
from rest_framework import serializers
# Models
from cride.teachers.models import Skill
class SkillModelSerializer(serializers.ModelSerializer):
"""Profile model serializer."""
class Meta:
"""Meta class."""
model = Skill
fields = (
'id',
'skill_value',
'level_value'
)
read_only_fields = (
'id',
)
def validate_skill_value(self, data):
        existing = Skill.objects.filter(skill_value=data, teacher_id=self.context['request'].user.teacher.pk)
        if existing:
            raise serializers.ValidationError('You already introduced that skill.')
return data
    def validate(self, data):
        if data.get('skill_value') == "":
            raise serializers.ValidationError('Skill value is not filled')
        if data.get('level_value') is None:
            raise serializers.ValidationError('Level value is not filled')
        return data
def create(self, validated_data):
return Skill.objects.create(**validated_data, teacher_id=self.context['request'].user.teacher.pk)
| 27.738095 | 109 | 0.628326 | 1,022 | 0.877253 | 0 | 0 | 0 | 0 | 0 | 0 | 270 | 0.23176 |
769f8f43c009cc26df35b8f606d1b914ff167485 | 670 | py | Python | ibt/context.py | rcook/ibt | 6ab5bda2067ef053874938a4ec445ea0e54b30f2 | [
"MIT"
] | null | null | null | ibt/context.py | rcook/ibt | 6ab5bda2067ef053874938a4ec445ea0e54b30f2 | [
"MIT"
] | 13 | 2018-08-08T15:40:54.000Z | 2021-06-22T21:00:36.000Z | ibt/context.py | rcook/ibt | 6ab5bda2067ef053874938a4ec445ea0e54b30f2 | [
"MIT"
] | null | null | null | ###############################################################################
#
# IBT: Isolated Build Tool
# Copyright (C) 2016, Richard Cook. All rights reserved.
#
# Simple wrappers around Docker etc. for fully isolated build environments
#
###############################################################################
from __future__ import print_function
from ibt.util import get_user_info
class Context(object):
def __init__(self, working_dir):
self._working_dir = working_dir
self._user_info = get_user_info(working_dir)
@property
def working_dir(self): return self._working_dir
    @property
    def user_info(self): return self._user_info
| 29.130435 | 79 | 0.568657 | 268 | 0.4 | 0 | 0 | 61 | 0.091045 | 0 | 0 | 317 | 0.473134 |
76a110e79c22c20d6fd80a6e38e2b2d759f2b55b | 480 | py | Python | holobot/sdk/exceptions/argument_error.py | rexor12/holobot | 89b7b416403d13ccfeee117ef942426b08d3651d | [
"MIT"
] | 1 | 2021-05-24T00:17:46.000Z | 2021-05-24T00:17:46.000Z | holobot/sdk/exceptions/argument_error.py | rexor12/holobot | 89b7b416403d13ccfeee117ef942426b08d3651d | [
"MIT"
] | 41 | 2021-03-24T22:50:09.000Z | 2021-12-17T12:15:13.000Z | holobot/sdk/exceptions/argument_error.py | rexor12/holobot | 89b7b416403d13ccfeee117ef942426b08d3651d | [
"MIT"
] | null | null | null | class ArgumentError(Exception):
def __init__(self, argument_name: str, *args) -> None:
super().__init__(*args)
self.argument_name = argument_name
@property
def argument_name(self) -> str:
return self.__argument_name
@argument_name.setter
def argument_name(self, value: str) -> None:
self.__argument_name = value
def __str__(self) -> str:
return f"{super().__str__()}\nArgument name: {self.argument_name}"
| 30 | 74 | 0.645833 | 479 | 0.997917 | 0 | 0 | 188 | 0.391667 | 0 | 0 | 59 | 0.122917 |
76a1d4ac05ac140e8cf845cb7dc3c6f309ba9c87 | 5,178 | py | Python | python/cogs/helpall.py | HexF/felix | e7fc4027de8437af222c936c17a30d0ad4e887b3 | [
"MIT"
] | null | null | null | python/cogs/helpall.py | HexF/felix | e7fc4027de8437af222c936c17a30d0ad4e887b3 | [
"MIT"
] | null | null | null | python/cogs/helpall.py | HexF/felix | e7fc4027de8437af222c936c17a30d0ad4e887b3 | [
"MIT"
] | null | null | null | """This is a cog for a discord.py bot.
It hides the help command and adds these commands:
helpall show all commands (including all hidden ones)
The commands will output to the current channel or to a dm channel
according to the pm_help kwarg of the bot.
Only users that have an admin role can use the commands.
"""
import itertools
from discord import Embed
from discord.ext import commands
from discord.ext.commands import HelpCommand, DefaultHelpCommand
#pylint: disable=E1101
class myHelpCommand(HelpCommand):
def __init__(self, **options):
super().__init__(**options)
self.paginator = None
self.spacer = "\u1160 " # Invisible Unicode Character to indent lines
async def send_pages(self, header=False, footer=False):
destination = self.get_destination()
embed = Embed(
color=0x2ECC71
)
if header:
embed.set_author(
name=self.context.bot.description,
icon_url=self.context.bot.user.avatar_url
)
for category, entries in self.paginator:
embed.add_field(
name=category,
value=entries,
inline=False
)
if footer:
embed.set_footer(
text='Use felix help <command/category> for more information.'
)
await destination.send(embed=embed)
async def send_bot_help(self, mapping):
ctx = self.context
bot = ctx.bot
def get_category(command):
cog = command.cog
return cog.qualified_name + ':' if cog is not None else 'Help:'
filtered = await self.filter_commands(
bot.commands,
sort=True,
key=get_category
)
to_iterate = itertools.groupby(filtered, key=get_category)
for cog_name, command_grouper in to_iterate:
cmds = sorted(command_grouper, key=lambda c: c.name)
category = f'► {cog_name}'
if len(cmds) == 1:
entries = f'{self.spacer}{cmds[0].name} → {cmds[0].short_doc}'
else:
entries = ''
while len(cmds) > 0:
entries += self.spacer
entries += ' | '.join([cmd.name for cmd in cmds[0:8]])
cmds = cmds[8:]
entries += '\n' if cmds else ''
self.paginator.append((category, entries))
await self.send_pages(header=True, footer=True)
async def send_cog_help(self, cog):
filtered = await self.filter_commands(cog.get_commands(), sort=True)
if not filtered:
await self.context.send(
'No public commands in this cog. Try again with felix helpall.'
)
return
category = f'▼ {cog.qualified_name}'
entries = '\n'.join(
self.spacer +
f'**{command.name}** → {command.short_doc or command.description}'
for command in filtered
)
self.paginator.append((category, entries))
await self.send_pages(footer=True)
async def send_group_help(self, group):
filtered = await self.filter_commands(group.commands, sort=True)
if not filtered:
await self.context.send(
'No public commands in group. Try again with felix helpall.'
)
return
category = f'**{group.name}** - {group.description or group.short_doc}'
entries = '\n'.join(
self.spacer + f'**{command.name}** → {command.short_doc}'
for command in filtered
)
self.paginator.append((category, entries))
await self.send_pages(footer=True)
async def send_command_help(self, command):
signature = self.get_command_signature(command)
helptext = command.help or command.description or 'No help Text'
self.paginator.append(
(signature, helptext)
)
await self.send_pages()
async def prepare_help_command(self, ctx, command=None):
self.paginator = []
await super().prepare_help_command(ctx, command)
class Help(commands.Cog):
def __init__(self, client):
self.client = client
self.client.help_command = myHelpCommand(
command_attrs={
'aliases': ['halp'],
'help': 'Shows help about the bot, a command, or a category'
}
)
async def cog_check(self, ctx):
return self.client.user_is_admin(ctx.author)
def cog_unload(self):
self.client.get_command('help').hidden = False
self.client.help_command = DefaultHelpCommand()
@commands.command(
aliases=['halpall'],
hidden=True
)
async def helpall(self, ctx, *, text=None):
"""Print bot help including all hidden commands"""
self.client.help_command = myHelpCommand(show_hidden=True)
if text:
await ctx.send_help(text)
else:
await ctx.send_help()
self.client.help_command = myHelpCommand()
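# Loading this cog follows the standard discord.py extension pattern; the
# module path below is an assumption about the host bot's layout:
#     client.load_extension('cogs.helpall')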
def setup(client):
client.add_cog(Help(client))
| 33.623377 | 79 | 0.587099 | 4,628 | 0.892059 | 0 | 0 | 401 | 0.077294 | 3,849 | 0.741904 | 1,043 | 0.201041 |
76a3321595c138692bf6ba78f4463d4296950073 | 11,200 | py | Python | helpers.py | SacaSoh/AIPND---Project-2 | 0d8227fa3fa2e7ab9c3c34bb0fd88ab95c3079df | [
"MIT"
] | null | null | null | helpers.py | SacaSoh/AIPND---Project-2 | 0d8227fa3fa2e7ab9c3c34bb0fd88ab95c3079df | [
"MIT"
] | null | null | null | helpers.py | SacaSoh/AIPND---Project-2 | 0d8227fa3fa2e7ab9c3c34bb0fd88ab95c3079df | [
"MIT"
] | null | null | null | # PROGRAMMER: Diego da Costa Oliveira
# DATE CREATED: Mar, 23, 2019.
# REVISED DATE:
# PURPOSE: Helpers for training a CNN and to predict the class for an input image
import argparse
import numpy as np
import torch
import json
from torch import nn
from torch import optim
from torchvision import datasets, transforms, models
from collections import OrderedDict
from PIL import Image
def load_data(data_dir, batchsize=60):
""" Load data folders, set batch size, execute transforms, and define the dataloaders and class to idx mapping
Returns trainloader, testloader, validloader, class_to_idx (class to index mapping)
"""
# set directories
train_dir = data_dir + '/train'
test_dir = data_dir + '/test'
valid_dir = data_dir + '/valid'
# execute transforms
train_transforms = transforms.Compose([transforms.RandomRotation(50),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
test_transforms = valid_transforms = transforms.Compose([transforms.Resize(250),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
# Load the datasets
train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
test_data = datasets.ImageFolder(test_dir, transform=test_transforms)
valid_data = datasets.ImageFolder(valid_dir, transform=valid_transforms)
# Define the dataloaders
trainloader = torch.utils.data.DataLoader(train_data, batch_size=batchsize, shuffle=True)
testloader = torch.utils.data.DataLoader(test_data, batch_size=batchsize)
validloader = torch.utils.data.DataLoader(valid_data, batch_size=batchsize)
class_to_idx = train_data.class_to_idx
return trainloader, testloader, validloader, class_to_idx
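# Example usage (a minimal sketch; 'flowers' is an assumed data directory
# containing the train/, test/ and valid/ subfolders described above):
#   trainloader, testloader, validloader, class_to_idx = load_data('flowers')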
def build_model(arch, hidden_units, learn_rate):
""" Build CNN model based on parameters, returns model, criterion, and optimizer
"""
# select model
if arch == 'vgg16':
model = models.vgg16(pretrained=True)
elif arch == 'vgg11_bn':
model = models.vgg11_bn(pretrained=True)
    else:
        raise ValueError('Model not recognized - select between "vgg11_bn" and "vgg16"')
# freezing parameters
for param in model.parameters():
param.requires_grad = False
# classifier - two options just for purposes of this exercise
    if arch == 'vgg16':
classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(25088, hidden_units)),
('relu1', nn.ReLU()),
('Dropout1', nn.Dropout(0.2)),
('fc2', nn.Linear(hidden_units, 102)),
('output', nn.LogSoftmax(dim=1))
]))
elif arch == 'vgg11_bn':
# for some reason, the deactivated relu (relu2) helps precision
classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(25088, hidden_units)),
('relu1', nn.ReLU()),
('Dropout1', nn.Dropout(0.2)),
('fc2', nn.Linear(hidden_units, 384)),
#('relu2', nn.ReLU()),
('fc3', nn.Linear(384, 102)),
('output', nn.LogSoftmax(dim=1))
]))
model.classifier = classifier
# set model criterion
criterion = nn.NLLLoss()
    # Set optimizer - only train the classifier parameters; feature parameters are frozen
optimizer = optim.Adam(model.classifier.parameters(), lr=learn_rate)
return model, criterion, optimizer
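# Example usage (sketch built from this module's own defaults):
#   model, criterion, optimizer = build_model('vgg11_bn', hidden_units=700,
#                                             learn_rate=0.001)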
def load_checkpoint_prediction(filepath):
""" Load checkpoint for use at prediction (no parameters for further training)
"""
checkpoint = torch.load(filepath)
hidden_units = checkpoint['hidden_units']
if checkpoint['arch'] == 'vgg16':
# make sure to create same model used as before
model = models.vgg16(pretrained=True)
# freezing parameters
for param in model.parameters():
param.requires_grad = False
# classifier
classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(25088, hidden_units)),
('relu1', nn.ReLU()),
('Dropout1', nn.Dropout(0.2)),
('fc2', nn.Linear(hidden_units, 102)),
('output', nn.LogSoftmax(dim=1))
]))
model.classifier = classifier
elif checkpoint['arch'] == 'vgg11_bn':
# make sure to create same model used as before
model = models.vgg11_bn(pretrained=True)
# freezing parameters
for param in model.parameters():
param.requires_grad = False
# classifier
# for some reason, the deactivated relu helps precision
classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(25088, hidden_units)),
('relu1', nn.ReLU()),
('Dropout1', nn.Dropout(0.2)),
('fc2', nn.Linear(hidden_units, 384)),
# ('relu2', nn.ReLU()),
('fc3', nn.Linear(384, 102)),
('output', nn.LogSoftmax(dim=1))
]))
model.classifier = classifier
    else:
        raise ValueError('Architecture not recognized on load function -- options "vgg11_bn" and "vgg16"')
# load model parameters
model.load_state_dict(checkpoint['state_dict'])
model.class_to_idx = checkpoint['class_to_idx']
return model
def process_image(image):
""" Scales, crops, and normalizes a PIL image for input to a PyTorch model,
returns a Numpy array
"""
img = Image.open(image)
# keep shortest side as 256 pixels
if img.size[0] > img.size[1]:
img.thumbnail((img.size[0], 256))
else:
img.thumbnail((256, img.size[1]))
# crop to a 224x224 image
left_margin = (img.width - 224) / 2
bottom_margin = (img.height - 224) / 2
right_margin = left_margin + 224
top_margin = bottom_margin + 224
img = img.crop((left_margin, bottom_margin, right_margin, top_margin))
# convert to numpy array, execute normalization
np_image = np.array(img)
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
np_image = np_image / 255
np_image = (np_image - mean) / std
    # transpose image to satisfy torch expected structure (color channel as 1st channel) - PIL uses 3rd channel
np_image = np.transpose(np_image, (2, 0, 1))
return np_image
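# The normalization above uses the ImageNet statistics that the pretrained
# VGG features expect; e.g. a mid-gray red value of 128 maps to roughly
# (128/255 - 0.485) / 0.229, which is about 0.074.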
def predict(image_path, checkpoint, topk, category_names, gpu):
""" Predict the class (or classes) of an image using a trained deep learning model.
"""
# load model on eval mode
model = load_checkpoint_prediction(checkpoint)
model.eval()
device = torch.device("cuda:0" if gpu is True else "cpu")
model.to(device)
# preprocess image
image = process_image(image_path)
image = torch.from_numpy(image)
reshaped = image.unsqueeze(0)
reshaped = reshaped.float().to(device)
# run image thru network
with torch.no_grad():
logps = model(reshaped)
ps = torch.exp(logps)
with open(category_names, 'r') as f:
cat_to_name = json.load(f)
top_p_list, idx_class_list, class_to_idx, flower_names, top_labels = [], [], [], [], []
# get top probabilities and respective classes (0-indexed)
top_p, top_class = ps.topk(topk, dim=1)
# populate list with k probabilities and classes, and index to cat_to_name (flower names)
for i in range(top_p.shape[1]):
top_p_list.append(top_p.data[0][i].item())
idx_class_list.append(top_class.data[0][i].item())
idx_to_class = {val: key for key, val in model.class_to_idx.items()}
top_labels = [idx_to_class[lab] for lab in idx_class_list]
flower_names = [cat_to_name[i] for i in top_labels]
return top_p_list, flower_names
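# Example call (illustrative sketch; the image and checkpoint paths are
# assumptions, 'cat_to_name.json' mirrors the CLI default below):
#   probs, names = predict('rose.jpg', 'checkpoint.pth', topk=5,
#                          category_names='cat_to_name.json', gpu=False)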
def get_input_args_train():
""" Process input arguments for training script
"""
# Create Parse using ArgumentParser
parser = argparse.ArgumentParser()
# Create command line arguments
parser.add_argument('data_dir', type=str,
help='path to the folder containing train, test, and validation subfolders')
# optional arguments
parser.add_argument('--batchsize', type=int, default=60,
help='minibatch size (default=60)')
    parser.add_argument('--print_every', type=int, default=1,
                        help='Number of epochs of training for each testing pass (default=1 i.e. every epoch)')
parser.add_argument('--save_dir', type=str, default='',
help='path to the folder to save checkpoints (default= "" (root folder))')
parser.add_argument('--arch', type=str, default='vgg11_bn',
help='CNN Model Architecture - vgg11_bn, or vgg16 (default= "vgg11_bn")')
    parser.add_argument('--learn_rate', type=float, default=0.001,
                        help='Learning rate (default=0.001)')
    parser.add_argument('--gpu', default=False, action='store_true',
                        help='Use the GPU CUDA device if available (default=False)')
parser.add_argument('--epochs', type=int, default=12,
help='Number of epochs to execute the training')
    parser.add_argument('--hidden_units', type=int, default=700,
                        help='Number of hidden units in the classifier (default=700)')
return parser.parse_args()
def get_input_args_predict():
""" Process input arguments for prediction script
"""
# Create Parse using ArgumentParser
parser = argparse.ArgumentParser()
# Create command line arguments
parser.add_argument('path_to_image', type=str,
help='path to the image to predict flower name')
parser.add_argument('path_to_checkpoint', type=str,
help='path to the checkpoint with CNN training data')
# optional arguments
parser.add_argument('--top_k', type=int, default=5,
help='Return top K most likely classes (default=5)')
    parser.add_argument('--gpu', default=False, action='store_true',
                        help='Use the GPU CUDA device if available (default=False)')
parser.add_argument('--category_names', type=str, default='cat_to_name.json',
help='Use a mapping of categories to real names (default= "cat_to_name.json")')
return parser.parse_args() | 36.842105 | 114 | 0.596071 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,484 | 0.311071 |
76a35f5239c3e344d44f45b914edad790aa2a4e3 | 949 | py | Python | test_single.py | hkchengrex/CharTrans-GAN | 4a589e383eb49dad7a033458aa70eeb207b7de74 | [
"Apache-2.0"
] | 2 | 2019-08-17T12:48:52.000Z | 2019-12-20T07:40:10.000Z | test_single.py | hkchengrex/CharTrans-GAN | 4a589e383eb49dad7a033458aa70eeb207b7de74 | [
"Apache-2.0"
] | 1 | 2018-12-19T12:21:37.000Z | 2018-12-19T13:59:57.000Z | test_single.py | hkchengrex/CharTrans-GAN | 4a589e383eb49dad7a033458aa70eeb207b7de74 | [
"Apache-2.0"
] | 1 | 2018-11-04T11:04:21.000Z | 2018-11-04T11:04:21.000Z | from net import Net
from font_loader import FontDataset
import torch
import torchvision
import torchvision.transforms as transforms
font_root = './data/test1/'
std_font = './data/standard/'
font_size = 52
image_size = 48
numTransform = 5
numRef = 5
char_list_path = './character_set/character_set_1798'
# Load the set of common characters
with open(char_list_path, 'r') as char_set:
char_list = char_set.readlines()
char_list = [x.strip() for x in char_list]
# char_list = ['的']
train_batch_size = 1
# Create dataset and dataloader
train_dataset = FontDataset(font_root, char_list, std_font, font_size, image_size, numTransform, numRef, '鬼')
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=train_batch_size,
shuffle=True, num_workers=1)
net = Net(train_loader, 500, 10, 'model/BBResize_S5T5_eN3_NoRefA_train_T5', numTransform, numRef)
net.load(389)
net.generateOne()
| 26.361111 | 109 | 0.737619 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 205 | 0.21511 |
76a3aadc66cb9b6a3ffc18d0b5d0424cb3f1bf19 | 1,435 | py | Python | tests/package_path_test.py | radiasoft/sirepo | db3d1737bab7a84d39d456c0e8913c88deff3c31 | [
"Apache-2.0"
] | 49 | 2015-07-29T14:11:29.000Z | 2021-12-10T15:24:26.000Z | tests/package_path_test.py | radiasoft/sirepo | db3d1737bab7a84d39d456c0e8913c88deff3c31 | [
"Apache-2.0"
] | 3,732 | 2015-08-03T22:07:26.000Z | 2022-03-31T22:48:33.000Z | tests/package_path_test.py | radiasoft/sirepo | db3d1737bab7a84d39d456c0e8913c88deff3c31 | [
"Apache-2.0"
] | 28 | 2015-11-20T16:23:46.000Z | 2021-09-20T07:22:48.000Z | # -*- coding: utf-8 -*-
u"""Using a sim type that lives in a package outside of sirepo.
:copyright: Copyright (c) 2021 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import contextlib
import pytest
def test_run():
from pykern import pkunit
from pykern.pkdebug import pkdp, pkdlog
with _install():
fc = _fc()
r = fc.sr_login_as_guest(sim_type='code1')
d = fc.sr_sim_data(sim_type='code1', sim_name='Secret sauce')
pkunit.pkeq('green', d.models.sauce.color)
def _fc():
from pykern.pkdebug import pkdp
from pykern.pkcollections import PKDict
from sirepo import srunit
fc = srunit.flask_client(
cfg=PKDict(
SIREPO_FEATURE_CONFIG_PACKAGE_PATH='sirepo_test_package_path:sirepo',
),
sim_types='code1',
no_chdir_work=True,
)
return fc
@contextlib.contextmanager
def _install():
from pykern import pkunit, pkio
from pykern.pkdebug import pkdp, pkdlog
import subprocess
import sys
with pkunit.save_chdir_work() as d:
pkunit.data_dir().join('sirepo_test_package_path.tar.gz').copy(d)
subprocess.run('tar xzf sirepo_test_package_path.tar.gz', shell=True)
with pkio.save_chdir('sirepo_test_package_path') as d:
sys.path.append(str(d))
yield
| 28.137255 | 81 | 0.681533 | 0 | 0 | 442 | 0.308014 | 469 | 0.326829 | 0 | 0 | 392 | 0.273171 |
76a644c9158839039d51c335089af8172474435d | 35,948 | py | Python | fanficdownloader/story.py | rodrigonz/rodrigodeoliveiracosta-ffdown | e28e20232e9cd6cef84aa9e830ed8de7dbb208ae | [
"Apache-2.0"
] | null | null | null | fanficdownloader/story.py | rodrigonz/rodrigodeoliveiracosta-ffdown | e28e20232e9cd6cef84aa9e830ed8de7dbb208ae | [
"Apache-2.0"
] | null | null | null | fanficdownloader/story.py | rodrigonz/rodrigodeoliveiracosta-ffdown | e28e20232e9cd6cef84aa9e830ed8de7dbb208ae | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2011 Fanficdownloader team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, re
import urlparse
import string
from math import floor
from functools import partial
import logging
logger = logging.getLogger(__name__)
import urlparse as up
import exceptions
from htmlcleanup import conditionalRemoveEntities, removeAllEntities
from configurable import Configurable
SPACE_REPLACE=u'\s'
SPLIT_META=u'\,'
# Create convert_image method depending on which graphics lib we can
# load. Preferred: calibre, PIL, none
imagetypes = {
'jpg':'image/jpeg',
'jpeg':'image/jpeg',
'png':'image/png',
'gif':'image/gif',
'svg':'image/svg+xml',
}
try:
from calibre.utils.magick import Image
convtype = {'jpg':'JPG', 'png':'PNG'}
def convert_image(url,data,sizes,grayscale,
removetrans,imgtype="jpg",background='#ffffff'):
export = False
img = Image()
img.load(data)
owidth, oheight = img.size
nwidth, nheight = sizes
scaled, nwidth, nheight = fit_image(owidth, oheight, nwidth, nheight)
if scaled:
img.size = (nwidth, nheight)
export = True
if normalize_format_name(img.format) != imgtype:
export = True
if removetrans and img.has_transparent_pixels():
canvas = Image()
canvas.create_canvas(int(img.size[0]), int(img.size[1]), str(background))
canvas.compose(img)
img = canvas
export = True
if grayscale and img.type != "GrayscaleType":
img.type = "GrayscaleType"
export = True
if export:
return (img.export(convtype[imgtype]),imgtype,imagetypes[imgtype])
else:
logger.debug("image used unchanged")
return (data,imgtype,imagetypes[imgtype])
except:
# No calibre routines, try for PIL for CLI.
try:
import Image
from StringIO import StringIO
convtype = {'jpg':'JPEG', 'png':'PNG'}
def convert_image(url,data,sizes,grayscale,
removetrans,imgtype="jpg",background='#ffffff'):
export = False
img = Image.open(StringIO(data))
owidth, oheight = img.size
nwidth, nheight = sizes
scaled, nwidth, nheight = fit_image(owidth, oheight, nwidth, nheight)
if scaled:
img = img.resize((nwidth, nheight),Image.ANTIALIAS)
export = True
if normalize_format_name(img.format) != imgtype:
if img.mode == "P":
                # convert palette gifs to RGB so jpg save doesn't fail.
img = img.convert("RGB")
export = True
if removetrans and img.mode == "RGBA":
background = Image.new('RGBA', img.size, background)
# Paste the image on top of the background
background.paste(img, img)
img = background.convert('RGB')
export = True
if grayscale and img.mode != "L":
img = img.convert("L")
export = True
if export:
outsio = StringIO()
img.save(outsio,convtype[imgtype])
return (outsio.getvalue(),imgtype,imagetypes[imgtype])
else:
logger.debug("image used unchanged")
return (data,imgtype,imagetypes[imgtype])
except:
# No calibre or PIL, simple pass through with mimetype.
def convert_image(url,data,sizes,grayscale,
removetrans,imgtype="jpg",background='#ffffff'):
return no_convert_image(url,data)
## also used for explicit no image processing.
def no_convert_image(url,data):
parsedUrl = up.urlparse(url)
ext=parsedUrl.path[parsedUrl.path.rfind('.')+1:].lower()
if ext not in imagetypes:
logger.debug("no_convert_image url:%s - no known extension"%url)
# doesn't have extension? use jpg.
ext='jpg'
return (data,ext,imagetypes[ext])
def normalize_format_name(fmt):
if fmt:
fmt = fmt.lower()
if fmt == 'jpeg':
fmt = 'jpg'
return fmt
def fit_image(width, height, pwidth, pheight):
'''
Fit image in box of width pwidth and height pheight.
@param width: Width of image
@param height: Height of image
@param pwidth: Width of box
@param pheight: Height of box
@return: scaled, new_width, new_height. scaled is True iff new_width and/or new_height is different from width or height.
'''
scaled = height > pheight or width > pwidth
if height > pheight:
corrf = pheight/float(height)
width, height = floor(corrf*width), pheight
if width > pwidth:
corrf = pwidth/float(width)
width, height = pwidth, floor(corrf*height)
if height > pheight:
corrf = pheight/float(height)
width, height = floor(corrf*width), pheight
return scaled, int(width), int(height)
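# e.g. fit_image(1000, 500, 200, 200) returns (True, 200, 100): the image
# is shrunk to fit inside the 200x200 box while keeping its aspect ratio.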
try:
# doesn't really matter what, just checking for appengine.
from google.appengine.api import apiproxy_stub_map
is_appengine = True
except:
is_appengine = False
# The list comes from ffnet, the only multi-language site we support
# at the time of writing. Values are taken largely from pycountry,
# but with some corrections and guesses.
langs = {
"English":"en",
"Spanish":"es",
"French":"fr",
"German":"de",
"Chinese":"zh",
"Japanese":"ja",
"Dutch":"nl",
"Portuguese":"pt",
"Russian":"ru",
"Italian":"it",
"Bulgarian":"bg",
"Polish":"pl",
"Hungarian":"hu",
"Hebrew":"he",
"Arabic":"ar",
"Swedish":"sv",
"Norwegian":"no",
"Danish":"da",
"Finnish":"fi",
"Filipino":"fil",
"Esperanto":"eo",
"Hindi":"hi",
"Punjabi":"pa",
"Farsi":"fa",
"Greek":"el",
"Romanian":"ro",
"Albanian":"sq",
"Serbian":"sr",
"Turkish":"tr",
"Czech":"cs",
"Indonesian":"id",
"Croatian":"hr",
"Catalan":"ca",
"Latin":"la",
"Korean":"ko",
"Vietnamese":"vi",
"Thai":"th",
"Devanagari":"hi",
}
class InExMatch:
keys = []
regex = None
match = None
negate = False
def __init__(self,line):
if "=~" in line:
(self.keys,self.match) = line.split("=~")
self.match = self.match.replace(SPACE_REPLACE,' ')
self.regex = re.compile(self.match)
elif "!~" in line:
(self.keys,self.match) = line.split("!~")
self.match = self.match.replace(SPACE_REPLACE,' ')
self.regex = re.compile(self.match)
self.negate = True
elif "==" in line:
(self.keys,self.match) = line.split("==")
self.match = self.match.replace(SPACE_REPLACE,' ')
elif "!=" in line:
(self.keys,self.match) = line.split("!=")
self.match = self.match.replace(SPACE_REPLACE,' ')
self.negate = True
self.keys = map( lambda x: x.strip(), self.keys.split(",") )
# For conditional, only one key
def is_key(self,key):
return key == self.keys[0]
# For conditional, only one key
def key(self):
return self.keys[0]
def in_keys(self,key):
return key in self.keys
def is_match(self,value):
retval = False
if self.regex:
if self.regex.search(value):
retval = True
#print(">>>>>>>>>>>>>%s=~%s r: %s,%s=%s"%(self.match,value,self.negate,retval,self.negate != retval))
else:
retval = self.match == value
#print(">>>>>>>>>>>>>%s==%s r: %s,%s=%s"%(self.match,value,self.negate,retval, self.negate != retval))
return self.negate != retval
def __str__(self):
if self.negate:
f='!'
else:
f='='
if self.regex:
s='~'
else:
s='='
return u'InExMatch(%s %s%s %s)'%(self.keys,f,s,self.match)
class Story(Configurable):
def __init__(self, configuration):
Configurable.__init__(self, configuration)
try:
## calibre plugin will set externally to match PI version.
self.metadata = {'version':os.environ['CURRENT_VERSION_ID']}
except:
self.metadata = {'version':'4.4'}
self.replacements = []
self.in_ex_cludes = {}
self.chapters = [] # chapters will be tuples of (title,html)
self.imgurls = []
self.imgtuples = []
self.cover=None # *href* of new cover image--need to create html.
self.oldcover=None # (oldcoverhtmlhref,oldcoverhtmltype,oldcoverhtmldata,oldcoverimghref,oldcoverimgtype,oldcoverimgdata)
self.calibrebookmark=None # cheesy way to carry calibre bookmark file forward across update.
self.logfile=None # cheesy way to carry log file forward across update.
## Look for config parameter, split and add each to metadata field.
for (config,metadata) in [("extracategories","category"),
("extragenres","genre"),
("extracharacters","characters"),
("extraships","ships"),
("extrawarnings","warnings")]:
for val in self.getConfigList(config):
self.addToList(metadata,val)
self.setReplace(self.getConfig('replace_metadata'))
in_ex_clude_list = ['include_metadata_pre','exclude_metadata_pre',
'include_metadata_post','exclude_metadata_post']
for ie in in_ex_clude_list:
ies = self.getConfig(ie)
# print("%s %s"%(ie,ies))
if ies:
iel = []
self.in_ex_cludes[ie] = self.set_in_ex_clude(ies)
def join_list(self, key, vallist):
return self.getConfig("join_string_"+key,u", ").replace(SPACE_REPLACE,' ').join(map(unicode, vallist))
def setMetadata(self, key, value, condremoveentities=True):
# keep as list type, but set as only value.
if self.isList(key):
self.addToList(key,value,condremoveentities=condremoveentities,clear=True)
else:
## still keeps < < and &
if condremoveentities:
self.metadata[key]=conditionalRemoveEntities(value)
else:
self.metadata[key]=value
if key == "language":
try:
# getMetadata not just self.metadata[] to do replace_metadata.
self.setMetadata('langcode',langs[self.getMetadata(key)])
except:
self.setMetadata('langcode','en')
if key == 'dateUpdated' and value:
# Last Update tags for Bill.
self.addToList('lastupdate',value.strftime("Last Update Year/Month: %Y/%m"))
self.addToList('lastupdate',value.strftime("Last Update: %Y/%m/%d"))
## metakey[,metakey]=~pattern
## metakey[,metakey]==string
## *for* part lines. Effect only when trailing conditional key=~regexp matches
## metakey[,metakey]=~pattern[&&metakey=~regexp]
## metakey[,metakey]==string[&&metakey=~regexp]
## metakey[,metakey]=~pattern[&&metakey==string]
## metakey[,metakey]==string[&&metakey==string]
def set_in_ex_clude(self,setting):
dest = []
# print("set_in_ex_clude:"+setting)
for line in setting.splitlines():
if line:
(match,condmatch)=(None,None)
if "&&" in line:
(line,conditional) = line.split("&&")
condmatch = InExMatch(conditional)
match = InExMatch(line)
dest.append([match,condmatch])
return dest
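    ## e.g. a hypothetical include_metadata_pre line:
    ##   genre=~Romance&&status==Completed
    ## which keeps only 'genre' values matching /Romance/, and only when the
    ## story's status is exactly 'Completed'.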
def do_in_ex_clude(self,which,value,key):
if value and which in self.in_ex_cludes:
include = 'include' in which
keyfound = False
found = False
for (match,condmatch) in self.in_ex_cludes[which]:
keyfndnow = False
if match.in_keys(key):
# key in keys and either no conditional, or conditional matched
if condmatch == None or condmatch.is_key(key):
keyfndnow = True
else:
condval = self.getMetadata(condmatch.key())
keyfndnow = condmatch.is_match(condval)
keyfound |= keyfndnow
# print("match:%s %s\ncondmatch:%s %s\n\tkeyfound:%s\n\tfound:%s"%(
# match,value,condmatch,condval,keyfound,found))
if keyfndnow:
found = isinstance(value,basestring) and match.is_match(value)
if found:
# print("match:%s %s\n\tkeyfndnow:%s\n\tfound:%s"%(
# match,value,keyfndnow,found))
if not include:
value = None
break
if include and keyfound and not found:
value = None
return value
## Two or three part lines. Two part effect everything.
## Three part effect only those key(s) lists.
## pattern=>replacement
## metakey,metakey=>pattern=>replacement
## *Five* part lines. Effect only when trailing conditional key=>regexp matches
## metakey[,metakey]=>pattern=>replacement[&&metakey=>regexp]
def setReplace(self,replace):
for line in replace.splitlines():
# print("replacement line:%s"%line)
(metakeys,regexp,replacement,condkey,condregexp)=(None,None,None,None,None)
if "&&" in line:
(line,conditional) = line.split("&&")
(condkey,condregexp) = conditional.split("=>")
if "=>" in line:
parts = line.split("=>")
if len(parts) > 2:
metakeys = map( lambda x: x.strip(), parts[0].split(",") )
(regexp,replacement)=parts[1:]
else:
(regexp,replacement)=parts
if regexp:
regexp = re.compile(regexp)
if condregexp:
condregexp = re.compile(condregexp)
# A way to explicitly include spaces in the
# replacement string. The .ini parser eats any
# trailing spaces.
replacement=replacement.replace(SPACE_REPLACE,' ')
self.replacements.append([metakeys,regexp,replacement,condkey,condregexp])
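    ## e.g. (hypothetical replace_metadata lines):
    ##   Sci-Fi=>Science\sFiction
    ##   category=>Harry Potter=>HP
    ## The first rewrites any metadata value; the second only 'category'.
    ## \s (SPACE_REPLACE) survives the .ini parser's trailing-space stripping.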
def doReplacements(self,value,key,return_list=False,seen_list=[]):
value = self.do_in_ex_clude('include_metadata_pre',value,key)
value = self.do_in_ex_clude('exclude_metadata_pre',value,key)
retlist = [value]
for replaceline in self.replacements:
if replaceline in seen_list: # recursion on pattern, bail
# print("bailing on %s"%replaceline)
continue
#print("replacement tuple:%s"%replaceline)
(metakeys,regexp,replacement,condkey,condregexp) = replaceline
if (metakeys == None or key in metakeys) \
and isinstance(value,basestring) \
and regexp.search(value):
doreplace=True
if condkey and condkey != key: # prevent infinite recursion.
condval = self.getMetadata(condkey)
doreplace = condval != None and condregexp.search(condval)
if doreplace:
# split into more than one list entry if
# SPLIT_META present in replacement string. Split
# first, then regex sub, then recurse call replace
# on each. Break out of loop, each split element
# handled individually by recursion call.
if SPLIT_META in replacement:
retlist = []
for splitrepl in replacement.split(SPLIT_META):
retlist.extend(self.doReplacements(regexp.sub(splitrepl,value),
key,
return_list=True,
seen_list=seen_list+[replaceline]))
break
else:
# print("replacement,value:%s,%s->%s"%(replacement,value,regexp.sub(replacement,value)))
value = regexp.sub(replacement,value)
retlist = [value]
for val in retlist:
retlist = map(partial(self.do_in_ex_clude,'include_metadata_post',key=key),retlist)
retlist = map(partial(self.do_in_ex_clude,'exclude_metadata_post',key=key),retlist)
# value = self.do_in_ex_clude('include_metadata_post',value,key)
# value = self.do_in_ex_clude('exclude_metadata_post',value,key)
if return_list:
return retlist
else:
return self.join_list(key,retlist)
def getMetadataRaw(self,key):
if self.isValidMetaEntry(key) and self.metadata.has_key(key):
return self.metadata[key]
def getMetadata(self, key,
removeallentities=False,
doreplacements=True):
value = None
if not self.isValidMetaEntry(key):
return value
if self.isList(key):
# join_string = self.getConfig("join_string_"+key,u", ").replace(SPACE_REPLACE,' ')
# value = join_string.join(self.getList(key, removeallentities, doreplacements=True))
value = self.join_list(key,self.getList(key, removeallentities, doreplacements=True))
if doreplacements:
value = self.doReplacements(value,key+"_LIST")
return value
elif self.metadata.has_key(key):
value = self.metadata[key]
if value:
if key == "numWords":
value = commaGroups(value)
if key == "numChapters":
value = commaGroups("%d"%value)
if key in ("dateCreated"):
value = value.strftime(self.getConfig(key+"_format","%Y-%m-%d %H:%M:%S"))
if key in ("datePublished","dateUpdated"):
value = value.strftime(self.getConfig(key+"_format","%Y-%m-%d"))
if doreplacements:
value=self.doReplacements(value,key)
if removeallentities and value != None:
return removeAllEntities(value)
else:
return value
else: #if self.getConfig("default_value_"+key):
return self.getConfig("default_value_"+key)
def getAllMetadata(self,
removeallentities=False,
doreplacements=True,
keeplists=False):
'''
All single value *and* list value metadata as strings (unless
keeplists=True, then keep lists).
'''
allmetadata = {}
# special handling for authors/authorUrls
linkhtml="<a class='%slink' href='%s'>%s</a>"
if self.isList('author'): # more than one author, assume multiple authorUrl too.
htmllist=[]
for i, v in enumerate(self.getList('author')):
aurl = self.getList('authorUrl')[i]
auth = v
# make sure doreplacements & removeallentities are honored.
if doreplacements:
aurl=self.doReplacements(aurl,'authorUrl')
auth=self.doReplacements(auth,'author')
if removeallentities:
aurl=removeAllEntities(aurl)
auth=removeAllEntities(auth)
htmllist.append(linkhtml%('author',aurl,auth))
# join_string = self.getConfig("join_string_authorHTML",u", ").replace(SPACE_REPLACE,' ')
            self.setMetadata('authorHTML',self.join_list("authorHTML",htmllist))
else:
self.setMetadata('authorHTML',linkhtml%('author',self.getMetadata('authorUrl', removeallentities, doreplacements),
self.getMetadata('author', removeallentities, doreplacements)))
if self.getMetadataRaw('seriesUrl'):
self.setMetadata('seriesHTML',linkhtml%('series',self.getMetadata('seriesUrl', removeallentities, doreplacements),
self.getMetadata('series', removeallentities, doreplacements)))
elif self.getMetadataRaw('series'):
self.setMetadata('seriesHTML',self.getMetadataRaw('series'))
# logger.debug("make_linkhtml_entries:%s"%self.getConfig('make_linkhtml_entries'))
for k in self.getConfigList('make_linkhtml_entries'):
# Assuming list, because it has to be site specific and
# they are all lists. Bail if kUrl list not the same
# length.
# logger.debug("\nk:%s\nlist:%s\nlistURL:%s"%(k,self.getList(k),self.getList(k+'Url')))
if len(self.getList(k+'Url')) != len(self.getList(k)):
continue
htmllist=[]
for i, v in enumerate(self.getList(k)):
url = self.getList(k+'Url')[i]
# make sure doreplacements & removeallentities are honored.
if doreplacements:
url=self.doReplacements(url,k+'Url')
v=self.doReplacements(v,k)
if removeallentities:
url=removeAllEntities(url)
v=removeAllEntities(v)
htmllist.append(linkhtml%(k,url,v))
# join_string = self.getConfig("join_string_"+k+"HTML",u", ").replace(SPACE_REPLACE,' ')
            self.setMetadata(k+'HTML',self.join_list(k+"HTML",htmllist))
for k in self.getValidMetaList():
if self.isList(k) and keeplists:
allmetadata[k] = self.getList(k, removeallentities, doreplacements)
else:
allmetadata[k] = self.getMetadata(k, removeallentities, doreplacements)
return allmetadata
# just for less clutter in adapters.
def extendList(self,listname,l):
for v in l:
self.addToList(listname,v.strip())
def addToList(self,listname,value,condremoveentities=True,clear=False):
if value==None:
return
if condremoveentities:
value = conditionalRemoveEntities(value)
if clear or not self.isList(listname) or not listname in self.metadata:
# Calling addToList to a non-list meta will overwrite it.
self.metadata[listname]=[]
# prevent duplicates.
if not value in self.metadata[listname]:
self.metadata[listname].append(value)
if listname == 'category' and self.getConfig('add_genre_when_multi_category') and len(self.metadata[listname]) > 1:
self.addToList('genre',self.getConfig('add_genre_when_multi_category'))
def isList(self,listname):
'Everything set with an include_in_* is considered a list.'
return self.isListType(listname) or \
( self.isValidMetaEntry(listname) and self.metadata.has_key(listname) \
and isinstance(self.metadata[listname],list) )
def getList(self,listname,
removeallentities=False,
doreplacements=True,
includelist=[]):
#print("getList(%s,%s)"%(listname,includelist))
retlist = []
if not self.isValidMetaEntry(listname):
return retlist
# includelist prevents infinite recursion of include_in_'s
if self.hasConfig("include_in_"+listname) and listname not in includelist:
for k in self.getConfigList("include_in_"+listname):
retlist.extend(self.getList(k,removeallentities=False,
doreplacements=doreplacements,includelist=includelist+[listname]))
else:
if not self.isList(listname):
retlist = [self.getMetadata(listname,removeallentities=False,
doreplacements=doreplacements)]
else:
retlist = self.getMetadataRaw(listname)
if retlist:
if doreplacements:
newretlist = []
for val in retlist:
newretlist.extend(self.doReplacements(val,listname,return_list=True))
retlist = newretlist
if removeallentities:
retlist = map(removeAllEntities,retlist)
retlist = filter( lambda x : x!=None and x!='' ,retlist)
# reorder ships so b/a and c/b/a become a/b and a/b/c. Only on '/',
# use replace_metadata to change separator first if needed.
        # ships=>[ ]*(/|&|&amp;)[ ]*=>/
if listname == 'ships' and self.getConfig('sort_ships') and retlist:
retlist = [ '/'.join(sorted(x.split('/'))) for x in retlist ]
if retlist:
if listname in ('author','authorUrl','authorId') or self.getConfig('keep_in_order_'+listname):
# need to retain order for author & authorUrl so the
# two match up.
return retlist
else:
# remove dups and sort.
return sorted(list(set(retlist)))
else:
return []
def getSubjectTags(self, removeallentities=False):
# set to avoid duplicates subject tags.
subjectset = set()
tags_list = self.getConfigList("include_subject_tags") + self.getConfigList("extra_subject_tags")
# metadata all go into dc:subject tags, but only if they are configured.
for (name,value) in self.getAllMetadata(removeallentities=removeallentities,keeplists=True).iteritems():
if name in tags_list:
if isinstance(value,list):
for tag in value:
subjectset.add(tag)
else:
subjectset.add(value)
if None in subjectset:
subjectset.remove(None)
if '' in subjectset:
subjectset.remove('')
return list(subjectset | set(self.getConfigList("extratags")))
def addChapter(self, url, title, html):
if self.getConfig('strip_chapter_numbers') and \
self.getConfig('chapter_title_strip_pattern'):
title = re.sub(self.getConfig('chapter_title_strip_pattern'),"",title)
self.chapters.append( (url,title,html) )
def getChapters(self,fortoc=False):
"Chapters will be tuples of (title,html)"
retval = []
## only add numbers if more than one chapter.
if len(self.chapters) > 1 and \
(self.getConfig('add_chapter_numbers') == "true" \
or (self.getConfig('add_chapter_numbers') == "toconly" and fortoc)) \
and self.getConfig('chapter_title_add_pattern'):
for index, (url,title,html) in enumerate(self.chapters):
retval.append( (url,
string.Template(self.getConfig('chapter_title_add_pattern')).substitute({'index':index+1,'title':title}),
html) )
else:
retval = self.chapters
return retval
def formatFileName(self,template,allowunsafefilename=True):
values = origvalues = self.getAllMetadata()
# fall back default:
if not template:
template="${title}-${siteabbrev}_${storyId}${formatext}"
if not allowunsafefilename:
values={}
pattern = re.compile(self.getConfig("output_filename_safepattern",r"[^a-zA-Z0-9_\. \[\]\(\)&'-]+"))
for k in origvalues.keys():
values[k]=re.sub(pattern,'_', removeAllEntities(self.getMetadata(k)))
return string.Template(template).substitute(values).encode('utf8')
# pass fetch in from adapter in case we need the cookies collected
# as well as it's a base_story class method.
def addImgUrl(self,parenturl,url,fetch,cover=False,coverexclusion=None):
# otherwise it saves the image in the epub even though it
# isn't used anywhere.
if cover and self.getConfig('never_make_cover'):
return
url = url.strip() # ran across an image with a space in the
# src. Browser handled it, so we'd better, too.
# appengine (web version) isn't allowed to do images--just
# gets too big too fast and breaks things.
if is_appengine:
return
if url.startswith("http") or url.startswith("file") or parenturl == None:
imgurl = url
else:
parsedUrl = urlparse.urlparse(parenturl)
if url.startswith("//") :
imgurl = urlparse.urlunparse(
(parsedUrl.scheme,
'',
url,
'','',''))
elif url.startswith("/") :
imgurl = urlparse.urlunparse(
(parsedUrl.scheme,
parsedUrl.netloc,
url,
'','',''))
else:
toppath=""
if parsedUrl.path.endswith("/"):
toppath = parsedUrl.path
else:
toppath = parsedUrl.path[:parsedUrl.path.rindex('/')]
imgurl = urlparse.urlunparse(
(parsedUrl.scheme,
parsedUrl.netloc,
toppath + '/' + url,
'','',''))
#print("\n===========\nparsedUrl.path:%s\ntoppath:%s\nimgurl:%s\n\n"%(parsedUrl.path,toppath,imgurl))
# apply coverexclusion to explicit covers, too. Primarily for ffnet imageu.
if cover and coverexclusion and re.search(coverexclusion,imgurl):
return
prefix='ffdl'
if imgurl not in self.imgurls:
parsedUrl = urlparse.urlparse(imgurl)
try:
if self.getConfig('no_image_processing'):
(data,ext,mime) = no_convert_image(imgurl,
fetch(imgurl))
else:
try:
sizes = [ int(x) for x in self.getConfigList('image_max_size') ]
except Exception, e:
raise exceptions.FailedToDownload("Failed to parse image_max_size from personal.ini:%s\nException: %s"%(self.getConfigList('image_max_size'),e))
grayscale = self.getConfig('grayscale_images')
imgtype = self.getConfig('convert_images_to')
if not imgtype:
imgtype = "jpg"
removetrans = self.getConfig('remove_transparency')
removetrans = removetrans or grayscale or imgtype=="jpg"
(data,ext,mime) = convert_image(imgurl,
fetch(imgurl),
sizes,
grayscale,
removetrans,
imgtype,
background="#"+self.getConfig('background_color'))
except Exception, e:
logger.info("Failed to load or convert image, skipping:\n%s\nException: %s"%(imgurl,e))
return "failedtoload"
# explicit cover, make the first image.
if cover:
if len(self.imgtuples) > 0 and 'cover' in self.imgtuples[0]['newsrc']:
# remove existing cover, if there is one.
del self.imgurls[0]
del self.imgtuples[0]
self.imgurls.insert(0,imgurl)
newsrc = "images/cover.%s"%ext
self.cover=newsrc
self.imgtuples.insert(0,{'newsrc':newsrc,'mime':mime,'data':data})
else:
self.imgurls.append(imgurl)
# First image, copy not link because calibre will replace with it's cover.
# Only if: No cover already AND
# make_firstimage_cover AND
# NOT never_make_cover AND
# either no coverexclusion OR coverexclusion doesn't match
if self.cover == None and \
self.getConfig('make_firstimage_cover') and \
not self.getConfig('never_make_cover') and \
not (coverexclusion and re.search(coverexclusion,imgurl)):
newsrc = "images/cover.%s"%ext
self.cover=newsrc
self.imgtuples.append({'newsrc':newsrc,'mime':mime,'data':data})
self.imgurls.append(imgurl)
newsrc = "images/%s-%s.%s"%(
prefix,
self.imgurls.index(imgurl),
ext)
self.imgtuples.append({'newsrc':newsrc,'mime':mime,'data':data})
#logger.debug("\nimgurl:%s\nnewsrc:%s\nimage size:%d\n"%(imgurl,newsrc,len(data)))
else:
newsrc = self.imgtuples[self.imgurls.index(imgurl)]['newsrc']
#print("===============\n%s\nimg url:%s\n============"%(newsrc,self.imgurls[-1]))
return newsrc
def getImgUrls(self):
retlist = []
for i, url in enumerate(self.imgurls):
#parsedUrl = urlparse.urlparse(url)
retlist.append(self.imgtuples[i])
return retlist
def __str__(self):
return "Metadata: " +str(self.metadata)
def commaGroups(s):
groups = []
while s and s[-1].isdigit():
groups.append(s[-3:])
s = s[:-3]
return s + ','.join(reversed(groups))
| 41.510393 | 169 | 0.537193 | 28,808 | 0.80138 | 0 | 0 | 0 | 0 | 0 | 0 | 10,100 | 0.280961 |
76a66cd1b58b111bec10cfd3ff837789c2aaac34 | 336 | py | Python | 2021/07/2021-07-2 The Treachery of Whales.py | dpustovarov/Advent-of-Code | 3a08944c26ca6428ecca98aed96777d70038b6ef | [
"MIT"
] | null | null | null | 2021/07/2021-07-2 The Treachery of Whales.py | dpustovarov/Advent-of-Code | 3a08944c26ca6428ecca98aed96777d70038b6ef | [
"MIT"
] | null | null | null | 2021/07/2021-07-2 The Treachery of Whales.py | dpustovarov/Advent-of-Code | 3a08944c26ca6428ecca98aed96777d70038b6ef | [
"MIT"
] | null | null | null | import sys, statistics
def solution(N):
m = int(round(statistics.mean(N))) # average for (n - i)**2
d = int(statistics.median(N)) # average for abs(n - i)
return min(sum((n - i)**2 + abs(n - i) for n in N) for i in range(m, d, (d > m) - (d < m))) // 2
print(solution([int(n) for n in sys.stdin.read().split(',')]))
| 37.333333 | 100 | 0.5625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 51 | 0.151786 |
76a8df49a9b9a1d0460ba09a5a23cc25330a7581 | 213 | py | Python | app/__init__.py | PushyZqin/flask-restful-plugin | 7a142de96500910f5f7648d5edf8986afaa72b70 | [
"MIT"
] | 2 | 2018-11-28T13:49:18.000Z | 2018-11-29T11:13:40.000Z | app/__init__.py | PushyZqin/flask-restful-plugin | 7a142de96500910f5f7648d5edf8986afaa72b70 | [
"MIT"
] | null | null | null | app/__init__.py | PushyZqin/flask-restful-plugin | 7a142de96500910f5f7648d5edf8986afaa72b70 | [
"MIT"
] | null | null | null | #encoding:utf-8
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import config
app = Flask(__name__)
app.config.from_object(config)
db = SQLAlchemy(app)
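# Imported at the bottom on purpose: these modules presumably import app/db
# back from this package, so a top-of-file import would be circular.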
from app import req_demo, res_demo, models | 19.363636 | 42 | 0.807512 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.070423 |
76a95224312c942a2d4fbf9434826429874c5283 | 870 | py | Python | scripts/storage.py | ibigbug/mixin-bot | 73b2e6a83cf4bbb4e30dfc05cf4463dc616b871b | [
"Unlicense"
] | null | null | null | scripts/storage.py | ibigbug/mixin-bot | 73b2e6a83cf4bbb4e30dfc05cf4463dc616b871b | [
"Unlicense"
] | 1 | 2021-06-01T23:35:10.000Z | 2021-06-01T23:35:10.000Z | scripts/storage.py | ibigbug/mixin-bot | 73b2e6a83cf4bbb4e30dfc05cf4463dc616b871b | [
"Unlicense"
] | null | null | null | import os
import sys
from azure.storage.blob import BlockBlobService
from azure.common import AzureMissingResourceHttpError
account_name = os.environ.get('azure_storage_account_name')
account_key = os.environ.get('azure_storage_account_key')
container_name = os.environ.get('azure_storage_container_name')
block_blob_service = BlockBlobService(account_name, account_key)
target_folder = sys.argv[1]
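# Upload every non-hidden file in the target folder, skipping any blob that
# already exists (get_blob_metadata raises for blobs that are missing).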
for f in os.listdir(target_folder):
if not f.startswith('.'):
full_path = os.path.join(target_folder, f)
try:
block_blob_service.get_blob_metadata(
container_name=container_name, blob_name=f)
        except AzureMissingResourceHttpError:
block_blob_service.create_blob_from_path(
container_name, f, full_path)
for b in block_blob_service.list_blobs(container_name):
print(b.name)
| 33.461538 | 64 | 0.749425 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.101149 |
76adb21c8f58a8a73d261c808726722aa9b2c501 | 20,829 | py | Python | t5/models/hf_model.py | thomasw21/text-to-text-transfer-transformer | 6b7bcc8fe39ae772682bc7c72c93dec1e9120a80 | [
"Apache-2.0"
] | 1 | 2022-01-17T15:55:24.000Z | 2022-01-17T15:55:24.000Z | t5/models/hf_model.py | thomasw21/text-to-text-transfer-transformer | 6b7bcc8fe39ae772682bc7c72c93dec1e9120a80 | [
"Apache-2.0"
] | null | null | null | t5/models/hf_model.py | thomasw21/text-to-text-transfer-transformer | 6b7bcc8fe39ae772682bc7c72c93dec1e9120a80 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The T5 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Hugging Face Transformers T5 Model.
This model API is fully functional but should be treated as experimental and
subject to change. Due to implementation details, if you are interested in
exactly replicating the results in ``Exploring the Limits of Transfer Learning
with a Unified Text-to-Text Transformer'' you should use the MtfModel API
instead.
Usage example for fine-tuning and evaluating on CoLA:
```Python
import functools
import t5
import t5.models
import torch
import transformers
if torch.cuda.is_available():
device = torch.device("cuda")
else:
device = torch.device("cpu")
model = t5.models.HfPyTorchModel("t5-base", "/tmp/hft5/", device)
# Evaluate the pre-trained checkpoint, before further fine-tuning
model.eval(
"glue_cola_v002",
sequence_length={"inputs": 64, "targets": 4},
batch_size=128,
)
# Run 1000 steps of fine-tuning
model.train(
mixture_or_task_name="glue_cola_v002",
steps=1000,
save_steps=100,
sequence_length={"inputs": 64, "targets": 4},
split="train",
batch_size=32,
optimizer=functools.partial(transformers.AdamW, lr=1e-4),
)
# Evaluate after fine-tuning
model.eval(
"glue_cola_v002",
checkpoint_steps="all",
sequence_length={"inputs": 64, "targets": 4},
batch_size=128,
)
# Generate some predictions
inputs = [
"cola sentence: This is a totally valid sentence.",
"cola sentence: A doggy detail was walking famously.",
]
model.predict(
inputs,
sequence_length={"inputs": 32},
batch_size=2,
output_file="/tmp/hft5/example_predictions.txt",
)
```
"""
import functools
import itertools
import os
import re
import time
from absl import logging
import mesh_tensorflow.transformer.dataset as transformer_dataset
import t5.data
from t5.models import utils
from t5.models.t5_model import T5Model
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
import torch
import torch.utils.tensorboard
CHECKPOINT_FILE_FORMAT = "model-{}.checkpoint"
def tokens_to_batches(dataset,
sequence_length,
batch_size,
output_features,
mixture_or_task=None):
"""Convert a dataset of token sequences to batches of padded/masked examples.
Args:
dataset: tf.data.Dataset containing examples with token sequences.
sequence_length: dict of int, a dict mapping feature name to length.
batch_size: int, the number of padded sequences in each batch.
output_features: list of str, features to include in the dataset.
mixture_or_task: a Task or Mixture object, used to correctly specify eos if
provided. If none, eos is always added at the end of the sequence.
Returns:
A generator that produces batches of numpy examples.
"""
if mixture_or_task:
eos_keys = set(
k for k, f in mixture_or_task.output_features.items() if f.add_eos)
else:
eos_keys = True
dataset = transformer_dataset.pack_or_pad(
dataset,
sequence_length,
pack=False,
feature_keys=output_features,
ensure_eos=eos_keys,
)
def _map_fn(ex):
for key in output_features:
tensor = ex[key]
mask = tf.cast(tf.greater(tensor, 0), tensor.dtype)
ex[key + "_mask"] = mask
return ex
dataset = dataset.map(
_map_fn,
num_parallel_calls=tf.data.experimental.AUTOTUNE,
)
dataset = dataset.batch(batch_size, drop_remainder=False)
return tfds.as_numpy(dataset)
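# Minimal usage sketch (assumes `ds` already yields tokenized examples for a
# registered task, as produced by _get_dataset below):
#   batches = tokens_to_batches(ds, {"inputs": 64, "targets": 4},
#                               batch_size=32,
#                               output_features=("inputs", "targets"))
#   for batch in batches:  # numpy dicts, with "<key>_mask" entries added
#     ...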
def _get_dataset(mixture_or_task_or_name,
sequence_length,
split,
shuffle=True):
"""Get a tf.data.Dataset for a given Task or Mixture.
Args:
mixture_or_task_or_name: Task or Mixture or str, the name of the Mixture or
Task to train on or the Tasks or Mixture object itself.
Must be pre-registered in the global `t5.data.TaskRegistry` or
`t5.data.MixtureRegistry.`
sequence_length: dict of int, a dict mapping feature name to length.
split: str or `tensorflow_datasets.Split`, the data split to load.
shuffle: boolean, whether to shuffle the dataset.
Returns:
A generator that produces batches of numpy examples.
"""
if isinstance(mixture_or_task_or_name, str):
task = t5.data.get_mixture_or_task(mixture_or_task_or_name)
else:
task = mixture_or_task_or_name
return task.get_dataset(sequence_length, split, shuffle=shuffle)
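# e.g. (sketch reusing the task named in the module docstring):
#   ds = _get_dataset("glue_cola_v002", {"inputs": 64, "targets": 4}, "train")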
class HfPyTorchModel(T5Model):
"""Wrapper class for Hugging Face Transformers PyTorch T5 model."""
def __init__(self, model_spec, model_dir, device):
"""Constructor for HfModel class.
Args:
model_spec: A str to pass into the `pretrained_model_name_or_path`
argument of `transformers.T5ForConditionalGeneration.from_pretrained`
(e.g. `"t5-base"` or a path to a previously trained model) or an
instance of the `transformers.configuration_t5.T5Config` class to use
to directly construct the `transformers.T5ForConditionalGeneration`
object.
model_dir: str, directory to save and load model checkpoints.
device: `torch.device` on which the model should be run.
"""
# We have to import transformers here because it has a side effect of
# creating a TensorFlow graph, which prevents eager execution from being
# enabled in files that import hf_model.py
import transformers # pylint: disable=import-outside-toplevel,g-import-not-at-top
if isinstance(model_spec, str):
self._model = transformers.T5ForConditionalGeneration.from_pretrained(
model_spec
)
elif isinstance(model_spec, transformers.T5Config):
self._model = transformers.T5ForConditionalGeneration(model_spec)
else:
raise ValueError("model_spec should be a string or T5Config.")
tf.io.gfile.makedirs(model_dir)
self._writer = torch.utils.tensorboard.writer.SummaryWriter(model_dir)
self._model_dir = model_dir
self._device = device
if self._device.type == "cuda":
self._model.cuda()
self._step = 0
self.load_latest_checkpoint()
self.to_tensor = functools.partial(
torch.as_tensor, device=self._device, dtype=torch.long)
@property
def model(self):
return self._model
@property
def step(self):
return self._step
def save_checkpoint(self, step):
"""Save the current model parameters to the `model_dir`.
Args:
step: int, the current training step.
"""
path = os.path.join(self._model_dir, CHECKPOINT_FILE_FORMAT.format(step))
torch.save(self._model.state_dict(), path)
def load_checkpoint(self, step, model_dir=None):
"""Load the model parameters from a checkpoint at a given step.
Args:
step: int, load the checkpoint from this training step.
model_dir: str, the directory of the checkpoint to load or None to use
this model's directory.
"""
model_dir = model_dir or self._model_dir
path = os.path.join(model_dir, CHECKPOINT_FILE_FORMAT.format(step))
logging.info("Loading from %s", path)
self._model.load_state_dict(torch.load(path))
self._step = step
def get_all_checkpoint_steps(self, model_dir=None):
"""Retrieve the steps corresponding to all checkpoints in `model_dir`.
Args:
model_dir: str, the directory of the checkpoints or None to use this
model's directory.
Returns:
A list of ints corresponding to all checkpoint steps, or None if there
are no checkpoints in the model directory.
"""
model_dir = model_dir or self._model_dir
checkpoint_files = tf.io.gfile.glob(
os.path.join(model_dir, CHECKPOINT_FILE_FORMAT.format("*"))
)
if not checkpoint_files:
return
step_regex = re.compile(".*" + CHECKPOINT_FILE_FORMAT.format(r"(\d+)"))
steps = [int(step_regex.match(path).group(1)) for path in checkpoint_files]
return sorted(steps)
def get_latest_checkpoint_step(self, model_dir=None):
"""Retrieve the step corresponding to the most recent checkpoint.
Args:
model_dir: str, the directory of the checkpoints or None to use this
model's directory.
Returns:
An integer corresponding to the most recent step, or None if there are no
checkpoints in the model directory.
"""
steps = self.get_all_checkpoint_steps(model_dir)
if steps is not None:
return max(steps)
def load_latest_checkpoint(self):
"""Load the most recent checkpoint and update the model's current step."""
latest_step = self.get_latest_checkpoint_step()
if latest_step is not None:
self.load_checkpoint(latest_step)
def train(
self,
mixture_or_task_name,
steps,
save_steps,
sequence_length,
split,
batch_size,
optimizer,
learning_rate_scheduler=None,
):
"""Train the model on the given Mixture or Task.
Args:
mixture_or_task_name: str, the name of the Mixture or Task to train on.
Must be pre-registered in the global `t5.data.TaskRegistry` or
`t5.data.MixtureRegistry.`
steps: int, the total number of steps to train for.
save_steps: int, the number of steps between checkpoint saves.
sequence_length: dict of int, a dict mapping feature name to length.
split: str or `tensorflow_datasets.Split`, the data split to load.
batch_size: int, the number of padded sequences in each batch.
optimizer: function that takes the model parameters as its sole argument.
For example, to use an AdamW optimizer with a learning rate of 1e-4,
you could pass in `functools.partial(transformers.AdamW, lr=1e-4)`.
learning_rate_scheduler: optional function that takes in an optimizer as
its sole argument. For example, to use a schedule that warms up the
optimizer's learning rate after 100 steps, you could pass in
`functools.partial(transformers.get_constant_schedule_with_warmup,
num_warmup_steps=100)`.
"""
self._model.train()
ds = _get_dataset(mixture_or_task_name, sequence_length, split)
task = t5.data.get_mixture_or_task(mixture_or_task_name)
ds = tokens_to_batches(ds, sequence_length, batch_size,
tuple(task.output_features), task)
# Repeat dataset forever
ds = itertools.cycle(ds)
optimizer = optimizer(self._model.parameters())
if learning_rate_scheduler:
learning_rate_scheduler = learning_rate_scheduler(optimizer)
now = time.time()
for train_step, batch in enumerate(itertools.islice(ds, steps)):
if not train_step % save_steps:
# TODO(craffel): Consider saving optimizer and scheduler state.
logging.info("Saving checkpoint for step %s", self._step)
self.save_checkpoint(self._step)
self._model.zero_grad()
outputs = self._model(
input_ids=self.to_tensor(batch["inputs"]),
attention_mask=self.to_tensor(batch["inputs_mask"]),
decoder_attention_mask=self.to_tensor(batch["targets_mask"]),
labels=self.to_tensor(batch["targets"]),
)
loss = outputs[0]
loss.backward()
optimizer.step()
if learning_rate_scheduler:
learning_rate_scheduler.step()
self._writer.add_scalar(
"loss", loss.detach().cpu().numpy(), self._step
)
self._writer.add_scalar("step/s", 1 / (time.time() - now), self._step)
now = time.time()
self._step += 1
logging.info("Saving final checkpoint for step %s", self._step)
self.save_checkpoint(self._step)
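  # Usage sketch (assumption -- the instance name `model` and the task name are
  # illustrative; the keyword arguments mirror the Args documented above):
  #   model.train(
  #       mixture_or_task_name="glue_cola_v002",
  #       steps=1000,
  #       save_steps=100,
  #       sequence_length={"inputs": 512, "targets": 32},
  #       split="train",
  #       batch_size=16,
  #       optimizer=functools.partial(transformers.AdamW, lr=1e-4),
  #       learning_rate_scheduler=functools.partial(
  #           transformers.get_constant_schedule_with_warmup,
  #           num_warmup_steps=100))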
def eval(
self,
mixture_or_task_name,
sequence_length,
batch_size,
checkpoint_steps=None,
summary_dir=None,
split="validation",
compute_sequence_length=False,
**generate_kwargs,
):
"""Evaluate the model on the given Mixture or Task.
*Note*: If a checkpoint step is provided (i.e. `checkpoint_steps is not
None`), the model's state will be replaced by the state in those
checkpoints. If you have not saved your model before calling `eval`, you
should call `save_checkpoint` before `eval` to avoid losing its parameter
values and state.
Args:
mixture_or_task_name: str, the name of the Mixture or Task to evaluate
on. Must be pre-registered in the global `t5.data.TaskRegistry` or
`t5.data.MixtureRegistry.`
sequence_length: dict of int, a dict mapping feature name to length.
batch_size: int, the number of padded sequences in each batch.
checkpoint_steps: int, list of ints, "all", or None. If None, eval in the
model in its current state without loading any checkpoints. If an int
or list of ints, evaluation will be run on the checkpoint files in
`model_dir` whose global steps are those provided. If -1, eval on the
latest checkpoint from the model directory. If "all", evaluate all
checkpoints in the model directory.
summary_dir: str, path to write TensorBoard events file summaries for
eval. If None, use model_dir/{split}_eval.
split: str, the mixture/task split to evaluate on.
compute_sequence_length: bool, automatically compute sequence length
during eval mode.
**generate_kwargs: Additional keyword arguments to pass to
`transformers.PretrainedModel.generate()`, for example to change the
decoding strategy. See the documentation for
`transformers.PretrainedModel.generate()` for options.
"""
def _predict_from_tasks(tasks, vocabulary, checkpoint_step, sequence_length,
datasets, **unused_kwargs):
      if isinstance(vocabulary, tuple):
        vocab = vocabulary[1]
      else:
        vocab = vocabulary  # guard: a bare Vocabulary may be passed in
if checkpoint_step != self._step:
self.load_checkpoint(checkpoint_step)
self._model.eval()
outputs = []
for task in tasks:
if compute_sequence_length:
ds = _get_dataset(task.name, sequence_length, split, shuffle=False)
else:
ds = datasets[task.name]
ds = list(tokens_to_batches(
ds, sequence_length, batch_size, tuple(task.output_features), task))
for batch in ds:
predicted_tokens = self._model.generate(
input_ids=self.to_tensor(batch["inputs"]), **generate_kwargs
)
predicted_tokens = predicted_tokens.cpu().numpy().tolist()
predictions = [vocab.decode(p) for p in predicted_tokens]
outputs.extend(predictions)
return outputs
if checkpoint_steps is None:
checkpoint_steps = [self._step]
    elif checkpoint_steps == -1:
      # The docstring promises -1 == latest checkpoint; resolve it here.
      checkpoint_steps = [self.get_latest_checkpoint_step()]
    elif isinstance(checkpoint_steps, int):
      checkpoint_steps = [checkpoint_steps]
elif checkpoint_steps == "all":
checkpoint_steps = self.get_all_checkpoint_steps()
elif not isinstance(checkpoint_steps, (list, tuple)):
raise ValueError(
f"checkpoint_steps must be None, int or list; got {checkpoint_steps}"
)
summary_dir = summary_dir or os.path.join(self._model_dir, f"{split}_eval")
tf.io.gfile.makedirs(summary_dir)
utils.run_eval(
mixture_or_task_name=mixture_or_task_name,
predict_or_score_fn=_predict_from_tasks,
checkpoint_steps=checkpoint_steps,
dataset_fn=functools.partial(_get_dataset, shuffle=False),
summary_dir=summary_dir,
split=split,
sequence_length=None if compute_sequence_length else sequence_length,
batch_size=batch_size)
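  # Usage sketch (assumption -- illustrative call; `max_length` is one example
  # of a kwarg forwarded to `generate()`):
  #   model.eval(
  #       mixture_or_task_name="glue_cola_v002",
  #       sequence_length={"inputs": 512, "targets": 32},
  #       batch_size=16,
  #       checkpoint_steps=-1,   # latest saved checkpoint
  #       max_length=32)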
def predict(
self,
inputs,
sequence_length,
batch_size,
output_file=None,
vocabulary=None,
**generate_kwargs,
):
"""Evaluate the model on the given Mixture or Task.
*Note*: If a checkpoint step is provided (i.e. `checkpoint_steps is not
None`), the model's state will be replaced by the state in those
checkpoints. If you have not saved your model before calling `eval`, you
should call `save_checkpoint` before `eval` to avoid losing its parameter
values and state.
Args:
inputs: list of str or str, either a list of inputs to feed into the
model or the path to a text file that contains a single input on each
line.
sequence_length: dict of int, a dict mapping feature name to length.
batch_size: int, the number of padded sequences in each batch.
output_file: str or None, path to write out predictions or None to skip
writing.
vocabulary: t5.data.vocabularies.Vocabulary or dict or None. Either the
Vocabulary to use for processing inputs and targets, a dict mapping
"inputs" to a Vocabulary for encoding the inputs and "targets" for
decoding the predictions, or None (default) to use a
t5.data.SentencePieceVocabulary with the provided
sentencepiece_model_path (as was used in all pre-trained T5 models).
**generate_kwargs: Additional keyword arguments to pass to
`transformers.PretrainedModel.generate()`, for example to change the
decoding strategy. See the documentation for
`transformers.PretrainedModel.generate()` for options.
"""
if isinstance(inputs, str):
if not tf.io.gfile.exists(inputs):
raise ValueError(
f"A str was provided for `inputs`, but the path {inputs} does not "
"exist. If you want the model's output for {inputs}, you should "
"feed in inputs=['{inputs}']"
)
with tf.io.gfile.GFile(inputs) as f:
inputs = [l.strip() for l in f]
if vocabulary is None:
vocab = t5.data.get_default_vocabulary()
vocabs = {"inputs": vocab, "targets": vocab}
elif isinstance(vocabulary, t5.data.vocabularies.Vocabulary):
vocabs = {"inputs": vocabulary, "targets": vocabulary}
elif isinstance(vocabulary, dict):
vocabs = vocabulary
else:
raise ValueError("vocabulary must be a dict, a Vocabulary, or None")
dataset = tf.data.Dataset.from_tensor_slices(inputs)
dataset = dataset.map(
lambda x: {"inputs": tf.cast(vocabs["inputs"].encode_tf(x), tf.int64)},
num_parallel_calls=tf.data.experimental.AUTOTUNE,
)
dataset = tokens_to_batches(
dataset, sequence_length, batch_size, ["inputs"]
)
predictions = []
for batch in dataset:
predicted_tokens = self._model.generate(
input_ids=self.to_tensor(batch["inputs"]), **generate_kwargs
)
predicted_tokens = predicted_tokens.cpu().numpy().tolist()
predictions.extend(
[vocabs["targets"].decode(p) for p in predicted_tokens]
)
for inp, pred in zip(inputs, predictions):
logging.info("%s\n -> %s", inp, pred)
if output_file is not None:
utils.write_lines_to_file(predictions, output_file)
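  # Usage sketch (assumption -- the input string is illustrative):
  #   model.predict(
  #       inputs=["cola sentence: The dog walked the park."],
  #       sequence_length={"inputs": 64},
  #       batch_size=1,
  #       output_file="/tmp/predictions.txt")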
def finetune(
self,
mixture_or_task_name,
finetune_steps,
pretrained_model_dir,
pretrained_checkpoint_step=-1,
**train_kwargs,
):
"""Trains model after loading from any existing checkpoint.
Note that if you have initialized the model using a pre-trained model
specification (e.g. by passing "t5-base" for `model_spec`) then you can
just call `train` directly. This function is only provided for convenience
for loading a pre-trained model checkpoint from an arbitrary model
directory before calling `train`.
Args:
mixture_or_task_name: str, the name of the Mixture or Task to evaluate
on. Must be pre-registered in the global `t5.data.TaskRegistry` or
`t5.data.MixtureRegistry.`
finetune_steps: int, the number of additional steps to train for.
pretrained_model_dir: str, directory with pretrained model checkpoints.
pretrained_checkpoint_step: int, checkpoint to initialize weights from.
If -1 (default), use the latest checkpoint from the pretrained model
directory.
**train_kwargs: Additional keyword arguments to pass to `train`. See the
docstring for `train` for more details.
"""
if pretrained_checkpoint_step == -1:
pretrained_checkpoint_step = self.get_latest_checkpoint_step(
pretrained_model_dir
)
self.load_checkpoint(pretrained_checkpoint_step, pretrained_model_dir)
self.train(mixture_or_task_name, finetune_steps, **train_kwargs)
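  # Usage sketch (assumption -- paths are placeholders; remaining kwargs are
  # forwarded to `train` as documented above):
  #   model.finetune(
  #       mixture_or_task_name="glue_cola_v002",
  #       finetune_steps=1000,
  #       pretrained_model_dir="/path/to/pretrained",
  #       save_steps=100,
  #       sequence_length={"inputs": 512, "targets": 32},
  #       split="train",
  #       batch_size=16,
  #       optimizer=functools.partial(transformers.AdamW, lr=1e-4))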
| 36.414336 | 86 | 0.69408 | 15,826 | 0.759806 | 0 | 0 | 100 | 0.004801 | 0 | 0 | 11,456 | 0.550002 |
76adf99c53be9fdd9abcf7404d34ffbbdbf4e9ae | 1,422 | py | Python | api-reference-examples/python/pytx/pytx/threat_descriptor.py | b-bold/ThreatExchange | 6f8d0dc803faccf576c9398569bb52d54a4f9a87 | [
"BSD-3-Clause"
] | 997 | 2015-03-13T18:04:03.000Z | 2022-03-30T12:09:10.000Z | api-reference-examples/python/pytx/pytx/threat_descriptor.py | b-bold/ThreatExchange | 6f8d0dc803faccf576c9398569bb52d54a4f9a87 | [
"BSD-3-Clause"
] | 444 | 2015-03-26T17:28:49.000Z | 2022-03-28T19:34:05.000Z | api-reference-examples/python/pytx/pytx/threat_descriptor.py | b-bold/ThreatExchange | 6f8d0dc803faccf576c9398569bb52d54a4f9a87 | [
"BSD-3-Clause"
] | 294 | 2015-03-13T22:19:43.000Z | 2022-03-30T08:42:45.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .common import Common
from .vocabulary import ThreatDescriptor as td
from .vocabulary import ThreatExchange as t
class ThreatDescriptor(Common):
_URL = t.URL + t.VERSION + t.THREAT_DESCRIPTORS
_DETAILS = t.URL + t.VERSION
_RELATED = t.URL + t.VERSION
_fields = [
td.ADDED_ON,
td.CONFIDENCE,
td.DESCRIPTION,
td.EXPIRED_ON,
td.FIRST_ACTIVE,
td.ID,
td.INDICATOR,
td.LAST_ACTIVE,
td.LAST_UPDATED,
td.METADATA,
td.MY_REACTIONS,
td.OWNER,
td.PRECISION,
td.PRIVACY_MEMBERS,
td.PRIVACY_TYPE,
td.RAW_INDICATOR,
td.REVIEW_STATUS,
td.SEVERITY,
td.SHARE_LEVEL,
td.SOURCE_URI,
td.STATUS,
td.TAGS,
td.TYPE,
]
_default_fields = [
td.ADDED_ON,
td.CONFIDENCE,
td.DESCRIPTION,
td.EXPIRED_ON,
td.FIRST_ACTIVE,
td.ID,
td.INDICATOR,
td.LAST_ACTIVE,
td.LAST_UPDATED,
td.METADATA,
td.MY_REACTIONS,
td.OWNER,
td.PRECISION,
td.RAW_INDICATOR,
td.REVIEW_STATUS,
td.SEVERITY,
td.SHARE_LEVEL,
td.SOURCE_URI,
td.STATUS,
td.TAGS,
td.TYPE,
]
_connections = [
]
_unique = [
]
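# Usage sketch (assumption -- pytx needs an access token before querying, and
# `objects()` is the query entry point inherited from Common):
#   from pytx.access_token import access_token
#   access_token(app_id='<app-id>', app_secret='<app-secret>')
#   results = ThreatDescriptor.objects(text='malware', limit=10)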
| 20.911765 | 70 | 0.553446 | 1,230 | 0.864979 | 0 | 0 | 0 | 0 | 0 | 0 | 70 | 0.049226 |
76ae6ebfed82ce4d1cbcefc8ce95998102452884 | 2,766 | py | Python | why_how_what/multiarmed_bandit.py | chandrabsingh/learnings | a3f507bbbf46582ce5a64991983dfc0759db0af5 | [
"MIT"
] | null | null | null | why_how_what/multiarmed_bandit.py | chandrabsingh/learnings | a3f507bbbf46582ce5a64991983dfc0759db0af5 | [
"MIT"
] | null | null | null | why_how_what/multiarmed_bandit.py | chandrabsingh/learnings | a3f507bbbf46582ce5a64991983dfc0759db0af5 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
class BanditClass:
def __init__(self, epsilon, leverCount, probs):
self.epsilon = epsilon
self.leverCount = leverCount
self.probs = probs
self.redefineSettings()
def calcAction(self):
# Select action/lever as per explore-exploit probability
if np.random.rand() < self.epsilon: # explore case
return np.random.choice(self.leverCount)
else: # exploit case
            return np.argmax(self.valueEstimateQ)  # greedy w.r.t. current estimates
def calcReward(self, actionId):
val = np.random.randn() + self.valueTrueQStar[actionId]
# return val
# return 1 if (np.random.random() < self.probs[actionId]) else 0
return val if (np.random.random() < self.probs[actionId]) else 0
def calcQEstimate(self, actionId, reward):
self.actionCount[actionId] += 1
self.valueEstimateQ[actionId] += (1/self.actionCount[actionId]) * (reward - self.valueEstimateQ[actionId])
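    # The update above is the incremental sample-average rule
    # Q_{n+1} = Q_n + (1/n) * (R_n - Q_n), so no per-arm reward history is kept.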
def redefineSettings(self):
# Define individual lever probability
self.valueTrueQStar = np.random.randn(self.leverCount) # Reset the valueTrueQStar before each incremental step
self.valueEstimateQ = np.zeros(self.leverCount, dtype=float)
self.actionCount = np.zeros_like(self.valueEstimateQ, dtype=int)
# def main():
epsilons = [0.1] # Define list of epsilons=exploration
leverCount = 10 # Define number of levers
runs = 2000
steps = 1000
probs = [0.10, 0.50, 0.60, 0.80, 0.10,
0.25, 0.60, 0.45, 0.75, 0.65]
# Define list of arm probabilities
rewards = np.zeros((len(epsilons), runs, steps))
actions = np.zeros((len(epsilons), runs, steps))
for e, epsilon in enumerate(epsilons): # loop over all the bandits epsilons
bandit = BanditClass(epsilon, leverCount, probs)
for run in tqdm(range(runs)): # loop over all the runs
bandit.redefineSettings()
for step in range(steps): # loop over all the steps
actionId = bandit.calcAction()
reward = bandit.calcReward(actionId)
bandit.calcQEstimate(actionId, reward)
actions[e, run, step] = actionId
rewards[e, run, step] = reward
avgActions, avgRewards = actions.mean(axis=1), rewards.mean(axis=1)
plt.subplot(2, 1, 1)
for eps, rewardsY in zip(epsilons, avgRewards):
plt.plot(rewardsY, label=r'$\epsilon$ = {}'.format(eps), lw=1)
plt.xlabel('Steps')
plt.ylabel('Average reward')
plt.legend()
plt.subplot(2, 1, 2)
for eps, actionsX in zip(epsilons, avgActions):
plt.plot(actionsX, label=r'$\epsilon$ = {}'.format(eps), lw=1)
plt.xlabel('Steps')
plt.ylabel('% Average action')
plt.legend()
plt.show()
| 36.88 | 118 | 0.667751 | 1,324 | 0.47867 | 0 | 0 | 0 | 0 | 0 | 0 | 577 | 0.208604 |
76af59b924c1dbc7c80b888dfc866f25f3b80a37 | 1,293 | py | Python | algorithms/python/leetcode/tests/test_NonnegativeIntegerswithoutConsecutiveOnes.py | ytjia/coding-pratice | 6ddba1f3b86c40639a8203cbc3373d52301c1b1f | [
"MIT"
] | null | null | null | algorithms/python/leetcode/tests/test_NonnegativeIntegerswithoutConsecutiveOnes.py | ytjia/coding-pratice | 6ddba1f3b86c40639a8203cbc3373d52301c1b1f | [
"MIT"
] | null | null | null | algorithms/python/leetcode/tests/test_NonnegativeIntegerswithoutConsecutiveOnes.py | ytjia/coding-pratice | 6ddba1f3b86c40639a8203cbc3373d52301c1b1f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Authors: Y. Jia <ytjia.zju@gmail.com>
import unittest
from .. import NonnegativeIntegerswithoutConsecutiveOnes
class TestNonnegativeIntegerswithoutConsecutiveOnes(unittest.TestCase):
solution = NonnegativeIntegerswithoutConsecutiveOnes.Solution()
def test_findIntegers(self):
self.assertEqual(self.solution.findIntegers(1), 2)
self.assertEqual(self.solution.findIntegers(3), 3)
self.assertEqual(self.solution.findIntegers(5), 5)
self.assertEqual(self.solution.findIntegers(8), 6)
def test_find_binary_bit_pos(self):
self.assertEqual(self.solution.find_binary_bit_pos(0), list())
self.assertEqual(self.solution.find_binary_bit_pos(1), [0])
self.assertEqual(self.solution.find_binary_bit_pos(5), [0, 2])
self.assertEqual(self.solution.find_binary_bit_pos(8), [3])
self.assertEqual(self.solution.find_binary_bit_pos(15), [0, 1, 2, 3])
def test_stat_bit_cnt(self):
self.assertEqual(self.solution.stat_bit_cnt(0), [2])
self.assertEqual(self.solution.stat_bit_cnt(1), [2, 1])
self.assertEqual(self.solution.stat_bit_cnt(2), [2, 1, 2])
self.assertEqual(self.solution.stat_bit_cnt(3), [2, 1, 2, 3])
if __name__ == '__main__':
unittest.main()
| 38.029412 | 77 | 0.70843 | 1,102 | 0.852282 | 0 | 0 | 0 | 0 | 0 | 0 | 72 | 0.055684 |
76af968ed5c491f4545b0402bd5a825b42b19aab | 1,273 | py | Python | tierpsy/debugging/check_roi_flow.py | mgh17/tierpsy-tracker | a18c06aa80a5fb22fd51563d82c639b520742777 | [
"MIT"
] | 9 | 2021-01-11T10:49:21.000Z | 2022-02-28T15:48:00.000Z | tierpsy/debugging/check_roi_flow.py | mgh17/tierpsy-tracker | a18c06aa80a5fb22fd51563d82c639b520742777 | [
"MIT"
] | 18 | 2020-05-08T15:43:08.000Z | 2022-03-23T10:19:24.000Z | tierpsy/debugging/check_roi_flow.py | mgh17/tierpsy-tracker | a18c06aa80a5fb22fd51563d82c639b520742777 | [
"MIT"
] | 10 | 2019-12-18T12:10:12.000Z | 2022-01-05T09:12:47.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 17 17:59:36 2018
@author: avelinojaver
"""
import numpy as np
import cv2
from functools import partial
import json
from pathlib import Path
import pandas as pd
from tierpsy.analysis.ske_create.helperIterROI import generateMoviesROI
mask_file = Path('/Users/avelinojaver/OneDrive - Nexus365/worms/Bertie_movies/CX11314_Ch1_04072017_103259.hdf5')
root_dir = '/Users/avelinojaver/OneDrive - Nexus365/worms/Bertie_movies/'
for mask_file in list(Path(root_dir).glob('*.hdf5')):
skeletons_file = mask_file.parent / 'Results' / (mask_file.stem + '_skeletons.hdf5')
with pd.HDFStore(str(skeletons_file), "r") as ske_file_id:
#attribute useful to understand if we are dealing with dark or light worms
bgnd_param = ske_file_id.get_node('/plate_worms')._v_attrs['bgnd_param']
bgnd_param = json.loads(bgnd_param.decode("utf-8"))
print(bgnd_param)
#%%
# The names below were undefined in the original scratch cell; bind them from
# the last file processed above (assumption: trajectory data is stored under
# '/trajectories_data' in the skeletons file, as elsewhere in tierpsy).
masked_image_file = str(mask_file)
with pd.HDFStore(str(skeletons_file), "r") as ske_file_id:
    trajectories_data = ske_file_id['/trajectories_data']
ROIs_generator = generateMoviesROI(masked_image_file,
                                   trajectories_data,
                                   bgnd_param=bgnd_param,
                                   progress_prefix='')
for frame_props in ROIs_generator:
break
| 28.931818 | 112 | 0.660644 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 416 | 0.326787 |
76b0032871e9f4df5fee3742b9f1d69889d52346 | 7,716 | py | Python | models/SFD_net.py | LeileiCao/SFD_Pytorch | 384b0cd42485946371812f4905e4c9fd3c5d4e65 | [
"MIT"
] | 1 | 2020-05-03T02:46:42.000Z | 2020-05-03T02:46:42.000Z | models/SFD_net.py | LeileiCao/SFD_Pytorch | 384b0cd42485946371812f4905e4c9fd3c5d4e65 | [
"MIT"
] | 1 | 2019-03-30T04:04:39.000Z | 2019-03-30T04:04:39.000Z | models/SFD_net.py | LeileiCao/SFD_Pytorch | 384b0cd42485946371812f4905e4c9fd3c5d4e65 | [
"MIT"
] | 3 | 2019-02-22T07:00:53.000Z | 2021-01-13T10:19:59.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from layers import *
import torchvision.transforms as transforms
import torchvision.models as models
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import os
class L2Norm(nn.Module):
'''L2Norm layer across all channels.'''
def __init__(self, in_features, scale):
super(L2Norm, self).__init__()
self.weight = nn.Parameter(torch.Tensor(in_features))
self.reset_parameters(scale)
def reset_parameters(self, scale):
        init.constant_(self.weight, scale)  # `nn.init.constant` was removed in newer torch
def forward(self, x):
x = F.normalize(x, dim=1)
scale = self.weight[None,:,None,None]
return scale * x
def STlayer(x, r):
    # Sub-pixel (pixel-shuffle) upsampling applied group-wise: every block of
    # r**2 input channels is rearranged into one output channel upsampled by r.
    r = int(r)
    pixel_shuffle = nn.PixelShuffle(r)
    channels = x.shape[1]
    O = torch.Tensor(x.shape[0], channels//(r**2), x.shape[2]*r, x.shape[3]*r)
    O = O.cuda()
    O = Variable(O, requires_grad=False)
    for i in range(0, channels, r**2):
        # Output channel index is i // r**2; the original i % r**2 is always 0
        # here and overwrote channel 0 on every iteration.
        O[:, i//(r**2):i//(r**2)+1, :, :] = pixel_shuffle(x[:, i:i+r**2, :, :])
    return O
class SFDNet(nn.Module):
def __init__(self, phase, base, extras, head):
super(SFDNet, self).__init__()
self.phase = phase
# vgg network
self.base = nn.ModuleList(base)
self.conv3_Norm=L2Norm(256,10)
self.conv4_Norm=L2Norm(512,8)
self.conv5_Norm=L2Norm(512,5)
self.extras = nn.ModuleList(extras)
self.loc = nn.ModuleList(head[0])
self.conf = nn.ModuleList(head[1])
if self.phase == 'test':
self.softmax = nn.Softmax(dim=-1)
def forward(self, x):
"""Applies network layers and ops on input image(s) x.
Args:
x: input image or batch of images. Shape: [batch,3*batch,300,300].
Return:
Depending on phase:
test:
list of concat outputs from:
1: softmax layers, Shape: [batch*num_priors,num_classes]
2: localization layers, Shape: [batch,num_priors*4]
3: priorbox layers, Shape: [2,num_priors*4]
train:
list of concat outputs from:
1: confidence layers, Shape: [batch*num_priors,num_classes]
2: localization layers, Shape: [batch,num_priors*4]
3: priorbox layers, Shape: [2,num_priors*4]
"""
sources = list()
loc = list()
conf = list()
# apply vgg up to conv4_3 relu
for k in range(16):
x = self.base[k](x)
s=self.conv3_Norm(x)
sources.append(s)
for k in range(16,23):
x = self.base[k](x)
s=self.conv4_Norm(x)
sources.append(s)
for k in range(23,30):
x= self.base[k](x)
s=self.conv5_Norm(x)
sources.append(s)
for k in range(30,len(self.base)):
x= self.base[k](x)
sources.append(x)
# apply extra layers and cache source layer outputs
x = self.extras[0](x)
x = self.extras[1](x)
x = self.extras[2](x)
x = self.extras[3](x)
sources.append(x)
x = self.extras[4](x)
x = self.extras[5](x)
x = self.extras[6](x)
x = self.extras[7](x)
sources.append(x)
# apply multibox head to source layers
for (x, l, c) in zip(sources, self.loc, self.conf):
loc.append(l(x).permute(0, 2, 3, 1).contiguous())
conf.append(c(x).permute(0, 2, 3, 1).contiguous())
#print([o.size() for o in loc])
loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
if self.phase == "test":
output = (
loc.view(loc.size(0), -1, 4), # loc preds
self.softmax(conf.view(-1, 2)), # conf preds
)
else:
output = (
loc.view(loc.size(0), -1, 4),
conf.view(conf.size(0), -1, 2),
)
return output
def load_weights(self, base_file):
other, ext = os.path.splitext(base_file)
if ext == '.pkl' or '.pth':
print('Loading weights into state dict...')
self.load_state_dict(torch.load(base_file))
print('Finished!')
else:
print('Sorry only .pth and .pkl files supported.')
# This function is derived from torchvision VGG make_layers()
# https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py
def vgg(cfg, i, batch_norm=False):
layers = []
in_channels = i
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
elif v == 'C':
layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
pool5 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
conv6 = nn.Conv2d(512, 1024, kernel_size=3, stride=1, padding=1)
conv7 = nn.Conv2d(1024, 1024, kernel_size=1)
layers += [pool5, conv6, nn.ReLU(inplace=True), conv7, nn.ReLU(inplace=True)]
return layers
base = {
'300': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',
512, 512, 512],
'640': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',
512, 512, 512],
}
def add_extras():
# Extra layers added to VGG for feature scaling
layers = []
conv6_1 = nn.Conv2d(1024, 256, kernel_size=1)
conv6_2 = nn.Conv2d(256,512, kernel_size=3,stride=2,padding=1)
conv7_1 = nn.Conv2d(512, 128, kernel_size=1)
conv7_2 = nn.Conv2d(128,256, kernel_size=3, stride=2, padding=1)
layers += [conv6_1, nn.ReLU(inplace=True), conv6_2, nn.ReLU(inplace=True),
conv7_1, nn.ReLU(inplace=True), conv7_2, nn.ReLU(inplace=True)]
return layers
def multibox(vgg):
loc_layers = []
conf_layers = []
loc_layers = loc_layers+[nn.Conv2d(vgg[14].out_channels, 4, kernel_size=3, padding=1)]
conf_layers = conf_layers+[nn.Conv2d(vgg[14].out_channels, 2, kernel_size=3, padding=1)]
loc_layers = loc_layers+[nn.Conv2d(vgg[21].out_channels, 4, kernel_size=3, padding=1)]
conf_layers = conf_layers+[nn.Conv2d(vgg[21].out_channels, 2, kernel_size=3, padding=1)]
loc_layers = loc_layers+[nn.Conv2d(vgg[28].out_channels, 4, kernel_size=3, padding=1)]
conf_layers = conf_layers+[nn.Conv2d(vgg[28].out_channels, 2, kernel_size=3, padding=1)]
loc_layers = loc_layers+[nn.Conv2d(vgg[33].out_channels, 4, kernel_size=3, padding=1)]
conf_layers = conf_layers+[nn.Conv2d(vgg[33].out_channels, 2, kernel_size=3, padding=1)]
#loc_layers += [nn.Conv2d(128, 4, kernel_size=3, padding=1)]
#conf_layers += [nn.Conv2d(128, 2, kernel_size=3, padding=1)]
loc_layers += [nn.Conv2d(512, 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(512, 2, kernel_size=3, padding=1)]
loc_layers += [nn.Conv2d(256, 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(256, 2, kernel_size=3, padding=1)]
return (loc_layers, conf_layers)
def build_net(phase):
if phase != "test" and phase != "train":
print("Error: Phase not recognized")
return
return SFDNet(phase, vgg(base[str(640)],3), add_extras(), multibox(vgg(base[str(640)],3)))
| 35.394495 | 94 | 0.581519 | 3,859 | 0.50013 | 0 | 0 | 0 | 0 | 0 | 0 | 1,540 | 0.199585 |
76b14bd5cac8768daf91adb9dc2873199856ee7c | 2,572 | py | Python | PP4E/Examples/PP4E/Integrate/Embed/prioredition-2x/Inventory/WithDbase/inventory.py | BeacherHou/Python-_Markdown- | 015d79a02d32f49395b80ca10919b3a09b72c4df | [
"MIT"
] | null | null | null | PP4E/Examples/PP4E/Integrate/Embed/prioredition-2x/Inventory/WithDbase/inventory.py | BeacherHou/Python-_Markdown- | 015d79a02d32f49395b80ca10919b3a09b72c4df | [
"MIT"
] | null | null | null | PP4E/Examples/PP4E/Integrate/Embed/prioredition-2x/Inventory/WithDbase/inventory.py | BeacherHou/Python-_Markdown- | 015d79a02d32f49395b80ca10919b3a09b72c4df | [
"MIT"
] | null | null | null | ############################################################################
# implement inventory/buyer databases as persistent shelve/pickle files;
# since the validations are already coded to use a function call interface,
# we just map those calls back to the shelve or pickled object here--no
# need to change validations code; caveat: some dbm flavors may need a
# Inventory.close() call, and this scheme doesn't support concurrent dbase
# access--shelves must be locked if concurrent access is possible (see
# flock() in the PyErrata example in the Internet chapter), and we would
# want to load the Buyers list from its file on each buyers() call;
#
# note that shelves require string keys (not ints), but we load raw
# strings from the order data file, so no conversions are necessary here;
# Buyers could be a shelve with all values = None if the list grows long:
# that would replace the 'in' test with a shelve index (but may be slower,
# since it adds a file access); Inventory could almost be a simple dbm
# file instead of a shelve, but that requires mapping integer values to
# and from strings (dbm values must be strings--see persistence chapter);
############################################################################
import shelve, pickle, string
from dbasetools import inventoryFile, buyerFile
# open shelve once per process, on first
# import of this file; changes are auto
# written through to file on key assignment
Inventory = shelve.open(inventoryFile)
def skus():
return Inventory.keys()
def stock(sku):
return Inventory[sku]
def reduce(sku, qty):
Inventory[sku] = Inventory[sku] - qty
def closedbase():
Inventory.close() # if your dbm flavor requires it
# load buyers list once per process
# changes are written back to the file by add_buyer
Buyers = pickle.load(open(buyerFile, 'r'))
def buyers():
return Buyers
def add_buyer(buyer):
Buyers.append(buyer)
pickle.dump(Buyers, open(buyerFile, 'w'))
def print_files():
text = ''
for key in Inventory.keys():
text = text + (' %s=>%d ' % (key, Inventory[key]))
print 'Stock => {%s}' % text
print 'Buyer =>', Buyers
# load order list from flat text file;
# converts quantity only to an integer
def load_orders(filename):
orders = []
for line in open(filename, 'r').readlines():
product, quantity, buyer = string.split(line)
orders.append( (product, string.atoi(quantity), buyer) )
return orders
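# Expected order-file format (assumption inferred from the split above): one
# whitespace-separated "product quantity buyer" record per line, e.g.
#   spam 2 shaggy
#   ham 10 bugs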
| 35.232877 | 77 | 0.64619 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,611 | 0.626361 |
76b2c1eb2360e1542020b3c834b8848005058b6f | 715 | py | Python | services/worker/main.py | mrgrassho/geo-diff-2 | 4b89d36e247d389871d4fcd3f4df5c9a0203750f | [
"MIT"
] | null | null | null | services/worker/main.py | mrgrassho/geo-diff-2 | 4b89d36e247d389871d4fcd3f4df5c9a0203750f | [
"MIT"
] | 11 | 2020-07-08T23:27:10.000Z | 2022-02-27T07:45:57.000Z | services/worker/main.py | mrgrassho/geo-diff-2 | 4b89d36e247d389871d4fcd3f4df5c9a0203750f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from geodiff_worker import GeoDiffWorker
from os import environ
from os.path import join, dirname
from dotenv import load_dotenv
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
def main():
"""Main entry point to the program."""
# Get the location of the AMQP broker (RabbitMQ server) from
# an environment variable
amqp_url = environ['AMQP_URL']
task_queue = environ['TASK_QUEUE']
result_xchg = environ['RES_XCHG']
keep_alive_queue = environ['KEEP_ALIVE_QUEUE']
worker = GeoDiffWorker(amqp_url, task_queue, result_xchg, keep_alive_queue, debug=True)
worker.start()
if __name__ == '__main__':
main()
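# Example .env consumed above (the variable names come from the reads in
# main(); the values are placeholders):
#   AMQP_URL=amqp://guest:guest@localhost:5672/
#   TASK_QUEUE=geodiff-tasks
#   RES_XCHG=geodiff-results
#   KEEP_ALIVE_QUEUE=geodiff-keepalive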
| 26.481481 | 91 | 0.717483 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 234 | 0.327273 |
76b3074992a122c8788429826bad0b8169c1290c | 4,922 | py | Python | tasks/tv_raffle_handler.py | fakegit/bili2.0 | 6086588a59aba1b19dae4ab1eae59073d884becd | [
"MIT"
] | null | null | null | tasks/tv_raffle_handler.py | fakegit/bili2.0 | 6086588a59aba1b19dae4ab1eae59073d884becd | [
"MIT"
] | null | null | null | tasks/tv_raffle_handler.py | fakegit/bili2.0 | 6086588a59aba1b19dae4ab1eae59073d884becd | [
"MIT"
] | 1 | 2019-07-25T18:05:11.000Z | 2019-07-25T18:05:11.000Z | import asyncio
import random
import bili_statistics
from reqs.tv_raffle_handler import TvRaffleHandlerReq
from tasks.utils import UtilsTask
import utils
from .task_func_decorator import normal
from .base_class import ForcedTask
class TvRaffleJoinTask(ForcedTask):
TASK_NAME = 'join_tv_raffle'
# 这是superuser做的,做完之后就broadcast
@staticmethod
async def check(user, real_roomid): # v4
if not await UtilsTask.is_normal_room(user, real_roomid):
return None
json_response = await user.req_s(TvRaffleHandlerReq.check, user, real_roomid)
# print(json_response['data']['list'])
checklen = json_response['data']['list']
if not checklen: # sb可能返回None
return None
next_step_settings = []
for j in checklen:
raffle_id = j['raffleId']
raffle_type = j['type']
max_wait = j['time'] - 10
# 处理一些重复
if not bili_statistics.is_raffleid_duplicate(raffle_id):
user.info(f'确认获取到小电视抽奖 {raffle_id}', with_userid=False)
next_step_setting = (-2, (j['time_wait'], max_wait), real_roomid, raffle_id, raffle_type)
next_step_settings.append(next_step_setting)
bili_statistics.add2raffle_ids(raffle_id, 'TV')
return next_step_settings
@staticmethod
@normal
async def work(user, real_roomid, raffleid, raffle_type): # v4
# print('参与', raffleid)
# await UtilsTask.enter_room(user, real_roomid)
json_rsp = await user.req_s(TvRaffleHandlerReq.join_v4, user, real_roomid, raffleid, raffle_type)
bili_statistics.add2joined_raffles('小电视(合计)', user.id)
code = json_rsp['code']
if not code:
data = json_rsp['data']
gift_name = data['gift_name']
gift_num = data['gift_num']
user.info(f'小电视({raffleid})的参与结果: {gift_name}X{gift_num}')
bili_statistics.add2results(gift_name, user.id, gift_num)
elif code == -403 and '拒绝' in json_rsp['msg']:
user.fall_in_jail()
else:
user.warn(f'小电视({raffleid})的参与结果: {json_rsp}')
@staticmethod
async def check_v3(user, real_roomid):
if not await UtilsTask.is_normal_room(user, real_roomid):
return None
json_response = await user.req_s(TvRaffleHandlerReq.check, user, real_roomid)
# print(json_response['data']['list'])
checklen = json_response['data']['list']
if not checklen:
return None
next_step_settings = []
for j in checklen:
raffle_id = j['raffleId']
raffle_type = j['type']
max_wait = j['time'] - 10
raffle_end_time = j['time'] + utils.curr_time()
# 处理一些重复
if not bili_statistics.is_raffleid_duplicate(raffle_id):
print('本次获取到的抽奖id为', raffle_id)
next_step_setting = (-2, (0, max_wait), real_roomid, raffle_id, raffle_type, raffle_end_time)
next_step_settings.append(next_step_setting)
bili_statistics.add2raffle_ids(raffle_id)
return next_step_settings
@staticmethod
@normal
async def work_v3(user, real_roomid, raffleid, raffle_type, raffle_end_time):
# print('参与', raffleid)
# await UtilsTask.enter_room(user, real_roomid)
json_response2 = await user.req_s(TvRaffleHandlerReq.join, user, real_roomid, raffleid)
bili_statistics.add2joined_raffles('小电视(合计)', user.id)
user.info(f'小电视({raffleid})的参与状态: {json_response2["msg"]}')
# -400不存在
# -500繁忙
code = json_response2['code']
if code:
if code == -500:
print('# -500繁忙,稍后重试')
elif code == 400:
user.fall_in_jail()
else:
print(json_response2)
else:
sleeptime = raffle_end_time - utils.curr_time() + 5 # 小于0,call_later自动置0
delay = random.uniform(sleeptime, sleeptime+90)
await asyncio.sleep(delay)
json_response = await user.req_s(TvRaffleHandlerReq.notice, user, real_roomid, raffleid)
# print(json_response)
if not json_response['code']:
# {'code': 0, 'msg': '正在抽奖中..', 'message': '正在抽奖中..', 'data': {'gift_id': '-1', 'gift_name': '', 'gift_num': 0, 'gift_from': '', 'gift_type': 0, 'gift_content': '', 'status': 3}}
if json_response['data']['gift_id'] != '-1':
data = json_response['data']
user.info(f'小电视({raffleid})的参与结果: {data["gift_name"]}X{data["gift_num"]}')
bili_statistics.add2results(data['gift_name'], user.id, data['gift_num'])
else:
user.warn(f'小电视({raffleid})的参与结果: {json_response}')
| 43.557522 | 194 | 0.596709 | 4,946 | 0.955195 | 0 | 0 | 4,762 | 0.91966 | 4,666 | 0.90112 | 1,284 | 0.247972 |
76b51edc5f6025c886db8fed031828f33d4df24f | 265 | py | Python | openstack/regression/utils/wait.py | viduship/ceph-qe-scripts | 886619fa6600c24cbf989d65868951b9c3decd72 | [
"MIT"
] | 6 | 2019-04-12T17:45:44.000Z | 2021-09-14T19:59:05.000Z | openstack/regression/utils/wait.py | viduship/ceph-qe-scripts | 886619fa6600c24cbf989d65868951b9c3decd72 | [
"MIT"
] | 111 | 2019-12-10T10:41:08.000Z | 2022-03-31T11:42:30.000Z | openstack/regression/utils/wait.py | viduship/ceph-qe-scripts | 886619fa6600c24cbf989d65868951b9c3decd72 | [
"MIT"
] | 23 | 2019-05-30T19:48:25.000Z | 2022-03-24T17:07:19.000Z | import time
class Wait(object):
def __init__(self):
pass
def wait_for_state_change(self, expected_status, from_status):
for i in range(0, 20):
if expected_status != from_status:
break
time.sleep(1)
| 20.384615 | 66 | 0.584906 | 250 | 0.943396 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
76b63b1bac33101e8507f58ebdfd16790912e32c | 20,894 | py | Python | cdci_data_analysis/analysis/parameters.py | andreatramacere/cdci_data_analysis | 8ae34a7252d6baf011a3b99fbe4f6e624b63d7df | [
"MIT"
] | null | null | null | cdci_data_analysis/analysis/parameters.py | andreatramacere/cdci_data_analysis | 8ae34a7252d6baf011a3b99fbe4f6e624b63d7df | [
"MIT"
] | null | null | null | cdci_data_analysis/analysis/parameters.py | andreatramacere/cdci_data_analysis | 8ae34a7252d6baf011a3b99fbe4f6e624b63d7df | [
"MIT"
] | null | null | null | """
Overview
--------
general info about this module
Classes and Inheritance Structure
----------------------------------------------
.. inheritance-diagram::
Summary
---------
.. autosummary::
list of the module you want
Module API
----------
"""
from __future__ import absolute_import, division, print_function
from builtins import (bytes, str, open, super, range,
                      zip, round, input, int, pow, object, map)
__author__ = "Andrea Tramacere"
import ast
import decorator
from datetime import datetime, date, time
from astropy.time import Time as astropyTime
from astropy.time import TimeDelta as astropyTimeDelta
from astropy.coordinates import Angle as astropyAngle
from .catalog import BasicCatalog
import numpy as np
@decorator.decorator
def check_par_list(func,par_list,*args, **kwargs):
for par in par_list:
if isinstance(par,Parameter):
pass
else:
raise RuntimeError('each parameter in the par_list has to be an instance of Parameters')
return func(par_list, *args, **kwargs)
class ParameterGroup(object):
def __init__(self,par_list,name,exclusive=True,def_selected=None,selected=None):
self.name=name
self._par_list=par_list
self._check_pars(par_list)
        self.exclusive = exclusive  # was hard-coded to True, ignoring the argument
        self.msk = np.ones(len(par_list), dtype=bool)  # np.bool was removed from numpy
        if exclusive == True:
            self.msk[::] = False
            if def_selected is None:
                self.msk[0] = True  # was `==`, a no-op comparison
if def_selected is not None:
self.select(def_selected)
if selected is not None:
self.select(selected)
@property
def par_list(self):
return self._par_list
@property
def names(self):
return [p.name for p in self._par_list]
    def select(self, name):
        if isinstance(name, Parameter):
            name = name.name  # was `Parameter.value`, i.e. the property object itself
for ID,p in enumerate(self._par_list):
if p.name==name:
self.msk[ID]=True
self._selected=self._par_list[ID].name
if self.msk.sum()>1 and self.exclusive==True:
            raise RuntimeError('only one parameter can be selected in mutually exclusive groups')
def _check_pars(self, par_list):
for p in par_list:
if isinstance(p,Parameter):
pass
elif isinstance(p,ParameterRange):
pass
else:
                raise RuntimeError('you can group Parameters or ParameterRanges, found', type(p))
def to_list(self):
_l=[]
for p in self._par_list:
if isinstance(p,Parameter):
_l.append(p)
elif isinstance(p,ParameterRange):
_l.extend(p.to_list())
return _l
def add_par(self,par):
self.par_list.append(par)
self.msk=np.append(self.msk,False)
def build_selector(self,name):
return Parameter(name, allowed_values=self.names)
class ParameterRange(object):
def __init__(self,p1,p2,name):
self._check_pars(p1,p2)
self.name=name
self.p1=p1
self.p2=p2
def _check_pars(self,p1,p2):
if type(p1)!=type(p2):
            raise RuntimeError('pars must be of the same type')
for p in (p1,p2):
try:
assert (isinstance(p,Parameter))
except:
raise RuntimeError('both p1 and p2 must be Parameters objects, found',type(p))
def to_list(self):
return [self.p1,self.p2]
class ParameterTuple(object):
def __init__(self,p_list,name):
self._check_pars(p_list)
self.name=name
self.p_list=tuple(p_list)
def _check_pars(self,p_list):
if any( type(x)!=type(p_list[0]) for x in p_list):
            raise RuntimeError('pars must be of the same type')
for p in (p_list):
try:
assert (isinstance(p,Parameter))
except:
raise RuntimeError('both p1 and p2 must be Parameters objects, found',type(p))
def to_list(self):
return self.p_list
class Parameter(object):
def __init__(self,value=None,units=None,name=None,allowed_units=[],check_value=None,allowed_values=None,units_name=None):
self.check_value=check_value
self._allowed_units = allowed_units
self._allowed_values = allowed_values
self.name = name
self.units=units
self.value = value
self.units_name=units_name
#self._wtform_dict=wtform_dict
@property
def value(self):
return self._value
@value.setter
def value(self,v):
#print ('set',self.name,v,self._allowed_values)
if v is not None:
if self.check_value is not None:
self.check_value(v, units=self.units,name=self.name)
if self._allowed_values is not None:
if v not in self._allowed_values:
raise RuntimeError('value',v,'not allowed, allowed=',self._allowed_values)
#print('set->',self.name,v,type(v))
            if isinstance(v, str):  # `unicode` does not exist on Python 3
self._value=v.strip()
else:
self._value = v
else:
self._value=None
@property
def units(self):
return self._units
@units.setter
def units(self,units):
if self._allowed_units !=[] and self._allowed_units is not None:
            self.check_units(units, self._allowed_units, self.name)
self._units=units
def set_from_form(self,form,verbose=False):
par_name = self.name
units_name = self.units_name
v = None
u = None
in_dictionary=False
if units_name is not None:
if units_name in form.keys():
u = form[units_name]
if par_name in form.keys():
v=form[par_name]
in_dictionary=True
if in_dictionary is True:
self.set_par(value=v,units=u)
#print('setting par:', par_name, 'to val=', self.value, 'and units', units_name, 'to', self.units )
else:
if verbose is True:
print('setting par:', par_name, 'not in dictionary')
def set_par(self,value,units=None):
if units is not None:
self.units=units
self.value=value
def get_form(self,wtform_cls,key,validators,defaults):
return wtform_cls('key', validators=validators, default=defaults)
@staticmethod
    def check_units(units, allowed, name):
if units not in allowed:
raise RuntimeError('wrong units for par: %s'%name, ' found: ',units,' allowed:', allowed)
@staticmethod
def check_value(val,units,par_name):
pass
# def get_form_field(self,key=None,default=None,validators=None,wtform_dict=None,wtform=None):
# if key is None:
# key=self.name
#
# if wtform is None and wtform_dict is None:
#
# wtform_dict=self._wtform_dict
#
# if default is not None:
# self.check_value(default,self.units)
# else:
# default=self.value
#
#
# if wtform is not None and wtform_dict is not None:
# raise RuntimeError('either you provide wtform or wtform_dict or you pass a wtform_dict to the constructor')
#
# elif wtform_dict is not None:
# wtform=wtform_dict[self.units]
#
# else:
# raise RuntimeError('yuo must provide wtform or wtform_dict')
#
# return wtform(label=key, validators=validators, default=default)
def reprJSON(self):
return dict(name=self.name, units=self.units, value=self.value)
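# Usage sketch (assumption -- the parameter name and values are illustrative):
#   p = Parameter(value='jemx1', name='instrument',
#                 allowed_values=['jemx1', 'jemx2'])
#   p.value = 'jemx2'   # accepted
#   p.value = 'isgri'   # raises RuntimeError: value not allowed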
#class Instrument(Parameter):
# def __init__(self,T_format,name,value=None):
#wtform_dict = {'iso': SelectField}
class Name(Parameter):
def __init__(self,value=None, name_format='str', name=None):
_allowed_units = ['str']
super(Name,self).__init__(value=value,
units=name_format,
check_value=self.check_name_value,
name=name,
allowed_units=_allowed_units)
@staticmethod
def check_name_value(value, units=None, name=None):
pass
class Float(Parameter):
def __init__(self,value=None,units=None,name=None):
_allowed_units = None
#wtform_dict = {'keV': FloatField}
super(Float, self).__init__(value=value,
units=units,
check_value=self.check_float_value,
name=name,
allowed_units=_allowed_units)
#wtform_dict=wtform_dict)
self.value=value
@property
def value(self):
return self._v
@value.setter
def value(self, v):
if v is not None and v!='':
self.check_float_value(v,name=self.name)
            self._v = float(v)  # np.float was removed from recent numpy
else:
self._v=None
@staticmethod
def check_float_value(value, units=None,name=None):
#print('check type of ',name,'value', value, 'type',type(value))
if value is None or value=='':
pass
else:
try:
value=ast.literal_eval(value)
except:
pass
            value = float(value)
            if isinstance(value, (int, np.integer)):
                pass
            elif isinstance(value, (float, np.floating)):
                pass
else:
raise RuntimeError('type of ', name, 'not valid', type(value))
class Integer(Parameter):
def __init__(self,value=None,units=None,name=None):
_allowed_units = None
#wtform_dict = {'keV': FloatField}
super(Integer, self).__init__(value=value,
units=units,
check_value=self.check_int_value,
name=name,
allowed_units=_allowed_units)
#wtform_dict=wtform_dict)
self.value=value
@property
def value(self):
return self._v
@value.setter
def value(self, v):
if v is not None and v!='':
self.check_int_value(v,name=self.name)
            self._v = int(v)  # np.int was removed from recent numpy
else:
self._v=None
@staticmethod
def check_int_value(value, units=None,name=None):
#print('check type of ',name,'value', value, 'type',type(value))
if value is None or value=='':
pass
else:
try:
value=ast.literal_eval(value)
except:
pass
            value = int(value)
            if isinstance(value, (int, np.integer)):
                pass
            elif isinstance(value, (float, np.floating)):
                pass
else:
raise RuntimeError('type of ', name, 'not valid', type(value))
class Time(Parameter):
def __init__(self,value=None,T_format=None,name=None,Time_format_name=None):
#_allowed_units = astropyTime.FORMATS
#wtform_dict = {'iso': StringField}
#wtform_dict['mjd'] = FloatField
#wtform_dict['prod_list'] = TextAreaField
super(Time,self).__init__(value=value,
units=T_format,
units_name=Time_format_name,
name=name,
allowed_units=None)
#wtform_dict=wtform_dict)
self._set_time(value,format=T_format)
@property
def value(self):
return self._astropy_time.value
@value.setter
def value(self, v):
units=self.units
self._set_time(v, format=units)
def _set_time(self,value,format):
try:
value=ast.literal_eval(value)
except:
pass
self._astropy_time = astropyTime(value, format=format)
self._value =value
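# Usage sketch (assumption -- any astropy.time format name is accepted):
#   t1 = Time(value='2019-09-01T00:00:00.000', T_format='isot', name='T1')
#   t2 = Time(value=58727.0, T_format='mjd', name='T2')
#   t1.value   # -> '2019-09-01T00:00:00.000'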
class TimeDelta(Parameter):
def __init__(self, value=None, delta_T_format='sec', name=None, delta_T_format_name=None):
# _allowed_units = astropyTime.FORMATS
# wtform_dict = {'iso': StringField}
# wtform_dict['mjd'] = FloatField
# wtform_dict['prod_list'] = TextAreaField
super(TimeDelta, self).__init__(value=value,
units=delta_T_format,
units_name=delta_T_format_name,
name=name,
allowed_units=None)
# wtform_dict=wtform_dict)
self._set_time(value, format=delta_T_format)
@property
def value(self):
return self._astropy_time_delta.value
@value.setter
def value(self, v):
units = self.units
self._set_time(v, format=units)
def _set_time(self, value, format):
try:
value = ast.literal_eval(value)
except:
pass
#print ('value',value)
self._astropy_time_delta = astropyTimeDelta(value, format=format)
self._value = value
class InputProdList(Parameter):
def __init__(self,value=None,_format='names_list',name=None):
_allowed_units = ['names_list']
if value is None:
value=[]
super(InputProdList, self).__init__(value=value,
units=_format,
check_value=self.check_list_value,
name=name,
allowed_units=_allowed_units)
#wtform_dict=wtform_dict)
self._split(value)
def _split(self,str_list):
if type(str_list)==list:
pass
elif type(str_list)==str or type(str(str_list)):
if ',' in str_list:
str_list= str_list.split(',')
else:
str_list = str_list.split(' ')
else:
raise RuntimeError('parameter format is not correct')
if str_list == ['']:
str_list = []
return str_list
@property
def value(self):
if self._value==[''] or self._value is None:
return []
else:
return self._value
@value.setter
def value(self, v):
#print('set', self.name, v, self._allowed_values)
if v is not None:
if self.check_value is not None:
self.check_value(v, units=self.units, name=self.name)
if self._allowed_values is not None:
if v not in self._allowed_values:
raise RuntimeError('value', v, 'not allowed, allowed=', self._allowed_values)
if v == [''] or v is None or str(v) == '':
self._value=['']
else:
self._value = v
else:
self._value = ['']
self._value=self._split(self._value)
#print ('set to ',self._value)
@staticmethod
def check_list_value(value,units,name='par'):
if units=='names_list':
try:
#print(type(value))
assert (type(value) == list or type(value) == str or type(str(value))== str)
except:
raise RuntimeError('par:',name,', value is not product list format : list of strings','it is',type(value),value)
else:
raise RuntimeError(name,'units not valid',units)
class Angle(Parameter):
def __init__(self,value=None, units=None,name=None):
super(Angle, self).__init__(value=value,
units=units,
name=name,
allowed_units=None)
# wtform_dict=wtform_dict)
self._set_angle(value, units=units)
@property
def value(self):
        return None if self._astropy_angle is None else self._astropy_angle.value
@value.setter
def value(self, v, units=None):
if units is None:
units = self.units
self._set_angle(v, units=units)
def _set_angle(self, value, units):
        if value == '' or value is None:
            # keep the attributes defined so the `value` property stays usable
            self._astropy_angle = None
            self._value = None
else:
self._astropy_angle = astropyAngle(value, unit=units)
self._value = self._astropy_angle.value
# class AngularDistance(Parameter):
# def __init__(self, angular_units,name, value=None):
# _allowed_units = ['deg']
# super(AngularDistance, self).__init__(value=value,
# units=angular_units,
# check_value=self.check_angle_value,
# name=name,
# allowed_units=_allowed_units)
#
#
#
# @staticmethod
# def check_angle_value(value, units=None, name=None):
# print('check type of ', name, 'value', value, 'type', type(value))
# pass
#
class SpectralBoundary(Parameter):
def __init__(self,value=None,E_units='keV',name=None):
_allowed_units = ['keV','eV','MeV','GeV','TeV','Hz','MHz','GHz']
#wtform_dict = {'keV': FloatField}
super(SpectralBoundary, self).__init__(value=value,
units=E_units,
check_value=self.check_energy_value,
name=name,
allowed_units=_allowed_units)
#wtform_dict=wtform_dict)
@staticmethod
def check_energy_value(value, units=None,name=None):
#print('check type of ',name,'value', value, 'type',type(value))
try:
value=ast.literal_eval(value)
except:
pass
        if isinstance(value, (int, np.integer)):
            pass
        elif isinstance(value, (float, np.floating)):
            pass
else:
raise RuntimeError('type of ',name,'not valid',type(value))
class Energy(Parameter):
def __init__(self,value=None,E_units=None,name=None):
_allowed_units = ['keV','eV','MeV','GeV','TeV']
#wtform_dict = {'keV': FloatField}
super(Energy, self).__init__(value=value,
units=E_units,
check_value=self.check_energy_value,
name=name,
allowed_units=_allowed_units)
#wtform_dict=wtform_dict)
@staticmethod
def check_energy_value(value, units=None,name=None):
#print('check type of ',name,'value', value, 'type',type(value))
try:
value=ast.literal_eval(value)
except:
pass
        if isinstance(value, (int, np.integer)):
            pass
        elif isinstance(value, (float, np.floating)):
            pass
else:
raise RuntimeError('type of ',name,'not valid',type(value))
class DetectionThreshold(Parameter):
def __init__(self,value=None,units='sigma',name=None):
_allowed_units = ['sigma']
#wtform_dict = {'keV': FloatField}
super(DetectionThreshold, self).__init__(value=value,
units=units,
check_value=self.check_value,
name=name,
allowed_units=_allowed_units)
#wtform_dict=wtform_dict)
@staticmethod
def check_value(value, units=None,name=None):
#print('check type of ',name,'value', value, 'type',type(value))
try:
value=ast.literal_eval(value)
except:
pass
        if isinstance(value, (int, np.integer)):
            pass
        elif isinstance(value, (float, np.floating)):
            pass
else:
raise RuntimeError('type of ',name,'not valid',type(value))
class UserCatalog(Parameter):
def __init__(self, value=None,name_format='str', name=None):
_allowed_units = ['str']
super(UserCatalog,self).__init__(value=value,
units=name_format,
check_value=self.check_name_value,
name=name,
allowed_units=_allowed_units)
@staticmethod
def check_name_value(value, units=None, name=None):
pass
| 27.348168 | 128 | 0.542117 | 18,989 | 0.908826 | 0 | 0 | 6,883 | 0.329425 | 0 | 0 | 3,978 | 0.19039 |
76b7c7406ad873b2c3844218bc4fda711535e033 | 3,552 | py | Python | utils/utils.py | lindsey98/dml_cross_entropy | 4312cb295e972abda7b0e2bdadecf1965c5d7ed5 | [
"BSD-3-Clause"
] | null | null | null | utils/utils.py | lindsey98/dml_cross_entropy | 4312cb295e972abda7b0e2bdadecf1965c5d7ed5 | [
"BSD-3-Clause"
] | null | null | null | utils/utils.py | lindsey98/dml_cross_entropy | 4312cb295e972abda7b0e2bdadecf1965c5d7ed5 | [
"BSD-3-Clause"
] | null | null | null | from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
def state_dict_to_cpu(state_dict: OrderedDict):
"""Moves a state_dict to cpu and removes the module. added by DataParallel.
Parameters
----------
state_dict : OrderedDict
State_dict containing the tensors to move to cpu.
Returns
-------
new_state_dict : OrderedDict
State_dict on cpu.
"""
new_state = OrderedDict()
for k in state_dict.keys():
newk = k.replace('module.', '') # remove "module." if model was trained using DataParallel
new_state[newk] = state_dict[k].cpu()
return new_state
class SmoothCrossEntropy(nn.Module):
def __init__(self, epsilon: float = 0.1):
super(SmoothCrossEntropy, self).__init__()
self.epsilon = float(epsilon)
def forward(self, logits: torch.Tensor, labels: torch.LongTensor) -> torch.Tensor:
# target probs is of shape [N x C], only the gt labels get values 1 - self.epsilon,
# other entries for other labels get values (self.epsilon)/(C - 1)
target_probs = torch.full_like(logits, self.epsilon / (logits.shape[1] - 1))
target_probs.scatter_(1, labels.unsqueeze(1), 1 - self.epsilon)
# LogSoftMax for logits
softmax_logits = F.softmax(logits, 1)
logsoftmax_logits = torch.log(softmax_logits + 1e-5) # manually control underflow
loss = F.kl_div(logsoftmax_logits, target_probs, reduction='none').sum(1)
# kl_divergence = \sum p(y) * log(p(yhat)/p(y)) while CE = - \sum p(y) * log(p(yhat))
if torch.isnan(loss).any():
# print('labels:', labels)
print(labels.shape)
print('Labels Min: {}, Max: {}'.format(torch.min(labels), torch.max(labels)))
# print('target prob:', target_probs)
print(target_probs.shape)
print(torch.sum(target_probs == 0))
print('Target probs Min: {}, Max: {}'.format(torch.min(target_probs), torch.max(target_probs)))
# print('log_softmax logits: ', torch.log_softmax(logits, 1))
print(logsoftmax_logits.shape)
print(torch.sum(logsoftmax_logits == 0))
print('logsoftmax_logits Min: {}, Max: {}'.format(torch.min(logsoftmax_logits), torch.max(logsoftmax_logits)))
print(loss)
raise RuntimeError('Loss has nan values, probably because the log operations lead to -inf')
return loss
class VAELoss(nn.Module):
def __init__(self, kld_weight: float = 0.005):
super(VAELoss, self).__init__()
self.kld_weight = float(kld_weight)
def forward(self, recons: torch.Tensor, input: torch.Tensor, mu: torch.Tensor, log_var: torch.Tensor) -> dict:
recons_loss = F.binary_cross_entropy(recons, input)
kld_loss = torch.mean(-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim = 1), dim = 0)
loss = recons_loss + self.kld_weight * kld_loss
loss_dict = {'loss':loss, 'reconstruct':recons_loss, 'kld_loss': -kld_loss}
print(loss_dict)
return loss_dict
class AELoss(nn.Module):
def __init__(self):
super(AELoss, self).__init__()
def forward(self, recons: torch.Tensor, input: torch.Tensor) -> dict:
recons_loss = F.binary_cross_entropy(recons, input)
loss = recons_loss
loss_dict = {'loss':loss, 'reconstruct':recons_loss}
# print(loss_dict)
return loss_dict
| 39.032967 | 122 | 0.628097 | 2,851 | 0.802646 | 0 | 0 | 0 | 0 | 0 | 0 | 1,030 | 0.289977 |
76b9ae265a175f4dd03817b9470d4b4760c676f3 | 526 | py | Python | examples/tinytag/fuzzbp.py | MJ-SEO/py_fuzz | 789fbfea21bf644ba4d00554fe4141694b0a190a | [
"Apache-2.0"
] | null | null | null | examples/tinytag/fuzzbp.py | MJ-SEO/py_fuzz | 789fbfea21bf644ba4d00554fe4141694b0a190a | [
"Apache-2.0"
] | null | null | null | examples/tinytag/fuzzbp.py | MJ-SEO/py_fuzz | 789fbfea21bf644ba4d00554fe4141694b0a190a | [
"Apache-2.0"
] | null | null | null | from pythonfuzz.main import PythonFuzz
from tinytag import TinyTag
import tempfile
import random
def suffix():
randnum = random.randint(0, 3)
if(randnum == 0):
return ".mp4"
elif(randnum == 1):
return ".mp3"
elif(randnum == 2):
return ".WMA"
elif(randnum == 3):
return ".riff"
@PythonFuzz
def fuzz(buf):
try:
f = tempfile.NamedTemporaryFile('wb', suffix=suffix())
f.write(buf)
f.seek(0)
tag = TinyTag.get(f.name)
f.close()
except UnicodeDecodeError:
pass
if __name__ == '__main__':
fuzz()
| 15.939394 | 56 | 0.6673 | 0 | 0 | 0 | 0 | 191 | 0.363118 | 0 | 0 | 39 | 0.074144 |
76bb0cabecd4a5466d01422f9d7179dffe4f8c9a | 5,653 | py | Python | r2d2.py | alicemirror/R2-D2-ArcadeLive | 24a90edcadeed7d78b1f84f329211d5e5ba87fd8 | [
"MIT"
] | 1 | 2020-04-09T18:54:55.000Z | 2020-04-09T18:54:55.000Z | r2d2.py | alicemirror/R2-D2-ArcadeLive | 24a90edcadeed7d78b1f84f329211d5e5ba87fd8 | [
"MIT"
] | null | null | null | r2d2.py | alicemirror/R2-D2-ArcadeLive | 24a90edcadeed7d78b1f84f329211d5e5ba87fd8 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# R2D2 Python source code to control the Sphero R2D2 droic
# Author: Enrico Miglino
# Version: 1.0
# Date: Sept, 2019
# License: LGPL 3.0
#
# Based on the reverse engineering work
# "Scripting Sphero's Star Wars Droids"
# by ~bbraun.
#
# Thanks to Arnaud Coolsaet who inspired the live chroma key
# methodological approach with openCV
# <http://www.synack.net/~bbraun/spherodroid/>
#
# Credits:
#
# * Sphero (Sphero/inc) for the precious documentation on
# low-level APIs for their Sphero robots
# <https://github.com/sphero-inc/DeveloperResources>
#
# * Christopher Peplin for the pygatt Python library
# <https://github.com/peplin/pygatt>
#
# * Pimoroni for the complete and exhaustive documentation on their
# PiCade HAT <https://github.com/pimoroni/picade-hat>
#
# * Phil Hutchinson and Tariq Ahmad by Element14.com for providing the
# hardware
import pygatt
import time
import sys
import tty
import getopt
import ctypes
# Import specific BLE libraries. Pygatt should be installed in your
# Python environment.
from pygatt.backends import BLEBackend, Characteristic, BLEAddressType
# Initial command status
command = None
# Specify the Bluetooth address of the droid. Can be changed during
# the call to a command.
address = 'FD:F9:CA:74:DC:DA'
sendbytes = None
# If the flag is set the droid is set to sleep when the program exits
sleeponexit = False
# Commands dictionary in human redable form
commandmap = dict([
("laugh", [0x0A,0x18,0x00,0x1F,0x00,0x32,0x00,0x00,0x00,0x00,0x00]),
("yes", [0x0A,0x17,0x05,0x41,0x00,0x0F]),
("no", [0x0A,0x17,0x05,0x3F,0x00,0x10]),
("alarm", [0x0A,0x17,0x05,0x17,0x00,0x07]),
("angry", [0x0A,0x17,0x05,0x18,0x00,0x08]),
("annoyed", [0x0A,0x17,0x05,0x19,0x00,0x09]),
("ionblast", [0x0A,0x17,0x05,0x1A,0x00,0x0E]),
("sad", [0x0A,0x17,0x05,0x1C,0x00,0x11]),
("scared", [0x0A,0x17,0x05,0x1D,0x00,0x13]),
("chatty", [0x0A,0x17,0x05,0x17,0x00,0x0A]),
("confident", [0x0A,0x17,0x05,0x18,0x00,0x12]),
("excited", [0x0A,0x17,0x05,0x19,0x00,0x0C]),
("happy", [0x0A,0x17,0x05,0x1A,0x00,0x0D]),
("laugh2", [0x0A,0x17,0x05,0x1B,0x00,0x0F]),
("surprise", [0x0A,0x17,0x05,0x1C,0x00,0x18]),
("tripod", [0x0A,0x17,0x0D,0x1D,0x01]),
("bipod", [0x0A,0x17,0x0D,0x1C,0x02]),
("rot+", [0x8D,0x0A,0x17,0x0F,0x1C,0x42,0xB4,0x00,0x00,0xBD,0xD8]),
("rot0", [0x8D,0x0A,0x17,0x0F,0x1E,0x00,0x00,0x00,0x00,0xB1,0xD8])
])
# Generate the CRC: the modulo-256 sum of all payload bytes, bitwise inverted
def GenCrc(bytes):
	ret = 0
for b in bytes:
ret += b
ret = ret % 256
return ~ret % 256
# Create the data packet to send to the droid
def BuildPacket(bytes):
# 0x8D marks the start of a packet
ret = [0x8D]
for b in bytes:
ret.append(b)
# CRC is always the 2nd to last byte
ret.append(GenCrc(bytes))
# 0xD8 marks the end of a packet
ret.append(0xD8)
return ret
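# Local consistency check (illustrative): framing the "rot+" payload from
# commandmap should reproduce the full packet captured during the reverse
# engineering work, including the 0xBD CRC byte.
assert BuildPacket([0x0A, 0x17, 0x0F, 0x1C, 0x42, 0xB4, 0x00, 0x00]) == \
	[0x8D, 0x0A, 0x17, 0x0F, 0x1C, 0x42, 0xB4, 0x00, 0x00, 0xBD, 0xD8]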
# Initialize the communication with the droid. If sleeping awake it
def r2d2_init():
# Initialize the BLE Gatt adapter and start the connection
# Note: no address type is specified.
adapter = pygatt.GATTToolBackend()
adapter.start()
device = adapter.connect(address = address, address_type = BLEAddressType.random)
# 'usetheforce...band' tells the droid we're a controller and prevents disconnection.
device.char_write_handle(0x15, [0x75,0x73,0x65,0x74,0x68,0x65,0x66,0x6F,0x72,0x63,0x65,0x2E,0x2E,0x2E,0x62,0x61,0x6E,0x64], True)
# wake from sleep? Droid is responsive and front led flashes blue/red
device.char_write_handle(0x1c, [0x8D,0x0A,0x13,0x0D,0x00,0xD5,0xD8], True)
# Turn on holoprojector led, 0xff (max) intensity
device.char_write_handle(0x1c, [0x8D,0x0A,0x1A,0x0E,0x1C,0x00,0x80,0xFF,0x32,0xD8], True)
return device, adapter
###############
# Main program
###############
def main():
sequences = []
pexit = False # Exit condition, never set to true. For future devel.
# Init the connection
r2d2, ble = r2d2_init()
# Start reading the pad
tty.setcbreak(sys.stdin)
# Control loop
while pexit == False:
# Get the scancode from the mapped pad
scancode = ord(sys.stdin.read(1))
# In case of wrong scancode, command is set to False
valid_command = True
# Create the commands sequence
if scancode == 65: # Up
sequences.append(commandmap["tripod"])
sequences.append(commandmap["yes"])
elif scancode == 66: # Down
sequences.append(commandmap["bipod"])
sequences.append(commandmap["yes"])
elif scancode == 67: # Rot left
sequences.append(commandmap["rot0"])
sequences.append(commandmap["no"])
elif scancode == 68: # Rot right
sequences.append(commandmap["rot+"])
sequences.append(commandmap["surprise"])
elif scancode == 122: # Button 3, 4
sequences.append(commandmap["laugh"])
sequences.append(commandmap["happy"])
sequences.append(commandmap["excited"])
elif scancode == 32: # Button 5
sequences.append(commandmap["surprise"])
sequences.append(commandmap["sad"])
sequences.append(commandmap["scared"])
elif scancode == 120: # Button 6
sequences.append(commandmap["confident"])
sequences.append(commandmap["laugh2"])
elif scancode == 105: # Front left
sequences.append(commandmap["angry"])
sequences.append(commandmap["alarm"])
elif scancode == 111: # Front right
sequences.append(commandmap["chatty"])
sequences.append(commandmap["ionblast"])
else:
valid_command = False
# Executes the command sequence
if valid_command == True:
for seq in sequences:
#device.char_write_handle(0x1c, commandmap[command], True)
r2d2.char_write_handle(0x1c, BuildPacket(seq), True)
# Empty the sequence list
del sequences[:]
ble.stop()
if __name__ == '__main__':
main()
| 31.232044 | 130 | 0.711834 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,535 | 0.448434 |
76bbab12b9082806b48585d80d0ee5d93f4121db | 882 | py | Python | week9_ML_svm_poly_norm/day2_svm_poly/theory/visualize_boundary.py | Clapiniella/data_science_nov_2020 | 7f98e626328169e266b0ee980f457b8402999647 | [
"Apache-2.0"
] | 1 | 2021-01-18T13:15:08.000Z | 2021-01-18T13:15:08.000Z | week9_ML_svm_poly_norm/day2_svm_poly/theory/visualize_boundary.py | Clapiniella/data_science_nov_2020 | 7f98e626328169e266b0ee980f457b8402999647 | [
"Apache-2.0"
] | null | null | null | week9_ML_svm_poly_norm/day2_svm_poly/theory/visualize_boundary.py | Clapiniella/data_science_nov_2020 | 7f98e626328169e266b0ee980f457b8402999647 | [
"Apache-2.0"
] | 1 | 2021-05-24T21:49:24.000Z | 2021-05-24T21:49:24.000Z | import numpy as np
import matplotlib.pyplot as plt
from plot_data import plot_data
def visualize_boundary(X, y, clf):
"""
Plots a linear decision boundary learned by the SVM.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Samples, where n_samples is the number of samples and n_features is the number of features.
y : ndarray, shape (n_samples,)
Labels.
clf : sklearn.svm.classes.SVC
The trained SVM.
"""
plot_data(X, y)
x1_plot = np.linspace(np.min(X[:, 0]), np.max(X[:, 0]), 100)
x2_plot = np.linspace(np.min(X[:, 1]), np.max(X[:, 1]), 100)
X1, X2 = np.meshgrid(x1_plot, x2_plot)
vals = np.zeros(X1.shape)
for i in range(X1.shape[1]):
X_tmp = np.hstack((X1[:, i:i + 1], X2[:, i:i + 1]))
vals[:, i] = clf.predict(X_tmp)
plt.contour(X1, X2, vals, levels=[0])
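if __name__ == "__main__":
    # Illustrative smoke test (an assumption: it requires plot_data to be
    # importable and uses synthetic data, not the course dataset).
    from sklearn.svm import SVC
    rng = np.random.RandomState(0)
    X = rng.randn(40, 2)
    y = (X[:, 0] + X[:, 1] > 0).astype(int)
    clf = SVC(kernel="linear", C=1.0).fit(X, y)
    visualize_boundary(X, y, clf)
    plt.show()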
| 29.4 | 99 | 0.603175 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 357 | 0.404762 |
76bbc2598c25fd191128e41eb9c547440be4a5b9 | 86 | py | Python | src/integ_test_resources/common/platforms.py | kaichengyan/amplify-ci-support | 5a56acd7fa8fb37ec7db975e080be6ba838dcec7 | [
"Apache-2.0"
] | 9 | 2020-06-09T21:59:02.000Z | 2021-06-27T07:15:18.000Z | src/integ_test_resources/common/platforms.py | kaichengyan/amplify-ci-support | 5a56acd7fa8fb37ec7db975e080be6ba838dcec7 | [
"Apache-2.0"
] | 27 | 2020-05-06T13:48:06.000Z | 2022-02-14T10:10:33.000Z | src/integ_test_resources/common/platforms.py | kaichengyan/amplify-ci-support | 5a56acd7fa8fb37ec7db975e080be6ba838dcec7 | [
"Apache-2.0"
] | 12 | 2020-05-15T11:51:41.000Z | 2022-02-11T18:07:15.000Z | from enum import Enum
class Platform(Enum):
IOS = "ios"
ANDROID = "android"
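# Example: Enum lookup by value — Platform("android") is Platform.ANDROID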
| 12.285714 | 23 | 0.639535 | 61 | 0.709302 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.162791 |
76bd0ea831e87f7129cc79c1729569f2c699f4dd | 1,608 | py | Python | pattern7-tree-breadth-first-search/7. Level Order Successor (easy).py | dopiwoo/Grokking-the-Coding-Interview | 78b2bacf9d761b460ac78882bac42df7465feec9 | [
"MIT"
] | null | null | null | pattern7-tree-breadth-first-search/7. Level Order Successor (easy).py | dopiwoo/Grokking-the-Coding-Interview | 78b2bacf9d761b460ac78882bac42df7465feec9 | [
"MIT"
] | null | null | null | pattern7-tree-breadth-first-search/7. Level Order Successor (easy).py | dopiwoo/Grokking-the-Coding-Interview | 78b2bacf9d761b460ac78882bac42df7465feec9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 26 17:29:12 2021
@author: dopiwoo
Given a binary tree and a node, find the level order successor of the given node in the tree. The level order successor
is the node that appears right after the given node in the level order traversal.
"""
from collections import deque
class TreeNode:
def __init__(self, val: int = 0, left: 'TreeNode' = None, right: 'TreeNode' = None):
self.val = val
self.left = left
self.right = right
def __repr__(self):
return str(self.val)
def find_successor(root: TreeNode, key: int) -> TreeNode or None:
"""
Time Complexity: O(N)
Space Complexity: O(N)
Parameters
----------
root : TreeNode
Input binary tree.
key : int
Input node.
Returns
-------
TreeNode or None
The level order successor of the given node in the given tree.
"""
if not root:
return None
queue = deque([root])
while queue:
cur_node = queue.popleft()
if cur_node.left:
queue.append(cur_node.left)
if cur_node.right:
queue.append(cur_node.right)
if cur_node.val == key:
break
return queue[0] if queue else None
if __name__ == '__main__':
root_node = TreeNode(12)
root_node.left = TreeNode(7)
root_node.right = TreeNode(1)
root_node.left.left = TreeNode(9)
root_node.right.left = TreeNode(10)
root_node.right.right = TreeNode(5)
print(find_successor(root_node, 12))
print(find_successor(root_node, 9))
| 24.363636 | 119 | 0.623756 | 233 | 0.1449 | 0 | 0 | 0 | 0 | 0 | 0 | 633 | 0.393657 |
76be0a159433c52bf9bb4c8d2bfce86eebcbe457 | 427 | py | Python | UnitTesting/Columbus/views.py | FalseG0d/AdvancedDjango | 52715ffea132e591f98f94b781960fc12a8613e4 | [
"MIT"
] | 9 | 2020-10-17T14:03:35.000Z | 2022-01-12T17:51:14.000Z | UnitTesting/Columbus/views.py | FalseG0d/AdvancedDjango | 52715ffea132e591f98f94b781960fc12a8613e4 | [
"MIT"
] | null | null | null | UnitTesting/Columbus/views.py | FalseG0d/AdvancedDjango | 52715ffea132e591f98f94b781960fc12a8613e4 | [
"MIT"
] | 4 | 2020-10-20T06:52:26.000Z | 2022-01-07T23:51:59.000Z | from django.shortcuts import render
from .models import Name
from .forms import NameForm
# Create your views here.
def i_was_here(request):
    form = NameForm()
    if request.method == "POST":
        form = NameForm(request.POST)
        if form.is_valid():
            form.save()
    names = Name.objects.all()
    context = {
        "names": names,
        "form": form,
    }
    return render(request, "index.html", context)
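# For reference, the imported form is assumed to be a ModelForm over Name
# (a sketch; the project's actual forms.py is not shown here):
#
# class NameForm(forms.ModelForm):
#     class Meta:
#         model = Name
#         fields = "__all__"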
76be67bce0cf697811965ca2143b5be3cb2e2a79 | 1,307 | py | Python | bruges/attribute/energy.py | sbachkheti/bruges | 10fa2524bf8f1b02df2e4e195dc44ead1ed97cbd | [
"Apache-2.0"
] | null | null | null | bruges/attribute/energy.py | sbachkheti/bruges | 10fa2524bf8f1b02df2e4e195dc44ead1ed97cbd | [
"Apache-2.0"
] | null | null | null | bruges/attribute/energy.py | sbachkheti/bruges | 10fa2524bf8f1b02df2e4e195dc44ead1ed97cbd | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import numpy as np
from scipy.signal import fftconvolve
def energy(traces, duration, dt=1):
"""
Compute an mean-squared energy measurement for each point of a
seismic section.
:param traces: The data array to use for calculating MS energy.
Must be 1D or 2D numpy array.
:param duration: the time duration of the window (in seconds), or
samples if dt=1.
:param dt: the sample interval of the data (in seconds). Defaults
to 1 so duration can be in samples.
:returns: An array the same dimensions as the input array.
"""
energy_data = np.zeros(traces.shape)
signal = traces * traces
n_samples = int(duration / dt)
window = np.ones(n_samples)
if np.ndim(signal) == 1:
# Compute the sliding average using a convolution
energy_data = fftconvolve(signal, window, mode='same') \
/ n_samples
    elif np.ndim(signal) == 2:
        for trace in range(signal.shape[1]):
            # normalize by the window length, as in the 1D branch
            energy_data[:, trace] = fftconvolve(signal[:, trace],
                                                window,
                                                mode='same') / n_samples
else:
raise ValueError('Array must be 1D or 2D')
return energy_data
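if __name__ == "__main__":
    # Minimal smoke test with illustrative values: a random 1D trace,
    # a 0.1 s window and a 4 ms sample interval.
    trace = np.random.randn(250)
    e = energy(trace, duration=0.1, dt=0.004)
    print(e.shape)  # -> (250,), same shape as the input trace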
| 31.878049 | 69 | 0.575363 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 617 | 0.472073 |
76be7613f5b089099bb687a960915778fa3c9364 | 5,288 | py | Python | examples/test_heat.py | nschloe/maelstrom | 16a86524d8995448b510109493b1f12f8b35c82f | [
"MIT"
] | 26 | 2016-07-21T16:57:30.000Z | 2022-03-22T17:44:52.000Z | examples/test_heat.py | nschloe/maelstrom | 16a86524d8995448b510109493b1f12f8b35c82f | [
"MIT"
] | 2 | 2017-11-18T01:11:58.000Z | 2021-12-03T07:32:37.000Z | examples/test_heat.py | nschloe/maelstrom | 16a86524d8995448b510109493b1f12f8b35c82f | [
"MIT"
] | 6 | 2016-01-28T21:22:26.000Z | 2021-06-28T11:48:48.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
from __future__ import print_function
from dolfin import plot, dx, Constant, Measure, Function, project, XDMFFile
import matplotlib.pyplot as plt
import numpy
import problems
import maelstrom
import parabolic
def _parameter_quest():
"""Find parameter sets fitting crucible data.
"""
# Create the set of parameter values to search.
flux0 = [500.0 * k for k in range(11)]
flux1 = [500.0 * k for k in range(11)]
from itertools import product
search_space = product(flux0, flux1)
control_values = [
# bottom left
((0.0, 0.366), 1580.0),
# top left
((0.0, 0.411), 1495.0),
# top middle
((0.038, 0.411), 1511.0),
# top right
((0.076, 0.411), 1540.0),
]
tol = 20.0
first = True
best_match = None
best_match_norm = None
for p in search_space:
print(p)
        # for boundary conditions
        flux = {"upper": p[0], "crucible": p[1]}
        # NOTE: test_stationary_solve() no longer accepts a flux dict;
        # passing it would only toggle the `show` flag, so the call is made
        # without arguments and the search loop is kept for reference.
        theta = test_stationary_solve()
# Check if temperature values match with crucible blueprint.
dev = numpy.array([theta(c[0]) - c[1] for c in control_values])
print("Deviations from control temperatures:")
print(dev)
dev_norm = numpy.linalg.norm(dev)
if not best_match or dev_norm < best_match_norm:
print("New best match! {} (||dev|| = {:e})".format(p, dev_norm))
best_match = p
best_match_norm = dev_norm
if all(abs(dev) < tol):
print("Success! {}".format(p))
print("Temperature at control points (with reference values):")
for c in control_values:
print(
"({:e}, {:e}): {:e} ({:e})".format(
c[0][0], c[0][1], theta(c[0]), c[1]
)
)
print()
if first:
theta_1 = theta
first = False
else:
theta_1.assign(theta)
plot(theta_1, rescale=True)
return
def test_stationary_solve(show=False):
problem = problems.Crucible()
boundaries = problem.wp_boundaries
average_temp = 1551.0
material = problem.subdomain_materials[problem.wpi]
rho = material.density(average_temp)
cp = material.specific_heat_capacity
kappa = material.thermal_conductivity
my_ds = Measure("ds")(subdomain_data=boundaries)
convection = None
heat = maelstrom.heat.Heat(
problem.Q,
kappa,
rho,
cp,
convection,
source=Constant(0.0),
dirichlet_bcs=problem.theta_bcs_d,
neumann_bcs=problem.theta_bcs_n,
robin_bcs=problem.theta_bcs_r,
my_dx=dx,
my_ds=my_ds,
)
theta_reference = heat.solve_stationary()
theta_reference.rename("theta", "temperature")
if show:
# with XDMFFile('temperature.xdmf') as f:
# f.parameters['flush_output'] = True
# f.parameters['rewrite_function_mesh'] = False
# f.write(theta_reference)
tri = plot(theta_reference)
plt.colorbar(tri)
plt.show()
assert abs(maelstrom.helpers.average(theta_reference) - 1551.0) < 1.0e-1
return theta_reference
def test_time_step():
problem = problems.Crucible()
boundaries = problem.wp_boundaries
# The melting point of GaAs is 1511 K.
average_temp = 1520.0
f = Constant(0.0)
material = problem.subdomain_materials[problem.wpi]
rho = material.density(average_temp)
cp = material.specific_heat_capacity
kappa = material.thermal_conductivity
my_ds = Measure("ds")(subdomain_data=boundaries)
# from dolfin import DirichletBC
convection = None
heat = maelstrom.heat.Heat(
problem.Q,
kappa,
rho,
cp,
convection,
source=Constant(0.0),
dirichlet_bcs=problem.theta_bcs_d,
neumann_bcs=problem.theta_bcs_n,
robin_bcs=problem.theta_bcs_r,
my_dx=dx,
my_ds=my_ds,
)
# create time stepper
# stepper = parabolic.ExplicitEuler(heat)
stepper = parabolic.ImplicitEuler(heat)
# stepper = parabolic.Trapezoidal(heat)
theta0 = project(Constant(average_temp), problem.Q)
# theta0 = heat.solve_stationary()
theta0.rename("theta0", "temperature")
    theta1 = Function(problem.Q)
t = 0.0
dt = 1.0e-3
end_time = 10 * dt
with XDMFFile("temperature.xdmf") as f:
f.parameters["flush_output"] = True
f.parameters["rewrite_function_mesh"] = False
f.write(theta0, t)
while t < end_time:
theta1.assign(stepper.step(theta0, t, dt))
theta0.assign(theta1)
t += dt
#
f.write(theta0, t)
assert abs(maelstrom.helpers.average(theta0) - 1519.81) < 1.0e-2
return
if __name__ == "__main__":
# # for boundary conditions
# heat_transfer_coefficient = {
# 'upper': 50.0,
# 'upper left': 300.0,
# 'crucible': 15.0
# }
# T = {'upper': 1480.0,
# 'upper left': 1500.0,
# 'crucible': 1660.0}
test_stationary_solve(show=True)
# test_time_step()
| 25.795122 | 76 | 0.587557 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,195 | 0.225983 |
76c007829c9f8744254e7acb7d3b55012a5f340a | 1,823 | py | Python | news_collector/news_collector/spiders/laarena.py | mfalcon/chequeabot | 380ac0f19f27b29237c36205bdb94412eb4f7cd3 | [
"MIT"
] | 11 | 2019-02-05T06:59:21.000Z | 2021-02-04T10:00:28.000Z | news_collector/news_collector/spiders/laarena.py | mfalcon/chequeabot | 380ac0f19f27b29237c36205bdb94412eb4f7cd3 | [
"MIT"
] | 8 | 2021-03-18T21:37:54.000Z | 2022-03-11T23:36:04.000Z | news_collector/news_collector/spiders/laarena.py | chequeado/chequeabot | 682289952d6160aa6a6e70b002564e6b9c4be094 | [
"MIT"
] | 4 | 2019-11-18T21:48:35.000Z | 2020-11-04T13:39:39.000Z | import datetime
import newspaper
import scrapy
import locale
import datetime
locale.setlocale(locale.LC_ALL, "es_AR.utf8")
BASE_URL = 'http://www.laarena.com.ar/'
class LaArenaSpider(scrapy.Spider):
name = "laarena"
def start_requests(self):
urls = [
'http://www.laarena.com.ar/category/el_pais',
'http://www.laarena.com.ar/category/la_pampa'
]
for url in urls:
yield scrapy.Request(url=url, callback=self.parse_seccion)
def parse_seccion(self, response):
        seccion = response.url.split('/')[-1]  # e.g.: la_pampa
        noticias = response.xpath('//a[contains(@href,"' + seccion + '-")]/@href').extract()
        # La Arena uses URLs of this form:
        # http://www.laarena.com.ar/la_pampa-se-posterga-el-1-encuentro-de-artesanias-por-mal-tiempo-2018676-163.htm
        # The section name (la_pampa) comes before the article-specific part
        # of the URL, so that prefix determines each article's section.
for noticia in noticias:
nota = response.urljoin(noticia)
yield scrapy.Request(url=nota, callback=self.parse_noticia)
def parse_noticia(self, response):
ff = newspaper.Article(response.url)
ff.download()
ff.parse()
texto = ff.text.replace('“','"').replace('″','"').replace('”','"')
titulo = ff.title
noticia_url = ff.url
noticia_fecha = ff.publish_date if ff.publish_date else datetime.datetime.now()
data = {
'titulo': titulo,
'fecha': noticia_fecha,
'noticia_texto': texto,
'noticia_url': noticia_url,
'source': 'La Arena',
'formato': 'web'
}
yield data
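# Run from the Scrapy project root with: scrapy crawl laarena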
| 29.403226 | 116 | 0.60011 | 1,645 | 0.899399 | 1,561 | 0.853472 | 0 | 0 | 0 | 0 | 596 | 0.325861 |
76c1957fa3e62fb50e20aef92a66bad4d263db88 | 5,343 | py | Python | Server/server.py | hackerghost93/Encrypted_FTP | 6d4b79fa0f3b9989a5c340c2859b53c4f7bc0970 | [
"MIT"
] | null | null | null | Server/server.py | hackerghost93/Encrypted_FTP | 6d4b79fa0f3b9989a5c340c2859b53c4f7bc0970 | [
"MIT"
] | null | null | null | Server/server.py | hackerghost93/Encrypted_FTP | 6d4b79fa0f3b9989a5c340c2859b53c4f7bc0970 | [
"MIT"
] | null | null | null | from multiprocessing import Process
from Crypto.Cipher import AES
import os
import sys
import threading
import socket
import platform
printing_lock = threading.Lock()
# AES-CBC parameters shared with the client. A fresh cipher is created per
# transfer: CBC keeps internal chaining state, so a single cipher object
# shared across threads would corrupt concurrent streams.
AES_KEY = 'This is a key123'
AES_IV = 'This is an IV456'
counterTcp = 0
counterUdp = 0
def udp_handler(client_socket, address, number, data, filename='ahmed.txt'): # this will take an address and send the file to the client
    print('udp client is starting', address)
    if os.path.isfile(filename):
        print('file is valid')
        file_size = os.path.getsize(filename)
        #socket.sendto(str(file_size).zfill(25), address) # sending size
    else:
        print("file couldn't be found, thread is terminating")
        return -1
    # per-transfer cipher (see note at the top of the file)
    cipher = AES.new(AES_KEY, AES.MODE_CBC, AES_IV)
    with open(filename, 'rb') as f:
        while True:
            bytes_to_send = f.read(1024)
            if bytes_to_send != b'':
                # zero-pad the last chunk up to the 16-byte AES block size
                if len(bytes_to_send) % 16 != 0:
                    bytes_to_send += bytes('\0' * (16 - len(bytes_to_send) % 16), 'utf-8')
                ciphertext = cipher.encrypt(bytes_to_send)
            else:
                ciphertext = b''  # empty datagram marks end of file
            client_socket.sendto(ciphertext, address)
            if bytes_to_send == b'':
                print('file completed of thread', number)
                break
print('thread number', number, 'is terminating')
def tcp_handler(client_socket, address, number, filename='ahmed.txt'):
print('tcp client', number, 'is starting with address', str(address))
    if os.path.isfile(filename):
        file_size = str(os.path.getsize(filename))
        print('file size is', file_size)
        client_socket.send(bytes(file_size, 'utf-8'))  # sending size
    else:
        print("file couldn't be found, thread is terminating")
        client_socket.close()
        return -1
    print('sending file')
    # per-transfer cipher (see note at the top of the file)
    cipher = AES.new(AES_KEY, AES.MODE_CBC, AES_IV)
    with open(filename, 'rb') as f:
        while True:
            bytes_to_send = f.read(1024)
            if bytes_to_send != b'':
                # zero-pad the last chunk up to the 16-byte AES block size
                if len(bytes_to_send) % 16 != 0:
                    bytes_to_send += bytes('\0' * (16 - len(bytes_to_send) % 16), 'utf-8')
                ciphertext = cipher.encrypt(bytes_to_send)
            else:
                ciphertext = b''
            client_socket.send(ciphertext)
            if bytes_to_send == b'':  # terminating condition
                print('file completed of thread', number)
                break
client_socket.close()
print('thread number', number, 'is terminating')
def udp_server(name):
print(name)
threads = []
host = '127.0.0.1'
port = 5671
connections = 0
printing_lock.acquire()
print(name,' process has just started')
printing_lock.release()
listener_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # socket creation
listener_socket.bind((host, port)) # add to socket the address and port
#listener_socket.setblocking(0)
FILE_CONSTANT = 'family.jpeg'
print('udp process is waiting for any datagram')
agenda = []
while True:
data, address = listener_socket.recvfrom(1024)
print('data received from address', str(address))
        if address not in agenda:
            # spawn one sender thread per new client address; agenda keeps
            # repeated datagrams from starting duplicate transfers
            agenda.append(address)
            try:
                connections += 1
                t1 = threading.Thread(target=udp_handler, args=(listener_socket, address, connections, data))
                t1.start()
                threads.append(t1)
            except:
                print('cannot be done')
listener_socket.close()
def tcp_server(name):
#print(name)
threads = []
connections = 0
host = '127.0.0.1'
port = 5572
printing_lock.acquire()
print(name, ' process has just started')
printing_lock.release()
listener_socket = socket.socket()
listener_socket.bind((host, port))
listener_socket.listen(1000)
print('tcp process is waiting to accept any connection')
while True:
try:
connection_socket, address = listener_socket.accept()
connections += 1
t1 = threading.Thread(target=tcp_handler, args=(connection_socket, address, connections))
t1.start()
t1.join()
except:
break
listener_socket.close()
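# Client-side sketch (an assumption; the real client is not part of this
# file). It mirrors tcp_handler: read the plaintext size header, then
# AES-CBC-decrypt the stream and drop the server's zero padding. For
# simplicity it assumes the small size header arrives in its own recv().
def tcp_client_sketch(host='127.0.0.1', port=5572, out_path='received.bin'):
    cipher = AES.new(AES_KEY, AES.MODE_CBC, AES_IV)
    s = socket.socket()
    s.connect((host, port))
    size = int(s.recv(64))  # plaintext size header sent by tcp_handler
    data = b''
    while True:
        chunk = s.recv(4096)
        if not chunk:  # server closes the socket when the file is done
            break
        data += chunk
    with open(out_path, 'wb') as f:
        # data length is a multiple of 16 by construction; truncate padding
        f.write(cipher.decrypt(data)[:size])
    s.close()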
def main():
print('current platform',platform.platform())
print('Main process has just started')
p1 = Process(target=udp_server, args=('the udp ftp server shadow',))
p2 = Process(target=tcp_server, args=('the tcp ftp server ghost' ,))
p1.start()
p2.start()
#p1.join()
#p2.join()
while True:
order = input("Enter quit to terminate -> ")
if order == 'quit':
p1.terminate()
p2.terminate()
break
print('program is terminating.. goodbye')
if __name__ == '__main__': # is running as a script
main() # run main function
| 34.921569 | 138 | 0.579637 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,217 | 0.227775 |
76c4af815aa877a1d595acd54dc152c3b1eb6533 | 511 | py | Python | placement/settings.py | vipulgupta2048/getmejob | 348427b03e0c3d57661cba7a53d9738199497bd6 | [
"MIT"
] | 1 | 2019-08-14T09:11:48.000Z | 2019-08-14T09:11:48.000Z | placement/settings.py | vipulgupta2048/getmejob | 348427b03e0c3d57661cba7a53d9738199497bd6 | [
"MIT"
] | 3 | 2021-03-31T19:36:43.000Z | 2021-12-13T20:35:37.000Z | placement/settings.py | vipulgupta2048/getmejob | 348427b03e0c3d57661cba7a53d9738199497bd6 | [
"MIT"
] | 1 | 2019-08-14T09:11:52.000Z | 2019-08-14T09:11:52.000Z | BOT_NAME = "placement"
SPIDER_MODULES = ["placement.spiders"]
NEWSPIDER_MODULE = "placement.spiders"
ROBOTSTXT_OBEY = True
CONCURRENT_REQUESTS = 16
DUPEFILTER_DEBUG = True
EXTENSIONS = {"spidermon.contrib.scrapy.extensions.Spidermon": 500}
SPIDERMON_ENABLED = True
ITEM_PIPELINES = {"spidermon.contrib.scrapy.pipelines.ItemValidationPipeline": 800}
SPIDERMON_VALIDATION_CERBERUS = ["/home/vipulgupta2048/placement/placement/schema.json"]
USER_AGENT = "Vipul Gupta - placement (vipulgupta2048@gmail.com)"
| 26.894737 | 88 | 0.800391 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 261 | 0.510763 |
76c5586c324a6c8cddcb69f882bddedf4ac01b9d | 628 | py | Python | api/src/mail.py | jsangmeister/openslides.com-1 | 73a5e2a36d5c969964ba0dd94c70dba83de99034 | [
"MIT"
] | null | null | null | api/src/mail.py | jsangmeister/openslides.com-1 | 73a5e2a36d5c969964ba0dd94c70dba83de99034 | [
"MIT"
] | 21 | 2019-08-07T15:11:17.000Z | 2022-01-31T09:57:07.000Z | api/src/mail.py | jsangmeister/openslides.com-1 | 73a5e2a36d5c969964ba0dd94c70dba83de99034 | [
"MIT"
] | 9 | 2019-07-10T14:11:48.000Z | 2022-02-08T11:40:59.000Z | import smtplib
from flask_babel import gettext as _
from flask_mail import Mail
from .app import app
from .errors import ViewError
mail = Mail(app)
def try_send_mail(msg):
try:
mail.send(msg)
except smtplib.SMTPServerDisconnected:
raise ViewError(_("Der Server ist nicht korrekt konfiguriert"))
except smtplib.SMTPRecipientsRefused as e:
messages = [
"{}: {} {}".format(r, errno, msg.decode())
for r, (errno, msg) in e.recipients.items()
]
raise ViewError(
_("Konnte E-Mail nicht versenden an:") + " " + ", ".join(messages)
)
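# Example usage (a sketch; subject and recipient values are placeholders):
#   from flask_mail import Message
#   msg = Message(subject="Hello", recipients=["user@example.com"], body="Hi")
#   try_send_mail(msg)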
| 24.153846 | 78 | 0.619427 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 96 | 0.152866 |
76c5d6ceadd233d09e6617cd9a2eac6fd428af2a | 183 | py | Python | src/python/starpattern.py | DHANUSHXENO/a-patterns | 701ce22fdb1ac54b71943167edb97db89b7f311b | [
"MIT"
] | null | null | null | src/python/starpattern.py | DHANUSHXENO/a-patterns | 701ce22fdb1ac54b71943167edb97db89b7f311b | [
"MIT"
] | null | null | null | src/python/starpattern.py | DHANUSHXENO/a-patterns | 701ce22fdb1ac54b71943167edb97db89b7f311b | [
"MIT"
] | null | null | null | def star_pattern(n):
for i in range(n):
for j in range(i+1):
print("*",end=" ")
print()
star_pattern(5)
'''
star_pattern(5)
*
* *
* * *
* * * *
* * * * *
'''
| 10.764706 | 24 | 0.437158 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 66 | 0.360656 |
76c632d8dcd5648691e6cdef8c48b528a22c1ea2 | 447 | py | Python | setup.py | sdimitro/savedump-workflows | 2f5b34f006d84c09918b2ade98c20902d411ed3f | [
"Apache-2.0"
] | 1 | 2021-03-27T14:10:55.000Z | 2021-03-27T14:10:55.000Z | setup.py | sdimitro/savedump-workflows | 2f5b34f006d84c09918b2ade98c20902d411ed3f | [
"Apache-2.0"
] | 9 | 2020-07-17T16:21:15.000Z | 2020-09-01T15:50:10.000Z | setup.py | sdimitro/savedump-workflows | 2f5b34f006d84c09918b2ade98c20902d411ed3f | [
"Apache-2.0"
] | 2 | 2020-08-03T17:53:56.000Z | 2020-08-12T21:49:12.000Z | #!/usr/bin/env python3
from setuptools import setup
setup(
name='savedump',
version="0.1.0",
packages=[
"savedump",
],
entry_points={
'console_scripts': ['savedump=savedump.savedump:main'],
},
author='Delphix Platform Team',
author_email='serapheim@delphix.com',
description='Archive linux crash dumps and cores',
license='Apache-2.0',
url='https://github.com/sdimitro/savedump',
)
| 19.434783 | 63 | 0.635347 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 232 | 0.519016 |
76c6d2932401f4203f7334417d8df963804344f7 | 899 | py | Python | ex081.py | LucasBalbinoSS/Exercicios-Python-Mundo3 | 11799e9529ce4d9f20285b53206083310a076203 | [
"MIT"
] | null | null | null | ex081.py | LucasBalbinoSS/Exercicios-Python-Mundo3 | 11799e9529ce4d9f20285b53206083310a076203 | [
"MIT"
] | null | null | null | ex081.py | LucasBalbinoSS/Exercicios-Python-Mundo3 | 11799e9529ce4d9f20285b53206083310a076203 | [
"MIT"
] | null | null | null | listaNum = list()
contadorde5 = 0
while True:
num = int(input('Digite um número: '))
if num == 5:
contadorde5 += 1
listaNum.append(num)
    continuar = str(input('Quer continuar? [ S / N ] ')).strip().upper()
    print()
    # keep asking until a valid answer is given, then test it
    while continuar == '' or continuar[0] not in 'SN':
        continuar = str(input('Quer continuar? [ S / N ] ')).strip().upper()
        print()
    if continuar[0] == 'N':
        break
print('=-' * 35)
print(f'Sua lista ficou assim: {listaNum}')
print('=-' * 35)
if len(listaNum) == 1:
print('Você digitou apenas 1 número...')
else:
print(f'Foram digitados {len(listaNum)} números!')
print(f'A lista de forma descrescente se torna {sorted(listaNum, reverse=True)}')
print('=-' * 35)
if 5 in listaNum:
print(f'O valor 5 está sim na lista!\nEncontrei {contadorde5} deles!')
else:
print('Não encontrei nenhum número 5 na lista...')
| 24.972222 | 81 | 0.604004 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 396 | 0.437086 |
76c72ad5a13bd88794ba69132692e818b1f546b9 | 8,432 | py | Python | SHLDataset/data_fusion3.py | jenhuluck/deep-learning-in-ADL | c6f70ed2c845253698f8bf8dcdd47a8e74b30fd2 | [
"MIT"
] | 3 | 2020-10-29T09:37:47.000Z | 2022-03-24T13:03:28.000Z | SHLDataset/data_fusion3.py | jenhuluck/Deep-Learning-in-Human-Activity-Recognition- | c6f70ed2c845253698f8bf8dcdd47a8e74b30fd2 | [
"MIT"
] | 1 | 2020-10-13T03:12:28.000Z | 2020-10-13T03:12:28.000Z | SHLDataset/data_fusion3.py | jenhuluck/deep-learning-in-ADL | c6f70ed2c845253698f8bf8dcdd47a8e74b30fd2 | [
"MIT"
] | 1 | 2021-02-10T14:00:04.000Z | 2021-02-10T14:00:04.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 21 21:38:36 2020
@author: Jieyun Hu
"""
#using deep learning on data fusion of motion and video data
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import tensorflow as tf
from sklearn import metrics
import h5py
import matplotlib.pyplot as plt
from tensorflow.keras import regularizers
from tensorflow.keras.layers import Input, Conv2D, Dense, Flatten, Dropout, SimpleRNN, GRU, LSTM, GlobalMaxPooling1D,GlobalMaxPooling2D,MaxPooling2D,BatchNormalization, concatenate
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam, SGD
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import itertools
from keras.utils.vis_utils import plot_model
class models():
#def __init__(self):
def read_h5(self, path_array):
split_array = []
l = len(path_array)
for i, path in enumerate(path_array):
f = h5py.File(path, 'r')
X = f.get('inputs')
y = f.get('labels')
X = np.array(X)
y = np.array(y)
split_array.append(X) # add X to array for split
if i == l - 1:
split_array.append(y) # add y to the last
self.split = train_test_split(*split_array,test_size=0.2, random_state = 1)
'''
print(len(split))
print(split[0].shape) # data1_train_x
print(split[1].shape) # data1_test_x
print(split[2].shape) # data2_train_x
print(split[3].shape) # data2_test_x
print(split[4].shape) # y_train
print(split[5].shape) # y_test
'''
return self.split
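    # Expected HDF5 layout for each input path (an assumption consistent
    # with the reads above): f['inputs'] holds the sample windows and
    # f['labels'] an integer class per window, e.g.
    #   inputs: (n_samples, height, width), labels: (n_samples,)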
# K is the number of classes
def create_motion_cnn(self, input_shape, K):
i = Input(shape = input_shape)
x = Conv2D(16, (3,3), strides = 2, activation = 'relu',padding='same',kernel_regularizer=regularizers.l2(0.0005))(i)
x = BatchNormalization()(x)
#x = MaxPooling2D((2,2))(x)
x = Dropout(0.2)(x)
#x = Conv2D(32, (3,3), strides = 2, activation = 'relu',padding='same',kernel_regularizer=regularizers.l2(0.0005))(x)
#x = BatchNormalization()(x)
#x = MaxPooling2D((2,2))(x)
#x = Dropout(0.2)(x)
#x = Conv2D(256, (3,3), strides = 2, activation = 'relu',padding='same',kernel_regularizer=regularizers.l2(0.0005))(x)
#x = BatchNormalization()(x)
#x = MaxPooling2D((2,2))(x)
#x = Conv2D(128, (3,3), strides = 2, activation = 'relu',padding='same',kernel_regularizer=regularizers.l2(0.0005))(x)
#x = BatchNormalization()(x)
x = Flatten()(x)
x = Dropout(0.2)(x)
x = Dense(128,activation = 'relu')(x)
x = BatchNormalization()(x)
x = Dropout(0.2)(x)
x = Dense(K,activation = 'relu')(x)
model = Model(i, x)
return model
def create_img_cnn(self, input_shape, K):
i = Input(shape = input_shape)
x = Conv2D(32, (3,3), strides = 2, activation = 'relu',padding='same',kernel_regularizer=regularizers.l2(0.0005))(i)
x = BatchNormalization()(x)
x = MaxPooling2D((2,2))(x)
x = Dropout(0.2)(x)
x = Conv2D(64, (3,3), strides = 2, activation = 'relu',padding='same',kernel_regularizer=regularizers.l2(0.0005))(x)
x = BatchNormalization()(x)
x = MaxPooling2D((2,2))(x)
x = Dropout(0.4)(x)
x = Conv2D(128, (3,3), strides = 2, activation = 'relu',padding='same',kernel_regularizer=regularizers.l2(0.0005))(x)
x = BatchNormalization()(x)
#x = MaxPooling2D((2,2))(x)
x = Dropout(0.5)(x)
#x = Conv2D(128, (3,3), strides = 2, activation = 'relu',padding='same',kernel_regularizer=regularizers.l2(0.0005))(x)
#x = BatchNormalization()(x)
x = Flatten()(x)
#x = Dropout(0.2)(x)
x = Dense(256,activation = 'relu')(x)
x = BatchNormalization()(x)
x = Dropout(0.2)(x)
x = Dense(K,activation = 'relu')(x)
model = Model(i, x)
return model
# merge n cnn models
def merge_models(self,n):
motion_input_shape = np.expand_dims(self.split[0], -1)[0].shape
K = len(set(self.split[-2]))
print(motion_input_shape)
cnns = [] # save all cnn models
for i in range(n-1):
cnn_i = self.create_motion_cnn(motion_input_shape,K)
cnns.append(cnn_i)
img_input_shape = np.expand_dims(self.split[-4], -1)[0].shape # last data should be image data
print(img_input_shape)
img_cnn = self.create_img_cnn(img_input_shape, K)
cnns.append(img_cnn)
#cnn1 = self.create_cnn(input_shape, K)
#cnn2 = self.create_cnn(input_shape, K)
#combinedInput = concatenate([cnn1.output, cnn2.output])
combinedInput = concatenate([c.output for c in cnns])
x = Dense(K,activation='softmax')(combinedInput)
self.mix_model = Model(inputs = [c.input for c in cnns], outputs = x)
#model = Model(inputs = [cnn1.input, cnn2.input], outputs = x)
self.mix_model.compile(optimizer = Adam(lr=0.0005),loss = 'sparse_categorical_crossentropy',metrics = ['accuracy'])
#self.r = self.mix_model.fit(x = [np.expand_dims(self.split[0],-1),self.split[]])
self.r = self.mix_model.fit(x = [np.expand_dims(self.split[i],-1) for i in range(2*n) if i % 2 == 0],
y = self.split[-2], validation_data = ([np.expand_dims(self.split[i],-1) for i in range(2*n) if i % 2 != 0],self.split[-1]),
epochs = 50, batch_size = 256 )
print(self.mix_model.summary())
return self.r
#r = model.fit(x = [np.expand_dims(self.split[0],-1),np.expand_dims(self.split[2],-1)], y = self.split[4], validation_data = ([np.expand_dims(self.split[1],-1),np.expand_dims(self.split[3],-1)],self.split[5]), epochs = 50, batch_size = 32 )
def draw(self):
f1 = plt.figure(1)
plt.title('Loss')
plt.plot(self.r.history['loss'], label = 'loss')
plt.plot(self.r.history['val_loss'], label = 'val_loss')
plt.legend()
f1.show()
        f2 = plt.figure(2)
        # history keys differ across Keras versions ('acc' vs 'accuracy')
        acc_key = 'accuracy' if 'accuracy' in self.r.history else 'acc'
        plt.plot(self.r.history[acc_key], label = 'accuracy')
        plt.plot(self.r.history['val_' + acc_key], label = 'val_accuracy')
        plt.legend()
        f2.show()
# summary, confusion matrix and heatmap
def con_matrix(self,n):
K = len(set(self.split[-2]))
self.y_pred = self.mix_model.predict([np.expand_dims(self.split[i],-1) for i in range(2*n) if i % 2 != 0]).argmax(axis=1)
cm = confusion_matrix(self.split[-1],self.y_pred)
self.plot_confusion_matrix(cm,list(range(K)))
def plot_confusion_matrix(self, cm, classes, normalize = False, title='Confusion matrix', cmap=plt.cm.Blues):
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:,np.newaxis]
print("Normalized confusion matrix")
else:
print("Confusion matrix, without normalization")
print(cm)
f3 = plt.figure(3)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max()/2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment = "center",
color = "white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('predicted label')
f3.show()
if __name__ == "__main__":
model_name = "cnn" # can be cnn/dnn/rnn
paths = ["./bag.h5","./image_for_fusion.h5"] # a motion data fuses with video data
#paths = ["./bag.h5", "./hand.h5", "./hip.h5","./torso.h5", "./image_for_fusion.h5"]
mix = models()
print("read h5 file....")
data_array = mix.read_h5(paths)
mix.merge_models(len(paths))
mix.draw()
mix.con_matrix(len(paths))
| 41.742574 | 248 | 0.591675 | 7,119 | 0.844284 | 0 | 0 | 0 | 0 | 0 | 0 | 2,492 | 0.295541 |
76c82645a89d088d85f5022f67a7ad6314a2c8d3 | 245 | py | Python | api-test-code/credentials.py | srijansingh53/hatErase | eab4873efee4803a0c641777bce5bd63fb4fcc2f | [
"MIT"
] | 1 | 2020-06-26T07:24:03.000Z | 2020-06-26T07:24:03.000Z | api-test-code/credentials.py | srijansingh53/hatErase | eab4873efee4803a0c641777bce5bd63fb4fcc2f | [
"MIT"
] | null | null | null | api-test-code/credentials.py | srijansingh53/hatErase | eab4873efee4803a0c641777bce5bd63fb4fcc2f | [
"MIT"
] | null | null | null | consumer_key = 'F1BCRW0AXUlr0wjLE8L6Znm8a'
consumer_secret = 'HqRCnviPD8TmI05TSL47CA9eL6niFYtRu35CIP6J2F0fhjL9zz'
access_token = '1163025465423982592-WrNzWHSUZykiwRmYbexhkUa5BGudYZ'
access_secret = 'bZ0mWxWgFMClId9OoauDLQgT9IlwojmFS65OuGsy0E9QQ' | 61.25 | 70 | 0.902041 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 178 | 0.726531 |