max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
scrappers/plugins/cbsnews/__init__.py | nathanIL/openews | 2 | 12766651 | import scrappers
import re
import scrappers.mixins
class CBSNews(scrappers.mixins.RSSScrapper, scrappers.Scrapper):
    """The CBS News RSS feeds scrapper.
    """
    # Titles matching any of these patterns are skipped. Compiled once at
    # class-creation time instead of on every skipping_rules() call.
    _SKIP_PATTERNS = [re.compile(r'^Photos\s+of\s+the\s+week', re.IGNORECASE)]

    # NOTE: the original defined an __init__ that only delegated to
    # super().__init__(*args, **kwargs); it was redundant and has been removed.

    def should_translate(self):
        """Return False: CBS News content does not need translation."""
        return False

    def encoding(self):
        """Character encoding of the scraped feeds."""
        return 'UTF-8'

    def skipping_rules(self, title):
        """
        :param title: The scraped title
        :return: True if we want to skip, otherwise False.
        """
        # Generator form avoids building an intermediate list.
        return any(pattern.match(title) for pattern in self._SKIP_PATTERNS)

    def resource_urls(self):
        """RSS feed URLs to scrape, one entry per news category."""
        return [{'category': 'Top Stories', 'url': 'http://www.cbsnews.com/latest/rss/main'},
                {'category': 'US', 'url': 'http://www.cbsnews.com/latest/rss/us'},
                {'category': 'Sci-Tech', 'url': 'http://www.cbsnews.com/latest/rss/tech'},
                {'category': 'World', 'url': 'http://www.cbsnews.com/latest/rss/world'},
                {'category': 'Politics', 'url': 'http://www.cbsnews.com/latest/rss/politics'}]
| 2.890625 | 3 |
examples/network/processingClient.py | dhaase-de/dh-python-dh | 0 | 12766652 | #!/usr/bin/python3
import time
import dh.data
import dh.image
import dh.network
import dh.utils
###
#%% main
###
def main():
    """Send the Lena demo image to a local processing server and show the result."""
    client = dh.network.ImageProcessingClient2("localhost")

    # Input image and processing parameters.
    image = dh.data.lena()
    params = {"gamma": 0.5}
    print("Input:")
    dh.image.pinfo(image)

    # Process remotely and time the round trip.
    startedAt = time.time()
    (result, info) = client.process(image, params)
    finishedAt = time.time()

    # Report the outcome and display input/output side by side.
    print("Output:")
    dh.image.pinfo(result)
    print("Info:")
    print(info)
    print("Received result after {} ms".format(dh.utils.around((finishedAt - startedAt) * 1000.0)))
    dh.image.show(dh.image.stack([image, result]), wait=0, closeWindow=True)


if __name__ == "__main__":
    main()
| 2.453125 | 2 |
examples/inspector.py | yaoanderson/loadimpact-sdk-python | 1 | 12766653 | #!/usr/bin/env python
# coding=utf-8
"""
Copyright 2013 Load Impact
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import optparse
import sys
import traceback
from loadimpact import (
ApiTokenClient, ApiError, DataStore, LoadZone, TestConfig, UserScenario,
__version__ as li_sdk_version)
def get_or_list(client, cls, resource_id=None):
    """Fetch one resource (wrapped in a list) when an id is given, else list all.

    Note: any falsy id (None, 0, "") triggers the list branch, matching the
    original truthiness check.
    """
    if not resource_id:
        return cls.list(client)
    return [cls.get(client, resource_id)]
def inspect_resource(api_token, resource_name, resource_id=None, debug=False):
    """Print repr() of one or all resources of the type named by an alias."""
    client = ApiTokenClient(api_token)

    # Map every accepted alias to (resource class, id to use). Load zones are
    # always listed in full, hence the hard-wired None id.
    dispatch = {}
    for alias in ('ds', 'datastore', 'data-store', 'data_store'):
        dispatch[alias] = (DataStore, resource_id)
    for alias in ('lz', 'loadzone', 'load-zone', 'load_zone'):
        dispatch[alias] = (LoadZone, None)
    for alias in ('tc', 'testconfig', 'test-config', 'test_config'):
        dispatch[alias] = (TestConfig, resource_id)
    for alias in ('us', 'userscenario', 'user-scenario', 'user_scenario'):
        dispatch[alias] = (UserScenario, resource_id)

    if resource_name not in dispatch:
        raise RuntimeError("Unknown resource: %s" % resource_name)

    cls, rid = dispatch[resource_name]
    for resource in get_or_list(client, cls, rid):
        print(repr(resource))
if __name__ == "__main__":
    parser = optparse.OptionParser(version=('%%prog %s' % li_sdk_version))
    parser.add_option('--api-token', action='store',
                      dest='api_token', default=None,
                      help=("Your Load Impact API token."))
    parser.add_option('--debug', action='store_true', dest='debug', default=False,
                      help=("."))
    options, arguments = parser.parse_args()

    # Need at least a resource name; an optional second argument selects one
    # specific resource by integer id.
    if len(arguments) < 1:
        print("You need to specify at least 1 argument (to list): "
              "resource_name")
        print("Specify 2 arguments (to get specific resource): resource_name, "
              "resource_id")
        sys.exit(2)

    wanted_name = arguments[0]
    wanted_id = int(arguments[1]) if len(arguments) > 1 else None

    try:
        inspect_resource(options.api_token, wanted_name, resource_id=wanted_id,
                         debug=options.debug)
    except ApiError:
        print("Error encountered: %s" % traceback.format_exc())
| 1.90625 | 2 |
basicmonitor/triggers/__init__.py | TorbenFricke/basicmonitor | 0 | 12766654 | <reponame>TorbenFricke/basicmonitor<filename>basicmonitor/triggers/__init__.py
from basicmonitor.triggers.manager import TriggerManager
from basicmonitor.triggers.trigger import Trigger | 1.171875 | 1 |
code/ppe/experiment/code/report.py | alexandergg/MLOps-YoloV3-Azure | 6 | 12766655 | <gh_stars>1-10
import glob
import json
import os
import shutil
import operator
import sys
import argparse
import cv2
import matplotlib.pyplot as plt
from scripts.compress import zipFolder, unzipFolder
from azureml.core import Model, Run, Datastore
class mAP():
    def __init__(self):
        """Parse the CLI arguments and bind to the active Azure ML run context."""
        # CLI definition: pipeline/release inputs plus mAP report tweaks.
        self._parser = argparse.ArgumentParser("mAP")
        self._parser.add_argument("--release_id", type=str, help="The ID of the release triggering this pipeline run")
        self._parser.add_argument("--model_name", type=str, help="Name of the tf model")
        self._parser.add_argument("--ckpt_path", type=str, help="Chekpoint path", default="checkpoint/yolov3.ckpt")
        self._parser.add_argument("--datastore", type=str, help="Name of the datastore", default="epis_datastore")
        self._parser.add_argument("--storage_container", type=str, help="Name of the storage container", default="ppe")
        self._parser.add_argument('-na', '--no-animation', help="no animation is shown.", action="store_true")
        self._parser.add_argument('-np', '--no-plot', help="no plot is shown.", action="store_true")
        self._parser.add_argument('-q', '--quiet', help="minimalistic console output.", action="store_true")
        self._parser.add_argument('-i', '--ignore', nargs='+', type=str, help="ignore a list of classes.")
        self._parser.add_argument('--set-class-iou', nargs='+', type=str, help="set IoU for a specific class.")
        self._args = self._parser.parse_args()
        # Azure ML run context: experiment, workspace and datastore are
        # resolved from the currently executing run.
        self._run = Run.get_context()
        self._exp = self._run.experiment
        self._ws = self._run.experiment.workspace
        self._datastore = Datastore.get(self._ws, datastore_name=self._args.datastore)
        # Minimum IoU for a detection to count as a true positive (PASCAL VOC).
        self._MINOVERLAP = 0.5
def main(self):
if self._args.ignore is None:
self._args.ignore = []
specific_iou_flagged = False
if self._args.set_class_iou is not None:
specific_iou_flagged = True
img_path = 'images'
if os.path.exists(img_path):
for dirpath, dirnames, files in os.walk(img_path):
if not files:
args.no_animation = True
else:
self._args.no_animation = True
show_animation = False
if not self._args.no_animation:
try:
show_animation = True
except ImportError:
print("\"opencv-python\" not found, please install to visualize the results.")
self._args.no_animation = True
draw_plot = False
if not self._args.no_plot:
try:
draw_plot = True
except ImportError:
print("\"matplotlib\" not found, please install it to get the resulting plots.")
args.no_plot = True
all_runs = self._exp.get_runs(properties={"release_id": self._args.release_id, "run_type": "eval"},
include_children=True)
eval_run = next(all_runs)
print(f'New Run found with Run ID of: {eval_run.id}')
eval_run.download_file(name="grtruth.zip", output_file_path=".")
unzipFolder('grtruth.zip')
eval_run.download_file(name="predicts.zip", output_file_path=".")
unzipFolder('predicts.zip')
tmp_files_path, results_files_path = self.__create_tmp_paths(draw_plot, show_animation)
gt_classes, gt_counter_per_class, n_classes, ground_truth_files_list = self.__ground_truth(tmp_files_path)
predicted_files_list = self.__predicted(gt_classes, tmp_files_path)
count_true_positives, ap_dictionary, mAP = self.__calculatemAP(draw_plot, show_animation, img_path,
gt_classes, n_classes, gt_counter_per_class,
results_files_path, gt_classes, tmp_files_path,
specific_iou_flagged)
shutil.rmtree(tmp_files_path)
pred_classes, pred_counter_per_class = self.__countTotalPredictions(predicted_files_list)
self.__plotTotalNumberOccurenceGroundTruth(draw_plot, ground_truth_files_list, n_classes,
results_files_path, gt_counter_per_class)
self.__writeGroundTruthObjects(results_files_path, gt_counter_per_class)
self.__countingTruePositives(pred_classes, gt_classes, count_true_positives)
self.__writePredictedObjects(results_files_path, pred_classes, pred_counter_per_class, count_true_positives)
self.__plotTotalNumberOcurrencesPredicted(draw_plot, predicted_files_list, pred_counter_per_class, results_files_path, count_true_positives)
self.__plotmAP(draw_plot, results_files_path, ap_dictionary, n_classes, mAP)
eval_run.download_file(name="model.zip", output_file_path=".")
unzipFolder('model.zip')
self._run.upload_file(name='saved_model.pb', path_or_stream="models/saved_model.pb")
self._run.register_model(
model_name = self._args.model_name,
model_path = self._args.model_name,
properties = {"release_id": self._args.release_id},
tags= {"mAP": f"{mAP*100:.2f}%"}
)
print("Registered model!")
zipFolder("report.zip", "mAP/results")
self._run.upload_file(name='report.zip', path_or_stream="report.zip")
print(f"Uploaded the report to experiment {self._run.experiment.name}")
print("Following files are uploaded")
print(self._run.get_file_names())
self._run.add_properties({"release_id": self._args.release_id, "run_type": "report"})
print(f"added properties: {self._run.properties}")
self._run.complete()
def __create_tmp_paths(self, draw_plot, show_animation):
tmp_files_path = "mAP/tmp_files"
if not os.path.exists(tmp_files_path):
os.makedirs(tmp_files_path)
results_files_path = "mAP/results"
if os.path.exists(results_files_path):
shutil.rmtree(results_files_path)
os.makedirs(results_files_path)
if draw_plot:
os.makedirs(results_files_path + "/classes")
if show_animation:
os.makedirs(results_files_path + "/images")
os.makedirs(results_files_path + "/images/single_predictions")
return tmp_files_path, results_files_path
    def __ground_truth(self, tmp_files_path):
        """Parse all ground-truth annotation files.

        Reads every mAP/ground-truth/*.txt, validates that a matching
        prediction file exists, tallies objects per class, and dumps each
        image's bounding boxes to <tmp_files_path>/<file_id>_ground_truth.json
        for later matching in __calculatemAP.

        Returns (sorted class names, per-class counts, number of classes,
        list of ground-truth file paths). Exits via __error on any problem.
        """
        ground_truth_files_list = glob.glob('mAP/ground-truth/*.txt')
        if len(ground_truth_files_list) == 0:
            self.__error("Error: No ground-truth files found!")
        ground_truth_files_list.sort()
        gt_counter_per_class = {}
        for txt_file in ground_truth_files_list:
            # file_id is the annotation file name without extension/path.
            file_id = txt_file.split(".txt",1)[0]
            file_id = os.path.basename(os.path.normpath(file_id))
            # Every ground-truth file must have a matching prediction file.
            if not os.path.exists('mAP/predicted/' + file_id + ".txt"):
                error_msg = "Error. File not found: predicted/" + file_id + ".txt\n"
                error_msg += "(You can avoid this error message by running extra/intersect-gt-and-pred.py)"
                self.__error(error_msg)
            lines_list = self.__file_lines_to_list(txt_file)
            bounding_boxes = []
            is_difficult = False
            for line in lines_list:
                try:
                    # Line format: "<class> <left> <top> <right> <bottom> [difficult]"
                    if "difficult" in line:
                        class_name, left, top, right, bottom, _difficult = line.split()
                        is_difficult = True
                    else:
                        class_name, left, top, right, bottom = line.split()
                except ValueError:
                    error_msg = "Error: File " + txt_file + " in the wrong format.\n"
                    error_msg += " Expected: <class_name> <left> <top> <right> <bottom> ['difficult']\n"
                    error_msg += " Received: " + line
                    error_msg += "\n\nIf you have a <class_name> with spaces between words you should remove them\n"
                    error_msg += "by running the script \"remove_space.py\" or \"rename_class.py\" in the \"extra/\" folder."
                    self.__error(error_msg)
                if class_name in self._args.ignore:
                    continue
                bbox = left + " " + top + " " + right + " " +bottom
                if is_difficult:
                    # "difficult" boxes are kept for matching but excluded
                    # from the per-class totals (and hence from recall).
                    bounding_boxes.append({"class_name":class_name, "bbox":bbox, "used":False, "difficult":True})
                    is_difficult = False
                else:
                    bounding_boxes.append({"class_name":class_name, "bbox":bbox, "used":False})
                    if class_name in gt_counter_per_class:
                        gt_counter_per_class[class_name] += 1
                    else:
                        gt_counter_per_class[class_name] = 1
            # One JSON per image; __calculatemAP mutates the "used" flags later.
            with open(tmp_files_path + "/" + file_id + "_ground_truth.json", 'w') as outfile:
                json.dump(bounding_boxes, outfile)
        gt_classes = list(gt_counter_per_class.keys())
        gt_classes = sorted(gt_classes)
        n_classes = len(gt_classes)
        return gt_classes, gt_counter_per_class, n_classes, ground_truth_files_list
def __check_format_flag(self, specific_iou_flagged, gt_classes):
if specific_iou_flagged:
n_args = len(self._args.set_class_iou)
error_msg = \
'\n --set-class-iou [class_1] [IoU_1] [class_2] [IoU_2] [...]'
if n_args % 2 != 0:
self.__error('Error, missing arguments. Flag usage:' + error_msg)
specific_iou_classes = self._args.set_class_iou[::2]
iou_list = self._args.set_class_iou[1::2]
if len(specific_iou_classes) != len(iou_list):
self.__error('Error, missing arguments. Flag usage:' + error_msg)
for tmp_class in specific_iou_classes:
if tmp_class not in gt_classes:
self.error('Error, unknown class \"' + tmp_class + '\". Flag usage:' + error_msg)
for num in iou_list:
if not self.__is_float_between_0_and_1(num):
self.__error('Error, IoU must be between 0.0 and 1.0. Flag usage:' + error_msg)
return specific_iou_classes, iou_list
    def __predicted(self, gt_classes, tmp_files_path):
        """Collect detections per class from all prediction files.

        For each ground-truth class, gathers every matching detection across
        mAP/predicted/*.txt, sorts them by descending confidence and dumps
        them to <tmp_files_path>/<class>_predictions.json (the order required
        by the AP computation). Returns the list of prediction file paths.
        """
        predicted_files_list = glob.glob('mAP/predicted/*.txt')
        predicted_files_list.sort()
        for class_index, class_name in enumerate(gt_classes):
            bounding_boxes = []
            for txt_file in predicted_files_list:
                file_id = txt_file.split(".txt",1)[0]
                file_id = os.path.basename(os.path.normpath(file_id))
                # Cross-check ground truth only once (on the first class pass).
                if class_index == 0:
                    if not os.path.exists('mAP/ground-truth/' + file_id + ".txt"):
                        error_msg = "Error. File not found: ground-truth/" + file_id + ".txt\n"
                        error_msg += "(You can avoid this error message by running extra/intersect-gt-and-pred.py)"
                        self.__error(error_msg)
                lines = self.__file_lines_to_list(txt_file)
                for line in lines:
                    try:
                        # Line format: "<class> <confidence> <left> <top> <right> <bottom>"
                        tmp_class_name, confidence, left, top, right, bottom = line.split()
                    except ValueError:
                        error_msg = "Error: File " + txt_file + " in the wrong format.\n"
                        error_msg += " Expected: <class_name> <confidence> <left> <top> <right> <bottom>\n"
                        error_msg += " Received: " + line
                        self.__error(error_msg)
                    if tmp_class_name == class_name:
                        bbox = left + " " + top + " " + right + " " +bottom
                        bounding_boxes.append({"confidence":confidence, "file_id":file_id, "bbox":bbox})
            # Descending confidence: AP is computed over this ranking.
            bounding_boxes.sort(key=lambda x:float(x['confidence']), reverse=True)
            with open(tmp_files_path + "/" + class_name + "_predictions.json", 'w') as outfile:
                json.dump(bounding_boxes, outfile)
        return predicted_files_list
    def __calculatemAP(self, draw_plot, show_animation, img_path, specific_iou_classes, n_classes, gt_counter_per_class, results_files_path, gt_classes, tmp_files_path, specific_iou_flagged):
        """Compute per-class AP and the overall mAP (VOC-style).

        Matches each ranked detection against the image's ground-truth boxes
        by IoU (threshold self._MINOVERLAP), accumulates TP/FP, derives
        precision/recall and AP per class, writes results.txt, and optionally
        draws per-class PR plots and an OpenCV match animation.

        Returns (count_true_positives, ap_dictionary, mAP).

        NOTE(review): the parameters `specific_iou_classes` and
        `specific_iou_flagged` are never used in this body (the caller passes
        gt_classes twice); per-class IoU thresholds are effectively ignored.
        """
        sum_AP = 0.0
        ap_dictionary = {}
        with open(results_files_path + "/results.txt", 'w') as results_file:
            results_file.write("# AP and precision/recall per class\n")
            count_true_positives = {}
            for class_index, class_name in enumerate(gt_classes):
                count_true_positives[class_name] = 0
                # Detections for this class, already sorted by confidence.
                predictions_file = tmp_files_path + "/" + class_name + "_predictions.json"
                # NOTE(review): json.load(open(...)) leaves the file handle to
                # the garbage collector (here and below).
                predictions_data = json.load(open(predictions_file))
                nd = len(predictions_data)
                tp = [0] * nd
                fp = [0] * nd
                for idx, prediction in enumerate(predictions_data):
                    file_id = prediction["file_id"]
                    if show_animation:
                        # Load the source image (and the cumulative overlay
                        # image, if one was already written for this file).
                        ground_truth_img = glob.glob1(img_path, file_id + ".*")
                        if len(ground_truth_img) == 0:
                            self.__error("Error. Image not found with id: " + file_id)
                        elif len(ground_truth_img) > 1:
                            self.__error("Error. Multiple image with id: " + file_id)
                        else:
                            img = cv2.imread(img_path + "/" + ground_truth_img[0])
                            img_cumulative_path = results_files_path + "/images/" + ground_truth_img[0]
                            if os.path.isfile(img_cumulative_path):
                                img_cumulative = cv2.imread(img_cumulative_path)
                            else:
                                img_cumulative = img.copy()
                            # Extra bottom strip for the status text overlay.
                            bottom_border = 60
                            BLACK = [0, 0, 0]
                            img = cv2.copyMakeBorder(img, 0, bottom_border, 0, 0, cv2.BORDER_CONSTANT, value=BLACK)
                    gt_file = tmp_files_path + "/" + file_id + "_ground_truth.json"
                    ground_truth_data = json.load(open(gt_file))
                    ovmax = -1
                    gt_match = -1
                    bb = [ float(x) for x in prediction["bbox"].split() ]
                    # Find the same-class ground-truth box with maximum IoU.
                    for obj in ground_truth_data:
                        if obj["class_name"] == class_name:
                            bbgt = [ float(x) for x in obj["bbox"].split() ]
                            bi = [max(bb[0],bbgt[0]), max(bb[1],bbgt[1]), min(bb[2],bbgt[2]), min(bb[3],bbgt[3])]
                            iw = bi[2] - bi[0] + 1
                            ih = bi[3] - bi[1] + 1
                            if iw > 0 and ih > 0:
                                # IoU = intersection / union (with +1 pixel
                                # inclusive box convention).
                                ua = (bb[2] - bb[0] + 1) * (bb[3] - bb[1] + 1) + (bbgt[2] - bbgt[0]
                                                + 1) * (bbgt[3] - bbgt[1] + 1) - iw * ih
                                ov = iw * ih / ua
                                if ov > ovmax:
                                    ovmax = ov
                                    gt_match = obj
                    if show_animation:
                        status = "NO MATCH FOUND!"
                    min_overlap = self._MINOVERLAP
                    if ovmax >= min_overlap:
                        if "difficult" not in gt_match:
                            if not bool(gt_match["used"]):
                                # First sufficient-overlap match: true positive;
                                # mark the gt box as consumed (persisted so a
                                # later class pass sees the updated flag).
                                tp[idx] = 1
                                gt_match["used"] = True
                                count_true_positives[class_name] += 1
                                with open(gt_file, 'w') as f:
                                    f.write(json.dumps(ground_truth_data))
                                if show_animation:
                                    status = "MATCH!"
                            else:
                                # Ground truth already matched: duplicate = FP.
                                fp[idx] = 1
                                if show_animation:
                                    status = "REPEATED MATCH!"
                    else:
                        fp[idx] = 1
                        if ovmax > 0:
                            status = "INSUFFICIENT OVERLAP"
                    if show_animation:
                        # Draw status text and boxes, show the frame, and save
                        # both the single-prediction and cumulative images.
                        height, widht = img.shape[:2]
                        white = (255,255,255)
                        light_blue = (255,200,100)
                        green = (0,255,0)
                        light_red = (30,30,255)
                        margin = 10
                        v_pos = int(height - margin - (bottom_border / 2))
                        text = "Image: " + ground_truth_img[0] + " "
                        img, line_width = self.__draw_text_in_image(img, text, (margin, v_pos), white, 0)
                        text = "Class [" + str(class_index) + "/" + str(n_classes) + "]: " + class_name + " "
                        img, line_width = self.__draw_text_in_image(img, text, (margin + line_width, v_pos), light_blue, line_width)
                        if ovmax != -1:
                            color = light_red
                            if status == "INSUFFICIENT OVERLAP":
                                text = "IoU: {0:.2f}% ".format(ovmax*100) + "< {0:.2f}% ".format(min_overlap*100)
                            else:
                                text = "IoU: {0:.2f}% ".format(ovmax*100) + ">= {0:.2f}% ".format(min_overlap*100)
                                color = green
                            img, _ = self.__draw_text_in_image(img, text, (margin + line_width, v_pos), color, line_width)
                        v_pos += int(bottom_border / 2)
                        rank_pos = str(idx+1)
                        text = "Prediction #rank: " + rank_pos + " confidence: {0:.2f}% ".format(float(prediction["confidence"])*100)
                        img, line_width = self.__draw_text_in_image(img, text, (margin, v_pos), white, 0)
                        color = light_red
                        if status == "MATCH!":
                            color = green
                        text = "Result: " + status + " "
                        img, line_width = self.__draw_text_in_image(img, text, (margin + line_width, v_pos), color, line_width)
                        font = cv2.FONT_HERSHEY_SIMPLEX
                        if ovmax > 0:
                            bbgt = [ int(x) for x in gt_match["bbox"].split() ]
                            cv2.rectangle(img,(bbgt[0],bbgt[1]),(bbgt[2],bbgt[3]),light_blue,2)
                            cv2.rectangle(img_cumulative,(bbgt[0],bbgt[1]),(bbgt[2],bbgt[3]),light_blue,2)
                            cv2.putText(img_cumulative, class_name, (bbgt[0],bbgt[1] - 5), font, 0.6, light_blue, 1, cv2.LINE_AA)
                        bb = [int(i) for i in bb]
                        cv2.rectangle(img,(bb[0],bb[1]),(bb[2],bb[3]),color,2)
                        cv2.rectangle(img_cumulative,(bb[0],bb[1]),(bb[2],bb[3]),color,2)
                        cv2.putText(img_cumulative, class_name, (bb[0],bb[1] - 5), font, 0.6, color, 1, cv2.LINE_AA)
                        cv2.imshow("Animation", img)
                        cv2.waitKey(20)
                        output_img_path = results_files_path + "/images/single_predictions/" + class_name + "_prediction" + str(idx) + ".jpg"
                        cv2.imwrite(output_img_path, img)
                        cv2.imwrite(img_cumulative_path, img_cumulative)
                # Turn the per-rank TP/FP flags into cumulative counts.
                cumsum = 0
                for idx, val in enumerate(fp):
                    fp[idx] += cumsum
                    cumsum += val
                cumsum = 0
                for idx, val in enumerate(tp):
                    tp[idx] += cumsum
                    cumsum += val
                # Recall = cumTP / total gt objects; Precision = cumTP / detections.
                rec = tp[:]
                for idx, val in enumerate(tp):
                    rec[idx] = float(tp[idx]) / gt_counter_per_class[class_name]
                prec = tp[:]
                for idx, val in enumerate(tp):
                    prec[idx] = float(tp[idx]) / (fp[idx] + tp[idx])
                # __voc_ap also pads rec/prec in place with boundary sentinels.
                ap, mrec, mprec = self.__voc_ap(rec, prec)
                sum_AP += ap
                text = "{0:.2f}%".format(ap*100) + " = " + class_name + " AP "
                rounded_prec = [ '%.2f' % elem for elem in prec ]
                rounded_rec = [ '%.2f' % elem for elem in rec ]
                results_file.write(text + "\n Precision: " + str(rounded_prec) + "\n Recall :" + str(rounded_rec) + "\n\n")
                if not self._args.quiet:
                    print(text)
                ap_dictionary[class_name] = ap
                if draw_plot:
                    # Precision/recall curve with the AP area shaded.
                    plt.plot(rec, prec, '-o')
                    area_under_curve_x = mrec[:-1] + [mrec[-2]] + [mrec[-1]]
                    area_under_curve_y = mprec[:-1] + [0.0] + [mprec[-1]]
                    plt.fill_between(area_under_curve_x, 0, area_under_curve_y, alpha=0.2, edgecolor='r')
                    fig = plt.gcf()
                    fig.canvas.set_window_title('AP ' + class_name)
                    plt.title('class: ' + text)
                    plt.xlabel('Recall')
                    plt.ylabel('Precision')
                    axes = plt.gca()
                    axes.set_xlim([0.0,1.0])
                    axes.set_ylim([0.0,1.05])
                    fig.savefig(results_files_path + "/classes/" + class_name + ".png")
                    plt.cla()
            if show_animation:
                cv2.destroyAllWindows()
            # mAP = mean of the per-class APs.
            results_file.write("\n# mAP of all classes\n")
            mAP = sum_AP / n_classes
            text = "mAP = {0:.2f}%".format(mAP*100)
            results_file.write(text + "\n")
            print(text)
        return count_true_positives, ap_dictionary, mAP
def __countTotalPredictions(self, predicted_files_list):
pred_counter_per_class = {}
for txt_file in predicted_files_list:
lines_list = self.__file_lines_to_list(txt_file)
for line in lines_list:
class_name = line.split()[0]
if class_name in self._args.ignore:
continue
if class_name in pred_counter_per_class:
pred_counter_per_class[class_name] += 1
else:
pred_counter_per_class[class_name] = 1
pred_classes = list(pred_counter_per_class.keys())
return pred_classes, pred_counter_per_class
def __plotTotalNumberOccurenceGroundTruth(self, draw_plot, ground_truth_files_list, n_classes, results_files_path, gt_counter_per_class):
if draw_plot:
window_title = "Ground-Truth Info"
plot_title = "Ground-Truth\n"
plot_title += "(" + str(len(ground_truth_files_list)) + " files and " + str(n_classes) + " classes)"
x_label = "Number of objects per class"
output_path = results_files_path + "/Ground-Truth Info.png"
to_show = False
plot_color = 'forestgreen'
self.__draw_plot_func(
gt_counter_per_class,
n_classes,
window_title,
plot_title,
x_label,
output_path,
to_show,
plot_color,
'',
)
def __writeGroundTruthObjects(self, results_files_path, gt_counter_per_class):
with open(results_files_path + "/results.txt", 'a') as results_file:
results_file.write("\n# Number of ground-truth objects per class\n")
for class_name in sorted(gt_counter_per_class):
results_file.write(class_name + ": " + str(gt_counter_per_class[class_name]) + "\n")
def __countingTruePositives(self, pred_classes, gt_classes, count_true_positives):
for class_name in pred_classes:
if class_name not in gt_classes:
count_true_positives[class_name] = 0
def __writePredictedObjects(self, results_files_path, pred_classes, pred_counter_per_class, count_true_positives):
with open(results_files_path + "/results.txt", 'a') as results_file:
results_file.write("\n# Number of predicted objects per class\n")
for class_name in sorted(pred_classes):
n_pred = pred_counter_per_class[class_name]
text = class_name + ": " + str(n_pred)
text += " (tp:" + str(count_true_positives[class_name]) + ""
text += ", fp:" + str(n_pred - count_true_positives[class_name]) + ")\n"
results_file.write(text)
def __plotTotalNumberOcurrencesPredicted(self, draw_plot, predicted_files_list, pred_counter_per_class, results_files_path, count_true_positives):
if draw_plot:
window_title = "Predicted Objects Info"
plot_title = "Predicted Objects\n"
plot_title += "(" + str(len(predicted_files_list)) + " files and "
count_non_zero_values_in_dictionary = sum(int(x) > 0 for x in list(pred_counter_per_class.values()))
plot_title += str(count_non_zero_values_in_dictionary) + " detected classes)"
x_label = "Number of objects per class"
output_path = results_files_path + "/Predicted Objects Info.png"
to_show = False
plot_color = 'forestgreen'
true_p_bar = count_true_positives
self.__draw_plot_func(
pred_counter_per_class,
len(pred_counter_per_class),
window_title,
plot_title,
x_label,
output_path,
to_show,
plot_color,
true_p_bar
)
def __plotmAP(self, draw_plot, results_files_path, ap_dictionary, n_classes, mAP):
if draw_plot:
window_title = "mAP"
plot_title = "mAP = {0:.2f}%".format(mAP*100)
x_label = "Average Precision"
output_path = results_files_path + "/mAP.png"
to_show = False
plot_color = 'royalblue'
self.__draw_plot_func(
ap_dictionary,
n_classes,
window_title,
plot_title,
x_label,
output_path,
to_show,
plot_color,
""
)
def __error(self, msg):
print(msg)
sys.exit(0)
def __is_float_between_0_and_1(self, value):
try:
val = float(value)
if val > 0.0 and val < 1.0:
return True
else:
return False
except ValueError:
return False
def __voc_ap(self, rec, prec):
rec.insert(0, 0.0)
rec.append(1.0)
mrec = rec[:]
prec.insert(0, 0.0)
prec.append(0.0)
mpre = prec[:]
for i in range(len(mpre)-2, -1, -1):
mpre[i] = max(mpre[i], mpre[i+1])
i_list = []
for i in range(1, len(mrec)):
if mrec[i] != mrec[i-1]:
i_list.append(i)
ap = 0.0
for i in i_list:
ap += ((mrec[i]-mrec[i-1])*mpre[i])
return ap, mrec, mpre
def __file_lines_to_list(self, path):
with open(path) as f:
content = f.readlines()
content = [x.strip() for x in content]
return content
def __draw_text_in_image(self, img, text, pos, color, line_width):
font = cv2.FONT_HERSHEY_PLAIN
fontScale = 1
lineType = 1
bottomLeftCornerOfText = pos
cv2.putText(img, text,
bottomLeftCornerOfText,
font,
fontScale,
color,
lineType)
text_width, _ = cv2.getTextSize(text, font, fontScale, lineType)[0]
return img, (line_width + text_width)
def __adjust_axes(self, r, t, fig, axes):
bb = t.get_window_extent(renderer=r)
text_width_inches = bb.width / fig.dpi
current_fig_width = fig.get_figwidth()
new_fig_width = current_fig_width + text_width_inches
propotion = new_fig_width / current_fig_width
x_lim = axes.get_xlim()
axes.set_xlim([x_lim[0], x_lim[1]*propotion])
    def __draw_plot_func(self, dictionary, n_classes, window_title, plot_title, x_label, output_path, to_show, plot_color, true_p_bar):
        """Draw a horizontal bar chart of *dictionary* (class -> value).

        When *true_p_bar* is a dict (not ""), each bar is split into a
        true-positive and a false-prediction segment. The figure is saved to
        *output_path*; it is only displayed when *to_show* is truthy.
        Relies on matplotlib's implicit current-figure state throughout.
        """
        # Bars sorted by value so the largest class ends up on top.
        sorted_dic_by_value = sorted(dictionary.items(), key=operator.itemgetter(1))
        sorted_keys, sorted_values = zip(*sorted_dic_by_value)
        if true_p_bar != "":
            # Stacked mode: FP segment first, TP segment stacked to its right.
            fp_sorted = []
            tp_sorted = []
            for key in sorted_keys:
                fp_sorted.append(dictionary[key] - true_p_bar[key])
                tp_sorted.append(true_p_bar[key])
            plt.barh(range(n_classes), fp_sorted, align='center', color='crimson', label='False Predictions')
            plt.barh(range(n_classes), tp_sorted, align='center', color='forestgreen', label='True Predictions', left=fp_sorted)
            plt.legend(loc='lower right')
            fig = plt.gcf()
            axes = plt.gca()
            r = fig.canvas.get_renderer()
            for i, val in enumerate(sorted_values):
                fp_val = fp_sorted[i]
                tp_val = tp_sorted[i]
                fp_str_val = " " + str(fp_val)
                tp_str_val = fp_str_val + " " + str(tp_val)
                # TP label drawn first (longer string), FP label over it.
                t = plt.text(val, i, tp_str_val, color='forestgreen', va='center', fontweight='bold')
                plt.text(val, i, fp_str_val, color='crimson', va='center', fontweight='bold')
                # Re-fit the axes once, using the widest (last) bar's label.
                if i == (len(sorted_values)-1):
                    self.__adjust_axes(r, t, fig, axes)
        else:
            # Plain single-color bars.
            plt.barh(range(n_classes), sorted_values, color=plot_color)
            fig = plt.gcf()
            axes = plt.gca()
            r = fig.canvas.get_renderer()
            for i, val in enumerate(sorted_values):
                str_val = " " + str(val)
                if val < 1.0:
                    # Fractional values (e.g. AP scores) get 2 decimals.
                    str_val = " {0:.2f}".format(val)
                t = plt.text(val, i, str_val, color=plot_color, va='center', fontweight='bold')
                if i == (len(sorted_values)-1):
                    self.__adjust_axes(r, t, fig, axes)
        fig.canvas.set_window_title(window_title)
        tick_font_size = 12
        plt.yticks(range(n_classes), sorted_keys, fontsize=tick_font_size)
        # Grow the figure height so every class tick label fits.
        init_height = fig.get_figheight()
        dpi = fig.dpi
        height_pt = n_classes * (tick_font_size * 1.4)
        height_in = height_pt / dpi
        top_margin = 0.15
        bottom_margin = 0.05
        figure_height = height_in / (1 - top_margin - bottom_margin)
        if figure_height > init_height:
            fig.set_figheight(figure_height)
        plt.title(plot_title, fontsize=14)
        plt.xlabel(x_label, fontsize='large')
        fig.tight_layout()
        fig.savefig(output_path)
        if to_show:
            plt.show()
        plt.close()
if __name__ == '__main__':
    # Use a distinct variable name: the original rebound the name `mAP`
    # to the instance, shadowing the class it was constructed from.
    report = mAP()
    report.main()
biencoder/useb/useb/setup.py | dumpmemory/sgpt | 91 | 12766656 | <gh_stars>10-100
from setuptools import setup, find_packages

# The README doubles as the long description shown on PyPI.
with open("README.md", "r", encoding="utf-8") as fh:
    readme = fh.read()

setup(
    name="useb",
    version="0.0.1",
    author="<NAME>",
    author_email="<EMAIL>",
    description="Heterogenous, Task- and Domain-Specific Benchmark for Unsupervised Sentence Embeddings used in the TSDAE paper.",
    long_description=readme,
    long_description_content_type="text/markdown",
    url="https://github.com/kwang2049/useb",
    # Automatically include every package under this directory.
    packages=find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.6",
    install_requires=[
        'sentence-transformers>=1.2.0',
        'pytrec_eval'
    ],
)
lcc_web/web/interface/lcc_views/jobs.py | mavrix93/LightCurvesClassifier | 12 | 12766657 | import glob
import json
import os
from wsgiref.util import FileWrapper
import shutil
import pandas as pd
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http.response import HttpResponse
from django.shortcuts import render
from interface.models import DbQuery
from interface.models import StarsFilter
@login_required(login_url='login/')
def all_filters(request):
    """Render the current user's star-filter jobs as a table, newest first."""
    header = ["Job id", "Status", "Start date",
              "Finish date", "Descriptors", "Deciders", "Link"]
    dat = []
    for star_filt in StarsFilter.objects.filter(user=request.user):
        row = [star_filt.id,
               star_filt.status,
               str(star_filt.start_date),
               str(star_filt.finish_date),
               # Show one descriptor per line in the HTML table cell.
               star_filt.descriptors.replace(";", "<br>"),
               star_filt.deciders,
               str(star_filt.id)]
        dat.append(row)

    table = pd.DataFrame(
        dat, columns=["fold_name", "status", "start", "stop", "descr", "decid", "job_id"])
    table["start"] = pd.to_datetime(table["start"])
    table.sort_values(by="start", ascending=False, inplace=True)

    # Keep the job ids for per-row links, but drop the column from the
    # rendered table. (Keyword axis: positional axis was removed in pandas 2.)
    job_ids = table["job_id"].values.tolist()
    table = table.drop('job_id', axis=1)
    return render(request, 'interface/jobs.html', {"page_title": "Star filter jobs",
                                                   "header": header,
                                                   "stars_filter": True,
                                                   "delete_prefix": '"../{}/delete/"'.format(os.environ.get(
                                                       "DOCKYARD_APP_CONTEXT")),
                                                   "table": zip(table.values.tolist(), job_ids)})
@login_required(login_url='login/')
def _all_filters(request):
    """Legacy variant of all_filters that reads job status from the
    filesystem (one status.json per job folder) instead of the database.
    """
    stars_filters_path = os.path.join(settings.MEDIA_ROOT, str(request.user.id), "stars_filters")

    header = ["Job id", "Status", "Date", "Descriptors", "Deciders", "Link"]
    dat = []
    for folder_name in os.listdir(stars_filters_path):
        try:
            with open(os.path.join(stars_filters_path, folder_name, "status.json"), 'r') as status_file:
                status = json.load(status_file)

            row = [folder_name,
                   status.get("status", ""),
                   status.get("start", ""),
                   status.get("descriptors", ""),
                   status.get("deciders", ""),
                   str(folder_name)]
            dat.append(row)
        except (OSError, ValueError):
            # Best-effort listing: skip folders whose status.json is missing
            # or unparsable (json.JSONDecodeError is a ValueError). This was
            # a bare `except` that silently hid every kind of error.
            pass

    table = pd.DataFrame(
        dat, columns=["fold_name", "status", "start", "descr", "decid", "job_id"])
    table["start"] = pd.to_datetime(table["start"])
    table.sort_values(by="start", ascending=False, inplace=True)

    # Keyword axis: positional axis was removed in pandas 2.
    job_ids = table["job_id"].values.tolist()
    table = table.drop('job_id', axis=1)
    return render(request, 'interface/jobs.html', {"page_title": "Star filter jobs",
                                                   "header": header,
                                                   "stars_filter": True,
                                                   "table": zip(table.values.tolist(), job_ids)})
@login_required(login_url='login/')
def all_results(request):
    """Render the current user's database-query jobs as a table, newest first."""
    header = ["Job id", "Status", "Started",
              "Finished", "Queries", "Connectors", "Link"]
    dat = []
    for query in DbQuery.objects.filter(user=request.user):
        row = [query.id,
               query.status,
               str(query.start_date),
               str(query.finish_date),
               str(query.queries),
               query.connectors,
               str(query.id)]
        dat.append(row)

    table = pd.DataFrame(
        dat, columns=["fold_name", "status", "started", "finished", "queries", "conn", "job_id"])
    table["started"] = pd.to_datetime(table["started"])
    table.sort_values(by="started", ascending=False, inplace=True)

    # Keep the job ids for per-row links, but drop the column from the
    # rendered table. (Keyword axis: positional axis was removed in pandas 2.)
    job_ids = table["job_id"].values.tolist()
    table = table.drop('job_id', axis=1)
    return render(request, 'interface/jobs.html', {"page_title": "Queries jobs",
                                                   "stars_filter": False,
                                                   "header": header,
                                                   "delete_prefix": '"../{}/delete/"'.format(os.environ.get(
                                                       "DOCKYARD_APP_CONTEXT")),
                                                   "table": zip(table.values.tolist(), job_ids)})
def download_file(request, file_name):
    """
    Send a file through Django without loading the whole file into
    memory at once. The FileWrapper will turn the file object into an
    iterator for chunks of 8KB.

    The ``file_name`` prefix selects what is served:
      * "estim..."  -> the pickled estimator of a star-filter job
      * "filt..."   -> the first ``*.filter`` file of a star-filter job
      * otherwise   -> the zipped query results archive
    """
    if file_name.startswith("estim"):
        file_type = "estim"
        # strip the 9-character prefix; presumably "estimator" — TODO confirm
        file_name = file_name[9:]
        filename = os.path.join(settings.MEDIA_ROOT, str(request.user.id), "stars_filters", file_name, "estimator")
    elif not file_name.startswith("filt"):
        # no recognised prefix: treat file_name as a query-results job id
        file_type = "query"
        filename = os.path.join(
            settings.MEDIA_ROOT, str(request.user.id), "query_results", file_name + ".zip")
    else:
        file_type = "filter"
        # strip the 4-character "filt" prefix to get the job folder name
        file_name = file_name[4:]
        pa = os.path.join(settings.MEDIA_ROOT, str(request.user.id), "stars_filters", file_name)
        filter_names = glob.glob(pa + "/*.filter")
        if filter_names:
            # serve the first filter found in the job folder
            filter_name = os.path.basename(filter_names[0])
            filename = os.path.join(
                settings.MEDIA_ROOT, str(request.user.id), "stars_filters", file_name, filter_name)
        else:
            return render(request, 'interface/error_page.html', {"error_m": "There is no filter in %s" % file_name})

    wrapper = FileWrapper(open(filename, 'rb'))
    response = HttpResponse(wrapper, content_type='text/plain')
    response['Content-Length'] = os.path.getsize(filename)
    if file_type == "filter":
        response[
            'Content-Disposition'] = 'attachment; filename="%s.filter"' % filter_name
    elif file_type == "estim":
        response[
            'Content-Disposition'] = 'attachment; filename="estimator"'
    else:
        response[
            'Content-Disposition'] = 'attachment; filename="results_%s.zip"' % file_name
    return response
| 2.125 | 2 |
configs/__init__.py | kaylode/KAI | 4 | 12766658 | <gh_stars>1-10
from .configs import Config
def get_config(name):
    """Return the project Config object registered under *name*.

    Raises ValueError for unknown names instead of silently falling
    through and returning None (which only surfaced later as an opaque
    AttributeError at the call site).
    """
    if name == 'KAI':
        return Config('./configs/kai.yaml')
    raise ValueError('Unknown config name: %r' % (name,))
backend/home/management/commands/generate_project_report.py | crowdbotics-apps/test-31818 | 2 | 12766659 | <reponame>crowdbotics-apps/test-31818
import json
import subprocess
import django
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Management command that dumps the project's models and URLs as one JSON object."""

    help = "Generate a json with all Models and URLs of the project."

    def handle(self, *args, **options):
        """Print ``{"models": [...], "urls": [...]}`` to stdout."""
        # every registered model, including auto-created (M2M through
        # tables) and swapped-out ones
        models = django.apps.apps.get_models(
            include_auto_created=True, include_swapped=True
        )
        # 'show_urls' is an extra management command (presumably from
        # django-extensions — confirm); run it in a subprocess and
        # capture its JSON output
        cmd = "python3 manage.py show_urls --format=json"
        loc = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
        print(
            json.dumps(
                {
                    # str(model) looks like "<class 'app.models.Name'>";
                    # keep only the trailing class name
                    "models": [
                        str(model).split(".")[-1].replace("'", "").strip(">")
                        for model in models
                    ],
                    "urls": json.loads(loc.stdout.decode().strip()),
                }
            )
        )
| 2.359375 | 2 |
backend/dataset/validate/validations/audio_metadata_check.py | agupta54/ulca | 3 | 12766660 | from models.abstract_handler import BaseValidator
from configs.configs import dataset_type_asr, dataset_type_asr_unlabeled, dataset_type_tts, asr_minimum_words_per_min
import logging
from logging.config import dictConfig
log = logging.getLogger('file')
import audio_metadata
from word2number import w2n
from datetime import timedelta
import os
class AudioMetadataCheck(BaseValidator):
    """
    Verifies the metadata for the audio file and
    adds the durationInSeconds field. Also verifies
    the correlation between the length of text and duration of audio clip
    """

    def execute(self, request):
        """Validate one dataset record; return a FAILED dict on any problem,
        otherwise delegate to the next validator in the chain."""
        log.info('----Executing the audio file metadata check----')
        try:
            # only audio-bearing dataset types carry a file to check
            if request["datasetType"] in [dataset_type_asr, dataset_type_asr_unlabeled, dataset_type_tts]:
                audio_file = request['record']['fileLocation']
                try:
                    if os.path.exists(audio_file) and os.path.isfile(audio_file):
                        file_size = os.path.getsize(audio_file)
                    else:
                        log.info('The audio file does not exist in file store')
                        return {"message": "Exception while executing Audio metadata check", "code": "SERVER_PROCESSING_ERROR", "status": "FAILED"}
                except Exception as e:
                    log.exception(f"Exception while accessing file from file store: {str(e)}")
                    return {"message": "Exception while executing Audio metadata check", "code": "SERVER_PROCESSING_ERROR", "status": "FAILED"}

                # an empty file can never be decoded
                if file_size == 0:
                    return {"message": "The audio file is unplayable, the filesize is 0 bytes", "code": "ZERO_BYTES_FILE", "status": "FAILED"}

                try:
                    if os.path.exists(audio_file) and os.path.isfile(audio_file):
                        metadata = audio_metadata.load(audio_file)
                    else:
                        log.info('The audio file does not exist in file store')
                        return {"message": "Exception while executing Audio metadata check", "code": "SERVER_PROCESSING_ERROR", "status": "FAILED"}
                except Exception as e:
                    log.exception(f"Exception while loading the audio file: {str(e)}")
                    return {"message": "Unable to load the audio file, file format is unsupported or the file is corrupt", "code": "INVALID_AUDIO_FILE", "status": "FAILED"}

                # durationInSeconds: explicit field wins, then start/end
                # timestamps ("HH:MM:SS[.fff]"), else the decoded stream length
                if 'duration' in request['record'].keys():
                    request['record']['durationInSeconds'] = request['record']['duration']
                elif 'startTime' in request['record'].keys() and 'endTime' in request['record'].keys():
                    h, m, s = request['record']['startTime'].split(':')
                    start_t = timedelta(hours=int(h), minutes=int(m), seconds=float(s))
                    h, m, s = request['record']['endTime'].split(':')
                    end_t = timedelta(hours=int(h), minutes=int(m), seconds=float(s))
                    request['record']['durationInSeconds'] = (end_t-start_t).total_seconds()
                else:
                    request['record']['durationInSeconds'] = metadata.streaminfo.duration

                # declared samplingRate is in kHz, stream metadata is in Hz
                if 'samplingRate' in request['record'].keys() and request['record']['samplingRate'] != None:
                    if metadata.streaminfo.sample_rate != request['record']['samplingRate']*1000:
                        error_message = 'Sampling rate does not match the specified value: Expected Value - ' + str(metadata.streaminfo.sample_rate/1000) + ', Specified Value - ' + str(request['record']['samplingRate'])
                        return {"message": error_message, "code": "INCORRECT_SAMPLING_RATE", "status": "FAILED"}

                # bitsPerSample is declared as a word (e.g. "sixteen"), hence w2n
                if 'bitsPerSample' in request['record'].keys() and request['record']['bitsPerSample'] != None:
                    if metadata.streaminfo.bit_depth != w2n.word_to_num(request['record']['bitsPerSample']):
                        error_message = 'Bits per sample does not match the specified value: Expected Value - ' + str(metadata.streaminfo.bit_depth) + ', Specified Value - ' + str(request['record']['bitsPerSample'])
                        return {"message": error_message, "code": "INCORRECT_BITS_PER_SAMPLE", "status": "FAILED"}

                # labelled types must have plausibly many words per minute
                if request["datasetType"] in [dataset_type_asr, dataset_type_tts]:
                    num_words = len(list(request['record']['text'].split()))
                    words_per_minute = (num_words/request['record']['durationInSeconds'])*60
                    if words_per_minute < asr_minimum_words_per_min:
                        return {"message": "Number of words too less for the audio duration", "code": "AUDIO_TEXT_INVALID_CORRELATION", "status": "FAILED"}

            log.info('----Audio metadata check -> Passed----')
            return super().execute(request)
        except Exception as e:
            log.exception(f"Exception while executing Audio metadata check: {str(e)}")
            return {"message": "Exception while executing Audio metadata check", "code": "SERVER_PROCESSING_ERROR", "status": "FAILED"}
# Log config
# Routes DEBUG+ records of the 'file' logger and the root logger to both
# 'info.log' and stdout, with timestamp/file/line/thread context per line.
dictConfig({
    'version': 1,
    'formatters': {'default': {
        'format': '[%(asctime)s] {%(filename)s:%(lineno)d} %(threadName)s %(levelname)s in %(module)s: %(message)s',
    }},
    'handlers': {
        'info': {
            'class': 'logging.FileHandler',
            'level': 'DEBUG',
            'formatter': 'default',
            'filename': 'info.log'
        },
        'console': {
            'class': 'logging.StreamHandler',
            'level': 'DEBUG',
            'formatter': 'default',
            'stream': 'ext://sys.stdout',
        }
    },
    'loggers': {
        'file': {
            'level': 'DEBUG',
            'handlers': ['info', 'console'],
            'propagate': ''
        }
    },
    'root': {
        'level': 'DEBUG',
        'handlers': ['info', 'console']
    }
})
BitTornado/Storage/FileSelector.py | crossbrowsertesting/BitTornado | 1 | 12766661 | <filename>BitTornado/Storage/FileSelector.py<gh_stars>1-10
import random
class FileSelector:
    """Maps per-file download priorities onto per-piece priorities.

    Priorities per file are -1 (disabled), 0 (highest), 1 (normal),
    2 (lowest). Changing priorities enables/disables files in Storage,
    reblocks the StorageWrapper and updates the piece picker. Priority
    changes are applied asynchronously via the scheduler (``sched``).
    """

    def __init__(self, files, piece_length, bufferdir,
                 storage, storagewrapper, sched, failfunc):
        self.files = files
        self.storage = storage
        self.storagewrapper = storagewrapper
        self.sched = sched
        self.failfunc = failfunc
        self.downloader = None
        self.picker = None

        storage.set_bufferdir(bufferdir)

        self.numfiles = len(files)
        self.priority = [1] * self.numfiles
        self.new_priority = None
        self.new_partials = None
        # filepieces[f] = tuple of piece indices overlapping file f
        self.filepieces = []
        total = 0
        for _, length in files:
            if not length:
                self.filepieces.append(())
            else:
                pieces = range(int(total / piece_length),
                               int((total + length - 1) / piece_length) + 1)
                self.filepieces.append(tuple(pieces))
            total += length
        self.numpieces = int((total + piece_length - 1) / piece_length)
        self.piece_priority = [1] * self.numpieces

    def init_priority(self, new_priority):
        """Validate and stage an initial priority list; returns False on
        bad input or if a disabled file cannot be set up."""
        try:
            assert len(new_priority) == self.numfiles
            for v in new_priority:
                assert isinstance(v, int)
                assert v >= -1
                assert v <= 2
        except AssertionError:
            return False
        try:
            files_updated = False
            for f in range(self.numfiles):
                if new_priority[f] < 0:
                    self.storage.disable_file(f)
                    files_updated = True
            if files_updated:
                self.storage.reset_file_status()
            self.new_priority = new_priority
        except (IOError, OSError) as e:
            # f is the index of the file that failed (loop variable persists)
            self.failfunc("can't open partial file for "
                          + self.files[f][0] + ': ' + str(e))
            return False
        return True

    '''
d['priority'] = [file #1 priority [,file #2 priority...] ]
a list of download priorities for each file.
Priority may be -1, 0, 1, 2. -1 = download disabled,
0 = highest, 1 = normal, 2 = lowest.
Also see Storage.pickle and StorageWrapper.pickle for additional keys.
'''

    def unpickle(self, d):
        """Restore state from a resume-data dict.

        NOTE(review): if ``d`` has no 'priority' key, ``self.new_priority``
        may still be None when passed to ``_get_piece_priority_list`` —
        confirm the expected resume-dict contents always include it.
        """
        if 'priority' in d and not self.init_priority(d['priority']):
            return
        pieces = self.storage.unpickle(d)
        if not pieces:  # don't bother, nothing restoreable
            return
        new_piece_priority = self._get_piece_priority_list(self.new_priority)
        self.storagewrapper.reblock([i == -1 for i in new_piece_priority])
        self.new_partials = self.storagewrapper.unpickle(d, pieces)

    def tie_in(self, picker, cancelfunc, requestmorefunc, rerequestfunc):
        """Wire in the piece picker and download callbacks, then apply any
        priorities/partials staged by init_priority/unpickle."""
        self.picker = picker
        self.cancelfunc = cancelfunc
        self.requestmorefunc = requestmorefunc
        self.rerequestfunc = rerequestfunc

        if self.new_priority:
            self.priority = self.new_priority
            self.new_priority = None
            self.new_piece_priority = self._set_piece_priority(self.priority)

        if self.new_partials:
            random.shuffle(self.new_partials)
            for p in self.new_partials:
                self.picker.requested(p)
        self.new_partials = None

    def _set_files_disabled(self, old_priority, new_priority):
        """Enable/disable files whose -1 status changed, preserving any
        already-downloaded boundary data by re-reading it before the
        switch and re-writing it afterwards."""
        old_disabled = [p == -1 for p in old_priority]
        new_disabled = [p == -1 for p in new_priority]
        data_to_update = []
        for f in range(self.numfiles):
            if new_disabled[f] != old_disabled[f]:
                data_to_update.extend(self.storage.get_piece_update_list(f))
        buffer = []
        for piece, start, length in data_to_update:
            if self.storagewrapper.has_data(piece):
                data = self.storagewrapper.read_raw(piece, start, length)
                if data is None:
                    return False
                buffer.append((piece, start, data))

        files_updated = False

        try:
            for f in range(self.numfiles):
                if new_disabled[f] and not old_disabled[f]:
                    self.storage.disable_file(f)
                    files_updated = True
                if old_disabled[f] and not new_disabled[f]:
                    self.storage.enable_file(f)
                    files_updated = True
        except (IOError, OSError) as e:
            if new_disabled[f]:
                msg = "can't open partial file for "
            else:
                msg = 'unable to open '
            self.failfunc(msg + self.files[f][0] + ': ' + str(e))
            return False
        if files_updated:
            self.storage.reset_file_status()

        changed_pieces = set()
        for piece, start, data in buffer:
            if not self.storagewrapper.write_raw(piece, start, data):
                return False
            data.release()
            changed_pieces.add(piece)
        if not self.storagewrapper.doublecheck_data(changed_pieces):
            return False

        return True

    def _get_piece_priority_list(self, file_priority_list):
        """Per-piece priority = best (lowest non -1) priority of any file
        overlapping that piece; -1 only if every overlapping file is -1."""
        l = [-1] * self.numpieces
        for f in range(self.numfiles):
            if file_priority_list[f] == -1:
                continue
            for i in self.filepieces[f]:
                if l[i] == -1:
                    l[i] = file_priority_list[f]
                    continue
                l[i] = min(l[i], file_priority_list[f])
        return l

    def _set_piece_priority(self, new_priority):
        """Push new per-piece priorities into the picker, cancel requests
        for newly blocked pieces and request newly unblocked ones."""
        was_complete = self.storagewrapper.am_I_complete()
        new_piece_priority = self._get_piece_priority_list(new_priority)
        pieces = list(range(self.numpieces))
        random.shuffle(pieces)
        new_blocked = []
        new_unblocked = []
        for piece in pieces:
            self.picker.set_priority(piece, new_piece_priority[piece])
            o = self.piece_priority[piece] == -1
            n = new_piece_priority[piece] == -1
            if n and not o:
                new_blocked.append(piece)
            if o and not n:
                new_unblocked.append(piece)
        if new_blocked:
            self.cancelfunc(new_blocked)
        self.storagewrapper.reblock([i == -1 for i in new_piece_priority])
        if new_unblocked:
            self.requestmorefunc(new_unblocked)
        if was_complete and not self.storagewrapper.am_I_complete():
            # enabling a file made us incomplete again: rejoin the swarm
            self.rerequestfunc()

        return new_piece_priority

    def set_priorities_now(self, new_priority=None):
        """Apply a staged (or given) priority list immediately."""
        if not new_priority:
            new_priority = self.new_priority
            self.new_priority = None  # potential race condition
            if not new_priority:
                return
        old_priority = self.priority
        self.priority = new_priority
        if not self._set_files_disabled(old_priority, new_priority):
            return
        self.piece_priority = self._set_piece_priority(new_priority)

    def set_priorities(self, new_priority):
        """Stage a priority list and apply it on the scheduler thread."""
        self.new_priority = new_priority
        self.sched(self.set_priorities_now)

    def set_priority(self, f, p):
        """Set file f's priority to p (async, via set_priorities)."""
        new_priority = self.get_priorities()
        new_priority[f] = p
        self.set_priorities(new_priority)

    def get_priorities(self):
        """Return a copy of the pending (if any) or current priority list."""
        priority = self.new_priority
        if not priority:
            priority = self.priority  # potential race condition
        return [i for i in priority]

    def __setitem__(self, index, val):
        self.set_priority(index, val)

    def __getitem__(self, index):
        try:
            return self.new_priority[index]
        except TypeError:
            # new_priority is None: fall back to the applied priorities
            return self.priority[index]

    def finish(self):
        """Delete the on-disk placeholders of files that stayed disabled."""
        for f in range(self.numfiles):
            if self.priority[f] == -1:
                self.storage.delete_file(f)

    def pickle(self):
        """Build the resume-data dict (priorities + storage state)."""
        d = {'priority': self.priority}
        try:
            s = self.storage.pickle()
            sw = self.storagewrapper.pickle()
            d.update(s)
            d.update(sw)
        except (IOError, OSError):
            pass
        return d
| 2.609375 | 3 |
app.py | spencerpomme/notebook | 0 | 12766662 | import os
import sys
import click
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask import redirect, url_for, abort, render_template, flash
from flask_wtf import FlaskForm
from wtforms import SubmitField, TextAreaField
from wtforms.validators import DataRequired
# SQLite URI compatible
# SQLAlchemy SQLite URIs use three slashes before a Windows drive path
# ('sqlite:///C:\\...') but four before a POSIX absolute path
# ('sqlite:////abs/path'), hence the platform switch.
WIN = sys.platform.startswith('win')
if WIN:
    prefix = 'sqlite:///'
else:
    prefix = 'sqlite:////'

app = Flask(__name__)
# DATABASE_URL (if set) wins; otherwise use a SQLite file next to the app
app.config['SQLALCHEMY_DATABASE_URI'] = os.getenv('DATABASE_URL', prefix + os.path.join(app.root_path, 'data.db'))
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False  # silence FSA warning
db = SQLAlchemy(app)


@app.route('/')
def hello_world():
    """Minimal index route; serves as a smoke test."""
    return 'Hello World!'


if __name__ == '__main__':
    app.run()
| 2.4375 | 2 |
explosig_data/utils.py | lrgr/explosig-data | 1 | 12766663 | import pandas as pd
import logging
import os
import snakemake as snakemake_api
import tempfile
import yaml
from .constants import *
from .i_o import get_logger, get_df_drop_message
# Helper functions
def convert_with_map(row, index, convert_map):
    """Translate ``row[index]`` through *convert_map*; unknown keys map to NAN_VAL."""
    key = row[index]
    if key in convert_map:
        return convert_map[key]
    return NAN_VAL
def clean_ssm_df(df):
    """Perform the final stage of standardization of a simple somatic mutation dataframe.

    Parameters
    ----------
    df : `pd.DataFrame`
        A simple somatic mutation dataframe that contains all of the expected columns.
    
    Returns
    -------
    `pd.DataFrame`
        The dataframe with typed columns, sorted rows, and filtered rows (filtered if NaN/invalid chromosome, NaN start pos, or NaN end pos).
    """
    # Drop mutations with NaN chromosome
    filtered_df = df.dropna(subset=[COLNAME.CHR.value])
    logging.debug(get_df_drop_message(COLNAME.CHR.value, "NaN value", df, filtered_df))
    df = filtered_df
    # Drop mutations with NaN start position
    filtered_df = df.dropna(subset=[COLNAME.POS_START.value])
    logging.debug(get_df_drop_message(COLNAME.POS_START.value, "NaN value", df, filtered_df))
    df = filtered_df
    # Drop mutations with NaN end position
    filtered_df = df.dropna(subset=[COLNAME.POS_END.value])
    logging.debug(get_df_drop_message(COLNAME.POS_END.value, "NaN value", df, filtered_df))
    df = filtered_df
    # Drop mutations with invalid chromosome
    filtered_df = df.loc[df[COLNAME.CHR.value].isin(CHROMOSOMES)]
    logging.debug(get_df_drop_message(COLNAME.CHR.value, "invalid value", df, filtered_df))
    df = filtered_df
    # Ensure correct types before sorting
    df[COLNAME.CHR.value] = df[COLNAME.CHR.value].apply(str) # make sure everything is a string
    df[COLNAME.POS_START.value] = df[COLNAME.POS_START.value].astype(int)
    df[COLNAME.POS_END.value] = df[COLNAME.POS_END.value].astype(int)
    # Sort the mutations by sample and then genomic location.
    # CHR becomes an ordered categorical so chromosomes sort in the order
    # given by CHROMOSOMES rather than lexicographically.
    df[COLNAME.CHR.value] = pd.Categorical(df[COLNAME.CHR.value], CHROMOSOMES, ordered=True)
    df = df.sort_values([COLNAME.PATIENT.value, COLNAME.SAMPLE.value, COLNAME.CHR.value, COLNAME.POS_START.value])
    # Restrict to the standard set of columns
    return df[SSM_COLUMNS]
def run_snakemake_with_config(snakefile_path, config):
    """Run the given Snakefile with *config* passed via a temporary YAML file.

    The snakemake() function can only handle "flat" dicts through its direct
    config= parameter, so the (possibly nested) config dict is written to a
    temporary file and handed over via configfiles= instead.
    """
    # Create the file before the try block: if creation fails there is
    # nothing to clean up (the old code referenced `f` in `finally` even
    # when NamedTemporaryFile itself raised).
    f = tempfile.NamedTemporaryFile(mode='w', delete=False)
    try:
        yaml.dump(config, f, default_flow_style=False)
        # Close (and thereby flush) BEFORE snakemake reads the file —
        # otherwise buffered YAML may not be on disk yet, and unlinking
        # an open file fails on Windows.
        f.close()
        snakemake_api.snakemake(snakefile=snakefile_path, configfiles=[f.name])
    finally:
        if not f.closed:
            f.close()
        os.unlink(f.name)
| 2.578125 | 3 |
lintcode/NineChapters/05/longest-common-subsequence.py | shootsoft/practice | 0 | 12766664 | <reponame>shootsoft/practice
__author__ = 'yinjun'
#@see http://www.jiuzhang.com/solutions/longest-common-subsequence/
class Solution:
    """
    @param A, B: Two strings.
    @return: The length of longest common subsequence of A and B.
    """
    def longestCommonSubsequence(self, A, B):
        # Bottom-up dynamic programming: table[i][j] holds the LCS length
        # of the prefixes A[:i] and B[:j].
        rows, cols = len(A), len(B)
        table = [[0] * (cols + 1) for _ in range(rows + 1)]
        for i, ca in enumerate(A, start=1):
            for j, cb in enumerate(B, start=1):
                if ca == cb:
                    # matching characters extend the diagonal subsequence
                    table[i][j] = table[i - 1][j - 1] + 1
                else:
                    # otherwise carry over the better of the two prefixes
                    table[i][j] = max(table[i - 1][j], table[i][j - 1])
        return table[rows][cols]
src/segmentation code/Tester.py | Zoopare/Optic-Disk-Cup-Segmentation | 38 | 12766665 | <filename>src/segmentation code/Tester.py
import torch
from torch.nn import functional as F
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from torchvision import transforms
from PIL import Image
import os
import cv2
import numpy as np
from model import FCDenseNet57
# Test images and the two trained checkpoints (hard-coded local paths)
root_path = '/media/brats/Varghese/REFUGE_2018/data/Testing/Disc_Cup_Images'
test_imgs = os.listdir(root_path)
modelC_ckpt = '/media/brats/Varghese/REFUGE_2018/segmentation_Codes/modelsclaheWC11/model-m-best_loss.pth.tar'
modelWC_ckpt = '/media/brats/Varghese/REFUGE_2018/segmentation_Codes/models/model-m-25062018-184326-tramisu_2D_FC57_without_coordinate_loss = 3.8611210505167644_acc = 0.9975007267321571_best_acc.pth.tar'

# Two FC-DenseNet57 variants: 5-channel input (RGB + coord grids) and
# 11-channel input (RGB + two CLAHE variants + coord grids)
modelWC = FCDenseNet57(3, 5)
modelC= FCDenseNet57(3, 11)

modelWC.load_state_dict(torch.load(modelWC_ckpt)['state_dict'])
modelC.load_state_dict(torch.load(modelC_ckpt)['state_dict'])

# ImageNet mean/std normalisation applied after ToTensor
normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
transformList = []
transformList.append(transforms.ToTensor())
transformList.append(normalize)
transformSequence=transforms.Compose(transformList)
def apply_coordinates(image):
    """Build normalised x/y coordinate grids for a square image.

    Returns (Xmat, Ymat), each of shape (h, h), with values in [0, 1).
    """
    # image: h, h, c
    width, height = image.size
    assert width == height
    xs = np.arange(width) / width
    ys = np.arange(height) / height
    Xmat, Ymat = np.meshgrid(xs, ys)
    return Xmat, Ymat
def clahe_single(ori_img,clipLimit,tileGridSize):
    """Apply CLAHE to the lightness channel of an RGB image array.

    Converts to LAB, equalises only the L plane (so colours are kept),
    converts back and returns an RGB numpy array.
    """
    ori_img = np.uint8(ori_img)
    # ori_img = Image.open(pth)
    # bgr = cv2.imread(pth)
    lab = cv2.cvtColor(ori_img, cv2.COLOR_RGB2LAB)
    lab_planes = cv2.split(lab)
    clahe = cv2.createCLAHE(clipLimit,tileGridSize)
    lab_planes[0] = clahe.apply(lab_planes[0])  # plane 0 = L (lightness)
    lab = cv2.merge(lab_planes)
    rgb = cv2.cvtColor(lab, cv2.COLOR_LAB2RGB)
    return rgb
def clahe_all(ori_img):
    """Return two CLAHE-enhanced PIL images: fine (8x8) and coarse (300x300) tile grids."""
    fine = clahe_single(ori_img, 2.0, (8, 8))
    coarse = clahe_single(ori_img, 2.0, (300, 300))
    return Image.fromarray(fine), Image.fromarray(coarse)
# compute dice
# NOTE(review): the four functions below take a ``self`` parameter even
# though they are defined at module level — they look copy-pasted from a
# class. ``get_dice_score`` dereferences ``self.background`` etc., which
# does not exist on any object constructed in this script, and nothing
# here calls them: effectively dead/broken code. Confirm before reusing.
def background(self,data):
    # boolean mask of background pixels (label 0)
    return data==0

def opticdisk(self,data):
    # boolean mask of optic-disk pixels (label 1)
    return data==1

def opticcup(self,data):
    # boolean mask of optic-cup pixels (label 2)
    return data==2

def get_dice_score(self,prediction,ground_truth):
    """Dice coefficients (background, optic disk, optic cup) between prediction and ground truth."""
    masks=(self.background, self.opticdisk, self.opticcup)
    # torch.exp implies prediction holds log-probabilities — TODO confirm
    pred=torch.exp(prediction)
    p=np.uint8(np.argmax(pred.data.cpu().numpy(), axis=1))
    gt=np.uint8(ground_truth.data.cpu().numpy())
    # 2|P∩G| / (|P|+|G|), with 1e-3 guarding against empty masks
    b, od, oc=[2*np.sum(func(p)*func(gt)) / (np.sum(func(p)) + np.sum(func(gt))+1e-3) for func in masks]
    return b, od, oc
# Interactively run both models over every test image and display the
# input next to the two predicted segmentation maps.
plt.ion()
for img in test_imgs:
    imagen = Image.open(os.path.join(root_path,img)).convert('RGB').resize((512,512))
    rgb1, rgb2 = clahe_all(np.array(imagen))
    image = transformSequence(imagen)
    rgb1= transformSequence(rgb1)
    rgb2= transformSequence(rgb2)
    # 9-channel stack: RGB + fine-CLAHE RGB + coarse-CLAHE RGB
    image3 = torch.cat([image, rgb1, rgb2], 0)
    # append normalised coordinate grids as two extra channels
    Xmat, Ymat = apply_coordinates(imagen)
    Xmat = torch.FloatTensor(Xmat).unsqueeze(0)
    Ymat = torch.FloatTensor(Ymat).unsqueeze(0)
    imageC = torch.cat([image3, Xmat, Ymat], 0) # comment for normal model
    imageWC = torch.cat([image, Xmat, Ymat], 0)
    print (imageC.size())
    # argmax over class dim -> label map, for each model
    predC = modelC(imageC.unsqueeze(0))
    _, predC = torch.max(predC, 1)
    predC = predC.squeeze(0).detach().cpu().numpy()
    predWC = modelWC(imageWC.unsqueeze(0))
    _, predWC=torch.max(predWC, 1)
    predWC = predWC.squeeze(0).detach().cpu().numpy()
    # side-by-side: input | 11-channel model | 5-channel model
    plt.subplot(1,3,1)
    plt.imshow(imagen)
    plt.subplot(1,3,2)
    plt.imshow(predC)
    plt.xlabel("with 11 channels")
    plt.subplot(1,3,3)
    plt.imshow(predWC)
    plt.xlabel("with 5 channels")
    plt.pause(0.5)
    plt.show()
| 2.09375 | 2 |
abbrev-amsrefs.py | vanabel/ams-abbrevs | 0 | 12766666 | #!/bin/python3
# -*- coding: utf-8 -*-
import pandas
# Build LaTeX \DefineJournal entries from the AMS journal list in annser.csv.
data = pandas.read_csv('./annser.csv',
                       header=0,
                       na_values=[''],
                       usecols=[0, 1, 3, 4])

entries = []
for index, rows in data.iterrows():
    # abbreviation = lower-cased initials of the capitalised words of the
    # journal name (column 1)
    name = str(rows.iloc[1])  # .iloc: positional Series[int] indexing is deprecated
    abbrev = "".join(s[:1] for s in name.split(' ') if s[:1].isupper()).lower()
    short = str(rows.iloc[3])
    if short == 'nan':
        # no short form in the source data: mark it for manual review
        short = "?" + abbrev
    entries.append("\\DefineJournal{%s}{%s}\n{%s}\n{%s}\n"
                   % (abbrev, short, str(rows.iloc[0]), name))

# join once (the old += loop was quadratic); a context manager guarantees the
# file is closed, an explicit encoding makes the output portable, and the
# handle no longer shadows a builtin name
with open("annser-abbrev.tex", "w", encoding="utf-8") as outfile:
    outfile.write("".join(entries))
| 2.90625 | 3 |
utility/plot.py | xlnwel/grl | 5 | 12766667 | <gh_stars>1-10
import os, sys, glob
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
def plot_data(data, x, y, outdir, tag, title, timing=None):
    """Draw a seaborn line plot of *y* vs *x*, one hue per *tag* value,
    and save it as ``<outdir>/<title>.png``.

    *data* may be a single DataFrame or a list of DataFrames (concatenated).
    If *timing* is given, rows are filtered to that Timing and the column is
    dropped; otherwise Train/Eval are distinguished by line style.
    """
    if isinstance(data, list):
        data = pd.concat(data, ignore_index=True)
    if timing:
        data = data[data.Timing == timing].drop('Timing', axis=1)
    # makedirs: outdir is nested (e.g. 'results/<title>-x-y'); a plain
    # os.mkdir fails whenever the parent directory does not exist yet
    os.makedirs(outdir, exist_ok=True)
    fig = plt.figure(figsize=(20, 10))
    ax = fig.add_subplot(111)
    sns.set(style="whitegrid", font_scale=1.5)
    sns.set_palette('Set2') # or husl
    if 'Timing' in data.columns:
        sns.lineplot(x=x, y=y, ax=ax, data=data, hue=tag, style='Timing')
    else:
        sns.lineplot(x=x, y=y, ax=ax, data=data, hue=tag)
    ax.grid(True, alpha=0.8, linestyle=':')
    ax.legend(loc='best').set_draggable(True)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    if timing:
        title = f'{title}-{timing}'
    outpath = f'{outdir}/{title}.png'
    ax.set_title(title)
    fig.savefig(outpath)
    # free the figure; repeated calls otherwise accumulate open figures
    plt.close(fig)
    print(f'Plot Path: {outpath}')
def get_datasets(filedir, tag, condition=None):
    """Recursively load every 'log.txt' (tab-separated) under *filedir*.

    Each DataFrame gets an extra column named *tag* filled with *condition*
    so that runs can be told apart after concatenation. Returns a list of
    DataFrames. (The old unused ``unit`` counter has been removed.)
    """
    datasets = []
    for root, _, files in os.walk(filedir):
        for f in files:
            if f.endswith('log.txt'):
                data = pd.read_csv(os.path.join(root, f), sep='\t')
                data.insert(len(data.columns), tag, condition)
                datasets.append(data)
    return datasets
def main():
    """CLI entry point: collect log.txt runs and plot every (x, y) pair."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('logdir', nargs='*')
    parser.add_argument('--title', '-t', default='', type=str)
    parser.add_argument('--legend', nargs='*')
    parser.add_argument('--legendtag', '-tag', default='Algo')
    parser.add_argument('--x', '-x', default='env_step', nargs='*')
    parser.add_argument('--y', '-y', default='score', nargs='*')
    parser.add_argument('--timing', default=None, choices=['Train', 'Eval', None],
                        help='select timing to plot; both training and evaluation stats are plotted by default')
    args = parser.parse_args()

    # by default assume using `python utility/plot.py` to call this file
    if len(args.logdir) != 1:
        dirs = [f'{d}' for d in args.logdir]
    else:
        dirs = glob.glob(args.logdir[0])
    # dir follows pattern: logs/env/algo(/model_name)
    title = args.title or dirs[0].split('/')[1].split('_')[-1]

    # set up legends
    if args.legend:
        # bug fix: the failure message referenced nonexistent args.dirs,
        # turning a failed assert into an AttributeError
        assert len(args.legend) == len(dirs), (
            "Must give a legend title for each set of experiments: "
            f"#legends({args.legend}) != #dirs({args.logdir})")
        legends = args.legend
    else:
        legends = [path.split('/')[2] for path in dirs]
        legends = [l[3:] if l.startswith('GS-') else l for l in legends]
    tag = args.legendtag

    print('Directories:')
    for d in dirs:
        print(f'\t{d}')
    print('Legends:')
    for l in legends:
        print(f'\t{l}')

    data = []
    for logdir, legend_title in zip(dirs, legends):
        data += get_datasets(logdir, tag, legend_title)

    xs = args.x if isinstance(args.x, list) else [args.x]
    ys = args.y if isinstance(args.y, list) else [args.y]
    for x in xs:
        for y in ys:
            outdir = f'results/{title}-{x}-{y}'
            plot_data(data, x, y, outdir, tag, title, args.timing)

if __name__ == '__main__':
    main()
| 2.3125 | 2 |
utilities/pyPlotting/writeFiltFields.py | mightylorenzo/Puffin | 15 | 12766668 | # Copyright (c) 2012-2018, University of Strathclyde
# Authors: <NAME>
# License: BSD-3-Clause
"""
This is an examplar script to produce a plot of the filtered energy.
"""
import sys, glob, os
import numpy as np
import tables
from numpy import arange
from retrieve import readField
from retrieve import filterField
from puffdata import fdata
from puffdata import puffData
def getFilteredFields(h5fname, cfr=None, dfr=None, qAv = 0, qScale = None):
    """Read the x/y field arrays from *h5fname*, optionally frequency filtered.

    cfr/dfr are the filter centre and width handed to filterField; both must
    be given for filtering to happen, otherwise the raw fields are returned.

    NOTE(review): qAv and qScale are accepted but unused here; they are kept
    only for interface compatibility with existing callers. The previous
    version also computed several never-used axis arrays (lenz2, z2axis,
    xaxis, yaxis) — removed as dead code.
    """
    mdata = fdata(h5fname)

    xf, yf = readField(h5fname)

    if (cfr is not None) and (dfr is not None):
        xf = filterField(xf, cfr, dfr, mdata.vars)
        yf = filterField(yf, cfr, dfr, mdata.vars)

    return xf, yf
def writeFiltFields(fname, oname):
    """Filter the field in Puffin dump *fname* and write it to *oname*.

    The x/y field components are filtered (cfr=2, dfr=0.4) and stored
    together with copies of the mesh/run metadata groups so the output
    remains a valid field dump. The py2-only ``print`` statement has been
    converted to a py2/py3-compatible call, and unused locals (dx, dy, dz2)
    plus large commented-out blocks have been removed.
    """
    h5f = tables.open_file(fname, mode='r')  # open full field dump file

    # mesh dimensions of the full field
    nx = h5f.root.runInfo._v_attrs.nX
    ny = h5f.root.runInfo._v_attrs.nY
    nz2 = h5f.root.runInfo._v_attrs.nZ2

    xf, yf = getFilteredFields(fname, 2, 0.4, qAv = 0, qScale = None)

    # stack the two field components into the last axis
    tfield = np.zeros([nx, ny, nz2, 2])
    tfield[:, :, :, 0] = xf
    tfield[:, :, :, 1] = yf

    print(np.shape(tfield))

    h5o = tables.open_file(oname, 'w')  # open output file

    # Write filtered field to file
    h5o.create_array('/', 'aperp', tfield)

    # Copy data attributes
    h5f.root.aperp._v_attrs._f_copy(h5o.root.aperp)

    # Copy metadata groups from the input file so the output stays readable
    h5f.root.globalLimits._f_copy(h5o.root)
    h5f.root.meshScaled._f_copy(h5o.root)
    h5f.root.intensityScaled._f_copy(h5o.root)
    h5f.root.runInfo._f_copy(h5o.root)
    h5f.root.time._f_copy(h5o.root)

    # close files...
    h5f.close()
    h5o.close()
# h5o.root.meshScaled._v_attrs.vsUpperBounds = numpy.array((uppLimX, uppLimY, 0))
#h5o.create_group('/','runInfo','')
#h5o.root.runInfo = h5f.root.runInfo
# h5o.create_group('/','gridZ_SI','')
# numCells=numpy.array((numpy.int(numSpatialPoints)-1,numpy.int(numTimes)-1))
# h5o.root.gridZ_SI._v_attrs.vsLowerBounds=numpy.array((numpy.double(minS),numpy.double(minZT)))
# h5o.root.gridZ_SI._v_attrs.vsStartCell=numpy.array((numpy.int(0),numpy.int(0)))
# h5o.root.gridZ_SI._v_attrs.vsUpperBounds=numpy.array((numpy.double(maxS),numpy.double(maxZT)))
# h5o.root.gridZ_SI._v_attrs.vsNumCells=numpy.array(numCells)
# h5o.root.gridZ_SI._v_attrs.vsKind="uniform"
# h5o.root.gridZ_SI._v_attrs.vsType="mesh"
# h5o.root.gridZ_SI._v_attrs.vsCentering="nodal"
# h5o.root.gridZ_SI._v_attrs.vsAxisLabels="ct-z,z"
# print(h5o.root.runInfo._v_attrs)
if __name__ == '__main__':
    # CLI: <input field dump> [<output file>]; if no output name is given,
    # derive one by inserting '_2nd_' before the dump-number suffix.
    h5finame = sys.argv[1]
    if len(sys.argv) > 2:
        h5foname = sys.argv[2]
        # print() form is valid in both py2 and py3 (the old bare
        # print statements were py2-only)
        print("Output file specified as: " + sys.argv[2])
    else:
        filenamepieces = h5finame.split('_')
        dumpnoAndExt = filenamepieces[-1]
        bname = '_'.join(filenamepieces[0:-1])
        h5foname = bname + '_2nd_' + dumpnoAndExt
        print("No output file specified - will be written to: " + h5foname)

    writeFiltFields(h5finame, h5foname)
| 2.171875 | 2 |
fig/python_cmd/prime_count.py | gavinln/mesos-marathon | 2 | 12766669 | <reponame>gavinln/mesos-marathon
import math
# Count primes below one million by trial division against the primes
# found so far, stopping at sqrt(candidate). The py2-only print
# statements have been converted to py2/py3-compatible print() calls.
print("Primes up to a million")
pp = 2
ps = [pp]          # primes found so far, seeded with 2 and 3
pp += 1
ps.append(pp)
lim = 1000000
while pp < int(lim):
    pp += 2                      # only odd candidates from 5 upward
    is_prime = True
    sqrtpp = math.sqrt(pp)
    for a in ps:
        if a > sqrtpp:
            break                # no divisor <= sqrt(pp): pp is prime
        if pp % a == 0:
            is_prime = False
            break
    if is_prime:
        ps.append(pp)
print(len(ps))
| 3.421875 | 3 |
alipay/aop/api/domain/AlipayFincoreComplianceCrossborderMerchantBatchqueryModel.py | antopen/alipay-sdk-python-all | 213 | 12766670 | <reponame>antopen/alipay-sdk-python-all
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.BaseCrossborderMerchantInfo import BaseCrossborderMerchantInfo
class AlipayFincoreComplianceCrossborderMerchantBatchqueryModel(object):
    """Request model for the crossborder-merchant batch-query API.

    NOTE(review): this follows the auto-generated Alipay SDK model pattern
    (one property per field plus to_alipay_dict/from_alipay_dict); keep any
    edits consistent with the generator's conventions.
    """

    def __init__(self):
        # all fields default to None until set by the caller
        self._biz_source = None
        self._org_list = None
        self._out_biz_no = None
        self._total = None

    @property
    def biz_source(self):
        return self._biz_source

    @biz_source.setter
    def biz_source(self, value):
        self._biz_source = value

    @property
    def org_list(self):
        return self._org_list

    @org_list.setter
    def org_list(self, value):
        # accepts a list of BaseCrossborderMerchantInfo objects or of plain
        # dicts, which are converted on the way in
        if isinstance(value, list):
            self._org_list = list()
            for i in value:
                if isinstance(i, BaseCrossborderMerchantInfo):
                    self._org_list.append(i)
                else:
                    self._org_list.append(BaseCrossborderMerchantInfo.from_alipay_dict(i))

    @property
    def out_biz_no(self):
        return self._out_biz_no

    @out_biz_no.setter
    def out_biz_no(self, value):
        self._out_biz_no = value

    @property
    def total(self):
        return self._total

    @total.setter
    def total(self, value):
        self._total = value

    def to_alipay_dict(self):
        """Serialise the model (and nested models) into a plain dict;
        unset (falsy) fields are omitted."""
        params = dict()
        if self.biz_source:
            if hasattr(self.biz_source, 'to_alipay_dict'):
                params['biz_source'] = self.biz_source.to_alipay_dict()
            else:
                params['biz_source'] = self.biz_source
        if self.org_list:
            if isinstance(self.org_list, list):
                for i in range(0, len(self.org_list)):
                    element = self.org_list[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.org_list[i] = element.to_alipay_dict()
            if hasattr(self.org_list, 'to_alipay_dict'):
                params['org_list'] = self.org_list.to_alipay_dict()
            else:
                params['org_list'] = self.org_list
        if self.out_biz_no:
            if hasattr(self.out_biz_no, 'to_alipay_dict'):
                params['out_biz_no'] = self.out_biz_no.to_alipay_dict()
            else:
                params['out_biz_no'] = self.out_biz_no
        if self.total:
            if hasattr(self.total, 'to_alipay_dict'):
                params['total'] = self.total.to_alipay_dict()
            else:
                params['total'] = self.total
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a plain dict; returns None for empty input."""
        if not d:
            return None
        o = AlipayFincoreComplianceCrossborderMerchantBatchqueryModel()
        if 'biz_source' in d:
            o.biz_source = d['biz_source']
        if 'org_list' in d:
            o.org_list = d['org_list']
        if 'out_biz_no' in d:
            o.out_biz_no = d['out_biz_no']
        if 'total' in d:
            o.total = d['total']
        return o
| 1.835938 | 2 |
sweep_power.py | intermod-pro/presto-measure | 0 | 12766671 | # -*- coding: utf-8 -*-
"""
2D sweep of drive power and frequency in Lockin mode.
"""
from typing import List
import h5py
import numpy as np
from presto.hardware import AdcFSample, AdcMode, DacFSample, DacMode
from presto import lockin
from presto.utils import ProgressBar
from _base import Base
DAC_CURRENT = 32_000  # uA
# Shared ADC/DAC converter settings passed when instantiating Lockin
CONVERTER_CONFIGURATION = {
    "adc_mode": AdcMode.Mixed,
    "adc_fsample": AdcFSample.G4,
    "dac_mode": DacMode.Mixed42,
    "dac_fsample": DacFSample.G10,
}
class SweepPower(Base):
    """2D sweep of drive power and frequency in Lockin mode.

    ``run`` fills ``freq_arr`` / ``resp_arr`` and saves them to HDF5;
    ``load`` restores a saved sweep; ``analyze`` opens an interactive
    matplotlib viewer (with optional resonator fitting).
    """

    def __init__(
        self,
        freq_center: float,
        freq_span: float,
        df: float,
        num_averages: int,
        amp_arr: List[float],
        output_port: int,
        input_port: int,
        dither: bool = True,
        num_skip: int = 0,
    ) -> None:
        self.freq_center = freq_center
        self.freq_span = freq_span
        self.df = df  # modified after tuning
        self.num_averages = num_averages
        self.amp_arr = np.atleast_1d(amp_arr).astype(np.float64)
        self.output_port = output_port
        self.input_port = input_port
        self.dither = dither
        self.num_skip = num_skip  # pixels discarded before averaging

        self.freq_arr = None  # replaced by run
        self.resp_arr = None  # replaced by run

    def run(
        self,
        presto_address: str,
        presto_port: int = None,
        ext_ref_clk: bool = False,
    ) -> str:
        """Execute the sweep on the instrument and return the save-file path."""
        with lockin.Lockin(
            address=presto_address,
            port=presto_port,
            ext_ref_clk=ext_ref_clk,
            **CONVERTER_CONFIGURATION,
        ) as lck:
            assert lck.hardware is not None

            lck.hardware.set_adc_attenuation(self.input_port, 0.0)
            lck.hardware.set_dac_current(self.output_port, DAC_CURRENT)
            lck.hardware.set_inv_sinc(self.output_port, 0)

            nr_amps = len(self.amp_arr)

            # tune frequencies: snap df to an instrument-achievable value,
            # then build the integer comb of frequencies to sweep over
            _, self.df = lck.tune(0.0, self.df)
            f_start = self.freq_center - self.freq_span / 2
            f_stop = self.freq_center + self.freq_span / 2
            n_start = int(round(f_start / self.df))
            n_stop = int(round(f_stop / self.df))
            n_arr = np.arange(n_start, n_stop + 1)
            nr_freq = len(n_arr)
            self.freq_arr = self.df * n_arr
            self.resp_arr = np.zeros((nr_amps, nr_freq), np.complex128)

            lck.hardware.configure_mixer(
                freq=self.freq_arr[0],
                in_ports=self.input_port,
                out_ports=self.output_port,
            )
            lck.set_df(self.df)
            og = lck.add_output_group(self.output_port, 1)
            og.set_frequencies(0.0)
            og.set_amplitudes(self.amp_arr[0])
            og.set_phases(0.0, 0.0)

            lck.set_dither(self.dither, self.output_port)
            ig = lck.add_input_group(self.input_port, 1)
            ig.set_frequencies(0.0)

            lck.apply_settings()

            pb = ProgressBar(nr_amps * nr_freq)
            pb.start()
            # Outer loop: drive amplitude; inner loop: mixer (LO) frequency.
            for jj, amp in enumerate(self.amp_arr):
                og.set_amplitudes(amp)
                lck.apply_settings()

                for ii, freq in enumerate(self.freq_arr):
                    lck.hardware.configure_mixer(
                        freq=freq,
                        in_ports=self.input_port,
                        out_ports=self.output_port,
                    )
                    # brief settling time after retuning the mixer
                    lck.hardware.sleep(1e-3, False)

                    _d = lck.get_pixels(self.num_skip + self.num_averages, quiet=True)
                    data_i = _d[self.input_port][1][:, 0]
                    data_q = _d[self.input_port][2][:, 0]
                    data = data_i.real + 1j * data_q.real  # using zero IF

                    # average only the last num_averages pixels (skip warm-up)
                    self.resp_arr[jj, ii] = np.mean(data[-self.num_averages:])

                    pb.increment()

            pb.done()

            # Mute outputs at the end of the sweep
            og.set_amplitudes(0.0)
            lck.apply_settings()

        return self.save()

    def save(self, save_filename: str = None) -> str:
        """Persist attributes and arrays via the Base helper; returns the path."""
        return super().save(__file__, save_filename=save_filename)

    @classmethod
    def load(cls, load_filename: str) -> 'SweepPower':
        """Reconstruct a SweepPower instance from a file written by save()."""
        with h5py.File(load_filename, "r") as h5f:
            freq_center = h5f.attrs["freq_center"]
            freq_span = h5f.attrs["freq_span"]
            df = h5f.attrs["df"]
            num_averages = h5f.attrs["num_averages"]
            output_port = h5f.attrs["output_port"]
            input_port = h5f.attrs["input_port"]
            dither = h5f.attrs["dither"]
            num_skip = h5f.attrs["num_skip"]
            amp_arr = h5f["amp_arr"][()]
            freq_arr = h5f["freq_arr"][()]
            resp_arr = h5f["resp_arr"][()]

        self = cls(
            freq_center=freq_center,
            freq_span=freq_span,
            df=df,
            num_averages=num_averages,
            amp_arr=amp_arr,
            output_port=output_port,
            input_port=input_port,
            dither=dither,
            num_skip=num_skip,
        )
        self.freq_arr = freq_arr
        self.resp_arr = resp_arr
        return self

    def analyze(self, norm: bool = True, portrait: bool = True, blit: bool = False):
        """Open an interactive viewer of the sweep.

        norm: divide each response row by its drive amplitude.
        portrait: stacked layout instead of side-by-side.
        blit: use matplotlib blitting for faster interactive redraws.
        Returns the matplotlib figure.
        """
        if self.freq_arr is None:
            raise RuntimeError
        if self.resp_arr is None:
            raise RuntimeError

        import matplotlib.pyplot as plt
        # resonator fitting is optional; degrade gracefully if unavailable
        try:
            from resonator_tools import circuit
            import matplotlib.widgets as mwidgets
            _do_fit = True
        except ImportError:
            _do_fit = False

        nr_amps = len(self.amp_arr)
        self._AMP_IDX = nr_amps // 2  # currently selected amplitude row

        if norm:
            resp_scaled = np.zeros_like(self.resp_arr)
            for jj in range(nr_amps):
                resp_scaled[jj] = self.resp_arr[jj] / self.amp_arr[jj]
        else:
            resp_scaled = self.resp_arr

        resp_dB = 20. * np.log10(np.abs(resp_scaled))
        amp_dBFS = 20 * np.log10(self.amp_arr / 1.0)

        # choose limits for colorbar (clip 1% outliers at both ends)
        cutoff = 1.  # %
        lowlim = np.percentile(resp_dB, cutoff)
        highlim = np.percentile(resp_dB, 100. - cutoff)

        # extent of the 2D image, padded by half a pixel on each side
        x_min = 1e-9 * self.freq_arr[0]
        x_max = 1e-9 * self.freq_arr[-1]
        dx = 1e-9 * (self.freq_arr[1] - self.freq_arr[0])
        y_min = amp_dBFS[0]
        y_max = amp_dBFS[-1]
        dy = amp_dBFS[1] - amp_dBFS[0]

        if portrait:
            fig1 = plt.figure(tight_layout=True, figsize=(6.4, 9.6))
            ax1 = fig1.add_subplot(2, 1, 1)
        else:
            fig1 = plt.figure(tight_layout=True, figsize=(12.8, 4.8))
            ax1 = fig1.add_subplot(1, 2, 1)
        im = ax1.imshow(
            resp_dB,
            origin='lower',
            aspect='auto',
            interpolation='none',
            extent=(x_min - dx / 2, x_max + dx / 2, y_min - dy / 2, y_max + dy / 2),
            vmin=lowlim,
            vmax=highlim,
        )
        # dashed line marking the amplitude row shown in the cuts below
        line_sel = ax1.axhline(amp_dBFS[self._AMP_IDX], ls="--", c="k", lw=3, animated=blit)
        ax1.set_xlabel("Frequency [GHz]")
        ax1.set_ylabel("Drive amplitude [dBFS]")
        cb = fig1.colorbar(im)
        if portrait:
            cb.set_label("Response amplitude [dB]")
        else:
            ax1.set_title("Response amplitude [dB]")

        fig1.show()

        if portrait:
            ax2 = fig1.add_subplot(4, 1, 3)
            ax3 = fig1.add_subplot(4, 1, 4, sharex=ax2)
        else:
            ax2 = fig1.add_subplot(2, 2, 2)
            ax3 = fig1.add_subplot(2, 2, 4, sharex=ax2)
        ax2.yaxis.set_label_position("right")
        ax2.yaxis.tick_right()
        ax3.yaxis.set_label_position("right")
        ax3.yaxis.tick_right()

        # 1D amplitude/phase cuts at the selected drive amplitude
        line_a, = ax2.plot(1e-9 * self.freq_arr, resp_dB[self._AMP_IDX], label="measured", animated=blit)
        line_p, = ax3.plot(1e-9 * self.freq_arr, np.angle(self.resp_arr[self._AMP_IDX]), animated=blit)
        if _do_fit:
            # fit overlays start as all-NaN (i.e. invisible) until a fit is made
            line_fit_a, = ax2.plot(1e-9 * self.freq_arr,
                                   np.full_like(self.freq_arr, np.nan),
                                   ls="--",
                                   label="fit",
                                   animated=blit)
            line_fit_p, = ax3.plot(1e-9 * self.freq_arr, np.full_like(self.freq_arr, np.nan), ls="--", animated=blit)

        f_min = 1e-9 * self.freq_arr.min()
        f_max = 1e-9 * self.freq_arr.max()
        f_rng = f_max - f_min
        a_min = resp_dB.min()
        a_max = resp_dB.max()
        a_rng = a_max - a_min
        p_min = -np.pi
        p_max = np.pi
        p_rng = p_max - p_min
        ax2.set_xlim(f_min - 0.05 * f_rng, f_max + 0.05 * f_rng)
        ax2.set_ylim(a_min - 0.05 * a_rng, a_max + 0.05 * a_rng)
        ax3.set_xlim(f_min - 0.05 * f_rng, f_max + 0.05 * f_rng)
        ax3.set_ylim(p_min - 0.05 * p_rng, p_max + 0.05 * p_rng)
        ax3.set_xlabel("Frequency [GHz]")
        ax2.set_ylabel("Response amplitude [dB]")
        ax3.set_ylabel("Response phase [rad]")
        ax2.legend(loc="lower right")

        def onbuttonpress(event):
            # click in the 2D map selects the nearest amplitude row
            if event.inaxes == ax1:
                self._AMP_IDX = np.argmin(np.abs(amp_dBFS - event.ydata))
                update()

        def onkeypress(event):
            # arrow keys step the selected amplitude row (clamped to range)
            if event.inaxes == ax1:
                if event.key == "up":
                    self._AMP_IDX += 1
                    if self._AMP_IDX >= len(amp_dBFS):
                        self._AMP_IDX = len(amp_dBFS) - 1
                    update()
                elif event.key == "down":
                    self._AMP_IDX -= 1
                    if self._AMP_IDX < 0:
                        self._AMP_IDX = 0
                    update()

        def update():
            # redraw selection line and cuts for the current amplitude row
            line_sel.set_ydata([amp_dBFS[self._AMP_IDX], amp_dBFS[self._AMP_IDX]])
            print(
                f"drive amp {self._AMP_IDX:d}: {self.amp_arr[self._AMP_IDX]:.2e} FS = {amp_dBFS[self._AMP_IDX]:.1f} dBFS"
            )
            line_a.set_ydata(resp_dB[self._AMP_IDX])
            line_p.set_ydata(np.angle(self.resp_arr[self._AMP_IDX]))
            if _do_fit:
                # invalidate any previous fit overlay
                line_fit_a.set_ydata(np.full_like(self.freq_arr, np.nan))
                line_fit_p.set_ydata(np.full_like(self.freq_arr, np.nan))
            if blit:
                fig1.canvas.restore_region(self._bg)
                ax1.draw_artist(line_sel)
                ax2.draw_artist(line_a)
                ax3.draw_artist(line_p)
                fig1.canvas.blit(fig1.bbox)
                fig1.canvas.flush_events()
            else:
                fig1.canvas.draw()

        if _do_fit:
            def onselect(xmin, xmax):
                # span-select on the cuts fits a notch resonator in [xmin, xmax]
                port = circuit.notch_port(self.freq_arr, self.resp_arr[self._AMP_IDX])
                port.autofit(fcrop=(xmin * 1e9, xmax * 1e9))
                if norm:
                    line_fit_a.set_data(1e-9 * port.f_data,
                                        20 * np.log10(np.abs(port.z_data_sim / self.amp_arr[self._AMP_IDX])))
                else:
                    line_fit_a.set_data(1e-9 * port.f_data, 20 * np.log10(np.abs(port.z_data_sim)))
                line_fit_p.set_data(1e-9 * port.f_data, np.angle(port.z_data_sim))
                print("----------------")
                print(f"fr = {port.fitresults['fr']}")
                print(f"Qi = {port.fitresults['Qi_dia_corr']}")
                print(f"Qc = {port.fitresults['Qc_dia_corr']}")
                print(f"Ql = {port.fitresults['Ql']}")
                print(f"kappa = {port.fitresults['fr'] / port.fitresults['Qc_dia_corr']}")
                print("----------------")
                if blit:
                    fig1.canvas.restore_region(self._bg)
                    ax1.draw_artist(line_sel)
                    ax2.draw_artist(line_a)
                    ax2.draw_artist(line_fit_a)
                    ax3.draw_artist(line_p)
                    ax3.draw_artist(line_fit_p)
                    fig1.canvas.blit(fig1.bbox)
                    fig1.canvas.flush_events()
                else:
                    fig1.canvas.draw()

            rectprops = dict(facecolor='tab:gray', alpha=0.5)
            # keep references on the figure so the selectors are not GC'd
            fig1._span_a = mwidgets.SpanSelector(ax2, onselect, 'horizontal', rectprops=rectprops, useblit=blit)
            fig1._span_p = mwidgets.SpanSelector(ax3, onselect, 'horizontal', rectprops=rectprops, useblit=blit)

        fig1.canvas.mpl_connect('button_press_event', onbuttonpress)
        fig1.canvas.mpl_connect('key_press_event', onkeypress)

        fig1.show()
        if blit:
            # cache the clean background once for fast partial redraws
            fig1.canvas.draw()
            fig1.canvas.flush_events()
            self._bg = fig1.canvas.copy_from_bbox(fig1.bbox)
            ax1.draw_artist(line_sel)
            ax2.draw_artist(line_a)
            ax3.draw_artist(line_p)
            fig1.canvas.blit(fig1.bbox)

        return fig1
| 2.25 | 2 |
mlangokumi_app/models.py | John-Osiko/Mlango10 | 0 | 12766672 | <reponame>John-Osiko/Mlango10
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
#from django.core.files.storage import FileSystemStorage
#fs = FileSystemStorage(location='/media/photos')
# Create your models here.
class Profile(models.Model):
    """Per-user profile data, linked one-to-one to Django's auth User."""
    # Role constants stored in the ``role`` field.
    RESIDENT = 1
    ADMINISTRATOR = 2
    ROLE_CHOICES = (
        (RESIDENT, 'Resident'),
        (ADMINISTRATOR, 'Administrator'),
    )
    # NOTE(review): despite the field name, ``name`` holds the related User
    # object, not a display string.
    name = models.OneToOneField(User, on_delete=models.CASCADE, related_name='profile')
    profile_pic = models.ImageField(upload_to='profile_pics/', blank=True, default='static/images/default.png')
    age = models.IntegerField(null=True)
    contact = models.IntegerField(null=False)  # phone number stored as an integer
    address = models.CharField(max_length=250)
    estate = models.ForeignKey('Neighbourhood', on_delete=models.CASCADE, blank=True, null=True)
    role = models.PositiveSmallIntegerField(choices=ROLE_CHOICES, null=True, blank=True)

    class Meta:
        db_table = 'profiles'
        ordering = ['name']

    def __str__(self):  # (was __unicode__ on Python 2)
        return f'{self.name}'
@receiver(post_save, sender=User)
def save_profile(sender, instance, **kwargs):
    """Re-save the related Profile whenever its User is saved."""
    instance.profile.save()
@receiver(post_save, sender=User)
def create_or_update_user_profile(sender, instance, created, **kwargs):
    """Create a Profile for newly created Users, then save it.

    NOTE(review): together with the save_profile receiver, the profile is
    saved twice per User save — redundant but harmless; verify intent.
    """
    if created:
        Profile.objects.create(name=instance)
    instance.profile.save()
class Neighbourhood(models.Model):
    """A residential estate that profiles, posts and updates belong to."""
    name = models.CharField(max_length=100)
    location = models.CharField(max_length=150)
    image = models.ImageField(upload_to='hood_images/', blank=True, max_length=None)

    class Meta:
        db_table = 'neighbourhoods'
        ordering = ['name']

    def __str__(self):
        return f"{self.name}"

    @classmethod
    def search_hood(cls, searchTerm):
        """Case-insensitive substring search on neighbourhood name."""
        hoods = cls.objects.filter(name__icontains=searchTerm)
        return hoods
class Updates(models.Model):
    """A notification/announcement published for a neighbourhood."""
    title = models.CharField(max_length=250)
    notification = models.CharField(max_length=170)
    tag = models.CharField(max_length=250)
    editor = models.ForeignKey(User, on_delete=models.CASCADE)
    estate = models.ForeignKey('Neighbourhood', on_delete=models.CASCADE, blank=True, null=True)
    up_date = models.DateTimeField(auto_now_add=True)  # set once at creation

    def __str__(self):
        return f"{self.title}"
class Business(models.Model):
    """A business listed by a resident (the ``owner``)."""
    name = models.CharField(max_length=250)
    description = models.TextField()
    location = models.CharField(max_length=250)
    owner = models.ForeignKey(User, on_delete=models.CASCADE)

    class Meta:
        db_table = 'businesses'
        ordering = ['-name']

    # NOTE(review): sibling models define __str__; this one only defines
    # __repr__, so admin/list displays will use the default object label.
    def __repr__(self):
        return f'{self.name}'

    @classmethod
    def search_biz(cls, searchTerm):
        """Case-insensitive substring search on business name."""
        biz = cls.objects.filter(name__icontains=searchTerm)
        return biz
class EmergencyContact(models.Model):
    """An emergency service/contact entry (name, number, description)."""
    name = models.CharField(max_length=250)
    contact = models.CharField(max_length=250)
    description = models.TextField()

    class Meta:
        db_table = 'e_contacts'
        ordering = ['-name']

    # NOTE(review): __repr__ instead of __str__, unlike most sibling models.
    def __repr__(self):
        return f'{self.name}'

    @classmethod
    def search_emergencies(cls, searchTerm):
        """Case-insensitive substring search on contact name."""
        emergencies = cls.objects.filter(name__icontains=searchTerm)
        return emergencies
class Post(models.Model):
    """A discussion post published by a user in a neighbourhood."""
    title = models.CharField(max_length=250)
    content = models.TextField(max_length=250)
    tag = models.CharField(max_length=250)
    owner = models.ForeignKey(User, on_delete=models.CASCADE)
    # NOTE(review): default=1 assumes a Neighbourhood with pk 1 exists;
    # related_name 'hoods' reads oddly (it names the reverse post set).
    hood = models.ForeignKey('Neighbourhood', on_delete=models.CASCADE, related_name='hoods', default=1)

    class Meta:
        db_table = 'posts'
        ordering = ['-title']

    def __repr__(self):
        return f'{self.title}'

    @classmethod
    def search_posts(cls, searchTerm):
        """Case-insensitive substring search on post title."""
        posts = cls.objects.filter(title__icontains=searchTerm)
        return posts
class MyHoodmailer(models.Model):
name = models.CharField(max_length = 30)
email = models.EmailField | 2.15625 | 2 |
datasets/smart_buildings_dataset.py | pganssle-google/madi | 0 | 12766673 | # Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides access to the Smart Buidlings dataset for Anomaly Detection."""
from madi.datasets.base_dataset import BaseDataset
import numpy as np
import pandas as pd
import tensorflow as tf
# Bundled sample data file and its accompanying README.
_DATA_FILE = "madi/datasets/data/anomaly_detection_sample_1577622599.csv"
_README_FILE = "madi/datasets/data/anomaly_detection_sample_1577622599_README.md"
class SmartBuildingsDataset(BaseDataset):
    """Smart Buildings data set for Multivariate Anomaly Detection."""

    def __init__(self,
                 datafilepath: str = _DATA_FILE,
                 readmefilepath: str = _README_FILE):
        self._sample = self._load_data_file(datafilepath)
        # NOTE(review): _load_readme is not defined in this class; it is
        # presumably provided by BaseDataset — verify.
        self._description = self._load_readme(readmefilepath)

    @property
    def sample(self) -> pd.DataFrame:
        # Row-shuffled contents of the CSV (see _load_data_file).
        return self._sample

    @property
    def name(self) -> str:
        return "smart_buildings"

    @property
    def description(self) -> str:
        return self._description

    def _load_data_file(self, datafile: str) -> pd.DataFrame:
        """Read the CSV at ``datafile`` and return its rows in random order.

        Raises AssertionError if the file does not exist.
        """
        sample = None
        if not tf.io.gfile.exists(datafile):
            raise AssertionError("{} does not exist".format(datafile))
        with tf.io.gfile.GFile(datafile) as csv_file:
            sample = pd.read_csv(csv_file, header="infer", index_col=0)
        # Shuffle the rows (index order is randomized).
        sample = sample.reindex(np.random.permutation(sample.index))
        return sample
| 2.1875 | 2 |
ips/coordinator/urls.py | marklit/mass-ipv4-whois | 20 | 12766674 | <reponame>marklit/mass-ipv4-whois<gh_stars>10-100
from django.conf.urls import url
from coordinator.views import get_ips
# Route the app root to the IP-listing view.
urlpatterns = [
    url(r'^$', get_ips, name='get_ips'),
]
| 1.53125 | 2 |
PARTE_3/EX019/index.py | 0Fernando0/CursoPython | 0 | 12766675 | from datetime import date, datetime
# Collect a worker's data interactively, then print every recorded field.
pessoa = {}
pessoa['nome'] = str(input('Nome: '))
ano_nascimento = int(input('ANO DE NASCIMENTO: '))
pessoa['idade'] = datetime.now().year - ano_nascimento
pessoa['ctps'] = int(input('carteira de trabalho (0 não tem): '))
# Employment details only make sense when a work card number was given.
if pessoa['ctps'] != 0:
    pessoa['contratação'] = int(input('ano de contratação: '))
    pessoa['salário'] = float(input('salário: R$'))
    # Retirement after 35 years of contribution, counted from hiring year.
    pessoa['aposentadoria'] = (pessoa['contratação'] + 35) - datetime.now().year
for campo, valor in pessoa.items():
    print(f'- {campo} é igual a {valor}')
asteroids/utils.py | lxndrdagreat/game-dev-with-python | 2 | 12766676 | <reponame>lxndrdagreat/game-dev-with-python
from enum import Enum
import math
from typing import Tuple
import arcade
class ThemeColors(Enum):
    """Named theme colors.

    Each member's value is a 1-tuple so the custom __init__ receives the
    color and re-exposes it as the plain ``.color`` attribute.
    """
    FOREGROUND = (arcade.color.AFRICAN_VIOLET, )
    BACKGROUND = (arcade.color.AERO_BLUE, )

    def __init__(self, color):
        # Unpack the single-element tuple value.
        self.color = color
def rotate_point(x: float, y: float, ox: float, oy: float, angle_degrees: float) -> Tuple[float, float]:
    """Rotate point (x, y) counter-clockwise by ``angle_degrees`` around (ox, oy).

    Returns the rotated point as a (x, y) tuple of floats.  Note: both the
    angle in radians and the resulting coordinates are rounded to 2 decimal
    places, so the result is approximate by design.

    Fix: the annotations previously claimed ``int`` in and out, but
    ``round(..., 2)`` always produces floats and float inputs are valid.
    """
    angle_rads = round(math.radians(angle_degrees), 2)
    s = math.sin(angle_rads)
    c = math.cos(angle_rads)
    # Translate so the pivot is at the origin, rotate, then translate back.
    x -= ox
    y -= oy
    new_x = round(x * c - y * s, 2) + ox
    new_y = round(x * s + y * c, 2) + oy
    return (new_x, new_y)
def point_in_polygon(point_x: int, point_y: int, polygon: list) -> bool:
    """Ray-casting (even-odd) test: is (point_x, point_y) inside ``polygon``?

    ``polygon`` is a list of (x, y) vertex tuples.  Points lying exactly on
    an edge may be classified either way, as usual for this algorithm.
    """
    inside = False
    count = len(polygon)
    prev_x, prev_y = polygon[0]
    # Walk every edge, closing the ring by wrapping the index with modulo.
    for index in range(count + 1):
        cur_x, cur_y = polygon[index % count]
        if min(prev_y, cur_y) < point_y <= max(prev_y, cur_y) and point_x <= max(prev_x, cur_x):
            # The edge cannot be horizontal here (a horizontal edge cannot
            # satisfy min < point_y <= max), so the division is safe.
            x_cross = (point_y - prev_y) * (cur_x - prev_x) / (cur_y - prev_y) + prev_x
            if prev_x == cur_x or point_x <= x_cross:
                inside = not inside
        prev_x, prev_y = cur_x, cur_y
    return inside
| 3.296875 | 3 |
eeve/importer.py | vMarcelino/eeve | 1 | 12766677 | <filename>eeve/importer.py
import os
import sys
import importlib
import travel_backpack.exceptions
def import_from_folder(folder):
    """Import every module/package found directly inside ``folder``.

    The absolute folder path is prepended to ``sys.path`` (once).  Each
    entry that fails to import is reported and skipped.  Returns the list
    of successfully imported module objects.
    """
    modules = []
    folder = os.path.abspath(folder)
    if folder not in sys.path:
        sys.path.insert(0, folder)
    for entry in os.listdir(folder):
        try:
            if entry in ['.', '..', '__pycache__', '.vscode']:
                continue
            print('inspecting', entry)
            full_path = os.path.join(folder, entry)
            if os.path.isfile(full_path):
                # a regular file: strip the extension to get the module name
                module_name = os.path.splitext(os.path.basename(full_path))[0]
                print('==> importing', module_name)
                modules.append(importlib.import_module(module_name))
            elif os.path.isdir(full_path):
                # a directory: treat it as a package
                module_name = os.path.basename(full_path)
                print('--> importing', module_name)
                modules.append(importlib.import_module(module_name))
        except Exception as ex:
            # report and keep going with the remaining entries
            print(travel_backpack.exceptions.format_exception_string(ex))
    return modules
| 2.71875 | 3 |
glue/plugins/tools/spectrum_tool/qt/tests/test_profile_viewer.py | ejeschke/glue | 3 | 12766678 | <reponame>ejeschke/glue
from __future__ import absolute_import, division, print_function
from collections import namedtuple
import pytest
import numpy as np
from mock import MagicMock
from ..profile_viewer import ProfileViewer
from glue.utils import renderless_figure
# Shared draw-less figure reused by every test (draw is mocked per-test).
FIG = renderless_figure()

# Minimal stand-in for a matplotlib mouse/keyboard event.
Event = namedtuple('Event', 'xdata ydata inaxes button dblclick')
class TestProfileViewer(object):
    """Unit tests for ProfileViewer, driven by synthetic matplotlib events."""

    def setup_method(self, method):
        FIG.clf()
        FIG.canvas.draw = MagicMock()  # avoid real rendering
        self.viewer = ProfileViewer(FIG)
        self.axes = self.viewer.axes

    def test_set_profile(self):
        self.viewer.set_profile([1, 2, 3], [2, 3, 4])
        self.axes.figure.canvas.draw.assert_called_once_with()

    def test_new_value_callback_fire(self):
        cb = MagicMock()
        s = self.viewer.new_value_grip(callback=cb)
        s.value = 20
        cb.assert_called_once_with(20)

    def test_new_range_callback_fire(self):
        cb = MagicMock()
        s = self.viewer.new_range_grip(callback=cb)
        s.range = (20, 40)
        cb.assert_called_once_with((20, 40))

    def test_pick_grip(self):
        # picking close to the grip's value returns that grip
        self.viewer.set_profile([1, 2, 3], [10, 20, 30])
        s = self.viewer.new_value_grip()
        s.value = 1.7
        assert self.viewer.pick_grip(1.7, 20) is s

    def test_pick_grip_false(self):
        # picking far from the grip returns None
        self.viewer.set_profile([1, 2, 3], [10, 20, 30])
        s = self.viewer.new_value_grip()
        s.value = 3
        assert self.viewer.pick_grip(1.7, 20) is None

    def test_pick_range_grip(self):
        # both edges of a range grip are pickable; points outside are not
        self.viewer.set_profile([1, 2, 3], [10, 20, 30])
        s = self.viewer.new_range_grip()
        s.range = (1.5, 2.5)
        assert self.viewer.pick_grip(1.5, 20) is s
        assert self.viewer.pick_grip(2.5, 20) is s
        assert self.viewer.pick_grip(1.0, 20) is None

    def test_value_drag_updates_value(self):
        h = self.viewer.new_value_grip()
        x2 = h.value + 10
        self._click(h.value)
        self._drag(x2)
        self._release()
        assert h.value == x2

    def test_disabled_grips_ignore_events(self):
        h = self.viewer.new_value_grip()
        h.value = 5
        h.disable()
        self._click(h.value)
        self._drag(10)
        self._release()
        assert h.value == 5

    def test_value_ignores_distant_picks(self):
        self.viewer.set_profile([1, 2, 3], [1, 2, 3])
        h = self.viewer.new_value_grip()
        h.value = 3
        self._click(1)
        self._drag(2)
        self._release()
        assert h.value == 3

    def test_range_translates_on_center_drag(self):
        # dragging the middle of a range moves both edges together
        h = self.viewer.new_range_grip()
        h.range = (1, 3)
        self._click_range_center(h)
        self._drag(1)
        self._release()
        assert h.range == (0, 2)

    def test_range_stretches_on_edge_drag(self):
        # dragging an edge moves only that edge
        h = self.viewer.new_range_grip()
        h.range = (1, 3)
        self._click(1)
        self._drag(2)
        self._release()
        assert h.range == (2, 3)

    def test_range_redefines_on_distant_drag(self):
        # dragging far from the range starts a fresh selection
        self.viewer.set_profile([1, 2, 3], [1, 2, 3])
        h = self.viewer.new_range_grip()
        h.range = (2, 2)
        self._click(1)
        self._drag(1.5)
        self._release()
        assert h.range == (1, 1.5)

    def test_dblclick_sets_value(self):
        h = self.viewer.new_value_grip()
        h.value = 1
        self._click(1.5, double=True)
        assert h.value == 1.5

    # --- helpers that feed synthetic mouse events to the viewer ---

    def _click_range_center(self, grip):
        x, y = sum(grip.range) / 2, 0
        self._click(x, y)

    def _click(self, x, y=0, double=False):
        e = Event(xdata=x, ydata=y, inaxes=True, button=1, dblclick=double)
        self.viewer._on_down(e)

    def _drag(self, x, y=0):
        e = Event(xdata=x, ydata=y, inaxes=True, button=1, dblclick=False)
        self.viewer._on_move(e)

    def _release(self):
        e = Event(xdata=0, ydata=0, inaxes=True, button=1, dblclick=False)
        self.viewer._on_up(e)

    def test_fit(self):
        # fit() should forward only the data inside xlim to the fitter
        fitter = MagicMock()
        self.viewer.set_profile([0, 1, 2, 3, 4, 5], [1, 2, 3, 4, 5, 6])
        self.viewer.fit(fitter, xlim=[1, 3])
        args = fitter.build_and_fit.call_args[0]
        np.testing.assert_array_equal(args[0], [1, 2, 3])
        np.testing.assert_array_equal(args[1], [2, 3, 4])

    def test_fit_error_without_profile(self):
        with pytest.raises(ValueError) as exc:
            self.viewer.fit(None)
        assert exc.value.args[0] == "Must set profile before fitting"

    def test_new_select(self):
        # new_select/new_drag/release define a range from scratch
        h = self.viewer.new_range_grip()
        h.new_select(0, 1)
        h.new_drag(1, 1)
        h.release()
        assert h.range == (0, 1)

        h.new_select(1, 1)
        h.new_drag(.5, 1)
        h.release()
        assert h.range == (0.5, 1)

        h.new_select(.4, 1)
        h.new_drag(.4, 1)
        h.release()
        assert h.range == (.4, .4)
| 1.953125 | 2 |
src/model/ImageDataset.py | Lucianod28/topo-csc | 0 | 12766679 | import torch
from torch.utils.data import Dataset
import numpy as np
from scipy.io import loadmat
class NatPatchDataset(Dataset):
    """Square patches cropped from the natural-image set stored in a .mat file.

    The file must contain an ``IMAGES`` array of shape
    (img_size, img_size, n_img).  For training, ``N`` random patches are
    drawn from every image except the last; with ``test=True`` a fixed patch
    from the held-out last image is used so figures are reproducible.
    Every patch is made zero-mean (its own mean is subtracted).
    """

    def __init__(self, N: int, width: int, height: int, border: int = 4,
                 fpath: str = '../../data/IMAGES.mat', test=False):
        super(NatPatchDataset, self).__init__()
        self.N = N            # patches per image
        self.width = width
        self.height = height
        self.border = border  # margin excluded from random crop positions
        self.fpath = fpath
        self.test = test
        # holder, filled by extract_patches_()
        self.images = None
        # initialize patches
        self.extract_patches_()

    def __len__(self):
        return self.images.shape[0]

    def __getitem__(self, idx):
        return self.images[idx]

    def extract_patches_(self):
        """Crop patches into ``self.images`` (n_patches x width x height)."""
        X = loadmat(self.fpath)['IMAGES']
        img_size = X.shape[0]
        n_img = X.shape[2]
        if self.test:
            # deterministic patch from the held-out last image
            image_indices = [-1]
        else:
            # hold out the last image for testing
            image_indices = list(range(n_img - 1))
        # BUG FIX: allocate exactly the number of patches produced.  The old
        # code sized the buffer as N * n_img, which left N all-zero patches
        # at the end of the training set (and N*(n_img-1) zeros in test
        # mode), padding the dataset with blank samples.
        self.images = torch.zeros((self.N * len(image_indices), self.width, self.height))
        counter = 0
        for i in image_indices:
            img = X[:, :, i]
            for _ in range(self.N):
                if self.test:  # use a deterministic patch for producing figures
                    x = 63
                    y = 14
                else:
                    x = np.random.randint(self.border, img_size - self.width - self.border)
                    y = np.random.randint(self.border, img_size - self.height - self.border)
                crop = torch.tensor(img[x:x + self.width, y:y + self.height])
                # store the zero-mean patch
                self.images[counter, :, :] = crop - crop.mean()
                counter += 1
| 2.40625 | 2 |
RC3D/operator_py/proposal_target_twin.py | likelyzhao/MxNet-RC3D | 1 | 12766680 | <reponame>likelyzhao/MxNet-RC3D<filename>RC3D/operator_py/proposal_target_twin.py
# --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2016 by Contributors
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Modified by <NAME>
# --------------------------------------------------------
"""
Proposal Target Operator selects foreground and background roi and assigns label, bbox_transform to them.
"""
import mxnet as mx
import numpy as np
from distutils.util import strtobool
from easydict import EasyDict as edict
import cPickle
from core.rcnn import sample_rois
from twin.twin_transform import twin_overlaps,twin_transform
import numpy.random as npr
# Set True to print fg/bg sampling statistics in ProposalTargetOperator.
DEBUG = False
def _sample_rois(all_rois, gt_wins, fg_rois_per_image, rois_per_image, num_classes, cfg):
    """Generate a random sample of RoIs comprising foreground and background
    examples.

    Returns (labels, rois, twin_targets, twin_inside_weights) for the
    sampled subset; background labels are clamped to 0.
    """
    # overlaps: (rois x gt_wins)
    overlaps = twin_overlaps(
        np.ascontiguousarray(all_rois[:, 1:3], dtype=np.float),
        np.ascontiguousarray(gt_wins[:, :2], dtype=np.float))
    gt_assignment = overlaps.argmax(axis=1)
    max_overlaps = overlaps.max(axis=1)
    labels = gt_wins[gt_assignment, 2]

    # Select foreground RoIs as those with >= FG_THRESH overlap
    fg_inds = np.where(max_overlaps >= cfg.TRAIN.FG_THRESH)[0]
    # Guard against the case when an image has fewer than fg_rois_per_image
    # foreground RoIs
    fg_rois_per_this_image = min(fg_rois_per_image, fg_inds.size)
    # Sample foreground regions without replacement
    if fg_inds.size > 0:
        fg_inds = npr.choice(fg_inds, size=fg_rois_per_this_image, replace=False)

    # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
    bg_inds = np.where((max_overlaps < cfg.TRAIN.BG_THRESH_HI) &
                       (max_overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
    # Compute number of background RoIs to take from this image (guarding
    # against there being fewer than desired)
    bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
    bg_rois_per_this_image = min(bg_rois_per_this_image, bg_inds.size)
    # Sample background regions without replacement
    if bg_inds.size > 0:
        bg_inds = npr.choice(bg_inds, size=bg_rois_per_this_image, replace=False)

    # The indices that we're selecting (both fg and bg)
    keep_inds = np.append(fg_inds, bg_inds)
    # Select sampled values from various arrays:
    labels = labels[keep_inds]
    # Clamp labels for the background RoIs to 0
    labels[fg_rois_per_this_image:] = 0
    rois = all_rois[keep_inds]

    # BUG FIX: _compute_targets and _get_twin_regression_labels both require
    # the cfg argument; the original calls omitted it (TypeError at runtime).
    twin_target_data = _compute_targets(
        rois[:, 1:3], gt_wins[gt_assignment[keep_inds], :2], labels, cfg)
    twin_targets, twin_inside_weights = \
        _get_twin_regression_labels(twin_target_data, num_classes, cfg)

    return labels, rois, twin_targets, twin_inside_weights
def _get_twin_regression_labels(twin_target_data, num_classes,cfg):
"""Bounding-box regression targets (twin_target_data) are stored in a
compact form N x (class, tx, tl)
This function expands those targets into the 4-of-2*K representation used
by the network (i.e. only one class has non-zero targets).
Returns:
twin_target (ndarray): N x 4K blob of regression targets
twin_inside_weights (ndarray): N x 4K blob of loss weights
"""
clss = twin_target_data[:, 0]
twin_targets = np.zeros((clss.size, 2 * num_classes), dtype=np.float32)
twin_inside_weights = np.zeros(twin_targets.shape, dtype=np.float32)
inds = np.where(clss > 0)[0]
for ind in inds:
cls = clss[ind]
start = int(2 * cls)
end = start + 2
twin_targets[ind, start:end] = twin_target_data[ind, 1:]
twin_inside_weights[ind, start:end] = cfg.TRAIN.TWIN_INSIDE_WEIGHTS
return twin_targets, twin_inside_weights
def _compute_targets(ex_rois, gt_rois, labels, cfg):
    """Compute twin regression targets for one image.

    Returns an N x 3 float32 array: (class_label, delta_center, delta_length),
    optionally normalized by the precomputed mean/std from ``cfg``.
    """
    assert ex_rois.shape[0] == gt_rois.shape[0]
    assert ex_rois.shape[1] == 2
    assert gt_rois.shape[1] == 2

    deltas = twin_transform(ex_rois, gt_rois)
    if cfg.TRAIN.TWIN_NORMALIZE_TARGETS_PRECOMPUTED:
        # Optionally normalize targets by a precomputed mean and stdev
        means = np.array(cfg.TRAIN.TWIN_NORMALIZE_MEANS)
        stds = np.array(cfg.TRAIN.TWIN_NORMALIZE_STDS)
        deltas = (deltas - means) / stds
    return np.hstack(
        (labels[:, np.newaxis], deltas)).astype(np.float32, copy=False)
def _sample_all_rois(all_rois, gt_wins, num_classes, cfg):
    """Assign labels and twin regression targets to EVERY RoI (no subsampling).

    Returns (labels, rois, twin_targets, twin_inside_weights) for all input
    RoIs, each labeled by its best-overlapping ground-truth window.

    Fixes: removed the dead ``labels = labels`` self-assignment and the
    unused ``max_overlaps`` computation.
    """
    # overlaps: (rois x gt_wins)
    overlaps = twin_overlaps(
        np.ascontiguousarray(all_rois[:, 1:3], dtype=np.float),
        np.ascontiguousarray(gt_wins[:, :2], dtype=np.float))
    gt_assignment = overlaps.argmax(axis=1)
    labels = gt_wins[gt_assignment, 2]
    rois = all_rois

    twin_target_data = _compute_targets(
        rois[:, 1:3], gt_wins[gt_assignment, :2], labels, cfg)
    twin_targets, twin_inside_weights = \
        _get_twin_regression_labels(twin_target_data, num_classes, cfg)

    return labels, rois, twin_targets, twin_inside_weights
class ProposalTargetOperator(mx.operator.CustomOp):
def __init__(self, num_classes, batch_images, batch_rois, cfg, fg_fraction):
super(ProposalTargetOperator, self).__init__()
self._num_classes = num_classes
self._batch_images = batch_images
self._batch_rois = batch_rois
self._cfg = cfg
self._fg_fraction = fg_fraction
if DEBUG:
self._count = 0
self._fg_num = 0
self._bg_num = 0
def forward(self, is_train, req, in_data, out_data, aux):
assert self._batch_rois == -1 or self._batch_rois % self._batch_images == 0, \
'batchimages {} must devide batch_rois {}'.format(self._batch_images, self._batch_rois)
all_rois = in_data[0].asnumpy()
gt_wins = in_data[1].asnumpy()
if self._batch_rois == -1:
rois_per_image = all_rois.shape[0] + gt_wins.shape[0]
fg_rois_per_image = rois_per_image
else:
rois_per_image = self._batch_rois / self._batch_images
fg_rois_per_image = np.round(self._fg_fraction * rois_per_image).astype(int)
# Include ground-truth boxes in the set of candidate rois
zeros = np.zeros((gt_wins.shape[0], 1), dtype=gt_wins.dtype)
all_rois = np.vstack((all_rois, np.hstack((zeros, gt_wins[:, :-1]))))
# Sanity check: single batch only
assert np.all(all_rois[:, 0] == 0), 'Only single item batches are supported'
if self._sample == "All":
labels, rois, twin_targets, twin_inside_weights = _sample_all_rois(
all_rois, gt_wins, self._num_classes)
else:
# Sample rois with classification labels and bounding box regression
# targets
num_images = 1
rois_per_image = self._cfg.TRAIN.BATCH_SIZE / num_images
fg_rois_per_image = int(round(self._cfg.TRAIN.FG_FRACTION * rois_per_image))
labels, rois, twin_targets, twin_inside_weights = _sample_rois(
all_rois, gt_wins, fg_rois_per_image,
rois_per_image, self._num_classes)
# rois, labels, bbox_targets, bbox_weights = \
# _sample_all_rois(all_rois, fg_rois_per_image, rois_per_image, self._num_classes, self._cfg, gt_boxes=gt_boxes,cfg=self._cfg)
if DEBUG:
print "labels=", labels
print 'num fg: {}'.format((labels > 0).sum())
print 'num bg: {}'.format((labels == 0).sum())
self._count += 1
self._fg_num += (labels > 0).sum()
self._bg_num += (labels == 0).sum()
print "self._count=", self._count
print 'num fg avg: {}'.format(self._fg_num / self._count)
print 'num bg avg: {}'.format(self._bg_num / self._count)
print 'ratio: {:.3f}'.format(float(self._fg_num) / float(self._bg_num))
for ind, val in enumerate([rois, labels, twin_targets, twin_inside_weights]):
self.assign(out_data[ind], req[ind], val)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], 0)
self.assign(in_grad[1], req[1], 0)
@mx.operator.register('proposal_target_twin')
class ProposalTargetProp(mx.operator.CustomOpProp):
    """Symbol-side registration and shape inference for proposal_target_twin."""

    def __init__(self, num_classes, batch_images, batch_rois, cfg, fg_fraction='0.25'):
        super(ProposalTargetProp, self).__init__(need_top_grad=False)
        self._num_classes = int(num_classes)
        self._batch_images = int(batch_images)
        self._batch_rois = int(batch_rois)
        self._cfg = cfg
        self._fg_fraction = float(fg_fraction)

    def list_arguments(self):
        return ['rois', 'gt_boxes']

    def list_outputs(self):
        return ['rois_output', 'label', 'bbox_target', 'bbox_weight']

    def infer_shape(self, in_shape):
        rpn_rois_shape = in_shape[0]
        gt_boxes_shape = in_shape[1]

        rois = rpn_rois_shape[0] + gt_boxes_shape[0] if self._batch_rois == -1 else self._batch_rois

        # BUG FIX: temporal windows ("twins") have 2 coordinates (start, end),
        # so rois carry (batch_idx, start, end) = 3 columns and the per-class
        # regression targets are 2 values each; the original 5/4 widths were
        # left over from the 2D bounding-box version of this operator.
        output_rois_shape = (rois, 3)
        label_shape = (rois, )
        bbox_target_shape = (rois, self._num_classes * 2)
        bbox_weight_shape = (rois, self._num_classes * 2)

        return [rpn_rois_shape, gt_boxes_shape], \
               [output_rois_shape, label_shape, bbox_target_shape, bbox_weight_shape]

    def create_operator(self, ctx, shapes, dtypes):
        return ProposalTargetOperator(self._num_classes, self._batch_images, self._batch_rois, self._cfg, self._fg_fraction)

    def declare_backward_dependency(self, out_grad, in_data, out_data):
        return []
| 1.859375 | 2 |
src/haddock2mmcif/modules/docking.py | haddocking/haddock2mmcif | 0 | 12766681 | import ihm.model
import logging
dockinglog = logging.getLogger("log")
class DockingModel(ihm.model.Model):
    """Memory-efficient subclass of ihm.model.Model.

    The stock ihm API expects an atom list whose entries each embed an
    ihm.AsymUnit object, duplicating that (heavy) object once per atom.
    Here each raw atom tuple instead carries a lightweight asym key, and
    ``get_atoms`` resolves it through ``asym_unit_map`` on the fly, so the
    AsymUnit objects are stored only once.
    """

    def __init__(self, assymetric_dic, atom_list, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # key -> ihm.AsymUnit, shared by all atoms of that unit
        self.asym_unit_map = assymetric_dic
        # tuples of (asym_key, seq_id, type_symbol, atom_id, x, y, z)
        self.atom_list = atom_list

    def get_atoms(self):
        """Yield ihm.model.Atom objects, resolving asym keys lazily."""
        make_atom = ihm.model.Atom
        for asym_key, seq_id, symbol, atom_id, x, y, z in self.atom_list:
            yield make_atom(
                asym_unit=self.asym_unit_map[asym_key],
                type_symbol=symbol,
                seq_id=seq_id,
                atom_id=atom_id,
                x=x,
                y=y,
                z=z,
            )
| 2.578125 | 3 |
modules/centroid/centroid_define.py | RealAllenDa/EEWMap | 0 | 12766682 | <filename>modules/centroid/centroid_define.py
"""
EEWMap - Modules - Centroid - Centroid_Define
The main entry point of this module.
"""
import csv
import json
import time
import traceback
import requests
from config import PROXY
from modules.utilities import response_verify, relpath
class Centroid:
    """A centroid class that contains area & city centroids."""

    def __init__(self, logger):
        """
        Initializes the instance.

        :param logger: The Flask app logger
        """
        self.logger = logger
        self._area_centroid = {}
        self._station_centroid = {}
        self._eq_station_centroid = {}
        start_initialize_time = time.perf_counter()
        self.logger.debug("Initializing Centroid library...")
        # NOTE(review): refresh_stations() already calls _init_station_centroid()
        # on success; the explicit call below also covers the failure paths.
        self.refresh_stations()
        self._init_area_centroid()
        self._init_earthquake_station_centroid()
        self._init_station_centroid()
        self.logger.debug(f"Successfully initialized centroid library "
                          f"in {(time.perf_counter() - start_initialize_time):.3f} seconds.")

    def refresh_stations(self):
        """
        Updates intensity station names using DM-S.S.S, and refreshes the stations.

        NOTE: The refresh station information are fetched using a bad/borrowed key.
              For commercial uses, please change the key to a self-obtained one.
        """
        self.logger.info("Updating intensity station names...")
        try:
            response = requests.get(
                url="https://api.dmdata.jp/v2/parameter/earthquake/station?key=1603dbeeac99a4df6b61403626b9decc19850c571809edc1",
                proxies=PROXY, timeout=10
            )
            response.encoding = "utf-8"
            if not response_verify(response):
                self.logger.error("Failed to update intensity stations. (response code not 200)")
                return
        except:
            self.logger.error("Failed to update intensity stations. Exception occurred: \n" + traceback.format_exc())
            return
        response = response.json()
        if response.get("status", "") == "error":
            self.logger.error("Failed to update intensity stations. (response status error)")
            return
        # Keep only stations currently in service (status "現" = current).
        to_write = ""
        for i in response["items"]:
            if i["status"] != "現":
                continue
            name = i["name"]
            latitude = i["latitude"]
            longitude = i["longitude"]
            to_write += f"{name},{latitude},{longitude}\n"
        with open(relpath("./intensity_stations.csv"), "w+", encoding="utf-8") as f:
            f.write(to_write)
            f.close()
        self.logger.info("Successfully updated intensity station names!")
        self._init_station_centroid()

    def _init_area_centroid(self):
        """
        Initializes the centroid for the areas.
        """
        start_initialize_time = time.perf_counter()
        with open(relpath("./jma_area_centroid.csv"), "r", encoding="utf-8") as f:
            fieldnames = ("name", "latitude", "longitude")
            reader = csv.DictReader(f, fieldnames)
            for row in reader:
                self._area_centroid[row["name"]] = (row["latitude"], row["longitude"])
            f.close()
        self.logger.debug(f"Successfully initialized centroid for areas "
                          f"in {(time.perf_counter() - start_initialize_time):.3f} seconds.")

    def _init_station_centroid(self):
        """
        Initializes the centroid for intensity stations.
        """
        start_initialize_time = time.perf_counter()
        with open(relpath("./intensity_stations.csv"), "r", encoding="utf-8") as f:
            fieldnames = ("name", "latitude", "longitude")
            reader = csv.DictReader(f, fieldnames)
            for row in reader:
                self._station_centroid[row["name"]] = (row["latitude"], row["longitude"])
            f.close()
        self.logger.debug(f"Successfully initialized centroid for stations "
                          f"in {(time.perf_counter() - start_initialize_time):.3f} seconds.")

    def _init_earthquake_station_centroid(self):
        """
        Initializes the centroid for observation stations.
        """
        start_initialize_time = time.perf_counter()
        with open(relpath("./observation_points.json"), "r", encoding="utf-8") as f:
            self._eq_station_centroid = json.loads(f.read())
        # BUGFIX: the original removed items from the list *while iterating it*
        # (for i in list: list.remove(i)), which skips the element following
        # every removal and leaves some unusable stations behind. Filter into
        # a new list instead.
        self._eq_station_centroid = [
            station for station in self._eq_station_centroid
            if station["Point"] is not None and not station["IsSuspended"]
        ]
        self.logger.debug(f"Successfully initialized centroid for observation stations "
                          f"in {(time.perf_counter() - start_initialize_time):.3f} seconds.")

    @property
    def station_centroid(self):
        return self._station_centroid

    @property
    def area_centroid(self):
        return self._area_centroid

    @property
    def earthquake_station_centroid(self):
        return self._eq_station_centroid
| 2.734375 | 3 |
norns/enemy/admin.py | the-norns/norns | 0 | 12766683 | <gh_stars>0
from django.contrib import admin

from .models import Enemy, EnemyType

# Expose the game models in the Django admin with default ModelAdmin options.
admin.site.register(Enemy)
admin.site.register(EnemyType)
| 1.289063 | 1 |
mainapp/management/commands/test-email.py | cyroxx/meine-stadt-transparent | 34 | 12766684 | from django.core.management.base import BaseCommand
from mainapp.functions.mail import send_mail
from mainapp.models import UserProfile
class Command(BaseCommand):
    """Management command that sends a single test e-mail."""

    help = "Sends a test e-mail to check if the mail-system is configured correctly"

    def add_arguments(self, parser):
        parser.add_argument("to-email", type=str)

    def handle(self, *args, **options):
        recipient = options["to-email"]
        # Reuse the recipient's profile (if any) so the mail is rendered the
        # same way as for a registered user.
        profile = UserProfile.objects.filter(user__email=recipient).first()
        send_mail(
            recipient,
            "Hello 🌏",
            "The test e-mail has arrived 🎉",
            "<h1>The test e-mail has arrived 🎉</h1>",
            profile,
        )
| 2.28125 | 2 |
src/globus_sdk/services/gcs/response.py | rudyardrichter/globus-sdk-python | 47 | 12766685 | <reponame>rudyardrichter/globus-sdk-python
import re
from typing import Any, Callable, Dict, Optional, Union
from globus_sdk.response import GlobusHTTPResponse, IterableResponse
class IterableGCSResponse(IterableResponse):
    """
    Response class for non-paged list oriented resources. Allows top level
    fields to be accessed normally via standard item access, and also
    provides a convenient way to iterate over the sub-item list in the
    ``data`` key:

    >>> print("Path:", r["path"])
    >>> # Equivalent to: for item in r["data"]
    >>> for item in r:
    >>>     print(item["name"], item["type"])
    """

    # IterableResponse iterates over the value stored under this key.
    default_iter_key = "data"
class UnpackingGCSResponse(GlobusHTTPResponse):
    """
    A response wrapper that searches the top-level ``data`` array for the
    first dict entry accepted by a matcher and exposes it as the ``data``
    property. The untouched payload remains available as ``full_data``.

    If no entry matches, or the payload is missing/misshapen, ``data`` falls
    back to the full parsed response (identical to ``full_data``).

    :param match: Either a string containing a DATA_TYPE prefix, or an arbitrary
        callable which does the matching
    :type match: str or callable
    """

    def _default_unpacking_match(self, spec: str) -> Callable[[Dict[str, Any]], bool]:
        # Only bare word specs are valid (e.g. "endpoint", never "endpoint#1.0").
        if not re.fullmatch(r"\w+", spec):
            raise ValueError("Invalid UnpackingGCSResponse specification.")

        def matcher(candidate: Dict[str, Any]) -> bool:
            dtype = candidate.get("DATA_TYPE")
            if not isinstance(dtype, str):
                return False
            if "#" not in dtype:
                return False
            prefix, _sep, _version = dtype.partition("#")
            return prefix == spec

        return matcher

    def __init__(
        self,
        response: GlobusHTTPResponse,
        match: Union[str, Callable[[Dict[str, Any]], bool]],
    ):
        super().__init__(response)
        if callable(match):
            self._match_func = match
        else:
            self._match_func = self._default_unpacking_match(match)
        self._unpacked_data: Optional[Dict[str, Any]] = None
        self._did_unpack = False

    @property
    def full_data(self) -> Any:
        """
        The full, parsed JSON response data.
        ``None`` if the data cannot be parsed as JSON.
        """
        return self._parsed_json

    def _unpack(self) -> Optional[Dict[str, Any]]:
        """
        Return the first matching dict from the ``data`` array, or None when
        nothing matches or the payload has the wrong shape.
        """
        payload = self._parsed_json
        if not isinstance(payload, dict):
            return None
        entries = payload.get("data")
        if not isinstance(entries, list):
            return None
        for entry in entries:
            if isinstance(entry, dict) and self._match_func(entry):
                return entry
        return None

    @property
    def data(self) -> Any:
        # Unpacking walks the whole payload; do it at most once per response.
        if not self._did_unpack:
            self._unpacked_data = self._unpack()
            self._did_unpack = True
        if self._unpacked_data is None:
            return self._parsed_json
        return self._unpacked_data
| 2.703125 | 3 |
kerasncp/utils.py | lihui-colin/keras-ncp | 0 | 12766686 | # Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging.version import parse
try:
    import tensorflow as tf
except ImportError:
    # Narrowed from a bare `except:`, which would also have swallowed
    # KeyboardInterrupt / SystemExit. Re-raise with an install hint.
    raise ImportWarning(
        "It seems like the Tensorflow package is not installed\n"
        "Please run"
        "`$ pip install tensorflow`. \n",
    )
def check_tf_version():
    """Raise ImportError unless the installed TensorFlow is at least 2.0.0."""
    if parse(tf.__version__) >= parse("2.0.0"):
        return
    raise ImportError(
        "The Tensorflow package version needs to be at least 2.0.0 \n"
        "for keras-ncp to run. Currently, your TensorFlow version is \n"
        "{version}. Please upgrade with \n"
        "`$ pip install --upgrade tensorflow`. \n"
        "You can use `pip freeze` to check afterwards that everything is "
        "ok.".format(version=tf.__version__)
    )
| 2.875 | 3 |
src/datasets/base_dataset.py | tchaton/sagemaker-pytorch-boilerplate | 7 | 12766687 | <reponame>tchaton/sagemaker-pytorch-boilerplate<filename>src/datasets/base_dataset.py
import os
import numpy as np
import pandas as pd
import torch
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader, random_split
from src.paths import Paths
class BaseSagemakerDataset(LightningDataModule):
    """LightningDataModule base for CSV data in a SageMaker training channel.

    Two on-disk layouts are supported:
      * pre-split: ``train.csv`` and ``val.csv`` under ``P.TRAINING_PATH``
      * unsplit: any number of CSVs, concatenated and split randomly by
        ``val_split``

    Subclasses must implement :meth:`_labelize`, :attr:`num_features` and
    :attr:`num_classes`.
    """

    name = "BaseSagemaker"

    def __init__(
        self,
        val_split: float = 0.3,
        P: Paths = None,
        *args,
        seed: int = 42,
        num_workers: int = 0,
        **kwargs,
    ):
        """
        :param val_split: fraction of samples used for validation when the
            data is not already split.
        :param P: path configuration; required.
        :param seed: RNG seed for the random train/val split (keyword-only,
            backward-compatible addition).
        :param num_workers: DataLoader worker count (keyword-only,
            backward-compatible addition).
        """
        super().__init__(*args, **kwargs)
        self._P = P
        self._val_split = val_split
        # Fix: these attributes were read by _prepare_no_splitted_data() and
        # the dataloaders but were never assigned anywhere.
        self._seed = seed
        self.num_workers = num_workers
        assert self._P is not None

    @property
    def num_features(self):
        # Fix: previously *returned* NotImplementedError instead of raising it.
        raise NotImplementedError

    @property
    def num_classes(self):
        raise NotImplementedError

    def _labelize(self, raw_data):
        """Convert the label column of ``raw_data`` in place (subclass hook)."""
        raise NotImplementedError

    def prepare_splitted_data(self, path_train, path_val):
        """Load pre-split train/val CSVs into float tensors."""
        train_raw_data = pd.read_csv(path_train, header=None).values
        val_raw_data = pd.read_csv(path_val, header=None).values
        self._labelize(train_raw_data)
        self._labelize(val_raw_data)
        # Note: float (not np.float) — the np.float alias was removed in
        # modern NumPy; plain float is equivalent.
        self.dataset_train = torch.from_numpy(train_raw_data.astype(float)).float()
        self.dataset_val = torch.from_numpy(val_raw_data.astype(float)).float()

    def _prepare_no_splitted_data(self):
        """Concatenate every CSV in the channel and split train/val randomly."""
        input_files = [
            os.path.join(self._P.TRAINING_PATH, filename)
            for filename in os.listdir(self._P.TRAINING_PATH)
        ]
        if len(input_files) == 0:
            # Fix: the message previously formatted undefined names
            # (training_path, channel_name) and would itself raise NameError.
            raise ValueError(
                (
                    "There are no files in {}.\n"
                    + "This usually indicates that the channel ({}) was incorrectly specified,\n"
                    + "the data specification in S3 was incorrectly specified or the role specified\n"
                    + "does not have permission to access the data."
                ).format(self._P.TRAINING_PATH, "training")
            )
        raw_data = [pd.read_csv(file, header=None) for file in input_files]
        raw_data = pd.concat(raw_data).values
        self._labelize(raw_data)
        raw_data = torch.from_numpy(raw_data.astype(float)).float()

        raw_data_length = len(raw_data)
        train_size = int(raw_data_length * (1 - self._val_split))
        val_size = raw_data_length - train_size
        self.dataset_train, self.dataset_val = random_split(
            raw_data,
            [train_size, val_size],
            generator=torch.Generator().manual_seed(self._seed),
        )

    def prepare_data(self):
        path_train = os.path.join(self._P.TRAINING_PATH, "train.csv")
        path_val = os.path.join(self._P.TRAINING_PATH, "val.csv")
        if os.path.exists(path_train) and os.path.exists(path_val):
            # Fix: previously called self._prepare_splitted_data, which does
            # not exist (the method is named without the leading underscore).
            self.prepare_splitted_data(path_train, path_val)
        else:
            self._prepare_no_splitted_data()

    def train_dataloader(self, batch_size=32, transforms=None):
        """Shuffled training DataLoader (``transforms`` is currently unused)."""
        loader = DataLoader(
            self.dataset_train,
            batch_size=batch_size,
            shuffle=True,
            num_workers=self.num_workers,
            drop_last=True,
            pin_memory=True,
        )
        return loader

    def val_dataloader(self, batch_size=32, transforms=None):
        """Deterministic validation DataLoader (``transforms`` is unused)."""
        loader = DataLoader(
            self.dataset_val,
            batch_size=batch_size,
            shuffle=False,
            num_workers=self.num_workers,
            drop_last=True,
            pin_memory=True,
        )
        return loader
| 2.625 | 3 |
src/simple1D.py | Mm24/BiomarkersVoiceClasifierApp | 0 | 12766688 | <gh_stars>0
from keras.models import Sequential
from keras.layers import Dense, Flatten, Convolution1D, Dropout
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.initializers import random_uniform
# hyperparameters
input_dimension = 88200  # flattened input length; presumably 2 s of 44.1 kHz audio -- TODO confirm
learning_rate = 0.0025   # SGD learning rate (only used by the commented-out SGD path below)
momentum = 0.85          # SGD momentum (only used by the commented-out SGD path below)
SEED = 24                # seed for the shared weight initializer
linear_init = random_uniform(seed=SEED)  # kernel initializer shared by the Dense layers
dropout_rate = 0.2       # module-level default; create_model() takes its own dropout_rate argument
def create_model(init_shape, hidden_units=256, n_class=3, dropout_rate=0.3, loss='binary_crossentropy', optimizer='adam', metrics=None):
    """Build and compile a small 1-D CNN classifier.

    :param init_shape: input shape, e.g. ``(88200, 1)``.
    :param hidden_units: width of the first dense layer.
    :param n_class: number of output classes (softmax width).
    :param dropout_rate: dropout applied after the conv stack and the first dense layer.
    :param loss: Keras loss identifier.
    :param optimizer: Keras optimizer identifier.
    :param metrics: metrics list; defaults to ``['accuracy']``.
    :return: a compiled ``keras.Sequential`` model.
    """
    if metrics is None:
        # Fix: the previous default `metrics=['accuracy']` was a shared
        # mutable default argument.
        metrics = ['accuracy']
    # create model
    model = Sequential()
    # first kernel spans roughly a quarter of the input, forced to be odd
    ksize = 2 * int(init_shape[0] // 8) + 1
    model.add(Convolution1D(filters=32, kernel_size=ksize, input_shape=init_shape, activation='relu'))
    model.add(Convolution1D(filters=16, kernel_size=9, activation='relu'))
    model.add(Flatten())
    model.add(Dropout(dropout_rate))
    # NOTE(review): input_dim on a non-first layer is ignored by Keras.
    model.add(Dense(hidden_units, input_dim=init_shape[0], kernel_initializer=linear_init, activation='relu'))
    model.add(Dropout(dropout_rate))
    model.add(Dense(hidden_units // 2, kernel_initializer=linear_init, activation='relu'))
    model.add(Dense(n_class, kernel_initializer=linear_init, activation='softmax'))
    model.compile(loss=str(loss), optimizer=str(optimizer), metrics=metrics)
    return model
if __name__ =='__main__':
    # Smoke test: build the model and print its layer layout.
    m = create_model(init_shape = (88200,1), n_class =3)
    m.summary()
    #model.fit(X_train, y_train, epochs=5, batch_size=128)
    #predictions = model.predict_proba(X_test)
| 2.484375 | 2 |
experiments/preprocessing.py | CCWI/corpus-gtcs6k | 0 | 12766689 | import string
from nltk.tokenize.casual import TweetTokenizer
def tokenize(text):
    """Tokenize *text* with NLTK's TweetTokenizer, drop single-character
    punctuation tokens, and lowercase the remaining tokens."""
    tokenizer = TweetTokenizer()
    tokens = tokenizer.tokenize(text)
    return [token.lower() for token in tokens if token not in string.punctuation]
| 3.65625 | 4 |
clippy_runner.py | M0n0xy2/ELang | 1 | 12766690 | import subprocess
proc = subprocess.Popen(["cargo", "clippy"], stderr=subprocess.PIPE)
output = proc.stderr.read().decode("utf-8")
groups = [group for group in output.split("\n\n") if "parser.rs" not in group]
print("\n-----------------\n".join(groups[:20]))
| 2.625 | 3 |
src/playlist/forms.py | KushalVijay/Youtube-Video-Downloader | 4 | 12766691 | from django import forms
from .models import videolink
class getlink(forms.ModelForm):
    """ModelForm exposing every videolink field for URL submission."""

    class Meta:
        model = videolink
        fields = '__all__'

    def clean(self):
        # No extra validation beyond the ModelForm defaults.
        return self.cleaned_data
class downlink(forms.ModelForm):
    """ModelForm exposing every videolink field for the download flow."""

    class Meta:
        model = videolink
        fields = '__all__'

    def clean(self):
        # No extra validation beyond the ModelForm defaults.
        return self.cleaned_data
| 2.109375 | 2 |
src/tariochbctools/importers/truelayer/importer.py | markferry/beancounttools | 0 | 12766692 | import logging
from datetime import timedelta
from os import path
import dateutil.parser
import requests
import yaml
from beancount.core import amount, data
from beancount.core.number import D
from beancount.ingest import importer
class Importer(importer.ImporterProtocol):
    """An importer for Truelayer API (e.g. for Revolut).

    The importer is pointed at a ``truelayer.yaml`` config file holding the
    OAuth client credentials and a refresh token; extraction exchanges the
    refresh token for an access token and downloads transactions for every
    account and card.
    """

    def __init__(self):
        # All fields are populated lazily by _configure() when extract() runs.
        self.config = None
        self.baseAccount = None
        self.clientId = None
        self.clientSecret = None
        self.refreshToken = None
        self.sandbox = None
        self.existing_entries = None
        self.domain = "truelayer.com"

    def _configure(self, file, existing_entries):
        """Load credentials from the YAML config and select the API domain."""
        with open(file.name, "r") as f:
            self.config = yaml.safe_load(f)
            self.baseAccount = self.config["baseAccount"]
            self.clientId = self.config["client_id"]
            self.clientSecret = self.config["client_secret"]
            self.refreshToken = self.config["refresh_token"]
            # Sandbox credentials are recognised purely by their id prefix.
            self.sandbox = self.clientId.startswith("sandbox")
            self.existing_entries = existing_entries
        if self.sandbox:
            self.domain = "truelayer-sandbox.com"

    def identify(self, file):
        # This importer only handles the config file itself.
        return "truelayer.yaml" == path.basename(file.name)

    def file_account(self, file):
        return ""

    def extract(self, file, existing_entries=None):
        """Exchange the refresh token for an access token, then pull and
        convert transactions for all accounts and cards."""
        self._configure(file, existing_entries)
        r = requests.post(
            f"https://auth.{self.domain}/connect/token",
            data={
                "grant_type": "refresh_token",
                "client_id": self.clientId,
                "client_secret": self.clientSecret,
                "refresh_token": self.refreshToken,
            },
        )
        tokens = r.json()
        accessToken = tokens["access_token"]
        headers = {"Authorization": "Bearer " + accessToken}

        entries = []
        entries.extend(self._extract_endpoint_transactions("accounts", headers))
        # Card amounts are negated — presumably charges arrive with the
        # opposite sign to account transactions; confirm against the API.
        entries.extend(
            self._extract_endpoint_transactions("cards", headers, invert_sign=True)
        )
        return entries

    def _extract_endpoint_transactions(self, endpoint, headers, invert_sign=False):
        """Fetch all transactions for every item under ``endpoint``
        ("accounts" or "cards") and convert them to beancount entries.

        :param invert_sign: negate every amount/balance (used for cards).
        """
        entries = []
        r = requests.get(
            f"https://api.{self.domain}/data/v1/{endpoint}", headers=headers
        )
        # Non-OK response (presumably a missing permission/scope for this
        # endpoint): log the HTTP error and skip the endpoint entirely.
        if not r:
            try:
                r.raise_for_status()
            except requests.HTTPError as e:
                logging.warning(e)
            return []

        for account in r.json()["results"]:
            accountId = account["account_id"]
            accountCcy = account["currency"]

            r = requests.get(
                f"https://api.{self.domain}/data/v1/{endpoint}/{accountId}/transactions",
                headers=headers,
            )
            # Oldest first, so the balance assertion emitted by
            # _extract_transaction lands after the newest transaction.
            transactions = sorted(r.json()["results"], key=lambda trx: trx["timestamp"])
            for trx in transactions:
                entries.extend(
                    self._extract_transaction(
                        trx, accountCcy, transactions, invert_sign
                    )
                )

        return entries

    def _extract_transaction(self, trx, accountCcy, transactions, invert_sign):
        """Convert one TrueLayer transaction into beancount entries.

        Returns the transaction itself plus, when ``trx`` is the newest
        transaction of the account, a balance assertion dated the next day.
        """
        entries = []
        metakv = {}

        # sandbox Mock bank doesn't have a provider_id
        if "meta" in trx and "provider_id" in trx["meta"]:
            metakv["tlref"] = trx["meta"]["provider_id"]

        if trx["transaction_classification"]:
            metakv["category"] = trx["transaction_classification"][0]

        meta = data.new_metadata("", 0, metakv)
        trxDate = dateutil.parser.parse(trx["timestamp"]).date()
        account = self.baseAccount + accountCcy
        tx_amount = D(str(trx["amount"]))
        # avoid pylint invalid-unary-operand-type
        signed_amount = -1 * tx_amount if invert_sign else tx_amount
        entry = data.Transaction(
            meta,
            trxDate,
            "*",
            "",
            trx["description"],
            data.EMPTY_SET,
            data.EMPTY_SET,
            [
                data.Posting(
                    account,
                    amount.Amount(signed_amount, trx["currency"]),
                    None,
                    None,
                    None,
                    None,
                ),
            ],
        )
        entries.append(entry)

        # Last element of the sorted list == newest transaction: emit a
        # balance assertion for the following day.
        if trx["transaction_id"] == transactions[-1]["transaction_id"]:
            balDate = trxDate + timedelta(days=1)
            metakv = {}
            # Flag the balance when one already exists for that day/account —
            # presumably consumed downstream to avoid re-emitting; confirm.
            if self.existing_entries is not None:
                for exEntry in self.existing_entries:
                    if (
                        isinstance(exEntry, data.Balance)
                        and exEntry.date == balDate
                        and exEntry.account == account
                    ):
                        metakv["__duplicate__"] = True
            meta = data.new_metadata("", 0, metakv)

            # Only if the 'balance' permission is present
            if "running_balance" in trx:
                tx_balance = D(str(trx["running_balance"]["amount"]))
                # avoid pylint invalid-unary-operand-type
                signed_balance = -1 * tx_balance if invert_sign else tx_balance
                entries.append(
                    data.Balance(
                        meta,
                        balDate,
                        account,
                        amount.Amount(
                            signed_balance, trx["running_balance"]["currency"]
                        ),
                        None,
                        None,
                    )
                )

        return entries
| 2.265625 | 2 |
tests/test_callbacks.py | Ow-woo/stable-baselines | 3 | 12766693 | <gh_stars>1-10
import os
import shutil
import pytest
from stable_baselines import A2C, ACKTR, ACER, DQN, DDPG, PPO1, PPO2, SAC, TD3, TRPO
from stable_baselines.common.callbacks import (CallbackList, CheckpointCallback, EvalCallback,
EveryNTimesteps, StopTrainingOnRewardThreshold, BaseCallback)
LOG_FOLDER = './logs/callbacks/'
class CustomCallback(BaseCallback):
    """
    Callback recording that each BaseCallback hook fired at least once.
    """

    # Hooks tracked by this callback, in firing order.
    _HOOKS = ('training_start', 'rollout_start', 'step', 'rollout_end', 'training_end')

    def __init__(self):
        super(CustomCallback, self).__init__()
        self.calls = {hook: False for hook in self._HOOKS}

    def _on_training_start(self):
        self.calls['training_start'] = True

    def _on_rollout_start(self):
        self.calls['rollout_start'] = True

    def _on_step(self):
        self.calls['step'] = True
        return True

    def _on_rollout_end(self):
        self.calls['rollout_end'] = True

    def _on_training_end(self):
        self.calls['training_end'] = True

    def validate(self, allowed_failures):
        """Assert every hook fired, waiving the ones in ``allowed_failures``."""
        for hook in allowed_failures:
            self.calls[hook] = True
        assert all(self.calls.values())
@pytest.mark.parametrize("model_class", [A2C, ACER, ACKTR, DQN, DDPG, PPO1, PPO2, SAC, TD3, TRPO])
def test_callbacks(model_class):
    """End-to-end smoke test: run short training loops for every algorithm
    and verify all callback variants execute and every hook fires."""
    # Discrete-action algorithms need a discrete-action environment.
    env_id = 'Pendulum-v0'
    if model_class in [ACER, DQN]:
        env_id = 'CartPole-v1'

    allowed_failures = []
    # Number of training timesteps is too short
    # otherwise, the training would take too long, or would require
    # custom parameter per algorithm
    if model_class in [PPO1, DQN, TRPO]:
        allowed_failures = ['rollout_end']

    # Create RL model
    model = model_class('MlpPolicy', env_id)

    checkpoint_callback = CheckpointCallback(save_freq=500, save_path=LOG_FOLDER)

    # For testing: use the same training env
    eval_env = model.get_env()
    # Stop training if the performance is good enough
    callback_on_best = StopTrainingOnRewardThreshold(reward_threshold=-1200, verbose=1)

    eval_callback = EvalCallback(eval_env, callback_on_new_best=callback_on_best,
                                 best_model_save_path=LOG_FOLDER,
                                 log_path=LOG_FOLDER, eval_freq=100)

    # Equivalent to the `checkpoint_callback`
    # but here in an event-driven manner
    checkpoint_on_event = CheckpointCallback(save_freq=1, save_path=LOG_FOLDER,
                                             name_prefix='event')
    event_callback = EveryNTimesteps(n_steps=500, callback=checkpoint_on_event)

    callback = CallbackList([checkpoint_callback, eval_callback, event_callback])

    model.learn(500, callback=callback)
    model.learn(200, callback=None)
    custom_callback = CustomCallback()
    model.learn(200, callback=custom_callback)
    # Check that every callback hook was executed
    custom_callback.validate(allowed_failures=allowed_failures)
    # Transform callback into a callback list automatically
    custom_callback = CustomCallback()
    model.learn(500, callback=[checkpoint_callback, eval_callback, custom_callback])
    # Check that every callback hook was executed
    custom_callback.validate(allowed_failures=allowed_failures)
    # Automatic wrapping, old way of doing callbacks
    model.learn(200, callback=lambda _locals, _globals : True)
    # Cleanup
    if os.path.exists(LOG_FOLDER):
        shutil.rmtree(LOG_FOLDER)
| 2.1875 | 2 |
src/opserver/plugins/alarm_disk_usage/main.py | biswajit-mandal/contrail-controller | 3 | 12766694 | <reponame>biswajit-mandal/contrail-controller<filename>src/opserver/plugins/alarm_disk_usage/main.py<gh_stars>1-10
from opserver.plugins.alarm_base import *
from opserver.sandesh.alarmgen_ctrl.sandesh_alarm_base.ttypes import *
import json
class DiskUsage(AlarmBase):
    """Disk Usage crosses a threshold.
       NodeMgr reports disk usage in DatabaseUsageInfo.database_usage"""

    def __init__(self):
        AlarmBase.__init__(self, AlarmBase.SYS_ERR)
        # Alarm fires when used space exceeds 90% of the available space.
        self._threshold = 0.90

    def __call__(self, uve_key, uve_data):
        db_usage_info = uve_data.get("DatabaseUsageInfo")
        if db_usage_info is None:
            return None
        db_usage_list = db_usage_info.get("database_usage")
        if db_usage_list is None:
            return None

        matches = []
        for usage_entry in db_usage_list:
            used = usage_entry["disk_space_used_1k"]
            available = usage_entry["disk_space_available_1k"]
            limit = available * self._threshold
            if used <= limit:
                continue
            element = AlarmElement(
                rule=AlarmTemplate(
                    oper=">",
                    operand1=Operand1(
                        keys=["DatabaseUsageInfo", "database_usage", "disk_space_used_1k"]),
                    operand2=Operand2(json_value=str(limit))),
                json_operand1_value=str(used),
                json_vars={
                    "DatabaseUsageInfo.database_usage.disk_space_used_1k":
                        str(used),
                    "DatabaseUsageInfo.database_usage.disk_space_available_1k":
                        str(available)})
            matches.append(AllOf(all_of=[element]))

        return matches if matches else None
| 2.421875 | 2 |
src/racelogger/processing/singlecarproc.py | mpapenbr/python-racelogger | 0 | 12766695 | import logging
import sys
from enum import Enum
from racelogger.util.utils import gate
# NOTE(review): the unit is not defined here; CarData.post_process compares
# self.speed against this value directly — presumably km/h, confirm upstream.
CAR_SLOW_SPEED = 25
""" a car is considered to be slow if its speed is below this value"""

CarsManifest = ['state','carIdx','carNum','userName','teamName','car','carClass','pos','pic','lap','lc','gap','interval','trackPos','speed','dist','pitstops', 'stintLap','last','best']
"""
this is the base manifest for car data. Sector times may be added at the end.
On the other hand, items like "teamName, carClass" (and maybe others)
may be removed if they are not used in the recording session.
"""
class SectionTiming:
    """
    Measures the elapsed session time of one section (a sector or a full
    lap) between mark_start() and mark_stop().
    """

    def __init__(self) -> None:
        # -1 marks "not measured yet"
        self.start_time = -1
        self.stop_time = -1
        self.duration = -1
        # best duration seen so far; updating it is currently left to callers
        self.best = sys.maxsize

    def mark_start(self, sessionTime):
        """Record the session time at which this section was entered."""
        self.start_time = sessionTime

    def mark_stop(self, sessionTime):
        """Record the exit time, then compute and return the duration."""
        self.stop_time = sessionTime
        self.duration = sessionTime - self.start_time
        return self.duration
class CarLaptiming:
    """Bundles the lap timer and one timer per track sector for one car."""

    def __init__(self, num_sectors=0) -> None:
        self.lap = SectionTiming()
        self.sectors = [SectionTiming() for _ in range(num_sectors)]

    def reset(self):
        # placeholder: nothing is reset yet
        pass
# Processing states a car moves through during a session (functional Enum
# API; members and values are identical to the class-based declaration).
CarState = Enum(
    'CarState',
    [('INIT', 0), ('RUN', 1), ('PIT', 2), ('FINISHED', 3), ('OUT', 4), ('SLOW', 5)],
)
class PitBoundaryData:
    """
    Tracks observed track positions of pit entries (or exits); the median of
    a trimmed history approximates the true boundary position.

    @param keep_hist use at most this many entries for computation
    @param min_hist build up at least this many entries before deciding about which entries to keep.
    """

    def __init__(self, keep_hist=21, min_hist=3) -> None:
        self.min = 0
        self.max = 0
        self.middle = 0
        self.hist = []
        self.keep_hist = keep_hist
        # NOTE(review): min_hist is currently unused; kept for interface
        # compatibility with existing callers.
        self.min_hist = min_hist

    def process(self, trackPos):
        """
        Record one observed track position and refresh min/max/middle.

        Once the history has grown past keep_hist, every second observation
        triggers a trim of the current extremes so outliers are gradually
        discarded (same trim cadence as before).
        """
        self.hist.append(trackPos)
        if len(self.hist) > self.keep_hist and len(self.hist) % 2 == 1:
            self.hist.sort()
            self.hist = self.hist[1:-1]
        # BUGFIX: min/max/middle were previously read from an *unsorted*
        # history (hist[0]/hist[-1]) during the growth phase, and were never
        # refreshed at all once trimming started. Recompute from sorted data
        # after every observation.
        self.compute_values()

    def compute_values(self):
        """Recompute min, max and the median from the current history."""
        ordered = sorted(self.hist)
        self.min = ordered[0]
        self.max = ordered[-1]
        self.middle = ordered[len(ordered) >> 1]

    def __repr__(self) -> str:
        tmp = ", ".join([f"{e}" for e in self.hist])
        return f'PitBoundaryData min: {self.min} max: {self.max} avg: {self.middle} hist: {tmp}'
class PitBoundaries():
    """Pairs the pit-entry and pit-exit boundary trackers for the pit lane."""

    def __init__(self) -> None:
        self.pit_entry_boundary = PitBoundaryData()
        self.pit_exit_boundary = PitBoundaryData()

    def process_entry(self, trackPos):
        """Record the track position of one observed pit entry."""
        self.pit_entry_boundary.process(trackPos)

    def process_exit(self, trackPos):
        """Record the track position of one observed pit exit."""
        self.pit_exit_boundary.process(trackPos)

    def __repr__(self) -> str:
        return f'PitEntry: {self.pit_entry_boundary}\nPitExit: {self.pit_exit_boundary}\n'
class CarData:
"""
this class holds data about a car during a race.
No data history is stored here.
"""
def __init__(self,carIdx=None, manifest=CarsManifest,num_sectors=0, driver_proc=None, pit_boundaries=None) -> None:
self.logger = logging.getLogger(self.__class__.__name__)
for item in manifest:
self.__setattr__(item, "")
self.current_best = sys.maxsize
self.carIdx = carIdx
self.manifest = manifest
self.slow_marker = False
self.current_sector = -1
self.stintLap = 0
self.pitstops = 0
self.driver_proc = driver_proc
self.lap_timings = CarLaptiming(num_sectors=num_sectors)
self.pit_boundaries = pit_boundaries
self.marker_info = (-1,"") # lapNo/marker
self.processState = CarState.INIT
self.stateSwitch = {
CarState.INIT: self.state_init,
CarState.RUN: self.state_racing,
CarState.SLOW: self.state_racing_slow,
CarState.PIT: self.state_pitting,
CarState.FINISHED: self.state_finished,
CarState.OUT: self.state_out_of_race,
}
self.postProcessStateSwitch = {
CarState.INIT: self.state_post_process_noop,
CarState.RUN: self.state_post_process_run,
CarState.SLOW: self.state_post_process_slow,
CarState.PIT: self.state_post_process_noop,
CarState.FINISHED: self.state_post_process_noop,
CarState.OUT: self.state_post_process_noop,
}
def state_init(self, ir):
self.copy_standards(ir)
self.trackPos = gate(ir['CarIdxLapDistPct'][self.carIdx])
self.pos = ir['CarIdxPosition'][self.carIdx]
self.pic = ir['CarIdxClassPosition'][self.carIdx]
self.lap = ir['CarIdxLap'][self.carIdx]
self.lc = ir['CarIdxLapCompleted'][self.carIdx]
if ir['CarIdxLapDistPct'][self.carIdx] == -1:
self.state= "OUT"
self.processState = CarState.OUT
return
if ir['CarIdxOnPitRoad'][self.carIdx]:
self.copy_when_racing(ir)
self.state = "PIT"
self.processState = CarState.PIT
self.stintLap = 0
else:
self.copy_when_racing(ir)
self.state = "RUN"
self.processState = CarState.RUN
def state_racing(self, ir):
self.copy_standards(ir)
if ir['CarIdxLapDistPct'][self.carIdx] == -1:
self.state= "OUT"
self.processState = CarState.OUT
return
if ir['CarIdxOnPitRoad'][self.carIdx] == False and ir['CarIdxLapCompleted'][self.carIdx]>self.lc:
self.stintLap += 1
self.copy_when_racing(ir)
if ir['CarIdxOnPitRoad'][self.carIdx]:
self.state = "PIT"
self.pitstops += 1
self.processState = CarState.PIT
self.pit_boundaries.process_entry(ir['CarIdxLapDistPct'][self.carIdx])
def state_racing_slow(self, ir):
self.copy_standards(ir)
if ir['CarIdxLapDistPct'][self.carIdx] == -1:
self.state= "OUT"
self.processState = CarState.OUT
return
self.copy_when_racing(ir)
if ir['CarIdxOnPitRoad'][self.carIdx]:
self.state = "PIT"
self.pitstops += 1
self.processState = CarState.PIT
self.pit_boundaries.process_entry(ir['CarIdxLapDistPct'][self.carIdx])
def state_pitting(self, ir):
self.copy_standards(ir)
if ir['CarIdxLapDistPct'][self.carIdx] == -1:
self.state= "OUT"
self.processState = CarState.OUT
return
self.copy_when_racing(ir)
if ir['CarIdxOnPitRoad'][self.carIdx] == 0:
self.state = "RUN"
self.stintLap = 1
self.processState = CarState.RUN
self.pit_boundaries.process_exit(ir['CarIdxLapDistPct'][self.carIdx])
def state_finished(self, ir):
# self.logger.debug(f"carIdx {self.carIdx} finished the race.")
self.copy_standards(ir)
def state_out_of_race(self, ir):
self.copy_standards(ir)
# this may happen after resets or tow to pit road. if not on the pit road it may just be a short connection issue.
if ir['CarIdxOnPitRoad'][self.carIdx]:
self.state = "PIT"
self.processState = CarState.PIT
else:
if ir['CarIdxLapDistPct'][self.carIdx] > -1:
self.state = "RUN"
self.processState = CarState.RUN
def process(self, ir):
# handle processing depending on current state
self.stateSwitch[self.processState](ir)
#
# handle post processing after times, speed, delta are computed
#
def state_post_process_noop(self, msg_proc=None):
pass # do nothing by design
def state_post_process_run(self, msg_proc):
if self.speed > 0 and self.speed < CAR_SLOW_SPEED :
self.state = 'SLOW'
self.processState = CarState.SLOW
msg_proc.add_car_slow(self.carIdx,self.speed)
def state_post_process_slow(self, msg_proc):
if self.speed > CAR_SLOW_SPEED:
if self.processState == CarState.SLOW:
self.processState = CarState.RUN
self.state = 'RUN'
else:
self.logger.warn(f"should not happen. carNum {self.driver_proc.car_number(self.carIdx)} procState: {self.processState} state: {self.state}")
def post_process(self, msg_proc):
# handles post processing of special cases.
self.postProcessStateSwitch[self.processState](msg_proc)
    def copy_standards(self,ir):
        """Refresh driver/team identity fields from the driver processor."""
        self.carNum = self.driver_proc.car_number(self.carIdx)
        self.userName = self.driver_proc.user_name(self.carIdx)
        self.teamName = self.driver_proc.team_name(self.carIdx)
        self.carClass = self.driver_proc.car_class(self.carIdx)
        self.car = self.driver_proc.car(self.carIdx)
    def copy_when_racing(self, ir):
        """Refresh position/lap fields from telemetry while the car is on
        track; dist/interval are reset and recomputed elsewhere."""
        self.trackPos = gate(ir['CarIdxLapDistPct'][self.carIdx])
        self.pos = ir['CarIdxPosition'][self.carIdx]
        self.pic = ir['CarIdxClassPosition'][self.carIdx]
        self.lap = ir['CarIdxLap'][self.carIdx]
        self.lc = ir['CarIdxLapCompleted'][self.carIdx]
        self.dist = 0
        self.interval = 0
def manifest_output(self):
return [self.__getattribute__(x) for x in self.manifest]
| 3.03125 | 3 |
comments/encoder.py | onerbs/treux | 0 | 12766696 | from base64 import b64encode as _e, b64decode as _d
def encode(text: str, timestamp: float) -> str:
    """Encode a single comment as a base64 string.

    :param text: The text of the comment.
    :param timestamp: The timestamp of the comment.
    :return: The base64-encoded comment, "<text>:<timestamp>".
    """
    parts = (_encode(text), _encode(timestamp))
    return ':'.join(parts)
def decode(comment: str) -> tuple:
    """Decode a base64-encoded comment string.

    :param comment: The base64-encoded comment ("<text>:<timestamp>").
    :return: The decoded (text, timestamp) pair.
    """
    encoded_text, encoded_ts = comment.split(':', 1)
    return _decode(encoded_text), _decode(encoded_ts)
def encode_many(comments: list) -> str:
    """Encode many comments into one base64 string.

    :param comments: The source list of (text, timestamp) comments.
    :return: The ';'-joined list of base64-encoded comments.
    """
    encoded = (encode(text, ts) for text, ts in comments)
    return ';'.join(encoded)
def decode_many(comments: str) -> list:
    """Decode a base64 string into many comments.

    :param comments: The ';'-joined list of base64-encoded comments.
    :return: The decoded list of comments [(text, timestamp)].
    """
    result = []
    for chunk in comments.split(';'):
        result.append(decode(chunk))
    return result
def _encode(src: str or float) -> str:
return _e(str(src).encode()).decode()
def _decode(src: str) -> str or float:
decoded = _d(src).decode()
try:
return float(decoded)
except ValueError:
return decoded
| 3.25 | 3 |
plato/datasources/femnist.py | cuiboyuan/plato | 135 | 12766697 | <reponame>cuiboyuan/plato
"""
The Federated EMNIST dataset.
The Federated EMNIST dataset originates from the EMNIST dataset, which contains
817851 images, each of which is a 28x28 greyscale image in 1 out of 62 classes.
The difference between the Federated EMNIST dataset and its original counterpart
is that this dataset is already partitioned by the client ID, using the data
provider IDs included in the original EMNIST dataset. As a result of this
partitioning, there are 3597 clients in total, each of which has 227.37 images
on average (std is 88.84). For each client, 90% data samples are used for
training, while the remaining samples are used for testing.
Reference:
<NAME>, <NAME>, <NAME>, and <NAME>, "EMNIST: Extending MNIST to
handwritten letters," in the 2017 International Joint Conference on Neural
Networks (IJCNN).
"""
import json
import logging
import os
import numpy as np
from torch.utils.data import Dataset
from torchvision import transforms
from plato.config import Config
from plato.datasources import base
class CustomDictDataset(Dataset):
    """A dataset backed by a dict with 'x' (samples) and 'y' (targets)
    sequences, with optional per-sample transform support."""

    def __init__(self, loaded_data, transform=None):
        """Store the backing dict and the optional transform."""
        super().__init__()
        self.loaded_data = loaded_data
        self.transform = transform

    def __getitem__(self, index):
        sample = self.loaded_data['x'][index]
        target = self.loaded_data['y'][index]
        # truthiness test mirrors the usual "transform provided?" convention
        if self.transform:
            sample = self.transform(sample)
        return sample, target

    def __len__(self):
        return len(self.loaded_data['y'])
class ReshapeListTransform:
    """Callable transform: list/array-like -> float32 ndarray of new_shape."""

    def __init__(self, new_shape):
        self.new_shape = new_shape

    def __call__(self, img):
        arr = np.array(img, dtype=np.float32)
        return arr.reshape(self.new_shape)
class DataSource(base.DataSource):
    """The FEMNIST dataset, pre-partitioned by client.

    client_id == 0 means "federated learning server": the per-server test
    split is downloaded and exposed as self.testset; any other id downloads
    that client's training split into self.trainset.
    """
    def __init__(self, client_id=0):
        super().__init__()
        self.trainset = None
        self.testset = None
        root_path = os.path.join(Config().data.data_path, 'FEMNIST',
                                 'packaged_data')
        if client_id == 0:
            # If we are on the federated learning server
            data_dir = os.path.join(root_path, 'test')
            data_url = "https://jiangzhifeng.s3.us-east-2.amazonaws.com/FEMNIST/test/" \
                + str(client_id) + ".zip"
        else:
            data_dir = os.path.join(root_path, 'train')
            data_url = "https://jiangzhifeng.s3.us-east-2.amazonaws.com/FEMNIST/train/" \
                + str(client_id) + ".zip"
        # download-and-unpack only when the client's directory is missing
        if not os.path.exists(os.path.join(data_dir, str(client_id))):
            logging.info(
                "Downloading the Federated EMNIST dataset "
                "with the client datasets pre-partitioned. This may take a while.",
            )
            self.download(url=data_url, data_path=data_dir)
        loaded_data = DataSource.read_data(
            file_path=os.path.join(data_dir, str(client_id), 'data.json'))
        # augmentation pipeline: reshape to HWC, random crop/resize/rotate,
        # then normalize; normalization constants appear to be the FEMNIST
        # channel mean/std -- TODO confirm their provenance
        _transform = transforms.Compose([
            ReshapeListTransform((28, 28, 1)),
            transforms.ToPILImage(),
            transforms.RandomCrop(28,
                                  padding=2,
                                  padding_mode="constant",
                                  fill=1.0),
            transforms.RandomResizedCrop(28,
                                         scale=(0.8, 1.2),
                                         ratio=(4. / 5., 5. / 4.)),
            transforms.RandomRotation(5, fill=1.0),
            transforms.ToTensor(),
            transforms.Normalize(0.9637, 0.1597),
        ])
        dataset = CustomDictDataset(loaded_data=loaded_data,
                                    transform=_transform)
        if client_id == 0:  # testing dataset on the server
            self.testset = dataset
        else:  # training dataset on one of the clients
            self.trainset = dataset
    @staticmethod
    def read_data(file_path):
        """ Reading the dataset specific to a client_id. """
        with open(file_path, 'r') as fin:
            loaded_data = json.load(fin)
        return loaded_data
    def num_train_examples(self):
        # only valid on a client (trainset is None on the server)
        return len(self.trainset)
    def num_test_examples(self):
        # only valid on the server (testset is None on a client)
        return len(self.testset)
| 2.78125 | 3 |
setup.py | JonathanPetit/Parser-Renamer-torrentfile | 7 | 12766698 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import pypandoc
import os
# run relative to this file's directory so requirements.txt / README.md resolve
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
with open('requirements.txt') as requirements:
    install_requires = requirements.read().splitlines()
try:
    # convert the markdown README to reST for PyPI
    # NOTE(review): pypandoc.convert() is deprecated in favour of
    # convert_file(); confirm the installed pypandoc version supports it.
    long_description = pypandoc.convert('README.md', 'rst')
    long_description = long_description.replace("\r","")
except OSError:
    # pandoc binary missing: fall back to the raw markdown text
    print("Pandoc not found. Long_description conversion failure.")
    import io
    with io.open('README.md', encoding="utf-8") as f:
        long_description = f.read()
setup(
    name='MovieSerieTorrent',
    version='1.0.16',
    packages=find_packages(),
    install_requires=install_requires,
    author="<NAME>",
    author_email="<EMAIL>",
    description="Parser and Renamer for torrents files (Movies and series)",
    long_description= long_description,
    include_package_data=True,
    url='https://github.com/JonathanPetit/Parser-Renamer',
    license= 'MIT',
    keywords = 'parser renamer formatting python torrents torrent files file movie serie movies series',
    classifiers=[
        "Programming Language :: Python",
        "Natural Language :: French",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.3",
    ],
)
| 2 | 2 |
past_files_poster.py | birkin/annex_receipts | 0 | 12766699 | # -*- coding: utf-8 -*-
import argparse, datetime, glob, json, logging, os, pprint, random, time
from functools import partial
from operator import itemgetter
from typing import Iterator, List, Optional
import asks, trio
logging.basicConfig(
# filename=settings.LOG_PATH,
level=logging.DEBUG,
format='[%(asctime)s] %(levelname)s [%(module)s-%(funcName)s()::%(lineno)d] %(message)s',
datefmt='%d/%b/%Y %H:%M:%S' )
log = logging.getLogger(__name__)
class Initializer:
    """ Creates initial tracker: scans the source dir for *.dat files and
        writes a JSON list of {path, timestamp, updated} dicts, sorted by
        file modification time, to the tracker-a destination path. """
    def __init__( self ):
        self.SOURCE_DIR_PATH = os.environ['ANXEOD__SOURCE_DIR_PATH']
        self.DESTINATION_PATH = os.environ['ANXEOD__TRACKER_A_PATH']
        self.filepath_tracker = []
        self.start = datetime.datetime.now()
        self.files: list = glob.glob( f'{self.SOURCE_DIR_PATH}/*.dat' )
    def initialize_tracker( self ):
        """ Manages build.
            Called by main() """
        log.debug( f'len(files), `{len(self.files)}`' )
        for path in self.files:
            self.build_initial_tracker( path )
        sorted_filepath_tracker = self.build_sorted_tracker()
        time_taken = str( datetime.datetime.now() - self.start )
        log.debug( f'time_taken, `{time_taken}`' )
        with open( self.DESTINATION_PATH, 'w' ) as f:
            jsn: str = json.dumps( sorted_filepath_tracker, sort_keys=True, indent=2 )
            f.write( jsn )
        return
    def build_initial_tracker( self, path: str ) -> None:
        """ Creates initial dict of file-info & appends it to self.filepath_tracker list.
            Called by initialize_tracker() """
        # file modification time drives the eventual sort order
        file_timestamp: float = os.path.getmtime( path )
        timestamp: datetime.datetime = datetime.datetime.fromtimestamp( file_timestamp )
        info: dict = { 'path': path, 'timestamp': timestamp, 'updated': None }
        self.filepath_tracker.append( info )
        return
    def build_sorted_tracker( self ) -> list:
        """ Sorts initial tracker & updates timestamp-type.
            Called by initialize_tracker() """
        sorted_filepath_tracker: list = sorted( self.filepath_tracker, key=itemgetter('timestamp') )
        for entry in sorted_filepath_tracker:
            entry['timestamp'] = str( entry['timestamp'] ) # needs for json dump
        log.debug( f'len(sorted_filepath_tracker), `{len(sorted_filepath_tracker)}`' )
        return sorted_filepath_tracker
## end class Initializer
class Counter:
    """ Builds the per-date count-tracker file (tracker-b) from the initial
        file-tracker (tracker-a): one dict per date, with one count per
        count-type. """
    def __init__( self ):
        self.INITIAL_TRACKER_PATH = os.environ['ANXEOD__TRACKER_A_PATH']
        self.COUNT_TRACKER_PATH = os.environ['ANXEOD__TRACKER_B_PATH']
        self.date_dct = {}  # maps 'YYYY-MM-DD' -> {count_type: count}
        self.start = datetime.datetime.now()  # for elapsed-time logging
    def build_count_tracker( self ) -> None:
        """ Manages the build:
            - loads the tracker-a file-list,
            - initializes (empties) the tracker-b file,
            - tallies each file's line-count into its date/count-type slot,
            - writes the result.
            Called by call_function() """
        file_entries: List[dict] = self.load_file_list()
        self.initialize_count_tracker()
        self.make_date_dict( file_entries )
        for entry in file_entries:
            entry_date: datetime.date = datetime.datetime.strptime( entry['timestamp'], '%Y-%m-%d %H:%M:%S' ).date()
            count_type: str = self.parse_type( entry['path'] )
            count: int = self.parse_count( entry['path'] )
            self.update_date_dct( entry_date, count_type, count )  # handles multiple files in a given day
        self.update_count_tracker()
        return
    def load_file_list( self ) -> List[dict]:
        """ Loads tracker-a.
            Called by build_count_tracker() """
        with open( self.INITIAL_TRACKER_PATH, 'r', encoding='utf-8' ) as f:
            entries_jsn: str = f.read()
        entries: list = json.loads( entries_jsn )
        return entries
    def initialize_count_tracker( self ) -> None:
        """ Saves empty list file.
            Called by build_count_tracker() """
        with open( self.COUNT_TRACKER_PATH, 'w', encoding='utf-8' ) as f:
            f.write( json.dumps( [] ) )
        return
    def make_date_dict( self, file_entries: List[dict] ) -> None:
        """ Seeds self.date_dct with an empty dict for every date seen.
            Called by build_count_tracker() """
        for entry in file_entries:
            timestamp: str = entry['timestamp']
            date_obj: datetime.date = datetime.datetime.strptime( timestamp, '%Y-%m-%d %H:%M:%S' ).date()
            self.date_dct[str(date_obj)] = {}
        log.debug( f'self.date_dct, ```{pprint.pformat(self.date_dct)[0:100]}```' )
        log.debug( f'num-dates, `{len(self.date_dct.keys())}`' )
        return
    def parse_type( self, path: str ) -> str:
        """ Maps a filename marker to its count-type.
            Called by build_count_tracker() """
        markers = [
            ('QHACS', 'hay_accessions'),
            ('QSACS', 'non-hay_accessions'),
            ('QHREF', 'hay_refiles'),
            ('QSREF', 'non-hay_refiles'),
        ]
        for marker, count_type in markers:
            if marker in path:
                return count_type
        raise Exception( 'unhandled count-type' )
    def parse_count( self, path: str ) -> int:
        """ Counts the data lines in the file (one line per item), without
            loading the whole file into memory.
            Called by build_count_tracker() """
        with open( path, 'r', encoding='utf-8' ) as f:
            count = sum( 1 for _ in f )
        return count
    def update_date_dct( self, entry_date, count_type, count ) -> None:
        """ Accumulates the count into the date's slot; additive when several
            files of the same count-type exist for one day.
            Called by build_count_tracker() """
        day_counts: dict = self.date_dct[str(entry_date)]
        if count_type in day_counts:
            log.info( f'existing count of `{count}` already found for date, ```{entry_date}```; count_type, `{count_type}`' )
            day_counts[count_type] += count
        else:
            day_counts[count_type] = count
        return
    def update_count_tracker( self ) -> None:
        """ Writes tracker-b.
            Called by build_count_tracker() """
        jsn: str = json.dumps( self.date_dct, sort_keys=True, indent=2 )
        with open( self.COUNT_TRACKER_PATH, 'w', encoding='utf-8' ) as f:
            f.write( jsn )
        log.debug( f'time-taken, `{str( datetime.datetime.now() - self.start )}`' )
        return
## end class Counter
class Updater:
    """ Updates db: posts each date's counts to the annex-counts API,
        concurrently via trio workers, throttled to one post per
        `throttle` seconds, recording per-date success in tracker-c. """
    def __init__( self ):
        self.COUNT_TRACKER_PATH = os.environ['ANXEOD__TRACKER_B_PATH']
        self.UPDATED_COUNT_TRACKER_PATH = os.environ['ANXEOD__TRACKER_C_PATH']
        self.API_UPDATER_URL = os.environ['ANXEOD__ANNEX_COUNTS_API_UPDATER_URL']
        self.API_AUTHKEY = os.environ['ANXEOD__ANNEX_COUNTS_API_AUTHKEY']
        self.updated_count_tracker_dct = {}
        self.nursery = None
        # minimum seconds between job starts (rate-limit)
        self.throttle: float = 1.0
        self.mutex = None
        self.continue_worker_flag = True
        self.start = datetime.datetime.now()
        # NOTE(review): each worker stops after this many iterations -- looks
        # like a debugging safety-limit; confirm before a full run.
        self.sanity_check_limit: int = 3
    def update_db( self ) -> None:
        """ Calls concurrency-manager function.
            Called by main()
            Credit: <https://stackoverflow.com/questions/51250706/combining-semaphore-and-time-limiting-in-python-trio-with-asks-http-request>
        """
        self.setup_final_tracker()
        trio.run( partial(self.manage_concurrent_updates, n_workers=3) )
        log.debug( f'total time taken, `{str( datetime.datetime.now() - self.start )}` seconds' )
        return
    def setup_final_tracker( self ) -> None:
        """ Initializes final tracker if it doesn't exist.
            Called by update_db() """
        try:
            with open( self.UPDATED_COUNT_TRACKER_PATH, 'r' ) as f:
                self.updated_count_tracker_dct = json.loads( f.read() )
            log.debug( 'existing updated_count_tracker found and loaded' )
        except Exception as e:
            log.debug( f'updated_count_tracker _not_ found, exception was ```{e}```, so creating it' )
            self.create_final_tracker()
        return
    def create_final_tracker( self ) -> None:
        """ Writes final-tracker-file: tracker-b entries plus an 'updated'
            status field, with any missing count-types defaulted to 0.
            Called by setup_final_tracker() """
        with open( self.COUNT_TRACKER_PATH, 'r' ) as f:
            count_tracker_dct = json.loads( f.read() )
        for date_key, count_info in count_tracker_dct.items():
            count_info['updated'] = None
            actual_count_info_keys = list( count_info.keys() )
            for required_key in ['hay_accessions', 'hay_refiles', 'non-hay_accessions', 'non-hay_refiles']:
                if required_key not in actual_count_info_keys:
                    count_info[required_key] = 0
        self.updated_count_tracker_dct = count_tracker_dct
        with open( self.UPDATED_COUNT_TRACKER_PATH, 'w' ) as f:
            f.write( json.dumps(self.updated_count_tracker_dct, sort_keys=True, indent=2) )
        return
    async def manage_concurrent_updates(self, n_workers: int ):
        """ Manages asynchronous processing of db updates.
            Called by update_db() """
        async with trio.open_nursery() as nursery:
            self.nursery = nursery
            for _ in range(n_workers):
                self.nursery.start_soon( self.run_worker_job )
    async def run_worker_job( self ) -> None:
        """ Manages worker job: repeatedly claims the next unprocessed
            entry (under the shared mutex, released by tick() after the
            throttle interval) and posts it.
            Called by manage_concurrent_updates() """
        log.debug( 'function starting' )
        temp_counter = 0
        while self.continue_worker_flag is True:
            temp_counter += 1
            await self.get_mutex().acquire()
            log.debug( 'mutex acquired to start job' )
            self.nursery.start_soon( self.tick )
            entry: Optional[dict] = self.grab_next_entry()
            if entry is None:
                log.info( 'no more entries -- cancel' )
                self.continue_worker_flag = False
            elif temp_counter >= self.sanity_check_limit:
                log.info( f'temp_counter, `{temp_counter}`, so will stop' )
                self.continue_worker_flag = False
            else:
                # await asks.get( 'https://httpbin.org/delay/4' )
                await self.post_update( entry )
                log.debug( 'url processed' )
        self.save_updated_tracker()
        return
    def get_mutex( self ):
        """Lazily creates the shared binary semaphore used as a mutex."""
        if self.mutex == None:
            self.mutex = trio.Semaphore(1)
        else:
            pass
        log.debug( 'returning mutex' )
        return self.mutex
    async def tick( self ) -> None:
        """Releases the mutex after the throttle interval, pacing job starts."""
        await trio.sleep( self.throttle )
        self.mutex.release()
    def grab_next_entry( self ) -> Optional[dict]:
        """ Finds and returns next entry to process, marking it
            'in_process' so other workers skip it.
            Called by run_worker_job() """
        key_entry: Optional[dict] = None
        for key, count_info in self.updated_count_tracker_dct.items():
            # log.debug( f'current key, `{key}`; current count_info, ```{count_info}```' )
            if count_info['updated'] is None:
                log.debug( 'found next entry to process' )
                key_entry = { key: count_info }
                count_info['updated'] = 'in_process'
                break
        log.debug( f'returning key_entry, ```{key_entry}```' )
        # log.debug( f'self.updated_count_tracker_dct, ```{pprint.pformat(self.updated_count_tracker_dct)[0:1000]}```' )
        return key_entry
    async def post_update( self, entry: dict ):
        """ Runs the post; records the per-date outcome in the tracker dict.
            Called by run_worker_job() """
        params: dict = self.prep_params( entry )
        params['auth_key'] = self.API_AUTHKEY
        temp_process_id = random.randint( 1111, 9999 )
        log.debug( f'`{temp_process_id}` -- about to hit url' )
        resp = await asks.post( self.API_UPDATER_URL, data=params, timeout=10 )
        # resp = await asks.get( 'https://httpbin.org/delay/4' )
        log.debug( f'`{temp_process_id}` -- url response received, ```{resp.content}```' )
        date_key, other = list(entry.items())[0]
        if resp.status_code == 200:
            self.updated_count_tracker_dct[date_key]['updated'] = str( datetime.datetime.now() )
        else:
            self.updated_count_tracker_dct[date_key]['updated'] = 'PROBLEM'
        log.debug( f'status_code, `{resp.status_code}`; type(status_code), `{type(resp.status_code)}`; content, ```{resp.content}```' )
        return
    def prep_params( self, entry: dict ):
        """ Preps post params.
            Called by post_update() """
        ( date_key, info ) = list( entry.items() )[0] # date_key: str, info: dict
        log.debug( f'info, ```{info}```' )
        param_dct = {
            'date': date_key,
            'hay_accessions': info['hay_accessions'],
            'hay_refiles': info['hay_refiles'],
            'non_hay_accessions': info['non-hay_accessions'],
            'non_hay_refiles': info['non-hay_refiles'],
        }
        log.debug( f'param_dct, ```{param_dct}```' )
        return param_dct
    def save_updated_tracker( self ) -> None:
        """ Writes dct attribute.
            Called by run_worker_job() """
        with open( self.UPDATED_COUNT_TRACKER_PATH, 'w' ) as f:
            f.write( json.dumps(self.updated_count_tracker_dct, sort_keys=True, indent=2) )
        log.debug( 'updated tracker saved' )
        return
## end class Updater
# --------------------
# caller
# --------------------
def parse_args():
    """ Parses arguments when module called via __main__ """
    parser = argparse.ArgumentParser( description='Required: function-name.' )
    parser.add_argument( '--function', '-f', help='function name required', required=True )
    parsed = parser.parse_args()
    return vars( parsed )
def call_function( function_name: str ) -> None:
    """ Safely calls function named via input string to __main__
        Credit: <https://stackoverflow.com/a/51456172> """
    log.debug( f'function_name, ```{function_name}```' )
    initializer = Initializer()
    counter = Counter()
    updater = Updater()
    safe_dispatcher = {
        'initialize_tracker': initializer.initialize_tracker,
        'build_counts': counter.build_count_tracker,
        'update_db': updater.update_db
    }
    # look the target up first, so errors raised while the target RUNS are not
    # swallowed and mislabeled as 'invalid function' (the old bare `except:`
    # masked every failure inside the dispatched function)
    try:
        target = safe_dispatcher[function_name]
    except KeyError:
        raise Exception( 'invalid function' )
    target()
    return
if __name__ == '__main__':
    # CLI entry point: `-f/--function` selects which tracker step to run
    args: dict = parse_args()
    log.debug( f'args, ```{args}```' )
    submitted_function: str = args['function']
    call_function( submitted_function )
# --------------------
# trio experimentation from: <https://stackoverflow.com/questions/51250706/combining-semaphore-and-time-limiting-in-python-trio-with-asks-http-request>
# --------------------
# (neither of the two methods below work with requests)
# # --------------------
# # works...
# # --------------------
# import pprint
# from functools import partial
# from typing import List, Iterator
# import asks
# import trio
# links: List[str] = [
# 'https://httpbin.org/delay/7',
# 'https://httpbin.org/delay/6',
# 'https://httpbin.org/delay/3'
# ] * 2
# responses = []
# async def fetch_urls(urls: Iterator, responses: list, n_workers: int, throttle: int ):
# # Using binary `trio.Semaphore` to be able
# # to release it from a separate task.
# mutex = trio.Semaphore(1)
# async def tick():
# await trio.sleep(throttle)
# mutex.release()
# async def worker():
# for url in urls:
# await mutex.acquire()
# print( f'[{round(trio.current_time(), 2)}] Start loading link: {url}' )
# nursery.start_soon(tick)
# response = await asks.get(url)
# responses.append(response)
# async with trio.open_nursery() as nursery:
# for _ in range(n_workers):
# nursery.start_soon(worker)
# # trio.run( fetch_urls, iter(links), responses, 5, 1 ) # works
# # trio.run( fetch_urls, urls=iter(links), responses=responses, n_workers=5, throttle=1 ) # doesn't work
# trio.run( partial(fetch_urls, urls=iter(links), responses=responses, n_workers=5, throttle=1) ) # works
# print( f'responses, ```{pprint.pformat(responses)}```' )
# --------------------
# works...
# --------------------
# from typing import List, Iterator
# import asks
# import trio
# asks.init('trio')
# links: List[str] = [
# 'https://httpbin.org/delay/4',
# 'https://httpbin.org/delay/3',
# 'https://httpbin.org/delay/1'
# ] * 3
# async def fetch_urls(urls: List[str], number_workers: int, throttle_rate: float):
# async def token_issuer(token_sender: trio.abc.SendChannel, number_tokens: int):
# async with token_sender:
# for _ in range(number_tokens):
# await token_sender.send(None)
# await trio.sleep(1 / throttle_rate)
# async def worker(url_iterator: Iterator, token_receiver: trio.abc.ReceiveChannel):
# async with token_receiver:
# for url in url_iterator:
# await token_receiver.receive()
# print(f'[{round(trio.current_time(), 2)}] Start loading link: {url}')
# response = await asks.get(url)
# # print(f'[{round(trio.current_time(), 2)}] Loaded link: {url}')
# responses.append(response)
# responses = []
# url_iterator = iter(urls)
# token_send_channel, token_receive_channel = trio.open_memory_channel(0)
# async with trio.open_nursery() as nursery:
# async with token_receive_channel:
# nursery.start_soon(token_issuer, token_send_channel.clone(), len(urls))
# for _ in range(number_workers):
# nursery.start_soon(worker, url_iterator, token_receive_channel.clone())
# return responses
# responses = trio.run(fetch_urls, links, 5, 1.)
| 2.25 | 2 |
mods/libvis_mods/project-templates/test.py | danlkv/pywebviz | 0 | 12766700 | from cookiecutter.main import cookiecutter
def main():
    """Generate a project from the local 'source-files' cookiecutter template."""
    cookiecutter('source-files')#, no_input=True)
if __name__ == '__main__':
    main()
| 1.1875 | 1 |
serv/api/errors.py | jnhu76/ImageServ | 0 | 12766701 | from typing import Union
from fastapi import HTTPException
from fastapi.exceptions import RequestValidationError
from fastapi.openapi.constants import REF_PREFIX
from fastapi.openapi.utils import validation_error_response_definition
from pydantic import ValidationError
from starlette.requests import Request
from starlette.responses import JSONResponse
from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY
async def http_error_handler(_: Request, exc: HTTPException) -> JSONResponse:
    """Render an HTTPException as a JSON body of the form {"errors": [...]}."""
    payload = {"errors": [exc.detail]}
    return JSONResponse(payload, status_code=exc.status_code)
async def http422_error_handler(
    _: Request,
    exc: Union[RequestValidationError, ValidationError],
) -> JSONResponse:
    """Render a validation failure as {"errors": [...]} with a 422 status."""
    body = {"errors": exc.errors()}
    return JSONResponse(body, status_code=HTTP_422_UNPROCESSABLE_ENTITY)
validation_error_response_definition["properties"] = {
"errors": {
"title": "Errors",
"type": "array",
"items": {"$ref": "{0}ValidationError".format(REF_PREFIX)},
},
}
| 2.140625 | 2 |
src/tools/train_tracknet.py | sanket-pixel/self-tracker | 0 | 12766702 | <reponame>sanket-pixel/self-tracker
"""
This file contains the training logic for
tracknet.
"""
# import statements
import torch
from loguru import logger
import numpy as np
import configparser
from tqdm import tqdm
import time
import os
import sys
from pathlib import Path
path_root = Path(__file__).parents[2]
sys.path.append(str(path_root))
from pathlib import Path
import pandas as pd
from tensorboardX import SummaryWriter
from src.dataloader.tracklet_loader import TrackInstance
from src.models.backbones.tracknet import TrackNet
from src.models import losses
from src.tools.tracknet import *
from src.models.utils import save_checkpoint, load_checkpoint
from torch.utils.data import DataLoader, WeightedRandomSampler
from torchvision import transforms
from einops import rearrange
# set flags / seeds for reproducibility (cudnn benchmark trades exact
# determinism for speed on fixed-size inputs)
torch.backends.cudnn.benchmark = True
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
if __name__ == '__main__':
    # load config files
    # NOTE(review): leftover debug print -- consider removing
    print(os.listdir())
    config = configparser.ConfigParser()
    config.read(os.path.join("src", "configs", "tracknet.config"))
    # data loader config
    data_path = config.get("DataLoader","data_path")
    # training config
    resume = config.getboolean("Training", "resume")
    batch_size = config.getint("Training", "batch_size")
    num_workers = config.getint("Training", "num_workers")
    val_frequency = config.getint("Training", "val_frequency")
    epochs = config.getint("Training", "epochs")
    learning_rate = config.getfloat("Training", "learning_rate")
    feature_size = config.getint("Training", "feature_size")
    model_dir_name = config.get("Training", "model_dir_name")
    init_epoch = config.getint("Training", "init_epoch")
    path_to_best = config.get("Training", "path_to_best")
    experiment_name = config.get("Training", "experiment_name")
    loss = config.get("Training", "loss")
    # train and val sequences
    train_sequences = ['MOT17-04-FRCNN', 'MOT17-05-FRCNN', 'MOT17-09-FRCNN', 'MOT17-11-FRCNN', 'MOT17-13-FRCNN']
    val_sequences = ['MOT17-02-FRCNN', 'MOT17-10-FRCNN']
    # experiment folder
    exp_folder = os.path.join("models", model_dir_name, experiment_name)
    # configure logger
    sink = os.path.join(exp_folder, "training_log.log")
    logger.add(sink=sink)
    logger.info("Config Loaded")
    # device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info(f"Running on {device}.")
    # tracknet dataset
    logger.info(f"Preparing training and validation dataset..")
    train_dataset = TrackInstance(mode="train", transform_mode = "auto_augment")
    # train_dataset_eval = TrackInstance(mode="train",transform_mode = "no_augment")
    #
    # val_dataset = TrackInstance(mode="val" ,transform_mode = "no_augment" )
    # tracknet dataloader
    # weighted sampling balances the per-sample frequencies reported by the dataset
    train_sample_weights = train_dataset.get_weights()
    train_sampler = WeightedRandomSampler(weights=train_sample_weights, num_samples=len(train_sample_weights))
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, sampler=train_sampler,
                                  num_workers=num_workers, pin_memory=True)
    # train_dataloader = DataLoader(train_dataset, batch_size=batch_size,
    #                               num_workers=num_workers, pin_memory=True,shuffle=False)
    # train_dataloader_eval= DataLoader(train_dataset_eval, batch_size=batch_size,
    #                                   num_workers=num_workers, pin_memory=True)
    # val_dataloader = DataLoader(val_dataset, batch_size=batch_size,
    #                             num_workers=num_workers, pin_memory=True)
    # instantiate network (which has been imported from *networks.py*)
    logger.info(f"Initializing model..")
    tracknet = TrackNet(embedding_dim=2048,projection_dim=128)
    # loss
    logger.info(f"Initializing Loss..")
    criterion = getattr(losses, loss)()
    # criterion = getattr(losses,loss)(margin=0.3)
    # if running on GPU and we want to use cuda move model there
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        tracknet = tracknet.cuda()
        criterion = criterion.cuda()
    # optimizer
    logger.info(f"Initializing Optimizer..")
    # combine backbone and fully connected
    params = list(tracknet.parameters())
    optimizer = torch.optim.Adam(params=params, lr=learning_rate)
    # load checkpoint if needed/ wanted
    start_n_iter = 0
    start_epoch = 0
    if resume:
        logger.info(f"Loading saved checkpoint..")
        if use_cuda:
            map_location = "cuda:0"
        else:
            map_location = "cpu"
        # custom method for loading last checkpoint
        ckpt = load_checkpoint(model_dir_name, map_location=map_location)
        tracknet.load_state_dict(ckpt['net'])
        start_epoch = ckpt['epoch']
        start_n_iter = ckpt['n_iter']
        optimizer.load_state_dict(ckpt['optim'])
        logger.info("Latest checkpoint loaded.")
    # tensorboard
    tensorboard_path = os.path.join(exp_folder, "tensorboard_logs")
    writer = SummaryWriter(tensorboard_path)
    # now we start the main loop
    n_iter = start_n_iter
    best_val_accuracy = 0
    total_i = 0
    # NOTE(review): is_best stays False forever since the validation block
    # below is commented out -- every checkpoint is saved as non-best
    is_best = False
    logger.info(f"Training begins ..")
    for epoch in range(start_epoch, epochs):
        # set models to train mode
        tracknet.train()
        ckpt_folder = os.path.join(exp_folder, "checkpoints", str(epoch))
        Path(ckpt_folder).mkdir(parents=True, exist_ok=True)
        # use prefetch_generator and tqdm for iterating through data
        pbar = tqdm(enumerate(train_dataloader),
                    total=len(train_dataloader))
        start_time = time.time()
        # for loop going through dataset
        running_loss = 0
        for i, batch in pbar:
            # data preparation
            patch, bbox, conf, track_id = batch
            # fold the two views per sample into the batch dimension
            patch = rearrange(patch, 'b v c h w -> (b v) c h w')
            if use_cuda:
                patch = patch.cuda()
                bbox = bbox.cuda()
                conf = conf.cuda()
                track_id = track_id.cuda()
            prepare_time = start_time - time.time()
            # forward and backward pass
            embedding = tracknet(patch, mode="train")
            embedding = rearrange(embedding, '(b v) d -> b v d', v=2)
            # embedding = embedding / embedding.norm(p=2, dim=1, keepdim=True)
            c_loss = criterion(embedding, track_id)
            running_loss += c_loss.item()
            optimizer.zero_grad()
            c_loss.backward()
            optimizer.step()
            # udpate tensorboardX
            total_i += 1
            writer.add_scalar('Training Loss', running_loss / (i + 1), total_i)
            # compute computation time and *compute_efficiency*
            process_time = start_time - time.time() - prepare_time
            compute_efficiency = process_time / (process_time + prepare_time)
            pbar.set_description(
                f'Compute efficiency: {compute_efficiency:.2f}, '
                f'loss: {c_loss.item():.2f}, epoch: {epoch}/{epochs}')
            start_time = time.time()
        # evaluate on validation dataset
        logger.info(f"Validation begins ..")
        # evaluate on validation set
        # eval_list = []
        # for seq in val_sequences:
        #     evaluate_sequence(seq, tracknet, chkp_path=ckpt_folder, m="val")
        #     eval_list.append(eval_results(seq, ckpt_folder, m="val"))
        # eval_df = pd.DataFrame(eval_list)
        # validation_stats = eval_df.mean().to_dict()
        # writer.add_scalar('IDF1', validation_stats["idf1"], epoch)
        # writer.add_scalar('MOTA', validation_stats["mota"], epoch)
        # logger.info("IDF1 : {idf1}".format(idf1=validation_stats["idf1"]))
        # logger.info("MOTA : {mota}".format(mota=validation_stats["mota"]))
        # writer.close()
        # save checkpoint if needed
        # logger.info(f"Saving best and latest checkpoint..")
        cpkt = {
            'net': tracknet.state_dict(),
            'epoch': epoch,
            'n_iter': n_iter,
            'optim': optimizer.state_dict(),
            'val_stats':[]
        }
        save_path = os.path.join(ckpt_folder, 'model.ckpt')
print("training_done") | 1.96875 | 2 |
grive/tree.py | hoangnv-bkhn/grive | 0 | 12766703 | <gh_stars>0
class Node:
    """A node of an n-ary tree: a key plus a list of child nodes."""

    def __init__(self, key):
        self.key, self.child = key, []
def newNode(key):
    """Factory wrapper: build and return a fresh Node for *key*."""
    return Node(key)
def LevelOrderTraversal(root):
    """Print the tree level by level.

    Each node prints as "<key>->" followed by its children's 'name' values
    (comma-terminated) and a newline; a blank line follows each level.
    Node keys are expected to be dict-like (``key.get("name")`` is used).
    """
    if root is None:
        return
    level = [root]
    while level:
        next_level = []
        for node in level:
            print(node.key, end='->')
            for kid in node.child:
                print(kid.key.get("name"), end=",")
            print()
            next_level.extend(node.child)
        level = next_level
        print()  # blank line between two levels
def add_node(root, index, value):
    """Breadth-first search for nodes whose key's (id, name) pair equals
    *index* and append *value* (wrapped in a new Node) as a child there.

    A child is only added if no existing child already carries the same
    (id, name) pair.  Note that a freshly appended child is enqueued too,
    matching the original traversal order.

    :param root: tree root (Node) or None
    :param index: dict with at least 'id' and 'name' keys identifying targets
    :param value: dict with at least 'id' and 'name' keys for the new node
    """
    if root is None:
        return
    queue = [root]
    while queue:
        node = queue.pop(0)
        if {'id': node.key['id'], 'name': node.key['name']} == index:
            child_key = {'id': value['id'], 'name': value['name']}
            # idiom fix: `not any(...)` instead of len(list(filter(...))) == 0
            if not any({'id': c.key['id'], 'name': c.key['name']} == child_key
                       for c in node.child):
                node.child.append(newNode(value))
        # enqueue children AFTER the possible append so the new node is visited
        queue.extend(node.child)
def get_direct_sub_node(root, index):
    """Return (key, children) of the first node (BFS order) matching *index*.

    If index['id'] is truthy the match is on id alone; otherwise the full
    (id, name) pair must be equal.  Returns None when nothing matches or the
    tree is empty.

    Robustness fix: `index.get('id')` instead of `index['id']`, so a dict
    without an 'id' key falls through to the (id, name) comparison instead
    of raising KeyError.
    """
    if root is None:
        return None
    queue = [root]
    while queue:
        node = queue.pop(0)
        if index.get('id'):
            if index['id'] == node.key['id']:
                return node.key, node.child
        elif {'id': node.key['id'], 'name': node.key['name']} == index:
            return node.key, node.child
        queue.extend(node.child)
    return None
def find_node_by_canonicalPath(root, path):
    """Breadth-first lookup of the node whose key['canonicalPath'] equals *path*.

    Returns the matching Node object, or None when the tree is empty or no
    node matches.
    """
    if root is None:
        return None
    pending = [root]
    while pending:
        current = pending.pop(0)
        if current.key.get("canonicalPath") == path:
            return current
        pending.extend(current.child)
    return None
| 3.734375 | 4 |
nonebot/adapters/telegram/config.py | ColdThunder11/nonebot-adapter-telegram | 8 | 12766704 | from nonebot import adapters
from typing import Optional
from pydantic import Field, BaseModel
class Config(BaseModel):
    """
    Telegram adapter configuration.

    Config keys:

    - ``webhook_host`` / ``telegram_webhook_host``: webhook host
    - ``bot_token`` / ``telegram_bot_token``: bot token
    - ``telegram_command_only`` / ``telegram_command_only``: ignore non-command messages (not effective yet)
    - ``telegram_bot_server_addr`` / ``telegram_bot_server_addr``: Telegram bot API server address, defaults to the official one
    """
    webhook_addr: Optional[str] = Field(default=None, alias="telegram_webhook_host")
    bot_token: Optional[str] = Field(default=None, alias="telegram_bot_token")
    telegram_adapter_debug: Optional[bool] = Field(default=False, alias="telegram_adapter_debug")
    telegram_command_only: Optional[bool] = Field(default=False, alias="telegram_command_only")
    telegram_bot_api_server_addr: Optional[str] = Field(default="https://api.telegram.org", alias="telegram_bot_server_addr")

    class Config:
        # pydantic model config: drop unknown keys, allow population by the
        # declared field name as well as by the alias.
        extra = "ignore"
        allow_population_by_field_name = True
| 2.421875 | 2 |
tests/nearestPointInterpolation.py | MarkTravers/magLabUtilities | 0 | 12766705 | <filename>tests/nearestPointInterpolation.py<gh_stars>0
#!python3
import numpy as np
from magLabUtilities.signalutilities.signals import SignalThread, Signal
from magLabUtilities.signalutilities.canonical1d import Parabola
if __name__=='__main__':
    # Build a canonical parabola (presumably x = t**2 on t in [0, 4], since
    # xMaxDx = 16.0 = 4.0**2 -- confirm against Parabola's definition).
    parabolaGen = Parabola(tMaxDx=4.0, tVertex=0.0, xMaxDx=16.0, xVertex=0.0, power=2.0)
    # Evaluate on 5 evenly spaced t samples over [0, 4].
    parabola = parabolaGen.evaluate(SignalThread(np.linspace(0.0, 4.0, num=5)))

    # Resample the curve at t = 0, 2, 4 with nearest-point interpolation.
    interpTThread = SignalThread(np.array([0.0, 2.0, 4.0]))
    interpolatedSignal = parabola.sample(interpTThread, 'nearestPoint')

    print('done')
core/communication.py | discord-intech/VocalDaemon | 0 | 12766706 | import json
from core.utils.cleanOrder import cleanString
# French keyword lists used by isConfirmation() to classify a reply as
# positive or negative; the reply is normalised by cleanString() before
# the substring match.
yesWords = ["oui", "d'accord", "d accord", "ok", "ça marche", "pourquoi pas", "bien sur"]
noWords = ["non", "pas du tout", "hors de question", "pas question", "impossible", "je refuse"]
def sendAnswer(answer, client):
    """Wrap *answer* in an 'answer' envelope and send it to *client* as UTF-8 JSON."""
    envelope = json.dumps({"type": "answer", "msg": answer})
    client.send(envelope.encode())
def sendError(answer, client):
    """Wrap *answer* in an 'ERROR' envelope and send it to *client* as UTF-8 JSON."""
    envelope = json.dumps({"type": "ERROR", "msg": answer})
    client.send(envelope.encode())
def askConfirmation(confirmMessage, originalRequest, client):
    """Send a confirmation request; the original request rides along so the
    caller can replay it once the user confirms."""
    envelope = json.dumps({
        "type": "askConfirmation",
        "msg": confirmMessage,
        "originalRequest": originalRequest,
    })
    client.send(envelope.encode())
def isConfirmation(str):
    """Classify a (French) reply as a confirmation.

    Negative keywords win: returns False as soon as one is found, True when a
    positive keyword is found, False otherwise.

    Note: the parameter name shadows the builtin ``str``; kept unchanged for
    interface compatibility.
    """
    # performance fix: normalise the reply once instead of once per keyword
    cleaned = cleanString(str)
    if any(word in cleaned for word in noWords):
        return False
    return any(word in cleaned for word in yesWords)
def recvFromClient(client):
    """Read one JSON message from *client* and return the decoded object.

    Raises ValueError("Json Vide") when the decoded payload is the empty
    string.  NOTE(review): assumes each recv() yields exactly one complete
    JSON document of at most 1024 bytes -- confirm the protocol guarantees
    this, otherwise json.loads can fail on a truncated read.
    """
    rawOrder = client.recv(1024).decode('utf-8')
    #print(rawOrder)
    orderJson = json.loads(rawOrder)
    if(orderJson == "") :
        raise ValueError("Json Vide")
    return orderJson
cogs/misc.py | imri0t/ri0t-bot | 2 | 12766707 | <reponame>imri0t/ri0t-bot<gh_stars>1-10
'''cog: miscellaneous commands'''
import os
import asyncio
import random
import discord
from discord.ext import commands
class Misc(): # pylint: disable=too-few-public-methods
    """Miscellaneous moderation commands.

    NOTE(review): written against the pre-1.0 discord.py async API
    (bot.say / logs_from / delete_messages / delete_message) -- confirm the
    installed discord.py version before porting.
    """
    def __init__(self, bot):
        self.bot = bot

    @commands.command(pass_context=True, aliases=['delete', 'prune', 'Clear', 'CLEAR'])
    @commands.has_permissions(manage_messages=True)
    async def clear(self, ctx, amount):
        '''Delete the last *amount* messages in the invoking channel.'''
        channel = ctx.message.channel
        messages = []
        # +1 so the invoking command message itself is removed as well.
        # NOTE(review): int(amount) raises ValueError on non-numeric input.
        async for message in self.bot.logs_from(channel, limit=int(amount) + 1):
            messages.append(message)
        await self.bot.delete_messages(messages)
        msg = await self.bot.say('I got rid of __{} {}__ for you'.format(amount, 'message' if int(amount) == 1 else 'messages'))
        # Show the confirmation briefly, then clean it up too.
        await asyncio.sleep(3)
        await self.bot.delete_message(msg)
def setup(bot):
    '''Register the Misc cog on *bot* (extension entry point).'''
    bot.add_cog(Misc(bot))
    print('misc commands ready')
#░▓▒ | 2.59375 | 3 |
src/woolgatherer/db_models/feedback.py | dojoteef/storium-frontend | 3 | 12766708 | """
Feedback database model
"""
from uuid import UUID
# pylint incorrectly complains about unused import for UniqueConstraint... not sure why
from sqlalchemy.schema import ( # pylint:disable=unused-import
ForeignKey,
UniqueConstraint,
)
from pydantic import Field
from woolgatherer.db_models.base import DBBaseModel
from woolgatherer.models.feedback import FeedbackType
class Feedback(DBBaseModel, constraints=[UniqueConstraint("type", "suggestion_id")]):
    """
    Database model for feedback on a suggestion: the free-text response,
    its type, and the suggestion it refers to.  The (type, suggestion_id)
    pair is unique, i.e. at most one feedback of each type per suggestion.
    """

    response: str = Field(...)
    type: FeedbackType = Field(..., index=True)
    # NOTE(review): the keyword is spelled 'foriegn_key' -- verify whether
    # DBBaseModel reads this exact (misspelled) key before correcting it.
    suggestion_id: UUID = Field(
        ..., index=True, foriegn_key=ForeignKey("suggestion.uuid")
    )
| 2.0625 | 2 |
mangrove/form_model/field.py | mariot/mangrove | 0 | 12766709 | <gh_stars>0
from collections import OrderedDict
import re
from HTMLParser import HTMLParser
import abc
from datetime import datetime
from babel.dates import format_date
from coverage.html import escape
from mangrove.data_cleaner import TelephoneNumber
from mangrove.datastore.entity import get_all_entities
from mangrove.errors.MangroveException import AnswerTooBigException, AnswerTooSmallException, AnswerWrongType, \
IncorrectDate, AnswerTooLongException, AnswerTooShortException, GeoCodeFormatException, \
RequiredFieldNotPresentException
from mangrove.form_model.validation import ChoiceConstraint, GeoCodeConstraint, constraints_factory, \
TextLengthConstraint, ShortCodeRegexConstraint
from mangrove.utils.types import is_sequence, is_empty, sequence_to_str
from mangrove.validate import VdtValueTooBigError, VdtValueTooSmallError, VdtTypeError, VdtValueTooShortError, \
VdtValueTooLongError
def create_question_from(dictionary, dbm):
    """
    Given a dictionary that defines a question, this would create a field with all the validations that are
    defined on it.

    Dispatches on dictionary["type"] to the matching _get_*_field factory;
    returns None for an unknown type.  *dbm* is only needed for field sets,
    whose sub-questions are built recursively.
    """
    # Common attributes shared by every field type.
    type = dictionary.get("type")
    name = dictionary.get("name")
    code = dictionary.get("code")
    label = dictionary.get("label")
    instruction = dictionary.get("instruction")
    required = dictionary.get("required")
    unique_id_type = dictionary.get("unique_id_type")
    parent_field_code = dictionary.get("parent_field_code")
    hint = dictionary.get('hint')
    constraint_message = dictionary.get('constraint_message')
    appearance = dictionary.get('appearance')
    default = dictionary.get('default')
    xform_constraint = dictionary.get('xform_constraint')
    relevant = dictionary.get('relevant')
    if type == field_attributes.TEXT_FIELD:
        return _get_text_field(code, dictionary, label, name, instruction, required, parent_field_code, hint,
                               constraint_message, appearance, default, xform_constraint, relevant)
    if type == field_attributes.BOOLEAN_FIELD:
        return _get_boolean_field(code, dictionary, label, name, instruction, required, parent_field_code, hint,
                                  constraint_message, appearance, default, xform_constraint, relevant)
    if type == field_attributes.TIME:
        return _get_time_field(code, dictionary, label, name, instruction, required, parent_field_code, hint,
                               constraint_message, appearance, default, xform_constraint, relevant)
    if type == field_attributes.DATE_TIME:
        return _get_date_time_field(code, dictionary, label, name, instruction, required, parent_field_code, hint,
                                    constraint_message, appearance, default, xform_constraint, relevant)
    elif type == field_attributes.INTEGER_FIELD:
        return _get_integer_field(code, dictionary, label, name, instruction, required, parent_field_code, hint,
                                  constraint_message, appearance, default, xform_constraint, relevant)
    elif type == field_attributes.DATE_FIELD:
        return _get_date_field(code, dictionary, label, name, instruction, required, parent_field_code, hint,
                               constraint_message, appearance, default, xform_constraint, relevant)
    elif type == field_attributes.LOCATION_FIELD:
        return _get_geo_code_field(code, instruction, label, name, required, parent_field_code, hint,
                                   constraint_message, appearance, default, xform_constraint, relevant)
    elif type == field_attributes.SELECT_FIELD or type == field_attributes.MULTISELECT_FIELD:
        # single- vs multi-select is decided inside _get_select_field
        return _get_select_field(code, dictionary, label, name, type, instruction, required, parent_field_code, hint,
                                 constraint_message, appearance, default, xform_constraint, relevant)
    elif type == field_attributes.SELECT_ONE_EXTERNAL_FIELD:
        return _get_select_one_external_field(code, label, name, instruction, required,
                                              parent_field_code, hint, appearance, default, relevant)
    elif type == field_attributes.LIST_FIELD:
        return _get_list_field(name, code, label, instruction, required, parent_field_code, hint, constraint_message,
                               appearance, default, xform_constraint, relevant)
    elif type == field_attributes.TELEPHONE_NUMBER_FIELD:
        return _get_telephone_number_field(code, dictionary, label, name, instruction, required, parent_field_code,
                                           hint, constraint_message, appearance, default, xform_constraint, relevant)
    elif type == field_attributes.SHORT_CODE_FIELD:
        return _get_short_code_field(code, dictionary, label, name, instruction, required, parent_field_code, hint,
                                     constraint_message, appearance, default, xform_constraint, relevant)
    elif type == field_attributes.UNIQUE_ID_FIELD:
        return _get_unique_id_field(unique_id_type, code, dictionary, label, name, instruction, required,
                                    parent_field_code, hint, constraint_message, appearance, default, xform_constraint,
                                    relevant)
    elif type == field_attributes.FIELD_SET:
        return _get_field_set_field(code, dictionary, label, name, instruction, required, dbm, parent_field_code, hint,
                                    constraint_message, appearance, default, xform_constraint, relevant)
    elif type == field_attributes.PHOTO or type == field_attributes.VIDEO or type == field_attributes.AUDIO:
        # photo/video/audio share one constructor path
        return _get_media_field(type, code, label, name, instruction, required, parent_field_code, hint,
                                constraint_message, appearance, default, xform_constraint, relevant)

    return None
def _get_media_class(type):
    """Map a media field type name ('photo'/'video'/'audio') to its Field subclass."""
    return {'photo': PhotoField, 'video': VideoField, 'audio': AudioField}[type]
def _get_media_field(type, code, label, name, instruction, required, parent_field_code, hint, constraint_message,
                     appearance, default, xform_constraint, relevant):
    """Build a Photo/Video/Audio field; the concrete class is chosen by *type*."""
    MediaClass = _get_media_class(type)
    field = MediaClass(name=name, code=code, label=label, instruction=instruction,
                       required=required, parent_field_code=parent_field_code, hint=hint,
                       constraint_message=constraint_message,
                       appearance=appearance, default=default, xform_constraint=xform_constraint, relevant=relevant)
    return field
def _get_field_set_field(code, dictionary, label, name, instruction, required, dbm, parent_field_code, hint,
                         constraint_message, appearance, default, xform_constraint, relevant):
    """Build a FieldSet (group/repeat) field; sub-questions are created recursively."""
    # NOTE(review): constraints are parsed here but never passed to FieldSet.
    constraints, constraints_json = [], dictionary.get("constraints")
    if constraints_json is not None:
        constraints = constraints_factory(constraints_json)
    sub_fields = dictionary.get("fields")
    fieldset_type = dictionary.get("fieldset_type")
    repeat_question_fields = [create_question_from(f, dbm) for f in sub_fields]
    field = FieldSet(name=name, code=code, label=label, instruction=instruction, required=required,
                     field_set=repeat_question_fields, fieldset_type=fieldset_type, parent_field_code=parent_field_code,
                     hint=hint, constraint_message=constraint_message, appearance=appearance, default=default,
                     xform_constraint=xform_constraint, relevant=relevant)
    return field
def _get_text_field(code, dictionary, label, name, instruction, required, parent_field_code, hint, constraint_message,
                    appearance, default, xform_constraint, relevant):
    """Build a TextField, wiring up any constraints declared in the dictionary."""
    constraints, constraints_json = [], dictionary.get("constraints")
    if constraints_json is not None:
        constraints = constraints_factory(constraints_json)
    field = TextField(name=name, code=code, label=label,
                      constraints=constraints, instruction=instruction, required=required,
                      parent_field_code=parent_field_code, is_calculated=dictionary.get('is_calculated'),
                      hint=hint, constraint_message=constraint_message, appearance=appearance, default=default,
                      xform_constraint=xform_constraint, relevant=relevant)
    return field
def _get_boolean_field(code, dictionary, label, name, instruction, required, parent_field_code, hint,
                       constraint_message, appearance, default, xform_constraint, relevant):
    """Build a BooleanField, wiring up any constraints declared in the dictionary."""
    constraints, constraints_json = [], dictionary.get("constraints")
    if constraints_json is not None:
        constraints = constraints_factory(constraints_json)
    field = BooleanField(name=name, code=code, label=label, constraints=constraints, instruction=instruction,
                         required=required, parent_field_code=parent_field_code, hint=hint,
                         constraint_message=constraint_message,
                         appearance=appearance, default=default, xform_constraint=xform_constraint, relevant=relevant)
    return field
def _get_time_field(code, dictionary, label, name, instruction, required, parent_field_code, hint, constraint_message,
                    appearance, default, xform_constraint, relevant):
    """Build a TimeField, wiring up any constraints declared in the dictionary."""
    constraints, constraints_json = [], dictionary.get("constraints")
    if constraints_json is not None:
        constraints = constraints_factory(constraints_json)
    field = TimeField(name=name, code=code, label=label,
                      constraints=constraints, instruction=instruction, required=required,
                      parent_field_code=parent_field_code, hint=hint, constraint_message=constraint_message,
                      appearance=appearance, default=default, xform_constraint=xform_constraint, relevant=relevant)
    return field
def _get_date_time_field(code, dictionary, label, name, instruction, required, parent_field_code, hint,
                         constraint_message, appearance, default, xform_constraint, relevant):
    """Build a DateTimeField, wiring up any constraints declared in the dictionary."""
    constraints, constraints_json = [], dictionary.get("constraints")
    if constraints_json is not None:
        constraints = constraints_factory(constraints_json)
    field = DateTimeField(name=name, code=code, label=label,
                          constraints=constraints, instruction=instruction, required=required,
                          parent_field_code=parent_field_code, hint=hint, constraint_message=constraint_message,
                          appearance=appearance, default=default, xform_constraint=xform_constraint, relevant=relevant)
    return field
def _get_short_code_field(code, dictionary, label, name, instruction, required, parent_field_code, hint,
                          constraint_message, appearance, default, xform_constraint, relevant):
    """Build a ShortCodeField, wiring up any constraints declared in the dictionary."""
    constraints, constraints_json = [], dictionary.get("constraints")
    if constraints_json is not None:
        constraints = constraints_factory(constraints_json)
    field = ShortCodeField(name=name, code=code, label=label,
                           constraints=constraints, instruction=instruction, required=required,
                           parent_field_code=parent_field_code, hint=hint, appearance=appearance,
                           constraint_message=constraint_message, default=default, xform_constraint=xform_constraint,
                           relevant=relevant)
    return field
def _get_unique_id_field(unique_id_type, code, dictionary, label, name, instruction, required, parent_field_code, hint,
                         constraint_message, appearance, default, xform_constraint, relevant):
    """Build a UniqueIdField referencing an entity of *unique_id_type*.

    NOTE(review): the *label* and *instruction* parameters are ignored --
    both are re-read from the dictionary instead; confirm this is intended.
    """
    return UniqueIdField(unique_id_type=unique_id_type, name=name, code=code,
                         label=dictionary.get("label"),
                         instruction=dictionary.get("instruction"), parent_field_code=parent_field_code,
                         xform_field_reference=dictionary.get("xform_field_reference"), hint=hint,
                         constraint_message=constraint_message, appearance=appearance, default=default,
                         xform_constraint=xform_constraint, relevant=relevant, required=required)
def _get_telephone_number_field(code, dictionary, label, name, instruction, required, parent_field_code, hint,
                                constraint_message, appearance, default, xform_constraint, relevant):
    """Build a TelephoneNumberField, wiring up any constraints declared in the dictionary."""
    constraints, constraints_json = [], dictionary.get("constraints")
    if constraints_json is not None:
        constraints = constraints_factory(constraints_json)
    field = TelephoneNumberField(name=name, code=code, label=label, constraints=constraints,
                                 instruction=instruction, required=required, parent_field_code=parent_field_code,
                                 hint=hint, constraint_message=constraint_message, appearance=appearance,
                                 default=default, xform_constraint=xform_constraint, relevant=relevant)
    return field
def _get_integer_field(code, dictionary, label, name, instruction, required, parent_field_code, hint,
                       constraint_message, appearance, default, xform_constraint, relevant):
    """Build an IntegerField, wiring up any numeric range constraints."""
    constraints, constraint_list = [], dictionary.get('constraints')
    if constraint_list is not None:
        constraints = constraints_factory(constraint_list)
    integer_field = IntegerField(name=name, code=code, label=label, instruction=instruction,
                                 constraints=constraints, required=required, parent_field_code=parent_field_code,
                                 hint=hint, constraint_message=constraint_message, appearance=appearance,
                                 default=default, xform_constraint=xform_constraint, relevant=relevant)
    return integer_field
def _get_date_field(code, dictionary, label, name, instruction, required, parent_field_code, hint, constraint_message,
                    appearance, default, xform_constraint, relevant):
    """Build a DateField using the 'date_format' declared in the dictionary."""
    date_format = dictionary.get("date_format")
    date_field = DateField(name=name, code=code, label=label, date_format=date_format,
                           instruction=instruction, required=required, parent_field_code=parent_field_code,
                           hint=hint, constraint_message=constraint_message, appearance=appearance, default=default,
                           xform_constraint=xform_constraint, relevant=relevant)
    return date_field
def _get_select_field(code, dictionary, label, name, type, instruction, required, parent_field_code, hint,
                      constraint_message, appearance, default, xform_constraint, relevant):
    """Build a SelectField; *type* decides single-select ('select1') vs multi-select."""
    choices = dictionary.get("choices")
    single_select = True if type == field_attributes.SELECT_FIELD else False
    field = SelectField(name=name, code=code, label=label, options=choices, single_select_flag=single_select,
                        instruction=instruction, required=required, parent_field_code=parent_field_code,
                        has_other=dictionary.get("has_other"), hint=hint, constraint_message=constraint_message,
                        appearance=appearance, default=default, xform_constraint=xform_constraint, relevant=relevant)
    return field
def _get_select_one_external_field(code, label, name, instruction, required, parent_field_code, hint, appearance, default, relevant):
    """Build a SelectOneExternalField (choices come from an external source)."""
    field = SelectOneExternalField(name=name, code=code, label=label,
                                   instruction=instruction, required=required,
                                   parent_field_code=parent_field_code,
                                   hint=hint, appearance=appearance, default=default, relevant=relevant)
    return field
def _get_list_field(name, code, label, instruction, required, parent_field_code, hint, constraint_message, appearance,
                    default, xform_constraint, relevant):
    """Build a HierarchyField (list/hierarchy question)."""
    field = HierarchyField(name, code, label, instruction=instruction, required=required,
                           parent_field_code=parent_field_code, hint=hint, appearance=appearance,
                           constraint_message=constraint_message, default=default, xform_constraint=xform_constraint,
                           relevant=relevant)
    return field
def _get_geo_code_field(code, instruction, label, name, required, parent_field_code, hint, constraint_message,
                        appearance, default, xform_constraint, relevant):
    """Build a GeoCodeField (GPS/location question)."""
    field = GeoCodeField(name=name, code=code, label=label, instruction=instruction,
                         required=required, parent_field_code=parent_field_code,
                         hint=hint, constraint_message=constraint_message, appearance=appearance, default=default,
                         xform_constraint=xform_constraint, relevant=relevant)
    return field
def field_to_json(object):
    """JSON-encoder hook: datetimes become ISO-8601 strings; anything else is
    assumed to expose a _to_json_view() method (the Field contract)."""
    if isinstance(object, datetime):
        return object.isoformat()
    return object._to_json_view()
class field_attributes(object):
    """Constants for referencing standard attributes in questionnaire."""
    # generic attribute keys
    LANGUAGE = "language"
    FIELD_CODE = "code"
    INSTRUCTION = "instruction"
    # field type identifiers (values are the serialized 'type' strings)
    INTEGER_FIELD = "integer"
    TEXT_FIELD = "text"
    BOOLEAN_FIELD = "boolean"
    SHORT_CODE_FIELD = "short_code"
    TELEPHONE_NUMBER_FIELD = "telephone_number"
    SELECT_FIELD = 'select1'
    LOCATION_FIELD = "geocode"
    DATE_FIELD = 'date'
    MULTISELECT_FIELD = 'select'
    DEFAULT_LANGUAGE = "en"
    ENTITY_QUESTION_FLAG = 'entity_question_flag'
    NAME = "name"
    LIST_FIELD = "list"
    UNIQUE_ID_FIELD = "unique_id"
    FIELD_SET = "field_set"
    PHOTO = "photo"
    VIDEO = "video"
    AUDIO = "audio"
    TIME = "time"
    DATE_TIME = "datetime"
    SELECT_ONE_EXTERNAL_FIELD = "select_one_external"
class Field(object):
    """Base class for all questionnaire fields.

    State is kept in self._dict (the serializable view); self.constraints
    holds the live constraint objects and self.value the last value set.
    """

    def __init__(self, type="", name="", code="", label='', instruction='',
                 constraints=None, required=True, parent_field_code=None, hint=None, constraint_message=None,
                 appearance=None, default=None, xform_constraint=None, relevant=None):
        if not constraints: constraints = []
        # NOTE(review): this first assignment is immediately overwritten below.
        self._dict = {}
        self._dict = {'name': name, 'type': type, 'code': code, 'instruction': instruction,
                      'label': label, 'required': required, 'parent_field_code': parent_field_code,
                      'hint': hint, 'constraint_message': constraint_message, 'appearance': appearance,
                      'default': default, 'xform_constraint': xform_constraint,
                      'relevant': relevant}
        self.constraints = constraints
        self.errors = []
        self.value = None
        # Serialize each non-empty constraint into the dict view.
        if not is_empty(constraints):
            self._dict['constraints'] = []
            for constraint in constraints:
                constraint_json = constraint._to_json()
                if not is_empty(constraint_json):
                    self._dict['constraints'].append(constraint_json)

    @property
    def name(self):
        return self._dict.get("name")

    def set_name(self, new_name):
        self._dict["name"] = new_name

    def set_label(self, new_label):
        self._dict["label"] = new_label

    def set_instruction(self, new_instruction):
        self._dict["instruction"] = new_instruction

    def set_constraints(self, new_constraints):
        # Keeps the dict view and the live constraint list in sync.
        self._dict["constraints"] = new_constraints
        self.constraints = new_constraints

    @property
    def label(self):
        return self._dict.get('label')

    @property
    def type(self):
        return self._dict.get('type')

    @property
    def code(self):
        return self._dict.get('code')

    @property
    def instruction(self):
        return self._dict.get('instruction')

    @property
    def hint(self):
        return self._dict.get('hint')

    @property
    def appearance(self):
        return self._dict.get('appearance')

    @property
    def default(self):
        return self._dict.get('default')

    @property
    def constraint_message(self):
        return self._dict.get('constraint_message')

    @property
    def xform_constraint(self):
        return self._dict.get('xform_constraint')

    @property
    def relevant(self):
        return self._dict.get('relevant')

    @property
    def parent_field_code(self):
        return self._dict.get('parent_field_code')

    @property
    def is_entity_field(self):
        # Overridden by UniqueIdField.
        return False

    @property
    def is_event_time_field(self):
        return False

    @property
    def is_field_set(self):
        return False

    def is_group(self):
        return False

    def _to_json(self):
        """Return a shallow copy of the serializable state."""
        dict = self._dict.copy()
        # NOTE(review): redundant re-assignment -- 'instruction' is already in the copy.
        dict['instruction'] = self._dict['instruction']
        return dict

    def _to_json_view(self):
        """Like _to_json but with constraints flattened into top-level keys."""
        json = self._to_json()
        if 'constraints' in json:
            constraints = json.pop('constraints')
            for constraint in constraints:
                json[constraint[0]] = constraint[1]
        return json

    def set_value(self, value):
        self.value = value

    def get_constraint_text(self):
        # Subclasses return a human-readable constraint description.
        return ""

    def is_required(self):
        return self._dict['required']

    def set_required(self, required):
        self._dict["required"] = required

    def validate(self, value):
        """Base check: required fields must not be empty."""
        if self.is_required() and is_empty(value):
            raise RequiredFieldNotPresentException(self.code)

    def convert_to_unicode(self):
        if self.value is None:
            return unicode("")
        return unicode(self.value)

    def stringify(self):
        return self.convert_to_unicode()

    def xform_constraints(self):
        """Join the non-empty xform expressions of all constraints with 'and'."""
        return " and ".join(filter(None, [constraint.xform_constraint() for constraint in self.constraints]))

    # NOTE(review): @abc.abstractmethod has no effect here because Field does
    # not use ABCMeta as its metaclass -- subclasses are not forced to override.
    @abc.abstractmethod
    def formatted_field_values_for_excel(self, value):
        pass
class IntegerField(Field):
    """Numeric field; validates against range constraints and coerces to int/float."""

    def __init__(self, name, code, label, instruction=None,
                 constraints=None, required=True, parent_field_code=None, hint=None, constraint_message=None,
                 appearance=None, default=None, xform_constraint=None, relevant=None):
        if not constraints: constraints = []
        Field.__init__(self, type=field_attributes.INTEGER_FIELD, name=name, code=code,
                       label=label, instruction=instruction, constraints=constraints, required=required,
                       parent_field_code=parent_field_code, hint=hint, constraint_message=constraint_message,
                       appearance=appearance, default=default, xform_constraint=xform_constraint, relevant=relevant)

    def validate(self, value):
        """Run range constraints, then return int(value), falling back to float.

        Translates validator errors into the domain exceptions callers catch.
        """
        Field.validate(self, value)
        try:
            for constraint in self.constraints:
                constraint.validate(value)
            try:
                return int(value)
            except Exception:
                return float(value)
        except VdtValueTooBigError:
            raise AnswerTooBigException(self._dict[field_attributes.FIELD_CODE], value)
        except VdtValueTooSmallError:
            raise AnswerTooSmallException(self._dict[field_attributes.FIELD_CODE], value)
        except VdtTypeError:
            raise AnswerWrongType(self._dict[field_attributes.FIELD_CODE], value)

    def get_constraint_text(self):
        """Human-readable description of the numeric range, or '' if unconstrained."""
        max, min = self._get_max_min()
        if min is not None and max is None:
            constraint_text = "Minimum %s" % min
            return constraint_text
        if min is None and max is not None:
            constraint_text = "Upto %s" % max
            return constraint_text
        elif min is not None and max is not None:
            constraint_text = "%s -- %s" % (min, max)
            return constraint_text
        return ""

    def _get_max_min(self):
        # Only the first constraint is consulted for the (min, max) range.
        max = min = None
        if len(self.constraints) > 0:
            constraint = self.constraints[0]
            min = constraint.min
            max = constraint.max
        return max, min

    def formatted_field_values_for_excel(self, value):
        """Return float(value) for export; '' for None; the raw value if unparsable."""
        try:
            if value is None:
                return ""
            return float(value)
        except ValueError:
            return value
class DateField(Field):
    """Date field; parses/renders values according to a named date format."""

    DATE_FORMAT = "date_format"
    # named format -> strptime pattern for parsing submissions
    # NOTE(review): "dd.MM.yyyy HH:mm:ss" maps to a date-only pattern, so the
    # time portion is dropped on parse -- confirm this is intended.
    DATE_DICTIONARY = {'mm.yyyy': '%m.%Y', 'dd.mm.yyyy': '%d.%m.%Y', 'mm.dd.yyyy': '%m.%d.%Y', 'yyyy': '%Y',
                       "dd.MM.yyyy HH:mm:ss": "%d.%m.%Y"}
    # named format -> babel format_date pattern for display
    FORMAT_DATE_DICTIONARY = {'mm.yyyy': 'MM.yyyy', 'dd.mm.yyyy': 'dd.MM.yyyy', 'mm.dd.yyyy': 'MM.dd.yyyy',
                              'submission_date_format': 'MMM. dd, yyyy, hh:mm a', 'yyyy': 'yyyy',
                              "hh:mm": "hour_minute",
                              "dd.MM.yyyy HH:mm:ss": "dd.MM.yyyy HH:mm:ss"}

    def __init__(self, name, code, label, date_format, instruction=None,
                 required=True, parent_field_code=None, hint=None, constraint_message=None, appearance=None,
                 default=None, xform_constraint=None, relevant=None):
        Field.__init__(self, type=field_attributes.DATE_FIELD, name=name, code=code,
                       label=label, instruction=instruction, required=required, parent_field_code=parent_field_code,
                       hint=hint, constraint_message=constraint_message, appearance=appearance, default=default,
                       xform_constraint=xform_constraint, relevant=relevant)
        self._dict[self.DATE_FORMAT] = date_format

    def validate(self, value):
        """Required-check, then parse the string into a datetime (raises IncorrectDate)."""
        Field.validate(self, value)
        return self.__date__(value)

    @property
    def date_format(self):
        return self._dict.get(self.DATE_FORMAT)

    @property
    def is_monthly_format(self):
        return self.date_format == 'mm.yyyy'

    def get_constraint_text(self):
        return self.date_format

    def convert_to_unicode(self):
        """Render self.value with babel's format_date; non-datetime values pass through."""
        if self.value is None:
            return unicode("")
        date_format = self.FORMAT_DATE_DICTIONARY.get(self.date_format)
        return format_date(self.value, date_format) if isinstance(self.value, datetime) else unicode(self.value)

    def formatted_field_values_for_excel(self, value):
        """Wrap a parsable value in ExcelDate; '' for None; raw value when unparsable."""
        try:
            if value is None:
                return ""
            return ExcelDate(self.__date__(value), self.date_format)
        except IncorrectDate:
            return value

    def __date__(self, date_string):
        # Parse with the strptime pattern for this field's named format;
        # translate parse failures into the domain exception.
        try:
            return datetime.strptime(date_string.strip(), DateField.DATE_DICTIONARY.get(self.date_format))
        except ValueError:
            raise IncorrectDate(self._dict.get(field_attributes.FIELD_CODE), date_string,
                                self._dict.get(self.DATE_FORMAT))
# All the Field Types should be be wrapped with Excel Field types defined in other project including the lead part fields.
# That will require atleast a couple of days of work
class ExcelDate(object):
    """Wraps a datetime plus the questionnaire format it was captured in, so
    exports can render and compare it consistently."""

    DATE_DICTIONARY = {'mm.yyyy': '%m.%Y', 'dd.mm.yyyy': '%d.%m.%Y', 'mm.dd.yyyy': '%m.%d.%Y'}

    def __init__(self, date, date_format):
        self.date = date
        self.date_format = date_format

    def date_as_string(self):
        '''Render self.date using the strftime pattern for self.date_format,
        falling back to the submission-date display format.

        This is implemented for KeywordFilter.filter; ideally the keyword
        would be passed in and a boolean returned.'''
        return self.date.strftime(ExcelDate.DATE_DICTIONARY.get(self.date_format, '%b. %d, %Y, %I:%M %p'))

    def __eq__(self, other):
        # Robustness fix: the original accessed other.date unconditionally and
        # raised AttributeError when compared with a non-ExcelDate object.
        if not isinstance(other, ExcelDate):
            return NotImplemented
        return self.date == other.date

    def __ne__(self, other):
        # Needed explicitly on Python 2, where __ne__ is not derived from __eq__.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
class TextField(Field):
    """Free-text field with optional length/regex constraints.

    'is_calculated' marks xform-calculated fields; 'is_other' marks the
    free-text companion of a select question's "other" option.
    """

    DEFAULT_VALUE = "defaultValue"
    CONSTRAINTS = "constraints"

    def __init__(self, name, code, label, constraints=None, defaultValue="", instruction=None,
                 required=True, parent_field_code=None, is_calculated=False, hint=None, constraint_message=None,
                 appearance=None, default=None, xform_constraint=None, relevant=None, is_other=None):
        if not constraints: constraints = []
        assert isinstance(constraints, list)
        Field.__init__(self, type=field_attributes.TEXT_FIELD, name=name, code=code,
                       label=label, instruction=instruction, constraints=constraints, required=required,
                       parent_field_code=parent_field_code,
                       hint=hint, constraint_message=constraint_message, appearance=appearance, default=default,
                       xform_constraint=xform_constraint, relevant=relevant)
        # The default value doubles as the initial field value.
        self.value = self._dict[self.DEFAULT_VALUE] = defaultValue if defaultValue is not None else ""
        # Only record is_calculated when truthy, so the serialized dict stays lean.
        if is_calculated:
            self.is_calculated = True
        self.is_other = True if is_other is not None and is_other else False

    @property
    def is_calculated(self):
        return self._dict.get("is_calculated", False)

    @is_calculated.setter
    def is_calculated(self, is_calculated):
        self._dict["is_calculated"] = is_calculated

    def set_value(self, value):
        # Calculated fields can yield 'NaN'/'Invalid Date' from the xform
        # engine; treat those as empty.
        self.value = "" if self.is_calculated and value in ['NaN', 'Invalid Date'] else value

    def validate(self, value):
        """Required-check, strip whitespace, then run all constraints in order.

        Translates validator length errors into the domain exceptions.
        """
        Field.validate(self, value)
        try:
            value = value.strip()
            for constraint in self.constraints:
                value = constraint.validate(value)
            return value
        except VdtValueTooLongError as valueTooLongError:
            raise AnswerTooLongException(self._dict[field_attributes.FIELD_CODE], value, valueTooLongError.args[1])
        except VdtValueTooShortError as valueTooShortError:
            raise AnswerTooShortException(self._dict[field_attributes.FIELD_CODE], value, valueTooShortError.args[1])

    def get_constraint_text(self):
        """Human-readable description of the first (length) constraint, or ''."""
        if not is_empty(self.constraints):
            length_constraint = self.constraints[0]
            min = length_constraint.min
            max = length_constraint.max
            if min is not None and max is None:
                constraint_text = "Minimum %s characters" % min
                return constraint_text
            if min is None and max is not None:
                constraint_text = "Upto %s characters" % max
                return constraint_text
            elif min is not None and max is not None:
                constraint_text = "Between %s -- %s characters" % (min, max)
                return constraint_text
        return ""

    def formatted_field_values_for_excel(self, value):
        # Text exports verbatim.
        return value
class BooleanField(Field):
    """Yes/no questionnaire field; only the literal string 'True' validates to True."""

    DEFAULT_VALUE = "defaultValue"
    CONSTRAINTS = "constraints"

    def __init__(self, name, code, label, constraints=None, defaultValue=False, instruction=None,
                 required=True, parent_field_code=None, hint=None, constraint_message=None, appearance=None,
                 default=None, xform_constraint=None, relevant=None):
        if not constraints:
            constraints = []
        assert isinstance(constraints, list)
        Field.__init__(self, type=field_attributes.BOOLEAN_FIELD, name=name, code=code,
                       label=label, instruction=instruction, constraints=constraints, required=required,
                       parent_field_code=parent_field_code, hint=hint, constraint_message=constraint_message,
                       appearance=appearance, default=default, xform_constraint=xform_constraint, relevant=relevant)
        # The default value doubles as the initial field value.
        self.value = self._dict[self.DEFAULT_VALUE] = defaultValue if defaultValue is not None else ""

    def validate(self, value):
        """Run the base required-field check, then coerce: only the exact
        string 'True' maps to True; everything else is False."""
        super(BooleanField, self).validate(value)
        # Dead-code fix: a string equality test can never raise ValueError,
        # so the original try/except ValueError wrapper has been removed.
        return value == 'True'
class UniqueIdField(Field):
    """Question whose answer is the short code of a registered subject of
    ``unique_id_type`` (e.g. a clinic or a waterpoint)."""

    def __init__(self, unique_id_type, name, code, label, constraints=None, defaultValue=None, instruction=None,
                 required=True, parent_field_code=None, xform_field_reference=None, hint=None, constraint_message=None,
                 appearance=None, default=None, xform_constraint=None, relevant=None):
        if not constraints: constraints = []
        assert isinstance(constraints, list)
        Field.__init__(self, type=field_attributes.UNIQUE_ID_FIELD, name=name, code=code, label=label,
                       instruction=instruction,
                       constraints=constraints, required=required, parent_field_code=parent_field_code,
                       hint=hint, constraint_message=constraint_message, appearance=appearance, default=default,
                       xform_constraint=xform_constraint, relevant=relevant)
        self.unique_id_type = unique_id_type
        self.xform_field_reference = xform_field_reference

    def validate(self, value):
        # Short codes are case-insensitive; canonicalise to lower case.
        super(UniqueIdField, self).validate(value)
        return value.lower()

    @property  # TODO:Remove
    def is_entity_field(self):
        return True

    def _to_json(self):
        # Extend the base serialisation with the subject-type metadata.
        dict = super(UniqueIdField, self)._to_json()
        dict['unique_id_type'] = self.unique_id_type
        dict['xform_field_reference'] = self.xform_field_reference
        return dict

    def stringify(self):
        # Display form "Type(value)", e.g. "Clinic(cli001)".
        return unicode("%s(%s)" % (unicode(self.unique_id_type.capitalize()), self.convert_to_unicode()))

    def set_value(self, value):
        # NOTE(review): falsy values are silently ignored, leaving any
        # previously stored answer in place — confirm this is intentional.
        if value:
            self.value = value.lower()
class UniqueIdUIField(UniqueIdField):
    """UniqueIdField enriched with the registered subjects from the database,
    used to render choice lists in the web UI and Enketo forms."""

    def __init__(self, field, dbm):
        super(UniqueIdUIField, self).__init__(unique_id_type=field.unique_id_type, name=field.name, code=field.code,
                                              label=field.label, instruction=field.instruction,
                                              constraints=field.constraints, parent_field_code=field.parent_field_code,
                                              hint=field.hint, constraint_message=field.constraint_message,
                                              appearance=field.appearance, default=field.default,
                                              xform_constraint=field.xform_constraint, relevant=field.relevant)
        self.dbm = dbm  # database manager used to fetch registered subjects

    @property
    def options(self):
        """(short_code, display name) pairs for every subject of this type."""
        h = HTMLParser()
        list = []
        for entity in get_all_entities(self.dbm, [self.unique_id_type]):
            # Names are stored HTML-escaped; unescape them for display.
            unescapedLabel = h.unescape(entity.data['name']['value'])
            list.append((entity.short_code, unescapedLabel))
        return list

    @property
    def enketo_options(self):
        """Options in the shape Enketo expects: label "Name (code)", value code."""
        enketo_options = []
        for value in self.options:
            temp_dict = OrderedDict()
            temp_dict['label'] = value[1] + ' (' + value[0] + ')'
            temp_dict['value'] = value[0]
            enketo_options.append(temp_dict)
        return enketo_options
class SelectOneExternalField(Field):
    """Single-select question whose choices live in an external itemset CSV."""

    def __init__(self, name, code, label, instruction=None, required=True, parent_field_code=None, hint=None,
                 appearance=None, default=None, relevant=None):
        type = field_attributes.SELECT_ONE_EXTERNAL_FIELD
        Field.__init__(self, type=type, name=name, code=code, label=label,
                       instruction=instruction, required=required,
                       parent_field_code=parent_field_code, hint=hint,
                       appearance=appearance, default=default, relevant=relevant)

    def get_option_value_list(self, question_value, itemset_data):
        """Look up the display label for ``question_value`` in the itemset CSV.

        Quotes are stripped, the first line is the header; the answer itself
        is returned when no matching 'name' row exists.
        """
        rows = re.sub('"', '', itemset_data).split('\n')
        columns = rows[0].split(',')
        for raw_row in rows[1:]:
            record = dict(zip(columns, raw_row.split(',')))
            if unicode(record['name']) == question_value:
                return record['label']
        return question_value

    def _to_json_view(self):
        return self._dict.copy()
class TelephoneNumberField(TextField):
    """Text field whose answers are cleaned and validated as phone numbers."""

    def __init__(self, name, code, label, constraints=None, defaultValue=None, instruction=None,
                 required=True, parent_field_code=None, hint=None, constraint_message=None, appearance=None,
                 default=None, xform_constraint=None, relevant=None):
        if not constraints: constraints = []
        assert isinstance(constraints, list)
        TextField.__init__(self, name=name, code=code, label=label, instruction=instruction, constraints=constraints,
                           defaultValue=defaultValue,
                           required=required, parent_field_code=parent_field_code, hint=hint,
                           constraint_message=constraint_message,
                           appearance=appearance, default=default, xform_constraint=xform_constraint, relevant=relevant)
        self._dict['type'] = field_attributes.TELEPHONE_NUMBER_FIELD

    def _clean(self, value):
        # Normalise the raw answer via the shared TelephoneNumber cleaner
        # before the text constraints run.
        return TelephoneNumber().clean(value)

    def validate(self, value):
        value = self._clean(value)
        return super(TelephoneNumberField, self).validate(value)
class ShortCodeField(TextField):
    """Text field holding a subject short code (alphanumeric, max 20 chars)."""

    def __init__(self, name, code, label, constraints=None, defaultValue=None, instruction=None,
                 required=False, parent_field_code=None, hint=None, constraint_message=None, appearance=None,
                 default=None, xform_constraint=None, relevant=None):
        if not constraints:
            # Default rules: at most 20 characters, alphanumeric only.
            constraints = [TextLengthConstraint(max=20), ShortCodeRegexConstraint("^[a-zA-Z0-9]+$")]
        assert isinstance(constraints, list)
        TextField.__init__(self, name=name, code=code, label=label, instruction=instruction,
                           constraints=constraints, defaultValue=defaultValue, required=required,
                           parent_field_code=parent_field_code, hint=hint,
                           constraint_message=constraint_message, appearance=appearance,
                           default=default, xform_constraint=xform_constraint, relevant=relevant)
        self._dict['type'] = field_attributes.SHORT_CODE_FIELD

    def _clean(self, value):
        """Lower-case the code; empty/missing input becomes None."""
        if not value:
            return None
        return value.lower()

    def validate(self, value):
        return super(ShortCodeField, self).validate(self._clean(value))

    @property  # TODO:Remove
    def is_entity_field(self):
        return True
class HierarchyField(Field):
    """Question whose answer is a hierarchy path, stored as a list."""

    def __init__(self, name, code, label, instruction=None,
                 required=True, parent_field_code=None, hint=None, constraint_message=None, appearance=None,
                 default=None, xform_constraint=None, relevant=None):
        Field.__init__(self, type=field_attributes.LIST_FIELD, name=name, code=code,
                       label=label, instruction=instruction, required=required,
                       parent_field_code=parent_field_code, hint=hint,
                       constraint_message=constraint_message, appearance=appearance,
                       default=default, xform_constraint=xform_constraint, relevant=relevant)

    def validate(self, value):
        """Run base validation and wrap a scalar answer in a list."""
        Field.validate(self, value)
        if value is not None and not is_sequence(value):
            return [value]
        return value

    def convert_to_unicode(self):
        if self.value is None:
            return unicode("")
        if isinstance(self.value, list):
            return sequence_to_str(self.value)
        return unicode(self.value)
class SelectField(Field):
    '''Single- or multi-select question.

    Option values should contain single letters like a,b,c,d etc and after
    26 options should start with a number followed by a single character,
    like 1a,1b,1c,1d etc.
    '''
    OPTIONS = "choices"

    def __init__(self, name, code, label, options, instruction=None,
                 single_select_flag=True, required=True, parent_field_code=None, has_other=False, hint=None,
                 constraint_message=None, appearance=None, default=None, xform_constraint=None, relevant=None,
                 is_cascade=False):
        assert len(options) > 0
        # 'select1' for single choice, 'select' for multiple choice.
        type = field_attributes.SELECT_FIELD if single_select_flag else field_attributes.MULTISELECT_FIELD
        self.single_select_flag = single_select_flag
        Field.__init__(self, type=type, name=name, code=code,
                       label=label, instruction=instruction, required=required, parent_field_code=parent_field_code,
                       hint=hint, constraint_message=constraint_message, appearance=appearance, default=default,
                       xform_constraint=xform_constraint, relevant=relevant)
        self._dict[self.OPTIONS] = []
        valid_choices = self._dict[self.OPTIONS]
        if has_other:
            self._dict['has_other'] = has_other
        # Normalise every option to a {'text': ..., 'val': ...} dict.
        if options is not None:
            for option in options:
                if isinstance(option, tuple):
                    single_language_specific_option = {'text': option[0], 'val': option[1]}
                elif isinstance(option, dict):
                    single_language_specific_option = option
                else:
                    single_language_specific_option = {'text': option, 'val': option}
                valid_choices.append(single_language_specific_option)
        self._dict["is_cascade"] = is_cascade
        self.constraint = ChoiceConstraint(
            list_of_valid_choices=valid_choices,
            single_select_constraint=single_select_flag, code=code, has_other=has_other)

    SINGLE_SELECT_FLAG = 'single_select_flag'

    def validate(self, value):
        Field.validate(self, value)
        return self.constraint.validate(answer=value)

    @property
    def options(self):
        return self._dict.get(self.OPTIONS)

    @property
    def is_cascade(self):
        return self._dict.get("is_cascade")

    def _to_json_view(self):
        dict = self._dict.copy()
        return dict

    @property
    def has_other(self):
        return self._dict.get('has_other')

    def get_constraint_text(self):
        # The "constraint" shown to users is simply the list of option labels.
        return [option["text"] for option in self.options]

    def convert_to_unicode(self):
        if self.value is None:
            return unicode("")
        return unicode(",".join([unicode(i) for i in self.value])) if isinstance(self.value, list) else unicode(
            self.value)

    @property
    def is_single_select(self):
        return self.type == "select1"

    def get_value_by_option(self, option, default=None):
        """Return the display text for an option value (case-insensitive)."""
        for opt in self.options:
            opt_text = opt['text']
            opt_value = opt['val']
            if opt_value.lower() == option.lower():
                return opt_text
        return default

    def get_option_value_list(self, question_value):
        """Map a raw answer to a list of option display texts.

        Unknown option values are passed through unchanged (``default=option``).
        """
        options = self.get_option_list(question_value)
        result = []
        for option in options:
            option_value = self.get_value_by_option(option, default=option)
            if option_value:
                result.append(option_value)
        return result

    def get_option_list(self, question_value):
        """Split a raw answer into its individual option values.

        Accepts comma- or space-separated lists; a bare value that matches a
        known option (or 'other') is taken as-is; otherwise the legacy
        compact encoding is split with the 1a/1b... pattern described on the
        class docstring.
        """
        if question_value is None: return []
        if ',' in question_value:
            responses = question_value.split(',')
            responses = [r.strip() for r in responses]
        elif ' ' in question_value:
            responses = question_value.split(' ')
        elif question_value in [item.get('val') for item in self._dict[self.OPTIONS]]:
            # Exact single option value, e.g. 'yes' in ['yes', 'no'].
            responses = [question_value]
        elif self.has_other and question_value == 'other':
            responses = ['other']
        else:
            responses = re.findall(r'[1-9]?[a-zA-Z]', question_value)
        return responses

    def formatted_field_values_for_excel(self, value):
        # Like get_option_value_list, but unknown option values are dropped.
        if value is None: return []
        options = self.get_option_list(value)
        result = []
        for option in options:
            option_value = self.get_value_by_option(option)
            if option_value:
                result.append(option_value)
        return result

    def get_options_map(self):
        """Return {option value: option text} for all choices."""
        options_map = {}
        for option in self.options:
            options_map.update({option['val']: option['text']})
        return options_map

    def escape_option_text(self):
        # HTML-escape option labels in place before rendering.
        for option in self._dict.get(self.OPTIONS):
            option['text'] = escape(option['text'])
class GeoCodeField(Field):
    """Question answered with a "<latitude> <longitude>" coordinate pair."""
    type = field_attributes.LOCATION_FIELD

    def __init__(self, name, code, label, instruction=None, required=True, parent_field_code=None, hint=None,
                 constraint_message=None, appearance=None, default=None, xform_constraint=None, relevant=None):
        Field.__init__(self, type=field_attributes.LOCATION_FIELD, name=name, code=code,
                       label=label, instruction=instruction, required=required, parent_field_code=parent_field_code,
                       hint=hint, constraint_message=constraint_message, appearance=appearance, default=default,
                       xform_constraint=xform_constraint, relevant=relevant)

    def validate(self, lat_long_string):
        """Parse "lat long" (comma- or space-separated) and range-check it.

        Raises GeoCodeFormatException when the answer does not contain
        exactly two components.
        """
        Field.validate(self, lat_long_string)
        if not lat_long_string:
            return None
        # Accept commas as separators and collapse repeated spaces.
        lat_long = lat_long_string.replace(",", " ")
        lat_long = re.sub(' +', ' ', lat_long).split(" ")
        if len(lat_long) != 2:
            raise GeoCodeFormatException(self.code)
        return GeoCodeConstraint().validate(latitude=lat_long[0], longitude=lat_long[1])

    def get_constraint_text(self):
        return "xx.xxxx yy.yyyy"

    def convert_to_unicode(self):
        if self.value is None:
            return unicode("")
        return ", ".join(str(b) for b in list(self.value)) \
            if isinstance(self.value, list) or isinstance(self.value, tuple) else unicode(self.value)

    def formatted_field_values_for_excel(self, value):
        # Split "lat,long" into two Excel cells, coercing to float when possible.
        value_list = value.split(',')
        return self._empty_if_no_data(value_list, 0), self._empty_if_no_data(value_list, 1)

    def _empty_if_no_data(self, list, index):
        # Missing component -> empty cell; non-numeric text is passed through.
        if len(list) < index + 1:
            return ''
        else:
            try:
                return float(list[index])
            except ValueError:
                return list[index]
class MediaField(Field):
    """Base class for file-upload questions (photo, video, audio)."""

    def __init__(self, type, name, code, label, instruction=None, required=True,
                 parent_field_code=None, hint=None, constraint_message=None, appearance=None, default=None,
                 xform_constraint=None, relevant=None):
        Field.__init__(self,
                       type=type,
                       name=name,
                       code=code,
                       label=label,
                       instruction=instruction,
                       constraints=[],
                       required=required,
                       parent_field_code=parent_field_code,
                       hint=hint,
                       constraint_message=constraint_message,
                       appearance=appearance,
                       default=default,
                       xform_constraint=xform_constraint,
                       relevant=relevant)

    def formatted_field_values_for_excel(self, value):
        """Media answers (file references) are exported to Excel unchanged."""
        return value
class PhotoField(MediaField):
    """Media question accepting a photo upload."""
    def __init__(self, name, code, label, instruction=None, required=True, parent_field_code=None, hint=None,
                 constraint_message=None, appearance=None, default=None, xform_constraint=None, relevant=None):
        MediaField.__init__(self, type=field_attributes.PHOTO, name=name, code=code, label=label,
                            instruction=instruction,
                            required=required, parent_field_code=parent_field_code,
                            hint=hint, constraint_message=constraint_message, appearance=appearance, default=default,
                            xform_constraint=xform_constraint, relevant=relevant)
class VideoField(MediaField):
    """Media question accepting a video upload."""
    def __init__(self, name, code, label, instruction=None, required=True, parent_field_code=None, hint=None,
                 constraint_message=None, appearance=None, default=None, xform_constraint=None, relevant=None):
        MediaField.__init__(self, type=field_attributes.VIDEO, name=name, code=code, label=label,
                            instruction=instruction,
                            required=required, parent_field_code=parent_field_code, hint=hint,
                            constraint_message=constraint_message, appearance=appearance, default=default,
                            xform_constraint=xform_constraint, relevant=relevant)
class AudioField(MediaField):
    """Media question accepting an audio upload."""
    def __init__(self, name, code, label, instruction=None, required=True, parent_field_code=None, hint=None,
                 constraint_message=None, appearance=None, default=None, xform_constraint=None, relevant=None):
        MediaField.__init__(self, type=field_attributes.AUDIO, name=name, code=code, label=label,
                            instruction=instruction,
                            required=required, parent_field_code=parent_field_code, hint=hint,
                            constraint_message=constraint_message, appearance=appearance, default=default,
                            xform_constraint=xform_constraint, relevant=relevant)
class FieldSet(Field):
    """Container field holding child fields, either as a 'group' or a 'repeat'."""
    FIELDSET_TYPE = 'fieldset_type'

    def __init__(self, name, code, label, instruction=None, required=True, field_set=[], fieldset_type='group',
                 parent_field_code=None, hint=None, constraint_message=None, appearance=None, default=None,
                 xform_constraint=None, relevant=None):
        # NOTE(review): ``field_set=[]`` is a shared mutable default —
        # callers appear to always pass their own list; confirm before
        # relying on the default.
        Field.__init__(self, type=field_attributes.FIELD_SET, name=name, code=code,
                       label=label, instruction=instruction, required=required, parent_field_code=parent_field_code,
                       hint=hint, constraint_message=constraint_message, appearance=appearance, default=default,
                       xform_constraint=xform_constraint, relevant=relevant)
        self.fields = self._dict['fields'] = field_set
        self._dict[self.FIELDSET_TYPE] = fieldset_type

    @property
    def is_field_set(self):
        return True

    def is_group(self):
        return self._dict.get(self.FIELDSET_TYPE) == 'group'

    def _find_field_for_code(self, code):
        # Case-insensitive lookup of a direct child field by its code.
        for field in self.fields:
            if field.code.lower() == code.lower():
                return field
        return None

    def set_value(self, value):
        """Set answers for every repetition.

        ``value`` is a list of {field code: answer} mappings; each answer is
        delegated to the matching child field's own ``set_value``, and the
        normalised values are stored on this field.
        """
        list = []
        if value:
            for current_value in value:
                dict = OrderedDict()
                for field_code, answer in current_value.iteritems():
                    field = self._find_field_for_code(field_code)
                    if field:
                        field.set_value(answer)
                        dict.update({field_code: field.value})
                list.append(dict)
        super(FieldSet, self).set_value(list)

    @property
    def fieldset_type(self):
        return self._dict.get(self.FIELDSET_TYPE)

    def validate(self, value):
        # TODO: call all validators of the child fields as well.
        Field.validate(self, value)
        if is_sequence(value) or value is None:
            return value
        return [value]

    # TODO: find the application of this
    def convert_to_unicode(self):
        if self.value is None:
            return unicode("")
        return sequence_to_str(self.value) if isinstance(self.value, list) else unicode(self.value)

    def _to_json(self):
        # Serialise this field plus the full JSON of every child field.
        dict = self._dict.copy()
        dict['instruction'] = self._dict['instruction']
        dict['fields'] = [f._to_json() for f in self.fields]
        return dict
class TimeField(Field):
    """Question answered with a time of day."""
    def __init__(self, name, code, label, constraints=None, instruction=None, required=True,
                 parent_field_code=None, hint=None, constraint_message=None, appearance=None, default=None,
                 xform_constraint=None, relevant=None):
        if not constraints: constraints = []
        assert isinstance(constraints, list)
        Field.__init__(self, type='time', name=name, code=code, label=label, instruction=instruction,
                       constraints=constraints, required=required, parent_field_code=parent_field_code,
                       hint=hint, constraint_message=constraint_message, appearance=appearance, default=default,
                       xform_constraint=xform_constraint, relevant=relevant)
    @property
    def date_format(self):
        # Display/parse pattern used by the UI for time answers.
        return "hh:mm"
    def formatted_field_values_for_excel(self, value):
        return value
class DateTimeField(Field):
    """Question answered with a combined date and time."""
    def __init__(self, name, code, label, constraints=None, instruction=None, required=True,
                 parent_field_code=None, hint=None, constraint_message=None, appearance=None, default=None,
                 xform_constraint=None, relevant=None):
        if not constraints: constraints = []
        assert isinstance(constraints, list)
        Field.__init__(self, type='datetime', name=name, code=code, label=label, instruction=instruction,
                       constraints=constraints, required=required, parent_field_code=parent_field_code,
                       hint=hint, constraint_message=constraint_message, appearance=appearance, default=default,
                       xform_constraint=xform_constraint, relevant=relevant)
    @property
    def date_format(self):
        # Display/parse pattern used by the UI for datetime answers.
        return "dd.MM.yyyy HH:mm:ss"
    @property
    def is_monthly_format(self):
        # Datetime answers always carry a full date, never a month-only form.
        return False
    def formatted_field_values_for_excel(self, value):
        return value
| 2.15625 | 2 |
GeneticAlgorithmSolver.py | THuzeyfe/GA_TSP | 0 | 12766710 | from RouteManager import RouteManager
from Route import Route
import numpy as np
class GeneticAlgorithmSolver:
    """Genetic-algorithm TSP solver operating on RouteManager populations.

    Each generation is produced by tournament selection, ordered crossover
    and per-gene swap mutation; with ``elitism`` the best current route is
    carried over unchanged.
    """

    def __init__(self, cities, population_size=50, mutation_rate=0.05, tournament_size=5, elitism=True):
        self.cities = cities
        self.population_size = population_size
        self.mutation_rate = mutation_rate      # probability of mutating each gene
        self.tournament_size = tournament_size  # contenders per selection round
        self.elitism = elitism                  # keep the best route unchanged

    def solve(self, rm):
        """Evolve the population for a fixed number of generations (101)."""
        for _ in range(101):
            rm = self.evolve(rm)
        return rm

    def evolve(self, routes):
        """Create a new generation from the given population.

        Input:
            routes: RouteManager object that will be evolved
        Output:
            RouteManager holding the next generation
        """
        # --- selection: fill the mating pool with tournament winners ---
        selected_routes = RouteManager(self.cities, self.population_size)
        for i in range(self.population_size - int(self.elitism)):
            # tournament_size contenders are chosen at random with replacement
            contenders = np.random.choice(routes.routes, self.tournament_size)
            selected_routes.set_route(i, self.tournament(contenders))
        child_routes = RouteManager(self.cities, self.population_size)
        # --- elitism: copy the best current route straight into the children ---
        if self.elitism:
            temporary_route = Route(self.cities)
            elite_route = routes.find_best_route()
            for i in range(len(elite_route)):
                temporary_route.assign_city(i, elite_route.get_city(i))
            child_routes.set_route(self.population_size - 1, temporary_route)
        # --- crossover: first route mates with the last, the second with the
        # second-from-last, and so on ---
        for i in range(self.population_size - int(self.elitism)):
            child_routes.set_route(i, self.crossover(
                selected_routes.get_route(i),
                selected_routes.get_route(self.population_size - 1 - i)))
        # --- mutation ---
        for i in range(len(child_routes) - int(self.elitism)):
            self.mutate(child_routes.get_route(i))
        return child_routes

    def crossover(self, route_1, route_2):
        """Ordered crossover of two parent routes.

        A random slice [low_point, up_point) is copied from ``route_1``;
        the remaining positions are filled with the missing cities in the
        order they appear in ``route_2``.

        BUG FIX: the original did ``child = route_1``, so the child aliased
        the first parent and the fill loop corrupted the mating pool; the
        child is now a fresh Route.

        Input:
            route_1: first parent route
            route_2: second parent route
        Output:
            the generated child route
        """
        a = np.random.rand()
        b = np.random.rand()
        low_point = int(min(a, b) * len(self.cities))
        up_point = int(max(a, b) * len(self.cities))
        child = Route(self.cities)
        inherited = []  # cities copied verbatim from the first parent
        for i in range(low_point, up_point):
            city = route_1.get_city(i)
            child.assign_city(i, city)
            inherited.append(city)
        # remaining cities, in the order the second parent visits them
        subset = [item for item in route_2.route if item not in inherited]
        for i in range(len(self.cities)):
            if i < low_point or i >= up_point:
                indx = i if i < low_point else i - (up_point - low_point)
                child.assign_city(i, subset[indx])
        return child

    def mutate(self, route):
        """Swap-mutate each gene of ``route`` with probability mutation_rate.

        Input:
            route: Route object mutated in place
        """
        for i in range(len(route)):
            if np.random.rand() < self.mutation_rate:
                # swap this gene with another randomly chosen position
                swap_indx = int(len(route) * np.random.rand())
                city1 = route.get_city(i)
                city2 = route.get_city(swap_indx)
                route.assign_city(i, city2)
                route.assign_city(swap_indx, city1)

    def tournament(self, routes):
        """Return a copy of the fittest route among ``routes``.

        BUG FIX: the winner now starts as the first contender, so a
        tournament where no route beats fitness 0 can no longer raise
        UnboundLocalError; fitness is also computed once per route instead
        of twice.

        Input:
            routes: sequence of contender routes
        Output:
            a fresh Route copying the winner's city order
        """
        tour_winner = routes[0]
        best_fitness = tour_winner.calc_fitness()
        for r in routes[1:]:
            fitness = r.calc_fitness()
            if fitness > best_fitness:
                best_fitness = fitness
                tour_winner = r
        return_route = Route(self.cities)
        for i in range(len(return_route)):
            return_route.assign_city(i, tour_winner.get_city(i))
        return return_route
mmtbx/kinemage/__init__.py | dperl-sol/cctbx_project | 155 | 12766711 | <filename>mmtbx/kinemage/__init__.py
from __future__ import absolute_import, division, print_function
def kin_vec(start_key, start_xyz, end_key, end_xyz, width=None):
  """Format one kinemage vector entry: a P (point) record at start_xyz
  followed by an L (line) record at end_xyz.

  The first character of each key is its altloc; a blank altloc adds no
  marker, otherwise a lower-cased " 'x'" pointmaster suffix is emitted.
  ``width``, when given, is inserted into the line record as " width<n>".

  The two near-duplicate format strings of the original (with/without
  width) are consolidated into one.
  """
  def _altloc_marker(key):
    # First character of the key is the altloc; ' ' means "none".
    altloc = key[0:1]
    return "" if altloc == ' ' else " '%s'" % altloc.lower()

  width_txt = "" if width is None else " width%d" % width
  return "{%s} P%s %.3f %.3f %.3f {%s} L%s%s %.3f %.3f %.3f\n" % (
    start_key,
    _altloc_marker(start_key),
    start_xyz[0],
    start_xyz[1],
    start_xyz[2],
    end_key,
    _altloc_marker(end_key),
    width_txt,
    end_xyz[0],
    end_xyz[1],
    end_xyz[2])
| 2.078125 | 2 |
leetcode/python/636_exclusive_time_of_functions.py | VVKot/leetcode-solutions | 4 | 12766712 | <gh_stars>1-10
"""
T: O(N)
S: O(N)
We use stack to record last executed operation. If we have seen an operation Y
execute in the middle of operation X, this means X will continue at Y's end+1.
"""
from typing import List
class Solution:
    def exclusiveTime(self, n: int, logs: List[str]) -> List[int]:
        """Total exclusive run time per function id from "id:start|end:ts" logs.

        A stack tracks the currently running function together with the
        timestamp at which it (re)started running; when a nested call starts
        or a call ends, the elapsed slice is credited to the right function.
        """
        totals = [0] * n
        stack = []  # entries: [function_id, timestamp it (re)started running]
        for entry in logs:
            fid_str, event, ts_str = entry.split(':')
            fid, ts = int(fid_str), int(ts_str)
            if event == 'start':
                if stack:
                    # the caller pauses here — credit its running slice
                    running_id, since = stack[-1]
                    totals[running_id] += ts - since
                stack.append([fid, ts])
            else:
                # 'end' timestamps are inclusive, hence the +1
                finished_id, since = stack.pop()
                totals[finished_id] += ts - since + 1
                if stack:
                    # the caller resumes at the next time unit
                    stack[-1][1] = ts + 1
        return totals
| 3.34375 | 3 |
ymir/backend/src/ymir_viz/tests/controllers/conftest.py | Zhang-SJ930104/ymir | 64 | 12766713 | import json
import pytest
from flask import Response
from flask.testing import FlaskClient
class APIResponse(Response):
    """Flask Response subclass that can deserialise its own JSON body."""
    def json(self):
        # Decode the raw response bytes as JSON for convenient assertions.
        return json.loads(self.data)
@pytest.fixture()
def test_client(core_app):
    """Flask test client whose responses expose a ``.json()`` helper."""
    core_app.test_client_class = FlaskClient
    core_app.response_class = APIResponse
    return core_app.test_client()
| 2.328125 | 2 |
cli/gameplay.py | gaebor/CnC_AI | 0 | 12766714 | <gh_stars>0
import ctypes
from PIL import Image
import cnc_structs
import decoders
import input_requests
class TDGameplay:
def __init__(self, dll_path, content_directory):
self.players = []
self.actions = {}
self.content_directory = content_directory
self.dll = ctypes.WinDLL(dll_path)
self.dll.CNC_Init(ctypes.c_char_p(b"-CD\"" + content_directory + b"\""), None)
self.dll.CNC_Config(ctypes.byref(cnc_structs.get_diff()))
self.dll.CNC_Advance_Instance.restype = ctypes.c_bool
self.dll.CNC_Set_Multiplayer_Data.restype = ctypes.c_bool
self.dll.CNC_Start_Instance.restype = ctypes.c_bool
self.dll.CNC_Start_Instance_Variation.restype = ctypes.c_bool
self.dll.CNC_Start_Custom_Instance.restype = ctypes.c_bool
self.dll.CNC_Get_Start_Game_Info.restype = ctypes.c_bool
self.dll.CNC_Get_Palette.restype = ctypes.c_bool
self.dll.CNC_Get_Visible_Page.restype = ctypes.c_bool
self.dll.CNC_Clear_Object_Selection.restype = ctypes.c_bool
self.dll.CNC_Select_Object.restype = ctypes.c_bool
self.dll.CNC_Get_Game_State.restype = ctypes.c_bool
self.dll.CNC_Save_Load.restype = ctypes.c_bool
self.game_state_buffer = (ctypes.c_uint8 * (4 * 1024 ** 2))()
self.image_buffer = (ctypes.c_uint8 * ((64 * 24) ** 2))()
def add_player(self, playerinfo: cnc_structs.CNCPlayerInfoStruct):
self.players.append(playerinfo)
def start_game(
self,
multiplayer_options: cnc_structs.CNCMultiplayerOptionsStruct,
scenario_index,
difficulty=0,
):
self.players = (cnc_structs.CNCPlayerInfoStruct * len(self.players))(*self.players)
if False == self.dll.CNC_Set_Multiplayer_Data(
ctypes.c_int(scenario_index),
ctypes.byref(multiplayer_options),
ctypes.c_int(len(self.players)),
self.players,
ctypes.c_int(6), # max number of players
):
raise ValueError('CNC_Set_Multiplayer_Data')
if False == self.dll.CNC_Start_Instance_Variation(
ctypes.c_int(scenario_index),
ctypes.c_int(-1), # scenario_variation
ctypes.c_int(0), # scenario_direction
ctypes.c_int(7), # build_level
ctypes.c_char_p(b"MULTI"), # faction
ctypes.c_char_p(b"GAME_GLYPHX_MULTIPLAYER"), # game_type
ctypes.c_char_p(self.content_directory),
ctypes.c_int(-1), # sabotaged_structure
ctypes.c_char_p(b""), # override_map_name
):
raise ValueError('CNC_Start_Instance_Variation')
self.dll.CNC_Set_Difficulty(ctypes.c_int(difficulty))
for player in self.players:
StartLocationIndex = ctypes.c_int()
if self.dll.CNC_Get_Start_Game_Info(
ctypes.c_uint64(player.GlyphxPlayerID), ctypes.byref(StartLocationIndex)
):
player.StartLocationIndex = StartLocationIndex.value
else:
raise ValueError('CNC_Get_Start_Game_Info')
self.dll.CNC_Handle_Game_Request(ctypes.c_int(1)) # INPUT_GAME_LOADING_DONE
self.retrieve_players_info()
self.init_palette()
self.static_map = self.get_game_state('GAME_STATE_STATIC_MAP', 0)
def load(self, filename):
if False == self.dll.CNC_Save_Load(
ctypes.c_bool(False),
ctypes.c_char_p(filename.encode('ascii')),
ctypes.c_char_p(b'GAME_GLYPHX_MULTIPLAYER'),
):
raise ValueError('CNC_Save_Load')
self.players = (cnc_structs.CNCPlayerInfoStruct * len(self.players))(*self.players)
self.dll.CNC_Handle_Game_Request(ctypes.c_int(1)) # INPUT_GAME_LOADING_DONE
self.retrieve_players_info()
self.init_palette()
self.static_map = self.get_game_state('GAME_STATE_STATIC_MAP', 0)
def save(self, filename):
if False == self.dll.CNC_Save_Load(
ctypes.c_bool(True),
ctypes.c_char_p(filename.encode('ascii')),
ctypes.c_char_p(b'GAME_GLYPHX_MULTIPLAYER'),
):
raise ValueError('CNC_Save_Load')
else:
return
def retrieve_players_info(self):
# overrides House info: GOOD/BAD becomes MULTI1-6
for player in self.players:
if not self.dll.CNC_Get_Game_State(
ctypes.c_int(8), # GAME_STATE_PLAYER_INFO
ctypes.c_uint64(player.GlyphxPlayerID),
ctypes.pointer(player),
ctypes.sizeof(cnc_structs.CNCPlayerInfoStruct)
+ 33, # A little extra for no reason
):
raise ValueError('CNC_Get_Game_State (PLAYER_INFO)')
def get_game_state(self, state_request, player_index):
player = self.players[player_index]
request_type, result_type = cnc_structs.GameStateRequestEnum[state_request]
if self.dll.CNC_Get_Game_State(
ctypes.c_int(request_type),
ctypes.c_uint64(player.GlyphxPlayerID),
ctypes.pointer(self.game_state_buffer),
ctypes.c_int(len(self.game_state_buffer)),
):
return result_type(self.game_state_buffer)
def init_palette(self):
self.palette = (ctypes.c_uint8 * (256 * 3))()
if self.dll.CNC_Get_Palette(self.palette):
for i in range(len(self.palette)):
self.palette[i] *= 4
else:
raise ValueError('CNC_Get_Palette')
def show_image(self):
width, height = ctypes.c_uint(0), ctypes.c_uint(0)
if self.dll.CNC_Get_Visible_Page(
self.image_buffer, ctypes.byref(width), ctypes.byref(height)
):
img = Image.frombuffer('P', (width.value, height.value), self.image_buffer)
img.putpalette(self.palette)
img.show()
def get_units(self, player_index):
player = self.players[player_index]
units = decoders.players_units(self.get_game_state('GAME_STATE_LAYERS', 0), player.House)
return units
def advance(self, count=1):
for player_id, (action, args) in self.actions.items():
self.handle_request(action, player_id, *args)
self.actions = {}
result = self.dll.CNC_Advance_Instance(ctypes.c_uint64(0))
for _ in range(1, count):
result = self.dll.CNC_Advance_Instance(ctypes.c_uint64(0))
return result
def register_request(self, player_index, request_type, arg1, *args):
player = self.players[player_index]
self.actions[player.GlyphxPlayerID] = (request_type, (arg1,) + args)
def handle_request(self, request_type, player_id, x1, y1=0, x2=0, y2=0):
if request_type == 'INPUT_REQUEST_SPECIAL_KEYS':
self.dll.CNC_Handle_Input(
ctypes.c_int(10),
ctypes.c_ubyte(x1),
ctypes.c_uint64(player_id),
ctypes.c_int(0),
ctypes.c_int(0),
ctypes.c_int(0),
ctypes.c_int(0),
)
elif request_type.startswith('INPUT_REQUEST'):
self.dll.CNC_Handle_Input(
ctypes.c_int(input_requests.InputRequestEnum[request_type]),
ctypes.c_ubyte(0),
ctypes.c_uint64(player_id),
ctypes.c_int(x1),
ctypes.c_int(y1),
ctypes.c_int(x2),
ctypes.c_int(y2),
)
elif request_type == 'SUPERWEAPON_REQUEST_PLACE_SUPER_WEAPON':
self.dll.CNC_Handle_SuperWeapon_Request(
ctypes.c_int(0),
ctypes.c_uint64(player_id),
ctypes.c_int(x1),
ctypes.c_int(y1),
ctypes.c_int(x2),
ctypes.c_int(y2),
)
elif request_type.startswith('INPUT_STRUCTURE'):
self.dll.CNC_Handle_Structure_Request(
ctypes.c_int(input_requests.StructureRequestEnum[request_type]),
ctypes.c_uint64(player_id),
ctypes.c_int(x1),
)
elif request_type.startswith('INPUT_UNIT'):
self.dll.CNC_Handle_Unit_Request(
ctypes.c_int(input_requests.UnitRequestEnum[request_type]),
ctypes.c_uint64(player_id),
)
elif request_type.startswith('SIDEBAR'):
self.dll.CNC_Handle_Sidebar_Request(
ctypes.c_int(input_requests.SidebarRequestEnum[request_type]),
ctypes.c_uint64(player_id),
ctypes.c_int(x1),
ctypes.c_int(y1),
ctypes.c_short(x2),
ctypes.c_short(y2),
)
elif request_type.startswith('CONTROL_GROUP_REQUEST'):
self.dll.CNC_Handle_ControlGroup_Request(
ctypes.c_int(input_requests.ControlGroupRequestEnum[request_type]),
ctypes.c_uint64(player_id),
ctypes.c_ubyte(x1),
)
elif request_type.startswith('INPUT_BEACON_'):
self.dll.CNC_Handle_Beacon_Request(
ctypes.c_int(input_requests.BeaconRequestEnum[request_type]),
ctypes.c_uint64(player_id),
ctypes.c_int(x1),
ctypes.c_int(y1),
)
else:
raise ValueError(request_type)
def get_what_player_see(self, player_index):
player = self.players[player_index]
dynamic_map = self.get_game_state('GAME_STATE_DYNAMIC_MAP', 0)
layers = self.get_game_state('GAME_STATE_LAYERS', player_index)
shroud = decoders.shroud_array(
self.get_game_state('GAME_STATE_SHROUD', player_index),
(self.static_map.MapCellHeight, self.static_map.MapCellWidth),
)
occupiers = self.get_game_state('GAME_STATE_OCCUPIER', 0)
fixed_pos_map_assets, fixed_pos_map_shape, actors = decoders.f(
dynamic_map,
layers,
occupiers,
shroud,
(self.static_map.MapCellHeight, self.static_map.MapCellWidth),
player.House,
player.AllyFlags,
)
return fixed_pos_map_assets, fixed_pos_map_shape, actors
    def __del__(self):
        # Explicitly unload the game DLL on garbage collection so it can be
        # reloaded in the same process (Windows-only: FreeLibrary on the
        # ctypes module handle).
        ctypes.windll.kernel32.FreeLibrary(self.dll._handle)
| 2.15625 | 2 |
okta_uuid.py | tmehlinger/okta-uuid | 0 | 12766715 | """
--- okta-uuid ---
This is a simple module for turning (apparently) base62-encoded Okta user IDs
into UUIDs. It also allows for reversing the UUID to an Okta user ID.
"""
__title__ = 'okta-uuid'
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__ = '0.1.0'
import uuid
import base62
class OktaUserId(object):
    """An Okta user ID together with its UUID representation.

    Okta user IDs are base62-encoded little-endian 128-bit integers, so each
    one maps to a UUID and any UUID can be re-encoded as an Okta-style ID.
    """

    def __init__(self, uid):
        self.__uid = uid
        # Decode the base62 string into a 128-bit little-endian integer and
        # interpret those bytes as a UUID.
        decoded = base62.decode(uid)
        raw = decoded.to_bytes(16, byteorder='little')
        self.__uuid = uuid.UUID(bytes_le=raw)

    @property
    def uid(self):
        """The original base62-encoded Okta user ID string."""
        return self.__uid

    @property
    def uuid(self):
        """The :class:`uuid.UUID` equivalent of this Okta user ID."""
        return self.__uuid

    def __eq__(self, other):
        # Return NotImplemented (instead of raising AttributeError) for
        # foreign types so comparisons against them are simply unequal.
        if not isinstance(other, OktaUserId):
            return NotImplemented
        return self.uid == other.uid

    def __hash__(self):
        # Defining __eq__ alone would make instances unhashable; hash on the
        # same key used for equality so instances can live in sets/dicts.
        return hash(self.uid)

    def __str__(self):
        return self.uid

    def __repr__(self):
        return "OktaUserId('{}')".format(self.uid)

    @classmethod
    def from_uuid(cls, u, length=20):
        """
        Derive an Okta UID from the given UUID, padding the left of the string
        ID with zeroes up to *length* characters.
        """
        as_int = int.from_bytes(u.bytes_le, byteorder='little')
        encoded = base62.encode(as_int)
        return cls(encoded.zfill(length))
| 2.84375 | 3 |
lesson06/lesson06_hard_1.py | Liar-Ziro/Python.Basics_of_language | 0 | 12766716 | '''Задача - 1
Вам необходимо создать завод по производству мягких игрушек для детей. Вам надо
продумать структуру классов, чтобы у вас был класс, который создаёт игрушки на
основании: Названия, Цвета, Типа (животное, персонаж мультфильма) Опишите
процедуры создания игрушки в трёх методах: -- Закупка сырья, пошив, окраска Не
усложняйте пусть методы просто выводят текст о том, что делают. В итоге ваш
класс по производству игрушек должен вернуть объект нового класса Игрушка.'''
class Toys:
    """A soft toy; constructing one runs the whole production pipeline."""

    def __init__(self, name, model, color):
        self.name = name
        self.model = model
        self.color = color
        # Production steps, executed in fixed order.
        for production_step in (self._purchase_materials,
                                self._sewing,
                                self._painting):
            production_step()

    def _purchase_materials(self):
        # Purchasing raw materials for this toy.
        print(f'Закупка сырья для {self.name}')

    def _sewing(self):
        # Sewing the toy together.
        print(f'Пошив {self.name}')

    def _painting(self):
        # Painting the toy in the requested colour.
        print(f'Окраска {self.name} в {self.color}')
class Factory(Toys):
    """A toy factory; producing an instance yields a finished toy object."""

    def __init__(self, name, model, color):
        # Run the full production pipeline first, then attach factory metadata.
        super().__init__(name, model, color)
        self.name_factory = 'some factory'
        self.number_factory = '123123'
# Demo: build one toy; instantiation prints each production step.
toy1 = Factory('toy', 'animal', 'green')
| 3.953125 | 4 |
seeqler/ui/schema.py | ceilors/seeqler | 0 | 12766717 | import dearpygui.dearpygui as dpg
from .window import Window
from . import ConnectionListWindow
class SchemaWindow(Window):
    """Main window shown for an active connection.

    Left panel: schema selector, table list, row limit and a disconnect
    button; right panel: a tab bar with the selected table's contents and
    its schema description.
    """

    def __init__(self, app, **kwargs):
        # fmt: off
        super().__init__(
            app, 'Текущее подключение', 'main_window', (800, 500),
            tag_schema_selector='schema selector', tag_listbox='table list', tag_content='current table',
            tag_schema='current schema', tag_handler='table list handler', tag_limit='select_limit',
            table_params={
                'header_row': True, 'borders_outerH': True, 'borders_innerV': True, 'borders_innerH': True,
                'borders_outerV': True, 'resizable': True, 'no_host_extendX': True
            }, **kwargs
        )
        # fmt: on

    def construct(self) -> None:
        """Build the window widgets (invoked by the base class)."""
        with dpg.group(horizontal=True):
            with dpg.group(width=200):
                dpg.add_text('Schemas')
                dpg.add_combo((), tag=self.tag_schema_selector, callback=self.ui_select_schema)
                dpg.add_text('Tables')
                dpg.add_listbox((), tag=self.tag_listbox, callback=self.ui_select_table)
                with dpg.group():
                    dpg.add_text('Limit')
                    # Use the configured tag rather than repeating the
                    # 'select_limit' literal, so the tag name lives in one place.
                    dpg.add_input_text(tag=self.tag_limit, default_value='10')
                dpg.add_spacer(height=20)
                dpg.add_button(label='Отключиться', callback=self.ui_disconnect)
            with dpg.tab_bar(label='tabs'):
                with dpg.tab(label='content'):
                    dpg.add_table(tag=self.tag_content, **self.table_params)
                with dpg.tab(label='schema'):
                    dpg.add_table(tag=self.tag_schema, **self.table_params)

    def show(self) -> None:
        """Show the window and populate the schema selector."""
        super().show()
        dpg.configure_item(self.tag_schema_selector, items=self.app.inspector.get_schema_names())

    def ui_disconnect(self) -> None:
        """Drop the connection and return to the connection list window."""
        ConnectionListWindow().show()
        self.app.engine = None
        self.app.inspector = None
        dpg.delete_item(self.window_id)
        self.initiated = False

    def ui_select_table(self, sender, table=None):
        """Select table from schema and fill the content and schema tabs."""
        dpg.delete_item(self.tag_content, children_only=True)
        dpg.delete_item(self.tag_schema, children_only=True)

        schema = dpg.get_value(self.tag_schema_selector)
        table = table or dpg.get_value(self.tag_listbox)
        limit = dpg.get_value(self.tag_limit)

        if not table:
            return

        columns = self.app.inspector.get_columns(table, schema=schema)
        for c in columns:
            dpg.add_table_column(label=c['name'], parent=self.tag_content)
        for column in ['param', 'type', 'nullable', 'default', 'foreign key']:
            dpg.add_table_column(label=column, parent=self.tag_schema)

        with self.app.engine.connect() as conn:
            # NOTE(review): table/limit are interpolated into raw SQL; both
            # come from UI widgets populated by the inspector, but parameter
            # binding would be safer against odd table names.
            for row in conn.execute(f'select * from {table} limit {limit}'):
                with dpg.table_row(parent=self.tag_content):
                    for e in row:
                        dpg.add_text(e)

        # Map constrained column -> "schema.table(column)" description.
        foreign_keys = {
            i['constrained_columns'][0]: '{referred_schema}.{referred_table}({referred_columns[0]})'.format(**i)
            for i in self.app.inspector.get_foreign_keys(table, schema=schema)
        }
        for item in columns:
            with dpg.table_row(parent=self.tag_schema):
                dpg.add_text(item['name'])
                dpg.add_text(item['type'])
                dpg.add_text(item['nullable'])
                dpg.add_text(item['default'])
                dpg.add_text(foreign_keys.get(item['name']))

    def ui_select_schema(self, sender, schema):
        """Select schema from schemas list and refresh the table list."""
        dpg.configure_item(self.tag_listbox, items=sorted(self.app.inspector.get_table_names(schema=schema)))
        self.ui_select_table(sender)  # because listbox has selected item, but doesnt trigger callback itself
| 2.28125 | 2 |
corpus_generation_scripts/create_parliament.py | NbAiLab/notram | 7 | 12766718 | #!/usr/bin/env python3
####################################################################################
# Script is not in use, since this is no longer in the main corpus
# Cleaning up parliament speech files
# Output is an UTF-8 file with one article per line
####################################################################################
import sys, glob, os, re, argparse
import pandas as pd
def main(args):
    """Build a plain-text corpus (one article per line) from a parliament CSV.

    Reads ``args.input_file`` (a CSV with a ``text`` column), keeps every row
    with at least one word, writes the surviving articles to
    ``args.output_file`` (UTF-8, one newline-terminated article per line) and
    prints statistics.
    """
    minimum_number_of_words_in_an_article = 1

    # Read the file
    df = pd.read_csv(args.input_file, encoding='utf-8', dtype='string')

    # Collect valid articles in a list and join once at the end; repeated
    # string concatenation inside the loop is quadratic.
    valid_articles = []
    for _, row in df.iterrows():
        article = str(row['text'])
        if len(article.split()) >= minimum_number_of_words_in_an_article:
            valid_articles.append(article)

    all_articles = ''.join(article + '\n' for article in valid_articles)

    with open(args.output_file, 'w', encoding='utf-8') as f:
        f.write(all_articles)

    # Print some statistics. The total was previously reported as the index
    # of the last row (off by one, and undefined for an empty file).
    word_count = len(re.findall(r'\w+', all_articles))
    print(f'Saved file: {args.output_file}')
    print(f'Total number of articles: {len(df)}')
    print(f'Number of valid articles: {len(valid_articles)}')
    print(f'Number of words: {word_count}')
def parse_args():
    """Parse the command line; --input_file and --output_file are required."""
    parser = argparse.ArgumentParser(
        description="Create corpus from parliament files! Output is an UTF-8 JSON lines")
    for flag, description in (('--input_file', 'Input file'),
                              ('--output_file', 'Output file')):
        parser.add_argument(flag, required=True, type=str, help=description)
    return parser.parse_args()
# Script entry point: parse CLI arguments, then build the corpus.
if __name__ == "__main__":
    args = parse_args()
    main(args)
| 3.78125 | 4 |
desec.py | desec-utils/debug | 0 | 12766719 | <reponame>desec-utils/debug
from socket import getaddrinfo
def _resolve(name):
    """Resolve *name* to its first address for DNS (port 53); exit(1) if unknown."""
    try:
        info = getaddrinfo(name, 53)
    except OSError:
        print(f'Unknown server: {name}')
        quit(1)
    return info[0][4][0]
# Map of resolved IP address -> canonical deSEC frontend hostname. Every
# name is resolved eagerly at import time (the script exits if any name
# fails to resolve).
frontend_servers = {
    _resolve(s): s for s in
    [
        'lhr-1.b.desec.io',
        'vie-1.b.desec.io',
        'fra-1.a.desec.io',
        'ams-1.a.desec.io',
        'lax-1.b.desec.io',
        'nyc-1.b.desec.io',
        'dfw-1.a.desec.io',
        'sao-1.a.desec.io',
        'sao-1.b.desec.io',
        'scl-1.b.desec.io',
        'syd-1.a.desec.io',
        'syd-1.b.desec.io',
        'sin-1.b.desec.io',
        'hkg-1.a.desec.io',
        'jnb-1.a.desec.io',
    ]
}
| 2.5 | 2 |
bestbid/bidding/routing.py | wh0th3h3llam1/bestbid | 1 | 12766720 | <gh_stars>1-10
from django.urls import path
from django.conf.urls import url, re_path
from . import consumers
# Channels websocket routing: one live-auction consumer per asset id.
websocket_urlpatterns = [
    # path(r'asset/<int:id>/', consumers.LiveAuctioningConsumer),
    url(r'^asset/(?P<id>[0-9]+)/$', consumers.LiveAuctioningConsumer),
] | 1.625 | 2 |
my_ip/migrations/0005_rename_image_project_project_image.py | OscarMugendi/Django-week3-IP | 0 | 12766721 | # Generated by Django 3.2.7 on 2021-09-20 13:31
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: renames Project.image to Project.project_image.

    dependencies = [
        ('my_ip', '0004_alter_rating_table'),
    ]

    operations = [
        migrations.RenameField(
            model_name='project',
            old_name='image',
            new_name='project_image',
        ),
    ]
| 1.601563 | 2 |
aoc2019/solutions/day04.py | mdalzell/advent-of-code-2019 | 0 | 12766722 | from aoc2019.helpers.day04 import countValidPasswordsInRange, consecutiveDigitsRegex, exactlyTwoConsecutiveDigitsRegex
from aoc2019.shared.solution import Solution
class Day4(Solution):
    """Advent of Code 2019, day 4: count valid passwords in the input range."""

    # Puzzle input: the inclusive password search range.
    RANGE_LOW = 152085
    RANGE_HIGH = 670283

    def part1(self):
        """Count passwords matching the consecutive-digits rule."""
        return countValidPasswordsInRange(
            self.RANGE_LOW, self.RANGE_HIGH, consecutiveDigitsRegex)

    def part2(self):
        """Count passwords matching the exactly-two-consecutive-digits rule."""
        return countValidPasswordsInRange(
            self.RANGE_LOW, self.RANGE_HIGH, exactlyTwoConsecutiveDigitsRegex)
| 1.984375 | 2 |
tests/tests_preprocessing/test_datetime_transformer.py | stjordanis/mljar-supervised | 1,882 | 12766723 | <filename>tests/tests_preprocessing/test_datetime_transformer.py
import unittest
import tempfile
import json
import numpy as np
import pandas as pd
from supervised.preprocessing.datetime_transformer import DateTimeTransformer
class DateTimeTransformerTest(unittest.TestCase):
    """Tests for DateTimeTransformer fit/transform and its JSON round-trip."""

    def test_transformer(self):
        raw = {
            "col1": [
                "2020/06/01",
                "2020/06/02",
                "2020/06/03",
                "2021/06/01",
                "2022/06/01",
            ]
        }
        frame = pd.DataFrame(data=raw)
        frame["col1"] = pd.to_datetime(frame["col1"])
        original_frame = frame.copy()

        transformer = DateTimeTransformer()
        transformer.fit(frame, "col1")
        frame = transformer.transform(frame)

        # The datetime column is replaced by derived feature columns.
        self.assertTrue(frame.shape[0] == 5)
        self.assertTrue("col1" not in frame.columns)
        self.assertTrue("col1_Year" in frame.columns)

        # Serialising to JSON and back must yield an equivalent transformer.
        restored = DateTimeTransformer()
        restored.from_json(transformer.to_json())
        transformed_again = restored.transform(original_frame)
        self.assertTrue("col1" not in transformed_again.columns)
        self.assertTrue("col1_Year" in transformed_again.columns)
| 3.078125 | 3 |
storage/app/public/calculator_Iot_tx.py | kevinkuogod/ethereum_BIS_web | 0 | 12766724 | <filename>storage/app/public/calculator_Iot_tx.py
import numpy as np
import matplotlib.pyplot as plt
# T=100
# n=1
# # n=5
# # n=10
# Tx = []
# plt.figure(figsize=(10, 4), dpi=500) # 图片长宽和清晰度
# for i in range(int(T/n)):
# Tx.append(i)
# ldr_gas_total = []
# ldr_field=1
# camera_gas_total = []
# camera_field=2
# smart_contract_basic_cost = 21784
# for i in range(int(T/n)):
# ldr_gas_total.append(smart_contract_basic_cost+(256*ldr_field*i*64))
# camera_gas_total.append(smart_contract_basic_cost+(256*camera_field*i*64))
# plt.step(Tx, ldr_gas_total, label='ldr')
# plt.plot(Tx, ldr_gas_total, 'o--', color='grey', alpha=0.3)
# plt.step(Tx, camera_gas_total, label='camera')
# plt.plot(Tx, camera_gas_total, 'o--', color='grey', alpha=0.3)
# plt.xlabel("Tx number(n=1)")
# # plt.xlabel("Tx number(n=5)")
# # plt.xlabel("Tx number(n=10)")
# plt.ylabel("gas number")
# plt.grid(axis='x', color='0.95')
# plt.legend(title='IOT:')
# plt.title('cost(with BIS)')
# plt.savefig("python_BIS_n_1.png")
# # plt.savefig("python_BIS_n_5.png") # 保存图片
# # plt.savefig("python_BIS_n_10.png")
# plt.yscale('log')
# plt.show()
#1e10
# T=100
# n=1
# Tx = []
# plt.figure(figsize=(10, 4), dpi=500) # 图片长宽和清晰度
# for i in range(int(T/n)):
# Tx.append(i)
# ldr_gas_total = []
# camera_gas_total = []
# smart_contract_basic_cost = 21784
# #76187 ldr number(5秒) 15237(1秒)
# ldr_field=15237
# #133 tag數量(5秒) 26(1秒)
# #15144045 flag數量(5秒) 3,028,809(1秒)
# #98339914 video大小
# camera_field=26+3028809
# for i in range(int(T/n)):
# ldr_gas_total.append(smart_contract_basic_cost+(ldr_field*i*64))
# if((i+1) == int(T/n)):
# #camera_gas_total.append(smart_contract_basic_cost+((camera_field+98339914)*i*64))
# camera_gas_total.append(smart_contract_basic_cost+((camera_field)*i*64))
# else:
# camera_gas_total.append(smart_contract_basic_cost+(camera_field*i*64))
# plt.step(Tx, ldr_gas_total, label='ldr')
# plt.plot(Tx, ldr_gas_total, 'o--', color='grey', alpha=0.3)
# plt.step(Tx, camera_gas_total, label='camera')
# plt.plot(Tx, camera_gas_total, 'o--', color='grey', alpha=0.3)
# plt.xlabel("time")
# plt.ylabel("gas number")
# plt.grid(axis='x', color='0.95')
# plt.legend(title='IOT:')
# plt.title('cost(without BIS)')
# plt.savefig("python_BS.png")
# # plt.yscale('log')
# plt.show()
#-------------------------------------------------------------------------------------------------------------------------------------------------
# T=100
# # n=1
# # n=5
# n=10
# Tx = []
# plt.figure(figsize=(7, 6), dpi=500) # 图片长宽和清晰度
# for i in range(int(T/n)):
# Tx.append(i)
# ldr_gas_total = []
# ldr_field=1
# camera_gas_total = []
# camera_field=2
# smart_contract_basic_cost = 21784
# for i in range(int(T/n)):
# ldr_gas_total.append(smart_contract_basic_cost+(256*ldr_field*i*64))
# camera_gas_total.append(smart_contract_basic_cost+(256*camera_field*i*64))
# plt.plot(Tx, ldr_gas_total, '^', color='grey', label='ldr')
# plt.plot(Tx, camera_gas_total, 'o', color='green', label='camera')
# # plt.xlabel("Tx number(n=1)")
# # plt.xlabel("Tx number(n=5)")
# plt.xlabel("Tx number(n=10)")
# plt.ylabel("gas number")
# plt.grid(axis='x', color='0.95')
# plt.legend(title='IOT:')
# plt.title('cost(with BIS)')
# # plt.savefig("python_BIS_n_1_v2.png")
# # plt.savefig("python_BIS_n_5_v2.png") # 保存图片
# plt.savefig("python_BIS_n_10_v2.png")
# # plt.yscale('log')
# plt.show()
# Plot cumulative gas cost over time for two IoT devices (LDR sensor vs
# camera) without BIS batching: one transaction per tick (n=1) over T ticks.
T=100
n=1
Tx = []
plt.figure(figsize=(7, 6), dpi=500)  # figure size and resolution
for i in range(int(T/n)):
    Tx.append(i)
ldr_gas_total = []
camera_gas_total = []
smart_contract_basic_cost = 21784
# 76187 = LDR sample count per 5 s; 15237 per 1 s
ldr_field=15237
# 133 = tag count per 5 s; 26 per 1 s
# 15144045 = flag count per 5 s; 3,028,809 per 1 s
# 98339914 = video size in bytes
camera_field=26+3028809
for i in range(int(T/n)):
    print(smart_contract_basic_cost+(ldr_field*i*64))
    ldr_gas_total.append(smart_contract_basic_cost+(ldr_field*i*64))
    # NOTE(review): both branches append the same value; the commented line
    # once added the video payload on the final tick.
    if((i+1) == int(T/n)):
        #camera_gas_total.append(smart_contract_basic_cost+((camera_field+98339914)*i*64))
        camera_gas_total.append(smart_contract_basic_cost+((camera_field)*i*64))
    else:
        camera_gas_total.append(smart_contract_basic_cost+(camera_field*i*64))
plt.plot(Tx, ldr_gas_total, '^', color='grey', label='ldr')
plt.plot(Tx, camera_gas_total, 'o', color='green', label='camera')
plt.xlabel("time")
plt.ylabel("gas number")
plt.grid(axis='x', color='0.95')
plt.legend(title='IOT:')
plt.title('cost(without BIS)')
plt.savefig("python_BS_v2.png")
# plt.yscale('log')
plt.show()
# As noted near the y-axis, "1e11" means the unit is 100 billion.
#https://www.thinbug.com/q/50335690
#https://blog.csdn.net/bajiang7063/article/details/102145851
#https://vlight.me/2018/04/14/Numerical-Python-Plotting-and-Visualization/ | 2.671875 | 3 |
imagecaptioniser.py | Vijayvj1/FacebookHateMEMS | 0 | 12766725 | # Load libraries
import matplotlib.pyplot as plt
import pandas as pd
import pickle
import numpy as np
import os
from keras.applications.resnet50 import ResNet50
from keras.optimizers import Adam
from keras.layers import Dense, Flatten,Input, Convolution2D, Dropout, LSTM, TimeDistributed, Embedding, Bidirectional, Activation, RepeatVector,Concatenate
from keras.models import Sequential, Model
from keras.utils import np_utils
import random
from keras.preprocessing import image, sequence
import matplotlib.pyplot as plt
# Load data: Flickr8k dataset locations plus the caption token file and the
# train/val/test split index files (one image filename per line).
images_dir = os.listdir("D:\\FBAi\\data\\Flickr_Data")
images_path = 'D:\\FBAi\\data\\Flickr_Data\\Images\\'
captions_path = 'D:\\FBAi\data\Flickr_Data\\Flickr_TextData\\Flickr8k.token.txt'
train_path = 'D:\\FBAi\\data\\Flickr_Data\\Flickr_TextData\\Flickr_8k.trainImages.txt'
val_path = 'D:\\FBAi\\data\\Flickr_Data\\Flickr_TextData\\Flickr_8k.devImages.txt'
test_path = 'D:\\FBAi\\data\\Flickr_Data\\Flickr_TextData\\Flickr_8k.testImages.txt'

captions = open(captions_path, 'r').read().split("\n")
x_train = open(train_path, 'r').read().split("\n")
x_val = open(val_path, 'r').read().split("\n")
x_test = open(test_path, 'r').read().split("\n")
# Build {image filename: [caption, ...]} from the token file. Each line is
# "<image>#<idx>\t<caption>", so temp[1][2:] strips the 2-character index
# prefix. The -1 skips the trailing element left by splitting on "\n"
# (presumably an empty string after the final newline).
tokens = {}
for ix in range(len(captions)-1):
    temp = captions[ix].split("#")
    # setdefault replaces the manual membership check: one lookup, one idiom.
    tokens.setdefault(temp[0], []).append(temp[1][2:])
# Display one sample image (inline via IPython) with all of its captions.
temp = captions[10].split("#")
from IPython.display import Image, display
z = Image(filename=images_path+temp[0])
display(z)
for ix in range(len(tokens[temp[0]])):
    print(tokens[temp[0]][ix])
# Create the train / validation / test dataset files: a header line
# ("image_id\tcaptions") followed by one row per (image, caption) pair with
# the caption wrapped in <start> ... <end> markers. The three splits share
# one helper instead of three copy-pasted loops, and the files are managed
# with context managers instead of manual flush()/close().
def _write_caption_split(filename, image_ids):
    """Write one tab-separated split file for the given image id list."""
    with open(filename, 'wb') as split_file:
        split_file.write(b"image_id\tcaptions\n")
        for img in image_ids:
            if img == '':
                continue
            for capt in tokens[img]:
                caption = "<start> " + capt + " <end>"
                split_file.write((img + "\t" + caption + "\n").encode())

_write_caption_split('flickr_8k_train_dataset.txt', x_train)
_write_caption_split('flickr_8k_val_dataset.txt', x_val)
_write_caption_split('flickr_8k_test_dataset.txt', x_test)
# Load ResNet50 (ImageNet weights, no classifier head, global average
# pooling) as the image feature extractor, and show a link to an
# interactive architecture diagram.
from IPython.core.display import display, HTML
display(HTML("""<a href="http://ethereon.github.io/netscope/#/gist/db945b393d40bfa26006">ResNet50 Architecture</a>"""))

model = ResNet50(include_top=False,weights='imagenet',input_shape=(224,224,3),pooling='avg')
model.summary()
# Helper function to process images
def preprocessing(img_path):
    """Load one image file as a (1, 224, 224, 3) array ready for ResNet50."""
    loaded = image.load_img(img_path, target_size=(224, 224, 3))
    arr = image.img_to_array(loaded)
    return np.expand_dims(arr, axis=0)
# Extract a 2048-d ResNet feature vector for (up to) the first 3000 training
# images, keyed by image filename.
train_data = {}
ctr=0
for ix in x_train:
    if ix == "":
        continue
    if ctr >= 3000:
        break
    ctr+=1
    if ctr%1000==0:
        # Progress indicator.
        print(ctr)
    path = images_path + ix
    img = preprocessing(path)
    pred = model.predict(img).reshape(2048)
    train_data[ix] = pred

# Sanity check on one known image's feature shape.
train_data['2513260012_03d33305cf.jpg'].shape

# Persist the feature dictionary with pickle.
with open( "train_encoded_images.p", "wb" ) as pickle_f:
    pickle.dump(train_data, pickle_f )
# Load (image, caption) rows from the train split into a DataFrame and keep
# the raw values array for indexed access.
pd_dataset = pd.read_csv("flickr_8k_train_dataset.txt", delimiter='\t')
ds = pd_dataset.values
print(ds.shape)

pd_dataset.head()

# Collect every caption (column 1) into a flat list.
sentences = []
for ix in range(ds.shape[0]):
    sentences.append(ds[ix, 1])

print(len(sentences))

# First 5 captions stored in sentences
sentences[:5]

# Tokenise each caption into a list of words.
words = [i.split() for i in sentences]

# Build the list of all unique words (the vocabulary).
unique = []
for i in words:
    unique.extend(i)
unique = list(set(unique))
print(len(unique))

vocab_size = len(unique)

# Vectorization: word <-> index lookup tables.
word_2_indices = {val:index for index, val in enumerate(unique)}
indices_2_word = {index:val for index, val in enumerate(unique)}

# NOTE(review): these hard-coded patches overwrite whatever words happened
# to land at indices 0 and 8253; since set() ordering is not deterministic
# across runs, this looks fragile — confirm it is intentional.
word_2_indices['UNK'] = 0
word_2_indices['raining'] = 8253

indices_2_word[0] = 'UNK'
indices_2_word[8253] = 'raining'

print(word_2_indices['<start>'])
print(indices_2_word[4011])
print(word_2_indices['<end>'])
print(indices_2_word[8051])

vocab_size = len(word_2_indices.keys())
print(vocab_size)

# Longest caption length (in words) — used as the padding target.
max_len = 0
for i in sentences:
    i = i.split()
    if len(i) > max_len:
        max_len = len(i)

print(max_len)
# Turn every caption into training samples: each prefix of the caption is
# one input whose target is the next word (one-hot encoded).
padded_sequences, subsequent_words = [], []

for ix in range(ds.shape[0]):
    partial_seqs = []
    next_words = []
    text = ds[ix, 1].split()
    text = [word_2_indices[i] for i in text]
    for i in range(1, len(text)):
        partial_seqs.append(text[:i])
        next_words.append(text[i])
    padded_partial_seqs = sequence.pad_sequences(partial_seqs, max_len, padding='post')
    # One-hot target matrix. Use the builtin ``bool``: the ``np.bool`` alias
    # was deprecated in NumPy 1.20 and removed in 1.24, where it raises
    # AttributeError.
    next_words_1hot = np.zeros([len(next_words), vocab_size], dtype=bool)

    # Vectorization
    for i,next_word in enumerate(next_words):
        next_words_1hot[i, next_word] = 1

    padded_sequences.append(padded_partial_seqs)
    subsequent_words.append(next_words_1hot)

padded_sequences = np.asarray(padded_sequences)
subsequent_words = np.asarray(subsequent_words)

print(padded_sequences.shape)
print(subsequent_words.shape)

print(padded_sequences[0])

# Decode the first caption's padded prefixes back to words (sanity check).
for ix in range(len(padded_sequences[0])):
    for iy in range(max_len):
        print(indices_2_word[padded_sequences[0][ix][iy]],)
    print("\n")

print(len(padded_sequences[0]))
# Flatten the per-image sample arrays for the first 2000 images into two
# big training matrices and save them to disk.
# NOTE(review): np.concatenate inside the loop re-copies the accumulated
# array each iteration (quadratic); collecting slices and concatenating
# once would be linear.
num_of_images = 2000

captions = np.zeros([0, max_len])
next_words = np.zeros([0, vocab_size])

for ix in range(num_of_images):#img_to_padded_seqs.shape[0]):
    captions = np.concatenate([captions, padded_sequences[ix]])
    next_words = np.concatenate([next_words, subsequent_words[ix]])

np.save("captions.npy", captions)
np.save("next_words.npy", next_words)

print(captions.shape)
print(next_words.shape)

# Load the pre-computed ResNet feature vectors (pickled dict keyed by
# image filename as bytes).
with open('D:\\FBAi\\data\\train_encoded_images.p', 'rb') as f:
    encoded_images = pickle.load(f, encoding="bytes")
# Collect the feature vector for every dataset row whose image was encoded.
imgs = []

for ix in range(ds.shape[0]):
    if ds[ix, 0].encode() in encoded_images.keys():
        # print(ix, encoded_images[ds[ix, 0].encode()])
        imgs.append(list(encoded_images[ds[ix, 0].encode()]))

imgs = np.asarray(imgs)
print(imgs.shape)

# Repeat each image's feature vector once per caption-prefix sample so the
# image inputs line up row-for-row with ``captions``.
images = []

for ix in range(num_of_images):
    for iy in range(padded_sequences[ix].shape[0]):
        images.append(imgs[ix])

images = np.asarray(images)
np.save("images.npy", images)
print(images.shape)

# Parallel array of the source image filename for every sample.
image_names = []

for ix in range(num_of_images):
    for iy in range(padded_sequences[ix].shape[0]):
        image_names.append(ds[ix, 0])

image_names = np.asarray(image_names)
np.save("image_names.npy", image_names)
print(len(image_names))
# Model: merge an image branch (dense projection, repeated across time
# steps) with a language branch (embedding + LSTM), then decode the next
# word with stacked LSTMs and a softmax over the vocabulary.
captions = np.load("captions.npy")
next_words = np.load("next_words.npy")

print(captions.shape)
print(next_words.shape)

images = np.load("images.npy")
print(images.shape)

imag = np.load("image_names.npy")
print(imag.shape)

embedding_size = 128
max_len = 40

# Image branch: project the 2048-d feature to the embedding size and repeat
# it once per caption position.
image_model = Sequential()

image_model.add(Dense(embedding_size, input_shape=(2048,), activation='relu'))
image_model.add(RepeatVector(max_len))

image_model.summary()

# Language branch: embed caption-prefix indices and run them through an LSTM.
language_model = Sequential()

language_model.add(Embedding(input_dim=vocab_size, output_dim=embedding_size, input_length=max_len))
language_model.add(LSTM(256, return_sequences=True))
language_model.add(TimeDistributed(Dense(embedding_size)))

language_model.summary()

# Fuse both branches and predict the next word.
conca = Concatenate()([image_model.output, language_model.output])

x = LSTM(128, return_sequences=True)(conca)
x = LSTM(512, return_sequences=False)(x)
x = Dense(vocab_size)(x)
out = Activation('softmax')(x)
model = Model(inputs=[image_model.input, language_model.input], outputs = out)

model.compile(loss='categorical_crossentropy', optimizer='RMSprop', metrics=['accuracy'])

model.summary()

hist = model.fit([images, captions], next_words, batch_size=512, epochs=200)

model.save_weights("model_weights.h5")
#Predictions
# NOTE(review): this redefines ``preprocessing`` identically to the earlier
# definition, shadowing it — one of the two copies could be removed.
def preprocessing(img_path):
    # Load the image and add a leading batch dimension: (1, 224, 224, 3).
    im = image.load_img(img_path, target_size=(224,224,3))
    im = image.img_to_array(im)
    im = np.expand_dims(im, axis=0)
    return im
def get_encoding(model, img):
    """Return the flat 2048-d feature vector *model* produces for image file *img*."""
    # Local renamed from ``image`` so it no longer shadows the keras
    # ``image`` module name used elsewhere in this file.
    batch = preprocessing(img)
    features = model.predict(batch)
    return features.reshape(2048)
# Encode one held-out image with a fresh ResNet50 feature extractor.
resnet = ResNet50(include_top=False,weights='imagenet',input_shape=(224,224,3),pooling='avg')
img = "D:\\FBAi\\data\\Flickr_Data\\Images\\1453366750_6e8cf601bf.jpg"
test_img = get_encoding(resnet, img)
def predict_captions(image):
    """Greedily decode a caption for an encoded image, one word per step.

    Decoding stops at the "<end>" token or when max_len words have been
    generated; the surrounding <start>/<end> markers are stripped from the
    returned string.
    """
    generated = ["<start>"]
    while True:
        indices = [word_2_indices[w] for w in generated]
        padded = sequence.pad_sequences([indices], maxlen=max_len, padding='post')
        scores = model.predict([np.array([image]), np.array(padded)])
        best_word = indices_2_word[np.argmax(scores[0])]
        generated.append(best_word)
        if best_word == "<end>" or len(generated) > max_len:
            break
    return ' '.join(generated[1:-1])
# Show the test image alongside its greedily decoded caption.
Argmax_Search = predict_captions(test_img)
z = Image(filename=img)
display(z)
print(Argmax_Search)
| 2.453125 | 2 |
config.py | XxuChen/Adversarial-Attack-on-CapsNets | 0 | 12766726 | <filename>config.py
# Copyright 2018 <NAME> All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
FLAGS = tf.flags.FLAGS

# --- hardware / batching ---
tf.flags.DEFINE_integer('num_gpus', 2,
                        'Number of GPUs available.')
tf.flags.DEFINE_integer('total_batch_size', 64,
                        'The total batch size for each batch. It will be splitted into num_gpus partitions.')
# --- checkpointing / schedule ---
tf.flags.DEFINE_integer('save_epochs', 25,
                        'How often to save ckpt files.')
tf.flags.DEFINE_integer('max_epochs', 400,
                        'train, evaluate, ensemble: maximum epochs to run;\n'
                        'others: number of different examples to viasualize.')
tf.flags.DEFINE_integer('image_size', 28,
                        'Define the image size for dataset.')
# --- run mode and adversarial setup ---
tf.flags.DEFINE_string('mode', 'train',
                       'train: train the model;\n'
                       'test: test the model;\n'
                       'evaluate: evaluate the model for both training and testing set using different evaluation metrics;\n')
tf.flags.DEFINE_string('adversarial_method', 'Default',
                       'Default: no adversarial training method applied;\n'
                       'FGSM: fast gradient sign method;\n'
                       'BIM: basic iterative method;\n'
                       'LLCM: least-likely class method.')
tf.flags.DEFINE_string('hparams_override', None,
                        '--hparams_override=num_prime_capsules=64,padding=SAME,leaky=true,remake=false')
# --- dataset / model selection and paths ---
tf.flags.DEFINE_string('data_dir', 'dataset',
                       'The data directory')
tf.flags.DEFINE_string('dataset', 'mnist',
                       'The dataset to use for the experiment.\n'
                       'mnist, fashion_mnist, svhn, cifar10.')
tf.flags.DEFINE_string('model', 'caps',
                       'The model to use for the experiment.\n'
                       'caps, caps_r or cnn.')
tf.flags.DEFINE_string('summary_dir', './summary',
                       'The directory to write results.')
tf.flags.DEFINE_string('load_test_path', None,
                       'The (processed) test set file to load.')
# --- adversarial attack strength ---
tf.flags.DEFINE_float('epsilon', 1,
                      'epsilon for adversarial attacks.')
tf.flags.DEFINE_integer('iteration_n', 1,
                        'iteration number for iterative procedure.')
def default_hparams():
    """Build an HParams object holding the default hyperparameters."""
    defaults = dict(
        decay_rate=0.96,
        decay_steps=2000,
        leaky=False,
        learning_rate=0.001,
        loss_type='margin',
        num_prime_capsules=32,
        padding='VALID',
        remake=True,
        routing=3,
        verbose=False,
    )
    return tf.contrib.training.HParams(**defaults)
Validation/Geometry/test/MaterialBudget.py | PKUfudawei/cmssw | 2 | 12766727 | <filename>Validation/Geometry/test/MaterialBudget.py
#! /usr/bin/env python3
# Pure trick to start ROOT in batch mode, pass this only option to it
# and the rest of the command line options to this code.
from __future__ import print_function
import sys
oldargv = sys.argv[:]
sys.argv = [ '-b-' ]
from ROOT import TCanvas, TPad, TGaxis, TLegend, TPaveText, THStack, TFile, TLatex
from ROOT import TProfile, TProfile2D, TH1D, TH2F, TPaletteAxis, TStyle, TColor
from ROOT import kBlack, kWhite, kOrange, kAzure, kBlue, kRed, kGreen
from ROOT import kGreyScale, kTemperatureMap
from ROOT import kTRUE, kFALSE
from ROOT import gROOT, gStyle, gPad
gROOT.SetBatch(True)
sys.argv = oldargv
from Validation.Geometry.plot_utils import setTDRStyle, Plot_params, plots, COMPOUNDS, DETECTORS, sDETS, hist_label_to_num, drawEtaValues
from collections import namedtuple, OrderedDict
import sys, os, copy
import argparse
def paramsGood_(detector, plot, geometryOld = '', geometryNew = ''):
    """Check the validity of the arguments.

    Common function to check the validity of the parameters passed
    in. It returns a tuple composed by a bool and a string. The
    bool indicates if all checks are ok, the string the appropriate
    ROOT filename to open (empty string in case any check failed).
    A missing ROOT file raises RuntimeError instead.

    If geometry comparison is being made (geometryNew given), a list
    of two filenames is returned instead of a single string.
    """
    if plot not in plots.keys():
        print("Error, unknown plot %s" % plot)
        return (False, '')

    if detector not in DETECTORS and detector not in COMPOUNDS.keys():
        print('Error, unknown detector: %s' % detector)
        return (False, '')

    # Compound detectors are represented by their first sub-detector file.
    if detector not in DETECTORS:
        detector = COMPOUNDS[detector][0]

    geometries = [geometryOld, geometryNew] if geometryNew else [geometryOld]
    theFiles = ['matbdg_%s_%s.root' % (detector, geometry) for geometry in geometries]

    for thisFile in theFiles:
        if not checkFile_(thisFile):
            print("Error, missing file %s" % thisFile)
            raise RuntimeError

    if len(theFiles) > 1:
        return (True, theFiles)
    return (True, theFiles[0])
def checkFile_(filename):
    """Return True when the given path exists on disk."""
    file_is_present = os.path.exists(filename)
    return file_is_present
def setColorIfExists_(histos, h, color):
    """Set the fill color of histos[h] when key *h* is present; otherwise no-op."""
    try:
        target = histos[h]
    except KeyError:
        return
    target.SetFillColor(color)
def assignOrAddIfExists_(h1, h2):
    """Return h2 when h1 is empty/None; otherwise add h2 into h1 (weight 1).

    Used to accumulate histogram projections: the first projection simply
    becomes the accumulator, later ones are summed into it.
    """
    if h1:
        h1.Add(h2, +1.000)
        return h1
    return h2
def get1DHisto_(detector,plotNumber,geometry):
    """
    Open the appropriate ROOT file(s) for *detector*/*geometry*, extract
    the TProfile numbered *plotNumber* and turn it into a 1D histogram.
    For a compound detector the sub-detectors' projections are summed,
    unless the compound's own ROOT file is present, in which case that
    file is used directly and no addition is performed.
    Returns a deep copy of the histogram (detached from the ROOT files),
    or 0 when a file or profile is missing.
    """
    histo = None
    rootFile = TFile()
    detectorFilename = 'matbdg_%s_%s.root'%(detector,geometry)

    if detector not in COMPOUNDS.keys() or checkFile_(detectorFilename):
        if not checkFile_(detectorFilename):
            print('Warning: %s not found' % detectorFilename)
            return 0
        print('Reading from: %s File' % detectorFilename)
        rootFile = TFile.Open(detectorFilename,'READ')
        prof = rootFile.Get("%d" % plotNumber)
        if not prof: return 0
        # Prevent memory leaking by specifing a unique name
        prof.SetName('%u_%s_%s' %(plotNumber,detector,geometry))
        histo = prof.ProjectionX()
    else:
        # theFiles keeps the TFile objects alive while their histograms are
        # still in use; closing them earlier would invalidate the profiles.
        theFiles = []
        # NOTE(review): ``histos`` is never used in this branch.
        histos = OrderedDict()
        for subDetector in COMPOUNDS[detector]:
            subDetectorFilename = 'matbdg_%s_%s.root' % (subDetector,geometry)
            if not checkFile_(subDetectorFilename):
                print('Warning: %s not found'%subDetectorFilename)
                continue
            print('Reading from: %s File' % subDetectorFilename)
            subDetectorFile = TFile.Open(subDetectorFilename,'READ')
            theFiles.append(subDetectorFile)
            prof = subDetectorFile.Get('%d'%(plotNumber))
            if not prof: return 0
            prof.__class__ = TProfile
            histo = assignOrAddIfExists_(histo,prof.ProjectionX())
    # Deep copy detaches the result from the (soon to be closed) files.
    return copy.deepcopy(histo)
def get2DHisto_(detector,plotNumber,geometry):
    """
    Open the appropriate ROOT file(s) for *detector*/*geometry*, extract
    the TProfile2D numbered *plotNumber* and turn it into a 2D histogram.
    For a compound detector (without its own ROOT file) the sub-detectors'
    projections are summed. Note that it takes plotNumber as opposed to
    a plot name. Returns a deep copy of the histogram, or 0 when a file
    or profile is missing.
    """
    histo = None
    rootFile = TFile()
    detectorFilename = 'matbdg_%s_%s.root'%(detector,geometry)
    if detector not in COMPOUNDS.keys() or checkFile_(detectorFilename):
        if not checkFile_(detectorFilename):
            print('Warning: %s not found' % detectorFilename)
            return 0
        rootFile = TFile.Open(detectorFilename,'READ')
        prof = rootFile.Get("%d" % plotNumber)
        if not prof: return 0
        # Prevent memory leaking by specifing a unique name
        prof.SetName('%u_%s_%s' %(plotNumber,detector,geometry))
        prof.__class__ = TProfile2D
        histo = prof.ProjectionXY()
    else:
        # NOTE(review): ``histos`` is never used in this branch.
        histos = OrderedDict()
        # theFiles keeps the TFile objects alive while their histograms are
        # still in use; closing them earlier would invalidate the profiles.
        theFiles = []
        for subDetector in COMPOUNDS[detector]:
            subDetectorFilename = 'matbdg_%s_%s.root' % (subDetector,geometry)
            if not checkFile_(subDetectorFilename):
                print('Warning: %s not found'%subDetectorFilename)
                continue
            subDetectorFile = TFile.Open(subDetectorFilename,'READ')
            theFiles.append(subDetectorFile)
            print('*** Open file... %s' % subDetectorFilename)
            prof = subDetectorFile.Get('%d'%plotNumber)
            if not prof: return 0
            prof.__class__ = TProfile2D
            # First sub-detector becomes the accumulator ('B_' prefix gives
            # the projection a distinct name); later ones are added in.
            if not histo:
                histo = prof.ProjectionXY('B_%s' % prof.GetName())
            else:
                histo.Add(prof.ProjectionXY('B_%s' % prof.GetName()))
    # Deep copy detaches the result from the (soon to be closed) files.
    return copy.deepcopy(histo)
def createCompoundPlotsGeometryComparison(detector, plot, geometryOld,
                                          geometryNew):
    """Compare the per-category material budget of two geometries.

    For every material category in hist_label_to_num the 1D profile of
    *detector* is drawn for both geometries on a 2x4 grid of pads, once
    with a new/old ratio sub-pad and once with a (new - old) difference
    sub-pad.  Both canvases are saved as PNGs under Images/.
    """
    setTDRStyle()

    goodToGo, theFiles = paramsGood_(detector,plot,
                                     geometryOld,geometryNew)
    if not goodToGo:
        return

    oldHistos = OrderedDict()
    newHistos = OrderedDict()
    ratioHistos = OrderedDict()
    diffHistos = OrderedDict()

    def setUpCanvas(canvas):
        # Split the canvas into 8 main pads (2 rows x 4 columns), each
        # with a thin sub-pad directly below it for the ratio/difference.
        gStyle.SetOptStat(False)
        mainPadTop = [
            TPad("mainPadTop"+str(i)+'_'+canvas.GetName(),
                 "mainPad"+str(i),
                 i*0.25, 0.60, (i+1)*0.25, 1.0)
            for i in range(4)
        ]

        subPadTop = [
            TPad("subPadTop"+str(i)+'_'+canvas.GetName(),
                 "subPad"+str(i),
                 i*0.25, 0.50, (i+1)*0.25, 0.6)
            for i in range(4)
        ]

        mainPadBottom = [
            TPad("mainPadBottom"+str(i)+'_'+canvas.GetName(),
                 "subPad"+str(i),
                 i*0.25, 0.10, (i+1)*0.25, 0.5)
            for i in range(4)
        ]

        subPadBottom = [
            TPad("subPadBottom"+str(i)+'_'+canvas.GetName(),
                 "subPad"+str(i),
                 i*0.25, 0.00, (i+1)*0.25, 0.1)
            for i in range(4)
        ]

        mainPad = mainPadTop + mainPadBottom
        subPad = subPadTop + subPadBottom

        leftMargin = 0.12
        rightMargin = 0.12
        topMargin = 0.12
        bottomMargin = 0.3
        for i in range(8):
            mainPad[i].SetLeftMargin(leftMargin)
            mainPad[i].SetRightMargin(rightMargin)
            mainPad[i].SetTopMargin(topMargin)
            mainPad[i].SetBottomMargin(1e-3)
            mainPad[i].Draw()
            subPad[i].SetLeftMargin(leftMargin)
            subPad[i].SetRightMargin(rightMargin)
            subPad[i].SetTopMargin(1e-3)
            subPad[i].SetBottomMargin(bottomMargin)
            subPad[i].Draw()
        return mainPad, subPad

    canComparison = TCanvas("canComparison","canComparison",2400,1200)
    mainPad, subPad = setUpCanvas(canComparison)

    def setStyleHistoSubPad(histo):
        # Styling shared by the ratio and difference sub-pad histograms.
        histo.SetTitle('')
        histo.SetMarkerColor(kBlack)
        histo.SetMarkerStyle(20) # Circles
        histo.SetMarkerSize(.5)
        histo.SetLineWidth(1)

        histo.GetYaxis().SetTitleSize(14)
        histo.GetYaxis().SetTitleFont(43)
        histo.GetYaxis().SetLabelSize(0.17)
        histo.GetYaxis().SetTitleOffset(5.0)
        histo.GetYaxis().SetNdivisions(6,3,0)

        histo.GetXaxis().SetTitleSize(25)
        histo.GetXaxis().SetTitleFont(43)
        histo.GetXaxis().SetTitleOffset(6.0)
        histo.GetXaxis().SetLabelSize(0.17)
        return histo

    def makeRatio(histoX,histoY):
        # return stylized ratio histoX/histoY
        histoXOverY = copy.deepcopy(histoX)
        histoXOverY.Divide(histoY)
        histoXOverY.GetYaxis().SetTitle('#frac{%s}{%s}' % (geometryNew,geometryOld))
        return histoXOverY

    def makeDiff(histoNew,histoOld):
        # Return stylized histoNew - histoOld
        diff = copy.deepcopy(histoNew)
        diff.Add(histoOld,-1.0)
        diff.GetYaxis().SetTitle(geometryNew
                                 + " - "
                                 + geometryOld)
        diff.GetYaxis().SetNdivisions(6,3,0)
        diff.GetXaxis().SetTitleSize(25)
        diff.GetXaxis().SetTitleFont(43)
        diff.GetXaxis().SetTitleOffset(3.5)
        diff.GetXaxis().SetLabelSize(0.17)
        return diff

    # Plotting the different categories
    def setUpTitle(detector,label,plot):
        title = 'Material Budget %s [%s];%s;%s' % (detector,label,
                                                   plots[plot].abscissa,
                                                   plots[plot].ordinate)
        return title

    def setUpLegend(gOld,gNew,label):
        legend = TLegend(0.4,0.7,0.7,0.85)
        legend.AddEntry(gOld,"%s %s [%s]"%(detector,geometryOld,label),"F") #(F)illed Box
        legend.AddEntry(gNew,"%s %s [%s]"%(detector,geometryNew,label),"P") #(P)olymarker
        legend.SetTextFont(42)
        legend.SetTextSize(0.03)
        return legend

    def setRanges(h):
        # Leave 30% head-room above the maximum so the legend does not
        # overlap the histogram.
        legendSpace = 1. + 0.3 # 30%
        minX = h.GetXaxis().GetXmin()
        maxX = h.GetXaxis().GetXmax()
        minY = h.GetYaxis().GetXmin()
        maxY = h.GetBinContent(h.GetMaximumBin()) * legendSpace
        h.GetYaxis().SetRangeUser(minY, maxY)
        h.GetXaxis().SetRangeUser(minX, maxX)

    ########### Ratio ###########
    counter = 0
    legends = OrderedDict() #KeepAlive
    for label, [num, color, leg] in hist_label_to_num.items():
        mainPad[counter].cd()
        oldHistos[label] = get1DHisto_(detector,
                                       num+plots[plot].plotNumber
                                       ,geometryOld)
        oldHistos[label].SetTitle(setUpTitle(detector,leg,plot))
        oldHistos[label].SetFillColor(color)
        oldHistos[label].SetLineColor(kBlack)
        oldHistos[label].SetLineWidth(1)
        setRanges(oldHistos[label])
        oldHistos[label].Draw("HIST")

        newHistos[label] = get1DHisto_(detector,
                                       num+plots[plot].plotNumber
                                       ,geometryNew)
        newHistos[label].SetMarkerSize(.5)
        newHistos[label].SetMarkerStyle(20)
        newHistos[label].Draw('SAME P')

        legends[label]= setUpLegend(oldHistos[label],newHistos[label],
                                    leg);
        legends[label].Draw()

        # Ratio
        subPad[counter].cd()
        ratioHistos[label] = makeRatio( newHistos[label],oldHistos[label] )
        ratioHistos[label] = setStyleHistoSubPad(ratioHistos[label])
        ratioHistos[label].Draw("HIST P")
        counter += 1

    theDirname = "Images"
    if not checkFile_(theDirname):
        os.mkdir(theDirname)

    canComparison.SaveAs( "%s/%s_ComparisonRatio_%s_%s_vs_%s.png"
                          % (theDirname,detector,plot,geometryOld,geometryNew) )

    ######## Difference ########
    canDiff = TCanvas("canDiff","canDiff",2400,1200)
    mainPadDiff, subPadDiff = setUpCanvas(canDiff)
    counter = 0
    for label, [num, color, leg] in hist_label_to_num.items():
        mainPadDiff[counter].cd()
        # The histograms were already retrieved in the ratio step above.
        oldHistos[label].SetTitle(setUpTitle(detector,leg,plot))
        oldHistos[label].Draw("HIST")
        newHistos[label].Draw('SAME P')
        legends[label].Draw()

        # Difference
        subPadDiff[counter].cd()
        diffHistos[label] = makeDiff( newHistos[label],oldHistos[label] )
        diffHistos[label] = setStyleHistoSubPad(diffHistos[label])
        diffHistos[label].SetTitle('')
        diffHistos[label].SetFillColor(color+1)
        diffHistos[label].Draw("HIST")
        counter +=1

    canDiff.SaveAs( "%s/%s_ComparisonDifference_%s_%s_vs_%s.png"
                    % (theDirname,detector,plot,geometryOld,geometryNew) )
def setUpPalette(histo2D, plot) :
    """Style the color palette and axes of an already-drawn 2D histogram.

    Must be called after the histogram has been drawn (COLZ) and the pad
    updated, so the "palette" object exists in the histogram's list of
    functions.  Also draws the vertical palette title next to the axis.
    """

    # Configure Palette for 2D Histos

    minX = 1.03*histo2D.GetXaxis().GetXmin();
    maxX = 1.03*histo2D.GetXaxis().GetXmax();
    minY = 1.03*histo2D.GetYaxis().GetXmin();
    maxY = 1.03*histo2D.GetYaxis().GetXmax();

    palette = histo2D.GetListOfFunctions().FindObject("palette")
    if palette:
        palette.__class__ = TPaletteAxis
        palette.SetX1NDC(0.945)
        palette.SetY1NDC(gPad.GetBottomMargin())
        palette.SetX2NDC(0.96)
        palette.SetY2NDC(1-gPad.GetTopMargin())
        palette.GetAxis().SetTickSize(.01)
        palette.GetAxis().SetTitle("")
        if plots[plot].zLog:
            palette.GetAxis().SetLabelOffset(-0.01)
            # Dense, plain labels only when the z-range spans fewer than
            # three decades.
            if histo2D.GetMaximum()/histo2D.GetMinimum() < 1e3 :
                palette.GetAxis().SetMoreLogLabels(True)
                palette.GetAxis().SetNoExponent(True)

    # Vertical title drawn to the right of the palette axis.
    paletteTitle = TLatex(1.12*maxX, maxY, plots[plot].quotaName)
    paletteTitle.SetTextAngle(90.)
    paletteTitle.SetTextSize(0.05)
    paletteTitle.SetTextAlign(31)
    paletteTitle.Draw()

    histo2D.GetXaxis().SetTickLength(histo2D.GetXaxis().GetTickLength()/4.)
    histo2D.GetYaxis().SetTickLength(histo2D.GetYaxis().GetTickLength()/4.)
    histo2D.SetTitleOffset(0.5,'Y')
    histo2D.GetXaxis().SetNoExponent(True)
    histo2D.GetYaxis().SetNoExponent(True)
def create2DPlotsGeometryComparison(detector, plot,
                                    geometryOld, geometryNew):
    """Draw the ratio and the difference of a 2D material-budget plot.

    The canvas holds two pads: the top one shows the new/old ratio, the
    bottom one shows the (new - old) difference.  The result is saved as
    a PNG under Images/.
    """
    setTDRStyle()

    print('Extracting plot: %s.'%(plot))
    goodToGo, theFiles = paramsGood_(detector,plot,
                                     geometryOld,geometryNew)
    if not goodToGo:
        return

    gStyle.SetOptStat(False)

    old2DHisto = get2DHisto_(detector,plots[plot].plotNumber,geometryOld)
    new2DHisto = get2DHisto_(detector,plots[plot].plotNumber,geometryNew)

    if plots[plot].iRebin:
        old2DHisto.Rebin2D()
        new2DHisto.Rebin2D()

    def setRanges(h):
        h.GetXaxis().SetRangeUser(plots[plot].xmin, plots[plot].xmax)
        h.GetYaxis().SetRangeUser(plots[plot].ymin, plots[plot].ymax)
        if plots[plot].histoMin != -1.:
            h.SetMinimum(plots[plot].histoMin)
        if plots[plot].histoMax != -1.:
            h.SetMaximum(plots[plot].histoMax)

    ratio2DHisto = copy.deepcopy(new2DHisto)
    ratio2DHisto.Divide(old2DHisto)
    # Ratio and Difference have the same call
    # But different 'Palette' range so we are
    # setting the range only for the Ratio
    ratio2DHisto.SetMinimum(0.2)
    ratio2DHisto.SetMaximum(1.8)
    setRanges(ratio2DHisto)

    diff2DHisto = copy.deepcopy(new2DHisto)
    diff2DHisto.Add(old2DHisto,-1.0)
    setRanges(diff2DHisto)

    def setPadStyle():
        # Style applied to the currently-active pad (gPad).
        gPad.SetLeftMargin(0.05)
        gPad.SetRightMargin(0.08)
        gPad.SetTopMargin(0.10)
        gPad.SetBottomMargin(0.10)
        gPad.SetLogz(plots[plot].zLog)
        gPad.SetFillColor(kWhite)
        gPad.SetBorderMode(0)

    can = TCanvas('can','can',
                  2724,1336)
    can.Divide(1,2)
    can.cd(1)
    setPadStyle()
    gPad.SetLogz(plots[plot].zLog)

    gStyle.SetOptStat(0)
    gStyle.SetFillColor(kWhite)
    gStyle.SetPalette(kTemperatureMap)

    # Fix: ratio2DHisto holds new/old (Divide above), so the title must
    # name geometryNew first.  The original printed old/new.
    ratio2DHisto.SetTitle("%s, Ratio: %s/%s;%s;%s"
                          %(plots[plot].quotaName,
                            geometryNew, geometryOld,
                            plots[plot].abscissa,
                            plots[plot].ordinate))
    ratio2DHisto.Draw('COLZ')
    can.Update()
    setUpPalette(ratio2DHisto,plot)

    # Keep references so the eta labels are not garbage-collected.
    etasTop = []
    if plots[plot].iDrawEta:
        etasTop.extend(drawEtaValues())

    can.cd(2)
    diff2DHisto.SetTitle('%s, Difference: %s - %s %s;%s;%s'
                         %(plots[plot].quotaName,geometryNew,geometryOld,detector,
                           plots[plot].abscissa,plots[plot].ordinate))
    setPadStyle()
    diff2DHisto.Draw("COLZ")
    can.Update()
    setUpPalette(diff2DHisto,plot)

    etasBottom = []
    if plots[plot].iDrawEta:
        etasBottom.extend(drawEtaValues())

    can.Modified()

    theDirname = "Images"
    if not checkFile_(theDirname):
        os.mkdir(theDirname)
    can.SaveAs( "%s/%s_Comparison_%s_%s_vs_%s.png"
                % (theDirname,detector,plot,geometryOld,geometryNew) )
    # Restore the global decimal-stripping style.
    gStyle.SetStripDecimals(True)
def createPlots_(plot, geometry):
    """Cumulative material budget from simulation.

    Internal function that will produce a cumulative profile of the
    material budget inferred from the simulation starting from the
    single detectors that compose the tracker. It will iterate over
    all existing detectors contained in the DETECTORS
    dictionary. The function will automatically skip non-existent
    detectors.

    Returns the cumulative material-budget TH1D (sum over detectors).
    """

    IBs = ["InnerServices", "Phase2PixelBarrel", "TIB", "TIDF", "TIDB"]
    theDirname = "Figures"

    if plot not in plots.keys():
        print("Error: chosen plot name not known %s" % plot)
        return

    hist_X0_detectors = OrderedDict()
    hist_X0_IB = None
    hist_X0_elements = OrderedDict()
    for subDetector,color in DETECTORS.items():
        h = get1DHisto_(subDetector,plots[plot].plotNumber,geometry)
        if not h:
            print('Warning: Skipping %s'%subDetector)
            continue
        hist_X0_detectors[subDetector] = h

        # Merge together the "inner barrel detectors".
        if subDetector in IBs:
            hist_X0_IB = assignOrAddIfExists_(
                hist_X0_IB,
                hist_X0_detectors[subDetector]
            )

        # category profiles
        # NOTE(review): the loop variable `color` below shadows the
        # detector color from the outer loop — confirm this is intended.
        for label, [num, color, leg] in hist_label_to_num.items():
            if label == 'SUM': continue

            hist_label = get1DHisto_(subDetector, num + plots[plot].plotNumber, geometry)
            hist_X0_elements[label] = assignOrAddIfExists_(
                hist_X0_elements.setdefault(label,None),
                hist_label,
            )
            hist_X0_elements[label].SetFillColor(color)

    cumulative_matbdg = TH1D("CumulativeSimulMatBdg",
                             "CumulativeSimulMatBdg",
                             hist_X0_IB.GetNbinsX(),
                             hist_X0_IB.GetXaxis().GetXmin(),
                             hist_X0_IB.GetXaxis().GetXmax())
    # Detach from the current ROOT file so the histogram outlives it.
    cumulative_matbdg.SetDirectory(0)

    # colors
    for det, color in DETECTORS.items():
        setColorIfExists_(hist_X0_detectors, det, color)

    # First Plot: BeamPipe + Pixel + TIB/TID + TOB + TEC + Outside
    # stack
    stackTitle_SubDetectors = "Tracker Material Budget;%s;%s" % (
        plots[plot].abscissa,plots[plot].ordinate)
    stack_X0_SubDetectors = THStack("stack_X0",stackTitle_SubDetectors)
    for det, histo in hist_X0_detectors.items():
        stack_X0_SubDetectors.Add(histo)
        cumulative_matbdg.Add(histo, 1)

    # canvas
    can_SubDetectors = TCanvas("can_SubDetectors","can_SubDetectors",800,800)
    can_SubDetectors.Range(0,0,25,25)
    can_SubDetectors.SetFillColor(kWhite)

    # Draw
    stack_X0_SubDetectors.SetMinimum(plots[plot].ymin)
    stack_X0_SubDetectors.SetMaximum(plots[plot].ymax)
    stack_X0_SubDetectors.Draw("HIST")
    stack_X0_SubDetectors.GetXaxis().SetLimits(plots[plot].xmin, plots[plot].xmax)

    # Legenda
    theLegend_SubDetectors = TLegend(0.180,0.8,0.98,0.92)
    theLegend_SubDetectors.SetNColumns(3)
    theLegend_SubDetectors.SetFillColor(0)
    theLegend_SubDetectors.SetFillStyle(0)
    theLegend_SubDetectors.SetBorderSize(0)

    for det, histo in hist_X0_detectors.items():
        theLegend_SubDetectors.AddEntry(histo, det, "f")

    theLegend_SubDetectors.Draw()

    # text
    text_SubDetectors = TPaveText(0.180,0.727,0.402,0.787,"NDC")
    text_SubDetectors.SetFillColor(0)
    text_SubDetectors.SetBorderSize(0)
    text_SubDetectors.AddText("CMS Simulation")
    text_SubDetectors.SetTextAlign(11)
    text_SubDetectors.Draw()

    # Store
    can_SubDetectors.Update()
    if not checkFile_(theDirname):
        os.mkdir(theDirname)
    can_SubDetectors.SaveAs("%s/Tracker_SubDetectors_%s.pdf" % (theDirname, plot))
    can_SubDetectors.SaveAs("%s/Tracker_SubDetectors_%s.root" % (theDirname, plot))

    # Second Plot: BeamPipe + SEN + ELE + CAB + COL + SUP + OTH/AIR +
    # Outside stack
    stackTitle_Materials = "Tracker Material Budget;%s;%s" % (plots[plot].abscissa,
                                                              plots[plot].ordinate)
    stack_X0_Materials = THStack("stack_X0",stackTitle_Materials)
    stack_X0_Materials.Add(hist_X0_detectors["BeamPipe"])
    for label, [num, color, leg] in hist_label_to_num.items():
        if label == 'SUM':
            continue
        stack_X0_Materials.Add(hist_X0_elements[label])

    # canvas
    can_Materials = TCanvas("can_Materials","can_Materials",800,800)
    can_Materials.Range(0,0,25,25)
    can_Materials.SetFillColor(kWhite)

    # Draw
    stack_X0_Materials.SetMinimum(plots[plot].ymin)
    stack_X0_Materials.SetMaximum(plots[plot].ymax)
    stack_X0_Materials.Draw("HIST")
    stack_X0_Materials.GetXaxis().SetLimits(plots[plot].xmin, plots[plot].xmax)

    # Legenda
    theLegend_Materials = TLegend(0.180,0.8,0.95,0.92)
    theLegend_Materials.SetNColumns(3)
    theLegend_Materials.SetFillColor(0)
    theLegend_Materials.SetBorderSize(0)
    theLegend_Materials.AddEntry(hist_X0_detectors["BeamPipe"], "Beam Pipe", "f")
    for label, [num, color, leg] in hist_label_to_num.items():
        if label == 'SUM':
            continue
        theLegend_Materials.AddEntry(hist_X0_elements[label], leg, "f")
    theLegend_Materials.Draw()

    # text
    text_Materials = TPaveText(0.180,0.727,0.402,0.787,"NDC")
    text_Materials.SetFillColor(0)
    text_Materials.SetBorderSize(0)
    text_Materials.AddText("CMS Simulation")
    text_Materials.SetTextAlign(11)
    text_Materials.Draw()

    # Store
    can_Materials.Update()
    can_Materials.SaveAs("%s/Tracker_Materials_%s.pdf" % (theDirname, plot))
    can_Materials.SaveAs("%s/Tracker_Materials_%s.root" % (theDirname, plot))

    return cumulative_matbdg
def createPlotsReco_(reco_file, label, debug=False):
    """Cumulative material budget from reconstruction.

    Internal function that will produce a cumulative profile of the
    material budget in the reconstruction starting from the single
    detectors that compose the tracker. It will iterate over all
    existing detectors contained in the sDETS dictionary. The
    function will automatically stop every time it encounters a
    non-existent detector, until no more detectors are left to
    try. For this reason the keys in the sDETS dictionary can be as
    inclusive as possible.

    Returns the cumulative material-budget TH1D.
    """
    cumulative_matbdg = None
    sPREF = ["Original_RadLen_vs_Eta_", "RadLen_vs_Eta_"]

    c = TCanvas("c", "c", 1024, 1024)
    diffs = []
    if not checkFile_(reco_file):
        print("Error: missing file %s" % reco_file)
        raise RuntimeError
    # Renamed from `file` to avoid shadowing the builtin.
    reco_root_file = TFile(reco_file)
    prefix = "/DQMData/Run 1/Tracking/Run summary/RecoMaterial/"
    for s in sPREF:
        hs = THStack("hs", "")
        histos = []
        for det, color in sDETS.items():
            layer_number = 0
            while True:
                layer_number += 1
                name = "%s%s%s%d" % (prefix, s, det, layer_number)
                prof = reco_root_file.Get(name)
                # If we miss an object, since we are incrementally
                # searching for consecutive layers, we may safely
                # assume that there are no additional layers and skip
                # to the next detector.
                if not prof:
                    if debug:
                        print("Missing profile %s" % name)
                    break
                else:
                    histos.append(prof.ProjectionX("_px", "hist"))
                    diffs.append(histos[-1])
                    histos[-1].SetFillColor(color + layer_number)
                    histos[-1].SetLineColor(color + layer_number + 1)

        name = "CumulativeRecoMatBdg_%s" % s
        if s == "RadLen_vs_Eta_":
            cumulative_matbdg = TH1D(name, name,
                                     histos[0].GetNbinsX(),
                                     histos[0].GetXaxis().GetXmin(),
                                     histos[0].GetXaxis().GetXmax())
            cumulative_matbdg.SetDirectory(0)
        for h in histos:
            hs.Add(h)
            if cumulative_matbdg:
                cumulative_matbdg.Add(h, 1.)
        hs.Draw()
        hs.GetYaxis().SetTitle("RadLen")
        c.Update()
        c.Modified()
        c.SaveAs("%sstacked_%s.png" % (s, label))

    # `diffs` holds the "Original" histograms first, then the current
    # ones; subtract pairwise (current - original) and stack the result.
    hs = THStack("diff", "")
    # Fix: use integer division — len(diffs)/2 is a float in Python 3 and
    # breaks both range() and list indexing.
    half = len(diffs) // 2
    for d in range(half):
        diffs[d + half].Add(diffs[d], -1.)
        hs.Add(diffs[d + half])
    hs.Draw()
    hs.GetYaxis().SetTitle("RadLen")
    c.Update()
    c.Modified()
    c.SaveAs("RadLen_difference_%s.png" % label)
    return cumulative_matbdg
def materialBudget_Simul_vs_Reco(reco_file, label, geometry, debug=False):
    """Plot reco vs simulation material budget.

    Produces a direct comparison of the material budget as extracted
    from the reconstruction geometry and as inferred from the
    simulation one, saved as MaterialBdg_Reco_vs_Simul_<label>.png.

    Keyword arguments:
    reco_file -- DQM ROOT file with the reconstruction material plots
    label     -- label used in the output file name
    geometry  -- geometry tag used to locate the simulation files
    debug     -- forwarded to createPlotsReco_ to report missing profiles
    """
    setTDRStyle()

    # plots
    cumulative_matbdg_sim = createPlots_("x_vs_eta", geometry)
    # Fix: forward the caller's debug flag instead of hard-coding False.
    cumulative_matbdg_rec = createPlotsReco_(reco_file, label, debug=debug)

    cc = TCanvas("cc", "cc", 1024, 1024)
    cumulative_matbdg_sim.SetMinimum(0.)
    cumulative_matbdg_sim.SetMaximum(3.5)
    cumulative_matbdg_sim.GetXaxis().SetRangeUser(-3.0, 3.0)
    cumulative_matbdg_sim.SetLineColor(kOrange)
    cumulative_matbdg_rec.SetMinimum(0.)
    cumulative_matbdg_rec.SetMaximum(3.)
    cumulative_matbdg_rec.SetLineColor(kAzure+1)
    l = TLegend(0.18, 0.8, 0.95, 0.92)
    l.AddEntry(cumulative_matbdg_sim, "Sim Material", "f")
    l.AddEntry(cumulative_matbdg_rec, "Reco Material", "f")
    cumulative_matbdg_sim.Draw("HIST")
    cumulative_matbdg_rec.Draw("HIST SAME")
    l.Draw()
    filename = "MaterialBdg_Reco_vs_Simul_%s.png" % label
    cc.SaveAs(filename)
def createCompoundPlots(detector, plot, geometry):
    """Produce the requested plot for the specified detector.

    Function that will plot the requested @plot for the specified
    @detector. The specified detector could either be a real
    detector or a compound one. The list of available plots are the
    keys of plots dictionary (imported from plot_utils).
    """
    setTDRStyle()

    theDirname = 'Images'
    if not checkFile_(theDirname):
        os.mkdir(theDirname)

    goodToGo, theDetectorFilename = paramsGood_(detector, plot, geometry)
    if not goodToGo:
        return

    hist_X0_elements = OrderedDict()

    # stack
    stackTitle = "%s;%s;%s" % (detector,
                               plots[plot].abscissa,
                               plots[plot].ordinate)
    stack_X0 = THStack("stack_X0", stackTitle);
    theLegend = TLegend(0.50, 0.70, 0.70, 0.90);

    def setRanges(h):
        # Leave 30% head-room above the maximum for the legend.
        legendSpace = 1. + 0.3 # 30%
        minY = h.GetYaxis().GetXmin()
        maxY = h.GetBinContent(h.GetMaximumBin()) * legendSpace
        h.GetYaxis().SetRangeUser(minY, maxY)

    for label, [num, color, leg] in hist_label_to_num.items():
        # We don't want the sum to be added as part of the stack
        if label == 'SUM':
            continue
        hist_X0_elements[label] = get1DHisto_(detector,
                                              num + plots[plot].plotNumber,
                                              geometry)
        hist_X0_elements[label].SetFillColor(color)
        hist_X0_elements[label].SetLineColor(kBlack)
        stack_X0.Add(hist_X0_elements[label])
        # Only list non-empty categories in the legend.
        if hist_X0_elements[label].Integral() > 0.: theLegend.AddEntry(hist_X0_elements[label], leg, "f")

    # canvas
    canname = "MBCan_1D_%s_%s" % (detector, plot)
    can = TCanvas(canname, canname, 800, 800)
    can.Range(0,0,25,25)
    gStyle.SetOptTitle(0)

    # Draw
    setRanges(stack_X0.GetStack().Last())
    stack_X0.Draw("HIST");
    stack_X0.GetXaxis().SetLabelSize(0.035)
    stack_X0.GetYaxis().SetLabelSize(0.035)
    theLegend.Draw();

    # "CMS" mark
    cmsMark = TLatex()
    cmsMark.SetNDC();
    cmsMark.SetTextAngle(0);
    cmsMark.SetTextColor(kBlack);

    cmsMark.SetTextFont(61)
    cmsMark.SetTextSize(5e-2)
    cmsMark.SetTextAlign(11)
    cmsMark.DrawLatex(0.16,0.86,"CMS")

    # "Simulation Internal" mark
    simuMark = TLatex()
    simuMark.SetNDC();
    simuMark.SetTextAngle(0);
    simuMark.SetTextColor(kBlack);

    simuMark.SetTextSize(3e-2)
    simuMark.SetTextAlign(11)
    simuMark.DrawLatex(0.16,0.82,"#font[52]{Simulation Internal}")

    # Store
    can.Update();
    can.SaveAs( "%s/%s_%s_%s.pdf"
                % (theDirname, detector, plot, geometry))
    can.SaveAs( "%s/%s_%s_%s.png"
                % (theDirname, detector, plot, geometry))
def create2DPlots(detector, plot, geometry):
    """Produce the requested plot for the specified detector.

    Function that will plot the requested 2D-@plot for the
    specified @detector. The specified detector could either be a
    real detector or a compound one. The list of available plots
    are the keys of plots dictionary (imported from plot_utils).
    """
    theDirname = 'Images'
    if not checkFile_(theDirname):
        os.mkdir(theDirname)

    goodToGo, theDetectorFilename = paramsGood_(detector, plot, geometry)
    if not goodToGo:
        return

    theDetectorFile = TFile(theDetectorFilename)

    hist_X0_total = get2DHisto_(detector,plots[plot].plotNumber,geometry)

    # # properties
    gStyle.SetStripDecimals(False)

    # Ratio
    if plots[plot].iRebin:
        hist_X0_total.Rebin2D()

    # stack
    hist2dTitle = ('%s %s;%s;%s;%s' % (plots[plot].quotaName,
                                       detector,
                                       plots[plot].abscissa,
                                       plots[plot].ordinate,
                                       plots[plot].quotaName))
    hist_X0_total.SetTitle(hist2dTitle)
    hist_X0_total.SetTitleOffset(0.5,"Y")

    if plots[plot].histoMin != -1.:
        hist_X0_total.SetMinimum(plots[plot].histoMin)
    if plots[plot].histoMax != -1.:
        hist_X0_total.SetMaximum(plots[plot].histoMax)

    can2name = "MBCan_2D_%s_%s" % (detector, plot)
    can2 = TCanvas(can2name, can2name, 2480+248, 580+58+58)
    can2.SetTopMargin(0.1)
    can2.SetBottomMargin(0.1)
    can2.SetLeftMargin(0.04)
    can2.SetRightMargin(0.06)
    can2.SetFillColor(kWhite)
    gStyle.SetOptStat(0)
    gStyle.SetTitleFillColor(0)
    gStyle.SetTitleBorderSize(0)

    # Color palette
    gStyle.SetPalette(kGreyScale)

    # Log?
    can2.SetLogz(plots[plot].zLog)

    # Draw in colors
    hist_X0_total.Draw("COLZ")

    # Store
    can2.Update()

    #Aesthetic
    setUpPalette(hist_X0_total,plot)

    #Add eta labels
    # Keep references so the labels are not garbage-collected before saving.
    keep_alive = []
    if plots[plot].iDrawEta:
        keep_alive.extend(drawEtaValues())

    can2.Modified()
    hist_X0_total.SetContour(255)

    # Store
    can2.Update()
    can2.Modified()

    can2.SaveAs( "%s/%s_%s_%s_bw.pdf"
                 % (theDirname, detector, plot, geometry))
    can2.SaveAs( "%s/%s_%s_%s_bw.png"
                 % (theDirname, detector, plot, geometry))
    # Restore the global decimal-stripping style changed above.
    gStyle.SetStripDecimals(True)
def createRatioPlots(detector, plot, geometry):
    """Create ratio plots.

    Function that will make the ratio between the radiation length
    and interaction length, for the specified detector. The
    specified detector could either be a real detector or a
    compound one.
    """
    goodToGo, theDetectorFilename = paramsGood_(detector, plot, geometry)
    if not goodToGo:
        return

    theDirname = 'Images'
    if not os.path.exists(theDirname):
        os.mkdir(theDirname)

    # histos: radiation length at plotNumber, interaction length at
    # 1000+plotNumber (offset convention used throughout this file).
    # (The original also opened the detector ROOT file and fetched two
    # TProfiles that were never used — dead code, removed.)
    hist_x0_total = get1DHisto_(detector,plots[plot].plotNumber,geometry)
    hist_l0_total = get1DHisto_(detector,1000+plots[plot].plotNumber,geometry)

    # In-place division: hist_x0_over_l0_total aliases hist_x0_total and
    # afterwards holds x0/l0.
    hist_x0_over_l0_total = hist_x0_total
    hist_x0_over_l0_total.Divide(hist_l0_total)

    histTitle = "Material Budget %s;%s;%s" % (detector,
                                              plots[plot].abscissa,
                                              plots[plot].ordinate)
    hist_x0_over_l0_total.SetTitle(histTitle)
    # properties
    hist_x0_over_l0_total.SetMarkerStyle(1)
    hist_x0_over_l0_total.SetMarkerSize(3)
    hist_x0_over_l0_total.SetMarkerColor(kBlue)

    # canvas
    canRname = "MBRatio_%s_%s" % (detector, plot)
    canR = TCanvas(canRname,canRname,800,800)
    canR.Range(0,0,25,25)
    canR.SetFillColor(kWhite)
    gStyle.SetOptStat(0)

    # Draw
    hist_x0_over_l0_total.Draw("E1")

    # Store
    canR.Update()
    canR.SaveAs("%s/%s_%s_%s.pdf"
                % (theDirname, detector, plot, geometry))
    canR.SaveAs("%s/%s_%s_%s.png"
                % (theDirname, detector, plot, geometry))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Generic Material Plotter',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-r', '--reco',
                        help='Input Reconstruction Material file, DQM format')
    parser.add_argument('-l', '--label',
                        help='Label to use in naming the plots')
    parser.add_argument('-c', '--compare',
                        help='Compare simulation and reco materials',
                        action='store_true',
                        default=False)
    parser.add_argument('-sw','--subdetector-wise',
                        help="Subdetector-wise categorization. Individual ROOT " \
                        "files for each subdetector are required.",
                        action="store_true",
                        default=False)
    parser.add_argument('-s', '--single',
                        help='Material budget for single detector from simulation',
                        action='store_true',
                        default=False)
    parser.add_argument('-d', '--detector',
                        help='Detector for which you want to compute the material budget',
                        type=str,)
    parser.add_argument('-g', '--geometry',
                        help='Geometry, used to determine filenames',
                        type=str)
    parser.add_argument('-gc', '--geometry-comparison',
                        help='Compare the material budget for two different geometries'
                        +'-g should be specied',
                        type=str)
    args = parser.parse_args()

    # -g is mandatory for every mode.
    if args.geometry is None:
        print("Error, missing geometry. -g is Required")
        raise RuntimeError

    # Fix: the original second check ("args.geometry_comparison and
    # args.geometry is None") was unreachable — we already raised above
    # when -g is missing — so it has been removed.
    if args.geometry_comparison:
        # For the definition of the properties of these graphs
        # check plot_utils.py
        required_plots = ["x_vs_eta","x_vs_phi","x_vs_R",
                          "l_vs_eta","l_vs_phi","l_vs_R"]
        required_2Dplots = ["x_vs_eta_vs_phi",
                            "l_vs_eta_vs_phi",
                            "x_vs_z_vs_R",
                            "l_vs_z_vs_R_geocomp",
                            "x_vs_z_vs_Rsum",
                            "l_vs_z_vs_Rsum"]
        for p in required_plots:
            createCompoundPlotsGeometryComparison(args.detector, p, args.geometry,
                                                  args.geometry_comparison)
        for p in required_2Dplots:
            create2DPlotsGeometryComparison(args.detector, p, args.geometry,
                                            args.geometry_comparison)

    # -c and -s are mutually exclusive.
    if args.compare and args.single:
        print("Error, too many actions required")
        raise RuntimeError

    if args.compare:
        if args.reco is None:
            print("Error, missing input reco file")
            raise RuntimeError
        if args.label is None:
            print("Error, missing label")
            raise RuntimeError
        materialBudget_Simul_vs_Reco(args.reco, args.label, args.geometry, debug=False)

    if args.single and not args.geometry_comparison:
        if args.detector is None:
            print("Error, missing detector")
            raise RuntimeError
        required_2Dplots = ["x_vs_eta_vs_phi", "l_vs_eta_vs_phi", "x_vs_z_vs_R",
                            "l_vs_z_vs_R", "x_vs_z_vs_Rsum", "l_vs_z_vs_Rsum"]
        required_plots = ["x_vs_eta", "x_vs_phi", "l_vs_eta", "l_vs_phi"]
        required_ratio_plots = ["x_over_l_vs_eta", "x_over_l_vs_phi"]
        for p in required_2Dplots:
            create2DPlots(args.detector, p, args.geometry)
        for p in required_plots:
            createCompoundPlots(args.detector, p, args.geometry)
        for p in required_ratio_plots:
            createRatioPlots(args.detector, p, args.geometry)

    if args.subdetector_wise and args.geometry:
        required_plots = ["x_vs_eta","l_vs_eta"]
        for p in required_plots:
            createPlots_(p, args.geometry)
'''
File: message_dispatcher.py
Description: Message dispatch handler
Date: 29/09/2017
Author: <NAME> <<EMAIL>>
'''
from structures import Message, MessagePacket, MessageQueue
class MessageDispatcher(object):
    """Handle the dispatch of messages from the bolt server.

    Messages are registered together with the list of topics they should
    be broadcast to; sending a message fills the stored structure with
    the caller's parameters, builds a packet, and pushes it to every
    subscribed topic through the socket server.
    """

    def __init__(self, socket_server):
        """Initialize the MessageDispatcher.

        Keyword Arguments:
        socket_server -- A socket server object to help with the message dispatch
        """
        # The general purpose message register looks like
        # message_register = {message_name: [topics]}
        self.message_register = {}
        # Socket server used to deliver the packets
        self.socket_server = socket_server
        # Store of the registered message structures
        self.message_store = Message()
        # Queue tracking the status of dispatched message ids
        self.message_queue = MessageQueue()
        # Register the default handler that marks replies as completed
        self.socket_server.register_handler(self.__generic_handler)

    def message_exists(self, message_name):
        """Check if the message exists or not.

        Keyword arguments:
        message_name -- The name of the message

        Returns:
        Bool
        """
        try:
            self.message_store.get_message(message_name)
        except KeyError:
            return False
        return True

    def register_handler(self, handler):
        """Register a new message handler with the socket server.

        Keyword arguments:
        handler -- Message handling object
        """
        self.socket_server.register_handler(handler)

    def register_message(self, message_name, message_structure, message_topics):
        """Register a new message.

        Keyword arguments:
        message_name -- The name of the message
        message_structure -- The structure of the message
        message_topics -- The topics to which the message should be broadcast

        Returns:
        Bool -- False when the name is already registered
        """
        try:
            self.message_store.add_message(message_name, message_structure)
        except RuntimeError:
            return False
        self.message_register[message_name] = message_topics
        return True

    def unregister_message(self, message_name):
        """Unregister a provided message.

        Keyword arguments:
        message_name -- The name of the message to be removed
        """
        if message_name in self.message_register.keys():
            self.message_store.remove_message(message_name)
            del self.message_register[message_name]

    def send_message(self, message_name, params=None):
        """Send a new message to all of its subscribed topics.

        Keyword arguments:
        message_name -- The name of the message to be sent
        params -- Optional dict of parameters to fill into the structure

        Raises:
        KeyError -- if params contains keys absent from the structure
        RuntimeError -- if delivery to a topic fails

        Returns:
        Integer -- the id of the last packet queued (None when the
        message has no subscribed topics)
        """
        # Avoid the mutable-default-argument pitfall of the original
        # (params={}).
        if params is None:
            params = {}
        message_structure = self.__get_message_structure(message_name)
        for key in params.keys():
            if key not in message_structure.keys():
                raise KeyError("Parameter mismatch in message structure and provided params")
            else:
                message_structure[key] = params[key]
        message_packet = MessagePacket(message_structure)
        mid = None
        try:
            # Fix: broadcast to EVERY subscribed topic.  The original
            # returned from inside the loop, so only the first topic
            # ever received the message.
            for topic in self.message_register[message_name]:
                mid, packet = message_packet.get_packet()
                self.socket_server.send_message(topic, packet)
                self.message_queue.queue(mid)
        except RuntimeError:
            raise RuntimeError("Unable to send the message across the topics")
        return mid

    def __get_message_structure(self, message_name):
        """Get the message structure.

        Keyword arguments:
        message_name -- The name of the message whose structure needs to be
                        retrieved

        Returns:
        Mixed -- The message structure
        """
        return self.message_store.get_message(message_name)

    def __generic_handler(self, message):
        """Generic message handler.

        Handles incoming messages on a generic basis by marking the
        corresponding queued message as completed.

        Keyword arguments:
        message -- The incoming message object (dict-like with 'id' and
                   'result' keys)
        """
        mid = message['id']
        result = message['result']  # read for validation; currently unused
        self.message_queue.update_status(mid, 'Completed')
# push.py
#
# simple script for sending a test push notification message to device over APNs
# https://developer.apple.com/library/archive/documentation/NetworkingInternet/Conceptual/RemoteNotificationsPG/CreatingtheNotificationPayload.html#//apple_ref/doc/uid/TP40008194-CH10-SW1
import apns2
DEVICE_TOKEN = ""
def push(device_token=DEVICE_TOKEN):
    """Send a test push notification to an iOS device over APNs.

    Keyword arguments:
    device_token -- hex device token of the target device

    Fixes vs. the original:
    - removed the spurious `self` parameter (this is a module-level
      function; `push()` raised TypeError),
    - added the missing comma in the final format() call, which was a
      SyntaxError.
    """
    client = apns2.APNSClient(mode="dev", client_cert="certs/apns-dev-cert.pem")
    alert = apns2.PayloadAlert(body="some alert content", title="an alert title")
    payload = apns2.Payload(alert=alert, content_available=True)
    notification = apns2.Notification(payload=payload, priority=apns2.PRIORITY_LOW)
    response = client.push(n=notification, device_token=device_token)
    print('{0} - {1}, {2}, {3} '.format(response.timestamp, response.reason,
                                        response.status_code, response.apns_id))


if __name__ == "__main__":
    push()
#!/usr/bin/env python
# coding: utf-8
# # Buscador
#
# Esse notebook implementa um buscador simples.
# A representação pra cada texto é criada a partir da TF-IDF.
# A representação da query (consulta, ou termos buscados)
# é construída a partir do vocabulário dos textos.
# O ranqueamento dos resultados é feito de acordo com
# a semelhança cosseno da query pros textos.
#
# Há várias oportunidades de melhoria.
# Algumas delas são discutidas ao longo do notebook.
#
# Os resultados, mesmo deste buscador ingênuo,
# são bastante satisfatórios.
# O buscador é capaz de retornar leis (neste caso)
# relacionadas à localidades ou personalidades.
# No entanto, o mesmo mecanismo pode ser utilizado
# pra quaisquer outros textos, por exemplo o Diário Oficial.
# Alguns exemplos de buscas são:
#
# "winterianus" - retorna a Lei Municipal sobre citronelas;
#
# "E<NAME>" - retorna Lei Municipal que concede título de cidadão feirense;
#
# "Rua Espassonavel" - retorna Lei Municipal que cita a rua.
# In[ ]:
import numpy as np
import pandas as pd
from scripts.nlp import remove_portuguese_stopwords
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.metrics.pairwise import cosine_similarity
# In[ ]:
# Load the scraped municipal laws; drop the raw `documento` column, which is
# not used by the text analysis below.
laws = pd.read_json("leis.json")
laws.drop(["documento"], inplace=True, axis=1)
print(laws.info())
print(laws.nunique())
# In[ ]:
# Bare expression: displays the frame when run as a notebook cell (no-op as a script).
laws
# In[ ]:
# Sanity check: print the raw text of the last law in the frame.
print(laws.loc[len(laws) - 1, "texto"])
# # Buscas por texto
#
# No notebook _similar_laws_ vimos que TF-IDF encontra Leis bastante similares entre si.
# Será que conseguimos também encontrar leis similares a uma query?
#
# Primeiro, devemos construir a representação das leis com TF-IDF.
# Após termos as representações,
# limpamos o texto da consulta utilizando o mesmo método de limpeza das leis.
# Depois, criar uma representação da consulta utilizando o IDF do modelo treinado.
# Finalmente, calcular a similaridade desta consulta
# para todas as leis na base e retornar as mais próximas.
# In[ ]:
laws["texto_limpo"] = laws["texto"].apply(remove_portuguese_stopwords)
# In[ ]:
vectorizer = CountVectorizer()
X = vectorizer.fit_transform(laws["texto_limpo"])
X
# In[ ]:
transformer = TfidfTransformer()
X_tfidf = transformer.fit_transform(X)
X_tfidf
# In[ ]:
query = ["rua espassonavel"]
query[0] = remove_portuguese_stopwords(query[0])
query = vectorizer.transform(query)
query = transformer.transform(query)
# In[ ]:
best_matches = cosine_similarity(query, X_tfidf)
best_matches_idx = np.argsort(best_matches)
for i in range(1, 5):
idx = best_matches_idx[0, -i]
print(laws.loc[idx, "texto"])
print("\n---Next Result:---\n")
# Tcharam! Feito um buscador simples!
#
# Existem limitações.
# A sequência e composição das palavras é uma delas, por exemplo.
# Não adianta buscar pelo nome - sobrenome de uma pessoa.
# Ele vai retornar resultados onde
# algum destes termos sejam mais frequentes.
# Não existe as aspas do Google pra dizer
# "busque por este termo todo junto".
#
# Por exemplo, se eu buscar Elydio,
# o primeiro resultado é a Lei conferindo
# cidadania à Elydio <NAME>.
# Perfeito.
# Mas se eu buscar <NAME>,
# o primeiro resultado sequer tem Azevedo,
# mas o nome Lopes aparece mais de uma vez.
#
# Uma das formas de contornar essa dificuldade é
# usar bigramas ou n-gramas maiores.
#
# ## Outras opções
# ### Indexar
# Há outras formas de indexar os documentos
# e de recuperar, também simples.
# Uma outra forma de indexar, por exemplo,
# é fazer um vetor pra cada palavra
# contando as palavras vizinhas.
# E depois, o vetor do documento seria
# a soma dos vetores das palavras.
# É uma forma interessante porque
# pode gerar visualizações interessantes
# entre a similaridade das palavras.
# Por exemplo, no corpus das Leis Municipais,
# a quais palavras EDUCAÇÃO mais se assemelha?
# Ou SAÚDE? Etc.
#
# Outra forma é contar n-gramas - por exemplo,
# bi-gramas: duas palavras juntas formando um token.
# Dessa forma, você possui uma matriz maior
# e de certa forma uma relação entre a sequencialidade das palavras,
# que pode ser útil pra nomes de pessoas e bairros,
# como citado acima.
#
# ### Recuperar
# Outra forma de recuperar é por
# _local sensitive hashing_.
# Divide em vários planos múltiplas vezes
# e retorna os resultados que estão na mesma região da query.
# No entanto,
# o corpus não é grande o suficiente pra precisar essa estratégia,
# que é mais pra grandes corpora.
# O método acima
# (calcular a simlaridade cosseno e retornar os maiores valores)
# é rápido o suficiente pra parecer instantâneo.
# Talvez com uma demanda mais alta pelo servidor
# venha a necessidade de aumentar a velocidade da busca,
# porém por enquanto não é o caso.
#
# Há ainda um [novo método]
# (https://ai.googleblog.com/2020/07/announcing-scann-efficient-vector.html)
# e uma lib pra isso,
# lançada pelo Google recentemente,
# no dia 28 de Julho de 2020.
#
# ### Avaliação
# Com múltiplas formas de indexar e recuperar vem o dilema:
# como avaliar se uma é melhor que a outra?
# Repetir o processo acima pra todas as opções?
# Isto é, mostrar N melhores resultados e comparar manualmente?
# Ou colocar labels em algumas leis?
# Ex: essa lei trata disso, com tais entidades.
# Checar formas de avaliação.
# Se tivesse em produção,
# poderiamos avaliar por _click through rate_ (CTR) por ex,
# mas não é o caso
| 2.765625 | 3 |
supersqlite/idxchk.py | plasticity-admin/supersqlite | 687 | 12766731 | #!/usr/bin/python
'''idxchk.py - pretty print indexes used in a query
Ported to Python by <NAME> (<EMAIL>).
Requires pysqlite2, sqlite3 (comes with Python 2.5+) or apsw.
Version 1.01 2008-03-07 Fix to list index method name thanks to <NAME>.
Added sqlite3 support.
Version 1.0 2006-07-18 Initial version.
Placed in the public domain. I know no Tcl, corrections welcome.
'''
import sys
try:
from pysqlite2 import dbapi2
sqlite_connect, SQLError = dbapi2.connect, dbapi2.OperationalError
except ImportError:
try:
from sqlite3 import dbapi2
sqlite_connect, SQLError = dbapi2.connect, dbapi2.OperationalError
except ImportError:
import apsw
sqlite_connect, SQLError = apsw.Connection, apsw.SQLError
debug = False  # if true, displays SQL.
verbose = False
dbname = ''
sql = ''
# Consume option flags from argv before positional-argument handling.
if '-debug' in sys.argv:
    debug = True
    sys.argv.remove('-debug')
if '-v' in sys.argv:
    verbose = True
    sys.argv.remove('-v')
if len(sys.argv) <= 1:
    print 'usage: %s [-v] [-debug] dbfile [sqlcmds ...]' % sys.argv[0]
    print
    print ' -v verbose output: opcodes, databases, tables, cursors'
    print ' -debug show the internal SQL queries'
    print ' dbfile a valid sqlite3 database file or ":memory:"'
    print " sqlcmds one or more sql statements separated by ';'"
    print
    print 'The last sqlcmd is explained, preceeding ones are executed.'
    print 'If sqlcmds is omitted, then read sqlcmds from stdin.'
    sys.exit(1)
dbname = sys.argv[1]
# if sql parm is missing, read from stdin
if len(sys.argv) > 2:
    sql = ' '.join(sys.argv[2:]) + ' \n;\n'
else:
    sql = sys.stdin.read()
# Connect to database.
session = sqlite_connect(dbname)
def DO(sql, params={}, cur=session.cursor()):
'Run some SQL.'
if debug:
print '>>>', '\n...'.join(sql.split('\n'))
if params:
print ' %s' % params
try:
cur.execute(sql, params)
rows = []
for row in cur: # apsw doesn't support cur.fetchall()
rows.append(row)
return rows
except SQLError:
print >>sys.stderr, "BAD SQL:", sql
raise
# find the last sql statement, others are executed first
# eg, if temp tables are created and indexed, attach other db's, etc.
idxsql = ''
while len(idxsql) == 0:
    sqlcmds = sql.split(';')
    if sqlcmds:
        presql = sqlcmds[:-1]
        idxsql = sqlcmds[-1].strip()
        sql = ';'.join(presql)
    else:
        print 'no sqlcmds to explain'
        session.close()
        sys.exit(2)
# execute any pre sql first
cnt = 1
for s in presql:
    s = s.strip()
    if s:
        if verbose:
            print 'exec sql %d' % cnt
            print '----------------------------------------------------------'
            print s.replace('\n', ' ')[:50], '.....'
            print
        try:
            DO(s)
        except SQLError as e:
            print 'sql error while executing statement %d:' % cnt
            print s + '\n\nerror message:\n' + str(e)
            session.close()
            sys.exit(3)
    cnt += 1
# EXPLAIN the final statement to obtain its VDBE opcode listing.
try:
    vcode = DO('EXPLAIN ' + idxsql)
except SQLError as e:
    print 'sql error while explaining statement %d:' % cnt
    print idxsql + '\n\nerror message:\n' + str(e)
    session.close()
    sys.exit(4)
# get database names, in case the presql attached any other dbs or temp tables
if verbose:
    print 'dbnum dbname'
    print '------ ---------------------------------------------------'
dbarr = {}
for dbnum, dbname, dbfile in DO('pragma database_list'):
    dbarr[dbnum] = dbname
    if verbose:
        print '%6d %s (%s)' % (dbnum, dbname, dbfile)
# `prevint` remembers the operand of the most recent Integer opcode; an
# OpenRead immediately preceded by Integer uses it as the database number.
prevint = -1
idxtbl = {}
nesting = []
cursors = []
# collect cursors on first pass
for addr, opcode, p1, p2, p3 in vcode:
    if opcode == 'Integer':
        prevint = p1
    elif opcode == 'OpenRead':
        if prevint == -1:  # previous opcode was not Integer!
            continue
        dbnum = prevint
        if dbnum not in dbarr:
            # explained statement is probably creating a temp table
            dbarr[dbnum] = 'temp'
        if dbarr[dbnum] == 'temp':
            temp = 'temp_'
        else:
            temp = ''
        if dbarr[dbnum] != 'main' and dbarr[dbnum] != 'temp':
            dbname = dbarr[dbnum] + '.'
        else:
            dbname = ''
        if p2 == 1:  # opening sqlite_master itself, skip
            continue
        # Map the opened root page back to its schema entry.
        schemasql = '''SELECT type, name, tbl_name, rootpage
                       FROM %(dbname)ssqlite_%(temp)smaster
                       WHERE rootpage = %(p2)s''' % locals()
        type, name, tbl_name, rootpage = DO(schemasql)[0]
        cursors.append((p1, type, dbname + name, name, tbl_name))
    else:
        # reset int value, if preceeding opcode not Integer
        prevint = -1
if verbose:
    print
    print 'explain sql'
    print '----------------------------------------------------------'
    print idxsql
    print ''
    print 'opcodes'
    print '----------------------------------------------------------'
    for addr, opcode, p1, p2, p3 in vcode:
        print '%s|%s|%s|%s|%s' % (addr, opcode, p1, p2, p3)
    print
prevint = -1  # not present in the original Tcl - bug?
# Second pass: resolve each OpenRead to its table/index and record per-table
# index usage in `idxtbl`; `nesting` records table-open order.
for addr, opcode, p1, p2, p3 in vcode:
    if opcode == 'Integer':
        prevint = p1
    elif opcode == 'OpenRead':
        if prevint == -1:  # previous opcode was not Integer!
            continue
        dbnum = prevint
        if dbnum not in dbarr:
            # explained statement is probably creating a temp table
            dbarr[dbnum] = 'temp'
        if dbarr[dbnum] == 'temp':
            temp = 'temp_'
        else:
            temp = ''
        if dbarr[dbnum] != 'main' and dbarr[dbnum] != 'temp':
            dbname = dbarr[dbnum] + '.'
        else:
            dbname = ''
        schemasql = '''SELECT type, name, tbl_name, rootpage
                       FROM %(dbname)ssqlite_%(temp)smaster
                       WHERE rootpage = %(p2)s''' % locals()
        type, name, tbl_name, rootpage = DO(schemasql)[0]
        idxtab = dbname + tbl_name
        #cursors.append((p1, type, dbnamename))
        if type == 'index':
            # get info for table, all indexes, and this index
            pr_tbl_info = DO('pragma table_info(%s)' % tbl_name)
            pr_idx_list = DO('pragma index_list(%s)' % tbl_name)
            pr_idx_info = DO('pragma index_info(%s)' % name)
            cols = []
            pkcollist = []
            # sort index column names and assemble index columns
            ielems = []
            for seq, cid, iname in pr_idx_info:
                ielems.append((seq, cid, iname))
            for seq, cid, iname in sorted(ielems):
                cols.append(iname)
                pkcollist.append(iname)
            cols = '(%s)' % ','.join(cols)
            # if index itself is unique
            unique = ''
            for iseq, iname, isuniq in pr_idx_list:
                if name == iname and isuniq:
                    unique = ' UNIQUE'
                    break
            cols += unique
            # index is primary key if all pkcollist names are in table pk cols
            i = -1
            # for cid, cname, ctype, ispk in pr_tbl_info: # outdated.
            for cid, cname, ctype, notnull, dflt_value, ispk in pr_tbl_info:
                try:
                    ispk = int(ispk)
                except ValueError:
                    continue
                if ispk:
                    try:
                        i = pkcollist.index(cname)
                    except ValueError:
                        # didn't find a pk column in the list of index columns
                        break
                    # remove this column name from pkcollist
                    del pkcollist[i]
            if i >= 0 and not pkcollist:
                # found all of the table pk columns in the pkcollist
                cols += ' PRIMARY KEY'
            idxtbl[idxtab] = idxtbl.get(idxtab, [])
            idxtbl[idxtab].append((name, cols))
        elif type == 'table':
            # if not in idxtbl array, add it with empty index info
            if idxtab not in idxtbl:
                idxtbl[idxtab] = []
            if idxtab not in nesting:
                nesting.append(idxtab)
    elif opcode == 'NotExists' or opcode == 'MoveGe' or opcode == 'MoveLt':
        # check for possible primary key usage
        for cp1, ctype, ctab, cname, ctbl in cursors:
            if p1 == cp1 and ctype == 'table':
                idxtbl[ctab].append(('<pk>', '<integer primary key or rowid>'))
                break
    else:
        # reset int value, if preceeding opcode not Integer
        prevint = -1
if verbose:
    print 'table open order (probable join table nesting)'
    print '-----------------------------------------------------------'
    lev = 0
    for tab in nesting:
        print '| ' * lev + tab
        lev += 1
    if lev > 1:
        print '| ' * lev
    print
    print 'cursor type name'
    print '------ ------ ----------------------------------------------'
    for cur in cursors:
        num, type, fullname, name, tbl = cur
        print '%6d %-6.6s %s' % (num, type, fullname)
    print
# remove any duplicate indexes per each table
for tbl, idxlist in idxtbl.items():
    idxtbl[tbl] = sorted(list(set(idxlist)))
# pretty print in column format
# first, figure out column widths
len1 = 6
len2 = 10
len3 = 10
for tbl, idxlist in idxtbl.items():
    if len(tbl) > len1:
        len1 = len(tbl)
    for idx, idxdef in idxlist:
        if len(idx) > len2:
            len2 = len(idx)
        if len(idxdef) > len3:
            len3 = len(idxdef)
fmt = '%-{len1}.{len1}s %-{len2}.{len2}s %-{len3}.{len3}s'
# Substitute in for each "{lenX}" in fmt:
fmt = fmt.replace('%', '%%').replace('{', '%(').replace('}', ')s') % locals()
print fmt % ('table', 'index(es)', 'column(s)')
print fmt % ('-' * len1, '-' * len2, '-' * len3)
# now print in order of table open nesting
for tbl in nesting:
    t = tbl
    idxlist = idxtbl[tbl]
    if not idxlist:
        print fmt % (tbl, '(none)', '')
    else:
        for ientry in idxlist:
            idx, idxdef = ientry
            print fmt % (tbl, idx, idxdef)
            #tbl = ''
    # Drop printed entries so the leftover loop below only reports
    # indexes whose table was never opened.
    try:
        del idxtbl[t]
    except KeyError:
        pass
# print any other indexes where index was opened, but not table
for tbl in idxtbl:
    idxlist = idxtbl[tbl]
    if not idxlist:
        print fmt % (tbl, '(none)', '')
    else:
        for ientry in idxlist:
            idx, idxdef = ientry
            print fmt % (tbl, idx, idxdef)
            #tbl = ''
print '\nSQLite version:', DO('SELECT sqlite_version()')[0][0]
session.close()
| 2.515625 | 3 |
microdrop/plugin_manager.py | cfobel/microdrop | 17 | 12766732 | <gh_stars>10-100
from StringIO import StringIO
from collections import namedtuple
from contextlib import closing
import logging
import pprint
import sys
import traceback
from pyutilib.component.core import ExtensionPoint, PluginGlobals
# TODO Update plugins to import from `pyutilib.component.core` directly
# instead of importing from here.
from pyutilib.component.core import Plugin, SingletonPlugin, implements
import path_helpers as ph
import task_scheduler
from .interfaces import IPlugin, IWaveformGenerator, ILoggingPlugin
from logging_helpers import _L, caller_name #: .. versionadded:: 2.20
logger = logging.getLogger(__name__)
# Scheduling constraint returned by plugin `get_schedule_requests()` hooks:
# the plugin named `before` should be scheduled before the plugin named `after`.
ScheduleRequest = namedtuple('ScheduleRequest', 'before after')
def load_plugins(plugins_dir='plugins', import_from_parent=True):
    '''
    Import each Python plugin module in the specified directory and create an
    instance of each contained plugin class for which an instance has not yet
    been created.

    Parameters
    ----------
    plugins_dir : str
        Directory containing zero or more Python plugin modules to import.
    import_from_parent : bool
        Add parent of specified directory to system path and import
        ``<parent>.<module>``.

        ..notes::
            **Not recommended**, but kept as default to maintain legacy
            protocol compatibility.

    Returns
    -------
    list
        Newly created plugins (plugins are not recreated if they were
        previously loaded.)

    .. versionchanged:: 2.25
        Do not import hidden directories (i.e., name starts with ``.``).

    .. versionchanged:: 2.30
        Import from `pyutilib` submodule in plugin instead, if it exists.
    '''
    logger = _L()  # use logger with function context
    logger.info('plugins_dir=`%s`', plugins_dir)
    plugins_dir = ph.path(plugins_dir).realpath()
    logger.info('Loading plugins:')
    plugins_root = plugins_dir.parent if import_from_parent else plugins_dir
    if plugins_root not in sys.path:
        sys.path.insert(0, plugins_root)
    # Create an instance of each of the plugins, but set it to disabled
    e = PluginGlobals.env('microdrop.managed')
    initial_plugins = set(e.plugin_registry.values())
    imported_plugins = set()
    for package_i in plugins_dir.dirs():
        if package_i.isjunction() and not package_i.readlink().isdir():
            # Plugin directory is a junction/link to a non-existent target
            # path.
            logger.info('Skip import of `%s` (broken link to `%s`).',
                        package_i.name, package_i.readlink())
            continue
        elif package_i.name in (p.__module__.split('.')[0]
                                for p in initial_plugins):
            # Plugin with the same name has already been imported.
            logger.info('Skip import of `%s` (plugin with same name has '
                        'already been imported).', package_i.name)
            continue
        elif package_i.name.startswith('.'):
            logger.info('Skip import of hidden directory `%s`.',
                        package_i.name)
            continue
        try:
            plugin_module = package_i.name
            if package_i.joinpath('pyutilib.py').isfile():
                plugin_module = '.'.join([plugin_module, 'pyutilib'])
            if import_from_parent:
                plugin_module = '.'.join([plugins_dir.name, plugin_module])
            import_statement = 'import {}'.format(plugin_module)
            logger.debug(import_statement)
            # Importing the module registers its plugin class(es) in the
            # `pyutilib` registry as a side effect; the newly-registered
            # class is recovered by diffing the registry before/after.
            exec(import_statement)
            all_plugins = set(e.plugin_registry.values())
            current_plugin = list(all_plugins - initial_plugins -
                                  imported_plugins)[0]
            logger.info('\t Imported: %s (%s)', current_plugin.__name__,
                        package_i)
            imported_plugins.add(current_plugin)
        except Exception:
            map(logger.info, traceback.format_exc().splitlines())
            logger.error('Error loading %s plugin.', package_i.name,
                         exc_info=True)
    # For each newly imported plugin class, create a service instance
    # initialized to the disabled state.
    new_plugins = []
    for class_ in imported_plugins:
        service = class_()
        service.disable()
        new_plugins.append(service)
    logger.debug('\t Created new plugin services: %s',
                 ','.join([p.__class__.__name__ for p in new_plugins]))
    return new_plugins
def log_summary():
    '''
    Dump summary of registered plugins to the log.

    Logs one section per plugin interface, listing every registered
    observer (same output and order as the previous hand-unrolled version).
    '''
    # (interface, section header) pairs, in display order.
    sections = ((IPlugin, 'Registered plugins:'),
                (IWaveformGenerator, 'Registered function generator plugins:'),
                (ILoggingPlugin, 'Registered logging plugins:'))
    for interface, header in sections:
        logging.info(header)
        for observer in ExtensionPoint(interface):
            logging.info('\t %s' % observer)
def get_plugin_names(env=None):
    '''
    List the names of all plugin classes registered in an environment.

    Parameters
    ----------
    env : str, optional
        Name of ``pyutilib.component.core`` plugin environment (e.g.,
        ``'microdrop.managed``').  Defaults to ``'pca'`` when not given.

    Returns
    -------
    list(str)
        Registered plugin class names (e.g., ``['StepLabelPlugin', ...]``).
    '''
    environment = PluginGlobals.env('pca' if env is None else env)
    return list(environment.plugin_registry.keys())
def get_service_class(name, env='microdrop.managed'):
    '''
    Look up a registered plugin class by its class name.

    Parameters
    ----------
    name : str
        Plugin class name (e.g., ``App``).
    env : str, optional
        Name of ``pyutilib.component.core`` plugin environment (e.g.,
        ``'microdrop.managed``').

    Returns
    -------
    class
        Class type registered under ``name`` (**not** a service instance).

    Raises
    ------
    KeyError
        If no plugin class is registered under ``name``.
    '''
    registry = PluginGlobals.env(env).plugin_registry
    try:
        return registry[name]
    except KeyError:
        raise KeyError('No plugin registered with name: %s' % name)
def get_service_instance_by_name(name, env='microdrop.managed'):
    '''
    Find the active service instance whose ``name`` attribute matches.

    Parameters
    ----------
    name : str
        Plugin name (e.g., ``microdrop.zmq_hub_plugin``).  Corresponds to
        ``plugin_name`` key in plugin ``properties.yml`` file.
    env : str, optional
        Name of ``pyutilib.component.core`` plugin environment (e.g.,
        ``'microdrop.managed``').

    Returns
    -------
    object
        First active service instance matching the specified plugin name.

    Raises
    ------
    KeyError
        If no plugin is found registered with the specified name.
    '''
    for service in PluginGlobals.env(env).services:
        if service.name == name:
            return service
    raise KeyError('No plugin registered with name: %s' % name)
def get_service_instance_by_package_name(name, env='microdrop.managed'):
    '''
    Find the active service instance belonging to a plugin Python package.

    Parameters
    ----------
    name : str
        Plugin Python module name (e.g., ``dmf_control_board_plugin``).
        Corresponds to ``package_name`` key in plugin ``properties.yml`` file.
    env : str, optional
        Name of ``pyutilib.component.core`` plugin environment (e.g.,
        ``'microdrop.managed``').

    Returns
    -------
    object
        First active service instance whose class was defined in a module
        whose root package matches ``name``.

    Raises
    ------
    KeyError
        If no registered service matches the package name.
    '''
    for service in PluginGlobals.env(env).services:
        root_module = service.__class__.__module__.split('.')[0]
        if get_plugin_package_name(root_module) == name:
            return service
    raise KeyError('No plugin registered with package name: %s' % name)
def get_plugin_package_name(module_name):
    '''
    Extract the relative (final) component of a dotted module name.

    Parameters
    ----------
    module_name : str
        Fully-qualified module name (e.g.,
        ``'plugins.dmf_control_board_plugin'``).

    Returns
    -------
    str
        Final dotted component (e.g., ``'dmf_control_board_plugin'``).
    '''
    # ``rpartition`` puts the whole string in the last slot when there is no
    # dot, matching the behaviour of ``module_name.split('.')[-1]``.
    return module_name.rpartition('.')[2]
def get_service_instance(class_, env='microdrop.managed'):
    '''
    Find the registered service instance of a given plugin class type.

    Parameters
    ----------
    class_ : class
        Plugin class type.
    env : str, optional
        Name of ``pyutilib.component.core`` plugin environment (e.g.,
        ``'microdrop.managed``').

    Returns
    -------
    object or None
        First registered service that is an instance of ``class_``, or
        ``None`` if no such service is registered.
    '''
    matches = (service for service in PluginGlobals.env(env).services
               if isinstance(service, class_))
    return next(matches, None)
def get_service_names(env='microdrop.managed'):
    '''
    List the service names of all registered plugins with live instances.

    Parameters
    ----------
    env : str, optional
        Name of ``pyutilib.component.core`` plugin environment (e.g.,
        ``'microdrop.managed``').

    Returns
    -------
    list
        Plugin service names (e.g., ``['microdrop.step_label_plugin', ...]``).
        Registry entries without a matching live instance are skipped (with a
        warning).
    '''
    environment = PluginGlobals.env(env)
    names = []
    for class_name in get_plugin_names(env):
        service = get_service_instance(environment.plugin_registry[class_name],
                                       env=env)
        if service is None:
            _L().warn('Plugin `%s` exists in registry, but instance cannot '
                      'be found.', class_name)
        else:
            names.append(service.name)
    return names
def get_schedule(observers, function):
    '''
    Generate observer order based on scheduling requests for specified
    function.

    Parameters
    ----------
    observers : dict
        Mapping from service names to service instances.
    function : str
        Name of function to generate schedule for.

    Returns
    -------
    list
        List of observer service names in scheduled order.
    '''
    logger = _L()  # use logger with function context
    # Query plugins for schedule requests for 'function'
    schedule_requests = {}
    for observer in observers.values():
        if hasattr(observer, 'get_schedule_requests'):
            schedule_requests[observer.name] =\
                observer.get_schedule_requests(function)
    if schedule_requests:
        # Feed every (before, after) request into the task scheduler;
        # requests that conflict with already-applied ordering are logged
        # and skipped rather than aborting the schedule.
        scheduler = task_scheduler.TaskScheduler(observers.keys())
        for request in [r for name, requests in schedule_requests.items()
                        for r in requests]:
            try:
                scheduler.request_order(*request)
            except AssertionError:
                logger.debug('Schedule requests for `%s`', function)
                map(logger.debug,
                    pprint.pformat(schedule_requests).splitlines())
                logger.info('emit_signal(%s) could not add schedule request '
                            '%s', function, request)
                continue
        return scheduler.get_schedule()
    else:
        # No constraints: fall back to registration order.
        return observers.keys()
def get_observers(function, interface=IPlugin):
    '''
    Collect all plugins on ``interface`` that implement ``function``.

    Parameters
    ----------
    function : str
        Name of the plugin hook/function of interest.
    interface : class, optional
        Plugin interface class.

    Returns
    -------
    dict
        Mapping from service names to service instances.
    '''
    return {observer.name: observer
            for observer in ExtensionPoint(interface)
            if hasattr(observer, function)}
def emit_signal(function, args=None, interface=IPlugin):
    '''
    Call specified function on each enabled plugin implementing the function
    and collect results.

    Parameters
    ----------
    function : str
        Name of function to generate schedule for.
    args : object or list, optional
        Arguments forwarded to each observer; a non-list value is wrapped in
        a single-element list.
    interface : class, optional
        Plugin interface class.

    Returns
    -------
    dict
        Mapping from each service name to the respective function return value.

    .. versionchanged:: 2.20
        Log caller at info level, and log args and observers at debug level.
    '''
    logger = _L()  # use logger with function context
    # Walk up the call stack past this function's own frames to find the
    # real caller (for logging only).
    i = 0
    caller = caller_name(skip=i)
    while not caller or caller == 'microdrop.plugin_manager.emit_signal':
        i += 1
        caller = caller_name(skip=i)
    try:
        observers = get_observers(function, interface)
        schedule = get_schedule(observers, function)
        return_codes = {}
        if args is None:
            args = []
        elif not isinstance(args, list):
            args = [args]
        if not any((name in caller) for name in ('logger', 'emit_signal')):
            logger.debug('caller: %s -> %s', caller, function)
            if logger.getEffectiveLevel() <= logging.DEBUG:
                logger.debug('args: (%s)', ', '.join(map(repr, args)))
        for observer_name in schedule:
            observer = observers[observer_name]
            try:
                f = getattr(observer, function)
                logger.debug(' call: %s.%s(...)', observer.name, function)
                return_codes[observer.name] = f(*args)
            except Exception, why:
                with closing(StringIO()) as message:
                    if hasattr(observer, "name"):
                        if interface == ILoggingPlugin:
                            # If this is a logging plugin, do not try to log
                            # since that will result in infinite recursion.
                            # Instead, just continue onto the next plugin.
                            continue
                        print >> message, \
                            '%s plugin crashed processing %s signal.' % \
                            (observer.name, function)
                    print >> message, 'Reason:', str(why)
                    logger.error(message.getvalue().strip())
                map(logger.info, traceback.format_exc().splitlines())
        return return_codes
    except Exception, why:
        # Never let a broadcast failure propagate to the caller.
        logger.error(why, exc_info=True)
        return {}
def enable(name, env='microdrop.managed'):
    '''
    Enable the plugin registered under ``name`` and notify listeners.

    Parameters
    ----------
    name : str
        Plugin name (e.g., ``microdrop.zmq_hub_plugin``).
        Corresponds to ``plugin_name`` key in plugin ``properties.yml`` file.
    env : str, optional
        Name of ``pyutilib.component.core`` plugin environment (e.g.,
        ``'microdrop.managed``').
    '''
    service = get_service_instance_by_name(name, env)
    if not service.enabled():
        service.enable()
        _L().info('[PluginManager] Enabled plugin: %s', name)
    # The plugin's own hook and the broadcast fire even if the plugin was
    # already enabled (unchanged behaviour).
    on_enable = getattr(service, 'on_plugin_enable', None)
    if on_enable is not None:
        on_enable()
    emit_signal('on_plugin_enabled', [env, service])
def disable(name, env='microdrop.managed'):
    '''
    Disable specified plugin.

    Parameters
    ----------
    name : str
        Plugin name (e.g., ``microdrop.zmq_hub_plugin``).
        Corresponds to ``plugin_name`` key in plugin ``properties.yml`` file.
    env : str, optional
        Name of ``pyutilib.component.core`` plugin environment (e.g.,
        ``'microdrop.managed``').
    '''
    service = get_service_instance_by_name(name, env)
    if service and service.enabled():
        service.disable()
        if hasattr(service, "on_plugin_disable"):
            service.on_plugin_disable()
        emit_signal('on_plugin_disabled', [env, service])
        # Use the context-aware logger with lazy %-args for consistency with
        # `enable()` (previously used the bare `logging.info`).
        _L().info('[PluginManager] Disabled plugin: %s', name)
def connect_pyutilib_signal(signals, signal, *args, **kwargs):
    '''
    Connect pyutilib callbacks to corresponding signal in blinker namespace.

    Allows code to be written using blinker signals for easier testing outside
    of MicroDrop, while maintaining compatibility with pyutilib.

    Parameters
    ----------
    signals : blinker.Namespace
    signal : str
        Pyutilib signal name.
    *args, **kwargs
        Arguments passed to `pyutilib.component.core.ExtensionPoint()`

    Example
    -------
    >>> from microdrop.interfaces import IPlugin
    >>> import microdrop.app
    >>>
    >>> signals = blinker.Namespace()
    >>> signal = 'get_schedule_requests'
    >>> args = ('on_plugin_enable', )
    >>> connect_pyutilib_signal(signals, signal, IPlugin)
    >>> signals.signal(signal).send(*args)
    '''
    import functools as ft
    import inspect

    from microdrop.plugin_manager import ExtensionPoint

    def _wrap(callback):
        # Bind ``callback`` through a factory so each wrapper keeps its own
        # callback.  Closing over the loop variable directly (as the previous
        # implementation did) left *every* zero-argument wrapper invoking the
        # last callback in the loop, due to late binding.
        @ft.wraps(callback)
        def _callback(*cb_args, **cb_kwargs):
            return callback()
        return _callback

    callbacks = [getattr(p, signal) for p in ExtensionPoint(*args, **kwargs)
                 if hasattr(p, signal)]

    for callback_i in callbacks:
        if len(inspect.getargspec(callback_i)[0]) < 2:
            # Blinker signals require _at least_ one argument (assumed to be
            # sender).  Wrap pyutilib signals without any arguments to make
            # them work with blinker.
            _callback = _wrap(callback_i)
        else:
            _callback = callback_i
        signals.signal(signal).connect(_callback, weak=False)
# NOTE(review): pops the current `pyutilib` plugin environment — presumably
# the one pushed while the plugin interfaces above were declared; confirm
# against `microdrop.interfaces` before changing.
PluginGlobals.pop_env()
| 1.8125 | 2 |
tests/estimator/classifier/Classifier.py | mathewdgardner/sklearn-porter | 1 | 12766733 | # -*- coding: utf-8 -*-
import os
import numpy as np
import subprocess as subp
from sklearn.datasets import load_iris
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_digits
from sklearn.utils import shuffle
from tests.utils.Timer import Timer
from tests.estimator.classifier.SeparatedData import SeparatedData
class Classifier(Timer, SeparatedData):
    """Shared fixture logic for estimator test cases.

    Provides dataset loaders (breast cancer, iris, digits) and
    environment-driven configuration of how many feature sets to exercise.
    """

    # Number of random / existing feature sets to test with; both may be
    # overridden via identically-named environment variables.
    N_RANDOM_FEATURE_SETS = 30
    N_EXISTING_FEATURE_SETS = 30

    def setUp(self):
        np.random.seed(5)
        self._init_env()
        self._start_test()

    def tearDown(self):
        self._clear_estimator()
        self._stop_test()

    def _init_env(self):
        # Allow positive integer overrides of the feature-set counts from the
        # environment; anything non-numeric or non-positive is ignored.
        for param in ['N_RANDOM_FEATURE_SETS', 'N_EXISTING_FEATURE_SETS']:
            n = os.environ.get(param, None)
            if n is not None and str(n).strip().isdigit():
                n = int(n)
                if n > 0:
                    self.__setattr__(param, n)

    def _load_dataset(self, samples, shuffled):
        # Shuffle features and targets *together* so each row keeps its
        # label.  (The previous implementation called `shuffle()` separately
        # on X and y, permuting them independently and misaligning labels.)
        if shuffled:
            self.X, self.y = shuffle(samples.data, samples.target)
        else:
            self.X, self.y = samples.data, samples.target
        self.n_features = len(self.X[0])

    def load_binary_data(self, shuffled=True):
        self._load_dataset(load_breast_cancer(), shuffled)

    def load_iris_data(self, shuffled=True):
        self._load_dataset(load_iris(), shuffled)

    def load_digits_data(self, shuffled=True):
        self._load_dataset(load_digits(), shuffled)

    def _clear_estimator(self):
        self.estimator = None
        # Remove artifacts generated by previous test runs.
        cmd = 'rm -rf tmp'.split()
        subp.call(cmd)
| 2.484375 | 2 |
code/2_process_hearings/2_4_group_speeches.py | ianpcook/info_transmission | 0 | 12766734 | <reponame>ianpcook/info_transmission
import collections
import csv
import os
import re
def read_data(src='./data/hearing_metadata_2.csv'):
    """Yield one-element ``[filename]`` lists from the hearing metadata CSV."""
    for record in csv.DictReader(file(src, 'rU')):
        yield [record['filename']]
class SpeakerCounter(object):
    """Incremental transcript parser that groups lines into per-speaker speeches.

    Feed lines one at a time to :meth:`parse_line`; accumulated paragraphs are
    available in :attr:`speeches` (a ``defaultdict`` mapping speaker name to a
    list of stripped paragraphs).
    """

    # Punctuation that can legitimately end a sentence/paragraph; a speaker
    # line is only recognised when the previous sentence has finished.
    _SENTENCE_ENDINGS = ('.', '!', '?', '--', ']', "''")

    def __init__(self, debug=False):
        self.debug = debug
        self.current_speaker = None
        self.speeches = collections.defaultdict(list)
        # A speaker line: leading whitespace, a title, a name, then a period
        # and the first sentence of the speech.
        self.speaker_re = re.compile(
            r"\s+(?P<speaker>"
            r"(Senator|Representative|Secretary|Chairman|Chairwoman|Dr\.|Mr\.|Mrs\.|Ms\.|Rev\.|Col\.|Maj\.|Maj\. ?Gen|Gen\.|Sgt\.|Lt\. ?Col)"
            r" \w+[\- ']?\w*)\.(?P<speech>.+)")
        self.sentence_finished = True

    def parse_line(self, line):
        """Consume one transcript line, updating the parser state."""
        if not line.strip():
            # Blank line: the current speech is definitely over.
            self.current_speaker = None
            self.sentence_finished = True
            return
        match = self.speaker_re.match(line)
        if match and self.sentence_finished:
            # A new speaker takes the floor.
            self.current_speaker = match.group('speaker')
            self.speeches[self.current_speaker].append(
                match.group('speech').strip())
        elif self.current_speaker is not None:
            # Continuation paragraph of the current speaker's speech.
            self.speeches[self.current_speaker].append(line.strip())
        self.sentence_finished = line.rstrip().endswith(self._SENTENCE_ENDINGS)
def parse_file(prefix, filename, debug=False):
    """Parse one hearing transcript file; return its speaker -> paragraphs map."""
    path = os.path.join(prefix, filename)
    parser = SpeakerCounter(debug)
    for line in file(path):
        parser.parse_line(line)
    if debug:
        # Pretty-print every collected speech (or a placeholder when empty).
        result = '\n'.join(
            '{}: {}'.format(*items) for items in parser.speeches.iteritems())
        print result or 'No speakers found'
    return parser.speeches
def store_data(writer, data, filename):
    """Write one CSV row per speaker, joining their paragraphs with spaces."""
    for speaker, speech in data.iteritems():
        writer.writerow({
            'filename': filename, 'speaker': speaker, 'speech': ' '.join(speech)})
if __name__ == '__main__':
import sys
base_dir = os.path.join(
os.path.dirname(__file__),
'../../data/sample_hearings' if '--sample' in sys.argv else
'../../data/clean_hearings_flat')
files = os.listdir(base_dir)
allowed_names = [
arg for arg in sys.argv[1:] if arg not in ('--debug', '--sample')
] if len(sys.argv) > 1 else None
debug = '--debug' in sys.argv
if not debug:
writer = csv.DictWriter(
file('./data/speeches.csv', 'w'),
['filename', 'speaker', 'speech'])
writer.writeheader()
for (filename,) in read_data():
if allowed_names and filename not in allowed_names:
continue
if debug:
print 'Processing', filename
try:
data = parse_file(base_dir, filename, debug)
except IOError:
print filename, 'can\'t be read'
if not debug:
store_data(writer, data, filename)
if debug:
print 'Done!'
| 3.015625 | 3 |
setup.py | rbshaffer/Constitute_Tools | 1 | 12766735 | <gh_stars>1-10
from distutils.core import setup
# Package metadata for distribution.
# NOTE(review): `distutils` is deprecated and removed in modern Python;
# consider migrating to `setuptools.setup` — confirm the supported Python
# versions first.
setup(
    name='constitute_tools',
    version='2.0',
    packages=['constitute_tools'],
    url='https://www.constituteproject.org/',
    license='MIT',
    author='<NAME>',
    author_email='<EMAIL>',
    description='Auxiliary tools for the Constitute backend.'
)
| 1.179688 | 1 |
test/composite/test_softmax.py | Whadup/pytorch_scatter | 0 | 12766736 | <filename>test/composite/test_softmax.py
from itertools import product
import pytest
import torch
from torch_scatter.composite import scatter_log_softmax, scatter_softmax
from test.utils import devices, tensor, grad_dtypes
@pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices))
def test_softmax(dtype, device):
    """scatter_softmax must match per-group torch.softmax results."""
    raw = tensor([0.2, 0, 0.2, -2.1, 3.2, 7, -1, float('-inf')], dtype, device)
    idx = tensor([0, 1, 0, 1, 1, 2, 4, 4], torch.long, device)

    result = scatter_softmax(raw, idx)

    # Reference: softmax computed independently over each index group.
    group0 = torch.softmax(torch.tensor([0.2, 0.2], dtype=dtype), dim=-1)
    group1 = torch.softmax(torch.tensor([0, -2.1, 3.2], dtype=dtype), dim=-1)
    group2 = torch.softmax(torch.tensor([7], dtype=dtype), dim=-1)
    group4 = torch.softmax(torch.tensor([-1, float('-inf')], dtype=dtype),
                           dim=-1)

    # Re-scatter the group results back into the original element order.
    expected = torch.stack([
        group0[0], group1[0], group0[1], group1[1], group1[2], group2[0],
        group4[0], group4[1]
    ], dim=0)

    assert torch.allclose(result, expected)
assert torch.allclose(out, expected)
@pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices))
def test_log_softmax(dtype, device):
    """scatter_log_softmax must match per-group torch.log_softmax results."""
    raw = tensor([0.2, 0, 0.2, -2.1, 3.2, 7, -1, float('-inf')], dtype, device)
    idx = tensor([0, 1, 0, 1, 1, 2, 4, 4], torch.long, device)

    result = scatter_log_softmax(raw, idx)

    # Reference: log-softmax computed independently over each index group.
    group0 = torch.log_softmax(torch.tensor([0.2, 0.2], dtype=dtype), dim=-1)
    group1 = torch.log_softmax(torch.tensor([0, -2.1, 3.2], dtype=dtype),
                               dim=-1)
    group2 = torch.log_softmax(torch.tensor([7], dtype=dtype), dim=-1)
    group4 = torch.log_softmax(torch.tensor([-1, float('-inf')], dtype=dtype),
                               dim=-1)

    # Re-scatter the group results back into the original element order.
    expected = torch.stack([
        group0[0], group1[0], group0[1], group1[1], group1[2], group2[0],
        group4[0], group4[1]
    ], dim=0)

    assert torch.allclose(result, expected)
| 2.21875 | 2 |
somaticseq/utilities/attach_pileupVAF.py | bioinform/somaticseq | 159 | 12766737 | <filename>somaticseq/utilities/attach_pileupVAF.py
#!/usr/bin/env python3
# Supports Insertion/Deletion as well as SNVs
# Last updated: 8/29/2015
import math, argparse, sys, os, gzip
import re
import somaticseq.genomicFileHandler.genomic_file_handlers as genome
import somaticseq.genomicFileHandler.pileup_reader as pileup
nan = float('nan')
inf = float('inf')
# Command-line interface: attach pileup-derived VAF/DP4 annotations to a
# tumor-only or tumor-normal VCF.
parser = argparse.ArgumentParser(description='Given either a tumor-only or tumor-normal VCF file (requires SAMPLE NAME specified), and pileup file, it will attach VAF calculated from pileup file to the VCF file. The pileup file can also be streamed in.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-myvcf', '--my-vcf-file', type=str, help='My VCF', required=True, default=None)
parser.add_argument('-normal', '--normal-sample-name', type=str, help='Normal Sample Name', required=False, default='NORMAL')
parser.add_argument('-tumor', '--tumor-sample-name', type=str, help='Tumor Sample Name', required=False, default='TUMOR')
parser.add_argument('-Npileup', '--normal-pileup-file', type=str, help='Normal VCF File', required=False, default=None)
parser.add_argument('-Tpileup', '--tumor-pileup-file', type=str, help='Tumor VCF File', required=True)
parser.add_argument('-fai', '--reference-fasta-fai', type=str, help='Use the fasta.fai file to get the valid contigs', required=False, default=None)
parser.add_argument('-dict', '--reference-fasta-dict', type=str, help='Use the reference dict file to get the valid contigs', required=False, default=None)
# From pileup:
parser.add_argument('-plVAF', '--pileup-variant-allele-frequency', action='store_true', help='Variant Allele Frequency calculated from pileup file', required=False)
parser.add_argument('-plDP4', '--pileup-DP4', action='store_true', help='DP4 from pileup file', required=False)
# output file
parser.add_argument('-outfile', '--output-file', type=str, help='Output File Name', required=True)
args = parser.parse_args()
## Bind parsed arguments to short local names used throughout the script.
my_vcf = args.my_vcf_file
Tpileup = args.tumor_pileup_file
Npileup = args.normal_pileup_file
tumor_name = args.tumor_sample_name
normal_name = args.normal_sample_name
fai_file = args.reference_fasta_fai
dict_file = args.reference_fasta_dict
outfile = args.output_file
# (re-binds the identical module-level `nan` above; harmless duplicate)
nan = float('nan')
#### Append headers according to user selection ####
# header_append: extra ##FORMAT header lines for the output VCF.
# format_append: corresponding keys appended to each record's FORMAT column.
header_append = []
format_append = []
if args.pileup_DP4:
    header_append.append('##FORMAT=<ID=plDP4,Number=4,Type=Integer,Description="DP4 from pileup: ref forward, ref reverse, alt forward, alt reverse">')
    format_append.append('plDP4')
if args.pileup_variant_allele_frequency:
    header_append.append('##FORMAT=<ID=plVAF,Number=1,Type=Float,Description="Variant allele frequency calculated from pileup">')
    format_append.append('plVAF')
# Start Working by opening files:
try:
my_vcf = genome.open_textfile(my_vcf)
Tpileup = genome.open_textfile(Tpileup)
outhandle = open(outfile, 'w')
Npileup = genome.open_textfile(Npileup)
except AttributeError:
pass
if Npileup:
npileup_line = Npileup.readline().rstrip('\n')
if Tpileup:
tpileup_line = Tpileup.readline().rstrip('\n')
# Add the extra headers:
out_vcf_headers = genome.vcf_header_modifier( my_vcf, addons=header_append )
# Find out where the tumor and normal samples are in the vcf files, i.e., which column.
# Then, Assuming there are two sample names in "my vcf," the one that appears first should have an index of 0, and the next one is 1:
main_header = out_vcf_headers[3].split('\t')
vcf_idxT = main_header.index(tumor_name)
idxT = vcf_idxT - 9
try:
vcf_idxN = main_header.index(normal_name)
idxN = vcf_idxN - 9
except ValueError:
vcf_idxN = None
idxN = None
# Write the headers to the output vcf file:
outhandle.write(out_vcf_headers[0] + '\n') ##fileformat=VCFv4.1
[ outhandle.write(out_i + '\n') for out_i in out_vcf_headers[1] ]
[ outhandle.write(out_i + '\n') for out_i in out_vcf_headers[2] ]
outhandle.write(out_vcf_headers[3] + '\n') #CHROM...
# Convert contig_sequence to chrom_seq dict:
if dict_file:
    chrom_seq = genome.faiordict2contigorder(dict_file, 'dict')
elif fai_file:
    chrom_seq = genome.faiordict2contigorder(fai_file, 'fai')
else:
    raise Exception('I need a fai or dict file, or else I do not know the contig order.')
# Regex matching "CONTIG\tPOSITION" for any valid contig, used to extract a
# record's coordinate from each VCF line.
pattern_chrom = r'|'.join(chrom_seq)
r_chrom = r'(' + pattern_chrom + r')'
pattern_chr_position = r_chrom + r'\t[0-9]+'
# Figure out the order of NORMAL and TUMOR:
# external_pileups = [list of pileup handles, list of their current lines],
# arranged in the same order as the sample columns of the input VCF.
if idxN is not None:
    if Npileup and idxN == 0:
        external_pileups = [ [Npileup, Tpileup], [npileup_line, tpileup_line] ]
    # BUG FIX: this branch previously tested the undefined name `idx`
    # (NameError when NORMAL is the second sample column); it must be idxN.
    elif Npileup and idxN == 1:
        external_pileups = [ [Tpileup, Npileup], [tpileup_line, npileup_line] ]
    elif not Npileup:
        external_pileups = [ [Tpileup], [tpileup_line] ]
else:
    external_pileups = [ [Tpileup], [tpileup_line] ]
# Main loop: walk the input VCF record by record, catch up each pileup stream
# to the same coordinate, compute plDP4/plVAF and write the annotated record.
line_i = my_vcf.readline().rstrip('\n')
while line_i:
    my_coordinate = re.search( pattern_chr_position, line_i )
    if my_coordinate:
        my_coordinate = my_coordinate.group()
    else:
        print(line_i, file=sys.stderr)
        raise Exception('Your VCF file has a contig that does not exist.')
    # my_vcf:
    vcf_i = genome.Vcf_line( line_i )
    # Modify the FORMAT column:
    field_items = vcf_i.get_sample_variable()
    field_items.extend( format_append )
    field_format_line = ':'.join( field_items )
    ###########################################################################################
    ###################### Find the same coordinate in the pileup file ########################
    # Line up the order of reading the two files the same order as the sample columns in my_vcf:
    samples_collect = []
    for SM_idx,current_vcf in enumerate( external_pileups[0] ):
        latest_pileup_run = genome.catchup(my_coordinate, external_pileups[1][SM_idx], current_vcf, chrom_seq)
        latest_sample = pileup.Pileup_line(latest_pileup_run[1])
        sample_append = []
        # If the position exists in this samtools generated vcf file:
        if latest_pileup_run[0]:
            assert vcf_i.position == latest_sample.position
            # Figure out alternate pattern:
            first_alt_call = vcf_i.altbase.split(',')[0]
            base_calls = latest_sample.base_reads()
            if base_calls:
                # SNV
                if len(first_alt_call) == len(vcf_i.refbase):
                    ref_for, ref_rev, alt_for, alt_rev = base_calls[0], base_calls[1], base_calls[2].count(first_alt_call.upper()), base_calls[3].count(first_alt_call.lower())
                # Insertion:
                elif len(first_alt_call) > len(vcf_i.refbase):
                    inserted = first_alt_call[ len(vcf_i.refbase):: ]
                    ref_for, ref_rev, alt_for, alt_rev = base_calls[0], base_calls[1], base_calls[6].count(inserted.upper()), base_calls[7].count(inserted.lower())
                # Deletion:
                elif len(first_alt_call) < len(vcf_i.refbase):
                    deleted = vcf_i.refbase[ len(first_alt_call) :: ]
                    ref_for, ref_rev, alt_for, alt_rev = base_calls[0], base_calls[1], base_calls[4].count(deleted.upper()), base_calls[5].count(deleted.lower())
            else:
                ref_for = ref_rev = alt_for = alt_rev = 0
            ### Pre-defined material ###
            ### If user wants DP4 ###
            if args.pileup_DP4:
                pl_DP4 = '{},{},{},{}'.format( ref_for, ref_rev, alt_for, alt_rev )
                sample_append.append( pl_DP4 )
            ### If user wants VAF ###
            if args.pileup_variant_allele_frequency:
                try:
                    pl_vaf = ( alt_for + alt_rev ) / ( alt_for + alt_rev + ref_for + ref_rev )
                except ZeroDivisionError:
                    pl_vaf = 0
                pl_vaf = '%.3g' % pl_vaf
                sample_append.append( pl_vaf )
            # Reset the current line:
            sample_items = list( vcf_i.get_sample_item(idx=SM_idx, out_type='l')[1] )
            sample_items.extend( sample_append )
            sample_out = ':'.join( sample_items )
            # Reset the current line:
            external_pileups[1][SM_idx] = latest_sample.pileup_line
            # New format and sample columns:
            samples_collect.append( sample_out )
        # If the position does not exist in pileup file:
        else:
            # Pad the new FORMAT keys with missing-value placeholders.
            sample_items = list( vcf_i.get_sample_item(idx=SM_idx, out_type='l')[1] )
            sample_append = ['.' if i!='plDP4' else '.,.,.,.' for i in format_append ]
            sample_items.extend( sample_append )
            sample_out = ':'.join( sample_items )
            samples_collect.append( sample_out )
            external_pileups[1][SM_idx] = latest_sample.pileup_line
    ### Write out will have a few different possible situations ###
    # If NORMAL and TUMOR both exist in the designated VCF file:
    if vcf_idxT and vcf_idxN:
        # But the Nvcf is not supplied, modified the NORMAL column to reflect change in FORMAT column:
        if not Npileup:
            normal_items = list( vcf_i.get_sample_item(idx=idxN, out_type='l')[1] )
            extra_normal_items = ['.' if i!='plDP4' else '.,.,.,.' for i in format_append ]
            # BUG FIX: previously only the placeholder fields were joined,
            # silently dropping every original NORMAL sample field (the
            # parallel "position missing" branch above shows the intent).
            normal_items.extend( extra_normal_items )
            normal_out = ':'.join( normal_items )
            samples_collect.append( normal_out )
        # Write out:
        out_i = '\t'.join(( vcf_i.chromosome, str(vcf_i.position), vcf_i.identifier, vcf_i.refbase, vcf_i.altbase, vcf_i.qual, vcf_i.filters, vcf_i.info, field_format_line, samples_collect[0], samples_collect[1] ))
        outhandle.write( out_i + '\n' )
    # Only TUMOR exists in the designated VCF file:
    if not vcf_idxN:
        # Write out:
        out_i = '\t'.join(( vcf_i.chromosome, str(vcf_i.position), vcf_i.identifier, vcf_i.refbase, vcf_i.altbase, vcf_i.qual, vcf_i.filters, vcf_i.info, field_format_line, samples_collect[0] ))
        outhandle.write( out_i + '\n' )
    # Read the next line in the designated VCF file:
    line_i = my_vcf.readline().rstrip('\n')
# Close files:
my_vcf.close()
Tpileup.close()
outhandle.close()
if Npileup != None:
    Npileup.close()
| 2.40625 | 2 |
tests/test_sample.py | whs2k/tweetCarousel | 0 | 12766738 | <filename>tests/test_sample.py
def add_up(nums):
    """Return the sum of all numbers in *nums* (0 for an empty iterable)."""
    total = 0
    for value in nums:
        total += value
    return total
def test_answer():
    """add_up should total a small list of integers."""
    assert add_up([1,2,2]) == 5
opencv_practicas/leerVideos.py | WilsonOviedo/AI-practica | 0 | 12766739 | import cv2 as cv
# Play a video file frame by frame; press 'd' to stop early.
capture = cv.VideoCapture('opencv_practicas/VID_20200312_162550.mp4')

while True:
    # read() returns (False, None) once the stream ends or a frame fails.
    isTrue, frame = capture.read()
    # BUG FIX: without this check, cv.imshow(None) crashes at end of video
    # instead of exiting cleanly.
    if not isTrue:
        break
    cv.imshow('video', frame)
    if cv.waitKey(20) &0xFF==ord('d'):
        break

capture.release()
cv.destroyAllWindows()
tests/test_repos.py | baricadr/baricadr | 1 | 12766740 | <gh_stars>1-10
import os
import tempfile
import pytest
from . import BaricadrTestCase
class TestRepos(BaricadrTestCase):
    """Validation tests for the repository configuration parser.

    Each test feeds ``app.repos.do_read_conf`` / ``read_conf_from_str`` a
    configuration (a Python dict rendered with ``str``) and checks that
    invalid configurations raise ``ValueError`` while valid ones are parsed
    into repo objects with the expected attributes.
    """

    def test_get_empty(self, app):
        """An empty configuration is rejected."""
        conf = {
        }
        with pytest.raises(ValueError):
            app.repos.do_read_conf(str(conf))

    def test_get_incomplete(self, app):
        """A repo entry with no settings at all is rejected."""
        conf = {
            '/foo/bar': []
        }
        with pytest.raises(ValueError):
            app.repos.do_read_conf(str(conf))

    def test_overlap(self, app):
        """A repo path nested inside another configured path is rejected."""
        conf = {
            '/foo/bar': {
                'backend': 'sftp',
                'url': 'host:host:google',
                'user': 'someone',
                'password': '<PASSWORD>'
            },
            '/foo/bar/some/thing': {
                'backend': 'sftp',
                'url': 'host:host:google',
                'user': 'someone',
                'password': '<PASSWORD>'
            }
        }
        with pytest.raises(ValueError):
            app.repos.do_read_conf(str(conf))

    def test_overlap_reverse(self, app):
        """Nesting is rejected regardless of declaration order."""
        conf = {
            '/foo/bar/some/thing': {
                'backend': 'sftp',
                'url': 'host:google',
                'user': 'someone',
                'password': '<PASSWORD>'
            },
            '/foo/bar': {
                'backend': 'sftp',
                'url': 'host:google',
                'user': 'someone',
                'password': '<PASSWORD>'
            }
        }
        with pytest.raises(ValueError):
            app.repos.do_read_conf(str(conf))

    def test_overlap_symlink(self, app):
        """Overlap detection also resolves symlinked repo paths."""
        lnk_src = '/foo/bar/some/thing'
        lnk_dst = '/tmp/somelink'
        if os.path.isdir(lnk_dst):
            os.unlink(lnk_dst)
        os.symlink(lnk_src, lnk_dst)
        conf = {
            '/foo/bar/': {
                'backend': 'sftp',
                'url': 'host:google',
                'user': 'someone',
                'password': '<PASSWORD>'
            },
            lnk_dst: {
                'backend': 'sftp',
                'url': 'host:google',
                'user': 'someone',
                'password': '<PASSWORD>'
            }
        }
        with pytest.raises(ValueError):
            app.repos.do_read_conf(str(conf))
        # Clean up the symlink created above.
        os.unlink(lnk_dst)

    def test_dir_not_exist(self, app):
        """Reading the conf creates missing local repository directories."""
        with tempfile.TemporaryDirectory() as local_path:
            local_path_not_exist = local_path + '/test/'
            conf = {
                local_path_not_exist: {
                    'backend': 'sftp',
                    'url': 'host:sftp:test-repo/',
                    'user': 'foo',
                    'password': '<PASSWORD>'
                }
            }
            assert not os.path.exists(local_path_not_exist)
            app.repos.read_conf_from_str(str(conf))
            assert os.path.exists(local_path_not_exist)

    def test_freeze_age_conf(self, app):
        """An explicit freeze_age value is stored on the repo."""
        conf = {
            '/foo/bar': {
                'backend': 'sftp',
                'url': 'host:google',
                'user': 'someone',
                'password': '<PASSWORD>',
                'freezable': True,
                'freeze_age': 12
            },
        }
        repos = app.repos.do_read_conf(str(conf))
        repo = repos['/foo/bar']
        assert repo.freeze_age == 12

    def test_freeze_age_conf_str(self, app):
        """A non-numeric freeze_age is rejected."""
        conf = {
            '/foo/bar': {
                'backend': 'sftp',
                'url': 'host:google',
                'user': 'someone',
                'password': '<PASSWORD>',
                'freezable': True,
                'freeze_age': 'xxxx'
            },
        }
        with pytest.raises(ValueError):
            app.repos.do_read_conf(str(conf))

    def test_freeze_age_conf_none(self, app):
        """A freezable repo without freeze_age falls back to the 180 default."""
        conf = {
            '/foo/bar': {
                'backend': 'sftp',
                'url': 'host:google',
                'user': 'someone',
                'password': '<PASSWORD>',
                'freezable': True
            },
        }
        repos = app.repos.do_read_conf(str(conf))
        repo = repos['/foo/bar']
        assert repo.freeze_age == 180

    def test_freeze_age_conf_small(self, app):
        """A freeze_age below the accepted range is rejected."""
        conf = {
            '/foo/bar': {
                'backend': 'sftp',
                'url': 'host:google',
                'user': 'someone',
                'password': '<PASSWORD>',
                'freezable': True,
                'freeze_age': 1
            },
        }
        with pytest.raises(ValueError):
            app.repos.do_read_conf(str(conf))

    def test_freeze_age_conf_big(self, app):
        """A freeze_age above the accepted range is rejected."""
        conf = {
            '/foo/bar': {
                'backend': 'sftp',
                'url': 'host:google',
                'user': 'someone',
                'password': '<PASSWORD>',
                'freezable': True,
                'freeze_age': 100000
            },
        }
        with pytest.raises(ValueError):
            app.repos.do_read_conf(str(conf))

    def test_chown_uid_conf(self, app):
        """An explicit chown_uid value is stored on the repo."""
        conf = {
            '/foo/bar': {
                'backend': 'sftp',
                'url': 'host:google',
                'user': 'someone',
                'password': '<PASSWORD>',
                'chown_uid': 4586,
            },
        }
        repos = app.repos.do_read_conf(str(conf))
        repo = repos['/foo/bar']
        assert repo.chown_uid == 4586

    def test_chown_uid_conf_too_big(self, app):
        """A chown_uid above the accepted range is rejected."""
        conf = {
            '/foo/bar': {
                'backend': 'sftp',
                'url': 'host:google',
                'user': 'someone',
                'password': '<PASSWORD>',
                'chown_uid': 100000,
            },
        }
        with pytest.raises(ValueError):
            app.repos.do_read_conf(str(conf))

    def test_chown_gid_conf(self, app):
        """An explicit chown_gid value is stored on the repo."""
        conf = {
            '/foo/bar': {
                'backend': 'sftp',
                'url': 'host:google',
                'user': 'someone',
                'password': 'xxxxx',
                'chown_gid': 4586,
            },
        }
        repos = app.repos.do_read_conf(str(conf))
        repo = repos['/foo/bar']
        assert repo.chown_gid == 4586

    def test_chown_gid_conf_too_big(self, app):
        """A chown_gid above the accepted range is rejected."""
        conf = {
            '/foo/bar': {
                'backend': 'sftp',
                'url': 'host:google',
                'user': 'someone',
                'password': '<PASSWORD>',
                'chown_gid': 100000,
            },
        }
        with pytest.raises(ValueError):
            app.repos.do_read_conf(str(conf))
| 2.296875 | 2 |
data_management/autopreprocess_testing/test_preprocess_directory.py | BloomTech-Labs/scribble-stadium-ds | 3 | 12766741 | import preprocess_directory
import unittest
FIXTURES_DIRECTORY_PATH = "Fixtures"
class TestGetImages(unittest.TestCase):
    """Unit tests for preprocess_directory.get_all_images."""

    def test_check_file(self):
        """The fixtures directory should yield exactly one image, test2.jpg."""
        images = preprocess_directory.get_all_images(FIXTURES_DIRECTORY_PATH)
        self.assertEqual(1, len(images))
        self.assertTrue("test2.jpg" in images[0])
self.assertTrue("test2.jpg" in result[0])
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| 2.890625 | 3 |
__app__/ping/ping.py | rivoric/websitemon | 1 | 12766742 | import logging
import requests
import json
import azure.functions as func
def main(req: func.HttpRequest, config: str) -> func.HttpResponse:
    """Ping the configured website and relay its status back to the caller.

    *config* is a JSON string containing a ``websiteUrl`` key.
    """
    settings = json.loads(config)
    result = requests.get(settings["websiteUrl"])
    summary = "%s -> %s" % (result.url, result.reason)
    logging.info(summary)
    return func.HttpResponse(
        summary,
        status_code=result.status_code
    )
| 2.734375 | 3 |
zinnia/urls/comments.py | Boondockers-Welcome/django-blog-zinnia | 1,522 | 12766743 | """Urls for the Zinnia comments"""
from django.conf.urls import url
from zinnia.urls import _
from zinnia.views.comments import CommentSuccess
# Single route: the "comment posted successfully" confirmation page.
urlpatterns = [
    url(_(r'^success/$'),
        CommentSuccess.as_view(),
        name='comment_success'),
]
| 1.5 | 2 |
terminio/commandexecutor/find.py | SourishS/terminio | 1 | 12766744 | from terminio.commandexecutor.commandexecutor import CommandExecutor
class find(CommandExecutor):
    """Command executor for the ``find`` shell command."""
    def __init__(self, session):
        super(find, self).__init__(session)

    def execute_command(self, args):
        # Placeholder implementation: currently only echoes the received args.
        print(args)
| 2.453125 | 2 |
DeepGM/main_cifar.py | khainb/BoMb-OT | 6 | 12766745 | <filename>DeepGM/main_cifar.py
from __future__ import print_function
import argparse
import logging
import os
import random
import imageio
import numpy as np
import torch
import torchvision.datasets as datasets
from Cifar_generator import Cifar_Generator, Discriminator
from experiments import sampling
from fid_score import calculate_fid_given_paths
from torch import optim
from torchvision import transforms
from tqdm import tqdm
from utils import save_acc
# torch.backends.cudnn.enabled = False
def main():
    """Train a CIFAR-10 generator with mini-batch OT losses and track FID.

    Command-line flags select the OT variant (plain, BoMb or eBoMb), the
    mini-batch geometry (``--m`` samples per mini-batch, ``--k`` mini-batches
    per step) and the usual optimisation hyper-parameters.  Every
    ``--fid-each`` epochs the generator is checkpointed, sample images are
    written to disk and an FID score against the CIFAR-10 test statistics is
    computed and logged to a per-run log/csv file.
    """
    # train args
    parser = argparse.ArgumentParser(description="AE")
    parser.add_argument("--datadir", default=".data", help="path to dataset")
    parser.add_argument("--outdir", default="./result", help="directory to output images")
    parser.add_argument("--gpu-id", type=str, default="0", help="GPU id to use")
    parser.add_argument("--m", type=int, default=100, metavar="N", help="input batch size for training (default: 100)")
    parser.add_argument("--k", type=int, default=8, metavar="N", help="input num batch for training (default: 200)")
    parser.add_argument(
        "--epochs", type=int, default=200, metavar="N", help="number of epochs to train (default: 200)"
    )
    parser.add_argument("--lr", type=float, default=0.0005, metavar="LR", help="learning rate (default: 0.0005)")
    parser.add_argument(
        "--num-workers",
        type=int,
        default=8,
        metavar="N",
        help="number of dataloader workers if device is CPU (default: 8)",
    )
    parser.add_argument("--seed", type=int, default=16, metavar="S", help="random seed (default: 16)")
    parser.add_argument("--latent-size", type=int, default=128, help="Latent size")
    parser.add_argument("--fid-each", type=int, default=5, help="Latent size")
    parser.add_argument("--method", type=str, default="OT", help="OT")
    parser.add_argument("--bomb", action="store_true", help="whether to use Bomb version")
    parser.add_argument("--reg", type=float, default=1, help="sinkhorn reg")
    parser.add_argument("--ebomb", action="store_true", help="whether to use eBomb version")
    parser.add_argument("--breg", type=float, default=1, help="sinkhorn breg")
    parser.add_argument("--tau", type=float, default=1, help="tau UOT")
    parser.add_argument("--mass", type=float, default=0.9, help="mass POT")
    parser.add_argument("--L", type=int, default=1000, help="L")
    args = parser.parse_args()
    # os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
    # Set random seed
    np.random.seed(args.seed)
    random.seed(args.seed)
    torch.random.manual_seed(args.seed)
    method = args.method
    latent_size = args.latent_size
    # Total epoch count scales with the number of mini-batches per step.
    args.epochs = args.epochs * args.k
    # Build a run identifier used to name the log, csv and output directories.
    description = (
        "Cifar10_"
        + method
        + "_k"
        + str(args.k)
        + "_m"
        + str(args.m)
        + "_reg"
        + str(args.reg)
        + "_tau"
        + str(args.tau)
        + "_mass"
        + str(args.mass)
        + "_"
        + str(args.L)
        + "_seed"
        + str(args.seed)
        + "_"
        + str(args.epochs)
        + "epochs"
    )
    if args.bomb or args.ebomb:
        if args.bomb:
            bomb = True
            ebomb = False
            description = "BoMb-" + description
        else:
            bomb = False
            ebomb = True
            # NOTE(review): unlike "BoMb-", the eBoMb prefix has no hyphen
            # before the breg value ("eBoMb<breg>-...") — confirm intended.
            description = "eBoMb" + str(args.breg) + "-" + description
        model_dir = os.path.join(args.outdir, description)
    else:
        bomb = False
        ebomb = False
        model_dir = os.path.join(args.outdir, description)
    # create output directories
    LOG_DIR = "logs/cifar10"
    CSV_DIR = "csv/cifar10"
    os.makedirs(LOG_DIR, exist_ok=True)
    os.makedirs(CSV_DIR, exist_ok=True)
    os.makedirs(args.datadir, exist_ok=True)
    os.makedirs(args.outdir, exist_ok=True)
    os.makedirs(model_dir, exist_ok=True)
    log_file = os.path.join(LOG_DIR, f"{description}.log")
    csv_file = os.path.join(CSV_DIR, f"{description}.csv")
    # Start each run with fresh log/csv files.
    if os.path.exists(log_file):
        os.remove(log_file)
    if os.path.exists(csv_file):
        os.remove(csv_file)
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    # config logger
    logging.basicConfig(
        filename=log_file,
        filemode="a",
        format="%(asctime)s %(message)s",
        datefmt="%m/%d/%Y %I:%M:%S %p",
        level=logging.INFO,
    )
    logger = logging.getLogger()
    logger.info(f"Parameters are: {args}")
    logger.info(
        "batch size {}\nepochs {}\nAdam lr {} \n using device {}\n".format(args.m, args.epochs, args.lr, device.type)
    )
    # dataloader
    # Each loader batch holds k mini-batches of m samples (m * k images),
    # normalized to [-1, 1].
    train_loader = torch.utils.data.DataLoader(
        datasets.CIFAR10(
            args.datadir,
            train=True,
            download=True,
            transform=transforms.Compose(
                [transforms.Resize(32), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
            ),
        ),
        batch_size=args.m * args.k,
        shuffle=True,
        num_workers=args.num_workers,
    )
    # model
    model = Cifar_Generator(image_size=32, latent_size=latent_size, num_chanel=3, hidden_chanels=64, device=device).to(
        device
    )
    dis = Discriminator(32, args.latent_size, 3, 64).to(device)
    disoptimizer = optim.Adam(dis.parameters(), lr=args.lr, betas=(0.5, 0.999))
    optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(0.5, 0.999))
    # Fixed noise so the periodic sample grids are comparable across epochs.
    fixednoise = torch.randn((64, latent_size)).to(device)
    for epoch in range(0, args.epochs):
        total_g_loss = 0.0
        total_d_loss = 0.0
        logger.info(f"Epoch: {epoch}")
        print(f"Epoch: {epoch}")
        for batch_idx, (data, y) in tqdm(enumerate(train_loader, start=0)):
            # train_minibatch performs the generator/discriminator update for
            # the selected OT variant and returns the two scalar losses.
            g_loss, d_loss = model.train_minibatch(
                optimizer,
                dis,
                disoptimizer,
                data,
                args.k,
                args.m,
                method,
                args.reg,
                args.breg,
                args.tau,
                args.mass,
                args.L,
                bomb,
                ebomb,
            )
            total_g_loss += g_loss
            total_d_loss += d_loss
        # Average the accumulated losses over the number of loader batches.
        total_g_loss /= batch_idx + 1
        total_d_loss /= batch_idx + 1
        if bomb:
            logger.info("BoMb-{} Epoch: {}, G Loss: {}, D Loss: {}".format(method, epoch, total_g_loss, total_d_loss))
        elif ebomb:
            logger.info("eBoMb-{} Epoch: {}, G Loss: {}, D Loss: {}".format(method, epoch, total_g_loss, total_d_loss))
        else:
            logger.info("{} Epoch: {}, G Loss: {}, D Loss: {}".format(method, epoch, total_g_loss, total_d_loss))
        # Periodic evaluation: checkpoint, write sample images, compute FID.
        if (epoch % args.fid_each == 0) or (epoch == args.epochs - 1):
            save_m_dir = model_dir + "/models"
            if not (os.path.isdir(save_m_dir)):
                os.makedirs(save_m_dir)
            torch.save(model.state_dict(), "%s/G_%06i.pth" % (save_m_dir, epoch))
            count_imgs = 0
            model.eval()
            with torch.no_grad():
                sampling(model_dir + "/sample_epoch_" + str(epoch) + ".png", fixednoise, model.decoder, 64, 32, 3)
                outdir_images = model_dir + "/images"
                if not (os.path.isdir(outdir_images)):
                    os.makedirs(outdir_images)
                # NOTE(review): Nb * m is ~11000 images although the comment
                # says 10K — confirm the intended sample count.
                Nb = 11000 // args.m  # write 10K sampled images for test FID computation
                for i in tqdm(range(Nb)):
                    z = torch.randn(args.m, latent_size).cuda(device=device)
                    fake_images = model.decoder(z)
                    fake_images_np = fake_images.cpu().detach().numpy()
                    fake_images_np = fake_images_np.reshape(fake_images_np.shape[0], 3, 32, 32)
                    # Undo the [-1, 1] normalization and convert to HWC uint8.
                    fake_images_np = ((fake_images_np.transpose((0, 2, 3, 1)) / 2.0 + 0.5) * 255).astype(np.uint8)
                    # NOTE(review): this inner loop reuses `i`, shadowing the
                    # tqdm index; harmless (count_imgs drives file names) but
                    # confusing.
                    for i in range(args.m):
                        imageio.imwrite("%s/img_%06i.png" % (outdir_images, count_imgs), fake_images_np[i])
                        count_imgs += 1
                model.train()
                logger.info("wrote images to %s", outdir_images)
                torch.cuda.empty_cache()
                # Compute FID score
                dataset_paths = [outdir_images, "fid_stats_cifar_test.npz"]
                fid_score = calculate_fid_given_paths(dataset_paths, 4, device, 2048, args.num_workers)
                logger.info(f"FID score: {fid_score}")
                save_acc(csv_file, epoch, fid_score)
if __name__ == "__main__":
main()
| 2.34375 | 2 |
blog/models.py | tylerpitcher/flask-blog | 0 | 12766746 | '''
Defines database models.
'''
from flask_login import UserMixin
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql import func
from hashids import Hashids
import bcrypt
from blog.helpers import is_username, is_email, is_password, in_database
from blog import app
db = SQLAlchemy(app)
class User(db.Model, UserMixin):
'''
Model that represents users in the database.
'''
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(32), unique=True)
email = db.Column(db.String(320), unique=True)
password = db.Column(db.String(60))
posts = db.relationship('Post')
comments = db.relationship('Comment')
class Post(db.Model):
'''
Model that represents user's posts in the database.
'''
id = db.Column(db.Integer, primary_key=True)
date = db.Column(db.DateTime(timezone=True), default=func.now())
title = db.Column(db.String(64), unique=True)
hash = db.Column(db.Text, unique=True)
content = db.Column(db.Text)
username = db.Column(db.String(32), db.ForeignKey('user.username'))
comments = db.relationship('Comment', cascade="all, delete-orphan")
class Comment(db.Model):
'''
Model that represents user's comments in the database.
'''
id = db.Column(db.Integer, primary_key=True)
date = db.Column(db.DateTime(timezone=True), default=func.now())
username = db.Column(db.String(32), db.ForeignKey('user.username'))
post_hash = db.Column(db.Text, db.ForeignKey('post.hash'))
hash = db.Column(db.Text, unique=True)
msg = db.Column(db.Text)
db.create_all()
def register(username, email, password1, password2):
'''
Creates new users & adds them to the database.
Returns the new user if given valid input.
'''
if not is_email(email):
return 'Email not valid.'
users = User.query.filter_by(email=email).all()
if len(users) != 0:
return 'Email already taken.'
if not is_username(username):
return 'Username not valid.'
users = User.query.filter_by(username=username).all()
if len(users) != 0:
return 'Username already taken.'
if not is_password(password1):
return 'Password not valid.'
if password1 != password2:
return 'Passwords do not match.'
user = User(
username=username,
email=email,
password=bcrypt.hashpw(password1.encode('utf-8'), bcrypt.gensalt())
)
db.session.add(user)
db.session.commit()
return user
def login(email, password):
'''
Takes email & password to see if a user with those credentials
exists in the database.
Returns the user if they exist in the database.
'''
user = User.query.filter_by(email=email).first()
if user is None:
return None
if not bcrypt.checkpw(password.encode('utf-8'), user.password):
return None
return user
def post(user, title, content):
'''
Creates a new post by user with given title & content.
Returns the new post if given a valid title & content.
'''
if not in_database(User, user):
return None
if len(title) > 64 or len(title) < 3:
return None
existing_post = Post.query.filter_by(title=title.title()).first()
if existing_post:
return None
new_post = Post(
username=user.username,
title=title.title(),
content=content
)
db.session.add(new_post)
db.session.commit()
hashids = Hashids(min_length=5, salt=app.config['SECRET_KEY'])
new_post.hash = hashids.encode(new_post.id) + 'P'
db.session.commit()
return new_post
def comment(user, post, msg):
'''
Creates comment by user on post.
Returns the new comment if passed a real post and valid message.
'''
if not in_database(User, user) or not in_database(Post, post):
return None
if len(msg) < 2:
return None
comment = Comment(
username=user.username,
post_hash=post.hash,
msg=msg
)
db.session.add(comment)
db.session.commit()
hashids = Hashids(min_length=5, salt=app.config['SECRET_KEY'])
comment.hash = hashids.encode(comment.id) + 'C'
db.session.commit()
return comment
def delete(item):
'''
Delete entry in database table
'''
db.session.delete(item)
db.session.commit()
| 3.21875 | 3 |
administrator/views.py | babygame0ver/kisanmill | 0 | 12766747 | <filename>administrator/views.py
from django.shortcuts import render
from ufs.models import Message
from customercorner.models import Product
from django.views.generic import DetailView, ListView , CreateView , DeleteView , TemplateView
class MessageListView(ListView):
model = Message
template_name = 'administrator/complaints.html'
context_object_name = 'messages'
def get_queryset(self):
return Message.objects.all()
class MessageDetailView(DetailView):
model = Message
template_name = 'administrator/complaint_description.html'
context_object_name = 'complaint_detail'
def adminhome(request):
return render(request,'administrator/admin.html')
| 1.851563 | 2 |
stwno_canteen/csv_converters.py | somsky/stwno_canteen | 0 | 12766748 | import datetime
from .stwno_constants import CSV_MEAL_TYPE_MAP,\
CSV_NUTRITION_TYPE_MAP,\
NutritionType,\
CSV_DATE_FORMAT,\
MealType,\
NutritionType,\
STWNO_INGREDIENTS,\
STWNO_ALLERGENS
class NoValidDateStringException(Exception):
pass
class UnknownMealTypeException(Exception):
pass
class UnknownNutritionTypeException(Exception):
pass
class UnknownIngredientException(Exception):
pass
class StwnoFoodIngredient():
identifier: str
name: str
def __init__(self, identifier, name):
self.name = name
self.identifier = identifier
def convertCSVDishName(name: str) -> str:
name = name.strip()
indexBracket = name.find('(')
if indexBracket == -1:
return name
return name[0:indexBracket].strip()
def convertCSVMealType(mealType: str) -> MealType:
mealTypeID = ''.join(filter(str.isalpha, mealType))
mType = CSV_MEAL_TYPE_MAP.get(mealTypeID)
if mType != None:
return mType
else:
raise UnknownMealTypeException(
'MealType {} is not in the list of known meal types'.format(mType))
def convertCSVNutritionType(nutritionType: str) -> NutritionType:
nutType = CSV_NUTRITION_TYPE_MAP.get(nutritionType)
if nutType != None:
return nutType
else:
return NutritionType.meat
def convertCSVDate(dateStr: str) -> datetime.date:
try:
return datetime.datetime.strptime(dateStr, CSV_DATE_FORMAT).date()
except ValueError:
raise NoValidDateStringException
def convertCSVIngredientsAndAllergens(mealNameStr: str) -> str:
ingredientIdentifiers = mealNameStr.replace(' ', '').split(',')
ingredients = []
allergens = []
for identifier in ingredientIdentifiers:
name = None
if name := STWNO_INGREDIENTS.get(identifier):
ingredients.append(StwnoFoodIngredient(identifier, name))
elif name := STWNO_ALLERGENS.get(identifier):
allergens.append(StwnoFoodIngredient(identifier, name))
else:
raise UnknownIngredientException(
'Ingredient {} is neither a known inredient nor an allergene'.format(identifier))
return ingredients, allergens
| 2.609375 | 3 |
pyprobar/styleString.py | beidongjiedeguang/python-progress-bar | 16 | 12766749 | <reponame>beidongjiedeguang/python-progress-bar
from pyprobar.utils import dict_dotable
CSI = '\033['
OSC = '\033]'
OFF = CSI + '0m'
rgb_dict = {
"浅绿": (66, 227, 35),
"深绿": (28, 97, 15),
"嫩绿": (194, 250, 134),
"天青": (28, 199, 212),
"紫色": (146, 52, 247),
"浅紫": (214, 126, 209),
"浅蓝": (186, 189, 250),
"天蓝": (56, 116, 217),
"蓝1": (115, 182, 225),
"蓝2": (117, 181, 244),
"绿1": (190, 237, 199),
"绿2": (140, 199, 181),
"绿3": (190, 231, 233),
"玫瑰红": (237, 166, 178),
"粉色": (250, 205, 229),
"浅黑": (85, 85, 85),
"灰色": (112, 112, 112),
"亮灰": (204, 204, 204)
}
rgb_dict = dict_dotable(rgb_dict)
def setRGB(RGB_fore=(240, 85, 85), SRG=0, RGB_back=None):
    """Build the ANSI escape prefix for 24-bit foreground/background color.

    see https://my.oschina.net/dingdayu/blog/1537064

    inputs:
        RGB_fore: rgb list or tuple of foreground, e.g. [255, 0, 0].
            (Default changed from a list to a tuple — a mutable default
            argument is an anti-pattern; behavior is unchanged since the
            value is only indexed, never mutated.)
        SRG: the style of font
        RGB_back: rgb list or tuple of background, or None for no
            background sequence

        SRG options: see https://en.wikipedia.org/wiki/ANSI_escape_code#SGR
        | 0  |  Close all formats and revert to the original state
        | 1  |  Bold (increased intensity)
        | 2  |  Faint (decreased intensity)
        | 3  |  Italics
        | 4  |  Underline (single line)
        | 5  |  Slow Blink
        | 6  |  Rapid Blink
        | 7  |  Swap the background color with the foreground color
    """
    # "38;2;r;g;b" selects a 24-bit foreground color; "48;2;r;g;b" background.
    Fore_color = f"{CSI}{SRG};38;2;{RGB_fore[0]};{RGB_fore[1]};{RGB_fore[2]}m"
    if RGB_back is None:
        Back_color = ''
    else:
        Back_color = f"{CSI}{SRG};48;2;{RGB_back[0]};{RGB_back[1]};{RGB_back[2]}m"
    return Fore_color + Back_color
def rgb_str(string, RGB_fore=(240, 85, 85), SRG=0, RGB_back=None):
    """Return *string* wrapped in ANSI color codes, followed by a reset.

    Parameters mirror :func:`setRGB`. The foreground default is a tuple
    rather than a list to avoid the mutable-default-argument anti-pattern
    (behavior is unchanged — the value is only read).
    """
    return setRGB(RGB_fore, SRG, RGB_back) + string + OFF
| 2.71875 | 3 |
mobility_pipeline/plot_voronoi.py | codethechange/mobility_pipeline | 0 | 12766750 | #!/usr/bin/env python3
"""Tool for plotting the Voronoi tessellation described by the provided data
Note that the seeds of the tessellation are based on the provided towers file,
not computed from the cells. The tool also prints to the console the number
of towers and number of cells. Towers without an associated cell are shown in
green, while other towers are shown in red.
"""
from matplotlib import pyplot as plt # type: ignore
from shapely.geometry import MultiPolygon # type: ignore
from descartes import PolygonPatch # type: ignore
from data_interface import(
load_voronoi_cells,
load_towers,
VORONOI_PATH,
TOWERS_PATH,
)
def plot_polygon(axes: plt.axes, polygon: MultiPolygon) -> None:
    """Draw a single polygon onto an axes

    Args:
        axes: The target axes to draw on
        polygon: The polygon to draw

    Returns:
        None
    """
    # Semi-transparent dark-blue fill with a black outline.
    axes.add_patch(PolygonPatch(
        polygon,
        facecolor=[0, 0, 0.5],
        edgecolor=[0, 0, 0],
        alpha=0.5,
    ))
if __name__ == '__main__':
    # pragma pylint: disable=invalid-name
    cells = load_voronoi_cells(VORONOI_PATH)
    towers = load_towers(TOWERS_PATH)
    print('Number of Cells: ', len(cells), 'Number of Towers: ', len(towers))

    # Learned how to plot from:
    # https://chrishavlin.com/2016/11/28/shapefiles-in-python-polygons/
    plt.ioff()
    fig = plt.figure()
    # (left, bottom, width, height) in units of fractions of figure dimensions
    ax = fig.add_axes((0.1, 0.1, 0.9, 0.9))
    ax.set_aspect(1)

    # Indices of towers whose cell is degenerate (zero area). A set gives
    # O(1) membership tests in the tower loop below, instead of the O(n)
    # list scan per tower.
    no_coor_indices = set()
    for i, cell in enumerate(cells):
        plot_polygon(ax, cell)
        if cell.area == 0:
            no_coor_indices.add(i)

    # Towers without an associated (non-degenerate) cell are shown in
    # green; all other towers in red.
    for i, (lat, lng) in enumerate(towers):
        color = 'green' if i in no_coor_indices else 'red'
        ax.plot(lat, lng, color=color, marker='o', markersize=2, alpha=0.5)

    # Showed how to auto-resize axes: https://stackoverflow.com/a/11039268
    ax.relim()
    ax.autoscale_view()
    plt.show()