| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
uvicorn/protocols/utils.py
|
immerrr/uvicorn
| 0
|
12780651
|
import asyncio
import time
import urllib.parse
from typing import Optional, Tuple
from asgiref.typing import WWWScope
def get_remote_addr(transport: asyncio.Transport) -> Optional[Tuple[str, int]]:
socket_info = transport.get_extra_info("socket")
if socket_info is not None:
try:
info = socket_info.getpeername()
return (str(info[0]), int(info[1])) if isinstance(info, tuple) else None
except OSError:
# This case appears to inconsistently occur with uvloop
# bound to a unix domain socket.
return None
info = transport.get_extra_info("peername")
if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:
return (str(info[0]), int(info[1]))
return None
def get_local_addr(transport: asyncio.Transport) -> Optional[Tuple[str, int]]:
socket_info = transport.get_extra_info("socket")
if socket_info is not None:
info = socket_info.getsockname()
return (str(info[0]), int(info[1])) if isinstance(info, tuple) else None
info = transport.get_extra_info("sockname")
if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:
return (str(info[0]), int(info[1]))
return None
def is_ssl(transport: asyncio.Transport) -> bool:
return bool(transport.get_extra_info("sslcontext"))
def get_client_addr(scope: WWWScope) -> str:
client = scope.get("client")
if not client:
return ""
return "%s:%d" % client
def get_path_with_query_string(scope: WWWScope) -> str:
path_with_query_string = urllib.parse.quote(
scope.get("root_path", "") + scope["path"]
)
if scope["query_string"]:
path_with_query_string = "{}?{}".format(
path_with_query_string, scope["query_string"].decode("ascii")
)
return path_with_query_string
class RequestResponseTiming:
# XXX: switch to "time.perf_counter" because apparently on windows
# time.monotonis is using GetTickCount64 which has ~15ms resolution (it
# caused problems in tests on windows)
#
# ref: https://github.com/python-trio/trio/issues/33#issue-202432431
def __init__(self) -> None:
self._request_start_time: Optional[float] = None
self._request_end_time: Optional[float] = None
self._response_start_time: Optional[float] = None
self._response_end_time: Optional[float] = None
def request_started(self) -> None:
self._request_start_time = time.monotonic()
@property
def request_start_time(self) -> float:
if self._request_start_time is None:
raise ValueError("request_started() was not called")
return self._request_start_time
def request_ended(self) -> None:
self._request_end_time = time.monotonic()
@property
def request_end_time(self) -> float:
if self._request_end_time is None:
raise ValueError("request_ended() was not called")
return self._request_end_time
def response_started(self) -> None:
self._response_start_time = time.monotonic()
@property
def response_start_time(self) -> float:
if self._response_start_time is None:
raise ValueError("response_started() was not called")
return self._response_start_time
def response_ended(self) -> None:
self._response_end_time = time.monotonic()
@property
def response_end_time(self) -> float:
if self._response_end_time is None:
raise ValueError("response_ended() was not called")
return self._response_end_time
def request_duration_seconds(self) -> float:
return self.request_end_time - self.request_start_time
def response_duration_seconds(self) -> float:
return self.response_end_time - self.response_start_time
def total_duration_seconds(self) -> float:
return self.response_end_time - self.request_start_time
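# Hedged usage sketch (not part of uvicorn): a plain dict standing in for an
# ASGI scope is enough to exercise the helpers above, and the timing class is
# driven by calling its *_started/*_ended hooks. All values are illustrative.
if __name__ == "__main__":
    scope = {
        "client": ("127.0.0.1", 50000),
        "root_path": "",
        "path": "/items/42",
        "query_string": b"page=2",
    }
    print(get_client_addr(scope))             # 127.0.0.1:50000
    print(get_path_with_query_string(scope))  # /items/42?page=2

    timing = RequestResponseTiming()
    timing.request_started()
    time.sleep(0.01)  # stand-in for reading the request
    timing.request_ended()
    timing.response_started()
    time.sleep(0.01)  # stand-in for writing the response
    timing.response_ended()
    print("request %.4fs, total %.4fs" % (
        timing.request_duration_seconds(), timing.total_duration_seconds()))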
| 2.5625
| 3
|
UTDVN_backend/UTDVN_database/apps.py
|
NewLuminous/UTDVN
| 1
|
12780652
|
<filename>UTDVN_backend/UTDVN_database/apps.py<gh_stars>1-10
from django.apps import AppConfig
class UtdvnDatabaseConfig(AppConfig):
name = 'UTDVN_database'
| 1.21875
| 1
|
hardhat/recipes/libsigc++.py
|
stangelandcl/hardhat
| 0
|
12780653
|
<filename>hardhat/recipes/libsigc++.py
from .base import GnuRecipe
class LibSigCppRecipe(GnuRecipe):
def __init__(self, *args, **kwargs):
super(LibSigCppRecipe, self).__init__(*args, **kwargs)
self.sha256 = '774980d027c52947cb9ee4fac6ffe2ca' \
'60cc2f753068a89dfd281c83dbff9651'
self.name = 'libsigc++'
self.version = '2.8.0'
short_version = '.'.join(self.version.split('.')[:2])
self.url = 'http://ftp.gnome.org/pub/gnome/sources/$name/' \
'%s/$name-$version.tar.xz' % short_version
| 1.84375
| 2
|
src/controller/dbcontroller.py
|
gusilva/sdr
| 0
|
12780654
|
<gh_stars>0
from src.model.model import Base, Settings, Report, FolderTracking
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
from datetime import datetime
class Controller(object):
"""
Controller class to interact with db model data.
Methods
-------
getActiveProductSettings()
        retrieve the product settings from the settings table model.
save(productid, pam_source_count, pam_destination_count)
save report data to daily report db table.
"""
def __init__(self, dbengine: create_engine) -> None:
"""
Parameters
----------
dbengine : create_engine
create engine object from sqlalchemy.
"""
Base.metadata.bind = dbengine
DBSession = sessionmaker(bind=dbengine)
self.session = DBSession()
    def getActiveProductSettings(self) -> list:
"""Get active settings from db settings table.
Returns
-------
list
all active settings.
"""
products = self.session.query(Settings).filter_by(status=1).all()
return [data.serialize for data in products]
def save(
self, productid: int, pam_source_count: int, pam_destination_count: int
) -> None:
"""Save daily report to report table.
Parameters
----------
productid : int
product setting id.
pam_source_count: int
production asset management source count.
pam_destination_count: int
production asset management destination count.
"""
dt = datetime.now()
data = {
"settings_id": productid,
"pam_source_count": pam_source_count,
"pam_destination_count": pam_destination_count,
"report_date": dt,
}
report = Report(**data)
self.session.add(report)
self.session.commit()
def saveFolders(
self,
productid: int,
pam_source_missing_count: int,
pam_destination_missing_count: int,
) -> None:
"""Save tracking folders count to foldertracking table.
Parameters
----------
productid : int
product setting id.
pam_source_missing_count: int
production asset management source missing count.
pam_destination_missing_count: int
production asset management destination missing count.
"""
dt = datetime.now()
data = {
"settings_id": productid,
"pam_source_missing_count": pam_source_missing_count,
"pam_destination_missing_count": pam_destination_missing_count,
"created_date": dt,
}
tracking = FolderTracking(**data)
self.session.add(tracking)
self.session.commit()
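# Hedged usage sketch: assuming the models imported from src.model.model map
# the settings/report/foldertracking tables, the controller can be exercised
# against a throwaway in-memory SQLite engine. Values are illustrative only.
if __name__ == "__main__":
    engine = create_engine("sqlite:///:memory:")
    Base.metadata.create_all(engine)  # create the mapped tables for the sketch
    controller = Controller(engine)
    controller.save(productid=1, pam_source_count=10, pam_destination_count=9)
    controller.saveFolders(productid=1, pam_source_missing_count=0,
                           pam_destination_missing_count=1)
    print(controller.getActiveProductSettings())  # [] until settings rows exist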
| 2.328125
| 2
|
app.py
|
skullcandy69/Minor-Project
| 0
|
12780655
|
<filename>app.py<gh_stars>0
import time
from absl import app, logging
import cv2
import numpy as np
import tensorflow as tf
from yolov3_tf2.models import (
YoloV3, YoloV3Tiny
)
from yolov3_tf2.dataset import transform_images, load_tfrecord_dataset
from yolov3_tf2.utils import draw_outputs
from flask import Flask, request, Response, jsonify, send_from_directory, abort
import os
####################################
# USAGE
# python test_handwriting.py --model handwriting.model --image images/umbc_address.png
# import the necessary packages
from tensorflow.keras.models import load_model
from imutils.contours import sort_contours
import numpy as np
import argparse
import imutils
import cv2
################################
import math
import operator # for sorting li
def roundup(x):  # round up to the next multiple of 10
return int(math.ceil(x / 10.0)) * 10
#################################
# load the handwriting OCR model
print("[INFO] loading handwriting OCR model...")
model = load_model("C:/repos/env/Text-Detection/handwriting.model")
######################################################
# customize your API through the following parameters
classes_path = './data/labels/coco.names'
weights_path = './weights/yolov3.tf'
tiny = False # set to True if using a Yolov3 Tiny model
size = 416 # size images are resized to for model
output_path = './detections/' # path to output folder where images with detections are saved
num_classes = 7 # number of classes in model
# load in weights and classes
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
if tiny:
yolo = YoloV3Tiny(classes=num_classes)
else:
yolo = YoloV3(classes=num_classes)
yolo.load_weights(weights_path).expect_partial()
print('weights loaded')
class_names = [c.strip() for c in open(classes_path).readlines()]
print('classes loaded')
# Initialize Flask application
app = Flask(__name__)
class finalList:
def __init__(self, name, x, y):
self.name = name
self.x = x
self.y = y
@app.route('/', methods=['GET'])
def hello_world():
return '''<html>
<body>
<p>Click on the "Choose File" button to upload a file:</p>
<form action="/detections" method = "POST"
enctype = "multipart/form-data">
<input type="file" id="images" name="images">
<input type="submit" value="Upload Image" name="submit">
</form>
</body>
</html>'''
# API that returns JSON with classes found in images
@app.route('/detections', methods=['POST'])
def get_detections():
raw_images = []
images = request.files.getlist("images")
image_names = []
for image in images:
image_name = image.filename
image_names.append(image_name)
image.save(os.path.join(os.getcwd(), image_name))
img_raw = tf.image.decode_image(
open(image_name, 'rb').read(), channels=3)
raw_images.append(img_raw)
num = 0
# create list for final response
response = []
li = []
for j in range(len(raw_images)):
# create list of responses for current image
responses = []
raw_img = raw_images[j]
num += 1
img = tf.expand_dims(raw_img, 0)
img = transform_images(img, size)
t1 = time.time()
boxes, scores, classes, nums = yolo(img)
t2 = time.time()
print('time: {}'.format(t2 - t1))
# print("**",scores)
print('detections:')
for i in range(nums[0]):
# if np.array(scores[0][i])*100>30:
print('\t{}, {}, {}'.format(class_names[int(classes[0][i])],
np.array(scores[0][i]),
np.array(boxes[0][i])))
responses.append({
"class": class_names[int(classes[0][i])],
"confidence": float("{0:.2f}".format(np.array(scores[0][i]) * 100)),
"co ordinates": str("{}".format((np.array(boxes[0][i]))))
})
# print(tuple(np.array(boxes[0][i])))
# img = Image.open("C:\\Repos\\object-Detection-API\\detections\\detection.jpg")
# a,b = img.size
# print("*****")
# print(a,b)
x, y, z, h = np.array(boxes[0][i])
p = finalList(class_names[int(classes[0][i])], x, y)
li.append(p)
# print(x,y,z,h)
# crop = img.crop((x*a,y*b,z*a,h*b))
# crop.show()
response.append({
"image": image_names[j],
"detections": responses
})
# note the tuple
img = cv2.cvtColor(raw_img.numpy(), cv2.COLOR_RGB2BGR)
img = draw_outputs(img, (boxes, scores, classes, nums), class_names)
        cv2.imwrite(output_path + 'detection' + str(num) + '.jpg', img)
        print('output saved to: {}'.format(output_path + 'detection' + str(num) + '.jpg'))
st = """
<!DOCTYPE html>
<html>
<head>
<meta name="viewport" content="width=device-width" />
<title>HTML Result</title>
<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.1.1/css/bootstrap.min.css"
integrity="<KEY>" crossorigin="anonymous">
</head>
<body>
<div class="container body-content">"""
en = """
</div>
</body>
</html>
"""
inputf = """
<div class="row justify-content-start" style="padding-top:10px;">
<label>Demo Text: </label>
</div>
<div class="row justify-content-center" style="padding-top:10px;">
<input class="form-control"></input>
</div>"""
button = """
<div class="col" style="padding-top:10px;">
<button class="btn btn-primary">Submit</button>
</div>"""
img = """
<img src="C:/repos/env/Object-Detection-API/img.png" width="150" height="150" alt="Image Here">"""
radio = """
<div class="col" style="padding-top:10px;">
<input type="radio" id="male" name="Demo text" value="male">
<label for="male">Demo Text</label><br>
</div>
"""
dropdown = """
<div class="dropdown">
<label for="cars">Dropdown:</label>
<select name="cars" id="cars" class="btn btn-primary dropdown-toggle">
<option value="1">Option 1</option>
<option value="2">Option 2</option>
<option value="3">Option 3</option>
<option value="4">Option 4</option>
</select>
</div>"""
checkbox = """
<div class="col" style="padding-top:10px;">
<input type="checkbox" id="vehicle1" name="vehicle1" value="Bike">
<label for="vehicle1"> I have a bike</label><br>
</div>
"""
text = """<div class="col" style="padding-top:10px;"> <p class="text-black-50"> You’ve probably heard of
Lorem Ipsum before – it’s the most-used dummy text excerpt out there. People use it because it has a fairly
normal distribution of letters and words (making it look like normal English), but it’s also Latin,
which means your average reader won’t get distracted by trying to read it. </p> </div> """
sorted_li = sorted(li, key=operator.attrgetter('y'))
# print("###########################")
# for m in sorted_li:
# print(m.name, m.y)
#
# print("###########################")
for i in sorted_li:
if i.name == "check box":
st += checkbox
elif i.name == "radio button":
st += radio
elif i.name == "dropdown":
st += dropdown
elif i.name == "input":
st += inputf
elif i.name == "submit":
st += button
elif i.name == "text":
st += text
else:
st += img
print(i.name, i.x, i.y)
print(st + en)
f = open("demofile3.html", "w")
f.write(st + en)
f.close()
# remove temporary images
for name in image_names:
os.remove(name)
try:
return jsonify({"response": response}), 200
except FileNotFoundError:
abort(404)
# API that returns image with detections on it
# @app.route('/image', methods= ['POST'])
# def get_image():
# image = request.files["images"]
# image_name = image.filename
# image.save(os.path.join(os.getcwd(), image_name))
# img_raw = tf.image.decode_image(
# open(image_name, 'rb').read(), channels=3)
# img = tf.expand_dims(img_raw, 0)
# img = transform_images(img, size)
# t1 = time.time()
# boxes, scores, classes, nums = yolo(img)
# t2 = time.time()
# print('time: {}'.format(t2 - t1))
# print('detections:')
# for i in range(nums[0]):
# print('\t{}, {}, {}'.format(class_names[int(classes[0][i])],
# np.array(scores[0][i]),
# np.array(boxes[0][i])))
# img = cv2.cvtColor(img_raw.numpy(), cv2.COLOR_RGB2BGR)
# img = draw_outputs(img, (boxes, scores, classes, nums), class_names)
# cv2.imwrite(output_path + 'detection.jpg', img)
# print('output saved to: {}'.format(output_path + 'detection.jpg'))
# # prepare image for response
# _, img_encoded = cv2.imencode('.png', img)
# response = img_encoded.tostring()
# #remove temporary image
# os.remove(image_name)
# try:
# return Response(response=response, status=200, mimetype='image/png')
# except FileNotFoundError:
# abort(404)
# API that returns image with detections on it
@app.route('/image', methods=['POST'])
def get_image():
image = request.files["images"]
# print("######### IMG", image)
image_name = image.filename
image.save(os.path.join(os.getcwd(), image_name))
img_raw = tf.image.decode_image(
open(image_name, 'rb').read(), channels=3)
img = tf.expand_dims(img_raw, 0)
img = transform_images(img, size)
t1 = time.time()
boxes, scores, classes, nums = yolo(img)
t2 = time.time()
print('time: {}'.format(t2 - t1))
print('detections:')
for i in range(nums[0]):
print('\t{}, {}, {}'.format(class_names[int(classes[0][i])],
np.array(scores[0][i]),
np.array(boxes[0][i])))
img = cv2.cvtColor(img_raw.numpy(), cv2.COLOR_RGB2BGR)
img = draw_outputs(img, (boxes, scores, classes, nums), class_names)
cv2.imwrite(output_path + 'detection.jpg', img)
print('output saved to: {}'.format(output_path + 'detection.jpg'))
# prepare image for response
_, img_encoded = cv2.imencode('.png', img)
    response = img_encoded.tobytes()
######################################################################
image_path = os.path.join(os.getcwd(), 'detections/detection.jpg')
image = cv2.imread(image_path)
print(image_path)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
# perform edge detection, find contours in the edge map, and sort the
# resulting contours from left-to-right
edged = cv2.Canny(blurred, 30, 150)
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cnts = sort_contours(cnts, method="left-to-right")[0]
# initialize the list of contour bounding boxes and associated
# characters that we'll be OCR'ing
chars = []
# loop over the contours
for c in cnts:
# compute the bounding box of the contour
(x, y, w, h) = cv2.boundingRect(c)
# filter out bounding boxes, ensuring they are neither too small
# nor too large
if (w >= 5 and w <= 150) and (h >= 15 and h <= 120):
# extract the character and threshold it to make the character
# appear as white (foreground) on a black background, then
# grab the width and height of the thresholded image
roi = gray[y:y + h, x:x + w]
thresh = cv2.threshold(roi, 0, 255,
cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
(tH, tW) = thresh.shape
# if the width is greater than the height, resize along the
# width dimension
if tW > tH:
thresh = imutils.resize(thresh, width=32)
# otherwise, resize along the height
else:
thresh = imutils.resize(thresh, height=32)
# re-grab the image dimensions (now that its been resized)
# and then determine how much we need to pad the width and
# height such that our image will be 32x32
(tH, tW) = thresh.shape
dX = int(max(0, 32 - tW) / 2.0)
dY = int(max(0, 32 - tH) / 2.0)
# pad the image and force 32x32 dimensions
padded = cv2.copyMakeBorder(thresh, top=dY, bottom=dY,
left=dX, right=dX, borderType=cv2.BORDER_CONSTANT,
value=(0, 0, 0))
padded = cv2.resize(padded, (32, 32))
# prepare the padded image for classification via our
# handwriting OCR model
padded = padded.astype("float32") / 255.0
padded = np.expand_dims(padded, axis=-1)
# update our list of characters that will be OCR'd
chars.append((padded, (x, y, w, h)))
# extract the bounding box locations and padded characters
boxes = [b[1] for b in chars]
chars = np.array([c[0] for c in chars], dtype="float32")
# OCR the characters using our handwriting recognition model
preds = model.predict(chars)
# define the list of label names
labelNames = "0123456789"
labelNames += "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
labelNames = [l for l in labelNames]
lst = []
# loop over the predictions and bounding box locations together
for (pred, (x, y, w, h)) in zip(preds, boxes):
# find the index of the label with the largest corresponding
# probability, then extract the probability and label
i = np.argmax(pred)
prob = pred[i]
label = labelNames[i]
lst.append(label + ":" + str(prob))
# draw the prediction on the image
print("[INFO] {} - {:.2f}%".format(label, prob * 100))
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.putText(image, label, (x - 10, y - 10),
cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 255, 0), 2)
# # show the image
# cv2.imshow("Image", image)
# cv2.waitKey(0)
# remove temporary image
os.remove(image_name)
cv2.imshow("Image", image)
cv2.waitKey(0)
try:
return jsonify({"response": lst}), 200
except FileNotFoundError:
abort(404)
if __name__ == '__main__':
app.run(debug=True, host = '0.0.0.0', port=5000)
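# Hedged usage note: with the server running locally, both endpoints accept a
# multipart upload under the field name "images"; the file name is illustrative.
#   curl -F "images=@sample.png" http://localhost:5000/detections
#   curl -F "images=@sample.png" http://localhost:5000/image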
| 2.328125
| 2
|
bin/ominoes/pentominoes-3x20-loop.py
|
tiwo/puzzler
| 0
|
12780656
|
#!/usr/bin/env python
# $Id$
"""2 solutions"""
import puzzler
from puzzler.puzzles.pentominoes import Pentominoes3x20Loop
puzzler.run(Pentominoes3x20Loop)
| 1.171875
| 1
|
LeetCode/198-house-robber.py
|
leaving-voider/LeetCode.cn-Record
| 0
|
12780657
|
###############################################################################################
# Classic "House Robber" problem, solved with the standard dynamic-programming approach
###########
# Time complexity: O(n), where n is the length of the array
# Space complexity: O(1), using rolling variables to keep only the last two states
###############################################################################################
from typing import List
class Solution:
def rob(self, nums: List[int]) -> int:
len_ = len(nums)
if len_ == 0:
return 0
elif len_ == 1:
return nums[0]
        first = nums[0]  # best total when considering houses up to i-2
        second = max(nums[0], nums[1])  # best total when considering houses up to i-1
        for i in range(2, len_):
            # first + nums[i] means robbing house i; second means skipping it
            first, second = second, max(first + nums[i], second)
return second
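# Quick illustrative check of the rolling DP (made-up input): for
# nums = [2, 7, 9, 3, 1] the best plan robs houses 0, 2 and 4 for 2 + 9 + 1 = 12.
if __name__ == "__main__":
    print(Solution().rob([2, 7, 9, 3, 1]))  # expected output: 12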
| 3.21875
| 3
|
MouthMusicModel.py
|
ahpalmerUNR/mouthMusicStreamer
| 0
|
12780658
|
# -*- coding: utf-8 -*-
# @Author: ahpalmerUNR
# @Date: 2020-12-21 14:38:59
# @Last Modified by: ahpalmerUNR
# @Last Modified time: 2021-05-06 23:10:20
import torch
class MouthMusicMouthModel(torch.nn.Module):
def __init__(self):
super(MouthMusicMouthModel,self).__init__()
self.conv1 = torch.nn.Conv2d(3,62,7,stride=2,padding=3,bias=False)
self.batch1 = torch.nn.BatchNorm2d(62)
self.relu = torch.nn.ReLU(inplace=True)
self.maxPool1 = torch.nn.MaxPool2d(3,2,1,1)
self.conv2 = torch.nn.Conv2d(62,120,7,stride=2,padding=3,bias=False)
self.batch2 = torch.nn.BatchNorm2d(120)
self.conv3 = torch.nn.Conv2d(120,120,7,stride=2,padding=3,bias=False)
self.batch3 = torch.nn.BatchNorm2d(120)
self.maxPool2 = torch.nn.MaxPool2d(3,1,1,1)
self.conv4 = torch.nn.Conv2d(120,120,7,stride=1,padding=3,bias=False)
self.batch4 = torch.nn.BatchNorm2d(120)
self.conv5 = torch.nn.Conv2d(120,120,7,stride=1,padding=3,bias=False)
self.batch5 = torch.nn.BatchNorm2d(120)
self.conv6 = torch.nn.Conv2d(120,30,7,stride=2,padding=3,bias=False)
self.conv7 = torch.nn.Conv2d(30,12,1)
self.sigmoid = torch.nn.Sigmoid()
def forward(self,x):
out = self.relu(self.batch1(self.conv1(x)))
out = self.maxPool1(out)
out = self.relu(self.batch2(self.conv2(out)))
out = self.batch3(self.conv3(out))
out = self.maxPool2(out)
out = self.relu(self.batch4(self.conv4(out)))
out = self.relu(self.batch5(self.conv5(out)))
out = self.relu(self.conv6(out))
out = self.sigmoid(self.conv7(out))
return out
class MouthMusicEyeModel(torch.nn.Module):
def __init__(self):
super(MouthMusicEyeModel,self).__init__()
self.conv1 = torch.nn.Conv2d(3,64,7,stride=2,padding=3,bias=False)
self.batch1 = torch.nn.BatchNorm2d(64)
self.relu = torch.nn.ReLU(inplace=True)
self.maxPool1 = torch.nn.MaxPool2d(3,2,1,1)
self.conv2 = torch.nn.Conv2d(64,120,7,stride=2,padding=3,bias=False)
self.batch2 = torch.nn.BatchNorm2d(120)
self.conv3 = torch.nn.Conv2d(120,120,7,stride=2,padding=3,bias=False)
self.batch3 = torch.nn.BatchNorm2d(120)
self.maxPool2 = torch.nn.MaxPool2d(3,1,1,1)
self.conv7 = torch.nn.Conv2d(120,16,1)
self.sigmoid = torch.nn.Sigmoid()
def forward(self,x):
out = self.relu(self.batch1(self.conv1(x)))
out = self.maxPool1(out)
out = self.relu(self.batch2(self.conv2(out)))
out = self.batch3(self.conv3(out))
out = self.maxPool2(out)
out = self.sigmoid(self.conv7(out))
return out
def loadModelMouth(directory,nameRoot):
modelMouth = MouthMusicMouthModel()
modelMouth.load_state_dict(torch.load(directory+"mouth_"+nameRoot, map_location="cpu"))
return modelMouth
def loadModelEyes(directory,nameRoot):
modelEyes = MouthMusicEyeModel()
modelEyes.load_state_dict(torch.load(directory+"eye_"+nameRoot, map_location="cpu"))
return modelEyes
def loadModel(directory,nameRoot):
eyeModel = loadModelEyes(directory,nameRoot)
mouthModel = loadModelMouth(directory,nameRoot)
return mouthModel,eyeModel
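# Hedged sanity-check sketch: the expected input resolution is not documented
# in this file, so the 128x128 frame below is only an assumption used to run
# the forward passes and look at the output shapes.
if __name__ == "__main__":
    mouth_model = MouthMusicMouthModel()
    eye_model = MouthMusicEyeModel()
    dummy = torch.randn(1, 3, 128, 128)  # (batch, channels, height, width), assumed size
    print(mouth_model(dummy).shape)  # torch.Size([1, 12, 4, 4]) for a 128x128 input
    print(eye_model(dummy).shape)    # torch.Size([1, 16, 8, 8]) for a 128x128 input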
| 2.46875
| 2
|
lime/util/passwords_test.py
|
toastwaffle/LiME
| 0
|
12780659
|
<gh_stars>0
"""Tests for the password descriptor.
Relies on the descriptor existing on the user model.
"""
from absl.testing import absltest
from passlib import context
from lime import app
from lime.database import models
from lime.util import testing
class PasswordsTest(absltest.TestCase):
"""Tests for the password descriptor."""
def test_check(self):
"""A password can be checked against a known hash."""
with testing.test_setup():
user = models.User(password_hash=(
<PASSWORD>'))
self.assertFalse(user.password == '<PASSWORD>')
self.assertTrue(user.password == 'password')
def test_set_and_check(self):
"""A password can be set and checked against."""
with testing.test_setup():
user = models.User()
self.assertEqual(user.password_hash, None)
user.password = 'password'
self.assertNotEmpty(user.password_hash)
self.assertTrue(user.password == 'password')
def test_update(self):
"""The password hash is updated when it uses an old hash algorithm."""
with testing.test_setup():
app.APP.config['PASSLIB_CONTEXT'] = context.CryptContext(
schemes=['bcrypt', 'md5_crypt'], deprecated='auto')
md5_hash = '$1$7BYN8u/S$OQOqfmrz8B6vSJn0nApt/.'
user = models.User(password_hash=md5_hash)
self.assertTrue(user.password == 'password')
self.assertNotEqual(user.password_hash, md5_hash)
if __name__ == '__main__':
absltest.main()
| 2.703125
| 3
|
sequence/sea_level.py
|
sequence-dev/sequence
| 1
|
12780660
|
<gh_stars>1-10
import numpy as np
from landlab import Component
from scipy import interpolate
class SeaLevelTimeSeries(Component):
_name = "Sea Level Changer"
_time_units = "y"
_unit_agnostic = True
_info = {
"sea_level__elevation": {
"dtype": "float",
"intent": "out",
"optional": False,
"units": "m",
"mapping": "grid",
"doc": "Sea level elevation",
}
}
def __init__(self, grid, filepath, kind="linear", start=0.0, **kwds):
"""Generate sea level values.
Parameters
----------
grid: ModelGrid
A landlab grid.
filepath: str
Name of csv-formatted sea-level file.
kind: str, optional
Kind of interpolation as a string (one of 'linear',
'nearest', 'zero', 'slinear', 'quadratic', 'cubic').
Default is 'linear'.
"""
super(SeaLevelTimeSeries, self).__init__(grid, **kwds)
self._filepath = filepath
self._kind = kind
self._sea_level = SeaLevelTimeSeries._sea_level_interpolator(
np.loadtxt(self._filepath, delimiter=","), kind=self._kind
)
self._time = start
@staticmethod
def _sea_level_interpolator(data, kind="linear"):
return interpolate.interp1d(
data[:, 0],
data[:, 1],
kind=kind,
copy=True,
assume_sorted=True,
bounds_error=True,
)
@property
def filepath(self):
return self._filepath
@filepath.setter
def filepath(self, new_path):
self._filepath = new_path
self._sea_level = SeaLevelTimeSeries._sea_level_interpolator(
np.loadtxt(self._filepath, delimiter=","), kind=self._kind
)
@property
def time(self):
return self._time
def run_one_step(self, dt):
self._time += dt
self.grid.at_grid["sea_level__elevation"] = self._sea_level(self.time)
class SinusoidalSeaLevel(SeaLevelTimeSeries):
def __init__(
self,
grid,
wave_length=1.0,
amplitude=1.0,
phase=0.0,
mean=0.0,
start=0.0,
linear=0.0,
**kwds
):
"""Generate sea level values.
Parameters
----------
grid: ModelGrid
A landlab grid.
"""
wave_length /= 2.0 * np.pi
super(SeaLevelTimeSeries, self).__init__(grid, **kwds)
self._sea_level = (
lambda time: (
np.sin((time - phase) / wave_length)
+ 0.3 * np.sin((2 * (time - phase)) / wave_length)
)
* amplitude
+ mean
+ linear * time
)
self._time = start
def sea_level_type(dictionary):
from .sea_level import sea_level_file
sl_type = dictionary["sl_type"]
if sl_type == "sinusoid":
return sea_level_function(dictionary)
else:
sl_file_name = dictionary["sl_file_name"]
return sea_level_file(sl_file_name, dictionary)
def sea_level_function(dictionary):
    # time is an array of x values (e.g. arange(0, 1000, pi/4))
    # amplitude is the amplitude of the sine function
    # phase is the phase shift
    # t is the title of the graph (string)
    # xt is the title of the x axis (string)
    # xy is the title of the y axis (string)
    # Function starts at 0 or P.
    # Fs is the period of the function (e.g. 10,000)
phase = dictionary["sea_level_phase"]
amplitude = dictionary["sea_level_amplitude"]
slope = dictionary["sea_level_linear"]
Fs = dictionary["sea_level_period"]
start_time = dictionary["start_time"]
run_duration = dictionary["run_duration"]
dt = dictionary["dt"]
time = np.arange(start_time, start_time + run_duration, dt)
sl_array = (
amplitude
* (
np.sin((2 * np.pi * (phase + time)) / Fs)
+ 0.3 * np.sin((2 * np.pi * (2 * phase + 2 * time)) / Fs)
)
+ slope * time
)
return time, sl_array
def sea_level_file(filename, dictionary):
"""
reading in the file above
x is an array of x values (ex. x = arange(0, 10))
y is an array of y values (ex. y = np.exp(x/2.0))
start time (default should be 0)
dt
run duration
    Note: The array of x values can be predetermined to a set of
    values. It goes backwards, so the start will be at -12500 years.
    There will be a sea level array that stores these values.
"""
start_time = dictionary["start_time"]
run_duration = dictionary["run_duration"]
dt = dictionary["dt"]
xes = []
ys = []
with open(filename) as f:
for line in f:
x, y = line.split()
xes.append(x)
ys.append(y)
x = []
for item in xes:
x.append(float(item))
y = []
for item in ys:
y.append(float(item))
f = interpolate.interp1d(x, y, kind="cubic")
times = np.arange(start_time, start_time + run_duration, dt)
sl_array = f(times)
return times, sl_array
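# Hedged usage sketch for the sinusoidal helper: the dictionary keys mirror the
# ones read inside sea_level_function; all numeric values are illustrative.
if __name__ == "__main__":
    params = {
        "sea_level_phase": 0.0,
        "sea_level_amplitude": 10.0,
        "sea_level_linear": 0.0,
        "sea_level_period": 10000.0,
        "start_time": 0.0,
        "run_duration": 20000.0,
        "dt": 100.0,
    }
    times, sea_level = sea_level_function(params)
    print(times.shape, float(sea_level.min()), float(sea_level.max()))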
| 2.46875
| 2
|
src/speech/spectrogram_model.py
|
dem123456789/Speech-Emotion-Recognition-with-Dual-Sequence-LSTM-Architecture
| 6
|
12780661
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import pdb
class SpectrogramModel(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size_cnn, stride_cnn, padding_cnn, kernel_size_pool, stride_pool, hidden_dim, num_layers, dropout_rate, num_labels, batch_size, bidirectional=False):
super(SpectrogramModel, self).__init__()
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size_cnn = kernel_size_cnn
self.stride_cnn = stride_cnn
self.padding_cnn = padding_cnn
self.kernel_size_pool = kernel_size_pool
self.stride_pool = stride_pool
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout_rate = dropout_rate
self.num_labels = num_labels
self.batch_size = batch_size
self.bidirectional = bidirectional
self.num_directions = 1 + self.bidirectional
self.cnn1 = nn.Conv2d(self.in_channels, self.out_channels, self.kernel_size_cnn, stride=self.stride_cnn, padding=self.padding_cnn).to(self.device)
self.batch1 = nn.BatchNorm2d(self.out_channels)
self.cnn2 = nn.Conv2d(self.out_channels, self.out_channels, self.kernel_size_cnn, stride=self.stride_cnn, padding=self.padding_cnn).to(self.device)
self.batch2 = nn.BatchNorm2d(self.out_channels)
self.cnn3 = nn.Conv2d(self.out_channels, self.out_channels*2, self.kernel_size_cnn, stride=self.stride_cnn, padding=self.padding_cnn).to(self.device)
self.batch3 = nn.BatchNorm2d(self.out_channels*2)
self.cnn4 = nn.Conv2d(self.out_channels*2, self.out_channels*2, self.kernel_size_cnn, stride=self.stride_cnn, padding=self.padding_cnn).to(self.device)
self.batch4 = nn.BatchNorm2d(self.out_channels*2)
self.relu = nn.ReLU()
self.max_pool1 = nn.MaxPool2d(self.kernel_size_pool//2, stride=self.stride_pool//2)
self.max_pool = nn.MaxPool2d(self.kernel_size_pool, stride=self.stride_pool)
self.max_pool4 = nn.MaxPool2d(int(self.kernel_size_pool*5/4), stride=int(self.stride_pool*5/4))
self.lstm = nn.LSTM(int(640/160) * int(480/160), self.hidden_dim, self.num_layers, batch_first=True,
dropout=self.dropout_rate, bidirectional=self.bidirectional).to(self.device)
self.classification = nn.Linear(self.hidden_dim * self.num_directions, self.num_labels).to(self.device)
def forward(self, input, target, train=True, multi_gpu=False):
input = input.to(self.device)
target = target.to(self.device)
out = self.cnn1(input)
#print(out.shape)
out = self.batch1(out)
#print(out.shape)
out = self.relu(out)
#print(out.shape)
out = self.max_pool1(out)
#print(out.shape)
out = self.cnn2(out)
#print(out.shape)
out = self.batch2(out)
#print(out.shape)
out = self.relu(out)
#print(out.shape)
out = self.max_pool(out)
#print(out.shape)
out = self.cnn3(out)
#print(out.shape)
out = self.batch3(out)
#print(out.shape)
out = self.relu(out)
#print(out.shape)
out = self.max_pool(out)
#print(out.shape)
out = self.cnn4(out)
#print(out.shape)
out = self.batch4(out)
#print(out.shape)
out = self.relu(out)
#print(out.shape)
out = self.max_pool4(out)
#print(out.shape)
#out = torch.flatten(out, start_dim=2, end_dim=3)
out = out.view(list(out.size())[0], list(out.size())[1], -1)
#pdb.set_trace()
out, hn = self.lstm(out)
# print(out.shape)
out = torch.mean(out, dim=1)
out = self.classification(out)
loss = F.cross_entropy(out, torch.max(target, 1)[1])
if multi_gpu:
out=torch.unsqueeze(out, dim=0)
loss=torch.unsqueeze(loss,dim=0)
return out, loss
| 2.4375
| 2
|
swann/viz/tfr.py
|
alexrockhill/SwannLabResources
| 0
|
12780662
|
import os
import numpy as np
import os.path as op
import matplotlib.pyplot as plt
from matplotlib.colors import BASE_COLORS, SymLogNorm
from scipy.stats import ttest_ind
from swann.preprocessing import get_info
from swann.utils import get_config, derivative_fname
from swann.analyses import decompose_tfr, find_bursts, get_bursts
from mne.viz import iter_topography
from mne.time_frequency import tfr_morlet, EpochsTFR, AverageTFR
from mne import Epochs, EvokedArray
def plot_spectrogram(rawf, raw, event, events, bl_events,
method='raw', baseline='z-score',
freqs=np.logspace(np.log(4), np.log(250), 50, base=np.e),
n_cycles=7, use_fft=True, ncols=3, plot_erp=True,
plot_bursts=False, picks=None, verbose=True,
overwrite=False):
    ''' Plot spectrograms of the data epoched around the given event.
Parameters
----------
rawf : pybids.BIDSlayout file
The object containing the raw data.
raw : mne.io.Raw
The raw data object.
event : str
The name of the event (e.g. `Response`).
events : np.array(n_events, 3)
The events from mne.events_from_annotations or mne.find_events
corresponding to the event and trials that are described by the name.
bl_events: np.array(n_events, 3)
The events from mne.events_from_annotations or mne.find_events
corresponding to the baseline for the event and trials
that are described by the name.
    method : `raw` | `phase-locked` | `non-phase-locked` | `total`
How to plot the spectrograms:
raw -- plot without averaging power (default)
phase-locked -- just average the event-related potential (ERP)
non-phase-locked -- subtract the ERP from each epoch, do time
frequency decomposition (TFR) then average
total -- do TFR on each epoch and then average
baseline : `z-score` | `gain`
        How to baseline spectrogram data:
z-score -- for each frequency, subtract the median and divide
by the standard deviation (default)
gain -- divide by median
freqs : np.array
The frequencies over which to compute the spectral data.
n_cycles : int, np.array
The number of cycles to use in the Morlet transform
use_fft : bool
Use Fast Fourier Transform see `mne.time_frequency.tfr.cwt`.
ncols : int
The number of ncols to use in the plot (for `method=raw`).
plot_erp : bool
Whether to plot the event-related potential on top.
plot_bursts : bool
Whether to include vertical bars for when the bursts are detected
(for `method=raw`).
picks : None | list of str
The names of the channels to plot
'''
config = get_config()
raw = raw.copy()
raw.load_data()
if method not in ('raw', 'phase-locked', 'non-phase-locked', 'total'):
raise ValueError('Unrecognized method {}'.format(method))
if picks is None:
picks = raw.ch_names
else:
if isinstance(picks, str):
picks = [picks]
raw = raw.pick_channels(picks)
if method == 'raw' and len(picks) > 1:
raise ValueError('Only one channel can be plotted at a time '
'for raw spectrograms')
plotf = derivative_fname(rawf, 'plots/spectrograms',
'event-{}_spectrogram_{}_{}_power'.format(
event, method, baseline),
config['fig'])
if op.isfile(plotf) and not overwrite:
print('Spectrogram plot for {} already exists, '
'use `overwrite=True` to replot'.format(event))
return
if method == 'raw' and plot_bursts:
bursts = find_bursts(rawf, return_saved=True)
if isinstance(n_cycles, np.ndarray) and len(freqs) != len(n_cycles):
raise ValueError('Mismatch lengths n_cycles {} to freqs {}'.format(
n_cycles, freqs))
epochs = Epochs(raw, events, tmin=config['tmin'] - 1, baseline=None,
tmax=config['tmax'] + 1, preload=True)
# align baseline events with epochs with enough events
bl_events = np.delete(bl_events, [i for i, e in enumerate(bl_events[:, 2])
if e not in epochs.events[:, 2]], axis=0)
bl_epochs = Epochs(raw, bl_events, tmin=config['baseline_tmin'] - 1,
baseline=None, tmax=config['baseline_tmax'] + 1,
preload=True)
cropped_epochs = epochs.copy().crop(tmin=config['tmin'],
tmax=config['tmax'])
cropped_bl_epochs = bl_epochs.copy().crop(
tmin=config['baseline_tmin'], tmax=config['baseline_tmax'])
if method == 'phase-locked':
bl_evoked = EvokedArray(np.median(bl_epochs._data, axis=0),
info=bl_epochs.info, tmin=bl_epochs.tmin,
nave=len(bl_epochs))
bl_evoked_tfr = tfr_morlet(bl_evoked, freqs, n_cycles=n_cycles,
use_fft=use_fft, return_itc=False)
bl_evoked_tfr.crop(tmin=config['baseline_tmin'],
tmax=config['baseline_tmax'])
evoked = EvokedArray(np.median(epochs._data, axis=0),
info=epochs.info, tmin=epochs.tmin,
nave=len(epochs))
evoked_tfr = tfr_morlet(evoked, freqs, n_cycles=n_cycles,
use_fft=use_fft, return_itc=False)
evoked_tfr.crop(tmin=config['tmin'], tmax=config['tmax'])
evoked_tfr.data = \
evoked_tfr.data - np.median(bl_evoked_tfr.data,
axis=2)[:, :, np.newaxis]
evoked_tfr.data /= np.std(bl_evoked_tfr.data, axis=2)[:, :, np.newaxis]
else:
if method == 'non-phase-locked':
epochs._data -= np.median(epochs._data, axis=0)
epochs_data = np.zeros((len(epochs), len(epochs.ch_names), len(freqs),
len(cropped_epochs.times)))
bl_epochs_data = np.zeros((len(bl_epochs), len(bl_epochs.ch_names),
len(freqs), len(cropped_bl_epochs.times)))
epochs_tfr = EpochsTFR(epochs.info, epochs_data, cropped_epochs.times,
freqs, verbose=False)
bl_epochs_tfr = EpochsTFR(bl_epochs.info, bl_epochs_data,
cropped_bl_epochs.times, freqs,
verbose=False)
if method != 'raw':
evoked_tfr_data = np.zeros((len(epochs.ch_names), len(freqs),
len(cropped_epochs.times)))
evoked_tfr = AverageTFR(epochs.info, evoked_tfr_data,
cropped_epochs.times, freqs,
nave=len(epochs))
for i, ch in enumerate(epochs.ch_names):
if verbose:
print('\nComputing TFR ({}/{}) for {}... '
'Computing frequency'.format(i, len(epochs.ch_names),
ch), end=' ', flush=True) # noqa
this_epochs = epochs.copy().pick_channels([ch])
this_bl_epochs = bl_epochs.copy().pick_channels([ch])
for j, freq in enumerate(freqs):
if verbose:
print('{:.2f}'.format(freq), end=' ', flush=True)
this_n_cycles = (n_cycles if isinstance(n_cycles, int) else
n_cycles[i])
this_bl_epochs_tfr = \
tfr_morlet(this_bl_epochs, [freq], n_cycles=this_n_cycles,
use_fft=use_fft, average=False,
return_itc=False, verbose=False)
this_bl_epochs_tfr = this_bl_epochs_tfr.crop(
tmin=config['baseline_tmin'], tmax=config['baseline_tmax'])
this_epochs_tfr = \
tfr_morlet(this_epochs, [freq], n_cycles=this_n_cycles,
use_fft=use_fft, average=False,
return_itc=False, verbose=False)
this_epochs_tfr = this_epochs_tfr.crop(
tmin=config['tmin'], tmax=config['tmax'])
full_data = np.concatenate([this_bl_epochs_tfr.data,
this_epochs_tfr.data], axis=3)
epochs_tfr.data[:, i:i + 1, j:j + 1, :] = this_epochs_tfr.data
epochs_tfr.data[:, i:i + 1, j:j + 1, :] -= \
np.median(full_data, axis=3)[:, :, :, np.newaxis]
epochs_tfr.data[:, i:i + 1, j:j + 1, :] /= \
np.std(full_data, axis=3)[:, :, :, np.newaxis]
bl_epochs_tfr.data[:, i:i + 1, j:j + 1, :] = \
this_bl_epochs_tfr.data
bl_epochs_tfr.data[:, i:i + 1, j:j + 1, :] -= \
np.median(full_data, axis=3)[:, :, :, np.newaxis]
bl_epochs_tfr.data[:, i:i + 1, j:j + 1, :] /= \
np.std(full_data, axis=3)[:, :, :, np.newaxis]
if method != 'raw':
this_evoked_tfr = np.median(epochs_tfr.data[:, i, j],
axis=0)
this_bl_evoked_tfr = np.median(bl_epochs_tfr.data[:, i, j],
axis=0)
evoked_tfr.data[i, j] = \
this_evoked_tfr - np.median(this_bl_evoked_tfr)
evoked_tfr.data[i, j] /= np.std(this_bl_evoked_tfr)
if method == 'raw':
ch_name = epochs_tfr.ch_names[0]
vmin, vmax = np.min(epochs_tfr.data), np.max(epochs_tfr.data)
emin, emax = np.min(cropped_epochs._data), np.max(cropped_epochs._data)
if verbose:
print('Plotting spectrogram for channel {}'.format(ch_name))
if plot_bursts:
n_bursts = len(bursts[bursts['channel'] == ch_name])
print('{} bursts for this channel total'.format(n_bursts))
nrows = int(np.ceil(len(events) / ncols))
fig, axes = plt.subplots(nrows, ncols)
fig.set_size_inches(ncols, nrows)
axes = axes.flatten()
for j, this_tfr in enumerate(epochs_tfr):
evoked_data = (cropped_epochs._data[j, 0], emin, emax)
cmap = _plot_spectrogram(
axes[j], this_tfr[i], epochs_tfr.times,
vmin, vmax, freqs, evoked_data,
show_xticks=j >= len(events) - ncols,
show_yticks=j % ncols == 0,
show_ylabel=j == int(nrows / 2) * ncols)
if plot_bursts:
_plot_bursts(config, events, raw, bursts, j, axes, ch_name)
for ax in axes[len(epochs_tfr):]:
ax.axis('off')
else:
if plot_erp:
evoked_data = np.median(cropped_epochs._data, axis=0)
evoked_data -= np.median(evoked_data, axis=1)[:, np.newaxis]
evoked = EvokedArray(evoked_data, info=epochs.info,
tmin=epochs.tmin, nave=len(epochs))
emin, emax = np.min(evoked.data), np.max(evoked.data)
vmin, vmax = np.min(evoked_tfr.data), np.max(evoked_tfr.data)
if raw.info['dig'] is None:
nrows = int(len(raw.ch_names) ** 0.5)
ncols = int(len(raw.ch_names) / nrows) + 1
fig, axes = plt.subplots(nrows, ncols)
fig.set_size_inches(12, 8)
axes = axes.flatten()
for idx, ax in enumerate(axes):
if idx < len(picks):
cmap = _plot_spectrogram(
ax, evoked_tfr.data[idx], evoked_tfr.times,
vmin, vmax, freqs, ((evoked.data[idx], emin, emax) if
plot_erp else None),
show_xticks=idx >= len(picks) - ncols,
show_yticks=idx % ncols == 0,
show_ylabel=idx == int(nrows / 2) * ncols)
ax.set_title(raw.ch_names[idx])
else:
ax.axis('off')
else:
for ax, idx in iter_topography(raw.info, fig_facecolor='white',
axis_facecolor='white',
axis_spinecolor='white'):
cmap = _plot_spectrogram(
ax, this_tfr, evoked_tfr.times, vmin, vmax, freqs,
((evoked.data[idx], emin, emax) if plot_erp else None))
fig.subplots_adjust(right=0.85, hspace=0.3)
cax = fig.add_subplot(position=[0.87, 0.1, 0.05, 0.8])
cax = fig.colorbar(cmap, cax=cax, format='{:.2f}',
ticks=[vmin, vmin / 10, vmin / 100,
vmax / 100, vmax / 10, vmax])
cax.set_label(('Log {} Power {} Normalized'.format(method, baseline)
).title())
fig.suptitle('Time Frequency Decomposition for the {} '
'Event, {} Power'.format(event, baseline.capitalize()))
fig.savefig(plotf, dpi=300)
plt.close(fig)
def _plot_spectrogram(ax, this_tfr, times, vmin, vmax,
freqs, evoked_data, show_yticks=True,
show_ylabel=True, show_xticks=True):
'''Plot a single spectrogram'''
cmap = ax.imshow(this_tfr, cmap='RdYlBu_r', aspect='auto',
extent=[0, this_tfr.shape[1], 0, this_tfr.shape[0]],
norm=SymLogNorm(linthresh=(vmax - vmin) / 100,
vmin=vmin, vmax=vmax))
if evoked_data is not None:
evoked, emin, emax = evoked_data
ax2 = ax.twinx()
ax2.set_yticks([])
ax2.plot(range(this_tfr.shape[1]), evoked, alpha=0.25, color='k')
ax2.set_ylim([emin, emax])
ax.invert_yaxis()
if show_yticks:
ax.set_yticks(np.linspace(0, len(freqs), 5))
ax.set_yticklabels(['{:.2f}'.format(f) for f in
freqs[::-int(len(freqs) / 5)]])
else:
ax.set_yticklabels([])
if show_ylabel:
ax.set_ylabel('Frequency (Hz)')
ax.axvline(np.where(times == 0)[0][0], color='k')
if show_xticks:
ax.set_xlabel('Time (s)')
ax.set_xticks(np.linspace(0, len(times), 3))
ax.set_xticklabels(['{:.1f}'.format(t) for t in
np.linspace(times[0], times[-1], 3)])
else:
ax.set_xticks([])
return cmap
def _plot_bursts(config, events, raw, bursts, j, axes, ch_name):
'''Plot bursts on a single spectrogram'''
min_idx = events[j, 0] + raw.info['sfreq'] * config['tmin']
max_idx = events[j, 0] + raw.info['sfreq'] * config['tmax']
these_bursts = bursts[(bursts['channel'] == ch_name) &
(bursts['burst_end'] > min_idx) &
(bursts['burst_start'] < max_idx)]
if these_bursts.size > 0:
for burst_idx in these_bursts.index:
for start_stop in ['burst_start', 'burst_end']:
if (max_idx > these_bursts.loc[burst_idx,
start_stop] >
min_idx):
axes[j].axvline(
x=these_bursts.loc[burst_idx,
start_stop] - min_idx,
color='green')
| 2.3125
| 2
|
cinderclient/v3/services.py
|
wmore/openstack_study
| 0
|
12780663
|
<gh_stars>0
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
service interface
"""
from cinderclient import api_versions
from cinderclient.v2 import services
Service = services.Service
class ServiceManager(services.ServiceManager):
@api_versions.wraps("3.0")
def server_api_version(self):
"""Returns the API Version supported by the server.
:return: Returns response obj for a server that supports microversions.
Returns an empty list for Liberty and prior Cinder servers.
"""
try:
return self._get_with_base_url("", response_key='versions')
except LookupError:
return []
| 1.820313
| 2
|
nurbspy/nurbs_surface_bilinear.py
|
dragonbook/nurbspy
| 25
|
12780664
|
# -------------------------------------------------------------------------------------------------------------------- #
# Import packages
# -------------------------------------------------------------------------------------------------------------------- #
import numpy as np
from .nurbs_surface import NurbsSurface
# -------------------------------------------------------------------------------------------------------------------- #
# Define the bilinear NURBS surface class
# -------------------------------------------------------------------------------------------------------------------- #
class NurbsSurfaceBilinear:
""" Create a NURBS representation of the bilinear patch defined by corners P00, P01, P10, and P11
Create a NURBS representation of the bilinear patch
S(u,v) = (1-v)*[(1-u)*P00 + u*P01] + v*[(1-u)*P10 + u*P11]
Note that a bilinear patch is a ruled surface with segments (P00, P01) and (P10, P11) as generating curves
S(u,v) = (1-v)*C1(u) + v*C2(u)
C1(u) = (1-u)*P00 + u*P01
C2(u) = (1-u)*P10 + u*P11
Parameters
----------
P00, P01, P10, P11 : ndarrays with shape (ndim,)
Coordinates of the corner points defining the bilinear surface (ndim=3)
References
----------
The NURBS book. Chapter 8.2
<NAME> and <NAME>
Springer, second edition
"""
def __init__(self, P00, P01, P10, P11):
# Declare input variables as instance variables
self.P00 = P00
self.P01 = P01
self.P10 = P10
self.P11 = P11
self.ndim = 3
# Check the number of dimensions of the problem
ndims = [np.shape(P00)[0], np.shape(P01)[0], np.shape(P10)[0], np.shape(P11)[0]]
if any([ndim != 3 for ndim in ndims]):
raise Exception("The input points must be three-dimensional")
# Make the bilinear patch NURBS representation
self.NurbsSurface = None
self.make_nurbs_surface()
def make_nurbs_surface(self):
""" Make a NURBS surface representation of the bilinear surface """
# Define the array of control points
n_dim, n, m = self.ndim, 2, 2
P = np.zeros((n_dim, n, m))
P[:, 0, 0] = self.P00
P[:, 1, 0] = self.P01
P[:, 0, 1] = self.P10
P[:, 1, 1] = self.P11
# Create the NURBS surface
self.NurbsSurface = NurbsSurface(control_points=P)
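# Hedged usage sketch: build a bilinear patch from four corner points and check
# the analytic form S(u,v) = (1-v)*[(1-u)*P00 + u*P01] + v*[(1-u)*P10 + u*P11]
# directly with numpy (the evaluation API of NurbsSurface itself is not shown
# in this file, so only the analytic expression is evaluated here).
if __name__ == "__main__":
    P00, P01 = np.array([0.0, 0.0, 0.0]), np.array([1.0, 0.0, 0.0])
    P10, P11 = np.array([0.0, 1.0, 0.0]), np.array([1.0, 1.0, 1.0])
    patch = NurbsSurfaceBilinear(P00, P01, P10, P11)
    u, v = 0.5, 0.5
    S = (1 - v) * ((1 - u) * P00 + u * P01) + v * ((1 - u) * P10 + u * P11)
    print(S)  # centre of the patch, here [0.5, 0.5, 0.25]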
| 1.765625
| 2
|
tests/test_base.py
|
1xch/fliki
| 3
|
12780665
|
def test_base(app):
assert app.extensions['fliki'] is not None
def test_has_index(client):
r = client.get('/test/', follow_redirects=True)
assert b"Title: index\nSummary: base page for wiki" not in r.data
assert b"Welcome to your flask-wiki" in r.data
def test_new_page(client):
r = client.get('/test/test_page/', follow_redirects=True)
assert b"Edit Page Content" in r.data
def test_edit_page(client):
r = client.post('/test/save',
data=dict(pagekey='test_page', edit_content='a test_page'),
follow_redirects=True)
assert b"a test_page" in r.data
def test_display_page(client):
r = client.get('/test/random_page/', follow_redirects=True)
assert b"A random page" in r.data
def test_move_page(client):
r1 = client.post('/test/move',
data=dict(oldkey='random_page', newkey='new/random_page'),
follow_redirects=True)
r2 = client.get('/test/new/random_page/', follow_redirects=True)
r3 = client.get('/test/random_page/', follow_redirects=True)
assert b"A random page" in r1.data
assert r1.data == r2.data
assert b"A random page" not in r3.data
def test_delete_page(client):
r = client.post('/test/delete',
data=dict(delete='random_page'),
follow_redirects=True)
assert b"index" in r.data
| 2.4375
| 2
|
backpack/extensions/firstorder/batch_grad/batch_grad_base.py
|
jabader97/backpack
| 395
|
12780666
|
"""Calculates the batch_grad derivative."""
from __future__ import annotations
from typing import TYPE_CHECKING, Callable, List, Tuple
from torch import Tensor
from torch.nn import Module
from backpack.core.derivatives.basederivatives import BaseParameterDerivatives
from backpack.extensions.firstorder.base import FirstOrderModuleExtension
from backpack.utils.subsampling import subsample
if TYPE_CHECKING:
from backpack.extensions.firstorder import BatchGrad
class BatchGradBase(FirstOrderModuleExtension):
"""Calculates the batch_grad derivative.
Passes the calls for the parameters to the derivatives class.
Implements functions with method names from params.
If child class wants to overwrite these methods
- for example to support an additional external module -
it can do so using the interface for parameter "param1"::
param1(ext, module, g_inp, g_out, bpQuantities):
return batch_grads
In this case, the method is not overwritten by this class.
"""
def __init__(
self, derivatives: BaseParameterDerivatives, params: List[str]
) -> None:
"""Initializes all methods.
If the param method has already been defined, it is left unchanged.
Args:
derivatives: Derivatives object used to apply parameter Jacobians.
params: List of parameter names.
"""
self._derivatives = derivatives
for param_str in params:
if not hasattr(self, param_str):
setattr(self, param_str, self._make_param_function(param_str))
super().__init__(params=params)
def _make_param_function(
self, param_str: str
) -> Callable[[BatchGrad, Module, Tuple[Tensor], Tuple[Tensor], None], Tensor]:
"""Creates a function that calculates batch_grad w.r.t. param.
Args:
param_str: Parameter name.
Returns:
Function that calculates batch_grad wrt param
"""
def param_function(
ext: BatchGrad,
module: Module,
g_inp: Tuple[Tensor],
g_out: Tuple[Tensor],
bpQuantities: None,
) -> Tensor:
"""Calculates batch_grad with the help of derivatives object.
Args:
ext: extension that is used
module: module that performed forward pass
g_inp: input gradient tensors
g_out: output gradient tensors
bpQuantities: additional quantities for second order
Returns:
Scaled individual gradients
"""
subsampling = ext.get_subsampling()
batch_axis = 0
return self._derivatives.param_mjp(
param_str,
module,
g_inp,
g_out,
subsample(g_out[0], dim=batch_axis, subsampling=subsampling),
sum_batch=False,
subsampling=subsampling,
)
return param_function
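# Hedged sketch of the override mechanism described in the class docstring:
# a subclass that already defines a method named after one of its parameters
# keeps that method, because __init__ only generates functions for names that
# are missing. The parameter name "weight" below is illustrative.
class _CustomBatchGrad(BatchGradBase):
    def weight(
        self,
        ext: BatchGrad,
        module: Module,
        g_inp: Tuple[Tensor],
        g_out: Tuple[Tensor],
        bpQuantities: None,
    ) -> Tensor:
        # custom handling could go here; this sketch simply defers to the
        # generic parameter function built by the base class.
        return self._make_param_function("weight")(
            ext, module, g_inp, g_out, bpQuantities
        )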
| 2.65625
| 3
|
mapping.py
|
tlfobe/LAMMPS-SIMR
| 0
|
12780667
|
#!/usr/bin/python
# Analysis scripts for mapping same phase energies to each other using signac
import json
import signac
import numpy as np
import subprocess
import pdb
import os
import sys
import pandas
import mdtraj as md
import shutil
import yaml
#need to make yaml not just enumerate all ts and ps, but give other info like mapping type, nequil etc
project = signac.get_project()
with open('test.yaml') as fn:
    schema = yaml.safe_load(fn)
nenergies = 3000 # should be read from the files, not hard coded
nequil = 1000
nevery = 1
mapping = ['volume','harmonic'] # both harmonic and volume
integration = 'direct'
ensemble = 'NPT'
polys = schema['poly']
T = schema['T']
P = schema['p']
# probably don't need these Tstate Pstates...
Tstates = [ list(map(int, T)) for i in range(len(P))] #make an array of all states T,P
Tstates = np.array(Tstates).flatten() #flatten into np.arrays for use
Pstates = [ list(map(int, str(P[i]).split()*len(T))) for i in range(len(P))] #same thing here
Pstates = np.array(Pstates).flatten()
states = len(Tstates)
Lsizeold = dict()
Lsizenew = dict()
npoints = (nenergies - nequil) // nevery + 1
for poly in polys:
    u_kln = np.zeros([states, states, npoints], float)
print('start running')
# Volume Scaling Loop
for job in project.find_jobs({'poly':poly}):
        with open(job.fn('vol_eng.json')) as jf:
            job_data = json.load(jf)
volumes = np.array(job_data['volume'])
volumes = volumes[nequil:]
avg_v = np.mean(volumes)
if 'volume' in mapping:
Lsizeold[job.workspace()] = avg_v**(1.0/3.0)
Lsizenew[job.workspace()] = avg_v**(1.0/3.0)
else:
Lsizeold[job.workspace()] = 1
Lsizenew[job.workspace()] = 1
        with open('old_v.txt', 'a') as fn:
            fn.write("%10.4f %10.4f +/- %6.4f\n" % (Lsizeold[job.workspace()], avg_v, np.std(volumes)))
        with open('new_v.txt', 'a') as fn:
            fn.write("%10.4f %10.4f +/- %6.4f\n" % (Lsizenew[job.workspace()], avg_v, np.std(volumes)))
original_v = np.zeros([states, states], float)
original_e = np.zeros([states, states], float)
new_v = np.zeros([states,states, npoints])
new_e = np.zeros([states,states, npoints])
lnJcbn = np.zeros([states,states])
for job1 in project.find_jobs({'poly':poly}):
for job2 in project.find_jobs({'poly':poly}):
# need to remove bc for calculation of volume
os.system('babel '+job1.fn('mini.xyz')+' -opdb > '+job1.fn('mini.pdb'))
#mdtraj can't read xyz files, quick convert to pdb
t = md.load(job1.fn('prod.dcd'), top=job1.fn('mini.pdb'))
tnojump = t.image_molecules()
print(len(t))
# Extract trajectories as mdtraj.Traj object
tscale = tnojump.slice(range(tnojump.n_frames), copy=True)
# Conditionals based on type of mapping
#if 'NVT' in dirtext:
# NVTvolume = tnojump[0].unitcell_volumes[0]
#if 'harmonic' in mapping:
temperature_scale = np.sqrt(job1.sp.T/job2.sp.T)
#else:
# temperature_scale = 1
#if 'NPT' in dirtext:
# for now make it work for just NPT, Harmonic, can adjust later on
tscale.xyz = np.array([ tscale.xyz[s]*np.matrix(ts.unitcell_vectors)**-1 for s, ts in enumerate(tscale)])
# Temperature mapping (So MBAR does can converge)
            means = np.mean(tscale.xyz, axis=0)
divergences = (tscale.xyz-means)*temperature_scale
tscale.xyz = divergences+means
tscale.unitcell_vectors *= Lsizenew[job2.workspace()]/Lsizeold[job1.workspace()]
#if 'NPT' in dirtext:
tscale.xyz = np.array([ tscale.xyz[s]*np.matrix(ts.unitcell_vectors) for s, ts in enumerate(tscale)])
tscale.save(job1.fn('prod_mapped.dcd'))
# Need script reruns trajectories here
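# Hedged example of the YAML schema this script expects (keys inferred from the
# reads of schema['poly'], schema['T'] and schema['p'] above; values made up):
#
#   poly:
#     - polymerA
#     - polymerB
#   T:
#     - 300
#     - 350
#   p:
#     - 1
#     - 10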
| 2.390625
| 2
|
venv/lib/python2.7/dist-packages/landscape/lib/bpickle_dbus.py
|
pengwu/scapy_env
| 0
|
12780668
|
<reponame>pengwu/scapy_env
"""
Different versions of the Python DBus bindings return different types
to represent integers, strings, lists, etc. Older versions return
builtin Python types: C{int}, C{str}, C{list}, etc. Newer versions
return DBus-specific wrappers: C{Int16}, C{String}, C{Array}, etc.
Failures occur when DBus types are used because bpickle doesn't know
that an C{Int16} is really an C{int} and that an C{Array} is really a
C{list}.
L{install} and L{uninstall} can install and remove extensions that
make bpickle work with DBus types.
"""
import dbus
from landscape.lib import bpickle
def install():
"""Install bpickle extensions for DBus types."""
for type, function in get_dbus_types():
bpickle.dumps_table[type] = function
def uninstall():
"""Uninstall bpickle extensions for DBus types."""
for type, function in get_dbus_types():
del bpickle.dumps_table[type]
def dumps_utf8string(obj):
"""
Convert the specified L{dbus.types.UTF8String} to bpickle's
representation for C{unicode} data.
"""
return "u%s:%s" % (len(obj), obj)
def dumps_double(obj):
"""
Convert a dbus.types.Double into a floating point representation.
"""
return "f%r;" % float(obj)
def get_dbus_types():
"""
Generator yields C{(type, bpickle_function)} for available DBus
types.
"""
for (type_name, function) in [("Boolean", bpickle.dumps_bool),
("Int16", bpickle.dumps_int),
("UInt16", bpickle.dumps_int),
("Int32", bpickle.dumps_int),
("UInt32", bpickle.dumps_int),
("Int64", bpickle.dumps_int),
("UInt64", bpickle.dumps_int),
("Double", dumps_double),
("Array", bpickle.dumps_list),
("Dictionary", bpickle.dumps_dict),
("String", bpickle.dumps_unicode),
("UTF8String", dumps_utf8string)]:
type = getattr(dbus.types, type_name, None)
if type is not None:
yield type, function
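# Hedged usage sketch: install() registers the DBus wrapper types once, after
# which structures containing them can be serialised; uninstall() removes the
# extra entries again. The payload below is illustrative.
if __name__ == "__main__":
    install()
    try:
        payload = bpickle.dumps({"answer": dbus.types.Int32(42)})
        print(bpickle.loads(payload))
    finally:
        uninstall()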
| 2.46875
| 2
|
chinese-poem/probability.py
|
MashiMaroLjc/ML-and-DM-in-action
| 370
|
12780669
|
# coding:utf-8
#
def two(words):
    """
    Enumerate all unordered pairs of distinct words.
    :param words: sequence of words
    :return: list of frozensets, one per word pair
    """
new = []
s = len(words)
for index in range(s):
w = words[index]
for next_index in range(index + 1, s):
next_w = words[next_index]
new.append(frozenset([w, next_w]))
return new
poemfile = open("five_poem.txt").readlines()
feature = []
n = 1
length = len(poemfile)
for poemline in poemfile:
print("finish:%.5f" % (n / length))
poemline = poemline.strip().replace("\n", "")
sentences = poemline.split(".")
temp = []
for sen in sentences:
if len(sen) != 5:
continue
temp.append(sen[:2])
feature.append(temp)
n += 1
size = len(feature)
word_fre = dict()
for fea in feature:
for word in set(fea):
word_fre[word] = word_fre.get(word, 0) + 1 / size
two_fre = dict()
two_feature = []
#
for fea in feature:
fea = list(set(fea))
two_feature.append(two(fea))
for fea in two_feature:
for word in fea:
two_fre[word] = two_fre.get(word, 0) + 1 / size
#
pro = dict()
for k, v in two_fre.items():
event = list(k)
#
key = event[0]
if key not in pro:
pro[key] = []
pro[key].append(
[event[1], two_fre[k] / word_fre[key]]
)
key = event[1]
if key not in pro:
pro[key] = []
pro[key].append(
[event[0], two_fre[k] / word_fre[key]]
)
#
import json
out = open("pro.json", "w")
json.dump(pro, out)
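# Illustrative sketch (not part of the original script): each key in pro.json
# maps to [follower, P(follower | key)] pairs, so the most likely next
# two-character openings for a given opening can be read off directly.
def _demo_most_likely_followers(key, top_n=5):
    with open("pro.json") as f:
        table = json.load(f)
    followers = table.get(key, [])
    return sorted(followers, key=lambda pair: pair[1], reverse=True)[:top_n]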
| 2.75
| 3
|
CloudRun/Scraper/code/main.py
|
greavr/easy-golf-booker
| 0
|
12780670
|
<gh_stars>0
import collections
import json
import logging
import os
import random
import string
import sys
import time
from datetime import date, datetime, timedelta, timezone
import google.cloud.logging
import pytz
import requests
from flask import Flask, jsonify
from google.cloud import datastore, pubsub_v1
from pytz import reference
from golfcourse import golfcourse
## Application Variables
LoggingClient = google.cloud.logging.Client()
GolfCourseList = []
## Get Envars
project_id = os.environ.get('GCP_PROJECT', '')
topic_name = os.environ.get('pubsub_topic', 'golf-bot-notify')
# App Config
app = Flask(__name__)
# Function To Send Txt Message
def send_sms(DataTosend):
# Publish Message to PubSub Queue to Notify
global project_id,topic_name
#Set TimeZone
NotificationTypes = GetNotificationTimes()
timezone = pytz.timezone(NotificationTypes['timezone'])
startTime = datetime.combine(date.today(),datetime.strptime(NotificationTypes['start'], '%H:%M').time())
startTime = timezone.localize(startTime)
endTime = datetime.combine(date.today(),datetime.strptime(NotificationTypes['end'], '%H:%M').time())
endTime = timezone.localize(endTime)
# Notifications Disabled
if not NotificationTypes['enabled']:
return
# Check if now is between notification times
RightNow = datetime.now()
RightNow = timezone.localize(RightNow)
print(f"ST: {startTime}, ET: {endTime}, RN: {RightNow}, RN-NO-TZ: {datetime.now()}, TimeZone: {reference.LocalTimezone().tzname(datetime.now())}")
if startTime <= RightNow <= endTime:
logging.info("Inside Notification Window")
publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path(project_id, topic_name)
future = publisher.publish(topic_path, DataTosend.encode("utf-8"))
else:
logging.info("Outside Notification Window")
# Send notification of date Found
def Notify(NumSlotsFound,FoundRanges,DateFound, Course, Players):
# Notification process
## SendSMS validates timeframe for communication
## This function looks for values to notify on
if NumSlotsFound == 0:
return
# Build txt body:
Body = f"Found {str(NumSlotsFound)} slot(s) on the following date {str(DateFound)} for {Players} players at {Course}:"
Body += "\n" + str(FoundRanges)
# Log Txt Body
logging.info(f"Sending Txt Message with the following details: {Body}")
# Send SMS Notification
send_sms(DataTosend=Body)
# Function To Build GolfCourse Object
def BuildGolfCourseList():
global GolfCourseList
# Get Search Times
AllSearchTimes = BuildSearchTimes()
AllOptions = GetOptions()
# Get Course List from Datastore
CourseList = GetCourseList()
#Build Array Of GolfCourse Objects
for aLocation in CourseList:
thisGolf = golfcourse(LocationName=aLocation["Name"],CourseURL=aLocation["Location"], CourseNames=aLocation["Course"], SearchTimes=AllSearchTimes, PlayerElement=aLocation["PlayerElement"], DateElement=aLocation["DateElement"], SearchDates=AllOptions["DaysOfWeek"], SearchPlayers=AllOptions["Players"])
GolfCourseList.append(thisGolf)
# Function To Get Notification Times from DataStore
def GetNotificationTimes():
global project_id
# Now try to load the keys from DS:
query = datastore.Client(project=project_id,namespace='golf-bot').query(kind="notificationTimes")
results = list(query.fetch())
return({"start":results[0]['start'],"end":results[0]['end'], "enabled" : results[0]['enabled'], "timezone" : results[0]['timezone']})
# Function To Get Course List From Datastore
def GetCourseList():
global project_id
# Now try to load the keys from DS:
query = datastore.Client(project=project_id,namespace='golf-bot').query(kind="Locations")
datastore_values = list(query.fetch())
results = []
for aSet in datastore_values:
aResult = { "Name" : aSet['Name'], "Location": aSet['Location'], "DateElement" : aSet['DateElement'], "Course" : aSet['Course'], "PlayerElement" : aSet['PlayerElement']}
results.append(aResult)
return results
# Function To Get Day Of Week from Datastore
def GetOptions():
global project_id
# Now try to load the keys from DS:
query = datastore.Client(project=project_id,namespace='golf-bot').query(kind="Options")
results = list(query.fetch())
return(results[0])
#return({"Days" : list(map(int,results[0]['DaysOfWeek'].split(','))), "Players" : list(map(int,results[0]['Players'].split(','))) })
# Function To Save Times to Datastore
def SaveFoundTimesToDataStore(Location, DataToSave):
global project_id
namespace='golf-bot'
kind = "TeeTimesFound"
#Create Array of data elements
DataToAdd = []
for aDataRow in DataToSave:
DataToAdd.append({ 'PlayerCount' : aDataRow["PlayerCount"], "Times": aDataRow['Times'], "Date": aDataRow["Date"] })
try:
# Create a Cloud Datastore client.
datastore_client = datastore.Client(project=project_id,namespace=namespace)
# Create the Cloud Datastore key for the new entity.
task_key = datastore_client.key(kind,Location)
task = datastore.Entity(key=task_key)
task['LocationName'] = Location
task['Data'] = DataToAdd
task['TimeStamp'] = datetime.now()
datastore_client.put(task)
except:
e = sys.exc_info()
logging.error(f"Error Occured: {e}, Project: {project_id}, NameSpace: {namespace}, Kind: {kind}, Name: {Location}, Data: {DataToSave}")
# Function To save All Found Times to Datastore
def SaveFoundTimesLogs(aGolfCourse):
global project_id
# DataStore Keys
namespace='golf-bot'
kind = "TeeTimeLog"
    # Iterate over values
for aCourse in aGolfCourse.FoundTimes:
for ThisData in aGolfCourse.FoundTimes[aCourse]:
aKey=''.join(random.choice(string.ascii_letters) for i in range(7))
# Try writing data
try:
# Create a Cloud Datastore client.
datastore_client = datastore.Client(project=project_id,namespace=namespace)
# Create the Cloud Datastore key for the new entity.
task_key = datastore_client.key(kind,aKey)
task = datastore.Entity(key=task_key)
task['CourseName'] = aCourse
task['PlayerCount'] = ThisData["PlayerCount"]
task['Date'] = ThisData["Date"]
task['Times'] = ThisData["Times"]
task['LastUpdateTimeStamp'] = datetime.now()
datastore_client.put(task)
except:
e = sys.exc_info()
logging.error(f"Error Occured: {e}, Project: {project_id}, NameSpace: {namespace}, Kind: {kind}, Name: {aKey}")
# Get values from Datastore
def GetFoundTimes(Location: str):
global project_id
# Now try to load the keys from DS:
client = datastore.Client(project=project_id,namespace='golf-bot')
key = client.key("TeeTimesFound", Location)
query = client.get(key)
if query:
return(query['Data'])
else:
return([])
# Function to build out the search times
def BuildSearchTimes():
# Get start and end values from DataStore
global project_id
# Now try to load the keys from DS:
query = datastore.Client(project=project_id,namespace='golf-bot').query(kind="searchTimes")
results = list(query.fetch())
SearchTimes = {"start":results[0]['teeTimeStart'],"end":results[0]['teeTimeEnd']}
# Create array of times
startTime = datetime.strptime(SearchTimes['start'], '%H:%M').time()
endTime = datetime.strptime(SearchTimes['end'], '%H:%M').time()
# Create array of times between the two
step = timedelta(minutes=1)
seconds = (datetime.combine(date.today(), endTime) - datetime.combine(date.today(), startTime)).total_seconds()
array = []
for i in range(0, int(seconds), int(step.total_seconds())):
array.append(datetime.combine(date.today(), startTime) + timedelta(seconds=i))
# Format Array
array = [i.strftime('%-I:%M %p') for i in array]
return array
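# Illustrative sketch (not part of the original service): a small, standard-library-only
# version of the minute-stepping above. It uses '%I' instead of the platform-specific
# '%-I'; the default bounds are arbitrary examples.
def _demo_minute_steps(start="07:00", end="07:05"):
    start_dt = datetime.combine(date.today(), datetime.strptime(start, '%H:%M').time())
    end_dt = datetime.combine(date.today(), datetime.strptime(end, '%H:%M').time())
    seconds = int((end_dt - start_dt).total_seconds())
    return [(start_dt + timedelta(seconds=s)).strftime('%I:%M %p') for s in range(0, seconds, 60)]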
# Main Loop
@app.route("/", methods=['GET'])
def Main():
global GolfCourseList
# Main Function
# Get Search Values from Datastore
BuildGolfCourseList()
    # Iterate through Courses
for aGolfCourse in GolfCourseList:
aGolfCourse.FindSpots()
## Check if any times found
if not any(aGolfCourse.FoundTimes):
logging.info(f"Not times found for {aGolfCourse.LocationName}.")
else:
## Save Values to Log
SaveFoundTimesLogs(aGolfCourse)
            # Iterate over found times
for aFoundSet in aGolfCourse.FoundTimes:
# Recall last search results looking for changes
PreviousFoundData = GetFoundTimes(aFoundSet)
ChangesFound = []
# Compare Results
if not PreviousFoundData:
# No previous results found
NewDataSet = aGolfCourse.FoundTimes[aFoundSet]
SaveFoundTimesToDataStore(Location=aFoundSet,DataToSave=NewDataSet)
                    # Iterate through subsets of data
for aDataRow in NewDataSet:
ChangesFound.append({"Date" : aDataRow["Date"], "Times" : aDataRow["Times"], "NumofSlotsFound" : len(aDataRow["Times"]), "Players" : aDataRow["PlayerCount"]})
else:
# Compare results set
JustFoundData = aGolfCourse.FoundTimes[aFoundSet]
                    # Iterate over both lists looking for differences
for aNewDataSet in JustFoundData: # New datas
for aOldDataSet in PreviousFoundData: # Old datas
#Check date & player count
if (aOldDataSet["PlayerCount"] == aNewDataSet["PlayerCount"]) and (aOldDataSet["Date"] == aNewDataSet["Date"]):
# Check Times Found Lists
if not collections.Counter(aOldDataSet["Times"]) == collections.Counter(aNewDataSet["Times"]):
SaveFoundTimesToDataStore(Location=aFoundSet,DataToSave=aGolfCourse.FoundTimes[aFoundSet])
ChangesFound.append({"Date" : aNewDataSet["Date"], "Times" : aNewDataSet["Times"], "NumofSlotsFound" : len(aNewDataSet["Times"]), "Players" : aNewDataSet["PlayerCount"]})
# If Changes Found Send TXT
if len(ChangesFound) > 0:
                    # Iterate over found values
for aResultSet in ChangesFound:
if aResultSet['NumofSlotsFound'] > 0:
logging.info (f"Found {aResultSet['NumofSlotsFound']}, Times: {aResultSet['Times']}, on {aResultSet['Date']} for {aResultSet['Players']} players.")
Notify(NumSlotsFound=aResultSet['NumofSlotsFound'], FoundRanges=aResultSet['Times'],DateFound=aResultSet['Date'],Course=aFoundSet, Players=aResultSet['Players'] )
else:
# Found Zero Changes
logging.info (f"Found {len(ChangesFound)} new times")
return jsonify(success=True)
if __name__ == "__main__":
## Run APP
# Setup the logger
LoggingClient.get_default_handler()
LoggingClient.setup_logging()
app.run(host='0.0.0.0', port=8080)
| 2.34375
| 2
|
sequence_trie.py
|
JeffreyUrban/count-sequences
| 0
|
12780671
|
<filename>sequence_trie.py
from collections import Counter
import json
# TODO: Index all active nodes within deque (or move to other structure), so updating is faster.
"""
Sequence Trie:
Like a linked list, except each node links to a dictionary (count collection)
Each node stores a count of sequences of strings that reached that node
Maximum depth is specifiable
The tree tracks the present nodes of sequences passing through it
The tree accepts a symbol update and
advances the present node for all of its sequences, and the count for that node
adds a sequence at root, if the symbol matches the root symbol
On request, the tree outputs a list of all sequences of a minimum length or greater, with their counts.
This is determined by taking the count at each node at depth equal to the minimum length or greater,
and tracing back to the root.
Does not support:
Deletion of nodes
Consider later:
Adding aging factor for counts, to support sliding window
Handle timestamp offsets somehow to decouple potentially overlapping sequences
Fuzzy matching among sequences that may include different extra intermediate symbols
"""
class Node:
def __init__(self, trie, symbol, remaining_depth, parent):
self.trie = trie
self.symbol = symbol
self.remaining_depth = remaining_depth
self.parent = parent
#
self.count = 1
self.children = {}
self.sequence = []
if parent and parent.symbol is not None:
self.sequence = parent.sequence.copy()
self.sequence.append(symbol)
def increment(self):
self.count = self.count + 1
self.trie.next_active_children_by_level[self.remaining_depth].add(self)
def update(self, symbol):
# Add or update child with symbol, if depth not exceeded
if self.remaining_depth:
if symbol not in self.children:
self.children[symbol] = \
Node(trie=self.trie, symbol=symbol, remaining_depth=self.remaining_depth - 1, parent=self)
self.trie.all_child_nodes.add(self.children[symbol])
self.trie.next_active_children_by_level[self.remaining_depth - 1].add(self.children[symbol])
else:
self.children[symbol].increment()
def structure(self):
return {'Node': {
'symbol': self.symbol,
'count': self.count,
'remaining_depth': self.remaining_depth,
'children': [self.children[symbol].structure() for symbol in self.children.keys()]
}}
def __str__(self):
return json.dumps(self.structure(), indent=4)
class Trie:
def __init__(self, min_length, max_length):
self.min_length = min_length
self.max_length = max_length
#
self.root = Node(trie=self, symbol=None, remaining_depth=max_length, parent=None)
# active children are grouped by level for update to flow from leaves to root
self.present_active_children_by_level = [set() for _ in range(max_length)]
self.next_active_children_by_level = [set() for _ in range(max_length)]
self.all_child_nodes = set()
def update(self, symbol):
# Advance all active nodes with this symbol
# Update flows up trie (children before parents)
self.next_active_children_by_level = [set() for _ in range(self.max_length)]
for level in self.present_active_children_by_level:
for node in level:
node.update(symbol)
self.root.update(symbol)
for level in range(self.max_length):
self.present_active_children_by_level[level] = self.next_active_children_by_level[level]
@property
def sequences(self) -> Counter:
sequences = Counter()
for node in self.all_child_nodes:
if node.count > 1 and node.remaining_depth < self.max_length - self.min_length + 1: # At least min_length
sequences[str(node.sequence)] = node.count
return sequences
def __str__(self):
trie_string = "Trie: min_length: {}, max_length: {} \n{}"\
.format(self.min_length, self.max_length, self.root)
return trie_string
if __name__ == '__main__':
my_trie = Trie(2, 4)
my_trie.update('0')
my_trie.update('0')
my_trie.update('0')
my_trie.update('0')
print(my_trie)
print(my_trie.sequences)
# Expected Result:
# ['0', '0', '0']: count: 2, remaining_depth: 1
# ['0']: count: 4, remaining_depth: 3
# ['0', '0', '0', '0']: count: 1, remaining_depth: 0
# ['0', '0']: count: 3, remaining_depth: 2
# Counter({"['0', '0']": 3, "['0', '0', '0']": 2})
| 3.484375
| 3
|
mirrors/libs/utils/gql_related.py
|
likeyiyy/mirrorweb
| 0
|
12780672
|
import json
import re
from jinja2.loaders import BaseLoader
from werkzeug.datastructures import ImmutableMultiDict
from jinja2.sandbox import ImmutableSandboxedEnvironment
import decimal
from mirrors.common.logger import logger
class DjangoLoader(BaseLoader):
def __init__(self):
pass
def get_source(self, environment, _):
return _, None, None
variable_repr_re = re.compile(r'\{\{(.*?)\}\}')
def find_used_variable(content):
matched = variable_repr_re.findall(content)
return [each.split('.')[0] for each in matched]
def str_to_obj(str):
return json.loads(str)
default_jinja_context = {
"len": len,
'float': float,
'decimal': decimal.Decimal,
'str_to_obj': str_to_obj,
}
def jinja_render(content, context):
if not content:
content = {}
from jinja2.runtime import Undefined
env = ImmutableSandboxedEnvironment(
loader=DjangoLoader(),
cache_size=0,
undefined=Undefined,
)
context.update(default_jinja_context)
try:
return env.get_template(content).render(context)
except Exception as e:
logger.debug('----- render content failed -----')
logger.debug(content)
logger.debug('--------------- end -------------')
import traceback
traceback.print_exc()
raise
def jinja_render_many(arr, context):
_SPLITER = '^#^#^'
content = _SPLITER.join(arr)
ret = jinja_render(content, context)
return tuple(ret.split(_SPLITER))
def gql_render(str, context=None):
return str
def parse_gql(gql, model, custom_query_model=None):
from mirrors.libs.advance_search.parser import Parser
from mirrors.libs.advance_search.advsearch_visitor import AdvSearchVisitor, AdvSearchRewriteVisitor
ast = Parser.parse(gql)
    # Rewrite the AST to guard against malformed gql sent from the frontend
visitor = AdvSearchRewriteVisitor()
ast = ast.accept(visitor)
visitor = AdvSearchVisitor(model, custom_query_model)
node = ast.accept(visitor)
return node
def extract_args_or_gql(request_args, key):
if key in request_args:
return request_args[key]
gql = request_args.get('gql')
if not gql:
return None
gql += '&'
re_str = r'\b{0}(__eq|__s)?=(-*\w+)\b'.format(key)
match = re.search(re_str, gql)
if match:
return match.group(2)
else:
return None
class TrackerDict(ImmutableMultiDict):
def __init__(self, *args, **kwargs):
self.keys = set()
super(TrackerDict, self).__init__(*args, **kwargs)
def tracker(self, func):
def wrapper(*args, **kwargs):
if len(args):
key = args[0]
else:
key = kwargs.get('key')
self.keys.add(key)
return func(*args, **kwargs)
return wrapper
def __getitem__(self, item):
self.keys.add(item)
return super(TrackerDict, self).__getitem__(item)
def __getattribute__(self, item):
attr = super(TrackerDict, self).__getattribute__(item)
if item in ('get', 'getlist'):
attr = self.tracker(attr)
return attr
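# Illustrative sketch (not part of the original module): DjangoLoader hands the
# template "name" back as the template source itself, so jinja_render can render
# an inline string directly, and the helpers registered in default_jinja_context
# (len, float, ...) are available inside templates.
def _demo_jinja_render():
    return jinja_render("Hello {{ name }}, total={{ len(items) }}",
                        {"name": "world", "items": [1, 2, 3]})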
| 2.078125
| 2
|
shell/src/dist/scripts/sibilla_py/sibilla/__init__.py
|
f-biondi/sibilla
| 0
|
12780673
|
<gh_stars>0
from .plotting_module import SibillaDataPlotter
from .profiler_module import Profiler
from .sibilla_runtime import SibillaRuntime, Map, SibillaReachabilityResult, SibillaFristPassageTimeResult
__all__ = (
    "SibillaRuntime",
    "Map",
    "SibillaReachabilityResult",
    "SibillaFristPassageTimeResult",
    "SibillaDataPlotter",
    "Profiler",
)
print('')
print('- - - - - - - - - - - - - - - - - - - - -')
print('')
print('- - - - - - SIBILLA IMPORTED - - - - - -')
print('')
print('- - - - - - - - - - - - - - - - - - - - -')
print('')
| 1.53125
| 2
|
prodrop/exceptions.py
|
adambeagle/prodrop_project
| 3
|
12780674
|
"""
exceptions.py
Author: <NAME>
PURPOSE:
Contains all custom exception classes for the prodrop package.
"""
###############################################################################
# ParseTreeError and inheritors
class ParseTreeError(Exception):
"""Base class for all parse tree related errors."""
pass
class ParseTreeSearchError(ParseTreeError):
pass
class SearchFlagError(ParseTreeSearchError):
pass
class CustomCallableError(ParseTreeSearchError):
pass
class TreeConstructionError(ParseTreeError):
pass
###############################################################################
# Other
class InputPathError(Exception):
pass
class MissingParseFilesError(Exception):
pass
class NoTreesFoundError(Exception):
pass
| 2.78125
| 3
|
train.py
|
ardamavi/Vocalize-Sign-Language
| 65
|
12780675
|
# <NAME>
import os
import numpy
from get_dataset import get_dataset
from get_model import get_model, save_model
from keras.callbacks import ModelCheckpoint, TensorBoard
epochs = 15
batch_size = 6
def train_model(model, X, X_test, Y, Y_test):
checkpoints = []
if not os.path.exists('Data/Checkpoints/'):
os.makedirs('Data/Checkpoints/')
checkpoints.append(ModelCheckpoint('Data/Checkpoints/best_weights.h5', monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=True, mode='auto', period=1))
checkpoints.append(TensorBoard(log_dir='Data/Checkpoints/./logs', histogram_freq=1, write_graph=True, write_images=True, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None))
'''
# Creates live data:
# For better yield. The duration of the training is extended.
from keras.preprocessing.image import ImageDataGenerator
generated_data = ImageDataGenerator(rotation_range=30, width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.2) # For include left hand data add: 'horizontal_flip = True'
generated_data.fit(X)
model.fit_generator(generated_data.flow(X, Y, batch_size=batch_size), steps_per_epoch=X.shape[0]/batch_size, epochs=epochs, validation_data=(X_test, Y_test), callbacks=checkpoints)
'''
model.fit(X, Y, batch_size=batch_size, epochs=epochs, validation_data=(X_test, Y_test), shuffle=True, callbacks=checkpoints)
return model
def main():
X, X_test, Y, Y_test = get_dataset()
model = get_model()
model = train_model(model, X, X_test, Y, Y_test)
save_model(model)
return model
if __name__ == '__main__':
main()
| 2.390625
| 2
|
pytiff/test/test_write.py
|
ch-schiffer/pytiff
| 9
|
12780676
|
from hypothesis import HealthCheck
from hypothesis import given, settings
from hypothesis.extra import numpy as hnp
from pytiff import *
import hypothesis.strategies as st
import numpy as np
import pytest
import subprocess
import tifffile
from skimage.data import coffee
def test_write_rgb(tmpdir_factory):
img = coffee()
filename = str(tmpdir_factory.mktemp("write").join("rgb_img.tif"))
with Tiff(filename, "w") as handle:
handle.write(img, method="tile")
with Tiff(filename) as handle:
data = handle[:]
assert np.all(img == data[:, :, :3])
with Tiff(filename, "w") as handle:
handle.write(img, method="scanline")
with Tiff(filename) as handle:
data = handle[:]
assert np.all(img == data[:, :, :3])
# scanline integer tests
@settings(buffer_size=11000000)
@given(data=hnp.arrays(dtype=st.one_of(hnp.integer_dtypes(endianness="="), hnp.unsigned_integer_dtypes(endianness="=")),
shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=10, max_side=50)))
def test_write_int_scanline(data, tmpdir_factory):
filename = str(tmpdir_factory.mktemp("write").join("int_img.tif"))
with Tiff(filename, "w") as handle:
handle.write(data, method="scanline")
with tifffile.TiffFile(filename) as handle:
img = handle.asarray()
np.testing.assert_array_equal(data, img)
@settings(buffer_size=11000000)
@given(data=hnp.arrays(dtype=st.one_of(hnp.integer_dtypes(endianness="="), hnp.unsigned_integer_dtypes(endianness="=")),
shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=10, max_side=50)))
def test_write_int_scanline_set_rows_per_strip(data, tmpdir_factory):
filename = str(tmpdir_factory.mktemp("write").join("int_img.tif"))
rows_per_strip = 1
with Tiff(filename, "w") as handle:
handle.write(data, method="scanline", rows_per_strip=rows_per_strip)
with tifffile.TiffFile(filename) as handle:
img = handle.asarray()
np.testing.assert_array_equal(data, img)
assert rows_per_strip == handle[0].tags["rows_per_strip"].value
@settings(buffer_size=11000000)
@given(data=hnp.arrays(dtype=st.one_of(hnp.integer_dtypes(endianness="="), hnp.unsigned_integer_dtypes(endianness="=")),
shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=20, max_side=20)))
def test_write_int_slices_scanline(data, tmpdir_factory):
filename = str(tmpdir_factory.mktemp("write").join("int_img_scanline.tif"))
with Tiff(filename, "w") as handle:
handle.write(data[:, :], method="scanline")
with tifffile.TiffFile(filename) as handle:
img = handle.asarray()
np.testing.assert_array_equal(data[:,:], img)
# tile integer tests
@settings(buffer_size=11000000)
@given(data=hnp.arrays(dtype=st.one_of(hnp.integer_dtypes(endianness="="), hnp.unsigned_integer_dtypes(endianness="=")),
shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=10, max_side=50)))
def test_write_int_tile(data, tmpdir_factory):
filename = str(tmpdir_factory.mktemp("write").join("int_tile_img.tif"))
with Tiff(filename, "w") as handle:
handle.write(data, method="tile", tile_width=16, tile_length=16)
with tifffile.TiffFile(filename) as handle:
img = handle.asarray()
np.testing.assert_array_equal(data, img)
@settings(buffer_size=11000000)
@given(data=hnp.arrays(dtype=hnp.floating_dtypes(endianness="="),
shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=10, max_side=50), elements=st.floats(0, 1)))
def test_write_float_scanline(data, tmpdir_factory):
filename = str(tmpdir_factory.mktemp("write").join("float_img.tif"))
with Tiff(filename, "w") as handle:
handle.write(data, method="scanline")
with tifffile.TiffFile(filename) as handle:
img = handle.asarray()
np.testing.assert_array_equal(data, img)
@settings(buffer_size=11000000)
@given(data=hnp.arrays(dtype=hnp.floating_dtypes(endianness="="),
shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=10, max_side=50), elements=st.floats(0, 1)))
def test_write_float_tile(data, tmpdir_factory):
filename = str(tmpdir_factory.mktemp("write").join("float_tile_img.tif"))
with Tiff(filename, "w") as handle:
handle.write(data, method="tile", tile_length=16, tile_width=16)
with tifffile.TiffFile(filename) as handle:
img = handle.asarray()
np.testing.assert_array_equal(data, img)
@settings(buffer_size=11000000)
@given(data=hnp.arrays(dtype=st.one_of(hnp.integer_dtypes(endianness="="), hnp.unsigned_integer_dtypes(endianness="=")),
shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=10, max_side=50)))
def test_append_int_tile(data, tmpdir_factory):
filename = str(tmpdir_factory.mktemp("write").join("append_img.tif"))
with Tiff(filename, "w") as handle:
handle.write(data, method="tile", tile_width=16, tile_length=16)
with Tiff(filename, "a") as handle:
handle.write(data, method="tile", tile_width=16, tile_length=16)
with Tiff(filename, "r") as handle:
assert handle.number_of_pages == 2
with tifffile.TiffFile(filename) as handle:
img = handle.asarray()
np.testing.assert_array_equal(data, img[0])
np.testing.assert_array_equal(data, img[1])
def test_write_chunk(tmpdir_factory):
filename = str(tmpdir_factory.mktemp("write").join("chunk_img.tif"))
filename = "test_chunk.tif"
data1 = np.ones((64,64), dtype=np.uint8) * 1
data2 = np.ones((64,64), dtype=np.uint8) * 2
data3 = np.ones((64,64), dtype=np.uint8) * 3
data4 = np.ones((64,64), dtype=np.uint8) * 4
with Tiff(filename, "w") as handle:
chunks = [data1, data2, data3, data4]
handle.new_page((300, 300), dtype=np.uint8, tile_length=16, tile_width=16)
row = 0
col = 0
max_row_end = 0
positions = []
for c in chunks:
shape = c.shape
row_end, col_end = row + shape[0], col + shape[1]
max_row_end = max(max_row_end, row_end)
handle[row:row_end, col:col_end] = c
# save for reading chunks
positions.append([row, row_end, col, col_end])
if col_end >= handle.shape[1]:
col = 0
row = max_row_end
else:
col = col_end
handle.save_page()
with Tiff(filename) as handle:
for pos, chunk in zip(positions, chunks):
row, row_end, col, col_end = pos
data = handle[row:row_end, col:col_end]
assert np.all(data == chunk)
with Tiff(filename) as handle:
with pytest.raises(ValueError):
handle.new_page((50, 50), np.dtype("uint8"))
handle[:, :] = np.random.rand(50, 50)
handle.save_page()
def test_write_chunk_multiple_pages(tmpdir_factory):
filename = str(tmpdir_factory.mktemp("write").join("multi_page_chunk_img.tif"))
data1 = np.ones((64,64), dtype=np.uint8) * 1
data2 = np.ones((64,64), dtype=np.uint8) * 2
data3 = np.ones((64,64), dtype=np.uint8) * 3
data4 = np.ones((64,64), dtype=np.uint8) * 4
with Tiff(filename, "w")as handle:
chunks = [data1, data2, data3, data4]
for c in chunks:
shape = c.shape
handle.new_page(shape, dtype=np.uint8, tile_length=16, tile_width=16)
handle[:] = c
with Tiff(filename) as handle:
for page, chunk in enumerate(chunks):
handle.set_page(page)
data = handle[:]
assert data.shape == chunk.shape
assert np.all(data == chunk)
| 2.03125
| 2
|
Server/tests/v2/views/student/report/t_facility_report.py
|
moreal/DMS-Backend
| 27
|
12780677
|
from app.models.report import FacilityReportModel
from tests.v2.views import TCBase
class TestFacilityReport(TCBase):
"""
    Tests the facility malfunction report.
"""
def __init__(self, *args, **kwargs):
super(TestFacilityReport, self).__init__(*args, **kwargs)
self.method = self.client.post
self.target_uri = '/student/report/facility'
self.content = 'hello'
self.room = 311
def setUp(self):
super(TestFacilityReport, self).setUp()
# ---
self._request = lambda *, token=self.student_access_token, content=self.content, room=self.room: self.request(
self.method,
self.target_uri,
token,
json={
'content': content,
'room': room
}
)
def testReportSuccess(self):
        # (1) Report a facility malfunction
resp = self._request()
# (2) status code 201
self.assertEqual(resp.status_code, 201)
# (3) response data
data = resp.json
self.assertIn('id', data)
id = data['id']
self.assertIsInstance(id, str)
self.assertEqual(len(id), 24)
        # (4) Verify the record in the database
self.assertTrue(FacilityReportModel.objects(id=id, content=self.content, room=self.room))
def testForbidden(self):
self.assertEqual(self._request(token=self.admin_access_token).status_code, 403)
| 2.296875
| 2
|
fudbyte/apps/core/urls.py
|
niieq/fudbyte
| 0
|
12780678
|
from django.conf.urls import url
from .views import index, food_detail, post_food_comment
urlpatterns = [
url(r'^$', index),
url(r'^food/(?P<food_slug>[-\w]+)$', food_detail),
url(r'^food/(?P<food_slug>[-\w]+)/add_comment$', post_food_comment)
]
| 1.726563
| 2
|
MyVisuApp.py
|
HaikoKrais/HomeDataViewer
| 1
|
12780679
|
<filename>MyVisuApp.py
# -*- coding: utf-8 -*-
'''
Visualizes temperature and humidity in rooms and provides a weather forecast.
Classes:
MyScreens: Used to switch between the screens which provide the different contents
Scrn1: Start screen of the app. Provides navigation to the other screens
    Scrn2: Shows temperature and humidity. Uses Two_Scales_Widget
Two_Scales_Widget: Widget providing functions to get temperature and humidity of a room and to visualize it
Scrn3: Shows the current weather at a selectable location as well as a five day forecast. Uses Weather_Widget
Weather_Widget: Widget providing functions to get and visualize the weather and forecast for a location
Scrn4: Shows the temperature and humidity graph of the last 24 hours. Uses TwoPlotWidgets
TwoPlotsWidget: Matplotlib Backend visualizing two graphs with shared x-Axis
Scrn5: Shows a corona widget displaying current and cumulative infections
Scrn6: Shows a widget representing the most common pollen for a chosen region including a forecast
See details and more explanations at: http://kraisnet.de/index.php/gebaeudedaten-erfassen-und-mit-kivy-visualisieren-2/18-gebaeudedaten-erfassen-und-mit-kivy-visualisieren
'''
import json
import os
from datetime import datetime
from time import mktime, strptime
from kivy.app import App
from kivy.clock import Clock
from kivy.properties import ListProperty
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.uix.settings import SettingsWithSidebar
from TwoScalesWidgetApp import TwoScalesWidget as TwoScalesWidget
from WeatherWidgetApp import WeatherWidget as WeatherWidget
from TwoPlotsSharedXWidgetApp import TwoPlotsSharedXWidget as TwoPlotsSharedXWidget
from CoronaWidgetApp import CoronaWidget as CoronaWidget
from PollenWidgetApp import PollenWidget as PollenWidget
from kivy.resources import resource_add_path
class MyScreens(ScreenManager):
'''
The ScreenManager MyScreens takes care of changing between the available screens
The functions goto_screen_1 .... can be called by all elements on the screens
'''
def __init__(self, **kwargs):
'''__init__() can perform actions on instantiation. None so far'''
super(MyScreens, self).__init__(**kwargs)
def goto_scrn1(self):
'''Switches to screen 1'''
self.current = 'scrn1'
def goto_scrn2(self):
'''switches to screen 2'''
self.current = 'scrn2'
def goto_scrn3(self):
'''switches to screen 3'''
self.current = 'scrn3'
def goto_scrn4(self):
'''switches to screen 4'''
self.current = 'scrn4'
def goto_scrn5(self):
'''switches to screen 5'''
self.current = 'scrn5'
def goto_scrn6(self):
'''switches to screen 6'''
self.current = 'scrn6'
class Scrn1(Screen):
'''
Shows the start screen
Attributes:
None
'''
pass
class Scrn2(Screen):
'''
Shows the screen containing the temperature and humidity widget
Attributes:
None
'''
timestamp = ListProperty([])
temperature = ListProperty([])
humidity = ListProperty([])
def __init__(self, **kwargs):
'''
Start updating the screen regularly.
A clock will call the update function in a selectable interval.
Put all functions you want to update into the update function
Args:
**kwargs (): not used. For further development.
Returns:
Nothing.
'''
super(Scrn2, self).__init__(**kwargs)
Clock.schedule_interval(self.update_scales, 60)
Clock.schedule_once(self.update_scales)
Clock.schedule_interval(self.update_graph, 60)
Clock.schedule_once(self.update_graph)
def update_scales(self, dt):
'''
updates the scales
Args:
            dt (int): interval in seconds in which the function will be called
Returns:
nothing
'''
low1 = float(App.get_running_app().config.get('Two Scales Widget', 'temp_lower_limit'))
high1 = float(App.get_running_app().config.get('Two Scales Widget', 'temp_upper_limit'))
low2 = float(App.get_running_app().config.get('Two Scales Widget', 'humidity_lower_limit'))
high2 = float(App.get_running_app().config.get('Two Scales Widget', 'humidity_upper_limit'))
filename = App.get_running_app().config.get('Two Scales Widget', 'data_source_scales')
self.ids.widget1.show_data(filename=filename, low1=low1, high1=high1, low2=low2, high2=high2)
def update_graph(self, dt):
'''
updates the plot
Args:
            dt (int): interval in seconds in which the function will be called
Returns:
nothing
'''
# Read the data to show from a file and store it
filename = App.get_running_app().config.get('Two Scales Widget', 'data_source_graph')
try:
with open(filename, 'r') as read_file:
data = json.load(read_file)
print(data)
except FileNotFoundError:
print('File not found for temperature and humidity graph')
return
self.timestamp.clear()
self.temperature.clear()
self.humidity.clear()
for item in data:
self.timestamp.append(datetime.fromtimestamp(mktime(strptime(item['time_code'], '%Y-%m-%d %H:%M:%S'))))
self.temperature.append(float(item['temperature']))
self.humidity.append(float(item['humidity']))
self.ids.widget2.update_plot()
class Scrn3(Screen):
def __init__(self, **kwargs):
'''
Start updating the screen regularly.
A clock will call the update function in a selectable interval.
Put all functions you want to update into the update function
Args:
**kwargs (): not used. For further development.
Returns:
Nothing.
'''
super(Scrn3, self).__init__(**kwargs)
Clock.schedule_interval(self.update, 1800)
Clock.schedule_once(self.update)
def update(self, dt):
'''
        calls functions to update the screen.
        Args:
            dt (int): interval in seconds in which the function will be called
        Returns:
            nothing
'''
city = App.get_running_app().config.get('Weather Widget', 'city')
self.ids.widget1.download_current_weather(city=city)
self.ids.widget1.download_forecast(city=city)
class Scrn5(Screen):
def __init__(self, **kwargs):
super(Scrn5, self).__init__(**kwargs)
'''
Shows the corona widget.
A clock will call the update function in a selectable interval.
Put all functions you want to update into the update function.
        During init the update() function is called. This will download the current dataset from
        the ECDC. The data is updated once a day, so the interval should be large enough.
Args:
**kwargs (): not used. For further development.
Returns:
Nothing.
'''
Clock.schedule_interval(self.update, 86400)
Clock.schedule_once(self.update)
def update(self, dt):
'''
        calls functions to update the screen.
        Args:
            dt (int): interval in seconds in which the function will be called
        Returns:
            nothing
'''
self.ids['wdgt1'].download_data_infection()
self.ids['wdgt1'].download_data_vaccination()
class Scrn6(Screen):
def __init__(self, **kwargs):
super(Scrn6, self).__init__(**kwargs)
'''
Shows the pollen widget.
A clock will call the update function in a selectable interval.
Put all functions you want to update into the update function.
        During init the update() function is called. This will download the current dataset from
        the DWD. The data is updated once a day, so the interval should be large enough.
Args:
**kwargs (): not used. For further development.
Returns:
Nothing.
'''
Clock.schedule_interval(self.update, 86400)
Clock.schedule_once(self.update)
def update(self, dt):
'''
        calls functions to update the screen.
        Args:
            dt (int): interval in seconds in which the function will be called
        Returns:
            nothing
'''
self.ids['wdgt1'].download_dataset(url='https://opendata.dwd.de/climate_environment/health/alerts/s31fg.json')
class MyVisuApp(App):
def build(self):
'''
overwrites the build() function.
The appearance of the settings is set here.
Choose from the available layouts: https://kivy.org/doc/stable/api-kivy.uix.settings.html#different-panel-layouts
The preset values for the settings are loaded by the config.read() function
Args:
None
Returns:
class MyScreens().
'''
self.settings_cls = SettingsWithSidebar
fileDir = os.path.dirname(os.path.abspath(__file__))
absFilename = os.path.join(fileDir, 'mysettings.ini')
self.config.read(absFilename)
return MyScreens()
def build_settings(self, settings):
'''
overwrites the build_settings() function.
Add all necessary panels here by loading from the corresponding file.
Args:
settings
Returns:
Nothing.
'''
fileDir = os.path.dirname(os.path.abspath(__file__))
absFilename1 = os.path.join(fileDir, 'settings_weather_widget.json')
absFilename2 = os.path.join(fileDir, 'settings_two_scales_widget.json')
settings.add_json_panel('Weather Widget', self.config, absFilename1)
settings.add_json_panel('Two Scales Widget', self.config, absFilename2)
def on_config_change(self, config, section, key, value):
'''
overwrites the on_config_change() function.
define actions that shall happen when specific entries in the configuration change here.
Args:
config (kivy.config.ConfigParser):
current configuration.
section (str):
name of the section where the key belongs to.
key (str):
key as specified in the json panels.
value ():
value of the key. return value depending on the type of variable.
Returns:
class MyScreens().
'''
app = self.get_running_app()
if key == 'city':
app.root.ids['scrn3'].update(1)
if section == 'Two Scales Widget':
            app.root.ids['scrn2'].update_scales(1)
app.root.ids['scrn2'].update_graph(1)
if __name__ == '__main__':
resource_add_path(r'C:\Users\49172\PycharmProjects')
MyVisuApp().run()
| 2.5625
| 3
|
data_steward/cdr_cleaner/cleaning_rules/deid/geolocation_concept_suppression.py
|
lrwb-aou/curation
| 16
|
12780680
|
"""
Sandbox and record suppress all records with a concept_id or concept_code relating to Geo Location information.
Original Issue: DC-1385
suppress all records associated with a GeoLocation identifier concepts in PPI vocabulary
The concept_ids to suppress can be determined from the vocabulary with the following filter:
REGEXP_CONTAINS(concept_code,
    r'(SitePairing)|(City)|(ArizonaSpecific)|(Michigan)|(_Country)|(ExtraConsent_[A-Za-z]+((Care)|(Registered)))')
AND concept_class_id = 'Question'
This also covers all of the mapped standard concepts for the non-standard concepts that the regex matches.
"""
# Python Imports
import logging
# Third Party Imports
from google.cloud.exceptions import GoogleCloudError
# Project Imports
from common import OBSERVATION
from common import JINJA_ENV
import constants.cdr_cleaner.clean_cdr as cdr_consts
from cdr_cleaner.cleaning_rules.deid.concept_suppression import \
AbstractBqLookupTableConceptSuppression
LOGGER = logging.getLogger(__name__)
ISSUE_NUMBERS = ['DC1385']
GEO_LOCATION_SUPPRESSION_CONCEPT_TABLE = '_geolocation_identifier_concepts'
GEO_LOCATION_CONCEPT_SUPPRESSION_LOOKUP_QUERY = JINJA_ENV.from_string("""
CREATE OR REPLACE TABLE `{{project_id}}.{{sandbox_dataset_id}}.{{lookup_table}}` as(
WITH
geolocation_concept_ids AS (
SELECT
DISTINCT *
FROM
`{{project_id}}.{{dataset_id}}.concept`
WHERE
REGEXP_CONTAINS(concept_code, r'(SitePairing)|(City)|(ArizonaSpecific)|(Michigan)|(_Country)|(ExtraConsent_[A-Za-z]+((Care)|(Registered)))')AND concept_class_id = 'Question')
SELECT
DISTINCT *
FROM
geolocation_concept_ids
UNION DISTINCT
SELECT
DISTINCT *
FROM
`{{project_id}}.{{dataset_id}}.concept`
WHERE
concept_id IN(
SELECT
cr.concept_id_2
FROM
geolocation_concept_ids AS c
JOIN
`{{project_id}}.{{dataset_id}}.concept_relationship` AS cr
ON
c.concept_id = cr.concept_id_1
WHERE
cr.relationship_id = 'Maps to')
)
""")
class GeoLocationConceptSuppression(AbstractBqLookupTableConceptSuppression):
def __init__(self, project_id, dataset_id, sandbox_dataset_id):
"""
Initialize the class with proper info.
Set the issue numbers, description and affected datasets. As other
tickets may affect this SQL, append them to the list of Jira Issues.
DO NOT REMOVE ORIGINAL JIRA ISSUE NUMBERS!
"""
desc = (
'Sandbox and record suppress all records with a concept_id or concept_code '
'relating to Geo Location information. ')
super().__init__(issue_numbers=ISSUE_NUMBERS,
description=desc,
affected_datasets=[cdr_consts.CONTROLLED_TIER_DEID],
project_id=project_id,
dataset_id=dataset_id,
sandbox_dataset_id=sandbox_dataset_id,
affected_tables=[OBSERVATION],
concept_suppression_lookup_table=
GEO_LOCATION_SUPPRESSION_CONCEPT_TABLE)
def create_suppression_lookup_table(self, client):
"""
:param client: Bigquery client
:return: None
raises google.cloud.exceptions.GoogleCloudError if a QueryJob fails
"""
concept_suppression_lookup_query = GEO_LOCATION_CONCEPT_SUPPRESSION_LOOKUP_QUERY.render(
project_id=self.project_id,
dataset_id=self.dataset_id,
sandbox_dataset_id=self.sandbox_dataset_id,
lookup_table=self.concept_suppression_lookup_table)
query_job = client.query(concept_suppression_lookup_query)
result = query_job.result()
if hasattr(result, 'errors') and result.errors:
LOGGER.error(f"Error running job {result.job_id}: {result.errors}")
raise GoogleCloudError(
f"Error running job {result.job_id}: {result.errors}")
def setup_validation(self, client, *args, **keyword_args):
pass
def validate_rule(self, client, *args, **keyword_args):
pass
if __name__ == '__main__':
import cdr_cleaner.args_parser as parser
from utils import pipeline_logging
import cdr_cleaner.clean_cdr_engine as clean_engine
pipeline_logging.configure(level=logging.DEBUG, add_console_handler=True)
ARGS = parser.default_parse_args()
if ARGS.list_queries:
clean_engine.add_console_logging()
query_list = clean_engine.get_query_list(
ARGS.project_id, ARGS.dataset_id, ARGS.sandbox_dataset_id,
[(GeoLocationConceptSuppression,)])
for query in query_list:
LOGGER.info(query)
else:
clean_engine.add_console_logging(ARGS.console_log)
clean_engine.clean_dataset(ARGS.project_id, ARGS.dataset_id,
ARGS.sandbox_dataset_id,
[(GeoLocationConceptSuppression,)])
| 1.90625
| 2
|
11/solution1.py
|
msagi/advent-of-code-2021
| 0
|
12780681
|
# https://adventofcode.com/2021/day/11
def increase_energy_level(arr, arr_len_x, arr_len_y, increase_step):
for x in range(arr_len_x):
for y in range(arr_len_y):
arr[x][y] += increase_step
def flash_octopus(arr, arr_len_x, arr_len_y, x, y):
if arr[x][y] <= 9: # no flash for this octopus
return 0
arr[x][y] = -1 # register that this octopus has JUST flashed
number_of_flashes = 1
x_coords = [x-1, x, x+1]
y_coords = [y-1, y, y+1]
for x_coord in x_coords:
for y_coord in y_coords:
if 0 <= x_coord < arr_len_x and 0 <= y_coord < arr_len_y:
if -1 == arr[x_coord][y_coord]: # this neighbour has been flashed BEFORE
continue
arr[x_coord][y_coord] += 1
number_of_flashes += flash_octopus(arr, arr_len_x, arr_len_y, x_coord, y_coord) # cascade flashing
return number_of_flashes
def flash(arr, arr_len_x, arr_len_y):
number_of_flashes = 0
for x in range(arr_len_x):
for y in range(arr_len_y):
number_of_flashes += flash_octopus(arr, arr_len_x, arr_len_y, x, y)
return number_of_flashes
def clean_up(arr, arr_len_x, arr_len_y):
for x in range(arr_len_x):
for y in range(arr_len_y):
if arr[x][y] == -1:
arr[x][y] = 0
def dump_arr(arr, arr_len_x, arr_len_y):
for y in range(arr_len_y):
print(arr[y])
# load file
infile = open('11/input.txt', 'r')
lines = infile.readlines()
infile.close()
arr = []
# parse input
for line in lines:
line = line.strip()
line_values = [int(char) for char in line]
arr.append(line_values)
arr_len_x = len(arr[0])
arr_len_y = len(arr)
arr_size = arr_len_x * arr_len_y
dump_arr(arr, arr_len_x, arr_len_y)
max_step = 100
answer = 0
for step in range(max_step):
increase_energy_level(arr, arr_len_x, arr_len_y, 1)
number_of_flashes = flash(arr, arr_len_x, arr_len_y)
clean_up(arr, arr_len_x, arr_len_y)
answer += number_of_flashes
print(answer) # 1749
print('OK')
| 3.296875
| 3
|
app/index_tags.py
|
patrickhoefler/secta
| 0
|
12780682
|
#!/usr/bin/env python3
import codecs
from collections import defaultdict
import errno
import json
import os
def tree():
"""
http://recursive-labs.com/blog/2012/05/31/one-line-python-tree-explained/
"""
return defaultdict(tree)
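# Illustrative sketch (not part of the original script): quick demonstration of
# the autovivification above. Intermediate dicts spring into existence on first
# access, so nested assignment needs no setdefault() calls.
def _demo_tree():
    t = tree()
    t['python']['stackoverflow'] = 1200  # creates t['python'] implicitly
    return t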
def index_tags():
"""
Iterate through all locally saved JSON files
and generate the Stack Exchange Cross Tag index
"""
# Get the script directory
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
# Let's make our index a defaultdict with autovivification
index = tree()
# Iterate through all files in the data directory
for file in os.listdir(os.path.join(SCRIPT_DIR, '../data')):
# Load the JSON file containing the tags for a site
with codecs.open(
os.path.join(SCRIPT_DIR, '../data/' + file),
'r',
encoding='utf-8'
) as input_file:
tags = json.load(input_file)
# The site ID is the filename minus the (.json) at the end
site_id = file[:-5]
# Iterate through all tags and add them to the index
for tag in tags['items']:
index[tag['name']][site_id] = tag['count'] # Autovivification ftw!
# Create the index directory
try:
os.makedirs(os.path.join(SCRIPT_DIR, '../index'))
# If the directory already exists, ignore the error, otherwise report it
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
# Save the tag index to a local JSON file
with codecs.open(
os.path.join(SCRIPT_DIR, '../index/index_tags.json'),
'w',
encoding='utf-8'
) as output_file:
json.dump(index, output_file, ensure_ascii=False)
# Some status information for the console
print('Successfully created the tag index.')
# If the script is called directly, execute the index_tags() function
if __name__ == '__main__':
index_tags()
| 3.078125
| 3
|
src/cms/templatetags/offer_filters.py
|
digitalfabrik/coldaid-backend
| 4
|
12780683
|
from django import template
register = template.Library()
@register.filter
def active_since(region_offers, offer_template):
return region_offers.filter(template=offer_template).first().created_date
| 1.765625
| 2
|
rbfmorph/rbf_func.py
|
utkarshmech/rbfmorph
| 2
|
12780684
|
import numpy as np
def rd(c1, c2):
return np.sqrt((c1[0]-c2[0])**2+(c1[1]-c2[1])**2+(c1[2]-c2[2])**2)
#rbf as global support spline type
#Gaussian Spline
def rbf(r):
return np.exp(-r**2)
#Spline polynomial
def rbf1(r,deg):
return r**deg
# Global
def rbf2(r):
return np.exp(-r**2)
# %% codecell
| 2.796875
| 3
|
saltverifier/server.py
|
virtru-ops/salt-verifier
| 1
|
12780685
|
<reponame>virtru-ops/salt-verifier
"""
A simple challenge response server
Request:
encrypted-message-of-challenge
Response:
status-code;body-of-response
Good response:
200-299;base64 of rsa-sha256 of challenge
Bad response:
400-599;error-message
"""
import os
import sys
import argparse
import time
import hashlib
import zmq
import M2Crypto
import logging
import pwd
# Setup the logging to be json
logging.basicConfig(
format='{"message": "%(message)s", "level": "%(levelname)s", timestamp: "%(asctime)s"}',
level=logging.INFO,
stream=sys.stderr
)
DESCRIPTION = "A simple challenge response server for this salt minion's key"
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument('--port', type=int, default=4533,
help='Port to run the server')
parser.add_argument('--run-as', help='Username to run process as')
parser.add_argument('--private-key-path', default='/etc/salt/pki/minion/minion.pem',
help="The path to the minion's key")
parser.add_argument('--private-key-load-timeout', default=900.0, type=float,
help="Time in seconds to wait to load the private key")
parser.add_argument('--private-key-load-interval', default=0.1, type=float,
help="Interval in seconds to check for private key during loading")
class PrivateKeyDoesNotExist(Exception):
pass
class ChallengeResponseServer():
def __init__(self, port, private_key):
self._port = port
self._private_key = private_key
def serve(self):
logging.info('Starting salt-verifier challenge response server')
try:
self._server_loop()
except KeyboardInterrupt:
logging.info('Shutting down salt-verifier challenge response server')
def _server_loop(self):
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind('tcp://0.0.0.0:%s' % self._port)
while True:
            # Wait a little between requests... for good measure
time.sleep(1)
# Wait for the next request from the client
encrypted_challenge_message = socket.recv()
logging.info('received message')
logging.debug(encrypted_challenge_message)
# Attempt to decrypt the message
try:
challenge_message = self.decrypt(encrypted_challenge_message)
except M2Crypto.RSA.RSAError, e:
                logging.exception('Exception occurred while decrypting')
socket.send('400;Cannot Decrypt message')
continue
except Exception, e:
logging.exception('Internal server error occurred')
socket.send('500;Internal server error')
continue
signed_challenge = self.sign(challenge_message)
socket.send('200;%s' % signed_challenge.encode('base64'))
def decrypt(self, base64_cipher_message):
"""Decrypt a message"""
cipher_message = base64_cipher_message.decode('base64')
return self._private_key.private_decrypt(
cipher_message,
M2Crypto.RSA.pkcs1_oaep_padding
)
def sign(self, message):
"""RSASSA-PSS sign the sha256 digest of a message"""
message_digest = hashlib.sha256()
message_digest.update(message)
return self._private_key.sign_rsassa_pss(message_digest.digest())
def load_private_key(private_key_path, timeout, interval):
current_time = time.time()
ready = False
count = 0
while current_time + timeout > time.time():
# Report to the logs that we're waiting for the private key... but
# don't always do it. It'll get too noisy unnecessarily. With the
# default settings this will run every 2 seconds
if count % 20 == 0:
logging.info('Checking for private key @ "%s"' % private_key_path)
if os.path.exists(private_key_path):
ready = True
break
count += 1
time.sleep(interval)
if not ready:
raise PrivateKeyDoesNotExist(
'No private key exists @ %s\n' % private_key_path
)
logging.info('Private key found @ "%s"' % private_key_path)
# Load the private key
private_key = M2Crypto.RSA.load_key(private_key_path)
return private_key
def run(args=None):
args = args or sys.argv[1:]
parsed_args = parser.parse_args(args)
port = parsed_args.port
private_key_path = os.path.abspath(parsed_args.private_key_path)
try:
private_key = load_private_key(
private_key_path,
parsed_args.private_key_load_timeout,
parsed_args.private_key_load_interval
)
except PrivateKeyDoesNotExist, e:
logging.info(e.message)
sys.exit(1)
except KeyboardInterrupt:
logging.info('Shutting down. Was waiting for private key.')
sys.exit(1)
# Downgrade user to the setuid if --run-as is set on command line
run_as = parsed_args.run_as
if run_as:
try:
passwd = pwd.getpwnam(run_as)
except KeyError:
logging.error('No user called %s. Exiting' % run_as)
return
os.setuid(passwd.pw_uid)
server = ChallengeResponseServer(port, private_key)
server.serve()
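# Illustrative sketch (not part of the original server): a hedged client-side
# counterpart of the protocol documented at the top of this module. It encrypts
# a random challenge with the minion's *public* key, sends it over a ZeroMQ REQ
# socket, and verifies the RSASSA-PSS signature in the response. The key path,
# host and port below are illustrative only.
def _demo_client(pub_key_path='minion.pub', host='127.0.0.1', port=4533):
    challenge = os.urandom(32)
    pub_key = M2Crypto.RSA.load_pub_key(pub_key_path)
    cipher = pub_key.public_encrypt(challenge, M2Crypto.RSA.pkcs1_oaep_padding)
    socket = zmq.Context().socket(zmq.REQ)
    socket.connect('tcp://%s:%s' % (host, port))
    socket.send(cipher.encode('base64'))
    status, _, body = socket.recv().partition(';')
    if not status.startswith('2'):
        raise RuntimeError(body)
    digest = hashlib.sha256(challenge).digest()
    return pub_key.verify_rsassa_pss(digest, body.decode('base64')) == 1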
| 2.96875
| 3
|
AlphaRemover.py
|
partywithemanuel/alpha_remover
| 0
|
12780686
|
import wx
import matplotlib.pyplot as plt
import os
import sys
import subprocess
from PIL import Image
class App(wx.Frame):
def __init__(self, parent, title):
super(App, self).__init__(parent, title = title,size = (640,300))
panel = wx.Panel(self)
sizer = wx.GridBagSizer(5, 4)
#description
text = wx.StaticText(panel, label="Folder path")
sizer.Add(text, pos=(0, 0), flag=wx.TOP|wx.LEFT, border=10)
#input field
self.path = wx.TextCtrl(panel)
sizer.Add(self.path, pos=(1, 0), span=(1, 4), flag=wx.EXPAND|wx.LEFT|wx.RIGHT, border=10)
self.path.Bind(wx.EVT_TEXT,self.OnKeyTyped)
#hint
text2 = wx.StaticText(panel, style = wx.TE_MULTILINE, label=" Hint: hold Option after right-click on folder to show option to copy its path.")
sizer.Add(text2, pos=(2, 0), span=(1, 3), flag=wx.BOTTOM|wx.TOP|wx.LEFT, border=5)
#button open folder
button_open = wx.Button(panel, label="Choose folder", size=(140, 24))
button_open.Bind(wx.EVT_BUTTON, self.onDir)
sizer.Add(button_open, pos=(1, 4), flag=wx.ALIGN_RIGHT|wx.RIGHT, border=10)
#button execute
button_execute = wx.Button(panel, label="Remove Alpha", size=(140, 24))
self.Bind(wx.EVT_BUTTON, self.OnClickedExecute, button_execute)
sizer.Add(button_execute, pos=(2, 4), flag=wx.ALIGN_RIGHT|wx.RIGHT|wx.BOTTOM, border=10)
#output
line = wx.TextCtrl(panel, wx.ID_ANY,style = wx.TE_MULTILINE|wx.TE_READONLY|wx.HSCROLL|wx.TE_RICH2)
sizer.Add(line, pos=(3, 0), span=(1, 5), flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, border=10)
sys.stdout = line
sizer.AddGrowableCol(1)
sizer.AddGrowableCol(3)
sizer.AddGrowableRow(3)
panel.SetSizer(sizer)
def OnKeyTyped(self, event):
self.path = event.GetString()
def OnClickedExecute(self, event):
button_execute = event.GetEventObject().GetLabel()
program(self.path)
def OnClickedOpen(self, event):
button_open = event.GetEventObject().GetLabel()
onDir(self.path)
#open folder modal
def onDir(self, event):
dlg = wx.DirDialog(self, "Choose a directory:", style=wx.DD_DEFAULT_STYLE | wx.DD_DIR_MUST_EXIST)
if dlg.ShowModal() == wx.ID_OK:
print ("Selected folder is %s" % dlg.GetPath())
self.path = dlg.GetPath()
def remove_alpha(image):
color=(255, 255, 255)
    image.load() # required for split()
background = Image.new('RGB', image.size, color)
    background.paste(image, mask=image.split()[3]) # 3 is the alpha channel
return background
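# Illustrative sketch (not part of the original tool): a semi-transparent RGBA
# image comes back from remove_alpha() flattened onto a white background as RGB.
def _demo_remove_alpha():
    rgba = Image.new('RGBA', (4, 4), (255, 0, 0, 128))
    return remove_alpha(rgba).mode  # -> 'RGB'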
def program(path):
for subdir, dirs, files in os.walk(path):
for filename in files:
if filename.endswith(".png"):
pic_path = path+filename
pic = Image.open(os.path.join(subdir, filename))
if pic.mode in ('RGBA', 'LA') or (pic.mode == 'P' and 'transparency' in pic.info):
pic=remove_alpha(pic)
pic.save(os.path.join(subdir, filename))
print(os.path.join(subdir, filename))
proc = subprocess.Popen("ping %s", shell=True, stdout=subprocess.PIPE)
line = proc.stdout.readline()
app = wx.App(redirect=True)
ex = App(None, 'PNG Alpha remover')
ex.Show()
app.MainLoop()
| 2.46875
| 2
|
chapter4/pizza.py
|
sharad16j/Expert-Python-Programming-Third-Edition
| 112
|
12780687
|
<filename>chapter4/pizza.py
class Pizza:
def __init__(self, toppings):
self.toppings = toppings
def __repr__(self):
return "Pizza with " + " and ".join(self.toppings)
@classmethod
def recommend(cls):
"""Recommend some pizza with arbitrary toppings,"""
return cls(['spam', 'ham', 'eggs'])
class VikingPizza(Pizza):
@classmethod
def recommend(cls):
"""Use same recommendation as super but add extra spam"""
        recommended = super(VikingPizza, cls).recommend()
recommended.toppings += ['spam'] * 5
return recommended
if __name__ == "__main__":
print("Ordinary pizza recomendation:", Pizza.recommend())
print("Viking pizza recomendation:", VikingPizza.recommend())
| 3.875
| 4
|
examples/nlp/bert/test_glue_pytorch_bert.py
|
AFDWang/Hetu
| 82
|
12780688
|
<reponame>AFDWang/Hetu<filename>examples/nlp/bert/test_glue_pytorch_bert.py
from tqdm import tqdm
import os
import math
import logging
import torch
import torch.nn as nn
from torch.optim import Adam
from torch.optim import SGD
import numpy as np
from pytorch_bert import BertForSequenceClassification
from bert_config import BertConfig
from load_data import DataLoaderForGlue
import time
import argparse
def params_from_official_pytorch_pretrained_model(state_dict):
weights_path = './pretrained_params/bert-base-uncased/pytorch_model.bin'
pretrained_state_dict = torch.load(weights_path, map_location='cpu' if not torch.cuda.is_available() else None)
# Load from a PyTorch state_dict
old_keys = []
new_keys = []
for key in pretrained_state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if 'intermediate.dense' in key:
new_key = key.replace('intermediate.dense', 'intermediate.dense_act.Linear')
if 'pooler.dense' in key:
new_key = key.replace('pooler.dense', 'pooler.dense_act.Linear')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
pretrained_state_dict[new_key] = pretrained_state_dict.pop(old_key)
for key in state_dict.keys():
if 'bert.' in key:
state_dict[key] = pretrained_state_dict[key]
print("Successfully loaded pretrained parameters from %s"%weights_path)
return state_dict
def params_from_pytorch_pretrained_model(state_dict, model_path):
pytorch_state_dict = torch.load(model_path, map_location='cpu' if not torch.cuda.is_available() else None)
for key in state_dict.keys():
if 'bert.' in key:
state_dict[key] = pytorch_state_dict[key]
return state_dict
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
setup_seed(123)
def finetune(args):
cuda_condition = torch.cuda.is_available()
device = torch.device("cuda:%d"%args.gpu_id if cuda_condition else "cpu")
task_name = args.task_name
if task_name in ['sst-2','cola', 'mrpc']:
num_labels = 2
elif task_name in ['mnli']:
num_labels = 3
num_epochs = args.epochs
lr = args.lr
config = BertConfig(vocab_size=args.vocab_size,
hidden_size=args.hidden_size,
num_hidden_layers=args.num_hidden_layers,
num_attention_heads=args.num_attention_heads,
intermediate_size=args.hidden_size*4,
max_position_embeddings=args.seq_length,
attention_probs_dropout_prob=args.dropout_prob,
hidden_dropout_prob=args.dropout_prob,
batch_size=args.train_batch_size,
hidden_act=args.hidden_act)
model = BertForSequenceClassification(config=config, num_labels=num_labels)
model.to(device)
dataloader = DataLoaderForGlue(task_name=task_name, batch_size = config.batch_size)
dataloader_dev = DataLoaderForGlue(task_name=task_name, batch_size = config.batch_size, datatype='dev')
#initialize parameters
for m in model.modules():
if isinstance(m, (nn.Linear, nn.Embedding)):
nn.init.xavier_normal_(m.weight)
# #save parameters
# params = model.state_dict()
# for key, val in params.items():
# params[key] = val.cpu().numpy()
# torch.save(params, "pytorch_params_glue.file")
start_model = 'random'
# load parameters of BERT from official pretrained pytorch model
# state_dict = params_from_official_pytorch_pretrained_model(model.state_dict())
# start_model = 'pytorch_official'
# load parameters of BERT from pretrained pytorch model
# load_ep, load_i = 2, 145000
# #pytorch_model_path = './pretrained_params/pytorch_pretrained_params/epoch_%d_iter_%d.params'%(load_ep,load_i)
# pytorch_model_path = './pretrained_params/pytorch_pretrained_params_adam/epoch_%d_iter_%d.params'%(load_ep,load_i)
# state_dict= params_from_pytorch_pretrained_model(model.state_dict(), pytorch_model_path)
# start_model = 'pytorch_ep%d_iter%d'%(load_ep, load_i)
# model.load_state_dict(state_dict)
opt = Adam(model.parameters(), lr=lr, betas=(0.9,0.999), eps=1e-8, weight_decay = args.adam_weight_decay)
opt_name = 'Adam'
# opt = SGD(model.parameters(), lr=lr)
# opt_name = 'SGD'
for ep in range(num_epochs):
for i in range(dataloader.batch_num):
start_time = time.time()
batch_data = dataloader.get_batch(i)
input_ids = torch.LongTensor(batch_data['input_ids']).to(device)
token_type_ids = torch.LongTensor(batch_data['token_type_ids']).to(device)
attention_mask = torch.LongTensor(batch_data['attention_mask']).to(device)
label_ids = torch.LongTensor(batch_data['label_ids']).to(device)
model.train()
opt.zero_grad()
loss, logits = model(input_ids, token_type_ids, attention_mask, label_ids)
loss.backward()
opt.step()
loss_out = loss.item()
pred = logits.argmax(dim=1)
acc = torch.eq(pred, label_ids).float().mean().item()
end_time = time.time()
print('[Epoch %d] (Iteration %d): Loss = %.3f, Accuracy = %.4f Time = %.3f'%(ep,i,loss_out, acc, end_time-start_time))
# # validate model on dev set
# acc_list=[]
# for i in range(dataloader_dev.batch_num):
# batch_data = dataloader_dev.get_batch(i)
# input_ids = torch.LongTensor(batch_data['input_ids']).to(device)
# token_type_ids = torch.LongTensor(batch_data['token_type_ids']).to(device)
# attention_mask = torch.LongTensor(batch_data['attention_mask']).to(device)
# label_ids = torch.LongTensor(batch_data['label_ids']).to(device)
# model.eval()
# start_time = time.time()
# loss, logits = model(input_ids, token_type_ids, attention_mask, label_ids)
# end_time = time.time()
# loss_out = loss.item()
# pred = logits.argmax(dim=1)
# acc = torch.eq(pred, label_ids).float().mean().item()
# acc_list.append(acc)
# print('[Validate] (Iteration %d): Loss = %.3f, Accuracy = %.4f Time = %.3f'%(i,loss_out, acc, end_time-start_time))
# print('\tDev accuracy after epoch %d is %.4f'%(ep, np.mean(np.array(acc_list))))
save_path = './finetuned_params/pytorch_finetuned_params/%s/'%start_model
# save_path = './finetuned_params/pytorch_finetuned_params_adam_pretrain/%s/'%start_model
save_file = '%s_epoch_%d_%s.params'%(task_name,ep,opt_name)
if not os.path.exists(save_path):
os.makedirs(save_path)
torch.save(model.state_dict(), save_path+save_file)
print('Saved model to %s.'%(save_path+save_file))
def validate(args):
cuda_condition = torch.cuda.is_available()
device = torch.device("cuda:%d"%args.gpu_id if cuda_condition else "cpu")
task_name = args.task_name
if task_name in ['sst-2','cola', 'mrpc']:
num_labels = 2
elif task_name in ['mnli']:
num_labels = 3
config = BertConfig(vocab_size=args.vocab_size,
hidden_size=args.hidden_size,
num_hidden_layers=args.num_hidden_layers,
num_attention_heads=args.num_attention_heads,
intermediate_size=args.hidden_size*4,
max_position_embeddings=args.seq_length,
attention_probs_dropout_prob=0.0,
hidden_dropout_prob=0.0,
batch_size=args.train_batch_size,
hidden_act=args.hidden_act)
model = BertForSequenceClassification(config=config, num_labels=num_labels)
model.to(device)
dataloader = DataLoaderForGlue(task_name=task_name, batch_size = config.batch_size, datatype='dev')
start_model = 'random'
# start_model = 'pytorch_official'
# load_ep, load_i =2, 145000
# start_model = 'pytorch_ep%d_iter%d'%(load_ep, load_i)
load_finetune_ep = 3
opt_name = 'Adam'
# opt_name = 'SGD'
save_path = './finetuned_params/pytorch_finetuned_params/%s/'%start_model
#save_path = './finetuned_params/pytorch_finetuned_params_adam_pretrain/%s/'%start_model
save_file = '%s_epoch_%d_%s.params'%(task_name,load_finetune_ep,opt_name)
state_dict = torch.load(save_path+save_file, map_location='cpu' if not torch.cuda.is_available() else None)
model.load_state_dict(state_dict)
# validate model on dev set
acc_list=[]
for i in range(dataloader.batch_num):
start_time = time.time()
batch_data = dataloader.get_batch(i)
input_ids = torch.LongTensor(batch_data['input_ids']).to(device)
token_type_ids = torch.LongTensor(batch_data['token_type_ids']).to(device)
attention_mask = torch.LongTensor(batch_data['attention_mask']).to(device)
label_ids = torch.LongTensor(batch_data['label_ids']).to(device)
model.eval()
loss, logits = model(input_ids, token_type_ids, attention_mask, label_ids)
loss_out = loss.item()
pred = logits.argmax(dim=1)
acc = torch.eq(pred, label_ids).float().mean().item()
acc_list.append(acc)
end_time = time.time()
print('[Validate] (Iteration %d): Loss = %.3f, Accuracy = %.4f Time = %.3f'%(i,loss_out, acc, end_time-start_time))
print('\tDev accuracy after epoch %d is %.4f'%(load_finetune_ep, np.mean(np.array(acc_list))))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--gpu_id', type=int, default=0, help='Id of GPU to run.'
)
parser.add_argument(
"--train_batch_size", type=int, default=16, help="Training batch size"
)
parser.add_argument(
"--task_name", type=str, default='sst-2', help="Glue task to finetune."
)
parser.add_argument(
"--vocab_size", type=int, default=30522, help="Total number of vocab"
)
parser.add_argument(
"--hidden_size", type=int, default=768, help="Hidden size of transformer model",
)
parser.add_argument(
"--num_hidden_layers", type=int, default=12, help="Number of layers"
)
parser.add_argument(
"-a",
"--num_attention_heads",
type=int,
default=12,
help="Number of attention heads",
)
parser.add_argument(
"-s", "--seq_length", type=int, default=128, help="Maximum sequence len"
)
parser.add_argument("-e", "--epochs", type=int,
default=10, help="Number of epochs")
parser.add_argument("--lr", type=float, default=1e-5,
help="Learning rate of adam")
parser.add_argument(
"--adam_weight_decay", type=float, default=0.01, help="Weight_decay of adam"
)
parser.add_argument(
"--hidden_act", type=str, default='gelu', help="Hidden activation to use."
)
parser.add_argument(
"--dropout_prob", type=float, default=0.1, help="Dropout rate."
)
args = parser.parse_args()
finetune(args)
#validate(args)
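# --- Hedged usage note (assumption, not part of the original script) ---
# With the argparse flags defined above, a typical invocation might look like:
#   python test_glue_pytorch_bert.py --task_name sst-2 --epochs 3 --lr 1e-5 --gpu_id 0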
| 2.1875
| 2
|
config.py
|
Johnsoneer/The-Quote-Book
| 0
|
12780689
|
from app.env import env
'''
Uses environment variables to secure secrets whilst giving our server the ability to
access certain key parameters for things like emailing users and managing CSRF tokens.
'''
class Config(object):
SECRET_KEY = env.SECRET_KEY
DATABASE_USER = env.DATABASE_USER
DATABASE_PASSWORD = env.DATABASE_PASSWORD
SQLALCHEMY_DATABASE_URI = 'postgresql://{username}:{password}@quot<EMAIL>:5432/postgres'.format(username = DATABASE_USER, password = DATABASE_PASSWORD)
SQLALCHEMY_TRACK_MODIFICATIONS = False
QUOTES_PER_PAGE = 6
MAIL_SERVER = env.MAIL_SERVER
MAIL_PORT = int(env.MAIL_PORT or 25)
MAIL_USE_TLS = env.MAIL_USE_TLS is not None
MAIL_USERNAME = env.MAIL_USERNAME
MAIL_PASSWORD = env.MAIL_PASSWORD
ADMINS = ['<EMAIL>']
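# --- Hedged usage note (assumption, not part of the original file) ---
# A Flask application would typically load this configuration during setup with
# app.config.from_object(Config).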
| 2.546875
| 3
|
index.py
|
n-schilling/datadog-synthetic-scheduler
| 1
|
12780690
|
import json
import logging
import os
import sys
import boto3
import urllib3
urllib3.disable_warnings()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
http_pool = urllib3.PoolManager()
secretsmanager_client = boto3.client('secretsmanager')
def changeSyntheticStatus(new_status):
logger.info(f"Start changing Datadog Synthetic status to {new_status}")
try:
datadog_secret_name = os.getenv('datadogSecretName', 'Datadog_API_Key')
except:
logger.error("One of the environmet variable is missing")
raise
try:
get_secret_value_response = secretsmanager_client.get_secret_value(
SecretId=datadog_secret_name
)
if 'SecretString' in get_secret_value_response:
secret_value_str = get_secret_value_response['SecretString']
else:
logger.error(
f"Could not extract secret {datadog_secret_name} from Secrets Manager")
            raise ValueError(f"Secret {datadog_secret_name} does not contain a SecretString")
secret_value = json.loads(secret_value_str)
dd_api_key = secret_value['datadog']['api_key']
dd_app_key = secret_value['datadog']['app_key']
except:
logger.error(
"There was an error while getting the parameter from the parameter store")
raise
synthetic_public_id = os.getenv('syntheticPublicId')
datadog_api_endpoint = os.getenv('datadogApiEndpoint')
datadog_endpoint_url = datadog_api_endpoint + \
'synthetics/tests/' + synthetic_public_id + '/status'
logger.info(
f"Changing status to {new_status} for Datadog Synthetic with ID {synthetic_public_id} against endpoint {datadog_endpoint_url}")
body_json = json.dumps({
"new_status": new_status,
})
put_response = http_pool.request('PUT', datadog_endpoint_url,
headers={
'Content-Type': 'application/json',
'DD-API-KEY': dd_api_key,
'DD-APPLICATION-KEY': dd_app_key
},
body=body_json)
if (put_response.status) != 200:
logger.error(
f"HTTP Call to change the status of Datadog Synthetic {synthetic_public_id} to {new_status} failed.")
logger.error(f"HTTP status is {put_response.status}")
        raise RuntimeError(f"Failed to change the status of Datadog Synthetic {synthetic_public_id} to {new_status}")
else:
decoded_response = json.loads(put_response.data.decode('utf-8'))
if decoded_response: # HTTP response is either true or false
logger.info(
f"Status of Datadog Synthetic {synthetic_public_id} was successfully changed to {new_status}")
else:
logger.error(
f"HTTP Call was successfull but the status of Datadog Synthetic {synthetic_public_id} was NOT changed to {new_status}. Response was {decoded_response}")
raise
def handler(event, context):
logger.info("Start with Datadog Synthetic Scheduler")
try:
synthetic_set_status = event['syntheticSetStatus']
except:
logger.error("Could not extract Synthetic destination status from event")
raise
changeSyntheticStatus(synthetic_set_status)
logger.info("End of Datadog Synthetic Scheduler")
if __name__ == "__main__":
handler(0, 0)
| 2.078125
| 2
|
orders/migrations/0007_auto_20191223_1502.py
|
yun-mh/uniwalk
| 0
|
12780691
|
# Generated by Django 2.2.5 on 2019-12-23 06:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('orders', '0006_auto_20191222_2318'),
]
operations = [
migrations.AlterField(
model_name='order',
name='order_date',
field=models.DateTimeField(auto_now_add=True, verbose_name='注文日時'),
),
migrations.CreateModel(
name='OrderItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('product', models.CharField(max_length=250)),
('quantity', models.IntegerField()),
('price', models.IntegerField()),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orders.Order')),
],
),
]
| 1.640625
| 2
|
python/test/lib/sibra/state/bandwidth_test.py
|
cschutijser/scion
| 1
|
12780692
|
<gh_stars>1-10
# Copyright 2016 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`bandwidth_test` --- lib.sibra.state.bandwidth unit tests
==============================================================
"""
# External packages
import nose
import nose.tools as ntools
# SCION
from lib.sibra.state.bandwidth import LinkBandwidth
from lib.sibra.util import BWSnapshot
class TestLinkBandwidthUpdate(object):
"""
Unit tests for lib.sibra.state.bandwidth.LinkBandwidth.update
Note: these tests do not mock out BWSnapshot, as it would make testing too
complex to be useful.
"""
def test(self):
inst = LinkBandwidth("owner", BWSnapshot(100, 100))
for i, bw in enumerate([50, 0, -10, -20, 0, 0, -20]):
inst.resvs[i] = BWSnapshot(bw, bw)
updates = []
for idx, bw in [(0, -10), (1, -10), (2, +10), (6, 10)]:
updates.append((idx, BWSnapshot(bw, bw)))
# Call
inst.update(updates)
# Tests
for i, bw in enumerate([40, -10, 0, -20, 0, 0, -10]):
tick = BWSnapshot(bw, bw)
ntools.eq_(inst.resvs[i], tick)
if __name__ == "__main__":
nose.run(defaultTest=__name__)
| 1.960938
| 2
|
napalm_eltex/__init__.py
|
noc-uac-ssc/napalm-eltex
| 1
|
12780693
|
"""napalm-eltex package."""
from napalm_eltex.eltex import CEDriver
__all__ = ('CEDriver',)
| 1.046875
| 1
|
test/unit/test_arn.py
|
robin-aws/aws-encryption-sdk-python
| 0
|
12780694
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Unit test suite for aws_encryption_sdk.internal.arn functions."""
import pytest
from aws_encryption_sdk.exceptions import MalformedArnError
from aws_encryption_sdk.internal.arn import Arn
class TestArn(object):
def test_malformed_arn_missing_arn(self):
arn = "aws:kms:us-east-1:222222222222:key/aaaaaaaa-1111-2222-3333-bbbbbbbbbbbb"
with pytest.raises(MalformedArnError) as excinfo:
Arn.from_str(arn)
excinfo.match("Resource {} could not be parsed as an ARN".format(arn))
def test_malformed_arn_missing_service(self):
arn = "aws:us-east-1:222222222222:key/aaaaaaaa-1111-2222-3333-bbbbbbbbbbbb"
with pytest.raises(MalformedArnError) as excinfo:
Arn.from_str(arn)
excinfo.match("Resource {} could not be parsed as an ARN".format(arn))
def test_malformed_arn_missing_region(self):
arn = "arn:aws:222222222222:key/aaaaaaaa-1111-2222-3333-bbbbbbbbbbbb"
with pytest.raises(MalformedArnError) as excinfo:
Arn.from_str(arn)
excinfo.match("Resource {} could not be parsed as an ARN".format(arn))
def test_malformed_arn_missing_account(self):
arn = "arn:aws:us-east-1:key/aaaaaaaa-1111-2222-<KEY>"
with pytest.raises(MalformedArnError) as excinfo:
Arn.from_str(arn)
excinfo.match("Resource {} could not be parsed as an ARN".format(arn))
def test_malformed_arn_missing_resource_type(self):
arn = "arn:aws:us-east-1:222222222222"
with pytest.raises(MalformedArnError) as excinfo:
Arn.from_str(arn)
excinfo.match("Resource {} could not be parsed as an ARN".format(arn))
def test_parse_key_arn_success(self):
arn_str = "arn:aws:kms:us-east-1:222222222222:key/aaaaaaaa-1111-2222-3333-<KEY>"
arn = Arn.from_str(arn_str)
assert arn.partition == "aws"
assert arn.service == "kms"
assert arn.region == "us-east-1"
assert arn.account_id == "222222222222"
assert arn.resource_type == "key"
assert arn.resource_id == "aaaaaaaa-1111-2222-3333-bbbbbbbbbbbb"
def test_parse_alias_arn_success(self):
arn_str = "arn:aws:kms:us-east-1:222222222222:alias/aws/service"
arn = Arn.from_str(arn_str)
assert arn.partition == "aws"
assert arn.service == "kms"
assert arn.region == "us-east-1"
assert arn.account_id == "222222222222"
assert arn.resource_type == "alias"
assert arn.resource_id == "aws/service"
| 2.109375
| 2
|
myapp/models.py
|
tainanpopo/Django-PostgreSQL
| 0
|
12780695
|
from django.db import models
# Create your models here.
class Student(models.Model):
    name = models.CharField(max_length = 20, null = False)  # string field with a maximum length of 20; the field cannot be empty
sex = models.CharField(max_length = 2, default = 'M', null = False)
birthday = models.DateField(null = False)
phone = models.CharField(max_length = 20, null = False)
class Meta:
db_table = "student"
| 2.78125
| 3
|
meg_runtime/ui/manager.py
|
ibedard16/Runtime
| 0
|
12780696
|
<gh_stars>0
"""MEG UI Manager
"""
from PyQt5 import QtWidgets, uic
from os.path import dirname
import pkg_resources
import sys
from meg_runtime.ui.mainmenupanel import MainMenuPanel
from meg_runtime.ui.clonepanel import ClonePanel
from meg_runtime.ui.repopanel import RepoPanel
from meg_runtime.logger import Logger
class UIManager(QtWidgets.QStackedWidget):
"""Main UI manager for the MEG system."""
PANELS = [
ClonePanel,
MainMenuPanel,
RepoPanel,
]
def __init__(self, **kwargs):
"""UI manager constructor."""
super().__init__(**kwargs)
for panel in self.PANELS:
self.addWidget(panel(self))
self.change_view(MainMenuPanel)
def open_clone_panel(self):
""""Download" or clone a project."""
# TODO
self.change_view(ClonePanel)
def clone(self):
"""Clone a repository."""
# TODO
self.change_view(RepoPanel)
def return_to_main_menu(self):
"""Return to the main menu screen"""
self.change_view(MainMenuPanel)
def change_view(self, panel):
"""Change the current panel being viewed. """
self.setCurrentIndex(self.PANELS.index(panel))
# TODO: Add more menu opening/closing methods here
def ui_run(**kwargs):
"""Start the UI loop."""
app = QtWidgets.QApplication([])
manager = UIManager(**kwargs)
manager.show()
return app.exec_()
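# --- Hedged usage note (assumption, not part of the original module) ---
# An application entry point would typically hand control to ui_run, e.g.
#   sys.exit(ui_run())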
| 2.34375
| 2
|
mathutils/gram.py
|
saridut/FloriPy
| 0
|
12780697
|
#!/usr/bin/env python
'''This module contains routines to perform Gram-Schmidt orthonormalization on
a sequence of vectors.
'''
import numpy as np
import numpy.linalg as la
def gso(A, overwrite=False, out=None):
'''Performs Gram-Schmidt orthonormalization on a sequence of vectors.
Parameters
----------
A : ndarray
(M x N) ndarray with M <= N. The rows of A contain the sequence of
vectors.
overwrite : bool, optional
If `True`, the matrix A is overwritten.
out : ndarray, optional
(M x N) ndarray with M <= N. The rows of `out` contain the sequence of
orthonormal vectors. If `overwrite = True`, `out` is neglected.
Returns
-------
output : ndarray
(M x N) ndarray with M <= N. The rows of `out` contain the sequence of
orthonormal vectors.
Notes
-----
See <NAME> <NAME>, Matrix Computations, 3rd edition, Section 5.2.8,
Algorithm 5.2.5, p. 231.
'''
assert A.shape[0] <= A.shape[1]
M = A.shape[0]
if overwrite:
output = A
else:
if out is not None:
output = out
else:
output = np.zeros_like(A)
output[:,:] = A
for i in range(M):
output[i,:] = output[i,:]/la.norm(output[i,:])
for j in range(i+1, M):
output[j,:] = output[j,:] - np.dot(output[j,:], output[i,:])*output[i,:]
return output
if __name__ == '__main__':
A = np.random.random((6,6))
print('A')
print(A)
out = gso(A)
print('\n')
print(out)
print('\n')
print(np.dot(out.T, out))
for i in range(A.shape[0]):
for j in range(A.shape[0]):
print(i, j, np.dot(out[i,:], out[j,:]))
print('\n')
| 3.671875
| 4
|
src/train_spp.py
|
atreyasha/spp-explainability
| 1
|
12780698
|
<reponame>atreyasha/spp-explainability<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from tqdm import tqdm
from glob import glob
from math import ceil
from functools import partial
from collections import OrderedDict
from tensorboardX import SummaryWriter
from torch.optim import Adam
from torch.nn import NLLLoss, Module, Embedding
from torch.nn.functional import log_softmax
from torch.optim.lr_scheduler import ReduceLROnPlateau
from sklearn.metrics import accuracy_score
from sklearn.model_selection import ParameterGrid
from typing import List, Union, Tuple, cast, Any, Callable, Optional
from .utils.parser_utils import ArgparseFormatter
from .utils.data_utils import (vocab_from_text, read_labels, read_docs,
read_embeddings, Vocab, PAD_TOKEN_INDEX)
from .utils.model_utils import (shuffled_chunked_sorted, chunked_sorted,
to_cuda, enable_gradient_clipping, timestamp,
Batch, Semiring, LogSpaceMaxProductSemiring,
MaxSumSemiring)
from .utils.logging_utils import (stdout_root_logger, add_file_handler,
remove_all_file_handlers)
from .arg_parser import (spp_arg_parser, train_arg_parser, logging_arg_parser,
tqdm_arg_parser, hardware_arg_parser,
grid_train_arg_parser)
from .torch_module_spp import SoftPatternClassifier
import numpy as np
import argparse
import logging
import signal
import torch
import copy
import json
import sys
import os
# get root LOGGER in case script is called by another
LOGGER = logging.getLogger(__name__)
# define exit-codes
FINISHED_EPOCHS = 0
PATIENCE_REACHED = 1
INTERRUPTION = 2
def signal_handler(filename: str, *args):
save_exit_code(filename, INTERRUPTION)
sys.exit()
def save_exit_code(filename: str, code: int) -> None:
with open(filename, "w") as output_file_stream:
output_file_stream.write("%s\n" % code)
def get_exit_code(filename: str) -> int:
with open(filename, "r") as input_file_stream:
exit_code = int(input_file_stream.readline().strip())
return exit_code
def parse_configs_to_args(args: argparse.Namespace,
prefix: str = "",
training: bool = True) -> argparse.Namespace:
# make copy of existing argument namespace
args = copy.deepcopy(args)
# check for json configs and add them to list
json_files = []
json_files.append(
os.path.join(args.model_log_directory, prefix + "model_config.json"))
if training:
json_files.append(
os.path.join(args.model_log_directory,
prefix + "training_config.json"))
# raise error if any of them are missing
for json_file in json_files:
if not os.path.exists(json_file):
raise FileNotFoundError("File not found: %s" % json_file)
# update argument namespace with information from json files
for json_file in json_files:
with open(json_file, "r") as input_file_stream:
args.__dict__.update(json.load(input_file_stream))
return args
def set_hardware(args: argparse.Namespace) -> Optional[torch.device]:
# set torch number of threads
if args.torch_num_threads is None:
LOGGER.info("Using default number of CPU threads: %s" %
torch.get_num_threads())
else:
torch.set_num_threads(args.torch_num_threads)
LOGGER.info("Using specified number of CPU threads: %s" %
args.torch_num_threads)
# specify gpu device if relevant
if args.gpu:
gpu_device: Optional[torch.device]
gpu_device = torch.device(args.gpu_device)
LOGGER.info("Using GPU device: %s" % args.gpu_device)
else:
gpu_device = None
LOGGER.info("Using CPU device")
# return device
return gpu_device
def get_grid_config(args: argparse.Namespace) -> dict:
with open(args.grid_config, "r") as input_file_stream:
grid_dict = json.load(input_file_stream)
return grid_dict
def get_grid_args_superset(
args: argparse.Namespace,
param_grid_mapping: dict) -> List[argparse.Namespace]:
args_superset = []
# ensure param_grid_mapping keys are integers
param_grid_mapping = {
int(key): value
for key, value in param_grid_mapping.items()
}
for i in sorted(param_grid_mapping.keys()):
param_grid_instance = param_grid_mapping[i]
args_copy = copy.deepcopy(args)
for key in param_grid_instance:
if key in args.__dict__:
setattr(args_copy, key, param_grid_instance[key])
args_superset.append(args_copy)
return args_superset
def get_pattern_specs(args: argparse.Namespace) -> 'OrderedDict[int, int]':
    # convert the pattern_specs string into an OrderedDict
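    # illustrative example: a patterns string such as "5-50_6-50" maps to OrderedDict([(5, 50), (6, 50)])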
pattern_specs: 'OrderedDict[int, int]' = OrderedDict(
sorted(
(
[int(y) for y in x.split("-")] # type: ignore
for x in args.patterns.split("_")),
key=lambda t: t[0]))
    # log diagnostic information on input patterns
LOGGER.info("Patterns: %s" % pattern_specs)
# return final objects
return pattern_specs
def set_random_seed(args: argparse.Namespace) -> None:
# set global random seed if specified
np.random.seed(args.seed)
torch.manual_seed(args.seed)
def get_vocab(args: argparse.Namespace) -> Vocab:
# read valid and train vocabularies
train_vocab = vocab_from_text(args.train_data)
LOGGER.info("Training vocabulary size: %s" % len(train_vocab))
valid_vocab = vocab_from_text(args.valid_data)
LOGGER.info("Validation vocabulary size: %s" % len(valid_vocab))
# combine valid and train vocabularies into combined object
vocab_combined = valid_vocab | train_vocab
LOGGER.info("Combined vocabulary size: %s" % len(vocab_combined))
# return final Vocab object
return vocab_combined
def get_embeddings(args: argparse.Namespace,
vocab_combined: Vocab) -> Tuple[Vocab, torch.Tensor, int]:
# read embeddings file and output intersected vocab
vocab, embeddings, word_dim = read_embeddings(args.embeddings,
vocab_combined)
# convert embeddings to torch FloatTensor
embeddings = np.vstack(embeddings).astype(np.float32)
embeddings = torch.from_numpy(embeddings)
# return final tuple
return vocab, embeddings, word_dim
def get_vocab_diagnostics(vocab: Vocab, vocab_combined: Vocab,
word_dim: int) -> None:
# show output of tokens lost during vocabulary extraction
missing = [
token for token in vocab_combined.names if token not in vocab.names
]
LOGGER.info("GloVe embedding dimensions: %s" % word_dim)
LOGGER.info("GloVe-intersected vocabulary size: %s" % len(vocab))
LOGGER.info("Number of tokens not found in GloVe vocabulary: %s" %
len(missing))
LOGGER.info("Lost tokens: %s" % missing)
def get_train_valid_data(
args: argparse.Namespace, vocab: Vocab
) -> Tuple[List[List[str]], List[List[str]], List[Tuple[List[int], int]],
List[Tuple[List[int], int]], int]:
# read train data
train_input, train_text = read_docs(args.train_data, vocab)
LOGGER.info("Sample training text: %s" % train_text[:10])
train_input = cast(List[List[int]], train_input)
train_text = cast(List[List[str]], train_text)
train_labels = read_labels(args.train_labels)
num_classes = len(set(train_labels))
train_data = list(zip(train_input, train_labels))
# read validation data
valid_input, valid_text = read_docs(args.valid_data, vocab)
LOGGER.info("Sample validation text: %s" % valid_text[:10])
valid_input = cast(List[List[int]], valid_input)
valid_text = cast(List[List[str]], valid_text)
valid_labels = read_labels(args.valid_labels)
valid_data = list(zip(valid_input, valid_labels))
# truncate data if necessary
if args.max_train_instances is not None:
train_data = train_data[:args.max_train_instances]
valid_data = valid_data[:args.max_train_instances]
train_text = train_text[:args.max_train_instances]
valid_text = valid_text[:args.max_train_instances]
# log diagnostic information
LOGGER.info("Number of classes: %s" % num_classes)
LOGGER.info("Training instances: %s" % len(train_data))
LOGGER.info("Validation instances: %s" % len(valid_data))
# return final tuple object
return train_text, valid_text, train_data, valid_data, num_classes
def get_semiring(args: argparse.Namespace) -> Semiring:
# define semiring as per argument provided and log
if args.semiring == "MaxSumSemiring":
semiring = MaxSumSemiring
elif args.semiring == "MaxProductSemiring":
semiring = LogSpaceMaxProductSemiring
LOGGER.info("Semiring: %s" % args.semiring)
return semiring
def dump_configs(args: argparse.Namespace,
model_log_directory: str,
prefix: str = "") -> None:
# create dictionaries to fill up
spp_args_dict = {}
train_args_dict = {}
# extract real arguments and fill up model dictionary
for action in spp_arg_parser()._actions:
spp_args_dict[action.dest] = getattr(args, action.dest)
# extract real arguments and fill up training dictionary
for action in train_arg_parser()._actions:
train_args_dict[action.dest] = getattr(args, action.dest)
# dump soft patterns model arguments for posterity
with open(os.path.join(model_log_directory, prefix + "model_config.json"),
"w") as output_file_stream:
json.dump(spp_args_dict, output_file_stream, ensure_ascii=False)
# dump training arguments for posterity
with open(
os.path.join(model_log_directory, prefix + "training_config.json"),
"w") as output_file_stream:
json.dump(train_args_dict, output_file_stream, ensure_ascii=False)
def save_checkpoint(epoch: int, update: int, samples_seen: int,
model: torch.nn.Module, optimizer: torch.optim.Optimizer,
scheduler: Optional[ReduceLROnPlateau],
numpy_epoch_random_state: Tuple, train_loss: float,
best_valid_loss: float, best_valid_loss_index: int,
best_valid_acc: float, filename: str) -> None:
torch.save(
{
"epoch":
epoch,
"update":
update,
"samples_seen":
samples_seen,
"model_state_dict":
model.state_dict(),
"optimizer_state_dict":
optimizer.state_dict(),
"scheduler_state_dict":
scheduler.state_dict() if scheduler is not None else None,
"train_loss":
train_loss,
"best_valid_loss":
best_valid_loss,
"best_valid_loss_index":
best_valid_loss_index,
"best_valid_acc":
best_valid_acc,
"numpy_epoch_random_state":
numpy_epoch_random_state,
"numpy_last_random_state":
np.random.get_state(),
"torch_last_random_state":
torch.random.get_rng_state()
}, filename)
def train_batch(model: Module,
batch: Batch,
num_classes: int,
gold: List[int],
optimizer: torch.optim.Optimizer,
loss_function: torch.nn.modules.loss._Loss,
gpu_device: Optional[torch.device] = None) -> torch.Tensor:
# set optimizer gradients to zero
optimizer.zero_grad()
# compute model loss
loss = compute_loss(model, batch, num_classes, gold, loss_function,
gpu_device)
# compute loss gradients for all parameters
loss.backward()
# perform a single optimization step
optimizer.step()
# detach loss from computational graph and return tensor data
return loss.detach()
def compute_loss(model: Module, batch: Batch, num_classes: int,
gold: List[int], loss_function: torch.nn.modules.loss._Loss,
gpu_device: Optional[torch.device]) -> torch.Tensor:
# compute model outputs given batch
output = model.forward(batch)
# return loss over output and gold
return loss_function(
log_softmax(output, dim=1).view(batch.size(), num_classes),
to_cuda(gpu_device)(torch.LongTensor(gold)))
def evaluate_metric(model: Module,
data: List[Tuple[List[int], int]],
batch_size: int,
gpu_device: Optional[torch.device],
metric: Callable[[List[int], List[int]], Any],
max_doc_len: Optional[int] = None) -> Any:
# instantiate local storage variable
predicted = []
aggregate_gold = []
# chunk data into sorted batches and iterate
for batch in chunked_sorted(data, batch_size):
# create batch and parse gold output
batch, gold = Batch( # type: ignore
[x for x, y in batch],
model.embeddings, # type: ignore
to_cuda(gpu_device),
0.,
max_doc_len), [y for x, y in batch]
# get raw output using model
output = model.forward(batch) # type: ignore
# get predicted classes from raw output
predicted.extend(torch.argmax(output, 1).tolist())
aggregate_gold.extend(gold)
# return output of metric
return metric(aggregate_gold, predicted)
def train_inner(train_data: List[Tuple[List[int], int]],
valid_data: List[Tuple[List[int], int]],
model: Module,
num_classes: int,
epochs: int,
evaluation_period: int,
only_epoch_eval: bool,
model_log_directory: str,
learning_rate: float,
batch_size: int,
disable_scheduler: bool = False,
scheduler_patience: int = 10,
scheduler_factor: float = 0.1,
gpu_device: Optional[torch.device] = None,
clip_threshold: Optional[float] = None,
max_doc_len: Optional[int] = None,
word_dropout: float = 0,
patience: int = 30,
resume_training: bool = False,
disable_tqdm: bool = False,
tqdm_update_period: int = 1) -> None:
# create signal handlers in case script receives termination signals
# adapted from: https://stackoverflow.com/a/31709094
for specific_signal in [
signal.SIGINT, signal.SIGTERM, signal.SIGHUP, signal.SIGQUIT
]:
signal.signal(
specific_signal,
partial(signal_handler,
os.path.join(model_log_directory, "exit_code")))
# initialize general local variables
updates_per_epoch = ceil(len(train_data) / batch_size)
patience_reached = False
# load model checkpoint if training is being resumed
if resume_training and len(
glob(os.path.join(model_log_directory, "*last*.pt"))) > 0:
model_checkpoint = torch.load(glob(
os.path.join(model_log_directory, "*last*.pt"))[0],
map_location=torch.device("cpu"))
model.load_state_dict(
model_checkpoint["model_state_dict"]) # type: ignore
if (model_checkpoint["update"] + # type: ignore
1) == updates_per_epoch: # type: ignore
current_epoch: int = model_checkpoint["epoch"] + 1 # type: ignore
current_update: int = 0
else:
current_epoch: int = model_checkpoint["epoch"] # type: ignore
current_update: int = model_checkpoint["update"] + 1 # type: ignore
best_valid_loss: float = model_checkpoint[ # type: ignore
"best_valid_loss"] # type: ignore
best_valid_loss_index: int = model_checkpoint[ # type: ignore
"best_valid_loss_index"] # type: ignore
best_valid_acc: float = model_checkpoint[ # type: ignore
"best_valid_acc"] # type: ignore
# check for edge-case failures
if current_epoch >= epochs:
# log information at the end of training
LOGGER.info("%s training epoch(s) previously completed, exiting" %
epochs)
# save exit-code and final processes
save_exit_code(os.path.join(model_log_directory, "exit_code"),
FINISHED_EPOCHS)
return None
elif best_valid_loss_index >= patience:
LOGGER.info("Patience threshold previously reached, exiting")
# save exit-code and final processes
save_exit_code(os.path.join(model_log_directory, "exit_code"),
PATIENCE_REACHED)
return None
else:
resume_training = False
current_epoch = 0
current_update = 0
best_valid_loss_index = 0
best_valid_loss = float("inf")
best_valid_acc = float("-inf")
# send model to correct device
if gpu_device is not None:
LOGGER.info("Transferring model to GPU device: %s" % gpu_device)
model.to(gpu_device)
# instantiate Adam optimizer
LOGGER.info("Initializing Adam optimizer with LR: %s" % learning_rate)
optimizer = Adam(model.parameters(), lr=learning_rate)
# load optimizer state dictionary
if resume_training:
optimizer.load_state_dict(
model_checkpoint["optimizer_state_dict"]) # type: ignore
# instantiate negative log-likelihood loss which is summed over batch
LOGGER.info("Using NLLLoss with sum reduction")
loss_function = NLLLoss(weight=None, reduction="sum")
# enable gradient clipping in-place if provided
if clip_threshold is not None and clip_threshold > 0:
LOGGER.info("Enabling gradient clipping with threshold: %s" %
clip_threshold)
enable_gradient_clipping(model, clip_threshold)
# initialize learning rate scheduler if relevant
if not disable_scheduler:
LOGGER.info(("Initializing learning rate scheduler with "
"factor=%s and patience=%s") %
(scheduler_factor, scheduler_patience))
scheduler: Optional[ReduceLROnPlateau]
scheduler = ReduceLROnPlateau(optimizer,
mode='min',
factor=scheduler_factor,
patience=scheduler_patience,
verbose=True)
if resume_training:
scheduler.load_state_dict(
model_checkpoint["scheduler_state_dict"]) # type: ignore
else:
scheduler = None
# initialize tensorboard writer if provided
LOGGER.info("Initializing tensorboard writer in directory: %s" %
os.path.join(model_log_directory, "events"))
writer = SummaryWriter(os.path.join(model_log_directory, "events"))
# set numpy and torch RNG back to previous states before training
if resume_training:
if current_update == 0:
np.random.set_state(
model_checkpoint["numpy_last_random_state"]) # type: ignore
else:
np.random.set_state(
model_checkpoint["numpy_epoch_random_state"]) # type: ignore
torch.random.set_rng_state(
model_checkpoint["torch_last_random_state"]) # type: ignore
# loop over epochs
for epoch in range(current_epoch, epochs):
# set model on train mode and enable autograd
model.train()
torch.autograd.set_grad_enabled(True)
# initialize loop variables
if resume_training and epoch == current_epoch and current_update != 0:
train_loss: Union[float,
torch.Tensor] = model_checkpoint[ # type: ignore
"train_loss"] # type: ignore
samples_seen: int = model_checkpoint[ # type: ignore
"samples_seen"] # type: ignore
else:
train_loss = 0.
samples_seen = 0
# cache numpy random state for model checkpoint
numpy_epoch_random_state = np.random.get_state()
# main training loop
LOGGER.info("Training SoPa++ model")
with tqdm(shuffled_chunked_sorted(train_data, batch_size),
position=0,
mininterval=0.05,
disable=disable_tqdm,
unit="batch",
desc="Training [Epoch %s/%s]" %
(epoch + 1, epochs)) as train_tqdm_batches:
# loop over train batches
for update, batch in enumerate(train_tqdm_batches):
# return to previous update and random state, if relevant
if (resume_training and epoch == current_epoch
and current_update != 0):
if update < current_update:
continue
elif update == current_update:
np.random.set_state(model_checkpoint[ # type: ignore
"numpy_last_random_state"]) # type: ignore
# create batch object and parse out gold labels
batch, gold = Batch(
[x[0] for x in batch],
model.embeddings, # type: ignore
to_cuda(gpu_device),
word_dropout,
max_doc_len), [x[1] for x in batch]
# find aggregate loss across samples in batch
train_batch_loss = train_batch(model, batch, num_classes, gold,
optimizer, loss_function,
gpu_device)
# add batch loss to train_loss
train_loss += train_batch_loss # type: ignore
# increment samples seen
samples_seen += batch.size()
# update tqdm progress bar
if (update + 1) % tqdm_update_period == 0 or (
update + 1) == len(train_tqdm_batches):
train_tqdm_batches.set_postfix(
batch_loss=train_batch_loss.item() / batch.size())
# start evaluation routine
if (not only_epoch_eval and (update + 1) % evaluation_period
== 0) or (update + 1) == len(train_tqdm_batches):
# update tqdm batches counter
train_tqdm_batches.update()
# set valid loss to zero
update_number = (epoch * updates_per_epoch) + (update + 1)
valid_loss: Union[float, torch.Tensor] = 0.
# set model on eval mode and disable autograd
model.eval()
torch.autograd.set_grad_enabled(False)
# compute mean train loss over updates and accuracy
# NOTE: mean_train_loss contains stochastic noise
LOGGER.info("Evaluating SoPa++ on training set")
train_loss = cast(torch.Tensor, train_loss)
mean_train_loss = train_loss.item() / samples_seen
train_acc = evaluate_metric(model, train_data, batch_size,
gpu_device, accuracy_score,
max_doc_len)
# add training loss data
writer.add_scalar("loss/train_loss", mean_train_loss,
update_number)
writer.add_scalar("accuracy/train_accuracy", train_acc,
update_number)
# add named parameter data
for name, param in model.named_parameters():
writer.add_scalar("parameter_mean/" + name,
param.detach().mean(), update_number)
writer.add_scalar("parameter_std/" + name,
param.detach().std(), update_number)
if param.grad is not None:
writer.add_scalar("gradient_mean/" + name,
param.grad.detach().mean(),
update_number)
writer.add_scalar("gradient_std/" + name,
param.grad.detach().std(),
update_number)
# loop over static valid set
LOGGER.info("Evaluating SoPa++ on validation set")
with tqdm(chunked_sorted(valid_data, batch_size),
position=0,
mininterval=0.05,
disable=disable_tqdm,
unit="batch",
desc="Validating [Epoch %s/%s] [Batch %s/%s]" %
(epoch + 1, epochs, update + 1,
updates_per_epoch)) as valid_tqdm_batches:
for valid_update, batch in enumerate(
valid_tqdm_batches):
# create batch object and parse out gold labels
batch, gold = Batch(
[x[0] for x in batch],
model.embeddings, # type: ignore
to_cuda(gpu_device),
0.,
max_doc_len), [x[1] for x in batch]
# find aggregate loss across valid samples in batch
valid_batch_loss = compute_loss(
model, batch, num_classes, gold, loss_function,
gpu_device)
# add batch loss to valid_loss
valid_loss += valid_batch_loss # type: ignore
if (valid_update +
1) % tqdm_update_period == 0 or (
valid_update +
1) == len(valid_tqdm_batches):
valid_tqdm_batches.set_postfix(
batch_loss=valid_batch_loss.item() /
batch.size())
# compute mean valid loss and accuracy
valid_loss = cast(torch.Tensor, valid_loss)
mean_valid_loss = valid_loss.item() / len(valid_data)
valid_acc = evaluate_metric(model, valid_data, batch_size,
gpu_device, accuracy_score,
max_doc_len)
# set model on train mode and enable autograd
model.train()
torch.autograd.set_grad_enabled(True)
# add valid loss data to tensorboard
writer.add_scalar("loss/valid_loss", mean_valid_loss,
update_number)
writer.add_scalar("accuracy/valid_accuracy", valid_acc,
update_number)
# log out report of current evaluation state
LOGGER.info("Epoch: {}/{}, Batch: {}/{}".format(
epoch + 1, epochs, (update + 1), updates_per_epoch))
LOGGER.info("Mean training loss: {:.3f}, "
"Training accuracy: {:.3f}%".format(
mean_train_loss, train_acc * 100))
LOGGER.info("Mean validation loss: {:.3f}, "
"Validation accuracy: {:.3f}%".format(
mean_valid_loss, valid_acc * 100))
# apply learning rate scheduler after evaluation
if scheduler is not None:
scheduler.step(valid_loss)
# check for loss improvement and save model if necessary
# optionally increment patience counter or stop training
# NOTE: loss values are summed over all data (not mean)
if valid_loss.item() < best_valid_loss:
# log information and update records
LOGGER.info("New best validation loss")
if valid_acc > best_valid_acc:
best_valid_acc = valid_acc
LOGGER.info("New best validation accuracy")
# update patience related diagnostics
best_valid_loss = valid_loss.item()
best_valid_loss_index = 0
LOGGER.info("Patience counter: %s/%s" %
(best_valid_loss_index, patience))
# find previous best checkpoint(s)
legacy_checkpoints = glob(
os.path.join(model_log_directory, "*_best_*.pt"))
# save new best checkpoint
model_save_file = os.path.join(
model_log_directory,
"spp_checkpoint_best_{}_{}.pt".format(
epoch, (update + 1)))
LOGGER.info("Saving best checkpoint: %s" %
model_save_file)
save_checkpoint(epoch, update, samples_seen, model,
optimizer, scheduler,
numpy_epoch_random_state,
train_loss.item(), best_valid_loss,
best_valid_loss_index, best_valid_acc,
model_save_file)
# delete previous best checkpoint(s)
for legacy_checkpoint in legacy_checkpoints:
os.remove(legacy_checkpoint)
else:
# update patience related diagnostics
best_valid_loss_index += 1
LOGGER.info("Patience counter: %s/%s" %
(best_valid_loss_index, patience))
# create hook to exit training if patience reached
if best_valid_loss_index == patience:
patience_reached = True
# find previous last checkpoint(s)
legacy_checkpoints = glob(
os.path.join(model_log_directory, "*_last_*.pt"))
# save latest checkpoint
model_save_file = os.path.join(
model_log_directory,
"spp_checkpoint_last_{}_{}.pt".format(
epoch, (update + 1)))
LOGGER.info("Saving last checkpoint: %s" % model_save_file)
save_checkpoint(epoch, update, samples_seen, model,
optimizer,
scheduler, numpy_epoch_random_state,
train_loss.item(), best_valid_loss,
best_valid_loss_index, best_valid_acc,
model_save_file)
# delete previous last checkpoint(s)
for legacy_checkpoint in legacy_checkpoints:
os.remove(legacy_checkpoint)
# hook to stop training in case patience was reached
# if it was reached strictly before last epoch and update
if patience_reached:
if not (epoch == max(range(epochs)) and
(update + 1) == len(train_tqdm_batches)):
LOGGER.info("Patience threshold reached, "
"stopping training")
# save exit-code and final processes
save_exit_code(
os.path.join(model_log_directory, "exit_code"),
PATIENCE_REACHED)
return None
# log information at the end of training
LOGGER.info("%s training epoch(s) completed, stopping training" % epochs)
# save exit-code and final processes
save_exit_code(os.path.join(model_log_directory, "exit_code"),
FINISHED_EPOCHS)
def train_outer(args: argparse.Namespace, resume_training=False) -> None:
# create model log directory
os.makedirs(args.model_log_directory, exist_ok=True)
# execute code while catching any errors
try:
# update LOGGER object with file handler
global LOGGER
add_file_handler(LOGGER,
os.path.join(args.model_log_directory, "session.log"))
# check for configs and exit codes, decide next steps conditionally
if resume_training:
try:
args = parse_configs_to_args(args)
exit_code_file = os.path.join(args.model_log_directory,
"exit_code")
if not os.path.exists(exit_code_file):
LOGGER.info(
"Exit-code file not found, continuing training")
else:
exit_code = get_exit_code(exit_code_file)
if exit_code == 0:
LOGGER.info(
("Exit-code 0: training epochs have already "
"been reached"))
return None
elif exit_code == 1:
LOGGER.info(
("Exit-code 1: patience threshold has already "
"been reached"))
return None
elif exit_code == 2:
LOGGER.info(
("Exit-code 2: interruption during previous "
"training, continuing training"))
except FileNotFoundError:
if args.grid_training:
resume_training = False
else:
raise
# log namespace arguments and model directory
LOGGER.info(args)
LOGGER.info("Model log directory: %s" % args.model_log_directory)
# set gpu and cpu hardware
gpu_device = set_hardware(args)
# get relevant patterns
pattern_specs = get_pattern_specs(args)
# set initial random seeds
set_random_seed(args)
# get input vocab
vocab_combined = get_vocab(args)
# get final vocab, embeddings and word_dim
vocab, embeddings, word_dim = get_embeddings(args, vocab_combined)
# add word_dim into arguments
args.word_dim = word_dim
# show vocabulary diagnostics
get_vocab_diagnostics(vocab, vocab_combined, word_dim)
# get embeddings as torch Module
embeddings = Embedding.from_pretrained(embeddings,
freeze=args.static_embeddings,
padding_idx=PAD_TOKEN_INDEX)
# get training and validation data
_, _, train_data, valid_data, num_classes = get_train_valid_data(
args, vocab)
# get semiring
semiring = get_semiring(args)
# create SoftPatternClassifier
model = SoftPatternClassifier(
pattern_specs,
num_classes,
embeddings, # type:ignore
vocab,
semiring,
args.tau_threshold,
args.no_wildcards,
args.bias_scale,
args.wildcard_scale,
args.dropout)
# log information about model
LOGGER.info("Model: %s" % model)
# print model diagnostics and dump files
LOGGER.info("Total model parameters: %s" %
sum(parameter.nelement()
for parameter in model.parameters()))
dump_configs(args, args.model_log_directory)
vocab.dump(args.model_log_directory)
# execute inner train function
train_inner(train_data, valid_data, model, num_classes, args.epochs,
args.evaluation_period, args.only_epoch_eval,
args.model_log_directory, args.learning_rate,
args.batch_size, args.disable_scheduler,
args.scheduler_patience, args.scheduler_factor, gpu_device,
args.clip_threshold, args.max_doc_len, args.word_dropout,
args.patience, resume_training, args.disable_tqdm,
args.tqdm_update_period)
finally:
# update LOGGER object to remove file handler
remove_all_file_handlers(LOGGER)
def main(args: argparse.Namespace) -> None:
# depending on training type, create appropriate argparse namespaces
if args.grid_training:
# redefine models log directory
args.models_directory = os.path.join(args.models_directory,
"spp_grid_train_" + timestamp())
os.makedirs(args.models_directory, exist_ok=True)
# dump current training configs
dump_configs(args, args.models_directory, "base_")
# get grid config and add random iterations to it
grid_dict = get_grid_config(args)
# add random seed into grid if necessary
if args.num_random_iterations > 1:
seed = list(range(0, args.num_random_iterations))
grid_dict["seed"] = seed
# dump parameter grid to file for re-use
param_grid_mapping = {
i: param_grid_instance
for i, param_grid_instance in enumerate(ParameterGrid(grid_dict))
}
with open(os.path.join(args.models_directory, "grid_config.json"),
"w") as output_file_stream:
json.dump(param_grid_mapping,
output_file_stream,
ensure_ascii=False)
# process new args superset
args_superset = get_grid_args_superset(args, param_grid_mapping)
else:
# make trivial superset
args_superset = [args]
# loop and train
for i, args in enumerate(args_superset):
args.model_log_directory = os.path.join(
args.models_directory, "spp_single_train_" +
timestamp() if not args.grid_training else "spp_single_train_" +
str(i))
train_outer(args, resume_training=False)
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=ArgparseFormatter,
parents=[
train_arg_parser(),
grid_train_arg_parser(),
spp_arg_parser(),
hardware_arg_parser(),
logging_arg_parser(),
tqdm_arg_parser()
])
LOGGER = stdout_root_logger(
logging_arg_parser().parse_known_args()[0].logging_level)
main(parser.parse_args())
| 1.90625
| 2
|
mopack/builders/bfg9000.py
|
jimporter/mopack
| 0
|
12780699
|
import os
from . import Builder, BuilderOptions
from .. import types
from ..environment import get_cmd
from ..freezedried import FreezeDried
from ..log import LogFile
from ..path import pushd
from ..shell import ShellArguments
_known_install_types = ('prefix', 'exec-prefix', 'bindir', 'libdir',
'includedir')
@FreezeDried.fields(rehydrate={'extra_args': ShellArguments})
class Bfg9000Builder(Builder):
type = 'bfg9000'
_version = 1
_path_bases = ('srcdir', 'builddir')
class Options(BuilderOptions):
type = 'bfg9000'
_version = 1
@staticmethod
def upgrade(config, version):
return config
def __init__(self):
self.toolchain = types.Unset
def __call__(self, *, toolchain=types.Unset, config_file,
_symbols, _child_config=False):
if not _child_config and self.toolchain is types.Unset:
T = types.TypeCheck(locals(), _symbols)
config_dir = os.path.dirname(config_file)
T.toolchain(types.maybe_raw(types.path_string(config_dir)))
@staticmethod
def upgrade(config, version):
return config
def __init__(self, name, *, extra_args=None, submodules, **kwargs):
super().__init__(name, **kwargs)
T = types.TypeCheck(locals(), self._expr_symbols)
T.extra_args(types.shell_args(none_ok=True))
def set_usage(self, usage=None, **kwargs):
if usage is None:
usage = 'pkg_config'
super().set_usage(usage, **kwargs)
def _toolchain_args(self, toolchain):
return ['--toolchain', toolchain] if toolchain else []
def _install_args(self, deploy_paths):
args = []
for k, v in deploy_paths.items():
if k in _known_install_types:
args.extend(['--' + k, v])
return args
def build(self, pkgdir, srcdir):
builddir = self._builddir(pkgdir)
bfg9000 = get_cmd(self._common_options.env, 'BFG9000', 'bfg9000')
ninja = get_cmd(self._common_options.env, 'NINJA', 'ninja')
with LogFile.open(pkgdir, self.name) as logfile:
with pushd(srcdir):
logfile.check_call(
bfg9000 + ['configure', builddir] +
self._toolchain_args(self._this_options.toolchain) +
self._install_args(self._common_options.deploy_paths) +
self.extra_args.fill(srcdir=srcdir, builddir=builddir)
)
with pushd(builddir):
logfile.check_call(ninja)
def deploy(self, pkgdir, srcdir):
ninja = get_cmd(self._common_options.env, 'NINJA', 'ninja')
with LogFile.open(pkgdir, self.name, kind='deploy') as logfile:
with pushd(self._builddir(pkgdir)):
logfile.check_call(ninja + ['install'])
| 1.976563
| 2
|
LeetCode/LeetCode_Python-master/LeetCode_Python-master/Algorithm-Easy/443_String_Compression.py
|
Sycamore-City-passerby/ML
| 0
|
12780700
|
<gh_stars>0
class Solution:
def compress(self, chars):
"""
:type chars: List[str]
:rtype: int
"""
n = len(chars)
i , count = 0 , 1
for j in range(1,n+1):
if j < n and chars[j] == chars[j-1]:
count += 1
else:
chars[i] = chars[j-1]
i += 1
if count > 1:
for m in str(count):
chars[i] = m
i += 1
count = 1
return i
"""
Given an array of characters, compress it in-place.
The length after compression must always be smaller than or equal to the original array.
Every element of the array should be a character (not int) of length 1.
After you are done modifying the input array in-place, return the new length of the array.
Follow up:
Could you solve it using only O(1) extra space?
Example 1:
Input:
["a","a","b","b","c","c","c"]
Output:
Return 6, and the first 6 characters of the input array should be: ["a","2","b","2","c","3"]
Explanation:
"aa" is replaced by "a2". "bb" is replaced by "b2". "ccc" is replaced by "c3".
"""
| 3.796875
| 4
|
statsmodels/examples/tsa/ex_var_reorder.py
|
yarikoptic/statsmodels
| 20
|
12780701
|
from __future__ import print_function
import statsmodels.api as sm
from statsmodels.tsa.vector_ar.tests.test_var import TestVARResults
test_VAR = TestVARResults()
test_VAR.test_reorder()
| 1.304688
| 1
|
pypei/utils.py
|
dwu402/pyDD
| 2
|
12780702
|
<filename>pypei/utils.py
from matplotlib import dates as mdates
from matplotlib import pyplot as plt
def form_xmonths(ax: plt.Axes, dspec=r'1 %b %Y', majors=1, minors=15, mindspec=r'15 %b'):
"""Formats an pyplot axis with ticks and labels at the first of each Month"""
date_format = mdates.DateFormatter(dspec)
min_date_format = mdates.DateFormatter(mindspec)
major_format = mdates.DayLocator(majors)
minor_format = mdates.DayLocator(minors)
ax.xaxis.set_major_locator(major_format)
ax.xaxis.set_minor_locator(minor_format)
ax.xaxis.set_major_formatter(date_format)
ax.xaxis.set_minor_formatter(min_date_format)
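# --- Hedged usage sketch (assumption, not part of the original module) ---
# A minimal example of applying form_xmonths to a date-indexed plot; the dates
# and values below are illustrative only.
if __name__ == "__main__":
    import datetime
    days = [datetime.date(2021, 1, 1) + datetime.timedelta(days=i) for i in range(90)]
    values = list(range(90))
    fig, ax = plt.subplots()
    ax.plot(days, values)
    form_xmonths(ax)
    plt.show()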
| 3.0625
| 3
|
Main/migrations/0069_auto_20210502_1223.py
|
Muhammet-Yildiz/Ecommerce_Website-HepsiOrada
| 10
|
12780703
|
# Generated by Django 3.1.4 on 2021-05-02 09:23
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Main', '0068_remove_product_discount'),
]
operations = [
migrations.RenameField(
model_name='product',
old_name='advantage',
new_name='generalTitle',
),
]
| 1.53125
| 2
|
python/vision/error_functions.py
|
Oilgrim/ivs_sim
| 0
|
12780704
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 2 14:58:01 2017
@author: lracuna
"""
import autograd.numpy as np
def validation_points_error(Xi, Xo, Hestimated):
Xi = np.copy(Xi)
Xo = np.copy(Xo)
sum = 0
for i in range(Xo.shape[1]):
sum += geometric_distance(Xo[:,i],Xi[:,i],Hestimated)
return sum/Xo.shape[1]
def homography_matrix_error(Htrue, Hestimated):
return np.sqrt(np.sum((Htrue - Hestimated)**2))
def h_norm2d(x):
#Normalize points
for i in range(3):
x[i] = x[i]/x[2]
return x
def d(x1, x2):
return np.linalg.norm(h_norm2d(x1)-h_norm2d(x2))
def sym_transfer_error(Xo,Xi,H):
"""Symetric transfer error
Xo: Object points in 2D Homogeneous Coordinates (3xn)
Xi: Image points in 2D Homogeneous Coordinates (3xn)
"""
Xo = np.copy(Xo)
Xi = np.copy(Xi)
H = np.copy(H)
error1 = d(Xi,np.dot(H,Xo))
error2 = d(Xo,np.dot(np.linalg.inv(H),Xi))
return error1 + error2
def transfer_error(Xo,Xi,H):
"""transfer error including normalization
Xo: Object points in 2D Homogeneous Coordinates (3xn)
Xi: Image points in 2D Homogeneous Coordinates (3xn)
"""
Xo = np.copy(Xo)
Xi = np.copy(Xi)
H = np.copy(H)
return d(Xi,np.dot(H,Xo))
def algebraic_distance(Xo,Xi,H):
"""
Xi point measured in the image
Xo real value of the model point
H an estimated homography
as defined in Multiple View Geometry in Computer vision
"""
Xo = np.copy(Xo)
Xi = np.copy(Xi)
H = np.copy(H)
Xio = np.dot(H,Xo)
return (Xio[0]*Xi[2]-Xi[0]*Xio[2])**2 + (Xi[1]*Xio[2] - Xi[2]*Xio[1])**2
def geometric_distance(Xo,Xi,H):
"""
Xi point measured in the image
Xo real value of the model point
H an estimated homography
as defined in Multiple View Geometry in Computer vision
"""
Xo = np.copy(Xo)
Xi = np.copy(Xi)
H = np.copy(H)
Xio = np.dot(H,Xo)
return np.sqrt((Xi[0]/Xi[2] - Xio[0]/Xio[2])**2+(Xi[1]/Xi[2] - Xio[1]/Xio[2])**2)
def geometric_distance_points(Xo,Xi,H):
geom_distances = list()
for i in range(Xo.shape[1]):
geom_distances.append(geometric_distance(Xo[:,i],Xi[:,i],H))
return np.mean(geom_distances)
def volker_metric(A):
A = np.copy(A)
    # normalize each row
#A = A/np.linalg.norm(A,axis=1, ord = 1, keepdims=True)
for i in range(A.shape[0]):
squared_sum = 0
for j in range(A.shape[1]):
squared_sum += np.sqrt(A[i,j]**2)
A[i,:] = A[i,:] / squared_sum
# compute the dot product
As = np.dot(A,A.T)
# we are interested only on the upper top triangular matrix coefficients
metric = 0
start = 1
for i in range(As.shape[0]):
for j in range(start,As.shape[0]):
metric = metric + As[i,j]**2
start = start +1
#An alternative would be to use only the coefficients which correspond
# to different points.
#metric = np.sqrt(np.sum(As[[0,2,4,6],[1,3,5,7]]**2))
#X vs X
#metric = np.sum(As[[0,0,0,2,2,4],[2,4,6,4,6,6]]**2)
#Y vs Y
#metric = metric + np.sum(As[[1,1,1,3,3,5],[3,5,7,5,7,7]]**2)
return metric
def calculate_A_matrix(Xo, Xi):
""" Calculate the A matrix for the DLT algorithm: A.H = 0
Inputs:
    Xo: Object points in 3D Homogeneous Coordinates (3xn), Z coordinate removed
since the points should be on a plane
Xi: Image points in 2D Homogeneous Coordinates (3xn)
"""
Xo = np.copy(Xo)
Xi = np.copy(Xi)
Npts = Xo.shape[1]
A = np.zeros((2*Npts,9))
O = np.zeros(3)
for i in range(0, Npts):
X = Xo[:,i].T
u = Xi[0,i]
v = Xi[1,i]
w = Xi[2,i]
A[2*i,:] = np.array([O, -w*X, v*X]).reshape(1, 9)
A[2*i+1,:] = np.array([w*X, O, -u*X]).reshape(1, 9)
return A
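# A minimal sketch of the DLT solve that this A matrix feeds (A.h = 0): the
# homography is the right singular vector of A associated with its smallest
# singular value. This helper is illustrative only and is not used elsewhere here.
def example_dlt_homography(Xo, Xi):
    """Estimate H from 2D homogeneous correspondences (3xn each) via the DLT."""
    A = calculate_A_matrix(Xo, Xi)
    U, s, Vt = np.linalg.svd(A)
    H = Vt[-1, :].reshape(3, 3)
    return H / H[2, 2]  # fix the scale so that H[2, 2] == 1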
def get_matrix_conditioning_number(M):
#return np.linalg.norm(M,2)*np.linalg.norm(np.linalg.pinv(M),2)
return np.linalg.cond(M)
def get_matrix_pnorm_condition_number(M):
#https://de.mathworks.com/help/symbolic/cond.html?requestedDomain=www.<EMAIL>
return np.linalg.norm(M,2)*np.linalg.norm(np.linalg.pinv(M),2)
def condition_number(A):
U, s, V = np.linalg.svd(A,full_matrices=False)
greatest_singular_value = s[0]
smallest_singular_value = s[-2]  # note: second-smallest value; for an exact DLT system the smallest is ~0
return greatest_singular_value/smallest_singular_value
def rot_matrix_error(R0, R1, method = 'unit_quaternion_product'):
""" R0, R1 are 3x3 or 4x4 homogeneous Rotation matrixes
returns: the value of the error depending on the method """
if ((R0.shape != (4,4)) and (R0.shape != (3,3))):
print ("Error in the R0 input rotation matrix shape, must be 3x3 or 4x4")
print R0
return -1
if ((R1.shape != (4,4)) and (R1.shape != (3,3))):
print ("Error in the R1 input rotation matrix shape, must be 3x3 or 4x4")
print R1
return -1
if R0.shape == (3,3):
R = np.eye(4)
R[:3,:3] = R0
R0 = R
if R1.shape == (3,3):
R = np.eye(4)
R[:3,:3] = R1
R1 = R
if(method == 'unit_quaternion_product' ):
## From the paper "Metrics for 3D Rotations: Comparison and Analysis" <NAME>
# The 3D rotation error is computed using the inner product of unit quaterions
#We use the ros library TF to convert rotation matrix into unit quaternions
from tf import transformations
q0 = transformations.quaternion_from_matrix(R0)
q1 = transformations.quaternion_from_matrix(R1)
# We convert into unit quaternions
q0 = q0 / np.linalg.norm(q0)
q1 = q1 / np.linalg.norm(q1)
#Find the error as defined in the paper
rot_error = 1 - np.linalg.norm(np.dot(q0,q1))
if(method == 'angle'):
#option 2 find the angle of this rotation. In particular, the above is invalid
#for very large error angles (error > 90 degrees) and is imprecise for large
#error angles (angle > 45 degrees).
E = R1.dot(R0.T)
from cv2 import Rodrigues
rot_vector, J = Rodrigues(E[:3,:3])
angle = np.linalg.norm(rot_vector)
rot_error = np.rad2deg(angle)
return rot_error
def calc_estimated_pose_error(tvec_ref, rmat_ref, tvec_est, rmat_est):
# Translation error percentual
tvec_error = np.linalg.norm(tvec_est[:3] - tvec_ref[:3])/np.linalg.norm(tvec_ref[:3])*100.
#Rotation matrix error
rmat_error = rot_matrix_error(rmat_ref,rmat_est, method = 'angle')
return tvec_error, rmat_error
def low_upper_bound_homography_error(cam, ObjectPoints):
"""
param cam:
Camera object, has the current pose of the camera and the camera intrinsics.
param ObjectPoints:
Set of 4 different object points in 3D homogeneous coordinates (4x4)
"""
# We don't care about the Z coordinate
Xo = ObjectPoints[[0,1,3],:]
# Lets project the points using the simulated camera
imagePoints = np.array(cam.project(ObjectPoints, False))
# Calculate the A matrix of the homography
A_true = calculate_A_matrix(Xo, imagePoints)
A_noisy_list = []
H_noisy_list = []
A_noisy_norm_list = []
H_noisy_norm_list = []
for i in range(1000):
imagePoints_noisy = cam.addnoise_imagePoints(imagePoints, mean = 0, sd = 4)
A_noisy = calculate_A_matrix(Xo, imagePoints_noisy)
#DLT TRANSFORM
Xi = imagePoints_noisy
# homo2d is assumed to be the project's DLT module providing homography2d(); it is not imported in this file
Hnoisy_dlt,_,_ = homo2d.homography2d(Xo,Xi)
Hnoisy_dlt = Hnoisy_dlt/Hnoisy_dlt[2,2]
A_noisy_list.append(A_noisy)
H_noisy_list.append(Hnoisy_dlt)
# We compute the norms
A_noisy_norm_list.append(np.linalg.norm(A_noisy))
H_noisy_norm_list.append(np.linalg.norm(Hnoisy_dlt))
A_noise_mean = np.mean(A_noisy_norm_list, axis=0)
H_noisy_mean = np.mean(H_noisy_norm_list, axis=0)
# note: the mean norms computed above are overwritten below; the sample with the largest A norm is used instead
i_max = A_noisy_norm_list.index(max(A_noisy_norm_list))
A_noise_mean = A_noisy_list[i_max]
H_noisy_mean = H_noisy_list[i_max]
# TRUE VALUE OF HOMOGRAPHY OBTAINED FROM CAMERA PARAMETERS
H_true = cam.homography_from_Rt()
H_noisy_norm = np.linalg.norm(H_noisy_mean)
H_true_norm = np.linalg.norm(H_true)
A_noisy_norm = np.linalg.norm(A_noise_mean)
A_true_norm = np.linalg.norm(A_true)
cond = condition_number(A_true)
rel_error = np.linalg.norm(H_true-H_noisy_mean)/H_true_norm
Upper = cond*(np.linalg.norm(A_true-A_noise_mean))/A_true_norm
e1 = np.dot(A_true,H_true.reshape(9,1))
e2 = np.dot(A_noise_mean,H_noisy_mean.reshape(9,1))
lower = np.linalg.norm(e1-e2)/(H_noisy_norm*A_noisy_norm)
return rel_error, lower, Upper
def lower_bound_homography_error(cam, ObjectPoints):
pass
| 2.828125
| 3
|
ordia/app/__init__.py
|
nyurik/ordia
| 0
|
12780705
|
<reponame>nyurik/ordia
"""app."""
from __future__ import absolute_import, division, print_function
from flask import Flask
from ..base import Base
def create_app(max_ids=None):
"""Create webapp.
Factory for webapp.
Returns
-------
app : flask.app.Flask
Flask app object.
"""
app = Flask(__name__)
from .views import main as main_blueprint
app.register_blueprint(main_blueprint)
app.base = Base(max_ids=max_ids)
return app
| 2.34375
| 2
|
caldp/sysexit.py
|
spacetelescope/caldp
| 2
|
12780706
|
"""This module defines context managers which are used to trap exceptions
and exit Python cleanly with specific exit_codes which are then seen as
the numerical exit status of the process and ultimately Batch job.
The exit_on_exception() context manager is used to bracket a block of code
by mapping all exceptions onto some log output and a call to sys.exit():
with exit_on_exception(exit_codes.SOME_CODE, "Parts of the ERROR", "message output", "on exception."):
... the code you're trapping to SOME_CODE when things go wrong ...
The exit_on_exception() manager also enables simulating errors by defining the
CALDP_SIMULATE_ERROR=N environment variable. When the manager is called with a
code matching CALDP_SIMULATE_ERROR, instead of running the code block it fakes
an exception by performing the corresponding log output and sys.exit() call. A
few error codes are simulated more directly, particularly memory errors.
The exit_receiver() manager is used to bracket the top level of your code,
nominally main(), and land the CaldpExit() exception raised by
exit_on_exception() after the stack has been unwound and cleanup functions
performed. exit_receiver() then exits Python with the error code originally
passed into exit_on_exception().
>>> from caldp import log
>>> log.set_test_mode()
>>> log.reset()
"""
import sys
import os
import contextlib
import traceback
import resource
import time
import random
from caldp import log
from caldp import exit_codes
# ==============================================================================
class CaldpExit(SystemExit):
"""Handle like SystemExit, but we definitely threw it."""
class SubprocessFailure(Exception):
"""A called subprocess failed and may require signal reporting.
In Python, a negative subprocess returncode indicates that the absolute
value of the returncode is a signal number which killed the subprocess.
For completeness, in Linux, the program exit_code is a byte value. If the
sign bit is set, a signal and/or core dump occurred. The byte reported as
exit_code may be unsigned. The lower bits of the returncode define either
the program's exit status or a signum identifying the signal which killed
the process.
"""
def __init__(self, returncode):
self.returncode = returncode
@contextlib.contextmanager
def exit_on_exception(exit_code, *args):
"""exit_on_exception is a context manager which issues an error message
based on *args and then does sys.exit(exit_code) if an exception is
raised within the corresponding "with block".
>>> with exit_on_exception(1, "As expected", "it did not fail."):
... print("do it.")
do it.
>>> try: #doctest: +ELLIPSIS
... with exit_on_exception(2, "As expected", "it failed."):
... raise Exception("It failed!")
... print("do it.")
... except SystemExit:
... log.divider()
... print("Trapping SystemExit normally caught by exit_reciever() at top level.")
ERROR - ----------------------------- Fatal Exception -----------------------------
ERROR - As expected it failed.
ERROR - Traceback (most recent call last):
ERROR - File ".../sysexit.py", line ..., in exit_on_exception
ERROR - yield
ERROR - File "<doctest ...exit_on_exception[1]>", line ..., in <module>
ERROR - raise Exception("It failed!")
ERROR - Exception: It failed!
EXIT - CMDLINE_ERROR[2]: The program command line invocation was incorrect.
INFO - ---------------------------------------------------------------------------
Trapping SystemExit normally caught by exit_receiver() at top level.
Note that 'do it.' is never printed; SystemExit is caught above only for testing.
If CALDP_SIMULATE_ERROR is set to one of exit_codes, it will cause the
with exit_on_exception() block to act as if a failure has occurred:
>>> os.environ["CALDP_SIMULATE_ERROR"] = "2"
>>> try: #doctest: +ELLIPSIS
... with exit_on_exception(2, "As expected a failure was simulated"):
... print("should not see this")
... except SystemExit:
... pass
ERROR - ----------------------------- Fatal Exception -----------------------------
ERROR - As expected a failure was simulated
ERROR - Traceback (most recent call last):
ERROR - File ".../sysexit.py", line ..., in exit_on_exception
ERROR - raise RuntimeError(f"Simulating error = {simulated_code}")
ERROR - RuntimeError: Simulating error = 2
EXIT - CMDLINE_ERROR[2]: The program command line invocation was incorrect.
>>> os.environ["CALDP_SIMULATE_ERROR"] = str(exit_codes.CALDP_MEMORY_ERROR)
>>> try: #doctest: +ELLIPSIS
... with exit_on_exception(2, "Memory errors don't have to match"):
... print("Oh unhappy day.")
... except SystemExit:
... pass
ERROR - ----------------------------- Fatal Exception -----------------------------
ERROR - Memory errors don't have to match
ERROR - Traceback (most recent call last):
ERROR - File ".../sysexit.py", line ..., in exit_on_exception
ERROR - raise MemoryError("Simulated CALDP MemoryError.")
ERROR - MemoryError: Simulated CALDP MemoryError.
EXIT - CALDP_MEMORY_ERROR[32]: CALDP generated a Python MemoryError during processing or preview creation.
>>> os.environ["CALDP_SIMULATE_ERROR"] = str(exit_codes.OS_MEMORY_ERROR)
>>> try: #doctest: +ELLIPSIS
... with exit_on_exception(2, "Memory errors don't have to match"):
... print("Oh unhappy day.")
... except SystemExit:
... pass
ERROR - ----------------------------- Fatal Exception -----------------------------
ERROR - Memory errors don't have to match
ERROR - Traceback (most recent call last):
ERROR - File ".../sysexit.py", line ..., in exit_on_exception
ERROR - raise OSError("Cannot allocate memory...")
ERROR - OSError: Cannot allocate memory...
EXIT - OS_MEMORY_ERROR[34]: Python raised OSError(Cannot allocate memory...), possibly fork failure.
>>> os.environ["CALDP_SIMULATE_ERROR"] = "999"
>>> with exit_on_exception(3, "Only matching error codes are simulated."):
... print("should print normally")
should print normally
>>> del os.environ["CALDP_SIMULATE_ERROR"]
>>> saved, os._exit = os._exit, lambda x: print(f"os._exit({x})")
>>> with exit_receiver(): #doctest: +ELLIPSIS
... with exit_on_exception(exit_codes.STAGE1_ERROR, "Failure running processing stage1."):
... raise SubprocessFailure(-8)
ERROR - ----------------------------- Fatal Exception -----------------------------
ERROR - Failure running processing stage1.
ERROR - Traceback (most recent call last):
ERROR - File ".../caldp/sysexit.py", line ..., in exit_on_exception
ERROR - yield
ERROR - File "<doctest caldp.sysexit.exit_on_exception[...]>", line ..., in <module>
ERROR - raise SubprocessFailure(-8)
ERROR - caldp.sysexit.SubprocessFailure: -8
EXIT - Killed by UNIX signal SIGFPE[8]: 'Floating-point exception (ANSI).'
EXIT - STAGE1_ERROR[23]: An error occurred in this instrument's stage1 processing step. e.g. calxxx
os._exit(23)
>>> with exit_receiver(): #doctest: +ELLIPSIS
... with exit_on_exception(exit_codes.STAGE1_ERROR, "Failure running processing stage1."):
... raise OSError("Something other than memory")
ERROR - ----------------------------- Fatal Exception -----------------------------
ERROR - Failure running processing stage1.
ERROR - Traceback (most recent call last):
ERROR - File ".../sysexit.py", line ..., in exit_on_exception
ERROR - yield
ERROR - File "<doctest ...sysexit.exit_on_exception[...]>", line ..., in <module>
ERROR - raise OSError("Something other than memory")
ERROR - OSError: Something other than memory
EXIT - STAGE1_ERROR[23]: An error occurred in this instrument's stage1 processing step. e.g. calxxx
os._exit(23)
>>> os._exit = saved
"""
simulated_code = int(os.environ.get("CALDP_SIMULATE_ERROR", "0"))
try:
if simulated_code == exit_codes.CALDP_MEMORY_ERROR:
raise MemoryError("Simulated CALDP MemoryError.")
elif simulated_code == exit_codes.OS_MEMORY_ERROR:
raise OSError("Cannot allocate memory...")
elif simulated_code == exit_codes.SUBPROCESS_MEMORY_ERROR:
print("MemoryError", file=sys.stderr) # Output to process log determines final program exit status
raise RuntimeError("Simulated subprocess memory error with subsequent generic program exception.")
elif simulated_code == exit_codes.CONTAINER_MEMORY_ERROR:
log.info("Simulating hard memory error by allocating memory")
_ = bytearray(1024 * 2 ** 30) # XXXX does not trigger container limit as intended
elif exit_code == simulated_code:
raise RuntimeError(f"Simulating error = {simulated_code}")
yield
# don't mask memory errors or nested exit_on_exception handlers
except MemoryError:
_report_exception(exit_codes.CALDP_MEMORY_ERROR, args)
raise CaldpExit(exit_codes.CALDP_MEMORY_ERROR)
except OSError as exc:
if "Cannot allocate memory" in str(exc) + repr(exc):
_report_exception(exit_codes.OS_MEMORY_ERROR, args)
raise CaldpExit(exit_codes.OS_MEMORY_ERROR)
else:
_report_exception(exit_code, args)
raise CaldpExit(exit_code)
except CaldpExit:
raise
# As always, exit_code below defines what will become CALDP's program exit status.
# In contrast, exc.returncode is the exit status of a failed subprocess, which may
# encode an OS signal that killed that process.
except SubprocessFailure as exc:
_report_exception(exit_code, args, exc.returncode)
raise CaldpExit(exit_code)
except Exception:
_report_exception(exit_code, args)
raise CaldpExit(exit_code)
def _report_exception(exit_code, args=None, returncode=None):
"""Issue trigger output for exit_on_exception, including `exit_code` and
error message defined by `args`, as well as traceback.
"""
log.divider("Fatal Exception", func=log.error)
if args:
log.error(*args)
for line in traceback.format_exc().splitlines():
if line != "NoneType: None":
log.error(line)
if returncode and returncode < 0:
print(exit_codes.explain_signal(-returncode))
print(exit_codes.explain(exit_code))
@contextlib.contextmanager
def exit_receiver():
"""Use this contextmanager to bracket your top level code and land the sys.exit()
exceptions thrown by _raise_exit_exception() and exit_on_exception().
This program structure enables sys.exit() to fully unwind the stack doing
cleanup, then calls the low level os._exit() function which does no cleanup
as the "last thing".
If SystemExit is not raised by the code nested in the "with" block then
exit_receiver() essentially does nothing.
The program is exited with the numerical code passed to sys.exit().
>>> saved, os._exit = os._exit, lambda x: print(f"os._exit({x})")
>>> with exit_receiver(): #doctest: +ELLIPSIS
... print("Oh happy day.")
Oh happy day.
os._exit(0)
Generic unhandled exceptions are mapped to GENERIC_ERROR (1):
>>> def foo():
... print("foo!")
... bar()
>>> def bar():
... print("bar!")
... raise RuntimeError()
>>> with exit_receiver(): #doctest: +ELLIPSIS
... foo()
foo!
bar!
ERROR - ----------------------------- Fatal Exception -----------------------------
ERROR - Untrapped non-memory exception.
ERROR - Traceback (most recent call last):
ERROR - File ".../caldp/sysexit.py", line ..., in exit_receiver
ERROR - yield # go off and execute the block
ERROR - File "<doctest caldp.sysexit.exit_receiver[...]>", line ..., in <module>
ERROR - foo()
ERROR - File "<doctest caldp.sysexit.exit_receiver[...]>", line ..., in foo
ERROR - bar()
ERROR - File "<doctest caldp.sysexit.exit_receiver[...]>", line ..., in bar
ERROR - raise RuntimeError()
ERROR - RuntimeError
EXIT - GENERIC_ERROR[1]: An error with no specific CALDP handling occurred somewhere.
os._exit(1)
MemoryError is remapped to CALDP_MEMORY_ERROR (32) inside exit_on_exception or not:
>>> with exit_receiver(): #doctest: +ELLIPSIS
... raise MemoryError("CALDP used up all memory directly.")
ERROR - ----------------------------- Fatal Exception -----------------------------
ERROR - Untrapped memory exception.
ERROR - Traceback (most recent call last):
ERROR - File ".../caldp/sysexit.py", line ..., in exit_receiver
ERROR - yield # go off and execute the block
ERROR - File "<doctest caldp.sysexit.exit_receiver[...]>", line ..., in <module>
ERROR - raise MemoryError("CALDP used up all memory directly.")
ERROR - MemoryError: CALDP used up all memory directly.
EXIT - CALDP_MEMORY_ERROR[32]: CALDP generated a Python MemoryError during processing or preview creation.
os._exit(32)
Inside exit_on_exception, exit status is remapped to the exit_code parameter
of exit_on_exception():
>>> with exit_receiver(): #doctest: +ELLIPSIS
... raise OSError("Cannot allocate memory...")
ERROR - ----------------------------- Fatal Exception -----------------------------
ERROR - Untrapped OSError cannot allocate memory
ERROR - Traceback (most recent call last):
ERROR - File ".../sysexit.py", line ..., in exit_receiver
ERROR - yield # go off and execute the block
ERROR - File "<doctest ...sysexit.exit_receiver[...]>", line ..., in <module>
ERROR - raise OSError("Cannot allocate memory...")
ERROR - OSError: Cannot allocate memory...
EXIT - OS_MEMORY_ERROR[34]: Python raised OSError(Cannot allocate memory...), possibly fork failure.
os._exit(34)
>>> with exit_receiver(): #doctest: +ELLIPSIS
... raise OSError("Some non-memory os error.")
ERROR - ----------------------------- Fatal Exception -----------------------------
ERROR - Untrapped OSError, generic.
ERROR - Traceback (most recent call last):
ERROR - File ".../sysexit.py", line ..., in exit_receiver
ERROR - yield # go off and execute the block
ERROR - File "<doctest ...sysexit.exit_receiver[...]>", line ..., in <module>
ERROR - raise OSError("Some non-memory os error.")
ERROR - OSError: Some non-memory os error.
EXIT - GENERIC_ERROR[1]: An error with no specific CALDP handling occurred somewhere.
os._exit(1)
>>> with exit_receiver(): #doctest: +ELLIPSIS
... with exit_on_exception(exit_codes.STAGE1_ERROR, "Stage1 processing failed for <ippssoot>"):
... raise RuntimeError("Some obscure error")
ERROR - ----------------------------- Fatal Exception -----------------------------
ERROR - Stage1 processing failed for <ippssoot>
ERROR - Traceback (most recent call last):
ERROR - File ".../sysexit.py", line ..., in exit_on_exception
ERROR - yield
ERROR - File "<doctest ...sysexit.exit_receiver[...]>", line ..., in <module>
ERROR - raise RuntimeError("Some obscure error")
ERROR - RuntimeError: Some obscure error
EXIT - STAGE1_ERROR[23]: An error occurred in this instrument's stage1 processing step. e.g. calxxx
os._exit(23)
>>> os._exit = saved
"""
try:
# log.info("Container memory limit is: ", get_linux_memory_limit())
yield # go off and execute the block
code = exit_codes.SUCCESS
except CaldpExit as exc:
code = exc.code
# Already reported deeper
except MemoryError:
code = exit_codes.CALDP_MEMORY_ERROR
_report_exception(code, ("Untrapped memory exception.",))
except OSError as exc:
if "Cannot allocate memory" in str(exc) + repr(exc):
code = exit_codes.OS_MEMORY_ERROR
args = ("Untrapped OSError cannot callocate memory",)
else:
code = exit_codes.GENERIC_ERROR
args = ("Untrapped OSError, generic.",)
_report_exception(code, args)
except BaseException: # Catch absolutely everything.
code = exit_codes.GENERIC_ERROR
_report_exception(code, ("Untrapped non-memory exception.",))
os._exit(code)
def get_linux_memory_limit(): # pragma: no cover
"""This generally shows the full address space by default.
>> limit = get_linux_memory_limit()
>> assert isinstance(limit, int)
"""
if os.path.isfile("/sys/fs/cgroup/memory/memory.limit_in_bytes"):
with open("/sys/fs/cgroup/memory/memory.limit_in_bytes") as limit:
mem = int(limit.read())
return mem
else:
raise RuntimeError("get_linux_memory_limit() failed.") # pragma: no cover
def set_process_memory_limit(mem_in_bytes):
"""This can be used to limit the available address space / memory to
something less than is allocated to the container. Potentially that
will cause Python to generate a MemoryError rather than forcing a
container memory limit kill.
"""
resource.setrlimit(resource.RLIMIT_AS, (mem_in_bytes, mem_in_bytes)) # pragma: no cover
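# Illustrative use (the byte count is arbitrary): cap the process at ~3 GiB so that
# allocation failures surface as MemoryError rather than a container OOM kill.
#
#   set_process_memory_limit(3 * 1024 ** 3)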
# ==============================================================================
def retry(func, max_retries=3, min_sleep=1, max_sleep=60, backoff=2, exceptions=(Exception, SystemExit)):
"""a decorator for retrying a function call on exception
max_retries: number of times to retry
min_sleep: starting value for backing off, in seconds
max_sleep: sleep value not to exceed, in seconds
backoff: the exponential factor
exceptions: tuple of exceptions to catch and retry
"""
def decor(*args, **kwargs):
tried = 0
while tried < max_retries:
try:
return func(*args, **kwargs)
except exceptions as e:
# otherwise e is lost to the namespace cleanup,
# and we may need to raise it later
exc = e
tried += 1
sleep = exponential_backoff(tried, min_sleep, max_sleep, backoff)
log.warning(
f"{func.__name__} raised exception, using retry {tried} of {max_retries}, sleeping for {sleep} seconds "
)
time.sleep(sleep)
# if we're here, no attempt to call func() succeeded
raise exc
return decor
def exponential_backoff(iteration, min_sleep=1, max_sleep=64, backoff=2):
"""given the current number of attempts, return a sleep time using an exponential backoff algorithm
iteration: the current amount of retries used
min_sleep: minimum value to wait before retry, in seconds
max_sleep: maximum value to wait before retry, in seconds
note: if you allow too many retries that cause the backoff to exceed max_sleep,
you will lose the benefit of jitter
see i.e. https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
"""
# random uniform number in (0.5, 1) * backoff**iteration, clipped to [min_sleep, max_sleep]
return max(min(random.uniform(0.5, 1) * backoff ** iteration, max_sleep), min_sleep)
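# Illustrative sketch (names are hypothetical): retry() is applied directly as a
# decorator, so the defaults above (3 attempts, jittered exponential backoff)
# govern how often the call is re-attempted before the last exception is re-raised.
#
#   @retry
#   def flaky_download(url):
#       ...  # may raise transient network errors
#
#   flaky_download("https://example.org/data")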
# ==============================================================================
def test(): # pragma: no cover
from doctest import testmod
import caldp.sysexit
temp, os._exit = os._exit, lambda x: print(f"os._exit({x})")
test_result = testmod(caldp.sysexit)
os._exit = temp
return test_result
if __name__ == "__main__": # pragma: no cover
print(test())
| 3.65625
| 4
|
migrations/0004_auto_20180503_0115.py
|
contraslash/authentication-django
| 0
|
12780707
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-03 01:15
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('authentication', '0003_auto_20161030_0401'),
]
operations = [
migrations.RemoveField(
model_name='permission',
name='role',
),
migrations.RemoveField(
model_name='userprofile',
name='image',
),
migrations.RemoveField(
model_name='userprofile',
name='role',
),
migrations.DeleteModel(
name='Permission',
),
migrations.DeleteModel(
name='Role',
),
]
| 1.632813
| 2
|
particle/__init__.py
|
mikolasan/pyroguelike
| 0
|
12780708
|
'''
https://github.com/Mekire/pygame-particles
'''
from .particle import Emitter
| 1.148438
| 1
|
iatransfer/research/transfer/utils.py
|
KamilPiechowiak/iatransfer
| 4
|
12780709
|
<filename>iatransfer/research/transfer/utils.py
from typing import Dict
def get_transfer_method_name(transfer_method: Dict) -> str:
name = [transfer_method["transfer"]]
if "matching" in transfer_method:
name.append(transfer_method["matching"])
if "standardization" in transfer_method:
name.append(transfer_method["standardization"])
if "score" in transfer_method:
name.append(transfer_method["score"])
return "-".join(name)
| 2.9375
| 3
|
moya/context/sortmodifiers.py
|
moyaproject/moya
| 129
|
12780710
|
<reponame>moyaproject/moya
"""Hacked up script to sort modifiers"""
# Couldn't find a tool for this
"""
import io
with io.open('modifiers.py', 'rt') as f:
iter_lines = iter(f)
while 1:
line = next(iter_lines, None)
if line.startswith('class ExpressionModifiers('):
break
defs = []
while 1:
line = next(iter_lines, None)
if line is None:
break
if line.lstrip().startswith('def'):
defs.append([])
if defs:
defs[-1].append(line)
for d in sorted(defs, key=lambda m: m[0]):
print ''.join(d),
"""
| 2.6875
| 3
|
app/__init__.py
|
fauzaanirsyaadi/belajar-full-stack
| 0
|
12780711
|
# this is the first file that will be read
from flask import Flask
from config import Config
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
app = Flask(__name__)
app.config.from_object(Config)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
from .models.users import User
| 1.8125
| 2
|
duconv/src/lr_schedule.py
|
kingcong/MindSpore_Code
| 77
|
12780712
|
<filename>duconv/src/lr_schedule.py
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Noam
"""
import mindspore.ops as P
import mindspore.common.dtype as mstype
from mindspore.nn.learning_rate_schedule import LearningRateSchedule
from mindspore import Tensor
class Noam(LearningRateSchedule):
"""
Noam learning rate schedule: linear warmup followed by inverse-square-root decay
"""
def __init__(self, d_model, warmup_steps, learning_rate=1.0):
super().__init__()
self.d_model = d_model
self.warmup_steps = warmup_steps
self.learning_rate = learning_rate
self.pow = P.Pow()
self.min = P.Minimum()
self.cast = P.Cast()
self.const0 = Tensor(-0.5, mstype.float32)
self.const1 = Tensor(-1.5, mstype.float32)
def construct(self, global_step):
"""
construct
"""
p = self.cast(self.min(
self.pow(global_step, self.const0),
self.pow(self.warmup_steps, self.const1)*global_step), mstype.float32)
return self.learning_rate * self.pow(self.d_model, self.const0) * p
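# Written out, the rule above is the standard Noam schedule:
#   lr(step) = learning_rate * d_model**-0.5 * min(step**-0.5, step * warmup_steps**-1.5)
# i.e. linear warmup for warmup_steps steps, then inverse-square-root decay.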
| 2.046875
| 2
|
metastudio/lib/ext/meta-jython/metastudio.py
|
tovganesh/metastudio
| 4
|
12780713
|
<gh_stars>1-10
#
# metastudio.py
#
# Wrappers for accessing MeTA Studio APIs. Wherever possible these
# wrappers are consistent with the ones provided in BeanShell (the default
# scripting interface for MeTA Studio).
# However, due to some restrictions in Python, such as the lack of support for
# function overloading, certain functions may be missing altogether!
#
# @author <NAME>
# 25 March, 2009
#
# updated: 27 May, 2009 ; updated to reflect few additions to BeanShell
# updated: 20 March, 2011 ; updated to reflect few additions to BeanShell
#
import string
from java.io import File
from java.util import ArrayList
from java.io import BufferedReader,InputStreamReader,FileInputStream,\
BufferedWriter,OutputStreamWriter,FileOutputStream
from javax.swing import JFileChooser
from org.meta.molecule import Molecule, MoleculeBuilder, Atom
from org.meta.moleculereader import MoleculeFileReaderFactory
from org.meta.common import Utility
from org.meta.common.resource import StringResource
from org.meta.math.geom import Point3D, BoundingBox
from org.meta.math import Vector3D, Matrix3D
from org.meta.net import FederationServiceDiscovery
from org.meta.shell.idebeans import MoleculeViewerFrame, IDEFileChooser, MoleculeListDialog, IDEWidget
from org.meta.shell.idebeans.eventhandlers import MainMenuEventHandlers
from org.meta.shell.idebeans.viewers.impl.moleculeviewer import MoleculeScene
from org.meta.config.impl import AtomInfo
from org.meta.math.qm.property import ElectronDensity, MODensity
from org.meta.molecule.property.electronic import GridProperty
def getIDEInstance():
return MainMenuEventHandlers.getInstance(None).getIdeInstance()
def molecule(title):
mol = Utility.getDefaultImplFor(Molecule).newInstance()
mol.setTitle(title)
return mol
def atom(sym, charge, center):
return Atom(sym, charge, center)
def point(x, y, z):
return Point3D(x, y, z)
def boundingbox(upperLeft, bottomRight):
return BoundingBox(upperLeft, bottomRight)
def vector3d(*args):
if (len(args) == 3):
return Vector3D(args[0], args[1], args[2])
else:
return Vector3D(args[0])
def matrix3d():
return Matrix3D()
def atomInfo():
return AtomInfo.getInstance();
def buildConnectivity(mol, makeZMatrix="true", copyConnectivity="false"):
mb = Utility.getDefaultImplFor(MoleculeBuilder).newInstance()
if (copyConnectivity == "false"):
try:
if (makeZMatrix == "false"):
for i in range(0, len(mol)):
mb.makeConnectivity(mol[i])
else:
for i in range(0, len(mol)):
mb.makeZMatrix(mol[i])
except:
if (makeZMatrix == "false"):
mb.makeConnectivity(mol)
else:
mb.makeZMatrix(mol)
else:
try:
if (makeZMatrix == "false"):
mb.makeConnectivity(mol[0])
else:
mb.makeZMatrix(mol[0])
for i in range(1, len(mol)):
atms = mol[0].getAtoms();
iatms = mol[i].getAtoms();
while (atms.hasNext() and iatms.hasNext()):
iatms.next().setConnectedList(atms.next().getConnectedList())
except:
if (makeZMatrix == "false"):
mb.makeConnectivity(mol)
else:
mb.makeZMatrix(mol)
def buildSimpleConnectivity(mol):
mb = Utility.getDefaultImplFor(MoleculeBuilder).newInstance()
try:
for i in range(0, len(mol)):
mb.makeSimpleConnectivity(mol[i])
except:
mb.makeSimpleConnectivity(mol)
def showMolecule(mol):
ideInstance = getIDEInstance()
mvf = MoleculeViewerFrame(ideInstance)
mvf.getMoleculeViewer().disableUndo()
try:
for i in range(0, len(mol)):
mvf.addScene(MoleculeScene(mol[i]))
except:
mvf.addScene(MoleculeScene(mol))
mvf.getMoleculeViewer().enableUndo()
ideInstance.getWorkspaceDesktop().addInternalFrame(mvf)
def showFileDialog(title="Open", type="open"):
type = type.lower()
if type == "open":
fileChooser = IDEFileChooser()
fileChooser.setDialogTitle(title)
if (fileChooser.showOpenDialog(getIDEInstance()) == JFileChooser.APPROVE_OPTION):
return fileChooser.getSelectedFile().getAbsolutePath()
else:
return None
elif type == "save":
fileChooser = IDEFileChooser()
fileChooser.setDialogTitle(title)
if (fileChooser.showSaveDialog(getIDEInstance()) == JFileChooser.APPROVE_OPTION):
return fileChooser.getSelectedFile().getAbsolutePath()
else:
return None
else:
fileChooser = IDEFileChooser()
fileChooser.setDialogTitle(title)
if (fileChooser.showOpenDialog(getIDEInstance()) == JFileChooser.APPROVE_OPTION):
return fileChooser.getSelectedFile().getAbsolutePath()
else:
return None
def arraylistToPylist(arlist):
list = []
for i in range(0, arlist.size()): list.append(arlist.get(i))
return list
def arraylist():
return ArrayList()
def discover_meta(ipExpression=None, timeout=100):
if ipExpression == None:
nl = FederationServiceDiscovery.getInstance().discoverMeTA()
else:
inst = FederationServiceDiscovery.getInstance()
inst.setTimeout(timeout)
nl = inst.discoverMeTA(ipExpression)
return arraylistToPylist(nl)
def list_meta(timeout=100):
inst = FederationServiceDiscovery.getInstance()
inst.setTimeout(timeout)
return arraylistToPylist(inst.listMeTA())
def readMoleculeFile(file):
mfr = Utility.getDefaultImplFor(MoleculeFileReaderFactory).newInstance()
typ = file[file.index(".")+1:len(file)]
rdr = mfr.getReader(typ)
return rdr.readMoleculeFile(file)
def openFile(fileName, mode):
if mode=="r":
return BufferedReader(InputStreamReader(FileInputStream(fileName)))
elif mode=="w":
return BufferedWriter(OutputStreamWriter(FileOutputStream(fileName)))
def readMultiMoleculeFile(file, makeZMatrix="true", copyConnectivity="false"):
mfr = Utility.getDefaultImplFor(MoleculeFileReaderFactory).newInstance()
typ = file[file.index(".")+1:len(file)]
rdr = mfr.getReader(typ)
br = openFile(file, "r")
molList = []
try:
i = 0
while 1:
mol = rdr.readMolecule(br)
if mol.getNumberOfAtoms() == 0: break
if ((mol.getTitle()==None) or (mol.getTitle()=="") \
or (mol.getTitle()=="Untitled") \
or (mol.getTitle().index("Molecule")==0)):
mol.setTitle(Utility.getFileNamesSansExtension(File(file)) + "-" + repr(i))
molList.append(mol)
i += 1
except:
print "Warning: Could not read the complete file " + file
br.close()
buildConnectivity(molList, makeZMatrix, copyConnectivity)
return molList
def showMoleculeListDialog():
ml = MoleculeListDialog(getIDEInstance())
return ml.showListDialog()
def play(fileName):
from javazoom.jl.player import Player
Player(FileInputStream(fileName)).play()
def getDefaultClass(theClass):
return Utility.getDefaultImplFor(theClass)
def getDesktopInstance():
ideInstance = getIDEInstance()
if (ideInstance == None):
return None
else:
return ideInstance.getWorkspaceDesktop()
def getActiveFrame():
wd = getDesktopInstance()
if (wd == None): return None
else: return wd.getActiveFrame()
def getLoadedMoleculeScenes():
ideInstance = getIDEInstance()
if (ideInstance == None): return None
wd = getDesktopInstance()
if (wd == None): return None
frameList = wd.getFrameList()
sceneList = []
for frame in frameList:
try:
scenes = frame.getSceneList()
for scene in scenes: sceneList.append(scene)
except:
pass
return sceneList
def getLoadedMolecules():
ideInstance = getIDEInstance()
if (ideInstance == None): return None
currentWorkspace = ideInstance.getCurrentWorkspace()
if (currentWorkspace == None): return None
workspaceItems = currentWorkspace.getWorkspaceItems()
moleculeList = []
for item in workspaceItems:
try:
item.getItemData().getData().getNumberOfAtoms() # probe: confirms this workspace item actually holds a Molecule object
moleculeList.append(item.getItemData().getData())
except:
pass
return moleculeList
def getMolecule(index):
return getLoadedMoleculeScenes()[index].getMolecule()
def getSelectionStack(vframe):
sal = []
for scene in vframe.getSelectionList():
for sa in scene.getSelectionStack():
sal.append(sa)
return sal
def messageBox(msg, title="", type="normal"):
from javax.swing import JOptionPane
if type == "normal":
JOptionPane.showMessageDialog(getIDEInstance(), msg, title, JOptionPane.INFORMATION_MESSAGE)
elif type == "warning":
JOptionPane.showMessageDialog(getIDEInstance(), msg, title, JOptionPane.WARNING_MESSAGE)
elif type == "warn":
JOptionPane.showMessageDialog(getIDEInstance(), msg, title, JOptionPane.WARNING_MESSAGE)
elif type == "error":
JOptionPane.showMessageDialog(getIDEInstance(), msg, title, JOptionPane.ERROR_MESSAGE)
else:
JOptionPane.showMessageDialog(getIDEInstance(), msg, title, JOptionPane.INFORMATION_MESSAGE)
def color(*args):
from java.awt import Color
if (len(args) == 1): return Color.decode(args[0])
else:
c = args
return Color(c[0], c[1], c[2])
def button(text, isToggle, bg):
from javax.swing import JButton, JToggleButton
if isToggle:
tb = JToggleButton(text)
bg.add(tb)
return tb
else: return JButton(text)
def buttongroup():
from javax.swing import ButtonGroup
return ButtonGroup()
def checkbox(text):
from javax.swing import JCheckBox
return JCheckBox(text)
def combobox(values):
from javax.swing import JComboBox
return JComboBox(values)
def frame(txtTitle=""):
from javax.swing import JFrame
return JFrame(txtTitle)
def label(txt):
from javax.swing import JLabel
return JLabel(txt)
def listbox(values):
from javax.swing import JList
return JList(values)
def panel():
from javax.swing import JPanel
return JPanel()
def radiobutton(text, bg):
from javax.swing import JRadioButton
rb = JRadioButton(text)
bg.add(rb)
return rb
def scratchpad():
from org.meta.shell.idebeans import ScratchPad
ideInstance = getIDEInstance()
sp = ScratchPad(ideInstance)
ideInstance.getWorkspaceDesktop().addInternalFrame(sp)
return sp
def degrees(radian):
from org.meta.math import MathUtil
return MathUtil.toDegrees(radian)
def radians(degree):
from org.meta.math import MathUtil
return MathUtil.toRadians(degree)
def diagonalize(mat):
from org.meta.math.la import DiagonalizerFactory
d = DiagonalizerFactory.getInstance().getDefaultDiagonalizer()
d.diagonalize(mat)
return d
def getRemoteAppDir():
return StringResource.getInstance().getRemoteAppDir()
def getWidgetsPanel():
return getIDEInstance().getWorkspaceExplorer().getWidgetsPanel()
def widget(id):
return IDEWidget(id)
def moleculeScene(mol):
return MoleculeScene(mol)
def hfscf(molecule, basisSet):
from java.text import DecimalFormat
from java.lang import System
from org.meta.math.qm import *
print("Starting computation for " + repr(molecule) + " at " + repr(basisSet) + " basis")
t1 = System.currentTimeMillis()
bfs = BasisFunctions(molecule, basisSet)
t2 = System.currentTimeMillis()
print("Number of basis functions : " + repr(bfs.getBasisFunctions().size()))
print("Time till setting up basis : " + repr(t2-t1) + " ms")
oneEI = OneElectronIntegrals(bfs, molecule)
t2 = System.currentTimeMillis()
print("Time till 1EI evaluation : " + repr(t2-t1) + " ms")
twoEI = TwoElectronIntegrals(bfs)
t2 = System.currentTimeMillis()
print("Time till 2EI evaluation : " + repr(t2-t1) + " ms")
scfm = SCFMethodFactory.getInstance().getSCFMethod(molecule, oneEI, twoEI, SCFType.HARTREE_FOCK)
scfm.scf()
print("Final Energy : " + repr(scfm.getEnergy()))
return scfm.getEnergy()
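# Illustrative call (the basis-set label is only an example; use any basis known
# to MeTA Studio):
#
#   energy = hfscf(getMolecule(0), "sto-3g")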
def mp2scf(molecule, basisSet):
from java.text import DecimalFormat
from java.lang import System
from org.meta.math.qm import *
print("Starting computation for " + repr(molecule) + " at " + repr(basisSet) + " basis")
t1 = System.currentTimeMillis()
bfs = BasisFunctions(molecule, basisSet)
t2 = System.currentTimeMillis()
print("Number of basis functions : " + repr(bfs.getBasisFunctions().size()))
print("Time till setting up basis : " + repr(t2-t1) + " ms")
oneEI = OneElectronIntegrals(bfs, molecule)
t2 = System.currentTimeMillis()
print("Time till 1EI evaluation : " + repr(t2-t1) + " ms")
twoEI = TwoElectronIntegrals(bfs)
t2 = System.currentTimeMillis()
print("Time till 2EI evaluation : " + repr(t2-t1) + " ms")
scfm = SCFMethodFactory.getInstance().getSCFMethod(molecule, oneEI, twoEI, SCFType.MOLLER_PLESSET)
scfm.scf()
print("Final Energy : " + repr(scfm.getEnergy()))
return scfm.getEnergy()
def mmEnergy(molecule):
from org.meta.math.mm import *
from java.lang import System
print("Starting MM Energy for " + repr(molecule))
mm = MMEnergyMethodFactory.getInstance().getMMEnergyMethod(molecule)
t1 = System.currentTimeMillis()
energy = mm.getEnergy()
t2 = System.currentTimeMillis()
print("Energy = " + repr(energy) + " a.u.")
print("Total Time : " + repr(t2-t1) + " ms")
return energy
def federationode(nodeName, noOfProcessors=1):
from java.net import InetAddress
from org.meta.net import FederationNode
fNode = FederationNode(InetAddress.getByName(nodeName))
fNode.setNoOfProcessors(noOfProcessors)
return fNode
def generateBshScript(pyScript, funName):
bshScript = "import org.meta.common.resource.StringResource; import java.io.File; import org.python.util.PythonInterpreter;\n"
bshScript = bshScript + funName + "() { \n"
bshScript = bshScript + "File pluginDir = new File(StringResource.getInstance().getPluginDir());"
bshScript = bshScript + "Properties props = new Properties();"
bshScript = bshScript + "props.setProperty(\"python.home\", \"../lib/ext/meta-jython\");"
bshScript = bshScript + "props.setProperty(\"python.cachedir\", StringResource.getInstance().getRemoteAppDir());"
bshScript = bshScript + "props.setProperty(\"python.path\", \"../lib/ext/meta-jython\" "
bshScript = bshScript + " + File.pathSeparatorChar + pluginDir.getName());"
bshScript = bshScript + "PythonInterpreter.initialize(System.getProperties(), props, new String[] {\"\"});"
bshScript = bshScript + "PythonInterpreter pyInterpreter = new PythonInterpreter();"
bshScript = bshScript + "pyInterpreter.setOut(System.out);"
bshScript = bshScript + "pyInterpreter.setErr(System.err);"
bshScript = bshScript + "data = getData(); pyInterpreter.set(\"__data\", data);"
pyf = open(pyScript, "r")
lines = pyf.readlines()
pyf.close()
pyl = "\\ndef getData():\\n\\t return __data\\n\\ndef setData(data):\\n\\tglobal __data\\n\\t__data=data\\n\\n"
for line in lines: pyl += line.strip("\n").replace("\"", "\\\"") + "\\n"
bshScript = bshScript + "pyInterpreter.exec(\"" + pyl + "\");\n"
bshScript = bshScript + "setData(pyInterpreter.get(\"__data\", java.util.ArrayList.class));"
bshScript = bshScript + " } \n " + funName + "();\n"
return bshScript
def mapreduce(mapScript, reduceScript, data):
from org.meta.net.mapreduce.impl import MapFunctionScriptHelper, ReduceFunctionScriptHelper, MapReduceControllerFederationImpl
# everything is BeanShell, so we need to generate stub code here
mf = MapFunctionScriptHelper(mapScript)
mf.setBshScript(generateBshScript(mapScript, "pymap"))
rf = ReduceFunctionScriptHelper(reduceScript)
rf.setBshScript(generateBshScript(reduceScript, "pyreduce"))
mapReduceController = MapReduceControllerFederationImpl()
mapReduceController.executeTask(mf, rf, data)
return mapReduceController.getTaskData()
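# Illustrative call (script names are hypothetical): both scripts are plain Python
# files that read their input via getData() and publish results with setData(),
# matching the stub generated by generateBshScript() above.
#
#   results = mapreduce("my_map.py", "my_reduce.py", arraylist())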
def getDefaultMoleculeGrid(mol):
bb = mol.getBoundingBox().expand(5) # 5 angstroms in each direction
return GridProperty(bb, 100)
def moDensity(scfm, gp, monumber=None):
if (monumber == None): MODensity(scfm).compute(gp)
else: MODensity(scfm, monumber).compute(gp)
def electronDensity(scfm, gp):
ElectronDensity(scfm).compute(gp)
| 1.835938
| 2
|
mesService/modules/AngularInterface/planmanage/plan_schedule.py
|
mabotech/mes_sv
| 2
|
12780714
|
# @createTime : 2019/10/30 9:13
# @author : Mou
# @fileName: plan-schedule.py
# Front-end interface for the plan scheduling module
import json
from flask import current_app
from flask import request
from mesService import config_dict
from mesService.lib.pgwrap.db import connection
class PlanSchedule(object):
def getsortlist(self):
reqparam = request.data
try:
reqparam = json.loads(reqparam)
count = reqparam['count']
wipordertype = reqparam['wipordertype']
base_sql = "select get_wipsortlist(%d,%d);"%(count,wipordertype)
result = current_app.db.query_one(base_sql)
except:
result = {
"status":"server error",
"message":"search error"
}
res = json.dumps(result)
return res
if result:
return result[0]
else:
result = {
"status":"error",
"message":"search error"
}
res = json.dumps(result)
return res
| 2.421875
| 2
|
python/baekjoon/step/15-greedy/ATM.py
|
bum12ark/algorithm
| 1
|
12780715
|
<gh_stars>1-10
"""
Source: https://www.acmicpc.net/problem/11399
"""
# param
size = int(input())
p_times = list(map(int, input().split()))
# solution
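# Greedy argument: serve the shortest withdrawals first; person i waits for the
# prefix sum of all earlier times, so sorting ascending minimizes the total wait.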
p_times.sort()
prev = result = 0
for t in p_times:
prev += t
result += prev
# result
print(result)
| 2.8125
| 3
|
contract_analysis.py
|
jasonrubenstein18/MLB_FA
| 0
|
12780716
|
import pandas as pd
import numpy as np
from statsmodels.formula.api import ols
import plotly_express
import plotly.graph_objs as go
from plotly.subplots import make_subplots
# Read in data
batter_data = pd.read_csv("~/Desktop/MLB_FA/Data/fg_bat_data.csv")
del batter_data['Age']
print(len(batter_data))
print(batter_data.head())
pitcher_data = pd.read_csv("~/Desktop/MLB_FA/Data/fg_pitch_data.csv")
del pitcher_data['Age']
print(len(pitcher_data))
print(pitcher_data.head())
salary_data = pd.read_csv("~/Desktop/MLB_FA/Data/salary_data.csv")
print(len(salary_data))
injury_data = pd.read_csv("~/Desktop/MLB_FA/Data/injury_data_use.csv")
# Check whether there is overlap between the injury data and the salary data players
# injury_data_players = injury_data['Player'].unique()
# mutual = salary_data[salary_data['Player'].isin(injury_data_players)] # 945 out of 1135 players included
# excl = salary_data[~salary_data['Player'].isin(injury_data_players)]
# print(len(excl['Player'].unique())) # 129 unique players injury data omitted; use mlb.com trans for these
# Discount salaries to net present value (NPV), treating the rate as an inflation/discount rate
def npv(df, rate):
r = rate
df['Salary'] = pd.to_numeric(df['Salary'])
df['AAV'] = df['Salary'] / df['Years']
df['NPV'] = 0
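# NPV below is the present value of an n-year annuity paying AAV at discount rate r:
# AAV * (1 - (1 + r) ** -n) / r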
df['NPV'] = round(df['AAV'] * (1 - (1 / ((1 + r) ** df['Years']))) / r, 2)
return df
salary_data = npv(salary_data, 0.05)
# Lagged metrics to see if there is carryover value / value in continuity
class Metrics:
def lagged_batter(df):
df['WAR'] = pd.to_numeric(df['WAR'])
df['y_n1_war'] = df.groupby("Name")['WAR'].shift(1)
df['y_n2_war'] = df.groupby("Name")['y_n1_war'].shift(1)
df['y_n3_war'] = df.groupby("Name")['y_n2_war'].shift(1)
df['y_n4_war'] = df.groupby("Name")['y_n3_war'].shift(1)
df['y_n5_war'] = df.groupby("Name")['y_n4_war'].shift(1)
df['y_n6_war'] = df.groupby("Name")['y_n5_war'].shift(1)
df['wOBA'] = pd.to_numeric(df['wOBA'])
df['y_n1_wOBA'] = df.groupby("Name")['wOBA'].shift(1)
df['y_n2_wOBA'] = df.groupby("Name")['y_n1_wOBA'].shift(1)
df['y_n3_wOBA'] = df.groupby("Name")['y_n2_wOBA'].shift(1)
df['y_n4_wOBA'] = df.groupby("Name")['y_n3_wOBA'].shift(1)
df['wRC+'] = pd.to_numeric(df['wRC+'])
df['y_n1_wRC+'] = df.groupby("Name")['wRC+'].shift(1)
df['y_n2_wRC+'] = df.groupby("Name")['y_n1_wRC+'].shift(1)
df['y_n1_war_pa'] = df.groupby("Name")['WAR_PA'].shift(1)
df['y_n2_war_pa'] = df.groupby("Name")['y_n1_war_pa'].shift(1)
df['y_n3_war_pa'] = df.groupby("Name")['y_n2_war_pa'].shift(1)
df['y_n4_war_pa'] = df.groupby("Name")['y_n3_war_pa'].shift(1)
df['y_n5_war_pa'] = df.groupby("Name")['y_n4_war_pa'].shift(1)
df['y_n6_war_pa'] = df.groupby("Name")['y_n5_war_pa'].shift(1)
df["BB%"] = df["BB%"].apply(lambda x: x.replace("%", ""))
df['BB%'] = pd.to_numeric(df['BB%'])
df["K%"] = df["K%"].apply(lambda x: x.replace("%", ""))
df['K%'] = pd.to_numeric(df['K%'])
df.rename(columns={'BB%': 'BBpct', 'K%': 'Kpct'}, inplace=True)
return df
def lagged_pitcher(df):
df['WAR'] = pd.to_numeric(df['WAR'])
df['y_n1_war'] = df.groupby("Name")['WAR'].shift(1)
df['y_n2_war'] = df.groupby("Name")['y_n1_war'].shift(1)
df['y_n3_war'] = df.groupby("Name")['y_n2_war'].shift(1)
df['y_n4_war'] = df.groupby("Name")['y_n3_war'].shift(1)
df['y_n5_war'] = df.groupby("Name")['y_n4_war'].shift(1)
df['y_n6_war'] = df.groupby("Name")['y_n5_war'].shift(1)
# df['ERA-'] = pd.to_numeric(df['ERA-'])
# df['y_n1_ERA-'] = df.groupby("Name")['ERA-'].shift(1)
# df['y_n2_ERA-'] = df.groupby("Name")['y_n1_ERA-'].shift(1)
df['xFIP'] = pd.to_numeric(df['xFIP'])
df['y_n1_xFIP'] = df.groupby("Name")['xFIP'].shift(1)
df['y_n2_xFIP'] = df.groupby("Name")['y_n1_xFIP'].shift(1)
df['y_n1_war_tbf'] = df.groupby("Name")['WAR_TBF'].shift(1)
df['y_n2_war_tbf'] = df.groupby("Name")['y_n1_war_tbf'].shift(1)
df['y_n3_war_tbf'] = df.groupby("Name")['y_n2_war_tbf'].shift(1)
df['y_n4_war_tbf'] = df.groupby("Name")['y_n3_war_tbf'].shift(1)
df['y_n5_war_tbf'] = df.groupby("Name")['y_n4_war_tbf'].shift(1)
df['y_n6_war_tbf'] = df.groupby("Name")['y_n5_war_tbf'].shift(1)
df['BB%'] = df['BB%'].astype(str)
df["BB%"] = df["BB%"].apply(lambda x: x.replace("%", ""))
df['BB%'] = pd.to_numeric(df['BB%'])
df['K%'] = df['K%'].astype(str)
df["K%"] = df["K%"].apply(lambda x: x.replace("%", ""))
df['K%'] = pd.to_numeric(df['K%'])
df['K-BB%'] = df['K-BB%'].astype(str)
df["K-BB%"] = df["K-BB%"].apply(lambda x: x.replace("%", ""))
df['K-BB%'] = pd.to_numeric(df['K-BB%'])
df['SwStr%'] = df['SwStr%'].astype(str)
df["SwStr%"] = df["SwStr%"].apply(lambda x: x.replace("%", ""))
df['SwStr%'] = pd.to_numeric(df['SwStr%'])
df['LOB%'] = df['LOB%'].astype(str)
df["LOB%"] = df["LOB%"].apply(lambda x: x.replace("%", ""))
df['LOB%'] = pd.to_numeric(df['LOB%'])
# df['CB%'] = df['CB%'].astype(str)
# df["CB%"] = df["CB%"].apply(lambda x: x.replace("%", ""))
# df['CB%'] = pd.to_numeric(df['CB%'])
df.rename(columns={'BB%': 'BBpct', 'K%': 'Kpct', 'K-BB%': 'K_minus_BBpct', 'CB%': 'CBpct',
'SwStr%': 'Swstrpct'}, inplace=True)
return df
def fix_position(df):
df['Position'] = np.where(df['Position'] == "OF", "CF", df['Position'])
df['Position'] = np.where((df['Position'] == "LF") | (df['Position'] == "RF"),
"Corner Outfield", df['Position'])
df['Position'] = np.where(df['Position'] == "P", "RP", df['Position'])
# df['Position'] = np.where(df['Position'] == "SP", 1, df['Position'])
# df['Position'] = np.where(df['Position'] == "C", 2, df['Position'])
# df['Position'] = np.where(df['Position'] == "1B", 3, df['Position'])
# df['Position'] = np.where(df['Position'] == "2B", 4, df['Position'])
# df['Position'] = np.where(df['Position'] == "3B", 5, df['Position'])
# df['Position'] = np.where(df['Position'] == "SS", 6, df['Position'])
# df['Position'] = np.where(df['Position'] == "Corner Outfield", 7, df['Position'])
# df['Position'] = np.where(df['Position'] == "CF", 8, df['Position'])
# df['Position'] = np.where(df['Position'] == "RP", 9, df['Position'])
# df['Position'] = np.where(df['Position'] == "DH", 10, df['Position'])
return df
def rate_stats_batter(df):
df['WAR_PA'] = df['WAR'] / df['PA'] # add in rate based WAR (per PA, game played, etc)
df['oWAR_PA'] = df['oWAR'] / df['PA']
df['WAR_PA'] = round(df['WAR_PA'], 3)
df['oWAR_PA'] = round(df['oWAR_PA'], 3)
return df
def rate_stats_pitcher(df):
df['WAR_TBF'] = df['WAR'] / df['TBF'] # add in rate based WAR (per IP, etc)
# df['WAR_IP'] = df['WAR'] / df['IP']
df['wFB_TBF'] = df['wFB'] / df['TBF']
df['WAR_TBF'] = round(df['WAR_TBF'], 3)
# df['WAR_IP'] = round(df['WAR_IP'], 3)
df['wFB_TBF'] = round(df['wFB_TBF'], 3)
return df
def injury_engineering(df):
df['two_year_inj_avg'] = 0
df.loc[:, "two_year_inj_avg"] = (
df.groupby("Player")["injury_duration"].shift(1) / df.groupby("Player")["injury_duration"].shift(
2) - 1)
df['Injury'] = df['Injury'].fillna("None")
df['injury_duration'] = df['injury_duration'].fillna(0)
return df
def short_season_fix_batter(df):
df['WAR_162'] = np.where(df['Year'] == 2021, df['WAR']*2.3, df['WAR'])
df['PA_162'] = np.where(df['Year'] == 2021, df['PA']*2.3, df['PA'])
df['oWAR_162'] = np.where(df['Year'] == 2021, df['oWAR'] * 2.3, df['oWAR'])
df['dWAR_162'] = np.where(df['Year'] == 2021, df['dWAR'] * 2.3, df['dWAR'])
return df
def short_season_fix_pitcher(df):
df['WAR_162'] = np.where(df['Year'] == 2021, df['WAR']*2.3, df['WAR'])
df['IP_162'] = np.where(df['Year'] == 2021, df['IP']*2.3, df['IP'])
return df
class NonLinearVars():
def fg_batter_vars(df):
df['WAR_sq'] = np.where(df['WAR'] > 0, df['WAR'] ** 2, df['WAR'] * 2)
df['y_n1_war_sq'] = np.where(df['y_n1_war'] > 0, df['y_n1_war'] ** 2, df['y_n1_war'] * 2)
df['y_n2_war_sq'] = np.where(df['y_n2_war'] > 0, df['y_n2_war'] ** 2, df['y_n2_war'] * 2)
df['y_n3_war_sq'] = np.where(df['y_n3_war'] > 0, df['y_n3_war'] ** 2, df['y_n3_war'] * 2)
df['y_n4_war_sq'] = np.where(df['y_n4_war'] > 0, df['y_n4_war'] ** 2, df['y_n4_war'] * 2)
df['y_n5_war_sq'] = np.where(df['y_n5_war'] > 0, df['y_n5_war'] ** 2, df['y_n5_war'] * 2)
df['y_n6_war_sq'] = np.where(df['y_n6_war'] > 0, df['y_n6_war'] ** 2, df['y_n6_war'] * 2)
df['y_n1_wOBA_sq'] = df['y_n1_wOBA'] ** 2
df['y_n2_wOBA_sq'] = df['y_n2_wOBA'] ** 2
df['y_n1_wRC+_sq'] = df['y_n1_wRC+'] ** 2
df['y_n2_wRC+_sq'] = df['y_n2_wRC+'] ** 2
return df
def fg_pitcher_vars(df):
df['WAR_sq'] = df['WAR'] **2
df['y_n1_war_sq'] = np.where(df['y_n1_war'] > 0, df['y_n1_war'] ** 2, df['y_n1_war'] * 2)
df['y_n2_war_sq'] = np.where(df['y_n2_war'] > 0, df['y_n2_war'] ** 2, df['y_n2_war'] * 2)
df['y_n3_war_sq'] = np.where(df['y_n3_war'] > 0, df['y_n3_war'] ** 2, df['y_n3_war'] * 2)
df['y_n4_war_sq'] = np.where(df['y_n4_war'] > 0, df['y_n4_war'] ** 2, df['y_n4_war'] * 2)
df['y_n5_war_sq'] = np.where(df['y_n5_war'] > 0, df['y_n5_war'] ** 2, df['y_n5_war'] * 2)
df['y_n6_war_sq'] = np.where(df['y_n6_war'] > 0, df['y_n6_war'] ** 2, df['y_n6_war'] * 2)
# df['ERA-_sq'] = df['ERA-'] **2
# df['y_n1_ERA-_sq'] = df['y_n1_ERA-'] **2
# df['y_n2_ERA-_sq'] = df['y_n2_ERA-'] **2
df['xFIP_sq'] = df['xFIP'] **2
df['y_n1_xFIP_sq'] = df['y_n1_xFIP'] **2
df['y_n2_xFIP_sq'] = df['y_n2_xFIP'] **2
return df
def salary_vars(df):
# df['Age'] = df['Age'].astype('int')
df['Age_sq'] = df['Age'] ** 2
df['Age_log'] = np.log(df['Age'])
return df
# Attach the injury data to the players, merge on player and year
def merge_injuries(salary_df, injury_df):
merged_df = pd.merge(salary_df, injury_df, how='left', left_on=['Player', 'Season'], right_on=['Player', 'Year'])
del merged_df['Year']
return merged_df
# MA
print(len(salary_data))
salary_data = merge_injuries(salary_data, injury_data)
print(len(salary_data))
salary_data['injury_duration'] = salary_data['injury_duration'].fillna(0)
salary_data = Metrics.injury_engineering(salary_data)
# Lag
batter_data = Metrics.short_season_fix_batter(batter_data)
batter_data = Metrics.rate_stats_batter(batter_data)
batter_data = Metrics.lagged_batter(batter_data)
pitcher_data = Metrics.short_season_fix_pitcher(pitcher_data)
pitcher_data = Metrics.rate_stats_pitcher(pitcher_data)
pitcher_data = Metrics.lagged_pitcher(pitcher_data)
# Position fix
salary_data = Metrics.fix_position(salary_data)
# Non Linears
batter_data = NonLinearVars.fg_batter_vars(batter_data)
pitcher_data = NonLinearVars.fg_pitcher_vars(pitcher_data)
salary_data = NonLinearVars.salary_vars(salary_data)
# Merge data sets (one pitcher, one batter)
batter_merged = pd.merge(batter_data, salary_data, left_on=['Name', 'Year'], right_on=['Player', 'Season'])
batter_merged = batter_merged[(batter_merged['Position'] != "SP") & (batter_merged['Position'] != "RP")] # remove P's
print(len(batter_merged))
pitcher_merged = pd.merge(pitcher_data, salary_data, left_on=['Name', 'Year'], right_on=['Player', 'Season'])
pitcher_merged = pitcher_merged[(pitcher_merged['Position'] == "SP") | (pitcher_merged['Position'] == "RP")] # keep P's
print(len(pitcher_merged))
# Begin modeling
# train_data_batter = batter_merged[(batter_merged['Year'] != max(batter_merged['Year']))]
# train_data_pitcher = pitcher_merged[(pitcher_merged['Year'] != max(pitcher_merged['Year']))]
train_data_batter = batter_merged.loc[~batter_merged['NPV'].isnull()]
train_data_pitcher = pitcher_merged.loc[~pitcher_merged['NPV'].isnull()]
test_data_batter = batter_merged[
# (batter_merged['Year'] == max(batter_merged['Year']))
# &
(np.isnan(batter_merged['NPV']))]
test_data_pitcher = pitcher_merged[
# (pitcher_merged['Year'] == max(pitcher_merged['Year']))
# &
(np.isnan(pitcher_merged['NPV']))]
train_data_batter.to_csv('~/Desktop/MLB_FA/Data/train_data_batter.csv', index=False)
train_data_pitcher.to_csv('~/Desktop/MLB_FA/Data/train_data_pitcher.csv', index=False)
test_data_batter.to_csv('~/Desktop/MLB_FA/Data/test_data_batter.csv', index=False)
test_data_pitcher.to_csv('~/Desktop/MLB_FA/Data/test_data_pitcher.csv', index=False)
fit = ols('NPV ~ C(Position) + WAR_sq + WAR + Age', data=train_data_batter).fit()
fit.summary() # 0.597 r-sq, 0.587 adj r-sq
# Plot NPV / WAR to see nonlinear relationship
plot_data = train_data_batter[(train_data_batter['Year'] > 2010)]
fig = plotly_express.scatter(plot_data, x="dWAR", y="NPV", color='Position',
hover_data=['Player', 'Position', 'Year', 'Prev Team'],
title="dWAR, NPV Colored By Position (since {})".format(min(plot_data['Year'])))
fig.show()
# Plot WAR / Rate WAR
plot_data = batter_data[(batter_data['Year'] == 2021) & (batter_data['PA'] > 100)]
fig = plotly_express.scatter(plot_data, x="PA", y="dWAR", color='Name')
fig.update_layout(
hoverlabel=dict(
bgcolor="white",
font_size=10,
font_family="Arial"
)
)
fig.show()
# remove linear WAR
# Let's add a season factor and qualifying offer
fit = ols('NPV ~ C(Position) + C(Season) + WAR_sq + Age + Qual + WAR_PA', data=train_data_batter).fit()
fit.summary()
# Getting better, but there's more unexplained variance. Let's try log of Age and prior season's WAR
# Log Age
fit = ols('NPV ~ C(Position) + C(Season) + y_n1_war_sq + WAR_sq + Age_log + Qual + WAR_PA + y_n1_war_pa',
data=train_data_batter).fit()
fit.summary()
# Still marginally improving. Up to around 50% of the variance explained.
# WAR is a counting stat, let's add in base-running UBR, non-log Age
# UBR
fit = ols('NPV ~ C(Position) + y_n1_war_sq + WAR_sq + Age + UBR + Qual', data=train_data_batter).fit()
fit.summary()
# Try some new variables (e.g. OPS, ISO, wRC+, wOBA, y_n2_war_sq, etc)
fit = ols('NPV ~ C(Position) + y_n2_war_sq + y_n1_war_sq + WAR_sq + Age + UBR + Qual + wOBA + ISO',
data=train_data_batter).fit()
fit.summary()
# Now let's consider only deals signed for multiple-years
train_data_batter_multiyear = train_data_batter[(train_data_batter['Years'] > 1)]
fit = ols('NPV ~ C(Position) + y_n1_war_sq + WAR_sq + Age + UBR + Qual', data=train_data_batter_multiyear).fit()
fit.summary()
# Single year only
train_data_batter_single = train_data_batter[(train_data_batter['Years'] == 1)]
fit = ols('NPV ~ C(Position) + y_n1_war_sq + WAR_sq + Age + Qual', data=train_data_batter_single).fit()
fit.summary()
# So what are teams using to assess these single year contracts?
fit = ols('NPV ~ ISO + WAR_sq + y_n1_war_sq + y_n2_war_sq + wGDP + BABIP + Qual', data=train_data_batter_single).fit()
fit.summary()
# Now add injury duration
fit = ols('NPV ~ ISO + WAR_sq + y_n1_war_sq + y_n2_war_sq + injury_duration + Qual', data=train_data_batter).fit()
fit.summary()
# Kitchen sink
fit_rate = ols('NPV ~ BBpct + Kpct + AVG + OBP + SLG + OPS + ISO + Spd + BABIP + UBR + wGDP + wSB + wRC + '
'wRAA + wOBA + WAR + dWAR + oWAR + Year + WAR_PA + oWAR_PA + y_n1_war + y_n2_war + y_n3_war + '
'y_n4_war + y_n5_war + y_n6_war + y_n1_wOBA + y_n2_wOBA + y_n3_wOBA + y_n4_wOBA + '
'y_n1_war_pa + y_n2_war_pa + y_n3_war_pa + y_n4_war_pa + y_n5_war_pa + y_n6_war_pa +'
'WAR_sq + y_n1_war_sq + y_n2_war_sq + y_n3_war_sq + y_n4_war_sq + y_n5_war_sq + y_n6_war_sq + '
'y_n1_wOBA_sq + y_n2_wOBA_sq + Position + Age + Qual + injury_duration', data=train_data_batter).fit()
fit_rate.summary()
# Remove unwanted vars
fit_rate = ols('NPV ~ Kpct + Year + y_n1_war +'
'y_n1_wOBA + y_n2_war_pa + WAR_sq + y_n1_war_sq +'
'Age + Qual', data=train_data_batter).fit()
fit_rate.summary()
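# A minimal scoring sketch (added illustration, not from the original analysis):
# once a batter specification is settled on, the held-out free agents whose NPV
# is still missing can be scored with statsmodels' predict(). Columns are the
# same ones used in the formulas above; treat the output as indicative only.
test_data_batter = test_data_batter.copy()
test_data_batter['NPV_pred'] = fit_rate.predict(test_data_batter)
print(test_data_batter[['Player', 'Position', 'Age', 'NPV_pred']].sort_values('NPV_pred', ascending=False).head(10))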
# PITCHERS
train_data_pitcher['pos_dummy'] = np.where(train_data_pitcher['Position'] == "SP", 1, 0)
fit = ols('NPV ~ WAR_sq + Age + Qual + pos_dummy + FBv + Kpct + y_n1_war_sq', data=train_data_pitcher).fit()
fit.summary()
# Predict WAR
fit = ols('WAR ~ FBv + Kpct + BBpct + FIP + IP + wFB + pos_dummy', data=train_data_pitcher).fit()
fit.summary()
# Let's add in injury duration
train_data_pitcher['injury_duration_log'] = np.log(train_data_pitcher['injury_duration'])
fit = ols('NPV ~ WAR_sq + Age + Qual + injury_duration + pos_dummy', data=train_data_pitcher).fit()
fit.summary()
# Add FBv
fit = ols('NPV ~ WAR_sq + Age + Qual + injury_duration + FBv + pos_dummy', data=train_data_pitcher).fit()
fit.summary()
# Kpct
fit = ols('NPV ~ WAR_sq + Age + Qual + injury_duration + FBv + Kpct + pos_dummy + BBpct', data=train_data_pitcher).fit()
fit.summary()
# CBv
fit = ols('NPV ~ Age + Qual + injury_duration + FBv + Kpct + CBv + pos_dummy', data=train_data_pitcher).fit()
fit.summary()
# Rate stats
fit_rate = ols(
'NPV ~ Age + WAR_TBF + y_n1_war_tbf + y_n2_war_tbf + FBv + xFIP_sq + pos_dummy + injury_duration + Qual',
data=train_data_pitcher).fit()
fit_rate.summary()
multi_year_pitcher = train_data_pitcher[(train_data_pitcher['Years'] > 1)]
fit_rate_multi = ols(
'NPV ~ Age + WAR_TBF + y_n1_war_tbf + y_n2_war_tbf + FBv + xFIP_sq + pos_dummy + injury_duration',
data=multi_year_pitcher).fit()
fit_rate_multi.summary()
# Change position and Season to random effect
batter_grp = batter_merged.groupby(['Season']).agg({
'NPV': sum,
'WAR': sum,
'Name': 'nunique'
}).reset_index()
batter_grp['NPV'] = batter_grp['NPV'] / 1000000
fig = plotly_express.bar(batter_grp, x="Season", y="NPV",
color_continuous_scale=plotly_express.colors.qualitative.D3,
title="Yearly total NPV and total WAR")
# add WAR as a line on the same axes (a plotly_express figure has no subplot grid, so row/col cannot be used here)
fig.add_trace(go.Scatter(x=batter_grp['Season'], y=batter_grp['WAR'], line=dict(color='red'), name='WAR'))
fig.show()
# Create figure with secondary y-axis
fig = make_subplots(specs=[[{"secondary_y": True}]])
# Add traces
fig.add_trace(
go.Bar(x=batter_grp['Season'], y=batter_grp['NPV'], name="NPV total"),
secondary_y=False,
)
fig.add_trace(
go.Scatter(x=batter_grp['Season'], y=batter_grp['WAR'], name="WAR total"),
secondary_y=True,
)
# Add figure title
fig.update_layout(
title_text="Yearly total NPV and total WAR"
)
# Set x-axis title
fig.update_xaxes(title_text="Off-Season Year")
# Set y-axes titles
fig.update_yaxes(title_text="<b>NPV</b> total ($ Millions)", secondary_y=False)
fig.update_yaxes(title_text="<b>WAR</b> total", secondary_y=True)
fig.show()
| 2.9375
| 3
|
16/main.py
|
DVRodri8/advent-of-code-2019
| 0
|
12780717
|
<reponame>DVRodri8/advent-of-code-2019<filename>16/main.py
pattern = [0,1,0,-1]
simple = "59768092839927758565191298625215106371890118051426250855924764194411528004718709886402903435569627982485301921649240820059827161024631612290005106304724846680415690183371469037418126383450370741078684974598662642956794012825271487329243583117537873565332166744128845006806878717955946534158837370451935919790469815143341599820016469368684893122766857261426799636559525003877090579845725676481276977781270627558901433501565337409716858949203430181103278194428546385063911239478804717744977998841434061688000383456176494210691861957243370245170223862304663932874454624234226361642678259020094801774825694423060700312504286475305674864442250709029812379"
offset = int(simple[:7])
simple *= 10000
print(len(simple))
simple = simple[offset:]
seq = [int(i) for i in simple]
cpy = seq[:]
F = 100
print(len(seq))
# Part 2
# we can ignore the left part of the signal:
# the offset is larger than half the sequence length,
# so digit i is just the last digit of the sum of digits from i to the end
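# Added sanity-check sketch (not in the original solution): a brute-force
# one-phase update for the tail of the signal; on a tiny made-up input it
# matches the running "suma - neg" shortcut used in the loop below.
def one_phase_tail(digits):
    # position i becomes the last digit of the sum of digits[i:]
    return [sum(digits[i:]) % 10 for i in range(len(digits))]
assert one_phase_tail([1, 2, 3, 4]) == [0, 9, 7, 4]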
for phase in range(F):
neg = 0
suma = sum(seq)
new_seq = []
print(phase)
for i in range(len(seq)):
new_seq.append( int(str(suma-neg)[-1]) )
neg += seq[i]
seq = new_seq
naseq = seq[:]
seq = cpy[:]
'''
# Trivial part 1
apats = []
for c in range(1,len(seq)+1):
apat = []
for i in pattern: apat.extend([i]*c)
apats.append(apat)
for phase in range(1,F+1):
new_seq = []
for c in range(1,len(seq)+1):
apat = apats[c-1]
sa = 1
digits = []
for n in seq:
digits.append( n*(apat[sa]) )
sa = (sa+1) % len(apat)
# if c==2: print(digits)
new_seq.append(int(str(sum(digits))[-1]))
seq = new_seq
print(*seq[:8], sep='')
'''
print(*naseq[:8], sep='')
| 3.0625
| 3
|
logs/main_stringio.py
|
nekomaruh/Rendimiento_Escolar_en_Chile_2010-2019
| 0
|
12780718
|
# Libraries
import pandas as pd
import numpy as np
import interface
import time
def get_dataframes(start_time, year=2010):
dataframes = None
columns_to_drop = interface.get_columns_to_drop()
amount_csv = interface.get_amount_of_csv()
for year in range(year, year+amount_csv):
print('------------------------------------------------------------')
path = "datasets/Rendimiento por estudiante "+str(year)+".csv"
        # Read the data and split on ';' because some school names contain commas and would break the parse
encoding = 'utf-8'
if year == 2014 or year == 2015:
encoding = 'latin'
if year == 2016 or year == 2018 or year == 2019:
encoding += '-sig'
print('Reading: '+path+' ('+encoding+')')
interface.get_time(start_time)
df = pd.read_csv(path, sep=';', low_memory=False, encoding=encoding)
interface.get_ram(info='File loaded')
interface.get_time(start_time)
df.columns = map(str.upper, df.columns)
drop = []
df_columns = df.columns.values.tolist()
for column in columns_to_drop:
if column in df_columns:
drop.append(column)
#print('Dropped tables:', drop)
df.drop(columns=drop, inplace=True, axis=1)
        # Clean data: these columns are present in every year
df.fillna({'SIT_FIN': '-'}, inplace=True)
df['SIT_FIN'] = df['SIT_FIN'].replace([' '], '-')
df['COD_SEC'] = df['COD_SEC'].replace([' '], 0)
df['COD_ESPE'] = df['COD_ESPE'].replace([' '], 0)
df["PROM_GRAL"] = df["PROM_GRAL"].str.replace(',', ".").astype(float)
        # These columns are missing in some years, so fill the gaps
        if year <= 2012:
            df["COD_PRO_RBD"] = np.nan  # Present from 2013 onwards
            df["COD_JOR"] = np.nan  # Present from 2013 onwards
        if year <= 2013:  # Only present in the years 2010-2013
            df['INT_ALU'] = df['INT_ALU'].replace(['.'], 2)
            df['INT_ALU'] = df['INT_ALU'].replace([' '], 2)
            df["COD_ENSE2"] = np.nan  # Present from 2014 onwards
        if year >= 2014:  # Fill with empty values
            df['INT_ALU'] = np.nan
        #print('Number of rows:', len(df))
if dataframes is None:
dataframes = df
else:
dataframes = pd.concat([dataframes, df], ignore_index=True)
#print(df.dtypes)
del df
#print(dataframes.columns.values.tolist())
interface.get_ram(info='Added year to dataframe: ' + str(year))
interface.get_time(start_time)
print('------------------------------------------------------------')
interface.get_ram(info='Instance dataframe 2010-2019')
interface.get_time(start_time)
return dataframes
if __name__ == "__main__":
    # Program start
interface.get_ram(info='Starting program')
start_time = time.time()
    # Load the base data into the database
interface.drop_dimensions()
interface.create_dimensions()
interface.insert_static_dimensions()
interface.get_time(start_time)
    # Combine all the cleaned dataframes into a single one
df = get_dataframes(start_time)
    # Convert the MRUN variable
interface.get_ram(info='Converting dataframe types')
interface.get_time(start_time)
df['MRUN'] = df['MRUN'].astype('string')
interface.get_ram(info='Types converted')
interface.get_time(start_time)
    # Build school and student communes; these exist in every year (not covered in the documentation)
    headers_com = ["COD_COM", "NOM_COM"]
    # Communes where the schools are located
    data_com_rbd = [df["COD_COM_RBD"], df["NOM_COM_RBD"]]
    df_com_rbd = pd.concat(data_com_rbd, axis=1, keys=headers_com)
    # Communes the students come from
    data_com_alu = [df["COD_COM_ALU"], df["NOM_COM_ALU"]]
    df_com_alu = pd.concat(data_com_alu, axis=1, keys=headers_com)
    # Concatenate the columns
    df_com = pd.concat([df_com_rbd, df_com_alu])
    df_com = df_com.drop_duplicates(subset=['COD_COM'])
    df_com = df_com.reset_index(drop=True)
    # Insert the data into the commune dimension
    interface.insert_dim_comuna(df_com.values.tolist())
    interface.get_time(start_time)
    # Free leftover RAM
del headers_com, data_com_rbd, df_com_rbd, data_com_alu, df_com_alu, df_com
df.drop(columns=['NOM_COM_RBD','NOM_COM_ALU'], inplace=True, axis=1)
interface.get_ram(info='Dispose columns "comuna"')
interface.get_time(start_time)
    # Add schools (establecimiento)
data_establecimiento = [df["RBD"], df["DGV_RBD"], df["NOM_RBD"], df["RURAL_RBD"], df["COD_DEPE"], df["COD_REG_RBD"], df["COD_SEC"], df["COD_COM_RBD"]]
headers_establecimiento = ['rbd', 'dgv_rbd', 'nom_rbd', 'rural_rbd', 'cod_depe', 'cod_reg_rbd', 'cod_sec', 'cod_com']
interface.copy_from_stringio(table_name='establecimiento', data=data_establecimiento, headers=headers_establecimiento, remove_duplicates=['rbd','dgv_rbd'])
del data_establecimiento, headers_establecimiento
df.drop(columns=['NOM_RBD','RURAL_RBD','COD_DEPE','COD_REG_RBD','COD_SEC','COD_COM_RBD'], inplace=True, axis=1)
interface.get_ram(info='Dispose columns "establecimiento"')
interface.get_time(start_time)
    # Add students (alumno)
data_alumno = [df["MRUN"], df["FEC_NAC_ALU"], df["GEN_ALU"], df["COD_COM_ALU"], df["INT_ALU"]]
headers_alumno = ["mrun", "fec_nac_alu", "gen_alu", "cod_com", "int_alu"]
interface.copy_from_stringio(table_name='alumno', data=data_alumno, headers=headers_alumno, remove_duplicates=['mrun'])
del data_alumno, headers_alumno
df.drop(columns=['FEC_NAC_ALU','GEN_ALU','COD_COM_ALU','INT_ALU'], inplace=True, axis=1)
interface.get_ram(info='Dispose columns "alumnos"')
interface.get_time(start_time)
"""
### TESTING ###
print('DROP TESTING')
df.drop(columns=['NOM_COM_RBD','NOM_COM_ALU','NOM_RBD','RURAL_RBD','COD_DEPE','COD_REG_RBD','COD_SEC','COD_COM_RBD','FEC_NAC_ALU','GEN_ALU','COD_COM_ALU','INT_ALU'], inplace=True, axis=1)
print('TESTING DROPPED')
### TESTING ###
"""
    # Add grades (notas)
data_notas = [df["AGNO"], df["MRUN"], df["RBD"], df["DGV_RBD"], df["PROM_GRAL"], df["SIT_FIN"], df['ASISTENCIA'], df['LET_CUR'], df["COD_ENSE"], df["COD_ENSE2"], df["COD_JOR"]]
head_notas = ['agno', 'mrun', 'rbd', 'dgv_rbd', 'prom_gral', 'sit_fin', 'asistencia', 'let_cur', 'cod_ense', 'cod_ense2', 'cod_jor']
interface.copy_from_stringio(table_name='notas', data=data_notas, headers=head_notas, remove_duplicates=['agno','mrun'])
del data_notas, head_notas
interface.get_ram(info='Inserted all data to database')
interface.get_time(start_time)
del df
interface.get_ram(info='Dispose dataframe and finish program')
interface.get_time(start_time)
| 3.203125
| 3
|
DeepLib/Nvidia/NvOsd.py
|
bluetiger9/DeepEye
| 57
|
12780719
|
<reponame>bluetiger9/DeepEye<filename>DeepLib/Nvidia/NvOsd.py<gh_stars>10-100
## Copyright (c) 2020 <NAME> (<EMAIL>). All rights reserved.
## Licence: MIT
from DeepLib import *
import pyds
PGIE_CLASS_ID_VEHICLE = 0
PGIE_CLASS_ID_BICYCLE = 1
PGIE_CLASS_ID_PERSON = 2
PGIE_CLASS_ID_ROADSIGN = 3
def osd_sink_pad_buffer_probe(pad,info,u_data):
frame_number=0
    # Initializing object counter with 0.
obj_counter = {
PGIE_CLASS_ID_VEHICLE:0,
PGIE_CLASS_ID_PERSON:0,
PGIE_CLASS_ID_BICYCLE:0,
PGIE_CLASS_ID_ROADSIGN:0
}
num_rects=0
gst_buffer = info.get_buffer()
if not gst_buffer:
print("Unable to get GstBuffer ")
return
# Retrieve batch metadata from the gst_buffer
# Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
# C address of gst_buffer as input, which is obtained with hash(gst_buffer)
batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
l_frame = batch_meta.frame_meta_list
while l_frame is not None:
try:
# Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
# The casting is done by pyds.glist_get_nvds_frame_meta()
# The casting also keeps ownership of the underlying memory
# in the C code, so the Python garbage collector will leave
# it alone.
frame_meta = pyds.glist_get_nvds_frame_meta(l_frame.data)
except StopIteration:
break
frame_number=frame_meta.frame_num
num_rects = frame_meta.num_obj_meta
l_obj=frame_meta.obj_meta_list
while l_obj is not None:
try:
# Casting l_obj.data to pyds.NvDsObjectMeta
obj_meta=pyds.glist_get_nvds_object_meta(l_obj.data)
except StopIteration:
break
obj_counter[obj_meta.class_id] += 1
try:
l_obj=l_obj.next
except StopIteration:
break
# Acquiring a display meta object. The memory ownership remains in
# the C code so downstream plugins can still access it. Otherwise
# the garbage collector will claim it when this probe function exits.
display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta)
display_meta.num_labels = 1
py_nvosd_text_params = display_meta.text_params[0]
# Setting display text to be shown on screen
# Note that the pyds module allocates a buffer for the string, and the
# memory will not be claimed by the garbage collector.
# Reading the display_text field here will return the C address of the
# allocated string. Use pyds.get_string() to get the string content.
py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON])
# Now set the offsets where the string should appear
py_nvosd_text_params.x_offset = 10
py_nvosd_text_params.y_offset = 12
# Font , font-color and font-size
py_nvosd_text_params.font_params.font_name = "Serif"
py_nvosd_text_params.font_params.font_size = 10
# set(red, green, blue, alpha); set to White
py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
# Text background color
py_nvosd_text_params.set_bg_clr = 1
# set(red, green, blue, alpha); set to Black
py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
# Using pyds.get_string() to get display_text as string
print(pyds.get_string(py_nvosd_text_params.display_text))
pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
try:
l_frame=l_frame.next
except StopIteration:
break
return Gst.PadProbeReturn.OK
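## Usage sketch (assumption, not part of the original module): this probe is
## normally attached to the sink pad of the nvdsosd element when the pipeline
## is built. Element and pad names below are illustrative only.
# nvosd = pipeline.get_by_name("onscreendisplay")   # hypothetical element name
# osdsinkpad = nvosd.get_static_pad("sink")
# osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)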
| 2.40625
| 2
|
web/premises/migrations/0030_report_reason.py
|
mehrdad-shokri/arguman.org
| 1
|
12780720
|
<filename>web/premises/migrations/0030_report_reason.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('premises', '0029_contention_related_nouns'),
]
operations = [
migrations.AddField(
model_name='report',
name='reason',
            field=models.TextField(help_text='Please explain why the premise is a fallacy.', null=True, verbose_name='Reason'),
preserve_default=True,
),
]
| 1.75
| 2
|
tests/test_log_format.py
|
jnbellinger/lta
| 1
|
12780721
|
# test_log_format.py
"""Unit tests for lta/log_format.py."""
import sys
from requests.exceptions import HTTPError
from .test_util import ObjectLiteral
from lta.log_format import StructuredFormatter
class LiteralRecord(ObjectLiteral):
"""
LiteralRecord is a literal LogRecord.
This class creates an ObjectLiteral that also implements the (getMessage)
method which is often called on LogRecord objects.
This is useful for creating LogRecord literals to be used as return
values from mocked API calls.
"""
def getMessage(self):
"""Format the log message."""
return self.msg % self.args
def test_constructor_default() -> None:
"""Test that StructuredFormatter can be created without any parameters."""
sf = StructuredFormatter()
assert sf.component_type is None
assert sf.component_name is None
assert sf.indent is None
assert sf.separators == (',', ':')
def test_constructor_supplied() -> None:
"""Test that StructuredFormatter can be created with parameters."""
sf = StructuredFormatter(component_type="Picker", component_name="test-picker", ndjson=False)
assert sf.component_type == "Picker"
assert sf.component_name == "test-picker"
assert sf.indent == 4
assert sf.separators == (', ', ': ')
def test_format_default() -> None:
"""Test that StructuredFormatter (no params) provides proper output."""
sf = StructuredFormatter()
log_record = LiteralRecord(
name="lta.picker",
msg="ConnectionError trying to PATCH /status/picker with heartbeat",
args=[],
levelname="ERROR",
levelno=40,
pathname="/home/pmeade/github/lta/lta/picker.py",
filename="picker.py",
module="picker",
exc_info=None,
exc_text=None,
stack_info=None,
lineno=102,
funcName="patch_status_heartbeat",
created=1547003161.046467,
msecs=46.46706581115723,
relativeCreated=93.13035011291504,
thread=140013641434880,
threadName="MainThread",
processName="MainProcess",
process=8147
)
json_text = sf.format(log_record)
assert json_text.startswith("{")
assert json_text.endswith("}")
assert json_text.find("\n") == -1
assert json_text.find("component_type") == -1
assert json_text.find("component_name") == -1
assert json_text.find("timestamp") != -1
def test_format_supplied() -> None:
"""Test that StructuredFormatter (with params) provides proper output."""
sf = StructuredFormatter(component_type="Picker", component_name="test-picker", ndjson=False)
log_record = LiteralRecord(
name="lta.picker",
msg="ConnectionError trying to PATCH /status/picker with heartbeat",
args=[],
levelname="ERROR",
levelno=40,
pathname="/home/pmeade/github/lta/lta/picker.py",
filename="picker.py",
module="picker",
exc_info=None,
exc_text=None,
stack_info=None,
lineno=102,
funcName="patch_status_heartbeat",
created=1547003161.046467,
msecs=46.46706581115723,
relativeCreated=93.13035011291504,
thread=140013641434880,
threadName="MainThread",
processName="MainProcess",
process=8147
)
json_text = sf.format(log_record)
assert json_text.startswith("{")
assert json_text.endswith("}")
assert json_text.find("\n") != -1
assert json_text.find("component_type") != -1
assert json_text.find("component_name") != -1
assert json_text.find("timestamp") != -1
def test_missing_exc_info() -> None:
"""Test that StructuredFormatter (no params) provides proper output."""
sf = StructuredFormatter()
log_record = LiteralRecord(
name="lta.picker",
msg="ConnectionError trying to PATCH /status/picker with heartbeat",
args=[],
levelname="ERROR",
levelno=40,
pathname="/home/pmeade/github/lta/lta/picker.py",
filename="picker.py",
module="picker",
exc_text=None,
stack_info=None,
lineno=102,
funcName="patch_status_heartbeat",
created=1547003161.046467,
msecs=46.46706581115723,
relativeCreated=93.13035011291504,
thread=140013641434880,
threadName="MainThread",
processName="MainProcess",
process=8147
)
json_text = sf.format(log_record)
assert json_text.startswith("{")
assert json_text.endswith("}")
assert json_text.find("\n") == -1
assert json_text.find("component_type") == -1
assert json_text.find("component_name") == -1
assert json_text.find("timestamp") != -1
def test_exc_info_tuple() -> None:
"""Test that StructuredFormatter (no params) provides proper output."""
sf = StructuredFormatter()
log_record = LiteralRecord(
name="lta.picker",
msg="ConnectionError trying to PATCH /status/picker with heartbeat",
args=[],
levelname="ERROR",
levelno=40,
pathname="/home/pmeade/github/lta/lta/picker.py",
filename="picker.py",
module="picker",
exc_text=None,
stack_info=None,
lineno=102,
funcName="patch_status_heartbeat",
created=1547003161.046467,
msecs=46.46706581115723,
relativeCreated=93.13035011291504,
thread=140013641434880,
threadName="MainThread",
processName="MainProcess",
process=8147
)
try:
raise HTTPError("451 Unavailable For Legal Reasons")
except HTTPError:
log_record.exc_info = sys.exc_info()
json_text = sf.format(log_record)
assert json_text.startswith("{")
assert json_text.endswith("}")
assert json_text.find("\n") == -1
assert json_text.find("component_type") == -1
assert json_text.find("component_name") == -1
assert json_text.find("timestamp") != -1
| 3.09375
| 3
|
StructuralCausalModels/structural_equation.py
|
Black-Swan-ICL/PySCMs
| 1
|
12780722
|
<reponame>Black-Swan-ICL/PySCMs
# TODO add string representation of Structural Equation
class StructuralEquation:
"""A class to represent structural equations.
Structural Equations are assignments of the sort
.. math::
X_{i} := g((X_j)_{j ~\in ~J}, U_i),
where :math:`U_i` is an exogenous (random) variable.
Parameters
----------
index_lhs : int
The index of the structural variable on the left-hand side of the
structural equation (the ":math:`i`").
indices_rhs : list
The indices of the structural variables on the right-hand side of the
structural equation (the ":math:`j`'s in :math:`J`").
exogenous_variable : scipy.stats.rv_continuous or scipy.stats.rv_discrete
The exogenous variable (the ":math:`U_i`").
function : callable
The function defining the functional form of the assignment in the
structural equation (the ":math:`g`").
"""
def __init__(self, index_lhs, indices_rhs, exogenous_variable, function):
self.index_lhs = index_lhs
self.indices_rhs = indices_rhs
self.exogenous_variable = exogenous_variable
self.function = function
def generate_data(self, data):
"""Generates samples from a structural equation.
Parameters
----------
data : pandas.DataFrame
A dataframe containing data at least for the structural variables on
the right-hand side of the structural equation.
If it contains data for the structural variable on the left-hand
side of the structural equation, that data will be overwritten.
Returns
-------
pandas.DataFrame
The samples.
"""
sample_size = data.shape[0]
inputs = [data.loc[:, i].values for i in self.indices_rhs]
data.loc[:, self.index_lhs] = self.function(
self.exogenous_variable.rvs(size=sample_size),
*inputs
)
return data
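# A minimal usage sketch (added illustration, not part of the original module):
# X_1 := 2 * X_0 + U_1 with U_1 ~ N(0, 1), generated on a small dataframe.
if __name__ == "__main__":
    import numpy as np
    import pandas as pd
    from scipy import stats
    frame = pd.DataFrame({0: np.linspace(0.0, 1.0, 5)})
    equation = StructuralEquation(
        index_lhs=1,
        indices_rhs=[0],
        exogenous_variable=stats.norm(loc=0.0, scale=1.0),
        function=lambda u, x0: 2.0 * x0 + u,
    )
    print(equation.generate_data(frame))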
| 3.15625
| 3
|
RobotFramework/inventories/production/connectivity_check_v2.py
|
dmmar/netascode
| 36
|
12780723
|
# Example
# -------
#
# connectivity_check_v2.py
from pyats import aetest
import re
import logging
# get your logger for your script
logger = logging.getLogger(__name__)
class CommonSetup(aetest.CommonSetup):
# CommonSetup-SubSec1
@aetest.subsection
def check_topology(
self,
testbed,
HQ_C1_name = 'HQ-C1',
HQ_C2_name = 'HQ-C2',
HQ_C3_name = 'HQ-C3',
HQ_C4_name = 'HQ-C4',
BR1_C1_name = 'BR1-C1',
BR2_C1_name = 'BR2-C1'):
HQ_C1 = testbed.devices[HQ_C1_name]
HQ_C2 = testbed.devices[HQ_C2_name]
HQ_C3 = testbed.devices[HQ_C3_name]
HQ_C4 = testbed.devices[HQ_C4_name]
BR1_C1 = testbed.devices[BR1_C1_name]
BR2_C1 = testbed.devices[BR2_C1_name]
# add them to testscript parameters
self.parent.parameters.update(
HQ_C1 = HQ_C1,
HQ_C2 = HQ_C2,
HQ_C3 = HQ_C3,
HQ_C4 = HQ_C4,
BR1_C1 = BR1_C1,
BR2_C1 = BR2_C1)
# CommonSetup-SubSec
@aetest.subsection
def establish_connections(self, steps, HQ_C1, HQ_C2, HQ_C3, HQ_C4, BR1_C1, BR2_C1):
with steps.start('Connecting to %s' % HQ_C1.name):
HQ_C1.connect()
with steps.start('Connecting to %s' % HQ_C2.name):
HQ_C2.connect()
with steps.start('Connecting to %s' % HQ_C3.name):
HQ_C3.connect()
with steps.start('Connecting to %s' % HQ_C4.name):
HQ_C4.connect()
with steps.start('Connecting to %s' % BR1_C1.name):
BR1_C1.connect()
with steps.start('Connecting to %s' % BR2_C1.name):
BR2_C1.connect()
@aetest.subsection
def setup_ip_addresses(self, steps, HQ_C1, HQ_C2, HQ_C3, HQ_C4, BR1_C1, BR2_C1):
with steps.start('Setup static IPv4 to %s' % HQ_C1.name):
HQ_C1.execute('ip 10.255.100.10/27 10.255.100.1')
with steps.start('Setup static IPv4 to %s' % HQ_C2.name):
HQ_C2.execute('ip 10.255.100.40/27 10.255.100.33')
with steps.start('Setup static IPv4 to %s' % HQ_C3.name):
HQ_C3.execute('ip 10.255.100.70/27 10.255.100.65')
with steps.start('Setup static IPv4 to %s' % HQ_C4.name):
HQ_C4.execute('ip 10.255.100.100/27 10.255.100.97')
with steps.start('Setup static IPv4 to %s' % BR1_C1.name):
BR1_C1.execute('ip 10.1.100.10/27 10.1.100.1')
with steps.start('Setup static IPv4 to %s' % BR2_C1.name):
BR2_C1.execute('ip 10.2.100.10/27 10.2.100.1')
# TestCases
class TESTCASE_1_PING_FROM_HQ_CLIENTS_TO_ISP(aetest.Testcase):
@aetest.test
def T1_PING_FROM_HQ_C1_TO_ISP(self, HQ_C1):
try:
result = HQ_C1.execute('ping 8.8.8.8 -c 5')
except Exception as e:
            self.failed('Something went wrong: {}'.format(str(e)), goto=['exit'])
        else:
            match = re.search('timeout', result) or re.search('not reachable|unreachable', result)
            print('################')
            print('Result is =>', result)
            print('Match is =>', match)
            print('################')
            if match:
                print('Match is => FOUND', match)
                print('################')
                self.failed()
            else:
                print('Match is => NOT FOUND')
print('################')
@aetest.test
def T2_PING_FROM_HQ_C2_TO_ISP(self, HQ_C2):
try:
result = HQ_C2.execute('ping 8.8.8.8 -c 5')
except Exception as e:
            self.failed('Something went wrong: {}'.format(str(e)), goto=['exit'])
        else:
            match = re.search('timeout', result) or re.search('not reachable|unreachable', result)
            print('################')
            print('Result is =>', result)
            print('Match is =>', match)
            print('################')
            if match:
                print('Match is => FOUND', match)
                print('################')
                self.failed()
            else:
                print('Match is => NOT FOUND')
print('################')
@aetest.test
def T3_PING_FROM_HQ_C3_TO_ISP(self, HQ_C3):
try:
result = HQ_C3.execute('ping 8.8.8.8 -c 5')
except Exception as e:
            self.failed('Something went wrong: {}'.format(str(e)), goto=['exit'])
        else:
            match = re.search('timeout', result) or re.search('not reachable|unreachable', result)
            print('################')
            print('Result is =>', result)
            print('Match is =>', match)
            print('################')
            if match:
                print('Match is => FOUND', match)
                print('################')
                self.failed()
            else:
                print('Match is => NOT FOUND')
print('################')
@aetest.test
def T4_PING_FROM_HQ_C4_TO_ISP(self, HQ_C4):
try:
result = HQ_C4.execute('ping 8.8.8.8 -c 5')
except Exception as e:
            self.failed('Something went wrong: {}'.format(str(e)), goto=['exit'])
        else:
            match = re.search('timeout', result) or re.search('not reachable|unreachable', result)
            print('################')
            print('Result is =>', result)
            print('Match is =>', match)
            print('################')
            if match:
                print('Match is => FOUND', match)
                print('################')
                self.failed()
            else:
                print('Match is => NOT FOUND')
print('################')
class TESTCASE_2_PING_FROM_BR1_CLIENTS_TO_ISP(aetest.Testcase):
@aetest.test
def T1_PING_FROM_BR1_C1_TO_ISP(self, BR1_C1):
try:
result = BR1_C1.execute('ping 8.8.8.8 -c 5')
except Exception as e:
            self.failed('Something went wrong: {}'.format(str(e)), goto=['exit'])
        else:
            match = re.search('timeout', result) or re.search('not reachable|unreachable', result)
            print('################')
            print('Result is =>', result)
            print('Match is =>', match)
            print('################')
            if match:
                print('Match is => FOUND', match)
                print('################')
                self.failed()
            else:
                print('Match is => NOT FOUND')
print('################')
class TESTCASE_3_PING_FROM_BR2_CLIENTS_TO_ISP(aetest.Testcase):
@aetest.test
def T1_PING_FROM_BR2_C1_TO_ISP(self, BR2_C1):
try:
result = BR2_C1.execute('ping 8.8.8.8 -c 5')
except Exception as e:
            self.failed('Something went wrong: {}'.format(str(e)), goto=['exit'])
        else:
            match = re.search('timeout', result) or re.search('not reachable|unreachable', result)
            print('################')
            print('Result is =>', result)
            print('Match is =>', match)
            print('################')
            if match:
                print('Match is => FOUND', match)
                print('################')
                self.failed()
            else:
                print('Match is => NOT FOUND')
print('################')
class TESTCASE_4_PING_FROM_HQ_CLIENTS_TO_HQ_S1(aetest.Testcase):
@aetest.test
def T1_PING_FROM_HQ_C1_TO_HQ_S1(self, HQ_C1):
try:
result = HQ_C1.execute('ping 10.255.255.2 -c 5')
except Exception as e:
            self.failed('Something went wrong: {}'.format(str(e)), goto=['exit'])
        else:
            match = re.search('timeout', result) or re.search('not reachable|unreachable', result)
            print('################')
            print('Result is =>', result)
            print('Match is =>', match)
            print('################')
            if match:
                print('Match is => FOUND', match)
                print('################')
                self.failed()
            else:
                print('Match is => NOT FOUND')
print('################')
@aetest.test
def T2_PING_FROM_HQ_C2_TO_HQ_S1(self, HQ_C2):
try:
result = HQ_C2.execute('ping 10.255.255.2 -c 5')
except Exception as e:
            self.failed('Something went wrong: {}'.format(str(e)), goto=['exit'])
        else:
            match = re.search('timeout', result) or re.search('not reachable|unreachable', result)
            print('################')
            print('Result is =>', result)
            print('Match is =>', match)
            print('################')
            if match:
                print('Match is => FOUND', match)
                print('################')
                self.failed()
            else:
                print('Match is => NOT FOUND')
print('################')
@aetest.test
def T3_PING_FROM_HQ_C3_TO_HQ_S1(self, HQ_C3):
try:
result = HQ_C3.execute('ping 10.255.255.2 -c 5')
except Exception as e:
            self.failed('Something went wrong: {}'.format(str(e)), goto=['exit'])
        else:
            match = re.search('timeout', result) or re.search('not reachable|unreachable', result)
            print('################')
            print('Result is =>', result)
            print('Match is =>', match)
            print('################')
            if match:
                print('Match is => FOUND', match)
                print('################')
                self.failed()
            else:
                print('Match is => NOT FOUND')
print('################')
@aetest.test
def T4_PING_FROM_HQ_C4_TO_HQ_S1(self, HQ_C4):
try:
result = HQ_C4.execute('ping 10.255.255.2 -c 5')
except Exception as e:
            self.failed('Something went wrong: {}'.format(str(e)), goto=['exit'])
        else:
            match = re.search('timeout', result) or re.search('not reachable|unreachable', result)
            print('################')
            print('Result is =>', result)
            print('Match is =>', match)
            print('################')
            if match:
                print('Match is => FOUND', match)
                print('################')
                self.failed()
            else:
                print('Match is => NOT FOUND')
print('################')
class TESTCASE_5_PING_FROM_BR1_CLIENTS_TO_HQ_S1(aetest.Testcase):
@aetest.test
def T1_PING_FROM_BR1_C1_TO_HQ_S1(self, BR1_C1):
try:
result = BR1_C1.execute('ping 10.255.255.2 -c 5')
except Exception as e:
            self.failed('Something went wrong: {}'.format(str(e)), goto=['exit'])
        else:
            match = re.search('timeout', result) or re.search('not reachable|unreachable', result)
            print('################')
            print('Result is =>', result)
            print('Match is =>', match)
            print('################')
            if match:
                print('Match is => FOUND', match)
                print('################')
                self.failed()
            else:
                print('Match is => NOT FOUND')
print('################')
class TESTCASE_6_PING_FROM_BR2_CLIENTS_TO_HQ_S1(aetest.Testcase):
@aetest.test
def T1_PING_FROM_BR2_C1_TO_HQ_S1(self, BR2_C1):
try:
result = BR2_C1.execute('ping 10.255.255.2 -c 5')
except Exception as e:
            self.failed('Something went wrong: {}'.format(str(e)), goto=['exit'])
        else:
            match = re.search('timeout', result) or re.search('not reachable|unreachable', result)
            print('################')
            print('Result is =>', result)
            print('Match is =>', match)
            print('################')
            if match:
                print('Match is => FOUND', match)
                print('################')
                self.failed()
            else:
                print('Match is => NOT FOUND')
print('################')
class TESTCASE_7_TRACEROUTE_FROM_HQ_CLIENTS_TO_ISP(aetest.Testcase):
@aetest.test
def T1_TRACE_FROM_HQ_C1_TO_ISP(self, HQ_C1):
try:
result = HQ_C1.execute('trace 8.8.8.8 -P 6')
except Exception as e:
            self.failed('Something went wrong: {}'.format(str(e)), goto=['exit'])
        else:
            match = re.search(r'\* \* \*', result) or re.search('Destination host unreachable', result)
            print('################')
            print('Result is =>', result)
            print('Match is =>', match)
            print('################')
            if match:
                print('Match is => FOUND', match)
                print('################')
                self.failed()
            else:
                print('Match is => NOT FOUND')
print('################')
@aetest.test
def T2_TRACE_FROM_HQ_C2_TO_ISP(self, HQ_C2):
try:
result = HQ_C2.execute('trace 8.8.8.8 -P 6')
except Exception as e:
            self.failed('Something went wrong: {}'.format(str(e)), goto=['exit'])
        else:
            match = re.search(r'\* \* \*', result) or re.search('Destination host unreachable', result)
            print('################')
            print('Result is =>', result)
            print('Match is =>', match)
            print('################')
            if match:
                print('Match is => FOUND', match)
                print('################')
                self.failed()
            else:
                print('Match is => NOT FOUND')
print('################')
@aetest.test
def T3_TRACE_FROM_HQ_C3_TO_ISP(self, HQ_C3):
try:
result = HQ_C3.execute('trace 8.8.8.8 -P 6')
except Exception as e:
            self.failed('Something went wrong: {}'.format(str(e)), goto=['exit'])
        else:
            match = re.search(r'\* \* \*', result) or re.search('Destination host unreachable', result)
            print('################')
            print('Result is =>', result)
            print('Match is =>', match)
            print('################')
            if match:
                print('Match is => FOUND', match)
                print('################')
                self.failed()
            else:
                print('Match is => NOT FOUND')
print('################')
@aetest.test
def T4_TRACE_FROM_HQ_C4_TO_ISP(self, HQ_C4):
try:
result = HQ_C4.execute('trace 8.8.8.8 -P 6')
except Exception as e:
            self.failed('Something went wrong: {}'.format(str(e)), goto=['exit'])
        else:
            match = re.search(r'\* \* \*', result) or re.search('Destination host unreachable', result)
            print('################')
            print('Result is =>', result)
            print('Match is =>', match)
            print('################')
            if match:
                print('Match is => FOUND', match)
                print('################')
                self.failed()
            else:
                print('Match is => NOT FOUND')
print('################')
# CommonCleanup
class CommonCleanup(aetest.CommonCleanup):
@aetest.subsection
def disconnect(self, steps, HQ_C1, HQ_C2, HQ_C3, HQ_C4, BR1_C1, BR2_C1):
with steps.start('Disconnecting from %s' % HQ_C1.name):
HQ_C1.disconnect()
with steps.start('Disconnecting from %s' % HQ_C2.name):
HQ_C2.disconnect()
with steps.start('Disconnecting from %s' % HQ_C3.name):
HQ_C3.disconnect()
with steps.start('Disconnecting from %s' % HQ_C4.name):
HQ_C4.disconnect()
with steps.start('Disconnecting from %s' % BR1_C1.name):
BR1_C1.disconnect()
with steps.start('Disconnecting from %s' % BR2_C1.name):
BR2_C1.disconnect()
if __name__ == '__main__':
import argparse
from pyats.topology import loader
parser = argparse.ArgumentParser()
parser.add_argument('--testbed', dest = 'testbed',
type = loader.load)
args, unknown = parser.parse_known_args()
aetest.main(**vars(args))
| 2.296875
| 2
|
Scripts/chardetect-script.py
|
linshenping/Python27
| 0
|
12780724
|
<filename>Scripts/chardetect-script.py
#!C:\Python27\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'chardet==3.0.4','console_scripts','chardetect'
__requires__ = 'chardet==3.0.4'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('chardet==3.0.4', 'console_scripts', 'chardetect')()
)
| 1.789063
| 2
|
wxpythonopengl.py
|
aole/boilerplate
| 1
|
12780725
|
import wx
import numpy as np
import time
from wx import glcanvas
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.arrays import vbo
from OpenGL.GL import shaders
from readobj import Obj3D
__author__ = '<NAME>'
__version__ = '0.1.0'
vertexShader = """
#version 120
void main() {
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
}
"""
fragmentShader = """
#version 120
void main() {
gl_FragColor = vec4( .9, .9, .9, 1 );
}
"""
class GLFrame( glcanvas.GLCanvas ):
"""A simple class for using OpenGL with wxPython."""
near_plane = 0.1
far_plane = 100
world_pos = (0, 0, -6)
world_rot = (0, 0, 0)
def __init__(self, parent):
self.GLinitialized = False
attribList = (glcanvas.WX_GL_RGBA, # RGBA
glcanvas.WX_GL_DOUBLEBUFFER, # Double Buffered
glcanvas.WX_GL_DEPTH_SIZE, 24) # 24 bit
super(GLFrame, self).__init__( parent, attribList=attribList )
#
# Create the canvas
self.context = glcanvas.GLContext( self )
self.left_down = False
#
# Set the event handlers.
self.Bind(wx.EVT_ERASE_BACKGROUND, self.processEraseBackgroundEvent)
self.Bind(wx.EVT_SIZE, self.processSizeEvent)
self.Bind(wx.EVT_PAINT, self.processPaintEvent)
self.Bind(wx.EVT_MOUSEWHEEL, self.processWheelEvent)
self.Bind(wx.EVT_MOTION, self.processMotion)
self.Bind(wx.EVT_LEFT_DOWN, self.processLeftDown)
self.Bind(wx.EVT_LEFT_UP, self.processLeftUp)
#
# Canvas Proxy Methods
def GetGLExtents(self):
"""Get the extents of the OpenGL canvas."""
return self.GetClientSize()
#
# wxPython Window Handlers
def processLeftDown( self, event ):
self.last_pos = event.GetPosition()
self.left_down = True
def processLeftUp( self, event ):
self.left_down = False
def processMotion( self, event ):
if self.left_down:
pos = event.GetPosition()
diff = (pos-self.last_pos)
self.world_rot = ( self.world_rot[0]+diff[1], self.world_rot[1]+diff[0], self.world_rot[2] )
# print( )
self.last_pos = pos
self.Refresh( False )
def processWheelEvent( self, event ):
delta = event.GetWheelRotation() / 100
self.world_pos = ( self.world_pos[0], self.world_pos[1], self.world_pos[2]+delta )
self.Refresh( False )
def processEraseBackgroundEvent( self, event ):
"""Process the erase background event."""
pass # Do nothing, to avoid flashing on MSWin
def processSizeEvent( self, event ):
self.Show()
self.SetCurrent( self.context )
size = self.GetGLExtents()
self.OnReshape( size.width, size.height )
self.Refresh( False )
event.Skip()
def processPaintEvent(self, event):
self.SetCurrent( self.context )
# This is a 'perfect' time to initialize OpenGL ... only if we need to
if not self.GLinitialized:
self.OnInitGL()
self.GLinitialized = True
self.OnDraw()
event.Skip()
#
# GLFrame OpenGL Event Handlers
def OnInitGL(self):
"""Initialize OpenGL for use in the window."""
glClearColor(1, 1, 1, 1)
VERTEX_SHADER = shaders.compileShader( vertexShader, GL_VERTEX_SHADER )
FRAGMENT_SHADER = shaders.compileShader( fragmentShader, GL_FRAGMENT_SHADER )
self.shader = shaders.compileProgram( VERTEX_SHADER, FRAGMENT_SHADER )
        cube = Obj3D( r'testdata\cube.obj' )  # raw string so the backslash is not treated as an escape
data = cube.getVerticesFlat()
self.vbo = vbo.VBO( np.array( data, 'f' ) )
def OnReshape( self, width, height ):
"""Reshape the OpenGL viewport based on the dimensions of the window."""
glViewport( 0, 0, width, height )
glMatrixMode( GL_PROJECTION )
glLoadIdentity()
# glOrtho( -0.5, 0.5, -0.5, 0.5, -1, 1 )
gluPerspective( 45.0, width/height, self.near_plane, self.far_plane )
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def OnDraw( self ):
glPushMatrix()
glTranslate( self.world_pos[0], self.world_pos[1], self.world_pos[2] )
glRotated( self.world_rot[1], 0, 1, 0 )
glRotated( self.world_rot[0], 1, 0, 0 )
glClear( GL_COLOR_BUFFER_BIT )
shaders.glUseProgram( self.shader )
self.vbo.bind()
glEnableClientState( GL_VERTEX_ARRAY );
glVertexPointerf( self.vbo )
glDrawArrays( GL_TRIANGLES, 0, len( self.vbo ) )
self.vbo.unbind()
glDisableClientState( GL_VERTEX_ARRAY );
shaders.glUseProgram( 0 )
glPopMatrix()
self.SwapBuffers()
class Window( wx.Frame ):
def __init__( self, *args, **kwargs ):
super().__init__( *args, **kwargs )
self.initUI()
def initUI( self ):
panel = GLFrame(self)
panel.Bind(wx.EVT_RIGHT_DOWN, self.OnRightDown)
wx.StaticText( panel, label='Boilerplate Code', pos=( 10, 10 ) )
fmenu = wx.Menu()
self.popupMenu = wx.Menu()
fitem = fmenu.Append( wx.ID_OPEN, '&Open\tCtrl+O', 'Open file' )
self.popupMenu.Append( wx.ID_OPEN, '&Open\tCtrl+O', 'Open file' )
self.Bind( wx.EVT_MENU, self.onOpen, fitem )
fmenu.AppendSeparator()
fitem = fmenu.Append( wx.ID_EXIT, 'E&xit\tCtrl+Q', 'Exit Application' )
self.popupMenu.Append( wx.ID_EXIT, 'E&xit\tCtrl+Q', 'Exit Application' )
self.Bind(wx.EVT_MENU, self.onQuit, fitem)
mbar = wx.MenuBar()
mbar.Append( fmenu, '&File' )
self.SetMenuBar( mbar )
self.Show()
def OnRightDown(self, event):
self.PopupMenu( self.popupMenu, event.GetPosition() )
def onQuit( self, event ):
self.Close()
def onOpen( self, event ):
print( 'open' )
class Application( wx.App ):
def run( self ):
frame = Window(None, -1, 'Boilerplate Window', size=(400,300))
frame.Show()
self.MainLoop()
self.Destroy()
Application().run()
| 2.375
| 2
|
backend/error_handlers.py
|
UIC-InDeXLab/CovidInsights
| 0
|
12780726
|
<reponame>UIC-InDeXLab/CovidInsights
from backend import app
from flask import jsonify
invalid_country_msg = "Invalid country name. Please check using /list/countries to see valid country names."
invalid_region_msg = "Invalid region name. Please check using /list/regions to see names of regions."
invalid_country_or_no_regions_msg = "Country name invalid or this country doesn't have regional data. " \
"Refer to /list/countries and /list/regions"
invalid_window_msg = "A GET parameter 'window' must be provided such that: "\
"1 <= window <= (number of days available in data)"
invalid_type_msg = "GET parameter 'type' should be one of: 'deaths', 'recovered', 'cases'. Default is 'cases'."
invalid_date_fmt = "GET parameter 'date' either invalid or not in format YYYY-MM-DD."
date_out_of_range = "Provided date is out of range of data available."
data_type_invalid_for_region = "The requested data type is not available for the given location."
invalid_similarity_measure = "Similarity measure not understood. Choose from: euclidean, normalized."
similarity_measure_not_available = "Similarity measure not available for this location because population data " \
"unavailable."
@app.errorhandler(400)
def bad_request(error):
return {'error': {
'code': 400,
'message': error.description
}}, 400
@app.errorhandler(404)
def not_found(error):
return {'error': {
'code': 404,
'message': error.description
}}, 404
@app.errorhandler(405)
def method_not_allowed(error):
return {'error': {
'code': 405,
'message': error.description
}}, 405
@app.errorhandler(500)
def server_error(error):
return {'error': {
'code': 500,
'message': error.description
}}, 500
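# Usage sketch (illustration only, not part of the original module): view
# functions can surface these handlers by aborting with one of the message
# strings defined above. Route and lookup names here are hypothetical.
# from flask import abort
#
# @app.route('/country/<name>')
# def country(name):
#     if name not in known_countries:  # hypothetical lookup
#         abort(404, description=invalid_country_msg)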
| 2.359375
| 2
|
app/config.py
|
Tiebe/EarnApp-Earning-Monitor
| 0
|
12780727
|
<gh_stars>0
import os
import io
import json
from time import sleep
class Configuration:
def __init__(self) -> None:
self.check_for_existing_config()
# Delay before checking env. Solves docker issues.
sleep(2)
        # if a config file already exists, ask whether to reuse it
if self.config_file_exists:
self.__want_to_reset_config()
if self.__reuse_config == True:
self.load_config()
else:
self.ask_config()
else:
self.ask_config()
self.fix_bugs()
def fix_bugs(self):
self.__fix_delay_bug()
def ask_config(self):
self.AUTH = (input("Enter the oauth-refresh-token from EarnApp dashboard\n\t: ")
if os.environ.get("AUTH") is None else os.environ.get("AUTH"))
# 10 Minutes recommended by Vita
self.DELAY = (10 if os.environ.get("DELAY")
is None else int(os.environ.get("DELAY")))
self.INTERVAL = (60 if os.environ.get("INTERVAL") is None
else int(os.environ.get("INTERVAL")))
self.WEBHOOK_URL = (input("Enter the Discord WebHook URL\n\t: ") if os.environ.get(
"WEBHOOK_URL") is None else os.environ.get("WEBHOOK_URL"))
        self.AUTOMATIC_REDEEM = (input("Do you want to use automatic redeeming?\n\t[i] This helps you get your "
                                       "money faster.\n\t[i] If you don't want to use this feature just put 0 here, else put the balance that has to be exceeded here [>2.5]\n\t: ")) if os.environ.get("AUTOMATIC_REDEEM") is None \
            else os.environ.get("AUTOMATIC_REDEEM")
self.create_config()
def __want_to_reset_config(self):
if os.environ.get('container', False) == 'docker':
self.__reuse_config = True
return
got_response = False
while(not got_response):
response = input("Want to use existing configuration? (yes/no): ")
if response.lower() == "yes":
got_response = True
self.__reuse_config = True
elif response.lower() == "no":
got_response = True
self.__reuse_config = False
else:
print("Didn't quite understand, try again!")
def check_for_existing_config(self):
self.home_directory = os.path.expanduser("~")
self.program_data_folder = ".earnapp-earning-monitor"
self.config_file_name = "config.json"
self.program_directory = os.path.join(self.home_directory, self.program_data_folder)
self.config_file_path = os.path.join(self.program_directory, self.config_file_name)
self.config_file_exists = os.path.exists(self.config_file_path)
def create_config(self):
if os.environ.get('container', False) == 'docker':
print("Detected container runtime.")
else:
# If config file doesn't exist
if not self.config_file_exists:
# If direcotry doesn't exist, create dir
if not os.path.exists(self.program_directory):
os.mkdir(self.program_directory)
config = {
"AUTH": self.AUTH,
"DELAY": self.DELAY,
"INTERVAL": self.INTERVAL,
"WEBHOOK_URL": self.WEBHOOK_URL,
"AUTOMATIC_REDEEM": self.AUTOMATIC_REDEEM,
}
with io.open(self.config_file_path, "w", encoding="utf-8") as stream:
json.dump(config, stream, indent=2)
def load_config(self):
with io.open(self.config_file_path, "r", encoding="utf-8") as stream:
try:
config_data = json.load(stream)
self.AUTH = config_data["AUTH"]
self.DELAY = config_data["DELAY"]
self.INTERVAL = config_data["INTERVAL"]
self.WEBHOOK_URL = config_data["WEBHOOK_URL"]
self.AUTOMATIC_REDEEM = config_data["AUTOMATIC_REDEEM"]
            except (KeyError, json.JSONDecodeError):
print("Looks like your config file is missing paramters... Please reconfigure.")
exit(1)
def __fix_delay_bug(self):
if self.DELAY < 0 or self.DELAY >= 60:
print('Found invalid delay configuration. Fixing..!')
            self.DELAY = 10  # Standard value (minutes)
self.create_config()
if __name__ == "__main__":
config = Configuration()
| 2.359375
| 2
|
django_monitor/price_monitor/urls/__init__.py
|
jasonljc/enterprise-price-monitor
| 0
|
12780728
|
from django.conf.urls import url
from .. import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^add/$', views.request_add)
]
| 1.492188
| 1
|
writeFullVP.py
|
maq18/TopoScope
| 0
|
12780729
|
<filename>writeFullVP.py
import copy, random, os
from collections import defaultdict
from hierarchy import Hierarchy
class GetFullVP(object):
def __init__(self, groupSize, dirs):
self.groupSize = groupSize
self.dir = dirs
if not os.path.exists(self.dir):
os.mkdir(self.dir)
self.VP2AS = defaultdict(set)
self.VP2path = defaultdict(set)
self.fullVP = set()
self.partialVP = set()
self.VPGroup = list()
self.fileNum = -1
self.tier = Hierarchy('asrel.txt')
def getFullVP(self):
with open('aspaths.txt') as f:
for line in f:
ASes = line.strip().split('|')
for AS in ASes:
self.VP2AS[ASes[0]].add(AS)
self.VP2path[ASes[0]].add(line.strip())
for VP in self.VP2AS.keys():
            if 65000*0.8 < len(self.VP2AS[VP]):  # a "full" VP sees more than 80% of the ~65k ASes
self.fullVP.add(VP)
else:
self.partialVP.add(VP)
def fullVPGroup(self):
VP_copy1, VP_copy2 = list(), list()
for VP in self.fullVP:
if VP in self.tier.clique or VP in self.tier.high:
VP_copy1.append(VP)
else:
VP_copy2.append(VP)
while len(VP_copy1) >= self.groupSize:
tmp = list()
for _ in range(self.groupSize):
index = random.randint(0, len(VP_copy1) - 1)
tmp.append(VP_copy1.pop(index))
self.VPGroup.append(tmp)
while len(VP_copy2) >= self.groupSize:
tmp = list()
for _ in range(self.groupSize):
index = random.randint(0, len(VP_copy2) - 1)
tmp.append(VP_copy2.pop(index))
self.VPGroup.append(tmp)
tmp = []
for VP in VP_copy2 + VP_copy1:
tmp.append(VP)
if len(tmp) > self.groupSize:
self.VPGroup.append(tmp[:self.groupSize])
tmp = tmp[self.groupSize:]
for VP in self.partialVP:
tmp.append(VP)
self.VPGroup.append(tmp)
self.fileNum = len(self.VPGroup)
def writeFullVPPath(self):
for i in range(self.fileNum):
f = open(self.dir + 'fullVPPath' + str(i) + '.txt', 'w')
for VP in self.VPGroup[i]:
for path in self.VP2path[VP]:
f.write(path + '\n')
f.close()
def inferTopo(self):
for i in range(self.fileNum):
os.system("perl asrank.pl " + self.dir + "fullVPPath" + str(i) + ".txt > " + self.dir + "fullVPRel" + str(i) + ".txt")
def run(self):
self.getFullVP()
self.fullVPGroup()
self.writeFullVPPath()
self.inferTopo()
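# A minimal invocation sketch (assumed usage, not part of the original script):
# group full-feed vantage points 10 at a time and write outputs under 'groups/'.
# Requires 'aspaths.txt' and 'asrel.txt' in the working directory and asrank.pl
# alongside this script, as the class above already assumes.
if __name__ == '__main__':
    GetFullVP(10, 'groups/').run()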
| 2.546875
| 3
|
setup.py
|
chrisbarr/bilious-rutabaga
| 0
|
12780730
|
<filename>setup.py<gh_stars>0
from setuptools import setup, find_packages
setup(
name = 'bucket_lister',
packages = find_packages(),
version = '0.2.3',
description = 'List S3 buckets for an account',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/chrisbarr/bilious-rutabaga',
download_url = 'https://github.com/chrisbarr/bilious-rutabaga/tarball/0.1',
keywords = ['aws', 's3'],
classifiers = [],
install_requires = ['boto>=2.38.0'],
)
| 1.507813
| 2
|
PP.py
|
Mukela12/philosophical
| 1
|
12780731
|
from flask import Flask, render_template, url_for, request, redirect,flash
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
app = Flask(__name__)
@app.route("/", methods=["POST", "GET"])
def Base():
if request.method == "POST":
name = request.form["name"]
email = request.form["email"]
message = request.form["message"]
return redirect(url_for('Thankyou'))
else:
return render_template('index.html')
@app.route('/Thankyou', methods=["POST", "GET"])
def Thankyou():
return render_template('Thankyou2.html')
if __name__ == "__main__":
app.run(debug=True)
| 2.796875
| 3
|
esteira/pipeline/stage.py
|
guilhermewebdev/esteira
| 0
|
12780732
|
<filename>esteira/pipeline/stage.py
from .task import Task
class Stage(Task):
before_script = []
script = []
repo_dir = ''
def __init__(self, repo_dir, image=None, external_envs={}):
self.repo_dir = repo_dir
super().__init__(external_envs=external_envs, image=image)
def each_script(self, scripts):
image = self.client.images.get(self.image)
for script in scripts:
print(f'> {script}')
self.container = self.client.containers.run(
image,
command=script,
stderr=True,
stdin_open=False,
working_dir='/builds',
volumes={
self.repo_dir: {
'bind': '/builds',
'mode': 'rw'
}
},
environment=self.variables,
detach=True,
hostname=f'{self.__class__.__name__}'.lower()
)
for log in self.container.logs(stream=True, stderr=True, follow=True):
print(log.decode('utf-8'))
response = self.container.wait()
assert response.get('StatusCode') == 0, 'Code returned ' + str(response.get('StatusCode'))
assert response.get('Error') == None, str(response.get('Error'))
image = self.container.commit(f'{self.__class__.__name__}'.lower())
def run(self):
self.each_script(self.before_script)
self.each_script(self.script)
self.destroy()
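# Usage sketch (illustration; assumes the Task base class provides self.client,
# self.variables and destroy() as used above, and that a Docker daemon is running):
# class TestStage(Stage):
#     before_script = ['pip install -r requirements.txt']
#     script = ['pytest -q']
# TestStage('/path/to/repo', image='python:3.10').run()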
| 2.265625
| 2
|
__init__.py
|
IBM/alchemy-config
| 0
|
12780733
|
<filename>__init__.py
#*****************************************************************#
# (C) Copyright IBM Corporation 2020. #
# #
# The source code for this program is not published or otherwise #
# divested of its trade secrets, irrespective of what has been #
# deposited with the U.S. Copyright Office. #
#*****************************************************************#
from .aconfig import *
| 1.414063
| 1
|
Pomodoro-GUI/main.py
|
twbm/Git-Learning-Thingy
| 1
|
12780734
|
from tkinter import *
import time
# ---------------------------- CONSTANTS ------------------------------- #
PINK = "#e2979c"
RED = "#e7305b"
GREEN = "#9bdeac"
YELLOW = "#f7f5dd"
FONT_NAME = "Courier"
WORK_MIN = 25
SHORT_BREAK_MIN = 5
LONG_BREAK_MIN = 20
# ---------------------------- TIMER RESET ------------------------------- #
# ---------------------------- TIMER MECHANISM ------------------------------- #
# ---------------------------- COUNTDOWN MECHANISM ------------------------------- #
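# A hedged sketch (not in the original file) of what this countdown section
# usually looks like with tkinter's after(); it assumes the id returned by
# canvs.create_text(...) below is captured as timer_text, and would be wired
# to the Start button via command=lambda: count_down(WORK_MIN * 60).
# def count_down(count):
#     mins, secs = divmod(count, 60)
#     canvs.itemconfig(timer_text, text=f"{mins:02d}:{secs:02d}")
#     if count > 0:
#         window.after(1000, count_down, count - 1)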
# ---------------------------- UI SETUP ------------------------------- #
window = Tk()
window.title("Work Timer")
window.config(padx=100, pady=50, bg=YELLOW)
title_label = Label(text="Timer",bg=YELLOW, fg=GREEN, font=(FONT_NAME, 35, 'bold'))
title_label.grid(column=1, row=0)
canvs = Canvas(width=200, height=224, bg=YELLOW, highlightthickness=0)
tomato = PhotoImage(file='tomato.png')
canvs.create_image(100, 112, image=tomato)
canvs.create_text(100, 130, text='00:00', fill='white', font=(FONT_NAME, 36, 'bold'))
canvs.grid(row=1, column=1)
start = Button(text='Start',highlightthickness=0, command=None)
start.grid(column=0, row=2)
reset = Button(text='Reset', highlightthickness=0,command=None)
reset.grid(column=2, row=2)
check_marks = Label(text="✓", bg=YELLOW, fg=GREEN, font=(FONT_NAME, 35, 'bold'))
check_marks.grid(column=1, row=3)
window.mainloop()
| 3.09375
| 3
|
test/run.py
|
mcr/uoscore-uedhoc
| 0
|
12780735
|
#!/usr/bin/env python3
# Copyright (c) 2021 Fraunhofer AISEC. See the COPYRIGHT
# file at the top-level directory of this distribution.
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
import shutil
import os
import subprocess
import tarfile
from pathlib import Path
from collections import namedtuple
arc = namedtuple("arc", "board, cpu_arc")
build_path = 'build'
build_lib_test_path = 'build_lib_test'
results_path = 'packaged'
def remove_folder(path):
"""
Removes a folder.
"""
if os.path.exists(path):
shutil.rmtree(path)
def clean_all():
"""
Removes all build artefacts and the already saved static libraries in
folder packaged/.
"""
print("\nClean all!\n")
clean()
remove_folder(results_path)
def clean():
"""
Removes all build artefacts.
"""
remove_folder(build_path)
remove_folder(build_lib_test_path)
def execute_ext(cmd):
"""
Executes an external program.
cmd: program with arguments
"""
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
process.wait()
for line in process.stdout:
print(line)
if "FAIL" in str(line):
exit()
def build(name, opt, arc):
"""
Builds a static library.
name: name of the library -- libuoscore.a or libuedhoc.a
opt: optimization level
arc: the name of the architecture (the Zephyr OS board name)
"""
    # create a file containing make variables indicating the optimization level
    # and the library which we want to build -- oscore or edhoc
print("\n")
print("===================================================================")
print("\nBuilding " + name + " for architecture " +
arc.cpu_arc + " with optimization " + opt + "\n")
print("===================================================================")
os.mkdir(build_lib_test_path)
f = open(build_lib_test_path + "/opt", "x")
f.write("OPT = " + opt + "\n")
f.write("LIB_NAME = " + name + "\n")
f.close()
m = open("src/main.h", "w+")
if (name == 'libuoscore.a'):
m.write("#define OSCORE_TESTS")
if (name == 'libuedhoc.a'):
m.write("#define EDHOC_TESTS")
    m.close()
# build with west
execute_ext(['west', 'build', '-b='+arc.board])
def save(name, arc):
"""
Saves a oscore or edhoc library for a specific architecture in folder
packaged.
name: name of the library -- libuoscore.a or libuedhoc.a
arc: the name of the architecture (the Zephyr OS board name)
"""
print("\nSaving!\n")
Path(results_path).mkdir(parents=True, exist_ok=True)
name_only = os.path.splitext(os.path.basename(name))[0]
t = tarfile.open(results_path + '/' + name_only +
'_' + arc.cpu_arc + '.tar.gz', 'x')
t.add(build_lib_test_path + '/' + 'libtest.a', arcname=name)
if (name == 'libuedhoc.a'):
t.add('../modules/edhoc/edhoc.h', arcname='edhoc.h')
if (name == 'libuoscore.a'):
t.add('../modules/oscore/oscore.h', arcname='oscore.h')
t.close()
def test(arc):
"""
    Tests a static library against the test vectors.
arc: architecture
"""
if (
(arc.board == 'native_posix_64') |
(arc.board == 'native_posix')):
print("\nTesting!\n")
execute_ext(['west', 'build', '-t', 'run'])
else:
execute_ext(['west', 'flash'])
input(
"Examine the results printed over the debugger and press Enter to continue...")
def run_tests(name, arc):
"""
Builds, tests and saves an oscore or an edhoc static library for a specific
architecture. The tests are executed for libraries build with different
optimizations.
name: name of the library -- libuoscore.a or libuedhoc.a
arc: the name of the architecture (the Zephyr OS board name)
"""
opt = ("-O0", "-O1", "-O2", "-O3")
for o in opt:
clean()
build(name, o, arc)
test(arc)
save(name, arc)
def main():
"""
Builds static libraries from uOSCORE and uEDHOC for different
    architectures, tests the libraries against the test vectors and saves the
tested libraries in the folder packaged
"""
clean_all()
# x86
#run_tests('libuoscore.a', arc('native_posix', 'x86'))
run_tests('libuedhoc.a', arc('native_posix', 'x86'))
# x86-64
#run_tests('libuoscore.a', arc('native_posix_64', 'x86-64'))
#run_tests('libuedhoc.a', arc('native_posix_64', 'x86-64'))
    # To run the following tests, real hardware must be connected to the PC
    # executing this script. The results of the tests can be examined over a serial console such as GTKterm
# Cortex M0
#run_tests('libuoscore.a', arc('nrf51dk_nrf51422', 'cortex-m0'))
#run_tests('libuedhoc.a', arc('nrf51dk_nrf51422', 'cortex-m0'))
# Cortex M3
#run_tests('libuoscore.a', arc('nucleo_l152re', 'cortex-m3'))
#run_tests('libuedhoc.a', arc('nucleo_l152re', 'cortex-m3'))
# Cortex M4
#run_tests('libuoscore.a', arc('nrf52dk_nrf52832','cortex-m4'))
#run_tests('libuedhoc.a', arc('nrf52dk_nrf52832','cortex-m4'))
#run_tests('libuoscore.a', arc('nrf52840dk_nrf52840','cortex-m4'))
#run_tests('libuedhoc.a', arc('nrf52840dk_nrf52840','cortex-m4'))
# Cortex M33
#run_tests('libuoscore.a', arc('nrf9160dk_nrf9160', 'cortex-m33'))
#run_tests('libuedhoc.a', arc('nrf9160dk_nrf9160', 'cortex-m33'))
if __name__ == "__main__":
main()
| 2.4375
| 2
|
src/gamesbyexample/fizzbuzzgame.py
|
asweigart/gamesbyexample
| 83
|
12780736
|
"""FizzBuzz Game, by <NAME> <EMAIL>
A number game where you also race against the clock.
Tags: tiny, beginner, game, math"""
__version__ = 0
import sys, time
print('''Fizz Buzz Game, by <NAME> <EMAIL>
Starting with 1, enter increasing numbers.
However, if the number is a multiple of 3, type "fizz" instead of
the number. If the number is a multiple of 5, type "buzz". If the
number is a multiple of both 3 and 5, type "fizzbuzz".
So the pattern is:
1 2 fizz 4 buzz fizz 7 8 fizz buzz 11 fizz 13 14 fizzbuzz 16...
A doom clock is counting down. Entering correct responses gives you
more time. How long can you keep entering the correct pattern?''')
input('Press Enter to begin...')
number = 1
doomClock = time.time() + 10 # Player starts with 10 seconds.
while True: # Main game loop.
# Determine the correct response for the current number:
if number % 3 == 0 and number % 5 == 0:
correctResponse = 'fizzbuzz'
elif number % 3 == 0:
correctResponse = 'fizz'
elif number % 5 == 0:
correctResponse = 'buzz'
else:
correctResponse = str(number)
# For the first 16 responses, give them the answer:
if number <= 16:
hint = '(Enter ' + correctResponse + ') '
elif number == 17:
hint = '(You are on your own now!) '
else:
hint = ''
# Get the player's response:
response = input('Next response: ' + hint)
response = response.lower().replace(' ', '')
# See if the player has lost:
if response != correctResponse:
print('NOOOOO! Correct response: ' + correctResponse)
print('Thanks for playing!')
sys.exit()
elif time.time() > doomClock:
print('NOOOOO! You have run out of time!')
print('Thanks for playing!')
sys.exit()
# If the player was right, add 2 seconds to the doom clock.
doomClock += 2
secondsRemaining = round(doomClock - time.time(), 1)
print('DOOM CLOCK: ' + str(secondsRemaining) + ' seconds remaining')
print()
number += 1 # Proceed to the next number to enter.
| 4.0625
| 4
|
review_ladder/models.py
|
miri64/review_ladder
| 0
|
12780737
|
<reponame>miri64/review_ladder<filename>review_ladder/models.py<gh_stars>0
from django.db import models, transaction
from django.db.models import Q, Count
from django.conf import settings
from django.core import validators
import datetime
import dateutil.parser
GITHUB_REPO = "%s/%s" % (settings.GITHUB_REPO_USER, settings.GITHUB_REPO_NAME)
if hasattr(settings, "GITHUB_SINCE"):
START_DATE = dateutil.parser.parse(settings.GITHUB_SINCE)
else:
START_DATE = datetime.datetime.fromtimestamp(0)
# Create your models here.
class User(models.Model):
id = models.IntegerField(primary_key=True, unique=True)
avatar_url = models.URLField()
name = models.CharField(max_length=30, unique=True, db_index=True)
def __str__(self):
return self.name
def _filtered_stats(self, since=None, until=None):
comments = self.comments.exclude(pr__author=self)
merges = self.merges
if since:
comments = comments.filter(date__gte=since)
merges = merges.filter(date__gte=since)
if until:
comments = comments.filter(date__lte=until)
merges = merges.filter(date__lte=until)
return comments, merges
def score(self, since=None, until=None):
comments, merges = self._filtered_stats(since, until)
return sum(c.type for c in comments) + (Comment.MRG * merges.count())
def stats(self, since=None, until=None):
comments, merges = self._filtered_stats(since, until)
return {
"approvals": comments.filter(type=Comment.ACK).count(),
"change_requests": comments.filter(type=Comment.CRQ).count(),
"comments": comments.filter(type=Comment.COM).count(),
"merges": merges.count(),
}
@classmethod
def get_ranking(cls, limit=20, since=None, until=None):
maintainers_query = sorted(cls.objects
.annotate(comments_num=Count("comments"))
.annotate(merges_num=Count("merges"))
.filter(Q(comments_num__gt=0) | Q(merges_num__gt=0)),
key=lambda u: u.score(since, until),
reverse=True)
maintainers = []
for maintainer in maintainers_query:
if maintainer.score(since, until):
maintainers.append({
"name": maintainer.name,
"avatar_url": maintainer.avatar_url,
"score": maintainer.score(since, until),
"stats": maintainer.stats(since, until)
})
if len(maintainers) == limit:
break
return maintainers
@classmethod
def from_github_json(cls, json_user):
return cls.objects.get_or_create(
id=json_user["id"],
avatar_url=json_user["avatar_url"],
name=json_user["login"]
)
class PullRequest(models.Model):
class Meta:
unique_together = (("repo", "number"), )
indexes = [
models.Index(fields=["repo", "number"]),
]
repo = models.CharField(max_length=100,
validators=[validators.RegexValidator("[^/]+/[^/]+")])
number = models.IntegerField()
author = models.ForeignKey("User", null=True, blank=True,
on_delete=models.SET_NULL)
assignees = models.ManyToManyField("User", related_name="assignments")
def __str__(self):
return "%s#%d" % (self.repo, self.number)
@classmethod
def from_github_json(cls, json_pr, json_events=[]):
with transaction.atomic():
author, _ = User.from_github_json(json_pr["user"])
pr, created = cls.objects.update_or_create(
repo=GITHUB_REPO,
number=json_pr["number"],
author=author,
)
for json_event in json_events:
assignee = None
op = lambda user: None
if json_event["event"] == "review_requested":
assignee, _ = User.from_github_json(json_event["requested_reviewer"])
op = pr.assignees.add
elif json_event["event"] == "assigned":
assignee, _ = User.from_github_json(json_event["assignee"])
op = pr.assignees.add
elif json_event["event"] == "review_request_removed":
assignee, _ = User.from_github_json(json_event["requested_reviewer"])
op = pr.assignees.remove
                elif json_event["event"] == "unassigned":
assignee, _ = User.from_github_json(json_event["assignee"])
op = pr.assignees.remove
op(assignee)
pr.save()
return pr, created
class Comment(models.Model):
COM = .1 # comment
CRQ = 4.0 # change request
ACK = 5.0 # approval
MRG = 5.0 # merge (not a comment, but to keep the scores together it is here)
JSON_COMMENT_LOT = {
"commented": COM,
"dismissed": COM,
"changes_requested": CRQ,
"approved": ACK
}
id = models.IntegerField(primary_key=True, unique=True)
pr = models.ForeignKey("PullRequest", on_delete=models.CASCADE,
related_name="comments")
user = models.ForeignKey("User", on_delete=models.CASCADE,
related_name="comments")
type = models.FloatField(choices=((COM, "comment"),
(CRQ, "change Request"),
(ACK, "approval")),
default=COM)
date = models.DateTimeField()
@classmethod
def from_github_json(cls, json_comment, pr, type=COM):
date = dateutil.parser.parse(json_comment["created_at"])
if date >= START_DATE:
user, _ = User.from_github_json(json_comment["user"])
return cls.objects.update_or_create(
id=json_comment["id"],
pr=pr,
user=user,
defaults={"type": type, "date": date}
)
@classmethod
def from_github_review_json(cls, json_review, pr):
if json_review["state"].lower() not in cls.JSON_COMMENT_LOT:
# we don't count "pending" etc.
return None, False
json_review["created_at"] = json_review["submitted_at"]
return cls.from_github_json(json_review, pr,
cls.JSON_COMMENT_LOT[json_review["state"].lower()])
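# A worked example of the scoring weights above (hypothetical activity): a
# reviewer with 2 approvals, 1 change request, 3 plain comments on other
# people's PRs and 1 merge scores 2 * 5.0 + 1 * 4.0 + 3 * 0.1 + 1 * 5.0 = 19.3
# via User.score().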
class Merge(models.Model):
sha = models.CharField(max_length=40,
validators=[validators.RegexValidator("[a-f0-9A-F]+")],
primary_key=True, unique=True)
    pr = models.OneToOneField("PullRequest", on_delete=models.CASCADE)
    author = models.ForeignKey("User", on_delete=models.CASCADE, related_name="merges")
date = models.DateTimeField()
def __str__(self):
return self.sha[:7]
@classmethod
def from_github_json(cls, json_commit, pr):
date = dateutil.parser.parse(json_commit["commit"]["author"]["date"])
if date >= START_DATE:
author, _ = User.from_github_json(json_commit["author"])
return cls.objects.update_or_create(
sha=json_commit["sha"],
author=author,
pr=pr,
defaults={"date": date}
)
| 2.296875
| 2
|
unit_tests/glhe/profiles/test_external_base.py
|
stianchris/GLHE
| 2
|
12780738
|
<gh_stars>1-10
import os
import tempfile
import unittest
from glhe.profiles.external_base import ExternalBase
class TestExternalBase(unittest.TestCase):
@staticmethod
def add_instance():
temp_dir = tempfile.mkdtemp()
temp_csv = os.path.join(temp_dir, 'temp.csv')
with open(temp_csv, 'w') as f:
f.write(',b,c\n1/1/2018 0:00,1,4\n1/1/2018 0:02,2,3\n1/1/2018 0:04,3,6\n')
return ExternalBase(temp_csv, 0)
def test_get_value(self):
tst = self.add_instance()
self.assertAlmostEqual(tst.get_value(0), 1.0, delta=0.001)
self.assertAlmostEqual(tst.get_value(60), 1.5, delta=0.001)
self.assertAlmostEqual(tst.get_value(120), 2.0, delta=0.001)
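        # Note: the expected values above assume ExternalBase interpolates
        # linearly between the 2-minute samples, e.g. get_value(60) lands
        # halfway between the 0:00 value (1) and the 0:02 value (2), i.e. 1.5.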
| 2.28125
| 2
|
example_evaluate_with_diff.py
|
ducha-aiki/manifold-diffusion
| 118
|
12780739
|
<gh_stars>100-1000
# EXAMPLE_EVALUATE Code to evaluate example results on ROxford and RParis datasets.
# Revisited protocol has 3 difficulty setups: Easy (E), Medium (M), and Hard (H),
# and evaluates the performance using mean average precision (mAP), as well as mean precision @ k (mP@k)
#
# More details about the revisited annotation and evaluation can be found in:
# <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., Revisiting Oxford and Paris: Large-Scale Image Retrieval Benchmarking, CVPR 2018
#
# Authors: <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., 2018
# Added diffusion: <NAME>.
import os
import numpy as np
from scipy.io import loadmat
from dataset import configdataset
from download import download_datasets, download_features
from evaluate import compute_map
#---------------------------------------------------------------------
# Set data folder and testing parameters
#---------------------------------------------------------------------
# Set data folder, change if you have downloaded the data somewhere else
data_root = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'data')
# Check, and, if necessary, download test data (Oxford and Paris),
# revisited annotation, and example feature vectors for evaluation
download_datasets(data_root)
download_features(data_root)
# Set test dataset: roxford5k | rparis6k
test_dataset = 'roxford5k'
#---------------------------------------------------------------------
# Evaluate
#---------------------------------------------------------------------
print('>> {}: Evaluating test dataset...'.format(test_dataset))
# config file for the dataset
# separates query image list from database image list, when revisited protocol used
cfg = configdataset(test_dataset, os.path.join(data_root, 'datasets'))
# load query and database features
print('>> {}: Loading features...'.format(test_dataset))
features = loadmat(os.path.join(data_root, 'features', '{}_resnet_rsfm120k_gem.mat'.format(test_dataset)))
Q = features['Q']
X = features['X']
K = 100 # approx 50 mutual nns
QUERYKNN = 10
R = 2000
alpha = 0.9
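# (Descriptions inferred from how these values are used below, not from the
# original authors: K is the number of neighbours kept per row of the affinity
# matrix W, QUERYKNN is the number of nearest database items kept per query,
# R is the rank of the fast spectral approximation, and alpha is the diffusion
# damping parameter.)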
from diffussion import *
# perform search
print('>> {}: Retrieval...'.format(test_dataset))
sim = np.dot(X.T, Q)
qsim = sim_kernel(sim).T
sortidxs = np.argsort(-qsim, axis = 1)
for i in range(len(qsim)):
qsim[i,sortidxs[i,QUERYKNN:]] = 0
qsim = sim_kernel(qsim)
A = np.dot(X.T, X)
W = sim_kernel(A).T
W = topK_W(W, K)
Wn = normalize_connection_graph(W)
plain_ranks = np.argsort(-sim, axis=0)
cg_ranks = cg_diffusion(qsim, Wn, alpha)
cg_trunk_ranks = dfs_trunk(sim, A, alpha = alpha, QUERYKNN = QUERYKNN )
fast_spectral_ranks = fsr_rankR(qsim, Wn, alpha, R)
alg_names = ['Plain', 'Diffusion cg', 'Diffusion truncated', 'Spectral R=2000']
alg_ranks = [plain_ranks, cg_ranks,cg_trunk_ranks, fast_spectral_ranks ]
for rn in range(len(alg_names)):
ranks = alg_ranks[rn]
name = alg_names[rn]
# revisited evaluation
gnd = cfg['gnd']
# evaluate ranks
ks = [1, 5, 10]
# search for easy
gnd_t = []
for i in range(len(gnd)):
g = {}
g['ok'] = np.concatenate([gnd[i]['easy']])
g['junk'] = np.concatenate([gnd[i]['junk'], gnd[i]['hard']])
gnd_t.append(g)
mapE, apsE, mprE, prsE = compute_map(ranks, gnd_t, ks)
# search for easy & hard
gnd_t = []
for i in range(len(gnd)):
g = {}
g['ok'] = np.concatenate([gnd[i]['easy'], gnd[i]['hard']])
g['junk'] = np.concatenate([gnd[i]['junk']])
gnd_t.append(g)
mapM, apsM, mprM, prsM = compute_map(ranks, gnd_t, ks)
# search for hard
gnd_t = []
for i in range(len(gnd)):
g = {}
g['ok'] = np.concatenate([gnd[i]['hard']])
g['junk'] = np.concatenate([gnd[i]['junk'], gnd[i]['easy']])
gnd_t.append(g)
mapH, apsH, mprH, prsH = compute_map(ranks, gnd_t, ks)
print(name)
print('>> {}: mAP E: {}, M: {}, H: {}'.format(test_dataset, np.around(mapE*100, decimals=2), np.around(mapM*100, decimals=2), np.around(mapH*100, decimals=2)))
print('>> {}: mP@k{} E: {}, M: {}, H: {}'.format(test_dataset, np.array(ks), np.around(mprE*100, decimals=2), np.around(mprM*100, decimals=2), np.around(mprH*100, decimals=2)))
| 2.34375
| 2
|
leads/views.py
|
beniman8/django_crm
| 0
|
12780740
|
from django.core.mail import send_mail
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import render,redirect,reverse
from django.http import HttpResponse
from .models import Lead,Agent,Category
from .forms import LeadForm, LeadModelForm,CustomUserCreationForm,AssignAgentForm,LeadCategoryUpdateForm
from django.views import generic
from agents.mixins import OrganizerAndLoginRequiredMixin
#CRUD+L - Create, Retrieve, Update and Delete + List
class SignupView(generic.CreateView):
template_name='registration/signup.html'
form_class=CustomUserCreationForm
def get_success_url(self):
return reverse("login")
class LandingPageView(generic.TemplateView):
template_name='landing.html'
def landing_page(request):
return render(request, 'landing.html')
class HomePageView(LoginRequiredMixin,generic.ListView):
template_name='leads/home.html'
context_object_name = "leads"
def get_queryset(self):
user = self.request.user
# initial queryset of the leads for the entire organisation
if user.is_organizer:
queryset=Lead.objects.filter(organisation=user.userprofile, agent__isnull=False)
else:
queryset=Lead.objects.filter(organisation=user.agent.organisation, agent__isnull=False)
# filter for the agent that is logged in
queryset = queryset.filter(agent__user=user)
return queryset
def get_context_data(self,**kwargs):
context = super(HomePageView,self).get_context_data(**kwargs)
user = self.request.user
if user.is_organizer:
queryset=Lead.objects.filter(organisation=user.userprofile, agent__isnull=True)
context.update({
"unassigned_leads":queryset
})
return context
def home_page(request):
leads = Lead.objects.all()
context={
'leads':leads,
}
return render(request, 'leads/home.html', context)
class LeadDetailView(LoginRequiredMixin,generic.DetailView):
template_name='leads/detail.html'
context_object_name = "lead"
def get_queryset(self):
user = self.request.user
# initial queryset of the leads for the entire organisation
if user.is_organizer:
queryset=Lead.objects.filter(organisation=user.userprofile)
else:
queryset=Lead.objects.filter(organisation=user.agent.organisation)
# filter for the agent that is logged in
queryset = queryset.filter(agent__user=user)
return queryset
def lead_detail(request,pk):
lead = Lead.objects.get(id=pk)
context = {
'lead':lead,
}
return render(request, 'leads/detail.html', context)
class LeadCreateView(OrganizerAndLoginRequiredMixin,generic.CreateView):
template_name='leads/create.html'
form_class=LeadModelForm
def get_success_url(self):
return reverse("leads:home")
def form_valid(self,form):
lead = form.save(commit=False)
lead.organisation = self.request.user.userprofile
lead.save()
send_mail(
subject="A lead has been created",
message="Go to the site to check it out",
from_email='<EMAIL>',
recipient_list=['<EMAIL>']
)
return super(LeadCreateView,self).form_valid(form)
def lead_create(request):
form = LeadModelForm()
if request.method == "POST":
form = LeadModelForm(request.POST)
if form.is_valid():
form.save()
return redirect("/")
context = {
'form':form
}
return render(request, 'leads/create.html', context)
class LeadUpdateView(OrganizerAndLoginRequiredMixin,generic.UpdateView):
template_name='leads/update.html'
form_class=LeadModelForm
def get_queryset(self):
user = self.request.user
# initial queryset of the leads for the entire organisation
return Lead.objects.filter(organisation=user.userprofile)
def get_success_url(self):
return reverse("leads:home")
def lead_update(request,pk):
lead = Lead.objects.get(id=pk)
form = LeadModelForm(instance=lead)
if request.method == "POST":
form = LeadModelForm(request.POST,instance=lead)
if form.is_valid():
form.save()
return redirect("/")
context = {
'form':form
}
return render(request, 'leads/update.html', context)
class LeadDeleteView(OrganizerAndLoginRequiredMixin,generic.DeleteView):
template_name='leads/delete.html'
def get_queryset(self):
user = self.request.user
# initial queryset of the leads for the entire organisation
return Lead.objects.filter(organisation=user.userprofile)
def get_success_url(self):
return reverse("leads:home")
def lead_delete(request,pk):
lead = Lead.objects.get(id=pk)
lead.delete()
return redirect('/')
class AssignAgentView(OrganizerAndLoginRequiredMixin,generic.FormView):
template_name='leads/assign_agent.html'
form_class=AssignAgentForm
def get_form_kwargs(self,**kwargs):
kwargs = super(AssignAgentView,self).get_form_kwargs(**kwargs)
kwargs.update({"request":self.request})
return kwargs
def get_success_url(self):
return reverse("leads:home")
def form_valid(self,form):
agent = form.cleaned_data["agent"]
lead = Lead.objects.get(id=self.kwargs["pk"])
lead.agent = agent
lead.save()
return super(AssignAgentView,self).form_valid(form)
class CategoryListView(LoginRequiredMixin,generic.ListView):
template_name = "leads/category_list.html"
context_object_name = "category_list"
def get_context_data(self,**kwargs):
context = super(CategoryListView,self).get_context_data(**kwargs)
user = self.request.user
# initial queryset of the leads for the entire organisation
if user.is_organizer:
queryset=Lead.objects.filter(organisation=user.userprofile)
else:
queryset=Lead.objects.filter(organisation=user.agent.organisation)
context.update({
"unassigned_lead_count":queryset.filter(category__isnull=True).count()
})
return context
def get_queryset(self):
user = self.request.user
# initial queryset of the leads for the entire organisation
if user.is_organizer:
queryset=Category.objects.filter(organisation=user.userprofile)
else:
queryset=Category.objects.filter(organisation=user.agent.organisation)
return queryset
class CategoryDetailView(LoginRequiredMixin,generic.DetailView):
template_name="leads/category_detail.html"
context_object_name = "category"
# def get_context_data(self,**kwargs):
# context = super(CategoryDetailView,self).get_context_data(**kwargs)
# # qs = Lead.objects.filter(category=self.get_object()) this is kind of the same as the leads variable
# leads = self.get_object().leads.all()
    # # self.get_object().lead_set.all() this is how to call all the leads related to the category when it is being used as a foreign key
    # # if you have a related name set in the model you can use self.get_object()./**Insert Related name**//.all() -> self.get_object().leads.all()
# context.update({
# "leads":leads
# })
# return context
def get_queryset(self):
user = self.request.user
# initial queryset of the leads for the entire organisation
if user.is_organizer:
queryset=Category.objects.filter(organisation=user.userprofile)
else:
queryset=Category.objects.filter(organisation=user.agent.organisation)
return queryset
class LeadCategoryUpdateView(LoginRequiredMixin,generic.UpdateView):
template_name='leads/category_update.html'
form_class=LeadCategoryUpdateForm
def get_queryset(self):
user = self.request.user
# initial queryset of the leads for the entire organisation
if user.is_organizer:
queryset=Lead.objects.filter(organisation=user.userprofile)
else:
queryset=Lead.objects.filter(organisation=user.agent.organisation)
queryset = queryset.filter(agent__user=user)
return queryset
def get_success_url(self):
return reverse("leads:detail-view",kwargs={"pk":self.get_object().id})
# def lead_create(request):
# form = LeadForm()
# if request.method == "POST":
# form = LeadForm(request.POST)
# if form.is_valid():
# first_name=form.cleaned_data['first_name']
# last_name=form.cleaned_data['last_name']
# age=form.cleaned_data['age']
# agent = Agent.objects.first()
# Lead.objects.create(
# first_name=first_name,
# last_name=last_name,
# age=age,
# agent=agent)
# return redirect("/")
# context = {
# 'form':form
# }
# return render(request, 'leads/create.html', context)
# def lead_update(request,pk):
# lead = Lead.objects.get(id=pk)
# form = LeadForm()
# if request.method == "POST":
# form = LeadForm(request.POST)
# if form.is_valid():
# first_name=form.cleaned_data['first_name']
# last_name=form.cleaned_data['last_name']
# age=form.cleaned_data['age']
# agent = Agent.objects.first()
# lead.first_name=first_name,
# lead.last_name=last_name,
# lead.age=age,
# lead.agent=agent
# lead.save()
# return redirect("/")
# context = {
# 'form':form
# }
# return render(request, 'leads/update.html', context)
| 1.984375
| 2
|
test/test_loss_metrics/test_loss.py
|
imabackstabber/segment-with-nn
| 0
|
12780741
|
# coding=utf-8
''' test case for loss
'''
import tensorflow as tf
from segelectri.loss_metrics.loss import FocalLoss, LovaszLoss, DiceLoss, BoundaryLoss
class TestLoss(tf.test.TestCase):
def setUp(self):
self.y_true = tf.random.uniform((2, 512, 512),
minval=0,
maxval=3,
dtype=tf.int64)
self.y_pred = tf.random.uniform((2, 512, 512, 3),
minval=0,
maxval=1,
dtype=tf.float32)
def test_focal_loss(self):
focall = FocalLoss()
loss = focall(self.y_true, self.y_pred)
self.assertAllEqual(loss.shape, ())
def test_lovasz_loss(self):
lovaszl = LovaszLoss()
loss = lovaszl(self.y_true, self.y_pred)
self.assertAllEqual(loss.shape, ())
def test_cross_entropy_loss(self):
scce = tf.keras.losses.SparseCategoricalCrossentropy()
loss = scce(self.y_true, self.y_pred)
self.assertAllEqual(loss.shape, ())
def test_dice_loss(self):
dicel = DiceLoss()
loss = dicel(self.y_true, self.y_pred)
self.assertAllEqual(loss.shape, ())
def test_boundary_loss(self):
boundaryl = BoundaryLoss()
loss = boundaryl(self.y_true, self.y_pred)
self.assertAllEqual(loss.shape, ())
| 2.265625
| 2
|
miplearn/solvers/tests/test_internal_solver.py
|
GregorCH/MIPLearn
| 0
|
12780742
|
<filename>miplearn/solvers/tests/test_internal_solver.py<gh_stars>0
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import logging
from io import StringIO
import pyomo.environ as pe
from miplearn import BasePyomoSolver, GurobiSolver
from miplearn.solvers import RedirectOutput
from . import _get_instance, _get_internal_solvers
logger = logging.getLogger(__name__)
def test_redirect_output():
import sys
original_stdout = sys.stdout
io = StringIO()
with RedirectOutput([io]):
print("Hello world")
assert sys.stdout == original_stdout
assert io.getvalue() == "Hello world\n"
def test_internal_solver_warm_starts():
for solver_class in _get_internal_solvers():
logger.info("Solver: %s" % solver_class)
instance = _get_instance(solver_class)
model = instance.to_model()
solver = solver_class()
solver.set_instance(instance, model)
solver.set_warm_start(
{
"x": {
0: 1.0,
1: 0.0,
2: 0.0,
3: 1.0,
}
}
)
stats = solver.solve(tee=True)
assert stats["Warm start value"] == 725.0
solver.set_warm_start(
{
"x": {
0: 1.0,
1: 1.0,
2: 1.0,
3: 1.0,
}
}
)
stats = solver.solve(tee=True)
assert stats["Warm start value"] is None
solver.fix(
{
"x": {
0: 1.0,
1: 0.0,
2: 0.0,
3: 1.0,
}
}
)
stats = solver.solve(tee=True)
assert stats["Lower bound"] == 725.0
assert stats["Upper bound"] == 725.0
def test_internal_solver():
for solver_class in _get_internal_solvers():
logger.info("Solver: %s" % solver_class)
instance = _get_instance(solver_class)
model = instance.to_model()
solver = solver_class()
solver.set_instance(instance, model)
stats = solver.solve_lp()
assert round(stats["Optimal value"], 3) == 1287.923
solution = solver.get_solution()
assert round(solution["x"][0], 3) == 1.000
assert round(solution["x"][1], 3) == 0.923
assert round(solution["x"][2], 3) == 1.000
assert round(solution["x"][3], 3) == 0.000
stats = solver.solve(tee=True)
assert len(stats["Log"]) > 100
assert stats["Lower bound"] == 1183.0
assert stats["Upper bound"] == 1183.0
assert stats["Sense"] == "max"
assert isinstance(stats["Wallclock time"], float)
assert isinstance(stats["Nodes"], int)
solution = solver.get_solution()
assert solution["x"][0] == 1.0
assert solution["x"][1] == 0.0
assert solution["x"][2] == 1.0
assert solution["x"][3] == 1.0
# Add a brand new constraint
if isinstance(solver, BasePyomoSolver):
model.cut = pe.Constraint(expr=model.x[0] <= 0.0, name="cut")
solver.add_constraint(model.cut)
elif isinstance(solver, GurobiSolver):
x = model.getVarByName("x[0]")
solver.add_constraint(x <= 0.0, name="cut")
else:
raise Exception("Illegal state")
# New constraint should affect solution and should be listed in
# constraint ids
assert solver.get_constraint_ids() == ["eq_capacity", "cut"]
stats = solver.solve()
assert stats["Lower bound"] == 1030.0
if isinstance(solver, GurobiSolver):
# Extract the new constraint
cobj = solver.extract_constraint("cut")
# New constraint should no longer affect solution and should no longer
# be listed in constraint ids
assert solver.get_constraint_ids() == ["eq_capacity"]
stats = solver.solve()
assert stats["Lower bound"] == 1183.0
# New constraint should not be satisfied by current solution
assert not solver.is_constraint_satisfied(cobj)
# Re-add constraint
solver.add_constraint(cobj)
# Constraint should affect solution again
assert solver.get_constraint_ids() == ["eq_capacity", "cut"]
stats = solver.solve()
assert stats["Lower bound"] == 1030.0
# New constraint should now be satisfied
assert solver.is_constraint_satisfied(cobj)
# Relax problem and make cut into an equality constraint
solver.relax()
solver.set_constraint_rhs("cut", 0.5)
solver.set_constraint_sense("cut", "=")
stats = solver.solve()
assert round(stats["Lower bound"]) == 1179.0
def test_iteration_cb():
for solver_class in _get_internal_solvers():
logger.info("Solver: %s" % solver_class)
instance = _get_instance(solver_class)
solver = solver_class()
solver.set_instance(instance)
count = 0
def custom_iteration_cb():
nonlocal count
count += 1
return count < 5
solver.solve(iteration_cb=custom_iteration_cb)
assert count == 5
| 2.234375
| 2
|
serialize/dynamo.py
|
thorwhalen/ut
| 4
|
12780743
|
"""Dynamo access"""
import os
from time import sleep
import boto.dynamodb2
from .khan_logger import KhanLogger
__author__ = 'mattjmorris'
class Dynamo(object):
def __init__(self, access_key=None, secret=None):
"""
If access_key and/or secret are not passed in, assumes we are accessing erenev's aws account and that the
access info is stored as environment variables on the current server.
Connection and Table are available to clients via self properties, in case clients wish to use those objects
directly.
"""
access_key = access_key or os.getenv('VEN_S3_ACCESS_KEY')
secret = secret or os.getenv('VEN_S3_SECRET')
self.connection = boto.dynamodb2.connect_to_region(region_name='eu-west-1', aws_access_key_id=access_key,
aws_secret_access_key=secret)
self.logger = KhanLogger(origin=self.__class__.__name__)
def modify_throughput(self, requested_read, requested_write, table):
"""
Used to change the throughput of a specific table
"""
read, write, num_dec_today, table_status = self.get_table_info(table)
while requested_read != read or requested_write != write:
self.logger.info(msg="Modifying {} from {}, {} to {}, {}".format(table.table_name, read, write,
requested_read, requested_write))
new_read, new_write = self._new_read_write(read, requested_read, write, requested_write)
self.logger.info(msg="going to request read {} and write {}".format(new_read, new_write))
if (new_read < read or new_write < write) and num_dec_today >= 4:
# Todo - replace with custom error and handle in client code
raise ValueError("Sorry, can't do any more decreases today.")
table.update(throughput={'read': new_read, 'write': new_write})
sleep_secs = 30
table_status = 'UPDATING'
self.logger.info(msg="Sleeping for {} secs before starting".format(sleep_secs))
sleep(sleep_secs)
while table_status == 'UPDATING':
self.logger.info(msg="Sleeping for {} secs".format(sleep_secs))
sleep(sleep_secs)
read, write, num_dec_today, table_status = self.get_table_info(table)
return read, write
def _new_read_write(self, read, requested_read, write, requested_write):
"""
        Ensures that throughput changes stay within the increments DynamoDB accepts (at most a doubling per update), so the update request is not rejected.
"""
if requested_read == 0:
read_change_prop = 0
else:
read_change_prop = requested_read / float(read)
# max increase allowed is a doubling
if read_change_prop > 2:
new_read = read * 2
else:
new_read = requested_read
if requested_write == 0:
write_change_prop = 0
else:
write_change_prop = requested_write / float(write)
if write_change_prop > 2:
new_write = write * 2
else:
new_write = requested_write
return new_read, new_write
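    # Worked example of the doubling cap above (hypothetical numbers): raising
    # read capacity from 5 to a requested 40 proceeds 5 -> 10 -> 20 -> 40 over
    # successive update calls, because each call increases capacity by at most
    # a factor of two.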
def get_table_info(self, table):
"""
Returns meta information about the table, such as read speed, write speed, current status,
and number of decreases today. Useful for figuring out how to change throughput.
"""
desc = table.describe()
status = desc['Table']['TableStatus']
throughput = desc['Table']['ProvisionedThroughput']
num_decreases = throughput['NumberOfDecreasesToday']
read = throughput['ReadCapacityUnits']
write = throughput['WriteCapacityUnits']
return read, write, num_decreases, status
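# A minimal usage sketch (not part of the original module), assuming the boto2
# Table class from boto.dynamodb2.table; the table name and throughput values
# below are hypothetical.
if __name__ == '__main__':
    from boto.dynamodb2.table import Table
    dynamo = Dynamo()  # reads VEN_S3_ACCESS_KEY / VEN_S3_SECRET from the environment
    table = Table('my-table', connection=dynamo.connection)
    read, write = dynamo.modify_throughput(requested_read=10, requested_write=5, table=table)
    print('my-table is now provisioned at read={}, write={}'.format(read, write))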
| 2.875
| 3
|
scripts/python/road_segments/add_dicast_ids.py
|
OSADP/Pikalert-Vehicle-Data-Translator-
| 2
|
12780744
|
<filename>scripts/python/road_segments/add_dicast_ids.py
#!/usr/bin/env python
"""Add dicast site ids to road segment file"""
# ============================================================================== #
# #
# (c) Copyright, 2015 University Corporation for Atmospheric Research (UCAR). #
# All rights reserved. #
# #
# File: $RCSfile: fileheader,v $ #
# Version: $Revision: 1.1 $ Dated: $Date: 2010/10/04 14:44:18 $ #
# #
# ============================================================================== #
import log_msg
import os
import shutil
import sys
from optparse import OptionParser
from netCDF4 import Dataset
def add_dicast_ids(road_segment_nc_file, dicast_site_file, out_file):
r"""Add dicast ids to road segment nc file
Parameters
----------
road_segment_nc_file : string
dicast_site_file : string
out_file : string
Returns
-------
0 : success
1 : error
"""
# copy road_segment_nc_file to out_file
try:
shutil.copyfile(road_segment_nc_file, out_file)
except:
return 1
# open out_file for updating
netcdf_file = Dataset(out_file,"a")
# extract aux_id values
aux_id = netcdf_file.variables["aux_id"]
aux_id_string = []
for ind in range(aux_id.shape[0]):
aux_id_string.append(aux_id[ind].tostring().rstrip('\x00'))
# create map from aux_id to dicast numbers
dicast_dict = {}
for line in open(dicast_site_file):
spl = line.strip().split(";")
if spl[0] == "site_num":
# skip csv header
continue
(site_num, n, site_id, lat, lon, elev, n, desc, state, co, site_type) = spl
dicast_dict[site_id] = site_num
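    # For reference, each line of the dicast site file is expected to carry
    # eleven ';'-separated fields matching the unpacking above, e.g.
    # (hypothetical values): 72565;1;KDEN;39.86;-104.67;1656;0;Denver Intl;CO;US;road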
# find dicast ids for aux_id values
dicast_ids = []
for aux_string in aux_id_string:
dicast_ids.append(dicast_dict[aux_string])
netcdf_file.createDimension('dicast_id_len', size=8)
dicast_ids_var = netcdf_file.createVariable('dicast_id', 'c', ('point_num', 'dicast_id_len'))
dicast_ids_var[:] = dicast_ids
netcdf_file.close()
return 0
def main():
#/d2/vii/data/static/config/CO_logicast_road_sites_extended.asc
usage_str = "%prog road_segment_file dicast_file out_file"
parser = OptionParser(usage = usage_str)
(options, args) = parser.parse_args()
if len(args) < 3:
parser.print_help()
sys.exit(2)
road_segment_file = args[0]
dicast_file = args[1]
out_file = args[2]
add_dicast_ids(road_segment_file, dicast_file, out_file)
if __name__ == "__main__":
main()
| 2.234375
| 2
|
pji/service/section/info/__init__.py
|
HansBug/pji
| 0
|
12780745
|
<filename>pji/service/section/info/__init__.py
from .base import SectionInfoTemplate, SectionInfo
from .general import SectionInfoType, load_info_template
from .local import LocalSectionInfoTemplate, LocalSectionInfo
from .mapping import SectionInfoMappingTemplate, SectionInfoMapping
from .static import StaticSectionInfoTemplate, StaticSectionInfo
from .tag import TagSectionInfoTemplate, TagSectionInfo
| 1.28125
| 1
|
Main.py
|
dixantmittal/CS5242-CNN-implementation
| 0
|
12780746
|
# A bit of setup
from __future__ import print_function
import Models
import code_base.solver as slvr
from code_base.data_utils import *
from code_base.layers import *
from code_base.solver import Solver
# Explicit imports for names used below; they may also be pulled in by the
# star imports above, but importing them directly is safer.
import datetime
import pickle
import numpy as np
settings.time_analysis['logger_enabled'] = False
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
def rel_error(x, y):
""" returns relative error """
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
def getSolver(model, data, alpha, alpha_decay, epoch=10, batch_size=128):
return Solver(model, data, num_epochs=epoch, batch_size=batch_size,
update_rule='adam',
optim_config={
'learning_rate': alpha,
}, lr_decay=alpha_decay, verbose=True, print_every=1)
def train_model(model_key):
slvr._file.write('\n\n>>>> MODEL - ' + model_key + ' <<<<')
model = Models.Models[model_key]
solver = getSolver(model=model, data=data, alpha=3e-3, alpha_decay=0.5, epoch=15)
start = datetime.datetime.now()
solver.train()
end = datetime.datetime.now()
slvr._file.write('\nTotal time taken: ' + str(end - start))
slvr._file.flush()
model_key = model_key + '_alpha3e-3'
save_metrics(solver,model_key)
save_model(model, './models/cnn_model_' + model_key + '.p')
def save_metrics(solver, model_key):
pickle.dump(solver.loss_history,open('./metrics/'+model_key+'_loss_history.p','wb'))
pickle.dump(solver.train_acc_history,open('./metrics/'+model_key+'_train_acc_history.p','wb'))
pickle.dump(solver.val_acc_history,open('./metrics/'+model_key+'_val_acc_history.p','wb'))
data = pickle.load(open('./data.p', 'rb'), encoding='latin1')
# create augmented data - mirror image
# aug_X_train = np.flip(data['X_train'], 3)
# data['X_train'] = np.concatenate((data['X_train'], aug_X_train), 0)
# data['y_train'] = np.concatenate((data['y_train'], data['y_train']), 0)
for k, v in data.items():
print('%s: ' % k, v.shape)
train_model('conv32_filter7_fc256_drop0')
train_model('conv32_filter7_fc256_drop02')
# train_model('conv64_filter5_fc512_drop0')
# train_model('conv64_filter5_fc512_drop03')
# train_model('conv128_filter3_fc1024_drop0')
# train_model('conv128_filter3_fc1024_drop04')
| 2.109375
| 2
|
ds/lru.py
|
haandol/algorithm_in_python
| 0
|
12780747
|
class LRUCache(object):
def __init__(self, size):
self.size = size
self.lru = {}
self.cache = {}
self.counter = 0
    def put(self, key, value):
        if key not in self.cache and len(self.lru) >= self.size:
            # evict the least recently used key (the one with the smallest counter)
            k, _ = min(self.lru.items(), key=lambda x: x[1])
            self.lru.pop(k)
            self.cache.pop(k)
        # insert or update the value and mark the key as most recently used
        self.counter += 1
        self.lru[key] = self.counter
        self.cache[key] = value
def get(self, key):
if key in self.cache:
self.counter += 1
self.lru[key] = self.counter
return self.cache[key]
return None
if "__main__" == __name__:
cache = LRUCache(3)
cache.put('a', 1)
cache.put('b', 2)
cache.put('c', 3)
assert 3 == len(cache.cache)
    print(cache.cache)
cache.put('d', 4)
cache.put('a', 5)
assert 3 == len(cache.cache)
assert None is cache.get('b')
assert 4 == cache.get('d')
assert 5 == cache.get('a')
| 3.609375
| 4
|
app/speech.py
|
liliangbin/webBot
| 2
|
12780748
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
from aip import AipSpeech
# Speech recognition module.
# The Baidu app was registered with my phone number for login.
class Speech(object):
def __init__(self):
self.APP_ID = '16250780'
self.APP_KEY = 'xcnrNwkhe61iYGoaZVRNpnma'
self.SECRET_KEY = '<KEY>'
self.client = AipSpeech(self.APP_ID, self.APP_KEY, self.SECRET_KEY)
def get_file_content(self, filePath='audio.pcm'):
with open(filePath, 'rb') as fp:
return fp.read()
def asr(self, filepath):
back = self.client.asr(self.get_file_content(filepath), 'pcm', 16000, {
'dev_pid': 1536,
})
        print(back)
return back.get('result')[0].encode('utf-8')
| 3.03125
| 3
|
2020_Challenge/submissions/Andres_Duque/functions.py
|
UCHIC/CIWS-VisChallenge
| 2
|
12780749
|
<filename>2020_Challenge/submissions/Andres_Duque/functions.py
"""
<NAME>
"""
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import scipy
from sklearn.decomposition import PCA
import pandas as pd
from matplotlib.patches import Rectangle, Circle
import matplotlib as mpl
import matplotlib.colors as clr
cmap = matplotlib.cm.get_cmap('Set2')
def summarize_df(df, feature):
'''
This function summarizes the dataframe by one of the features (feature)
by each of the uses, by day and hour
'''
# Get all the possible uses
uses = np.unique(df.Label)
# Find the accumulated volume consumption for all the uses
df_use = pd.DataFrame(0, index=np.arange(len(df)), columns = uses)
for i in range(df.shape[0]):
use = df.Label[i]
df_use.loc[i, use] = df[feature].loc[i]
df_use['Dates'] = df['EndTime']
df_use['Days'] = df_use['Dates'].str.partition(" ")[0].str.partition("-")[2]
df_use['Hours'] = df_use['Dates'].str.partition(" ")[2].str.partition(":")[0]
df_use['Hours_float'] = df_use['Hours'].astype(np.float)
days = np.unique(df_use['Days'])
# Compute the accumulated use by each day
Accumulated_days_use = pd.DataFrame(0, index=np.arange(len(days)),
columns = uses)
for i in range(Accumulated_days_use.shape[0]):
dayuse = df_use[df_use['Days'] == days[i]][uses]
Accumulated_days_use.loc[i] = dayuse.sum()
Accumulated_days_use.plot(subplots=True)
Accumulated_days_use['Total'] = Accumulated_days_use.sum(axis= 1)
Accumulated_days_use['Days'] = days
Accumulated = df_use[uses].cumsum()
Accumulated['Total'] = Accumulated.sum(axis= 1)
Accumulated['Dates'] = df_use['Dates']
Accumulated['Days'] = df_use['Days']
Accumulated_hours = pd.DataFrame(0, index=np.arange(24),
columns = uses)
for i in range(24):
houruse = df_use[df_use['Hours_float'] == i][uses]
Accumulated_hours.loc[i] = houruse.sum()
Accumulated_hours['Total'] = Accumulated_hours.sum(axis= 1)
Accumulated_hours['Hours'] = np.arange(0,24)
return df_use, Accumulated, Accumulated_days_use, Accumulated_hours
# df_use, Accumulated, Accumulated_days = summarize_df(df, 'Volume(gal)')
def day_hour_use(df_use, use, colorm):
'''This function plots the combined days and hours
in a colormap plot'''
cmap = clr.LinearSegmentedColormap.from_list('custom blue',
['#ffffff',colorm],
N=256)
days = np.unique(df_use['Days'])
df_day_hour = pd.DataFrame(0, index=np.arange(0,24),
columns = days)
for i in days:
for j in range(24):
houruse = df_use[(df_use['Days'] == i) & (df_use['Hours_float'] == j)][use]
df_day_hour.loc[j, i] = houruse.sum()
fig = plt.figure()
plt.matshow(df_day_hour, cmap = cmap)
plt.xticks(range(len(days)), days, fontsize = 5, rotation = 90)
plt.yticks(range(24), range(24), fontsize = 10)
plt.xlabel('Days')
plt.ylabel('Hours')
plt.title('Volume consumption by day and hour {}'.format(use))
plt.colorbar(shrink = 0.5)
def plot_daily_use(Accumulated_days, uses):
'''This function plots the daily consumption
by each of the (uses) input in the argument'''
colors = plt.cm.Set2(range(len(uses)))
fig, ax = plt.subplots(len(uses),1, figsize = (20,20))
days = Accumulated_days['Days']
# plot color map to identify days and hours of use
for k in range(len(uses)):
cmap = clr.LinearSegmentedColormap.from_list('custom blue',
['#ffffff',colors[k]],
N=256)
ax[k].matshow(np.array(Accumulated_days[uses[k]]).reshape(1,-1), cmap = cmap)
ax[k].set_xticks(range(len(days)))
ax[k].set_xticklabels(days, fontsize=18)
ax[k].set_yticks([])
ax[k].xaxis.set_ticks_position('bottom')
# Extract the major use by day
for (i, j), z in np.ndenumerate(np.array(Accumulated_days[uses[k]]).reshape(1,-1)):
ax[k].text(j, i, '{:0.1f}'.format(z), ha='center', va='center',
fontsize=20)
ax[k].set_title("Daily use {} (Volume(gal))".format(uses[k]), fontsize=30)
def plot_hour_use(Accumulated_hours, uses):
    '''This function plots the hourly aggregated consumption
by each of the (uses) input in the argument'''
colors = plt.cm.Set2(range(len(uses)))
fig, ax = plt.subplots(len(uses),1, figsize = (30,20))
hours = Accumulated_hours['Hours']
# plot color map to identify days and hours of use
for k in range(len(uses)):
cmap = clr.LinearSegmentedColormap.from_list('custom blue',
['#ffffff',colors[k]],
N=256)
ax[k].matshow(np.array(Accumulated_hours[uses[k]]).reshape(1,-1), cmap = cmap)
ax[k].set_xticks(range(len(hours)))
ax[k].set_xticklabels(hours, fontsize=18)
ax[k].set_yticks([])
ax[k].xaxis.set_ticks_position('bottom')
# Extract the major use by day
for (i, j), z in np.ndenumerate(np.array(Accumulated_hours[uses[k]]).reshape(1,-1)):
ax[k].text(j, i, '{:0.1f}'.format(z), ha='center', va='center',
fontsize=20)
ax[k].set_title("Hourly use {} (Volume(gal))".format(uses[k]), fontsize=30)
def plot_accumulated_use(Accumulated, uses):
days = Accumulated['Days']
fig, ax = plt.subplots(figsize = (10,10))
colors = plt.cm.Set2(range(len(uses)))
uses_sum = uses[uses != 'Total']
end = Accumulated[uses_sum].iloc[-1]
for k in range(len(uses)):
ax.plot(Accumulated[uses[k]], label = uses[k], color = colors[k])
if uses[k] == 'Total':
ax.annotate('Percentage = {:.2%}'
.format(1),
fontsize=9,
fontweight='bold',
xy=(len(days), Accumulated[uses[k]].iloc[-1]),
xycoords='data',
xytext=(-150, -30),
textcoords='offset points',
arrowprops=dict(arrowstyle="->"),
bbox=dict(boxstyle="round4, pad=0.1", fc="red", ec="red",
alpha =0.2, lw=1))
else:
ax.annotate('Percentage = {:.2%}'
.format(Accumulated[uses[k]].iloc[-1]/end.sum()),
fontsize=9,
fontweight='bold',
xy=(len(days), Accumulated[uses[k]].iloc[-1]),
xycoords='data',
xytext=(100, 0),
textcoords='offset points',
arrowprops=dict(arrowstyle="->"),
bbox=dict(boxstyle="round4, pad=0.1", fc="red",alpha =0.2, ec="red", lw=1))
ax.legend()
ax.set_xticks(np.arange(0,len(days),100))
ax.set_xticklabels(days.iloc[np.arange(0,len(days),100)])
ax.set_xlabel('Days', fontsize = 15)
ax.set_ylabel('Accumulated consumption Volume(gal)', fontsize = 15)
ax.set_title('Consumption progression over time', fontsize = 20)
def plot_feature_use(df, use, q, colorm, feature):
data = np.array(df[feature][df['Label'] == use])
dates = df['StartTime'][df['Label'] == use]
Qv = np.quantile(data, q)
indices = np.where(data > Qv)[0]
fig, ax = plt.subplots(figsize = (20,10))
ax.plot(data, color = colorm)
for k in indices:
ax.annotate('Date = {}'
.format(dates.iloc[k]),
fontsize=9,
fontweight='bold',
xy=(k, data[k]),
xycoords='data',
xytext=(0, 10),
textcoords='offset points',
arrowprops=dict(arrowstyle="->"),
bbox=dict(boxstyle="round4, pad=0.1", fc="red", ec="red",
alpha =0.2, lw=1))
ax.set_ylim(0,)
ax.set_ylabel('Volume(gal)', fontsize = 15)
ax.set_xlabel('Number of each time it is used', fontsize = 15)
# ax.set_xticks(np.arange(0,len(data),20))
# ax.set_xticklabels(dates.iloc[np.arange(0,len(data),20)])
ax.set_title("Total uses of {}, {} with the observaitons above the {} percentile annotated by date".format(use, feature, q*100),
fontsize = 20)
| 2.859375
| 3
|
tests/test_config_debug.py
|
nmichlo/eunomia
| 3
|
12780750
|
<reponame>nmichlo/eunomia
import re
from eunomia.config.nodes import SubNode
from tests.test_backend_obj import _make_config_group
from tests.util import temp_capture_stdout
# ========================================================================= #
# Test Config Objects #
# ========================================================================= #
def test_debug_groups():
root = _make_config_group(suboption='suboption1')
with temp_capture_stdout() as out:
root.debug_tree_print()
color_out = out.getvalue()
assert color_out == ' \x1b[90m\x1b[0m\x1b[35m/\x1b[0m\n \x1b[90m├\x1b[93m╌\x1b[0m \x1b[90m/:\x1b[0m \x1b[33mdefault\x1b[0m\n \x1b[90m├\x1b[95m─\x1b[0m \x1b[90m\x1b[0m\x1b[35m/subgroup\x1b[0m\n \x1b[90m│\x1b[0m \x1b[90m├\x1b[93m╌\x1b[0m \x1b[90m/subgroup:\x1b[0m \x1b[33msuboption1\x1b[0m\n \x1b[90m│\x1b[0m \x1b[90m╰\x1b[93m╌\x1b[0m \x1b[90m/subgroup:\x1b[0m \x1b[33msuboption2\x1b[0m\n \x1b[90m╰\x1b[95m─\x1b[0m \x1b[90m\x1b[0m\x1b[35m/subgroup2\x1b[0m\n \x1b[90m╰\x1b[95m─\x1b[0m \x1b[90m/subgroup2\x1b[0m\x1b[35m/subgroup3\x1b[0m\n \x1b[90m├\x1b[93m╌\x1b[0m \x1b[90m/subgroup2/subgroup3:\x1b[0m \x1b[33msuboption1\x1b[0m\n \x1b[90m╰\x1b[93m╌\x1b[0m \x1b[90m/subgroup2/subgroup3:\x1b[0m \x1b[33msuboption2\x1b[0m\n'
with temp_capture_stdout() as out:
root.debug_tree_print(colors=False)
normal_out = out.getvalue()
assert normal_out == ' /\n ├╌ /: default\n ├─ /subgroup\n │ ├╌ /subgroup: suboption1\n │ ╰╌ /subgroup: suboption2\n ╰─ /subgroup2\n ╰─ /subgroup2/subgroup3\n ├╌ /subgroup2/subgroup3: suboption1\n ╰╌ /subgroup2/subgroup3: suboption2\n'
# https://stackoverflow.com/questions/14693701/how-can-i-remove-the-ansi-escape-sequences-from-a-string-in-python
assert normal_out == re.sub(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])', '', color_out)
with temp_capture_stdout() as out:
root.debug_tree_print(colors=False, show_defaults=True)
normal_out_defaults = out.getvalue()
assert normal_out_defaults == ' /\n ├╌ /: default [subgroup: suboption1]\n ├─ /subgroup\n │ ├╌ /subgroup: suboption1\n │ ╰╌ /subgroup: suboption2\n ╰─ /subgroup2\n ╰─ /subgroup2/subgroup3\n ├╌ /subgroup2/subgroup3: suboption1\n ╰╌ /subgroup2/subgroup3: suboption2\n'
root = _make_config_group(suboption=SubNode('suboption${=1}'))
with temp_capture_stdout() as out:
root.debug_tree_print(colors=False, show_defaults=True)
normal_out_defaults_special = out.getvalue()
assert normal_out_defaults_special == ' /\n ├╌ /: default [subgroup: suboption${=1}]\n ├─ /subgroup\n │ ├╌ /subgroup: suboption1\n │ ╰╌ /subgroup: suboption2\n ╰─ /subgroup2\n ╰─ /subgroup2/subgroup3\n ├╌ /subgroup2/subgroup3: suboption1\n ╰╌ /subgroup2/subgroup3: suboption2\n'
# TODO: test other flags and suboption cases
# ========================================================================= #
# END #
# ========================================================================= #
| 2.171875
| 2
|