max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
Dataset/Leetcode/test/7/279.py | kkcookies99/UAST | 0 | 12765251 | class Solution:
def XXX(self, x: int) -> int:
if x == 0:
return 0
res = str(x)
sign = 1
if res[0] == '-':
res = res[1:]
sign = -1
res = res[::-1]
if res[0] == '0':
res = res[1:]
resint = int(res)*sign
if(resint<-2147483648 or resint>2147483647):
return 0
else:
return resint
| 3.03125 | 3 |
torchvision/prototype/datasets/_builtin/semeion.py | yoshitomo-matsubara/vision | 2 | 12765252 | import functools
import io
from typing import Any, Callable, Dict, List, Optional, Tuple
import torch
from torchdata.datapipes.iter import (
IterDataPipe,
Mapper,
CSVParser,
)
from torchvision.prototype.datasets.decoder import raw
from torchvision.prototype.datasets.utils import (
Dataset,
DatasetConfig,
DatasetInfo,
HttpResource,
OnlineResource,
DatasetType,
)
from torchvision.prototype.datasets.utils._internal import image_buffer_from_array, hint_sharding, hint_shuffling
from torchvision.prototype.features import Image, Label
class SEMEION(Dataset):
    """Prototype-datasets entry for the UCI SEMEION handwritten-digit set.

    Each CSV row holds 256 space-separated pixel values (one 16x16 image)
    followed by a one-hot encoded label over the 10 digit classes.
    """

    def _make_info(self) -> DatasetInfo:
        # Static metadata: raw (undecoded) images, 10 categories.
        return DatasetInfo(
            "semeion",
            type=DatasetType.RAW,
            categories=10,
            homepage="https://archive.ics.uci.edu/ml/datasets/Semeion+Handwritten+Digit",
        )

    def resources(self, config: DatasetConfig) -> List[OnlineResource]:
        # Single downloadable data file, integrity-checked by SHA-256.
        data = HttpResource(
            "http://archive.ics.uci.edu/ml/machine-learning-databases/semeion/semeion.data",
            sha256="f43228ae3da5ea6a3c95069d53450b86166770e3b719dcc333182128fe08d4b1",
        )
        return [data]

    def _collate_and_decode_sample(
        self,
        data: Tuple[str, ...],
        *,
        decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
    ) -> Dict[str, Any]:
        """Turn one parsed CSV row into an {image, label} sample dict."""
        # First 256 fields are the pixel values of a 16x16 image.
        image_data = torch.tensor([float(pixel) for pixel in data[:256]], dtype=torch.uint8).reshape(16, 16)
        # Remaining fields form a one-hot label vector; skip empty fields
        # produced by trailing delimiters.
        label_data = [int(label) for label in data[256:] if label]
        if decoder is raw:
            # Raw mode: expose the tensor directly, adding a channel dim.
            image = Image(image_data.unsqueeze(0))
        else:
            # Otherwise encode into an in-memory image file and let the
            # supplied decoder (if any) decode it.
            image_buffer = image_buffer_from_array(image_data.numpy())
            image = decoder(image_buffer) if decoder else image_buffer  # type: ignore[assignment]
        # Index of the first truthy entry in the one-hot vector.
        label_idx = next((idx for idx, one_hot_label in enumerate(label_data) if one_hot_label))
        return dict(image=image, label=Label(label_idx, category=self.info.categories[label_idx]))

    def _make_datapipe(
        self,
        resource_dps: List[IterDataPipe],
        *,
        config: DatasetConfig,
        decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
    ) -> IterDataPipe[Dict[str, Any]]:
        """Build the pipeline: CSV parse -> shard/shuffle hints -> collate."""
        dp = resource_dps[0]
        dp = CSVParser(dp, delimiter=" ")
        dp = hint_sharding(dp)
        dp = hint_shuffling(dp)
        dp = Mapper(dp, functools.partial(self._collate_and_decode_sample, decoder=decoder))
        return dp
| 2.390625 | 2 |
cloudrunner/node/matcher.py | CloudRunnerIO/cloudrunner | 5 | 12765253 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 CloudRunner.IO
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import re
SPLITTER = re.compile(r'\s+|,|;')
LOG = logging.getLogger('Matcher')
class CaseInsensitiveDict(dict):
    """Read-only dict whose keys are matched case-insensitively.

    All keys are lower-cased on construction; every lookup lower-cases
    the requested key before delegating to the underlying dict.
    """

    def __init__(self, dict_):
        super(CaseInsensitiveDict, self).__init__()
        # BUG FIX: copy the entries instead of popping them — the
        # original emptied the caller's dict as a side effect.
        for k in list(dict_.keys()):
            super(CaseInsensitiveDict, self).__setitem__(k.lower(), dict_[k])

    def __setitem__(self, key, value, default=None):
        # The mapping is immutable once constructed.
        raise Exception("Dict is readonly")

    def __getitem__(self, key):
        return super(CaseInsensitiveDict, self).__getitem__(key.lower())

    def get(self, key, default=None):
        """Return the value for *key* (case-insensitive) or *default*.

        BUG FIX: the original re-raised KeyError for missing keys,
        violating the dict.get contract.
        """
        try:
            return super(CaseInsensitiveDict, self).__getitem__(key.lower())
        except KeyError:
            return default

    def __contains__(self, key):
        return super(CaseInsensitiveDict, self).__contains__(key.lower())
class Matcher(object):
    """
    Provides basic matching functions for node targets.

    NOTE(review): this class uses Python 2 constructs
    (``except Exception, ex``, ``basestring``, ``long``,
    list-returning ``filter``) — it will not run on Python 3.
    """

    def __init__(self, node_id, meta):
        # node_id: this node's identifier.
        # meta: node metadata; wrapped for case-insensitive key lookup.
        self.node_id = node_id
        self.meta = CaseInsensitiveDict(meta)

    def is_match(self, target_str):
        """Return the (possibly empty) list of targets matching this node.

        target_str is split on whitespace/commas/semicolons. A target of
        the form ``key=value`` is checked against the metadata entry for
        ``key``; any other target is treated as a glob-style pattern
        matched against the node id (case-insensitive).
        """
        targets = SPLITTER.split(target_str)
        targets = [t.strip() for t in targets if t.strip()]

        def _match(target):
            try:
                if '=' in target:
                    # we have specific selector
                    k, _, v = target.partition('=')
                    if k not in self.meta:
                        return False
                    val = self.meta.get(k)
                    if isinstance(val, basestring):
                        # Glob match against the string metadata value.
                        return re.match(self.prepare_re(v), self.meta.get(k),
                                        re.I)
                    elif isinstance(val, (int, long)):
                        return int(v) == val
                    elif isinstance(val, float):
                        return float(v) == val
                    else:
                        # Unsupported metadata value type.
                        return False
                else:
                    # No selector: match the pattern against the node id.
                    return re.match(self.prepare_re(target),
                                    self.node_id, re.I)
            except Exception, ex:
                # Malformed selector/pattern: log and treat as no match.
                LOG.exception(ex)
                return

        return filter(_match, targets)

    def prepare_re(self, match):
        # Convert a glob-style pattern (e.g. "web*") into an anchored regex.
        return '^%s$' % match.replace(".", "\.").replace("*", ".*")
| 2.296875 | 2 |
kolab/multiese/parser.py | KuramitsuLab/kolab | 0 | 12765254 | import pegtree as pg
from pegtree import ParseTree
from pegtree.visitor import ParseTreeVisitor
import tree as ntree
import pprint
peg = pg.grammar('multiese.pegtree')
parser = pg.generate(peg)
def fix(tree):
    """Propagate the maximum end position (``epos_``) up through *tree*.

    Recursively fixes all positional children and all keyed subtrees,
    then sets this node's ``epos_`` to the largest value seen
    (including its own). Returns *tree* for chaining.
    """
    end_positions = [tree.epos_]
    for child in tree:
        fix(child)
        end_positions.append(child.epos_)
    end_positions.extend(fix(tree.get(key)).epos_ for key in tree.keys())
    tree.epos_ = max(end_positions)
    return tree
class MultieseParser(ParseTreeVisitor):
    """Visitor that converts a multiese PEG parse tree into ntree nodes."""

    def __init__(self):
        ParseTreeVisitor.__init__(self)

    def parse(self, s: str):
        """Parse *s* with the multiese grammar and visit the result."""
        tree = parser(s)
        node = self.visit(tree)
        return node

    def acceptChunk(self, tree: ParseTree):
        # Plain text chunk: re-parse with ntree and flatten to a sequence.
        s = str(tree)
        node = ntree.parse(s)
        return ntree.系列(*node.flatten()).simplify()

    def acceptSeq(self, tree: ParseTree):
        # Sequence: visit every child and flatten them into one 系列 node.
        ns = []
        # print(repr(tree))
        for t in tree:
            node = self.visit(t)
            node.flatten(ns)
        return ntree.系列(*ns)

    def acceptBlock(self, tree: ParseTree):
        # Grouped sub-expression, e.g. {...} in the grammar.
        # print(repr(tree)) (1+2)*3
        return ntree.グループ(self.visit(tree[0]))

    def acceptChoice(self, tree: ParseTree):
        # Alternatives [a|b|...]; each choice also feeds the synonym dict.
        ns = []
        for t in tree:
            ns.append(self.visit(t))
        node = ntree.Choice(ns)
        if ns[0].__class__.__name__ != '助詞':
            ntree.update_choice_dic(node.stringfy())  # update the synonym dictionary (類義語辞書)
        return node

    def acceptExpression(self, tree: ParseTree):
        # Inline code expression; fix() repairs end positions first.
        s = str(fix(tree))
        return ntree.コード(s)

    def acceptSymbol(self, tree: ParseTree):
        # Bare symbol, treated as code like an expression.
        s = str(fix(tree))
        return ntree.コード(s)

    def acceptAnnotation(self, tree: ParseTree):
        name = str(tree[0])  # annotation kind (e.g. "type")
        ns = [self.visit(t) for t in tree[1:]]
        return ntree.annotation(name, ns)
mult = MultieseParser()
def multiese_parser(s: str):
    """Parse *s* using the shared module-level MultieseParser instance."""
    return mult.parse(s)
def test_for_nobu(s):
    # Smoke-test helper: print the input, its parsed repr, and a blank line.
    print(s)
    print('=>', repr(mult.parse(s)))
    print()
if __name__ == '__main__':
    # Ad-hoc demos exercising plain text, grouping {...}, choices [a|b],
    # and @annotations in the multiese grammar.
    test_for_nobu('データフレームdfを降順にソートする')
    test_for_nobu('望遠鏡で泳ぐ子犬を見た')
    test_for_nobu('望遠鏡で{泳ぐ子犬}を見た')
    test_for_nobu('望遠鏡で[子犬|とうきび]を見た')
    test_for_nobu('@type(df, データフレーム)について、望遠鏡で子犬を見てない')  # 否 見た 80%
    test_for_nobu('@type(df)の先頭を見る')  # Keyword
    test_for_nobu('望遠鏡で{子犬が泳ぐ}様子を見たら、math.pi+1を実行する')
| 2.65625 | 3 |
inclassapr23.py | seanmacb/COMP-115-Exercises | 0 | 12765255 | #Making a nice little x o o o x o o o x tic tac toe board
def printer(board):
    """Print a 3x3 board: cells joined by ' | ', rows split by dividers."""
    for row_index, row in enumerate(board):
        for col_index, cell in enumerate(row):
            print(cell, end="")
            if col_index < 2:
                print(" | ", end="")
        print()
        if row_index < 2:
            print("--+----+--")
def main():
    """Build a 3x3 board with X on the main diagonal, O elsewhere; print it."""
    board = [["X" if row == col else "O" for col in range(3)]
             for row in range(3)]
    printer(board)
if __name__ == "__main__":
    # Guard the entry point so importing this module does not print a board.
    main()
packages/w3af/w3af/plugins/tests/crawl/failing_spider.py | ZooAtmosphereGroup/HelloPackages | 3 | 12765256 | <filename>packages/w3af/w3af/plugins/tests/crawl/failing_spider.py
"""
failing_spider.py
Copyright 2012 <NAME>
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from w3af.plugins.crawl.web_spider import web_spider
class failing_spider(web_spider):
    """
    This is a test plugin that will raise exceptions.

    Only useful for testing, see test_discover_exception_handling.py

    :author: <NAME> (<EMAIL>)
    """

    def __init__(self):
        web_spider.__init__(self)
        self.blacklist = ('2.html',)

    def crawl(self, fuzzable_req):
        """
        Raise an exception when the request URL ends with any blacklisted
        suffix; otherwise defer to the parent spider implementation.
        """
        url_string = fuzzable_req.get_url().url_string
        # str.endswith accepts a tuple of suffixes, replacing the loop.
        if url_string.endswith(tuple(self.blacklist)):
            raise Exception('UnitTest')

        return super(failing_spider, self).crawl(fuzzable_req)
| 2.234375 | 2 |
CarND-Camera-Calibration/challenge/challenge.py | vikramriyer/CarND-Advanced-Lane-Lines | 1 | 12765257 | <reponame>vikramriyer/CarND-Advanced-Lane-Lines
import pickle
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# %matplotlib inline  # BUG FIX: IPython magic is a SyntaxError in a plain
# .py script — re-enable this line only inside a Jupyter notebook.

# Read in the saved camera matrix and distortion coefficients
# These are the arrays you calculated using cv2.calibrateCamera()
dist_pickle = pickle.load(open("wide_dist_pickle.p", "rb"))
mtx = dist_pickle["mtx"]
dist = dist_pickle["dist"]

# Read in an image
img = cv2.imread('test_image2.png')
# img_size = (img.shape[1], img.shape[0])

nx = 8  # the number of inside corners in x
ny = 6  # the number of inside corners in y
def corners_unwarp(img, nx, ny, mtx, dist):
# Pass in your image into this function
# Write code to do the following steps
# 1) Undistort using mtx and dist
img = cv2.undistort(img, mtx, dist, None, mtx)
# 2) Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img_size = (gray.shape[1], gray.shape[0])
# 3) Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (nx,ny), None)
'''
x_min, y_min = 1000, -1000
x_max, y_max = -1000, -1000
for row in corners[::2]:
pair = row[0]
x_min = min(x_min, pair[0])
x_max = max(x_max, pair[0])
y_min = min(y_min, pair[1])
y_max = max(y_max, pair[1])
srcpts = [[x_min, y_min], [x_max, y_max], [x_min, y_max], [x_min, y_max]]
dstpts = [[200,0],[200,375],[1100, 150],[1100, 800]]
'''
offset = 100
# srcpts = [[325, 0],[315, 975],[1100,100],[1100,800]]
# dstpts = [[325,0],[315,975],[1200,50],[1200,850]]
# 4) If corners found:
if ret:
# a) draw corners
img = cv2.drawChessboardCorners(img, (nx,ny), corners, ret)
# b) define 4 source points src = np.float32([[,],[,],[,],[,]])
#Note: you could pick any four of the detected corners
# as long as those four corners define a rectangle
#One especially smart way to do this would be to use four well-chosen
# corners that were automatically detected during the undistortion steps
#We recommend using the automatic detection of corners in your code
# src = np.float32(srcpts)
src = np.float32([corners[0], corners[nx-1], corners[-1], corners[-nx]])
print(src)
# c) define 4 destination points dst = np.float32([[,],[,],[,],[,]])
# dst = np.float32(dstpts)
dst = np.float32([[offset, offset], [img_size[0]-offset, offset],
[img_size[0]-offset, img_size[1]-offset],
[offset, img_size[1]-offset]])
print(dst)
# d) use cv2.getPerspectiveTransform() to get M, the transform matrix
M = cv2.getPerspectiveTransform(src, dst)
# e) use cv2.warpPerspective() to warp your image to a top-down view
warped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)
return warped, M
# Show the original image next to the undistorted, top-down-warped result.
top_down, perspective_M = corners_unwarp(img, nx, ny, mtx, dist)
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
ax1.imshow(img)
ax1.set_title('Original Image', fontsize=50)
ax2.imshow(top_down)
ax2.set_title('Undistorted and Warped Image', fontsize=50)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
| 2.671875 | 3 |
andinopy/nextion_util.py | andino-systems/andinopy | 0 | 12765258 | # _ _ _
# / \ _ __ __| (_)_ __ ___ _ __ _ _
# / _ \ | '_ \ / _` | | '_ \ / _ \| '_ \| | | |
# / ___ \| | | | (_| | | | | | (_) | |_) | |_| |
# /_/ \_\_| |_|\__,_|_|_| |_|\___/| .__/ \__, |
# |_| |___/
# by <NAME>
import time
import serial
import sys
import os
e = bytearray([0xFF, 0xFF, 0xFF])
def get_baud_rate(dev_port: serial.Serial, diagnostics: bool = False):
    """Probe a Nextion display for a working baud rate.

    Sends the "connect" command (framed by 0xFF 0xFF 0xFF) at each
    candidate rate and looks for a 'comok' reply. Returns the working
    baud rate, or False when no candidate responds.
    """
    def diag_print(text: str):
        # Print only when diagnostics output was requested.
        if diagnostics:
            print(text)

    for baud_rate in (2400, 4800, 9600, 19200, 38400, 57600, 115200, 921600, 512000, 256000, 250000, 230400):
        dev_port.baudrate = baud_rate
        # Read timeout scales inversely with baud so slow rates get longer.
        dev_port.timeout = 3000 / baud_rate + 0.2
        diag_print(f"trying with {baud_rate} baud")
        dev_port.write(e)
        dev_port.write("connect".encode('ascii'))
        dev_port.write(e)
        r = dev_port.read(128)[:-3]
        if 'comok' in str(r):
            diag_print(f"Connected with {baud_rate} baud")
            # NOTE(review): str(r) on bytes yields the repr ("b'comok ...'"),
            # so this field splitting depends on that exact formatting and
            # .strip("\xff") likely has no effect — confirm against real
            # device output.
            status, unknown1, model, firmware, mcucode, nextion_serial, nextion_flash_size = str(r).strip("\xff").split(
                ',')
            if status.split(' ')[1] == "1":
                diag_print('Touchscreen: enabled')
            else:
                diag_print('Touchscreen: disabled')
            diag_print(
                f"Model:{model}\nFirmware:{firmware}\nMCU-Code:{mcucode}\nSerial:{nextion_serial}\nFlashSize:{nextion_flash_size}")
            return baud_rate
    return False
def force_max_baud(dev_port, filesize, diagnostics=False):
    """Negotiate the highest baud rate the display accepts for upload.

    Sends the 'whmi-wri' upload command for each candidate rate (highest
    first) and waits for the 0x05 acknowledgement byte. Returns True on
    success, False when no rate was acknowledged.
    """
    def diag_print(text: str):
        # Print only when diagnostics output was requested.
        if diagnostics:
            print(text)

    for baud in [921600, 512000, 256000, 250000, 230400, 115200, 57600, 38400, 31250, 19200, 9600]:
        diag_print(f"Trying {baud} baud")
        diag_print(f"SENDING: whmi-wri {filesize},{baud},0")
        dev_port.write(f"whmi-wri {filesize},{baud},0".encode("ascii"))
        dev_port.write(e)
        # Give the display time to process before switching our own rate.
        time.sleep(0.4)
        dev_port.baudrate = baud
        dev_port.timeout = 0.5
        time.sleep(.1)
        r = dev_port.read(1)
        if 0x05 in r:
            # Device acknowledged the new rate.
            return True
    return False
def upload_image(dev_port, filename, filesize):
    """Stream a .tft firmware file to the display in 4 KiB chunks.

    After each chunk, waits for the display's 0x05 acknowledgement.
    Returns False as soon as an ack is missing, True when the whole
    file was accepted.
    """
    with open(filename, 'rb') as image:
        data_count = 0
        while 1:
            data = image.read(4096)
            if len(data) < 1:
                # End of file reached.
                break
            data_count += len(data)
            dev_port.timeout = 5
            dev_port.write(data)
            # Single-line progress indicator, rewritten in place via '\r'.
            sys.stdout.write('\rUpload, %3.1f%%...' % (data_count / float(filesize) * 100.0))
            sys.stdout.flush()
            time.sleep(.5)
            r = dev_port.read(1)
            if 0x05 not in r:
                # Chunk was not acknowledged; abort the upload.
                return False
    return True
def flash(port: str, tft_file: str):
    """Flash *tft_file* onto the Nextion display attached at serial *port*.

    Exits the whole process with status 1 on any failure (baud
    detection, rate negotiation, or upload).
    """
    port = serial.Serial(port, 9600, timeout=None)
    if not port.isOpen():
        port.open()
    if not get_baud_rate(port, diagnostics=True):
        print("Baud Rate could not be specified")
        exit(1)
    file_size = os.path.getsize(tft_file)
    if not force_max_baud(port, file_size, diagnostics=True):
        print("Could not force baud rate")
        exit(1)
    if not upload_image(port, tft_file, file_size):
        print("could not upload tft File")
        exit(1)
if __name__ == "__main__":
if len(sys.argv) != 2:
print('usage:\npython3 nextion_util.py file_to_upload.tft')
file = sys.argv[1]
flash("/dev/ttyAMA0", file)
exit(0)
| 2.46875 | 2 |
openff/bespokefit/executor/services/coordinator/worker.py | openforcefield/openff-bespokefit | 7 | 12765259 | <reponame>openforcefield/openff-bespokefit<gh_stars>1-10
import asyncio
import logging
import time
import redis
from openff.bespokefit.executor.services import current_settings
from openff.bespokefit.executor.services.coordinator.storage import (
TaskStatus,
get_n_tasks,
get_task,
peek_task_status,
pop_task_status,
push_task_status,
save_task,
)
_logger = logging.getLogger(__name__)
async def _process_task(task_id: int) -> bool:
    """Advance a coordinator task by one update step.

    Returns True when the task has finished (``success`` or ``errored``),
    False while it is still in progress. State transitions of the task
    and its running stage are printed, and the task is persisted.
    """
    task = get_task(task_id)

    # Remember the pre-update status so transitions can be reported.
    task_status = task.status

    if task.status == "success" or task.status == "errored":
        return True

    if task.running_stage is None:
        # Promote the next pending stage and let it initialise itself.
        task.running_stage = task.pending_stages.pop(0)
        await task.running_stage.enter(task)

    stage_status = task.running_stage.status
    await task.running_stage.update()

    task_state_message = f"[task id={task_id}] transitioned from {{0}} -> {{1}}"

    # Report waiting -> X transitions before the stage transition message.
    if task.status != task_status and task_status == "waiting":
        print(task_state_message.format(task_status, task.status), flush=True)

    if stage_status != task.running_stage.status:
        print(
            f"[task id={task_id}] {task.running_stage.type} transitioned from "
            f"{stage_status} -> {task.running_stage.status}",
            flush=True,
        )

    if task.running_stage.status in {"success", "errored"}:
        # Stage is done: archive it so the next update starts a new one.
        task.completed_stages.append(task.running_stage)
        task.running_stage = None

    # Report all other task transitions after the stage message.
    if task.status != task_status and task_status != "waiting":
        print(task_state_message.format(task_status, task.status), flush=True)

    save_task(task)

    return False
async def cycle():  # pragma: no cover
    """Main coordinator loop: update running tasks, promote waiting ones.

    Runs until cancelled, tolerating up to three consecutive
    Redis/connection errors before re-raising.
    """

    settings = current_settings()

    n_connection_errors = 0

    while True:

        sleep_time = settings.BEFLOW_COORDINATOR_MAX_UPDATE_INTERVAL

        try:

            start_time = time.perf_counter()

            # First update any running tasks, pushing them to the 'complete' queue if
            # they have finished, so as to figure out how many new tasks can be moved
            # from running to waiting.
            task_id = peek_task_status(TaskStatus.running)
            processed_task_ids = set()

            while task_id is not None:

                if task_id in processed_task_ids:
                    # We have cycled through the entire running queue once.
                    break

                has_finished = await _process_task(task_id)
                # Needed to let other async threads run even if there are hundreds of
                # tasks running
                await asyncio.sleep(0.0)

                push_task_status(
                    pop_task_status(TaskStatus.running),
                    TaskStatus.running if not has_finished else TaskStatus.complete,
                )

                processed_task_ids.add(task_id)
                task_id = peek_task_status(TaskStatus.running)

            n_running_tasks = get_n_tasks(TaskStatus.running)

            # Fill free running slots from the waiting queue.
            n_tasks_to_queue = min(
                settings.BEFLOW_COORDINATOR_MAX_RUNNING_TASKS - n_running_tasks,
                get_n_tasks(TaskStatus.waiting),
            )

            for _ in range(n_tasks_to_queue):
                push_task_status(
                    pop_task_status(TaskStatus.waiting), TaskStatus.running
                )

            # A clean pass resets the consecutive-error counter.
            n_connection_errors = 0

            # Make sure we don't cycle too often
            sleep_time = max(sleep_time - (time.perf_counter() - start_time), 0.0)

        except (KeyboardInterrupt, asyncio.CancelledError):
            break

        except (
            ConnectionError,
            redis.exceptions.ConnectionError,
            redis.exceptions.BusyLoadingError,
        ) as e:

            n_connection_errors += 1

            if n_connection_errors >= 3:
                raise e

            if isinstance(e, redis.exceptions.RedisError):
                _logger.warning(
                    f"Failed to connect to Redis - {3 - n_connection_errors} attempts "
                    f"remaining."
                )

        await asyncio.sleep(sleep_time)
| 2.125 | 2 |
client/python/test/test_upload_locations.py | google/strabo | 70 | 12765260 | #
# Copyright 2015 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
from strabo.connection import StraboConnection
from strabo.location import Location, LocationSet
import unittest
import os
TEST_DIR = os.path.dirname(os.path.realpath(__file__))
class UploadTest(unittest.TestCase):
    """Integration tests for uploading location sets and neighbor queries.

    NOTE(review): uses ``assertItemsEqual``, which exists only on
    Python 2 (renamed ``assertCountEqual`` in Python 3) — this module
    appears to target Python 2; confirm before porting.
    """

    def test_simple_upload(self):
        # The single nearest neighbor of (2, 20) within set1.
        response = Location(2, 20).nearest_neighbors(self.set1, 1).run()
        self.assertEquals(response, {'result': [{'lat': 0.11, 'lon': 10.22}]})

    def test_location_set_map(self):
        # Map every point of set1 to its nearest neighbor in set2.
        response = self.set1.map(lambda x: x.nearest_neighbor(self.set2)).run()
        self.assertTrue('result' in response)
        self.assertItemsEqual(response['result'], [{'lat': 31.0, 'lon': -120.01},
                                                   {'lat': -2.0, 'lon': 8.0}])

    def setUp(self):
        # Upload two small CSV fixtures that use different column names.
        self.set1 = LocationSet(filename =
            os.path.join(TEST_DIR, 'testdata', 'upload_locations_test.csv'),
            id_column = 'id',
            lat_column = 'lat',
            lon_column = 'lon')
        self.set2 = LocationSet(filename =
            os.path.join(TEST_DIR, 'testdata', 'second_test_location_set.csv'),
            id_column = 'point_id',
            lat_column = 'latitude',
            lon_column = 'longitude')

    def tearDown(self):
        # Clearing also verifies the expected number of uploaded rows.
        self.assertEquals(self.set1.clear().run(), {'result': {"num_rows_affected": 2}})
        self.assertEquals(self.set2.clear().run(), {'result': {"num_rows_affected": 3}})
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 2.390625 | 2 |
src/workspace.py | monkey2000/wtfcf | 1 | 12765261 | #!/usr/bin/env python3
# coding=UTF-8
import os
import sys
import shutil
import ConfigParser
from logger import *
def generate_workspace(config, info):
    """Create a contest working directory with template, samples and config.

    Layout created under the current directory:
        <contest_id><problem_id>/
            solution.<suffix>   (copied from the configured template)
            sample/<n>.in / <n>.out  (one pair per test sample)
            config.ini          (project metadata)

    config: global ConfigParser with a [wtf_cf] section, or a falsy value
        when no global config file exists.
    info: problem description dict; must contain 'contest_id',
        'problem_id' and 'test_samples'. It is NOT modified.
    """
    cwd = os.path.abspath(os.getcwd())
    ws_dir = cwd + '/' + info['contest_id'] + info['problem_id']
    sample_dir = ws_dir + '/sample'
    if os.path.isdir(ws_dir):
        fatal('Directory {} do exist, exit.'.format(os.path.basename(ws_dir)))
    os.mkdir(ws_dir)
    if config:
        # Copy the solution template into the new workspace.
        template_file = os.path.expandvars(config.get('wtf_cf', 'template_file'))
        shutil.copy(template_file, ws_dir + '/solution.' + config.get('wtf_cf', 'template_suffix'))
    os.mkdir(sample_dir)
    # Write each test sample as a numbered .in/.out pair.
    for (index, sample) in enumerate(info['test_samples']):
        with open(sample_dir + '/' + str(index) + '.in', 'w') as in_file:
            in_file.write(sample['input'])
        with open(sample_dir + '/' + str(index) + '.out', 'w') as out_file:
            out_file.write(sample['output'])
    # BUG FIX: work on a copy — the original aliased `info` and then
    # overwrote its 'test_samples' entry, mutating the caller's dict.
    conf = dict(info)
    conf['samples'] = len(info['test_samples'])
    conf['test_samples'] = None
    conf['compile_command'] = config.get('wtf_cf', 'compile_command') if config else 'echo "There\'s no global config file." && exit -1'
    config_file = ConfigParser.ConfigParser()
    config_file.add_section('project')
    for (key, value) in conf.items():
        if value:
            config_file.set('project', key, value)
    config_file.set('project', 'solution', 'solution.' + (config.get('wtf_cf', 'template_suffix') if config else 'cxx'))
    with open(ws_dir + '/config.ini', 'w') as file:
        config_file.write(file)
| 2.359375 | 2 |
astropop/polarimetry/__init__.py | rudnerlq/astropop | 3 | 12765262 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from .dualbeam import * # noqa
| 0.96875 | 1 |
test_mptt/models.py | kollivier/django-tree-tests | 0 | 12765263 | <reponame>kollivier/django-tree-tests
from django.db import models
# Create your models here.
from django.db import models
from mptt.models import MPTTModel, TreeForeignKey
class Object(models.Model):
    # Simple named object used by the tree tests.
    name = models.CharField(max_length=50)
class TreeNode(MPTTModel):
    # Node in an MPTT-managed tree; root nodes have parent=None.
    name = models.CharField(max_length=50)
    parent = TreeForeignKey('self', on_delete=models.CASCADE, null=True, blank=True, related_name='children')
| 2.609375 | 3 |
socketL/zhangll/client/reader.py | MaochengXiong/PythonLearning | 0 | 12765264 | <gh_stars>0
import threading
import json
import status
class Reader(threading.Thread):
    """Background thread that reads JSON messages from a socket.

    When a message with code 3 or 5 arrives, sets ``status.status = True``
    and prints the message payload.
    """

    def __init__(self, threadId, socket):
        threading.Thread.__init__(self)
        self.threadId = threadId
        self.socket = socket

    def run(self):
        while True:
            receivedBytes = self.socket.recv(1024)
            if not receivedBytes:
                # BUG FIX: an empty read means the peer closed the
                # connection; stop instead of feeding b'' to json.loads,
                # which raised an exception on every disconnect.
                break
            print(str(receivedBytes))
            receivedJson = json.loads(receivedBytes)
            code = receivedJson['code']
            if code == 3 or code == 5:
                status.status = True
            print(receivedJson['data'])
search/graph.py | almonteloya/project2 | 0 | 12765265 | <filename>search/graph.py
import networkx as nx
class Graph:
    """
    Directed graph (loaded from a ';'-delimited adjacency list) with
    breadth-first traversal and shortest-path search.

    Attributes:
        graph: a networkx DiGraph read from the given file
    Methods:
        bfs: breadth-first search / traversal entry point
    """

    def __init__(self, filename: str):
        """Load the adjacency-list file into a directed graph."""
        self.graph = nx.read_adjlist(filename, create_using=nx.DiGraph, delimiter=";")

    def bfs(self, start, end=None):
        """Breadth-first traversal or pathfinding on the graph.

        * If there's no end node, return a list with the order of traversal.
        * If there is an end node and a path exists, return the shortest path.
        * If there is an end node and a path does not exist, return None.
        * Invalid inputs (unknown nodes, start == end, no neighbors) return
          an explanatory message string, matching the original style.
        """
        if start not in self.graph.nodes():
            return ("The start node is not in the graph")
        if end and end not in self.graph.nodes():
            return ("The end node is not in the graph")
        if start == end:
            return ("Start node is end node: " + start)
        if len(list(self.graph.neighbors(start))) == 0:
            # BUG FIX: this branch was `return print(...)`, which printed
            # the message but returned None — inconsistent with the other
            # error branches, which return the message string.
            return ("Node doesn't have neighbor: " + start)
        if end is None:
            return self.BFS_Traversal(start)
        return self.ShortestPath_BFS(start, end)

    def ShortestPath_BFS(self, start: str, goal: str) -> list:
        """
        Find the shortest path between two nodes using breadth-first search.

        Parameters:
            start: starting node
            goal: ending node

        Returns the path as a list of nodes, or None when no path exists.
        """
        exploredNodes = []      # nodes already taken off the queue
        queue = [[start]]       # queue of candidate paths
        while queue:
            path = queue.pop(0)
            currentNode = path[-1]
            exploredNodes.append(currentNode)
            for neighbor in self.graph.neighbors(currentNode):
                if neighbor == goal:
                    # Found the goal: the shortest path is this path + goal.
                    return (path + [neighbor])
                if neighbor not in exploredNodes:
                    exploredNodes.append(neighbor)
                    queue.append(path + [neighbor])
        # The entire reachable component was explored without the goal.
        return None

    def BFS_Traversal(self, start: str):
        """
        Traverse the graph breadth-first from *start*.

        Parameters:
            start: starting node

        Returns the list of visited nodes in visit order.
        """
        explored = []
        queue = [start]
        while queue:
            node = queue.pop(0)
            if node not in explored:
                explored.append(node)
                for neighbor in self.graph.neighbors(node):
                    queue.append(neighbor)
        return explored
| 4.15625 | 4 |
main.py | myungseokang/portfolio-backend | 0 | 12765266 | <reponame>myungseokang/portfolio-backend
from typing import List
from fastapi import Depends, FastAPI, HTTPException
from sqlalchemy.orm import Session
from utils.users import get_user_by_email, create_user, get_users, get_user
from schemas.users import User, UserCreate
from database import SessionLocal, engine, Base
Base.metadata.create_all(bind=engine)
app = FastAPI()
def get_db():
    """Yield a database session, ensuring it is closed after the request."""
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()
@app.post("/users/", response_model=User)
def create_user_view(user: UserCreate, db: Session = Depends(get_db)):
db_user = get_user_by_email(db, email=user.email)
if db_user:
raise HTTPException(status_code=400, detail="Email already registered")
return create_user(db=db, user=user)
@app.get("/users/", response_model=List[User])
def read_users_view(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
users = get_users(db, skip=skip, limit=limit)
return users
@app.get("/users/{user_id}", response_model=User)
def read_user_view(user_id: int, db: Session = Depends(get_db)):
db_user = get_user(db, user_id=user_id)
if db_user is None:
raise HTTPException(status_code=404, detail="User not found")
return db_user
| 2.296875 | 2 |
python/yugabyte_db_thirdparty/packager.py | pkj415/yugabyte-db-thirdparty | 0 | 12765267 | <filename>python/yugabyte_db_thirdparty/packager.py
from yugabyte_db_thirdparty.git_util import get_git_sha1
from yugabyte_db_thirdparty.util import (
compute_file_sha256,
create_symlink_and_log,
log_and_run_cmd,
remove_path,
YB_THIRDPARTY_DIR,
)
from yugabyte_db_thirdparty.checksums import CHECKSUM_SUFFIX
import os
import logging
import subprocess
import time
EXCLUDE_PATTERNS_RELATIVE_TO_ARCHIVE_ROOT = [
'.git',
'a.out', # TODO: figure out what generates this file.
'build',
'download',
'src',
'venv',
]
GENERAL_EXCLUDE_PATTERNS = ['*.pyc', '*.o']
MAX_UPLOAD_ATTEMPTS = 20
ARCHIVE_SUFFIX = '.tar.gz'
class Packager:
    """Create a .tar.gz archive of the thirdparty checkout and upload it
    as a GitHub release via the ``hub`` CLI."""

    build_dir_parent: str       # parent directory of the checkout
    archive_dir_name: str       # checkout basename; top-level dir inside the archive
    archive_tarball_name: str   # <archive_dir_name>.tar.gz
    archive_tarball_path: str   # full path of the tarball
    archive_checksum_path: str  # tarball path + checksum suffix
    git_sha1: str               # commit the release is tagged against

    def __init__(self) -> None:
        self.build_dir_parent = os.path.dirname(YB_THIRDPARTY_DIR)
        self.archive_dir_name = os.path.basename(YB_THIRDPARTY_DIR)
        self.archive_tarball_name = self.archive_dir_name + ARCHIVE_SUFFIX
        self.archive_tarball_path = os.path.join(self.build_dir_parent, self.archive_tarball_name)
        self.archive_checksum_path = self.archive_tarball_path + CHECKSUM_SUFFIX
        self.git_sha1 = get_git_sha1(YB_THIRDPARTY_DIR)

    def create_package(self) -> None:
        """Build the tarball, its SHA-256 checksum file, and stable symlinks."""
        if os.path.exists(self.archive_tarball_path):
            logging.info("File already exists, deleting: %s", self.archive_tarball_path)
            os.remove(self.archive_tarball_path)

        # Create a symlink with a constant name so we can copy the file around and use it for
        # creating artifacts for pull request builds.
        archive_symlink_path = os.path.join(YB_THIRDPARTY_DIR, 'archive' + ARCHIVE_SUFFIX)
        archive_checksum_symlink_path = archive_symlink_path + CHECKSUM_SUFFIX

        tar_cmd = ['tar']
        # Also exclude the symlinks themselves so they don't end up in the archive.
        patterns_to_exclude = EXCLUDE_PATTERNS_RELATIVE_TO_ARCHIVE_ROOT + [
            os.path.basename(file_path) for file_path in [
                archive_symlink_path, archive_checksum_symlink_path
            ]
        ]
        for excluded_pattern in patterns_to_exclude:
            tar_cmd.extend([
                '--exclude',
                '%s/%s' % (self.archive_dir_name, excluded_pattern)
            ])
        for excluded_pattern in GENERAL_EXCLUDE_PATTERNS:
            tar_cmd.extend(['--exclude', excluded_pattern])
        tar_cmd.extend(['-czf', self.archive_tarball_path, self.archive_dir_name])
        log_and_run_cmd(tar_cmd, cwd=self.build_dir_parent)

        sha256 = compute_file_sha256(self.archive_tarball_path)
        with open(self.archive_checksum_path, 'w') as sha256_file:
            sha256_file.write('%s %s\n' % (sha256, self.archive_tarball_name))
        logging.info(
            "Archive SHA256 checksum: %s, created checksum file: %s",
            sha256, self.archive_checksum_path)

        # Replace any stale symlinks with fresh ones.
        for file_path in [archive_symlink_path, archive_checksum_symlink_path]:
            remove_path(file_path)
        create_symlink_and_log(self.archive_tarball_path, archive_symlink_path)
        create_symlink_and_log(self.archive_checksum_path, archive_checksum_symlink_path)

    def upload_package(self, tag: str) -> None:
        """Create a GitHub release for *tag*, retrying transient failures."""
        hub_cmd = [
            'hub', 'release', 'create', tag,
            '-m', 'Release %s' % tag,
            '-a', self.archive_tarball_path,
            '-a', self.archive_checksum_path,
            '-t', self.git_sha1
        ]
        delay_sec = 10
        for attempt_index in range(1, MAX_UPLOAD_ATTEMPTS + 1):
            try:
                log_and_run_cmd(hub_cmd, cwd=YB_THIRDPARTY_DIR)
                break
            except subprocess.CalledProcessError as ex:
                if attempt_index == MAX_UPLOAD_ATTEMPTS:
                    raise
                logging.exception(
                    "Failed to upload release (attempt %d out of %d). Waiting for %d sec.",
                    attempt_index, MAX_UPLOAD_ATTEMPTS, delay_sec)
                time.sleep(delay_sec)
                # Back off a little longer before each retry.
                delay_sec += 2
| 2.234375 | 2 |
WebApp/forms.py | sunniqua97/Vote-Power | 0 | 12765268 | from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired, EqualTo, Email
# Form for address
# Form for address
class AddressForm(FlaskForm):
    """Single-field form used to look up information by street address."""
    address = StringField('Address',validators=[DataRequired()])
    submit = SubmitField('Submit')
# Form for email notifications
# Form for email notifications
class EmailForm(FlaskForm):
    """Sign-up form for email notifications.

    NOTE(review): the fullName label '<NAME>' looks like an
    anonymisation placeholder — confirm the intended label text.
    """
    fullName = StringField('<NAME>',validators=[DataRequired()])
    email = StringField('Email', validators=[DataRequired(), Email()])
    address = StringField('Address',validators=[DataRequired()])
    submit = SubmitField('Sign Up')
| 2.859375 | 3 |
src/materia/actions/action.py | kijanac/Materia | 0 | 12765269 | <gh_stars>0
import abc
__all__ = ["Action", "ActionSignal"]
class Action(abc.ABC):
    """Abstract base class for actions executed against a task graph."""

    @abc.abstractmethod
    def run(self, node, tasks, links, done):
        """Execute the action; subclasses must override."""
        pass
class ActionSignal(Exception):
    """Exception used to carry an action outcome up the call stack."""

    def __init__(self, message=None, result=None, actions=None):
        # result and actions have default values only
        # so they can come after message which has a default value
        Exception.__init__(self, message)
        self.result = result
        self.actions = actions
| 3.1875 | 3 |
orgconfig/__init__.py | yiwenlong/fabric-easy-dev | 14 | 12765270 | # -*- encoding: utf-8 -*-
#
# Copyright 2020 Yiwenlong(<EMAIL>)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import yaml
from orgconfig.msp import static_msp_support
from orgconfig.deploy import deploy_builder
from utils.fileutil import mkdir_if_need
# Keys used when reading the raw organization configuration mapping.
KEY_ORGANIZATIONS = "Organizations"
KEY_PEERS = "Peers"
KEY_ORDERERS = "Orderers"
KEY_NAME = "Name"
class Node(dict):
    """A single peer/orderer node; raw config keys are readable as attributes."""
    def __getattr__(self, item):
        # Attribute access falls through to the dict entries. NOTE(review):
        # unknown names raise KeyError rather than AttributeError.
        return self[item]
    def __init__(self, org, msp_holder, deploy_build, **values):
        super(Node, self).__init__()
        self.update(values)
        # Org must be assigned before Domain/deploy_handler/FullName, which
        # all read attributes of the owning organization.
        self.Org = org
        self.Domain = "%s.%s" % (self.Name, self.Org.Domain)
        self.msp_holder = msp_holder
        self.deploy_handler = deploy_build(self, self.Org.Dir)
        self.FullName = "%s.%s" % (self.Org.Name, self.Name)
class Organization(dict):
    """One organization of the network: creates its MSP material under
    target_dir and wraps its peer/orderer nodes. Raw config keys are
    readable as attributes."""
    def __getattr__(self, item):
        # Fall through to the dict entries (KeyError for unknown names).
        return self[item]
    def __init__(self, target_dir, msp_support=static_msp_support, **values):
        super().__init__()
        self.update(values)
        self.logger = logging.getLogger("organization")
        self.Dir = os.path.join(target_dir, self.Name)
        mkdir_if_need(self.Dir)
        self.msp_support = msp_support(self)
        self.logger.debug("Config organization: %s, mspid: %s" % (self.Name, self.MSPID))
        self.logger.debug("\tOrganization directory: %s" % self.Dir)
        # Generate the org-level MSP first: each node's msp holder below is
        # derived from it.
        self.msp_support.create_msp()
        msp_holder = self.msp_support.msp_holder
        self.PeerNodes = {n[KEY_NAME]: Node(self, msp_holder.node_msp_holder(n[KEY_NAME]), deploy_builder("Peer"), **n)
                          for n in self.Peers}
        self.OrdererNodes = {o[KEY_NAME]: Node(self, msp_holder.node_msp_holder(o[KEY_NAME]), deploy_builder("Orderer"), **o)
                             for o in self.Orderers}
    def deploy_peers(self):
        """Deploy every peer node of this organization."""
        for peer in self.PeerNodes.values():
            peer.deploy_handler.deploy()
    def deploy_orderers(self, genesis_block):
        """Deploy every orderer node, passing each the genesis block."""
        for orderer in self.OrdererNodes.values():
            orderer.deploy_handler.deploy(genesis_block)
    def msp_dir(self):
        # Path to the organization-level MSP directory.
        return self.msp_support.msp_holder.org_msp_dir
    def admin(self):
        # MSP holder for the organization's admin identity.
        return self.msp_support.msp_holder.admin_msp_holder()
    def default_endorser(self):
        # First peer in dict order, used as the default endorsing peer.
        for peer in self.PeerNodes.values():
            return peer
    def tree_walk_peers(self, invoke_func):
        """Invoke *invoke_func* once for every peer node."""
        for peer in self.PeerNodes.values():
            invoke_func(peer)
def config_organizations(raw_conf, target_dir):
    """Build a name -> Organization map from the raw configuration list."""
    organizations = {}
    for org_conf in raw_conf:
        organizations[org_conf["Name"]] = Organization(target_dir, **org_conf)
    return organizations
def find_node(org_map, node):
    """Resolve an ``org.node`` identifier to a peer or orderer Node.

    Raises ValueError when the identifier is malformed or when the
    organization/node cannot be found.
    """
    parts = str(node).split(".")
    # Validate explicitly: previously "a.b.c" raised a cryptic
    # "too many values to unpack" ValueError from tuple unpacking.
    if len(parts) != 2:
        raise ValueError("Invalid node identifier: %s" % node)
    org_name, node_name = parts
    if org_name not in org_map:
        raise ValueError("Organization not found: %s" % org_name)
    org = org_map[org_name]
    if node_name in org.PeerNodes:
        return org.PeerNodes[node_name]
    if node_name in org.OrdererNodes:
        return org.OrdererNodes[node_name]
    raise ValueError("Node not found: %s" % node)
def find_user(org_map, user):
    """Resolve an ``org.user`` identifier to that user's MSP holder."""
    org_name, user_name = str(user).split(".")
    if org_name not in org_map:
        raise ValueError("Organization not found: %s" % org_name)
    target_org = org_map[org_name]
    return target_org.msp_support.msp_holder.user_msp_holder(user_name)
| 1.835938 | 2 |
rep2recall/engine/search.py | patarapolw/rep2recall-py | 2 | 12765271 | from datetime import datetime, timedelta
import re
from typing import Union, Callable, Any
import math
import functools
from uuid import uuid4
from .typing import IParserResult
# Fields matched when a search term has no explicit "field:" prefix.
ANY_OF = {"template", "front", "mnemonic", "entry", "deck", "tag"}
# Fields whose values are interpreted as dates / relative time offsets.
IS_DATE = {"created", "modified", "nextReview"}
# Fields compared as strings (regex/substring) rather than numbers.
IS_STRING = {"template", "front", "back", "mnemonic", "deck", "tag", "entry"}
class SearchParser:
    """Parses a user query string into a MongoDB-style condition dict.

    Parsing also collects side products on the instance: ``is_`` flags,
    ``sort_by`` and ``desc`` ordering hints.
    """
    def __init__(self):
        # Last ValueError raised by a sub-parser (re-raised if all fail).
        self.error = None
        # NOTE(review): initialised as a set, but _parse_full_expr later
        # rebinds it to a plain string for unknown "is:<flag>" terms --
        # confirm which type downstream consumers expect.
        self.is_ = set()
        self.sort_by = None
        self.desc = False
    def parse(self, q: str) -> IParserResult:
        """Top-level entry point; falls back to an empty condition on failure."""
        try:
            return IParserResult(
                cond=self._parse(q),
                is_=self.is_,
                sortBy=self.sort_by,
                desc=self.desc
            )
        except ValueError:
            return IParserResult(cond=dict())
    def _parse(self, q: str):
        # Try each sub-parser in order of precedence; the first one that does
        # not raise ValueError wins. The last error is re-raised if all fail.
        for method in [
            self._remove_brackets,
            self._parse_sep(" OR "),
            self._parse_sep(" "),
            self._parse_neg,
            self._parse_full_expr,
            self._parse_partial_expr
        ]:
            try:
                return method(q.strip())
            except ValueError as e:
                self.error = e
        raise self.error
    def _remove_brackets(self, q: str):
        # "( ... )" around the whole query: strip and recurse.
        if re.fullmatch(r"\([^)]+\)", q):
            return self._parse(q[1:-1])
        raise ValueError("Not bracketed")
    def _parse_sep(self, sep: str):
        # Returns a parser that splits on *sep* (" OR " -> $or, " " -> $and).
        # Bracketed sub-expressions are temporarily replaced with unique ids
        # so the split cannot cut through them.
        brackets = dict()
        def _escape_brackets(m):
            id_ = uuid4().hex
            brackets[id_] = m.group(0)
            return id_
        def _parse_sep_inner(q: str):
            q = re.sub(r"\([^)]+\)", _escape_brackets, q)
            tokens = q.split(sep)
            # Restore the escaped bracket groups inside each token.
            for i, t in enumerate(tokens):
                for k, v in brackets.items():
                    tokens[i] = tokens[i].replace(k, v)
            if len(tokens) >= 2:
                # Drop tokens that parsed to None/empty (e.g. sortBy terms).
                parsed_tokens = list(filter(lambda x: x, (self._parse(t) for t in tokens)))
                if len(parsed_tokens) > 1:
                    k = "$or" if sep == " OR " else "$and"
                    return {k: parsed_tokens}
                elif len(parsed_tokens) == 1:
                    return parsed_tokens[0]
                else:
                    return dict()
            raise ValueError(f"Not separated by '{sep}'")
        return _parse_sep_inner
    def _parse_neg(self, q: str):
        # Leading "-" negates a term; "-sortBy:<field>" flips sort direction
        # metadata without contributing a condition.
        if q and q[0] == "-":
            kw = "-sortBy:"
            if q.startswith(kw) and q != kw:
                self.sort_by = q[len(kw):]
                return None
            return {"$not": self._parse(q)}
        raise ValueError("Not negative")
    def _parse_full_expr(self, q: str):
        # key<op>value expressions, e.g. deck:math, srsLevel>=3, front~"re.*".
        m = re.fullmatch(r'([\w-]+)(:|~|[><]=?|=)([\w-]+|"[^"]+")', q)
        if m:
            k, op, v = m.groups()
            if len(v) > 2 and v[0] == '"' and v[-1] == '"':
                v = v[1:-1]
            else:
                # Unquoted numeric literals become floats.
                m1 = re.fullmatch(r"\d+(?:\.\d+)?", v)
                if m1:
                    v = float(v)
            if k == "is":
                # "is:" shortcuts rewrite themselves into concrete conditions.
                if v == "due":
                    k = "nextReview"
                    op = "<="
                    v = str(datetime.now())
                elif v == "leech":
                    k = "srsLevel"
                    op = "="
                    v = 0
                elif v == "new":
                    k = "nextReview"
                    v = "NULL"
                elif v == "marked":
                    k = "tag"
                    op = "="
                    v = "marked"
                else:
                    # Unknown flag: record it as metadata, no condition.
                    self.is_ = v
                    return None
            if k in {"due", "nextReview"} and op == ":":
                k = "nextReview"
                op = "<="
            elif k in {"created", "modified"} and op == ":":
                op = ">="
            elif k == "sortBy":
                self.sort_by = v
                return None
            if v == "NULL":
                # Field empty or missing entirely.
                return {"$or": [
                    {k: ""},
                    {k: {"$exists": False}}
                ]}
            if k in IS_DATE:
                # Relative offsets ("3d", "-2wk", "NOW") become absolute dates.
                try:
                    v = str(datetime.now() + parse_timedelta(v))
                    if op == ":":
                        if k == "nextReview":
                            op = "<="
                        else:
                            op = ">="
                except ValueError:
                    pass
            if op == ":":
                # Plain ":" on a string field means substring (escaped regex).
                if isinstance(v, str) or k in IS_STRING:
                    v = {"$regex": re.escape(str(v))}
            elif op == "~":
                v = {"$regex": str(v)}
            elif op == ">=":
                v = {"$gte": v}
            elif op == ">":
                v = {"$gt": v}
            elif op == "<=":
                v = {"$lte": v}
            elif op == "<":
                v = {"$lt": v}
            return {k: v}
        raise ValueError("Not full expression")
    @staticmethod
    def _parse_partial_expr(q: str):
        # Bare term without ":": match it against every ANY_OF field and all
        # of the card's data values ("@*").
        if q and ":" not in q:
            or_cond = []
            for a in ANY_OF:
                if a in IS_STRING:
                    or_cond.append({a: {"$regex": re.escape(q)}})
                else:
                    or_cond.append({a: q})
            or_cond.append({"@*": {"$regex": re.escape(q)}})
            return {"$or": or_cond}
        raise ValueError("Not partial expression")
def mongo_filter(cond: Union[str, dict]) -> Callable[[dict], bool]:
    """Compile a Mongo-style condition (or raw query string) into a predicate
    over card dicts."""
    if isinstance(cond, str):
        # Parse query strings first, then compile the resulting dict.
        cond = SearchParser().parse(cond).cond
        return mongo_filter(cond)
    def inner_filter(item: dict) -> bool:
        # NOTE(review): a "$and"/"$or"/"$not" key returns immediately, so any
        # sibling keys at the same level are ignored -- confirm conditions
        # never mix operator and field keys in one dict.
        for k, v in cond.items():
            if k[0] == "$":
                if k == "$and":
                    return all(mongo_filter(x)(item) for x in v)
                elif k == "$or":
                    return any(mongo_filter(x)(item) for x in v)
                elif k == "$not":
                    return not mongo_filter(v)(item)
            else:
                item_k = dot_getter(item, k)
                # A nested {"$op": ...} dict is an operator comparison.
                if isinstance(v, dict) and any(k0[0] == "$" for k0 in v.keys()):
                    return _mongo_compare(item_k, v)
                elif isinstance(item_k, list):
                    # List-valued fields match when they contain the value.
                    if v not in item_k:
                        return False
                elif item_k != v:
                    return False
        return True
    return inner_filter
def parse_timedelta(s: str) -> timedelta:
    """Convert shorthand like ``3d``, ``-2wk`` or ``NOW`` into a timedelta.

    Raises ValueError for inputs with no number or an unknown unit suffix.
    """
    if s == "NOW":
        return timedelta()
    match = re.search(r"([-+]?\d+)(\S*)", s)
    if match:
        amount = int(match[1])
        unit = match[2]
        if unit in ("m", "min"):
            return timedelta(minutes=amount)
        if unit in ("h", "hr"):
            return timedelta(hours=amount)
        if unit == "d":
            return timedelta(days=amount)
        if unit in ("w", "wk"):
            return timedelta(weeks=amount)
        if unit in ("M", "mo"):
            # Months approximated as 30 days.
            return timedelta(days=30 * amount)
        if unit in ("y", "yr"):
            # Years approximated as 365 days.
            return timedelta(days=365 * amount)
    raise ValueError("Invalid timedelta")
def sorter(sort_by: str, desc: bool) -> Callable[[Any], bool]:
    """Build a ``key=`` callable sorting card dicts by field *sort_by*.

    Values are normalised by _sort_convert so mixed types still compare:
    numbers sort before strings, None/False lowest.
    """
    def pre_cmp(a, b):
        m = _sort_convert(a)
        n = _sort_convert(b)
        if isinstance(m, (float, int, str)):
            if type(m) == type(n):
                return 1 if m > n else 0 if m == n else -1
            elif isinstance(m, str):
                # A string compares greater than any number.
                return 1
            else:
                return -1
        else:
            return 0
    # Negate the comparison for descending order; cmp_to_key adapts the
    # two-argument comparison into a sort key.
    return functools.cmp_to_key(lambda x, y: -pre_cmp(dot_getter(x, sort_by, False), dot_getter(y, sort_by, False))
                                if desc else pre_cmp(dot_getter(x, sort_by, False), dot_getter(y, sort_by, False)))
def dot_getter(d: dict, k: str, get_data: bool = True) -> Any:
    """Fetch a possibly nested value from a card dict by a dotted key.

    "@name" keys delegate to the card's key/value data list; "*" segments
    expand a dict level into the list of its values. When *get_data* is true
    the result is merged with any matching data-list value.
    """
    if k[0] == "@":
        return data_getter(d, k[1:])
    v = d
    for kn in k.split("."):
        if isinstance(v, dict):
            if kn == "*":
                v = list(v.values())
            else:
                # Missing keys resolve to {} so traversal can continue.
                v = v.get(kn, dict())
        elif isinstance(v, list):
            try:
                v = v[int(kn)]
            except (IndexError, ValueError):
                v = None
            break
        else:
            break
    # An empty dict here means the path did not exist.
    if isinstance(v, dict) and len(v) == 0:
        v = None
    if get_data and k not in {"nextReview", "srsLevel"}:
        # Merge with the card's data-list entry of the same name, flattening
        # into a combined list when both sides have values.
        data = data_getter(d, k)
        if data is not None:
            if v is not None:
                if isinstance(data, list):
                    if isinstance(v, list):
                        v = [*v, *data]
                    elif v is not None:
                        v = [v, *data]
                    else:
                        v = data
                else:
                    if isinstance(v, list):
                        v = [*v, data]
                    elif v is not None:
                        v = [v, data]
                    else:
                        v = data
            else:
                v = data
    return v
def data_getter(d: dict, k: str) -> Union[str, None]:
    """Look up key *k* (case-insensitively) in a card's key/value "data" list.

    "*" returns every value except @nosearch-prefixed ones; unknown keys
    yield None.
    """
    wanted = k.lower()
    try:
        if wanted == "*":
            # noinspection PyTypeChecker
            return [entry["value"] for entry in d["data"]
                    if not entry["value"].startswith("@nosearch\n")]
        if d["data"]:
            for entry in d["data"]:
                if entry["key"].lower() == wanted:
                    return entry["value"]
    except AttributeError:
        pass
    return None
def _mongo_compare(v, v_obj: dict) -> bool:
    """Evaluate an operator dict like {"$regex": ...} against value *v*.

    List values match when any element matches. Returns False when no
    operator matched or a comparison was impossible (TypeError).
    """
    for op, v0 in v_obj.items():
        try:
            if op == "$regex":
                if isinstance(v, list):
                    return any(re.search(str(v0), str(b), flags=re.IGNORECASE) for b in v)
                else:
                    return re.search(str(v0), str(v), flags=re.IGNORECASE) is not None
            elif op == "$substr":
                if isinstance(v, list):
                    return any(str(v0) in str(b) for b in v)
                else:
                    return str(v0) in str(v)
            elif op == "$startswith":
                if isinstance(v, list):
                    return any(str(b).startswith(str(v0)) for b in v)
                else:
                    return str(v).startswith(str(v0))
            elif op == "$exists":
                return (v is not None) == v0
            else:
                # Ordering operators: try numeric comparison first, fall back
                # to the original values when either side is not an int.
                try:
                    _v = int(v)
                    _v0 = int(v0)
                    v, v0 = _v, _v0
                except ValueError:
                    pass
                if op == "$gte":
                    return v >= v0
                elif op == "$gt":
                    return v > v0
                elif op == "$lte":
                    return v <= v0
                elif op == "$lt":
                    return v < v0
        except TypeError:
            # Incomparable types: ignore this operator and keep going.
            pass
    return False
def _sort_convert(x) -> Union[float, str]:
if x is None:
return -math.inf
elif isinstance(x, bool):
return math.inf if x else -math.inf
elif isinstance(x, int):
return float(x)
return str(x)
| 2.578125 | 3 |
dict/create_dict.py | janbodnar/Python-Course | 13 | 12765272 | #!/usr/bin/python
# create_dict.py
weekend = { "Sun": "Sunday", "Mon": "Monday" }
vals = dict(one=1, two=2)
capitals = {}
capitals["svk"] = "Bratislava"
capitals["deu"] = "Berlin"
capitals["dnk"] = "Copenhagen"
d = { i: object() for i in range(4) }
print (weekend)
print (vals)
print (capitals)
print (d)
| 3.734375 | 4 |
webexdev-mtgquality/lambda_function.py | netone-g/Webex-QoE-Dashboard | 0 | 12765273 | <gh_stars>0
import json
import os
import logging
import decimal
import base64
import urllib.request
import urllib.parse
import boto3
from botocore.exceptions import ClientError
from urllib import error
# Module-level logger: reused across warm Lambda invocations.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Region hosting the Secrets Manager secrets (set by the Lambda runtime).
REGION = os.environ['AWS_DEFAULT_REGION']
def lambda_handler(event, context):
    """Lambda entry point: refresh the Webex OAuth token pair, then store the
    quality records of the current in-progress meeting into DynamoDB."""
    logging.info(event)
    SecretReturn = json.loads(get_secret(os.environ['SECRETMANAGER_NAME'], REGION))
    Token = json.loads(get_secret(os.environ['SECRETMANAGER_TOKEN_NAME'], REGION))
    hostEmail = SecretReturn['hostEmail']
    CLIENT_ID = SecretReturn['CLIENT_ID']
    CLIENT_SECRET = SecretReturn['CLIENT_SECRET']
    ACCESS_TOKEN = Token['ACCESS_TOKEN']
    REFRESH_TOKEN = Token['REFRESH_TOKEN']
    # Refresh the OAuth tokens and persist them for the next invocation.
    latest_token = json.loads(oauth(ACCESS_TOKEN, REFRESH_TOKEN, CLIENT_ID, CLIENT_SECRET))
    put_secret(os.environ['SECRETMANAGER_TOKEN_NAME'], latest_token['access_token'], latest_token['refresh_token'])
    try:
        # NOTE(review): only the first in-progress meeting is inspected --
        # confirm a single concurrent meeting is the expected case.
        mtg_res = get_meetings(ACCESS_TOKEN, hostEmail)
        active_meetingid = mtg_res['items'][0]['id']
        qual_res = get_meeting_quality(ACCESS_TOKEN, active_meetingid)
        logging.info("get_meeting_quality: {}".format(json.dumps(qual_res, indent=4, ensure_ascii=False)))
        qual_res = qual_res['items']
        for i, item in enumerate(qual_res):
            # Floats are re-parsed as Decimal because DynamoDB rejects float.
            payload = {
                "meetingid": qual_res[i]['meetingInstanceId'],
                "useremail": qual_res[i]['webexUserEmail'],
                "jointime": qual_res[i]['joinTime'],
                "leavetime": qual_res[i]['leaveTime'],
                "serverregion": qual_res[i]['serverRegion'],
                "items": json.loads(json.dumps(qual_res[i]), parse_float=decimal.Decimal)
            }
            db_response = write_dynamodb(payload, os.environ['DYNAMODB_TABLENAME'])
            if db_response['ResponseMetadata']['HTTPStatusCode'] == 200:
                logging.info("DB Input Success.")
            else:
                logging.info("DB Input Failed.")
    except Exception as e:
        # Errors here (e.g. no active meeting) are logged, not fatal.
        logger.error(f'{e}')
    return {
        'statusCode': 200,
        'body': json.dumps('Hello from Lambda!')
    }
def oauth(access_token: str, refresh_token: str, client_id: str, client_secret: str):
    """Exchange a refresh token for a new access/refresh token pair.

    Returns the raw JSON response body as a string, or the string "null"
    when the request fails (caller json.loads() the result).
    """
    url = "https://webexapis.com/v1/access_token"
    headers = {
        "Content-Type": "application/x-www-form-urlencoded",
        "Authorization": "Bearer " + access_token
    }
    data = {
        "grant_type": "refresh_token",
        "client_id": client_id,
        "client_secret": client_secret,
        "refresh_token": refresh_token
    }
    # Form-encode the body; a non-None data makes this a POST.
    data = urllib.parse.urlencode(data).encode('utf-8')
    req = urllib.request.Request(url, data, headers)
    try:
        with urllib.request.urlopen(req) as f:
            update_token_result = f.read().decode("utf-8")
            logging.info("update_token_result: {}".format(update_token_result))
    except error.HTTPError as e:
        update_token_result = "null"
        logging.error(e)
        logging.error('Error:Could NOT UPDATE Token')
    return update_token_result
def get_meetings(token: str, hostEmail: str):
    """Return the parsed JSON list of in-progress meetings for *hostEmail*.

    NOTE(review): hostEmail is concatenated into the query string unencoded
    -- confirm the API accepts raw "@" characters here.
    """
    meetingType = "meeting"
    state = "inProgress"
    url = "https://webexapis.com/v1/meetings" + "?meetingType=" + meetingType + "&state=" + state + "&hostEmail=" + hostEmail
    headers = {
        "Content-Type": "application/json; charset=UTF-8",
        "Authorization": "Bearer " + token
    }
    # No request body for a GET: urllib only accepts bytes/None for ``data``,
    # and the previous ``data={}`` relied on an accidental http.client
    # fallback while still sending body headers. Matches get_meeting_quality.
    req = urllib.request.Request(url, method="GET", headers=headers)
    with urllib.request.urlopen(req) as f:
        result = json.load(f)
    return result
def get_meeting_quality(token: str, id: str):
    """Fetch per-participant quality records for one meeting instance."""
    url = "https://analytics.webexapis.com/v1/meeting/qualities" + "?meetingId=" + id
    request = urllib.request.Request(
        url,
        method="GET",
        headers={
            "Content-Type": "application/json; charset=UTF-8",
            "Authorization": "Bearer " + token,
        },
    )
    with urllib.request.urlopen(request) as response:
        return json.load(response)
def get_secret(secret_name: str, region_name: str):
    """Fetch a secret from AWS Secrets Manager.

    Returns the secret string, or the base64-decoded bytes for binary
    secrets; ClientError from the API is propagated to the caller.
    """
    client = boto3.session.Session().client(
        service_name='secretsmanager',
        region_name=region_name
    )
    try:
        response = client.get_secret_value(SecretId=secret_name)
    except ClientError as e:
        raise e
    if 'SecretString' in response:
        return response['SecretString']
    return base64.b64decode(response['SecretBinary'])
def put_secret(secret_name: str, access_token: str, refresh_token: str):
    """Store the refreshed OAuth token pair back into Secrets Manager.

    Returns the raw put_secret_value response.
    """
    session = boto3.session.Session()
    client = session.client(
        service_name='secretsmanager'
    )
    # json.dumps escapes quotes/backslashes; the previous hand-concatenated
    # JSON string produced invalid JSON whenever a token contained a quote.
    secret_string = json.dumps({
        "ACCESS_TOKEN": access_token,
        "REFRESH_TOKEN": refresh_token,
    })
    response = client.put_secret_value(
        SecretId=secret_name,
        SecretString=secret_string,
    )
    return response
def write_dynamodb(message: dict, tablename: str):
    """Put one item into the given DynamoDB table and return the raw response."""
    table = boto3.resource("dynamodb").Table(tablename)
    return table.put_item(Item=message)
| 1.90625 | 2 |
backend/lib/oauth2.py | ale-cci/demo-oauth-openid | 0 | 12765274 | <reponame>ale-cci/demo-oauth-openid
# Signing using rsa
# https://stackoverflow.com/questions/49116579/sign-a-byte-string-with-sha256-in-python
import base64
import json
import math
import random
import secrets
import string
import time

import cryptography
import flask

# Required for jwt signature
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives import hashes
def create_client_id():
    """Generate a human-readable client id of the form adjective-color-animal."""
    adjectives = ('good', 'new', 'first', 'last', 'long', 'great', 'little')
    colors = ('red', 'green', 'blue', 'purple', 'magenta', 'lime', 'black', 'gray', 'white')
    animals = ('shrimp', 'gorilla', 'lion', 'crockodile', 'panda', 'tiger', 'giraffe')
    return '-'.join(random.choice(words) for words in (adjectives, colors, animals))
def create_client_secret(secret_len=128):
    """Return a random client secret of *secret_len* lowercase alphanumerics.

    Uses the ``secrets`` module (CSPRNG): the ``random`` module previously
    used here is predictable and unsuitable for security tokens. The
    alphabet is the same set the old ``string.printable[:36]`` produced
    (digits followed by lowercase letters).
    """
    alphabet = string.digits + string.ascii_lowercase
    return ''.join(secrets.choice(alphabet) for _ in range(secret_len))
def _create_jwt_head():
return {
'alg': 'RS256',
'kid': '1',
'typ': 'JWT',
}
def int_to_b64(val):
    """Base64url-encode a non-negative integer (big-endian, no padding).

    This is the representation JWKs use for RSA ``n``/``e`` values.
    """
    # bit_length avoids the float-rounding errors int(math.log(val, 256))
    # could produce near exact powers of 256; max(..., 1) keeps 0 encodable.
    size = max((val.bit_length() + 7) // 8, 1)
    raw = val.to_bytes(size, 'big')
    # urlsafe_b64encode already maps '+' -> '-' and '/' -> '_'.
    return base64.urlsafe_b64encode(raw).rstrip(b'=').decode('utf-8')
def _create_jwt_body(claims):
    """Build the JWT payload: caller *claims* minus reserved names, plus the
    standard iss/sub/iat/exp claims generated here.

    ``sub`` is read from the Flask session, so this must run inside a
    request context with ``user_id`` set.
    """
    reserved_claims = {
        'at_hash', 'aud', 'azp', 'exp', 'iat', 'iss', 'sub', 'nbf'
    }
    # Drop reserved claims so callers cannot override the server-issued ones.
    token_claims = {
        k: v for k, v in claims.items()
        if k not in reserved_claims
    }
    iat = time.time()
    # Token lifetime: one hour.
    exp = iat + 3600
    token_claims.update({
        # NOTE(review): issuer is hard-coded to the dev server URL -- confirm
        # this should come from configuration in non-local deployments.
        'iss': 'https://localhost:8000',
        'sub': flask.session['user_id'],
        'iat': iat,
        'exp': exp,
    })
    return token_claims
def _create_checksum(message, keypath):
    """Sign *message* (bytes) with the RSA key at *keypath*.

    Returns the RS256 (PKCS#1 v1.5 / SHA-256) signature, base64-encoded
    with trailing '=' padding stripped.
    """
    # Read private key (OpenSSH format, unencrypted).
    with open(keypath, "rb") as key_file:
        private_key = serialization.load_ssh_private_key(
            key_file.read(),
            None,
            default_backend()
        )
    # Sign a message using the key
    signature = private_key.sign(
        message,
        padding=padding.PKCS1v15(),
        algorithm=hashes.SHA256()
    )
    return base64.b64encode(signature).rstrip(b'=')
def create_jwt(keypath, claims):
    """Build a signed RS256 JWT (``header.payload.signature``, base64url).

    *keypath* is the RSA private key used for signing; *claims* are merged
    into the payload by _create_jwt_body.
    """
    # (Removed an unused ``iss = time.time()`` local: the timestamps are
    # generated inside _create_jwt_body.)
    head = _create_jwt_head()
    body = _create_jwt_body(claims)
    b64_head = base64.b64encode(json.dumps(head).encode('utf-8')).rstrip(b'=')
    b64_body = base64.b64encode(json.dumps(body).encode('utf-8')).rstrip(b'=')
    message = b'.'.join((b64_head, b64_body))
    checksum = _create_checksum(message, keypath)
    output = b'.'.join((b64_head, b64_body, checksum))
    # Convert every segment from standard base64 to the base64url alphabet.
    return output.replace(b'+', b'-').replace(b'/', b'_')
def pubkey_info(keypath):
    """Return the JWK dict for the RSA public key stored at *keypath*.

    The modulus ``n`` and exponent ``e`` are base64url-encoded big-endian
    integers, matching the signing parameters used by create_jwt.
    """
    with open(keypath, 'rb') as pubkey:
        public_key = serialization.load_ssh_public_key(pubkey.read())
    pn = public_key.public_numbers()
    n = int_to_b64(pn.n)
    e = int_to_b64(pn.e)
    return {
        'kty': 'RSA',
        'alg': 'RS256',
        'use': 'sig',
        # Key id must match the "kid" in _create_jwt_head.
        'kid': '1',
        'n': n,
        'e': e,
    }
| 2.59375 | 3 |
codigo/setup.py | mattyws/desafio-iafront | 0 | 12765275 | <gh_stars>0
from setuptools import setup, find_packages
# Package metadata and console entry points for the desafio_iafront jobs.
setup(
    name='desafio_iafront',
    version='',
    packages=find_packages(),
    url='',
    license='',
    author='<NAME>',
    author_email='',
    description='',
    # Pinned runtime dependencies.
    install_requires=[
        "scikit-learn==0.23.1",
        "click==7.1.2",
        "bokeh==2.1.1",
        "dataset-loader==1.6",
        'pandas==1.1.0',
        'numpy==1.19.1'
    ],
    # Each script maps a CLI command to a module-level callable.
    entry_points={
        'console_scripts': [
            'prepara-pedidos=desafio_iafront.jobs.pedidos:main',
            'cria-visitas=desafio_iafront.jobs.create_visits:main',
            'analise-instancias-cluster=desafio_iafront.jobs.graphics:plot_analise_pontos_cluster',
            'analise-conversao-por-cep=desafio_iafront.jobs.graphics:plot_conversao_by_cep',
            'analise-conversao-por-departamento=desafio_iafront.jobs.graphics:plot_conversao_by_departament',
            'escala-visitas=desafio_iafront.jobs.escala_pedidos:main',
            'escala-analise-distribuicao-scatter=desafio_iafront.jobs.graphics:scatter_scale_analysis',
            'escala-analise-distribuicao-histograma=desafio_iafront.jobs.graphics:histogram_scale_analysis',
            'analise-conversao-temporal=desafio_iafront.jobs.graphics:analise_conversao_temporal',
            'analise-conversao-cluster=desafio_iafront.jobs.graphics:plot_conversao',
            'particiona-conversao=desafio_iafront.jobs.particiona_dados:particiona_conversao_cluster',
            'birch-clustering=desafio_iafront.jobs.clusters:birch',
            'mean-shift-clustering=desafio_iafront.jobs.clusters:mean_shift',
            'gaussian-mixtures-clustering=desafio_iafront.jobs.clusters:gaussian_mixtures',
            'agglomerative-clustering=desafio_iafront.jobs.clusters:agglomerative',
            'kmeans-clustering=desafio_iafront.jobs.clusters:kmeans',
            'optics-clustering=desafio_iafront.jobs.clusters:optics',
            'affinity-clustering=desafio_iafront.jobs.clusters:affinity',
            'spectral-clustering=desafio_iafront.jobs.clusters:spectral'
        ]
    }
)
| 1.507813 | 2 |
cogs/mal.py | loomkoom/appuselfbotmain | 0 | 12765276 | <gh_stars>0
import sys
import subprocess
import os
import requests
import re
import asyncio
import gc
import tokage
import discord
import pytz
import json
from datetime import datetime, timedelta
from discord.ext import commands
from bs4 import BeautifulSoup
from urllib.request import Request, urlopen
from cogs.utils.checks import load_optional_config, get_google_entries, embed_perms
'''Module for MyAnimeList search of anime, manga, and light novels.'''
class Mal:
    def __init__(self, bot):
        # Bot reference plus a shared tokage (MyAnimeList API) client.
        self.bot = bot
        self.t_client = tokage.Client()
# Taken from https://stackoverflow.com/questions/2659900/python-slicing-a-list-into-n-nearly-equal-length-partitions
def partition(self, lst, n):
if n > 1:
division = len(lst) / n
return [lst[round(division * i):round(division * (i + 1))] for i in range(n)]
else:
return [lst]
    @staticmethod
    async def google_results(type, query):
        """Find a MyAnimeList URL of the given *type* (anime/manga/...) via
        Google; returns (found, url). Falls back to the Custom Search API
        (keys from the environment) when scraping fails."""
        loop = asyncio.get_event_loop()
        config = load_optional_config()
        try:
            entries, root = await get_google_entries('site:myanimelist.net {} {}'.format(type, query))
            result = entries[0]
        except RuntimeError:
            try:
                search_url = "https://www.googleapis.com/customsearch/v1?q=site:myanimelist.net {} {} ".format(type, query) + "&start=" + '1' + "&key=" + \
                             os.environ['google_api_key'] + "&cx=" + os.environ[
                                 'custom_search_engine']
                r = await loop.run_in_executor(None, requests.get, search_url)
                response = r.content.decode('utf-8')
                result = json.loads(response)['items'][0]['link']
            except:
                # Either API failure or no results at all.
                return False, None
        return True, result
    # Mal search (chained with either anime or manga)
    @commands.group(pass_context=True)
    async def mal(self, ctx):
        """Search MyAnimeList for an anime/manga. Ex: >mal anime Steins;Gate
        For >mal anime and >mal manga, put [link] after the anime/manga part to just get the link instead of the full info.
        Ex: >mal anime [link] Steins;Gate
        For >mal va, put [more] to get some more info. (Takes more time) Ex: >mal va [more] saori hayami"""
        # Group command: only respond when no subcommand was matched.
        if ctx.invoked_subcommand is None:
            await self.bot.send_message(ctx.message.channel,
                                        self.bot.bot_prefix + 'Invalid Syntax. See `>help mal` for more info on how to use this command.')
    # Anime search for Mal
    @mal.command(pass_context=True)
    async def anime(self, ctx, *, msg: str = None):
        """Search the anime database. Ex: >mal anime Steins;Gate"""
        if msg:
            fetch = await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'Searching...')
            # "[link]" prefix: reply with just the MAL URL, no embed.
            if msg.startswith('[link]'):
                msg = msg[6:]
                link = True
            else:
                link = False
            found, result = await self.google_results('anime', msg)
            if found:
                # The MAL id is embedded in the URL path (/anime/<id>/...).
                anime_id = re.findall('/anime/(.*)/', result)
                try:
                    results = await self.t_client.get_anime(int(anime_id[0]))
                except IndexError:
                    return await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'No results.')
                finally:
                    gc.collect()
            else:
                await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'No results.')
                await self.bot.delete_message(fetch)
                return await self.bot.delete_message(ctx.message)
            # Fall back to a bare link when embeds are not permitted.
            if not embed_perms(ctx.message) or link is True:
                await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'https://myanimelist.net/anime/%s' % results.id)
                await self.bot.delete_message(fetch)
                return await self.bot.delete_message(ctx.message)
            # Formatting embed
            selection = results
            synopsis = BeautifulSoup(selection.synopsis, 'lxml')
            em = discord.Embed(description='{}'.format('https://myanimelist.net/anime/%s' % selection.id),
                               colour=0x0066CC)
            try:
                english = selection.english
                if english:
                    em.add_field(name='English Title', value=english, inline=False)
            except:
                pass
            em.add_field(name='Type', value=selection.type)
            episodes = 'Unknown' if selection.episodes == '0' else selection.episodes
            em.add_field(name='Episodes', value=episodes)
            score = '?' if selection.score[0] == 0 else str(selection.score[0]) + '/10'
            em.add_field(name='Score', value=score)
            em.add_field(name='Status', value=selection.status)
            # Truncate the synopsis to ~400 chars, cut at a sentence boundary.
            try:
                synop = synopsis.get_text()[:400].split('.')
                text = ''
                for i in range(0, len(synop)-1):
                    text += synop[i] + '.'
            except:
                text = synopsis.get_text()
            em.add_field(name='Synopsis',
                         value=text + ' [Read more »](https://myanimelist.net/anime/%s)' % selection.id)
            em.add_field(name='Airing Time:', value=selection.air_time)
            em.set_thumbnail(url=selection.image)
            em.set_author(name=selection.title,
                          icon_url='https://myanimelist.cdn-dena.com/img/sp/icon/apple-touch-icon-256.png')
            em.set_footer(text='MyAnimeList Anime Search')
            await self.bot.send_message(ctx.message.channel, embed=em)
            await self.bot.delete_message(fetch)
            await self.bot.delete_message(ctx.message)
        else:
            await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'Specify an anime to search for.')
    # Manga search for Mal
    @mal.command(pass_context=True)
    async def manga(self, ctx, *, msg: str = None):
        """Search the manga database. Ex: >mal manga Boku no Hero Academia"""
        if msg:
            fetch = await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'Searching...')
            # "[link]" prefix: reply with just the MAL URL, no embed.
            if msg.startswith('[link]'):
                msg = msg[6:]
                link = True
            else:
                link = False
            found, result = await self.google_results('manga', msg)
            if found:
                # The MAL id is embedded in the URL path (/manga/<id>/...).
                manga_id = re.findall('/manga/(.*)/', result)
                try:
                    results = await self.t_client.get_manga(int(manga_id[0]))
                except IndexError:
                    return await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'No results.')
                gc.collect()
            else:
                await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'No results.')
                await self.bot.delete_message(fetch)
                return await self.bot.delete_message(ctx.message)
            # Fall back to a bare link when embeds are not permitted.
            if not embed_perms(ctx.message) or link is True:
                await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'https://myanimelist.net/manga/%s' % results.id)
                await self.bot.delete_message(fetch)
                return await self.bot.delete_message(ctx.message)
            # Formatting embed
            selection = results
            synopsis = BeautifulSoup(selection.synopsis, 'lxml')
            em = discord.Embed(description='{}'.format('https://myanimelist.net/manga/%s' % selection.id),
                               colour=0x0066CC)
            try:
                english = selection.english
                if english:
                    em.add_field(name='English Title', value=english, inline=False)
            except:
                pass
            em.add_field(name='Type', value=selection.type)
            chapters = 'Unknown' if selection.chapters == '0' else selection.chapters
            em.add_field(name='Chapters', value=chapters)
            score = '?' if selection.score[0] == 0 else str(selection.score[0]) + '/10'
            em.add_field(name='Score', value=score)
            em.add_field(name='Status', value=selection.status)
            # Truncate the synopsis to ~400 chars, cut at a sentence boundary.
            try:
                synop = synopsis.get_text()[:400].split('.')
                text = ''
                for i in range(0, len(synop)-1):
                    text += synop[i] + '.'
            except:
                text = synopsis.get_text()
            em.add_field(name='Synopsis',
                         value=text + ' [Read more »](https://myanimelist.net/manga/%s)' % selection.id)
            em.add_field(name='Airing Time:', value=selection.publish_time)
            em.set_thumbnail(url=selection.image)
            em.set_author(name=selection.title,
                          icon_url='https://myanimelist.cdn-dena.com/img/sp/icon/apple-touch-icon-256.png')
            em.set_footer(text='MyAnimeList Manga Search')
            await self.bot.send_message(ctx.message.channel, embed=em)
            await self.bot.delete_message(fetch)
            await self.bot.delete_message(ctx.message)
        else:
            await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'No results')
@staticmethod
async def get_next_weekday(startdate, day):
days = {
"Monday": 0,
"Tuesday": 1,
"Wednesday": 2,
"Thursday": 3,
"Friday": 4,
"Saturday": 5,
"Sunday": 6
}
weekday = days[day]
d = datetime.strptime(startdate, '%Y-%m-%d')
t = timedelta((7 + weekday - d.weekday()) % 7)
return (d + t).strftime('%Y-%m-%d')
    async def get_remaining_time(self, anime):
        """Return a human-readable countdown until *anime*'s next broadcast.

        The broadcast string is assumed to look like "<Day>s at <HH:MM>"
        in Japan time (the [:-1] strips the plural 's').
        """
        day = anime.broadcast.split(" at ")[0][:-1]
        hour = anime.broadcast.split(" at ")[1].split(" ")[0]
        jp_time = datetime.now(pytz.timezone("Japan"))
        air_date = await self.get_next_weekday(jp_time.strftime('%Y-%m-%d'), day)
        time_now = jp_time.replace(tzinfo=None)
        show_airs = datetime.strptime('{} - {}'.format(air_date, hour.strip()), '%Y-%m-%d - %H:%M')
        remaining = show_airs - time_now
        if remaining.days < 0:
            # Airtime already passed today: report the time to next week's slot.
            return '6 Days {} Hours and {} Minutes.'.format(remaining.seconds // 3600, (remaining.seconds // 60) % 60)
        else:
            return '{} Days {} Hours and {} Minutes.'.format(remaining.days, remaining.seconds // 3600, (remaining.seconds // 60) % 60)
    @mal.command(pass_context=True, alias=['character'])
    async def char(self, ctx, *, query):
        """Finds specified character actor on MyAnimeList"""
        fetch = await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'Searching...')
        found, result = await self.google_results('character', query)
        if found:
            # The MAL id is embedded in the URL path (/character/<id>/...).
            char_id = re.findall('/character/(.*)/', result)
        else:
            await self.bot.delete_message(fetch)
            await self.bot.delete_message(ctx.message)
            return await self.bot.send_message(ctx.message.channel)
        try:
            selection = await self.t_client.get_character(char_id[0])
        except IndexError:
            return await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'No results.')
        em = discord.Embed(description='{}'.format('https://myanimelist.net/character/%s' % selection.id),
                           colour=0x0066CC)
        em.add_field(name='Anime', value=selection.animeography[0]['name'], inline=False)
        # Prefer the Japanese voice actor; fall back to the first listed one.
        if len(selection.raw_voice_actors) > 1:
            va = None
            for actor in selection.raw_voice_actors:
                if actor['language'] == 'Japanese':
                    va = actor['name']
                    break
            if not va:
                va = selection.raw_voice_actors[0]['name']
        else:
            va = selection.raw_voice_actors[0]['name']
        em.add_field(name='Voice Actor', value=va)
        em.add_field(name='Favorites', value=selection.favorites)
        em.set_image(url=selection.image)
        em.set_author(name=selection.name,
                      icon_url='https://myanimelist.cdn-dena.com/img/sp/icon/apple-touch-icon-256.png')
        em.set_footer(text='MyAnimeList Character Search')
        await self.bot.send_message(ctx.message.channel, content=None, embed=em)
        await self.bot.delete_message(fetch)
        await self.bot.delete_message(ctx.message)
    @mal.command(pass_context=True, alias=['actor', 'voiceactor', 'person', 'voice'])
    async def va(self, ctx, *, query):
        """Finds specified voice actor on MyAnimeList"""
        # "[more]" prefix: also compute role counts (slow, spawns helpers).
        if query.startswith('[more] '):
            query = query[7:]
            more_info = True
        else:
            more_info = False
        fetch = await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'Searching...')
        found, result = await self.google_results('people', query)
        if found:
            va_id = re.findall('/people/(.*)/', result)
        else:
            await self.bot.delete_message(fetch)
            await self.bot.delete_message(ctx.message)
            return await self.bot.send_message(ctx.message.channel)
        # No way to get va name so must parse html and grab name from title -_-
        request_headers = {
            "Accept-Language": "en-US,en;q=0.5",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:40.0) Gecko/20100101 Firefox/40.0",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
            "Referer": "http://thewebsite.com",
            "Connection": "keep-alive"
        }
        loop = asyncio.get_event_loop()
        try:
            req = Request(result, headers=request_headers)
            webpage = await loop.run_in_executor(None, urlopen, req)
        except:
            return await self.bot.send_message(ctx.message.channel,
                                               self.bot.bot_prefix + 'Exceeded daily request limit. Try again tomorrow, sorry!')
        soup = BeautifulSoup(webpage, 'html.parser')
        va_name = soup.title.string.split(' - MyAnimeList')[0]
        try:
            selection = await self.t_client.get_person(va_id[0])
        except IndexError:
            return await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'No results.')
        em = discord.Embed(description='{}'.format('https://myanimelist.net/people/%s' % selection.id),
                           colour=0x0066CC)
        em.add_field(name='Favorites', value=selection.favorites)
        if more_info:
            # Placeholders; filled in after the helper subprocesses finish.
            em.add_field(name='Total Roles', value='Fetching...')
            em.add_field(name='Most Popular Role', value='Fetching...', inline=False)
        em.set_image(url=selection.image)
        em.set_author(name=va_name,
                      icon_url='https://myanimelist.cdn-dena.com/img/sp/icon/apple-touch-icon-256.png')
        em.set_footer(text='MyAnimeList Voice Actor Search')
        va_info = await self.bot.send_message(ctx.message.channel, content=None, embed=em)
        await self.bot.delete_message(fetch)
        await self.bot.delete_message(ctx.message)
        # get_char on each character in the va role list
        if more_info:
            all_chars = []
            for character in selection.voice_acting:
                id = character['character']['link'].split('/')[2]
                all_chars.append(id)
            try:
                # Fan out the lookups over ~5 helper subprocesses; each prints
                # "character | favorites" lines that are merged below.
                chunk_generator = self.partition(all_chars, int(len(all_chars)/5))
                chunk_list = [chunk for chunk in chunk_generator]
                args = [sys.executable, 'cogs/utils/mal_char_find.py']
                self.bot.mal_finder = []
                for chunk in chunk_list:
                    p = subprocess.Popen(args + chunk, stdout=subprocess.PIPE)
                    self.bot.mal_finder.append(p)
                while all(None is p.poll() for p in self.bot.mal_finder):
                    await asyncio.sleep(1)
                txt = ''
                for p in self.bot.mal_finder:
                    txt += p.communicate()[0].decode('utf-8')
                all_roles = []
                role_list = txt.split('\n')
                for role in role_list:
                    if ' | ' in role:
                        char, favs = role.split(' | ')
                        all_roles.append((char.strip(), int(favs.strip())))
                # Sort by favorite count, most popular first.
                all_roles = sorted(all_roles, key=lambda x: x[1], reverse=True)
                unique_roles = set(tup[0] for tup in all_roles)
                em.set_field_at(index=1, name='Roles', value=str(len(unique_roles)))
                em.set_field_at(index=2, name='Most Popular Role', value=all_roles[0][0] + '\nFavorites: ' + str(all_roles[0][1]), inline=False)
            except ZeroDivisionError:
                # Fewer than 5 roles: int(len/5) == 0 -> partition by zero.
                em.set_field_at(index=1, name='Roles', value='None')
                em.set_field_at(index=2, name='Most Popular Role', value='None', inline=False)
            await self.bot.edit_message(va_info, new_content=None, embed=em)
    @mal.command(pass_context=True, name="next")
    async def next_(self, ctx, *, query):
        """Time till next episode air date for specified anime"""
        # Placeholder message while the lookup runs; deleted before replying.
        search = await self.bot.say(self.bot.bot_prefix + "Searching...")
        # Resolve the free-text query to a MAL URL via a Google search helper.
        found, result = await self.google_results('anime', query)
        if found:
            # MAL anime URLs look like /anime/<id>/<slug>; grab the id.
            anime_id = re.findall('/anime/(.*)/', result)[0]
            try:
                anime = await self.t_client.get_anime(anime_id)
            except Exception as e:
                # Surface the API failure to the user, then clean up our messages.
                await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + ":exclamation: Oops!\n {}: {}".format(type(e).__name__, e))
                await self.bot.delete_message(search)
                return await self.bot.delete_message(ctx.message)
        else:
            await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'Failed to find given anime.')
            await self.bot.delete_message(search)
            return await self.bot.delete_message(ctx.message)
        if anime.status == "Finished Airing":
            remaining = "This anime has finished airing!\n" + anime.air_time
        else:
            # Helper computes the countdown string until the next episode airs.
            remaining = await self.get_remaining_time(anime)
        embed = discord.Embed(title=anime.title, color=0x0066CC)
        embed.add_field(name="Next Episode", value=remaining)
        embed.set_footer(text='MyAnimeList Episode Time Search')
        embed.set_author(name='MyAnimeList', icon_url='https://myanimelist.cdn-dena.com/img/sp/icon/apple-touch-icon-256.png')
        embed.set_thumbnail(url=anime.image)
        # Remove the transient "Searching..." message and the invoking command.
        await self.bot.delete_message(search)
        await self.bot.delete_message(ctx.message)
        await self.bot.send_message(ctx.message.channel, embed=embed)
def setup(bot):
    """discord.py extension entry point: register the Mal cog on the bot."""
    bot.add_cog(Mal(bot))
| 2.3125 | 2 |
project-simple-00.py | MaxZar1/project-simple | 0 | 12765277 | # to jest komentarz
WIDTH = 550  # window width in pixels -- presumably a Pygame Zero convention, TODO confirm
HEIGHT = 550  # window height in pixels
| 1.070313 | 1 |
src/metrics/data/_classification.py | exprmntr/crowd-kit | 0 | 12765278 | from typing import Any, Optional, Union
import numpy as np
import pandas as pd
from crowdkit.aggregation.base_aggregator import BaseAggregator
from crowdkit.aggregation import MajorityVote
def _check_answers(answers: pd.DataFrame) -> None:
if not isinstance(answers, pd.DataFrame):
raise TypeError('Working only with pandas DataFrame')
assert 'task' in answers, 'There is no "task" column in answers'
assert 'performer' in answers, 'There is no "performer" column in answers'
assert 'label' in answers, 'There is no "label" column in answers'
def _label_probability(row: pd.Series, label: Any, n_labels: int) -> float:
"""Numerator in the Bayes formula"""
return row['skill'] if row['label'] == label else (1.0 - row['skill']) / (n_labels - 1)
def _task_consistency(row: pd.Series) -> float:
"""Posterior probability for a single task"""
return row[row['aggregated_label']] / row['denominator'] if row['denominator'] != 0 else 0.0
def consistency(answers: pd.DataFrame,
                performers_skills: Optional[pd.Series] = None,
                aggregator: BaseAggregator = MajorityVote(),
                by_task: bool = False) -> Union[float, pd.Series]:
    """
    Consistency metric: posterior probability of aggregated label given performers skills
    calculated using standard Dawid-Skene model.

    Args:
        answers (pandas.DataFrame): A data frame containing `task`, `performer` and `label` columns.
        performers_skills (Optional[pandas.Series]): performers skills e.g. golden set skills. If not provided,
            uses aggregator's `performers_skills` attribute.
        aggregator (aggregation.BaseAggregator): aggregation method, default: MajorityVote
        by_task (bool): if set, returns consistencies for every task in provided data frame.

    Returns:
        Union[float, pd.Series]

    Raises:
        AssertionError: if no skills were provided and the aggregator does not
            expose a ``skills_`` attribute.
    """
    _check_answers(answers)
    aggregated = aggregator.fit_predict(answers)
    # BUG FIX: the original condition ``performers_skills is None and
    # hasattr(...)`` fell into the error branch whenever skills WERE
    # explicitly provided; explicit skills must simply be used as given.
    if performers_skills is None:
        if hasattr(aggregator, 'skills_'):
            performers_skills = aggregator.skills_
        else:
            raise AssertionError('This aggregator is not supported. Please, provide performers skills.')
    # Shallow copy: we add columns but must not mutate the caller's frame.
    answers = answers.copy(deep=False)
    answers.set_index('task', inplace=True)
    # Re-index by performer so skills align by performer id on assignment.
    answers = answers.reset_index().set_index('performer')
    answers['skill'] = performers_skills
    answers.reset_index(inplace=True)
    labels = pd.unique(answers.label)
    # One column per label holding each row's Bayes-numerator contribution.
    for label in labels:
        answers[label] = answers.apply(lambda row: _label_probability(row, label, len(labels)), axis=1)
    # Product over a task's rows gives the unnormalised label likelihoods.
    labels_proba = answers.groupby('task').prod()
    labels_proba['aggregated_label'] = aggregated
    labels_proba['denominator'] = labels_proba[list(labels)].sum(axis=1)
    consistencies = labels_proba.apply(_task_consistency, axis=1)
    if by_task:
        return consistencies
    return consistencies.mean()
def _task_uncertainty(row, labels):
if row['denominator'] == 0:
row[labels] = 1 / len(labels)
else:
row[labels] /= row['denominator']
softmax = row[labels]
log_softmax = np.log(row[list(labels)])
return -np.sum(softmax * log_softmax)
def uncertainty(answers, performers_skills, by_task: bool = False) -> Union[float, pd.Series]:
    """
    Label uncertainty metric: entropy of labels probability distribution.

    Args:
        answers (pandas.DataFrame): A data frame containing `task`, `performer` and `label` columns.
        performers_skills (pandas.Series): performers skills e.g. golden set skills.
        by_task (bool): if set, returns the entropy for every task in the data frame.

    Returns:
        Union[float, pd.Series]
    """
    _check_answers(answers)
    # Shallow copy so added columns do not leak into the caller's frame.
    frame = answers.copy(deep=False)
    frame.set_index('task', inplace=True)
    # Re-index by performer so skills align by performer id on assignment.
    frame = frame.reset_index().set_index('performer')
    frame['skill'] = performers_skills
    frame.reset_index(inplace=True)
    labels = pd.unique(frame.label)
    n_labels = len(labels)
    # One column per label with each row's Bayes-numerator contribution.
    for label in labels:
        frame[label] = frame.apply(lambda row: _label_probability(row, label, n_labels), axis=1)
    # Product over a task's rows -> unnormalised per-label likelihoods.
    per_task = frame.groupby('task').prod()
    per_task['denominator'] = per_task[list(labels)].sum(axis=1)
    entropies = per_task.apply(lambda row: _task_uncertainty(row, list(labels)), axis=1)
    return entropies if by_task else entropies.mean()
| 2.796875 | 3 |
annotations/admin.py | connectik/digital-manifesto | 0 | 12765279 | from __future__ import unicode_literals, absolute_import
from django.contrib import admin
from . import models
@admin.register(models.Annotation)
class AnnotationAdmin(admin.ModelAdmin):
    """Admin configuration for Annotation objects (annotator.js style data)."""
    # Allow admins to search annotations by their body text.
    search_fields = ('text',)
    # Explicit field order on the change form; range_* fields describe the
    # annotated span within the source document.
    fields = (
        'user',
        'text_object',
        'annotator_schema_version',
        'text',
        'quote',
        'uri',
        'range_start',
        'range_end',
        'range_start_offset',
        'range_end_offset',
        'tags',
    )
| 1.6875 | 2 |
tools/regVariation/maf_cpg_filter.py | bopopescu/phyG | 2 | 12765280 | #!/usr/bin/env python
#<NAME>
#Adapted from bx/scripts/maf_mask_cpg.py
"""
Mask out potential CpG sites from a maf. Restricted or inclusive definition
of CpG sites can be used. The total fraction masked is printed to stderr.
usage: %prog < input > output restricted
-m, --mask=N: Character to use as mask ('?' is default)
"""
from galaxy import eggs
import pkg_resources
pkg_resources.require( "bx-python" )
try:
pkg_resources.require( "numpy" )
except:
pass
import bx.align
import bx.align.maf
from bx.cookbook import doc_optparse
import sys
import bx.align.sitemask.cpg
assert sys.version_info[:2] >= ( 2, 4 )
def main():
    # Parse options from the module docstring (bx-python convention).
    options, args = doc_optparse.parse( __doc__ )
    try:
        inp_file, out_file, sitetype, definition = args
        if options.mask:
            # NOTE(review): the docstring says -m is a mask *character*
            # ('?' default), but the code expects an integer index into
            # mask_chr_dict below -- confirm the intended CLI contract.
            mask = int(options.mask)
        else:
            mask = 0
    except:
        # Bare except kept from original; any parse failure aborts the tool.
        print >> sys.stderr, "Tool initialization error."
        sys.exit()
    reader = bx.align.maf.Reader( open(inp_file, 'r') )
    writer = bx.align.maf.Writer( open(out_file,'w') )
    # Map the numeric mask option to the actual masking character.
    mask_chr_dict = {0:'#', 1:'$', 2:'^', 3:'*', 4:'?', 5:'N'}
    mask = mask_chr_dict[mask]
    if sitetype == "CpG":
        # definition == 1 selects the restricted CpG definition.
        if int(definition) == 1:
            cpgfilter = bx.align.sitemask.cpg.Restricted( mask=mask )
            defn = "CpG-Restricted"
        else:
            cpgfilter = bx.align.sitemask.cpg.Inclusive( mask=mask )
            defn = "CpG-Inclusive"
    else:
        cpgfilter = bx.align.sitemask.cpg.nonCpG( mask=mask )
        defn = "non-CpG"
    # Stream the MAF through the filter, writing masked blocks out.
    cpgfilter.run( reader, writer.write )
    print "%2.2f percent bases masked; Mask character = %s, Definition = %s" %(float(cpgfilter.masked)/float(cpgfilter.total) * 100, mask, defn)
if __name__ == "__main__":
main()
| 2.625 | 3 |
minigest/tributi/models/accertamento_rata/__init__.py | ctrlmaniac/minigest | 0 | 12765281 | from .erario import AccertamentoRataSezErario
from .rata import AccertamentoRata
__all__ = ["AccertamentoRataSezErario", "AccertamentoRata"]
| 1.039063 | 1 |
src/register/migrations/0001_initial.py | saurabhs92/django-organ-donatation | 0 | 12765282 | <filename>src/register/migrations/0001_initial.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-13 18:46
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    """Initial migration: creates the Donor and Organ tables.

    Auto-generated by Django 1.10; migrations are historical records and
    should not be hand-edited beyond comments.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Donor',
            fields=[
                ('first_name', models.CharField(max_length=50)),
                ('last_name', models.CharField(max_length=50)),
                ('gender', models.IntegerField()),
                ('email', models.EmailField(max_length=254, unique=True)),
                ('date_of_birth', models.DateField()),
                # NOTE(review): max_length is not a valid option on
                # PositiveIntegerField (Django ignores it); the regex
                # validator alone enforces the 10-digit contact numbers.
                ('contact_no', models.PositiveIntegerField(max_length=10, unique=True, validators=[django.core.validators.RegexValidator(code='Invalid number', message='Length has to be 10', regex='^\\d{10}$')])),
                ('emergeny_contact', models.PositiveIntegerField(max_length=10, unique=True, validators=[django.core.validators.RegexValidator(code='Invalid number', message='Length has to be 10', regex='^\\d{10}$')])),
                ('street_add1', models.CharField(max_length=50)),
                ('street_add2', models.CharField(max_length=50)),
                ('city', models.CharField(max_length=50)),
                ('state', models.CharField(max_length=50)),
                ('postal_code', models.PositiveIntegerField()),
                # UUID primary key instead of the default auto-increment id.
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('registered', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='Organ',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('description', models.TextField()),
            ],
        ),
        # Many-to-many: which organs a donor has pledged.
        migrations.AddField(
            model_name='donor',
            name='organ',
            field=models.ManyToManyField(to='register.Organ'),
        ),
    ]
| 1.875 | 2 |
laia/models/htr/__init__.py | basbeu/PyLaia | 2 | 12765283 | <reponame>basbeu/PyLaia
from __future__ import absolute_import
from laia.models.htr.conv_block import ConvBlock
from laia.models.htr.dummy_model import DummyModel
from laia.models.htr.laia_crnn import LaiaCRNN
from laia.models.htr.gated_crnn import GatedConv2d, GatedCRNN
| 0.929688 | 1 |
test_all_data.py | GabrielComanescu/fashion_Mnist | 0 | 12765284 | <filename>test_all_data.py
from Model import ConvNet
import torch
from torch.utils.data import DataLoader
import data
from sklearn.metrics import confusion_matrix
# Load the trained ConvNet weights and switch to evaluation mode.
model_test = ConvNet()
model_test.load_state_dict(torch.load('models/model.pt'))
if torch.cuda.is_available():
    model_test.cuda()
model_test.eval()
train_data, test_data = data.prepare_data()
# Single batch containing the entire test set.
test_load_all = DataLoader(test_data, batch_size=len(test_data))
print('Test all data')
#Test all test data at once
with torch.no_grad():
    correct = 0
    for X_test, y_test in test_load_all:
        if torch.cuda.is_available():
            X_test = X_test.cuda()
            y_test = y_test.cuda()
        # Reshape to (N, channels=1, 28, 28) for the conv layers.
        X_test = X_test.view(-1,1,28,28)
        y_val = model_test.forward(X_test)
        # argmax over the class dimension -> predicted labels.
        predicted = torch.max(y_val,1)[1]
        correct += (predicted == y_test).sum()
    #Print accuracy
    print(f'accuracy: {correct.item()*100/len(test_data)}%')
    #Print confusion matrix (uses the last batch, i.e. the whole test set)
    arr = confusion_matrix(y_test.cpu().view(-1), predicted.cpu().view(-1))
    print(arr)
| 2.765625 | 3 |
setup.py | RaymondKlass/entity-extract | 0 | 12765285 | <reponame>RaymondKlass/entity-extract<filename>setup.py
#! /usr/bin/env python
from setuptools import setup
# Package metadata for the entity-extractor distribution.
# NOTE(review): the distribution name ("entity-extractor") differs from the
# repo/package name ("entity_extract") -- confirm this is intentional.
setup(name="entity-extractor",
      version="0.01",
      author="<NAME>",
      author_email="<EMAIL>",
      description="Entity Extraction, Recognition, and Translation to RDF",
      license="MIT",
      packages=["entity_extract"],
      # Runtime dependencies; mock is pinned for Python 2 test support.
      install_requires=[
          "nltk",
          "mock >= 1.0.1",
          "mwclient"
      ],
      url="https://github.com/RaymondKlass/entity-extract"
      )
core/gym/cloudsim_env.py | kangyifei/CloudSimPy | 0 | 12765286 | <filename>core/gym/cloudsim_env.py<gh_stars>0
import gym
from core.gym.abstract_agent import AbstractAgent
from core.cluster import Cluster
from core.scheduler import Scheduler
from core.cooling_equipment import CoolingEquipment
from core.simulation import Simulation
from gym import spaces
from core.config import MachineConfig, JobConfig, CoolingEquipmentConfig
from typing import List, Dict
from ray.rllib.agents import ppo,dqn
# NOTE(review): debug leftovers -- instantiating and *training* RLlib agents
# at module import time has heavy side effects (and PPOTrainer(env='') will
# fail to resolve an environment); this should be removed or guarded behind
# ``if __name__ == "__main__":``.
d=dqn.DQNTrainer()
p=ppo.PPOTrainer(env='')
p.train()
class CloudSimEnv(gym.Env):
    """Gym-style environment wrapping a CloudSimPy simulation.

    Observations concatenate per-machine free resources, the cooling
    equipment's state parameters, and the resource demand of the next
    waiting task.  Actions select a target machine plus new cooling
    control values.
    """

    def render(self, mode='human'):
        # Rendering is not supported for this environment.
        pass

    def __init__(self, machines_config_list: List[MachineConfig],
                 agent: AbstractAgent,
                 simulation: Simulation,
                 cooling_equipment_config: CoolingEquipmentConfig = None) -> None:
        """Build the observation/action spaces and bind simulation objects.

        Args:
            machines_config_list: static description of every machine.
            agent: decision maker queried by the scheduling callbacks.
            simulation: running CloudSimPy simulation to observe and control.
            cooling_equipment_config: optional cooling-equipment description
                that extends the observation/action spaces.
        """
        super().__init__()
        self.__init_observation_action_space__(machines_config_list, cooling_equipment_config)
        self.destroyed = False
        self.agent = agent
        self.simulation = simulation
        self.cluster: Cluster = simulation.cluster
        self.cooling_equipment: CoolingEquipment = simulation.cooling_equipment
        self.task_broker = simulation.task_broker

    def __init_observation_action_space__(self, machines_config_list: List[MachineConfig],
                                          cooling_equipment_config: CoolingEquipmentConfig = None) -> None:
        """Derive gym spaces from the machine and cooling configurations."""
        temp_observation_space = []
        temp_action_space = []
        # Per-machine free CPU / memory / disk.
        for machine_config in machines_config_list:
            temp_observation_space.append(spaces.Box(low=0, high=machine_config.cpu_capacity, shape=(1,)))
            temp_observation_space.append(spaces.Box(low=0, high=machine_config.memory_capacity, shape=(1,)))
            temp_observation_space.append(spaces.Box(low=0, high=machine_config.disk_capacity, shape=(1,)))
        # Discrete choice of the machine the next task is placed on.
        temp_action_space.append(spaces.Discrete(len(machines_config_list)))
        # Cooling equipment: state params are observed, control params acted on.
        if cooling_equipment_config is not None:
            state_paramslist = cooling_equipment_config.state_paramslist
            for state_param_key in state_paramslist:
                temp_observation_space.append(spaces.Box(low=state_paramslist[state_param_key]["low"],
                                                         high=state_paramslist[state_param_key]["high"],
                                                         shape=(1,)))
            control_paramslist = cooling_equipment_config.control_paramslist
            for control_param_key in control_paramslist:
                temp_action_space.append(spaces.Box(low=control_paramslist[control_param_key]["low"],
                                                    high=control_paramslist[control_param_key]["high"],
                                                    shape=(1,)))
        # Demand of the incoming task, bounded by the largest machine.
        maxmum_cpu_capacity = max(machine_config.cpu_capacity for machine_config in machines_config_list)
        maxmum_mem_capacity = max(machine_config.memory_capacity for machine_config in machines_config_list)
        maxmum_disk_capacity = max(machine_config.disk_capacity for machine_config in machines_config_list)
        temp_observation_space.append(spaces.Box(low=0, high=maxmum_cpu_capacity, shape=(1,)))
        temp_observation_space.append(spaces.Box(low=0, high=maxmum_mem_capacity, shape=(1,)))
        temp_observation_space.append(spaces.Box(low=0, high=maxmum_disk_capacity, shape=(1,)))
        self.observation_space = spaces.Tuple(tuple(temp_observation_space))
        self.action_space = spaces.Tuple(tuple(temp_action_space))

    def __get_next_task__(self):
        """Return the oldest task that still has a waiting instance."""
        return self.cluster.tasks_which_has_waiting_instance[0]

    def step(self, action: Dict):
        """Apply one action: ``{"machine": Optional[int], "cooling_equipment": List}``.

        Returns the gym 4-tuple (observation, reward, done, info).
        """
        if action["machine"] is not None:
            scheduled_machine = self.cluster.machines[action["machine"]]
            self.next_task.start_task_instance(scheduled_machine)
            print("new task started at ", scheduled_machine)
        # BUG FIX: the original no-machine branch assigned
        # ``action["cooling_equipment"],`` (trailing comma), wrapping the
        # list in a 1-tuple so indexing ``[i]`` crashed for i >= 1.  Both
        # branches now share this single, correct code path.
        action_cooling_params_list = action["cooling_equipment"]
        for i, paramskey in enumerate(self.cooling_equipment.control_paramslist):
            self.cooling_equipment.control_paramslist[paramskey] = action_cooling_params_list[i]
        self.cooling_equipment.update_self()
        self.cooling_equipment.update_cluster()
        return self.__get_observation__(False), self.__get__reward__(), self.finished, None

    def __get_observation__(self, new_job=True):
        """Assemble the flat observation list.

        When ``new_job`` is true, fetch the next waiting task and append its
        resource demand; otherwise append zeros in those slots.
        """
        observation = []
        for machine in self.cluster.machines:
            observation.append(machine.cpu)
            observation.append(machine.memory)
            observation.append(machine.disk)
        # NOTE(review): spelled ``state_paraslist`` here while the config
        # class uses ``state_paramslist`` -- confirm CoolingEquipment's
        # actual attribute name before changing.
        for paramskey in self.cooling_equipment.state_paraslist:
            observation.append(self.cooling_equipment.state_paraslist[paramskey])
        if new_job:
            self.next_task = self.__get_next_task__()
            observation.append(self.next_task.task_config.cpu)
            observation.append(self.next_task.task_config.memory)
            observation.append(self.next_task.task_config.disk)
        else:
            observation.append(0)
            observation.append(0)
            observation.append(0)
        return observation

    def __get__reward__(self):
        # Placeholder reward; a real shaping function is still to be designed.
        return 1

    def job_added_schedule(self):
        """Drain the waiting-task queue after a job arrival.

        For each waiting task the agent picks a machine (action[0]) and new
        cooling control values (action[1:]).
        """
        ob = self.__get_observation__(new_job=True)
        reward = self.__get__reward__()
        while self.cluster.tasks_which_has_waiting_instance:
            action = self.agent.choose_action(ob)
            # Place the task chosen in __get_observation__ on the machine.
            scheduled_machine = self.cluster.machines[action[0]]
            self.next_task.start_task_instance(scheduled_machine)
            print("new task started at ", scheduled_machine)
            action_cooling_params_list = action[1:]
            for i, paramskey in enumerate(self.cooling_equipment.control_paramslist):
                self.cooling_equipment.control_paramslist[paramskey] = action_cooling_params_list[i]
            self.agent.learn(ob, action, reward)
            # NOTE(review): unlike job_finished_schedule this path never
            # calls update_cluster() -- confirm whether that is intentional.
            self.cooling_equipment.update_self()
            ob = self.__get_observation__(new_job=True)
            reward = self.__get__reward__()

    def job_finished_schedule(self):
        """After a job finishes, only the cooling controls are re-chosen."""
        ob = self.__get_observation__(new_job=False)
        reward = self.__get__reward__()
        while self.cluster.tasks_which_has_waiting_instance:
            action = self.agent.choose_action(ob)
            action_cooling_params_list = action[1:]
            for i, paramskey in enumerate(self.cooling_equipment.control_paramslist):
                self.cooling_equipment.control_paramslist[paramskey] = action_cooling_params_list[i]
            self.agent.learn(ob, action, reward)
            self.cooling_equipment.update_self()
            self.cooling_equipment.update_cluster()
            ob = self.__get_observation__(new_job=False)
            reward = self.__get__reward__()

    def run(self):
        """simpy process: react to job-added / job-finished events until the
        simulation reports completion."""
        while not self.simulation.finished:
            yield self.simulation.job_added_event | self.simulation.job_finished_event
            # ``.ok`` distinguishes which event fired -- assumes the
            # simulation re-arms these events each cycle (TODO confirm).
            if self.simulation.job_added_event.ok:
                print("job added")
                self.job_added_schedule()
            else:
                print("job finished")
                self.job_finished_schedule()
        self.destroyed = True

    @property
    def finished(self):
        """True once the broker is done and no unfinished jobs remain."""
        return self.task_broker.destroyed \
            and len(self.cluster.unfinished_jobs) == 0

    def reset(self):
        # Resetting a live simpy simulation is not supported yet.
        pass
| 2.078125 | 2 |
01_scikit-learn-intelex_Intro/lab/scikit_dbscan.py | IntelSoftware/scikit-learn_essentials | 1 | 12765287 |
# Copyright 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Patch scikit-learn with Intel's accelerated implementations before any
# sklearn import so DBSCAN resolves to the optimized version.
from sklearnex import patch_sklearn
patch_sklearn()
import numpy as np
from sklearn.cluster import DBSCAN
from daal4py.oneapi import sycl_context
# Toy dataset: two small clusters and one outlier (the [25, 80] point).
X = np.array([[1., 2.], [2., 2.], [2., 3.],
              [8., 7.], [8., 8.], [25., 80.]], dtype=np.float32)
# Run the clustering on the GPU via a SYCL device context.
with sycl_context("gpu"):
    clustering = DBSCAN(eps=3, min_samples=2).fit(X)
print("DBSCAN components: ", clustering.components_, "\nDBSCAN labels: ",clustering.labels_)
# Persist inputs and results for the follow-up plotting notebook.
resultsDict = {}
resultsDict['X'] = X
resultsDict['labels'] = clustering.labels_
resultsDict['components'] = clustering.components_
import pickle
with open('resultsDict.pkl', 'wb') as handle:
    pickle.dump(resultsDict, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 1.90625 | 2 |
tests/test_shell.py | eanderton/dash | 0 | 12765288 | <gh_stars>0
"""Tests for the dcsh shell module."""
import dcsh.printer as printer
import dcsh.shell as shell
import unittest
from mock import patch
from StringIO import StringIO
class TestDCShell(unittest.TestCase):
    """Unit tests for dcsh.shell.DcShell; output is captured by patching the
    module-level printers with a StylePrinter writing to an in-memory stream."""
    def setUp(self):
        # Minimal settings dict mirroring what dcsh builds from its config.
        self.settings = {
            'sudo': False,
            'debug': False,
            'prompt': 'foo',
            'prompt_style': 'prompt',
            'debug_prompt': 'debug foo',
            'debug_prompt_style': 'debug_prompt',
            'intro': 'baz',
            'dc_commands': {
                'cmd1': {'help': 'cmd1 help'},
            },
            'tasks': {
                'task1': {
                    'help': 'task1 help',
                    'compiled_args': ['gorf'],
                },
            },
            'dc_path': '/usr/bin/docker-compose',
            'environment': {},
            'stylesheet': {
                'prompt': {'color': 'yellow'},
                'debug_prompt': {'color': 'red'},
            }
        }
        self.stream = StringIO()
        p = printer.StylePrinter(self.stream)
        # Patch both modules' printers so all shell output lands in self.stream.
        self._shell_printer = patch('dcsh.shell.printer', p).start()
        self._show_printer = patch('dcsh.show.printer', p).start()
    def tearDown(self):  # noqa: E303
        patch.stopall()
    def test_prompt(self):
        # Prompt is the configured text wrapped in ANSI yellow (stylesheet).
        sh = shell.DcShell(self.settings)
        self.assertEquals(sh.prompt, '\x1b[33mfoo\x1b[0m ')
        self.assertEquals(sh.intro, 'baz')
    def test_debug_prompt(self):
        # Debug mode switches to the red debug prompt.
        self.settings['debug'] = True
        sh = shell.DcShell(self.settings)
        self.assertEquals(sh.prompt, '\x1b[31mdebug foo\x1b[0m ')
        self.assertEquals(sh.intro, 'baz')
    def test_run_command(self):
        # dc_commands are forwarded verbatim to docker-compose.
        sh = shell.DcShell(self.settings)
        with patch('dcsh.shell.run_compose', return_value=None) as fn:
            self.assertIsNone(sh.onecmd('cmd1 foo bar baz'))
            fn.assert_called_once_with('cmd1', 'foo', 'bar', 'baz')
    def test_run_task(self):
        # Tasks expand to their compiled_args before invoking compose.
        sh = shell.DcShell(self.settings)
        with patch('dcsh.shell.run_compose', return_value=None) as fn:
            self.assertIsNone(sh.onecmd('task1 foo bar baz'))
            fn.assert_called_once_with('gorf', 'foo', 'bar', 'baz')
    def test_pipe(self):
        # When stdin is a pipe, prompt and intro are suppressed.
        with patch('stat.S_ISFIFO', return_value=True):
            sh = shell.DcShell(self.settings)
            self.assertEquals(sh.prompt, '')
            self.assertEquals(sh.intro, '')
    def test_exit(self):
        sh = shell.DcShell(self.settings)
        self.assertRaises(shell.ShellExit, sh.onecmd, 'exit')
        self.assertEqual(self.stream.getvalue(), 'Exiting DCSH\n')
    def test_debug_exit(self):
        # Debug mode exits silently (no farewell message).
        self.settings['debug'] = True
        sh = shell.DcShell(self.settings)
        self.assertRaises(shell.ShellExit, sh.onecmd, 'exit')
        self.assertEqual(self.stream.getvalue(), '')
    def test_get_names(self):
        sh = shell.DcShell(self.settings)
        self.assertEqual(sh.get_names(), dir(sh))
    def test_emptyline(self):
        # Empty input must be a no-op (not repeat the last command).
        sh = shell.DcShell(self.settings)
        self.assertIsNone(sh.emptyline())
    def test_do_dc(self):
        sh = shell.DcShell(self.settings)
        with patch('dcsh.shell.run_compose', return_value=None) as fn:
            self.assertIsNone(sh.do_dc('foo bar baz'))
            fn.assert_called_once_with('foo', 'bar', 'baz')
    def test_do_show(self):
        # 'show' ignores its arguments and delegates to do_show().
        sh = shell.DcShell(self.settings)
        with patch('dcsh.shell.do_show') as fn:
            self.assertIsNone(sh.do_show('foo bar baz'))
            fn.assert_called_once_with()
    def test_do_help(self):
        sh = shell.DcShell(self.settings)
        with patch('dcsh.shell.do_help') as fn:
            self.assertIsNone(sh.do_help('foo bar baz'))
            fn.assert_called_once_with()
    def test_do_build(self):
        sh = shell.DcShell(self.settings)
        with patch('dcsh.shell.run_compose', return_value=None) as fn:
            self.assertIsNone(sh.do_build('foo bar baz'))
            fn.assert_called_once_with('build', 'foo', 'bar', 'baz')
    def test_do_EOF(self):
        # EOF (Ctrl-D) signals the command loop to stop.
        sh = shell.DcShell(self.settings)
        self.assertTrue(sh.do_EOF(None))
    def test_cmdloop(self):
        # KeyboardInterrupt inside a command must not kill the loop.
        sh = shell.DcShell(self.settings)
        def do_ctrl_c(cmdargs):
            raise KeyboardInterrupt()
        sh.do_ctrl_c = do_ctrl_c
        sh.cmdqueue = [
            'ctrl_c',
            'exit'
        ]
        sh.cmdloop()
        self.assertEqual(self.stream.getvalue(),
                         'bazKeyboardInterrupt\n' +
                         'Exiting DCSH\n')
| 2.359375 | 2 |
engine/models.py | scwall/cocktail_engine | 2 | 12765289 | #!/usr/bin/python3
from django.core.validators import MinValueValidator, MaxValueValidator
from django.db import models
class SolenoidValve(models.Model):
    """
    Model for the solenoid valves in database
    """
    # Valve slot number on the machine; the hardware drives at most 6 valves.
    number = models.IntegerField(validators=[MinValueValidator(1),
                                             MaxValueValidator(6)])
    # Stepper position associated with this valve -- units unverified, TODO confirm.
    step = models.IntegerField()
    # GPIO pin pair controlling the valve.
    first_pin = models.IntegerField()
    second_pin = models.IntegerField()
class Bottle(models.Model):
    """
    Model for the bottles in database
    """
    name = models.CharField(max_length=80)
    # Marked True once the bottle has run out.
    empty = models.BooleanField(default=False)
    image = models.ImageField(upload_to='bottle_picture', blank=True, null=True, )
    # Each bottle is mounted on exactly one solenoid valve; deleting the
    # valve deletes the bottle record with it.
    solenoid_valve = models.OneToOneField(SolenoidValve, on_delete=models.CASCADE)
    def __str__(self):
        return self.name
class Cocktail(models.Model):
    """
    Model for the cocktails in database
    """
    name = models.CharField(max_length=80)
    description = models.TextField()
    # Ingredients: many-to-many to Bottle through the dose table below.
    bottles = models.ManyToManyField(Bottle,
                                     through='BottlesBelongsCocktails',
                                     related_name='cocktails')
    image = models.ImageField(upload_to='cocktail_picture', blank=True, null=True, )
    def __str__(self):
        return self.name
class BottlesBelongsCocktails(models.Model):
    """
    Model for the relation many to many between cocktail and bottle
    """
    bottle = models.ForeignKey(Bottle, on_delete=models.CASCADE)
    cocktail = models.ForeignKey(Cocktail, on_delete=models.CASCADE)
    # Number of doses of this bottle used in the cocktail.
    dose = models.IntegerField()
    @property
    def bottle_detail(self):
        """
        :return the name of bottle
        """
        return '{}'.format(self.bottle)
    @property
    def cocktail_detail(self):
        """
        :return the name of cocktail
        """
        return '{}'.format(self.cocktail)
    @property
    def dose_detail(self):
        """
        :return the number of dose
        """
        return '{}'.format(self.dose)
    def __str__(self):
        # String form is just the dose count -- NOTE(review): an odd choice
        # for admin listings; confirm before changing.
        return str(self.dose)
| 2.640625 | 3 |
Assignment_1.py | ankit9437/Zelthy | 0 | 12765290 | import smtplib #Python Library
print("Subject?")
sub=input() #Used to take subject
print("Body?")
message = input() #Used for taking message for email
print("Recipient")
receivers_mail = input() #Used to take receiver's mail
print("Sender's Mail?")
sender_mail= input() #Used to take sender's mail
try:
password = input('Enter the password') #Enter password of sender's email ID
smtpObj = smtplib.SMTP('gmail.com',587)
smtpObj.login(sender_mail,password)
smtpObj.sendmail(sender_mail, receivers_mail, message)
print("Successfully sent email") #If the email is sent successfullt
except Exception:
print("Error: unable to send email") #If the email is not successful | 3.5625 | 4 |
napalm_ansible/__init__.py | CPintoB/napalm-automation-napalm-ansible | 0 | 12765291 | <reponame>CPintoB/napalm-automation-napalm-ansible
from __future__ import unicode_literals, print_function
import os
import ansible
from distutils.version import LooseVersion
message = """
To ensure Ansible can use the NAPALM modules you will have
to add the following configurtion to your Ansible configuration
file (ansible.cfg):
[defaults]
library = {path}/modules
{action_plugins}
For more details on ansible's configuration file visit:
https://docs.ansible.com/ansible/latest/intro_configuration.html
"""
def main():
    """Print the ansible.cfg snippet needed to expose the NAPALM modules."""
    path = os.path.dirname(__file__)
    # Ansible versions before 2.3 have no action-plugin support here.
    old_ansible = LooseVersion(ansible.__version__) < LooseVersion('2.3.0.0')
    action_plugins = "" if old_ansible else "action_plugins = {path}/plugins/action".format(path=path)
    print(message.format(path=path, action_plugins=action_plugins).strip())
| 1.882813 | 2 |
api-server/apps/resource/hyperv/hypervisor.py | run4life/hyperv-api-server | 0 | 12765292 | import wmi
class HypervisorClient(object):
    """Python 2 client gathering CPU/memory info from a Hyper-V host via WMI."""
    def get_wmi_client(self, server, user, password):
        # Open a remote WMI connection to the given host.
        conn = wmi.connect_server(server=server, user=user, password=password)
        client = wmi.WMI(wmi=conn)
        return client
    def _get_cpu_info(self,client):
        # cpus = client.query(
        #     "SELECT Architecture, Name, Manufacturer, MaxClockSpeed, "
        #     "NumberOfCores, NumberOfLogicalProcessors FROM Win32_Processor "
        #     "WHERE ProcessorType = 3")
        cpus = client.Win32_Processor()
        cpus_list = []
        for cpu in cpus:
            cpu_info = {'Architecture': cpu.Architecture,
                        'Name': cpu.Name,
                        'Manufacturer': cpu.Manufacturer,
                        'MaxClockSpeed': cpu.MaxClockSpeed,
                        'NumberOfCores': cpu.NumberOfCores,
                        'NumberOfLogicalProcessors':
                            cpu.NumberOfLogicalProcessors}
            cpus_list.append(cpu_info)
        # NOTE(review): this method prints instead of returning cpus_list,
        # so callers (get_hypervisor_list) receive None -- likely a bug.
        print cpus_list
    def _get_memory_info(self, client):
        # mems = client.query("SELECT TotalVisibleMemorySize, "
        #                     "FreePhysicalMemory "
        #                     "FROM win32_operatingsystem")[0]
        # NOTE(review): missing call parentheses -- this binds the WMI class
        # wrapper itself, not a result row; the attribute reads below would
        # not return instance values.  TODO confirm against the wmi module.
        mem_info = client.win32_operatingsystem
        mems_list = []
        mem_info = {'TotalVisibleMemorySize': mem_info.TotalVisibleMemorySize,
                    'FreePhysicalMemory': mem_info.FreePhysicalMemory}
        return mem_info
    def get_hypervisor_list(self):
        # client = self.get_wmi_client("172.30.126.56", "administrator", "<PASSWORD>", r"root\virtualization\v2")
        # vms_obj = client.query("select Name,ElementName,Status,EnabledState from Msvm_ComputerSystem")
        # for vm in vms_obj:
        #     print vm
        # NOTE(review): hard-coded host and credential placeholder below.
        client = self.get_wmi_client("172.30.126.56", "administrator", "<PASSWORD>")
        cpu = self._get_cpu_info(client)
        print cpu
        memory = self._get_memory_info(client)
        print memory
optimizers/layers.py | ppeigne/optimizers | 0 | 12765293 | from typing import Tuple
from initialization import *
from activations import *
class Layer():
    """Abstract base class for network layers.

    Subclasses override the two selector hooks to map an activation name to
    a callable and a matching weight initialiser.
    """

    def __init__(self, n_units, activation):  # , initialization=None):
        self.n_units = n_units
        # Activation/initialisation resolution is deferred to subclasses
        # (the original left these commented out):
        # self.activation = self._select_activation(activation)
        # self.initialization = initialization if initialization else self._select_initialization(activation)

    def _select_activation(self, activation):
        """Resolve an activation name to a callable; subclasses override."""
        raise NotImplementedError

    def _select_initialization(self, activation):
        """Resolve the matching weight initialiser; subclasses override.

        BUG FIX: this hook was misspelled ``_select_initilization`` in the
        base class, so subclass ``_select_initialization`` overrides never
        actually overrode anything.
        """
        raise NotImplementedError
class DeepLayer(Layer):
    """Hidden layer with a named activation, its paired weight initialiser
    and an optional dropout rate."""

    def __init__(self, n_units, activation='relu', dropout=0):
        self.n_units = n_units
        self.activation = self._select_activation(activation)
        self.initialization = self._select_initialization(activation)
        self.dropout_rate = dropout

    def _select_activation(self, activation):
        """Map an activation name to its callable (KeyError on unknown names)."""
        # TODO: tanh / leaky_relu / prelu / elu still unimplemented.
        return {
            'sigmoid': sigmoid,
            'relu': relu,
        }[activation]

    def _select_initialization(self, activation):
        """Pick the weight initialiser that pairs with *activation*."""
        return {
            'sigmoid': initialize_tanh,
            'relu': initialize_relu,
        }[activation]
class Dense(DeepLayer):
    """Fully-connected layer; buffers are created lazily by the Network."""
    #def _init__(self)
    def _generate_params(self, input_dim):
        """Allocate weight/bias/activation buffers and their gradient slots.

        Args:
            input_dim: number of units in the previous layer.
        """
        parameters = {
            'W': self.initialization((self.n_units, input_dim)),
            'b': np.zeros(1),
            'Z': np.zeros(self.n_units),
            'A': np.zeros(self.n_units),
            'g': self.activation
        }
        gradients = {
            'dW': np.zeros((self.n_units, input_dim)),
            'db': np.zeros(1),
            'dZ': np.zeros(self.n_units),
            'dA': np.zeros(self.n_units),
            # NOTE(review): stores the activation itself, not its derivative;
            # backprop needs g'(Z) here (hence the original FIXME).
            'dg': self.activation # FIXME
        }
        return parameters, gradients
class Flatten(Layer):
    """Layer that flattens a multi-dimensional input into a vector.

    Generalised from strictly 2-D inputs to any number of dimensions: the
    unit count is the product of all input dimensions, so existing 2-tuple
    callers behave exactly as before.
    """

    def __init__(self, input_dim: Tuple[int, ...]) -> None:
        n_units = 1
        for dim in input_dim:
            n_units *= dim
        self.n_units = n_units
class Input(Layer):
    """Input layer: just holds the activation slots for the raw features."""

    def __init__(self, input_dim: int) -> None:
        self.n_units = input_dim

    def _generate_params(self, input_dim):
        """Allocate the activation slot and its gradient (nothing else)."""
        return ({'A': np.zeros(self.n_units)},
                {'dA': np.zeros(self.n_units)})
class Network():
    """Simple fully-connected network driver over a list of layer objects."""

    def __init__(self, architecture):
        self.depth = len(architecture)
        self.params, self.gradients = self._generate_params(architecture)

    def _generate_params(self, architecture):
        """Allocate parameter and gradient dicts for every layer."""
        params = []
        gradients = []
        for l in range(self.depth):
            # For l == 0 this wraps to the last layer, but the Input layer
            # ignores input_dim, so the value is unused.
            input_dim = architecture[l - 1].n_units
            layer_params, layer_gradients = architecture[l]._generate_params(input_dim)
            params.append(layer_params)
            gradients.append(layer_gradients)
        return params, gradients

    def forward(self, X):
        """Forward pass; X has shape (n_features, n_samples)."""
        self.params[0]['A'] = X
        for l in range(1, self.depth):
            self.params[l]['Z'] = self.params[l]['W'] @ self.params[l - 1]['A'] + self.params[l]['b']
            self.params[l]['A'] = self.params[l]['g'](self.params[l]['Z'])
        return self.params[self.depth - 1]['A']

    def gradient(self, X, y, result):
        """Backward pass filling the per-layer gradient dicts.

        BUG FIX: the original loop was ``range(self.depth, 0)``, which is an
        EMPTY range, so no gradients were ever computed; backprop must walk
        from the last layer down to layer 1.
        """
        _, m = X.shape
        # FIXME (kept from original): dA of the output layer should come from
        # the loss derivative, not the raw activations.
        self.gradients[self.depth - 1]['dA'] = self.params[self.depth - 1]['A']
        for l in range(self.depth - 1, 0, -1):
            self.gradients[l]['dZ'] = self.gradients[l]['dA'] * self.gradients[l]['dg'](self.params[l]['Z'])
            self.gradients[l]['dW'] = (self.gradients[l]['dZ'] @ self.params[l - 1]['A'].T) / m
            self.gradients[l]['db'] = (np.sum(self.gradients[l]['dZ'], axis=1, keepdims=True)) / m
            self.gradients[l - 1]['dA'] = self.params[l]['W'].T @ self.gradients[l]['dZ']
# Smoke test: build a small 12 -> 2 -> 8 -> 2 network and run a forward pass
# on random data.
x = [Input(12),
     Dense(2, activation='sigmoid', dropout=.2),
     Dense(8),
     Dense(2, activation='sigmoid', dropout=.5)]
model = Network(x)
#for i,p in enumerate(model.params):
#    print(f"W[{i}]",p['W'].shape)
#    print(f"A[{i}]",p['A'].shape)
#    print(i)
#    print(p)
X = np.random.random((12,10)) * 10
res = model.forward(X)
print(res)
# Random binary targets, shape (2 classes, 10 samples).
y_ = (np.random.random((2, 10)) > .5) * 1
g = model.gradient(X, y_, res)
# ======================================================================
# Tracked.py — FLUX-SE/TrackedHQ_python_wrapper
# ======================================================================
import requests
from endpoints.projects import Projects
from endpoints.lists import Lists
from endpoints.todos import Todos
from endpoints.labels import Labels
class Tracked:
    """Thin client for the Tracked (Basecamp add-on) HTTP API.

    Exposes endpoint groups (projects, lists, todos, labels) as properties,
    each bound to this client's credentials and shared requests session.
    """

    def __init__(self, email_address: str, api_token: str, basecamp_account_id: int):
        self.email_address = email_address
        self.api_token = api_token
        self.basecamp_account_id = basecamp_account_id
        # One Session shared by all endpoint objects (connection pooling).
        self.session = requests.Session()

    @property
    def projects(self):
        """Projects endpoint group (endpoints.projects.Projects bound to this client)."""
        return Projects(self)

    @property
    def lists(self):
        """Lists endpoint group."""
        return Lists(self)

    @property
    def todos(self):
        """Todos endpoint group."""
        return Todos(self)

    @property
    def labels(self):
        """Labels endpoint group."""
        return Labels(self)
"""
========================================================
Examples
========================================================
tracked = Tracked(...)
# === PROJECTS ===
# Get all projects
tracked.projects.list()
# === LISTS ===
# Get all lists for a project
tracked.lists.list(project_id)
# === TODOS ===
# List todos from project
tracked.todos.list(project_id)
# Update Kanban list and/or position for a to-do
tracked.todos.update(project_id, todo_id, position, list_name)
# === LABELS ===
# Create a label
tracked.labels.create(project_id, "TestLabel", "#00ffff")
# List labels
tracked.labels.list(project_id)
# Add a label to a todo
tracked.labels.add(project_id, label_id, todo_id)
# Get labels of a todo
tracked.labels.get(basecamp_project_id, basecamp_todo_id)
========================================================
"""
# (end of Tracked.py)
blog/admin.py | KimLHill/MSP4 | 0 | 12765295 | from django.contrib import admin
from .models import Blog
# Blog information
# Blog information
class BlogAdmin(admin.ModelAdmin):
    """Django admin configuration for the Blog model."""

    # Columns shown in the admin change-list view.
    list_display = (
        'name',
        'author',
        'description',
        'image',
    )
    # Default ordering of the change list.
    ordering = ('name',)


admin.site.register(Blog, BlogAdmin)
# ======================================================================
# examples/graph/gnn_citations.py — IMvision12/keras-io
# ======================================================================
"""
Title: Node Classification with Graph Neural Networks
Author: [<NAME>](https://www.linkedin.com/in/khalid-salama-24403144/)
Date created: 2021/05/30
Last modified: 2021/05/30
Description: Implementing a graph neural network model for predicting the topic of a paper given its citations.
"""
"""
## Introduction
Many datasets in various machine learning (ML) applications have structural relationships
between their entities, which can be represented as graphs. Such applications include
social and communication networks analysis, traffic prediction, and fraud detection.
[Graph representation Learning](https://www.cs.mcgill.ca/~wlh/grl_book/)
aims to build and train models for graph datasets to be used for a variety of ML tasks.
This example demonstrates a simple implementation of a [Graph Neural Network](https://arxiv.org/pdf/1901.00596.pdf)
(GNN) model. The model is used for a node prediction task on the [Cora dataset](https://relational.fit.cvut.cz/dataset/CORA)
to predict the subject of a paper given its words and citations network.
Note that, **we implement a Graph Convolution Layer from scratch** to provide better
understanding of how they work. However, there is a number of specialized TensorFlow-based
libraries that provide rich GNN APIs, such as [Spectral](https://graphneural.network/),
[StellarGraph](https://stellargraph.readthedocs.io/en/stable/README.html), and
[GraphNets](https://github.com/deepmind/graph_nets).
"""
"""
## Setup
"""
import os
import pandas as pd
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
"""
## Prepare the Dataset
The Cora dataset consists of 2,708 scientific papers classified into one of seven classes.
The citation network consists of 5,429 links. Each paper has a binary word vector of size
1,433, indicating the presence of a corresponding word.
### Download the dataset
The dataset has two tab-separated files: `cora.cites` and `cora.content`.
1. The `cora.cites` includes the citation records with two columns:
`cited_paper_id` (target) and `citing_paper_id` (source).
2. The `cora.content` includes the paper content records with 1,435 columns:
`paper_id`, `subject`, and 1,433 binary features.
Let's download the dataset.
"""
zip_file = keras.utils.get_file(
fname="cora.tgz",
origin="https://linqs-data.soe.ucsc.edu/public/lbc/cora.tgz",
extract=True,
)
data_dir = os.path.join(os.path.dirname(zip_file), "cora")
"""
### Process and visualize the dataset
Then we load the citations data into a Pandas DataFrame.
"""
citations = pd.read_csv(
os.path.join(data_dir, "cora.cites"),
sep="\t",
header=None,
names=["target", "source"],
)
print("Citations shape:", citations.shape)
"""
Now we display a sample of the `citations` DataFrame.
The `target` column includes the paper ids cited by the paper ids in the `source` column.
"""
citations.sample(frac=1).head()
"""
Now let's load the papers data into a Pandas DataFrame.
"""
column_names = ["paper_id"] + [f"term_{idx}" for idx in range(1433)] + ["subject"]
papers = pd.read_csv(
os.path.join(data_dir, "cora.content"),
sep="\t",
header=None,
names=column_names,
)
print("Papers shape:", papers.shape)
"""
Now we display a sample of the `papers` DataFrame. The DataFrame includes the `paper_id`
and the `subject` columns, as well as 1,433 binary column representing whether a term exists
in the paper or not.
"""
print(papers.sample(5).T)
"""
Let's display the count of the papers in each subject.
"""
print(papers.subject.value_counts())
"""
We convert the paper ids and the subjects into zero-based indices.
"""
class_values = sorted(papers["subject"].unique())
class_idx = {name: id for id, name in enumerate(class_values)}
paper_idx = {name: idx for idx, name in enumerate(sorted(papers["paper_id"].unique()))}
papers["paper_id"] = papers["paper_id"].apply(lambda name: paper_idx[name])
citations["source"] = citations["source"].apply(lambda name: paper_idx[name])
citations["target"] = citations["target"].apply(lambda name: paper_idx[name])
papers["subject"] = papers["subject"].apply(lambda value: class_idx[value])
"""
Now let's visualize the citation graph. Each node in the graph represents a paper,
and the color of the node corresponds to its subject. Note that we only show a sample of
the papers in the dataset.
"""
plt.figure(figsize=(10, 10))
colors = papers["subject"].tolist()
cora_graph = nx.from_pandas_edgelist(citations.sample(n=1500))
subjects = list(papers[papers["paper_id"].isin(list(cora_graph.nodes))]["subject"])
nx.draw_spring(cora_graph, node_size=15, node_color=subjects)
"""
### Split the dataset into stratified train and test sets
"""
train_data, test_data = [], []
for _, group_data in papers.groupby("subject"):
# Select around 50% of the dataset for training.
random_selection = np.random.rand(len(group_data.index)) <= 0.5
train_data.append(group_data[random_selection])
test_data.append(group_data[~random_selection])
train_data = pd.concat(train_data).sample(frac=1)
test_data = pd.concat(test_data).sample(frac=1)
print("Train data shape:", train_data.shape)
print("Test data shape:", test_data.shape)
"""
## Implement Train and Evaluate Experiment
"""
hidden_units = [32, 32]
learning_rate = 0.01
dropout_rate = 0.5
num_epochs = 300
batch_size = 256
"""
This function compiles and trains an input model using the given training data.
"""
def run_experiment(model, x_train, y_train):
    """Compile and fit *model* on (x_train, y_train); return the Keras History.

    Uses the module-level hyperparameters (learning_rate, num_epochs,
    batch_size) and early stopping on validation accuracy with
    best-weight restoration.
    """
    optimizer = keras.optimizers.Adam(learning_rate)
    loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    metrics = [keras.metrics.SparseCategoricalAccuracy(name="acc")]
    model.compile(optimizer=optimizer, loss=loss_fn, metrics=metrics)

    # Stop when validation accuracy plateaus; keep the best weights seen.
    early_stopping = keras.callbacks.EarlyStopping(
        monitor="val_acc", patience=50, restore_best_weights=True
    )

    return model.fit(
        x=x_train,
        y=y_train,
        epochs=num_epochs,
        batch_size=batch_size,
        validation_split=0.15,
        callbacks=[early_stopping],
    )
"""
This function displays the loss and accuracy curves of the model during training.
"""
def display_learning_curves(history):
    """Plot training/validation loss and accuracy curves side by side."""
    fig, axes = plt.subplots(1, 2, figsize=(15, 5))
    for ax, metric, label in zip(axes, ("loss", "acc"), ("Loss", "Accuracy")):
        ax.plot(history.history[metric])
        ax.plot(history.history["val_" + metric])
        ax.legend(["train", "test"], loc="upper right")
        ax.set_xlabel("Epochs")
        ax.set_ylabel(label)
    plt.show()
"""
## Implement Feedforward Network (FFN) Module
We will use this module in the baseline and the GNN models.
"""
def create_ffn(hidden_units, dropout_rate, name=None):
    """Build a feedforward block: [BatchNorm -> Dropout -> Dense(gelu)] per width."""
    blocks = []
    for units in hidden_units:
        blocks.extend(
            [
                layers.BatchNormalization(),
                layers.Dropout(dropout_rate),
                layers.Dense(units, activation=tf.nn.gelu),
            ]
        )
    return keras.Sequential(blocks, name=name)
"""
## Build a Baseline Neural Network Model
### Prepare the data for the baseline model
"""
feature_names = set(papers.columns) - {"paper_id", "subject"}
num_features = len(feature_names)
num_classes = len(class_idx)
# Create train and test features as a numpy array.
x_train = train_data[feature_names].to_numpy()
x_test = test_data[feature_names].to_numpy()
# Create train and test targets as a numpy array.
y_train = train_data["subject"]
y_test = test_data["subject"]
"""
### Implement a baseline classifier
We add five FFN blocks with skip connections, so that we generate a baseline model with
roughly the same number of parameters as the GNN models to be built later.
"""
def create_baseline_model(hidden_units, num_classes, dropout_rate=0.2):
    """Build the baseline MLP classifier: five FFN blocks with skip connections.

    :param hidden_units: hidden-layer sizes used inside each FFN block.
    :param num_classes: number of output classes (logits dimension).
    :param dropout_rate: dropout applied inside each FFN block.
    :returns: an uncompiled keras.Model mapping word vectors to class logits.
    """
    inputs = layers.Input(shape=(num_features,), name="input_features")
    # First FFN block (fixed name — the original used an f-string with no placeholder).
    x = create_ffn(hidden_units, dropout_rate, name="ffn_block1")(inputs)
    for block_idx in range(4):
        # Create an FFN block.
        x1 = create_ffn(hidden_units, dropout_rate, name=f"ffn_block{block_idx + 2}")(x)
        # Add skip connection.
        x = layers.Add(name=f"skip_connection{block_idx + 2}")([x, x1])
    # Compute logits.
    logits = layers.Dense(num_classes, name="logits")(x)
    # Create the model.
    return keras.Model(inputs=inputs, outputs=logits, name="baseline")
baseline_model = create_baseline_model(hidden_units, num_classes, dropout_rate)
baseline_model.summary()
"""
### Train the baseline classifier
"""
history = run_experiment(baseline_model, x_train, y_train)
"""
Let's plot the learning curves.
"""
display_learning_curves(history)
"""
Now we evaluate the baseline model on the test data split.
"""
_, test_accuracy = baseline_model.evaluate(x=x_test, y=y_test, verbose=0)
print(f"Test accuracy: {round(test_accuracy * 100, 2)}%")
"""
### Examine the baseline model predictions
Let's create new data instances by randomly generating binary word vectors with respect to
the word presence probabilities.
"""
def generate_random_instances(num_instances):
    """Sample random binary word vectors matching the training word frequencies.

    Each vocabulary position is set to 1 with probability equal to that word's
    frequency in the module-level training features ``x_train``.

    :param num_instances: number of instances to generate.
    :returns: int array of shape (num_instances, vocab_size).
    """
    token_probability = x_train.mean(axis=0)
    # Vectorized: one uniform draw per (instance, token) instead of a Python loop.
    draws = np.random.uniform(size=(num_instances, len(token_probability)))
    return (draws <= token_probability).astype(int)
def display_class_probabilities(probabilities):
    """Pretty-print per-class probabilities for each instance.

    :param probabilities: iterable of per-instance probability vectors,
        ordered like the module-level ``class_values`` list.
    """
    for instance_idx, probs in enumerate(probabilities):
        print(f"Instance {instance_idx + 1}:")
        # Renamed the inner loop variable: the original reused ``class_idx``,
        # shadowing the module-level class-name -> index mapping of that name.
        for cls_idx, prob in enumerate(probs):
            print(f"- {class_values[cls_idx]}: {round(prob * 100, 2)}%")
"""
Now we show the baseline model predictions given these randomly generated instances.
"""
new_instances = generate_random_instances(num_classes)
logits = baseline_model.predict(new_instances)
probabilities = keras.activations.softmax(tf.convert_to_tensor(logits)).numpy()
display_class_probabilities(probabilities)
"""
## Build a Graph Neural Network Model
### Prepare the data for the graph model
Preparing and loading the graphs data into the model for training is the most challenging
part in GNN models, which is addressed in different ways by the specialised libraries.
In this example, we show a simple approach for preparing and using graph data that is suitable
if your dataset consists of a single graph that fits entirely in memory.
The graph data is represented by the `graph_info` tuple, which consists of the following
three elements:
1. `node_features`: This is a `[num_nodes, num_features]` NumPy array that includes the
node features. In this dataset, the nodes are the papers, and the `node_features` are the
word-presence binary vectors of each paper.
2. `edges`: This is `[num_edges, num_edges]` NumPy array representing a sparse
[adjacency matrix](https://en.wikipedia.org/wiki/Adjacency_matrix#:~:text=In%20graph%20theory%20and%20computer,with%20zeros%20on%20its%20diagonal.)
of the links between the nodes. In this example, the links are the citations between the papers.
3. `edge_weights` (optional): This is a `[num_edges]` NumPy array that includes the edge weights, which *quantify*
the relationships between nodes in the graph. In this example, there are no weights for the paper citations.
"""
# Create an edges array (sparse adjacency matrix) of shape [2, num_edges].
edges = citations[["source", "target"]].to_numpy().T
# Create an edge weights array of ones (citation links are unweighted).
edge_weights = tf.ones(shape=edges.shape[1])
# Create a node features array of shape [num_nodes, num_features];
# rows are sorted by paper_id so row i corresponds to graph node i.
node_features = tf.cast(
    papers.sort_values("paper_id")[feature_names].to_numpy(), dtype=tf.dtypes.float32
)
# Create graph info tuple with node_features, edges, and edge_weights.
graph_info = (node_features, edges, edge_weights)
print("Edges shape:", edges.shape)
print("Nodes shape:", node_features.shape)
"""
### Implement a graph convolution layer
We implement a graph convolution module as a [Keras Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer?version=nightly).
Our `GraphConvLayer` performs the following steps:
1. **Prepare**: The input node representations are processed using a FFN to produce a *message*. You can simplify
the processing by only applying linear transformation to the representations.
2. **Aggregate**: The messages of the neighbours of each node are aggregated with
respect to the `edge_weights` using a *permutation invariant* pooling operation, such as *sum*, *mean*, and *max*,
to prepare a single aggregated message for each node. See, for example, [tf.math.unsorted_segment_sum](https://www.tensorflow.org/api_docs/python/tf/math/unsorted_segment_sum)
APIs used to aggregate neighbour messages.
3. **Update**: The `node_repesentations` and `aggregated_messages`—both of shape `[num_nodes, representation_dim]`—
are combined and processed to produce the new state of the node representations (node embeddings).
If `combination_type` is `gru`, the `node_repesentations` and `aggregated_messages` are stacked to create a sequence,
then processed by a GRU layer. Otherwise, the `node_repesentations` and `aggregated_messages` are added
or concatenated, then processed using a FFN.
The technique implemented use ideas from [Graph Convolutional Networks](https://arxiv.org/abs/1609.02907),
[GraphSage](https://arxiv.org/abs/1706.02216), [Graph Isomorphism Network](https://arxiv.org/abs/1810.00826),
[Simple Graph Networks](https://arxiv.org/abs/1902.07153), and
[Gated Graph Sequence Neural Networks](https://arxiv.org/abs/1511.05493).
Two other key techniques that are not covered are [Graph Attention Networks](https://arxiv.org/abs/1710.10903)
and [Message Passing Neural Networks](https://arxiv.org/abs/1704.01212).
"""
class GraphConvLayer(layers.Layer):
    """Graph convolution layer: prepare -> aggregate -> update.

    Messages are computed from neighbour representations, aggregated per
    node with a permutation-invariant reduction (sum/mean/max), and combined
    with the node's own representation (gru/concat/add) to produce new
    node embeddings.
    """

    def __init__(
        self,
        hidden_units,
        dropout_rate=0.2,
        aggregation_type="mean",
        combination_type="concat",
        normalize=False,
        *args,
        **kwargs,
    ):
        super(GraphConvLayer, self).__init__(*args, **kwargs)

        self.aggregation_type = aggregation_type
        self.combination_type = combination_type
        self.normalize = normalize

        # FFN that turns neighbour representations into messages.
        self.ffn_prepare = create_ffn(hidden_units, dropout_rate)
        # NOTE(review): "gated" here selects the GRU update path, while
        # update()/call() test for "gru" — confirm which name is intended.
        if self.combination_type == "gated":
            self.update_fn = layers.GRU(
                units=hidden_units,
                activation="tanh",
                recurrent_activation="sigmoid",
                dropout=dropout_rate,
                return_state=True,
                recurrent_dropout=dropout_rate,
            )
        else:
            self.update_fn = create_ffn(hidden_units, dropout_rate)

    def prepare(self, node_repesentations, weights=None):
        """Turn neighbour representations into (optionally weighted) messages."""
        # node_repesentations shape is [num_edges, embedding_dim].
        messages = self.ffn_prepare(node_repesentations)
        if weights is not None:
            messages = messages * tf.expand_dims(weights, -1)
        return messages

    def aggregate(self, node_indices, neighbour_messages):
        """Reduce each node's incoming messages with the configured reduction."""
        # node_indices shape is [num_edges].
        # neighbour_messages shape: [num_edges, representation_dim].
        num_nodes = tf.math.reduce_max(node_indices) + 1
        if self.aggregation_type == "sum":
            aggregated_message = tf.math.unsorted_segment_sum(
                neighbour_messages, node_indices, num_segments=num_nodes
            )
        elif self.aggregation_type == "mean":
            aggregated_message = tf.math.unsorted_segment_mean(
                neighbour_messages, node_indices, num_segments=num_nodes
            )
        elif self.aggregation_type == "max":
            aggregated_message = tf.math.unsorted_segment_max(
                neighbour_messages, node_indices, num_segments=num_nodes
            )
        else:
            raise ValueError(f"Invalid aggregation type: {self.aggregation_type}.")

        return aggregated_message

    def update(self, node_repesentations, aggregated_messages):
        """Combine each node's representation with its aggregated messages."""
        # node_repesentations shape is [num_nodes, representation_dim].
        # aggregated_messages shape is [num_nodes, representation_dim].
        if self.combination_type == "gru":
            # Create a sequence of two elements for the GRU layer.
            h = tf.stack([node_repesentations, aggregated_messages], axis=1)
        elif self.combination_type == "concat":
            # Concatenate the node_repesentations and aggregated_messages.
            h = tf.concat([node_repesentations, aggregated_messages], axis=1)
        elif self.combination_type == "add":
            # Add node_repesentations and aggregated_messages.
            h = node_repesentations + aggregated_messages
        else:
            raise ValueError(f"Invalid combination type: {self.combination_type}.")

        # Apply the processing function.
        node_embeddings = self.update_fn(h)
        if self.combination_type == "gru":
            node_embeddings = tf.unstack(node_embeddings, axis=1)[-1]

        if self.normalize:
            node_embeddings = tf.nn.l2_normalize(node_embeddings, axis=-1)
        return node_embeddings

    def call(self, inputs):
        """Process the inputs to produce the node_embeddings.

        inputs: a tuple of three elements: node_repesentations, edges, edge_weights.
        Returns: node_embeddings of shape [num_nodes, representation_dim].
        """
        node_repesentations, edges, edge_weights = inputs
        # Get node_indices (source) and neighbour_indices (target) from edges.
        node_indices, neighbour_indices = edges[0], edges[1]
        # neighbour_repesentations shape is [num_edges, representation_dim].
        neighbour_repesentations = tf.gather(node_repesentations, neighbour_indices)

        # Prepare the messages of the neighbours.
        neighbour_messages = self.prepare(neighbour_repesentations, edge_weights)
        # Aggregate the neighbour messages.
        aggregated_messages = self.aggregate(node_indices, neighbour_messages)
        # Update the node embedding with the neighbour messages.
        return self.update(node_repesentations, aggregated_messages)
"""
### Implement a graph neural network node classifier
The GNN classification model follows the [Design Space for Graph Neural Networks](https://arxiv.org/abs/2011.08843) approach,
as follows:
1. Apply preprocessing using FFN to the node features to generate initial node representations.
2. Apply one or more graph convolutional layer, with skip connections, to the node representation
to produce node embeddings.
3. Apply post-processing using FFN to the node embeddings to generate the final node embeddings.
4. Feed the node embeddings in a Softmax layer to predict the node class.
Each graph convolutional layer added captures information from a further level of neighbours.
However, adding many graph convolutional layers can cause oversmoothing, where the model
produces similar embeddings for all the nodes.
Note that the `graph_info` passed to the constructor of the Keras model, and used as a *property*
of the Keras model object, rather than input data for training or prediction.
The model will accept a **batch** of `node_indices`, which are used to lookup the
node features and neighbours from the `graph_info`.
"""
class GNNNodeClassifier(tf.keras.Model):
    """GNN node classifier: preprocess -> 2x GraphConv (skip) -> postprocess -> logits.

    The graph itself (node features, edges, edge weights) is held as model
    state; the model's *input* is a batch of node indices to classify.
    """

    def __init__(
        self,
        graph_info,
        num_classes,
        hidden_units,
        aggregation_type="sum",
        combination_type="concat",
        dropout_rate=0.2,
        normalize=True,
        *args,
        **kwargs,
    ):
        super(GNNNodeClassifier, self).__init__(*args, **kwargs)

        # Unpack graph_info to three elements: node_features, edges, and edge_weight.
        node_features, edges, edge_weights = graph_info
        self.node_features = node_features
        self.edges = edges
        self.edge_weights = edge_weights
        # Set edge_weights to ones if not provided.
        if self.edge_weights is None:
            self.edge_weights = tf.ones(shape=edges.shape[1])
        # Scale edge_weights to sum to 1.
        self.edge_weights = self.edge_weights / tf.math.reduce_sum(self.edge_weights)

        # Create a process layer.
        self.preprocess = create_ffn(hidden_units, dropout_rate, name="preprocess")
        # Create the first GraphConv layer.
        self.conv1 = GraphConvLayer(
            hidden_units,
            dropout_rate,
            aggregation_type,
            combination_type,
            normalize,
            name="graph_conv1",
        )
        # Create the second GraphConv layer.
        self.conv2 = GraphConvLayer(
            hidden_units,
            dropout_rate,
            aggregation_type,
            combination_type,
            normalize,
            name="graph_conv2",
        )
        # Create a postprocess layer.
        self.postprocess = create_ffn(hidden_units, dropout_rate, name="postprocess")
        # Create a compute logits layer.
        self.compute_logits = layers.Dense(units=num_classes, name="logits")

    def call(self, input_node_indices):
        """Return logits of shape [len(input_node_indices), num_classes]."""
        # Preprocess the node_features to produce node representations.
        x = self.preprocess(self.node_features)
        # Apply the first graph conv layer.
        x1 = self.conv1((x, self.edges, self.edge_weights))
        # Skip connection.
        x = x1 + x
        # Apply the second graph conv layer.
        x2 = self.conv2((x, self.edges, self.edge_weights))
        # Skip connection.
        x = x2 + x
        # Postprocess node embedding.
        x = self.postprocess(x)
        # Fetch node embeddings for the input node_indices.
        node_embeddings = tf.gather(x, input_node_indices)
        # Compute logits
        return self.compute_logits(node_embeddings)
"""
Let's test instantiating and calling the GNN model.
Notice that if you provide `N` node indices, the output will be a tensor of shape `[N, num_classes]`,
regardless of the size of the graph.
"""
gnn_model = GNNNodeClassifier(
graph_info=graph_info,
num_classes=num_classes,
hidden_units=hidden_units,
dropout_rate=dropout_rate,
name="gnn_model",
)
print("GNN output shape:", gnn_model([1, 10, 100]))
gnn_model.summary()
"""
### Train the GNN model
Note that we use the standard *supervised* cross-entropy loss to train the model.
However, we can add another *self-supervised* loss term for the generated node embeddings
that makes sure that neighbouring nodes in graph have similar representations, while faraway
nodes have dissimilar representations.
"""
x_train = train_data.paper_id.to_numpy()
history = run_experiment(gnn_model, x_train, y_train)
"""
Let's plot the learning curves
"""
display_learning_curves(history)
"""
Now we evaluate the GNN model on the test data split.
The results may vary depending on the training sample, however the GNN model always outperforms
the baseline model in terms of the test accuracy.
"""
x_test = test_data.paper_id.to_numpy()
_, test_accuracy = gnn_model.evaluate(x=x_test, y=y_test, verbose=0)
print(f"Test accuracy: {round(test_accuracy * 100, 2)}%")
"""
### Examine the GNN model predictions
Let's add the new instances as nodes to the `node_features`, and generate links
(citations) to existing nodes.
"""
# First we add the N new_instances as nodes to the graph
# by appending the new_instance to node_features.
num_nodes = node_features.shape[0]
new_node_features = np.concatenate([node_features, new_instances])
# Second we add the M edges (citations) from each new node to a set
# of existing nodes in a particular subject
new_node_indices = [i + num_nodes for i in range(num_classes)]
new_citations = []
for subject_idx, group in papers.groupby("subject"):
subject_papers = list(group.paper_id)
# Select random x papers specific subject.
selected_paper_indices1 = np.random.choice(subject_papers, 5)
# Select random y papers from any subject (where y < x).
selected_paper_indices2 = np.random.choice(list(papers.paper_id), 2)
# Merge the selected paper indices.
selected_paper_indices = np.concatenate(
[selected_paper_indices1, selected_paper_indices2], axis=0
)
# Create edges between a citing paper idx and the selected cited papers.
citing_paper_indx = new_node_indices[subject_idx]
for cited_paper_idx in selected_paper_indices:
new_citations.append([citing_paper_indx, cited_paper_idx])
new_citations = np.array(new_citations).T
new_edges = np.concatenate([edges, new_citations], axis=1)
"""
Now let's update the `node_features` and the `edges` in the GNN model.
"""
print("Original node_features shape:", gnn_model.node_features.shape)
print("Original edges shape:", gnn_model.edges.shape)
gnn_model.node_features = new_node_features
gnn_model.edges = new_edges
gnn_model.edge_weights = tf.ones(shape=new_edges.shape[1])
print("New node_features shape:", gnn_model.node_features.shape)
print("New edges shape:", gnn_model.edges.shape)
logits = gnn_model.predict(tf.convert_to_tensor(new_node_indices))
probabilities = keras.activations.softmax(tf.convert_to_tensor(logits)).numpy()
display_class_probabilities(probabilities)
"""
Notice that the probabilities of the expected subjects
(to which several citations are added) are higher compared to the baseline model.
"""
# ======================================================================
# cris/reviews/controller.py — umarmiti/COMP-4350--Group-8
# ======================================================================
from flask import Blueprint, jsonify, render_template, request, flash, session
from cris import db
from model import Review
from decimal import *
from cris.users.controller import check_follower
from cris.users.model import User
mod = Blueprint('reviews', __name__, url_prefix='/reviews')
@mod.route('/_submit_review', methods=['POST'])
def submit_review():
    """Create a review from the POSTed JSON body and persist it.

    Expects keys: cid, rscr, rdesc, rvote, upvote, downvote.
    The review is attributed to the logged-in user, if any.
    """
    if request.method == 'POST':
        payload = request.json
        cid = payload['cid']
        rscr = payload['rscr']
        rdesc = payload['rdesc']
        rvote = payload['rvote']
        upvote = payload['upvote']
        downvote = payload['downvote']
        # Attribute the review to the logged-in user when available.
        username = session.get('username')
        review = Review(cid, rscr, rdesc, rvote, upvote, downvote, username)
        db.session.add(review)
        db.session.commit()
        return jsonify(rdesc=rdesc, rscr=rscr, rvote=rvote, upvote=upvote, downvote=downvote)
@mod.route('/_delete_review', methods=['POST'])
def delete_review():
    """Delete the review whose id is given in the JSON body."""
    if request.method == 'POST':
        review_id = request.json['id']
        review = Review.query.filter_by(id=review_id).first()
        if not review:
            return jsonify(success='false')
        db.session.delete(review)
        db.session.commit()
        flash('Review was deleted successfully', 'success')
        return jsonify(success='true')
@mod.route('/_update_review', methods=['POST'])
def update_review():
    """Update an existing review's score/description/vote fields from JSON."""
    if request.method == 'POST':
        review_id = request.json['id']
        review = Review.query.filter_by(id=review_id).first()
        if not review:
            return "Update Failed"
        # Copy the editable fields straight from the request body.
        for field in ('rscr', 'rdesc', 'rvote', 'upvote', 'downvote'):
            setattr(review, field, request.json[field])
        db.session.commit()
        return jsonify(id=review_id, rdesc=review.rdesc, rscr=review.rscr,
                       rvote=review.rvote, upvote=review.upvote,
                       downvote=review.downvote)
@mod.route('/_query_by_course')
def query_by_course():
    """Return all reviews for a course, best-rated first.

    When a user is logged in, reviews written by people they follow are
    flagged (``followed``) and stably sorted to the top. The original
    duplicated this loop across logged-in/anonymous branches; both collapse
    to the same code once ``following`` defaults to an empty list.
    """
    results = []
    key = request.args.get('key', '')
    reviews = Review.query.filter_by(cid=key).all()

    # Users the current session's user follows (empty when not logged in).
    following = []
    if 'username' in session:
        curr_user = User.query.get(session['username'])
        if curr_user is not None:
            following = curr_user.get_followers()

    for review in reviews:
        entry = review.serialize
        # Flag reviews authored by someone the current user follows; skip
        # the per-review User lookup entirely when nobody is followed.
        entry['followed'] = bool(
            review.username is not None
            and following
            and User.query.get(review.username) in following
        )
        results.append(entry)

    # Primary sort: review rating, descending.
    results = sorted(results, key=lambda k: k['rvote'], reverse=True)
    # Secondary (stable) sort: followed reviewers first, when applicable.
    if following:
        results = sorted(results, key=lambda k: k['followed'], reverse=True)
    return jsonify(reviews=results)
@mod.route('/_query_by_user')
def query_by_user():
    """Return all reviews written by the username given in the query string."""
    username = request.args.get('key', '')
    matches = Review.query.filter_by(username=username).all()
    return jsonify(reviews=[review.serialize for review in matches])
@mod.route('/_vote', methods=['POST'])
def calculate_vote():
    """Apply an up/down vote to a review and recompute its score.

    JSON body: ``key`` (review id), ``index`` (client-side row index, echoed
    back), and the current ``upvote``/``downvote`` counts — a non-null
    ``upvote`` means an upvote was cast, otherwise a downvote is assumed.
    """
    if request.method == 'POST':
        pk = request.json['key']
        num = request.json['index']
        upvote = request.json['upvote']
        downvote = request.json['downvote']
        r = Review.query.get(pk)
        if upvote is not None:
            upvote += 1
            r.upvote = upvote
            db.session.commit()
        else:
            # NOTE(review): assumes ``downvote`` is non-null whenever
            # ``upvote`` is null — TODO confirm with the client contract.
            downvote += 1
            r.downvote = downvote
            db.session.commit()
        # NOTE(review): this mutates the process-wide Decimal context
        # precision; consider decimal.localcontext() instead.
        getcontext().prec = 2
        # Score is the up/down ratio; with no downvotes it is just the upvotes.
        if r.downvote:
            r.rvote = Decimal(r.upvote) / Decimal(r.downvote)
        else:
            r.rvote = r.upvote
        db.session.commit()
        return jsonify(score=r.rvote, up=r.upvote, down=r.downvote, i=num)
# ======================================================================
# jinahub/encoders/audio/VGGISHAudioEncoder/tests/unit/test_exec.py
# — vivek2301/executors
# ======================================================================
from pathlib import Path
import librosa
import pytest
from jina import Document, DocumentArray, Executor
from tensorflow.python.framework import ops
from ...vggish import vggish_input
from ...vggish_audio_encoder import VggishAudioEncoder
def test_config():
    """The executor YAML config should resolve both model checkpoint paths."""
    ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
    assert str(ex.vgg_model_path).endswith('vggish_model.ckpt')
    assert str(ex.pca_model_path).endswith('vggish_pca_params.ckpt')
def test_embedding_dimension():
    """Encoding a sample waveform should yield a 128-d VGGish embedding."""
    x_audio, sample_rate = librosa.load(
        Path(__file__).parents[1] / 'test_data/sample.wav'
    )
    log_mel_examples = vggish_input.waveform_to_examples(x_audio, sample_rate)
    doc = DocumentArray([Document(blob=log_mel_examples)])
    # Reset TF's default graph so repeated encoder constructions start clean.
    ops.reset_default_graph()
    model = VggishAudioEncoder()
    model.encode(doc, parameters={})
    assert doc[0].embedding.shape == (128,)
@pytest.mark.gpu
def test_embedding_dimension_gpu():
    """GPU variant: encoding on /GPU:0 should yield the same 128-d embedding."""
    x_audio, sample_rate = librosa.load(
        Path(__file__).parents[1] / 'test_data/sample.wav'
    )
    log_mel_examples = vggish_input.waveform_to_examples(x_audio, sample_rate)
    doc = DocumentArray([Document(blob=log_mel_examples)])
    # Reset TF's default graph so repeated encoder constructions start clean.
    ops.reset_default_graph()
    model = VggishAudioEncoder(device='/GPU:0')
    model.encode(doc, parameters={})
    assert doc[0].embedding.shape == (128,)
# --- file: manage.py ---
from flask_migrate import MigrateCommand
from flask_script import Manager
from app import app
from app.commands import InitDbCommand
# Flask-Script command-line entry point: attach the migration and
# database-initialisation commands to the application manager.
manager = Manager(app)
for command_name, command in (('db', MigrateCommand), ('init_db', InitDbCommand)):
    manager.add_command(command_name, command)


if __name__ == "__main__":
    # `python manage.py` lists the available commands;
    # `python manage.py runserver --help` shows the runserver options.
    manager.run()
# --- file: cotacao.py ---
import requests  # fetches quote data from the AwesomeAPI web service
from datetime import datetime  # current date and time
from openpyxl import Workbook # Cria arquivo Excel
from openpyxl.styles import Alignment, Font # Estilos para células
import pandas as pd # Nesse caso, estou usando p/ transformar em html
# Fetch the latest USD/EUR/BTC -> BRL quotes from the AwesomeAPI service.
requisicao = requests.get(
    "https://economia.awesomeapi.com.br/last/USD-BRL,EUR-BRL,BTC-BRL")
# Decode the JSON payload.
dados = requisicao.json()

# (display name for the sheet, key returned by the API)
moedas = [
    ("Dólar", "USDBRL"),
    ("Euro", "EURBRL"),
    ("Bitcoin", "BTCBRL"),
]

# Create the cotacao.xlsx workbook.
wb = Workbook()
tabela = wb.active
tabela.title = "Cotacao Moedas"

# Header row.
tabela["A1"] = "Moedas"
tabela["B1"] = "Cotação"
tabela["C1"] = "Última Atualização"

# BUG FIX: the timestamp is computed once so every row shows the same update
# time (previously datetime.now() was called per row and could differ), and
# the per-currency cells are filled in one loop instead of duplicated code.
atualizacao = datetime.now().strftime("Às %Hh:%Mm:%Ss - %d/%m/%Y")
for linha, (nome, chave) in enumerate(moedas, start=2):
    tabela[f"A{linha}"] = nome
    tabela[f"B{linha}"] = float(dados[chave]["bid"])
    tabela[f"C{linha}"] = atualizacao

# Center every filled cell; bold the header row.
celulas = ["a", "b", "c"]
for letra in celulas:
    for i in range(1, len(moedas) + 2):
        if i == 1:
            tabela[letra+str(i)].font = Font(bold=True)
        tabela[letra+str(i)].alignment = Alignment("center", "center")

# Save the spreadsheet.
wb.save("cotacao.xlsx")

# Also export as HTML, e.g. for embedding in e-mails.
tabela = pd.read_excel("cotacao.xlsx")
tabela.to_html("cotacao.html")
print(tabela)
| 3.1875 | 3 |
# --- file: server/external/youtube-dl/youtube_dl/extractor/inc.py ---
from __future__ import unicode_literals
from .common import InfoExtractor
from .kaltura import KalturaIE
class IncIE(InfoExtractor):
    """Extractor for inc.com article pages embedding Kaltura-hosted videos."""

    _VALID_URL = r'https?://(?:www\.)?inc\.com/(?:[^/]+/)+(?P<id>[^.]+).html'
    _TESTS = [{
        'url': 'http://www.inc.com/tip-sheet/bill-gates-says-these-5-books-will-make-you-smarter.html',
        'md5': '7416739c9c16438c09fa35619d6ba5cb',
        'info_dict': {
            'id': '1_wqig47aq',
            'ext': 'mov',
            'title': 'Bill Gates Says These 5 Books Will Make You Smarter',
            'description': 'md5:bea7ff6cce100886fc1995acb743237e',
            'timestamp': 1474414430,
            'upload_date': '20160920',
            'uploader_id': '<EMAIL>',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.inc.com/video/david-whitford/founders-forum-tripadvisor-steve-kaufer-most-enjoyable-moment-for-entrepreneur.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Scrape the Kaltura partner/entry ids and delegate to KalturaIE."""
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        partner_id = self._search_regex(
            r'var\s+_?bizo_data_partner_id\s*=\s*["\'](\d+)', webpage, 'partner id')
        videos_json = self._search_regex(
            r'pageInfo\.videos\s*=\s*\[(.+)\];', webpage, 'kaltura id')
        video_info = self._parse_json(videos_json, display_id)
        kaltura_id = video_info['vid_kaltura_id']
        kaltura_url = 'kaltura:%s:%s' % (partner_id, kaltura_id)
        return self.url_result(kaltura_url, KalturaIE.ie_key())
| 2.53125 | 3 |
# --- file: Students/abinash_flask/day1/app.py ---
from flask import Flask, render_template
# Build the Flask application instance.
app = Flask(__name__)


@app.route("/")
def index():
    """Serve the landing page."""
    return render_template('index.html')


if __name__ == "__main__":
    app.run(debug=True)
pyaims/python/soma/aims/io_ext.py | brainvisa/aims-free | 4 | 12765303 |
import numpy as np
import atexit
import sys
aims = sys.modules['soma.aims']
'''
IO formats readers / writers written in python for aims.
Currently:
Numpy format for matrices
'''
class NpyFormat(aims.FileFormat_SparseOrDenseMatrix):
    """Reader/writer registered with AIMS for matrices stored as numpy .npy files."""

    def read(self, filename, obj, context, options=None):
        """Read a .npy file into a SparseOrDenseMatrix.

        AIMS calls this with two calling conventions: either ``obj`` is the
        target matrix to fill in (returns True), or ``obj`` is actually an
        AllocatorContext, in which case the arguments are shifted by one and
        a freshly created SparseOrDenseMatrix is returned instead.
        """
        mat = np.load(filename)
        # currently we need to perform a full copy of the array because
        # we cannot prevent it from deleting (when getting back to the C++
        # layer the python wrapper is destroyed and any references it holds
        # also disapear.
        # Or we should manually increment the ref counter in the numpy PyObject
        # but it would never be destroyed then.
        #vol = aims.Volume(mat)
        #vol._npy = mat
        # NOTE(review): the 4D indexing below assumes `mat` is 2D -- confirm.
        vol = aims.Volume(mat.shape, dtype=mat.dtype)
        np.asarray(vol)[:, :, 0, 0] = mat
        if isinstance(obj, aims.carto.AllocatorContext):
            # return the object variant
            options = context
            context = obj
            obj = aims.SparseOrDenseMatrix()
            obj.setMatrix(vol)
            return obj
        # otherwise fill in obj
        obj.setMatrix(vol)
        return True

    def write(self, filename, obj, options):
        """Write ``obj`` as a dense .npy array plus a ``.minf`` header file."""
        mat = np.asarray(obj.asDense())
        np.save(filename, mat)
        hdr = obj.header()
        aims.write(hdr, '%s.minf' % filename)
        return True
class NpyFinderFormat(aims.FinderFormat):
    """Format detector telling AIMS that .npy files hold DOUBLE sparse matrices."""

    def check(self, filename, finder):
        """Claim *filename* when it ends in .npy; fill in *finder* metadata."""
        if not filename.endswith('.npy'):
            return False
        finder.setHeader({
            'file_type': 'NPY',
            'object_type': 'SparseMatrix',
            'data_type': 'DOUBLE',
        })
        finder.setObjectType('SparseMatrix')
        finder.setDataType('DOUBLE')
        return True
def remove_python_formats():
    """Unregister the NUMPY readers from AIMS (scheduled to run at exit)."""
    for unregister in (aims.Finder.unregisterFormat,
                       aims.FileFormatDictionary_SparseOrDenseMatrix.unregisterFormat):
        unregister('NUMPY')
# Register the numpy format with AIMS at import time, and schedule its removal
# at interpreter shutdown so the C++ layer does not outlive the Python objects.
aims.Finder.registerFormat('NUMPY', NpyFinderFormat(), ['npy'])
aims.FileFormatDictionary_SparseOrDenseMatrix.registerFormat(
    'NUMPY', NpyFormat(), ['npy'])
atexit.register(remove_python_formats)
| 2.265625 | 2 |
# --- file: setup.py ---
from setuptools import setup, find_packages
# Package metadata for snII_cosmo_tools; the HTML templates ship with the package.
setup_kwargs = dict(
    name='snII_cosmo_tools',
    url='https://github.com/chvogl/snII_cosmo_tools.git',
    author='<NAME>',
    author_email='<EMAIL>',
    packages=find_packages(),
    package_data={'snII_cosmo_tools': ['templates/*.html']},
)
setup(**setup_kwargs)
| 1.289063 | 1 |
SpiceMix/estimateParameters.py | ma-compbio/SpiceMix | 18 | 12765305 | import sys, time, itertools, resource, logging
from multiprocessing import Pool, Process
from util import psutil_process, print_datetime, array2string, PyTorchDType as dtype
import torch
import numpy as np
import gurobipy as grb
from scipy.special import loggamma
from sampleForIntegral import integrateOfExponentialOverSimplexInduction2
def estimateParametersY(self, max_iter=10):
    """M-step for the emission model.

    Alternately fits the metagene matrix ``self.M`` (a nonnegative,
    simplex-constrained quadratic program solved with Gurobi) and the noise
    precisions ``self.sigma_yx_invs`` until the precisions converge, then
    returns the emission part ``Q_Y`` of the objective.

    :param max_iter: maximum number of M / sigma alternations
    """
    logging.info(f'{print_datetime()}Estimating M and sigma_yx_inv')
    As = []
    Bs = []
    sizes = np.fromiter(map(np.size, self.YTs), dtype=float)
    # Per-replicate sufficient statistics: A = Y^T X, B = X^T X.
    for YT, E, XT in zip(self.YTs, self.Es, self.XTs):
        if self.dropout_mode == 'raw':
            As.append(YT.T @ XT)
            Bs.append(XT.T @ XT)
        else:
            raise NotImplementedError
    m = grb.Model('M')
    m.setParam('OutputFlag', False)
    m.Params.Threads = 1
    if self.M_constraint == 'sum2one':
        # Nonnegative M with each metagene (column) summing to one.
        vM = m.addVars(self.GG, self.K, lb=0.)
        m.addConstrs((vM.sum('*', i) == 1 for i in range(self.K)))
    else:
        raise NotImplementedError
    for iiter in range(max_iter):
        # Quadratic objective: sum_r beta_r sigma_r^-2 ||Y_r - X_r M^T||^2.
        obj = 0
        for beta, YT, sigma_yx_inv, A, B, G, XT in zip(self.betas, self.YTs, self.sigma_yx_invs, As, Bs, self.Gs, self.XTs):
            # constant terms
            if self.dropout_mode == 'raw':
                t = YT.ravel()
            else:
                raise NotImplementedError
            obj += beta * sigma_yx_inv**2 * np.dot(t, t)
            # linear terms
            t = -2 * beta * sigma_yx_inv**2 * A
            obj += grb.quicksum([t[i, j] * vM[i, j] for i in range(G) for j in range(self.K)])
            # quadratic terms
            if self.dropout_mode == 'raw':
                t = beta * sigma_yx_inv**2 * B
                t[np.diag_indices(self.K)] += 1e-5  # jitter for numerical stability
                obj += grb.quicksum([t[i, i] * vM[k, i] * vM[k, i] for k in range(G) for i in range(self.K)])
                t *= 2
                obj += grb.quicksum([t[i, j] * vM[k, i] * vM[k, j] for k in range(G) for i in range(self.K) for j in range(i+1, self.K)])
            else:
                raise NotImplementedError
            del t, beta, YT, A, B, G
        kk = 0
        if kk != 0:
            # Optional L2 regularisation on M (currently disabled: kk == 0).
            obj += grb.quicksum([kk/2 * vM[k, i] * vM[k, i] for k in range(self.GG) for i in range(self.K)])
        m.setObjective(obj, grb.GRB.MINIMIZE)
        m.optimize()
        self.M = np.array([[vM[i, j].x for j in range(self.K)] for i in range(self.GG)])
        # Renormalise M according to the configured constraint.
        if self.M_constraint in ['sum2one', 'none']:
            pass
        elif self.M_constraint == 'L1':
            self.M /= np.abs(self.M).sum(0, keepdims=True)
        elif self.M_constraint == 'L2':
            self.M /= np.sqrt((self.M ** 2).sum(0, keepdims=True))
        else:
            raise NotImplementedError
        last_sigma_yx_invs = np.copy(self.sigma_yx_invs)
        # Residual sum of squares per replicate under the updated M.
        ds = np.array([
            np.dot(YT.ravel(), YT.ravel()) - 2*np.dot(A.ravel(), self.M[:G].ravel()) + np.dot(B.ravel(), (self.M[:G].T @ self.M[:G]).ravel())
            for YT, A, B, G in zip(self.YTs, As, Bs, self.Gs)
        ])
        if self.sigma_yx_inv_mode == 'separate':
            t = ds / sizes
            self.sigma_yx_invs = 1. / np.sqrt(t)
        elif self.sigma_yx_inv_mode == 'average':
            t = np.dot(self.betas, ds) / np.dot(self.betas, sizes)
            self.sigma_yx_invs = np.full(self.num_repli, 1 / (np.sqrt(t) + 1e-20))
        elif self.sigma_yx_inv_mode.startswith('average '):
            # Average over an explicit subset of replicates, e.g. 'average 0 2'.
            idx = np.array(list(map(int, self.sigma_yx_inv_mode.split(' ')[1:])))
            t = np.dot(self.betas[idx], ds[idx]) / np.dot(self.betas[idx], sizes[idx])
            self.sigma_yx_invs = np.full(self.num_repli, 1 / (np.sqrt(t) + 1e-20))
        else:
            raise NotImplementedError
        d = self.sigma_yx_invs - last_sigma_yx_invs
        logging.info(f"{print_datetime()}At iter {iiter}, σ_yxInv: {array2string(d)} -> {array2string(self.sigma_yx_invs)}")
        if (np.abs(d) / self.sigma_yx_invs).max() < 1e-5 or self.num_repli <= 1 or self.sigma_yx_inv_mode.startswith('average'):
            break
    # emission
    Q_Y = -np.dot(self.betas, sizes) / 2
    # partition function - Pr [ Y | X, Theta ]
    Q_Y -= np.dot(self.betas, sizes) * np.log(2*np.pi) / 2
    Q_Y += (sizes * self.betas * np.log(self.sigma_yx_invs)).sum()
    return Q_Y
def estimateParametersX(self, iiter):
    """M-step for the hidden-state model.

    Re-estimates the prior parameters ``self.prior_xs`` in closed form and
    the pairwise interaction matrix ``self.Sigma_x_inv`` by Adam gradient
    descent under a mean-field approximation of the partition function.
    Returns the corresponding part ``Q_X`` of the objective.

    :param iiter: index of the outer EM iteration (controls integrator choice)
    """
    logging.info(f'{print_datetime()}Estimating Sigma_x_inv and prior_xs')
    device = self.PyTorch_device
    # Per-replicate statistics: B = X^T X, talpha = column sums of X,
    # tnu[i] = sum of normalised states over cell i's neighbours, and
    # tC = weighted cross-covariance between states and neighbour sums.
    Bs = []
    talphas = []
    talpha_es = []
    tC = torch.zeros([self.K, self.K], dtype=dtype, device=device)
    tnus = []
    for YT, E, XT, beta in zip(self.YTs, self.Es, self.XTs, self.betas):
        tXT = torch.tensor(XT, dtype=dtype, device=device)
        N, G = YT.shape
        if self.dropout_mode == 'raw':
            Bs.append(XT.T @ XT)
        else:
            raise NotImplementedError
        talphas.append(tXT.sum(0))
        talpha_es.append(torch.tensor(list(map(len, E)), dtype=dtype, device=device) @ tXT)
        tXT.div_(tXT.sum(1, keepdim=True).add_(1e-30))
        tnu = torch.empty([N, self.K], dtype=dtype, device=device)
        for tnui, ei in zip(tnu, E):
            tnui.copy_(tXT[ei].sum(0))
        tnus.append(tnu)
        tC.add_(alpha=beta, other=tXT.t() @ tnu)
        del tXT
    Q_X = 0
    if all(prior_x[0] == 'Gaussian' for prior_x in self.prior_xs) and self.pairwise_potential_mode == 'linear':
        raise NotImplementedError
    elif self.pairwise_potential_mode in ['linear', 'linear w/ shift']:
        raise NotImplementedError
    elif all(prior_x[0] in ['Exponential shared', 'Exponential shared fixed'] for prior_x in self.prior_xs) and self.pairwise_potential_mode == 'normalized':
        # Closed-form update of the shared exponential rate per replicate.
        prior_xs_old = self.prior_xs
        self.prior_xs = []
        for N, prior_x, talpha in zip(self.Ns, prior_xs_old, talphas):
            if prior_x[0] == 'Exponential shared':
                lambda_x, = prior_x[1:]
                lambda_x = talpha.mean().div_(N).pow(-1).cpu().data.numpy()
                Q_X -= lambda_x * talpha.sum().cpu().data.numpy()
                Q_X += N*self.K*np.log(lambda_x) - N*loggamma(self.K)
                prior_x = prior_x[:1] + (np.full(self.K, lambda_x), )
                self.prior_xs.append(prior_x)
            elif prior_x[0] == 'Exponential shared fixed':
                lambda_x, = prior_x[1:]
                Q_X -= lambda_x.mean() * talpha.sum().cpu().data.numpy()
                self.prior_xs.append(prior_x)
            else:
                raise NotImplementedError
        del prior_xs_old
        # Gradient-based optimisation of Sigma_x_inv (skipped if no graphs).
        if not all(self.Es_empty):
            # valid_diter = 1
            # valid_diter = 7
            # valid_diter = 31
            # valid_diter = 97
            valid_diter = 331
            # valid_diter = 997
            # valid_diter = 3343
            # valid_diter = 7177
            # valid_diter = 9973
            max_iter = 1e4
            max_iter = int(max_iter)
            batch_sizes = [512, ] * self.num_repli
            # requires_grad = True
            requires_grad = False
            var_list = []
            optimizers = []
            schedulars = []
            tSigma_x_inv = torch.tensor(self.Sigma_x_inv, dtype=dtype, device=device, requires_grad=requires_grad)
            var_list += [
                tSigma_x_inv,
            ]
            schedular = None
            optimizer = torch.optim.Adam([tSigma_x_inv], lr=1e-2)
            schedular = torch.optim.lr_scheduler.StepLR(optimizer, valid_diter, gamma=0.98)
            optimizers.append(optimizer)
            if schedular: schedulars.append(schedular)
            del optimizer, schedular
            tprior_xs = []
            for prior_x in self.prior_xs:
                if prior_x[0] in ['Exponential shared', 'Exponential shared fixed']:
                    lambda_x, = prior_x[1:]
                    tlambda_x = torch.tensor(lambda_x, dtype=dtype, device=device, requires_grad=requires_grad)
                    tprior_xs.append((prior_x[0], tlambda_x,))
                    var_list.append(tlambda_x)
                    del lambda_x
                else:
                    raise NotImplementedError
            for t in var_list: t.grad = torch.zeros_like(t)
            tdiagBs = [torch.tensor(np.diag(B).copy(), dtype=dtype, device=device) for B in Bs]
            tNus = [tnu.sum(0) for tnu in tnus]
            tNu2s = [tnu.t() @ tnu for tnu in tnus]
            talpha_e_all = torch.zeros_like(talpha_es[0])
            for beta, talpha_e in zip(self.betas, talpha_es): talpha_e_all.add_(alpha=beta, other=talpha_e)
            NEs = [sum(map(len, E)) for E in self.Es]
            tnEs = [torch.tensor(list(map(len, E)), dtype=dtype, device=device) for E in self.Es]
            tZTs = [torch.tensor(XT, dtype=dtype, device=device) for XT in self.XTs]
            for tZT in tZTs: tZT.div_(tZT.sum(1, keepdim=True))
            # Sigma_x_inv_ub = 1.
            # Sigma_x_inv_lb = -1.
            Sigma_x_inv_lb = None
            Sigma_x_inv_ub = None
            Sigma_x_inv_constraint = None  # complete matrix
            # Sigma_x_inv_constraint = 'diagonal' # diagonal matrix
            # Sigma_x_inv_constraint = 'diagonal same' # diagonal matrix, diagonal values are all the same
            row_idx, col_idx = np.triu_indices(self.K, 0)
            assumption_str = 'mean-field'
            # assumption_str = None
            # assumption_str = 'independent'
            random_flag = assumption_str in [
                'independent',
                'mean-field',
            ]
            n_samples = 0
            regenerate_diter = int(1e10)
            tZes = [None] * self.num_repli
            # Number of cells subsampled per step when approximating log Z.
            nsample4integral = 64
            if assumption_str == None:
                raise NotImplementedError
            elif assumption_str == 'mean-field':
                pass
            elif assumption_str == 'independent':
                raise NotImplementedError
            else:
                raise NotImplementedError
            if assumption_str in [None, 'independent']:
                tC.div_(2)
            loggamma_K = loggamma(self.K)
            __t__, func, last_func = 0, None, torch.empty([], dtype=dtype, device=device).fill_(np.nan)
            best_func, best_iter = torch.empty([], dtype=dtype, device=device).fill_(np.nan), -1
            tSigma_x_inv_best = None
            for __t__ in range(max_iter + 1):
                if not requires_grad:
                    for t in var_list: t.grad.zero_()
                else:
                    for optimizer in optimizers:
                        optimizer.zero_grad()
                assert (tSigma_x_inv - tSigma_x_inv.t()).abs().max() < 1e-15
                if Sigma_x_inv_lb is not None:
                    tSigma_x_inv.clamp_(min=Sigma_x_inv_lb)
                if Sigma_x_inv_ub is not None:
                    tSigma_x_inv.clamp_(max=Sigma_x_inv_ub)
                if Sigma_x_inv_constraint in ['diagonal', 'diagonal same']:
                    tSigma_x_inv.triu_().tril_()
                if Sigma_x_inv_constraint in ['diagonal same']:
                    tSigma_x_inv[(range(self.K), range(self.K))] = tSigma_x_inv[(range(self.K), range(self.K))].mean()
                func = torch.zeros([], dtype=dtype, device=device)
                # if requires_grad:
                func_grad = torch.zeros([], dtype=dtype, device=device, requires_grad=True)
                # pairwise potential
                tSigma_x_inv.grad.add_(tC)
                if requires_grad:
                    func_grad = func_grad + tC.view(-1) @ tSigma_x_inv.view(-1)
                else:
                    func.add_(tC.view(-1) @ tSigma_x_inv.view(-1))
                for N, E_empty, NE, tnE, E, beta, tZT, tZe, talpha, tnu, tNu, tNu2, tdiagB, tprior_x in zip(
                    self.Ns,
                    self.Es_empty, NEs, tnEs, self.Es, self.betas, tZTs, tZes,
                    talphas, tnus, tNus, tNu2s, tdiagBs,
                    tprior_xs,
                ):
                    if E_empty:
                        continue
                    if assumption_str == 'mean-field':
                        if tprior_x[0] in ['Exponential shared', 'Exponential shared fixed']:
                            # Full pass every valid_diter steps; otherwise a
                            # random subsample rescaled by c to stay unbiased.
                            if __t__ % valid_diter == 0:
                                idx = slice(None)
                            else:
                                idx = np.random.choice(N, min(nsample4integral, N), replace=False)
                            tnu = tnu[idx].contiguous()
                            c = NE / tnE[idx].sum()
                            # Z_z
                            teta = tnu @ tSigma_x_inv
                            teta.grad = torch.zeros_like(teta)
                            # torch.manual_seed(iiter)
                            if iiter > 1 or __t__ > 100:
                                # tlogZ = integrateOfExponentialOverSimplexSampling(teta, requires_grad=requires_grad, seed=iiter*max_iter+__t__)
                                tlogZ = integrateOfExponentialOverSimplexInduction2(teta, grad=c, requires_grad=requires_grad, device=device)
                            else:
                                # tlogZ = integrateOfExponentialOverSimplexSampling(teta, requires_grad=requires_grad, seed=iiter*max_iter+__t__)
                                tlogZ = integrateOfExponentialOverSimplexInduction2(teta, grad=c, requires_grad=requires_grad, device=device)
                            if requires_grad:
                                # NOTE(review): Tensor.add(alpha, other) with a
                                # positional alpha is deprecated in newer PyTorch;
                                # prefer .add(other, alpha=...) when upgrading.
                                func_grad = func_grad.add(beta*c, tlogZ.sum())
                            else:
                                func.add_(alpha=beta*c, other=tlogZ.sum())
                                tSigma_x_inv.grad.addmm_(alpha=beta, mat1=tnu.t(), mat2=teta.grad)
                        else:
                            raise NotImplementedError
                    elif assumption_str == None:
                        raise NotImplementedError
                    elif assumption_str == 'independent':
                        raise NotImplementedError
                    else:
                        raise NotImplementedError
                if requires_grad:
                    func_grad.backward()
                    func = func + func_grad
                # prior on Σ_x^inv
                # num_burnin_iter = 200
                # if iiter <= num_burnin_iter:
                # 	kk = 1e-1 * np.dot(betas, list(map(len, Es))) * 1e-1**((num_burnin_iter-iiter+1)/num_burnin_iter)
                # else:
                # 	kk = 1e-1 * np.dot(betas, list(map(len, Es)))
                kk = self.lambda_SigmaXInv * np.dot(self.betas, NEs)
                # NOTE(review): the positional-alpha forms .add_(kk, t) below are
                # deprecated in newer PyTorch (use .add_(t, alpha=kk)).
                tSigma_x_inv.grad.add_(kk, tSigma_x_inv)
                func.add_(kk / 2, tSigma_x_inv.pow(2).sum())
                # normalize gradient by the weighted sizes of data sets
                tSigma_x_inv.grad.div_(np.dot(self.betas, NEs))
                func.div_(np.dot(self.betas, list(map(len, self.YTs))))
                # Symmetrise the gradient, then zero components that would push
                # past the box constraints or off the structural pattern.
                tSigma_x_inv.grad.add_(tSigma_x_inv.grad.clone().t()).div_(2)
                if Sigma_x_inv_lb is not None:
                    tSigma_x_inv.grad[(tSigma_x_inv <= Sigma_x_inv_lb) * (tSigma_x_inv.grad > 0)] = 0
                if Sigma_x_inv_ub is not None:
                    tSigma_x_inv.grad[(tSigma_x_inv >= Sigma_x_inv_ub) * (tSigma_x_inv.grad < 0)] = 0
                if Sigma_x_inv_constraint in ['diagonal', 'diagonal same']:
                    tSigma_x_inv.grad.triu_().tril_()
                if Sigma_x_inv_constraint in ['diagonal same']:
                    tSigma_x_inv.grad[(range(self.K), range(self.K))] = tSigma_x_inv.grad[(range(self.K), range(self.K))].mean()
                # setting flags
                best_flag = False
                if not random_flag or __t__ % valid_diter == 0:
                    best_flag = not best_func <= func
                    if best_flag:
                        best_func, best_iter = func, __t__
                        tSigma_x_inv_best = tSigma_x_inv.clone().detach()
                stop_flag = True
                # stop_flag = False
                stop_tSigma_x_inv_grad_pseudo = 1e-1
                stop_flag &= (tSigma_x_inv.grad.abs() / (tSigma_x_inv.abs() + stop_tSigma_x_inv_grad_pseudo)).abs().max().item() < 1e-2
                for tprior_x in tprior_xs:
                    if tprior_x[0] in ['Exponential shared', ]:
                        tlambda_x, = tprior_x[1:]
                        stop_flag &= tlambda_x.grad.abs().max().item() < 1e-4
                        del tlambda_x
                    elif tprior_x[0] in ['Exponential shared fixed', ]:
                        pass
                    else:
                        raise NotImplementedError
                if random_flag:
                    stop_flag &= not bool(func <= last_func - 1e-3*valid_diter)
                else:
                    stop_flag &= not bool(func <= last_func - 1e-3)
                stop_flag |= random_flag and not __t__ < best_iter + 2*valid_diter
                # stop_flag |= best_func == func and __t__ > best_iter + 20
                if random_flag and __t__ % valid_diter != 0:
                    stop_flag = False
                if __t__ >= max_iter:
                    stop_flag = True
                warning_flag = bool(func > last_func + 1e-10)
                warning_flag &= not random_flag or __t__ % valid_diter == 0
                # warning_flag = True
                if __t__ % valid_diter == 0 or stop_flag or warning_flag or (regenerate_diter != 1 and (__t__ % regenerate_diter == 0 or (__t__+1) % regenerate_diter == 0)):
                    print(
                        f'At iter {__t__},\t'
                        f'func = {(func - last_func).item():.2e} -> {func.item():.2e}\t'
                        f'Σ_x^inv: {tSigma_x_inv.max().item():.1e} - {tSigma_x_inv.min().item():.1e} = {tSigma_x_inv.max() - tSigma_x_inv.min():.1e} '
                        f'grad = {tSigma_x_inv.grad.min().item():.2e} {tSigma_x_inv.grad.max().item():.2e}\t'
                        f'var/grad = {(tSigma_x_inv.grad.abs()/(tSigma_x_inv.abs() + stop_tSigma_x_inv_grad_pseudo)).abs().max().item():.2e}'
                        # f'δ_x: {tdelta_x.max().item():.1e} - {tdelta_x.min().item():.1e} = {tdelta_x.max() - tdelta_x.min():.1e} '
                        # f'grad = {tdelta_x.grad.min().item():.2e} {tdelta_x.grad.max().item():.2e}'
                        , end=''
                    )
                    if warning_flag: print('\tWarning', end='')
                    if best_flag:
                        print('\tbest', end='')
                    print()
                    sys.stdout.flush()
                # stop_flag = True
                if not stop_flag:
                    for optimizer in optimizers: optimizer.step()
                    for schedular in schedulars: schedular.step()
                if stop_flag: break
                if not random_flag or __t__ % valid_diter == 0:
                    last_func = func
            # Keep the best iterate rather than the final one.
            tSigma_x_inv = tSigma_x_inv_best
            func = best_func
            self.Sigma_x_inv = tSigma_x_inv.cpu().data.numpy()
            Q_X -= func.mul_(np.dot(self.betas, list(map(len, self.YTs)))).item()
    elif all(prior_x[0] == 'Exponential' for prior_x in self.prior_xs) and self.pairwise_potential_mode == 'normalized':
        raise NotImplementedError
    else:
        raise NotImplementedError
    return Q_X
| 1.90625 | 2 |
# --- file: evaluation/easy_evaluation.py ---
# import numpy as np
from sklearn.pipeline import Pipeline
# from sklearn.svm import SVC, SVR
from sklearn.linear_model import SGDClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import accuracy_score, classification_report
# import fire
def easy_eval_clf(X, y, verbose=True):
    """Cross-validated linear-classifier evaluation of features X vs labels y.

    Fits an SGD linear classifier behind a standardisation preprocessor (or,
    for very wide data, PCA whitening), predicts with 10-fold CV, and returns
    the accuracy. When *verbose*, a classification report is printed first.

    :param X: feature matrix of shape (n_samples, n_features)
    :param y: label vector of length n_samples
    :param verbose: print the classification report and accuracy
    :return: cross-validated accuracy as a float
    """
    # init model (SVC)
    # clf = SVC(kernel='linear')
    clf = SGDClassifier(n_jobs=-1)
    if X.shape[1] < X.shape[0] * 2:
        preproc = StandardScaler()
    else:  # empirical dimension reduction for extreme cases
        preproc = PCA(n_components=X.shape[0] * 2, whiten=True)
    pl = Pipeline([('preproc', preproc), ('clf', clf)])
    # fire cross validation
    y_ = cross_val_predict(pl, X, y, cv=10)
    # simple evaluation
    acc = accuracy_score(y, y_)
    cr = classification_report(y, y_)
    if verbose:
        print(cr)
        # BUG FIX: a bare `print` (Python 2 leftover) is a no-op expression in
        # Python 3; `print()` emits the intended blank line.
        print()
        print('Accuracy : {:.2%}'.format(acc))
    return acc
class EasyEval:
    """Repeats an evaluation *n_trial* times (evaluation body not yet implemented)."""

    def __init__(self, n_trial=1):
        """Store the number of evaluation trials to run."""
        self.n_trial = n_trial

    def eval(self):
        """Run the evaluation *n_trial* times.

        BUG FIX: `xrange` does not exist in Python 3 and raised NameError;
        replaced with `range`.
        """
        for n in range(self.n_trial):
            pass  # do evaluation here
if __name__ == '__main__':
    # Placeholder entry point; the module is meant to be imported.
    pass
| 2.75 | 3 |
# --- file: illusion_testing/training/KWS_LSTM/chunk_model.py ---
# Copyright (C) 2020 by The Board of Trustees of Stanford University
# This program is free software: you can redistribute it and/or modify it under
# the terms of the Modified BSD-3 License as published by the Open Source
# Initiative.
# If you use this program in your research, we request that you reference the
# Illusion paper, and that you send us a citation of your work.
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the BSD-3 License for more details.
# You should have received a copy of the Modified BSD-3 License along with this
# program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
#
import argparse
import json
import math
import os
import sys
import time
import numpy as np
def size_to_string(n):
    """Render an ndarray's shape as C array dimensions, e.g. (2, 3) -> "[2][3]".

    :param n: numpy array whose ``shape`` is formatted
    :return: concatenated ``[dim]`` tokens ('' for a 0-d array)
    """
    # Idiom fix: join a generator instead of concatenating strings in a loop.
    return ''.join(f'[{dim}]' for dim in n.shape)
def ndarray_to_string(x):
    """Format array *x* as a C initializer list (numpy brackets become braces)."""
    rendered = np.array2string(x, separator=',', threshold=2**32)
    return rendered.replace('[', '{').replace(']', '}')
# Load the trained KWS-LSTM weights and emit them as C arrays, split into
# 8 chunks (one per chip); every 8th slice of each gate goes to a chunk.
weights = np.load('model.npz')
weights_i = np.split(weights['lstm1.cell.weight_i'].flatten(),4*8)
weights_h = np.split(weights['lstm1.cell.weight_h'].flatten(),4*8)
bias = np.split(weights['lstm1.cell.bias'].flatten(),4*8)
fc_weights = np.hsplit(weights['hidden2keyword.weight'],8)
fc_bias = weights['hidden2keyword.bias']
f = open('model_chunked_LSTM.c', 'w')
# names[0]: C identifier stems; names[1]: C type prefixes (only [1][0] is used).
names = [[ 'lstm_i_H','lstm_h_H','lstm_B', 'fc_H', 'fc_B'],['const int8_t ', 'const int16_t layer', 'const int32_t layer']]
# NOTE(review): `numpy_files` is never used below.
numpy_files = {}
for i in range(8):
    # Chunk i gathers its share of all four LSTM gates plus its FC columns.
    wi = np.concatenate(weights_i[i::8])
    wh = np.concatenate(weights_h[i::8])
    b = np.concatenate(bias[i::8])
    fc_w = fc_weights[i].flatten()
    f.write(names[1][0]+names[0][0] +"_"+str(i) + size_to_string(wi) + ' = \n')
    f.write(ndarray_to_string(wi))
    f.write(';\n')
    f.write(names[1][0]+names[0][1] +"_"+str(i) + size_to_string(wh) + ' = \n')
    f.write(ndarray_to_string(wh))
    f.write(';\n')
    f.write(names[1][0]+names[0][2] +"_"+str(i) + size_to_string(b) + ' = \n')
    f.write(ndarray_to_string(b))
    f.write(';\n')
    f.write(names[1][0]+names[0][3] +"_"+str(i) + size_to_string(fc_w) + ' = \n')
    f.write(ndarray_to_string(fc_w))
    f.write(';\n')
# NOTE(review): the FC bias is emitted under names[0][3] ('fc_H'); the unused
# names[0][4] ('fc_B') looks intended -- confirm against the consuming C code.
f.write(names[1][0]+names[0][3] + size_to_string(fc_bias) + ' = \n')
f.write(ndarray_to_string(fc_bias))
f.write(';\n')
f.close()
| 1.929688 | 2 |
# --- file: bot/helper/mirror_utils/download_utils/direct_link_generator.py ---
import json
import re
import urllib.parse
from os import popen
from random import choice
import requests
from bs4 import BeautifulSoup
from bot.helper.ext_utils.exceptions import DirectDownloadLinkException
def direct_link_generator(link: str):
    """ direct links generator """
    if not link:
        raise DirectDownloadLinkException("`No links found!`")
    # Dispatch to the first host-specific handler whose domain appears in the link.
    hosts = (
        ('zippyshare.com', zippy_share),
        ('yadi.sk', yandex_disk),
        ('cloud.mail.ru', cm_ru),
        ('mediafire.com', mediafire),
        ('osdn.net', osdn),
        ('github.com', github),
    )
    for marker, handler in hosts:
        if marker in link:
            return handler(link)
    raise DirectDownloadLinkException(f'No Direct link function found for {link}')
def zippy_share(url: str) -> str:
    """ ZippyShare direct links generator
    Based on https://github.com/LameLemon/ziggy"""
    dl_url = ''
    try:
        link = re.findall(r'\bhttps?://.*zippyshare\.com\S+', url)[0]
    except IndexError:
        raise DirectDownloadLinkException("`No ZippyShare links found`\n")
    session = requests.Session()
    base_url = re.search('http.+.com', link).group()
    response = session.get(link)
    page_soup = BeautifulSoup(response.content, "lxml")
    scripts = page_soup.find_all("script", {"type": "text/javascript"})
    for script in scripts:
        if "getElementById('dlbutton')" in script.text:
            # The page obfuscates the real URL behind a small JS arithmetic
            # expression; pull out both the URL template and the expression.
            url_raw = re.search(r'= (?P<url>\".+\" \+ (?P<math>\(.+\)) .+);',
                                script.text).group('url')
            math = re.search(r'= (?P<url>\".+\" \+ (?P<math>\(.+\)) .+);',
                             script.text).group('math')
            # SECURITY NOTE(review): eval() here (and below) executes text
            # scraped from a remote page; a hostile response could run
            # arbitrary code. Consider a safe arithmetic evaluator.
            dl_url = url_raw.replace(math, '"' + str(eval(math)) + '"')
            break
    dl_url = base_url + eval(dl_url)
    name = urllib.parse.unquote(dl_url.split('/')[-1])  # NOTE(review): unused
    return dl_url
def yandex_disk(url: str) -> str:
    """ Yandex.Disk direct links generator
    Based on https://github.com/wldhx/yadisk-direct"""
    try:
        link = re.findall(r'\bhttps?://.*yadi\.sk\S+', url)[0]
    except IndexError:
        # BUG FIX: previously this returned the error-message string as if it
        # were a download URL; raise like every other handler in this module.
        raise DirectDownloadLinkException("`No Yandex.Disk links found`\n")
    api = 'https://cloud-api.yandex.net/v1/disk/public/resources/download?public_key={}'
    try:
        # The API answers with JSON containing the direct link under 'href'.
        dl_url = requests.get(api.format(link)).json()['href']
        return dl_url
    except KeyError:
        raise DirectDownloadLinkException("`Error: File not found / Download limit reached`\n")
def cm_ru(url: str) -> str:
    """ cloud.mail.ru direct links generator
    Using https://github.com/JrMasterModelBuilder/cmrudl.py"""
    # (removed unused local `reply`)
    try:
        link = re.findall(r'\bhttps?://.*cloud\.mail\.ru\S+', url)[0]
    except IndexError:
        raise DirectDownloadLinkException("`No cloud.mail.ru links found`\n")
    # Delegate to the bundled cmrudl helper; its last stdout line is JSON
    # describing the file.
    command = f'vendor/cmrudl.py/cmrudl -s {link}'
    result = popen(command).read()
    result = result.splitlines()[-1]
    try:
        data = json.loads(result)
    except json.decoder.JSONDecodeError:
        raise DirectDownloadLinkException("`Error: Can't extract the link`\n")
    dl_url = data['download']
    return dl_url
def mediafire(url: str) -> str:
    """ MediaFire direct links generator """
    matches = re.findall(r'\bhttps?://.*mediafire\.com\S+', url)
    if not matches:
        raise DirectDownloadLinkException("`No MediaFire links found`\n")
    link = matches[0]
    page = BeautifulSoup(requests.get(link).content, 'lxml')
    download_anchor = page.find('a', {'aria-label': 'Download file'})
    return download_anchor.get('href')
def osdn(url: str) -> str:
    """ OSDN direct links generator """
    osdn_link = 'https://osdn.net'
    found = re.findall(r'\bhttps?://.*osdn\.net\S+', url)
    if not found:
        raise DirectDownloadLinkException("`No OSDN links found`\n")
    link = found[0]
    page = BeautifulSoup(
        requests.get(link, allow_redirects=True).content, 'lxml')
    info = page.find('a', {'class': 'mirror_link'})
    link = urllib.parse.unquote(osdn_link + info['href'])
    mirrors = page.find('form', {'id': 'mirror-select-form'}).findAll('tr')
    # Rewrite the download link for every listed mirror; only the first is used.
    urls = [
        re.sub(r'm=(.*)&f', f"m={row.find('input')['value']}&f", link)
        for row in mirrors[1:]
    ]
    return urls[0]
def github(url: str) -> str:
    """ GitHub direct links generator """
    if not re.findall(r'\bhttps?://.*github\.com.*releases\S+', url):
        raise DirectDownloadLinkException("`No GitHub Releases links found`\n")
    # GitHub answers release asset URLs with a redirect; the Location header
    # carries the direct download link.
    download = requests.get(url, stream=True, allow_redirects=False)
    try:
        return download.headers["location"]
    except KeyError:
        raise DirectDownloadLinkException("`Error: Can't extract the link`\n")
def useragent():
    """
    useragent random setter
    """
    listing = requests.get(
        'https://developers.whatismybrowser.com/'
        'useragents/explore/operating_system_name/android/').content
    candidates = BeautifulSoup(listing, 'lxml').findAll('td', {'class': 'useragent'})
    return choice(candidates).text
| 2.609375 | 3 |
# --- file: users/urls.py ---
from rest_framework.routers import DefaultRouter
from django.urls import path,include,re_path
from . import views
from .views import UserProfileViewSet,accountView,loginView,registerView
# DRF router exposing the user-profile viewset under /profiles/.
router = DefaultRouter()
# NOTE(review): `base_name` was renamed to `basename` in DRF 3.9+; confirm the
# installed DRF version still accepts this keyword.
router.register('profiles',UserProfileViewSet,base_name='user-profile-viewset')
# URL routes for account handling; the `name=` values match django-allauth's
# expected route names so its redirects resolve here.
urlpatterns = [
    path('',include(router.urls)),
    path('user/',accountView,name='account_email_verification_sent'),
    path('login/',loginView,name='account_login'),
    path('register/',registerView,name='account_signup'),
    #re_path(r'email_exists/(?P<data>\w+|[\w.%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,4})$',views.email_exists,name="email_exists"),
    path('email_exists/<str:email>/',views.email_exists, name="email_exists"),
    path('forgot_password/',views.forgot_password, name="forgot_password"),
    path('change/',views.change, name="change"),
]
| 2.0625 | 2 |
# --- file: mutester/__main__.py ---
import argparse
import logging
import math
import time
from threading import Thread
from typing import List
import pandas as pd
from mutester.data_analysis import DataAnalysis
from mutester.data_crawler import DataCrawler
def analysis_thread(repository_path, environment_path, mutant_ids: List[int], results: List[DataAnalysis], timeout):
    """Worker body: analyse *mutant_ids* and append the DataAnalysis to *results*."""
    analysis = DataAnalysis(repository_path, environment_path, timeout)
    analysis.collect_data(mutant_ids)
    results.append(analysis)
def store_data_to_disk(filename: str, merge: str, datas: List[DataAnalysis]):
    """Join each analysis' mutants/executions tables, optionally prepend a
    previous pickle, write a timestamped pickle, and return its file name.

    :param filename: suffix for the output pickle (prefixed with a timestamp)
    :param merge: path of an earlier pickle whose rows are prepended, or ''
    :param datas: analyses providing `.mutants` and `.executions` DataFrames
    :return: name of the pickle file written
    """
    mutants_and_tests = pd.DataFrame()
    if merge != '':
        mutants_and_tests = pd.read_pickle(merge)
        print('Read in {} executions to merge from {}'.format(len(mutants_and_tests), merge))
    for data_analysis in datas:
        joined = (data_analysis.mutants.set_index('mutant_id')
                  .join(data_analysis.executions.set_index('mutant_id'),
                        lsuffix='_mutant', rsuffix='_execution')
                  .reset_index())
        # BUG FIX: DataFrame.append was deprecated and removed in pandas 2.0;
        # pd.concat is the supported equivalent.
        mutants_and_tests = pd.concat([mutants_and_tests, joined], ignore_index=True)
    timestring = time.strftime("%Y%m%d-%H%M%S")
    pickle_name = timestring + '_' + filename + '.pkl'
    mutants_and_tests.to_pickle(pickle_name)
    print("Wrote: {}\n".format(pickle_name))
    total_tests = len(mutants_and_tests)
    print(mutants_and_tests)
    total_failed_tests = len(mutants_and_tests[mutants_and_tests["outcome"] == False])
    print('Total number of tests: {}\n Total failed number of tests: {}'.format(total_tests, total_failed_tests))
    return pickle_name
def main():
    """CLI entry point: time a baseline test run, fan mutant analysis out over
    N threads, and pickle the combined mutants/executions dataframe.

    Returns 0 on completion (used as the process exit code).
    """
    argument_parser = argparse.ArgumentParser(
        description='Run mutation testing with record of failed test to pandas dataframe'
    )
    argument_parser.add_argument('repository_path',
                                 help='Path to the repository to be tested')
    argument_parser.add_argument('environment_path',
                                 help='Path to the python environment to run the tests. Make sure the module is '
                                      'installed in -e mode, and that pytest, pytest-json, mutmut are available')
    argument_parser.add_argument('interval_start',
                                 help='Test to start with')
    argument_parser.add_argument('interval_end',
                                 help='Test to end with (exlusive)')
    argument_parser.add_argument('-m', '--merge',
                                 help='Add a path to the pickle file, the end result should be merged with.'
                                      'Helpful if the process was aborted, and you want to run some tests again.',
                                 default='')
    argument_parser.add_argument('--filename', action='store', default='dataframe')
    argument_parser.add_argument('-v', '--verbose', action='store_true')
    argument_parser.add_argument('-j', '--thread_count', action='store', default=1)
    argument_parser.add_argument('--timed_testruns', action='store', default=2,
                                 help='Number of dry testruns to find out after which time the testsruns should be '
                                      'aborted')
    args = argument_parser.parse_args()
    if args.verbose:
        logging.basicConfig(level=logging.INFO)
    timed_run_count = int(args.timed_testruns)
    timed_crawler = DataCrawler(args.repository_path, args.environment_path)
    # TODO: Baseline runs only if pytest-json and pytest-timeout have been installed, which happens later
    # Dry-run the suite a few times to measure an average baseline duration;
    # any mutant test run taking >10x this baseline will be killed (timeout).
    start_time = time.time()
    for _ in range(timed_run_count):
        timed_crawler.execute_test(-1)
    test_baseline_time = math.ceil((time.time() - start_time) / timed_run_count)
    logging.info('Measured %i seconds of runtime\n Test with higher than 10 times the baseline will be killed',
                 test_baseline_time)
    thread_count = int(args.thread_count)
    threads = []
    interval_start = int(args.interval_start)
    interval_end = int(args.interval_end)
    interval_length = int((interval_end - interval_start) / thread_count)
    results = []
    # First N-1 workers get equal-sized chunks of the mutant id range...
    for thread_number in range(thread_count - 1):
        thread_interval_start = interval_start + thread_number * interval_length
        mutant_ids = list(range(thread_interval_start, thread_interval_start + interval_length))
        threads.append(Thread(target=analysis_thread,
                              args=(args.repository_path, args.environment_path, mutant_ids, results,
                                    test_baseline_time * 10)))
    # ...and the last worker takes whatever remains (absorbs integer-division rounding).
    threads.append(Thread(target=analysis_thread,
                          args=(args.repository_path, args.environment_path,
                                list(range(interval_start + (thread_count - 1) * interval_length, interval_end)),
                                results, test_baseline_time * 10)))
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
    store_data_to_disk(args.filename, args.merge, results)
    return 0
if __name__ == "__main__":
exit(main())
| 2.578125 | 3 |
NLP/Text2SQL-BASELINE/text2sql/dataproc/sql_preproc_v2.py | zhangyimi/Research | 1,319 | 12765311 | <gh_stars>1000+
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SQL pre-processor for model decoder
Filname: sql_preproc.py
Authors: ZhangAo(@<EMAIL>)
Date: 2021-01-25 18:00:55
"""
import sys
import os
import traceback
import logging
import json
import collections
import collections.abc
import copy
import itertools
import shutil
from pathlib import Path
import attr
import numpy as np
import paddle
import paddle.nn.functional as F
from text2sql.dataproc import vocab
from text2sql.utils import serialization
def get_field_presence_info(ast_wrapper, node, field_infos):
    """Build a hashable tuple describing which fields of *node* are present.

    For each field: optional/sequence builtin fields record the value's type
    name when present (else False); optional non-builtin fields record a bool;
    required builtin fields always record the type name; required non-builtin
    fields must be present and record True.
    """
    markers = []
    for info in field_infos:
        value = node.get(info.name)
        has_value = value is not None and value != []
        may_be_absent = info.opt or info.seq
        is_builtin = info.type in ast_wrapper.primitive_types
        if may_be_absent and is_builtin:
            markers.append(has_value and type(value).__name__)
        elif may_be_absent:
            markers.append(has_value)
        elif is_builtin:
            markers.append(type(value).__name__)
        else:
            assert has_value
            markers.append(True)
    return tuple(markers)
@attr.s
class DecoderSQLItem:
    """A single decoder example: the parsed grammar AST plus its source SQL."""
    # Root node of the AST produced by grammar.parse().
    tree = attr.ib()
    # Original JSON-encoded SQL the tree was built from.
    orig_code = attr.ib()
    # Raw SQL query string (optional; may be filled in elsewhere).
    sql_query = attr.ib(default="")
class SQLPreproc(object):
    """Pre-processor that turns SQL JSON into grammar ASTs for the decoder.

    Collects decoder vocabulary, observed grammar productions and value
    indices over a dataset; the results can be cached to / restored from disk
    via save()/load().
    """
    def __init__(self, base_path,
                       grammar_class,
                       predict_value=True,
                       min_freq=3,
                       max_count=5000,
                       use_seq_elem_rules=False,
                       is_cached=False):
        """init
        Args:
            base_path (TYPE): if is_cached is False, base_path is the asdl grammar file.
                              if is_cached is True, base_path is path to cached directory.
            grammar_class (TYPE): grammar class, like grammars.dusql.DuSQLLanguage
            predict_value (TYPE): Default is True
            min_freq (TYPE): Default is 3
            max_count (TYPE): Default is 5000
            use_seq_elem_rules (TYPE): Default is False
            is_cached (TYPE): Default is False
        Raises: NULL
        """
        self.base_path = base_path
        self.predict_value = predict_value
        self.vocab = None
        self.all_rules = None
        self.rules_mask = None
        # key: train/dev/val/test/...
        # value: examples
        self.items = collections.defaultdict(list)
        # Statistics gathered while recording productions over the train set.
        self.sum_type_constructors = collections.defaultdict(set)
        self.field_presence_infos = collections.defaultdict(set)
        self.seq_lengths = collections.defaultdict(set)
        self.primitive_types = set()
        if not is_cached:
            self.grammar = grammar_class(self.base_path)
            self.ast_wrapper = self.grammar.ast_wrapper
            self.vocab_builder = vocab.VocabBuilder(min_freq, max_count)
        else:
            # Cached mode: grammar/vocab/rules are restored from base_path.
            self.grammar = None
            self.ast_wrapper = None
            self.load(grammar_class)
        self.use_seq_elem_rules = use_seq_elem_rules
        # Choose how SQL values are rewritten: real candidate-list indices,
        # or a fixed 'value' placeholder when value prediction is disabled.
        if self.predict_value:
            self.format_sql_value = self.transfer_sql_value
        else:
            self.format_sql_value = self.fix_sql_value
    def _get_val_index(self, val, value_dict):
        """Map value *val* to its index in *value_dict*, tolerating fuzzy matches.
        Returns -1 when no candidate matches."""
        def _float(val):
            # Normalize a numeric-looking string, e.g. '3.0' -> '3'.
            try:
                return True, str(int(float(val)))
            except Exception as e:
                return False, ''
        val = str(val)
        if val in value_dict:
            return value_dict[val]
        is_float, new_val = _float(val)
        if is_float and new_val in value_dict:
            return value_dict[new_val]
        # Fuzzy fallback: drop dots and accept prefix overlap in either direction.
        new_val = val.replace('.', '')
        candi = []
        for v, idx in value_dict.items():
            v = v.replace('.', '')
            if v.startswith(new_val) or new_val.startswith(v):
                candi.append((v, idx))
        if len(candi) == 1:
            return candi[0][1]
        elif len(candi) > 1:
            # Ambiguous: prefer the longest overlapping candidate.
            candi.sort(key=lambda x: len(x[0]), reverse=True)
            return candi[0][1]
        return -1
    def transfer_sql_value(self, sql_json, value_dict):
        """transfer value str to int index
        Args:
            sql_json (TYPE): [in/out]
            value_dict (TYPE): NULL
        Returns: TODO
        Raises: NULL
        """
        if 'cond_conn_op' in sql_json:  # NL2SQL-style JSON format
            self.transfer_simple_sql_value(sql_json, value_dict)
            return
        def _trans_cond(cond):
            """transfer condition value"""
            val1 = cond[3]
            val2 = cond[4]
            # val1 may itself be a nested sub-query (dict) - recurse into it.
            if type(val1) is dict:
                self.transfer_sql_value(val1, value_dict)
                if val2 is not None:
                    val2 = self._get_val_index(val2, value_dict)
                    cond[4] = val2 if val2 >= 0 else 0
                return
            val1 = self._get_val_index(val1, value_dict)
            if val2 is not None:
                val2 = self._get_val_index(val2, value_dict)
            if val1 == -1:
                # Unresolvable value: fall back to index 0 and log for debugging.
                val1 = 0
                logging.debug('lost value: %s. candidates: %s', cond[3], ', '.join(value_dict.keys()))
                logging.debug('sql is: %s', json.dumps(sql_json, ensure_ascii=False))
            if val2 == -1:
                val2 = 0
            cond[3] = val1
            cond[4] = val2
        for table_unit in sql_json['from']['table_units']:
            if type(table_unit[1]) is dict:
                self.transfer_sql_value(table_unit[1], value_dict)
        # Every other entry: conditions alternate with connector tokens
        # (presumably AND/OR - verify against the grammar).
        for cond in sql_json['where'][::2]:
            _trans_cond(cond)
        for cond in sql_json['having'][::2]:
            _trans_cond(cond)
        if sql_json['limit'] is not None:
            limit = str(sql_json['limit'])
        else:
            limit = '0'
        if limit in value_dict:
            sql_json['limit'] = value_dict[limit]
        else:
            logging.debug('value of limit is lost: %s. candidates: %s', limit, ', '.join(value_dict.keys()))
            sql_json['limit'] = value_dict['0']
        if sql_json['intersect'] is not None:
            self.transfer_sql_value(sql_json['intersect'], value_dict)
        if sql_json['union'] is not None:
            self.transfer_sql_value(sql_json['union'], value_dict)
        if sql_json['except'] is not None:
            self.transfer_sql_value(sql_json['except'], value_dict)
    def transfer_simple_sql_value(self, sql_json, value_dict):
        """Transfer values of a flat NL2SQL-style query (conds list) in place.
        Args:
            sql_json (TYPE): NULL
            value_dict (TYPE): NULL
        Returns: TODO
        Raises: NULL
        """
        for cond in sql_json['conds']:
            value = cond[2]
            new_val = self._get_val_index(value, value_dict)
            if new_val == -1:
                new_val = 0
            cond[2] = new_val
    def fix_sql_value(self, sql_json, value_dict):
        """fix sql value to 'value' token
        Args:
            sql_json (TYPE): NULL
            value_dict (TYPE):
        Returns: TODO
        Raises: NULL
        """
        def _fix_cond_value(cond):
            """transfer condition value"""
            val1 = cond[3]
            val2 = cond[4]
            # Nested sub-query: recurse instead of substituting a placeholder.
            if type(val1) is dict:
                self.fix_sql_value(val1, value_dict)
                if val2 is not None:
                    val2 = self._get_val_index('value', value_dict)
                    cond[4] = val2 if val2 >= 0 else 0
                return
            val1 = self._get_val_index('value', value_dict)
            if val2 is not None:
                val2 = self._get_val_index('value', value_dict)
            if val1 == -1:
                val1 = 0
                logging.info('lost value: %s. candidates: %s', cond[3], ', '.join(value_dict.keys()))
                logging.debug('sql is: %s', json.dumps(sql_json, ensure_ascii=False))
            if val2 == -1:
                val2 = 0
            cond[3] = val1
            cond[4] = val2
        for table_unit in sql_json['from']['table_units']:
            if type(table_unit[1]) is dict:
                self.fix_sql_value(table_unit[1], value_dict)
        for cond in sql_json['where'][::2]:
            _fix_cond_value(cond)
        for cond in sql_json['having'][::2]:
            _fix_cond_value(cond)
        # Any concrete limit collapses to the 'value' placeholder.
        if sql_json['limit'] is not None:
            limit = 'value'
        else:
            limit = 'empty'
        assert limit in value_dict
        sql_json['limit'] = value_dict[limit]
        if sql_json['intersect'] is not None:
            self.fix_sql_value(sql_json['intersect'], value_dict)
        if sql_json['union'] is not None:
            self.fix_sql_value(sql_json['union'], value_dict)
        if sql_json['except'] is not None:
            self.fix_sql_value(sql_json['except'], value_dict)
    def add_item(self, section, sql_json, value_list):
        """add an item: rewrite values, parse to an AST, update train statistics,
        and store the DecoderSQLItem under *section*."""
        value_dict = {val: idx for idx, val in enumerate(value_list)}
        self.format_sql_value(sql_json, value_dict)
        parsed = self.grammar.parse(sql_json, section)
        self.ast_wrapper.verify_ast(parsed)     # will raise AssertionError, if verify failed
        root = parsed
        if section == 'train':
            # Vocabulary and grammar-production statistics come only from train.
            for token in self._all_tokens(root):
                self.vocab_builder.add_word(token)
            self._record_productions(root)
        item = DecoderSQLItem(tree=root, orig_code=sql_json)
        self.items[section].append(item)
        return item
    def clear_items(self):
        """clear items"""
        self.items = collections.defaultdict(list)
    def _construct_cache_path(self, root_path):
        """Derive the cache file paths under *root_path*.
        Args:
            root_path (TYPE): NULL
        Returns: TODO
        Raises: NULL
        """
        root_path = Path(root_path)
        self.vocab_path = root_path / 'dec_vocab.json'
        self.observed_productions_path = root_path / 'observed_productions.json'
        self.grammar_rules_path = root_path / 'grammar_rules.json'
        self.grammar_file = root_path / 'grammar.asdl'
    def save(self, save_path):
        """save parsed items to disk"""
        os.makedirs(save_path, exist_ok=True)
        self._construct_cache_path(save_path)
        self.vocab = self.vocab_builder.finish()
        self.vocab.save(self.vocab_path)
        """ sql preproc 不负责存储data部分
        for section, items in self.items.items():
            with open(os.path.join(self.data_dir, section + '.jsonl'), 'w') as f:
                for item in items:
                    f.write(json.dumps(attr.asdict(item)) + '\n')
        """
        # observed_productions
        # Sets are converted to sorted lists so the JSON output is deterministic.
        self.sum_type_constructors = serialization.to_dict_with_sorted_values(self.sum_type_constructors)
        self.field_presence_infos = serialization.to_dict_with_sorted_values(self.field_presence_infos, key=str)
        self.seq_lengths = serialization.to_dict_with_sorted_values(self.seq_lengths)
        self.primitive_types = sorted(self.primitive_types)
        with open(self.observed_productions_path, 'w') as f:
            json.dump({
                'sum_type_constructors': self.sum_type_constructors,
                'field_presence_infos': self.field_presence_infos,
                'seq_lengths': self.seq_lengths,
                'primitive_types': self.primitive_types,
            }, f, indent=2, sort_keys=True)
        # grammar
        self.all_rules, self.rules_mask = self._calculate_rules()
        with open(self.grammar_rules_path, 'w') as f:
            json.dump({
                'all_rules': self.all_rules,
                'rules_mask': self.rules_mask,
            }, f, indent=2, sort_keys=True)
        shutil.copy2(self.base_path, self.grammar_file)
    def load(self, grammar_class):
        """load parsed items from disk"""
        self._construct_cache_path(self.base_path)
        self.grammar = grammar_class(self.grammar_file)
        self.ast_wrapper = self.grammar.ast_wrapper
        self.vocab = vocab.Vocab.load(self.vocab_path)
        observed_productions = json.load(open(self.observed_productions_path))
        self.sum_type_constructors = observed_productions['sum_type_constructors']
        self.field_presence_infos = observed_productions['field_presence_infos']
        self.seq_lengths = observed_productions['seq_lengths']
        self.primitive_types = observed_productions['primitive_types']
        grammar = json.load(open(self.grammar_rules_path))
        self.all_rules = serialization.tuplify(grammar['all_rules'])
        self.rules_mask = grammar['rules_mask']
    def _record_productions(self, tree):
        """_record_productions: walk *tree* and accumulate the grammar
        productions (constructors, field presence, sequence lengths) it uses."""
        queue = [(tree, False)]
        while queue:
            node, is_seq_elem = queue.pop()
            node_type = node['_type']
            # Rules of the form:
            # expr -> Attribute | Await | BinOp | BoolOp | ...
            # expr_seq_elem -> Attribute | Await | ... | Template1 | Template2 | ...
            for type_name in [node_type] + node.get('_extra_types', []):
                if type_name in self.ast_wrapper.constructors:
                    sum_type_name = self.ast_wrapper.constructor_to_sum_type[type_name]
                    if is_seq_elem and self.use_seq_elem_rules:
                        self.sum_type_constructors[sum_type_name + '_seq_elem'].add(type_name)
                    else:
                        self.sum_type_constructors[sum_type_name].add(type_name)
            # Rules of the form:
            # FunctionDef
            # -> identifier name, arguments args
            # | identifier name, arguments args, stmt* body
            # | identifier name, arguments args, expr* decorator_list
            # | identifier name, arguments args, expr? returns
            # ...
            # | identifier name, arguments args, stmt* body, expr* decorator_list, expr returns
            assert node_type in self.ast_wrapper.singular_types
            field_presence_info = get_field_presence_info(
                    self.ast_wrapper,
                    node,
                    self.ast_wrapper.singular_types[node_type].fields)
            self.field_presence_infos[node_type].add(field_presence_info)
            for field_info in self.ast_wrapper.singular_types[node_type].fields:
                field_value = node.get(field_info.name, [] if field_info.seq else None)
                to_enqueue = []
                if field_info.seq:
                    # Rules of the form:
                    # stmt* -> stmt
                    # | stmt stmt
                    # | stmt stmt stmt
                    self.seq_lengths[field_info.type + '*'].add(len(field_value))
                    to_enqueue = field_value
                else:
                    to_enqueue = [field_value]
                for child in to_enqueue:
                    if isinstance(child, collections.abc.Mapping) and '_type' in child:
                        queue.append((child, field_info.seq))
                    else:
                        # Leaf value: just remember its primitive type name.
                        self.primitive_types.add(type(child).__name__)
    def _calculate_rules(self):
        """_calculate_rules: flatten the recorded productions into a single rule
        list plus per-parent (start, end) index ranges into that list."""
        offset = 0
        all_rules = []
        rules_mask = {}
        # Rules of the form:
        # expr -> Attribute | Await | BinOp | BoolOp | ...
        # expr_seq_elem -> Attribute | Await | ... | Template1 | Template2 | ...
        for parent, children in sorted(self.sum_type_constructors.items()):
            assert not isinstance(children, set)
            rules_mask[parent] = (offset, offset + len(children))
            offset += len(children)
            all_rules += [(parent, child) for child in children]
        # Rules of the form:
        # FunctionDef
        # -> identifier name, arguments args
        # | identifier name, arguments args, stmt* body
        # | identifier name, arguments args, expr* decorator_list
        # | identifier name, arguments args, expr? returns
        # ...
        # | identifier name, arguments args, stmt* body, expr* decorator_list, expr returns
        for name, field_presence_infos in sorted(self.field_presence_infos.items()):
            assert not isinstance(field_presence_infos, set)
            rules_mask[name] = (offset, offset + len(field_presence_infos))
            offset += len(field_presence_infos)
            all_rules += [(name, presence) for presence in field_presence_infos]
        # Rules of the form:
        # stmt* -> stmt
        # | stmt stmt
        # | stmt stmt stmt
        for seq_type_name, lengths in sorted(self.seq_lengths.items()):
            assert not isinstance(lengths, set)
            rules_mask[seq_type_name] = (offset, offset + len(lengths))
            offset += len(lengths)
            all_rules += [(seq_type_name, i) for i in lengths]
        return tuple(all_rules), rules_mask
    def _all_tokens(self, root):
        """_all_tokens: yield every primitive-field token in *root*'s AST
        (used to build the decoder vocabulary)."""
        queue = [root]
        while queue:
            node = queue.pop()
            type_info = self.ast_wrapper.singular_types[node['_type']]
            for field_info in reversed(type_info.fields):
                field_value = node.get(field_info.name)
                if field_info.type in self.grammar.pointers:
                    # Pointer fields (column/table refs) carry no vocab tokens.
                    pass
                elif field_info.type in self.ast_wrapper.primitive_types:
                    for token in self.grammar.tokenize_field_value(field_value):
                        yield token
                elif isinstance(field_value, (list, tuple)):
                    queue.extend(field_value)
                elif field_value is not None:
                    queue.append(field_value)
if __name__ == "__main__":
"""run some simple test cases"""
pass
| 1.9375 | 2 |
codes_auto/617.merge-two-binary-trees.py | smartmark-pro/leetcode_record | 0 | 12765312 | #
# @lc app=leetcode.cn id=617 lang=python3
#
# [617] merge-two-binary-trees
#
None
# @lc code=end | 1.25 | 1 |
thread/multi_threading.py | hupantingxue/advancedpython | 2 | 12765313 | #-*- coding: utf-8 -*-
from threading import Thread
import time
def loop(idx, nsec):
    """Announce start, sleep *nsec* seconds, then announce completion.

    Fix: the second message previously also said "start loop" (copy-paste
    error) which made the output indistinguishable from the start message.
    """
    print("start loop", idx, " at ", time.ctime())
    time.sleep(nsec)
    print("end loop", idx, " at ", time.ctime())
def main():
    """Run two sleeper threads concurrently and report wall-clock start/end."""
    print("Process start at ", time.ctime())
    workers = [Thread(target=loop, args=(0, 4)),
               Thread(target=loop, args=(1, 2))]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    print("Process done at ", time.ctime())
if "__main__" == __name__:
main()
| 3.296875 | 3 |
scripts/data-restification-kit/import_data.py | AuthEceSoftEng/ma-made-core | 0 | 12765314 | <reponame>AuthEceSoftEng/ma-made-core
from datasetsHandler import datasetsHandler
import dataImporter
from settings import HOST as host
from settings import SERVICE_PORT as host_port
import json
def importData(dataset, port):
    """Restify *dataset* through the import service running on *port*.

    Returns a JSON string: on success it carries the public dataset URL,
    on failure it forwards the importer's logs.
    """
    handler = datasetsHandler('datasets')
    importer = dataImporter.dataImporter(handler)
    # Push the dataset through the REST importer (adjust URL to your setup).
    raw_result = importer.import_data(f'http://{host}:{port}/api/v1/', dataset)
    outcome = json.loads(raw_result)
    if outcome['exitStatus'] != 'Success':
        return json.dumps({'exitStatus': 'Failure', 'logs': outcome['logs']})
    return json.dumps({'exitStatus': 'Success', 'url': f'http://{host}:{host_port}/api/v1/{dataset}'})
| 2.125 | 2 |
components/example_01/nodes/telegram.py | Legodev/Flask-Drawflow | 0 | 12765315 | from components.base.drawflownodebase import DrawflowNodeBase
class TelegramNode(DrawflowNodeBase):
    """Drawflow node that sends its message input to a selectable Telegram channel."""
    def __init__(self):
        super().__init__()
        self.name('telegram')
        self.title('Telegram send message')
        self.input('msg', str)  # single string input: the message text
        self.icon('fab fa-telegram')
        # The df-channel attribute binds the <select> value into the node's data.
        self.html("""
        <div>
          <div class="title-box"><i class="fab fa-telegram-plane"></i> Telegram bot</div>
          <div class="box">
            <p>Send to telegram</p>
            <p>select channel</p>
            <select df-channel>
              <option value="channel_1">Channel 1</option>
              <option value="channel_2">Channel 2</option>
              <option value="channel_3">Channel 3</option>
              <option value="channel_4">Channel 4</option>
            </select>
          </div>
        </div>
        """)
| 2.734375 | 3 |
deep-scratch/steps/step47.py | jayChung0302/myml | 0 | 12765316 | <reponame>jayChung0302/myml
if '__file__' in globals():
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import numpy as np
from dezero import Variable, Model, as_variable
import dezero.functions as F
import dezero.layers as L
from dezero.models import MLP
from dezero import optimizers
# Build a small MLP (10 hidden units, 3 outputs) and push one 2-feature
# sample through it; y holds the raw, unnormalized scores.
model = MLP((10, 3))
x = np.array([[0.2, -0.4]])
y = model(x)
print(y)
def softmax1d(x):
    """Softmax over a 1-D input using dezero ops (keeps the autograd graph)."""
    var = as_variable(x)
    exponentials = F.exp(var)
    return exponentials / F.sum(exponentials)
p = softmax1d(y)
print(p)
# Softmax contains an exp term, so its result easily over/underflows.
# An overflow guard (e.g. subtracting the input max) is needed.
def softmax_simple(x, axis=1):
    """Numerically unguarded softmax along *axis* using dezero ops.

    Fix: the body previously called an undefined ``exp`` and the Python
    built-in ``sum`` (which does not accept ``axis``/``keepdims``); both must
    be the dezero functions so the call works and autograd is preserved.
    """
    x = as_variable(x)
    y = F.exp(x)
    sum_y = F.sum(y, axis=axis, keepdims=True)
    return y / sum_y
# Four 2-feature samples with integer class labels; compute the
# softmax-cross-entropy loss of the model's predictions.
x = np.array([[0.2, -0.4], [0.3, 0.5], [1.3, -3.2], [2.1, 0.3]])
t = np.array([2, 0, 1, 0])
y = model(x)
loss = F.softmax_cross_entropy_simple(y, t)
print(loss)
| 2.65625 | 3 |
backend/ocr-modules/OCR-Attn/RunOCREngine.py | refrainfl/CapstonProject | 0 | 12765317 | <reponame>refrainfl/CapstonProject<gh_stars>0
import os
import sys
import LIST2JSON
import DrawTextBox
from ocr import OCR
from types import SimpleNamespace
import warnings
warnings.filterwarnings("ignore", category=UserWarning, module="torch.nn.functional")
def OCREngine(image):
    """Run CRAFT text detection + TPS/VGG/BiLSTM/Attn recognition on *image*
    (a file path), write the results as <image>.json next to it, and draw
    the detected text boxes on the image."""
    # Edit the opt fields below to swap in a different pre-trained model.
    # If the parameters change, refer to OCR-Attn/text_recognize/recognition.py.
    path_abs = os.path.dirname(os.path.abspath(__file__))
    opt = SimpleNamespace()
    opt.detect_trained_model = f"{path_abs}/models/craft_mlt_25k.pth"
    opt.detect_result_folder = f"{path_abs}/images/box/"
    opt.recognize_image_folder = f"{path_abs}/images/box/"
    opt.recognize_saved_model = f"{path_abs}/models/TPS-VGG-BiLSTM-Attn.pth"
    opt.recognize_Transformation = "TPS"
    opt.recognize_FeatureExtraction = "VGG"
    opt.recognize_SequenceModeling = "BiLSTM"
    opt.recognize_Prediction = "Attn"
    start = OCR(opt)
    result = start.run(image)
    print("#Model :", opt.detect_trained_model,
          "\n Network Model :", opt.recognize_Transformation, opt.recognize_FeatureExtraction,
          opt.recognize_SequenceModeling, opt.recognize_Prediction,
          "\n Results :\n", result)
    json_path = os.path.dirname(image) + "/" + os.path.basename(image) + ".json"
    LIST2JSON.tojsonsingle(result, json_path)
    DrawTextBox.Draw(image, result)
if __name__ == '__main__':
    image_path = sys.argv[1]  # command-line argument: path to the input image
    OCREngine(image_path)
    # OCREngine('C:/Users/Fair/PycharmProjects/Module/OCR-Attn/test/demo.png')
| 2.40625 | 2 |
csrc/setup.py | tarepan/HiPPO | 57 | 12765318 | <reponame>tarepan/HiPPO
from setuptools import setup
from torch.utils.cpp_extension import CppExtension, BuildExtension
# Build the native `hippo` extension from the three C++ sources.
ext_modules = []
# -march=native tunes codegen for the build machine (binary is not portable).
extension = CppExtension('hippo', ['hippo.cpp', 'hippolegs.cpp', 'hippolegt.cpp'], extra_compile_args=['-march=native'])
ext_modules.append(extension)
setup(
    name='hippo',
    ext_modules=ext_modules,
    cmdclass={'build_ext': BuildExtension})
| 1.523438 | 2 |
libs/Utils.py | leftstick/sublime-quick-move | 0 | 12765319 | <filename>libs/Utils.py
import sublime
import os.path
def getBaseName(fullpath):
    """Return the final path component of *fullpath*."""
    return os.path.split(fullpath)[1]
def oneThingSelected(paths):
    """True when exactly one item is selected."""
    selected_count = len(paths)
    return selected_count == 1
def removeNonPathEnd(path):
    """Strip any trailing slash/backslash separators from *path*."""
    while path and path[-1] in '\\/':
        path = path[:-1]
    return path
def getParent(path):
    """Return the directory portion of *path* (empty for a bare name)."""
    return os.path.split(path)[0]
def revert(view):
    """Undo repeatedly until the view has no unsaved modifications."""
    while view.is_dirty():
        view.run_command('undo')
Python/293.py | JWang169/LintCodeJava | 1 | 12765320 | class Solution:
def generatePossibleNextMoves(self, s: str) -> List[str]:
results = []
for i in range(len(s) - 1):
if s[i: i + 2] == "++":
results.append(s[:i] + "--" + s[i + 2:])
return results | 3.234375 | 3 |
working/timer.py | satchm0h/ChamTimer | 0 | 12765321 | <filename>working/timer.py
'''
What we are trying to do here is support simple periodic wall-clock timers.
They come in two forms. The first is a simple periodic timer: it runs the
provided callback every N seconds. The second is a nested timer: it runs the
provided callback every N seconds (the primary period), then again M seconds
later (the secondary period). This allows for use cases like "turn on the
humidifier for five minutes every hour". TODO: allow for an optional secondary
callback; it should just reuse the primary callback if not defined.
'''
from sys import stderr
from time import time
from datetime import datetime, time as dt_time
import logging
# Custom Exception
class TimerInvalidSecondaryPeriod(ValueError):
    """Raised when a Timer's secondary period is invalid (e.g. longer than primary).

    Fixes: the default message previously called ``period.str()`` (an
    AttributeError for ints — ``str(period)`` was intended), and the base
    ValueError was never initialized, so ``str(exc)`` was empty.
    """
    def __init__(self, period, message=None):
        if not message:
            message = "Invalid secondary timer provided to Timer: " + str(period)
        super().__init__(message)
        self.message = message
        print(message, file=stderr)
# Factory functiuon to create wall-clock based timers.
# Factory function to create wall-clock based timers.
def new_wall_clock_timer(name, callback, period, on_hour=0, on_min=0, on_sec=0, secondary_period=None):
    """Build a Timer anchored to today's on_hour:on_min:on_sec wall time.

    If "now" already falls inside the current primary (or secondary) window,
    the callback fires immediately and the first trigger is advanced past now.
    """
    # figure out when the next valid primary period trigger time is.
    right_now = datetime.now()
    logging.info("New Timer Creation: '%s' OnHour: %d, OnMin: %d, Primary: %d, Secondary: %s"
                 % (name, on_hour, on_min, period, str(secondary_period)))
    logging.info("\tNow: " + right_now.strftime('%Y-%m-%d %H:%M:%S'))
    initial_start = first_trigger = datetime.combine(right_now.date(), dt_time(on_hour, on_min, on_sec)).timestamp()
    # If right now is in the middle of a period, run the callback immediately.
    if secondary_period:
        if right_now.timestamp() > initial_start and \
           right_now.timestamp() < initial_start + secondary_period:
            logging.info("\tStarting up in the middle of SECONDARY timer cycle - Triggering")
            callback()
        # Step forward in secondary-period increments until we pass "now".
        while first_trigger <= right_now.timestamp():
            first_trigger += secondary_period
    elif right_now.timestamp() > initial_start and \
            right_now.timestamp() < initial_start + period:
        logging.info("\tStarting up in the middle of PRIMARY timer cycle - Triggering")
        callback()
        # NOTE(review): this advance only happens when "now" is inside the
        # first primary window; if now is past initial_start + period,
        # first_trigger stays in the past — confirm this is intended.
        while first_trigger <= right_now.timestamp():
            first_trigger += period
    logging.info("\tNext Trigger: " + datetime.fromtimestamp(first_trigger).strftime('%Y-%m-%d %H:%M:%S'))
    return Timer(name, first_trigger, period, callback, secondary_period)
class Timer:
    """Periodic wall-clock timer with an optional nested secondary period.

    With only a primary period the callback fires every ``primary_period``
    seconds. With a secondary period the trigger alternates: fire at the
    cycle start, fire again ``secondary_period`` seconds later, then wait for
    the next cycle start ("humidifier on for 5 minutes every hour").
    """
    def __init__(self, name, next_trigger, primary_period, callback, secondary_period=None):
        self.name = name
        self.next_trigger = next_trigger          # epoch seconds of the next fire
        self.primary_period = primary_period      # seconds between cycle starts
        self.secondary_period = secondary_period  # optional "on" duration
        self.active_timer = "PRIMARY"             # which phase fires next
        self.callback = callback
        if secondary_period and secondary_period > primary_period:
            # Explicit message keeps the exception readable regardless of the
            # exception class's default-message construction.
            raise TimerInvalidSecondaryPeriod(
                secondary_period,
                "Timer '%s': secondary period %s exceeds primary period %s"
                % (name, secondary_period, primary_period))
    def get_as_dict(self):
        """Return a JSON-friendly snapshot of the timer state."""
        return {
            'name': self.name,
            'next_trigger': self.next_trigger,
            'primary_period': self.primary_period,
            'secondary_period': self.secondary_period,
            'active_timer': self.active_timer,
        }
    def is_time_to_run(self):
        """True when the wall clock has passed the next trigger time."""
        return time() > self.next_trigger
    def run(self, force=False):
        """Fire the callback if due (or *force* is True) and reschedule.

        Returns the callback's return value, or None when it was not yet time.
        """
        # No-op if it is not yet time to run.
        if force is False and self.is_time_to_run() is False:
            logging.warning("Running %s, but it's not time yet\n", self.name)
            return
        # Run the callback first; its value is returned unless it raises.
        rval = self.callback()
        if self.active_timer == "PRIMARY":
            if self.secondary_period:
                self.next_trigger += self.secondary_period
                self.active_timer = "SECONDARY"
            else:
                self.next_trigger += self.primary_period
        elif self.active_timer == "SECONDARY":
            self.next_trigger += self.primary_period - self.secondary_period
            self.active_timer = "PRIMARY"
        else:
            # Bug fix: logging.warning() has no file= parameter (a print()
            # leftover) — the original raised TypeError on this branch.
            logging.warning("WARNING : Timer found in unknown state, resetting to PRIMARY")
            self.active_timer = "PRIMARY"
        return rval
| 3.359375 | 3 |
py/path-sum-iii.py | ckclark/leetcode | 0 | 12765322 | from collections import Counter
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    """Count downward paths summing to a target using running prefix sums."""
    def dfs(self, cur):
        """Generator over the tree: for each node, yield how many previously
        seen prefix sums complete a path of the required total ending here."""
        if cur is None:
            return
        self.subsum += cur.val
        yield self.subsum_counter[self.subsum - self.needsum]
        self.subsum_counter[self.subsum] += 1
        yield from self.dfs(cur.left)
        yield from self.dfs(cur.right)
        # Backtrack: this prefix sum leaves the current root-to-node path.
        self.subsum_counter[self.subsum] -= 1
        self.subsum -= cur.val
    def pathSum(self, root, needsum):
        """
        :type root: TreeNode
        :type sum: int
        :rtype: int
        """
        self.subsum = 0
        self.needsum = needsum
        self.subsum_counter = Counter()
        self.subsum_counter[0] += 1  # empty prefix: paths starting at the root
        return sum(self.dfs(root))
| 3.578125 | 4 |
build/piman.app/pysnmp/entity/rfc3413/oneliner/ntforg.py | jackgisel/team-athens | 0 | 12765323 | <filename>build/piman.app/pysnmp/entity/rfc3413/oneliner/ntforg.py
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2019, <NAME> <<EMAIL>>
# License: http://snmplabs.com/pysnmp/license.html
#
# All code in this file belongs to obsolete, compatibility wrappers.
# Never use interfaces below for new applications!
#
from pysnmp.hlapi.asyncore import *
from pysnmp.hlapi.asyncore import sync
from pysnmp.hlapi.varbinds import *
from pysnmp.hlapi.lcd import *
from pyasn1.compat.octets import null
from pysnmp.entity import config
from pysnmp.entity.rfc3413 import context
__all__ = ['AsynNotificationOriginator',
'NotificationOriginator',
'MibVariable']
MibVariable = ObjectIdentity
class ErrorIndicationReturn(object):
    """Legacy tuple-like wrapper whose truthiness mirrors the first element
    (the error indication)."""
    def __init__(self, *vars):
        self.__vars = vars
    def __getitem__(self, i):
        return self.__vars[i]
    def __bool__(self):
        return bool(self.__vars[0])
    # Bug fix: the original __nonzero__ returned bool(self), which recurses
    # forever under Python 2 (bool() calls __nonzero__). Alias to __bool__.
    __nonzero__ = __bool__
    def __str__(self):
        return str(self.__vars[0])
class AsynNotificationOriginator(object):
    """Asynchronous TRAP/INFORM sender (obsolete oneliner compatibility API)."""
    vbProcessor = NotificationOriginatorVarBinds()
    lcd = NotificationOriginatorLcdConfigurator()
    def __init__(self, snmpEngine=None, snmpContext=None):
        if snmpEngine is None:
            self.snmpEngine = snmpEngine = SnmpEngine()
        else:
            self.snmpEngine = snmpEngine
        if snmpContext is None:
            self.snmpContext = context.SnmpContext(self.snmpEngine)
            config.addContext(
                self.snmpEngine, '' # this is leaky
            )
        else:
            self.snmpContext = snmpContext
        self.mibViewController = self.vbProcessor.getMibViewController(self.snmpEngine)
    def __del__(self):
        # Best-effort LCD cleanup when the originator is garbage-collected.
        self.uncfgNtfOrg()
    def cfgNtfOrg(self, authData, transportTarget, notifyType):
        """Apply LCD configuration for the given notification target."""
        return self.lcd.configure(
            self.snmpEngine, authData, transportTarget, notifyType
        )
    def uncfgNtfOrg(self, authData=None):
        """Remove LCD configuration (all targets when authData is None)."""
        return self.lcd.unconfigure(self.snmpEngine, authData)
    def makeVarBinds(self, varBinds):
        return self.vbProcessor.makeVarBinds(
            self.snmpEngine, varBinds
        )
    def unmakeVarBinds(self, varBinds, lookupNames, lookupValues):
        return self.vbProcessor.unmakeVarBinds(
            self.snmpEngine, varBinds, lookupNames or lookupValues
        )
    def sendNotification(self, authData, transportTarget,
                         notifyType, notificationType,
                         varBinds=(), # legacy, use NotificationType instead
                         cbInfo=(None, None),
                         lookupNames=False, lookupValues=False,
                         contextEngineId=None, # XXX ordering incompatibility
                         contextName=null):
        """Send a TRAP/INFORM asynchronously, adapting the user callback to
        both the modern (6-arg) and legacy (3-arg) calling conventions."""
        def __cbFun(snmpEngine, sendRequestHandle, errorIndication,
                    errorStatus, errorIndex, varBinds, cbCtx):
            cbFun, cbCtx = cbCtx
            try:
                # we need to pass response PDU information to user for INFORMs
                return cbFun and cbFun(
                    sendRequestHandle,
                    errorIndication,
                    errorStatus, errorIndex,
                    varBinds,
                    cbCtx
                )
            except TypeError:
                # a backward compatible way of calling user function
                return cbFun(
                    sendRequestHandle,
                    errorIndication,
                    cbCtx
                )
        # for backward compatibility
        if contextName is null and authData.contextName:
            contextName = authData.contextName
        # Coerce legacy notificationType spellings into a NotificationType.
        if not isinstance(notificationType,
                          (ObjectIdentity, ObjectType, NotificationType)):
            if isinstance(notificationType[0], tuple):
                # legacy
                notificationType = ObjectIdentity(notificationType[0][0], notificationType[0][1], *notificationType[1:])
            else:
                notificationType = ObjectIdentity(notificationType)
        if not isinstance(notificationType, NotificationType):
            notificationType = NotificationType(notificationType)
        return sendNotification(
            self.snmpEngine,
            authData, transportTarget,
            ContextData(contextEngineId or self.snmpContext.contextEngineId,
                        contextName),
            notifyType, notificationType.addVarBinds(*varBinds),
            __cbFun,
            cbInfo,
            lookupNames or lookupValues
        )
    asyncSendNotification = sendNotification
class NotificationOriginator(object):
    """Synchronous TRAP/INFORM sender (obsolete oneliner compatibility API)."""
    vbProcessor = NotificationOriginatorVarBinds()
    def __init__(self, snmpEngine=None, snmpContext=None, asynNtfOrg=None):
        # compatibility attributes
        self.snmpEngine = snmpEngine or SnmpEngine()
        self.mibViewController = self.vbProcessor.getMibViewController(self.snmpEngine)
    # the varBinds parameter is legacy, use NotificationType instead
    def sendNotification(self, authData, transportTarget, notifyType,
                         notificationType, *varBinds, **kwargs):
        """Send a notification synchronously.

        For INFORMs returns (errorIndication, errorStatus, errorIndex,
        varBinds) from the first response; for TRAPs returns None after the
        first dispatch iteration.
        """
        if 'lookupNames' not in kwargs:
            kwargs['lookupNames'] = False
        if 'lookupValues' not in kwargs:
            kwargs['lookupValues'] = False
        # Coerce legacy notificationType spellings into a NotificationType.
        if not isinstance(notificationType,
                          (ObjectIdentity, ObjectType, NotificationType)):
            if isinstance(notificationType[0], tuple):
                # legacy
                notificationType = ObjectIdentity(notificationType[0][0], notificationType[0][1], *notificationType[1:])
            else:
                notificationType = ObjectIdentity(notificationType)
        if not isinstance(notificationType, NotificationType):
            notificationType = NotificationType(notificationType)
        for (errorIndication,
             errorStatus,
             errorIndex,
             rspVarBinds) in sync.sendNotification(self.snmpEngine, authData,
                                                   transportTarget,
                                                   ContextData(kwargs.get('contextEngineId'),
                                                               kwargs.get('contextName', null)),
                                                   notifyType,
                                                   notificationType.addVarBinds(*varBinds),
                                                   **kwargs):
            if notifyType == 'inform':
                return errorIndication, errorStatus, errorIndex, rspVarBinds
            else:
                break
| 1.828125 | 2 |
LaneDetect.py | Eng-Mo/CarND-Advanced-Lane-Lines | 0 | 12765324 |
# coding: utf-8
# In[1]:
import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt
import matplotlib as mpimg
import numpy as np
from IPython.display import HTML
import os, sys
import glob
import moviepy
from moviepy.editor import VideoFileClip
from moviepy.editor import *
from IPython import display
from IPython.core.display import display
from IPython.display import Image
import pylab
import scipy.misc
# In[2]:
def region_of_interest(img):
    """Zero out everything outside a trapezoidal road region of *img*."""
    mask = np.zeros(img.shape, dtype=np.uint8)
    # Trapezoid roughly covering the lane area of a 1280x720 dashcam frame.
    roi_corners = np.array(
        [[(200, 675), (1200, 675), (700, 430), (500, 430)]],
        dtype=np.int32)
    fill_color = (255,)  # single-channel mask value
    cv2.fillPoly(mask, roi_corners, fill_color)
    return cv2.bitwise_and(img, mask)
# In[3]:
def ColorThreshold(img):
    """Binary mask of yellow and white lane pixels.

    Combines a yellow HSV range with three white detectors (HSV value band,
    HLS lightness band, and a bright-RGB range).
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    yellow = cv2.inRange(hsv, (20, 100, 100), (50, 255, 255))
    white_hsv = cv2.inRange(hsv, (0, 0, 255 - 68), (255, 20, 255))
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    white_hls = cv2.inRange(hls, (0, 255 - 60, 0), (255, 255, 60))
    white_rgb = cv2.inRange(img, (200, 200, 200), (255, 255, 255))
    return yellow | white_hsv | white_hls | white_rgb
# In[4]:
from skimage import morphology
def SobelThr(img):  # Sobel edge detection extraction
    """Combined Sobel edge mask of a grayscale image.

    A pixel is set to 1 when it passes the scaled |grad_x| threshold, the
    scaled |grad_y| threshold, or the gradient-magnitude threshold.
    """
    gray = img  # input is expected to be single-channel already
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=15)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=15)
    abs_sobelx = np.absolute(sobelx)
    abs_sobely = np.absolute(sobely)
    # Scale each gradient to 0..255 before thresholding.
    scaled_sobelx = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
    scaled_sobely = np.uint8(255*abs_sobely/np.max(abs_sobely))
    binary_outputabsx = np.zeros_like(scaled_sobelx)
    binary_outputabsx[(scaled_sobelx >= 70) & (scaled_sobelx <= 255)] = 1
    binary_outputabsy = np.zeros_like(scaled_sobely)
    binary_outputabsy[(scaled_sobely >= 100) & (scaled_sobely <= 150)] = 1
    # Gradient-magnitude threshold.
    mag_thresh = (100, 200)
    gradmag = np.sqrt(sobelx**2 + sobely**2)
    scale_factor = np.max(gradmag)/255
    gradmag = (gradmag/scale_factor).astype(np.uint8)
    binary_outputmag = np.zeros_like(gradmag)
    binary_outputmag[(gradmag >= mag_thresh[0]) & (gradmag <= mag_thresh[1])] = 1
    # Union of the three binary masks.
    combinedS = np.zeros_like(binary_outputabsx)
    combinedS[(((binary_outputabsx == 1) | (binary_outputabsy == 1)) | (binary_outputmag == 1))] = 1
    return combinedS
# In[5]:
def combinI(b1, b2):
    """Merge two binary masks: output pixel is 1 where b1 == 1 or b2 == 255.

    *b1* is a 0/1 Sobel mask, *b2* a 0/255 colour-threshold mask; the result
    shares b1's shape and dtype.
    """
    merged = np.zeros_like(b1)
    hit = (b1 == 1) | (b2 == 255)
    merged[hit] = 1
    return merged
# In[6]:
def prespectI(img):
    """Warp *img* to a bird's-eye view.

    Returns (warped image, perspective matrix M). Source/destination points
    are hard-coded for 1280x720 frames.
    """
    src = np.float32([[728, 475], [1058, 690], [242, 690], [565, 475]])
    dst = np.float32([[1058, 20], [1058, 700], [242, 700], [242, 20]])
    M = cv2.getPerspectiveTransform(src, dst)
    warped = cv2.warpPerspective(img, M, (1280, 720), flags=cv2.INTER_LINEAR)
    return (warped, M)
# In[7]:
def undistorT(imgorg):  # Calculate Undistortion coefficients
    """Calibrate the camera from the chessboard images in ./camera_cal.

    Returns the full cv2.calibrateCamera tuple (ret, mtx, dist, rvecs, tvecs).
    The *imgorg* argument is unused; calibration relies only on the files.
    NOTE(review): nx/ny are only referenced by the commented-out corner
    drawing; the detected pattern size is (6, 9) below — confirm intent.
    """
    nx = 9
    ny = 6
    objpoints = []  # 3-D points in chessboard coordinates
    imgpoints = []  # matching 2-D corner points in image coordinates
    objp = np.zeros((6*9, 3), np.float32)
    objp[:, :2] = np.mgrid[0:6, 0:9].T.reshape(-1, 2)
    images = glob.glob('./camera_cal/calibration*.jpg')
    for fname in images:  # find corner points and Make a list of calibration images
        img = cv2.imread(fname)
        # Convert to grayscale
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (6, 9), None)
        # If found, draw corners
        if ret == True:
            imgpoints.append(corners)
            objpoints.append(objp)
            # Draw and display the corners
            #cv2.drawChessboardCorners(img, (nx, ny), corners, ret)
    return cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
# In[8]:
def undistresult(img, mtx, dist):
    """Undistort one frame using precomputed camera matrix and distortion coeffs."""
    return cv2.undistort(img, mtx, dist, None, mtx)
# In[9]:
def LineFitting(wimgun):
    """Fit left/right lane-line polynomials on a warped binary image.

    A column histogram of the lower part of the image seeds two sliding-window
    searches; lane pixels collected window by window are fitted with a
    second-order polynomial x = a*y^2 + b*y + c per line.

    Returns (left_fit, ploty, right_fit, left_curverad, right_curverad,
    car_position): pixel-space fit coefficients, the y sample grid, the lane
    curvatures in metres, and the camera's lateral offset in metres.

    Fix: the deprecated ``np.int`` alias (removed in NumPy >= 1.24) is
    replaced by the builtin ``int`` — behaviour is unchanged, since
    ``np.int`` was an alias for ``int``.
    """
    minpix = 20                      # min pixels needed to recentre a window
    left_lane_inds = []
    right_lane_inds = []

    # Histogram of the lower rows; its two peaks seed the window search.
    histogram = np.sum(wimgun[350:, :], axis=0)
    out_img = np.dstack((wimgun, wimgun, wimgun))
    midpoint = int(histogram.shape[0] / 2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint

    nwindows = 9
    window_height = int(wimgun.shape[0] / nwindows)
    # Coordinates of all nonzero pixels, used by every window test below.
    nonzero = wimgun.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    leftx_current = leftx_base
    rightx_current = rightx_base
    margin = 80  # window half-width

    for window in range(nwindows):
        win_y_low = wimgun.shape[0] - (window + 1) * window_height
        win_y_high = wimgun.shape[0] - window * window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        # Visualisation only: draw the search windows.
        cv2.rectangle(out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high), (0, 255, 0), 2)
        cv2.rectangle(out_img, (win_xright_low, win_y_low), (win_xright_high, win_y_high), (0, 255, 0), 2)
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                          (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                           (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # Recentre the next window on the mean x of the pixels just found.
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))

    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]

    # Second-order polynomial fit per lane line (x as a function of y).
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    ploty = np.linspace(0, wimgun.shape[0] - 1, wimgun.shape[0])
    left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
    right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]

    # Visualisation of the fitted corridor (kept for parity with the original;
    # the rendered image is not returned).
    window_img = np.zeros_like(out_img)
    out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
    out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
    left_line_window1 = np.array([np.transpose(np.vstack([left_fitx - margin, ploty]))])
    left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx + margin, ploty])))])
    left_line_pts = np.hstack((left_line_window1, left_line_window2))
    right_line_window1 = np.array([np.transpose(np.vstack([right_fitx - margin, ploty]))])
    right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx + margin, ploty])))])
    right_line_pts = np.hstack((right_line_window1, right_line_window2))
    cv2.fillPoly(window_img, np.int_([left_line_pts]), (0, 255, 0))
    cv2.fillPoly(window_img, np.int_([right_line_pts]), (0, 255, 0))
    result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)

    # Curvature at the bottom of the image, in metres.
    y_eval = np.max(ploty)
    ym_per_pix = 30 / 720    # metres per pixel, y direction
    xm_per_pix = 3.7 / 700   # metres per pixel, x direction
    left_fit_cr = np.polyfit(ploty * ym_per_pix, left_fitx * xm_per_pix, 2)
    right_fit_cr = np.polyfit(ploty * ym_per_pix, right_fitx * xm_per_pix, 2)
    left_curverad = ((1 + (2 * left_fit_cr[0] * y_eval * ym_per_pix + left_fit_cr[1]) ** 2) ** 1.5) / np.absolute(2 * left_fit_cr[0])
    right_curverad = ((1 + (2 * right_fit_cr[0] * y_eval * ym_per_pix + right_fit_cr[1]) ** 2) ** 1.5) / np.absolute(2 * right_fit_cr[0])

    # Lateral offset of the camera centre from the lane centre (bottom row).
    # NOTE(review): the original uses image *height* / 2 here; a true
    # horizontal centre would be wimgun.shape[1] / 2 — kept as-is, confirm.
    camera_center = wimgun.shape[0] / 2
    car_position = (camera_center - (left_fitx[-1] + right_fitx[-1]) / 2) * xm_per_pix

    return (left_fit, ploty, right_fit, left_curverad, right_curverad, car_position)
# Create an image to draw the lines on
def unwrappedframe(img, pm, Minv, left_fit, ploty, right_fit):
    """Project the fitted lane polygon back onto the original camera frame.

    Evaluates both lane polynomials over *ploty*, fills the enclosed region in
    green on a blank warped canvas, unwarps it with *Minv*, and blends it onto
    *img*.
    """
    left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
    right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
    warp_zero = np.zeros_like(pm).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
    # Left boundary top-to-bottom, right boundary bottom-to-top → closed polygon.
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))
    cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))
    newwarp = cv2.warpPerspective(color_warp, Minv, (img.shape[1], img.shape[0]))
    return cv2.addWeighted(img, 1, newwarp, 0.3, 0)
| 2.640625 | 3 |
tests/test_zz_jvm_kill.py | edurand/cellom2tif | 4 | 12765325 | <filename>tests/test_zz_jvm_kill.py<gh_stars>1-10
from cellom2tif import cellom2tif
import bioformats as bf
import pytest
cfile = 'test-data/d1/MFGTMP_120628160001_C18f00d0.C01'
def test_read_image():
    """read_image accepts a file path and yields a 512x512 frame."""
    frame = cellom2tif.read_image(cfile)
    assert frame.shape == (512, 512)
def test_read_image_from_reader():
    """read_image also accepts an already-open bioformats ImageReader."""
    reader = bf.ImageReader(cfile)
    frame = cellom2tif.read_image(reader)
    assert frame.shape == (512, 512)
def test_done():
    """done() kills the JVM and raises the module-level flag."""
    cellom2tif.done()
    assert cellom2tif.VM_KILLED
def test_vm_killed_error():
    """After done(), read_image must raise a JVM-killed RuntimeError."""
    cellom2tif.done()
    with pytest.raises(RuntimeError) as err:
        cellom2tif.read_image(cfile)
    assert err.value.args[0].startswith('The Java Virtual Machine')
| 2.109375 | 2 |
src/atcoder/abc056/a/sol_0.py | kagemeka/competitive-programming | 1 | 12765326 | import typing
def main() -> typing.NoReturn:
    """Read two whitespace-separated tokens; print 'H' if equal, else 'D'."""
    first, second = input().split()
    verdict = 'H' if first == second else 'D'
    print(verdict)


main()
tests/keys.py | snikch/chia-blockchain | 0 | 12765327 | from blspy import AugSchemeMPL
from src.types.coin_solution import CoinSolution
from src.types.spend_bundle import SpendBundle
from src.wallet.puzzles import p2_delegated_puzzle
from src.wallet.puzzles.puzzle_utils import make_create_coin_condition
from tests.util.key_tool import KeyTool
from src.util.ints import uint32
from src.wallet.derive_keys import master_sk_to_wallet_sk
# Deterministic test master key (seed of thirty-two 0x01 bytes).
MASTER_KEY = AugSchemeMPL.key_gen(bytes([1] * 32))


def puzzle_program_for_index(index: uint32):
    """Build the pay-to-delegated puzzle for the wallet key at *index*."""
    wallet_sk = master_sk_to_wallet_sk(MASTER_KEY, index)
    return p2_delegated_puzzle.puzzle_for_pk(bytes(wallet_sk.get_g1()))
def puzzle_hash_for_index(index: uint32):
    """Hash of the delegated puzzle for the wallet key at *index*."""
    puzzle = puzzle_program_for_index(index)
    return puzzle.get_hash()
def conditions_for_payment(puzzle_hash_amount_pairs):
    """Build a CREATE_COIN condition for each (puzzle_hash, amount) pair."""
    conditions = []
    for puzzle_hash, amount in puzzle_hash_amount_pairs:
        conditions.append(make_create_coin_condition(puzzle_hash, amount))
    return conditions
def make_default_keyUtil():
    """Build a KeyTool preloaded with the first 10 wallet secret exponents."""
    keychain = KeyTool()
    exponents = []
    for i in range(10):
        wallet_sk = master_sk_to_wallet_sk(MASTER_KEY, uint32(i))
        exponents.append(int.from_bytes(bytes(wallet_sk), "big"))
    keychain.add_secret_exponents(exponents)
    return keychain


# Shared keychain used as the default for the helpers below.
DEFAULT_KEYTOOL = make_default_keyUtil()
def spend_coin(coin, conditions, index, keychain=DEFAULT_KEYTOOL):
    """Spend *coin* under the puzzle at wallet *index*, asserting *conditions*."""
    puzzle = puzzle_program_for_index(index)
    solution = p2_delegated_puzzle.solution_for_conditions(puzzle, conditions)
    return build_spend_bundle(coin, solution, keychain)
def build_spend_bundle(coin, solution, keychain=DEFAULT_KEYTOOL):
    """Wrap (coin, solution) into a SpendBundle signed by *keychain*."""
    signature = keychain.signature_for_solution(solution, bytes(coin))
    return SpendBundle([CoinSolution(coin, solution)], signature)
| 2 | 2 |
python/design/0225_implement_stack_using_queues.py | linshaoyong/leetcode | 6 | 12765328 | class MyStack(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.data = []
def push(self, x):
"""
Push element x onto stack.
:type x: int
:rtype: None
"""
q = [x]
while self.data:
q.append(self.data.pop(0))
self.data = q
def pop(self):
"""
Removes the element on top of the stack and returns that element.
:rtype: int
"""
return self.data.pop(0)
def top(self):
"""
Get the top element.
:rtype: int
"""
return self.data[0]
def empty(self):
"""
Returns whether the stack is empty.
:rtype: bool
"""
return len(self.data) == 0
def test_mystack():
    """Smoke-test push/top/pop/empty behaviour of MyStack."""
    stack = MyStack()
    stack.push(1)
    stack.push(2)
    assert stack.top() == 2
    assert stack.pop() == 2
    assert stack.empty() is False
| 4.25 | 4 |
copy farture.py | wy1157497582/arcpy | 1 | 12765329 | <gh_stars>1-10
#-*- coding:utf-8-*-
import arcpy,os
inPath = u"F:\第四批数据\beizh\ls.mdb"
arcpy.env.workspace = inPath
arcpy.env.overwriteoutput=1
outPath =u"F:"
outName = "dian.mdb"
try:
    def copyAll(outPath, outName):
        # Copy every feature class in the current workspace into outPath,
        # keeping the base name (extension stripped).
        for fc in arcpy.ListFeatureClasses():
            arcpy.FeatureClassToFeatureClass_conversion(fc, outPath, os.path.splitext(fc)[0])

    # Output path 1: copy the source database's top-level feature classes
    # straight into the new database.
    outPath1 = outPath + os.sep + outName
    arcpy.CreateFileGDB_management(outPath, outName)
    copyAll(outPath1, outName)
    # Recreate each feature dataset with its original spatial reference.
    for iFD in arcpy.ListDatasets("", "Feature"):
        desc = arcpy.Describe(iFD)
        sr = desc.spatialReference
        arcpy.CreateFeatureDataset_management(outPath1, iFD, sr)
        # Point the workspace at the dataset (copying its contents is
        # currently disabled in the original script).
        arcpy.env.workspace = inPath + r"/" + str(iFD)
except arcpy.ExecuteError:
    print(arcpy.GetMessages())
hathor/p2p/states/hello.py | khengleng/khathor | 0 | 12765330 | import json
from typing import TYPE_CHECKING, Any, Dict
import hathor
from hathor.conf import HathorSettings
from hathor.p2p.messages import ProtocolMessages
from hathor.p2p.states.base import BaseState
from hathor.p2p.utils import get_genesis_short_hash, get_settings_hello_dict
if TYPE_CHECKING:
from hathor.p2p.protocol import HathorProtocol # noqa: F401
settings = HathorSettings()
class HelloState(BaseState):
    """Initial connection state: exchange HELLO messages and validate that the
    peer runs a compatible app/network/genesis/settings before advancing to
    the PEER_ID state."""

    def __init__(self, protocol: 'HathorProtocol') -> None:
        super().__init__(protocol)
        # HELLO is the only message handled while in this state.
        self.cmd_map.update({
            ProtocolMessages.HELLO: self.handle_hello,
        })

    def _app(self) -> str:
        # Application identifier advertised to peers.
        return f'Hathor v{hathor.__version__}'

    def _get_hello_data(self) -> Dict[str, Any]:
        """ Returns a dict with information about this node that will
        be sent to a peer.
        """
        protocol = self.protocol
        remote = protocol.transport.getPeer()
        return {
            'app': self._app(),
            'network': protocol.network,
            'remote_address': '{}:{}'.format(remote.host, remote.port),
            'genesis_short_hash': get_genesis_short_hash(),
            'timestamp': protocol.node.reactor.seconds(),
            'settings_dict': get_settings_hello_dict(),
            'capabilities': [],
        }

    def on_enter(self) -> None:
        # After a connection is made, we just send a HELLO message.
        self.send_hello()

    def send_hello(self) -> None:
        """ Send a HELLO message, identifying the app and giving extra
        information about this node to the peer.
        """
        data = self._get_hello_data()
        self.send_message(ProtocolMessages.HELLO, json.dumps(data))

    def handle_hello(self, payload: str) -> None:
        """ Executed when a HELLO message is received. It basically
        checks the application compatibility.
        """
        protocol = self.protocol
        try:
            data = json.loads(payload)
        except ValueError:
            protocol.send_error_and_close_connection('Invalid payload.')
            return

        required_fields = {'app', 'network', 'remote_address', 'genesis_short_hash', 'timestamp', 'capabilities'}
        # settings_dict is optional
        if not set(data).issuperset(required_fields):
            # If data does not contain all required fields
            protocol.send_error_and_close_connection('Invalid payload.')
            return

        if data['app'] != self._app():
            # Version mismatch is tolerated: logged, not fatal.
            self.log.warn('different versions', theirs=data['app'], ours=self._app())
        if data['network'] != protocol.network:
            protocol.send_error_and_close_connection('Wrong network.')
            return
        if data['genesis_short_hash'] != get_genesis_short_hash():
            protocol.send_error_and_close_connection('Different genesis.')
            return
        # Reject peers whose clocks diverge by more than half the allowed
        # future-timestamp window.
        if abs(data['timestamp'] - protocol.node.reactor.seconds()) > settings.MAX_FUTURE_TIMESTAMP_ALLOWED/2:
            protocol.send_error_and_close_connection('Nodes timestamps too far apart.')
            return

        settings_dict = get_settings_hello_dict()
        if 'settings_dict' in data and data['settings_dict'] != settings_dict:
            # If settings_dict is sent we must validate it
            protocol.send_error_and_close_connection(
                'Settings values are different. {}'.format(json.dumps(settings_dict))
            )
            return

        protocol.app_version = data['app']
        protocol.change_state(protocol.PeerState.PEER_ID)
| 2.40625 | 2 |
similarIV/processing_data.py | pinedbean/similarIV | 1 | 12765331 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from .instant_function import data_vars
def create_categorical_onehot(df, category_columns):
    """One-hot encode each categorical column of *df*.

    Dummy columns are named 'col_<column>_<level>' and concatenated side by
    side in the order of *category_columns*.
    """
    frames = [
        pd.get_dummies(df[column], prefix='col_' + column)
        for column in category_columns
    ]
    return pd.concat(frames, axis=1)
def create_norm_continuos_columns(df, continuos_columns):
    """Z-score the continuous columns, treating NaNs as 0 for the scaling.

    Returns (normalised frame, mean dict, std dict).
    NOTE(review): the scaling statistics are computed on the NaN-filled data
    while the returned dicts come from the raw data — presumably intentional,
    confirm with the caller.
    """
    filled = df[continuos_columns].fillna(0)
    normalised = (filled - filled.mean()) / filled.std()
    mean_dict = dict(df[continuos_columns].mean())
    std_dict = dict(df[continuos_columns].std())
    return normalised, mean_dict, std_dict
def combine_continus_norm_and_categorical_onehot_and_sep_target(df, continuos_columns, category_columns, target_columns):
    """Assemble the training feature frame and split off the target column(s).

    Returns (features, target, mean_dict, std_dict, feature_columns) where
    features = z-scored continuous columns followed by one-hot dummies.
    """
    normalised, mean_dict, std_dict = create_norm_continuos_columns(df, continuos_columns)
    onehot = create_categorical_onehot(df, category_columns)
    feature_df = pd.concat([normalised, onehot], axis=1)
    target_df = df[target_columns]
    return feature_df, target_df, mean_dict, std_dict, feature_df.columns
def get_IV(feature_df, target_df):
    """Compute the information value of every feature via data_vars.

    Returns (IV table, iv array aligned with feature_df.columns).
    """
    final_iv, IV = data_vars(feature_df, target_df)
    ivs = np.zeros(len(feature_df.columns))
    for position, column in enumerate(feature_df.columns):
        matches = IV[IV['VAR_NAME'] == column]
        ivs[position] = matches['IV'].values[0]
    return IV, ivs
def norm_mat(x):
    """L2-normalise each row of a 2-D array (no guard for all-zero rows)."""
    row_norms = np.linalg.norm(x, axis=1, keepdims=True)
    return x / row_norms
def get_pos_feat(feature_df, target_df, ivs):
    """IV-weighted, row-normalised feature vectors of the positive rows.

    Positive rows are those whose target value equals 1.
    """
    positive_rows = feature_df.loc[target_df[target_df == 1].index]
    weighted = positive_rows.values * ivs
    return norm_mat(weighted)
def process_test_data(df, continuos_columns, category_columns, mean_dict, std_dict, feature_columns):
    """Build the test-time feature matrix matching the training schema.

    Continuous columns are z-scored using the *training* means/stds, categorical
    columns are one-hot encoded, and any dummy column seen at training time but
    absent from the test data is added as an all-zero column, so the returned
    frame has exactly ``feature_columns`` in training order.

    Fixes: the original computed ``df[continuos_columns].fillna(0)`` into an
    unused local (dead code, removed) and indexed columns with a dict
    (``df[mean_dict]``) — made explicit via ``list(mean_dict)``.
    NOTE(review): unlike training, test-set NaNs are NOT filled before scaling
    here — confirm whether that asymmetry is intended.
    """
    continuous_cols = list(mean_dict)
    norm_continuous = (df[continuous_cols] - list(mean_dict.values())) / list(std_dict.values())

    dummies = [
        pd.get_dummies(df[column], prefix='col_' + column)
        for column in category_columns
    ]
    feature_test = pd.concat([norm_continuous] + dummies, axis=1)

    # Add zero columns for training-time dummies missing from the test set.
    for missing in set(feature_columns) - set(feature_test.columns):
        feature_test[missing] = 0

    # Reorder to the exact training column layout.
    return feature_test[feature_columns]
sancho/dglload.py | el-assistant/sancho | 0 | 12765332 | <filename>sancho/dglload.py<gh_stars>0
"""
This module implents data loading into DGL library
"""
from defaults import *
import dgl
import torch as th
from parsing import ParsedText
from neo4jschema import ASTnode as N4node
def get_coo(node: N4node):
    """Build COO connectivity for the AST rooted at a Neo4j ``module`` node.

    Fix: the annotation previously referenced ``N4Node``, which is undefined —
    the import aliases ``ASTnode`` as ``N4node`` — so evaluating the signature
    raised NameError at import time.

    Currently a stub: validates the root kind and prepares the visited map.
    """
    assert node.kind == "module"
    visited = {}
    # TODO: tag nodes by file name to improve query
def make_dgl_graph(parsed: ParsedText) -> dgl.DGLGraph:
    # Stub: intended to convert a parsed source tree into a DGL graph;
    # currently only validates that parsing produced a tree and returns None
    # despite the annotation.
    assert parsed.tree
    pass
| 2.046875 | 2 |
src/main.py | Nenma/ga-qas-sm | 0 | 12765333 | <reponame>Nenma/ga-qas-sm
'Main module.'
import json
import random
import tweepy
import crawler.twitter_crawler as tc
import controller
def get_tweet():
    """Pick a random trending topic and return the tweet generated for it."""
    trends = tc.get_trends()
    chosen_trend = random.choice(trends)
    return controller.get_selected_tweet(chosen_trend)
def post_tweet():
    """Load Twitter credentials, generate a tweet and post it.

    Fix: the credentials file was opened without ever being closed; it is now
    read inside a ``with`` block (and parsed with ``json.load`` directly).
    """
    print('Get credentials')
    with open('config/keys.json', 'r') as keys_file:
        keys = json.load(keys_file)
    consumer_key = keys['tw_consumer_key']
    consumer_secret = keys['tw_consumer_secret']
    access_token = keys['tw_access_token']
    access_token_secret = keys['tw_access_token_secret']

    print('Authenticate')
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)

    print('Get tweet')
    tweet = get_tweet()

    print(f'Post tweet: {tweet}')
    api.update_status('[GA-QAS-SM Bot]\n\n' + tweet)
# Script entry point: post a single generated tweet.
if __name__ == '__main__':
    post_tweet()
urbarium/character/migrations/0005_auto_20200518_1218.py | nikerzetic/zacasno-ime | 0 | 12765334 | # Generated by Django 3.0.4 on 2020-05-18 10:18
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: make Character.notes a blank-allowed
    # TextField capped at 1000 characters (the '' default is applied once and
    # not kept, per preserve_default=False).

    dependencies = [
        ('character', '0004_auto_20200518_1215'),
    ]

    operations = [
        migrations.AlterField(
            model_name='character',
            name='notes',
            field=models.TextField(blank=True, default='', max_length=1000),
            preserve_default=False,
        ),
    ]
| 1.390625 | 1 |
CV/SemSegPaddle/train.py | zhangyimi/Research | 1,319 | 12765335 | <filename>CV/SemSegPaddle/train.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# GPU memory garbage collection optimization flags
os.environ['FLAGS_eager_delete_tensor_gb'] = "0.0"
import sys
import timeit
import argparse
import pprint
import shutil
import functools
import paddle
import numpy as np
import paddle.fluid as fluid
from src.utils.metrics import ConfusionMatrix
from src.utils.config import cfg
from src.utils.timer import Timer, calculate_eta
from src.utils import dist_utils
from src.datasets import build_dataset
from src.models.model_builder import build_model
from src.models.model_builder import ModelPhase
from src.models.model_builder import parse_shape_from_file
from eval import evaluate
from vis import visualize
def parse_args():
    """Parse command-line options for the training entry point.

    Trailing positional ``opts`` collects KEY VALUE overrides merged into the
    global config (see utils/config.py).
    """
    parser = argparse.ArgumentParser(description='semseg-paddle')
    parser.add_argument(
        '--cfg',
        dest='cfg_file',
        help='Config file for training (and optionally testing)',
        default=None,
        type=str)
    parser.add_argument(
        '--use_gpu',
        dest='use_gpu',
        help='Use gpu or cpu',
        action='store_true',
        default=False)
    parser.add_argument(
        '--use_mpio',
        dest='use_mpio',
        help='Use multiprocess I/O or not',
        action='store_true',
        default=False)
    parser.add_argument(
        '--log_steps',
        dest='log_steps',
        help='Display logging information at every log_steps',
        default=10,
        type=int)
    parser.add_argument(
        '--debug',
        dest='debug',
        help='debug mode, display detail information of training',
        action='store_true')
    parser.add_argument(
        '--use_vdl',
        dest='use_vdl',
        help='whether to record the data during training to VisualDL',
        action='store_true')
    parser.add_argument(
        '--vdl_log_dir',
        dest='vdl_log_dir',
        help='VisualDL logging directory',
        default=None,
        type=str)
    parser.add_argument(
        '--do_eval',
        dest='do_eval',
        help='Evaluation models result on every new checkpoint',
        action='store_true')
    parser.add_argument(
        'opts',
        help='See utils/config.py for all options',
        default=None,
        nargs=argparse.REMAINDER)
    return parser.parse_args()
def save_checkpoint(exe, program, ckpt_name):
    """Persist trainable parameters for evaluation or resumed training.

    The parameter file name encodes model, backbone, dataset and epoch;
    returns the checkpoint directory.
    """
    filename = '{}_{}_{}_epoch_{}.pdparams'.format(
        str(cfg.MODEL.MODEL_NAME), str(cfg.MODEL.BACKBONE),
        str(cfg.DATASET.DATASET_NAME), ckpt_name)
    ckpt_dir = cfg.TRAIN.MODEL_SAVE_DIR
    print("Save model checkpoint to {}".format(ckpt_dir))
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    fluid.io.save_params(exe, ckpt_dir, program, filename)
    return ckpt_dir
def load_checkpoint(exe, program):
    """
    Load a checkpoint from the resume directory and work out which epoch to
    continue from (the directory's basename encodes the epoch, or 'final').
    """
    print('Resume model training from:', cfg.TRAIN.RESUME_MODEL_DIR)
    if not os.path.exists(cfg.TRAIN.RESUME_MODEL_DIR):
        raise ValueError("TRAIN.PRETRAIN_MODEL {} not exist!".format(
            cfg.TRAIN.RESUME_MODEL_DIR))

    fluid.io.load_persistables(
        exe, cfg.TRAIN.RESUME_MODEL_DIR, main_program=program)

    model_path = cfg.TRAIN.RESUME_MODEL_DIR
    # Strip a trailing path separator so basename yields the epoch component
    if model_path[-1] == os.sep:
        model_path = model_path[0:-1]
    epoch_name = os.path.basename(model_path)
    # If resume model is the final model
    if epoch_name == 'final':
        begin_epoch = cfg.SOLVER.NUM_EPOCHS
    # If the path ends in digits, restore epoch status and continue after it
    elif epoch_name.isdigit():
        epoch = int(epoch_name)
        begin_epoch = epoch + 1
    else:
        raise ValueError("Resume model path is not valid!")

    print("Model checkpoint loaded successfully!")
    return begin_epoch
def print_info(*msg):
    """Print *msg* only on the chief trainer (rank 0) to avoid duplicated logs."""
    if cfg.TRAINER_ID == 0:
        print(*msg)
def train(cfg):
startup_prog = fluid.Program()
train_prog = fluid.Program()
drop_last = True
dataset = build_dataset(cfg.DATASET.DATASET_NAME,
file_list=cfg.DATASET.TRAIN_FILE_LIST,
mode=ModelPhase.TRAIN,
shuffle=True,
data_dir=cfg.DATASET.DATA_DIR,
base_size= cfg.DATAAUG.BASE_SIZE, crop_size= cfg.DATAAUG.CROP_SIZE, rand_scale=True)
def data_generator():
if args.use_mpio:
data_gen = dataset.multiprocess_generator(
num_processes=cfg.DATALOADER.NUM_WORKERS,
max_queue_size=cfg.DATALOADER.BUF_SIZE)
else:
data_gen = dataset.generator()
batch_data = []
for b in data_gen:
batch_data.append(b)
if len(batch_data) == (cfg.TRAIN_BATCH_SIZE // cfg.NUM_TRAINERS):
for item in batch_data:
yield item[0], item[1], item[2]
batch_data = []
# If use sync batch norm strategy, drop last batch if number of samples
# in batch_data is less then cfg.BATCH_SIZE to avoid NCCL hang issues
if not cfg.TRAIN.SYNC_BATCH_NORM:
for item in batch_data:
yield item[0], item[1], item[2]
# Get device environment
gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0))
place = fluid.CUDAPlace(gpu_id) if args.use_gpu else fluid.CPUPlace()
places = fluid.cuda_places() if args.use_gpu else fluid.cpu_places()
# Get number of GPU
dev_count = cfg.NUM_TRAINERS if cfg.NUM_TRAINERS > 1 else len(places)
print_info("#device count: {}".format(dev_count))
cfg.TRAIN_BATCH_SIZE = dev_count * int(cfg.TRAIN_BATCH_SIZE_PER_GPU)
print_info("#train_batch_size: {}".format(cfg.TRAIN_BATCH_SIZE))
print_info("#batch_size_per_dev: {}".format(cfg.TRAIN_BATCH_SIZE_PER_GPU))
py_reader, avg_loss, lr, pred, grts, masks = build_model(
train_prog, startup_prog, phase=ModelPhase.TRAIN)
py_reader.decorate_sample_generator(
data_generator, batch_size=cfg.TRAIN_BATCH_SIZE_PER_GPU, drop_last=drop_last)
exe = fluid.Executor(place)
exe.run(startup_prog)
exec_strategy = fluid.ExecutionStrategy()
# Clear temporary variables every 100 iteration
if args.use_gpu:
exec_strategy.num_threads = fluid.core.get_cuda_device_count()
exec_strategy.num_iteration_per_drop_scope = 100
build_strategy = fluid.BuildStrategy()
if cfg.NUM_TRAINERS > 1 and args.use_gpu:
dist_utils.prepare_for_multi_process(exe, build_strategy, train_prog)
exec_strategy.num_threads = 1
if cfg.TRAIN.SYNC_BATCH_NORM and args.use_gpu:
if dev_count > 1:
# Apply sync batch norm strategy
print_info("Sync BatchNorm strategy is effective.")
build_strategy.sync_batch_norm = True
else:
print_info(
"Sync BatchNorm strategy will not be effective if GPU device"
" count <= 1")
compiled_train_prog = fluid.CompiledProgram(train_prog).with_data_parallel(
loss_name=avg_loss.name,
exec_strategy=exec_strategy,
build_strategy=build_strategy)
# Resume training
begin_epoch = cfg.SOLVER.BEGIN_EPOCH
if cfg.TRAIN.RESUME_MODEL_DIR:
begin_epoch = load_checkpoint(exe, train_prog)
# Load pretrained model
elif os.path.exists(cfg.TRAIN.PRETRAINED_MODEL_DIR):
print_info('Pretrained model dir: ', cfg.TRAIN.PRETRAINED_MODEL_DIR)
load_vars = []
load_fail_vars = []
def var_shape_matched(var, shape):
"""
Check whehter persitable variable shape is match with current network
"""
var_exist = os.path.exists(
os.path.join(cfg.TRAIN.PRETRAINED_MODEL_DIR, var.name))
if var_exist:
var_shape = parse_shape_from_file(
os.path.join(cfg.TRAIN.PRETRAINED_MODEL_DIR, var.name))
return var_shape == shape
return False
for x in train_prog.list_vars():
if isinstance(x, fluid.framework.Parameter):
shape = tuple(fluid.global_scope().find_var(
x.name).get_tensor().shape())
if var_shape_matched(x, shape):
load_vars.append(x)
else:
load_fail_vars.append(x)
fluid.io.load_vars(
exe, dirname=cfg.TRAIN.PRETRAINED_MODEL_DIR, vars=load_vars)
for var in load_vars:
print_info("Parameter[{}] loaded sucessfully!".format(var.name))
for var in load_fail_vars:
print_info(
"Parameter[{}] don't exist or shape does not match current network, skip"
" to load it.".format(var.name))
print_info("{}/{} pretrained parameters loaded successfully!".format(
len(load_vars),
len(load_vars) + len(load_fail_vars)))
else:
print_info(
'Pretrained model dir {} not exists, training from scratch...'.
format(cfg.TRAIN.PRETRAINED_MODEL_DIR))
fetch_list = [avg_loss.name, lr.name]
if args.debug:
# Fetch more variable info and use streaming confusion matrix to
# calculate IoU results if in debug mode
np.set_printoptions(
precision=4, suppress=True, linewidth=160, floatmode="fixed")
fetch_list.extend([pred.name, grts.name, masks.name])
cm = ConfusionMatrix(cfg.DATASET.NUM_CLASSES, streaming=True)
if args.use_vdl:
if not args.vdl_log_dir:
print_info("Please specify the log directory by --vdl_log_dir.")
exit(1)
from visualdl import LogWriter
log_writer = LogWriter(args.vdl_log_dir)
# trainer_id = int(os.getenv("PADDLE_TRAINER_ID", 0))
# num_trainers = int(os.environ.get('PADDLE_TRAINERS_NUM', 1))
step = 0
all_step = cfg.DATASET.TRAIN_TOTAL_IMAGES // cfg.TRAIN_BATCH_SIZE
if cfg.DATASET.TRAIN_TOTAL_IMAGES % cfg.TRAIN_BATCH_SIZE and drop_last != True:
all_step += 1
all_step *= (cfg.SOLVER.NUM_EPOCHS - begin_epoch + 1)
avg_loss = 0.0
timer = Timer()
timer.start()
if begin_epoch > cfg.SOLVER.NUM_EPOCHS:
raise ValueError(
("begin epoch[{}] is larger than cfg.SOLVER.NUM_EPOCHS[{}]").format(
begin_epoch, cfg.SOLVER.NUM_EPOCHS))
if args.use_mpio:
print_info("Use multiprocess reader")
else:
print_info("Use multi-thread reader")
for epoch in range(begin_epoch, cfg.SOLVER.NUM_EPOCHS + 1):
py_reader.start()
while True:
try:
if args.debug:
# Print category IoU and accuracy to check whether the
# traning process is corresponed to expectation
loss, lr, pred, grts, masks = exe.run(
program=compiled_train_prog,
fetch_list=fetch_list,
return_numpy=True)
cm.calculate(pred, grts, masks)
avg_loss += np.mean(np.array(loss))
step += 1
if step % args.log_steps == 0:
speed = args.log_steps / timer.elapsed_time()
avg_loss /= args.log_steps
category_acc, mean_acc = cm.accuracy()
category_iou, mean_iou = cm.mean_iou()
print_info((
"epoch={}/{} step={}/{} lr={:.5f} loss={:.4f} acc={:.5f} mIoU={:.5f} step/sec={:.3f} | ETA {}"
).format(epoch, cfg.SOLVER.NUM_EPOCHS, step, all_step, lr[0], avg_loss, mean_acc,
mean_iou, speed,
calculate_eta(all_step - step, speed)))
print_info("Category IoU: ", category_iou)
print_info("Category Acc: ", category_acc)
if args.use_vdl:
log_writer.add_scalar('Train/mean_iou', mean_iou,
step)
log_writer.add_scalar('Train/mean_acc', mean_acc,
step)
log_writer.add_scalar('Train/loss', avg_loss,
step)
log_writer.add_scalar('Train/lr', lr[0],
step)
log_writer.add_scalar('Train/step/sec', speed,
step)
sys.stdout.flush()
avg_loss = 0.0
cm.zero_matrix()
timer.restart()
else:
# If not in debug mode, avoid unnessary log and calculate
loss, lr = exe.run(
program=compiled_train_prog,
fetch_list=fetch_list,
return_numpy=True)
avg_loss += np.mean(np.array(loss))
step += 1
if step % args.log_steps == 0 and cfg.TRAINER_ID == 0:
avg_loss /= args.log_steps
speed = args.log_steps / timer.elapsed_time()
print((
"epoch={}/{} step={}/{} lr={:.5f} loss={:.4f} step/sec={:.3f} | ETA {}"
).format(epoch, cfg.SOLVER.NUM_EPOCHS, global_step, all_step, lr[0], avg_loss, speed,
calculate_eta(all_step - global_step, speed)))
if args.use_vdl:
log_writer.add_scalar('Train/loss', avg_loss,
step)
log_writer.add_scalar('Train/lr', lr[0],
step)
log_writer.add_scalar('Train/speed', speed,
step)
sys.stdout.flush()
avg_loss = 0.0
timer.restart()
except fluid.core.EOFException:
py_reader.reset()
break
except Exception as e:
print(e)
if epoch % cfg.TRAIN.SNAPSHOT_EPOCH == 0 and cfg.TRAINER_ID == 0:
ckpt_dir = save_checkpoint(exe, train_prog, epoch)
if args.do_eval:
print("Evaluation start")
_, mean_iou, _, mean_acc = evaluate(
cfg=cfg,
ckpt_dir=ckpt_dir,
use_gpu=args.use_gpu,
use_mpio=args.use_mpio)
if args.use_vdl:
log_writer.add_scalar('Evaluate/mean_iou', mean_iou,
step)
log_writer.add_scalar('Evaluate/mean_acc', mean_acc,
step)
# Use VisualDL to visualize results
if args.use_vdl and cfg.DATASET.VIS_FILE_LIST is not None:
visualize(
cfg=cfg,
use_gpu=args.use_gpu,
vis_file_list=cfg.DATASET.VIS_FILE_LIST,
vis_dir="visual",
ckpt_dir=ckpt_dir,
log_writer=log_writer)
# save final model
if cfg.TRAINER_ID == 0:
save_checkpoint(exe, train_prog, 'final')
if args.use_vdl:
log_writer.close()
def main(args):
    """Entry point: merge CLI options into the global config and start training.

    Later sources win: built-in defaults < ``--cfg_file`` YAML < ``--opts``
    KEY VALUE overrides from the command line.
    """
    if args.cfg_file is not None:
        cfg.update_from_file(args.cfg_file)
    if args.opts:
        cfg.update_from_list(args.opts)
    # Distributed-training identity; these env vars are set by the Paddle launcher
    # and default to a single-trainer setup when absent.
    cfg.TRAINER_ID = int(os.getenv("PADDLE_TRAINER_ID", 0))
    cfg.NUM_TRAINERS = int(os.environ.get('PADDLE_TRAINERS_NUM', 1))
    # Validate the merged config and derive any dependent settings.
    cfg.check_and_infer()
    print_info(pprint.pformat(cfg))
    train(cfg)
if __name__ == '__main__':
    args = parse_args()
    # Wall-clock the whole training run.
    start = timeit.default_timer()
    main(args)
    end = timeit.default_timer()
    # Elapsed seconds converted to hours.
    print("training time: {} h".format(1.0*(end-start)/3600))
| 1.90625 | 2 |
l10n_ve_isrl/models/account_invoice_inherit.py | Chief0-0/Localizacion_ERP_V12 | 0 | 12765336 | <filename>l10n_ve_isrl/models/account_invoice_inherit.py<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright 2019 <NAME> <<EMAIL>>
from odoo import _, api, fields, models
from odoo.exceptions import UserError, ValidationError
class AccountInvoiceTaxInherit(models.Model):
    """Extend invoice tax lines with ISRL withholding bookkeeping fields."""
    _inherit = "account.invoice.tax"
    # Taxable base the withholding was computed from.
    subject_amount = fields.Float(string="Subject Amount")
    # Read-only mirror of subject_amount for display purposes.
    subject_amount_total = fields.Float(related="subject_amount", readonly=True)
    # Marks this tax line as a withholding (set by set_income_taxes).
    withholding = fields.Boolean(string="Withholdings", default=False)
    date = fields.Date(string="Date")
class AccountInvoiceLineInherit(models.Model):
    """Extend invoice lines with the income-withholding taxes to apply."""
    _inherit = "account.invoice.line"

    income_tax = fields.Many2many("account.tax", string="Retenciones")

    @api.onchange("product_id")
    def onchange_product_id(self):
        """Restrict the selectable withholdings to the partner's purchase taxes."""
        partner_taxes = self.partner_id.income_tax
        if not partner_taxes:
            return
        purchase_tax_ids = [
            tax.id for tax in partner_taxes if tax.type_tax_use == "purchase"
        ]
        return {"domain": {"income_tax": [("id", "in", purchase_tax_ids)]}}
class AccountInvoiceInherit(models.Model):
    """Extend invoices with ISRL (income tax) withholding computation."""
    _inherit = "account.invoice"
    # Sum of all withholding amounts (negative values, see set_income_taxes).
    tax_withholdings = fields.Monetary(
        compute="set_income_taxes", string="Tax withholdings"
    )
    # NOTE(review): total_taxes / total_retiva declare set_income_taxes as their
    # compute method, but that method never assigns them (the assignments below
    # are commented out) — confirm whether they should be removed or restored.
    total_taxes = fields.Monetary(compute="set_income_taxes", string="Tax")
    total_retiva = fields.Monetary(compute="set_income_taxes", string="IVA Retenido")
    @api.onchange("invoice_line_ids")
    def set_income_taxes(self):
        """Rebuild the invoice's withholding tax lines from its invoice lines.

        For every income tax configured on every line, computes the withheld
        amount (stored negative) and replaces ``tax_line_ids`` with the
        resulting one2many commands.
        """
        taxes_grouped = self.get_taxes_values()
        invoice_lines = self.invoice_line_ids
        result = []
        withholding_total = []
        # NOTE(review): total_taxes / retiva are accumulated nowhere and only
        # used by the commented-out assignments below.
        total_taxes = []
        retiva = []
        for line in invoice_lines:
            for tax in line.income_tax:
                # Withholdings are stored as negative amounts on the invoice.
                amount = -self._tax_withholdings(tax, line.price_subtotal)
                withholding_total.append(amount)
                # (0, 0, vals) one2many command: create a new tax line.
                result.append(
                    (
                        0,
                        0,
                        {
                            "invoice_id": self.id,
                            "name": tax.name,
                            "tax_id": tax.id,
                            "amount": amount,
                            "subject_amount": line.price_subtotal,
                            "base": line.price_subtotal,
                            "manual": False,
                            "account_analytic_id": line.account_analytic_id.id or False,
                            "account_id": tax.account_id.id,
                            # NOTE(review): string "True" written into a Boolean
                            # field — works because it is truthy, but the bare
                            # boolean True was probably intended.
                            "withholding": "True",
                            "date": self.date,
                        },
                    )
                )
        self.tax_line_ids = result
        self.tax_withholdings = sum(withholding_total)
        #self.total_taxes = sum(total_taxes)
        #self.total_retiva = sum(retiva)
        return
    def _tax_withholdings(self, tax, base):
        """Return the (positive) ISRL amount withheld for ``tax`` on ``base``.

        For natural residents ("PNR") a deduction based on the tax unit (UVT)
        times a fixed factor is subtracted first; for everybody else it is a
        flat percentage of the base.

        :raises ValidationError: if no tributary unit (UVT) is configured.
        """
        if tax.person_type == "PNR":
            factor = 83.3334
            try:
                # Most recent tributary-unit amount.
                uvt = self.env["tributary.unit"].search([], order="date desc")[0].amount
            except BaseException:
                raise ValidationError(
                    _("No value has been " "configured for the tax unit (UVT)")
                )
            retention_percentage = tax.amount
            subtract = uvt * (retention_percentage / 100) * factor
            isrl = base * (retention_percentage / 100) - subtract
            # Absolute value: the sign is applied by the caller.
            if isrl < 0:
                isrl = -isrl
            return isrl
        else:
            retention_percentage = tax.amount
            isrl = base * (retention_percentage / 100)
            return isrl
    @api.multi
    def print_isrl_retention(self):
        """Print the ISRL withholding voucher report.

        Withholdings are stored negative, so a non-negative total means there
        is nothing to print.
        """
        if self.tax_withholdings >= 0:
            raise UserError(_("Nothing to print."))
        return (
            self.env['report'].get_action(self, 'l10n_ve_isrl.report_isrl_document')
        )
| 2.125 | 2 |
purchases_storage/source/api/api.py | icYFTL/RTULAB_Service | 0 | 12765337 | <reponame>icYFTL/RTULAB_Service
from source.database import *
from . import app
from .utils import *
from flask import request
from datetime import datetime
from pytz import timezone
@app.route('/purchases', methods=['GET'])
def on_root():
    """Health-check endpoint: always answers with an empty OK reply."""
    return Reply.ok()
@app.route('/purchases/new_purchase', methods=['POST'])
def on_new_purchase():
    """Register a purchase from a JSON body.

    Required fields: name, total, user_id, shop_id, method; optional: ts
    (unix timestamp). Creates the item and the user on the fly when they do
    not exist yet, then stores the purchase and returns its id.
    """
    data = request.json
    if not data:
        return Reply.bad_request(error='Empty json')
    if not isinstance(data, dict):
        return Reply.bad_request(error='Invalid json. It must be a dict.')
    # check_args_important returns (ok, offending_field_name).
    _check = check_args_important(('name', 'total', 'user_id', 'shop_id', 'method'), **data)
    if not _check[0]:
        return Reply.bad_request(error=f'Empty important {_check[1]} field passed')
    items_methods = methods.ItemsMethods()
    purchase_methods = methods.PurchasesMethods()
    users_methods = methods.UsersMethods()
    # Field-by-field type validation; the first failure wins.
    # NOTE(review): ts == 0 is falsy and therefore skips this type check (and is
    # later replaced by "now" below) — confirm that is the intended behavior.
    if data.get('ts') and not isinstance(data.get('ts'), int):
        return Reply.bad_request(error='Invalid ts field. It must be int.')
    elif not isinstance(data['name'], str):
        return Reply.bad_request(error='Invalid name field. It must be str.')
    elif not isinstance(data['total'], int):
        return Reply.bad_request(error='Invalid total field. It must be int.')
    elif not isinstance(data['method'], str):
        return Reply.bad_request(error='Invalid method field. It must be str.')
    elif not isinstance(data['shop_id'], int):
        return Reply.bad_request(error='Invalid shop_id field. It must be int.')
    # Missing/zero ts defaults to the current Moscow time.
    ts = data.get('ts') or int(datetime.now(timezone('Europe/Moscow')).timestamp())
    # Get-or-create the item by name.
    item = items_methods.get_item(name=data['name']) or items_methods.add_item(models.Item(data['name']))
    # Negative totals are silently normalised to their absolute value.
    total = abs(data['total'])
    method = data.get('method')
    shop_id = data['shop_id']
    # Get-or-create the user. NOTE(review): user_id's type is never validated.
    user = users_methods.get_user(id=data['user_id'])
    if not user:
        user = users_methods.add_user(models.User(id=data['user_id']))
    purchase = purchase_methods.add_purchase(models.Purchase(item.id, ts, total, user.id, shop_id, method))
    return Reply.created(purchase_id=purchase.id)
@app.route('/purchases/get_purchases', methods=['GET'])
def on_get_purchase():
    """Return purchases filtered by ``user_id`` or ``purchase_id`` query args.

    Each purchase is serialised with its item embedded under the ``item`` key.
    """
    data = request.args
    if not data:
        return Reply.bad_request(error='Empty args')
    # NOTE(review): unlike check_args_important above, the return value is used
    # directly as a boolean here — confirm check_args_non_important returns a
    # plain bool and not a (bool, field) tuple.
    _check = check_args_non_important(('user_id', 'purchase_id'), **data)
    if not _check:
        return Reply.bad_request(error='Empty or invalid required query string params')
    purchase_methods = methods.PurchasesMethods()
    items_methods = methods.ItemsMethods()
    # user_id takes precedence when both filters are supplied.
    if data.get('user_id'):
        result = purchase_methods.get_purchases(user_id=data['user_id'])
    elif data.get('purchase_id'):
        result = purchase_methods.get_purchases(id=data['purchase_id'])
    else:
        return Reply.bad_request(error='Invalid args passed')
    if not result:
        return Reply.not_found()
    result = [x.get_dict() for x in result]
    # Replace the raw foreign key with the serialised item record.
    for x in result:
        x['item'] = items_methods.get_item(id=x['item_id']).get_dict()
        del x['item_id']
    return Reply.ok(purchases=result)
| 2.5 | 2 |
wikipedia-bot.py | sinasystem/wikipediabot | 0 | 12765338 | # Required modules and libraries
from telegram.ext import Updater, CommandHandler, InlineQueryHandler, MessageHandler, Filters
import telegram
from telegram import InlineQueryResultArticle, ParseMode, \
InputTextMessageContent
import requests
import wikipediaapi
import re
from uuid import uuid4
# Module-level state: default Wikipedia client and canned bot replies.
wiki_wiki = wikipediaapi.Wikipedia('fa')  # default (Persian); handlers rebuild per query
message = "What do you wanna search for?"  # reply used by /start
aboutmsg = "Searching Wikipedia has never been easier! Just send a topic."  # reply used by /about
# Private chat with bot
# uses en wikipedia for english and fa wikipedia for persian inputs
def echo(update, context):
    """Reply to a private message with the matching Wikipedia article URL.

    Queries made of Latin letters only are looked up on the English
    Wikipedia; anything else goes to the Persian one.
    """
    query = update.message.text
    lang = 'en' if re.search(r'^[a-zA-Z]+\Z', query) else 'fa'
    page = wikipediaapi.Wikipedia(lang).page(query)
    if page.exists():
        update.message.reply_text(page.fullurl)
    else:
        update.message.reply_text("Your search querry had no results.")
# In-line mode (@botname <query> from any chat)
def inlinequery(update, context):
    """Handle the inline query: offer the matching Wikipedia article.

    Same language selection as echo(): Latin-only queries use the English
    Wikipedia, everything else the Persian one. Answers with a single
    article result, or a "No results" placeholder.
    """
    query = update.inline_query.query
    # Latin-letters-only query -> English Wikipedia, otherwise Persian.
    output = re.search(r'^[a-zA-Z]+\Z', query)
    if output:
        wiki_wiki = wikipediaapi.Wikipedia('en')
    else:
        wiki_wiki = wikipediaapi.Wikipedia('fa')
    page_py = wiki_wiki.page(query)
    if page_py.exists():
        wikimsg = (page_py.fullurl)
        pagetitle= page_py.title
        # Selecting the result sends the article URL into the chat.
        results = [InlineQueryResultArticle(
            description="Searching for" + " " + query+ " " + "in Wikipedia",
            id=uuid4(),
            title=pagetitle,
            input_message_content=InputTextMessageContent(
                message_text=wikimsg))
            ]
        update.inline_query.answer(results)
    else:
        # No page found: a placeholder result that just echoes the query.
        results = [
            InlineQueryResultArticle(
                id=uuid4(),
                title="No results",
                input_message_content=InputTextMessageContent(query)
            )]
        update.inline_query.answer(results)
def start(update, context):
    """/start handler: greet the user with the canned prompt."""
    context.bot.send_message(chat_id=update.effective_chat.id,text =message)
def about(update, context):
    """/about handler: send the short bot description."""
    context.bot.send_message(chat_id=update.effective_chat.id,text =aboutmsg)
# 'TOKEN' is a placeholder — supply the real BotFather token here (and never
# commit a real token to version control).
updater = Updater(token = 'TOKEN', use_context=True)
# NOTE(review): this standalone Bot instance is not referenced by the handlers
# above — confirm whether it is still needed.
bot = telegram.Bot(token='TOKEN')
dispatcher = updater.dispatcher
def main():
    """Register all handlers on the dispatcher and poll until interrupted."""
    handlers = (
        CommandHandler('start', start),
        CommandHandler('about', about),
        MessageHandler(Filters.text, echo),
        InlineQueryHandler(inlinequery),
    )
    for handler in handlers:
        dispatcher.add_handler(handler)
    updater.start_polling()
    updater.idle()
# Run the bot only when executed as a script.
if __name__ == '__main__':
    main()
| 2.875 | 3 |
main.py | Lonikoo/Sorting_Visualization | 1 | 12765339 | from data import DataSeq
from bubblesort import BubbleSort
from bucketsort import BucketSort
from combsort import CombSort
from cyclesort import CycleSort
from heapsort import HeapSort
from insertionsort import InsertionSort
from mergesort import MergeSort
from monkeysort import MonkeySort
from quicksort import QuickSort
from radixsort import RadixSort
from selectionsort import SelectionSort
from shellsort import ShellSort
import argparse
# Command-line interface. NOTE(review): "Visulization" is a typo, but it is a
# user-visible runtime string, so it is left untouched here.
parser=argparse.ArgumentParser(description="Sort Visulization")
parser.add_argument('-l','--length',type=int,default=64)      # number of elements (capped at MAXLENGTH below)
parser.add_argument('-i','--interval',type=int,default=1)     # delay between animation frames
parser.add_argument('-t','--sort-type',type=str,default='BubbleSort',
                    choices=["BubbleSort","BucketSort","CombSort",
                    "CycleSort","HeapSort","InsertionSort",
                    "MergeSort","MonkeySort","QuickSort",
                    "RadixSort","SelectionSort","ShellSort",])
parser.add_argument('-r','--resample', action='store_true')   # resample the data sequence
parser.add_argument('-s','--sparse', action='store_true')     # use a sparse sequence
parser.add_argument('-n','--no-record', action='store_true')  # disable recording of the run
args=parser.parse_args()
if __name__ == "__main__":
    # Cap the sequence length to keep the visualization responsive.
    MAXLENGTH = 1000
    length = min(args.length, MAXLENGTH)
    sort_type = args.sort_type
    # Resolve the sorter class by name instead of eval(): argparse already
    # restricts the value via `choices`, and a plain globals() lookup cannot
    # execute arbitrary code. Catch only KeyError — the original bare
    # `except:` would also swallow SystemExit/KeyboardInterrupt.
    try:
        SortMethod = globals()[sort_type]
    except KeyError:
        print("Sort Type Not Found! Please Check if %s Exists or Not!" % sort_type)
        exit()
    ds = DataSeq(length,
                 time_interval=args.interval,
                 sort_title=sort_type,
                 is_resampling=args.resample,
                 is_sparse=args.sparse,
                 record=not args.no_record)
    ds.Visualize()        # show the initial, unsorted sequence
    ds.StartTimer()
    SortMethod(ds)        # run the selected sorting algorithm
    ds.StopTimer()
    ds.SetTimeInterval(0)  # final frame: draw immediately
    ds.Visualize()        # show the sorted result
labs/04_conv_nets_2/solutions/geom_avg.py | souillade/Deep | 1 | 12765340 | heatmap_1_r = imresize(heatmap_1, (50,80)).astype("float32")
# Resize the remaining heatmaps to a common (50, 80) grid.
# NOTE(review): imresize comes from scipy.misc, which removed it in
# SciPy >= 1.3 — presumably an older SciPy (or a shim) is in use; verify.
heatmap_2_r = imresize(heatmap_2, (50,80)).astype("float32")
heatmap_3_r = imresize(heatmap_3, (50,80)).astype("float32")
# Geometric mean of the three heatmaps (cube root of their product).
heatmap_geom_avg = np.power(heatmap_1_r * heatmap_2_r * heatmap_3_r, 0.333)
display_img_and_heatmap("dog.jpg", heatmap_geom_avg)
| 2.46875 | 2 |
Python/2311.py | alinemarchiori/URI_Exercises_Solved | 0 | 12765341 | quantidade, menor, maior, soma = 0,0,0,0
quantidade = int(input())
while(quantidade>0):
menor, maior, soma, final= 10,0,0,0
nome = input()
dificuldade = float(input())
notas = input().split()
notas = list(notas)
for i in range(len(notas)):
notas[i] = float(notas[i])
notas.sort()
cont = 0
for i in range(len(notas)-1):
if(cont != 0):
soma +=notas[i]
else:
cont+=1
final = soma*dificuldade
print(nome + " {:.2f}".format(final))
quantidade-=1 | 3.609375 | 4 |
datafeeds/winddatabasefeeds/stockfeedswinddatabase.py | liuqiuxi/datafeeds | 1 | 12765342 | # -*- coding:utf-8 -*-
# @Time : 2019-12-27 16:11
# @Author : liuqiuxi
# @Email : <EMAIL>
# @File : stockfeedswinddatabase.py
# @Project : datafeeds
# @Software: PyCharm
# @Remark : This is class of stock market
import datetime
import copy
import pandas as pd
import numpy as np
from datafeeds.utils import BarFeedConfig
from datafeeds.winddatabasefeeds import BaseWindDataBase
from datafeeds import logger
class AShareCalendarWindDataBase(BaseWindDataBase):
    """A-share trading calendar read from the wind Oracle database."""
    LOGGER_NAME = "AShareCalendarWindDataBase"
    def __init__(self):
        super(AShareCalendarWindDataBase, self).__init__()
        # Maps this feed's logger name to its backing database table.
        self.__table_name_dict = {"AShareCalendarWindDataBase": "AShareCalendar"}
    def get_calendar(self, begin_datetime, end_datetime):
        """Return the trading days in [begin_datetime, end_datetime].

        :param begin_datetime: datetime.datetime, range start (inclusive)
        :param end_datetime: datetime.datetime, range end (inclusive)
        :return: pandas.DataFrame with a single ``dateTime`` column of
            datetime objects, de-duplicated and sorted ascending.
        """
        connect = self.connect()
        # The database stores trade days as 'YYYYMMDD' strings.
        begin_datetime = begin_datetime.strftime("%Y%m%d")
        end_datetime = end_datetime.strftime("%Y%m%d")
        table_name = self.__table_name_dict.get(self.LOGGER_NAME)
        owner = self.get_oracle_owner(table_name=table_name)
        table_parameter = owner + table_name
        sqlClause = ("select trade_days as dateTime from " + table_parameter + " where trade_days >= " +
                     "'" + begin_datetime + "' and trade_days <= '" + end_datetime + "' ")
        data = self.get_data_with_sql(sqlClause=sqlClause, connect=connect)
        # Column names come back lower-cased; restore the camelCase alias.
        data.rename(columns={"datetime": "dateTime"}, inplace=True)
        data.drop_duplicates(subset=["dateTime"], inplace=True)
        data.sort_values(by="dateTime", inplace=True)
        data.reset_index(inplace=True, drop=True)
        # Parse 'YYYYMMDD' strings into datetime objects.
        data.loc[:, "dateTime"] = data.loc[:, "dateTime"].apply(lambda x: datetime.datetime.strptime(x, "%Y%m%d"))
        data = pd.DataFrame(data={"dateTime": data.loc[:, "dateTime"]})
        connect.close()
        return data
class AShareQuotationWindDataBase(BaseWindDataBase):
    """Daily A-share quotations (EOD prices) from the wind Oracle database."""
    LOGGER_NAME = "AShareQuotationWindDataBase"
    def __init__(self):
        super(AShareQuotationWindDataBase, self).__init__()
        # Price/volume columns that must be scaled by the adjustment factor.
        self.__need_adjust_columns = ["preClose", "open", "high", "low", "close", "volume", "avgPrice"]
        self.__table_name_dict = {"AShareQuotationWindDataBase": "AShareEODPrices"}
    def get_quotation(self, securityIds, items, frequency, begin_datetime, end_datetime, adjusted="F"):
        """Return quotation data for ``securityIds``, chunking large requests.

        :param securityIds: list of wind security codes
        :param items: column names to keep (besides dateTime/securityId)
        :param frequency: bar length in seconds; only 86400 (daily) is supported
        :param begin_datetime: datetime.datetime, range start (inclusive)
        :param end_datetime: datetime.datetime, range end (inclusive)
        :param adjusted: "F" forward-adjusted, "B" backward-adjusted,
            anything else leaves prices unadjusted
        :return: pandas.DataFrame sorted by (securityId, dateTime)
        """
        limit_numbers = BarFeedConfig.get_wind().get("LimitNumbers")
        if len(securityIds) < limit_numbers:
            data = self.__get_quotation(securityIds=securityIds, items=items, frequency=frequency,
                                        begin_datetime=begin_datetime, end_datetime=end_datetime, adjusted=adjusted)
        else:
            # Too many ids for one SQL IN clause: query in fixed-size chunks.
            data = pd.DataFrame()
            for i in range(int(len(securityIds) / limit_numbers) + 1):
                data0 = self.__get_quotation(securityIds=securityIds[i*limit_numbers: i*limit_numbers + limit_numbers],
                                             items=items, frequency=frequency, begin_datetime=begin_datetime,
                                             end_datetime=end_datetime, adjusted=adjusted)
                data = pd.concat(objs=[data, data0], axis=0, join="outer")
            data.sort_values(by=["securityId", "dateTime"], axis=0, ascending=True, inplace=True)
            data.reset_index(inplace=True, drop=True)
        return data
    def __get_quotation(self, securityIds, items, frequency, begin_datetime, end_datetime, adjusted):
        """Fetch and normalise one batch of quotations (see get_quotation)."""
        connect = self.connect()
        # The database stores trade dates as 'YYYYMMDD' strings.
        begin_datetime = begin_datetime.strftime("%Y%m%d")
        end_datetime = end_datetime.strftime("%Y%m%d")
        table_name = self.__table_name_dict.get(self.LOGGER_NAME)
        owner = self.get_oracle_owner(table_name=table_name)
        table_parameter = owner + table_name
        if frequency != 86400:
            raise BaseException("[%s] we can't supply frequency: %d " % (self.LOGGER_NAME, frequency))
        # A single id uses '=', multiple ids use an IN (...) clause.
        if len(securityIds) == 1:
            sqlClause = ("select * from " + table_parameter + " where trade_dt >= '" + begin_datetime + "' " +
                         "and trade_dt <= '" + end_datetime + "' and s_info_windcode = '" + securityIds[0] + "'")
        else:
            sqlClause = ("select * from " + table_parameter + " where trade_dt >= '" + begin_datetime + "' and " +
                         "trade_dt <= '" + end_datetime + "' and s_info_windcode in " + str(tuple(securityIds)) + "")
        data = self.get_data_with_sql(sqlClause=sqlClause, connect=connect)
        rename_dict = BarFeedConfig.get_wind_database_items().get(self.LOGGER_NAME)
        data.rename(columns=rename_dict, inplace=True)
        # change some parameters value to normal value
        data.loc[:, 'dateTime'] = data.loc[:, 'dateTime'].apply(lambda x: datetime.datetime.strptime(x, "%Y%m%d"))
        # Chg is stored in percent; amount presumably in thousands — TODO confirm units.
        data.loc[:, "Chg"] = data.loc[:, "Chg"] / 100
        data.loc[:, "amount"] = data.loc[:, "amount"] * 1000
        # use adjfactor get adj price
        if adjusted in ["F", "B"]:
            data = data.groupby(by="securityId").apply(lambda x: self.__get_adj_price(DataFrame=x, adjusted=adjusted))
            data.reset_index(inplace=True, drop=True)
        data.sort_values(by=["securityId", "dateTime"], axis=0, ascending=True, inplace=True)
        data.reset_index(inplace=True, drop=True)
        # choose items to data: keep only requested, known columns
        log = logger.get_logger(name=self.LOGGER_NAME)
        default_items = list(rename_dict.values())
        real_items = []
        for item in items:
            if item in ["securityId", "dateTime"]:
                log.info("There is no need add item: %s to parameters items" % item)
            elif item in default_items:
                real_items.append(item)
            else:
                log.warning("item %s not in default items, so we remove this item to data" % item)
        data = data.loc[:, ["dateTime", "securityId"] + real_items].copy(deep=True)
        connect.close()
        return data
    def __get_adj_price(self, DataFrame, adjusted):
        """Rescale one security's prices by its adjustment factor.

        "F" normalises to the latest factor (forward-adjusted), "B" to the
        earliest one (backward-adjusted).
        """
        data = DataFrame.copy(deep=True)
        data.sort_values(by="dateTime", axis=0, ascending=True, inplace=True)
        data.reset_index(inplace=True, drop=True)
        if adjusted == "F":
            adjfactor = data.loc[:, "adjfactor"][len(data) - 1]
        elif adjusted == "B":
            adjfactor = data.loc[:, "adjfactor"][0]
        else:
            raise ValueError("[%s] adjusted: %s did't support" % (self.LOGGER_NAME, adjusted))
        # Normalise factors relative to the reference day, then scale prices.
        data.loc[:, "adjfactor"] = data.loc[:, "adjfactor"].apply(lambda x: x / adjfactor)
        columns = copy.deepcopy(self.__need_adjust_columns)
        for column in columns:
            data.loc[:, column] = data.loc[:, column] * data.loc[:, "adjfactor"]
        return data
class AShareIPOWindDataBase(BaseWindDataBase):
    """IPO information for A-share stocks from the wind Oracle database."""
    LOGGER_NAME = "AShareIPOWindDataBase"
    def __init__(self):
        super(AShareIPOWindDataBase, self).__init__()
        self.__table_name_dict = {"AShareIPOWindDataBase": "AShareIPO"}
    def get_initial_public_offering(self, securityIds):
        """Return IPO records for ``securityIds``.

        :param securityIds: list of wind security codes; ids without an IPO
            record still appear (right merge) with NaN columns
        :return: pandas.DataFrame sorted by securityId, with amount/collection
            rescaled and subDate/listDate parsed to datetime (or None)
        """
        connect = self.connect()
        table_name = self.__table_name_dict.get(self.LOGGER_NAME)
        owner = self.get_oracle_owner(table_name=table_name)
        table_parameter = owner + table_name
        sqlClause = "select * from " + table_parameter + ""
        data = self.get_data_with_sql(sqlClause=sqlClause, connect=connect)
        # Keep only the configured columns, then map them to canonical names.
        rename_dict = BarFeedConfig.get_wind_database_items().get(self.LOGGER_NAME)
        default_items = list(rename_dict)
        drop_items = list(set(data.columns) - set(default_items))
        data.drop(labels=drop_items, axis=1, inplace=True)
        data.rename(columns=rename_dict, inplace=True)
        # Right merge keeps exactly the requested ids.
        data0 = pd.DataFrame({"securityId": securityIds})
        data = pd.merge(left=data, right=data0, on="securityId", how="right")
        # change parameters numbers (presumably stored in units of 10,000 — TODO confirm)
        data.loc[:, "amount"] = data.loc[:, "amount"] * 10000
        data.loc[:, "collection"] = data.loc[:, "collection"] * 10000
        # BUGFIX: dates come back from the database as 'YYYYMMDD' strings (the
        # other feeds in this module parse them with strptime), so the original
        # `isinstance(x, datetime.datetime)` test was always False and every
        # parsed date ended up None. Parse strings; anything else becomes None.
        data.loc[:, "subDate"] = data.loc[:, "subDate"].apply(
            lambda x: datetime.datetime.strptime(x, "%Y%m%d") if isinstance(x, str) else None)
        data.loc[:, "listDate"] = data.loc[:, "listDate"].apply(
            lambda x: datetime.datetime.strptime(x, "%Y%m%d") if isinstance(x, str) else None)
        data.sort_values(by="securityId", axis=0, ascending=True, inplace=True)
        data.reset_index(inplace=True, drop=True)
        # Close the connection like the sibling feeds do.
        connect.close()
        return data
class AShareDayVarsWindDataBase(BaseWindDataBase):
    """Per-day derived indicators for all A-shares: limit flags, turnover,
    market values, suspension status and ST (special treatment) status."""
    LOGGER_NAME = "AShareDayVarsWindDataBase"
    def __init__(self):
        super(AShareDayVarsWindDataBase, self).__init__()
        # Three backing tables: derivative indicators, EOD prices, ST history.
        self.__table_name_dict = {"AShareDayVarsWindDataBase": ["AShareEODDerivativeIndicator",
                                                                "AShareEODPrices",
                                                                "AShareST"]}
    def get_value(self, date_datetime):
        """Return the cross-sectional daily variables for ``date_datetime``.

        :param date_datetime: datetime.datetime, the trading day to query
        :return: pandas.DataFrame with one row per security including
            upLimit/downLimit, turnover, totalValue/marketValue,
            isNotSuspended and isST columns
        """
        connect = self.connect()
        table_name = self.__table_name_dict.get(self.LOGGER_NAME)[0]
        owner = self.get_oracle_owner(table_name=table_name)
        table_parameter = owner + table_name
        date_datetime = date_datetime.strftime("%Y%m%d")
        # BUGFIX: trade_dt is compared as a quoted string everywhere else in
        # this module; the original spliced a bare (unquoted) literal here,
        # forcing an implicit numeric conversion on the column.
        sqlClause = "select * from " + table_parameter + " where trade_dt = '" + date_datetime + "'"
        data = self.get_data_with_sql(sqlClause=sqlClause, connect=connect)
        # Keep only configured columns and map them to canonical names.
        default_items = list(BarFeedConfig.get_wind_database_items().get(self.LOGGER_NAME))
        drop_items = list(set(data.columns) - set(default_items))
        data.drop(labels=drop_items, axis=1, inplace=True)
        rename_dict = BarFeedConfig.get_wind_database_items().get(self.LOGGER_NAME)
        data.rename(columns=rename_dict, inplace=True)
        # change parameters numbers
        data.loc[:, "dateTime"] = data.loc[:, "dateTime"].apply(lambda x: datetime.datetime.strptime(x, "%Y%m%d"))
        data.loc[:, "upLimit"] = np.where(data.loc[:, "upOrdown"] == 1, True, False)
        data.loc[:, "downLimit"] = np.where(data.loc[:, "upOrdown"] == -1, True, False)
        # Turnover stored in percent; values presumably in units of 10,000 — TODO confirm.
        data.loc[:, "turnover"] = data.loc[:, "turnover"] / 100
        data.loc[:, "turnover_free"] = data.loc[:, "turnover_free"] / 100
        data.loc[:, "totalValue"] = data.loc[:, "totalValue"] * 10000
        data.loc[:, "marketValue"] = data.loc[:, "marketValue"] * 10000
        data.drop(labels=["upOrdown"], axis=1, inplace=True)
        # find stock whether suspend
        table_name = self.__table_name_dict.get(self.LOGGER_NAME)[1]
        owner = self.get_oracle_owner(table_name=table_name)
        table_parameter = owner + table_name
        sqlClause = ("select s_info_windcode, trade_dt, s_dq_tradestatus from " + table_parameter + " " \
                     "where trade_dt = '"+ date_datetime +"'")
        data0 = self.get_data_with_sql(sqlClause=sqlClause, connect=connect)
        data0.rename(columns=rename_dict, inplace=True)
        data0.loc[:, "dateTime"] = data0.loc[:, "dateTime"].apply(lambda x: datetime.datetime.strptime(x, "%Y%m%d"))
        data0.loc[:, "isNotSuspended"] = np.where(data0.loc[:, "s_dq_tradestatus"] == "交易", True, False)
        data0 = data0.loc[:, ["securityId", "dateTime", "isNotSuspended"]].copy(deep=True)
        data = pd.merge(left=data, right=data0, how="outer", on=("dateTime", "securityId"))
        # find stock whether ST: entered before the query day and not yet removed
        table_name = self.__table_name_dict.get(self.LOGGER_NAME)[2]
        owner = self.get_oracle_owner(table_name=table_name)
        table_parameter = owner + table_name
        sqlClause = ("select s_info_windcode, entry_dt, remove_dt, s_type_st from " + table_parameter + " " \
                     "where entry_dt <= '" + date_datetime + "'")
        data0 = self.get_data_with_sql(sqlClause=sqlClause, connect=connect)
        data0.rename(columns=rename_dict, inplace=True)
        data0.loc[:, "entry_dt"] = data0.loc[:, "entry_dt"].apply(lambda x: datetime.datetime.strptime(x, "%Y%m%d"))
        # BUGFIX: identity test (`is None`) instead of `== None`.
        data0.loc[:, "remove_dt"] = data0.loc[:, "remove_dt"].apply(
            lambda x: np.nan if x is None else datetime.datetime.strptime(x, "%Y%m%d"))
        date_datetime = datetime.datetime.strptime(date_datetime, "%Y%m%d")
        # Still ST when never removed, or removed after the query day.
        data0.loc[:, "isST"] = np.where(pd.isnull(data0.loc[:, "remove_dt"]), True,
                                        np.where(data0.loc[:, "remove_dt"] > date_datetime, True, False))
        data0 = data0.loc[data0.loc[:, "isST"] == True, ["securityId", "isST"]].copy(deep=True)
        data = pd.merge(left=data, right=data0, how="left", on="securityId")
        # Securities missing from the ST table get isST == False.
        data.loc[:, "isST"] = np.where(data.loc[:, "isST"] == True, True, False)
        # Close the connection like the sibling feeds do.
        connect.close()
        return data
| 2.578125 | 3 |
plugins/sumtia/sumtia.py | RodPy/Turtlebots.activity | 0 | 12765343 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2011 Butiá Team <EMAIL>
# Butia is a free open plataform for robotics projects
# www.fing.edu.uy/inco/proyectos/butia
# Universidad de la República del Uruguay
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import apiSumoUY
import math
from TurtleArt.tapalette import make_palette
from TurtleArt.taprimitive import Primitive, ArgSlot
from TurtleArt.tatype import TYPE_INT, TYPE_NUMBER
from gettext import gettext as _
from plugins.plugin import Plugin
class Sumtia(Plugin):
def __init__(self, parent):
Plugin.__init__(self)
self.tw = parent
self.vel = 10
self._inited = False
self.api = apiSumoUY.apiSumoUY()
def setup(self):
palette = make_palette('sumtia', ["#00FF00","#008000"], _('SumBot'), translation=_('sumtia'))
palette.add_block('updateState',
style='basic-style',
label=_('update information'),
prim_name='updateState',
help_string=_('update information from the server'))
self.tw.lc.def_prim('updateState', 0,
Primitive(self.updateState))
palette.add_block('sendVelocities',
style='basic-style-2arg',
label=_('speed SumBot'),
prim_name='sendVelocities',
default=[10,10],
help_string=_('submit the speed to the SumBot'))
self.tw.lc.def_prim('sendVelocities', 2,
Primitive(self.sendVelocities, arg_descs=[ArgSlot(TYPE_NUMBER), ArgSlot(TYPE_NUMBER)]))
palette.add_block('setVel',
style='basic-style-1arg',
label=_('speed SumBot'),
prim_name='setVel',
default=[10],
help_string=_('set the default speed for the movement commands'))
self.tw.lc.def_prim('setVel', 1,
Primitive(self.setVel, arg_descs=[ArgSlot(TYPE_NUMBER)]))
palette.add_block('forwardSumtia',
style='basic-style',
label=_('forward SumBot'),
prim_name='forwardSumtia',
help_string=_('move SumBot forward'))
self.tw.lc.def_prim('forwardSumtia', 0,
Primitive(self.forward))
palette.add_block('backwardSumtia',
style='basic-style',
label=_('backward SumBot'),
prim_name='backwardSumtia',
help_string=_('move SumBot backward'))
self.tw.lc.def_prim('backwardSumtia', 0,
Primitive(self.backward))
palette.add_block('stopSumtia',
style='basic-style',
label=_('stop SumBot'),
prim_name='stopSumtia',
help_string=_('stop the SumBot'))
self.tw.lc.def_prim('stopSumtia', 0,
Primitive(self.stop))
palette.add_block('leftSumtia',
style='basic-style',
label=_('left SumBot'),
prim_name='leftSumtia',
help_string=_('turn left the SumBot'))
self.tw.lc.def_prim('leftSumtia', 0,
Primitive(self.left))
palette.add_block('rightSumtia',
style='basic-style',
label=_('right SumBot'),
prim_name='rightSumtia',
help_string=_('turn right the SumBot'))
self.tw.lc.def_prim('rightSumtia', 0,
Primitive(self.right))
palette.add_block('angleToCenter',
style='box-style',
label=_('angle to center'),
prim_name='angleToCenter',
help_string=_('get the angle to the center of the dohyo'))
self.tw.lc.def_prim('angleToCenter', 0,
Primitive(self.angleToCenter, TYPE_INT))
palette.add_block('angleToOpponent',
style='box-style',
label=_('angle to Enemy'),
prim_name='angleToOpponent',
help_string=_('get the angle to the Enemy'))
self.tw.lc.def_prim('angleToOpponent', 0,
Primitive(self.angleToOpponent, TYPE_INT))
palette.add_block('getX',
style='box-style',
label=_('x coor. SumBot'),
prim_name='getX',
help_string=_('get the x coordinate of the SumBot'))
self.tw.lc.def_prim('getX', 0,
Primitive(self.getX, TYPE_INT))
palette.add_block('getY',
style='box-style',
label=_('y coor. SumBot'),
prim_name='getY',
help_string=_('get the y coordinate of the SumBot'))
self.tw.lc.def_prim('getY', 0,
Primitive(self.getY, TYPE_INT))
palette.add_block('getOpX',
style='box-style',
label=_('x coor. Enemy'),
prim_name='getOpX',
help_string=_('get the x coordinate of the Enemy'))
self.tw.lc.def_prim('getOpX', 0,
Primitive(self.getOpX, TYPE_INT))
palette.add_block('getOpY',
style='box-style',
label=_('y coor. Enemy'),
prim_name='getOpY',
help_string=_('get the y coordinate of the Enemy'))
self.tw.lc.def_prim('getOpY', 0,
Primitive(self.getOpY, TYPE_INT))
palette.add_block('getRot',
style='box-style',
label=_('rotation SumBot'),
prim_name='getRot',
help_string=_('get the rotation of the Sumbot'))
self.tw.lc.def_prim('getRot', 0,
Primitive(self.getRot, TYPE_INT))
palette.add_block('getOpRot',
style='box-style',
label=_('rotation Enemy'),
prim_name='getOpRot',
help_string=_('get the rotation of the Enemy'))
self.tw.lc.def_prim('getOpRot', 0,
Primitive(self.getOpRot, TYPE_INT))
palette.add_block('getDistCenter',
style='box-style',
label=_('distance to center'),
prim_name='getDistCenter',
help_string=_('get the distance to the center of the dohyo'))
self.tw.lc.def_prim('getDistCenter', 0,
Primitive(self.getDistCenter, TYPE_INT))
palette.add_block('getDistOp',
style='box-style',
label=_('distance to Enemy'),
prim_name='getDistOp',
help_string=_('get the distance to the Enemy'))
self.tw.lc.def_prim('getDistOp', 0,
Primitive(self.getDistOp, TYPE_INT))
############################### Turtle signals ############################
def stop(self):
if self._inited:
self.api.enviarVelocidades(0,0)
def quit(self):
if self._inited:
self.api.liberarRecursos()
###########################################################################
# Sumtia helper functions for apiSumoUY.py interaction
def sendVelocities(self,vel_izq = 0, vel_der = 0):
self.api.enviarVelocidades(vel_izq, vel_der)
def setVel(self,vel = 0):
self.vel = int(vel)
    def forward(self):
        # Both wheels at +vel: drive straight ahead.
        self.api.enviarVelocidades(self.vel, self.vel)
    def backward(self):
        # Both wheels at -vel: drive straight back.
        self.api.enviarVelocidades(-self.vel, -self.vel)
    def left(self):
        # Left wheel backward, right wheel forward: turn left in place.
        self.api.enviarVelocidades(-self.vel, self.vel)
    def right(self):
        # Left wheel forward, right wheel backward: turn right in place.
        self.api.enviarVelocidades(self.vel, -self.vel)
    def getX(self):
        # x coordinate of the SumBot, as reported by the API.
        return self.api.getCoorX()
    def getY(self):
        # y coordinate of the SumBot.
        return self.api.getCoorY()
    def getOpX(self):
        # x coordinate of the opponent ("Enemy") robot.
        return self.api.getCoorXOp()
    def getOpY(self):
        # y coordinate of the opponent robot.
        return self.api.getCoorYOp()
    def getRot(self):
        # Rotation (heading) of the SumBot; degrees assumed from the
        # math.degrees() usage in angleToCenter -- TODO confirm units.
        return self.api.getRot()
    def getOpRot(self):
        # Rotation (heading) of the opponent robot.
        return self.api.getRotOp()
def angleToCenter(self):
rot = math.degrees(math.atan2(self.api.getCoorY(), self.api.getCoorX())) + (180 - self.getRot())
return (rot - 360) if abs(rot) > 180 else rot
def angleToOpponent(self):
x = self.getX() - self.getOpX()
y = self.getY() - self.getOpY()
rot = math.degrees(math.atan2(y, x)) + (180 - self.getRot())
return (rot - 360) if abs(rot) > 180 else rot
    def getDistCenter(self):
        # Euclidean distance from the SumBot to the dohyo origin (0, 0).
        return math.sqrt(math.pow(self.getX(), 2) + math.pow(self.getY(), 2))
    def getDistOp(self):
        # Euclidean distance between the SumBot and the opponent robot.
        return math.sqrt(math.pow(self.getX() - self.getOpX(), 2) +
                         math.pow(self.getY() - self.getOpY(), 2))
    def updateState(self):
        # Lazily set up the ports and connect on first use, then pull the
        # latest world state from the SumBot API.
        if not(self._inited):
            self.api.setPuertos()
            self.api.conectarse()
            self._inited = True
        self.api.getInformacion()
| 2.703125 | 3 |
server/yubikeys/apps.py | msdrigg/DuoAutoFiller | 0 | 12765344 | from django.apps import AppConfig
class YubikeysConfig(AppConfig):
    """Django application configuration for the ``yubikeys`` app."""
    name = 'yubikeys'
| 1.039063 | 1 |
formatting.py | apoorv-jain/youtube-transcript-api | 0 | 12765345 | import sys
import os
class SrtFormatter():
    """Render YouTube transcript entries as SubRip (.srt) subtitle text."""

    def _secs_to_minutes_hours(self, time):
        """Format *time* (seconds, possibly fractional) as an SRT
        timestamp ``HH:MM:SS,mmm``.
        """
        whole_secs = int(time)
        # Milliseconds are the fractional remainder, zero-padded to three
        # digits as the SRT format requires (the old code emitted e.g. ",50"
        # for 50 ms, which players misread).
        millis = int((time - whole_secs) * 1000)
        secs = str(whole_secs % 60).zfill(2)
        # Minutes must wrap at 60; the old ``int(time)//60`` produced values
        # of 60+ for inputs of an hour or more (e.g. 3661 s -> "01:61:01").
        mins = str((whole_secs // 60) % 60).zfill(2)
        hours = str(whole_secs // 3600).zfill(2)
        return f"{hours}:{mins}:{secs},{millis:03d}"

    def _format(self, transcript_data):
        """Build the full SRT document from a list of transcript entries,
        each a dict with ``start``, ``duration`` and ``text`` keys.
        """
        blocks = []
        for index, each in enumerate(transcript_data):
            start_time = each['start']
            end_time = each['start'] + each['duration']
            blocks.append(f"{index + 1}\n")
            blocks.append(f"{self._secs_to_minutes_hours(start_time)} --> "
                          f"{self._secs_to_minutes_hours(end_time)}\n")
            blocks.append(each['text'] + '\n\n')
        return ''.join(blocks)

    def format_and_save(self, transcript_data, location=None, file_name='Transcript'):
        """Write the formatted transcript to ``<location>/<file_name>.srt``.

        *location* defaults to the current working directory, resolved at
        call time (the old ``location=os.getcwd()`` default was frozen at
        import time).
        """
        if location is None:
            location = os.getcwd()
        final_path = os.path.join(location, file_name + '.srt')
        with open(final_path, 'w', encoding="utf-8") as srt_file:
            srt_file.write(self._format(transcript_data))
| 2.875 | 3 |
app/app/models.py | UWA-CITS3200-18-2021/ReSQ | 1 | 12765346 | <reponame>UWA-CITS3200-18-2021/ReSQ
from sqlalchemy.sql.expression import null
from app import db, login_manager
from app.globals import queueType, statusType, roleType, invalidChar
from flask_login import UserMixin
from sqlalchemy import Column, Integer, String, Enum, DateTime, Text
from sqlalchemy.orm import validates
from werkzeug.security import generate_password_hash, check_password_hash
# Checking Types
from sqlalchemy.orm.attributes import InstrumentedAttribute
import types
# User table
class BaseModel(db.Model):
    """Abstract base for all models; adds a generic ``to_dict`` serializer."""
    __abstract__ = True
    def to_dict(self):
        """Return the instance's public attributes as a plain dict.

        Class-level defaults are merged with instance values; private names,
        functions and SQLAlchemy column descriptors are filtered out.
        """
        classVars = vars(type(self)) # get any "default" attrs defined at the class level
        instanceVars = vars(self) # get any attrs defined on the instance (self)
        allVars = dict(classVars)
        allVars.update(instanceVars)
        # filter out private attributes, functions and SQL_Alchemy references
        publicVars = {key: value for key, value in allVars.items() if not (key.startswith('_') or (
            isinstance(value, types.FunctionType)) or (isinstance(value, InstrumentedAttribute)))}
        return publicVars
class User(BaseModel, UserMixin):
    """Application account used for authentication via Flask-Login."""
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    username = Column(String(64), index=True, unique=True, nullable=False)
    password_hash = Column(String(128), nullable=False)
    role = Column(Text, nullable=False)  # expected to be one of roleType

    def __repr__(self):
        return '<User {}>'.format(self.username)

    def set_password(self, password):
        """Hash *password* and store the hash on the instance.

        The previous call passed ``method='<PASSWORD>'`` (a redacted /
        invalid werkzeug hash method name, which raises at runtime); use
        werkzeug's default method instead -- check_password_hash reads the
        method back out of the stored hash, so verification is unaffected.
        """
        self.password_hash = generate_password_hash(password)

    def check_password(self, password):
        """Return True when *password* matches the stored hash."""
        return check_password_hash(self.password_hash, password)

    # The decorator previously lacked its column-name argument
    # (bare ``@validates``), so this validator was never registered
    # for the ``role`` column.
    @validates('role')
    def validate_role(self, key, role):
        """Reject roles outside the configured roleType set."""
        if role not in roleType:
            raise ValueError("Invalid roleType")
        else:
            return role
@login_manager.user_loader
def load_user(id):
    # Flask-Login callback: resolve a session user id back to a User row.
    return User.query.get(int(id))
# Queue model for db
class Queue(BaseModel):
    """A single student enquiry sitting in (or processed by) a help queue."""
    id = Column(Integer, primary_key=True)
    studentName = Column(String(64), nullable=False)
    studentNumber = Column(Integer, nullable=False)
    unitCode = Column(String(8), nullable=False)
    enquiry = Column(Text, nullable=False)
    queue = Column(Text, nullable=False)    # one of queueType
    status = Column(Text, nullable=False)   # one of statusType
    enterQueueTime = Column(DateTime, nullable=False)
    changeSessionTime = Column(DateTime)
    exitSessionTime = Column(DateTime)

    def __init__(self, studentName, studentNumber, unitCode, enquiry, queue, status, enterQueueTime):
        self.studentName = studentName
        self.studentNumber = studentNumber
        self.unitCode = unitCode
        self.enquiry = enquiry
        self.queue = queue
        self.status = status
        self.enterQueueTime = enterQueueTime

    def __repr__(self):
        # Fix: the closing "}" was missing, producing an unbalanced repr.
        return f"<Queue #{self.id}{{Name: {self.studentName}, ID: {self.studentNumber}, Unit: {self.unitCode}, Enquiry: {self.enquiry}, Queue: {self.queue}}}>"

    @validates('studentName')
    def validates_studentName(self, key, studentName):
        """Reject names containing digits or blacklisted characters."""
        for char in studentName:
            if char.isnumeric() or char in invalidChar:
                raise ValueError("Invalid character in studentName")
        return studentName

    @validates('studentNumber')
    def validate_studentNumber(self, key, studentNumber):
        """Require an exactly-8-digit student number; stored as int.

        NOTE(review): applies len() before int(), so the incoming value is
        assumed to be a string -- confirm against the form handling.
        """
        if len(studentNumber) != 8:
            raise ValueError("studentNumber must be 8 digits")
        return int(studentNumber)

    @validates('unitCode')
    def validate_unitCode(self, key, unitCode):
        """Require the unit-code shape: letters in positions 0-3, digits after.

        NOTE(review): the overall length is not checked here (only the
        String(8) column bounds it) -- confirm whether short codes should
        be rejected.
        """
        for i, c in enumerate(unitCode):
            if (i < 4 and not (c.isupper() or c.islower())) or (i >= 4 and not c.isnumeric()):
                raise ValueError("unitCode must be of the form CCCCNNNN")
        return unitCode

    @validates('enquiry')
    def validate_enquiry(self, key, enquiry):
        # Free text; accepted as-is.
        return enquiry

    @validates('queue')
    def validate_queue(self, key, queue):
        """Restrict ``queue`` to the configured queueType values."""
        if queue not in queueType:
            raise ValueError('Queue is an invalid type')
        else:
            return queue

    @validates('status')
    def validate_status(self, key, status):
        """Restrict ``status`` to the configured statusType values."""
        if status not in statusType:
            raise ValueError('Status is an invalid type')
        else:
            return status
| 2.421875 | 2 |
static/stitch_images.py | thabat12/TetraNet | 0 | 12765347 | import cv2
import tensorflow as tf
import numpy as np
from keras.models import Model
from keras.models import load_model
from numpy import asarray
from PIL import Image, ImageOps
import azure_get_unet as azure_predict
# Since we are using the Azure API, there is not need to save the model to the local filesystem
# model = load_model("/static/model/trees-v1.h5")
# model prediction returns array of prediction
# input is a numpy array
def predict_frame(image):
    """Run the local segmentation model on one grayscale frame and return
    per-pixel class ids.

    NOTE(review): relies on a module-level ``model`` that is currently
    commented out above (predictions are served via the Azure API
    instead) -- calling this function as-is raises NameError.
    """
    image = np.expand_dims(image, 0)  # add a batch dimension of 1
    result = model.predict(image, batch_size=1)
    result = np.squeeze(result, 0)
    result = tf.argmax(result, -1)  # collapse class scores to a class id per pixel
    return result
# for resizing the images after predicting the frames
def resize_frame(arr, shape):
    """Resize the numpy image *arr* to *shape* (a PIL ``(width, height)``
    tuple) and return the result as a numpy array."""
    resized = Image.fromarray(arr).resize(shape)
    return asarray(resized)
# change the alpha values of the segmentation masks for overlay
def convert_mask_alpha(image_arr):
    """Turn a mask array into an RGBA overlay: zero-valued pixels become
    fully transparent, everything else a translucent orange. Returns a
    numpy RGBA array."""
    img_transparent = Image.fromarray(image_arr)
    imga = img_transparent.convert('RGBA')
    imga_data = imga.getdata()
    newData = []
    for item in imga_data:
        if (item[0] == 0):
            # background pixel -> fully transparent
            newData.append((0,0,0,0))
        else:
            # orange transparent mask
            newData.append((255,170,0,100))
    img_transparent.close()
    imga.putdata(newData)
    imga = np.array(imga)
    return imga
# generate the list for the segmentation frames based on video path
def get_segmentation_frames(video_path):
    """Predict a segmentation mask for every frame of the video at
    *video_path* and return the masks as a uint8 array scaled to 0/255.

    NOTE(review): calls predict_frame, which depends on the currently
    commented-out local model.
    """
    # Step 1: create the cv2 video capture object
    vidObj = cv2.VideoCapture(video_path)
    # Step 2: capture the video frames and predict segmentation,
    # then append the segmented frames
    mask_frames = []
    count = 0
    success = 1
    while (True):
        success, image = vidObj.read()
        if (success == 0):
            break
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # Using PIL to get the proper coloration from the cv2 capture
        image = Image.fromarray(image)
        # 128x128 grayscale for UNet model processing
        image = image.resize((128, 128))
        image = ImageOps.grayscale(image)
        image = asarray(image)
        # with the incoming frame, convert to numpy and uint8 dtype
        # and resize frames (480x270 PIL size -> (270, 480) array shape)
        append = predict_frame(image)
        append = np.array(append)
        append = append.astype('uint8')
        append = resize_frame(append, (480, 270))
        # list of resized mask arrays
        mask_frames.append(append)
    # Step 3: convert the lists to numpy, and cast into usable
    # black/ white array data for the video writer
    mask_frames = np.array(mask_frames)
    mask_frames = mask_frames * 255
    # uint8 cast required by the VideoWriter
    mask_frames = mask_frames.astype('uint8')
    # return usable arrays for video writing
    return mask_frames
# This function will overlay the mask frames with the original video frames
def get_segmentation_frames_compiled(video_path):
    """Return RGB frames (as a numpy array) of the video with the predicted
    segmentation masks alpha-blended on top of each frame."""
    # Step 1: retrieve the full sized segmentation frames
    print('Generating segmentation frames...')
    mask_frames_list = get_segmentation_frames(video_path)
    print('Segmentation frames finished')
    # Step 2: make a new cv2 video capture object for recycling the image files
    vidObj = cv2.VideoCapture(video_path)
    compiled_list = []
    frame = 0
    success = 1
    # per frame, compile the values
    while (True):
        success, image = vidObj.read()
        if (success == 0):
            break
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = Image.fromarray(image)
        image = image.resize((480, 270))
        image = image.convert('RGBA')
        image = np.array(image)
        mask = convert_mask_alpha(mask_frames_list[frame])
        # blend: full-strength frame + 40% mask overlay
        add_imgs = cv2.addWeighted(image, 1.0, mask, 0.4, 0.0)
        add_imgs = Image.fromarray(add_imgs).convert('RGB')
        add_imgs = asarray(add_imgs)
        compiled_list.append(add_imgs)
        frame += 1
    # return the RGB data list (alpha is dropped by the convert('RGB') above)
    compiled_list = np.array(compiled_list)
    print('Frames are finished compiling')
    return compiled_list
# expects uint8, numpy preferrable
def frames_to_video(imput_list, name, isRGB):
    """Encode a sequence of 480x270 uint8 frames into ``<name>.mp4`` at 24 fps.

    Args:
        imput_list: sequence of uint8 frame arrays (name kept for interface
            compatibility with existing callers).
        name: output file name without extension.
        isRGB: True when the frames carry colour channels.
    """
    out = cv2.VideoWriter(name + '.mp4', cv2.VideoWriter_fourcc(*'MP4V'), 24, (480, 270), isRGB)
    try:
        for i in range(len(imput_list)):
            out.write(imput_list[i])
    finally:
        # The writer must be released to flush and finalize the container;
        # the original never called release(), which can leave the output
        # file truncated or corrupt.
        out.release()
    print('finished')
# input will be a PIL image
# input will be a PIL image
def overlay_mask_to_img(original_image):
    """Predict a segmentation mask for *original_image* (a PIL image) and
    blend it over the image; returns an RGBA numpy array sized 1200x600.

    NOTE(review): uses predict_frame, which depends on the commented-out
    local model.
    """
    mask = original_image
    mask = mask.resize((128,128))
    mask = ImageOps.grayscale(mask)
    mask = asarray(mask)
    mask = predict_frame(mask)
    mask = np.array(mask)
    mask = mask.astype('uint8')
    mask = convert_mask_alpha(mask)
    mask = Image.fromarray(mask)
    mask = mask.resize((1200, 600))
    original_image = original_image.convert('RGBA')
    original_image = asarray(original_image)
    original_image = original_image.astype('uint8')
    mask = asarray(mask).astype('uint8')
    print(original_image.shape)
    # blend: full-strength image + 40% mask overlay
    add_imgs = cv2.addWeighted(original_image, 1.0, mask, 0.4, 0.0)
    return add_imgs
custom_libs/Project3/plotter.py | drkostas/COSC522 | 1 | 12765348 | <reponame>drkostas/COSC522
import matplotlib.pyplot as plt
import numpy as np
from typing import *
class Plotter:
    """Plotting helpers: stacked per-class histograms of the fX and pX1
    features, and a ROC curve over different priors.

    The feature arrays are expected to carry the class label (0 or 1) in
    their last column -- inferred from the boolean masks below.
    """
    nx: np.ndarray
    fx: np.ndarray
    px: np.ndarray
    px1: np.ndarray
    def __init__(self, nx: np.ndarray, fx: np.ndarray, px1: np.ndarray, px: np.ndarray):
        self.nx = nx
        self.fx = fx
        self.px = px
        self.px1 = px1
    def plot_fx_px1_histograms(self, bins=10):
        """Plot stacked per-class histograms of fX (left) and pX1 (right)."""
        fig, ax = plt.subplots(1, 2, figsize=(11, 4))
        # fX histogram
        fx_c1 = self.fx[self.fx[:, -1] == 0][:, :-1].flatten()
        fx_c2 = self.fx[self.fx[:, -1] == 1][:, :-1].flatten()
        ax[0].hist([fx_c1, fx_c2], stacked=True, color=["tab:blue", "tab:orange"], bins=bins)
        ax[0].set_title("fX Histogram")
        ax[0].set_xlabel("fX")
        ax[0].set_ylabel("Count")
        # ax[0].margins(0.1)  # 1% padding in all directions
        ax[0].legend({"Class 1": "tab:blue", "Class 2": "tab:orange"})
        ax[0].grid(True)
        # pX1 histogram
        px1_c1 = self.px1[self.px1[:, -1] == 0][:, :-1].flatten()
        px1_c2 = self.px1[self.px1[:, -1] == 1][:, :-1].flatten()
        ax[1].hist([px1_c1, px1_c2], stacked=True, color=["tab:blue", "tab:orange"], bins=bins)
        # Annotate Plot
        ax[1].set_title("pX1 Histogram")
        ax[1].set_xlabel("pX1")
        ax[1].set_ylabel("Count")
        # ax[1].margins(0.1)  # 1% padding in all directions
        ax[1].legend({"Class 1": "tab:blue", "Class 2": "tab:orange"})
        ax[1].grid(True)
        # Fig config
        fig.tight_layout()
    @staticmethod
    def plot_roc(confusion_matrix_data: List[Dict]):
        """Plot a ROC curve from per-prior confusion-matrix summaries.

        Each dict must provide 'fpr', 'tpr' and 'priors' keys; points are
        sorted by FPR and a spaced subset is annotated with its priors.
        """
        cm_data_sorted = sorted(confusion_matrix_data, key=lambda row: row['fpr'])
        x = [cm_row['fpr'] for cm_row in cm_data_sorted]
        y = [cm_row['tpr'] for cm_row in cm_data_sorted]
        point_labels = [cm_row['priors'] for cm_row in cm_data_sorted]
        fig, ax = plt.subplots(1, 1, figsize=(11, 11))
        ax.plot(x, y, '-', color='tab:orange')
        previous_point = 0
        y_step = 0.3
        x_step = 0.01
        for px, py, pl in zip(x, y, point_labels):
            # Don't annotate everything: only points at least 0.1 FPR apart
            if abs(previous_point - px) > 0.1:
                previous_point = px
                pxl = px - x_step
                pyl = py - y_step
                # drift the label offsets so annotations don't overlap
                y_step -= 0.032
                x_step += 0.008
                pl_round = f'Priors: ({pl[0]:.2f}, {pl[1]:.2f})'
                ax.annotate(pl_round, xy=(px, py), xytext=(pxl, pyl),
                            bbox=dict(boxstyle="round", fc="none", ec="gray"),
                            arrowprops=dict(facecolor='black', arrowstyle="fancy",
                                            fc="0.6", ec="none",
                                            connectionstyle="angle3,angleA=0,angleB=-90"))
        # Annotate Plot
        ax.set_title('ROC for pX using Case 3 for different priors')
        ax.set_xlabel('FPR')
        ax.set_ylabel('TPR')
        ax.set_xticks(np.arange(0, 1.1, 0.1))
        ax.set_yticks(np.arange(0, 1.1, 0.1))
        ax.set_xlim([0, 1])
        ax.set_ylim([0, 1])
        ax.set_xbound(0, 1)
        ax.set_ybound(0, 1)
        ax.grid(True)
        ax.set_aspect('equal')
        # Fig Config
        fig.tight_layout()
| 2.84375 | 3 |
source/dtmil/visualizations.py | ajayarunachalam/ADOPT | 11 | 12765349 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 20 15:55:46 2019
@author: dweckler
"""
import numpy as np, matplotlib.pyplot as plt
from keras import backend as T
import time
import os
from .utilities import flat_avg
from dtmil.configuration.config_dtmil import get_json_config_data
from .prediction_data import Prediction_Data
import math
#%%class def
class Visualizer:
    """Builds matplotlib visualizations (sample timelines, per-parameter
    plots with nominal guidelines, precursor-score overlays and feature
    ranking figures) for one data sample of a DT-MIL model."""
    #TODO: Redesign this to work with multiple sources without depending on having all the data at once
    def __init__(self, myData, myModel, sample_idx = None, guidelines = True, prediction_data = None, dataset_dir = None, input_json_data = None):
        """Create a visualizer for *sample_idx* of *myData*/*myModel*.

        Visualization parameters come from *input_json_data* when given,
        otherwise from the JSON config located via *dataset_dir*.
        A Prediction_Data is built unless one is supplied.
        """
        self.myData = myData
        self.myModel = myModel
        self._current_sample = sample_idx
        ##FIXME: make this update the visualization parameters every run (grab location of config file from myData?)
        if (input_json_data is not None):
            json_data = input_json_data
        else:
            _, json_data, _ = get_json_config_data(dataset_dir)
        self.visualization_params = json_data['visualization']
        ##FIXME: Make this more able to be manually defined
        # Time axis: maxlen points spaced sf apart, counting down to 0.
        sf = 0.25
        self.xvec_scale_factor = sf
        self.xvec_timeline=np.arange((self.myData.maxlen-1)*sf,-sf,-sf)
        #this is to account for the extra value in the start and end indeces. Will be best practice to fix in the future
        self.xvec_temp_time_lookup = np.copy(self.xvec_timeline)
        self.xvec_temp_time_lookup = np.append(self.xvec_temp_time_lookup,self.xvec_timeline[-1])
        if sample_idx == None:
            print(f"sample index is set to None, using default value")
            sample_idx = 0
        if prediction_data:
            self.prediction_data = prediction_data
        else:
            self.prediction_data = Prediction_Data(myData,myModel,sample_idx)
        self.guidelines = guidelines
        if (guidelines):
            self.get_guidelines()
    @classmethod
    def frompredictiondata(cls, prediction_data, guidelines = True):
        """Alternate constructor: build a Visualizer from an existing
        Prediction_Data instance."""
        #initialize from preditcion data
        return cls(prediction_data.myData, prediction_data.myModel, prediction_data.current_sample, prediction_data = prediction_data)
    #%%plot sample timeline function
    @property
    def current_sample(self):
        """Index of the sample currently being visualized."""
        return self._current_sample
    @current_sample.setter
    def current_sample(self,value):
        # Changing the sample invalidates the cached prediction data.
        self._current_sample = value
        self.prediction_data = Prediction_Data(self.myData,self.myModel,value)
    def plot_sample_timeline(self, figure_size = None, saveFig = True):
        """Plot instance-level adverse-event probabilities over the sample
        timeline for the test set, one subplot per class, and optionally
        save the figure as ``timeline.png`` in the model output directory.
        """
        myModel = self.myModel
        model_output_directory = myModel.model_output_directory
        xtest = myModel.xtest
        if (saveFig):
            plt.switch_backend('agg')
        # function to get an intermediate layer's output (instance probabilities)
        inst_layer_output_fn = T.function([myModel.model.layers[0].input],[myModel.model.layers[-2].output])
        temp=xtest
        L=inst_layer_output_fn([temp])[0]
        # first half of the examples plotted green, second half red --
        # presumably nominal vs adverse ordering; TODO confirm against data prep
        nex=int(temp.shape[0]/2)
        plt.figure(figsize=figure_size)
        plt.subplot(2,1,1)
        plt.plot(np.transpose(L[:nex,:,0]),'g')
        plt.ylim([-0.1,1.1])
        #plt.xlabel('Time to adverse event',fontsize=14)
        #plt.xlabel('Sample timeline',fontsize=14)
        plt.ylabel('Probability of \n adverse event',fontsize=14)
        # plt.xticks([0,10,20],['1000 ft \n altitude', '10 mi', '20 mi'],rotation=0)
        #plt.gca().invert_xaxis()
        plt.subplot(2,1,2)
        plt.plot(np.transpose(L[nex:,:,0]),'r')
        plt.ylim([-0.1,1.1])
        #plt.gca().invert_xaxis()
        plt.xlabel('sample timeline',fontsize=14)
        #plt.xticks([0,10,20],['1000 ft \n altitude', '10 mi', '20 mi'],rotation=0)
        plt.ylabel('Probability of \n adverse event',fontsize=14)
        temp=self.myData.xvalid
        L=inst_layer_output_fn([temp])[0]
        nex=int(temp.shape[0]/2)
        # NOTE(review): the result of this expression is unused -- leftover debugging?
        np.where(L[nex:,80:,0]>0.5)[0][:10]
        if(saveFig):
            plt.savefig(os.path.join(model_output_directory,"timeline.png"))
    #%%batch visualization function
    #FIXME: text sizing
    def visualize_sample_parameters(self,figure_size = None, saveFig = False, file_output_dir = "",file_output_type = "pdf",num_columns = 5, subplot_aspect_ratio = (1,1), subplot_size = 3.6):
        """Plot every correlated ("held-out") and trained parameter for the
        current sample in a grid, plus the precursor score in its own
        subplot; optionally save the figure.

        NOTE(review): file_output_type, subplot_aspect_ratio and
        subplot_size are currently unused by the body.
        """
        myData = self.myData
        # myModel = self.myModel
        if (saveFig):
            plt.switch_backend('agg')
        #specify the variables to be included in the plot
        correlated_states = myData.correlated_states.tolist()
        trained_states = myData.parameter_selection.tolist()
        parameters_to_plot=correlated_states + trained_states
        correlated_indeces = len(correlated_states)
        num_plots = len(parameters_to_plot) + 1
        num_rows = math.ceil(float(num_plots)/float(num_columns))
        if figure_size is None:
            width = 4*num_columns
            height = num_rows * 3.5
            figure_size = (width,height)
        fig, axs = plt.subplots(num_rows,num_columns, figsize= figure_size)
        axs=axs.ravel()
        # show the full history window for each parameter
        starting_index = -1-myData.maxlen+1
        for pltIdx in np.arange(len(parameters_to_plot)):
            selected_parameter = parameters_to_plot[pltIdx]
            plot_title = "{}".format(myData.header[selected_parameter])
            #add holdout to the title if it's within the correlated indeces
            if (pltIdx < correlated_indeces):
                plot_title = plot_title + "(H/O)"
            self.plot_parameter(selected_parameter,axs[pltIdx],starting_index, plot_title = plot_title)
        # plot precursor score in a separate subplot
        pltIdx=pltIdx+1
        self.plot_precursor_score(axs[pltIdx],'Precursor Score')
        fig.tight_layout()
        # save figure if needed
        if saveFig:
            suffix = "_{}".format(self.myData.get_filename(self.current_sample))
            file_label, file_dataset_type = self.myData.get_grouping(self.current_sample)
            filename = "{}_{}".format(file_label,file_dataset_type)
            save_figure(self.myModel,suffix,fig,file_output_dir,filename,file_output_type = 'pdf')
            #self.save_figure(fig,file_output_dir)
    def special_ranking_visualization(self, states_to_visualize,sorted_ranking_sums,figure_size = (10,10), saveFig = False, file_output_dir = "",file_output_type = "pdf"):
        """Plot the precursor score plus the first six states from
        *states_to_visualize* (with their ranking sums in the titles) in a
        fixed 3x3 grid.

        NOTE(review): saveFig, file_output_dir and file_output_type are
        accepted but unused -- the figure is never saved here.
        """
        myData = self.myData
        fig, axs = plt.subplots(3,3, figsize= figure_size)
        axs=axs.ravel()
        self.plot_precursor_score(axs[1],'Precursor Score')
        for i in range(6):
            selected_parameter = states_to_visualize[i]
            plot_title = "{} ({})".format(myData.header[selected_parameter],sorted_ranking_sums[i])
            #add holdout to the title if it's within the correlated indeces
            self.plot_parameter(selected_parameter,axs[i+3],0, plot_title = plot_title)
    #TODO: same as below except ordered ranking parameters with a variable number of columns and such
    #output with values of ranking
    #figure out what the values mean to report to bryan tomorrow
    def visualize_top_ranking_parameters(self,ranking_group,feature_num_limit=None,num_columns = 4,displayfig = False):
        """For each precursor event window in *ranking_group*, plot the
        default precursor score plus -- for each top-ranked feature -- the
        precursor-score responses to the configured standard-deviation
        disturbances, then save one figure per window under
        ``feature_ranking``.
        """
        file_output_dir = "feature_ranking"
        myData = self.myData
        if (not displayfig):
            plt.switch_backend('agg')
        #get as many as we can
        #score_pair_lists = ranking_group.top_ranking_scores(1)
        #response_windows_lists = ranking_group.top_response_windows(1)
        response_windows_lists = ranking_group.ordered_response_windows_list
        # optionally trim each window list to the top feature_num_limit features
        if(feature_num_limit is not None):
            if len(response_windows_lists[0])> feature_num_limit:
                response_windows_lists = [lst[0:feature_num_limit] for lst in response_windows_lists]
        num_windows = len(response_windows_lists)
        #print(feature_num_limit,len(response_windows_lists[0]),len(response_windows_lists[1]))
        for idx,response_windows in enumerate(response_windows_lists):
            parameter_selection = [window.attribute_index for window in response_windows]
            # print([window.ranking_score for window in response_windows])
            # print([window.most_important_sd_response for window in response_windows])
            score_list = [round(window.ranking_score,3) for window in response_windows]
            sd_response_list = []
            for window in response_windows:
                most_important_response = window.most_important_sd_response
                if most_important_response is not None:
                    sd_response_list.append(str(most_important_response))
                else:
                    sd_response_list.append("n/a")
            #sd_response_list = [round(window.most_important_sd_response,3) for window in response_windows]
            # grid layout: one subplot per feature plus one for the default score
            num_plots = len(response_windows) + 1
            num_rows = math.ceil(float(num_plots)/float(num_columns))
            width = 4*num_columns
            height = num_rows * 3.5
            figsize = (width,height)
            fig, axs = plt.subplots(num_rows,num_columns, figsize= figsize)
            axs=axs.ravel()
            fig.tight_layout()
            xvec_timeline = self.xvec_timeline
            plot_idx = 0
            # subplot 0: the undisturbed ("Default") precursor score
            axs[plot_idx].plot(xvec_timeline,ranking_group.prediction_data.precursor_score,'r',linewidth=2,label = "Default")
            axs[plot_idx].set_title("Precursor Score",fontsize=10)
            axs[plot_idx].set_ylim([0,1])
            axs[plot_idx].invert_xaxis()
            if(self.guidelines):
                axs[plot_idx].plot(self.xvec_timeline,self.precursor_score_guideline,'k--')
            graph_colors = ['b','g','k','y','c','m','k','w']
            color_idx = 0
            sd_disturbances = ranking_group.parent.standard_deviation_disturbances
            #TODO: condense everything below into one function (rather than writing the same code twice)
            parameter_window_indeces = [ranking_group.parameter_list.index(i) for i in parameter_selection]
            parameter_windows = [ranking_group.parameter_windows[i] for i in parameter_window_indeces]
            #if this process isn't behind an if statement, the algorithm will output blank graphs
            #furthermore, it will cause some of the following graphs to come out blank as well
            #the cause of this is unknown, but may be useful to investigate in the future
            if len(parameter_windows)>0:
                #TODO: Figure out why this conditional became necessary and the one above stopped working? (maybe some revisions impacted it?)
                if len(parameter_windows[0].start_indeces)>0:
                    start_index = parameter_windows[0].start_indeces[idx]
                    end_index = parameter_windows[0].end_indeces[idx]
                    window_start_idx = self.xvec_temp_time_lookup[start_index]
                    window_end_idx = self.xvec_temp_time_lookup[end_index]
                    # shade the precursor event window on the default plot
                    axs[plot_idx].axvspan(window_start_idx, window_end_idx, alpha=0.1, color='k')
                    for index,window in enumerate(parameter_windows):
                        color_idx = 0
                        plot_idx = index+1
                        axs[plot_idx].invert_xaxis()
                        #axs[plot_idx].set(adjustable='box', aspect=1)
                        axs[plot_idx].plot(xvec_timeline,ranking_group.prediction_data.precursor_score,'r', label = "Default",linewidth=2)
                        axs[plot_idx].axvspan(window_start_idx, window_end_idx, alpha=0.1, color='k')
                        # one curve per standard-deviation disturbance of this feature
                        for precursor_score in window.modified_precursor_scores:
                            selected_parameter = parameter_selection[index]
                            disturbance = sd_disturbances[color_idx]
                            if disturbance > 0:
                                label = "+ {} σ response".format(disturbance)
                            else:
                                label = "- {} σ response".format(abs(disturbance))
                            axs[plot_idx].plot(xvec_timeline,precursor_score,graph_colors[color_idx],linewidth=2,label = label)
                            axs[plot_idx].set_title("{} \n({}, {} σ response)".format(myData.header[selected_parameter],score_list[index],sd_response_list[index]),fontsize=10)
                            axs[plot_idx].set_ylim([0,1])
                            if(self.guidelines):
                                axs[plot_idx].plot(self.xvec_timeline,self.precursor_score_guideline,'k--')
                            color_idx += 1
                    if(plot_idx>1):
                        handles, labels = axs[plot_idx].get_legend_handles_labels()
                        fig.legend(handles, labels, loc='lower right')
                    #save the figure
                    plt.tight_layout()
                    file_label, file_dataset_type = self.myData.get_grouping(ranking_group.data_ID)
                    filename = "{}_{}_ranking".format(file_label,file_dataset_type)
                    suffix = "_{}".format(self.myData.get_filename(ranking_group.data_ID))
                    if num_windows > 1:
                        suffix = "{}_precursor_event_{}".format(suffix,idx)
                    save_figure(self.myModel,suffix,fig,file_output_dir,filename,output_time = False)
                else:
                    #TODO:
                    print("Precursor score for {} does not cross threshold?".format(self.myData.get_filename(ranking_group.data_ID)))
            else:
                print("Precursor score for {} does not cross threshold!".format(self.myData.get_filename(ranking_group.data_ID)))
# def visualize_ranking_data(self,ranking_group, output_file = None, parameter_selection = None, num_columns = 7, subplot_aspect_ratio = (1,1), subplot_size = 3.6):
# myData = self.myData
# print("generating ranking data plot")
#
# if parameter_selection is None:
# parameter_selection = myData.parameter_selection.tolist()
#
# #all the paramaeters plus the precursor score in its own plot
# num_plots = len(parameter_selection) + 1
# num_rows = math.ceil(float(num_plots)/float(num_columns))
# dx, dy = subplot_aspect_ratio
# figsize = plt.figaspect(float(dy * num_rows) / float(dx * num_columns)) * subplot_size
#
# fig, axs = plt.subplots(num_rows,num_columns, figsize= figsize)
# #fig, axs = plt.subplots(numRows,numColumns)
# axs=axs.ravel()
# fig.tight_layout()
# #xvec_timeline=np.arange((myData.maxlen-1)*0.25,-0.25,-0.25)
#
# xvec_timeline = self.xvec_timeline
#
# axs[0].plot(xvec_timeline,ranking_group.prediction_data.precursor_score,'r',linewidth=2)
# axs[0].set_title("Normal",fontsize=10)
# axs[0].set_ylim([0,1])
# axs[0].invert_xaxis()
#
# graph_colors = ['b','g','k','y']
# color_idx = 0
#
# parameter_window_indeces = [ranking_group.parameter_list.index(i) for i in parameter_selection]
# parameter_windows = [ranking_group.parameter_windows[i] for i in parameter_window_indeces]
#
# for index,window in enumerate(parameter_windows):
# color_idx = 0
# plot_idx = index+1
# axs[plot_idx].invert_xaxis()
#
# for precursor_score in window.modified_precursor_scores:
# selected_parameter = parameter_selection[index]
#
# axs[plot_idx].plot(xvec_timeline,precursor_score,graph_colors[color_idx],linewidth=2)
# axs[plot_idx].set_title("{} ({})".format(myData.header[selected_parameter],selected_parameter),fontsize=10)
# axs[plot_idx].set_ylim([0,1])
# axs[plot_idx].plot(xvec_timeline,ranking_group.prediction_data.precursor_score,'r',linewidth=1)
# color_idx += 1
    #%%save figure
    def save_figure(self, fig,file_output_dir,file_output_type = 'pdf'):
        """Save *fig* for the current sample via the module-level save_figure.

        NOTE(review): the file_output_type argument is ignored -- 'pdf' is
        passed explicitly to the module-level function.
        """
        save_figure(self.myModel,self.current_sample,fig,file_output_dir,"parameters_graph",file_output_type = 'pdf')
    #%%plot precursor score
    def plot_precursor_score(self, plot_axis, plot_title = "Precursor Score", start_index = None, end_index = None):
        """Plot the current sample's precursor score on *plot_axis* (plus
        the threshold guideline when guidelines are enabled), restricted to
        the [start_index:end_index] slice of the timeline."""
        precursor_score = self.prediction_data.precursor_score
        plot_axis.plot(self.xvec_timeline[start_index:end_index], precursor_score[start_index:end_index],'r',linewidth=2)
        if(self.guidelines):
            plot_axis.plot(self.xvec_timeline[start_index:end_index],self.precursor_score_guideline[start_index:end_index],'k--')
        plot_axis.invert_xaxis()
        plot_axis.set_title(plot_title,fontsize=10)
        plot_axis.set_ylim([0,1])
    #%%plot individual parameter
    def plot_parameter(self, selected_parameter, plot_axis,starting_index = 0,end_index = None,plot_title = "", precIdx = None):
        """Plot one parameter's time series on *plot_axis*, with nominal
        guidelines (binary or continuous, per the config) and a red-dot
        overlay on the precursor time instants."""
        ##FIXME: Make this more able to be manually defined
        xvec_timeline=self.xvec_timeline
        #FIXME: Make Prediction Data update states_orig ("visualization_sample")
        parameter_values = self.prediction_data.visualization_window[starting_index:end_index,selected_parameter]
        # plot time series variable
        plot_axis.plot(xvec_timeline[starting_index:end_index],parameter_values,linewidth=2)
        ##plot the guidelines
        # if discrete variable, use discrete nominal data as guideline, else use continuous nominal data
        if selected_parameter in self.visualization_params["binary_parameters"]:
            plot_axis.plot(xvec_timeline[starting_index:end_index],self.discrete_nominal_guideline[starting_index:end_index,selected_parameter],'k--',linewidth=2)
            plot_axis.set_ylim([-0.1,1.1])
        else:
            # lower and upper nominal band
            plot_axis.plot(xvec_timeline[starting_index:end_index],self.nominal_guideline[0,starting_index:end_index,selected_parameter],'k--',linewidth=2)
            plot_axis.plot(xvec_timeline[starting_index:end_index],self.nominal_guideline[1,starting_index:end_index,selected_parameter],'k--',linewidth=2)
        ##use this if we are dealing with multiple precursor score predictions, otherwise use the one genereated upon class initialization
        if (precIdx):
            precursor_indeces = precIdx
        else:
            precursor_indeces = self.prediction_data.precursor_indeces
        # plot precursor time instants as an overlay
        if len(precursor_indeces)>0:
            precursor_overlay_values = self.prediction_data.visualization_window[precursor_indeces,selected_parameter]
            self.precursor_overlay_values = precursor_overlay_values
            if(end_index):
                # only draw precursor markers that fall before end_index
                if end_index >= precursor_indeces[0]:
                    precursor_end_index = (np.abs(precursor_indeces - (end_index))).argmin()
                    print(precursor_end_index,end_index)
                    plot_axis.plot(xvec_timeline[precursor_indeces][0:precursor_end_index],precursor_overlay_values[0:precursor_end_index],'ro', alpha = 0.4)
            else:
                plot_axis.plot(xvec_timeline[precursor_indeces],precursor_overlay_values,'ro', alpha = 0.4)
        #
        if plot_title == "":
            plot_title = "{} ({})".format(self.myData.header[selected_parameter],selected_parameter)
        plot_axis.set_title(plot_title,fontsize=10)
        # # invert x-axis so that distance to touchdown reduces as we go towards rightside of the plot
        plot_axis.invert_xaxis()
    #%%get guidelines
    def get_guidelines(self):
        """Compute and cache the nominal guideline bands, the discrete
        (binary) guideline and the precursor-score threshold line.

        guideline_type 1 uses a flat mean +/- 2.5*std band over the optimal
        samples; any other value uses the 10th/90th percentiles.
        """
        myData = self.myData
        optimal_values=myData.states_orig[:,np.concatenate((myData.I_opt,myData.I_opt_valid),axis=0)]
        #determine guidelines
        guideline_type = self.visualization_params["guideline_type"]
        if guideline_type == 1:
            optimal_standard_dev = np.std(optimal_values, axis=1)
            optimal_mean = np.mean(optimal_values,axis = 1)
            avg_guideline =flat_avg(optimal_mean)
            sdev_guideline = flat_avg(optimal_standard_dev)
            sdev_scale = 2.5
            upper_guideline = avg_guideline + sdev_scale * sdev_guideline
            lower_guideline = avg_guideline - sdev_scale * sdev_guideline
            nominal_guideline = np.array([lower_guideline, upper_guideline])
        else:
            # get nominal percentiles for plotting
            nominal_guideline=np.percentile(optimal_values,[10,90],axis=1)
        self.nominal_guideline = nominal_guideline
        # Get nominal values assuming binary (note that we will only use this if the variable is binary)
        self.discrete_nominal_guideline=np.mean(optimal_values,axis=1)
        # horizontal threshold line for the precursor-score plots
        self.precursor_score_guideline = np.full(optimal_values.shape[0],self.prediction_data.precursor_threshold)
def save_figure(myModel, figure_suffix, fig,file_output_dir,filename,file_output_type = 'pdf', output_time = True):
    """Save *fig* as ``<filename><figure_suffix>.<file_output_type>`` inside
    the model's output directory (plus *file_output_dir*), creating the
    directory if necessary.

    *output_time* is kept for interface compatibility; the timing printout
    it controlled was already commented out.
    """
    print("Saving figure: {}".format(figure_suffix))
    model_output_directory = myModel.model_output_directory
    if model_output_directory != "":
        model_output_directory = os.path.join(model_output_directory,file_output_dir)
    if not os.path.exists(model_output_directory):
        print(f"creating directory {model_output_directory}")
        os.makedirs(model_output_directory)
    # Build the extension from file_output_type; it was previously
    # hard-coded to "pdf", so a non-default type got a mismatched
    # extension even though savefig encoded the requested format.
    filename = "{}{}.{}".format(filename,figure_suffix,file_output_type)
    filepath = os.path.join(model_output_directory,filename)
    fig.savefig(filepath,format= file_output_type)
def visualize(myData, myModel,sample_idx = 0, savefig = False):
    # Convenience wrapper: build a Visualizer for one sample and emit both
    # the overview timeline and the per-parameter grid.
    vis = Visualizer(myData,myModel,sample_idx)
    vis.plot_sample_timeline(figure_size = (8,6), saveFig = savefig)
    print("Visualizing Sample {}".format(sample_idx))
    vis.visualize_sample_parameters(figure_size=(32,24),saveFig = savefig)
| 2.109375 | 2 |
sem6000/bluetooth_lowenergy_interface/timeout_decorator.py | moormaster/voltcraft-sem-6000 | 2 | 12765350 | import threading
def DisconnectAfterTimeout(timeout):
    """Decorator factory: call ``disconnect()`` on the wrapped callable's
    first argument if the call has not finished within *timeout* seconds.

    The watchdog timer is always cancelled once the call returns (or
    raises), so a fast call never triggers the disconnect.
    """
    def Decorator(function):
        def decorated_function(*args, **kwargs):
            disconnectable = args[0]
            watchdog = threading.Timer(timeout, disconnectable.disconnect)
            watchdog.start()
            try:
                return function(*args, **kwargs)
            finally:
                watchdog.cancel()
        return decorated_function
    return Decorator
| 3.09375 | 3 |