# wifi_conf/wifi_conf_client.py (mbunse/wifi_conf)
from socket_client_server.socket_client_server import Sock_Client
import os
SERVER_ADDRESS = os.getenv("WIFI_CONF_SOCKET", '/var/run/wifi_conf/wifi_conf.socket')
def start_access_point(ssid):
"""Switch wifi to access point mode.
`ssid`: Name of wifi to be spawned.
"""
sock_client = Sock_Client(SERVER_ADDRESS, timeout_in_sec=20)
# Construct message
data = {
"action": "configure_access_point",
"data": {
"ssid": ssid
}
}
answer = sock_client.send(data)
if answer["status"] != 0:
raise Exception(answer["message"])
return
def stop_access_point():
"""
    Switch back to wifi client mode from access point mode.
"""
sock_client = Sock_Client(SERVER_ADDRESS, timeout_in_sec=20)
# Construct message
data = {
"action": "unconfigure_access_point",
}
answer = sock_client.send(data)
if answer["status"] != 0:
raise Exception(answer["message"])
return
def set_wifi_ssid_and_password(ssid, password):
"""Set ssid and password for wifi
client mode during access point mode.
`ssid`: SSID of wifi network to be joined in client mode.
`password`: Password for wifi network to be joined in client mode.
"""
sock_client = Sock_Client(SERVER_ADDRESS, timeout_in_sec=20)
# Construct message
data = {
"action": "set_wifi_ssid_and_password",
"data": {
"ssid": ssid,
"password": password
}
}
answer = sock_client.send(data)
if answer["status"] != 0:
raise Exception(answer["message"])
    return
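# Usage sketch (illustrative, not from the original repo): how a caller might
# drive these helpers. Assumes a wifi_conf server is listening on SERVER_ADDRESS
# and that hotspot/network names below are placeholders.
if __name__ == "__main__":
    try:
        start_access_point("setup-hotspot")                # spawn a temporary AP
        set_wifi_ssid_and_password("HomeWifi", "s3cret")   # store the target network
        stop_access_point()                                # rejoin as a client
    except Exception as exc:
        print("wifi_conf request failed:", exc)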
# otcextensions/sdk/dms/v1/message.py (spielkind/python-otcextensions)
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import resource
from openstack import _log
from otcextensions.sdk.dms.v1 import _base
_logger = _log.setup_logging('openstack')
class Message(_base.Resource):
resources_key = 'messages'
base_path = '/queues/%(queue_id)s/messages'
# capabilities
allow_create = True
# Properties
#: Queue id
queue_id = resource.URI('queue_id')
messages = resource.Body('messages', type=list)
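# Usage sketch (an assumption, not from the upstream SDK docs): openstack
# `resource.Resource` subclasses are normally instantiated with `new()` and
# persisted with `create(session)`, so sending a message might look like:
#
#     msg = Message.new(queue_id="q-123",
#                       messages=[{"body": "hello", "attributes": {}}])
#     msg.create(session)  # POSTs to /queues/q-123/messages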
# posts/admin.py (syqu22/django-react-blog)
from django.contrib import admin, messages
from django_summernote.admin import SummernoteModelAdmin
from posts.models import Post, Tag
@admin.register(Post)
class PostAdmin(SummernoteModelAdmin):
summernote_fields = ('body',)
list_display = ('title', 'id', 'is_public', 'slug',
'author', 'edited_at', 'created_at')
list_filter = ('is_public', 'created_at', 'edited_at',)
search_fields = ['title', 'slug', 'author']
prepopulated_fields = {'slug': ('title',)}
actions = ['make_public', 'make_unpublic']
    def make_public(self, request, queryset):
        queryset.update(is_public=True)
        messages.success(
            request, 'Selected Post(s) are now public!')
    def make_unpublic(self, request, queryset):
        queryset.update(is_public=False)
        messages.success(
            request, 'Selected Post(s) are no longer public!')
@admin.register(Tag)
class TagAdmin(admin.ModelAdmin):
list_display = ('name', 'id')
list_filter = ('name',)
search_fields = ('name',)
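# Sketch (assumes Django 3.2+): the same actions with human-readable labels via
# the `admin.action` decorator, so the actions dropdown shows proper text
# instead of the raw method name:
#
#     @admin.action(description='Mark selected posts as public')
#     def make_public(self, request, queryset):
#         queryset.update(is_public=True)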
# core/clients/__init__.py (Latterlig96/airflow-model-trainer)
from .minio import MinioHandler
# heatmap.py (footoredo/pytorch-a2c-ppo-acktr-gail)
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
def plot(gather_count, filename):
gather_count = np.log(gather_count + 1)
sns.color_palette("light:b", as_cmap=True)
ax = sns.heatmap(gather_count, vmax=8, vmin=0, cmap="Purples",
xticklabels=False, yticklabels=False, cbar=False,
square=True)
ax.spines['top'].set_visible(True)
ax.spines['right'].set_visible(True)
ax.spines['bottom'].set_visible(True)
ax.spines['left'].set_visible(True)
[i.set_linewidth(2) for i in ax.spines.values()]
plt.tight_layout()
# plt.show()
plt.savefig(filename)
def main():
    gather_count = [[0, 0, 0, 0, 0], [0, 0, 1, 0, 1], [0, 1, 2, 0, 4], [0, 0, 0, 7, 24], [0, 0, 5, 18, 4549]]
    gather_count = np.array(gather_count)
    # plot() already applies the log transform, so pass the raw counts; the
    # required filename argument was also missing (output name assumed here)
    plot(gather_count, "heatmap.png")
if __name__ == "__main__":
main()
# airflow/dags/manual_ingestion.py (vanshkumar/health-equity-tracker)
'''Manual data ingestion DAG.'''
from util import create_bq_ingest_operator
# Ignore the Airflow module; it is installed in both our dev and prod environments
from airflow.models import Variable # type: ignore
from airflow import DAG # type: ignore
from airflow.utils.dates import days_ago # type: ignore
default_args = {
'start_date': days_ago(0),
}
manual_ingestion_dag = DAG(
'manual_ingestion_dag',
default_args=default_args,
schedule_interval=None,
description='Triggering for manual uploads.')
# Manual Uploads
manual_uploads_payload = {'message': {'is_airflow_run': True,
'gcs_bucket': Variable.get('GCS_MANUAL_UPLOADS_BUCKET'),
'id': 'MANUAL_UPLOADS'}}
manual_uploads_bq_operator = create_bq_ingest_operator(
'manual_uploads_task', manual_uploads_payload, manual_ingestion_dag)
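# Usage note (deployment detail assumed, not stated in the file): with a
# `schedule_interval` of None this DAG only runs when triggered explicitly, e.g.
#
#     airflow dags trigger manual_ingestion_dag      # Airflow 2.x
#     airflow trigger_dag manual_ingestion_dag       # Airflow 1.x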
# main.py (manojpandey/hackinthenorth)
import cv2
import numpy as np
import copy
import datetime
from close_n import best_match
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# face_cascade = cv2.CascadeClassifier('./haarcascade_profileface.xml')
cap = cv2.VideoCapture(0)
n = 0
d = np.zeros((480, 640, 3), dtype='int')
n_frames = 0
while True:
n_frames += 1
ret, frame = cap.read()
# print ret, frame
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
if n_frames % 10 == 0:
faces = face_cascade.detectMultiScale(frame, 1.3, 5)
        print(len(faces))  # Python 3: print is a function, xrange is gone
        for ix in range(len(faces)):
(x, y, w, h) = faces[ix]
# cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
roi_gray = gray[y:y + h, x:x + w]
roi_color = copy.deepcopy(frame[y:y + h, x:x + w])
ij = cv2.resize(roi_color, (100, 100))
# res, qal = best_match(ij)
# if 'img' in res:
# v = 'laksh'
# else:
# v = 'manoj'
# print v, datetime.datetime.now()
d[y:y + h, x:x + w] += frame[y:y + h, x:x + w]
if n >= 5:
pass
# line 43
cv2.imwrite(
'data/image' + str(n) + '.jpg',
frame[y - 10:y + h + 10, x - 10:x + w + 10])
n += 1
cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
# 47-49
# if i >= 100:
# i = 0
        if len(faces):  # roi_color only exists when at least one face was found
            cv2.imshow('face_crop', roi_color)
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
print(d.shape)
cv2.imwrite('face.jpg', d)
# print np.max(d), np.max(frame)
cap.release()
cv2.destroyAllWindows()
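# Robustness sketch (illustrative): cv2.CascadeClassifier fails silently when
# the XML file is missing, so a load check right after construction avoids
# confusing empty detections:
#
#     if face_cascade.empty():
#         raise IOError("could not load haarcascade_frontalface_default.xml")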
# lib/diskdevs.py (nicciniamh/fstabedit)
#!/usr/bin/env python3
# Get and list filesystems
#
# This only works on linux systems with procfs mounted.
#
# Uses /proc/partitions and /dev/disk/by-{uuid,label} to build the devs
# dictionary.
#
# Run from a command-line, this script will list all partitions and uuid
# and/or labels if set without being root.
#
# Copyright 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# A lot of this functionality duplicates what the shell command blkid does and will
# attempt to use the output of blkid before probing /dev and /proc to get this information
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
import os, sys, re, glob, subprocess, shlex, codecs
if sys.version_info[0] < 3:
from builtins import open
from builtins import object
from future import standard_library
standard_library.install_aliases()
class devices(object):
'''
Class to query partition and filesystem information using various attributes.
Build and query a dictionary of disk partitions listing attributes such
as uuid, label, partuuid, partlabel.
Partitions can be queried by each of these as well as the device node itself.
lists of devices, labels, and uuids can be obtained for partitions or
filesystems to use to query with by<attribute>
a rescan method is provided for getting new data if the device list changes
dynamically.
'''
def __init__(self):
self.partitions = {}
self.rescan()
#
# This is kind of a hack. I need a binding for libblkid that can give me all this information
# that is also documented.
#
def __parseBlkId(self):
'''
Attempt to return a partitions dict by parsing the output of blkid(8)
'''
partitions = {}
for x in subprocess.getoutput('blkid 2>/dev/null').split('\n'):
dev,info = x.split(':')
partitions[dev] = {"device":dev}
fields = shlex.split(info.strip())
for f in fields:
k,v = f.split('=')
k = k.lower()
partitions[dev][k] = v
if len(list(partitions.keys())):
return partitions
return None
def rescan(self):
'''
Rescan for partitions. This will update any added or removed since first run.
'''
p = self.__parseBlkId()
if p:
self.partitions = p
return
self.partitions = {}
for l in open('/proc/partitions').read().split('\n')[1:]: # Get all but first line
l = l.strip()
if l == '':
continue
            maj, minor, blocks, dev = re.split(r"\s+", l)
if minor != '0' and re.search(r'[0-9]',dev):
dev = '/dev/{}'.format(dev)
ptype = None
self.partitions[dev] = {'device':dev}
        for d in glob.glob('/dev/disk/by-uuid/*'):
            dev = os.path.realpath(d)
            # setdefault guards against symlinks resolving to devices that were
            # not listed in /proc/partitions (e.g. device-mapper nodes)
            self.partitions.setdefault(dev, {'device': dev})['uuid'] = os.path.basename(d)
        for d in glob.glob('/dev/disk/by-label/*'):
            dev = os.path.realpath(d)
            self.partitions.setdefault(dev, {'device': dev})['label'] = os.path.basename(d)
        for d in glob.glob('/dev/disk/by-partlabel/*'):
            dev = os.path.realpath(d)
            # partlabels escape special characters; 'string-escape' was Python 2
            # only, so decode the escapes portably instead
            label = codecs.decode(d.replace('/dev/disk/by-partlabel/', ''), 'unicode_escape')
            self.partitions.setdefault(dev, {'device': dev})['partlabel'] = os.path.basename(label)
        for d in glob.glob('/dev/disk/by-partuuid/*'):
            dev = os.path.realpath(d)
            self.partitions.setdefault(dev, {'device': dev})['partuuid'] = os.path.basename(d)
def byDevice(self,device):
''' Return partition dictionary for given device name. e.g: /dev/sda1
or None if not found.
'''
for d in list(self.partitions.keys()):
if 'device' in list(self.partitions[d].keys()):
if self.partitions[d]['device'] == device:
return self.partitions[d]
return None
def allDevices(self):
''' Return a list strings of all device nodes '''
return list(self.partitions.keys())
def byUUID(self,uuid):
''' Return partition dictionary for given filesystem UUID
or None if not found.
'''
for d in list(self.partitions.keys()):
if 'uuid' in list(self.partitions[d].keys()):
if self.partitions[d]['uuid'] == uuid:
return self.partitions[d]
return None
def allUUIDs(self):
''' Return a list of strings of all filesystem UUIDs or None if none exist. '''
uuids = []
for d,p in list(self.partitions.items()):
if 'uuid' in list(p.keys()):
uuids.append(p['uuid'])
if len(uuids):
return uuids
return None
def byPartUUID(self,uuid):
''' Return partition dictionary for given partition UUID
or None if not found.
'''
for d in list(self.partitions.keys()):
if 'partuuid' in list(self.partitions[d].keys()):
if self.partitions[d]['partuuid'] == uuid:
return self.partitions[d]
return None
def allPartUUIDs(self):
''' Return a list of strings of all partition UUIDs or None if none exist. '''
uuids = []
for d,p in list(self.partitions.items()):
if 'partuuid' in list(p.keys()):
uuids.append(p['partuuid'])
if len(uuids):
return uuids
return None
def byLabel(self,label):
''' Return partition dictionary for given filesystem label
or None if not found.
'''
for d in list(self.partitions.keys()):
if 'label' in list(self.partitions[d].keys()):
if self.partitions[d]['label'] == label:
return self.partitions[d]
return None
def allLabels(self):
''' Return a list of strings of all filesystem labels or None if none exist. '''
labels = []
for d,p in list(self.partitions.items()):
if 'label' in list(p.keys()):
labels.append(p['label'])
if len(labels):
return labels
return None
def byPartLabel(self,label):
''' Return partition dictionary for given partition label
or None if not found.
'''
for d in list(self.partitions.keys()):
if 'partlabel' in list(self.partitions[d].keys()):
if self.partitions[d]['partlabel'] == label:
return self.partitions[d]
return None
def allPartLabels(self):
''' Return a list of strings of all partition labels or None if none exist. '''
labels = []
for d,p in list(self.partitions.items()):
if 'partlabel' in list(p.keys()):
labels.append(p['partlabel'])
if len(labels):
return labels
return None
if __name__ == "__main__":
# flimsy command-line processing
arg = None
dev = devices()
if len(sys.argv) > 1:
if sys.argv[1].lower() == '--showall':
arg = 'all'
else: #sys.argv[1] == '--help' or sys.argv[1] == '-h':
print('{0} usage: {0} [--showall]'.format(os.path.basename(sys.argv[0])))
sys.exit(0)
def show(what=None):
'''
Show all partitions defined on the system, 'all' shows as much info as is defined.
'''
if what and what == 'all':
partkeys = ['uuid','label','partuuid','partlabel','type']
else:
partkeys = ['uuid','label','type']
devKeys = list(dev.partitions.keys())
devKeys.sort()
for d in devKeys:
part = dev.partitions[d]
linedata = ['{}:'.format(d)]
for k in partkeys:
if k in list(part.keys()):
linedata.append('{}="{}"'.format(k.upper(),part[k]))
print(' '.join(linedata))
show(arg)
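    # Usage sketch (illustrative): querying from another module
    #
    #     from diskdevs import devices
    #     devs = devices()
    #     root = devs.byLabel('rootfs')        # -> partition dict or None
    #     if root:
    #         print(root['device'], root.get('uuid'))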
# goturn/loader/loader_imagenet.py (amosarbiv/Tracking)
# Date: Nrupatunga: Tuesday 04 July 2017
# Email: <EMAIL>
# Name: Nrupatunga
# Description: loading Imagenet dataset
from __future__ import print_function
import sys
import os
import cv2
import glob
from annotation import annotation
import xml.etree.ElementTree as ET
from ..logger.logger import setup_logger
from ..helper import config
kMaxRatio = 0.66
class loader_imagenet:
"""Docstring for loader_imagenetdet. """
def __init__(self, imagenet_folder, annotations_folder, logger):
"""TODO: to be defined1. """
self.logger = logger
self.imagenet_folder = imagenet_folder
self.annotations_folder = annotations_folder
if not os.path.isdir(imagenet_folder):
logger.error('{} is not a valid directory'.format(imagenet_folder))
def loaderImageNetDet(self):
"""TODO: Docstring for get_videos.
:returns: TODO
"""
logger = self.logger
imagenet_folder = self.imagenet_folder
imagenet_subdirs = sorted(self.find_subfolders(self.annotations_folder))
num_annotations = 0
list_of_annotations_out = []
for i, imgnet_sub_folder in enumerate(imagenet_subdirs):
annotations_files = sorted(glob.glob(os.path.join(self.annotations_folder, imgnet_sub_folder, '*.xml')))
logger.info('Loading {}/{} - annotation file from folder = {}'.format(i+1, len(imagenet_subdirs), imgnet_sub_folder))
for ann in annotations_files:
list_of_annotations, num_ann_curr = self.load_annotation_file(ann)
num_annotations = num_annotations + num_ann_curr
if len(list_of_annotations) == 0:
continue
list_of_annotations_out.append(list_of_annotations)
logger.info('Found {} annotations from {} images'.format(num_annotations, len(list_of_annotations_out)))
# save it for future use
self.list_of_annotations_out = list_of_annotations_out
self.num_annotations = num_annotations
return list_of_annotations_out
def find_subfolders(self, imagenet_folder):
"""TODO: Docstring for find_subfolders.
:vot_folder: directory for vot videos
:returns: list of video sub directories
"""
return [dir_name for dir_name in os.listdir(imagenet_folder) if os.path.isdir(os.path.join(imagenet_folder, dir_name))]
def load_annotation_file(self, annotation_file):
"""TODO: Docstring for load_annotation_file.
:returns: TODO
"""
list_of_annotations = []
num_annotations = 0
root = ET.parse(annotation_file).getroot()
folder = root.find('folder').text
filename = root.find('filename').text
size = root.find('size')
disp_width = int(size.find('width').text)
disp_height = int(size.find('height').text)
for obj in root.findall('object'):
bbox = obj.find('bndbox')
xmin = int(bbox.find('xmin').text)
xmax = int(bbox.find('xmax').text)
ymin = int(bbox.find('ymin').text)
ymax = int(bbox.find('ymax').text)
width = xmax - xmin
height = ymax - ymin
if width > (kMaxRatio * disp_width) or height > (kMaxRatio * disp_height):
continue
if ((xmin < 0) or (ymin < 0) or (xmax <= xmin) or (ymax <= ymin)):
continue
objAnnotation = annotation()
objAnnotation.setbbox(xmin, xmax, ymin, ymax)
objAnnotation.setWidthHeight(disp_width, disp_height)
objAnnotation.setImagePath(os.path.join(folder, filename))
list_of_annotations.append(objAnnotation)
num_annotations = num_annotations + 1
return list_of_annotations, num_annotations
def load_annotation(self, image_num, annotation_num):
"""TODO: Docstring for load_annotation.
:returns: TODO
"""
logger = self.logger
images = self.list_of_annotations_out
list_annotations = images[image_num]
random_ann = list_annotations[annotation_num]
img_path = os.path.join(self.imagenet_folder, random_ann.image_path + '.JPEG')
if config.DEBUG:
img_path = "/media/nrupatunga/Data-Backup/DL/goturn/ILSVRC2014/ILSVRC2014_DET_train/ILSVRC2014_train_0005/ILSVRC2014_train_00059375.JPEG"
random_ann.bbox.x1 = 243
random_ann.bbox.y1 = 157
random_ann.bbox.x2 = 278
random_ann.bbox.y2 = 176
random_ann.disp_height = 375
random_ann.disp_width = 500
image = cv2.imread(img_path)
img_height = image.shape[0]
img_width = image.shape[1]
        sc_factor_1 = 1.0
        sc_factor_2 = 1.0
if img_height != random_ann.disp_height or img_width != random_ann.disp_width:
logger.info('Image Number = {}, Annotation Number = {}, Image file = {}'.format(image_num, annotation_num, img_path))
logger.info('Image Size = {} x {}'.format(img_width, img_height))
logger.info('Display Size = {} x {}'.format(random_ann.disp_width, random_ann.disp_height))
sc_factor_1 = (img_height * 1.) / random_ann.disp_height
sc_factor_2 = (img_width * 1.) / random_ann.disp_width
logger.info('Factor: {} {}'.format(sc_factor_1, sc_factor_2))
        bbox = random_ann.bbox
        # x coordinates scale with the width ratio (sc_factor_2), y with the
        # height ratio (sc_factor_1); the original scaled both axes by height
        bbox.x1 = bbox.x1 * sc_factor_2
        bbox.x2 = bbox.x2 * sc_factor_2
        bbox.y1 = bbox.y1 * sc_factor_1
        bbox.y2 = bbox.y2 * sc_factor_1
return image, bbox
if '__main__' == __name__:
logger = setup_logger(logfile=None)
objLoaderImgNet = loader_imagenet('/media/nrupatunga/data/datasets/ILSVRC2014/ILSVRC2014_DET_train/', '/media/nrupatunga/data/datasets/ILSVRC2014/ILSVRC2014_DET_bbox_train/', logger)
dict_list_of_annotations = objLoaderImgNet.loaderImageNetDet()
# xmediusmailrelayserver/servicehelpers.py (xmedius/xmedius-mailrelayserver)
import win32serviceutil
import win32service
import win32event
import servicemanager
import socket
import logging
from logging import handlers
import threading
from xmediusmailrelayserver import server
from xmediusmailrelayserver.server import start_server
from os.path import dirname, join
from os import mkdir, stat
import yaml
import io
def handle_command_line(argv):
return win32serviceutil.HandleCommandLine(XMRSServiceRunner, None, argv)
class XMRSServiceRunner(win32serviceutil.ServiceFramework):
_svc_name_ = "xmediusmailrelayserver"
_svc_display_name_ = "XMedius Mail Relay Server"
_svc_description_ = "Relays emails to chosen server according to recipient patterns"
IsStopping = False
def __init__(self,args):
win32serviceutil.ServiceFramework.__init__(self,args)
self._WaitStop = threading.Event()
socket.setdefaulttimeout(60)
def SvcStop(self):
self.IsStopping = True
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
logger = logging.getLogger("XMediusMailRelayServer")
logger.info('Service stopped.')
self._WaitStop.set()
def SvcDoRun(self):
self.ReportServiceStatus(win32service.SERVICE_START_PENDING)
servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE,
servicemanager.PYS_SERVICE_STARTED,
(self._svc_name_,''))
self.main()
self.ReportServiceStatus(win32service.SERVICE_RUNNING)
self._WaitStop.wait()
def main(self):
logger = logging.getLogger('XMediusMailRelayServer')
localpath = dirname(__file__)
logfile = join(localpath, 'trace', 'server.log')
logpath = dirname(logfile)
        try:
            stat(logpath)
        except OSError:  # log directory does not exist yet
            mkdir(logpath)
file_hdlr = logging.handlers.RotatingFileHandler(logfile, maxBytes=100*1024*1024, backupCount=10)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
file_hdlr.setFormatter(formatter)
logger.addHandler(file_hdlr)
config = yaml.safe_load(io.open(join(localpath, 'config.yml')))
if int(config['Debug']) == 1:
logging.getLogger('').setLevel(logging.DEBUG)
logging.getLogger('mail.log').addHandler(file_hdlr)
else:
logging.getLogger('').setLevel(logging.INFO)
logger.info('Running in service mode')
start_server()
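# Usage sketch (entry-point name assumed; this module defines no __main__):
# handle_command_line() wraps win32serviceutil.HandleCommandLine, so the
# service would typically be managed from an elevated prompt with
#
#     python entry_point.py install
#     python entry_point.py start
#     python entry_point.py stop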
# analysis/paper_plot.py (MGheini/unify-parameter-efficient-tuning)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import sys
import os
from collections import defaultdict
labelsize = 16
legendsize = 14
mpl.rcParams['xtick.labelsize'] = labelsize
mpl.rcParams['ytick.labelsize'] = labelsize
mpl.rcParams['axes.labelsize'] = labelsize
mpl.rcParams['axes.titlesize'] = labelsize
mpl.rcParams['font.size'] = labelsize
plt.style.use('seaborn-deep')
# plt.rcParams.update({
# "text.usetex": True,
# "font.family": "sans-serif",
# "font.sans-serif": ["Helvetica"]})
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['text.usetex'] = True
colormap = plt.cm.gist_ncar
def plot_ax(ax, params, ys, legends, ylabel, full, title=None, add_legend=True):
labelsize = 20
legendsize = 20
mpl.rcParams['xtick.labelsize'] = labelsize
mpl.rcParams['ytick.labelsize'] = labelsize
mpl.rcParams['axes.labelsize'] = labelsize
mpl.rcParams['axes.titlesize'] = labelsize
mpl.rcParams['font.size'] = labelsize
color_base = ["blue", "red", "green", "tab:orange", "purple", "tab:cyan"]
markers = ["o", "v", "s", "*", "8"]
sorted_xs = list(set([x for xs in params for x in xs]))
sorted_xs = sorted(sorted_xs)
xticks = [format(xx) for xx in sorted_xs]
for ii, (x, y) in enumerate(zip(params[::-1], ys[::-1])):
ax.plot(x, y, c=color_base[ii], marker=markers[ii], ms=10, linewidth=3)
ax.set_xlim(ax.get_xlim()[0], 15)
p1 = ax.get_xlim()
p1 = [p1[0]-0.1, p1[1]+1.0]
p2 = [full, full]
ax.plot(p1, p2, "--", ms=6, c="black", linewidth=2)
# ax.set_xscale('log', basex=10)
legends = legends[::-1] + ["Full Fine-tuning", "Ours"]
if add_legend:
ax.legend(legends, loc="best", fontsize=legendsize)
# ax.set_xticks(sorted_xs, xticks)
    if title is None:
        ax.set(xlabel=r"Fine-tuned Parameters (\%)", ylabel=ylabel)
    else:
        ax.set(title=title, xlabel=r"Fine-tuned Parameters (\%)", ylabel=ylabel)
ax.grid()
ax.set_facecolor("white")
def plot_intro():
color_base = ["blue", "purple", "green", "tab:orange", "red", "tab:cyan"]
# color_base = ["blue", "blue", "blue", "blue", "red", "tab:cyan"]
color_base = ["dodgerblue", "mediumvioletred", "olivedrab", "goldenrod", "firebrick", "tab:cyan"]
color_base = ["dodgerblue", "hotpink", "olivedrab", "goldenrod", "crimson", "tab:cyan"]
color_base = ["gray", "dodgerblue", "olivedrab", "hotpink", "crimson", "tab:cyan"]
markers = ["o", "v", "s", "*", "D"]
markers = ["o", "o", "o", "o", "D"]
fig, ax = plt.subplots(1, 1)
full = 21.94
legends = ["Full Fine-tuning", "BitFit", "PrefixTuning", "Adapter", "LoRA", "Ours"]
params = [0.08, 3.6, 12.3, 14.4, 6.7]
xsum = [17.32, 20.46, 20.98, 20.5, 21.9]
for ii, (param, r2) in enumerate(zip(params, xsum)):
ax.scatter(param, r2, c=color_base[ii], marker=markers[ii], edgecolor='black', linewidth=1, s=300)
ax.set_xlim(ax.get_xlim()[0], 15)
p1 = ax.get_xlim()
p1 = [p1[0]-0.1, p1[1]+1.0]
p2 = [full, full]
ax.plot(p1, p2, "--", ms=6, c="black", linewidth=2)
# ax.legend(legends, loc='best', fontsize=12)
ax.grid()
ax.set_facecolor("white")
ax.set(xlabel=r"Fine-tuned Parameters (\%)", ylabel="ROUGE-2")
fig.set_size_inches(5, 5)
fig.savefig("intro.pdf", bbox_inches='tight')
def compute_params(r):
base = 200 * 2 * 3 * 1024 * 12
base_params = 3.6
print(r * 1.0 / base * base_params)
return r * 1.0 / base * base_params
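# Worked example: a length-200 prefix touches 200 * 2 * 3 * 1024 * 12
# parameters, which the function pins to 3.6% of the model; a length-30 prefix
# is then 30/200 * 3.6 = 0.54% under the same linear scaling.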
def format(n):
return r"{:.1f}%".format(n)
def plot_overview():
d, L = 1024, 12
# fig, axes = plt.subplots(2, 1)
# percentage of parameters
params_bitfit = [0.08]
# params_prompt = [compute_params(d * 1), compute_params(d * 30), compute_params(d * 200), compute_params(d * 300)]
params_prompt = [compute_params(d * 300)]
params_pt = [compute_params(1 * 2 * 3 * d * L), compute_params(30 * 2 * 3 * d * L),
compute_params(200 * 2 * 3 * d * L), compute_params(512 * 2 * 3 * d * L)]
params_hously_adapter_ffn_ho = [compute_params(30 * 2 * 2 * d * L),
compute_params(200 * 2 * 2 * d * L),
compute_params(512 * 2 * 2 * d * L), compute_params(1024 * 2 * 2 * d * L)]
params_lora_attn = [compute_params(1*4*3*d*L), compute_params(30*4*3*d*L), compute_params(200*4*3*d*L),
compute_params(400*4*3*d*L)]
params_lora_ffn = [compute_params(1*10*2*d*L), compute_params(102*10*2*d*L), compute_params(120*10*2*d*L)]
params_hously_adapter_attn_ho = [compute_params(1 * 2 * 3 * d * L), compute_params(30 * 2 * 3 * d * L),
compute_params(200 * 2 * 3 * d * L),
compute_params(512 * 2 * 3 * d * L), compute_params(1024 * 2 * 3 * d * L)]
# print("prompt: 300")
# print(params_prompt)
# print("pt: 1, 30, 200, 512")
# print(params_pt)
# print("ho/hi ffn: 1, 30, 200, 512, 1024")
# print(params_hously_adapter_ffn_ho)
# print("ho/hi attn: 1, 30, 200, 512, 1024")
# print(params_hously_adapter_attn_ho)
# print("lora attn: 1, 30, 200, 400")
# print(params_lora_attn)
# print("lora ffn: 1, 102, 120")
# print(params_lora_ffn)
# xsum
xsum_bitfit = [17.32]
# xsum_prompt = [5.33, 14, 15.49, 15.98] # 1, 30?, 200, 300
# xsum_prompt = [15.98] # 300
xsum_pt = [18.14, 20.01, 20.46, 20.40] # 1, 30, 200, 512
xsum_hously_adapter_ffn_ho = [17, 18.81, 20.4, 20.58, 20.98] # 1, 30, 200?, 512?, 1024?
xsum_hously_adapter_ffn_ho = [18.81, 20.4, 20.58, 20.98] # 1, 30, 200?, 512?, 1024?
xsum_lora_attn = [17.4, 19.59, 20.29, 20.5] # 1, 30, 200, 400
# mt
mt_bitfit = [26.4]
# mt_prompt = [6.0, 16.7, 21] # 1, 30, 200
# mt_prompt = [21] # 200
mt_pt = [30.2, 35.2, 35.6, 35.1] # 1, 30, 200, 512
mt_hously_adapter_ffn_ho = [24.3, 33.0, 35.6, 36.3, 36.7] # 1, 30, 200, 512, 1024
mt_hously_adapter_ffn_ho = [33.0, 35.6, 36.3, 36.7] # 1, 30, 200, 512, 1024
mt_lora_attn = [25.5, 34.2, 36.2, 36.6] # 1, 30, 200, 400
# legends = ["BitFit (bias)", "PromptTuning (input)", "PrefixTuning (attn)", "Adapter (ffn)", "LoRA (attn)"]
# plot_ax(axes[0], [params_bitfit, params_prompt, params_pt, params_hously_adapter_ffn_ho, params_lora_attn],
# [xsum_bitfit, xsum_prompt, xsum_pt, xsum_hously_adapter_ffn_ho, xsum_lora_attn], legends, "ROUGE-2", full=21.94, ours=21.90,
# title="(a) abstractive text summarization", add_legend=False)
# plot_ax(axes[1], [params_bitfit, params_prompt, params_pt, params_hously_adapter_ffn_ho, params_lora_attn],
# [mt_bitfit, mt_prompt, mt_pt, mt_hously_adapter_ffn_ho, mt_lora_attn], legends, "BLEU", full=37.3, ours=37.5,
# title="(b) machine translation")
fig, ax = plt.subplots(1, 1)
legends = ["BitFit", "PrefixTuning", "Adapter", "LoRA"]
plot_ax(ax, [params_bitfit, params_pt, params_hously_adapter_ffn_ho, params_lora_attn],
[xsum_bitfit, xsum_pt, xsum_hously_adapter_ffn_ho, xsum_lora_attn], legends, "XSum ROUGE-2", full=21.94,
title=None, add_legend=False)
fig.set_size_inches(5, 5)
fig.savefig("xsum_overview.pdf", bbox_inches='tight')
fig, ax = plt.subplots(1, 1)
plot_ax(ax, [params_bitfit, params_pt, params_hously_adapter_ffn_ho, params_lora_attn],
[mt_bitfit, mt_pt, mt_hously_adapter_ffn_ho, mt_lora_attn], legends, "MT BLEU", full=37.3,
title=None)
fig.set_size_inches(5,5)
fig.savefig("mt_overview.pdf", bbox_inches='tight')
def plot_table4():
color_base = ["blue", "red", "green", "tab:orange", "tab:cyan", "purple", ]
markers = ["o", "v", "s", "*", "D"]
fig, ax = plt.subplots(1, 1)
ylabel = "XSum ROUGE-2"
params_pt = [3.6, 9.2]
params_lora = [7.2]
params_adapter = [3.6, 9.2]
r2_pt = [20.46, 20.40]
r2_lora = [20.29]
r2_adapter = [20.31, 20.83]
ffn_params_lora = [6.1]
ffn_r2_lora = [21.31]
ffn_params_adapter = [2.4, 6.1, 12.3]
ffn_r2_adapter = [20.66, 20.98, 21.24]
ax.plot(params_pt, r2_pt, c=color_base[0], marker=markers[0], ms=10, linewidth=2)
ax.plot(params_adapter, r2_adapter, c=color_base[0], marker=markers[1], ms=10, linewidth=2)
ax.plot(params_lora, r2_lora, c=color_base[0], marker=markers[2], ms=10, linewidth=2)
ax.plot(ffn_params_adapter, ffn_r2_adapter, "--", c=color_base[1], marker=markers[1], ms=10, linewidth=2)
ax.plot(ffn_params_lora, ffn_r2_lora, "--", c=color_base[1], marker=markers[2], ms=10, linewidth=2)
# legends = ["attn-PT", "attn-PA", "attn-LoRA", "ffn-PA",
# "ffn-LoRA"]
# ax.legend(legends, loc="lower right", fontsize=12)
ax.set(xlabel=r"Fine-tuned Parameters (\%)", ylabel=ylabel)
ax.grid()
ax.set_facecolor("white")
fig.set_size_inches(5, 3)
fig.savefig("xsum_modification_position.pdf", bbox_inches='tight')
fig, ax = plt.subplots(1, 1)
ylabel = "MT BLEU"
params_pt = [3.6, 9.2]
params_lora = [7.2]
params_adapter = [3.6, 9.2]
bleu_pt = [35.6, 35.1]
bleu_lora = [36.2]
bleu_adapter = [35.6, 36.2]
ffn_params_lora = [6.1]
ffn_params_adapter = [2.4, 6.1, 12.3]
ffn_bleu_lora = [36.5]
ffn_bleu_adapter = [36.4, 37.1, 37.3]
ax.plot(params_pt, bleu_pt, c=color_base[0], marker=markers[0], ms=10, linewidth=2)
ax.plot(params_adapter, bleu_adapter, c=color_base[0], marker=markers[1], ms=10, linewidth=2)
ax.plot(params_lora, bleu_lora, c=color_base[0], marker=markers[2], ms=10, linewidth=2)
ax.plot(ffn_params_adapter, ffn_bleu_adapter, "--", c=color_base[1], marker=markers[1], ms=10, linewidth=2)
ax.plot(ffn_params_lora, ffn_bleu_lora, "--", c=color_base[1], marker=markers[2], ms=10, linewidth=2)
# legends = ["attn-Prefix Tuning", "attn-Parallel Adapter", "attn-LoRA", "ffn-Parallel Adaptaer", "ffn-LoRA"]
# ax.legend(legends, loc="lower right", fontsize=12, bbox_to_anchor=(1.27, 0.005))
legends = ["Prefix (attn)", "PA (attn)", "LoRA (attn)", "PA (ffn)", "LoRA (ffn)"]
ax.legend(legends, loc="lower right", fontsize=12, bbox_to_anchor=(1.11, 0.00))
ax.set(xlabel=r"Fine-tuned Parameters (\%)", ylabel=ylabel)
ax.grid()
ax.set_facecolor("white")
fig.set_size_inches(5, 3)
fig.savefig("mt_modification_position.pdf", bbox_inches='tight')
# plot_overview()
plot_intro()
# plot_table4()
# dbix/__init__.py (alexbodn/python-dbix)
# -*- coding: utf-8 -*-
"""Top-level package for dbix."""
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.3.0'
# python/caty/testutil/setup.py (hidaruma/caty)
#!/usr/bin/env python
#coding:utf-8
import cmd, glob, os
from caty.shell.system import setup_caty, StdStream, StreamWrapper
from caty.core.script.builder import CommandCombinator
from caty.core.script.parser import NothingTodo
from caty.jsontools import pp
from caty.mafs.authorization import AuthoriToken
from caty.session.value import create_env_skelton, create_variable
import caty
class TestEnv(object):
def __init__(self, site):
facilities = site.create_facilities(None)
site.init_env(facilities, True, [u'test', u'console'], {})
self.site = site
self.registrar = site.registrar
self.shell_interpreter = site.interpreter.shell_mode(facilities)
self.file_interpreter = site.interpreter.shell_mode(facilities)
self.interpreter = self.file_interpreter
facilities['interpreter'] = self.interpreter
self.facilities = facilities
    def build_command(self, cls, opts, args):  # 'self' was missing; the body uses self.facilities
c = cls(opts, args)
c.set_facility(self.facilities)
c._prepare()
return c
def setup_testsite():
s = setup_caty()
return TestEnv(s)
# tensorflow_graphics/rendering/light/tests/point_light_test.py (drebain/graphics)
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for point light."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl.testing import flagsaver
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_graphics.rendering.light import point_light
from tensorflow_graphics.util import test_case
def fake_brdf(incoming_light_direction, outgoing_light_direction,
surface_point_normal):
del incoming_light_direction, surface_point_normal # Unused.
return outgoing_light_direction
def returning_zeros_brdf(incoming_light_direction, outgoing_light_direction,
surface_point_normal):
del incoming_light_direction, outgoing_light_direction # Unused.
return tf.zeros_like(surface_point_normal)
def random_tensor(tensor_shape):
return np.random.uniform(low=-100.0, high=100.0, size=tensor_shape)
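# Worked reading of the first preset case below (derived from its expected
# values, not from library documentation): the light sits at [0, 0, 1] above a
# surface point at the origin with normal [0, 0, 1], so cos(theta) = 1 and the
# light distance is 1. With the point-light falloff 1/(4*pi*d^2) and fake_brdf
# returning the normalized outgoing direction [1, 0, 0] (towards the observer
# at [2, 0, 0]), the estimated radiance is [1/(4*pi), 0, 0].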
class PointLightTest(test_case.TestCase):
@parameterized.parameters(
# Light direction is parallel to the surface normal.
([1.], [[0., 0., 1.]], [2., 0., 0.], [1.0 / (4. * math.pi), 0., 0.]),
# Light direction is parallel to the surface normal and the reflected
# light fall off is included in the calculation.
([1.], [[0., 0., 1.]], [2., 0., 0.], \
[0.25 / (4. * math.pi), 0., 0.], True),
# Light direction is perpendicular to the surface normal.
([1.], [[3., 0., 0.]], [1., 2., 3.], [0., 0., 0.]),
# Angle between surface normal and the incoming light direction is pi/3.
([1.], [[math.sqrt(3), 0., 1.]], \
[0., 1., 0.], [0., 0.125 / (4. * math.pi), 0.]),
# Angle between surface normal and the incoming light direction is pi/4.
([1.], [[0., 1., 1.]], [1., 1., 0.],
[0.25 / (4. * math.pi), 0.25 / (4. * math.pi), 0.]),
# Light has 3 radiances.
([2., 4., 1.], [[0., 1., 1.]], [1., 1., 0.],
[0.5 / (4. * math.pi), 1. / (4. * math.pi), 0.]),
# Light is behind the surface.
([1.], [[0., 0., -2.]], [7., 0., 0.], [0., 0., 0.]),
# Observation point is behind the surface.
([1.], [[0., 0., 2.]], [5., 0., -2.], [0., 0., 0.]),
# Light and observation point are behind the surface.
([1.], [[0., 0., -2.]], [5., 0., -2.], [0., 0., 0.]),
)
def test_estimate_radiance_preset(self,
light_radiance,
light_pos,
observation_pos,
expected_result,
reflected_light_fall_off=False):
"""Tests the output of estimate radiance function with various parameters.
In this test the point on the surface is always [0, 0, 0] ,the surface
normal is [0, 0, 1] and the fake brdf function returns the (normalized)
direction of the outgoing light as its output.
Args:
light_radiance: An array of size K representing the point light radiances.
light_pos: An array of size [3,] representing the point light positions.
observation_pos: An array of size [3,] representing the observation point.
expected_result: An array of size [3,] representing the expected result of
the estimated reflected radiance function.
reflected_light_fall_off: A boolean specifying whether or not to include
the fall off of the reflected light in the calculation. Defaults to
False.
"""
tensor_size = np.random.randint(1, 3) + 1
tensor_shape = np.random.randint(1, 10, size=tensor_size).tolist()
lights_tensor_size = np.random.randint(1, 3) + 1
lights_tensor_shape = np.random.randint(
1, 10, size=lights_tensor_size).tolist()
point_light_radiance = np.tile(light_radiance, lights_tensor_shape + [1])
point_light_position = np.tile(light_pos, lights_tensor_shape + [1])
surface_point_normal = np.tile([0.0, 0.0, 1.0], tensor_shape + [1])
surface_point_position = np.tile([0.0, 0.0, 0.0], tensor_shape + [1])
observation_point = np.tile(observation_pos, tensor_shape + [1])
expected = np.tile(expected_result,
tensor_shape + lights_tensor_shape + [1])
pred = point_light.estimate_radiance(
point_light_radiance,
point_light_position,
surface_point_position,
surface_point_normal,
observation_point,
fake_brdf,
reflected_light_fall_off=reflected_light_fall_off)
self.assertAllClose(expected, pred)
@flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
def test_estimate_radiance_jacobian_random(self):
"""Tests the Jacobian of the point lighting equation."""
tensor_size = np.random.randint(1, 3)
tensor_shape = np.random.randint(1, 10, size=tensor_size).tolist()
light_tensor_size = np.random.randint(1, 3)
lights_tensor_shape = np.random.randint(
1, 10, size=light_tensor_size).tolist()
point_light_radiance_init = random_tensor(lights_tensor_shape + [1])
point_light_position_init = random_tensor(lights_tensor_shape + [3])
surface_point_position_init = random_tensor(tensor_shape + [3])
surface_point_normal_init = random_tensor(tensor_shape + [3])
observation_point_init = random_tensor(tensor_shape + [3])
def estimate_radiance_fn(point_light_position, surface_point_position,
surface_point_normal, observation_point):
return point_light.estimate_radiance(point_light_radiance_init,
point_light_position,
surface_point_position,
surface_point_normal,
observation_point, fake_brdf)
self.assert_jacobian_is_correct_fn(estimate_radiance_fn, [
point_light_position_init, surface_point_position_init,
surface_point_normal_init, observation_point_init
])
@flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
def test_estimate_radiance_jacobian_preset(self):
"""Tests the Jacobian of the point lighting equation.
Verifies that the Jacobian of the point lighting equation is correct when
the light direction is orthogonal to the surface normal.
"""
delta = 1e-5
point_light_radiance_init = np.array(1.0).reshape((1, 1))
point_light_position_init = np.array((delta, 1.0, 0.0)).reshape((1, 3))
surface_point_position_init = np.array((0.0, 0.0, 0.0))
surface_point_normal_init = np.array((1.0, 0.0, 0.0))
observation_point_init = np.array((delta, 3.0, 0.0))
def estimate_radiance_fn(point_light_position, surface_point_position,
surface_point_normal, observation_point):
return point_light.estimate_radiance(point_light_radiance_init,
point_light_position,
surface_point_position,
surface_point_normal,
observation_point, fake_brdf)
self.assert_jacobian_is_correct_fn(estimate_radiance_fn, [
point_light_position_init, surface_point_position_init,
surface_point_normal_init, observation_point_init
])
@parameterized.parameters(
((1, 1), (1, 3), (3,), (3,), (3,)),
((4, 1, 1), (4, 1, 3), (1, 3), (1, 3), (1, 3)),
((3, 2, 1), (3, 2, 3), (2, 3), (2, 3), (2, 3)),
((1, 1), (3,), (1, 3), (1, 2, 3), (1, 3)),
((4, 5, 1), (3, 4, 5, 3), (1, 3), (1, 2, 2, 3), (1, 2, 3)),
((1,), (1, 2, 2, 3), (1, 2, 3), (1, 3), (3,)),
)
def test_estimate_radiance_shape_exception_not_raised(self, *shape):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(
point_light.estimate_radiance, shape, brdf=returning_zeros_brdf)
@parameterized.parameters(
("must have exactly 3 dimensions in axis -1", (1, 1), (1, 1), (3,), (3,),
(3,)),
("must have exactly 3 dimensions in axis -1", (5, 1), (5, 2), (3,), (3,),
(3,)),
("must have exactly 3 dimensions in axis -1", (1, 1), (1, 4), (3,), (3,),
(3,)),
("must have exactly 3 dimensions in axis -1", (1, 1), (1, 3), (1,), (3,),
(3,)),
("must have exactly 3 dimensions in axis -1", (1, 1), (1, 3), (2,), (3,),
(3,)),
("must have exactly 3 dimensions in axis -1", (1, 1), (1, 3), (4,), (3,),
(3,)),
("must have exactly 3 dimensions in axis -1", (1, 1), (1, 3), (3,), (1,),
(3,)),
("must have exactly 3 dimensions in axis -1", (1, 1), (1, 3), (3,), (2,),
(3,)),
("must have exactly 3 dimensions in axis -1", (1, 1), (1, 3), (3,), (4,),
(3,)),
("must have exactly 3 dimensions in axis -1", (1, 1), (1, 3), (3,), (3,),
(4,)),
("must have exactly 3 dimensions in axis -1", (1, 1), (1, 3), (3,), (3,),
(2,)),
("must have exactly 3 dimensions in axis -1", (1, 1), (1, 3), (3,), (3,),
(1,)),
("Not all batch dimensions are broadcast-compatible.", (1, 3, 1),
(1, 3, 3), (2, 3), (4, 3), (3,)),
("Not all batch dimensions are broadcast-compatible.", (1, 3, 1),
(1, 4, 3), (2, 3), (3,), (3,)),
)
def test_estimate_radiance_shape_exception_raised(self, error_msg, *shape):
"""Tests that the shape exception is raised."""
self.assert_exception_is_raised(
point_light.estimate_radiance,
error_msg,
shape,
brdf=returning_zeros_brdf)
def test_estimate_radiance_value_exceptions_raised(self):
"""Tests that the value exceptions are raised correctly."""
point_light_radiance = random_tensor(tensor_shape=(1, 1))
point_light_position = random_tensor(tensor_shape=(1, 3))
surface_point_position = random_tensor(tensor_shape=(3,))
surface_point_normal = random_tensor(tensor_shape=(3,))
observation_point = random_tensor(tensor_shape=(3,))
# Verify that an InvalidArgumentError is raised as the given
# surface_point_normal is not normalized.
with self.assertRaises(tf.errors.InvalidArgumentError):
self.evaluate(
point_light.estimate_radiance(point_light_radiance,
point_light_position,
surface_point_position,
surface_point_normal, observation_point,
returning_zeros_brdf))
if __name__ == "__main__":
test_case.main()
# drive_dedup.py (aaronshim/drive-api-scripts)
from __future__ import print_function
from apiclient import discovery, errors
from httplib2 import Http
from oauth2client import file, client, tools
import json
# Set up auth for the API
SCOPES = 'https://www.googleapis.com/auth/drive.readonly.metadata'
# SCOPES = 'https://www.googleapis.com/auth/drive'
store = file.Storage('storage.json') # This stores the OAuth tokens.
creds = store.get()
if not creds or creds.invalid:
flow = client.flow_from_clientsecrets('client_id.json', SCOPES)
creds = tools.run_flow(flow, store)
DRIVE = discovery.build('drive', 'v3', http=creds.authorize(Http()))
# Query! (While chasing the next page tokens.)
has_next = True
files_by_hash = {}
files_with_md5 = 0 # How many files with valid MD5's did we count?
total_files_seen = 0 # How many files were seen, MD5 or not?
request = DRIVE.files().list(fields='*')
response = request.execute()
while has_next:
files = response.get('files', [])
for f in files:
if 'md5Checksum' in f:
if f['md5Checksum'] in files_by_hash:
files_by_hash[f['md5Checksum']].append(f)
print("%s conflicts with %s, both with a MD5 hash of %s." % (f['id'], files_by_hash[f['md5Checksum']][0]['id'], f['md5Checksum']))
else:
files_by_hash[f['md5Checksum']] = [f]
            files_with_md5 += 1
total_files_seen += len(files)
print("%d files analyzed." % total_files_seen)
if response.get('nextPageToken'):
request = DRIVE.files().list_next(previous_request=request, previous_response=response)
response = request.execute()
else:
has_next = False
print("%d total files checked for MD5." % files_with_md5)
with open('md5dedup.json', 'w') as outfile:
    json.dump(files_by_hash, outfile)
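# Follow-up sketch (illustrative, not part of the original script): summarize
# the duplicate groups collected in the hash map above.
duplicates = {h: fs for h, fs in files_by_hash.items() if len(fs) > 1}
print("%d MD5 hashes have more than one file." % len(duplicates))
for md5, fs in duplicates.items():
    print(md5, [f.get('name', f['id']) for f in fs])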
# src/components/decode_encode/decrypt.py (DuckyMomo20012/flask-server)
from function_support import *
import cv2  # used directly below; not guaranteed to come from the star import
# path to the encrypted image file
path_Name = "Encrypt.png"
# output file name for the decrypted image
save_file = "Decrypt.png"
# quotients saved when reducing values mod 256 to keep color codes in [0, 255]
quotient = "quotient.txt"
if __name__ == "__main__":
img = Decrypted(
imgEncryptedPath=path_Name,
privateKeyPath="rsa.txt",
imgDecryptedSaveDst=save_file,
quotientPath=quotient,
)
cv2.imshow("image", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# planner/progression/mcts.py (yijiangh/pyplanners)
import random
import math
import numpy
from planner.state_space import test_goal, test_parent_operator, StateSpace, Solution, Plan
from misc.numerical import INF
from misc.functions import randomize
def random_policy(current_vertex):
edges = current_vertex.get_successors()
if not edges:
return None # current_vertex
return random.choice(edges)
def greedy_policy(current_vertex, weight=1, shuffle=True):
# TODO: function that returns the policy
# TODO: use evaluators
edges = current_vertex.get_successors()
if not edges:
return None
if shuffle:
edges = randomize(edges)
return min(edges, key=lambda e: e.cost + weight*e.sink.get_h_cost())
##################################################
def random_walk(start, goal, generator, _=None, policy=random_policy, max_steps=INF, debug=None, **kwargs):
space = StateSpace(generator, start, max_extensions=INF, **kwargs)
current_vertex = space.root
edge_path = []
while space.is_active() and len(edge_path) < max_steps:
#current_vertex.generate_all()
space.new_iteration(current_vertex)
if debug is not None:
debug(current_vertex)
if test_goal(current_vertex, goal):
operator_path = [edge.operator for edge in edge_path]
plan = Plan(start, operator_path)
return Solution(plan, space)
#return space.solution(current_vertex)
edge = policy(current_vertex)
if edge is None:
break
edge_path.append(edge)
current_vertex = edge.sink
return space.failure()
##################################################
MAX_ROLLOUT = 100 # 100 | INF
class TreeNode(object):
def __init__(self, vertex, parent_edge=None, parent_node=None):
self.vertex = vertex
self.parent_edge = parent_edge
self.parent_node = parent_node
self.rollouts = [] # TODO: rename to estimates?
self.children = [] # TODO: map from edges to nodes
if self.parent_node is not None:
self.parent_node.children.append(self)
def is_leaf(self):
return not bool(self.children)
# def is_explored(self):
# return set(self.vertex.get_successors()) == {child.vertex for child in self.children}
def num_rollouts(self):
return len(self.rollouts)
def get_estimate(self):
if not self.rollouts:
return INF
return numpy.average(self.rollouts)
def get_uct(self, c=math.sqrt(2)):
# https://en.wikipedia.org/wiki/Monte_Carlo_tree_search
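        # UCT(n) = -avg_rollout(n) + c * sqrt(ln(N_parent) / N_n): costs are
        # minimized here, so the exploitation term is the negated cost estimate
        # while the second term rewards rarely visited children.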
estimate = -self.get_estimate()
if (self.parent_node is None) or (c == 0):
return estimate
diverse = math.sqrt(math.log(self.parent_node.num_rollouts()) / self.num_rollouts())
if c == INF:
return diverse
return estimate + c*diverse
def ancestors(self):
if self.parent_node is None:
return []
return self.parent_node.ancestors() + [self.parent_node]
def descendants(self):
nodes = [self]
for child in self.children:
nodes.extend(child.descendants())
return nodes
def random_leaf(self):
if self.is_leaf(): # is_leaf | is_explored
return self
child = random.choice(self.children)
return child.random_leaf()
def uniform_leaf(self):
leaves = list(filter(TreeNode.is_leaf, self.descendants()))
return random.choice(leaves)
def uct_leaf(self, **kwargs):
if self.is_leaf(): # is_leaf | is_explored
return self
best_child = max(self.children, key=lambda n: n.get_uct(**kwargs))
return best_child.uct_leaf()
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, self.vertex)
##################################################
def goal_rollout(vertex, goal):
if test_goal(vertex, goal):
return 0
return 1 # TODO: min action cost
def deadend_rollout(vertex, goal):
if test_goal(vertex, goal):
return 0
if not vertex.get_successors():
return MAX_ROLLOUT
return 1
def heuristic_rollout(vertex, goal):
return vertex.get_h_cost()
def simulation(start_vertex, goal, policy=random_policy, max_steps=5):
current_vertex = start_vertex
path = []
while len(path) < max_steps:
if test_goal(current_vertex, goal):
# TODO: greedy version
break
edge = policy(current_vertex)
if edge is None:
break
path.append(edge)
current_vertex = edge.sink
return path
def simulated_rollout(vertex, goal, evaluator=deadend_rollout, **kwargs):
path = simulation(vertex, goal, **kwargs)
cost = 0
estimates = [cost + evaluator(vertex, goal)]
for edge in path:
cost += edge.cost
estimates.append(cost + evaluator(vertex, goal))
return estimates[-1]
#return numpy.average(estimates)
def simulated_rollouts(vertex, goal, num=1, **kwargs):
assert num >= 1
return numpy.average([simulated_rollout(vertex, goal, **kwargs) for _ in range(num)])
##################################################
def mcts(start, goal, generator, _=None, debug=None, **kwargs):
# TODO: dynamic programming instead of independent tree
# https://gist.github.com/qpwo/c538c6f73727e254fdc7fab81024f6e1
# https://github.com/pbsinclair42/MCTS/blob/master/mcts.py
# https://github.com/int8/monte-carlo-tree-search/blob/master/mctspy/tree/search.py
space = StateSpace(generator, start, max_extensions=INF, **kwargs)
root = TreeNode(space.root)
while space.is_active():
#leaf = root.uniform_leaf()
#leaf = root.random_leaf()
leaf = root.uct_leaf()
vertex = leaf.vertex
space.new_iteration(vertex)
if debug is not None:
debug(vertex)
if test_goal(vertex, goal):
return space.solution(vertex)
for edge in vertex.get_successors(): # TODO: sample a subset
new_vertex = edge.sink
if test_goal(new_vertex, goal):
return space.solution(new_vertex)
node = TreeNode(new_vertex, parent_edge=edge, parent_node=leaf)
#rollout = goal_rollout(new_vertex, goal)
#rollout = deadend_rollout(new_vertex, goal)
#rollout = heuristic_rollout(new_vertex, goal)
#rollout = simulated_rollout(new_vertex, goal)
rollout = simulated_rollouts(new_vertex, goal, num=3)
for ancestor in reversed(node.ancestors() + [node]):
ancestor.rollouts.append(rollout)
if ancestor.parent_edge is not None:
rollout += ancestor.parent_edge.cost
return space.failure()
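# Usage sketch (signatures inferred from this module, domain objects assumed):
#
#     solution = mcts(start_state, goal, successor_generator)
#     if solution.plan is not None:
#         print(solution.plan.operators)
#
# `random_walk` has the same calling convention and can serve as a baseline.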
# app/main/views/templates.py (karlchillmaid/notifications-admin)
from datetime import datetime, timedelta
from string import ascii_uppercase
from dateutil.parser import parse
from flask import abort, flash, redirect, render_template, request, url_for
from flask_login import current_user, login_required
from markupsafe import Markup
from notifications_python_client.errors import HTTPError
from notifications_utils.formatters import nl2br
from notifications_utils.recipients import first_column_headings
from app import current_service, service_api_client, template_statistics_client
from app.main import main
from app.main.forms import (
ChooseTemplateType,
EmailTemplateForm,
LetterTemplateForm,
SearchTemplatesForm,
SetTemplateSenderForm,
SMSTemplateForm,
)
from app.main.views.send import get_example_csv_rows, get_sender_details
from app.template_previews import TemplatePreview, get_page_count_for_letter
from app.utils import (
email_or_sms_not_enabled,
get_template,
user_has_permissions,
)
form_objects = {
'email': EmailTemplateForm,
'sms': SMSTemplateForm,
'letter': LetterTemplateForm
}
page_headings = {
'email': 'email',
'sms': 'text message'
}
@main.route("/services/<service_id>/templates/<uuid:template_id>")
@login_required
@user_has_permissions('view_activity', 'send_messages')
def view_template(service_id, template_id):
if not current_user.has_permissions('view_activity'):
return redirect(url_for(
'.send_one_off', service_id=service_id, template_id=template_id
))
template = service_api_client.get_service_template(service_id, str(template_id))['data']
if template["template_type"] == "letter":
letter_contact_details = service_api_client.get_letter_contacts(service_id)
default_letter_contact_block_id = next(
(x['id'] for x in letter_contact_details if x['is_default']), None
)
else:
default_letter_contact_block_id = None
return render_template(
'views/templates/template.html',
template=get_template(
template,
current_service,
expand_emails=True,
letter_preview_url=url_for(
'.view_letter_template_preview',
service_id=service_id,
template_id=template_id,
filetype='png',
),
show_recipient=True,
page_count=get_page_count_for_letter(template),
),
default_letter_contact_block_id=default_letter_contact_block_id,
)
@main.route("/services/<service_id>/start-tour/<uuid:template_id>")
@login_required
@user_has_permissions('view_activity')
def start_tour(service_id, template_id):
template = service_api_client.get_service_template(service_id, str(template_id))['data']
if template['template_type'] != 'sms':
abort(404)
return render_template(
'views/templates/start-tour.html',
template=get_template(
template,
current_service,
show_recipient=True,
),
help='1',
)
@main.route("/services/<service_id>/templates")
@main.route("/services/<service_id>/templates/<template_type>")
@login_required
@user_has_permissions('view_activity', 'send_messages')
def choose_template(service_id, template_type='all'):
templates = service_api_client.get_service_templates(service_id)['data']
letters_available = (
'letter' in current_service['permissions'] and
current_user.has_permissions('view_activity')
)
available_template_types = list(filter(None, (
'email',
'sms',
'letter' if letters_available else None,
)))
templates = [
template for template in templates
if template['template_type'] in available_template_types
]
has_multiple_template_types = len({
template['template_type'] for template in templates
}) > 1
template_nav_items = [
(label, key, url_for('.choose_template', service_id=current_service['id'], template_type=key), '')
for label, key in filter(None, [
('All', 'all'),
('Text message', 'sms'),
('Email', 'email'),
('Letter', 'letter') if letters_available else None,
])
]
templates_on_page = [
template for template in templates
if (
template_type in ['all', template['template_type']] and
template['template_type'] in available_template_types
)
]
if current_user.has_permissions('view_activity'):
page_title = 'Templates'
else:
page_title = 'Choose a template'
return render_template(
'views/templates/choose.html',
page_title=page_title,
templates=templates_on_page,
show_search_box=(len(templates_on_page) > 7),
show_template_nav=has_multiple_template_types and (len(templates) > 2),
template_nav_items=template_nav_items,
template_type=template_type,
search_form=SearchTemplatesForm(),
)
@main.route("/services/<service_id>/templates/<template_id>.<filetype>")
@login_required
@user_has_permissions('view_activity', 'send_messages')
def view_letter_template_preview(service_id, template_id, filetype):
if filetype not in ('pdf', 'png'):
abort(404)
db_template = service_api_client.get_service_template(service_id, template_id)['data']
return TemplatePreview.from_database_object(db_template, filetype, page=request.args.get('page'))
def _view_template_version(service_id, template_id, version, letters_as_pdf=False):
return dict(template=get_template(
service_api_client.get_service_template(service_id, template_id, version=version)['data'],
current_service,
expand_emails=True,
letter_preview_url=url_for(
'.view_template_version_preview',
service_id=service_id,
template_id=template_id,
version=version,
filetype='png',
) if not letters_as_pdf else None
))
@main.route("/services/<service_id>/templates/<template_id>/version/<int:version>")
@login_required
@user_has_permissions('view_activity')
def view_template_version(service_id, template_id, version):
return render_template(
'views/templates/template_history.html',
**_view_template_version(service_id=service_id, template_id=template_id, version=version)
)
@main.route("/services/<service_id>/templates/<template_id>/version/<int:version>.<filetype>")
@login_required
@user_has_permissions('view_activity')
def view_template_version_preview(service_id, template_id, version, filetype):
db_template = service_api_client.get_service_template(service_id, template_id, version=version)['data']
return TemplatePreview.from_database_object(db_template, filetype)
@main.route("/services/<service_id>/templates/add", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_templates')
def add_template_by_type(service_id):
form = ChooseTemplateType(
include_letters='letter' in current_service['permissions']
)
if form.validate_on_submit():
if form.template_type.data == 'letter':
blank_letter = service_api_client.create_service_template(
'Untitled',
'letter',
'Body',
service_id,
'Main heading',
'normal',
)
return redirect(url_for(
'.view_template',
service_id=service_id,
template_id=blank_letter['data']['id'],
))
if email_or_sms_not_enabled(form.template_type.data, current_service['permissions']):
return redirect(url_for(
'.action_blocked',
service_id=service_id,
notification_type=form.template_type.data,
return_to='add_new_template',
template_id='0'
))
else:
return redirect(url_for(
'.add_service_template',
service_id=service_id,
template_type=form.template_type.data,
))
return render_template('views/templates/add.html', form=form)
@main.route("/services/<service_id>/templates/action-blocked/<notification_type>/<return_to>/<template_id>")
@login_required
@user_has_permissions('manage_templates')
def action_blocked(service_id, notification_type, return_to, template_id):
if notification_type == 'sms':
notification_type = 'text messages'
elif notification_type == 'email':
notification_type = 'emails'
return render_template(
'views/templates/action_blocked.html',
service_id=service_id,
notification_type=notification_type,
return_to=return_to,
template_id=template_id
)
@main.route("/services/<service_id>/templates/add-<template_type>", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_templates')
def add_service_template(service_id, template_type):
if template_type not in ['sms', 'email', 'letter']:
abort(404)
if 'letter' not in current_service['permissions'] and template_type == 'letter':
abort(403)
form = form_objects[template_type]()
if form.validate_on_submit():
if form.process_type.data == 'priority':
abort_403_if_not_admin_user()
try:
new_template = service_api_client.create_service_template(
form.name.data,
template_type,
form.template_content.data,
service_id,
form.subject.data if hasattr(form, 'subject') else None,
form.process_type.data
)
except HTTPError as e:
if (
e.status_code == 400 and
'content' in e.message and
any(['character count greater than' in x for x in e.message['content']])
):
form.template_content.errors.extend(e.message['content'])
else:
raise e
else:
return redirect(
url_for('.view_template', service_id=service_id, template_id=new_template['data']['id'])
)
if email_or_sms_not_enabled(template_type, current_service['permissions']):
return redirect(url_for(
'.action_blocked',
service_id=service_id,
notification_type=template_type,
return_to='templates',
template_id='0'
))
else:
return render_template(
'views/edit-{}-template.html'.format(template_type),
form=form,
template_type=template_type,
heading_action='Add',
)
def abort_403_if_not_admin_user():
if not current_user.platform_admin:
abort(403)
@main.route("/services/<service_id>/templates/<template_id>/edit", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_templates')
def edit_service_template(service_id, template_id):
template = service_api_client.get_service_template(service_id, template_id)['data']
template['template_content'] = template['content']
form = form_objects[template['template_type']](**template)
if form.validate_on_submit():
if form.process_type.data != template['process_type']:
abort_403_if_not_admin_user()
subject = form.subject.data if hasattr(form, 'subject') else None
new_template = get_template({
'name': form.name.data,
'content': form.template_content.data,
'subject': subject,
'template_type': template['template_type'],
'id': template['id'],
'process_type': form.process_type.data,
'reply_to_text': template['reply_to_text']
}, current_service)
template_change = get_template(template, current_service).compare_to(new_template)
if template_change.placeholders_added and not request.form.get('confirm'):
example_column_headings = (
first_column_headings[new_template.template_type] +
list(new_template.placeholders)
)
return render_template(
'views/templates/breaking-change.html',
template_change=template_change,
new_template=new_template,
column_headings=list(ascii_uppercase[:len(example_column_headings)]),
example_rows=[
example_column_headings,
get_example_csv_rows(new_template),
get_example_csv_rows(new_template)
],
form=form
)
try:
service_api_client.update_service_template(
template_id,
form.name.data,
template['template_type'],
form.template_content.data,
service_id,
subject,
form.process_type.data
)
except HTTPError as e:
if e.status_code == 400:
if 'content' in e.message and any(['character count greater than' in x for x in e.message['content']]):
form.template_content.errors.extend(e.message['content'])
else:
raise e
else:
raise e
else:
return redirect(url_for(
'.view_template',
service_id=service_id,
template_id=template_id
))
db_template = service_api_client.get_service_template(service_id, template_id)['data']
if email_or_sms_not_enabled(db_template['template_type'], current_service['permissions']):
return redirect(url_for(
'.action_blocked',
service_id=service_id,
notification_type=db_template['template_type'],
return_to='view_template',
template_id=template_id
))
else:
return render_template(
'views/edit-{}-template.html'.format(template['template_type']),
form=form,
template_id=template_id,
template_type=template['template_type'],
heading_action='Edit'
)
@main.route("/services/<service_id>/templates/<template_id>/delete", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_templates')
def delete_service_template(service_id, template_id):
template = service_api_client.get_service_template(service_id, template_id)['data']
if request.method == 'POST':
service_api_client.delete_service_template(service_id, template_id)
return redirect(url_for(
'.choose_template',
service_id=service_id,
))
try:
last_used_notification = template_statistics_client.get_template_statistics_for_template(
service_id, template['id']
)
message = 'It was last used {} ago'.format(
'more than seven days' if not last_used_notification else get_human_readable_delta(
parse(last_used_notification['created_at']).replace(tzinfo=None),
datetime.utcnow()
)
)
except HTTPError as e:
if e.status_code == 404:
message = None
else:
raise e
return render_template(
'views/templates/template.html',
template_delete_confirmation_message=(
'Are you sure you want to delete {}?'.format(template['name']),
message,
),
template=get_template(
template,
current_service,
expand_emails=True,
letter_preview_url=url_for(
'.view_letter_template_preview',
service_id=service_id,
template_id=template['id'],
filetype='png',
),
show_recipient=True,
),
)
@main.route("/services/<service_id>/templates/<template_id>/redact", methods=['GET'])
@login_required
@user_has_permissions('manage_templates')
def confirm_redact_template(service_id, template_id):
template = service_api_client.get_service_template(service_id, template_id)['data']
return render_template(
'views/templates/template.html',
template=get_template(
template,
current_service,
expand_emails=True,
letter_preview_url=url_for(
'.view_letter_template_preview',
service_id=service_id,
template_id=template_id,
filetype='png',
),
show_recipient=True,
),
show_redaction_message=True,
)
@main.route("/services/<service_id>/templates/<template_id>/redact", methods=['POST'])
@login_required
@user_has_permissions('manage_templates')
def redact_template(service_id, template_id):
service_api_client.redact_service_template(service_id, template_id)
flash(
'Personalised content will be hidden for messages sent with this template',
'default_with_tick'
)
return redirect(url_for(
'.view_template',
service_id=service_id,
template_id=template_id,
))
@main.route('/services/<service_id>/templates/<template_id>/versions')
@login_required
@user_has_permissions('view_activity')
def view_template_versions(service_id, template_id):
return render_template(
'views/templates/choose_history.html',
versions=[
get_template(
template,
current_service,
expand_emails=True,
letter_preview_url=url_for(
'.view_template_version_preview',
service_id=service_id,
template_id=template_id,
version=template['version'],
filetype='png',
)
)
for template in service_api_client.get_service_template_versions(service_id, template_id)['data']
]
)
@main.route('/services/<service_id>/templates/<template_id>/set-template-sender', methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_templates')
def set_template_sender(service_id, template_id):
template = service_api_client.get_service_template(service_id, template_id)['data']
sender_details = get_template_sender_form_dict(service_id, template)
no_senders = sender_details.get('no_senders', False)
form = SetTemplateSenderForm(
sender=sender_details['current_choice'],
sender_choices=sender_details['value_and_label'],
)
option_hints = {sender_details['default_sender']: '(Default)'}
if form.validate_on_submit():
service_api_client.update_service_template_sender(
service_id,
template_id,
form.sender.data if form.sender.data else None,
)
return redirect(url_for('.view_template', service_id=service_id, template_id=template_id))
return render_template(
'views/templates/set-template-sender.html',
form=form,
template_id=template_id,
no_senders=no_senders,
option_hints=option_hints
)
def get_template_sender_form_dict(service_id, template):
context = {
'email': {
'field_name': 'email_address'
},
'letter': {
'field_name': 'contact_block'
},
'sms': {
'field_name': 'sms_sender'
}
}[template['template_type']]
sender_format = context['field_name']
service_senders = get_sender_details(service_id, template['template_type'])
context['default_sender'] = next(
(x['id'] for x in service_senders if x['is_default']), "Not set"
)
if not service_senders:
context['no_senders'] = True
context['value_and_label'] = [(sender['id'], Markup(nl2br(sender[sender_format]))) for sender in service_senders]
context['value_and_label'].insert(0, ('', 'Blank')) # Add blank option to start of list
context['current_choice'] = template['service_letter_contact'] if template['service_letter_contact'] else ''
return context
def get_last_use_message(template_name, template_statistics):
try:
most_recent_use = max(
parse(template_stats['updated_at']).replace(tzinfo=None)
for template_stats in template_statistics
)
except ValueError:
return '{} has never been used'.format(template_name)
return '{} was last used {} ago'.format(
template_name,
get_human_readable_delta(most_recent_use, datetime.utcnow())
)
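# For illustration (template name and delta are made up): with an empty statistics
# list this returns 'Appointment reminder has never been used'; with statistics it
# returns e.g. 'Appointment reminder was last used 3 hours ago'.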
def get_human_readable_delta(from_time, until_time):
delta = until_time - from_time
if delta < timedelta(seconds=60):
return 'under a minute'
elif delta < timedelta(hours=1):
minutes = int(delta.seconds / 60)
return '{} minute{}'.format(minutes, '' if minutes == 1 else 's')
elif delta < timedelta(days=1):
hours = int(delta.seconds / 3600)
return '{} hour{}'.format(hours, '' if hours == 1 else 's')
else:
days = delta.days
return '{} day{}'.format(days, '' if days == 1 else 's')
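# Illustrative outputs of get_human_readable_delta (using the datetime/timedelta
# objects this module already imports):
#
#     now = datetime.utcnow()
#     get_human_readable_delta(now - timedelta(seconds=30), now)  # 'under a minute'
#     get_human_readable_delta(now - timedelta(minutes=5), now)   # '5 minutes'
#     get_human_readable_delta(now - timedelta(hours=3), now)     # '3 hours'
#     get_human_readable_delta(now - timedelta(days=2), now)      # '2 days'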
def should_show_template(template_type):
return (
template_type != 'letter' or
'letter' in current_service['permissions']
)
| 2.09375 | 2 |
tests/test_pipeline4.py | gvwilson/nitinat | 0 | 12761769 | """Test layered configuration pipeline."""
from pathlib import Path
from unittest.mock import patch
import yaml
from nitinat.pipeline4 import SYSTEM_CONFIG, read_layered_config
GET_HOME_DIR = "nitinat.pipeline4._get_home_dir"
def make_file(fs, path, contents):
fs.create_file(path, contents=yaml.dump(contents))
def test_layered_config_read_system(fs):
fs.cwd = "/home/person/project/analysis"
expected = {"alpha": 1}
make_file(fs, SYSTEM_CONFIG, expected)
with patch(GET_HOME_DIR, return_value=Path("/home/person")):
actual = read_layered_config("test.yml")
assert actual == expected
def test_layered_config_read_personal(fs):
fs.cwd = "/home/person/project/analysis"
expected = {"beta": 2}
make_file(fs, "/home/person/.nitinat.yml", expected)
with patch(GET_HOME_DIR, return_value=Path("/home/person")):
actual = read_layered_config("test.yml")
assert actual == expected
def test_layered_config_read_project_from_project_root(fs):
fs.cwd = "/home/person/project/analysis"
expected = {"gamma": 3}
make_file(fs, "/home/person/project/.nitinat.yml", expected)
with patch(GET_HOME_DIR, return_value=Path("/home/person")):
actual = read_layered_config("test.yml")
assert actual == expected
def test_layered_config_read_project_from_project_subdir(fs):
fs.cwd = "/home/person/project/analysis"
expected = {"gamma": 3}
make_file(fs, "/home/person/project/.nitinat.yml", expected)
with patch(GET_HOME_DIR, return_value=Path("/home/person")):
actual = read_layered_config("temp/test.yml")
assert actual == expected
def test_layered_config_combine_files(fs):
fs.cwd = "/home/person/project/analysis"
make_file(fs, SYSTEM_CONFIG, {"alpha": 1})
make_file(fs, "/home/person/.nitinat.yml", {"beta": 2})
make_file(fs, "/home/person/project/.nitinat.yml", {"gamma": 3})
with patch(GET_HOME_DIR, return_value=Path("/home/person")):
actual = read_layered_config("temp/test.yml")
assert actual == {"alpha": 1, "beta": 2, "gamma": 3}
| 2.296875 | 2 |
source/handlers/telegram_api/__init__.py | icYFTL/CBot | 0 | 12761770 | from . import events, misc
__all__ = ['events', 'misc'] | 1.078125 | 1 |
topological_nav/tools/eval_traj_following.py | KH-Kyle/rmp_nav | 30 | 12761771 | <reponame>KH-Kyle/rmp_nav
import numpy as np
import gflags
import sys
import glob
from easydict import EasyDict
import os
from rmp_nav.neural.common.dataset import DatasetVisualGibson
from rmp_nav.common.utils import get_project_root, get_data_dir, get_gibson_asset_dir, get_config_dir
from rmp_nav.simulation import agent_factory
from topological_nav.reachability import model_factory
from topological_nav.tools import eval_envs
from topological_nav.tools.eval_traj_following_common import EvaluatorReachability
gflags.DEFINE_string('env', 'space8', '')
gflags.DEFINE_string('model', 'model_12env_v2_future_pair_proximity_z0228', '')
gflags.DEFINE_boolean('dry_run', False, '')
gflags.DEFINE_float('sparsify_thres', 0.99, '')
gflags.DEFINE_integer('start_idx', 0, '')
gflags.DEFINE_integer('n_traj', 100, '')
gflags.DEFINE_float('clip_velocity', 0.5, 'Limit the max velocity.')
gflags.DEFINE_boolean('visualize', True, '')
gflags.DEFINE_boolean('save_screenshot', False, '')
gflags.DEFINE_float('zoom', 1.0, '')
FLAGS = gflags.FLAGS
FLAGS(sys.argv)
model = model_factory.get(FLAGS.model)()
sparsifier = model['sparsifier']
motion_policy = model['motion_policy']
traj_follower = model['follower']
agent = agent_factory.agents_dict[model['agent']]()
e = EvaluatorReachability(dataset=eval_envs.make(FLAGS.env, sparsifier),
sparsifier=sparsifier,
motion_policy=motion_policy,
follower=traj_follower,
agent=agent,
agent_reverse=None,
sparsify_thres=FLAGS.sparsify_thres,
clip_velocity=FLAGS.clip_velocity,
visualize=FLAGS.visualize,
save_screenshot=FLAGS.save_screenshot,
zoom=FLAGS.zoom,
dry_run=FLAGS.dry_run)
e.run(start_idx=FLAGS.start_idx, n_traj=FLAGS.n_traj)
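# Example invocation (flag values are illustrative; defaults are defined above):
#     python -m topological_nav.tools.eval_traj_following --env space8 --n_traj 50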
| 1.773438 | 2 |
tests_integ/asyncio/test_index.py | kvasnevskyi/elasticmagic | 0 | 12761772 | import pytest
from elasticmagic.search import SearchQuery
from .conftest import Car
@pytest.mark.asyncio
async def test_get(es_index, cars):
doc = await es_index.get(1, doc_cls=Car)
assert doc.name == '<NAME>'
assert doc._id == '1'
assert doc._index == es_index.get_name()
assert doc._score is None
doc = await es_index.get(2, doc_cls=Car)
assert doc.name == '<NAME>'
assert doc._id == '2'
assert doc._index == es_index.get_name()
assert doc._score is None
@pytest.mark.asyncio
async def test_multi_get_by_ids(es_index, cars):
docs = await es_index.multi_get([1, 2, 3], doc_cls=Car)
assert len(docs) == 3
doc = docs[0]
assert doc.name == '<NAME>'
assert doc._id == '1'
assert doc._index == es_index.get_name()
assert doc._score is None
doc = docs[1]
assert doc.name == '<NAME>'
assert doc._id == '2'
assert doc._index == es_index.get_name()
assert doc._score is None
doc = docs[2]
assert doc is None
@pytest.mark.asyncio
async def test_multi_get_by_ids_with_doc_cls_as_list(es_index, cars):
docs = await es_index.multi_get([1, 2], doc_cls=[Car])
doc = docs[0]
assert doc.name == '<NAME>'
assert doc._id == '1'
assert doc._index == es_index.get_name()
assert doc._score is None
doc = docs[1]
assert doc.name == '<NAME>'
assert doc._id == '2'
assert doc._index == es_index.get_name()
assert doc._score is None
@pytest.mark.asyncio
async def test_multi_get_by_docs(es_index, cars):
docs = await es_index.multi_get([Car(_id=1), Car(_id=2)])
doc = docs[0]
assert doc.name == '<NAME>'
assert doc._id == '1'
assert doc._index == es_index.get_name()
assert doc._score is None
doc = docs[1]
assert doc.name == '<NAME>'
assert doc._id == '2'
assert doc._index == es_index.get_name()
assert doc._score is None
@pytest.mark.asyncio
async def test_multi_get_by_dicts(es_index, cars):
docs = await es_index.multi_get([
{'_id': 1, '_type': 'car'},
{'_id': 2, 'doc_cls': Car},
])
doc = docs[0]
assert doc.name == '<NAME>'
assert doc._id == '1'
assert doc._index == es_index.get_name()
assert doc._score is None
doc = docs[1]
assert doc.name == '<NAME>'
assert doc._id == '2'
assert doc._index == es_index.get_name()
assert doc._score is None
@pytest.mark.asyncio
async def test_search(es_index, cars):
res = await es_index.search(
SearchQuery(Car.name.match("Lightning"))
)
assert res.total == 1
assert len(res.hits) == 1
doc = res.hits[0]
assert doc.name == '<NAME>'
assert doc._id == '1'
assert doc._index == es_index.get_name()
assert doc._score > 0
assert doc._score == res.max_score
@pytest.mark.asyncio
async def test_count(es_index, cars):
res = await es_index.count(
SearchQuery(Car.name.match("Lightning"))
)
assert res.count == 1
@pytest.mark.asyncio
async def test_scroll(es_index, cars):
with pytest.warns(UserWarning, match='Cannot determine document class'):
search_res = await es_index.search(
SearchQuery(), scroll='1m',
)
assert search_res.total == 2
assert len(search_res.hits) == 2
assert search_res.scroll_id is not None
scroll_res = await es_index.scroll(search_res.scroll_id, scroll='1m')
assert scroll_res.total == 2
assert len(scroll_res.hits) == 0
clear_scroll_res = await es_index.clear_scroll(scroll_res.scroll_id)
assert clear_scroll_res.succeeded is True
@pytest.mark.asyncio
async def test_multi_search(es_index, cars):
results = await es_index.multi_search([
SearchQuery(Car.name.match("Lightning")),
SearchQuery(Car.name.match("Sally")),
])
assert len(results) == 2
res = results[0]
assert res.total == 1
assert len(res.hits) == 1
doc = res.hits[0]
assert doc.name == '<NAME>'
assert doc._id == '1'
assert doc._index == es_index.get_name()
assert doc._score > 0
assert doc._score == res.max_score
res = results[1]
assert res.total == 1
assert len(res.hits) == 1
doc = res.hits[0]
assert doc.name == '<NAME>'
assert doc._id == '2'
assert doc._index == es_index.get_name()
assert doc._score > 0
assert doc._score == res.max_score
@pytest.mark.asyncio
async def test_delete(es_index, cars):
res = await es_index.delete(1, doc_type='car')
es_version = await es_index.get_cluster().get_es_version()
if es_version.major >= 5:
assert res.result == 'deleted'
if es_version.major <= 5:
assert res.found is True
@pytest.mark.asyncio
async def test_delete_by_query(es_index, cars):
res = await es_index.delete_by_query(
SearchQuery(Car.name.match("Lightning")),
refresh=True,
)
assert res.deleted == 1
assert (await es_index.count()).count == 1
@pytest.mark.asyncio
async def test_flush(es_index, cars):
await es_index.add([Car(name='Mater')])
res = await es_index.flush()
assert res
| 2.34375 | 2 |
train.py | Romero027/OmniNet | 0 | 12761773 | <reponame>Romero027/OmniNet<filename>train.py
#
# Copyright 2019 <NAME>, <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ======================================================================
"""
Authors: <NAME>
OmniNet training script.
"""
import argparse
import os
import torch
import time
import glob
import numpy as np
import libs.omninet as omninet
from libs.util import dataloaders as dl
from tensorboardX import SummaryWriter
import torch.multiprocessing as mp
import torch.nn as nn
import torch.nn.functional as F
import libs.omninet.routines as r
from libs.omninet.util import ScheduledOptim
from torch.optim.adam import Adam
import random
import sys
from tqdm import tqdm
from libs.util.train_util import *
coco_images = 'data/coco/train_val'
caption_dir = 'data/coco'
vqa_dir = 'data/vqa'
model_save_path = 'checkpoints'
hmdb_data_dir='data/hmdb'
hmdb_process_dir='data/hmdbprocess'
penn_data_dir='data/penn'
def train(shared_model, task, batch_size, train_steps, gpu_id, start, restore, counter, barrier=None, save_interval=None,
eval_interval=None, log=True):
log_dir = 'logs/%s' % task
if not os.path.exists(log_dir):
os.makedirs(log_dir)
    if log:
summary_writer = SummaryWriter(log_dir)
# Create local model
torch.manual_seed(int(random.random() * 1000))
if gpu_id>0:
model = omninet.OmniNet(gpu_id=gpu_id)
model=model.cuda(gpu_id)
else:
#For GPU 0, use the shared model always
model=shared_model
if task == 'caption':
DL,val_dl = dl.coco_cap_batchgen(caption_dir=caption_dir, image_dir=coco_images,
num_workers=8,
batch_size=batch_size)
optimizer = ScheduledOptim(
Adam(
filter(lambda x: x.requires_grad, shared_model.parameters()),
betas=(0.9, 0.98), eps=1e-09),
512, 16000,restore,init_lr=0.02)
elif task == 'vqa':
DL,val_dl = dl.vqa_batchgen(vqa_dir, coco_images, num_workers=8, batch_size=batch_size)
optimizer = ScheduledOptim(
Adam(
filter(lambda x: x.requires_grad, shared_model.parameters()),
betas=(0.9, 0.98), eps=1e-09),
512, 16000,restore,max_lr=0.0001,init_lr=0.02)
elif task == 'hmdb':
DL,val_dl=dl.hmdb_batchgen(hmdb_data_dir,hmdb_process_dir,num_workers=8,batch_size=batch_size,
test_batch_size=int(batch_size/4),
clip_len=16)
optimizer = ScheduledOptim(
Adam(
filter(lambda x: x.requires_grad, shared_model.parameters()),
betas=(0.9, 0.98), eps=1e-09),
512, 16000,restore,max_lr=0.0001,init_lr=0.02)
elif task == 'penn':
DL,val_dl,test_dl=dl.penn_dataloader(penn_data_dir,batch_size=batch_size,
test_batch_size=int(batch_size/2),num_workers=4,vocab_file='conf/penn_vocab.json')
optimizer = ScheduledOptim(
Adam(
filter(lambda x: x.requires_grad, shared_model.parameters()),
betas=(0.9, 0.98), eps=1e-09),
512, 16000,restore,init_lr=0.02)
model=model.train()
for i in range(start, train_steps):
model.zero_grad()
if barrier is not None:
barrier.wait()
if gpu_id > 0:
with torch.cuda.device(gpu_id):
model.load_state_dict(shared_model.state_dict())
# Calculate loss
step = counter.increment()
if task == 'caption':
if (log and eval_interval is not None and i % eval_interval == 0):
model = model.eval()
val_loss=0
val_acc=0
print('-' * 100)
print('Evaluation step')
for b in tqdm(val_dl):
imgs = b['img']
if gpu_id>=0:
imgs=imgs.cuda(device=gpu_id)
captions = b['cap']
# In val mode we do not pass the targets for prediction. We use it only for loss calculation
_,loss,acc = r.image_caption(model, imgs, targets=captions, mode='val',return_str_preds=True)
val_loss += float(loss.detach().cpu().numpy())
val_acc+=acc
val_loss/=len(val_dl)
val_acc=(val_acc/len(val_dl))
summary_writer.add_scalar('Val_loss', val_loss, step)
print('Step %d, COCO validation loss: %f, Accuracy %f %%' % (step, val_loss,val_acc))
print('-' * 100)
model = model.train()
batch = next(DL)
if gpu_id >= 0:
imgs = batch['img'].cuda(device=gpu_id)
else:
imgs = batch['img']
captions = batch['cap']
_, loss,acc = r.image_caption(model, imgs, targets=captions)
loss.backward()
loss=loss.detach()
if log:
summary_writer.add_scalar('Loss', loss, step)
print('Step %d, Caption Loss: %f, Accuracy: %f %%' % (step, loss,acc))
elif task == 'vqa':
if (log and eval_interval is not None and i % eval_interval == 0):
model = model.eval()
val_loss = 0
val_acc=0
print('-' * 100)
print('Evaluation step')
for b in tqdm(val_dl):
imgs = b['img']
answers=b['ans']
if gpu_id >= 0:
imgs = imgs.cuda(device=gpu_id)
answers=answers.cuda(device=gpu_id)
questions= b['ques']
# In val mode we do not pass the targets for prediction. We use it only for loss calculation
pred, loss,acc = r.vqa(model, imgs, questions,targets=answers, mode='val',return_str_preds=True)
val_loss += float(loss.detach().cpu().numpy())
val_acc+=acc
val_loss/=len(val_dl)
val_acc=(val_acc/len(val_dl))
summary_writer.add_scalar('Val_loss', val_loss, step)
print('Step %d, VQA validation loss: %f, Accuracy %f %%' % (step, val_loss,val_acc))
print('-' * 100)
model = model.train()
continue
batch = next(DL)
if gpu_id >= 0:
imgs = batch['img'].cuda(device=gpu_id)
answers = batch['ans'].cuda(device=gpu_id)
else:
imgs = batch['img']
answers = batch['ans']
questions = batch['ques']
_, loss,acc = r.vqa(model, imgs, questions, targets=answers)
loss.backward()
loss=loss.detach()
if log:
summary_writer.add_scalar('Loss', loss, step)
print('Step %d, VQA Loss: %f, Accuracy: %f %%' % (step, loss,acc))
elif task=='hmdb':
if (log and eval_interval is not None and i % eval_interval == 0):
model = model.eval()
val_loss = 0
val_acc=0
print('-' * 100)
print('Evaluation step')
for b in tqdm(val_dl):
vid,labels = b
if gpu_id >= 0:
vid = vid.cuda(device=gpu_id)
labels = labels.cuda(device=gpu_id)
_, loss,acc = r.hmdb(model, vid,targets=labels, mode='val')
val_loss += float(loss.detach().cpu().numpy())
val_acc+=acc
val_loss/=len(val_dl)
val_acc=(val_acc/len(val_dl))
summary_writer.add_scalar('Val_loss', val_loss, step)
print('Step %d, HMDB validation loss: %f, Accuracy %f %%' % (step, val_loss,val_acc))
print('-' * 100)
model = model.train()
continue
vid,labels = next(DL)
if gpu_id >= 0:
vid = vid.cuda(device=gpu_id)
labels = labels.cuda(device=gpu_id)
_, loss,acc = r.hmdb(model, vid,targets=labels,return_str_preds=True)
loss.backward()
loss=loss.detach()
if log:
summary_writer.add_scalar('Loss', loss, step)
print('Step %d, HMDB Loss: %f, Accuracy: %f %%' % (step, loss,acc))
elif task == 'penn':
if (log and eval_interval is not None and i % eval_interval == 0):
model = model.eval()
val_loss=0
val_acc=0
print('-' * 100)
print('Evaluation step')
for b in tqdm(test_dl):
en = b['text']
targets = b['tokens']
pad_id=b['pad_id']
pad_mask=b['pad_mask']
if gpu_id>=0:
targets=targets.to(gpu_id)
pad_mask=pad_mask.to(gpu_id)
_,loss,acc = r.penn(model, en, target_pad_mask=pad_mask,
pad_id=pad_id,targets=targets, mode='val',return_str_preds=True)
loss=loss.detach()
val_loss += float(loss.cpu().numpy())
val_acc+=acc
val_loss/=len(val_dl)
val_acc=(val_acc/len(val_dl))
summary_writer.add_scalar('Val_loss', val_loss, step)
print('Step %d, PENN validation loss: %f, Accuracy %f %%' % (step, val_loss,val_acc))
print('-' * 100)
model = model.train()
batch = next(DL)
en = batch['text']
targets = batch['tokens']
pad_id=batch['pad_id']
pad_mask=batch['pad_mask']
if gpu_id>=0:
targets=targets.to(gpu_id)
pad_mask=pad_mask.to(gpu_id)
_, loss,acc = r.penn(model, en, pad_id=pad_id, targets=targets,target_pad_mask=pad_mask)
loss.backward()
loss=loss.detach()
if log:
summary_writer.add_scalar('Loss', loss, step)
print('Step %d, PENN Loss: %f, Accuracy: %f %%' % (step, loss,acc))
# End Calculate loss
if gpu_id>0:
ensure_shared_grads(model, shared_model, gpu_id)
optimizer.step()
# Save model
        if (save_interval is not None and (i + 1) % save_interval == 0):
shared_model.save(model_save_path, step)
sys.stdout.flush()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='OmniNet training script.')
parser.add_argument('n_iters', help='Number of iterations to train.')
parser.add_argument('tasks', help='List of tasks seperated by comma.')
parser.add_argument('batch_sizes', help='List of batch size for each task seperated by comma')
parser.add_argument('--n_jobs', default=1, help='Number of asynchronous jobs to run for each task.')
parser.add_argument('--n_gpus', default=1, help='Number of GPUs to use')
parser.add_argument('--save_interval', default=100, help='Number of iterations after which to save the model.')
parser.add_argument('--restore', default=-1, help='Step from which to restore model training')
parser.add_argument('--restore_last', help='Restore the latest version of the model.', action='store_true')
parser.add_argument('--eval_interval', help='Interval after which to evaluate on the test/val set.', default=1000)
args = parser.parse_args()
torch.manual_seed(47)
mp.set_start_method('spawn',force=True)
n_iters = int(args.n_iters)
n_jobs = int(args.n_jobs)
tasks=args.tasks
batch_sizes=args.batch_sizes
save_interval = int(int(args.save_interval) / n_jobs)
eval_interval = int(int(args.eval_interval) / n_jobs)
    if args.restore_last:
ckpts = glob.glob(os.path.join(model_save_path, '*'))
iters = [int(os.path.basename(c)) for c in ckpts]
if len(iters) != 0:
restore = max(iters)
else:
restore = 0
else:
restore = int(args.restore)
tasks=tasks.split(',')
tasks=[t.strip() for t in tasks]
batch_sizes=batch_sizes.split(',')
batch_sizes=[int(b.strip()) for b in batch_sizes]
if len(tasks)!=len(batch_sizes):
raise Exception('Number of tasks provided does not match the number of batch sizes provided.')
n_gpus = int(args.n_gpus)
n_tasks = len(tasks) * n_jobs
shared_model = omninet.OmniNet(gpu_id=0)
if restore != -1:
shared_model.restore(model_save_path, restore)
else:
restore=0
shared_model=shared_model.to(0)
shared_model.share_memory()
counters = [Counter(restore) for i in range(len(tasks))]
barrier = mp.Barrier(n_tasks)
start = int(restore / n_jobs)
# Declare training processes for multi-gpu hogwild training
processes = []
for i in range(n_tasks):
#If more than one GPU is used, use first GPU only for model sharing
if n_gpus>1:
gpu_id=i%n_gpus
else:
gpu_id=0
process = mp.Process(target=train, args=(shared_model, tasks[i % len(tasks)], batch_sizes[i % len(tasks)],
int(n_iters / n_jobs),
gpu_id, start, restore, counters[i % len(tasks)], barrier,
(save_interval if i == 0 else None),
(eval_interval if i < len(tasks) else None),
(True if i < len(tasks) else False)))
process.start()
processes.append(process)
for p in processes:
p.join()
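# Example invocation (values are illustrative): train the vqa and caption tasks
# jointly for 100000 iterations with per-task batch sizes of 128, resuming the
# latest snapshot:
#     python train.py 100000 vqa,caption 128,128 --n_gpus 2 --restore_last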
| 1.882813 | 2 |
exp_shapes/train_shapes_setup.py | rishamsidhu/n2nmn | 0 | 12761774 | <filename>exp_shapes/train_shapes_setup.py
from __future__ import absolute_import, division, print_function
import random
import pickle
import time
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--gpu_id', type=int, default=0)
args = parser.parse_args()
gpu_id = args.gpu_id # set GPU id to use
import os; os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
import numpy as np
import tensorflow as tf
# Start the session BEFORE importing tensorflow_fold
# to avoid taking up all GPU memory
sess = tf.Session(config=tf.ConfigProto(
gpu_options=tf.GPUOptions(allow_growth=True),
allow_soft_placement=False, log_device_placement=False))
import json
from models_shapes.nmn3_assembler import Assembler
from models_shapes.nmn3_model import NMN3ModelAtt
# Module parameters
H_im = 30
W_im = 30
num_choices = 2
embed_dim_txt = 300
embed_dim_nmn = 300
lstm_dim = 256
num_layers = 2
encoder_dropout = True
decoder_dropout = True
decoder_sampling = True
T_encoder = 15
T_decoder = 11
N = 256
# Training parameters
weight_decay = 5e-4
max_grad_l2_norm = 10
max_iter = 40000
snapshot_interval = 10000
exp_name = "shapes_gt_layout_" + input("Experiment identifying numbers:")
snapshot_dir = './exp_shapes/tfmodel/%s/' % exp_name
# Log params
log_interval = 20
log_dir = './exp_shapes/tb/%s/' % exp_name
# Data files
vocab_shape_file = './exp_shapes/data/vocabulary_shape.txt'
vocab_layout_file = './exp_shapes/data/vocabulary_layout.txt'
image_sets = ['train.large', 'train.med', 'train.small', 'train.tiny']
training_text_files = './exp_shapes/shapes_dataset/%s.query_str.txt'
training_image_files = './exp_shapes/shapes_dataset/%s.input.npy'
training_label_files = './exp_shapes/shapes_dataset/%s.output'
training_gt_layout_file = './exp_shapes/data/%s.query_layout_symbols.json'
image_mean_file = './exp_shapes/data/image_mean.npy'
# Load vocabulary
with open(vocab_shape_file) as f:
vocab_shape_list = [s.strip() for s in f.readlines()]
vocab_shape_dict = {vocab_shape_list[n]:n for n in range(len(vocab_shape_list))}
num_vocab_txt = len(vocab_shape_list)
assembler = Assembler(vocab_layout_file)
num_vocab_nmn = len(assembler.module_names)
#random seed
np.random.seed(int(time.time()))
tf.set_random_seed(int(time.time()) + 9)
# Network inputs
text_seq_batch = tf.placeholder(tf.int32, [None, None])
seq_length_batch = tf.placeholder(tf.int32, [None])
image_batch = tf.placeholder(tf.float32, [None, H_im, W_im, 3])
expr_validity_batch = tf.placeholder(tf.bool, [None])
vqa_label_batch = tf.placeholder(tf.int32, [None])
use_gt_layout = tf.constant(True, dtype=tf.bool)
gt_layout_batch = tf.placeholder(tf.int32, [None, None])
# The model
nmn3_model = NMN3ModelAtt(image_batch, text_seq_batch,
seq_length_batch, T_decoder=T_decoder,
num_vocab_txt=num_vocab_txt, embed_dim_txt=embed_dim_txt,
num_vocab_nmn=num_vocab_nmn, embed_dim_nmn=embed_dim_nmn,
lstm_dim=lstm_dim,
num_layers=num_layers, EOS_idx=assembler.EOS_idx,
encoder_dropout=encoder_dropout,
decoder_dropout=decoder_dropout,
decoder_sampling=decoder_sampling,
num_choices=num_choices, use_gt_layout=use_gt_layout,
gt_layout_batch=gt_layout_batch)
compiler = nmn3_model.compiler
scores = nmn3_model.scores
log_seq_prob = nmn3_model.log_seq_prob
# Loss function
softmax_loss_per_sample = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=scores, labels=vqa_label_batch)
# The final per-sample loss, which is vqa loss for valid expr
# and invalid_expr_loss for invalid expr
final_loss_per_sample = softmax_loss_per_sample # All exprs are valid
avg_sample_loss = tf.reduce_mean(final_loss_per_sample)
seq_likelihood_loss = tf.reduce_mean(-log_seq_prob)
total_training_loss = seq_likelihood_loss + avg_sample_loss
total_loss = total_training_loss + weight_decay * nmn3_model.l2_reg
# Train with Adam
solver = tf.train.AdamOptimizer()
gradients = solver.compute_gradients(total_loss)
# Clip gradient by L2 norm
# gradients = gradients_part1+gradients_part2
gradients = [(tf.clip_by_norm(g, max_grad_l2_norm), v)
for g, v in gradients]
solver_op = solver.apply_gradients(gradients)
# Training operation
# Partial-run can't fetch training operations
# some workaround to make partial-run work
with tf.control_dependencies([solver_op]):
train_step = tf.constant(0)
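# Illustrative use of the workaround (call sites are hypothetical): partial_run can
# only fetch tensors, not bare ops, so fetching train_step -- which depends on
# solver_op through the control dependency above -- runs the optimizer as a side
# effect, e.g.:
#     h = sess.partial_run_setup([train_step, avg_sample_loss], [text_seq_batch, ...])
#     sess.partial_run(h, train_step, feed_dict={text_seq_batch: ...})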
sess.run(tf.global_variables_initializer())
#variable access
prefix = 'neural_module_network/layout_execution/'
mods = ['TransformModule', 'FindModule', 'AnswerModule']
swaps = dict.fromkeys(mods)
old = dict.fromkeys(mods, 0)
num_swaps = int(input("Number of swaps?"))
temp = {}
for x in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
y = x.eval(session = sess)
temp[x.name] = np.array(y)
with open(os.path.join(snapshot_dir, "start_vars.txt"), "wb") as f:
pickle.dump(temp, f)
for mod in mods:
swaps[mod] = []
for i in range(num_swaps):
d = {}
for x in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope = prefix + mod):
y = x.eval(session = sess)
if i == 0:
d[x.name] = np.array(y)
else:
if "bias" in x.name:
d[x.name] = np.zeros(y.shape)
elif "Adam" in x.name:
d[x.name] = np.zeros(y.shape)
else:
#var = tf.get_variable(name = "temp" + str(count), initializer = tf.contrib.layers.xavier_initializer(), shape = y.shape)
var = tf.Variable(tf.contrib.layers.xavier_initializer()(shape = y.shape))
sess.run(tf.local_variables_initializer())
sess.run(tf.global_variables_initializer())
y = var.eval(session = sess)
d[x.name] = np.array(y)
swaps[mod] += [d]
with open(os.path.join(snapshot_dir, "start_swaps.txt"), "wb") as f:
pickle.dump(swaps, f)
| 1.945313 | 2 |
robotframework_archive/rfarchivesetup.py | adiralashiva8/robotframework-archive | 0 | 12761775 | <gh_stars>0
import mysql.connector
import logging
def rfarchive_setup(opts):
# connect to database
print("INFO: Connecting to dB")
mydb = connect_to_mysql(opts.host, opts.username, opts.password)
# create new user
obj = mydb.cursor()
print("INFO: Creating superuser with local access")
try:
obj.execute("CREATE USER IF NOT EXISTS 'superuser'@'localhost' IDENTIFIED BY '<PASSWORD>';")
obj.execute("GRANT ALL PRIVILEGES ON *.* TO 'superuser'@'localhost' WITH GRANT OPTION;")
except Exception as e:
print(str(e))
print("INFO: Creating superuser with remote access")
try:
obj.execute("CREATE USER 'superuser'@'%' IDENTIFIED BY '<PASSWORD>';")
obj.execute("GRANT ALL PRIVILEGES ON *.* TO 'superuser'@'%' WITH GRANT OPTION;")
except Exception as e:
print(str(e))
print("INFO: Reloading grant table")
try:
obj.execute("FLUSH PRIVILEGES;")
except Exception as e:
print(str(e))
print("INFO: Creating rfarchive dB")
try:
obj.execute("CREATE DATABASE IF NOT EXISTS rfarchive;")
except Exception as e:
print(str(e))
print("INFO: Creating required tables")
rfdb = connect_to_mysql_db(opts.host, opts.username, opts.password, "rfarchive")
try:
rfobj = rfdb.cursor()
rfobj.execute("CREATE TABLE IF NOT EXISTS hsproject ( pid INT NOT NULL auto_increment primary key, name TEXT, description TEXT, created DATETIME, updated DATETIME, total INT, percentage FLOAT);")
rfobj.execute("CREATE TABLE IF NOT EXISTS hsexecution ( eid INT NOT NULL auto_increment primary key, pid INT, description TEXT, time DATETIME, total INT, pass INT, fail INT, skip INT, etime TEXT);")
rfobj.execute("CREATE TABLE IF NOT EXISTS hstest ( tid INT NOT NULL auto_increment primary key, eid INT, pid INT, name TEXT, status TEXT, time TEXT, error TEXT, comment TEXT, assigned TEXT, eta TEXT, review TEXT, type TEXT, tag TEXT, updated DATETIME);")
# snow project
rfobj.execute("CREATE TABLE IF NOT EXISTS spproject ( pid INT NOT NULL auto_increment primary key, name TEXT, description TEXT, created DATETIME, updated DATETIME, total INT);")
rfobj.execute("CREATE TABLE IF NOT EXISTS spexecution ( eid INT NOT NULL auto_increment primary key, pid INT, description TEXT, time DATETIME);")
rfobj.execute("CREATE TABLE IF NOT EXISTS sptest ( tid INT NOT NULL auto_increment primary key, eid INT, pid INT, name TEXT, browser_time FLOAT, client_response_time FLOAT, response_time FLOAT, sql_count FLOAT, sql_time FLOAT);")
# sf project
rfobj.execute("CREATE TABLE IF NOT EXISTS sfproject ( pid INT NOT NULL auto_increment primary key, name TEXT, description TEXT, created DATETIME, updated DATETIME, total INT);")
rfobj.execute("CREATE TABLE IF NOT EXISTS sfexecution ( eid INT NOT NULL auto_increment primary key, pid INT, description TEXT, time DATETIME);")
rfobj.execute("CREATE TABLE IF NOT EXISTS sftest ( tid INT NOT NULL auto_increment primary key, eid INT, pid INT, name TEXT, ept_time FLOAT);")
except Exception as e:
print(str(e))
commit_and_close_db(mydb)
def connect_to_mysql(host, user, pwd):
try:
mydb = mysql.connector.connect(
host=host,
user=user,
passwd=<PASSWORD>
)
return mydb
except Exception as e:
print(e)
def connect_to_mysql_db(host, user, pwd, db):
try:
mydb = mysql.connector.connect(
host=host,
user=user,
passwd=<PASSWORD>,
database=db
)
return mydb
except Exception as e:
print(e)
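# A minimal usage sketch (host and credentials are placeholders for illustration):
#
#     db = connect_to_mysql_db("localhost", "superuser", "<password>", "rfarchive")
#     cursor = db.cursor()
#     cursor.execute("SELECT COUNT(*) FROM hsproject;")
#     print(cursor.fetchone())
#     commit_and_close_db(db)  # defined below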
def commit_and_close_db(db):
db.commit()
db.close() | 2.71875 | 3 |
checkfileint.py | MrForg3t/sourcecodetrm | 0 | 12761776 | from os import path, system
from platform import system as osInfo
from time import sleep
def checkFile():
print("\n")
checkFileData()
sleep(0.1)
print("\n")
checkFileEssential()
sleep(0.1)
print("\n")
def checkFileData():
try:
if osInfo() == "Windows":
if path.isdir("data"):
if path.exists("data/appData.json"):
print("appData.json [✅]")
else:
print("appData.json [❌]")
x = input("Start repair your data/appData: (y/N) ")
if x == "y":
system("repairfiles.exe")
elif x == "N":
exit()
else:
exit()
if path.exists("data/uuidData.json"):
print("uuidData.json [✅]")
else:
print("uuidData.json [❌]")
x = input("Start repair your data/uuidData: (y/N) ")
if x == "y":
system("repairfiles.exe")
elif x == "N":
exit()
else:
exit()
else:
print("'data/' path [❌]")
x = input("Start repair your 'data/': (y/N) ")
if x == "y":
system("repairfiles.exe")
elif x == "N":
exit()
else:
exit()
elif osInfo() == "Darwin":
print("Not supported on this platform for now.")
elif osInfo() == "Linux":
print("Not supported on this platform for now.")
else:
print("We cannnot find your operating system")
except Exception as error:
print(f"Error {error}")
def checkFileEssential():
try:
if osInfo() == "Windows":
if path.exists("main.exe"):
print("main.exe[✅]")
if path.exists("launcher.exe"):
print("launcher.exe [✅]")
else:
print("launcher.exe [❌]")
x = input("Start repair your launcher.exe file: (y/N) ")
if x == "y":
system("repairfiles.exe")
elif x == "N":
exit()
else:
exit()
if path.exists("uuid_gen.exe"):
print("uuid_gen.exe [✅]")
else:
print("uuid_gen.exe [❌]")
x = input("Start repair your uuid_gen.exe file: (y/N) ")
if x == "y":
system("repairfiles.exe")
elif x == "N":
exit()
else:
exit()
else:
print("main.exe [❌]")
x = input("Start repair your main.exe file: (y/N) ")
if x == "y":
system("repairfiles.exe")
elif x == "N":
exit()
else:
exit()
elif osInfo() == "Darwin":
print("Not supported on this platform for now.")
elif osInfo() == "Linux":
print("Not supported on this platform for now.")
else:
print("We cannnot find your operating system")
except Exception as error:
print(f"Error {error}")
if __name__ == '__main__': checkFile()
sleep(6) | 3.125 | 3 |
Feedback_System.py | FraserEtchells/ArgumentAnalyser | 0 | 12761777 | <reponame>FraserEtchells/ArgumentAnalyser<filename>Feedback_System.py
#!/usr/bin/env python
# coding: utf-8
# In[33]:
import os
import pandas as pd
import json
import random
import sys
import ast
import numpy as np
import argparse
import nltk
import pickle
import scipy
import spacy
from sklearn_pandas import DataFrameMapper
from sklearn import preprocessing
def get_component_ratios(component_tuple):
claims_to_major_claims= 0
premises_to_claims = 0
if(component_tuple[0] > 0):
claims_to_major_claims = component_tuple[1]/component_tuple[0]
else:
claims_to_major_claims = component_tuple[1]
if (component_tuple[1] > 0):
premises_to_claims = component_tuple[2]/component_tuple[1]
else:
premises_to_claims = component_tuple[2]
return claims_to_major_claims,premises_to_claims
def get_introduction_conclusion_major_claims_ratio(component_tuple):
major_claims_to_claims = 0
if(component_tuple[2] > 0):
major_claims_to_claims = component_tuple[1]/component_tuple[2]
else:
major_claims_to_claims = component_tuple[1]
return major_claims_to_claims
def get_paragraph_claims_ratio(component_tuple):
premises_to_claims = 0
if(component_tuple[2] > 0):
premises_to_claims = component_tuple[3]/component_tuple[2]
else:
premises_to_claims = component_tuple[3]
return premises_to_claims
#returns a tuple containing the passed essay's major claim, claim and premise counts.
def component_count_total(essay):
major_claim_count = 0
claim_count = 0
premise_count = 0
for index, row in essay.iterrows():
if row["Argument Component Type"] == "MajorClaim":
major_claim_count +=1
elif row["Argument Component Type"] == "Claim":
claim_count +=1
elif row["Argument Component Type"] == "Premise":
premise_count +=1
return major_claim_count, claim_count, premise_count
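# A minimal usage sketch (assumes the annotated pandas DataFrame used throughout,
# with 'Essay ID' and 'Argument Component Type' columns; the essay id is made up):
#
#     essay = data.loc[data['Essay ID'] == 1]
#     major_claims, claims, premises = component_count_total(essay)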
#returns a tuple containing the average (median) number of major claims, claims and premises across the passed dataframe of essays.
def average_component_count(data):
component_counts = []
major_claims = []
claims = []
premises = []
completed_essay_id = set()
for index,row in data.iterrows():
curr_essay_id = row["Essay ID"]
if curr_essay_id in completed_essay_id:
continue
else:
completed_essay_id.add(curr_essay_id)
curr_essay = data.loc[(data['Essay ID'] == curr_essay_id)]
component_counts.append(component_count_total(curr_essay))
for component_tuple in component_counts:
major_claims.append(component_tuple[0])
claims.append(component_tuple[1])
premises.append(component_tuple[2])
major_claims.sort()
claims.sort()
premises.sort()
average_major_claims = major_claims[round(len(major_claims) / 2)]
average_claims = claims[round(len(claims) / 2)]
average_premises = premises[round(len(premises) / 2)]
return average_major_claims, average_claims, average_premises
#returns a list of tuples of the form (paragraph number, major claim count, claim count, premise count).
def component_count_paragraphs(essay):
current_paragraph = 0
total_paragraphs = essay["Total Paragraphs"].values[0]
paragraph_components_tuples = []
for current_paragraph in range(total_paragraphs):
p_major_claim_count = 0
p_claim_count = 0
p_premise_count = 0
curr_paragraph = essay.loc[(essay['Paragraph Number'] == current_paragraph + 1)]
for index, row in curr_paragraph.iterrows():
if row["Argument Component Type"] == "MajorClaim":
p_major_claim_count +=1
elif row["Argument Component Type"] == "Claim":
p_claim_count +=1
elif row["Argument Component Type"] == "Premise":
p_premise_count +=1
paragraph_tuple = (current_paragraph+1, p_major_claim_count, p_claim_count, p_premise_count)
paragraph_components_tuples.append(paragraph_tuple)
return paragraph_components_tuples
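# Illustrative return value for a four-paragraph essay (counts are made up):
#     [(1, 0, 0, 0), (2, 1, 0, 0), (3, 0, 1, 3), (4, 1, 1, 0)]
# each tuple being (paragraph number, major claims, claims, premises).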
def average_introduction_component_count(data):
introduction_component_counts = []
major_claims = []
claims = []
premises = []
completed_essay_id = set()
for index,row in data.iterrows():
curr_essay_id = row["Essay ID"]
if curr_essay_id in completed_essay_id:
continue
else:
completed_essay_id.add(curr_essay_id)
curr_essay = data.loc[(data['Essay ID'] == curr_essay_id)]
paragraphs_list = component_count_paragraphs(curr_essay)
introduction_component_counts.append(paragraphs_list[1])
for component_tuple in introduction_component_counts:
major_claims.append(component_tuple[1])
claims.append(component_tuple[2])
premises.append(component_tuple[3])
major_claims.sort()
claims.sort()
premises.sort()
average_major_claims = major_claims[round(len(major_claims) / 2)]
average_claims = claims[round(len(claims) / 2)]
average_premises = premises[round(len(premises) / 2)]
return average_major_claims, average_claims, average_premises
def average_conclusion_component_count(data):
conclusion_component_counts = []
major_claims = []
claims = []
premises = []
completed_essay_id = set()
for index,row in data.iterrows():
curr_essay_id = row["Essay ID"]
if curr_essay_id in completed_essay_id:
continue
else:
completed_essay_id.add(curr_essay_id)
curr_essay = data.loc[(data['Essay ID'] == curr_essay_id)]
paragraphs_list = component_count_paragraphs(curr_essay)
conclusion_component_counts.append(paragraphs_list[-1])
for component_tuple in conclusion_component_counts:
major_claims.append(component_tuple[1])
claims.append(component_tuple[2])
premises.append(component_tuple[3])
major_claims.sort()
claims.sort()
premises.sort()
average_major_claims = major_claims[round(len(major_claims) / 2)]
average_claims = claims[round(len(claims) / 2)]
average_premises = premises[round(len(premises) / 2)]
return average_major_claims, average_claims, average_premises
def average_paragraph_component_count(data):
component_counts = []
major_claims = []
claims = []
premises = []
completed_essay_id = set()
for index,row in data.iterrows():
curr_essay_id = row["Essay ID"]
if curr_essay_id in completed_essay_id:
continue
else:
completed_essay_id.add(curr_essay_id)
curr_essay = data.loc[(data['Essay ID'] == curr_essay_id)]
paragraphs_list = component_count_paragraphs(curr_essay)
paragraphs_list.pop(0)
paragraphs_list.pop(0)
paragraphs_list.pop(len(paragraphs_list)-1)
for i in range(len(paragraphs_list)):
component_counts.append(paragraphs_list[i])
for component_tuple in component_counts:
major_claims.append(component_tuple[1])
claims.append(component_tuple[2])
premises.append(component_tuple[3])
claims.sort()
premises.sort()
average_major_claims = 0
average_claims = claims[round(len(claims) / 2)]
    average_premises = premises[round(len(premises) / 2)]
return average_major_claims, average_claims, average_premises
#gives feedback on how the passed essay compares to the corpus' average results (the averages are hardcoded below, since the corpus is static). Ratios are used so that longer essays are marked appropriately.
def component_count_feedback(essay):
average_component_count_tuple = (2,3,8)
essay_component_count_tuple = component_count_total(essay)
essay_ratio_tuple = get_component_ratios(essay_component_count_tuple)
major_claims_to_claims = essay_ratio_tuple[0]
premises_to_claims = essay_ratio_tuple[1]
average_ratio_tuple = get_component_ratios(average_component_count_tuple)
average_major_claims_to_claims = average_ratio_tuple[0]
average_premises_to_claims = average_ratio_tuple[1]
feedback = []
feedback.append("Your essay has " + str(essay_component_count_tuple[0]) +" Major Claims," + str(essay_component_count_tuple[1]) + " Claims and " + str(essay_component_count_tuple[2]) + " Premises.")
feedback.append("Your essay has a ratio of " + str(major_claims_to_claims) + " of Claims to Major Claims.")#Want a higher ratio
feedback.append("On average, essays we have seen have a ratio of " + str(average_major_claims_to_claims) )
if major_claims_to_claims - average_major_claims_to_claims > -0.1 and major_claims_to_claims - average_major_claims_to_claims < 0.1:
feedback.append("This is good - it means you have a good amount of sub-arguments to support your overall thesis.")
elif major_claims_to_claims - average_major_claims_to_claims < -0.1:
feedback.append("This is not great - you may have too few arguments to support your overall thesis.")
elif major_claims_to_claims - average_major_claims_to_claims > 0.1:
feedback.append("While you have a lot of Claims to Major Claims, be aware that having too many claims may unfocus your thesis statement.")
feedback.append("Your essay has a ratio of " + str(premises_to_claims) + " of Premises to Claims.") #Want a higher ratio
feedback.append("On average, essays we have seen have a ratio of"+ str(average_premises_to_claims))
if premises_to_claims - average_premises_to_claims < 0.1:
feedback.append("This is not great. Generally, we want more premises than claims in order to give better justification to our points.")
elif premises_to_claims - average_premises_to_claims > -0.1:
feedback.append("This is good - it means on average you have a lot of support for your points.")
return feedback
def paragraph_component_count_feedback(essay):
#Compares the count of argument components in each paragraph to average results.
    #the average tuples below were originally derived with the average_*_component_count functions above; since the dataset is static the results never change, so they are hardcoded here to avoid the extra run time.
average_introduction_component_tuple = (0,1,0,0)
average_conclusion_component_tuple = (0,1,1,0)
average_paragraph_component_tuple = (0,0,1,3)
essay_paragraph_tuple_list = component_count_paragraphs(essay)
essay_paragraph_tuple_list.pop(0) # remove prompt paragraph
essay_introduction_component_tuple = essay_paragraph_tuple_list.pop(0) #get tuple for the introduction paragraph
essay_conclusion_component_tuple = essay_paragraph_tuple_list.pop(len(essay_paragraph_tuple_list)-1) #get tuple for conclusion paragraph
#as we use pop method, list contains only the main body paragraphs.
introduction_major_claims_to_claims = get_introduction_conclusion_major_claims_ratio(essay_introduction_component_tuple)
average_introduction_major_claims_to_claims = get_introduction_conclusion_major_claims_ratio(average_introduction_component_tuple)
conclusion_major_claims_to_claims = get_introduction_conclusion_major_claims_ratio(essay_conclusion_component_tuple)
average_conclusion_major_claims_to_claims = get_introduction_conclusion_major_claims_ratio(average_conclusion_component_tuple)
average_paragraph_premises_to_claims = get_paragraph_claims_ratio(average_paragraph_component_tuple)
feedback = []
feedback.append("The introduction has " + str(essay_introduction_component_tuple[1]) + " Major Claims, " + str(essay_introduction_component_tuple[2]) + " Claims and " + str(essay_introduction_component_tuple[3]) + "Premises")
feedback.append("On average, essays we have seen have 1 Major Claim, 0 Claims and 0 Premises")
if(essay_introduction_component_tuple[2] > 0 and essay_introduction_component_tuple[1] > 0):
feedback.append("The introduction has a ratio of " + str(introduction_major_claims_to_claims) + " of Major Claims to Claims")
feedback.append("On average, essays we have seen have a ratio of " + str(average_introduction_major_claims_to_claims) + " of Major Claims to Claims")
if(introduction_major_claims_to_claims - average_introduction_major_claims_to_claims > -1 and introduction_major_claims_to_claims - average_introduction_major_claims_to_claims < 1.1):
feedback.append("Your Major Claims to Claims ratio is good - generally we want less Claims and more Major Claims in an introduction, but having the same number is fine.")
elif(introduction_major_claims_to_claims - average_introduction_major_claims_to_claims > 1.1):
feedback.append("Your Major Claims to Claims ratio is not good - having too many claims and not many Major Claims in your introduction makes your structure messier.")
elif(essay_introduction_component_tuple[2] == 0 and essay_introduction_component_tuple[1] > 0):
feedback.append("The introduction has no Claims, therefore we cannot calculate the ratio of Major Claims to Claims - the metric we normally use.")
if(essay_introduction_component_tuple[1] < 2 ):
feedback.append("Since there are " + str(essay_introduction_component_tuple[1]) + " Major Claims this is desirable as you want to have more Major Claims than Claims in your introduction.")
else:
feedback.appened("Since there are " + str(essay_introduction_component_tuple[1]) + " Major Claims however, this is not desirable as you are including too many thesis statements within your introduction. The limit is one or two Major Claims.")
elif(essay_introduction_component_tuple[1] == 0):
feedback.append("The introduction has no Major Claims, therefore we cannot calculate the ratio of Major Claims to Claims - the metric we normally use")
feedback.append("Generally, you should include a Major Claim in the introduction. This is not as vital if you are including atleast one within your Conclusion however.")
if(essay_introduction_component_tuple[3] > 0):
feedback.append("Your introduction includes atleast one premise - this is undesirable. Premises are better suited in the Main Body Paragraphs of your essay.")
for i in range(len(essay_paragraph_tuple_list)):
feedback.append("Paragraph " + str(essay_paragraph_tuple_list[i][0]) + " has " + str(essay_paragraph_tuple_list[i][1]) + " Major Claims, " + str(essay_paragraph_tuple_list[i][2]) + " Claims and " + str(essay_paragraph_tuple_list[i][3]) + " Premises.")
feedback.append("On average, essays we have seen have 0 Major Claims, 1 Claim and 3 Premises")
paragraph_premises_to_claims = get_paragraph_claims_ratio(essay_paragraph_tuple_list[i])
if(essay_paragraph_tuple_list[i][2] > 0 and essay_paragraph_tuple_list[i][3] > 0):
feedback.append("This Paragraph has a ratio of " + str(paragraph_premises_to_claims) + " of Premises to Claims")
feedback.append("On average, essays we have seen have a ratio of " + str(average_paragraph_premises_to_claims) + " of Premises to Claims")
if(paragraph_premises_to_claims - average_paragraph_premises_to_claims > -1 and paragraph_premises_to_claims - average_paragraph_premises_to_claims < 2):
feedback.append("Your Premises to Claims ratio is great - each Claim needs roughly 3 to 4 Premises to properly back it up")
elif(paragraph_premises_to_claims - average_paragraph_premises_to_claims <= -1):
feedback.append("Your Premises to Claims ratio is poor - you should aim to add more Premises to this Claim in order to give it proper justification")
elif(paragraph_premises_to_claims - average_paragraph_premises_to_claims >= 2):
feedback.append("Your Premises to Claims ratio is higher than average - if you have a limited word count you may be better removing some of your premises in this paragraph and either create a new paragraph to support the overall thesis, or add more Premises to another paragraph")
elif(essay_paragraph_tuple_list[i][2] == 0 and essay_paragraph_tuple_list[i][3] > 0):
feedback.append("This paragraph does not include any Claims, therefore we cannot calculate the ratio of Premises to Claims - the metric we normally use")
feedback.append("This is extremely undesirable - every main body paragraph should have a Claim as it helps justify the overall thesis statement of the essay.")
if(essay_paragraph_tuple_list[i][3] > 6 ):
feedback.append("This paragraph also includes many more premises than on average - if you have a limited word count you may be better removing some of your premises from this paragraph and convert them to a claim, or add to other areas in your essay.")
elif(essay_paragraph_tuple_list[i][2] > 0 and essay_paragraph_tuple_list[i][3] == 0):
feedback.append("This paragraph does not include any Premises, therefore we cannot calculate the ratio of Premises to Claims - the metric we normally use")
feedback.append("This is extremely undesirable - every main body paragraph include a few Premises in order to justify the Claims being presented")
if(essay_paragraph_tuple_list[i][2] > 2):
feedback.append("This paragraph also includes several claims - you may be better converting some of these claims into Premises to make a single clearer and well balanced point.")
elif(essay_paragraph_tuple_list[i][2] == 0 and essay_paragraph_tuple_list[i][3] == 0):
feedback.append("This paragraph does not include any Premises or Claims - this is a wasted paragraph that would be put to better use by clearly stating an argument to support your essay's thesis statements.")
feedback.append("Try including 1 Claim and atleast 3 Premises to this paragraph.")
if(essay_paragraph_tuple_list[i][1] > 0):
feedback.append("This paragraph includes atleast 1 Major Claim. This is not desirable, try to keep your thesis statements to either the Introduciton or Conclusion")
feedback.append("The conclusion has " + str(essay_conclusion_component_tuple[1]) + " Major Claims, " + str(essay_conclusion_component_tuple[2]) + " Claims and " + str(essay_conclusion_component_tuple[3]) + " Premises")
feedback.append("On average, essays we have seen have 1 Major Claim, 1 Claim and 0 Premises")
if (essay_conclusion_component_tuple[1] > 0 and essay_conclusion_component_tuple[2] > 0):
feedback.append("The conclusion has a ratio of " + str(conclusion_major_claims_to_claims) + " of Claims to Major Claims" )
feedback.append("On average, essays we have seen have a ratio of " + str(average_conclusion_major_claims_to_claims) + " of Claims to Major Claims")
if(conclusion_major_claims_to_claims - average_conclusion_major_claims_to_claims > -1 and conclusion_major_claims_to_claims - average_conclusion_major_claims_to_claims < 2):
feedback.append("Your Claims to Major Claims ratio is good - generally we want less Claims and more Major Claims in a conclusion, although having a single Claim in your conclusion or summarising your Claims is also a good idea.")
elif(conclusion_major_claims_to_claims - average_conclusion_major_claims_to_claims > 2):
feedback.append("Your Claims to Major Claims ratio is not good - having too many Claims in your conclusion impacts the readability of your final thesis statement.")
elif(essay_conclusion_component_tuple[1] > 0 and essay_conclusion_component_tuple[2] == 0):
feedback.append("The Conclusion has no Claims, therfore we cannot calculate the ratio of Major Claims to Claims - the metric we normally use.")
if(essay_conclusion_component_tuple[1] < 2 ):
feedback.append("Since there are " + str(essay_conclusion_component_tuple[1]) + " Major Claims this is desirable as you want to have more Major Claims than Claims in your conclusion.")
else:
            feedback.append("Since there are " + str(essay_conclusion_component_tuple[1]) + " Major Claims however, this is not desirable as you are including too many thesis statements within your conclusion. The limit is one or two Major Claims.")
elif(essay_conclusion_component_tuple[1] == 0):
feedback.append("The conclusion has no Major Claims, therefore we cannot calculate the ratio of Major Claims to Claims - the metric we normally use")
feedback.append("Generally, you should include a Major Claim in the conclusion. This is not as vital if you are including atleast one within your introduction however, but summarising the Major Claim again within the conclusion is a good way to close your essay.")
if(essay_conclusion_component_tuple[3] > 0):
feedback.append("Your conclusion includes atleast one premise - this is undesirable. Premises are better suited in the Main Body Paragraphs of your essay.")
return feedback
def paragraph_component_sequence(essay):
    #Pass in an essay. Take each paragraph. Store the order of components as a sequence of labels, so a paragraph with Claim, Premise, Premise is recorded as [paragraph_number, "Claim", "Premise", "Premise"].
total_paragraphs = essay["Total Paragraphs"].values[0]
essay_paragraphs_flow = []
for current_paragraph in range(2, total_paragraphs + 1): #start at 2, as we want to ignore the essay prompt sentence.
paragraph = essay.loc[(essay['Paragraph Number'] == current_paragraph)]
paragraph_flow = []
paragraph_flow.append(current_paragraph)
for index, row in paragraph.iterrows():
if row['Argument Component Type'] == "MajorClaim":
paragraph_flow.append("MajorClaim")
elif row['Argument Component Type'] == "Claim":
paragraph_flow.append("Claim")
elif row['Argument Component Type'] == "Premise":
paragraph_flow.append("Premise")
else:
paragraph_flow.append("None")
essay_paragraphs_flow.append(paragraph_flow)
return essay_paragraphs_flow
def paragraph_flow_feedback(essay):
#Provide the flow of Argument Components in each paragraph.
essay_paragraphs_flow = paragraph_component_sequence(essay)
introduction = essay_paragraphs_flow.pop(0)
conclusion = essay_paragraphs_flow.pop(len(essay_paragraphs_flow) -1)
feedback = []
if not introduction:
feedback.append("It appears your Introduction consists of one or fewer sentences, therefore we cannot comment on the structure of it.")
else:
paragraph_number = introduction.pop(0)
feedback.append("The flow of Argument Components in your Introduction goes: " + str(introduction) + " where 'None' labels a non-argumentative sentence.")
#In an introduction, we want the Major Claims to either be in the first or last sentence.
if introduction[0] == "MajorClaim":
feedback.append("Your introduction starts with a Major Claim. This is great, it means you are immediately informing your reader of your main focus for the essay.")
        elif introduction[len(introduction) -1] == "MajorClaim":
feedback.append("Your introduction ends with a Major Claim. This is great, as you mention your thesis statement right before getting into your arguments.")
        for index in range(len(introduction)):
            if index != 0 and index != len(introduction)-1:
if introduction[index] == "MajorClaim":
feedback.append("Your introduction contains a Major Claim, however it is not in a great position. Try to keep your Major Claims to the first or last sentence for better readability.")
for index in range(len(essay_paragraphs_flow)):
#Main body paragraphs should start or end with a Claim
paragraph = essay_paragraphs_flow[index]
paragraph_number = paragraph.pop(0)
feedback.append("The flow of Argument Components in Paragraph " + str(paragraph_number) + " goes: " + str(paragraph) + " where 'None' labels a non-argumentative sentence.")
if paragraph[0] == "Claim":
feedback.append("Paragraph "+ str(paragraph_number) + " starts with a Claim. This is good, you immediately bring this sub-arguments main point forward.")
elif paragraph[len(paragraph) -1] == "Claim":
feedback.append("Paragraph "+ str(paragraph_number) + " ends with a Claim. This is good, you are ending the paragraph with the point of your previous statements.")
        for index_two in range(len(paragraph)):
            if index_two != 0 and index_two != len(paragraph)-1:
                if paragraph[index_two] == "Claim":
                    feedback.append("This paragraph contains a Claim that is neither at the start nor the end of the paragraph. This makes it harder for the reader to determine the actual point of this argument")
if not conclusion:
feedback.append("It appears your Conclusion consists of one or fewer sentences, therefore we cannot comment on the structure of it.")
else:
#Conclusions should start or end with a major claim
paragraph_number = conclusion.pop(0)
feedback.append("The flow of Argument Components in your Conclusion goes:"+ str(conclusion) + " where 'None' labels a non-argumentative sentence.")
if conclusion[0] == "MajorClaim":
feedback.append("Your conclusion starts with a Major Claim. This is great, it means you are immediately describing the thesis of your essay.")
        elif conclusion[len(conclusion) -1] == "MajorClaim":
feedback.append("Your conclusion ends with a Major Claim. This is great, it means your thesis statement is left in the reader's mind and is always a great way to close an essay.")
        for index in range(len(conclusion)):
if index != 0 and index != len(conclusion)-1:
if conclusion[index] == "MajorClaim":
feedback.append("Your conclusion contains a Major Claim, however it is not in a great position. Try to keep your Major Claims to the first or last sentence for better readability.")
return feedback
def argumentative_to_none_argumentative_ratios(essay):
#calculate the ratio of Argumentative sentences to non-argumentative sentences.
total_paragraphs = essay["Total Paragraphs"].values[0]
argumentative_to_non_argumentative_ratios = []
for current_paragraph in range(2, total_paragraphs + 1):
paragraph = essay.loc[(essay['Paragraph Number'] == current_paragraph)]
paragraph_non_argumentative_count = 1
paragraph_argumentative_count = 1
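        # Both counters start at 1 - additive smoothing, presumably, so the ratio below never divides by zero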
for index, row in paragraph.iterrows():
if row['Argument Component Type'] == "None":
paragraph_non_argumentative_count += 1
else:
paragraph_argumentative_count += 1
argumentative_to_non_argumentative_ratios.append(paragraph_argumentative_count/paragraph_non_argumentative_count)
return argumentative_to_non_argumentative_ratios
def argumentative_to_none_argumentative_feedback(essay):
#Provide argumentative feedback - count ratio of argumentative to non-argumentative sentences in each paragraph
ratio_list = argumentative_to_none_argumentative_ratios(essay)
feedback = []
for index in range(len(ratio_list)):
if index == 0: #if introduction
feedback.append("Your introduction has a ratio of " + str(ratio_list[index]) + " of Argumentative to Non-Argumentative Sentences.")
if ratio_list[index] > 0.25:
feedback.append("This is decent - you are not diluting your introduction with sentences that do not really contribute to the overall message.")
else:
feedback.append("This is poor - in the introduction try to stay brief and on point so you can get to your main points sooner.")
elif index == len(ratio_list)-1: #if conclusion
feedback.append("Your conclusion has a ratio of " + str(ratio_list[index]) + " of Argumentative to Non-Argumentative Sentences.")
if ratio_list[index] > 0.25:
feedback.append("This is decent - you are not diluting your conclusion with sentences that do not really contribute to the overall message.")
else:
feedback.append("This is poor - in the conclusion aim to summarise your overall thesis and not dilute the message.")
else: #any other paragraph
feedback.append("Paragraph " + str(index+1) + " has a ratio of "+ str(ratio_list[index]) + " of Argumentative to Non-Argumentative Sentences.")
if ratio_list[index] > 1:
feedback.append("This is good, in main body paragraphs we need to be introducing the bulk of our justifications so non-argumentative sentences may make our arguments less clear.")
else:
feedback.append("This is poor - while main body paragraphs are larger and more diverse, they still need to be focussed on creating sub-arguments to aid the overall thesis statements in the introduciton and conclusion.")
return feedback
def results_feedback(essay):
#Compile the feedback for the overall sentence by sentence breakdown - lists each sentence and what Argument Component they are.
feedback = []
feedback.append("Here is the list of all your sentences and what type of Argument Component they were")
previous_paragraph = 0
for index, row in essay.iterrows():
if(index != 0):
current_paragraph = row["Paragraph Number"]
if (previous_paragraph != current_paragraph):
previous_paragraph = current_paragraph
feedback.append("Paragraph " + str(previous_paragraph))
feedback.append(row["Sentence"] + " = " + row["Argument Component Type"])
return feedback
def main():
#Only run for testing purposes
train = pd.read_pickle("./train.pkl")
test = pd.read_pickle("./test.pkl")
test_essay_id = 4
test_essay = test.loc[(test['Essay ID'] == test_essay_id)]
print(component_count_feedback(train, test_essay))
print(paragraph_component_count_feedback(train, test_essay))
print(paragraph_flow_feedback(test_essay))
print(argumentative_to_none_argumentative_feedback(test_essay))
# In[ ]:
| 2.625 | 3 |
assignments/python3/hello/hello.py | jeremybergen/csci000-astudent | 2 | 12761778 | <gh_stars>1-10
#!/usr/bin/env python3
# Kattis - hello problem
import sys
def answer():
return "Hello World!"
# not used!
def greet(name):
ans = f'Hello {name}!'
return ans
def solve():
print(answer())
def test():
assert answer() == "Hello World!"
print('all test cases passed...')
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == 'test':
#print(sys.argv)
test()
else:
solve() | 3.25 | 3 |
MoonPhase.py | bklevence/MagTag_Moon | 2 | 12761779 | <reponame>bklevence/MagTag_Moon<gh_stars>1-10
# Write your code here :-)
# SPDX-FileCopyrightText: 2017 <NAME>, written for Adafruit Industries
#
# SPDX-License-Identifier: Unlicense
# Moonphase example utilizing Farmsense API by bk
# <EMAIL>
from adafruit_magtag.magtag import MagTag
import adafruit_requests
import secrets
import wifi
import ipaddress
import ssl
import socketpool
try:
from secrets import secrets
except ImportError:
print("WiFi secrets are kept in secrets.py, please add them there!")
raise
print("Connecting to %s" % secrets["ssid"])
wifi.radio.connect(secrets["ssid"], secrets["password"])
print("Connected to %s!" % secrets["ssid"])
print("My IP address is", wifi.radio.ipv4_address)
pool = socketpool.SocketPool(wifi.radio)
requests = adafruit_requests.Session(pool, ssl.create_default_context())
TIME_URL = "https://io.adafruit.com/api/v2/time/seconds"
response = requests.get(TIME_URL)
unix = str(response.text)
print(unix)
# Set up where we'll be fetching data from
DATA_SOURCE = "https://api.farmsense.net/v1/moonphases/?d="
DATA_SOURCE += unix
DATA_Phase = [0, "Phase"]
DATA_Illumination = [0, "Illumination"]
DATA_Age = [0, "Age"]
def text_transform(value):
return value
magtag = MagTag(url=DATA_SOURCE,
json_path=(DATA_Phase, DATA_Illumination, DATA_Age))
magtag.network.connect()
magtag.add_text(
text_position=(
(magtag.graphics.display.width // 2) - 1,
(magtag.graphics.display.height // 2) + 22,
),
text_scale=1.5,
text_transform=text_transform,
text_anchor_point=(0.5, 0.5),
)
magtag.add_text(
text_position=(
(magtag.graphics.display.width // 2) - 1,
(magtag.graphics.display.height // 2) + 43,
),
text_scale=1.5,
text_transform=text_transform,
text_anchor_point=(0.5, 0.5),
# is_data= False,
)
magtag.add_text(
text_font="/fonts/MoonPhases-75.bdf",
text_position=((magtag.graphics.display.width // 2) - 1, 40),
text_anchor_point=(0.5, 0.5),
)
try:
value = magtag.fetch()
moonChar = [
"0",
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
"L",
"M",
"1",
"N",
"O",
"P",
"Q",
"R",
"S",
"T",
"U",
"V",
"W",
"X",
"Y",
"Z",
]
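    # value appears to be (phase name, illumination fraction, moon age in days) per the Farmsense API;
    # below, the ~29.56-day synodic month is mapped onto the 28 moon-phase glyphs of the font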
z = value[2] / 29.56
y = int(z * 28)
print(moonChar[y])
magtag.set_text(value[0], index=0)
p = str("Illuminated: " + str(int(value[1]*100)) +"%")
magtag.set_text(p, index=1)
magtag.set_text(moonChar[y], index=2)
print(value)
print("Illumination:", value[1], "%")
except (ValueError, RuntimeError) as e:
print("Some error occured, retrying! -", e)
magtag.exit_and_deep_sleep(60)
| 2.765625 | 3 |
src/api/pdi/application/dashboard/GetDataOperationWidget/GetDataOperationWidgetResponse.py | ahmetcagriakca/pythondataintegrator | 1 | 12761780 | <reponame>ahmetcagriakca/pythondataintegrator
from pdip.cqrs.decorators import responseclass
from pdi.application.dashboard.GetDataOperationWidget.GetDataOperationWidgetDto import GetDataOperationWidgetDto
@responseclass
class GetDataOperationWidgetResponse:
Data: GetDataOperationWidgetDto = None
| 1.539063 | 2 |
mycroft/client/enclosure/mouth.py | sowmyavasudeva/SmartBookmark | 1 | 12761781 | # Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
class EnclosureMouth:
"""
Listens to enclosure commands for Mycroft's Mouth.
Performs the associated command on Arduino by writing on the Serial port.
"""
def __init__(self, ws, writer):
self.ws = ws
self.writer = writer
self.is_timer_on = False
self.__init_events()
def __init_events(self):
self.ws.on('enclosure.mouth.reset', self.reset)
self.ws.on('enclosure.mouth.talk', self.talk)
self.ws.on('enclosure.mouth.think', self.think)
self.ws.on('enclosure.mouth.listen', self.listen)
self.ws.on('enclosure.mouth.smile', self.smile)
self.ws.on('enclosure.mouth.viseme', self.viseme)
self.ws.on('enclosure.mouth.text', self.text)
self.ws.on('enclosure.mouth.display', self.display)
def reset(self, event=None):
self.writer.write("mouth.reset")
def talk(self, event=None):
self.writer.write("mouth.talk")
def think(self, event=None):
self.writer.write("mouth.think")
def listen(self, event=None):
self.writer.write("mouth.listen")
def smile(self, event=None):
self.writer.write("mouth.smile")
def viseme(self, event=None):
if event and event.data:
code = event.data.get("code")
time_until = event.data.get("until")
# Skip the viseme if the time has expired. This helps when a
# system glitch overloads the bus and throws off the timing of
# the animation timing.
if code and (not time_until or time.time() < time_until):
self.writer.write("mouth.viseme=" + code)
def text(self, event=None):
text = ""
if event and event.data:
text = event.data.get("text", text)
self.writer.write("mouth.text=" + text)
def display(self, event=None):
code = ""
xOffset = ""
yOffset = ""
clearPrevious = ""
if event and event.data:
code = event.data.get("img_code", code)
xOffset = event.data.get("xOffset", xOffset)
yOffset = event.data.get("yOffset", yOffset)
clearPrevious = event.data.get("clearPrev", clearPrevious)
clearPrevious = int(str(clearPrevious) == "True")
clearPrevious = "cP=" + str(clearPrevious) + ","
x_offset = "x=" + str(xOffset) + ","
y_offset = "y=" + str(yOffset) + ","
message = "mouth.icon=" + x_offset + y_offset + clearPrevious + code
# Check if message exceeds Arduino's serial buffer input limit 64 bytes
if len(message) > 60:
message1 = message[:31]
message2 = message[31:]
message1 += "$"
message2 += "$"
message2 = "mouth.icon=" + message2
self.writer.write(message1)
time.sleep(0.25) # writer bugs out if sending messages too rapidly
self.writer.write(message2)
else:
time.sleep(0.1)
self.writer.write(message)
| 2.71875 | 3 |
stream_consumer.py | luigisaetta/ocipy | 2 | 12761782 | import oci
import os
import io
import time
import sys
from pathlib import Path
from oci.config import validate_config
from oci.streaming import StreamClient
from oci.streaming.models import CreateCursorDetails
import base64
# configuration for connection to Oracle OCI
# for user, tenancy you have to specify the OCID
# the key is the key (PEM) you have uploaded to your profile
#
config = {
"user": "ocid1.XXXXXX",
"key_file": "/Users/lsaetta/Progetti/xxxx/oci_api_key.pem",
"fingerprint": "<KEY>",
"tenancy": "ocid1.ZZZZZ",
"region": "eu-frankfurt-1"
}
SLEEP_TIME = 2 # in sec.
# check command line params
def check_params():
N_PARAMS = 1 # expected # of params
n_params = len(sys.argv)
if (n_params < (N_PARAMS + 1)):
print("Usage: stream_subscriber.py partition_id")
print("")
sys.exit(-1)
else:
print("Running with: ")
print("partition_id {}".format(sys.argv[1]))
print("")
def decode(value):
    return base64.b64decode(value).decode('utf-8')
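
# Messages arrive base64-encoded; for example, decode('aGVsbG8=') == 'hello'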
#
# Main
#
print("")
check_params()
validate_config(config)
print("Validate config OK")
print("")
partition_id = sys.argv[1]
stream_id = "ocid1.stream.oc1.eu-frankfurt-1.aaaaaaaafsxpk4zdonaed3d27s5jwhazylryizrqmbd4ihnsgbbkpj3k6saa"
# check on partition_id OK, on offset OK
cursor_details = CreateCursorDetails(partition = partition_id, type = "LATEST")
# initialize consumer
client = StreamClient(config)
print("*** GET cursor ")
response = client.create_cursor(stream_id = stream_id, create_cursor_details = cursor_details)
## extract cursor from response
cursor = response.data.value
# infinite READ loop...
while True:
# print("*** GET messages ")
response_mess = client.get_messages(stream_id = stream_id, cursor = cursor)
    # prepare for going forward
# you need to pass ***new*** cursor
cursor = response_mess.headers['opc-next-cursor']
print("*")
if (len(response_mess.data) > 0):
print("Messages: ")
for mess in response_mess.data:
print(decode(mess.value))
# sleep before next loop
time.sleep(SLEEP_TIME)
| 2.1875 | 2 |
Greedy/Candies.py | PK-100/Competitive_Programming | 70 | 12761783 | # Link: https://www.hackerrank.com/challenges/candies/problem
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the candies function below.
def candies(n, arr):
incr=[1 for x in range(n)]
decr=[1 for x in range(n)]
final=[1 for x in range(n)]
for i in range(1,n):
if(arr[i]>arr[i-1]):
#increasing
incr[i]=incr[i-1]+1
for i in range(n-2,-1,-1):
if(arr[i]>arr[i+1]):
#decreasing
decr[i]=decr[i+1]+1
#print(incr)
#print(decr)
for i in range(n):
final[i]=max(incr[i],decr[i])
#print(final)
return sum(final)
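
# Illustrative example (not from the original solution): for ratings [1, 2, 2] the increasing
# pass gives [1, 2, 1], the decreasing pass gives [1, 1, 1], and summing the element-wise
# max [1, 2, 1] yields candies(3, [1, 2, 2]) == 4.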
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
arr = []
for _ in range(n):
arr_item = int(input())
arr.append(arr_item)
result = candies(n, arr)
fptr.write(str(result) + '\n')
fptr.close()
| 3.6875 | 4 |
data_preprocessing/utils.py | roychowdhuryresearch/HFO-Classification | 13 | 12761784 | import numpy as np
import math
from scipy.interpolate import interp1d
import scipy.linalg as LA
import os
from skimage.transform import resize
from multiprocessing import Process
import shutil
from tqdm import tqdm
from concurrent.futures import ProcessPoolExecutor, as_completed
def compute_tf_fig(org_sig):
final_sig = create_extended_sig(org_sig)
wave2000 = final_sig
ps_SampleRate = 2000
s_Len = len(final_sig)
#exts_len = len(final_sig)
s_HalfLen = math.floor(s_Len/2)+1
ps_MinFreqHz = 10
ps_MaxFreqHz = 500
ps_FreqSeg = 512
v_WAxis = np.linspace(0, 2*np.pi, s_Len, endpoint=False)
v_WAxis = v_WAxis* ps_SampleRate
v_WAxisHalf = v_WAxis[:s_HalfLen]
v_FreqAxis = np.linspace(ps_MinFreqHz, ps_MaxFreqHz,num=ps_FreqSeg)#ps_MinFreqHz:s_FreqStep:ps_MaxFreqHz
v_FreqAxis = v_FreqAxis[::-1]
v_InputSignalFFT = np.fft.fft(wave2000)
ps_StDevCycles = 3
m_GaborWT = np.zeros((ps_FreqSeg, s_Len),dtype=complex)
for i, s_FreqCounter in enumerate(v_FreqAxis):
v_WinFFT = np.zeros(s_Len)
s_StDevSec = (1 / s_FreqCounter) * ps_StDevCycles
v_WinFFT[:s_HalfLen] = np.exp(-0.5*np.power( v_WAxisHalf - (2* np.pi* s_FreqCounter) , 2)*
(s_StDevSec**2))
v_WinFFT = v_WinFFT* np.sqrt(s_Len)/ LA.norm(v_WinFFT, 2)
m_GaborWT[i, :] = np.fft.ifft(v_InputSignalFFT* v_WinFFT)/np.sqrt(s_StDevSec)
return s_HalfLen, v_FreqAxis, v_WAxisHalf, v_InputSignalFFT, m_GaborWT
def compute_spectrum(org_sig):
final_sig = create_extended_sig(org_sig)
wave2000 = final_sig
ps_SampleRate = 2000
s_Len = len(final_sig)
#exts_len = len(final_sig)
s_HalfLen = math.floor(s_Len/2)+1
ps_MinFreqHz = 10
ps_MaxFreqHz = 500
ps_FreqSeg = 512
v_WAxis = np.linspace(0, 2*np.pi, s_Len, endpoint=False)
v_WAxis = v_WAxis* ps_SampleRate
v_WAxisHalf = v_WAxis[:s_HalfLen]
v_FreqAxis = np.linspace(ps_MinFreqHz, ps_MaxFreqHz,num=ps_FreqSeg)#ps_MinFreqHz:s_FreqStep:ps_MaxFreqHz
v_FreqAxis = v_FreqAxis[::-1]
v_InputSignalFFT = np.fft.fft(wave2000)
ps_StDevCycles = 3
m_GaborWT = np.zeros((ps_FreqSeg, s_Len),dtype=complex)
for i, s_FreqCounter in enumerate(v_FreqAxis):
v_WinFFT = np.zeros(s_Len)
s_StDevSec = (1 / s_FreqCounter) * ps_StDevCycles
v_WinFFT[:s_HalfLen] = np.exp(-0.5*np.power( v_WAxisHalf - (2* np.pi* s_FreqCounter) , 2)*
(s_StDevSec**2))
v_WinFFT = v_WinFFT* np.sqrt(s_Len)/ LA.norm(v_WinFFT, 2)
m_GaborWT[i, :] = np.fft.ifft(v_InputSignalFFT* v_WinFFT)/np.sqrt(s_StDevSec)
return resize(np.abs(m_GaborWT[:, 3000:5000]), (224,224))
def create_extended_sig(wave2000):
#wave2000 = bb
s_len = len(wave2000)
s_halflen = int(np.ceil(s_len/2)) + 1
sig = wave2000
start_win = sig[:s_halflen] - sig[0]
end_win = sig[s_len - s_halflen - 1:] - sig[-1]
start_win = -start_win[::-1] + sig[0]
end_win = -end_win[::-1] + sig[-1]
final_sig = np.concatenate((start_win[:-1],sig, end_win[1:]))
#print(s_halflen, start_win.shape, end_win.shape, sig.shape, final_sig.shape)
if len(final_sig)%2 == 0:
final_sig = final_sig[:-1]
return final_sig
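
# Note (assumption): the signal is extended with point-reflected (odd) copies of its ends so
# the Gabor transform above suffers fewer edge artifacts; the result is forced to odd length.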
def strip_key(key):
key = key.strip()
key = key.replace('EEG', '').strip()
key = key.replace('Ref', '').strip()
key = key.replace('-', '').strip()
key = key.replace('_', ' ').strip()
key = key.split(" ")
if len(key) > 1:
key = key[1]
else:
key = key[0]
return key
def normalized(a, max_ = 2000-11):
c = (max_*(a - np.min(a))/np.ptp(a)).astype(int)
c = c + 5
return c
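
# normalized() maps a signal onto integer pixel coordinates in [5, max_ + 5] so that
# construct_features below can rasterize the waveform onto a canvas (illustrative note).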
def construct_features(raw_signal, length=1000):
#HFO with spike
canvas = np.zeros((2*length, 2*length))
hfo_spike = normalized(raw_signal)
index = np.arange(len(hfo_spike))
for ii in range(3):
canvas[index,hfo_spike-ii] = 256
canvas[index,hfo_spike+ii] = 256
spike_image = resize(canvas, (224, 224))
intensity_image = np.zeros_like(canvas)
intensity_image[index, :] = raw_signal
hfo_image = resize(intensity_image, (224, 224))
return spike_image, hfo_image
def clean_folder(saved_fn):
if not os.path.exists(saved_fn):
#os.mkdir(saved_fn)
os.makedirs(saved_fn)
else:
shutil.rmtree(saved_fn)
os.mkdir(saved_fn)
def parallel_process(array, function, n_jobs=16, use_kwargs=False, front_num=3):
"""
A parallel version of the map function with a progress bar.
Args:
array (array-like): An array to iterate over.
function (function): A python function to apply to the elements of array
n_jobs (int, default=16): The number of cores to use
use_kwargs (boolean, default=False): Whether to consider the elements of array as dictionaries of
keyword arguments to function
front_num (int, default=3): The number of iterations to run serially before kicking off the parallel job.
Useful for catching bugs
Returns:
[function(array[0]), function(array[1]), ...]
"""
#We run the first few iterations serially to catch bugs
if front_num > 0:
front = [function(**a) if use_kwargs else function(a) for a in array[:front_num]]
#If we set n_jobs to 1, just run a list comprehension. This is useful for benchmarking and debugging.
if n_jobs==1:
return front + [function(**a) if use_kwargs else function(a) for a in tqdm(array[front_num:])]
#Assemble the workers
with ProcessPoolExecutor(max_workers=n_jobs) as pool:
#Pass the elements of array into function
if use_kwargs:
futures = [pool.submit(function, **a) for a in array[front_num:]]
else:
futures = [pool.submit(function, a) for a in array[front_num:]]
kwargs = {
'total': len(futures),
'unit': 'it',
'unit_scale': True,
'leave': True
}
#Print out the progress as tasks complete
for f in tqdm(as_completed(futures), **kwargs):
pass
out = []
#Get the results from the futures.
for i, future in tqdm(enumerate(futures)):
try:
out.append(future.result())
except Exception as e:
out.append(e)
return front + out | 1.90625 | 2 |
tests/account_tests.py | katrinaalaimo/pypi-flask | 1 | 12761785 | # More tests would be needed before deployment
from flask import Response
from pypi_org.data.users import User
from pypi_org.viewmodels.account.register_viewmodel import RegisterViewModel
from pypi_org.views.account_views import register_post
from tests.test_client import flask_app
import unittest.mock
def test_register_validation_when_valid():
form_data = {
'name': 'Kat',
'email': '<EMAIL>',
'password': '<PASSWORD>'*3
}
with flask_app.test_request_context(path='/account/register', data=form_data):
vm = RegisterViewModel()
# Avoids database call on register
target = 'pypi_org.services.user_service.find_user_by_email'
with unittest.mock.patch(target, return_value=None):
vm.validate()
assert vm.error is None
def test_register_validation_for_existing_user():
form_data = {
'name': 'Kat',
'email': '<EMAIL>',
        'password': '<PASSWORD>'
}
with flask_app.test_request_context(path='/account/register', data=form_data):
vm = RegisterViewModel()
# Avoids database call on register
target = 'pypi_org.services.user_service.find_user_by_email'
test_user = User(email=form_data.get('email'))
with unittest.mock.patch(target, return_value=test_user):
vm.validate()
assert vm.error is not None
assert 'already exists' in vm.error
def test_register_validation_no_email():
form_data = {
'name': 'Kat',
'email': '',
'password': '<PASSWORD>'*3
}
with flask_app.test_request_context(path='/account/register', data=form_data):
vm = RegisterViewModel()
vm.validate()
assert vm.error is not None
assert 'email' in vm.error
def test_register_validation_view_new_user():
form_data = {
'name': '<PASSWORD>',
'email': '<EMAIL>',
        'password': '<PASSWORD>'
}
target_find_user = 'pypi_org.services.user_service.find_user_by_email'
target_create_user = 'pypi_org.services.user_service.create_user'
find_user = unittest.mock.patch(target_find_user, return_value=None)
create_user = unittest.mock.patch(target_create_user, return_value=User())
request = flask_app.test_request_context(path='/account/register', data=form_data)
with find_user, create_user, request:
resp: Response = register_post()
assert resp.location == '/account'
def test_account_home_no_login(client):
target = 'pypi_org.services.user_service.find_user_by_id'
with unittest.mock.patch(target, return_value=None):
resp: Response = client.get('/account')
assert resp.status_code == 302
assert resp.location == 'http://localhost/account/login'
def test_account_home_with_login(client):
target = 'pypi_org.services.user_service.find_user_by_id'
test_user = User(name='Kat', email='<EMAIL>')
with unittest.mock.patch(target, return_value=test_user):
resp: Response = client.get('/account')
assert resp.status_code == 200
assert b'Kat' in resp.data
| 2.65625 | 3 |
src/django/countries/views.py | sergiohgp/COVID_webscrapping | 0 | 12761786 | from django.shortcuts import render
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from .models import Country, Covid_Cases
import pymongo
from .forms import CountryForm
# Connect to MongoDb
client = pymongo.MongoClient('mongodb://localhost:27017/')
if client:
DB = client['termproject']
# Create your views here.
def country_list(req):
country = Country.objects.all()
covid_cases = Covid_Cases.objects.all()
country_list = []
for i in country:
for j in covid_cases:
if (i.id == j.id):
country_list.append({
'id': i.id,
'name': i.name,
'flag': i.flag,
'area': i.area,
'population': i.population,
'total_cases': j.total_cases,
'cases_milion': str(round((float(1000000) * float(str(j.total_cases).replace(',', '')))/float(str(i.population).replace(',', '')), 2)),
'new_cases': j.new_cases,
'total_deaths': j.total_deaths,
})
context = {
'country_list': country_list
}
return render(req, 'countries/country_list.html', context)
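
# Note: 'cases_milion' above is cases per million inhabitants; the scraped values carry
# thousands separators, hence the str.replace(',', '') before dividing (field name kept as-is).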
def country_edit(req, id):
country = Country.objects.get(id=id)
covid_cases = Covid_Cases.objects.get(id=id)
selected_country = {
'id': id,
'name': country.name,
'flag': country.flag,
'area': country.area,
'population': country.population,
'total_cases': str(covid_cases.total_cases).replace(',', ''),
'new_cases': str(covid_cases.new_cases).replace(',', ''),
'total_deaths': str(covid_cases.total_deaths).replace(',', ''),
}
context = {
'country': selected_country,
}
return render(req, 'countries/edit_country.html', context)
def country_add(req):
return render(req, 'countries/add_country.html')
def country_insert(req):
if req.method == 'POST':
form = CountryForm(req.POST)
if form.is_valid():
id = form.cleaned_data['id']
name = form.cleaned_data['name']
population = form.cleaned_data['population']
area = form.cleaned_data['area']
flag = form.cleaned_data['flag']
total_cases = form.cleaned_data['total_cases']
new_cases = form.cleaned_data['new_cases']
total_deaths = form.cleaned_data['total_deaths']
country_document = {
'id': name + "_" + id,
'name': name,
'population': population,
'area': area,
'flag': flag
}
covid_cases_document = {
'id': name + "_" + id,
'country_name': name,
'total_cases': total_cases,
'new_cases': new_cases,
'total_deaths': total_deaths,
}
col = DB['countries_country']
if col:
document_exists = col.find_one({'id': id})
if document_exists:
col.delete_one({'id': id})
col.update_one(country_document, {
'$set': country_document}, upsert=True)
col = DB['countries_covid_cases']
if col:
document_exists = col.find_one({'id': id})
if document_exists:
col.delete_one({'id': id})
col.update_one(covid_cases_document, {
'$set': covid_cases_document}, upsert=True)
return HttpResponseRedirect('/')
else:
form = CountryForm()
return render(req, 'countries/add_country.html', {'form': form})
def country_update(req):
if req.method == 'POST':
form = CountryForm(req.POST)
if form.is_valid():
id = form.cleaned_data['id']
name = form.cleaned_data['name']
population = form.cleaned_data['population']
area = form.cleaned_data['area']
flag = form.cleaned_data['flag']
total_cases = form.cleaned_data['total_cases']
new_cases = form.cleaned_data['new_cases']
total_deaths = form.cleaned_data['total_deaths']
# Update into DB
country_document = {
'id': id,
'name': name,
'population': population,
'area': area,
'flag': flag
}
covid_cases_document = {
'id': id,
'country_name': name,
'total_cases': total_cases,
'new_cases': new_cases,
'total_deaths': total_deaths,
}
col = DB['countries_country']
if col:
document_exists = col.find_one({'id': id})
if document_exists:
col.delete_one({'id': id})
col.update_one(country_document, {
'$set': country_document}, upsert=True)
col = DB['countries_covid_cases']
if col:
document_exists = col.find_one({'id': id})
if document_exists:
col.delete_one({'id': id})
col.update_one(covid_cases_document, {
'$set': covid_cases_document}, upsert=True)
return HttpResponseRedirect('/')
else:
form = CountryForm()
return render(req, 'countries/edit_country.html', {'form': form})
def country_delete(req, id):
col = DB['countries_country']
if col:
document_exists = col.find_one({'id': id})
if document_exists:
col.delete_one({'id': id})
col = DB['countries_covid_cases']
if col:
document_exists = col.find_one({'id': id})
if document_exists:
col.delete_one({'id': id})
return HttpResponseRedirect('/')
| 2.234375 | 2 |
icevision/tfms/albumentations/albumentations_helpers.py | jerbly/icevision | 0 | 12761787 | <filename>icevision/tfms/albumentations/albumentations_helpers.py<gh_stars>0
__all__ = ["aug_tfms", "resize_and_pad"]
import albumentations as A
from icevision.imports import *
from icevision.core import *
def _resize(size, ratio_resize=A.LongestMaxSize):
return ratio_resize(size) if isinstance(size, int) else A.Resize(*size)
def resize_and_pad(
size: Union[int, Tuple[int, int]],
pad: A.DualTransform = partial(
A.PadIfNeeded, border_mode=cv2.BORDER_CONSTANT, value=[124, 116, 104]
),
):
height, width = (size, size) if isinstance(size, int) else size
return [_resize(size), pad(min_height=height, min_width=width)]
def aug_tfms(
size: Union[int, Tuple[int, int]],
presize: Optional[Union[int, Tuple[int, int]]] = None,
horizontal_flip: Optional[A.HorizontalFlip] = A.HorizontalFlip(),
shift_scale_rotate: Optional[A.ShiftScaleRotate] = A.ShiftScaleRotate(),
rgb_shift: Optional[A.RGBShift] = A.RGBShift(),
lightning: Optional[A.RandomBrightnessContrast] = A.RandomBrightnessContrast(),
blur: Optional[A.Blur] = A.Blur(blur_limit=(1, 3)),
crop_fn: Optional[A.DualTransform] = partial(A.RandomSizedBBoxSafeCrop, p=0.5),
pad: Optional[A.DualTransform] = partial(
A.PadIfNeeded, border_mode=cv2.BORDER_CONSTANT, value=[124, 116, 104]
),
) -> List[A.BasicTransform]:
"""Collection of useful augmentation transforms.
# Arguments
size: The final size of the image. If an `int` is given, the maximum size of
            the image is rescaled, maintaining aspect ratio. If a `tuple` is given,
the image is rescaled to have that exact size (height, width).
        presize: Rescale the image before applying other transforms. If `None` this
transform is not applied. First introduced by fastai,this technique is
explained in their book in [this](https://github.com/fastai/fastbook/blob/master/05_pet_breeds.ipynb)
chapter (tip: search for "Presizing").
horizontal_flip: Flip around the y-axis. If `None` this transform is not applied.
shift_scale_rotate: Randomly shift, scale, and rotate. If `None` this transform
is not applied.
rgb_shift: Randomly shift values for each channel of RGB image. If `None` this
transform is not applied.
lightning: Randomly changes Brightness and Contrast. If `None` this transform
is not applied.
blur: Randomly blur the image. If `None` this transform is not applied.
crop_fn: Randomly crop the image. If `None` this transform is not applied.
Use `partial` to saturate other parameters of the class.
pad: Pad the image to `size`, squaring the image if `size` is an `int`.
            If `None` this transform is not applied. Use `partial` to saturate other
parameters of the class.
# Returns
A list of albumentations transforms.
"""
height, width = (size, size) if isinstance(size, int) else size
tfms = []
tfms += [_resize(presize, A.SmallestMaxSize) if presize is not None else None]
tfms += [horizontal_flip, shift_scale_rotate, rgb_shift, lightning, blur]
    # Apply resize among the last transforms to reduce the number of artificial artifacts created
if crop_fn is not None:
crop = crop_fn(height=height, width=width)
tfms += [A.OneOrOther(crop, _resize(size), p=crop.p)]
else:
tfms += [_resize(size)]
tfms += [pad(min_height=height, min_width=width) if pad is not None else None]
tfms = [tfm for tfm in tfms if tfm is not None]
return tfms
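
# Hypothetical usage sketch (names assumed, not from the original module): wrap the returned
# list with albumentations' Compose to obtain a callable pipeline, e.g.
#   train_tfms = A.Compose(aug_tfms(size=384, presize=512))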
| 2.15625 | 2 |
rack/api/v1/__init__.py | tkaneko0204/rack | 0 | 12761788 | <reponame>tkaneko0204/rack
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
WSGI middleware for RACK API controllers.
"""
from oslo.config import cfg
import routes
from rack.api.v1 import groups
from rack.api.v1 import keypairs
from rack.api.v1 import networks
from rack.api.v1 import processes
from rack.api.v1 import securitygroups
from rack.api import versions
from rack.openstack.common import log as logging
from rack import wsgi as base_wsgi
openstack_client_opts = [
cfg.StrOpt('sql_connection',
help='Valid sql_connection for Rack'),
]
CONF = cfg.CONF
CONF.register_opts(openstack_client_opts)
LOG = logging.getLogger(__name__)
class APIMapper(routes.Mapper):
def routematch(self, url=None, environ=None):
if url == "":
result = self._match("", environ)
return result[0], result[1]
return routes.Mapper.routematch(self, url, environ)
def connect(self, *args, **kargs):
# NOTE(vish): Default the format part of a route to only accept json
# and xml so it doesn't eat all characters after a '.'
# in the url.
kargs.setdefault('requirements', {})
if not kargs['requirements'].get('format'):
kargs['requirements']['format'] = 'json|xml'
return routes.Mapper.connect(self, *args, **kargs)
class APIRouter(base_wsgi.Router):
"""Routes requests on the RACK API to the appropriate controller
and method.
"""
@classmethod
def factory(cls, global_config, **local_config):
"""Simple paste factory, :class:`rack.wsgi.Router` doesn't have one."""
return cls()
def __init__(self):
mapper = APIMapper()
self._setup_routes(mapper)
super(APIRouter, self).__init__(mapper)
def _setup_routes(self, mapper):
versions_resource = versions.create_resource()
mapper.connect("/",
controller=versions_resource,
action="show",
conditions={'method': ['GET']})
mapper.redirect("", "/")
groups_resource = groups.create_resource()
mapper.connect("/groups",
controller=groups_resource,
action="index",
conditions={"method": ["GET"]})
mapper.connect("/groups/{gid}",
controller=groups_resource,
action="show",
conditions={"method": ["GET"]})
mapper.connect("/groups",
controller=groups_resource,
action="create",
conditions={"method": ["POST"]})
mapper.connect("/groups/{gid}",
controller=groups_resource,
action="update",
conditions={"method": ["PUT"]})
mapper.connect("/groups/{gid}",
controller=groups_resource,
action="delete",
conditions={"method": ["DELETE"]})
networks_resource = networks.create_resource()
mapper.connect("/groups/{gid}/networks",
controller=networks_resource,
action="index",
conditions={"method": ["GET"]})
mapper.connect("/groups/{gid}/networks/{network_id}",
controller=networks_resource,
action="show",
conditions={"method": ["GET"]})
mapper.connect("/groups/{gid}/networks",
controller=networks_resource,
action="create",
conditions={"method": ["POST"]})
mapper.connect("/groups/{gid}/networks/{network_id}",
controller=networks_resource,
action="update",
conditions={"method": ["PUT"]})
mapper.connect("/groups/{gid}/networks/{network_id}",
controller=networks_resource,
action="delete",
conditions={"method": ["DELETE"]})
keypairs_resource = keypairs.create_resource()
mapper.connect("/groups/{gid}/keypairs",
controller=keypairs_resource,
action="index",
conditions={"method": ["GET"]})
mapper.connect("/groups/{gid}/keypairs/{keypair_id}",
controller=keypairs_resource,
action="show",
conditions={"method": ["GET"]})
mapper.connect("/groups/{gid}/keypairs",
controller=keypairs_resource,
action="create",
conditions={"method": ["POST"]})
mapper.connect("/groups/{gid}/keypairs/{keypair_id}",
controller=keypairs_resource,
action="update",
conditions={"method": ["PUT"]})
mapper.connect("/groups/{gid}/keypairs/{keypair_id}",
controller=keypairs_resource,
action="delete",
conditions={"method": ["DELETE"]})
securitygroups_resource = securitygroups.create_resource()
mapper.connect("/groups/{gid}/securitygroups",
controller=securitygroups_resource,
action="index",
conditions={"method": ["GET"]})
mapper.connect("/groups/{gid}/securitygroups/{securitygroup_id}",
controller=securitygroups_resource,
action="show",
conditions={"method": ["GET"]})
mapper.connect("/groups/{gid}/securitygroups",
controller=securitygroups_resource,
action="create",
conditions={"method": ["POST"]})
mapper.connect("/groups/{gid}/securitygroups/{securitygroup_id}",
controller=securitygroups_resource,
action="update",
conditions={"method": ["PUT"]})
mapper.connect("/groups/{gid}/securitygroups/{securitygroup_id}",
controller=securitygroups_resource,
action="delete",
conditions={"method": ["DELETE"]})
processes_resource = processes.create_resource()
mapper.connect("/groups/{gid}/processes",
controller=processes_resource,
action="index",
conditions={"method": ["GET"]})
mapper.connect("/groups/{gid}/processes/{pid}",
controller=processes_resource,
action="show",
conditions={"method": ["GET"]})
mapper.connect("/groups/{gid}/processes",
controller=processes_resource,
action="create",
conditions={"method": ["POST"]})
mapper.connect("/groups/{gid}/processes/{pid}",
controller=processes_resource,
action="update",
conditions={"method": ["PUT"]})
mapper.connect("/groups/{gid}/processes/{pid}",
controller=processes_resource,
action="delete",
conditions={"method": ["DELETE"]})
# RACK proxy resources
mapper.connect("/groups/{gid}/proxy",
controller=processes_resource,
action="show_proxy",
conditions={"method": ["GET"]})
mapper.connect("/groups/{gid}/proxy",
controller=processes_resource,
action="create_proxy",
conditions={"method": ["POST"]})
mapper.connect("/groups/{gid}/proxy",
controller=processes_resource,
action="update_proxy",
conditions={"method": ["PUT"]})
| 1.867188 | 2 |
Data_src/Family_Top_Hits.py | WooMichael/BLAST_HMMER_Cross_Validation | 0 | 12761789 | <gh_stars>0
from Bio import SeqIO
import pandas as pd
import os
import numpy as np
# Global Variables
all_kap_sequence_path = '../Data/Metadata/All_Kaplan_Sequences.fa'
seq_kap_path = "../Data/Metadata/Experimental_Sequences_Kaplan/"
top_result_path = "../Data/Family_Search_Data/Top_Hits_Scans/Scanned_Results.csv"
scan_path = "../Data/Family_Search_Data/MSA_Scans_From_nhmmerscan/"
counter = 0
top_hits_txt_list = os.listdir(scan_path)
final_tuple = []
# Getting the path for the output of each sequence
for name in top_hits_txt_list:
path = scan_path + name
with open(path, "r") as file1:
line = file1.readlines()
arr = np.array(line)
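        # Row 3 of the nhmmerscan tabular output appears to hold the top hit (rows 1-2 are header lines)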
arr = arr[2:3]
array = str(arr).split()
new_array = np.array(array)
if (len(new_array) > 1):
query_name = new_array[2]
top_hit = new_array[0]
e_score = new_array[12]
bit_score = new_array[13]
query_name = str(query_name).strip("'[]")
top_hit = str(top_hit).strip("'[]")
e_score = str(e_score).strip("'[]")
bit_score = str(bit_score).strip("'[]")
# print(query_name, top_hit, e_score, bit_score)
final_tuple.append(query_name + "," + top_hit + "," + e_score + "," + bit_score + ",")
else:
pass
with open("Family_Top_Hits.csv", "w") as file:
file.write("Query Name,Top Hit,E-Value,Bit Score,Sequence Count,Total As,Total Ts,Total Cs,Total Gs,Total Ns \n")
wp = "Family_Top_Hits.csv"
with open(top_result_path, "r") as file2:
df1 = pd.read_csv(file2)
# print(df1.head())
for record in SeqIO.parse(all_kap_sequence_path, "fasta"):
path_to_save = seq_kap_path + record.id + ".fa"
# SeqIO.write(record, path_to_save, "fasta")
counter += 1
total_a = 0
total_t = 0
total_c = 0
total_g = 0
total_n = 0
for letter in record.seq:
if (letter == 'A'):
total_a += 1
elif (letter == 'T'):
total_t += 1
elif (letter == 'C'):
total_c += 1
elif (letter == 'G'):
total_g += 1
elif (letter == 'N'):
total_n += 1
for group in final_tuple:
group = group.split(",")[0:4]
if (record.id == group[0]):
group.append(str(len(record.seq)))
group.append(str(total_a))
group.append(str(total_t))
group.append(str(total_c))
group.append(str(total_g))
group.append(str(total_n))
group = str(group)
group = group.replace("'", "").strip("[]")
print(group)
with open("Family_Top_Hits.csv", "a") as file:
file.write(group + '\n')
print("Seperation Complete...")
print("you have seperated this many sequences : " + str(counter)) | 2.46875 | 2 |
src/urban_meal_delivery/__init__.py | webartifex/urban-meal-delivery | 1 | 12761790 | <reponame>webartifex/urban-meal-delivery
"""Source code for the urban-meal-delivery research project.
Example:
>>> import urban_meal_delivery as umd
>>> umd.__version__ != '0.0.0'
True
"""
# The config object must come before all other project-internal imports.
from urban_meal_delivery.configuration import config # isort:skip
from importlib import metadata as _metadata
from urban_meal_delivery import db
from urban_meal_delivery import forecasts
try:
_pkg_info = _metadata.metadata(__name__)
except _metadata.PackageNotFoundError: # pragma: no cover
__author__ = 'unknown'
__pkg_name__ = 'unknown'
__version__ = 'unknown'
else:
__author__ = _pkg_info['author']
__pkg_name__ = _pkg_info['name']
__version__ = _pkg_info['version']
| 1.601563 | 2 |
download.py | tuomoko/tek-notes | 0 | 12761791 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
u"""Downloads notes from the TEK web.
@author: <NAME>
"""
import requests
from bs4 import BeautifulSoup
import os.path
import wget
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
url_root = 'https://teknet.tek.fi/arkisto.lehti/content/'
url = url_root+'ack-vad-gul.html'
r = requests.get(url, verify=False)
parsed_html = BeautifulSoup(r.text, 'lxml')
songs = parsed_html.body.find('ul', attrs={'class': 'blog-list'})
song_urls = songs.find_all('a')
urls = map(lambda x: x.get('href'), song_urls)
# names = map(lambda x: x.string, song_urls)
for song_url in urls:
correct_url = song_url.replace("a%CC%88", "%C3%A4")
correct_url = correct_url.replace("o%CC%88", "%C3%B6")
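    # The site percent-encodes combining diaereses; fold them into precomposed ä/ö before requesting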
r_song = requests.get(url_root+correct_url, verify=False)
parsed_html = BeautifulSoup(r_song.text, 'lxml')
content = parsed_html.body.find('div', attrs={'class': 'node'})
if content:
imgs = content.find_all('img')
img_urls = map(lambda x: x.get('src'), imgs)
for img_url in img_urls:
filename = img_url[img_url.rfind("/")+1:]
if not os.path.isfile(filename):
wget.download(url_root+img_url)
| 2.890625 | 3 |
Dangerous/Weevely/modules/file/upload.py | JeyZeta/Dangerous- | 0 | 12761792 | '''
Created on 23/set/2011
@author: norby
'''
from core.moduleguess import ModuleGuess
from core.moduleexception import ModuleException, ExecutionException, ProbeException, ProbeSucceed
from core.http.cmdrequest import CmdRequest, NoDataException
from random import choice
from hashlib import md5
from core.argparse import ArgumentParser
from core.argparse import SUPPRESS
from core.utils import b64_chunks
from base64 import b64encode
WARN_FILE_EXISTS = 'File exists'
WARN_NO_SUCH_FILE = 'No such file or permission denied'
WARN_MD5_MISMATCH = 'MD5 hash mismatch'
WARN_UPLOAD_FAIL = 'Upload fail, check path and permission'
class Upload(ModuleGuess):
'''Upload binary/ascii file into remote filesystem'''
def _set_vectors(self):
self.vectors.add_vector('file_put_contents', 'shell.php', [ "file_put_contents('$rpath', base64_decode($_POST['$post_field']), FILE_APPEND);", "-post", "{\'$post_field\' : \'$data\' }" ])
self.vectors.add_vector('fwrite', 'shell.php', [ '$h = fopen("$rpath", "a+"); fwrite($h, base64_decode($_POST["$post_field"])); fclose($h);', "-post", "{\'$post_field\' : \'$data\' }" ])
self.support_vectors.add_vector("rm", 'file.rm', "$rpath -recursive".split(' '))
self.support_vectors.add_vector("check_exists", 'file.check', "$rpath exists".split(' '))
self.support_vectors.add_vector('md5', 'file.check', '$rpath md5'.split(' '))
self.support_vectors.add_vector('clear', 'shell.php', "file_put_contents('$rpath', '');" )
def _set_args(self):
self.argparser.add_argument('lpath')
self.argparser.add_argument('rpath')
self.argparser.add_argument('-chunksize', type=int, default=1024)
self.argparser.add_argument('-content', help=SUPPRESS)
self.argparser.add_argument('-vector', choices = self.vectors.keys()),
self.argparser.add_argument('-force', action='store_true')
def _load_local_file(self):
if not self.args['content']:
try:
local_file = open(self.args['lpath'], 'r')
except Exception, e:
raise ProbeException(self.name, '\'%s\' %s' % (self.args['lpath'], WARN_NO_SUCH_FILE))
self.args['content'] = local_file.read()
local_file.close()
self.args['content_md5'] = md5(self.args['content']).hexdigest()
self.args['content_chunks'] = self.__chunkify(self.args['content'], self.args['chunksize'])
self.args['post_field'] = ''.join([choice('abcdefghijklmnopqrstuvwxyz') for i in xrange(4)])
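        # A random 4-letter POST field name - presumably to vary the request signature between uploads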
def _check_remote_file(self):
if self.support_vectors.get('check_exists').execute({'rpath' : self.args['rpath']}):
if not self.args['force']:
raise ProbeException(self.name, '%s. Overwrite \'%s\' using -force option.' % (WARN_FILE_EXISTS, self.args['rpath']))
else:
self.support_vectors.get('clear').execute({'rpath' : self.args['rpath']})
def _prepare(self):
self._load_local_file()
self._check_remote_file()
def _execute_vector(self):
self._result = False
i=1
for chunk in self.args['content_chunks']:
formatted_args = { 'rpath' : self.args['rpath'], 'post_field' : self.args['post_field'], 'data' : chunk }
self.current_vector.execute( formatted_args)
i+=1
def _verify_vector_execution(self):
if self.support_vectors.get('check_exists').execute({'rpath' : self.args['rpath']}):
if self.support_vectors.get('md5').execute({'rpath' : self.args['rpath']}) == self.args['content_md5']:
self._result = True
raise ProbeSucceed(self.name, 'File uploaded')
else:
self.mprint('\'%s\' %s' % (self.args['rpath'], WARN_MD5_MISMATCH))
def _verify(self):
if not self.support_vectors.get('check_exists').execute({'rpath' : self.args['rpath']}):
raise ProbeException(self.name, '\'%s\' %s' % (self.args['rpath'], WARN_UPLOAD_FAIL))
def __chunkify(self, file_content, chunksize):
content_len = len(file_content)
if content_len > chunksize:
content_chunks = b64_chunks(file_content, chunksize)
else:
content_chunks = [ b64encode(file_content) ]
numchunks = len(content_chunks)
if numchunks > 20:
self.mprint('Warning: uploading %iB in %i chunks of %sB. Increase chunk size with option \'-chunksize\' to reduce upload time' % (content_len, numchunks, self.args['chunksize']) )
return content_chunks
| 2.078125 | 2 |
scripts/08.py | hstern2/pet-fish-eric | 0 | 12761793 | <gh_stars>0
#!/usr/bin/env python3
### if statements
#a = 38
#if a == 37: # two equals signs for comparison (one for assignment)
# print('yes, a is equal to 37')
# print('more stuff')
#if a != 38: print('no, a is not equal to 38')
#a = 14
#if a > 10:
# print('bigger than 10')
# print('yes')
# if a > 20:
# print('really big')
# print('first level')
#print('always')
### if-then-else
#a = 4
#if a > 10:
# print('bigger than 10')
#else:
# print('less than or equal to 10\n')
### if-then-else with different branches
#a = 8
#if a < 5:
# print('smaller than 5')
#elif a < 10:
# print('smaller than 10')
#elif a < 15:
# print('smaller than 15')
### ternary operator
#a = 200
#print(40+(2*a if a > 100 else 0.5*a))
| 4.15625 | 4 |
toph_copycat.py | novojitdas/PythonProblemsSolutions | 0 | 12761794 | <reponame>novojitdas/PythonProblemsSolutions
value = input("Enter Input:")
print(value)
| 2.984375 | 3 |
A02_DjangoTest/smtest/views.py | oldinaction/smpython | 0 | 12761795 | import json
from django.shortcuts import render, HttpResponse
from django.views import View
# FBV
def users(request):
# if request.method == "GET":
# pass
user_list = ['smalle', 'aezocn']
return HttpResponse(json.dumps(user_list))
class MyBaseView(object):
    # Works like a decorator (an interceptor around dispatch)
def dispatch(self, request, *args, **kwargs):
print('before...')
        # MyBaseView itself defines no parent dispatch, so the lookup continues along self's (StudentsView's) MRO
ret = super(MyBaseView, self).dispatch(request, *args, **kwargs)
print('end...')
return ret
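
# With StudentsView(MyBaseView, View), Python's MRO resolves dispatch on MyBaseView first;
# its super() call reaches View.dispatch, which routes the request to get/post/put/delete below.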
# CBV: automatically dispatches to the handler matching the HTTP method
class StudentsView(MyBaseView, View):  # Multiple inheritance (left-to-right priority): look up attributes/methods on self -> leftmost parent -> next parent -> ...
# def dispatch(self, request, *args, **kwargs):
    #     # Fetch the matching handler via reflection (the parent View's dispatch is implemented the same way)
# fun = getattr(self, request.method.lower())
# return fun(request, *args, **kwargs)
def get(self,request,*args,**kwargs):
print('get...')
return HttpResponse('GET...')
def post(self,request,*args,**kwargs):
return HttpResponse('POST...')
def put(self,request,*args,**kwargs):
return HttpResponse('PUT...')
def delete(self,request,*args,**kwargs):
return HttpResponse('DELETE...')
# ################# contenttypes ####################
from . import models
def test_contenttypes_create(request):
'''创建数据'''
banner = models.BannerImage.objects.filter(name='home').first()
models.Image.objects.create(path='home1.jpg', content_object=banner)
models.Image.objects.create(path='home2.jpg', content_object=banner)
models.Image.objects.create(path='home3.jpg', content_object=banner)
return HttpResponse('test_contenttypes_create...')
def test_contenttypes_list(request):
'''查询数据'''
banner = models.BannerImage.objects.filter(id=1).first()
image_list = banner.image_list.all()
# <QuerySet [<Image: Image object (1)>, <Image: Image object (2)>, <Image: Image object (3)>]>
print(image_list)
return HttpResponse('test_contenttypes_list...') | 2.265625 | 2 |
jubakit/_cli/service/bandit.py | vishalbelsare/jubakit | 12 | 12761796 | <reponame>vishalbelsare/jubakit
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from jubatus.bandit.types import *
from .generic import GenericCLI
from ..args import Arguments
from ..util import *
from ..._stdio import print
class BanditCLI(GenericCLI):
@classmethod
def _name(cls):
return 'bandit'
@Arguments(str)
def do_register_arm(self, arm_id):
"""Syntax: register_arm arm_id
Adds the specified arm.
"""
self.client.register_arm(arm_id)
@Arguments(str)
def do_delete_arm(self, arm_id):
"""Syntax: delete_arm arm_id
Deletes the specified arm.
"""
self.client.delete_arm(arm_id)
@Arguments(str)
def do_select_arm(self, player_id):
"""Syntax: select_arm player_id
Select the specified arm and return the next best guess.
"""
print(self.client.select_arm(player_id))
@Arguments(str, str, float)
def do_register_reward(self, player_id, arm_id, reward):
"""Syntax: register_reward player_id arm_id reward
Registers the reward for the specified player and ID.
"""
print(self.client.register_reward(player_id, arm_id, reward))
@Arguments(str)
def do_get_arm_info(self, player_id):
"""Syntax: get_arm_info player_id
Returns the arm info for the specified ID.
"""
print(self.client.get_arm_info(player_id))
@Arguments(str)
def do_reset(self, player_id):
"""Syntax: reset player_id
Resets the specified player record.
"""
print(self.client.reset(player_id))
| 2.40625 | 2 |
fluids/assets/crosswalk.py | BerkeleyAutomation/FLUIDS | 26 | 12761797 | <reponame>BerkeleyAutomation/FLUIDS
import numpy as np
from fluids.assets.shape import Shape
from fluids.assets.waypoint import Waypoint
class CrossWalk(Shape):
def __init__(self, start_wps=[], end_wps=[], **kwargs):
Shape.__init__(self, color=(0xf1, 0xf4, 0xf5), **kwargs)
point0 = (self.points[2] + self.points[3]) / 2
point1 = (self.points[0] + self.points[1]) / 2
if len(start_wps) and len(end_wps):
self.start_waypoints = start_wps
self.end_waypoints = end_wps
else:
self.start_waypoints = [Waypoint(point0[0],
point0[1],
owner=self,
ydim=5,
angle=self.angle),
Waypoint(point1[0],
point1[1],
owner=self,
ydim=5,
angle=self.angle + np.pi)]
self.end_waypoints = [Waypoint(point1[0],
point1[1],
owner=self,
ydim=5,
angle=self.angle),
Waypoint(point0[0],
point0[1],
owner=self,
ydim=5,
angle=self.angle + np.pi)]
self.start_waypoints[0].nxt = [self.end_waypoints[0]]
self.start_waypoints[1].nxt = [self.end_waypoints[1]]
| 2.578125 | 3 |
solutions/Valid Sudoku/solution.py | nilax97/leetcode-solutions | 3 | 12761798 | class Solution:
def isValidSudoku(self, board: List[List[str]]) -> bool:
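        # One pass per index i validates row i, column i, and box i together;
        # for box i, (i % 3, i // 3) picks the 3x3 band and j walks its 9 cells.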
        for i in range(9):
            check_r = dict()
            check_c = dict()
            check_b = dict()
            for j in range(9):
                r = board[i][j]
                c = board[j][i]
                b = board[3 * (i % 3) + j // 3][3 * (i // 3) + j % 3]
                if r != ".":
                    if r in check_r:
                        return False
                    check_r[r] = 1
                if c != ".":
                    if c in check_c:
                        return False
                    check_c[c] = 1
                if b != ".":
                    if b in check_b:
                        return False
                    check_b[b] = 1
        return True
| 3.53125 | 4 |
whyqd/action/assign_category_booleans.py | whythawk/whyqd | 17 | 12761799 | <filename>whyqd/action/assign_category_booleans.py<gh_stars>10-100
from __future__ import annotations
from typing import Dict
from whyqd.base import BaseCategoryAction
class Action(BaseCategoryAction):
"""`CATEGORISE` support function which must be run *before* it to derive boolean category terms from
values in a source data column.
Scripts must be 'flat' and are of the form::
"ASSIGN_CATEGORY_BOOLEANS > 'destination_field'::bool < 'source_column'"
Where:
* `destination_field` is a `FieldModel` and is the destination column. The `::` linked `CategoryModel` defines
what term the source values are to be assigned.
* Values from the `source_column` `ColumnModel` are treated as boolean `True` or `False`, defined by `::bool`.
"""
def __init__(self) -> None:
super().__init__()
self.name = "ASSIGN_CATEGORY_BOOLEANS"
self.title = "Assign category booleans"
self.description = "Assign values in a source data column as categorical boolean terms based on whether values are present, or are null."
self.structure = "boolean"
def parse(self, script: str) -> Dict[str, str]:
"""Validates term requirements for this category action script.
Script is of the form::
"ACTION > 'destination_column'::term < 'source_column'"
Which is inherited as::
{
"action": ACTION,
"destination": 'destination_column',
"category": term,
"source": 'source_column',
"source_category": [term]
}
Parameters
----------
script: str
An action script.
Raises
------
ValueError for any parsing errors.
Returns
-------
dict
Parsed dictionary of validated split strings for further processing.
"""
parsed = super().parse(script)
# Class-based term validation
if parsed["action"] != "ASSIGN_CATEGORY_BOOLEANS":
raise ValueError(f"Action not valid for 'ASSIGN_CATEGORY_BOOLEANS' parser ({parsed['action']}).")
if parsed.get("source_category"):
raise ValueError("'ASSIGN_CATEGORY_BOOLEANS' category assignment does not need unique references.")
return parsed
| 2.78125 | 3 |
sc-kpm/python/common/__init__.py | MaxGavr/sc-machine | 0 | 12761800 | from .sc_keynodes import ScKeynodes
from .sc_module import ScModule
from .sc_exception import *
from .sc_event import ScEventManager | 1.054688 | 1 |
soda/scientific/soda/scientific/anomaly_detection/anomaly_detector.py | sodadata/soda-core | 4 | 12761801 | <reponame>sodadata/soda-core<filename>soda/scientific/soda/scientific/anomaly_detection/anomaly_detector.py<gh_stars>1-10
import datetime
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional, Tuple
import pandas as pd
import yaml
from pydantic import BaseModel, validator
from soda.common.logs import Logs
from soda.scientific.anomaly_detection.feedback_processor import FeedbackProcessor
from soda.scientific.anomaly_detection.models.prophet_model import (
FreqDetectionResult,
ProphetDetector,
)
class UserFeedback(BaseModel):
"""Validation model for user feedback data dict in payload."""
isCorrectlyClassified: Optional[bool] = None
isAnomaly: Optional[bool] = None
reason: Optional[str] = None
freeTextReason: Optional[str] = None
skipMeasurements: Optional[str] = None
@validator("skipMeasurements")
def check_accepted_values_skip_measurements(cls, v):
accepted_values = ["this", "previous", "previousAndThis", None]
assert v in accepted_values, f"skip_measurements must be one of {accepted_values}, but '{v}' was provided."
return v
class SeverityLevelAreas(BaseModel):
"""Validates severity levels dicts."""
greaterThanOrEqual: Optional[float] = None
lessThanOrEqual: Optional[float] = None
class AnomalyDiagnostics(BaseModel):
value: Optional[float] = None
fail: Optional[SeverityLevelAreas] = None
warn: Optional[SeverityLevelAreas] = None
anomalyProbability: Optional[float] = None
anomalyPredictedValue: Optional[float] = None
anomalyErrorSeverity: str = "pass"
anomalyErrorCode: str = ""
feedback: Optional[UserFeedback] = UserFeedback()
class LocationModel(BaseModel):
filePath: Optional[str] = None
line: Optional[int] = None
col: Optional[int] = None
# some of those fields might end up being ignored down the line by ADS
class AnomalyResult(BaseModel):
identity: Optional[str] = None
measurementId: Optional[str] = None
type: Optional[str] = None
definition: Optional[str] = None
location: LocationModel = LocationModel()
metrics: Optional[List[str]] = None
dataSource: Optional[str] = None
table: Optional[str] = None
partition: Optional[str] = None
column: Optional[str] = None
outcome: Optional[str] = None
diagnostics: AnomalyDiagnostics = AnomalyDiagnostics()
class AnomalyHistoricalCheckResults(BaseModel):
results: List[AnomalyResult]
class AnomalyHistoricalMeasurement(BaseModel):
id: str
identity: str
value: float
dataTime: datetime.datetime
class AnomalyHistoricalMeasurements(BaseModel):
results: List[AnomalyHistoricalMeasurement]
class AnomalyDetector:
def __init__(self, measurements, check_results, logs: Logs):
self._logs = logs
self.df_measurements = self._parse_historical_measurements(measurements)
self.df_check_results = self._parse_historical_check_results(check_results)
self.params = self._parse_params()
def evaluate(self) -> Tuple[str, Dict[str, Any]]:
df_historic = self._convert_to_well_shaped_df()
feedback = FeedbackProcessor(params=self.params, df_historic=df_historic, logs=self._logs)
feedback.run()
detector = ProphetDetector(
logs=self._logs,
params=self.params,
time_series_data=feedback.df_feedback_processed,
has_exegonenous_regressor=feedback.has_exegonenous_regressor,
)
df_anomalies = detector.run()
level, diagnostics = self._parse_output(df_anomalies, detector.freq_detection_result)
return level, diagnostics
@staticmethod
def _parse_historical_measurements(measurements: Dict[str, List[Dict[str, Any]]]) -> pd.DataFrame:
if measurements:
parsed_measurements = AnomalyHistoricalMeasurements.parse_obj(measurements)
_df_measurements = pd.DataFrame.from_dict(parsed_measurements.dict()["results"])
return _df_measurements
else:
raise ValueError("No historical measurements found.")
def _parse_historical_check_results(self, check_results: Dict[str, List[Dict[str, Any]]]) -> pd.DataFrame:
if check_results.get("results"):
parsed_check_results = AnomalyHistoricalCheckResults.parse_obj(check_results)
_df_check_results = pd.DataFrame.from_dict(parsed_check_results.dict()["results"])
return _df_check_results
else:
self._logs.debug(
"No past check results found. This could be because there are no past runs of "
"Anomaly Detection for this check yet."
)
parsed_check_results = AnomalyHistoricalCheckResults(results=[AnomalyResult()])
_df_measurements = pd.DataFrame.from_dict(parsed_check_results.dict()["results"])
return _df_measurements
def _convert_to_well_shaped_df(self) -> pd.DataFrame:
if not self.df_check_results.empty:
self._logs.debug("Got test results from data request. Merging it with the measurements")
df = self.df_measurements.merge(
self.df_check_results,
how="left",
left_on="id",
right_on="measurementId",
suffixes=("", "_tr"),
)
else:
df = self.df_measurements.copy()
# Flatten diagnostics dictionary
if "diagnostics" in df.columns:
df_flattened = self.flatten_df(df.copy(), "diagnostics")
column_maps = self.params["request_params"]["columns_mapping"]
df_flattened = df_flattened[df_flattened.columns[df_flattened.columns.isin(list(column_maps.keys()))]]
df_flattened = df_flattened.rename(columns=column_maps) # type: ignore
df_flattened["ds"] = pd.to_datetime(df_flattened["ds"]) # type: ignore
df_flattened["ds"] = df_flattened["ds"].dt.tz_localize(None)
return df_flattened
@staticmethod
def flatten_df(df: pd.DataFrame, target_col_name: str) -> pd.DataFrame:
assert isinstance(df, pd.DataFrame)
assert not df.empty
df[target_col_name] = df[target_col_name].apply(lambda x: {} if pd.isnull(x) else x)
target_array_to_flatten = list(df[target_col_name].values)
df_flattened = pd.DataFrame.from_dict(target_array_to_flatten) # type: ignore
df_joined = pd.merge(
df,
df_flattened,
left_index=True,
right_index=True,
suffixes=("", "_diag"),
)
return df_joined
def _parse_params(self) -> Dict[str, Any]:
try:
this_dir = Path(__file__).parent.resolve()
config_file = this_dir.joinpath("detector_config.yaml")
# Read detector configuration
with open(config_file) as stream:
loaded_config = yaml.safe_load(stream)
# Manipulate configuration
loaded_config["response_params"]["output_columns"] = self._replace_none_values_by_key(
loaded_config["response_params"]["output_columns"]
)
loaded_config["feedback_processor_params"]["output_columns"] = self._replace_none_values_by_key(
loaded_config["feedback_processor_params"]["output_columns"]
)
self._logs.debug(f"Config parsed {loaded_config}")
return loaded_config
except Exception as e:
self._logs.error(e)
raise e
@staticmethod
def _replace_none_values_by_key(dct: Dict[str, Any]) -> Mapping[str, Any]:
result = {}
for key, value in dct.items():
if value is None:
value = key
result[key] = value
return result
@staticmethod
def _parse_output(
df_anomalies: pd.DataFrame, freq_detection_result: FreqDetectionResult
) -> Tuple[str, Dict[str, Any]]:
if not df_anomalies.empty:
results_dict = df_anomalies.to_dict(orient="records")[0]
level = results_dict["level"]
diagnostics = {
"value": results_dict["real_data"],
"warn": {
"greaterThanOrEqual": results_dict["warning_greater_than_or_equal"],
"lessThanOrEqual": results_dict["warning_lower_than_or_equal"],
},
"fail": {
"greaterThanOrEqual": results_dict["critical_greater_than_or_equal"],
"lessThanOrEqual": results_dict["critical_lower_than_or_equal"],
},
"anomalyProbability": results_dict["anomaly_probability"],
"anomalyPredictedValue": results_dict["trend"],
"anomalyErrorSeverity": freq_detection_result.error_severity,
"anomalyErrorCode": freq_detection_result.error_code,
}
else:
level = "pass"
diagnostics = {
"value": None,
"warn": None,
"fail": None,
"anomalyProbability": None,
"anomalyPredictedValue": None,
"anomalyErrorSeverity": freq_detection_result.error_severity,
"anomalyErrorCode": freq_detection_result.error_code,
}
diagnostics_dict: Dict[str, Any] = AnomalyDiagnostics.parse_obj(diagnostics).dict()
return level, diagnostics_dict
| 2.234375 | 2 |
terra/projects/migrations/0008_auto_20190222_1212.py | dymaxionlabs/platform | 0 | 12761802 | <reponame>dymaxionlabs/platform
# Generated by Django 2.1.7 on 2019-02-22 12:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0007_auto_20190222_0155'),
]
operations = [
migrations.RemoveField(
model_name='layer',
name='collection',
),
migrations.RemoveField(
model_name='map',
name='layer_collections',
),
migrations.AddField(
model_name='map',
name='layers',
field=models.ManyToManyField(to='projects.Layer'),
),
migrations.DeleteModel(
name='LayerCollection',
),
]
| 1.710938 | 2 |
pacote-download/ex(1-100)/ex054.py | gssouza2051/python-exercicios | 0 | 12761803 | <filename>pacote-download/ex(1-100)/ex054.py
'''Write a program that reads the year of birth of seven people.
At the end, show how many have not yet reached the age of majority and how many already have.'''
from datetime import date
atual = date.today().year
totmaior = 0
totmenor = 0
for c in range(1, 8):
    n = int(input('In what year were you born? '))
    idade = atual - n
    if idade >= 21:
        totmaior += 1
    else:
        totmenor += 1
print('In total we had {} people of legal age'.format(totmaior))
print('And we also had {} underage people'.format(totmenor)) | 3.59375 | 4 |
mindspore/communication/__init__.py | TommyLike/mindspore | 1 | 12761804 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Collective communication interface.
"""
from .management import GlobalComm, init, release, get_rank, get_group_size, get_world_rank_from_group_rank, \
get_group_rank_from_world_rank, create_group, HCCL_WORLD_COMM_GROUP, NCCL_WORLD_COMM_GROUP, get_group, \
get_local_rank, get_local_rank_size, destroy_group
__all__ = [
"GlobalComm", "init", "release", "get_rank", "get_group_size", "get_world_rank_from_group_rank",
"get_group_rank_from_world_rank", "create_group", "HCCL_WORLD_COMM_GROUP", "NCCL_WORLD_COMM_GROUP", "get_group",
"get_local_rank", "get_local_rank_size", "destroy_group"
]
| 1.289063 | 1 |
examples/particle/aws/dynamodb/use_dynamodb/example_dynamodb_table.py | pmbrent/Particle-Cloud-Framework | 46 | 12761805 | from pcf.core import State
from pcf.particle.aws.dynamodb.dynamodb_table import DynamoDB
#example dynamodb
dynamodb_example_json = {
"pcf_name": "pcf_dynamodb", # Required
"flavor": "dynamodb_table", # Required
"aws_resource": {
# Refer to https://boto3.readthedocs.io/en/latest/reference/services/dynamodb.html#DynamoDB.Client.create_table for a full list of parameters
"AttributeDefinitions": [
{
"AttributeName": "Post",
"AttributeType": "S"
},
{
"AttributeName": "PostDateTime",
"AttributeType": "S"
},
],
"TableName": "pcf_test_table",
"KeySchema": [
{
"AttributeName": "Post",
"KeyType": "HASH"
},
{
"AttributeName": "PostDateTime",
"KeyType": "RANGE"
}
],
"LocalSecondaryIndexes": [
{
"IndexName": "LastPostIndex",
"KeySchema": [
{
"AttributeName": "Post",
"KeyType": "HASH"
},
{
"AttributeName": "PostDateTime",
"KeyType": "RANGE"
}
],
"Projection": {
"ProjectionType": "KEYS_ONLY"
}
}
],
"ProvisionedThroughput": {
"ReadCapacityUnits": 10,
"WriteCapacityUnits": 10
},
"Tags": [
{
"Key": "Name",
"Value": "pcf-dynamodb-test"
}
]
}
}
# create dynamodb particle using json
dynamodb_particle = DynamoDB(dynamodb_example_json)
# example start
dynamodb_particle.set_desired_state(State.running)
dynamodb_particle.apply()
print(dynamodb_particle.get_state())
print(dynamodb_particle.get_current_state_definition())
# example update
dynamodb_example_json["aws_resource"]["ProvisionedThroughput"] = {"ReadCapacityUnits": 25, "WriteCapacityUnits": 30}
dynamodb_particle = DynamoDB(dynamodb_example_json)
dynamodb_particle.set_desired_state(State.running)
dynamodb_particle.apply()
# example item
key_value = {
"Post": {
"S": "adding post to table"
},
"PostDateTime": {
"S": "201807031301"
}
}
# example put item
dynamodb_particle.put_item(key_value)
# example get item
print(dynamodb_particle.get_item(key_value))
# example delete item
print(dynamodb_particle.delete_item(key_value))
print(dynamodb_particle.get_state())
print(dynamodb_particle.get_current_state_definition())
# example terminate
dynamodb_particle.set_desired_state(State.terminated)
dynamodb_particle.apply()
print(dynamodb_particle.get_state())
| 2.296875 | 2 |
core/controllers/brutescan.py | yizhimanpadewoniu/webdirfuzz | 0 | 12761806 | <reponame>yizhimanpadewoniu/webdirfuzz
#!/usr/bin/env python
# coding=utf-8
import time
import string
from comm.request import Req
from conf.settings import DICT_PATH
from core.data import result
from core.data import fuzz_urls
class BruteScan(Req):
"""
    Brute-force dictionary scan; URLs found to exist are queued for further fuzzing
"""
def __init__(self, site, timeout, delay, threads, ext):
super(BruteScan, self).__init__(site, timeout, delay, threads)
self.to_brute_urls = []
self.bruted_urls = []
self.ext = ext
self.threads = threads
def load_sen_dict(self):
with open(DICT_PATH+'/brute.txt', 'r') as f:
return f.readlines()
def gen_dict(self):
for path in self.load_sen_dict():
path = string.strip(path)
            if '[EXT]' in path:
path = path.replace('[EXT]', self.ext)
url = self.site_parse[0]+'://'+self.site_parse[1]+'/'+path
self.to_brute_urls.append(url)
def brute(self, url):
if self.get_is_vul(url):
self.bruted_urls.append(url)
fuzz_urls.put(url)
def start(self):
self.gen_dict()
print '[%s] Start Brute Scan ...' % time.strftime('%H:%M:%S')
for url in self.to_brute_urls:
self.pool.spawn(self.brute, url)
self.pool.joinall()
print '[%s] Stop Brute Scan!' % time.strftime('%H:%M:%S')
print '[%s] %s Founded' % (time.strftime('%H:%M:%S'), len(self.bruted_urls))
result.brute = self.bruted_urls | 2.671875 | 3 |
tests/test_script_metrics.py | prorevizor/noc | 84 | 12761807 | # ----------------------------------------------------------------------
# noc.core.script.metrics tests
# ----------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Third-party modules
import pytest
# NOC modules
from noc.core.script.metrics import (
percent,
percent_invert,
percent_usage,
convert_percent_str,
sum,
subtract,
is1,
invert0,
scale,
)
@pytest.mark.parametrize(
"value,total,expected",
[
(10.0, 0, 100.0),
(10.0, None, 100.0),
(1.0, 10.0, 10.0),
(5.0, 10.0, 50.0),
(9.0, 10.0, 90.0),
(10.0, 10.0, 100.0),
],
)
def test_percent(value, total, expected):
assert percent(value, total) == expected
@pytest.mark.parametrize(
"value,total,expected",
[
(10.0, 0, 100.0),
(10.0, None, 100.0),
(1.0, 9.0, 10.0),
(5.0, 5.0, 50.0),
(9.0, 0.0, 100.0),
(10.0, 10.0, 50.0),
],
)
def test_percent_usage(value, total, expected):
assert percent_usage(value, total) == expected
@pytest.mark.parametrize(
"value,total,expected",
[
(10.0, 0, 100.0),
(10.0, None, 100.0),
(1.0, 10.0, 90.0),
(5.0, 10.0, 50.0),
(9.0, 10.0, 10.0),
(10.0, 10.0, 0.0),
],
)
def test_percent_invert(value, total, expected):
assert percent_invert(value, total) == expected
@pytest.mark.parametrize(
"value,expected", [("09%", 9.0), ("09% ", 9.0), ("09", 9.0), ("10%", 10.0), (None, 0)]
)
def test_convert_percent_str(value, expected):
assert convert_percent_str(value) == expected
@pytest.mark.parametrize(
"values,expected", [((1.0,), 1.0), ((1.0, 2.0), 3.0), ((1.0, 2.0, 3.0), 6.0)]
)
def test_sum(values, expected):
assert sum(*values) == expected
@pytest.mark.parametrize(
"values,expected", [((10.0, 1.0), 9.0), ((10.0, 1.0, 2.0), 7.0), ((10.0, 1.0, 2.0, 3.0), 4.0)]
)
def test_subtract(values, expected):
assert subtract(*values) == expected
@pytest.mark.parametrize("value,expected", [(0, 0), (1, 1), (2, 0)])
def test_is1(value, expected):
assert is1(value) == expected
@pytest.mark.parametrize("value,expected", [(-1, 1), (0, 1), (1, 0)])
def test_invert0(value, expected):
assert invert0(value) == expected
@pytest.mark.parametrize("sf,value,expected", [(1, 1, 1), (0, 1, 0), (10, 5, 50), (8, 0.25, 2.0)])
def test_scale(sf, value, expected):
f = scale(sf)
assert f(value) == expected
| 2.140625 | 2 |
Comment_Injection.py | keocol/SAML-Nuts | 2 | 12761808 | import urllib.parse
import base64
from urllib.parse import unquote
from urllib.parse import quote
tempEmail = input('Registered Email: ').encode('UTF-8')
suffix = input ('Suffix Added: ').encode('UTF-8')
adminEmail = input('Admin Email: ').encode('UTF-8')
saml_dec = base64.b64decode(unquote(input('SAMLResponse: ')))
saml_dec = saml_dec.replace(tempEmail, (adminEmail + b'<!--hoho-->' + suffix))
final = urllib.parse.quote(base64.b64encode(saml_dec).decode())
print ('\n\n\n\n++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\n\n\n')
print (final)
| 2.578125 | 3 |
packaging/setup/plugins/ovirt-engine-common/base/network/firewall_manager_iptables.py | phoenixsbk/kvmmgr | 1 | 12761809 | <gh_stars>1-10
#
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Firewall manager iptables plugin.
"""
import gettext
_ = lambda m: gettext.dgettext(message=m, domain='ovirt-engine-setup')
from otopi import util
from otopi import plugin
from otopi import constants as otopicons
from otopi import filetransaction
from ovirt_engine import util as outil
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup import firewall_manager_base
from . import process_firewalld_services
@util.export
class Plugin(plugin.PluginBase):
"""
Firewall manager iptables plugin.
"""
class _IpTablesManager(firewall_manager_base.FirewallManagerBase):
_SERVICE = 'iptables'
def _get_rules(self):
if self._rules is None:
self._rules = outil.processTemplate(
osetupcons.FileLocations.OVIRT_IPTABLES_DEFAULT,
subst={
'@CUSTOM_RULES@': (
process_firewalld_services.Process.getInstance(
environment=self.environment,
).parseFirewalld(
format=(
'-A INPUT -p {protocol} -m state '
'--state NEW -m {protocol} '
'--dport {port} -j ACCEPT\n'
),
portSeparator=':',
)
),
}
)
return self._rules
def __init__(self, plugin):
super(Plugin._IpTablesManager, self).__init__(plugin)
self._rules = None
@property
def name(self):
return osetupcons.Const.FIREWALL_MANAGER_IPTABLES
def detect(self):
return self.plugin.services.exists(self._SERVICE)
def active(self):
return self.plugin.services.status(self._SERVICE)
def prepare_examples(self):
content = self._get_rules()
self.environment[otopicons.CoreEnv.MAIN_TRANSACTION].append(
filetransaction.FileTransaction(
name=osetupcons.FileLocations.OVIRT_IPTABLES_EXAMPLE,
content=content,
modifiedList=self.environment[
otopicons.CoreEnv.MODIFIED_FILES
],
)
)
def enable(self):
self.environment[otopicons.NetEnv.IPTABLES_ENABLE] = True
self.environment[
otopicons.NetEnv.IPTABLES_RULES
] = self._get_rules()
# This file is updated by otopi. Here we just prevent it from
# being deleted on cleanup.
# TODO: copy/move some uninstall code from the engine to otopi
# to allow just adding lines to iptables instead of replacing
# the file and also remove these lines on cleanup.
self.environment[
osetupcons.CoreEnv.UNINSTALL_UNREMOVABLE_FILES
].append(
osetupcons.FileLocations.SYSCONFIG_IPTABLES,
)
def print_manual_configuration_instructions(self):
self.plugin.dialog.note(
text=_(
'An example of the required configuration for iptables '
'can be found at:\n'
' {example}'
).format(
example=osetupcons.FileLocations.OVIRT_IPTABLES_EXAMPLE
)
)
@plugin.event(
stage=plugin.Stages.STAGE_SETUP,
before=(
osetupcons.Stages.KEEP_ONLY_VALID_FIREWALL_MANAGERS,
),
)
def _setup(self):
self.environment[
osetupcons.ConfigEnv.FIREWALL_MANAGERS
].append(Plugin._IpTablesManager(self))
# vim: expandtab tabstop=4 shiftwidth=4
| 1.773438 | 2 |
setup.py | smgim/kiltsnielsen | 30 | 12761810 | <gh_stars>10-100
from distutils.core import setup
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
name="kiltsreader",
version="0.0.1",
author="<NAME> and <NAME>",
author_email="<EMAIL>",
description="A package for reading Kilts NielsenIQ files and directories",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/chrisconlon/kiltsnielsen",
project_urls={
"Bug Tracker": "https://github.com/chrisconlon/kiltsnielsen/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
packages=['kiltsreader'],
install_requires=['pyarrow >= 5.0.0','pandas >= 1.2','numpy>= 1.2'],
python_requires='>=3.6',
) | 1.773438 | 2 |
app.py | fbcbarbosa/hello-usp | 0 | 12761811 | <filename>app.py<gh_stars>0
import os
import socket
import flask
app = flask.Flask(__name__)
@app.route("/")
def hello():
name = "USP"
hostname = socket.gethostname()
html = '''
<h1>Oi {name}!</h1>
<h2>Hostname: {hostname}</h2>
'''
return html.format(name=name, hostname=hostname)
if __name__ == "__main__":
port = 8080
app.run(host='0.0.0.0', port=port)
| 2.59375 | 3 |
Python/CodingBat/string_splosion.py | dvt32/cpp-journey | 1 | 12761812 | # http://codingbat.com/prob/p118366
def string_splosion(str):
result = ""
for i in range( len(str) ):
result += str[:i+1]
return result
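
# Example: string_splosion('Code') returns 'CCoCodCode'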
| 3.578125 | 4 |
meadow/meadow/utils/book_searcher.py | digital-gachilib/meadow | 0 | 12761813 | from operator import attrgetter
from typing import List
from meadow.models import Book
def search_by_title(title: str) -> List[Book]:
if not title:
return list(filter(attrgetter("is_approved"), Book.objects.all()))
title = title.lower()
books = []
for book in Book.objects.all():
if book.title.lower().count(title) > 0 and book.is_approved:
books.append(book)
return books
def book_preview(book_id: int) -> dict:
book = Book.objects.get(id=book_id)
if not book.is_approved:
raise ValueError("Book is not approved!")
return {
"title": book.title,
"author": {"first_name": book.author.first_name, "last_name": book.author.last_name},
"description": book.description,
"isbn_10": book.isbn_10,
"isbn_13": book.isbn_13,
"download_link": book.download_link,
}
| 2.578125 | 3 |
event_annotator/__init__.py | roger-selzler/event_annotator | 0 | 12761814 | from .event_annotator import Event_annotator
from .event_annotator import load_csv_data
| 1.101563 | 1 |
RFMScore.py | Faouzizi/RFM-Analysis | 1 | 12761815 | #################################################################################
####################### Import python packages ##################################
#################################################################################
import pandas as pd
import numpy as np
from pandas_profiling import ProfileReport
import matplotlib.pyplot as plt
def create_RFMScore(df):
############################################################
# We keep only some variable for the segmentation
############################################################
colonnes_RFM = ['DocIDHash','ID', 'LodgingRevenue','OtherRevenue','DaysSinceLastStay']
df_rfm = df[colonnes_RFM]
############################################################
# Create Frequency variable
############################################################
rfm = df_rfm.groupby(['DocIDHash'])[['ID']].nunique().reset_index().rename(columns={'ID':'frequency'})
############################################################
# Create Recency variable
############################################################
df_rfm.rename(columns={'DaysSinceLastStay':'recency'}, inplace=True)
rfm = pd.merge(rfm, df_rfm.groupby(['DocIDHash'])[['recency']].min().reset_index(), how='inner', on='DocIDHash')
############################################################
# Create Monetary Value variable
############################################################
df_rfm['monetary_value'] = df_rfm['LodgingRevenue'] + df_rfm['OtherRevenue']
rfm = pd.merge(rfm, df_rfm.groupby(['DocIDHash'])[['monetary_value']].sum().reset_index(), how='inner',on='DocIDHash')
############################################################
# Then we compute quantiles
############################################################
quantiles = rfm.quantile(q=[0.20,0.40, 0.60,0.80])
quantiles = quantiles.to_dict()
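    # quantiles now maps column -> {quantile: value}, e.g.
    # {'recency': {0.2: ..., 0.4: ..., 0.6: ..., 0.8: ...}} (illustrative shape)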
############################################################
# This two function to cast the continues variables Recency,
# Frequency and Monetary Value to discontinues variables
############################################################
# Convert recency variable
def RClass(x,p,d):
if x <= d[p][0.2]:
return 1
elif x <= d[p][0.40]:
return 2
elif x <= d[p][0.60]:
return 3
elif x <= d[p][0.8]:
return 4
else:
return 5
# Convert Frequency and Monetary Value variables
def FMClass(x,p,d):
if x <= d[p][0.20]:
return 5
elif x <= d[p][0.40]:
return 4
elif x <= d[p][0.60]:
return 3
elif x <= d[p][0.80]:
return 2
else:
return 1
# Create New discontinue variables from continues ones
rfm['R_Quartile'] = rfm['recency'].apply(RClass, args=('recency',quantiles,))
rfm['F_Quartile'] = rfm['frequency'].apply(FMClass, args=('frequency',quantiles,))
rfm['M_Quartile'] = rfm['monetary_value'].apply(FMClass, args=('monetary_value',quantiles,))
############################################################
# Get The RFM Score
############################################################
rfm['RFMScore'] = rfm['R_Quartile'].astype('str') + rfm['F_Quartile'].astype('str') + rfm['M_Quartile'].astype('str')
return(rfm) | 2.328125 | 2 |
fuzz_lightyear/settings.py | bbhunter/fuzz-lightyear | 169 | 12761816 | import random
from functools import lru_cache
from hypothesis import core
class Settings:
def __init__(self) -> None:
self.seed = random.getrandbits(128) # type: int
self.unicode_enabled = True # type: bool
self.enable_color = True # type: bool
@property
def seed(self) -> int:
return self._seed
@seed.setter
def seed(self, value: int) -> None:
self._seed = value
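        # Propagate the seed to Hypothesis and the stdlib RNG so runs are reproducible.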
core.global_force_seed = value # type: ignore
random.seed(value)
@lru_cache(maxsize=1)
def get_settings() -> Settings:
return Settings()
| 2.453125 | 2 |
src/gluonts/nursery/tsbench/src/tsbench/evaluations/aws/ecr.py | RingoIngo/gluon-ts | 1 | 12761817 | <filename>src/gluonts/nursery/tsbench/src/tsbench/evaluations/aws/ecr.py
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from .session import account_id, default_session
def image_uri(path: str) -> str:
"""
Returns the ECR image URI for the model at the specified path.
Args:
path: The path, including the tag.
Returns:
The image URI.
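
    Example (illustrative account ID and region):
        ``image_uri("my-repo:latest")`` ->
        ``"123456789012.dkr.ecr.us-east-1.amazonaws.com/my-repo:latest"``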
"""
return f"{account_id()}.dkr.ecr.{default_session().region_name}.amazonaws.com/{path}"
| 2.015625 | 2 |
hookup/cli.py | wedataintelligence/hookup | 20 | 12761818 | from hookup import db
from hookup.models import Page, User
import getpass
DEFAULT_SITES = ["facebook", "twitter", "netflix", "github"]
def create_superuser():
username = input("Username: ")
password = getpass.getpass("Password ")
user = User(username=username, password=password)
user.save()
def register_sites(sites=DEFAULT_SITES):
user = User.query.first()
for site in sites:
page = Page(name=site, source=f"{site}.html", stock=True)
user.pages.append(page)
user.save()
def main():
db.create_all()
exists = User.query.first()
if not exists:
create_superuser()
register_sites()
print("[+] Done")
if __name__ == '__main__':
main()
| 2.859375 | 3 |
pombase/util.py | qky666/pombase | 1 | 12761819 | from __future__ import annotations
import re
from typing import Callable, TypeVar, Any, Sequence, Mapping, MutableMapping, Iterable, Optional
from datetime import datetime, date as dt_date, time as dt_time
from time import time as t_time, sleep
from unicodedata import normalize
from string import Formatter
from dateutil.parser import parserinfo, parse
from seleniumbase.config.settings import LARGE_TIMEOUT
from . import types as pb_types
T = TypeVar('T')
def wait_until(f: Callable[..., T],
args: list = None,
kwargs: dict = None,
timeout: Optional[pb_types.NumberType] = None,
step: pb_types.NumberType = 0.5,
expected: Any = True,
equals: bool = True,
raise_error: str = None, ) -> (bool, T):
"""
Waits until Callable `f` returns the `expected` value
(or something different from the expected value if `equals` is False).
If you want to check an object property instead of a method, you can use a `lambda` function.
:param f: The Callable object (usually function or method)
:param args: List of positional arguments passed to f. Default: []
:param kwargs: Dictionary of keyword arguments passed to f. Default: {}
:param timeout: Timeout in seconds
:param step: Wait time between each check
:param expected: Expected value
:param equals: If True, wait until f(*args, **kwargs) == expected.
If False, wait until f(*args, **kwargs) != expected.
:param raise_error: If not None, raises an Error if timeout is reached
:return: Tuple(success, value). success is True if the waiting succeeded,
and value is the last value returned by f(*args, **kwargs)
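
    Example (illustrative; ``job.status`` is a hypothetical zero-argument callable)::

        ok, last = wait_until(job.status, timeout=5, expected="done")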
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
if timeout is None:
timeout = LARGE_TIMEOUT
if timeout < 0:
raise RuntimeError(f"timeout should be >= 0. timeout = {timeout}")
if step <= 0:
raise RuntimeError(f"step should be > 0. step = {step}")
if equals is True:
default_value = None if expected is not None else False
else:
default_value = expected
current = t_time()
start = current
stop = start + timeout
value = default_value
# noinspection PyBroadException,TryExceptPass
try:
value = f(*args, **kwargs)
except Exception:
pass
keep_looping = True
while keep_looping:
if (value == expected) is equals:
return True, value
after = t_time()
if after < current + step:
sleep(current + step - after)
current = t_time()
if current <= stop:
# noinspection PyBroadException,TryExceptPass
try:
value = f(*args, **kwargs)
except Exception:
pass
else:
keep_looping = False
else:
if raise_error is not None:
raise TimeoutError(
f"{raise_error}. f='{f}', args='{args}', kwargs='{kwargs}', timeout='{timeout}', step='{step}', "
f"expected='{expected}', equals='{equals}', last value={value}",
)
else:
return False, value
class ParserInfoEs(parserinfo):
HMS = [('h', 'hour', 'hours', 'hora', 'horas'),
('m', 'minute', 'minutes', 'minuto', 'minutos'),
('s', 'second', 'seconds', 'segundo', 'segundos')]
JUMP = [' ', '.', ',', ';', '-', '/', "'", 'at', 'on', 'and', 'ad', 'm', 't', 'of', 'st', 'nd', 'rd', 'th',
'a', 'en', 'y', 'de']
MONTHS = [('Jan', 'January', 'Ene', 'Enero'),
('Feb', 'February', 'Febrero'),
('Mar', 'March', 'Marzo'),
('Apr', 'April', 'Abr', 'Abril'),
('May', 'May', 'Mayo'),
('Jun', 'June', 'Junio'),
('Jul', 'July', 'Julio'),
('Aug', 'August', 'Ago', 'Agosto'),
('Sep', 'Sept', 'September', 'Septiembre'),
('Oct', 'October', 'Octubre'),
('Nov', 'November', 'Noviembre'),
('Dec', 'December', 'Dic', 'Diciembre')]
PERTAIN = ['of', 'de']
WEEKDAYS = [('Mon', 'Monday', 'L', 'Lun', 'Lunes'),
('Tue', 'Tuesday', 'M', 'Mar', 'Martes'),
('Wed', 'Wednesday', 'X', 'Mie', 'Mié', 'Mier', 'Miér', 'Miercoles', 'Miércoles'),
('Thu', 'Thursday', 'J', 'Jue', 'Jueves'),
('Fri', 'Friday', 'V', 'Vie', 'Viernes'),
('Sat', 'Saturday', 'S', 'Sab', 'Sáb', 'Sabado', 'Sábado'),
('Sun', 'Sunday', 'D', 'Dom', 'Domingo')]
def __init__(self, dayfirst=True, yearfirst=False):
super().__init__(dayfirst=dayfirst, yearfirst=yearfirst)
class DateUtil:
@staticmethod
def parse_datetime_es(date_str: str) -> datetime:
parser_info_es = ParserInfoEs()
return parse(date_str, parser_info_es)
@staticmethod
def parse_date_es(date_str: str) -> dt_date:
return DateUtil.parse_datetime_es(date_str).date()
@staticmethod
def parse_time_es(date_str: str) -> dt_time:
return DateUtil.parse_datetime_es(date_str).time()
@staticmethod
def python_format_date(the_date: datetime, python_format_str: str = "{date.day}/{date:%m}/{date.year}") -> str:
class CustomFormatter(Formatter):
def get_field(self,
field_name: str,
args: Sequence[Any],
kwargs: Mapping[str, Any]) -> Any:
if field_name.startswith("date") is False:
raise RuntimeError(f"Incorrect python_format_str: {python_format_str}")
return super().get_field(field_name, args, kwargs)
formatter = CustomFormatter()
# noinspection StrFormat
return formatter.format(python_format_str, date=the_date)
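
    # e.g. DateUtil.python_format_date(datetime(2020, 1, 5)) -> '5/01/2020' (illustrative)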
class CaseInsensitiveDict(MutableMapping):
"""A case-insensitive ``dict``-like object.
Implements all methods and operations of
``MutableMapping`` as well as dict's ``copy``. Also
provides ``lower_items``.
All keys are expected to be strings. The structure remembers the
case of the last key to be set, and ``iter(instance)``,
``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
will contain case-sensitive keys. However, querying and contains
testing is case insensitive::
cid = CaseInsensitiveDict()
cid['Accept'] = 'application/json'
cid['aCCEPT'] == 'application/json' # True
list(cid) == ['Accept'] # True
For example, ``headers['content-encoding']`` will return the
value of a ``'Content-Encoding'`` response header, regardless
of how the header name was originally stored.
If the constructor, ``.update``, or equality comparison
operations are given keys that have equal ``.lower()``s, the
behavior is undefined.
"""
def __init__(self, data=None, **kwargs):
self._store = dict()
if data is None:
data = {}
self.update(data, **kwargs)
def __setitem__(self, key, value):
# Use the lowercased key for lookups, but store the actual
# key alongside the value.
self._store[key.lower()] = (key, value)
def __getitem__(self, key):
return self._store[key.lower()][1]
def __delitem__(self, key):
del self._store[key.lower()]
def __iter__(self):
return (casedkey for casedkey, mappedvalue in self._store.values())
def __len__(self):
return len(self._store)
def lower_items(self):
"""Like iteritems(), but with all lowercase keys."""
return ((lowerkey, keyval[1]) for lowerkey, keyval in self._store.items())
def __eq__(self, other):
if isinstance(other, Mapping):
other = CaseInsensitiveDict(other)
else:
return NotImplemented
# Compare insensitively
return dict(self.lower_items()) == dict(other.lower_items())
# Copy is required
def copy(self):
return CaseInsensitiveDict(self._store.values())
def __repr__(self):
return str(dict(self.items()))
def clean(s: str) -> str:
s = s.lower()
s = re.sub('á', 'a', s)
s = re.sub('é', 'e', s)
s = re.sub('í', 'i', s)
s = re.sub('ó', 'o', s)
s = re.sub('ú', 'u', s)
s = re.sub('ñ', 'n', s)
# Invalid characters
s = re.sub('[^0-9a-zA-Z_]', '_', s)
# Remove leading characters until we find a letter or underscore
s = re.sub('^[^a-zA-Z_]+', '', s)
return s
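
# e.g. clean("Año 1") -> "ano_1" (illustrative)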
def normalize_caseless(text: str) -> str:
return normalize("NFKD", text.casefold())
def caseless_equal(left, right) -> bool:
return normalize_caseless(left) == normalize_caseless(right)
def caseless_text_in_texts(text: str, texts: Iterable[str]) -> bool:
normalized_set = {normalize_caseless(t) for t in texts}
normalized_text = normalize_caseless(text)
return normalized_text in normalized_set
def expand_replacing_spaces_and_underscores(texts: Iterable[str]) -> set[str]:
expanded = set(texts)
expanded = expanded.union({t.replace("_", " ") for t in texts})
expanded = expanded.union({t.replace(" ", "_") for t in texts})
return expanded
def first_not_none(*args: T) -> Optional[T]:
for i in args:
if i is not None:
return i
else:
return None
| 2.671875 | 3 |
src/hub/dataload/sources/dbnsfp/dbnsfp_parser.py | raymond301/myvariant.info | 0 | 12761820 | import csv
import glob
from biothings.utils.dataload import list_split, dict_sweep, unlist, value_convert_to_number
VALID_COLUMN_NO = 245
'''this parser is for dbNSFP v3.5a beta2 downloaded from
https://sites.google.com/site/jpopgen/dbNSFP'''
# convert one snp to json
def _map_line_to_json(df, version, include_gnomad, index=0):
# specific variable treatment
chrom = df["#chr"]
if chrom == 'M':
chrom = 'MT'
# fields[7] in version 2, represent hg18_pos
hg18_end = df["hg18_pos(1-based)"]
if hg18_end == ".":
hg18_end = "."
else:
hg18_end = int(hg18_end)
# in case of no hg19 position provided, remove the item
if df["hg19_pos(1-based)"] == '.':
return None
else:
chromStart = int(df["hg19_pos(1-based)"])
chromEnd = chromStart
chromStart_38 = int(df["pos(1-based)"])
ref = df["ref"].upper()
alt = df["alt"].upper()
HGVS_19 = "chr%s:g.%d%s>%s" % (chrom, chromStart, ref, alt)
HGVS_38 = "chr%s:g.%d%s>%s" % (chrom, chromStart_38, ref, alt)
if version == 'hg19':
HGVS = HGVS_19
elif version == 'hg38':
HGVS = HGVS_38
siphy_29way_pi = df["SiPhy_29way_pi"]
if siphy_29way_pi == ".":
siphy = "."
else:
freq = siphy_29way_pi.split(":")
siphy = {'a': freq[0], 'c': freq[1], 'g': freq[2], 't': freq[3]}
gtex_gene = df["GTEx_V6p_gene"].split('|')
gtex_tissue = df["GTEx_V6p_tissue"].split('|')
gtex = map(dict, map(lambda t: zip(('gene', 'tissue'), t), zip(gtex_gene, gtex_tissue)))
acc = df["Uniprot_acc_Polyphen2"].rstrip().rstrip(';').split(";")
pos = df["Uniprot_aapos_Polyphen2"].rstrip().rstrip(';').split(";")
uniprot = map(dict, map(lambda t: zip(('acc', 'pos'), t), zip(acc, pos)))
provean_score = df["PROVEAN_score"].split(';')
sift_score = df["SIFT_score"].split(';')
hdiv_score = df["Polyphen2_HDIV_score"].split(';')
hvar_score = df["Polyphen2_HVAR_score"].split(';')
lrt_score = df["LRT_score"].split(';')
m_cap_score = df["M-CAP_score"].split(';')
mutationtaster_score = df["MutationTaster_score"].split(';')
mutationassessor_score = df["MutationAssessor_score"].split(';')
vest3_score = df["VEST3_score"].split(';')
metasvm_score = df["MetaSVM_score"].split(';')
fathmm_score = df["FATHMM_score"].split(';')
metalr_score = df["MetaLR_score"].split(';')
revel_score = df["REVEL_score"].split(';')
'''
parse mutpred top 5 features
'''
def modify_pvalue(pvalue):
return float(pvalue.strip('P = '))
mutpred_mechanisms = df["MutPred_Top5features"]
if mutpred_mechanisms not in ['.', ',', '-']:
mutpred_mechanisms = mutpred_mechanisms.split(" (") and mutpred_mechanisms.split(";")
mutpred_mechanisms = [m.rstrip(")") for m in mutpred_mechanisms]
mutpred_mechanisms = [i.split(" (") for i in mutpred_mechanisms]
mutpred_mechanisms = sum(mutpred_mechanisms, [])
mechanisms = [
{"mechanism": mutpred_mechanisms[0],
"p_val": modify_pvalue(mutpred_mechanisms[1])},
{"mechanism": mutpred_mechanisms[2],
"p_val": modify_pvalue(mutpred_mechanisms[3])},
{"mechanism": mutpred_mechanisms[4],
"p_val": modify_pvalue(mutpred_mechanisms[5])},
{"mechanism": mutpred_mechanisms[6],
"p_val": modify_pvalue(mutpred_mechanisms[7])},
{"mechanism": mutpred_mechanisms[8],
"p_val": modify_pvalue(mutpred_mechanisms[9])}
]
else:
mechanisms = '.'
# normalize scores
def norm(arr):
return [None if item == '.' else item for item in arr]
provean_score = norm(provean_score)
sift_score = norm(sift_score)
hdiv_score = norm(hdiv_score)
hvar_score = norm(hvar_score)
lrt_score = norm(lrt_score)
m_cap_score = norm(m_cap_score)
mutationtaster_score = norm(mutationtaster_score)
mutationassessor_score = norm(mutationassessor_score)
vest3_score = norm(vest3_score)
metasvm_score = norm(metasvm_score)
fathmm_score = norm(fathmm_score)
metalr_score = norm(metalr_score)
revel_score = norm(revel_score)
gnomad = {"gnomad_exomes": {
"ac": df["gnomAD_exomes_AC"],
"an": df["gnomAD_exomes_AN"],
"af": df["gnomAD_exomes_AF"],
"afr_ac": df["gnomAD_exomes_AFR_AC"],
"afr_af": df["gnomAD_exomes_AFR_AF"],
"afr_an": df["gnomAD_exomes_AFR_AN"],
"amr_ac": df["gnomAD_exomes_AMR_AC"],
"amr_an": df["gnomAD_exomes_AMR_AN"],
"amr_af": df["gnomAD_exomes_AMR_AF"],
"asj_ac": df["gnomAD_exomes_ASJ_AC"],
"asj_an": df["gnomAD_exomes_ASJ_AN"],
"asj_af": df["gnomAD_exomes_ASJ_AF"],
"eas_ac": df["gnomAD_exomes_EAS_AC"],
"eas_af": df["gnomAD_exomes_EAS_AF"],
"eas_an": df["gnomAD_exomes_EAS_AN"],
"fin_ac": df["gnomAD_exomes_FIN_AC"],
"fin_af": df["gnomAD_exomes_FIN_AF"],
"fin_an": df["gnomAD_exomes_FIN_AN"],
"nfe_ac": df["gnomAD_exomes_NFE_AC"],
"nfe_af": df["gnomAD_exomes_NFE_AF"],
"nfe_an": df["gnomAD_exomes_NFE_AN"],
"sas_ac": df["gnomAD_exomes_SAS_AC"],
"sas_af": df["gnomAD_exomes_SAS_AF"],
"sas_an": df["gnomAD_exomes_SAS_AN"],
"oth_ac": df["gnomAD_exomes_OTH_AC"],
"oth_af": df["gnomAD_exomes_OTH_AF"],
"oth_an": df["gnomAD_exomes_OTH_AN"]
},
"gnomad_genomes": {
"ac": df["gnomAD_genomes_AC"],
"an": df["gnomAD_genomes_AN"],
"af": df["gnomAD_genomes_AF"],
"afr_ac": df["gnomAD_genomes_AFR_AC"],
"afr_af": df["gnomAD_genomes_AFR_AF"],
"afr_an": df["gnomAD_genomes_AFR_AN"],
"amr_ac": df["gnomAD_genomes_AMR_AC"],
"amr_an": df["gnomAD_genomes_AMR_AN"],
"amr_af": df["gnomAD_genomes_AMR_AF"],
"asj_ac": df["gnomAD_genomes_ASJ_AC"],
"asj_an": df["gnomAD_genomes_ASJ_AN"],
"asj_af": df["gnomAD_genomes_ASJ_AF"],
"eas_ac": df["gnomAD_genomes_EAS_AC"],
"eas_af": df["gnomAD_genomes_EAS_AF"],
"eas_an": df["gnomAD_genomes_EAS_AN"],
"fin_ac": df["gnomAD_genomes_FIN_AC"],
"fin_af": df["gnomAD_genomes_FIN_AF"],
"fin_an": df["gnomAD_genomes_FIN_AN"],
"nfe_ac": df["gnomAD_genomes_NFE_AC"],
"nfe_af": df["gnomAD_genomes_NFE_AF"],
"nfe_an": df["gnomAD_genomes_NFE_AN"],
"oth_ac": df["gnomAD_genomes_OTH_AC"],
"oth_af": df["gnomAD_genomes_OTH_AF"],
"oth_an": df["gnomAD_genomes_OTH_AN"]
}
}
# load as json data
one_snp_json = {
"_id": HGVS,
"dbnsfp": {
"rsid": df["rs_dbSNP150"],
#"rsid_dbSNP144": fields[6],
"chrom": chrom,
"hg19": {
"start": chromStart,
"end": chromEnd
},
"hg18": {
"start": df["hg18_pos(1-based)"],
"end": hg18_end
},
"hg38": {
"start": df["pos(1-based)"],
"end": df["pos(1-based)"]
},
"ref": ref,
"alt": alt,
"aa": {
"ref": df["aaref"],
"alt": df["aaalt"],
"pos": df["aapos"],
"refcodon": df["refcodon"],
"codonpos": df["codonpos"],
"codon_degeneracy": df["codon_degeneracy"],
},
"genename": df["genename"],
"uniprot": list(uniprot),
"interpro_domain": df["Interpro_domain"],
"cds_strand": df["cds_strand"],
"ancestral_allele": df["Ancestral_allele"],
#"altaineandertal": fields[17],
#"denisova": fields[18]
"ensembl": {
"geneid": df["Ensembl_geneid"],
"transcriptid": df["Ensembl_transcriptid"],
"proteinid": df["Ensembl_proteinid"]
},
"sift": {
"score": sift_score,
"converted_rankscore": df["SIFT_converted_rankscore"],
"pred": df["SIFT_pred"]
},
"polyphen2": {
"hdiv": {
"score": hdiv_score,
"rankscore": df["Polyphen2_HDIV_rankscore"],
"pred": df["Polyphen2_HDIV_pred"]
},
"hvar": {
"score": hvar_score,
"rankscore": df["Polyphen2_HVAR_rankscore"],
"pred": df["Polyphen2_HVAR_pred"]
}
},
"lrt": {
"score": lrt_score,
"converted_rankscore": df["LRT_converted_rankscore"],
"pred": df["LRT_pred"],
"omega": df["LRT_Omega"]
},
"mutationtaster": {
"score": mutationtaster_score,
"converted_rankscore": df["MutationTaster_converted_rankscore"],
"pred": df["MutationTaster_pred"],
"model": df["MutationTaster_model"],
"AAE": df["MutationTaster_AAE"]
},
"mutationassessor": {
"score": mutationassessor_score,
"rankscore": df["MutationAssessor_score_rankscore"],
"pred": df["MutationAssessor_pred"]
},
"fathmm": {
"score": fathmm_score,
"rankscore": df["FATHMM_converted_rankscore"],
"pred": df["FATHMM_pred"]
},
"provean": {
"score": provean_score,
"rankscore": df["PROVEAN_converted_rankscore"],
"pred": df["PROVEAN_pred"]
},
"vest3": {
"score": vest3_score,
"rankscore": df["VEST3_rankscore"],
"transcriptid": df["Transcript_id_VEST3"],
"transcriptvar": df["Transcript_var_VEST3"]
},
"fathmm-mkl": {
"coding_score": df["fathmm-MKL_coding_score"],
"coding_rankscore": df["fathmm-MKL_coding_rankscore"],
"coding_pred": df["fathmm-MKL_coding_pred"],
"coding_group": df["fathmm-MKL_coding_group"]
},
"eigen": {
"coding_or_noncoding": df["Eigen_coding_or_noncoding"],
"raw": df["Eigen-raw"],
"phred": df["Eigen-phred"]
},
"eigen-pc": {
"raw": df["Eigen-PC-raw"],
"phred": df["Eigen-PC-phred"],
"raw_rankscore": df["Eigen-PC-raw_rankscore"]
},
"genocanyon": {
"score": df["GenoCanyon_score"],
"rankscore": df["GenoCanyon_score_rankscore"]
},
"metasvm": {
"score": metasvm_score,
"rankscore": df["MetaSVM_rankscore"],
"pred": df["MetaSVM_pred"]
},
"metalr": {
"score": metalr_score,
"rankscore": df["MetaLR_rankscore"],
"pred": df["MetaLR_pred"]
},
"reliability_index": df["Reliability_index"],
"m_cap_score": {
"score": m_cap_score,
"rankscore": df["M-CAP_rankscore"],
"pred": df["M-CAP_pred"]
},
"revel": {
"score": revel_score,
"rankscore": df["REVEL_rankscore"]
},
"mutpred": {
"score": df["MutPred_score"],
"rankscore": df["MutPred_rankscore"],
"accession": df["MutPred_protID"],
"aa_change": df["MutPred_AAchange"],
"pred": mechanisms
},
"dann": {
"score": df["DANN_score"],
"rankscore": df["DANN_rankscore"]
},
"gerp++": {
"nr": df["GERP++_NR"],
"rs": df["GERP++_RS"],
"rs_rankscore": df["GERP++_RS_rankscore"]
},
"integrated": {
"fitcons_score": df["integrated_fitCons_score"],
"fitcons_rankscore": df["integrated_fitCons_score_rankscore"],
"confidence_value": df["integrated_confidence_value"]
},
"gm12878": {
"fitcons_score": df["GM12878_fitCons_score"],
"fitcons_rankscore": df["GM12878_fitCons_score_rankscore"],
"confidence_value": df["GM12878_confidence_value"]
},
"h1-hesc": {
"fitcons_score": df["H1-hESC_fitCons_score"],
"fitcons_rankscore": df["H1-hESC_fitCons_score_rankscore"],
"confidence_value": df["H1-hESC_confidence_value"]
},
"huvec": {
"fitcons_score": df["HUVEC_fitCons_score"],
"fitcons_rankscore": df["HUVEC_fitCons_score_rankscore"],
"confidence_value": df["HUVEC_confidence_value"]
},
"phylo": {
"p100way": {
"vertebrate": df["phyloP100way_vertebrate"],
"vertebrate_rankscore": df["phyloP100way_vertebrate_rankscore"]
},
"p20way": {
"mammalian": df["phyloP20way_mammalian"],
"mammalian_rankscore": df["phyloP20way_mammalian_rankscore"]
}
},
"phastcons": {
"100way": {
"vertebrate": df["phastCons100way_vertebrate"],
"vertebrate_rankscore": df["phastCons100way_vertebrate_rankscore"]
},
"20way": {
"mammalian": df["phastCons20way_mammalian"],
"mammalian_rankscore": df["phastCons20way_mammalian_rankscore"]
}
},
"siphy_29way": {
"pi": siphy,
"logodds": df["SiPhy_29way_logOdds"],
"logodds_rankscore": df["SiPhy_29way_logOdds_rankscore"]
},
"1000gp3": {
"ac": df["1000Gp3_AC"],
"af": df["1000Gp3_AF"],
"afr_ac": df["1000Gp3_AFR_AC"],
"afr_af": df["1000Gp3_AFR_AF"],
"eur_ac": df["1000Gp3_EUR_AC"],
"eur_af": df["1000Gp3_EUR_AF"],
"amr_ac": df["1000Gp3_AMR_AC"],
"amr_af": df["1000Gp3_AMR_AF"],
"eas_ac": df["1000Gp3_EAS_AC"],
"eas_af": df["1000Gp3_EAS_AF"],
"sas_ac": df["1000Gp3_SAS_AC"],
"sas_af": df["1000Gp3_SAS_AF"]
},
"twinsuk": {
"ac": df["TWINSUK_AC"],
"af": df["TWINSUK_AF"]
},
"alspac": {
"ac": df["ALSPAC_AC"],
"af": df["ALSPAC_AF"]
},
"esp6500": {
"aa_ac": df["ESP6500_AA_AC"],
"aa_af": df["ESP6500_AA_AF"],
"ea_ac": df["ESP6500_EA_AC"],
"ea_af": df["ESP6500_EA_AF"]
},
"exac": {
"ac": df["ExAC_AC"],
"af": df["ExAC_AF"],
"adj_ac": df["ExAC_Adj_AC"],
"adj_af": df["ExAC_Adj_AF"],
"afr_ac": df["ExAC_AFR_AC"],
"afr_af": df["ExAC_AFR_AF"],
"amr_ac": df["ExAC_AMR_AC"],
"amr_af": df["ExAC_AMR_AF"],
"eas_ac": df["ExAC_EAS_AC"],
"eas_af": df["ExAC_EAS_AF"],
"fin_ac": df["ExAC_FIN_AC"],
"fin_af": df["ExAC_FIN_AF"],
"nfe_ac": df["ExAC_NFE_AC"],
"nfe_af": df["ExAC_NFE_AF"],
"sas_ac": df["ExAC_SAS_AC"],
"sas_af": df["ExAC_SAS_AF"]
},
"exac_nontcga": {
"ac": df["ExAC_nonTCGA_AC"],
"af": df["ExAC_nonTCGA_AF"],
"adj_ac": df["ExAC_nonTCGA_Adj_AC"],
"adj_af": df["ExAC_nonTCGA_Adj_AF"],
"afr_ac": df["ExAC_nonTCGA_AFR_AC"],
"afr_af": df["ExAC_nonTCGA_AFR_AF"],
"amr_ac": df["ExAC_nonTCGA_AMR_AC"],
"amr_af": df["ExAC_nonTCGA_AMR_AF"],
"eas_ac": df["ExAC_nonTCGA_EAS_AC"],
"eas_af": df["ExAC_nonTCGA_EAS_AF"],
"fin_ac": df["ExAC_nonTCGA_FIN_AC"],
"fin_af": df["ExAC_nonTCGA_FIN_AF"],
"nfe_ac": df["ExAC_nonTCGA_NFE_AC"],
"nfe_af": df["ExAC_nonTCGA_NFE_AF"],
"sas_ac": df["ExAC_nonTCGA_SAS_AC"],
"sas_af": df["ExAC_nonTCGA_SAS_AF"]
},
"exac_nonpsych": {
"ac": df["ExAC_nonpsych_AC"],
"af": df["ExAC_nonpsych_AF"],
"adj_ac": df["ExAC_nonpsych_Adj_AC"],
"adj_af": df["ExAC_nonpsych_Adj_AF"],
"afr_ac": df["ExAC_nonpsych_AFR_AC"],
"afr_af": df["ExAC_nonpsych_AFR_AF"],
"amr_ac": df["ExAC_nonpsych_AMR_AC"],
"amr_af": df["ExAC_nonpsych_AMR_AF"],
"eas_ac": df["ExAC_nonpsych_EAS_AC"],
"eas_af": df["ExAC_nonpsych_EAS_AF"],
"fin_ac": df["ExAC_nonpsych_FIN_AC"],
"fin_af": df["ExAC_nonpsych_FIN_AF"],
"nfe_ac": df["ExAC_nonpsych_NFE_AC"],
"nfe_af": df["ExAC_nonpsych_NFE_AF"],
"sas_ac": df["ExAC_nonpsych_SAS_AC"],
"sas_af": df["ExAC_nonpsych_SAS_AF"]
},
"clinvar": {
"rs": df["clinvar_rs"],
"clinsig": list(map(int,[i for i in df["clinvar_clnsig"].split("|") if i != "."])),
"trait": [i for i in df["clinvar_trait"].split("|") if i != "."],
"golden_stars": list(map(int,[i for i in df["clinvar_golden_stars"].split("|") if i != "."]))
},
"gtex": list(gtex)
}
}
if include_gnomad:
one_snp_json['dbnsfp'].update(gnomad)
one_snp_json = list_split(dict_sweep(unlist(value_convert_to_number(one_snp_json)), vals=[".", '-', None]), ";")
one_snp_json["dbnsfp"]["chrom"] = str(one_snp_json["dbnsfp"]["chrom"])
return one_snp_json
# open file, parse, pass to json mapper
def data_generator(input_file, version, include_gnomad):
open_file = open(input_file)
db_nsfp = csv.reader(open_file, delimiter="\t")
index = next(db_nsfp)
assert len(index) == VALID_COLUMN_NO, "Expecting %s columns, but got %s" % (VALID_COLUMN_NO, len(index))
previous_row = None
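    # Consecutive rows may describe the same variant (same _id) across transcripts;
    # their "aa" sub-documents are merged into a single list below.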
for row in db_nsfp:
df = dict(zip(index, row))
# use transpose matrix to have 1 row with N 187 columns
current_row = _map_line_to_json(df, version=version, include_gnomad=include_gnomad)
if previous_row and current_row:
if current_row["_id"] == previous_row["_id"]:
aa = previous_row["dbnsfp"]["aa"]
if not isinstance(aa, list):
aa = [aa]
aa.append(current_row["dbnsfp"]["aa"])
previous_row["dbnsfp"]["aa"] = aa
if len(previous_row["dbnsfp"]["aa"]) > 1:
continue
else:
yield previous_row
previous_row = current_row
if previous_row:
yield previous_row
def load_data_file(input_file, version, include_gnomad=False):
data = data_generator(input_file, version=version, include_gnomad=include_gnomad)
for one_snp_json in data:
yield one_snp_json
# load path and find files, pass to data_generator
def load_data(path_glob, version='hg19', include_gnomad=False):
for input_file in sorted(glob.glob(path_glob)):
for d in load_data_file(input_file, version, include_gnomad):
yield d
| 2.625 | 3 |
backend/migrations/versions/cc7575091441_.py | NitishGadangi/CovidAid | 3 | 12761821 | <reponame>NitishGadangi/CovidAid
"""empty message
Revision ID: cc7575091441
Revises: <PASSWORD>
Create Date: 2020-10-10 23:07:04.672476
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cc7575091441'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('points', sa.Integer(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'points')
# ### end Alembic commands ###
| 1.226563 | 1 |
Discord_Games/connect_four.py | QuirkyDevil/Discord-Games | 0 | 12761822 | <filename>Discord_Games/connect_four.py
import discord
import random
import textwrap
from discord.ui import Button, Select
# 2 player connect 4
class connect_4_buttons(discord.ui.Button):
def __init__(self, label):
super().__init__(style=discord.ButtonStyle.green, label=label)
async def callback(self, interaction: discord.Interaction):
view: connect_4_view = self.view
column = (int(self.label)-1)
confirm = view.check_if_valid(column) # counting begins from 0
if confirm is True:
row = view.get_next_row(column)
if interaction.user.id == view.red.id:
view.insert_to(row, column, "red")
elif interaction.user.id == view.yellow.id:
view.insert_to(row, column, "yellow")
view.check_for_winning()
embedAndView = view.prepare_edit()
embed = embedAndView[0]
embed.colour = view.update_embed_colour()
_view = embedAndView[1]
await interaction.response.edit_message(embed=embed, view=_view)
else:
await interaction.response.send_message("That column is full! Select some other column.", ephemeral=True)
self.disabled = True
class connect_4_view(discord.ui.View):
HEIGHT = 7
WIDTH = 7
RED_EMOJI = '🔴'
YELLOW_EMOJI = '🟡'
BLUE_EMOJI = '⏺️'
RED_COLOUR = 0xfc0335 # red
YELLOW_COLOUR = 0xfcfc03 # yellow
TIE_COLOUR = 0xff9a1f
# yellow will be -1 and red will be 1
def __init__(self, ctx, member, message):
super().__init__()
self.ctx = ctx
self.timeout = 60
self.member = member
self.message = message
# self.board = numpy.zeros((self.HEIGHT, self.WIDTH))
self.board = [[0 for y in range(self.WIDTH)] for x in range(self.HEIGHT)]
self.numbers_emojis = [
"1️⃣",
"2️⃣",
"3️⃣",
"4️⃣",
"5️⃣",
"6️⃣",
"7️⃣",
"8️⃣",
"9️⃣",
"🔟",
]
# self.alphabets = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J']
self.red = random.choice([ctx.author, member])
self.yellow = random.choice([ctx.author, member])
while self.red == self.yellow:
self.yellow = random.choice([ctx.author, member])
self.current_player = random.choice([self.red, self.yellow])
self.winner = None
self.match_is_draw = False
self.content = textwrap.dedent(f"""\n
{ctx.author} vs {member}
{self.red} will play as {self.RED_EMOJI}
{self.yellow} will play as {self.YELLOW_EMOJI}
""")
self.embed = discord.Embed(
title = textwrap.dedent(f"""\n
It is {self.current_player}'s turn.
"""))
self.embed.description = self.convert_board_to_str()
self.embed.colour = self.update_embed_colour()
for i in range(self.WIDTH):
self.add_item(connect_4_buttons(label = str(i+1)))
async def interaction_check(self, item: Button | Select, interaction: discord.Interaction) -> bool:
return interaction.user.id == self.current_player.id
async def on_timeout(self):
self.stop()
self.disable_buttons()
        if self.current_player == self.red:
            self.embed.title = f"{self.red} hasn't made a move in 1 minute.\n{self.yellow} won!"
            self.winner = self.yellow
        elif self.current_player == self.yellow:
            self.embed.title = f"{self.yellow} hasn't made a move in 1 minute.\n{self.red} won!"
            self.winner = self.red
self.embed.description = self.convert_board_to_str()
self.embed.colour = self.update_embed_colour()
return await self.message.edit(embed=self.embed, view=self)
def update_embed_colour(self):
if self.match_is_draw is True:
return self.TIE_COLOUR
elif self.winner == self.red:
return self.RED_COLOUR
elif self.winner == self.yellow:
return self.YELLOW_COLOUR
elif self.current_player == self.red:
return self.RED_COLOUR
elif self.current_player == self.yellow:
return self.YELLOW_COLOUR
def convert_board_to_str(self) -> str:
string = ""
for rows in self.board:
for elem in rows:
string += f"{elem} "
string += "\n"
string = string.replace("0", f"{self.BLUE_EMOJI}")
string = string.replace("-1", f"{self.YELLOW_EMOJI}")
string = string.replace("1", f"{self.RED_EMOJI}")
string += " ".join([str(self.numbers_emojis[i]) for i in range(self.WIDTH)])
return string
def disable_buttons(self):
for btn in self.children:
btn.disabled = True
def check_if_valid(self, column) -> bool:
# we only check the first row
if self.board[0][column] == 0:
return True
else:
return False
def get_next_row(self, column) -> int:
row_index = (len(self.board) -1) # if len is 7, max index will be 6
for row in reversed(self.board):
# print(f"row is {row_index} and column is {column}") # board.index breaks this
if row[column] == 0:
return row_index
row_index -= 1
def insert_to(self, row: int, column: int, colour: str):
if colour == "red":
self.board[row][column] = 1
self.current_player = self.yellow
if colour == "yellow":
self.board[row][column] = -1
self.current_player = self.red
def check_for_winning(self):
# check horizontal
        for row in range(self.HEIGHT):
            for column in range(self.WIDTH - 3):  # if len is 7, range will be 0-3
to_be_added = [self.board[row][column], self.board[row][column +1], self.board[row][column +2], self.board[row][column +3]]
if sum(to_be_added) == 4: #4 is for red
self.winner = self.red
self.disable_buttons()
self.stop()
return
elif sum(to_be_added) == -4: #-4 is for yellow
self.winner = self.yellow
self.disable_buttons()
self.stop()
return
# check vertical
        for row in range(self.HEIGHT - 3):
            for column in range(self.WIDTH):
to_be_added = [self.board[row][column], self.board[row+1][column], self.board[row+2][column], self.board[row+3][column]]
if sum(to_be_added) == 4: #4 is for red
self.winner = self.red
self.disable_buttons()
self.stop()
return
elif sum(to_be_added) == -4: #-4 is for yellow
self.winner = self.yellow
self.disable_buttons()
self.stop()
return
# check diagonals \\\
        for column in range(self.WIDTH - 3):
            for row in range(self.HEIGHT - 3):
to_be_added = [self.board[row][column], self.board[row+1][column+1], self.board[row+2][column+2], self.board[row+3][column+3]]
if sum(to_be_added) == 4: #4 is for red
self.winner = self.red
self.disable_buttons()
self.stop()
return
elif sum(to_be_added) == -4: #-4 is for yellow
self.winner = self.yellow
self.disable_buttons()
self.stop()
return
# check diagonals ///
for column in range(self.WIDTH - 3):
for row in range(3, self.HEIGHT):
to_be_added = [self.board[row][column], self.board[row-1][column+1], self.board[row-2][column+2], self.board[row-3][column+3]]
if sum(to_be_added) == 4: #4 is for red
self.winner = self.red
self.disable_buttons()
self.stop()
return
elif sum(to_be_added) == -4: #-4 is for yellow
self.winner = self.yellow
self.disable_buttons()
self.stop()
return
# we need to see if its a draw and all columns are filled
if all(i != 0 for row in self.board for i in row):
self.match_is_draw = True
self.disable_buttons()
self.stop()
return
def prepare_edit(self):
if self.match_is_draw is True:
self.embed.title = "It is a tie!"
elif self.winner is None:
self.embed.title = f"It is {self.current_player}'s turn."
elif self.winner is not None:
self.embed.title = f"*{self.winner}* won!"
self.embed.colour = self.update_embed_colour()
self.embed.description = self.convert_board_to_str()
for column in range(self.WIDTH):
check = self.check_if_valid(column)
if check is False:
button = self.children[column]
button.disabled = True
return [self.embed, self]
#==================================
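
# Hedged usage sketch (hypothetical bot command; `bot`, the ctx/member wiring
# and the initial message are assumptions, not part of this file):
# @bot.command()
# async def connect4(ctx, member: discord.Member):
#     message = await ctx.send("Setting up Connect Four...")
#     view = connect_4_view(ctx, member, message)
#     await message.edit(content=view.content, embed=view.embed, view=view)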
| 2.90625 | 3 |
keras-bert-poetry-generator/model.py | ganfanhang/DeepLearningExamples | 274 | 12761823 |
# -*- coding: utf-8 -*-
# @File : model.py
# @Author : AaronJny
# @Time : 2019/12/25
# @Desc :
from bert4keras.models import build_transformer_model
import tensorflow as tf
from dataset import keep_words
import settings
model = build_transformer_model(settings.CONFIG_PATH, settings.CHECKPOINT_PATH, application='lm', keep_tokens=keep_words)
model.summary()
# loss function: cross-entropy
# From the second token onward, the input data serves as the correct target
# (the inputs are not one-hot encoded)
y_true = model.input[0][:, 1:]
# target mask
y_mask = model.get_layer('Embedding-Token').output_mask[:, 1:]
y_mask = tf.cast(y_mask, tf.float32)
# predictions end at the second-to-last position (inclusive)
y_pred = model.output[:, :-1]
cross_entropy = tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred)
cross_entropy = tf.reduce_sum(cross_entropy * y_mask) / tf.reduce_sum(y_mask)
model.add_loss(cross_entropy)
model.compile(tf.keras.optimizers.Adam(1e-5))
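
# Hedged training sketch (hypothetical; the generator name, steps and epochs are
# assumptions based on a typical bert4keras fit loop, not code from this repo):
# model.fit(poetry_generator.forfit(), steps_per_epoch=1000, epochs=10)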
| 2.1875 | 2 |
scripts/to_jsonl.py | mengxis/KPTimes | 24 | 12761824 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import json
import codecs
import fnmatch
import logging
import itertools
import bs4
from tqdm import tqdm
def recursive_iglob(rootdir='.', pattern='*'):
"""Recursive version of iglob.
Taken from https://gist.github.com/whophil/2a999bcaf0ebfbd6e5c0d213fb38f489
"""
for root, dirnames, filenames in os.walk(rootdir):
for filename in fnmatch.filter(filenames, pattern):
yield os.path.join(root, filename)
def fix_unclosed(tag_name, html):
return re.sub(r'(<{}.*[^/-])>'.format(tag_name), r'\1 />', html)
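# fix_unclosed appends ' />' to unclosed void tags (e.g. <meta ...>) so that
# html.parser does not mis-nest them. Note that both converter functions below
# also read the module-level `input_file` set in the __main__ loop.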
def convert_jptimes(content):
content = fix_unclosed('meta', content)
content = fix_unclosed('link', content)
doc = bs4.BeautifulSoup(content, 'html.parser')
file_name_components = input_file.split('/')
date = '/'.join(file_name_components[2:5])
categories = file_name_components[5:-1]
file_name = file_name_components[-1]
url = 'http://' + input_file
author = doc.find('meta', attrs={'name': 'author'})['content']
# Extracting title
title = doc.find('meta', property='og:title')
if not title:
logging.error('no title for {}'.format(input_file))
print(doc.find_all('meta'))
input()
return
title = re.sub(r'\s+', ' ', title['content']).strip()
title = re.sub(r'\| The Japan Times', '', title)
if not len(title):
logging.error('no title for {}'.format(input_file))
return
# Extracting headline
headline = doc.find('meta', property='og:description')
if not headline:
logging.error('no headline for {}'.format(input_file))
return
headline = re.sub(r'\s+', ' ', headline['content']).strip()
if not len(headline):
logging.error('no headline for {}'.format(input_file))
return
# Extracting article content
body = doc.find('div', attrs={'id': 'jtarticle'})
if not body:
logging.error('no body for {}'.format(input_file))
return
body = re.sub(r'\s+', ' ', body.get_text(separator=' ')).strip()
if not len(body):
logging.error('no body for {}'.format(input_file))
return
# Extracting keywords
keywords = doc.find('meta', attrs={'name': 'keywords'})
if keywords is None:
logging.error('no keywords for {}'.format(input_file))
return
keywords = re.sub(r'\s+', ' ', keywords['content']).strip()
keywords = keywords.split(', ')
# remove empty keywords
keywords = [k.split(';') for k in keywords if k]
if not keywords:
logging.error('no keywords for {}'.format(input_file))
return
return {
        'title': title, 'headline': headline, 'abstract': body,
'keyword': keywords, 'file_name': file_name,
'date': date, 'categories': categories, 'url': url,
'author': author
}
def convert_nytimes(content):
doc = bs4.BeautifulSoup(content, 'html.parser')
file_name_components = input_file.split('/')
date = '/'.join(file_name_components[1:4])
categories = file_name_components[4:-1]
file_name = '.'.join(file_name_components[-1].split('.')[:-1])
url = 'http://' + input_file
# Removing script and style tags
for script in doc(['script', 'style', 'link', 'button']):
script.decompose() # rip it out
try:
# Before 2013
author = doc.find('meta', attrs={'name': 'author'})['content']
except TypeError:
# After 2013
author = doc.find('meta', attrs={'name': 'byl'})['content']
author = author.replace('By ', '')
# Extracting title
title = doc.find('meta', property='og:title')
if not title:
logging.error('no title for {}'.format(input_file))
return
title = re.sub(r'\s+', ' ', title['content']).strip()
if not len(title):
logging.error('no title for {}'.format(input_file))
return
# Extracting headline
headline = doc.find('meta', property='og:description')
if not headline:
logging.error('no headline for {}'.format(input_file))
return
headline = re.sub(r'\s+', ' ', headline['content']).strip()
if not len(headline):
logging.error('no headline for {}'.format(input_file))
return
# Extracting article content
body = doc.find('section', attrs={'name': 'articleBody'})
if not body:
body = doc.find_all('p', attrs={'class': 'story-body-text story-content'})
if not body:
logging.error('no body for {}'.format(input_file))
return
else:
body = ' '.join([re.sub(r'\s+', ' ', p.get_text(separator=' ')).strip() for p in body])
else:
body = re.sub(r'\s+', ' ', body.get_text(separator=' ')).strip()
if not len(body):
logging.error('no body for {}'.format(input_file))
return
# Extracting keywords
keywords = doc.find('meta', attrs={'name': 'news_keywords'})
if keywords is None:
keywords = doc.find('meta', attrs={'name': 'keywords'})
if not keywords:
logging.error('no keywords for {}'.format(input_file))
return
keywords = re.sub(r'\s+', ' ', keywords['content']).strip()
keywords = keywords.split(',')
# remove empty keywords
keywords = [k.split(';') for k in keywords if k]
if not keywords:
logging.error('no keywords for {}'.format(input_file))
return
return {
'title': title, 'headline': headline, 'abstract': body,
'keyword': keywords, 'file_name': file_name,
'date': date, 'categories': categories, 'url': url,
'author': author
}
if __name__ == '__main__':
import argparse
def arguments():
parser = argparse.ArgumentParser(description='Converts html files to jsonl using a filelist')
parser.add_argument(
'-f', '--filelist', type=argparse.FileType('r'),
help='Filelist file. If not given convert every found '
'file into `dataset.jsonl` without id')
args = parser.parse_args()
return args
args = arguments()
logging.basicConfig(level=logging.INFO)
logging.info('start converting...')
articles_processed = 0
output_file = '..' + os.sep + 'dataset.jsonl'
jptimes_dir = 'www.japantimes.co.jp/'
    nytimes_dir = 'www.nytimes.com/'
if args.filelist:
files = [l.strip().split('\t') for l in args.filelist]
args.filelist.close()
output_file = '..' + os.sep + args.filelist.name.replace('url.filelist', 'jsonl')
else:
files = itertools.chain(
recursive_iglob(rootdir=jptimes_dir, pattern='[!.]*'),
recursive_iglob(rootdir=nytimes_dir, pattern='*.html')
)
with codecs.open(output_file, 'w', 'utf-8') as f:
for input_file in tqdm(files):
if args.filelist:
id_, input_file = input_file
input_file = input_file.replace('http://', '')
if not os.path.isfile(input_file):
continue
# Loading soup
with open(input_file) as g:
content = g.read()
if 'nytimes' in input_file:
res = convert_nytimes(content)
elif 'japantimes' in input_file:
res = convert_jptimes(content)
else:
logging.error('Unrecognised file type : {}'.format(
input_file))
if not res:
continue
if args.filelist:
res['id'] = id_
f.write(json.dumps(res) + '\n')
articles_processed += 1
logging.info('Converted {} articles'.format(articles_processed))
if args.filelist and articles_processed != len(files):
logging.info(
'There are {} missing articles. Please (re)try downloading '
'articles using download script'.format(len(files) - articles_processed)
)
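
# Hedged usage sketch (hypothetical file names; run from the crawl root that
# contains the www.japantimes.co.jp/ and www.nytimes.com/ directories):
#   python to_jsonl.py                        # convert every crawled page found
#   python to_jsonl.py -f train.url.filelist  # convert only listed URLs, keeping ids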
| 2.40625 | 2 |
news_collector/collector/apps.py | orehush/channels-examples | 1,311 | 12761825 | from django.apps import AppConfig
class CollectorConfig(AppConfig):
name = 'collector'
| 1.132813 | 1 |
create_trainingset_and_classifier/training_data_output_offline_model/large_pixels.py | ganzri/Tracking-Pixels | 0 | 12761826 |
# Copyright (C) 2022 <NAME>, ETH Zürich, Information Security Group
# Released under the MIT License
"""
Answers (partially) the question of whether there are large tracking pixels (larger than 1x1).
Counts how many samples are both matched to a consent declaration and larger than 1x1. It
specifically counts the pinkgellac.nl ones, as these stem from mismatching (see thesis, 5.3.1),
and also tallies some common 2x2 pixels.
"""
import json
from typing import Dict, Any, List
from urllib import parse
#from docopt import docopt
def main() -> None:
#argv = None
#cargs = docopt(__doc__, argv=argv)
#mode: str = cargs["<mode>"]
#to_query: str = cargs["<query>"]
in_pixels: Dict[str, Dict[str, Any]] = dict()
with open('10_12_2021_filter_lists.json') as fd:
in_pixels = json.load(fd)
print(f"Nr of samples loaded: {len(in_pixels)}")
count1 = 0
count2 = 0
count3 = 0
pink_count = 0
adobe = 0
o7 = 0
for key in in_pixels:
sample = in_pixels[key]
if sample["matched"] == 1 and (sample["img_size"][0] > 1 or sample["img_size"][1] > 1):
#print(sample)
count1 += 1
if "pinkgellac.nl" in sample["url"]:
pink_count += 1
elif sample["matched"] == 0 and (sample["blocked"][0] == 1 or sample["blocked"][1] == 1) and (sample["img_size"][0] == 2 and sample["img_size"][1] == 2):
url = sample["url"]
if "omtrdc.net" in url:
adobe += 1
elif "2o7.net" in url:
o7 += 1
#else:
#print(sample)
count2 += 1
elif sample["matched"] == 0 and (sample["img_size"][0] == 1 and sample["img_size"][1] == 1) and (sample["blocked"][0] == 0 and sample["blocked"][1] == 0):
print(sample)
count3 += 1
print(f"matched large images: {count1}")
print(f"pinkgellac.nl count: {pink_count}")
print(f"unmatched, blocked 2x2 images: {count2}")
print(f"adobe: {adobe}, 07: {o7}")
print(F"unmatched 1x1 not blocked: {count3}")
if __name__ == "__main__":
exit(main())
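
# Hedged usage note: run as `python large_pixels.py` with the crawl dump
# (10_12_2021_filter_lists.json) in the working directory.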
| 3.1875 | 3 |
office365/sharepoint/social/socialRestActor.py | wreiner/Office365-REST-Python-Client | 544 | 12761827 |
from office365.runtime.client_object import ClientObject
class SocialRestActor(ClientObject):
pass
| 1.296875 | 1 |
recipes-examples/aikit-docker-images/aikit-docker-images/aikit-docker-images.py | denix0/meta-yogurt | 6 | 12761828 | #!/usr/bin/env python3
# Copyright (C) 2019 <NAME> <<EMAIL>>
# Released under the MIT license (see COPYING.MIT for the terms)
import argparse
import subprocess
import sys
CONTAINER_MODEL = 'model'
CONTAINER_DEMO = 'demo'
IMAGE_MODEL = 'phytecorg/aidemo-customvision-model:0.4.1'
IMAGE_DEMO = 'phytecorg/aidemo-customvision-demo:0.5.0'
NETWORK = 'aikit'
def stop_containers():
process_ps = subprocess.run(['docker', 'ps', '--format={{.Names}}'],
check=True, stdout=subprocess.PIPE)
containers = process_ps.stdout.decode('utf-8').split('\n')
if CONTAINER_MODEL in containers:
subprocess.run(['docker', 'stop', CONTAINER_MODEL], check=True)
if CONTAINER_DEMO in containers:
subprocess.run(['docker', 'stop', CONTAINER_DEMO], check=True)
def list_networks():
process_list = subprocess.run(['docker', 'network', 'ls',
'--format={{.Name}}'], check=True, stdout=subprocess.PIPE)
return process_list.stdout.decode('utf-8').split('\n')
def create_network():
if NETWORK not in list_networks():
subprocess.run(['docker', 'network', 'create', NETWORK], check=True)
def remove_network():
if NETWORK in list_networks():
subprocess.run(['docker', 'network', 'rm', NETWORK], check=True)
def run_containers():
subprocess.run(['docker', 'run',
'--rm',
'--name', CONTAINER_MODEL,
'--network', NETWORK,
'-p', '8877:8877',
'-d', IMAGE_MODEL,
'--port', '8877', 'hands'], check=True)
subprocess.run(['docker', 'run',
'--rm',
'--privileged',
'--name', CONTAINER_DEMO,
'--network', NETWORK,
'--device', '/dev/video0',
'-e', 'QT_QPA_PLATFORM=wayland',
'-e', 'QT_WAYLAND_FORCE_DPI=192',
'-e', 'QT_WAYLAND_DISABLE_WINDOWDECORATION=1',
'-e', 'XDG_RUNTIME_DIR=/run/user/0',
'-v', '/run/user/0:/run/user/0',
'-d', IMAGE_DEMO, '/bin/bash', '-c',
'weston-start && sleep 1 && aidemo-customvision-demo -x'], check=True)
def start(args):
stop_containers()
create_network()
run_containers()
return 0
def stop(args):
stop_containers()
remove_network()
return 0
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convenience runner for '
'starting and stopping Docker images for the AI kit')
    subparsers = parser.add_subparsers(dest='command', required=True)
subparser_start = subparsers.add_parser('start')
subparser_start.set_defaults(function=start)
subparser_stop = subparsers.add_parser('stop')
subparser_stop.set_defaults(function=stop)
args = parser.parse_args()
sys.exit(args.function(args))
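
# Hedged usage sketch:
#   ./aikit-docker-images.py start   # runs the model + demo containers on the 'aikit' network
#   ./aikit-docker-images.py stop    # stops both containers and removes the network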
| 2.3125 | 2 |
place/migrations/0001_initial.py | Odreystella/My_Place_Record_Project2 | 0 | 12761829 | # Generated by Django 3.2.4 on 2021-06-13 15:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
],
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64, unique=True)),
],
),
migrations.CreateModel(
name='Place',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.TextField(default=1623599319.0517447)),
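                # NOTE: this float default was frozen at makemigrations time
                # (time.time() evaluated once), a common pitfall of non-callable defaults.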
('updated_at', models.TextField(blank=True, null=True)),
('is_deleted', models.BooleanField(default=False)),
('name', models.CharField(max_length=64)),
('location', models.CharField(max_length=64)),
('memo', models.TextField()),
('best_menu', models.CharField(max_length=64)),
('additional_info', models.CharField(max_length=200)),
('stars', models.DecimalField(blank=True, decimal_places=1, max_digits=2, null=True)),
('author', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='place', to=settings.AUTH_USER_MODEL)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='place', to='place.category')),
('like_comments', models.ManyToManyField(blank=True, related_name='like_place', to=settings.AUTH_USER_MODEL)),
('tags', models.ManyToManyField(blank=True, related_name='place', to='place.Tag')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Photo',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(blank=True, default='default.png', null=True, upload_to='images/')),
('place', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='photo', to='place.place')),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.TextField(default=1623599319.0517447)),
('updated_at', models.TextField(blank=True, null=True)),
('is_deleted', models.BooleanField(default=False)),
('content', models.TextField()),
('commenter', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='comment', to=settings.AUTH_USER_MODEL)),
('place', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comment', to='place.place')),
],
options={
'abstract': False,
},
),
]
| 1.84375 | 2 |
src/package/package.py | buckler-project/armoury | 1 | 12761830 |
import os
from abc import *
import yaml
from utils import setting as _setting
from utils import cmd as _cmd
class Package:
def __init__(self, url, name, auther):
self.url = url
self.name = name
self.auther = auther
self.parent_path = ''
self.config_path = ''
def get_name(self):
return f'{self.auther}/{self.name}'
def get_path(self):
return f'{self.parent_path}/{self.auther}/{self.name}'
def get_config_path(self):
return f'{self.parent_path}/{self.auther}/{self.name}/{self.config_path}'
class PackageFactory:
def __init__(self):
self.parent_path = ''
self.config_path = ''
def generate(self, url, name, auther):
package = self._generate(url=url, name=name, auther=auther)
if not os.path.isdir(package.get_path()):
return package
path = package.get_config_path()
with open(path) as f:
            package.config = yaml.safe_load(f)
return package
def generate_from_name(self, name):
name = name.split('/')
return self.generate_from_directory(auther=name[0], name=name[1])
def generate_from_directory(self, auther, name):
if os.path.isdir(f'{self.parent_path}/{auther}/{name}'):
cmd = f'''cd {self.parent_path}/{auther}/{name}/ \\
&& git config --get remote.origin.url
'''
url = _cmd.run_cmd(cmd, subprocess=True, output=False)
else:
url = f'{_setting.url}{auther}/{name}'
return self.generate(url=url, name=name, auther=auther)
def generate_from_url(self, url):
if url[-1] == '/':
url = url[:-1]
list = url.split('/')
return self.generate(url=url, name=list[-1], auther=list[-2])
def generate_from_package(self, package):
return self.generate(url=package.url, name=package.name, auther=package.auther)
    def _generate(self, url, name, auther):
        package = Package(url=url, name=name, auther=auther)
        # Propagate the factory's paths so get_path()/get_config_path() resolve correctly.
        package.parent_path = self.parent_path
        package.config_path = self.config_path
        return package
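
# Hedged usage sketch (hypothetical values; assumes utils.setting defines `url`):
#   factory = PackageFactory()
#   factory.parent_path = '/opt/armoury/packages'
#   pkg = factory.generate_from_name('someauthor/sometool')
#   print(pkg.get_path())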
| 2.484375 | 2 |
fusion_train_256_ahdr.py | rajat95/AHDR-Net-Tensorflow | 6 | 12761831 |
from __future__ import absolute_import
from __future__ import print_function
import os,time,cv2,sys,math
import tensorflow as tf
import numpy as np
import time, datetime
import argparse
import random
from utils import utils, helpers
from builders import fusion_model_builder
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def radiance_writer(out_path, image):
with open(out_path, "wb") as f:
f.write(bytes("#?RADIANCE\n# Made with Python & Numpy\nFORMAT=32-bit_rle_rgbe\n\n",'UTF-8'))
f.write(bytes("-Y %d +X %d\n" %(image.shape[0], image.shape[1]),'UTF-8'))
brightest = np.max(image,axis=2)
mantissa = np.zeros_like(brightest)
exponent = np.zeros_like(brightest)
np.frexp(brightest, mantissa, exponent)
scaled_mantissa = mantissa * 255.0 / brightest
rgbe = np.zeros((image.shape[0], image.shape[1], 4), dtype=np.uint8)
rgbe[...,0:3] = np.around(image[...,0:3] * scaled_mantissa[...,None])
rgbe[...,3] = np.around(exponent + 128)
rgbe.flatten().tofile(f)
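
# Note: the writer above emits shared-exponent RGBE pixels, the standard
# Radiance .hdr layout (mantissas rescaled to 0-255, exponent biased by +128).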
def compute_psnr(img1, img2):
mse = np.mean((img1-img2)**2)
if mse == 0:
return 100
    PIXEL_MAX = 1.0  # tonemapped inputs lie in [0, 1]
return 20 * math.log10(PIXEL_MAX / math.sqrt(mse))
def log_tonemap(im):
return tf.log(1+5000*im)/tf.log(1+5000.0)
def log_tonemap_output(im):
return np.log(1+5000*im)/np.log(1+5000.0)
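
# Both tonemappers apply the mu-law compression T(x) = log(1 + mu*x) / log(1 + mu)
# with mu = 5000, so the loss is computed in a perceptually compressed domain.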
parser = argparse.ArgumentParser()
parser.add_argument('--nTry', type=int, default=None, help='Current try number')
parser.add_argument('--num_epochs', type=int, default=100, help='Number of epochs to train for')
parser.add_argument('--id_str', type=str, default="", help='Unique ID string to identify current try')
parser.add_argument('--status_id', type=int, default=1, help='Status ID to write to status.txt. Can be 1, 2 or 3')
parser.add_argument('--epoch_start_i', type=int, default=0, help='Start counting epochs from this number')
parser.add_argument('--checkpoint_step', type=int, default=1, help='How often to save checkpoints (epochs)')
parser.add_argument('--validation_step', type=int, default=1, help='How often to perform validation (epochs)')
parser.add_argument('--image', type=str, default=None, help='The image you want to predict on. Only valid in "predict" mode.')
parser.add_argument('--continue_training', type=str2bool, default=False, help='Whether to continue training from a checkpoint')
parser.add_argument('--dataset', type=str, default="hdr_ddg_dataset_ulti_13thJuly", help='Dataset you are using.')
parser.add_argument('--crop_height', type=int, default=256, help='Height of cropped input image to network')
parser.add_argument('--crop_width', type=int, default=256, help='Width of cropped input image to network')
parser.add_argument('--batch_size', type=int, default=16, help='Number of images in each batch')
parser.add_argument('--num_val_images', type=int, default=100000, help='The number of images to used for validations')
parser.add_argument('--model', type=str, default="DRIB_4_four_conv", help='The model you are using. See model_builder.py for supported models')
parser.add_argument('--frontend', type=str, default="ResNet101", help='The frontend you are using. See frontend_builder.py for supported models')
parser.add_argument('--save_logs', type=str2bool, default=True, help='Whether to save training info to the corresponding logs txt file')
parser.add_argument('--log_interval', type=int, default=100, help='Log Interval')
parser.add_argument('--init_lr', type=float, default=0.0002, help='Initial learning rate')
parser.add_argument('--lr_decay', type=float, default=0.94, help='Initial learning rate')
parser.add_argument('--loss', type=str, default='l2', help='Choose between l2 or l1 norm as a loss function')
parser.add_argument('--logdir', type=str, default='/workspace/logs', help='Choose between l2 or l1 norm as a loss function')
parser.add_argument('--crop_pixels_height',type=int,default=10,help='Location of input image')
args = parser.parse_args()
try_name = "Try%d_%s_%s"%(args.nTry,args.model,args.id_str)
if not os.path.isdir(try_name):
os.makedirs(try_name)
if args.save_logs:
if args.continue_training:
log_file = open("%s/Logs_try%d.txt"%(try_name, args.nTry), 'a')
status = open("status%d.txt"%(args.status_id),'a')
else:
log_file = open("%s/Logs_try%d.txt"%(try_name, args.nTry), 'w')
status = open("status%d.txt"%(args.status_id),'w')
config = tf.ConfigProto()
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
if not os.path.exists(os.path.join(args.logdir,try_name,'train')):
os.makedirs(os.path.join(args.logdir,try_name,'train'),exist_ok=True)
if not os.path.exists(os.path.join(args.logdir,try_name,'test')):
os.makedirs(os.path.join(args.logdir,try_name,'test'),exist_ok=True)
train_writer = tf.summary.FileWriter('{}/{}/train'.format(args.logdir,try_name))
test_writer = tf.summary.FileWriter('{}/{}/test'.format(args.logdir,try_name))
train_loss_pl = tf.placeholder(tf.float32,shape=None)
train_loss_summary =tf.summary.scalar('train_loss',train_loss_pl)
test_loss_pl = tf.placeholder(tf.float32,shape=None)
test_loss_summary =tf.summary.scalar('test_loss',test_loss_pl)
test_psnr_pl = tf.placeholder(tf.float32,shape=None)
test_psnr_summary =tf.summary.scalar('val_psnr',test_psnr_pl)
le_image_pl = tf.placeholder(tf.float32,shape=[args.batch_size,args.crop_width,args.crop_height,3])
me_image_pl = tf.placeholder(tf.float32,shape=[args.batch_size,args.crop_width,args.crop_height,3])
he_image_pl = tf.placeholder(tf.float32,shape=[args.batch_size,args.crop_width,args.crop_height,3])
gt_image_pl = tf.placeholder(tf.float32,shape=[args.batch_size,args.crop_width,args.crop_height,3])
le_image_summ = tf.summary.image('le images',le_image_pl,max_outputs=args.batch_size)
me_image_summ = tf.summary.image('me images',me_image_pl,max_outputs=args.batch_size)
he_image_summ = tf.summary.image('he images',he_image_pl,max_outputs=args.batch_size)
gt_image_summ = tf.summary.image('gt images',gt_image_pl,max_outputs=args.batch_size)
input_exposure_stacks = [tf.placeholder(tf.float32,shape=[None,None,None,6]) for x in range(3)]
gt_exposure_stack = tf.placeholder(tf.float32,shape=[None,None,None,3])
lr = tf.placeholder("float", shape=[])
network, init_fn = fusion_model_builder.build_model(model_name=args.model, frontend=args.frontend, input_exposure_stack=input_exposure_stacks, crop_width=args.crop_width, crop_height=args.crop_height, is_training=True)
str_params = utils.count_params()
print(str_params)
if args.save_logs:
log_file.write(str_params + "\n")
if args.loss == 'l2':
loss = tf.losses.mean_squared_error(log_tonemap(gt_exposure_stack), log_tonemap(network))
elif args.loss == 'l1':
loss = tf.losses.absolute_difference(log_tonemap(gt_exposure_stack), log_tonemap(network))
opt = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss, var_list=[var for var in tf.trainable_variables()])
saver=tf.train.Saver(max_to_keep=1000)
sess.run(tf.global_variables_initializer())
train_writer.add_graph(sess.graph)
# Load a previous checkpoint if desired
model_checkpoint_name = "%s/ckpts/latest_model_"%(try_name) + args.model + "_" + args.dataset + ".ckpt"
if args.continue_training:
print('Loading latest model checkpoint')
saver.restore(sess, model_checkpoint_name)
print('Loaded latest model checkpoint')
print("\n***** Begin training *****")
print("Try -->", args.nTry)
print("Dataset -->", args.dataset)
print("Model -->", args.model)
print("Crop Height -->", args.crop_height)
print("Crop Width -->", args.crop_width)
print("Num Epochs -->", args.num_epochs)
print("Batch Size -->", args.batch_size)
print("Save Logs -->", args.save_logs)
avg_loss_per_epoch = []
avg_val_loss_per_epoch = []
avg_psnr_per_epoch = []
if args.save_logs:
log_file.write("\nDataset --> " + args.dataset)
log_file.write("\nModel --> " + args.model)
log_file.write("\nCrop Height -->" + str(args.crop_height))
log_file.write("\nCrop Width -->" + str(args.crop_width))
log_file.write("\nNum Epochs -->" + str(args.num_epochs))
log_file.write("\nBatch Size -->" + str(args.batch_size))
log_file.close()
status.write("\nDataset --> " + args.dataset)
status.write("\nModel --> " + args.model)
status.write("\nCrop Height -->" + str(args.crop_height))
status.write("\nCrop Width -->" + str(args.crop_width))
status.write("\nNum Epochs -->" + str(args.num_epochs))
status.write("\nBatch Size -->" + str(args.batch_size))
status.close()
# Load the data
print("Loading the data ...")
# ["he_at_me", "le_at_me", "me_at_he", "me_at_le", "he", "le", "me"]
exposure_keys_train = ["he", "le", "me"]
exposure_keys_train_labels = ["hdr"]
exposure_keys_val = ["he", "le", "me"]
exposure_keys_val_labels = ["hdr"]
multiexposure_train_names = utils.prepare_data_multiexposure("%s/train_256"%(args.dataset), exposure_keys_train)
multiexposure_train_label_names = utils.prepare_data_multiexposure("%s/train_labels_256"%(args.dataset), exposure_keys_train_labels)
multiexposure_val_names = utils.prepare_data_multiexposure("%s/val"%(args.dataset), exposure_keys_val)
multiexposure_val_label_names = utils.prepare_data_multiexposure("%s/val_labels"%(args.dataset), exposure_keys_val_labels)
train_input_names_he, train_input_names_le, train_input_names_me = multiexposure_train_names[0], multiexposure_train_names[1], multiexposure_train_names[2]
train_output_names_hdr = multiexposure_train_label_names[0]
val_input_names_he, val_input_names_le, val_input_names_me = multiexposure_val_names[0], multiexposure_val_names[1], multiexposure_val_names[2]
val_output_names_hdr = multiexposure_val_label_names[0]
# Which validation images do we want
val_indices = []
num_vals = min(args.num_val_images, len(val_input_names_he))
# Set random seed to make sure models are validated on the same validation images.
# So you can compare the results of different models more intuitively.
random.seed(16)
val_indices=random.sample(range(0,len(val_input_names_he)),num_vals)
learning_rates = []
lr_decay_step = 1
small_loss_bin = []
train_step =0
val_step = 0
# Do the training here
for epoch in range(args.epoch_start_i, args.num_epochs):
    learning_rate = args.init_lr * float(args.lr_decay) ** float(epoch)
learning_rates.append(learning_rate)
print("\nLearning rate for epoch # %04d = %f\n"%(epoch, learning_rate))
if args.save_logs:
log_file = open("%s/Logs_try%d.txt"%(try_name, args.nTry), 'a')
log_file.write("\nLearning rate for epoch " + str(epoch) + " = " + str(learning_rate) + "\n")
log_file.close()
status = open("status%d.txt"%(args.status_id),'a')
status.write("\nLearning rate for epoch " + str(epoch) + " = " + str(learning_rate) + "\n")
status.close()
current_losses = []
current_losses_val = []
cnt=0
# Equivalent to shuffling
id_list = np.random.permutation(len(train_input_names_he))
num_iters = int(np.floor(len(id_list) / args.batch_size))
st = time.time()
epoch_st=time.time()
for i in range(num_iters):
input_image_le_batch = []
input_image_me_batch = []
input_image_he_batch = []
output_image_batch = []
# Collect a batch of images
for j in range(args.batch_size):
index = i*args.batch_size + j
id = id_list[index]
cv2_image_train_he = cv2.imread(train_input_names_he[id],-1)
input_image_he = np.float32(cv2.cvtColor(cv2_image_train_he,cv2.COLOR_BGR2RGB)) / 65535.0
input_image_he_gamma,_,_ = utils.ldr_to_hdr_train(input_image_he,train_input_names_he[id])
input_image_he_c = np.concatenate([input_image_he,input_image_he_gamma],axis=2)
cv2_image_train_me = cv2.imread(train_input_names_me[id],-1)
input_image_me = np.float32(cv2.cvtColor(cv2_image_train_me,cv2.COLOR_BGR2RGB)) / 65535.0
input_image_me_gamma,_,_ = utils.ldr_to_hdr_train(input_image_me,train_input_names_me[id])
input_image_me_c = np.concatenate([input_image_me,input_image_me_gamma],axis=2)
cv2_image_train_le = cv2.imread(train_input_names_le[id],-1)
input_image_le = np.float32(cv2.cvtColor(cv2_image_train_le,cv2.COLOR_BGR2RGB)) / 65535.0
input_image_le_gamma,_,_ = utils.ldr_to_hdr_train(input_image_le,train_input_names_le[id])
input_image_le_c = np.concatenate([input_image_le,input_image_le_gamma],axis=2)
output_image = cv2.cvtColor(cv2.imread(train_output_names_hdr[id],-1),cv2.COLOR_BGR2RGB)
input_image_le_batch.append(np.expand_dims(input_image_le_c, axis=0))
input_image_me_batch.append(np.expand_dims(input_image_me_c, axis=0))
input_image_he_batch.append(np.expand_dims(input_image_he_c, axis=0))
output_image_batch.append(np.expand_dims(output_image, axis=0))
input_image_le_batch = np.squeeze(np.stack(input_image_le_batch, axis=1))
input_image_me_batch = np.squeeze(np.stack(input_image_me_batch, axis=1))
input_image_he_batch = np.squeeze(np.stack(input_image_he_batch, axis=1))
output_image_batch = np.squeeze(np.stack(output_image_batch, axis=1))
train_writer.add_summary(sess.run(le_image_summ,feed_dict={le_image_pl:input_image_le_batch[...,:3]}),i)
train_writer.add_summary(sess.run(me_image_summ,feed_dict={me_image_pl:input_image_me_batch[...,:3]}),i)
train_writer.add_summary(sess.run(he_image_summ,feed_dict={he_image_pl:input_image_he_batch[...,:3]}),i)
train_writer.add_summary(sess.run(gt_image_summ,feed_dict={gt_image_pl:output_image_batch[...,:3]}),i)
# Do the training here
_,current_loss=sess.run([opt,loss],feed_dict={input_exposure_stacks[0]:input_image_le_batch,input_exposure_stacks[1]:input_image_me_batch,input_exposure_stacks[2]:input_image_he_batch, gt_exposure_stack:output_image_batch, lr:learning_rate})
current_losses.append(current_loss)
small_loss_bin.append(current_loss)
cnt = cnt + args.batch_size
if cnt % args.log_interval == 0:
small_loss_bin_mean = np.mean(small_loss_bin)
string_print = "Epoch = %d Count = %d Current_Loss = %.4f Time = %.2f "%(epoch, cnt, small_loss_bin_mean, time.time()-st)
small_loss_bin = []
train_str = utils.LOG(string_print)
print(train_str)
if args.save_logs:
log_file = open("%s/Logs_try%d.txt"%(try_name, args.nTry), 'a')
log_file.write(train_str + "\n")
log_file.close()
status = open("status%d.txt"%(args.status_id),'a')
status.write(train_str + "\n")
status.close()
st = time.time()
summ = sess.run(train_loss_summary, feed_dict={train_loss_pl:np.mean(current_losses)})
train_writer.add_summary(summ,train_step)
train_step +=1
mean_loss = np.mean(current_losses)
avg_loss_per_epoch.append(mean_loss)
# Create directories if needed
if not os.path.isdir("%s/%s/%04d"%(try_name, "ckpts", epoch)):
os.makedirs("%s/%s/%04d"%(try_name, "ckpts", epoch))
# Save latest checkpoint to same file name
print("Saving latest checkpoint")
saver.save(sess, model_checkpoint_name)
    if val_indices and epoch % args.checkpoint_step == 0:
print("Saving checkpoint for this epoch")
saver.save(sess, "%s/%s/%04d/model.ckpt"%(try_name, "ckpts", epoch))
print("Average Training loss = ", mean_loss)
if args.save_logs:
log_file = open("%s/Logs_try%d.txt"%(try_name, args.nTry), 'a')
log_file.write("\nAverage Training loss = " + str(mean_loss))
log_file.close()
status = open("status%d.txt"%(args.status_id),'a')
status.write("\nAverage Training loss = " + str(mean_loss))
status.close()
if epoch % args.validation_step == 0:
print("Performing validation")
if not os.path.isdir("%s/%s/%04d"%(try_name, "val_Imgs", epoch)):
os.makedirs("%s/%s/%04d"%(try_name, "val_Imgs", epoch))
psnr_pre_list = []
psnr_post_list = []
val_idx_count = 0
pred_time_list = []
# Do the validation on a small set of validation images
for ind in val_indices:
print("\rRunning test image %d / %d"%(val_idx_count+1, len(val_input_names_he)))
input_images = []
cv2_img_he = cv2.imread(val_input_names_he[ind],-1)
h,w = cv2_img_he.shape[:2]
input_image_he = np.expand_dims(np.float32(cv2.cvtColor(cv2_img_he,cv2.COLOR_BGR2RGB)),axis=0)/65535.0
input_image_he_gamma,_,_ = utils.ldr_to_hdr_test(input_image_he,val_input_names_he[ind])
input_image_he_c = np.concatenate([input_image_he,input_image_he_gamma],axis=3)
cv2_img_me = cv2.imread(val_input_names_me[ind],-1)
h,w = cv2_img_me.shape[:2]
input_image_me = np.expand_dims(np.float32(cv2.cvtColor(cv2_img_me,cv2.COLOR_BGR2RGB)),axis=0)/65535.0
input_image_me_gamma,_,_ = utils.ldr_to_hdr_test(input_image_me,val_input_names_me[ind])
input_image_me_c = np.concatenate([input_image_me,input_image_me_gamma],axis=3)
cv2_img_le = cv2.imread(val_input_names_le[ind],-1)
h,w = cv2_img_le.shape[:2]
input_image_le = np.expand_dims(np.float32(cv2.cvtColor(cv2_img_le,cv2.COLOR_BGR2RGB)),axis=0)/65535.0
input_image_le_gamma,_,_ = utils.ldr_to_hdr_test(input_image_le,val_input_names_le[ind])
input_image_le_c = np.concatenate([input_image_le,input_image_le_gamma],axis=3)
cv2_img_hdr = cv2.imread(val_output_names_hdr[ind],-1)
h,w = cv2_img_hdr.shape[:2]
gt_hdr = cv2.cvtColor(cv2_img_hdr,cv2.COLOR_BGR2RGB)
gt_hdr = np.expand_dims(np.float32(gt_hdr), axis=0)
pred_st = time.time()
output_image_pred, curr_val_loss = sess.run([network,loss],feed_dict={input_exposure_stacks[0]:input_image_le_c,input_exposure_stacks[1]:input_image_me_c,input_exposure_stacks[2]:input_image_he_c,gt_exposure_stack:gt_hdr})
pred_et = time.time()
pred_time_list.append(pred_et-pred_st)
output_image = np.squeeze(output_image_pred)
gt_hdr = np.squeeze(gt_hdr)
h,w = output_image.shape[:2]
output_image_cropped = output_image[args.crop_pixels_height:h-args.crop_pixels_height,:,:]
gt_hdr_cropped = gt_hdr[args.crop_pixels_height:h-args.crop_pixels_height,:,:]
current_pre_psnr = compute_psnr(output_image_cropped, gt_hdr_cropped)
current_post_psnr = compute_psnr(log_tonemap_output(output_image_cropped), log_tonemap_output(gt_hdr_cropped))
current_losses_val.append(curr_val_loss)
psnr_pre_list.append(current_pre_psnr)
psnr_post_list.append(current_post_psnr)
file_name = utils.filepath_to_name(val_input_names_he[ind])
radiance_writer("%s/%s/%04d/%s_pred.hdr"%(try_name, "val_Imgs", epoch, file_name),output_image)
radiance_writer("%s/%s/%04d/%s_gt.hdr"%(try_name, "val_Imgs", epoch, file_name),gt_hdr)
val_idx_count = val_idx_count+1
mean_val_loss = np.mean(current_losses_val)
merge_summ = tf.summary.merge([test_loss_summary,test_psnr_summary])
merge_summ = sess.run(merge_summ, feed_dict={test_loss_pl:mean_val_loss,test_psnr_pl:np.mean(psnr_post_list)})
test_writer.add_summary(merge_summ,val_step)
val_step+=1
mean_pre_psnr = np.mean(psnr_pre_list)
mean_post_psnr = np.mean(psnr_post_list)
mean_proc_time = np.mean(pred_time_list)
print('val psnr pre list {}\n'.format(psnr_pre_list))
print('val psnr post list {}\n'.format(psnr_post_list))
print("Average Validation loss = %f"%(mean_val_loss))
print("Average PRE-PSNR = %f"%(mean_pre_psnr))
print("Average POST -PSNR = %f"%(mean_post_psnr))
print('Average processing time = %f'%(mean_proc_time))
if args.save_logs:
log_file = open("%s/Logs_try%d.txt"%(try_name, args.nTry), 'a')
log_file.write("\nAverage Validation loss = " + str(mean_val_loss)+"\n")
log_file.write("Average PRE-PSNR = %f\n"%(mean_pre_psnr))
log_file.write("Average POST-PSNR = %f\n"%(mean_post_psnr))
log_file.write('Average processing time = %f\n'%(mean_proc_time))
status = open("status%d.txt"%(args.status_id),'a')
status.write("\nAverage Validation loss = " + str(mean_val_loss)+"\n")
status.write("Average PRE-PSNR = %f\n"%(mean_pre_psnr))
status.write("Average POST -PSNR = %f\n"%(mean_post_psnr))
status.write('Average processing time = %f\n'%(mean_proc_time))
status.close()
epoch_time=time.time()-epoch_st
remain_time=epoch_time*(args.num_epochs-1-epoch)
m, s = divmod(remain_time, 60)
h, m = divmod(m, 60)
if s!=0:
train_time="Remaining training time = %d hours %d minutes %d seconds\n"%(h,m,s)
else:
train_time="Remaining training time : Training completed.\n"
str_time = utils.LOG(train_time)
print(str_time)
if args.save_logs:
log_file = open("%s/Logs_try%d.txt"%(try_name, args.nTry), 'a')
log_file.write(str_time + "\n")
log_file.close()
status = open("status%d.txt"%(args.status_id),'a')
status.write(str_time + "\n")
status.close()
sess.close()
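
# Hedged usage sketch (hypothetical values; the dataset folder must contain the
# train_256 / train_labels_256 / val / val_labels subfolders expected above):
#   python fusion_train_256_ahdr.py --nTry 1 --id_str baseline \
#       --dataset hdr_ddg_dataset_ulti_13thJuly --batch_size 16 --loss l2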
| 2.15625 | 2 |
module.py | damnkk/cycle-GAN | 0 | 12761832 |
import tensorflow.compat.v1 as tf
import ops
import utils
from reader import Reader
from gen import Generator
| 1.078125 | 1 |
pyneql/ontology/person.py | Valerie-Hanoka/PyNeQL | 4 | 12761833 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
personquerybuilder is part of the project PyNeQL
Author: <NAME>
"""
from pyneql.ontology.thing import Thing
from pyneql.log.loggingsetup import (
setup_logging,
)
from pyneql.utils.enum import (
LanguagesIso6391 as Lang,
)
from pyneql.utils.utils import (
QueryException,
contains_a_date,
merge_two_dicts_in_sets,
normalize_str,
parse_literal_with_language
)
from dateutil.parser import parse as parsedate
from itertools import chain
import six
class Person(Thing):
"""
A semantic representation of a person, retrieved from the Semantic Web.
"""
setup_logging()
# Elements which will be used to construct the query for a Person
has_full_name = None
has_last_name = None
has_first_name = None
has_birth_year = None
has_death_year = None
has_url = None
def __init__(self,
full_name=None, last_name=None, first_name=None,
url=None,
# birth_year=None, death_year=None,
query_language=Lang.DEFAULT,
endpoints=None, # SPARQL endpoints where the query should be sent
class_name=u'Person'
):
if not (full_name or (first_name and last_name) or url): # or birth_year or death_year
raise QueryException("There is not enough information provided to find this person."
" Provide full name information.")
self.has_full_name = normalize_str(full_name) if full_name else None
self.has_last_name = normalize_str(last_name) if last_name else None
self.has_first_name = normalize_str(first_name) if first_name else None
# self.has_birth_year = birth_year
# self.has_death_year = death_year
super(Person, self).__init__(
url=url,
query_language=query_language,
endpoints=endpoints,
class_name=class_name
)
def _get_life_info(self, life_event):
"""For a given information type (i.e death, birth), this function
returns all information that is available in the linked data about the
life event of the person (e.g: date and/or place).
:param life_event: An event of the life of a person (e.g.: birth, death)
:return: a dict of information concerning the given life event
"""
biography_info = {}
already_contains_birth_date = False # True if and only if we already have a full date
for k, v in self.attributes.items():
k = k.lower()
if life_event in k:
all_info = v if isinstance(v, set) else {v}
for info in all_info:
if info.count('-') > 4:
continue
if contains_a_date(info):
if already_contains_birth_date:
continue
try:
biography_info['date'] = parsedate(info)
                        already_contains_birth_date = True
except ValueError:
# No available date info to parse
continue
elif 'place' in k:
biography_info = merge_two_dicts_in_sets(
biography_info,
{'place': info})
elif 'name' in k:
biography_info = merge_two_dicts_in_sets(
biography_info,
{'name': info})
elif 'cause' in k or 'manner' in k:
biography_info = merge_two_dicts_in_sets(
biography_info,
{'cause/manner': info})
else:
biography_info = merge_two_dicts_in_sets(
biography_info,
{'other': info})
return biography_info
def get_death_info(self):
"""This function returns all information that is available in the linked data
about the death of the person (e.g: date and/or place).
:return: a dict of information concerning the death of the person
"""
return self._get_life_info('death')
def get_birth_info(self):
"""This function returns all information that is available in the linked data
about the birth of the person (e.g: date and/or place).
:return: a dict of information concerning the birth of the person
"""
return self._get_life_info('birth')
def get_gender(self):
"""This function returns the gender of the person.
We assume that there is only one gender available for a person in the retrieved data.
:return:
- 'F' if the person is labelled as a woman
- 'M' if the person is labelled as a man
- 'MtF', 'FtM', 'intersex' or 'queer' if the person is transgender or genderqueer.
- 'unknown' if the gender information is unavailable."""
genders = {
u'female': u'F',
u'Q6581072': u'F',
u'male': u'M',
u'Q6581097': u'M',
u'Q1052281': u'MtF',
u'Q2449503': u'FtM',
u'Q1097630': u'intersex',
u'genderqueer': u'queer'
}
retrieved_genders = [
parse_literal_with_language(g)
for g
in self.get_attributes_with_keyword('gender').values()
]
for gender, lang in retrieved_genders:
# We take the first declared gender in the iterator (others -if any- are ignored)
g = genders.get(gender[gender.find(':') + 1:], None)
if g is not None:
return g
return u'unknown'
def get_names(self):
"""This function returns all information that is available in the linked data
about the name of the person (e.g: birth name, family name, name in the native language,...).
:return: a dict of information concerning the names of the person.
"""
# Idiomatic elements
# A dirty way to get all the names that does not contain 'name' in their names
idiomatic_name_keys = {
v for v in chain.from_iterable([n for k, n in self.voc_attributes.items() if 'name' in k])
if 'name' not in v
}
unfiltered_names = {
k: v for k, v in six.iteritems(self.attributes)
if 'name' in k or k in idiomatic_name_keys
}
names = {}
for name_type, name in unfiltered_names.items():
if isinstance(name, set):
filtered = [n for n in name if n.count('-') < 5]
if filtered:
names[name_type] = filtered if len(filtered) > 1 else filtered.pop()
else:
if name.count('-') < 5:
names[name_type] = name
return names
def get_external_ids(self):
"""This function returns a curated list of external ids of the Person.
:return: a dict of Person ids such as VIAF, Wikidata, IDREF, ARK,...
"""
ids = {}
same_as = self.attributes.get(u'owl:sameAs')
same_as = same_as if isinstance(same_as, set) else {same_as}
exact_match = self.attributes.get(u'skos:exactMatch')
exact_match = exact_match if isinstance(exact_match, set) else {exact_match}
external_ids = same_as.union(exact_match)
for external_id in external_ids:
if u'ark' in external_id:
ids[u'ark'] = external_id
elif u'viaf' in external_id:
ids[u'viaf'] = external_id
elif u'd-nb.info' in external_id:
ids[u'Deutschen_Nationalbibliothek'] = external_id
elif u'wikidata.org' in external_id:
ids[u'wikidata'] = external_id
elif u'idref' in external_id:
ids[u'idref'] = external_id
return ids
# def get_works(self):
# """This function returns the works of a person.
# Not implemented yet.
# """
# # TODO
# # wdt:P1455
# # http://purl.org/dc/terms/contributor
# raise NotImplementedError
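
    # Hedged usage sketch (hypothetical; assumes the Thing base class exposes a
    # query() method that populates self.attributes, as the helpers above imply):
    #   person = Person(full_name='Marguerite Duras', query_language=Lang.DEFAULT)
    #   person.query()
    #   print(person.get_birth_info(), person.get_gender())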
| 2.8125 | 3 |
archived/ip2host.py | Duke-LeTran/practice-and-notes | 0 | 12761834 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 7 14:43:38 2020
@author: dukel
"""
#%%
import numpy as np
import pandas as pd
import socket
ls = ['192.168.3.11',
'172.16.58.3',
'192.168.127.12',
'172.16.17.32',
'172.16.58.3',
'192.168.127.12',
'172.16.31.10',
'192.168.3.11',
'172.16.58.3',
'172.16.58.3',
'172.16.17.32',
'172.16.58.3',
'172.16.17.32',
'192.168.127.12',
'172.16.17.32',
'192.168.3.11',
'172.16.31.10',
'172.16.58.3',
'172.16.31.10',
'172.16.31.10']
def ip2host(ls_input):
"""
    Parameters : list of IP addresses
    ----------
    Returns : list of tuples, n=2, each consisting of the IP and its hostname
"""
ls_output = []
for ip in ls_input:
try:
x = socket.gethostbyaddr(ip)
ls_output.append((ip, x[0]))
except Exception as e:
print('Error: ', e)
ls_output.append((ip, None))
return ls_output
def host2ip(ls_input):
ls_output = []
for hostname in ls_input:
try:
x = socket.gethostbyname(hostname)
            ls_output.append((hostname, x))  # gethostbyname returns the IP string directly
except Exception as e:
print('Error: ', e)
ls_output.append((hostname, None))
return ls_output
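
# Hedged sanity check (hypothetical host; results depend on local DNS):
# print(host2ip(['localhost']))  # -> [('localhost', '127.0.0.1')]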
ls2 = ip2host(ls)
#%%
# clean
df = pd.DataFrame(data=ls2, columns=['ip', 'hostname'])
df['hostname'] = df['hostname'].str.replace('.ucdmc.ucdavis.edu','').str.upper() | 2.75 | 3 |
globals.py | bilginfurkan/Anonimce | 2 | 12761835 | report_reasons = [(1, "Spam"), (2, "Post containing nudity or gore"), (3, "Illegal post"), (4, "Doxxing (sharing someone's real-life information)"), (5, "Insult")]
HIDDEN_TOPICS = [ "sanat", "spor", "teknoloji" ]  # topic slugs kept as identifiers (Turkish for art, sport, technology)
| 1.351563 | 1 |
workers/command_worker_signed.py | connax-utim/uhost-micropython | 1 | 12761836 | """
The command worker for Signed Tag. Designed to process incoming signed messages from UTIM.
It checks the input data structure (should contain two TLV elements: message and signature)
and verifies elements lengths. In case everything is correct it calls uHost's decrypt()
method and passing there the dev-id of the UTIM which has sent the message,
the message itself and the signature.
In case the signature verification was successful, the Worker builds the package addressed to_Client
and puts it into the outbound queue.
In case the input data structure is corrupted or signature verification failed, the Worker
reports an issue (in debug mode) and discards the message.
"""
import logging
from ..utilities.tag import Tag
from ..utilities.length import Length
class CommandWorkerSignedException(Exception):
"""
Some exception of CommandWorkerSigned class
"""
pass
class CommandWorkerSignedMethodException(Exception):
"""
No Utim method exception of CommandWorkerSigned class
"""
pass
class CommandWorkerSigned(object):
"""
Signed command worker class
"""
def __init__(self, uhost):
"""
Initialization
"""
# Check all necessary methods
methods = [
'decrypt'
]
for method in methods:
if not (hasattr(uhost, method) and callable(getattr(uhost, method))):
raise CommandWorkerSignedMethodException
self.__uhost = uhost # Uhost instance
def process(self, devid, data, outbound_queue):
"""
Run process
"""
logging.debug("Command Worker Signed. Trying to verify signature of %s : %s",
[hex(x) for x in data], [hex(x) for x in data])
tag1 = data[0:1]
length_bytes1 = data[1:3]
length1 = int.from_bytes(length_bytes1, byteorder='big', signed=False)
value1 = data[3:3 + length1]
tag2 = data[3 + length1:4 + length1]
length_bytes2 = data[4 + length1: 6 + length1]
length2 = int.from_bytes(length_bytes2, byteorder='big', signed=False)
value2 = data[6 + length1:6 + length1 + length2]
# Logging
logging.debug('Tag1: %s', str(tag1))
logging.debug('Length1: %d', length1)
logging.debug('Value1: %s', [x for x in value1])
logging.debug('Tag2: %s', str(tag2))
logging.debug('Length2: %d', length2)
logging.debug('Value2: %s', [x for x in value2])
# Check real data length
if (length1 == len(value1) and tag1 == Tag.UCOMMAND.SIGNED and
length2 == len(value2) and tag2 == Tag.UCOMMAND.SIGNATURE and
length2 == Length.UCOMMAND.SIGNATURE):
unsigned_message = self.__uhost.decrypt(devid, value1, value2)
if unsigned_message is not None:
logging.debug("Unsigned message: %s", [x for x in unsigned_message])
packet = ['to_Client/' + devid, unsigned_message]
outbound_queue.put(packet)
else:
logging.debug('Command_worker_signed: failed to decrypt message')
else:
logging.debug('Command_worker_signed: Invalid input data')
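
# Hedged wire-format sketch (illustrative only; the real tag constants live in
# utilities.tag.Tag and the expected signature length in utilities.length.Length):
#   [SIGNED tag, 1 B][len1, 2 B big-endian][message, len1 B]
#   [SIGNATURE tag, 1 B][len2, 2 B big-endian][signature, len2 B]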
| 2.796875 | 3 |
interpies/graphics.py | jobar8/interpies | 27 | 12761837 |
# -*- coding: utf-8 -*-
"""
Interpies - a libray for the interpretation of gravity and magnetic data.
graphics.py:
Functions for creating and manipulating graphics, colormaps and plots.
@author: <NAME>
Geophysics Labs, 2017
"""
import warnings
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import matplotlib.cm as cm
from skimage import exposure
# import local modules
import interpies
import interpies.colors as icolors
# temporary solution to silence a warning issued by Numpy when called by matplotlib imshow function
warnings.filterwarnings('ignore', category=FutureWarning)
#==============================================================================
# Functions for loading and modifying colormaps
#==============================================================================
def make_colormap(table, name='CustomMap'):
"""
Return a LinearSegmentedColormap. The colormap is also registered with
plt.register_cmap(cmap=my_cmap)
Parameters
----------
table : a sequence of RGB tuples.
Values need to be either floats between 0 and 1, or
integers between 0 and 255.
"""
if np.any(table > 1):
table = table / 255.
cdict = {'red': [], 'green': [], 'blue': []}
N = float(len(table))-1
for i, rgb in enumerate(table):
red, gre, blu = rgb
cdict['red'].append([i/N, red, red])
cdict['green'].append([i/N, gre, gre])
cdict['blue'].append([i/N, blu, blu])
new_cmap = mcolors.LinearSegmentedColormap(name, cdict)
plt.register_cmap(cmap=new_cmap)
return new_cmap
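
# Hedged usage sketch (hypothetical ramp; integer RGB rows are rescaled to 0-1):
#   ramp = make_colormap(np.array([[0, 0, 255], [255, 255, 255], [255, 0, 0]]),
#                        name='BlueWhiteRed')
#   plt.imshow(np.random.rand(10, 10), cmap=ramp)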
def cmap_to_array(cmap, n=256):
"""
    Return an nx3 array of RGB values that defines a colormap, generated either
    from a colormap name (if recognised) or from a matplotlib cmap object.
"""
# first assume cmap is a string
if cmap in icolors.datad: # additional colormaps in interpies.colors module
cm_array = np.asarray(icolors.datad[cmap])
elif cmap in cm.cmap_d: # matplotlib colormaps + the new ones (viridis, inferno, etc.)
cmap = cm.cmap_d[cmap]
cm_array = cmap(np.linspace(0, 1, n))[:, :3]
# now assume cmap is a colormap object
else:
try:
cm_array = cmap(np.linspace(0, 1, n))[:, :3] # remove alpha column
        except Exception:
raise ValueError('Colormap {} has not been recognised'.format(cmap))
return cm_array
def load_cmap(cmap='geosoft'):
"""
Return a colormap object.
If input is a string, load first the colormap, otherwise return the cmap unchanged.
"""
# first suppose input is the name of the colormap
if cmap in icolors.datad: # one of the additional colormaps in interpies colors module
cm_list = icolors.datad[cmap]
new_cm = mcolors.LinearSegmentedColormap.from_list(cmap, cm_list)
plt.register_cmap(cmap=new_cm)
return new_cm
elif cmap in cm.cmap_d: # matplotlib colormaps + the new ones (viridis, inferno, etc.)
return cm.get_cmap(cmap)
elif isinstance(cmap, mcolors.Colormap):
return cmap
else:
raise ValueError('Colormap {} has not been recognised'.format(cmap))
def plot_cmap(name='geosoft', n=256):
'''
Make a checkerboard plot of the colours in a palette.
Parameters
----------
name : str
Name of the colormap to plot.
n : int, optional
        Number of cells to use. Note that the closest perfect square is actually
        used, as the plot is a square grid.
'''
ncols = int(np.sqrt(n))
fig, ax = plt.subplots(figsize=(8, 8))
ax.imshow(np.arange(ncols**2).reshape(ncols, ncols),
cmap=load_cmap(name),
interpolation="nearest", aspect="equal")
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.grid(False)
def equalize_colormap(cmap, data, name='EqualizedMap'):
'''
Re-map a colormap according to a cumulative distribution. This is used to
perform histogram equalization of an image by changing the colormap
    instead of the image. *This is not strictly speaking the equalization of the
    colormap itself*.
    The cdf and bins are calculated from the input image, as if carrying out
    the histogram equalization of that image. In effect, the cdf becomes integrated
    into the colormap as a mapping function by redistributing the indices of the
    input colormap.
Parameters
----------
cmap : string or colormap object
Input colormap to remap.
data : array
Input data
'''
# first retrieve the color table (lists of RGB values) behind the input colormap
cm_array = cmap_to_array(cmap, n=256)
# perform histogram equalization of the data using scikit-image function.
# bins : centers of bins, cdf : values of cumulative distribution function.
cdf, bins = exposure.cumulative_distribution(
data[~np.isnan(data)].flatten(), nbins=256)
# normalize the bins to interval (0,1)
    bins_norm = (bins - bins.min()) / float(bins.max() - bins.min())
# calculate new indices by applying the cdf as a function on the old indices
# which are initially regularly spaced.
old_indices = np.linspace(0, 1, len(cm_array))
new_indices = np.interp(old_indices, cdf, bins_norm)
# make sure indices start with 0 and end with 1
new_indices[0] = 0.0
new_indices[-1] = 1.0
# remap the color table
cdict = {'red': [], 'green': [], 'blue': []}
for i, n in enumerate(new_indices):
red, gre, blu = cm_array[i]
cdict['red'].append([n, red, red])
cdict['green'].append([n, gre, gre])
cdict['blue'].append([n, blu, blu])
# return new colormap
return mcolors.LinearSegmentedColormap(name, cdict)
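# Illustrative sketch of the equalization trick described above: with skewed
# data, the remapped colormap devotes more of its colour range to where most
# of the values lie. The lognormal sample is arbitrary.
if __name__ == "__main__":
    skewed = np.random.lognormal(mean=0.0, sigma=1.0, size=(100, 100))
    eq_cmap = equalize_colormap('viridis', skewed)
    print(eq_cmap(0.5))  # colour now assigned near the data median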
def clip_colormap(cm_array, data, min_percent=2, max_percent=98, name='ClippedMap'):
'''
Modify the colormap so that the image of the data looks clipped at extreme
values.
Clipping boundaries are specified by percentiles and calculated from the
    input data. These boundaries are then "transferred" to the colormap.
'''
# remove NaNs
valid_data = data[~np.isnan(data)]
# calculate boundaries from data
data_min, data_max = np.percentile(valid_data, (min_percent, max_percent))
# calculate corresponding values on a scale from 0 to 1
imin = (data_min - valid_data.min()) / (valid_data.max() - valid_data.min())
imax = (data_max - valid_data.min()) / (valid_data.max() - valid_data.min())
# calculate the number of indices to add to accommodate clipped values
n_new = len(cm_array) / (imax - imin)
n_left = int(imin * n_new)
n_right = int(n_new - imax * n_new)
# calculate new indices
new_indices = np.linspace(0, 1, len(cm_array) + n_left + n_right)
# remap the color table
cdict = {'red': [], 'green': [], 'blue': []}
for i, n in enumerate(new_indices):
if i < n_left:
red, gre, blu = cm_array[0]
cdict['red'].append([n, red, red])
cdict['green'].append([n, gre, gre])
cdict['blue'].append([n, blu, blu])
elif i >= len(cm_array) + n_left:
red, gre, blu = cm_array[-1]
cdict['red'].append([n, red, red])
cdict['green'].append([n, gre, gre])
cdict['blue'].append([n, blu, blu])
else:
red, gre, blu = cm_array[i - n_left]
cdict['red'].append([n, red, red])
cdict['green'].append([n, gre, gre])
cdict['blue'].append([n, blu, blu])
# return new colormap
return mcolors.LinearSegmentedColormap(name, cdict)
def modify_colormap(cmap, data=None, modif='autolevels',
min_percent=2, max_percent=98, brightness=1.0):
'''
Modify a colormap by clipping or rescaling, according to statistics of the
input data or to fixed parameters. Also implement brightness control.
'''
# get the name of the colormap if input is colormap object
if isinstance(cmap, mcolors.Colormap):
cm_name = cmap.name
else:
cm_name = cmap
# retrieve the color table (lists of RGB values) behind the input colormap
cm_array = cmap_to_array(cmap, n=256)
# modify color table
if modif == 'autolevels':
return clip_colormap(cm_array,
data,
min_percent=min_percent,
max_percent=max_percent)
elif modif == 'brightness':
# convert brightness to gamma value
gamma = np.exp(1/brightness - 1)
normed_cm_array = exposure.adjust_gamma(cm_array, gamma=gamma)
else:
normed_cm_array = cm_array
# create new colormap
new_cm = mcolors.LinearSegmentedColormap.from_list(cm_name + '_n', normed_cm_array)
return new_cm
#===============================================================================
# Functions for displaying grid data
#===============================================================================
def stats_boundaries(data, std_range=1, step=1):
'''
Return a list of statistical quantities ordered in increasing order: min, mean, max
and the standard deviation intervals in between.
These are intended to be used for axis ticks in plots.
Parameters
----------
data : array-like
Input data.
std_range : int, optional
Extent of the range from the mean as a multiple of the standard deviation.
step : float, optional
        Size of the interval, as a fraction of the standard deviation. Must be <= `std_range`.
'''
mean = np.nanmean(data)
sigma = np.nanstd(data)
new_ticks = mean + sigma*np.arange(-std_range, std_range+step, step)
# make sure the boundaries don't go over min and max
new_ticks = np.unique(new_ticks.clip(np.nanmin(data), np.nanmax(data)))
return [np.nanmin(data)] + list(new_ticks) + [np.nanmax(data)]
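# Illustrative check: for a standard normal sample, the boundaries come out
# approximately as [min, mean - sigma, mean, mean + sigma, max].
if __name__ == "__main__":
    sample = np.random.randn(10000)
    print(stats_boundaries(sample, std_range=1, step=1))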
def alpha_blend(rgb, intensity, alpha=0.7):
"""
Combines an RGB image with an intensity map using "alpha" transparent blending.
https://en.wikipedia.org/wiki/Alpha_compositing
Parameters
----------
rgb : ndarray
An MxNx3 RGB array of floats ranging from 0 to 1 (color image).
intensity : ndarray
An MxNx1 array of floats ranging from 0 to 1 (grayscale image).
alpha : float
This controls the transparency of the rgb image. 1.0 is fully opaque
while 0.0 is fully transparent.
Returns
-------
rgb : ndarray
An MxNx3 RGB array representing the combined images.
"""
return alpha*rgb + (1 - alpha)*intensity
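# Illustrative sketch of the alpha compositing formula on a single pixel: a
# pure red pixel blended over a mid-grey intensity with alpha=0.7.
if __name__ == "__main__":
    rgb_pixel = np.array([1.0, 0.0, 0.0])
    grey_intensity = 0.5
    print(alpha_blend(rgb_pixel, grey_intensity, alpha=0.7))  # [0.85 0.15 0.15]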
def imshow_hs(source, ax=None, cmap='geosoft', cmap_norm='equalize', hs=True,
zf=10, azdeg=45, altdeg=45, dx=1, dy=1, hs_contrast=1.5, cmap_brightness=1.0,
blend_mode='alpha', alpha=0.7, contours=False, colorbar=True,
cb_contours=False, cb_ticks='linear', std_range=1, figsize=(8, 8),
title=None, **kwargs):
'''
Display an array or a grid with optional hillshading and contours.
The data representation is controlled by the colormap and two types
of normalisation can be applied to balance an uneven distribution of values.
Contrary to the standard method available in `plt.imshow`, it is the
colormap, not the data, that is modified. This allows the true distribution
of the data to be displayed on the colorbar. The two options are equalisation
(default) or clipping extremes (autolevels).
Parameters
----------
source : 2D array or interpies grid object
Grid to plot. Arrays with NaNs and masked arrays are supported.
ax : matplotlib Axes instance
This indicates where to make the plot. Create new figure if absent.
cmap : string or colormap object
Colormap or name of the colormap to use to display the array. The default
is 'geosoft' and corresponds to the blue to pink clra colormap from
Geosoft Oasis Montaj.
cmap_norm : string
Type of normalisation of the colormap.
Possible values are:
'equalize' (or 'equalization')
Increases contrast by distributing intensities across all the
possible colours. The distribution is calculated from the
data and applied to the colormap.
'auto' (or 'autolevels')
Stretches the histogram of the colormap so that dark colours become
darker and the bright colours become brighter. The extreme values
are calculated with percentiles: min_percent (defaults to 2%) and
max_percent (defaults to 98%).
'none' or any other value
The colormap is not normalised. The data can still be normalised
in the usual way using the 'norm' keyword argument and a
Normalization instance defined in matplotlib.colors().
hs : boolean
If True, the array is displayed in colours over a grey hillshaded version
of the data.
zf : float
Vertical exaggeration (Z factor) for hillshading.
azdeg : float
The azimuth (0-360, degrees clockwise from North) of the light source.
altdeg : float
The altitude (0-90, degrees up from horizontal) of the light source.
dx : float, optional
cell size in the x direction
dy : float, optional
cell size in the y direction
hs_contrast : float
Increase or decrease the contrast of the hillshade. This is directly
passed to the fraction argument of the matplotlib hillshade function.
cmap_brightness : float
Increase or decrease the brightness of the image by adjusting the
gamma of the colorbar. Default value is 1.0 meaning no effect. Values
greater than 1.0 make the image brighter, less than 1.0 darker.
Useful when the presence of the hillshade makes the result a little
too dark.
blend_mode : {'alpha', 'hsv', 'overlay', 'soft'}
The type of blending used to combine the colormapped data values with the
illumination intensity. Default is 'alpha' and the effect is controlled
by the alpha parameter.
alpha : float
Controls the transparency of the data overlaid over the hillshade.
1.0 is fully opaque while 0.0 is fully transparent.
contours : Boolean or integer
If True, add contours to the map, the number of them being the default value, i.e. 32.
If an integer is given instead, use this value as the number of contours
levels.
colorbar : Boolean
If True, draw a colorbar on the right-hand side of the map. The colorbar
shows the distribution of colors, as modified by the normalization algorithm.
cb_ticks : string
If left as default ('linear') the ticks and labels on the colorbar are
spaced linearly in the standard way. Otherwise (any other keyword, for example
'stats'), the mean and two ticks at + and - std_range*(standard deviation)
are shown instead.
std_range : integer (default is 1)
Extent of the range from the mean as a multiple of the standard deviation.
cb_contours : Boolean
Add lines corresponding to contours values on the colorbar.
figsize: tuple
Dimensions of the figure: width, height in inches.
If not provided, the default is (8, 8).
title: string
String to display as a title above the plot. If the source is a grid
object, the title is taken by default from the name of the grid.
kwargs : other optional arguments
        Can be used to pass other arguments to `plt.imshow()`, such as 'origin'
        and 'extent', as well as options for the colorbar ('shrink'), the title
        ('fontweight' and 'fontsize'), and the contours ('colors').
Returns
-------
ax : Matplotlib Axes instance.
Notes
-----
This function exploits the hillshading capabilities implemented in
matplotlib.colors.LightSource. A new blending mode is added (alpha compositing,
see https://en.wikipedia.org/wiki/Alpha_compositing).
'''
# get extra information if input data is grid object (grid.grid)
# `extent` is added to the kwargs of the imshow function
if isinstance(source, interpies.Grid):
kwargs['extent'] = source.extent
data = source.data
if title is None:
if source.name != 'Unknown':
title = source.name
else:
data = source.copy()
## Extract keywords - using pop() also removes the key from the dictionary
# keyword for the colorbar
cb_kwargs = dict(shrink=kwargs.pop('shrink', 0.6))
# keywords for the title
title_kwargs = dict(fontweight=kwargs.pop('fontweight', None),
fontsize=kwargs.pop('fontsize', 'large'))
# keyword arguments that can be passed to ls.shade
shade_kwargs = dict(norm=kwargs.get('norm'),
vmin=kwargs.get('vmin'),
vmax=kwargs.get('vmax'))
# keywords for cmap normalisation
min_percent = kwargs.pop('min_percent', 2)
max_percent = kwargs.pop('max_percent', 98)
# keywords for contours
ct_colors = kwargs.pop('ct_colors', 'k')
ct_cmap = kwargs.pop('ct_cmap', None)
# modify colormap if required
if cmap_norm in ['equalize', 'equalise', 'equalization', 'equalisation']:
# equalisation
my_cmap = equalize_colormap(cmap, data)
elif cmap_norm in ['auto', 'autolevels']:
# clip colormap
my_cmap = modify_colormap(cmap, data, modif='autolevels',
min_percent=min_percent, max_percent=max_percent)
else:
# colormap is loaded unchanged from the input name
my_cmap = load_cmap(cmap) # raise error if name is not recognised
# apply brightness control
if cmap_brightness != 1.0:
my_cmap = modify_colormap(my_cmap, modif='brightness', brightness=cmap_brightness)
# create figure or retrieve the one already defined
if ax:
fig = ax.get_figure()
else:
fig, ax = plt.subplots(figsize=figsize)
# convert input data to a masked array
data = np.ma.masked_array(data, np.isnan(data))
# add array to figure with hillshade or not
if hs:
# flip azimuth upside down if grid is also flipped
if 'origin' in kwargs:
if kwargs['origin'] == 'lower':
azdeg = 180 - azdeg
# create light source
ls = mcolors.LightSource(azdeg, altdeg)
# calculate hillshade and combine the colormapped data with the intensity
if alpha == 0:
# special case when only the shaded relief is needed without blending
rgb = ls.hillshade(data, vert_exag=zf, dx=dx, dy=dy, fraction=hs_contrast)
kwargs['cmap'] = 'gray'
elif blend_mode == 'alpha':
# transparency blending
rgb = ls.shade(data, cmap=my_cmap, blend_mode=alpha_blend,
vert_exag=zf, dx=dx, dy=dy,
fraction=hs_contrast, alpha=alpha, **shade_kwargs)
else:
# other blending modes from matplotlib function
rgb = ls.shade(data, cmap=my_cmap, blend_mode=blend_mode,
vert_exag=zf, dx=dx, dy=dy,
fraction=hs_contrast, **shade_kwargs)
# finally plot the array
ax.imshow(rgb, **kwargs)
else:
# display data without hillshading
im = ax.imshow(data, cmap=my_cmap, **kwargs)
# add contours
levels = None
if isinstance(contours, bool):
if contours:
levels = 32
else:
levels = contours
contours = True
if levels is not None:
# remove cmap keyword that might have been added earlier
_ = kwargs.pop('cmap', None)
        conts = ax.contour(data, levels, linewidths=0.5,
                           colors=ct_colors, linestyles='solid',
                           cmap=ct_cmap, **kwargs)
# add colorbar
if colorbar and alpha != 0:
if hs:
# Use a proxy artist for the colorbar
im = ax.imshow(data, cmap=my_cmap, **kwargs)
im.remove()
# draw colorbar
if cb_ticks == 'linear': # normal equidistant ticks on a linear scale
cb1 = plt.colorbar(im, ax=ax, **cb_kwargs)
else: # show ticks at min, max, mean and standard deviation interval
new_ticks = stats_boundaries(data, std_range, std_range)
cb1 = plt.colorbar(im, ax=ax, ticks=new_ticks, **cb_kwargs)
# add optional contour lines on colorbar
if contours and cb_contours:
cb1.add_lines(conts)
cb1.update_normal(im)
# add title
if title:
ax.set_title(title, **title_kwargs)
# return Axes instance for re-use
return ax
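# Illustrative usage sketch of imshow_hs on a plain array (no grid object);
# all parameter values below are arbitrary choices.
if __name__ == "__main__":
    demo_grid = np.random.randn(50, 50).cumsum(axis=0).cumsum(axis=1)
    imshow_hs(demo_grid, cmap='viridis', zf=5, azdeg=315, title='demo surface')
    plt.show()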
def save_image(output_file, fig=None, size=None, dpi=100):
'''
Save a Matplotlib figure as an image without borders or frames. The format
is controlled by the extension of the output file name.
Parameters
----------
output_file : string
Path to output file.
fig : Matplotlib figure instance
Figure you want to save as the image
size : tuple (w, h)
Width and height of the output image in pixels.
dpi : integer
Image resolution.
'''
if fig is None:
fig = plt.gcf()
ax = fig.gca()
ax.set_axis_off()
ax.set_position([0, 0, 1, 1])
if size:
w, h = size
fig.set_size_inches(w/dpi, h/dpi, forward=False)
fig.savefig(output_file, dpi=dpi)
| 2.78125 | 3 |
example-tests/example_FDTD.py | sbastrakov/pyHiChi | 1 | 12761838 | <filename>example-tests/example_FDTD.py
import pyHiChi as pfc
import numpy as np
import math as ma
def valueEx(x, y, z):
Ex = 0
return Ex
def valueEy(x, y, z):
Ey = np.cos(z + ma.pi/6)
return Ey
def valueEz(x, y, z):
Ez = 0
return Ez
def valueBx(x, y, z):
Bx = -np.cos(z+ma.pi/6)
return Bx
def valueBy(x, y, z):
By = 0
return By
def valueBz(x, y, z):
Bz = 0
return Bz
def step(minCoords, maxCoords, gridSize):
steps = pfc.vector3d(1, 1, 1)
steps.x = (maxCoords.x - minCoords.x)/(gridSize.x)
steps.y = (maxCoords.y - minCoords.y)/(gridSize.y)
steps.z = (maxCoords.z - minCoords.z)/(gridSize.z)
return steps
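# Illustrative sanity check of the helper above: with the 11-cell z axis
# spanning [0, 2*pi] used below, the spacing should be dz = 2*pi/11.
_demo_steps = step(pfc.vector3d(0.0, 1.0, 0.0),
                   pfc.vector3d(3.5, 7.0, 2*ma.pi),
                   pfc.vector3d(5, 10, 11))
print('dz =', _demo_steps.z, 'expected', 2*ma.pi/11)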
gridSize = pfc.vector3d(5, 10, 11)
minCoords = pfc.vector3d(0.0, 1.0, 0.0)
maxCoords = pfc.vector3d(3.5, 7.0, 2*ma.pi)
stepsGrid = step(minCoords, maxCoords, gridSize)
timeStep = 1e-16
grid = pfc.YeeGrid(gridSize, timeStep, minCoords, stepsGrid)
grid.setE(valueEx, valueEy, valueEz)
grid.setB(valueBx, valueBy, valueBz)
fieldSolver = pfc.FDTD(grid)
#show
import matplotlib.pyplot as plt
import matplotlib.animation as animation
N = 37
x = np.arange(0, 3.5, 3.5/N)
z = np.arange(0, 2*ma.pi, 2*ma.pi/N)
def getFields():
global grid, x, z, N
Ex = np.zeros(shape=(N,N))
Ey = np.zeros(shape=(N,N))
Ez = np.zeros(shape=(N,N))
Bx = np.zeros(shape=(N,N))
By = np.zeros(shape=(N,N))
Bz = np.zeros(shape=(N,N))
for ix in range(N):
for iy in range(N):
coordXZ = pfc.vector3d(x[ix], 0.0, z[iy])
E = grid.getE(coordXZ)
Ex[ix, iy] = E.x
Ey[ix, iy] = E.y
Ez[ix, iy] = E.z
B = grid.getB(coordXZ)
Bx[ix, iy] = B.x
By[ix, iy] = B.y
Bz[ix, iy] = B.z
return Ex, Ey, Ez, Bx, By, Bz
def updateData():
    # advance the FDTD solver by 10000 time steps between animation frames
    for i in range(10000):
        fieldSolver.updateFields()
(Ex, Ey, Ez, Bx, By, Bz) = getFields()
fig, axes = plt.subplots(ncols=3, nrows=2)
im11 = axes[0, 0].imshow(Ex, cmap='RdBu', interpolation='none', extent=(0, 2*ma.pi, 0, 3.5), animated = True)
fig.colorbar(im11, ax=axes[0, 0])
im12 = axes[0, 1].imshow(Ey, cmap='RdBu', interpolation='none', extent=(0, 2*ma.pi, 0, 3.5), animated = True)
fig.colorbar(im12, ax=axes[0, 1])
im13 = axes[0, 2].imshow(Ez, cmap='RdBu', interpolation='none', extent=(0, 2*ma.pi, 0, 3.5), animated = True)
fig.colorbar(im13, ax=axes[0, 2])
im21 = axes[1, 0].imshow(Bx, cmap='RdBu', interpolation='none', extent=(0, 2*ma.pi, 0, 3.5), animated = True)
fig.colorbar(im21, ax=axes[1, 0])
im22 = axes[1, 1].imshow(By, cmap='RdBu', interpolation='none', extent=(0, 2*ma.pi, 0, 3.5), animated = True)
fig.colorbar(im22, ax=axes[1, 1])
im23 = axes[1, 2].imshow(Bz, cmap='RdBu', interpolation='none', extent=(0, 2*ma.pi, 0, 3.5), animated = True)
fig.colorbar(im23, ax=axes[1, 2])
def updatefig(*args):
updateData()
(Ex, Ey, Ez, Bx, By, Bz) = getFields()
im11.set_array(Ex)
im12.set_array(Ey)
im13.set_array(Ez)
im21.set_array(Bx)
im22.set_array(By)
im23.set_array(Bz)
return im11, im12, im13, im21, im22, im23,
ani = animation.FuncAnimation(fig, updatefig, interval=50, blit=True)
plt.show()
| 2.453125 | 2 |
contrastive.py | Swall0w/siamese-network | 0 | 12761839 | import numpy
from chainer import cuda
from chainer import function
from chainer.utils import type_check
class Contrastive(function.Function):
"""Contrastive loss function."""
def __init__(self, margin, use_cudnn=True):
self.margin = float(margin)
self.use_cudnn = use_cudnn
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
x0_type, x1_type, y_type = in_types
type_check.expect(
x0_type.dtype == numpy.float32,
x1_type.dtype == numpy.float32,
x0_type.shape == x1_type.shape,
x0_type.shape[0] == x1_type.shape[0],
x1_type.shape[0] == y_type.shape[0],
x0_type.ndim == 2,
x1_type.ndim == 2,
y_type.ndim == 1
)
def forward(self, inputs):
xp = cuda.get_array_module(*inputs)
x0, x1, y = inputs
self.diff = x0 - x1 # N x 2
self.dist_sq = xp.sum(self.diff ** 2, axis=1) # N
self.dist = xp.sqrt(self.dist_sq)
self.mdist = self.margin - self.dist
dist = xp.maximum(self.mdist, 0)
loss = y * self.dist_sq + (1 - y) * dist * dist
loss = xp.sum(loss) / 2.0 / x0.shape[0]
return xp.array(loss, dtype=xp.float32),
def backward(self, inputs, gy):
xp = cuda.get_array_module(*inputs)
x0, x1, y = inputs
y = xp.vstack((y, y)).T
alpha = gy[0] / y.shape[0]
dist = xp.vstack((self.dist, self.dist)).T
# similar pair
gx0 = alpha * y * self.diff
# dissimilar pair
mdist = xp.vstack((self.mdist, self.mdist)).T
mdist_p = xp.array(self.mdist > 0, dtype=xp.int32)
mdist_p = xp.vstack((mdist_p, mdist_p)).T
gx0 += alpha * (1 - y) * mdist_p * mdist * -(self.diff / dist)
gx0 = gx0.astype(xp.float32)
return gx0, -gx0, None
def contrastive(x0, x1, y, margin=1, use_cudnn=True):
"""Contrastive loss.
"""
return Contrastive(margin, use_cudnn)(x0, x1, y)
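# Illustrative sketch (NumPy only, independent of Chainer): the same forward
# computation on a toy batch, to make the formula above concrete.
if __name__ == "__main__":
    a = numpy.array([[0.0, 0.0], [1.0, 0.0]], dtype=numpy.float32)
    b = numpy.array([[0.0, 0.5], [1.0, 2.0]], dtype=numpy.float32)
    t = numpy.array([1, 0], dtype=numpy.int32)  # first pair similar, second not
    d = numpy.sqrt(((a - b) ** 2).sum(axis=1))
    loss = (t * d ** 2 + (1 - t) * numpy.maximum(1.0 - d, 0) ** 2).sum() / 2.0 / len(t)
    print(loss)  # (0.25 + 0.0) / 2 / 2 = 0.0625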
| 2.5625 | 3 |
Arrays/3.py | Nadeemk07/Python-Examples | 0 | 12761840 | # Python3
m, n = [int(i) for i in input().split()]
if n <= 1:
print(n)
quit()
lesser_n = (n+2) % 60
lesser_m = (m+1) % 60
def fibo(n):
    # returns (F(n) mod 10) - 1 for n >= 2, computed iteratively
    a, b = 0, 1
    for i in range(2, n+1):
        c = (a + b) % 10
        b, a = c, b
    return (c - 1)
# for k <= 1, F(k) = k, so F(k) - 1 is k - 1 directly
if lesser_n <= 1:
    a = lesser_n - 1
else:
    a = fibo(lesser_n)
if lesser_m <= 1:
    b = lesser_m - 1
else:
    b = fibo(lesser_m)
# the answer is (F(n+2) - F(m+1)) mod 10
if a >= b:
    print(a - b)
else:
    print(10 + a - b)
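# Illustrative brute-force reference for small m, n, useful to convince
# yourself of the identity above (defined only, never called).
def _brute_last_digit(m, n):
    fibs = [0, 1]
    while len(fibs) <= n:
        fibs.append(fibs[-1] + fibs[-2])
    return sum(fibs[m:n+1]) % 10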
| 3.140625 | 3 |
app/models.py | CAUCHY2932/mark_py3 | 0 | 12761841 | <reponame>CAUCHY2932/mark_py3<gh_stars>0
# coding:utf-8
from flask_sqlalchemy import Pagination
from app import db
from flask import abort
DB_PREFIX = "mk_" # 数据库前缀
def paginate(query, page, per_page=20, error_out=True):
"""
    Paginate results obtained via db.session.query rather than via a db.Model
    query: objects returned by db.session.query have no paginate() method, so
    this helper reimplements it.
"""
if error_out and page < 1:
abort(404)
items = query.limit(per_page).offset((page - 1) * per_page).all()
if not items and page != 1 and error_out:
abort(404)
if page == 1 and len(items) < per_page:
total = len(items)
else:
total = query.order_by(None).count()
return Pagination(query, page, per_page, total, items)
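# Illustrative usage sketch: `User` stands in for any mapped model class and
# is passed in as a parameter here because it is hypothetical.
def _example_usage(User, page=1):
    pagination = paginate(db.session.query(User).order_by(User.id),
                          page, per_page=20)
    return pagination.items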
| 2.9375 | 3 |
src/utils/shared_functions.py | NOAA-OWP/inundation-mapping | 2 | 12761842 | #!/usr/bin/env python3
import os
from os.path import splitext
import fiona
import rasterio
import numpy as np
from rasterio.warp import calculate_default_transform, reproject, Resampling
from pyproj.crs import CRS
def getDriver(fileName):
driverDictionary = {'.gpkg' : 'GPKG','.geojson' : 'GeoJSON','.shp' : 'ESRI Shapefile'}
driver = driverDictionary[splitext(fileName)[1]]
return(driver)
def pull_file(url, full_pulled_filepath):
"""
This helper function pulls a file and saves it to a specified path.
Args:
url (str): The full URL to the file to download.
full_pulled_filepath (str): The full system path where the downloaded file will be saved.
"""
import urllib.request
print("Pulling " + url)
urllib.request.urlretrieve(url, full_pulled_filepath)
def delete_file(file_path):
"""
This helper function deletes a file.
Args:
file_path (str): System path to a file to be deleted.
"""
try:
os.remove(file_path)
except FileNotFoundError:
pass
def run_system_command(args):
"""
This helper function takes a system command and runs it. This function is designed for use
in multiprocessing.
Args:
args (list): A single-item list, the first and only item being a system command string.
"""
# Parse system command.
command = args[0]
# Run system command.
os.system(command)
def subset_wbd_gpkg(wbd_gpkg, multilayer_wbd_geopackage):
import geopandas as gp
from utils.shared_variables import CONUS_STATE_LIST, PREP_PROJECTION
print("Subsetting " + wbd_gpkg + "...")
# Read geopackage into dataframe.
wbd = gp.read_file(wbd_gpkg)
gdf = gp.GeoDataFrame(wbd)
for index, row in gdf.iterrows():
state = row["STATES"]
        if state is not None:  # Some polygons are empty in the STATES field.
            keep_flag = False  # Default to False, i.e. to delete the polygon.
if state in CONUS_STATE_LIST:
keep_flag = True
# Only split if multiple states present. More efficient this way.
elif len(state) > 2:
for wbd_state in state.split(","): # Some polygons have multiple states, separated by a comma.
if wbd_state in CONUS_STATE_LIST: # Check each polygon to make sure it's state abbrev name is allowed.
keep_flag = True
break
if not keep_flag:
gdf.drop(index, inplace=True) # Delete from dataframe.
# Overwrite geopackage.
    layer_name = os.path.splitext(os.path.split(wbd_gpkg)[1])[0]
gdf.crs = PREP_PROJECTION
gdf.to_file(multilayer_wbd_geopackage, layer=layer_name,driver='GPKG',index=False)
def get_fossid_from_huc8(huc8_id,foss_id_attribute='fossid',
hucs=os.path.join(os.environ['inputDataDir'],'wbd','WBD_National.gpkg'),
hucs_layerName=None):
hucs = fiona.open(hucs,'r',layer=hucs_layerName)
for huc in hucs:
if huc['properties']['HUC8'] == huc8_id:
return(huc['properties'][foss_id_attribute])
def update_raster_profile(args):
elev_cm_filename = args[0]
elev_m_filename = args[1]
projection = args[2]
nodata_val = args[3]
blocksize = args[4]
keep_intermediate = args[5]
overwrite = args[6]
if os.path.exists(elev_m_filename) & overwrite:
os.remove(elev_m_filename)
elif not os.path.exists(elev_m_filename):
pass
else:
print(f"Skipping {elev_m_filename}. Use overwrite option.")
return
    try:
        blocksize = int(blocksize)
    except (TypeError, ValueError):
        raise TypeError("Pass integer for blocksize")
assert elev_cm_filename.endswith('.tif'), "input raster needs to be a tif"
# Update nodata value and convert from cm to meters
dem_cm = rasterio.open(elev_cm_filename)
no_data = dem_cm.nodata
dem_m_profile = dem_cm.profile.copy()
dem_m_profile.update(driver='GTiff',tiled=True,nodata=nodata_val,
blockxsize=blocksize, blockysize=blocksize,
dtype='float32',crs=projection,compress='lzw',interleave='band')
dest = rasterio.open(elev_m_filename, "w", **dem_m_profile, BIGTIFF='YES')
for idx,window in dem_cm.block_windows(1):
        data = dem_cm.read(1, window=window)
        # rebind the same name so only one copy of the window stays in memory
        data = np.where(data == int(no_data), nodata_val, (data/100).astype(rasterio.float32))
        dest.write(data, indexes=1, window=window)
dem_cm.close()
dest.close()
if keep_intermediate == False:
os.remove(elev_cm_filename)
return(elev_m_filename)
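# Illustrative call sketch; the file names and CRS below are hypothetical
# placeholders, so adjust them before running.
if __name__ == "__main__":
    update_raster_profile(['elev_cm.tif', 'elev_m.tif', 'EPSG:5070',
                           -9999.0, 512, False, True])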
'''
This function isn't currently used but is the preferred method for
reprojecting elevation grids.
Several USGS elev_cm.tifs have the crs value in their profile stored as the string "CRS.from_epsg(26904)"
instead of the actual output of that command.
Rasterio fails to properly read the crs but using gdal retrieves the correct projection.
Until this issue is resolved use the reproject_dem function in reproject_dem.py instead.
reproject_dem is not stored in the shared_functions.py because rasterio and
gdal bindings are not entirely compatible: https://rasterio.readthedocs.io/en/latest/topics/switch.html
'''
def reproject_raster(input_raster_name,reprojection,blocksize=None,reprojected_raster_name=None):
    if blocksize is not None:
        try:
            blocksize = int(blocksize)
        except (TypeError, ValueError):
            raise TypeError("Pass integer for blocksize")
    else:
        blocksize = 256
assert input_raster_name.endswith('.tif'), "input raster needs to be a tif"
reprojection = rasterio.crs.CRS.from_string(reprojection)
with rasterio.open(input_raster_name) as src:
# Check projection
if src.crs.to_string() != reprojection:
if src.crs.to_string().startswith('EPSG'):
epsg = src.crs.to_epsg()
proj_crs = CRS.from_epsg(epsg)
rio_crs = rasterio.crs.CRS.from_user_input(proj_crs).to_string()
else:
rio_crs = src.crs.to_string()
print(f"{input_raster_name} not projected")
print(f"Reprojecting from {rio_crs} to {reprojection}")
transform, width, height = calculate_default_transform(
src.crs, reprojection, src.width, src.height, *src.bounds)
kwargs = src.meta.copy()
kwargs.update({
'crs': reprojection,
'transform': transform,
'width': width,
'height': height,
'compress': 'lzw'
})
if reprojected_raster_name is None:
reprojected_raster_name = input_raster_name
assert reprojected_raster_name.endswith('.tif'), "output raster needs to be a tif"
with rasterio.open(reprojected_raster_name, 'w', **kwargs, tiled=True, blockxsize=blocksize, blockysize=blocksize, BIGTIFF='YES') as dst:
reproject(
source=rasterio.band(src, 1),
destination=rasterio.band(dst, 1),
src_transform=src.transform,
src_crs=rio_crs,
dst_transform=transform,
dst_crs=reprojection.to_string(),
resampling=Resampling.nearest)
del dst
del src
def mem_profile(func):
    def wrapper(*args, **kwargs):
        if os.environ.get('mem') == "1":
            # `profile` comes from the memory_profiler package
            from memory_profiler import profile
            profile(func)(*args, **kwargs)
        else:
            func(*args, **kwargs)
    return wrapper
def append_id_to_file_name(file_name, identifier):
    '''
    Processing:
        Takes an incoming file name and inserts an identifier into the name
        just ahead of the extension, with an underscore added.
        ie) filename = "/output/myfolder/a_raster.tif"
            identifier = "13090001"
            Becomes: "/output/myfolder/a_raster_13090001.tif"
    Note:
        - Can handle a single identifier or a list of identifiers
            ie) identifier = ["13090001", "123000001"]
            Becomes: "/output/myfolder/a_raster_13090001_123000001.tif"
        - This allows for file name to not be submitted and will return None
    Inputs:
        file_name: a single file name
        identifier: a value or list of values to be inserted with an underscore
            added ahead of the extension
    Output:
        out_file_name: A single name with each identifier added at the end before
            the extension, each with an underscore in front of the identifier.
    '''
if file_name is not None:
root,extension = os.path.splitext(file_name)
if isinstance(identifier, list):
out_file_name = root
for i in identifier:
out_file_name += "_{}".format(i)
out_file_name += extension
else:
out_file_name = root + "_{}".format(identifier) + extension
else:
out_file_name = None
return(out_file_name)
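# Illustrative check exercising the behaviour documented in the docstring above.
if __name__ == "__main__":
    print(append_id_to_file_name("/output/myfolder/a_raster.tif", "13090001"))
    print(append_id_to_file_name("/output/myfolder/a_raster.tif",
                                 ["13090001", "123000001"]))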
| 2.484375 | 2 |
plac.py | IMP1/plac | 0 | 12761843 | <filename>plac.py
# ######################### LICENCE ###############################
#
# Copyright (c) 2010-2019, <NAME>
# All rights reserved.
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in bytecode form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""
See doc/plac.pdf, doc/plac_adv.pdf for the documentation.
"""
from plac_core import *
from plac_ext import (Interpreter, import_main, ReadlineInput,
stdout, runp, Monitor, default_help)
__version__ = '1.1.0'
try:
from plac_tk import TkMonitor
except ImportError:
pass
| 1.4375 | 1 |
docusign_esign/models/signing_group.py | cridenour/docusign-python-client | 0 | 12761844 | <gh_stars>0
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SigningGroup(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'created': 'str',
'created_by': 'str',
'error_details': 'ErrorDetails',
'group_email': 'str',
'group_name': 'str',
'group_type': 'str',
'modified': 'str',
'modified_by': 'str',
'signing_group_id': 'str',
'users': 'list[SigningGroupUser]'
}
attribute_map = {
'created': 'created',
'created_by': 'createdBy',
'error_details': 'errorDetails',
'group_email': 'groupEmail',
'group_name': 'groupName',
'group_type': 'groupType',
'modified': 'modified',
'modified_by': 'modifiedBy',
'signing_group_id': 'signingGroupId',
'users': 'users'
}
def __init__(self, created=None, created_by=None, error_details=None, group_email=None, group_name=None, group_type=None, modified=None, modified_by=None, signing_group_id=None, users=None): # noqa: E501
"""SigningGroup - a model defined in Swagger""" # noqa: E501
self._created = None
self._created_by = None
self._error_details = None
self._group_email = None
self._group_name = None
self._group_type = None
self._modified = None
self._modified_by = None
self._signing_group_id = None
self._users = None
self.discriminator = None
if created is not None:
self.created = created
if created_by is not None:
self.created_by = created_by
if error_details is not None:
self.error_details = error_details
if group_email is not None:
self.group_email = group_email
if group_name is not None:
self.group_name = group_name
if group_type is not None:
self.group_type = group_type
if modified is not None:
self.modified = modified
if modified_by is not None:
self.modified_by = modified_by
if signing_group_id is not None:
self.signing_group_id = signing_group_id
if users is not None:
self.users = users
@property
def created(self):
"""Gets the created of this SigningGroup. # noqa: E501
# noqa: E501
:return: The created of this SigningGroup. # noqa: E501
:rtype: str
"""
return self._created
@created.setter
def created(self, created):
"""Sets the created of this SigningGroup.
# noqa: E501
:param created: The created of this SigningGroup. # noqa: E501
:type: str
"""
self._created = created
@property
def created_by(self):
"""Gets the created_by of this SigningGroup. # noqa: E501
# noqa: E501
:return: The created_by of this SigningGroup. # noqa: E501
:rtype: str
"""
return self._created_by
@created_by.setter
def created_by(self, created_by):
"""Sets the created_by of this SigningGroup.
# noqa: E501
:param created_by: The created_by of this SigningGroup. # noqa: E501
:type: str
"""
self._created_by = created_by
@property
def error_details(self):
"""Gets the error_details of this SigningGroup. # noqa: E501
:return: The error_details of this SigningGroup. # noqa: E501
:rtype: ErrorDetails
"""
return self._error_details
@error_details.setter
def error_details(self, error_details):
"""Sets the error_details of this SigningGroup.
:param error_details: The error_details of this SigningGroup. # noqa: E501
:type: ErrorDetails
"""
self._error_details = error_details
@property
def group_email(self):
"""Gets the group_email of this SigningGroup. # noqa: E501
# noqa: E501
:return: The group_email of this SigningGroup. # noqa: E501
:rtype: str
"""
return self._group_email
@group_email.setter
def group_email(self, group_email):
"""Sets the group_email of this SigningGroup.
# noqa: E501
:param group_email: The group_email of this SigningGroup. # noqa: E501
:type: str
"""
self._group_email = group_email
@property
def group_name(self):
"""Gets the group_name of this SigningGroup. # noqa: E501
The name of the group. # noqa: E501
:return: The group_name of this SigningGroup. # noqa: E501
:rtype: str
"""
return self._group_name
@group_name.setter
def group_name(self, group_name):
"""Sets the group_name of this SigningGroup.
The name of the group. # noqa: E501
:param group_name: The group_name of this SigningGroup. # noqa: E501
:type: str
"""
self._group_name = group_name
@property
def group_type(self):
"""Gets the group_type of this SigningGroup. # noqa: E501
# noqa: E501
:return: The group_type of this SigningGroup. # noqa: E501
:rtype: str
"""
return self._group_type
@group_type.setter
def group_type(self, group_type):
"""Sets the group_type of this SigningGroup.
# noqa: E501
:param group_type: The group_type of this SigningGroup. # noqa: E501
:type: str
"""
self._group_type = group_type
@property
def modified(self):
"""Gets the modified of this SigningGroup. # noqa: E501
# noqa: E501
:return: The modified of this SigningGroup. # noqa: E501
:rtype: str
"""
return self._modified
@modified.setter
def modified(self, modified):
"""Sets the modified of this SigningGroup.
# noqa: E501
:param modified: The modified of this SigningGroup. # noqa: E501
:type: str
"""
self._modified = modified
@property
def modified_by(self):
"""Gets the modified_by of this SigningGroup. # noqa: E501
# noqa: E501
:return: The modified_by of this SigningGroup. # noqa: E501
:rtype: str
"""
return self._modified_by
@modified_by.setter
def modified_by(self, modified_by):
"""Sets the modified_by of this SigningGroup.
# noqa: E501
:param modified_by: The modified_by of this SigningGroup. # noqa: E501
:type: str
"""
self._modified_by = modified_by
@property
def signing_group_id(self):
"""Gets the signing_group_id of this SigningGroup. # noqa: E501
        The ID of the signing group.  # noqa: E501
:return: The signing_group_id of this SigningGroup. # noqa: E501
:rtype: str
"""
return self._signing_group_id
@signing_group_id.setter
def signing_group_id(self, signing_group_id):
"""Sets the signing_group_id of this SigningGroup.
        The ID of the signing group.  # noqa: E501
:param signing_group_id: The signing_group_id of this SigningGroup. # noqa: E501
:type: str
"""
self._signing_group_id = signing_group_id
@property
def users(self):
"""Gets the users of this SigningGroup. # noqa: E501
# noqa: E501
:return: The users of this SigningGroup. # noqa: E501
:rtype: list[SigningGroupUser]
"""
return self._users
@users.setter
def users(self, users):
"""Sets the users of this SigningGroup.
# noqa: E501
:param users: The users of this SigningGroup. # noqa: E501
:type: list[SigningGroupUser]
"""
self._users = users
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SigningGroup, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SigningGroup):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
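# Illustrative usage sketch: the field values below are made up.
if __name__ == "__main__":
    group = SigningGroup(group_name="Approvers", group_type="sharedSigningGroup",
                         signing_group_id="12345")
    print(group.to_dict())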
| 1.726563 | 2 |
remove_links_in_queue.py | JasonDing02/InterlinguaCorpusProject | 0 | 12761845 | <reponame>JasonDing02/InterlinguaCorpusProject
links_file = open("link_queue.txt", "r")
link_queue = links_file.readlines()
go = []
count = 0
index = 0
while index < len(link_queue):
if link_queue[index].find("interforo") == -1:
go.append(link_queue[index])
del link_queue[index]
else:
index = index + 1
count = count + 1
print(count)
links_file = open("link_queue.txt", "w")
for link in go:
links_file.write(link)
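# Illustrative alternative: the same filtering as a list comprehension, which
# avoids the delete-while-iterating bookkeeping above (defined only, not used).
def _filter_links(links, needle="interforo"):
    return [link for link in links if needle not in link]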
| 2.953125 | 3 |
myponggame.py | VishalChoubey1019/Pong-Game | 0 | 12761846 | <gh_stars>0
import turtle
import time
import random
# SCREEN
display = turtle.Screen()
display.title("Pong Game")
display.bgcolor("black")
display.setup(width = 700, height = 500)
# WALLS
# WALL 1
wall1 = turtle.Turtle()
wall1.shape('square')
wall1.color('blue')
wall1.speed(0)
wall1.shapesize(stretch_len = 1, stretch_wid = 4)
wall1.penup()
wall1.goto(-300,0)
# WALL 1 MOVEMENT
def wall1up():
y = wall1.ycor()
y += 20
wall1.sety(y)
def wall1down():
y = wall1.ycor()
y -= 20
wall1.sety(y)
# WALL 2
wall2 = turtle.Turtle()
wall2.shape('square')
wall2.color('blue')
wall2.speed(0)
wall2.shapesize(stretch_len = 1, stretch_wid = 4)
wall2.penup()
wall2.goto(300,0)
# WALL 2 MOVEMENT
def wall2up():
y = wall2.ycor()
y += 20
wall2.sety(y)
def wall2down():
y = wall2.ycor()
y -= 20
wall2.sety(y)
# WALL's REACTION ON KEYPRESS
display.listen()
display.onkeypress(wall1up,"w")
display.onkeypress(wall1down,"s")
display.onkeypress(wall2up,"Up")
display.onkeypress(wall2down,"Down")
#BALL
ball = turtle.Turtle()
ball.color('white')
ball.shape('circle')
ball.speed(0)
ball.penup()
ball.goto(0,0)
ball.dx = random.randint(-10, 5)
ball.dy = random.randint(-5, 10)
# Displays the score
sketch = turtle.Turtle()
sketch.speed(0)
sketch.color("red")
sketch.penup()
sketch.hideturtle()
sketch.goto(0, 210)
sketch.write("PLAYER A : 0 PLAYER B : 0",
align="center", font=("Courier", 24, "normal"))
def fun():
ball.goto(-1000,-1000)
wall1.goto(-1000,1000)
wall2.goto(1000,1000)
if scoreA > scoreB:
sketch.clear()
sketch.goto(0,0)
sketch.write("CONGRATULATIONS!! \nPLAYER A WON THE GAME", align= "center", font=("Courier", 25, "normal"))
time.sleep(2)
if scoreA < scoreB:
sketch.clear()
sketch.goto(0,0)
sketch.write("CONGRATULATIONS!! \nPLAYER B WON THE GAME", align= "center", font=("Courier", 25, "normal"))
time.sleep(2)
# MAIN FUNCTION:
scoreA = 0
scoreB = 0
while True:
display.update()
ball.setx(ball.xcor() + ball.dx)
ball.sety(ball.ycor() + ball.dy)
# BOUNDARY:
if ball.ycor() > 240 or ball.ycor() <-240:
ball.dy *= -1
# WALL TOUCH:
if ball.xcor()<-260 and ball.xcor()>-270 and ball.ycor() > wall1.ycor()-70 and ball.ycor() < wall1.ycor()+70:
ball.setx(-260)
ball.dx *= -1
if ball.xcor()>260 and ball.xcor()<270 and ball.ycor() > wall2.ycor()-70 and ball.ycor() < wall2.ycor()+70:
ball.setx(260)
ball.dx *= -1
# SCORE UPDATE:
if ball.xcor()<-350:
ball.dx = random.randint(-10, -3)
ball.dy = random.randint(-8, -3)
ball.goto(0,0)
time.sleep(0.1)
        scoreB += 1
sketch.clear()
sketch.write("PLAYER A : {} PLAYER B : {} ".format(scoreA , scoreB), align = "center" , font=("Courier", 20, "normal"))
if ball.xcor()>350:
ball.dx = random.randint(3, 8)
ball.dy = random.randint(4, 10)
ball.goto(0,0)
time.sleep(0.1)
        scoreA += 1
sketch.clear()
sketch.write("PLAYER A : {} PLAYER B : {}".format(scoreA , scoreB), align = "center" , font=("Courier", 20, "normal"))
    if scoreA == 3 or scoreB == 3:
        fun()
        break

display.mainloop()  # keep the window open once the game is over
| 3.34375 | 3
medium/109-convert-sorted-list-to-binary-search-tree.py | wanglongjiang/leetcode | 2 | 12761847 | '''
Convert Sorted List to Binary Search Tree
Given a singly linked list where the elements are sorted in ascending order,
convert it to a height-balanced binary search tree.
Here, a height-balanced binary tree is a binary tree in which the depths of the
two subtrees of every node never differ by more than 1.
'''
# Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
'''
Approach: copy the list values into an array, then recursively take the middle
element as the root so both halves stay balanced. O(n) time, O(n) extra space.
'''

class Solution:
    def sortedListToBST(self, head: ListNode) -> TreeNode:
        vals = []
        while head:
            vals.append(head.val)
            head = head.next

        def build(lo, hi):
            if lo > hi:
                return None
            mid = (lo + hi) // 2
            node = TreeNode(vals[mid])
            node.left = build(lo, mid - 1)
            node.right = build(mid + 1, hi)
            return node

        return build(0, len(vals) - 1)
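# Illustrative check: build a tree from [-10, -3, 0, 5, 9] and read the root
# and its children back.
if __name__ == "__main__":
    head = ListNode(-10, ListNode(-3, ListNode(0, ListNode(5, ListNode(9)))))
    root = Solution().sortedListToBST(head)
    print(root.val, root.left.val, root.right.val)  # 0 -10 5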
| 3.75 | 4 |
tpl/ase_gauss_ring_conf.tpl.py | zadorlab/KinBot | 30 | 12761848 | """
Template to run ase to do a contrained optimization
using Gaussian
KinBot needs to pass to the template:
1. A label for the calculation
2. The number of cores
3. The kwargs for Gaussian
4. The atom vector
5. The geometry
6. The Gaussian command
"""
import os, sys, re
import numpy as np
import ase
from ase import Atoms
from ase.calculators.gaussian import Gaussian
from ase.optimize.pcobfgs import PCOBFGS
from ase.db import connect
label = '{label}'
kwargs = {kwargs}
Gaussian.command = '{qc_command} < PREFIX.com > PREFIX.log'
calc = Gaussian(**kwargs)
atom = {atom}
geom = {geom}
mol = Atoms(symbols = atom, positions = geom)
mol.set_calculator(calc)
fix = {fix}
change = {change}
bonds = []
angles = []
dihedrals = []
for fi in fix:
if len(fi) == 2:
#careful: atom indices in the fix lists start at 1
bondlength = mol.get_distance(fi[0] - 1, fi[1] - 1)
bonds.append([bondlength,[fi[0] - 1, fi[1] - 1]])
if len(fi) == 3:
#careful: atom indices in the fix lists start at 1
angle = mol.get_angle(fi[0]-1,fi[1]-1,fi[2]-1) * np.pi / 180
angles.append([angle,[fi[0]-1,fi[1]-1,fi[2]-1]])
if len(fi) == 4:
#careful: atom indices in the fix lists start at 1
dihed = mol.get_dihedral(fi[0]-1,fi[1]-1,fi[2]-1,fi[3]-1) * np.pi / 180
dihedrals.append([dihed,[fi[0]-1,fi[1]-1,fi[2]-1,fi[3]-1]])
for ci in change:
if len(ci) == 3:
#careful: atom indices in the fix lists start at 1
bondlength = ci[2]
bonds.append([bondlength,[ci[0] - 1, ci[1] - 1]])
if len(ci) == 4:
#careful: atom indices in the fix lists start at 1
angle = ci[3] * np.pi / 180
angles.append([angle,[ci[0]-1,ci[1]-1,ci[2]-1]])
if len(ci) == 5:
#careful: atom indices in the fix lists start at 1
dihed = ci[4] * np.pi / 180
dihedrals.append([dihed,[ci[0]-1,ci[1]-1,ci[2]-1,ci[3]-1]])
dyn = PCOBFGS(mol,
trajectory=label + '.traj',
bonds=bonds,
angles=angles,
dihedrals=dihedrals,
force_consistent=False)
try:
dyn.run(fmax = 0.01, steps = 400)
e = mol.get_potential_energy()
data = {{'energy': e, 'status' : 'normal'}}
except RuntimeError:
data = {{'status' : 'error'}}
db = connect('{working_dir}/kinbot.db')
db.write(mol, name=label, data=data)
# add the finished stamp
f = open(label + '.log','a')
f.write('done\n')
f.close()
| 2.453125 | 2 |
zephir-exports/lib/utils.py | cdlib/zephir-services | 1 | 12761849 | <reponame>cdlib/zephir-services<gh_stars>1-10
#!/usr/bin/env python
"""Utils.py: Utils are a collection of methods used across scripts"""
import datetime
import os
import sys
import click
import environs
import sqlalchemy.engine.url
import yaml
class AppEnv:
""" AppEnv Class provides an easy helper for loading enviroment variables with
default values and yaml configuration files into an object for Zephir Services.
Enviroment variables include: ROOT_PATH, CONFIG_PATH, CACHE_PATH, IMPORT_PATH,
and OUTPUT_PATH. OVERRIDE_CONFIG_PATH is a special environment variable that
load after the CONFIG_PATH is loaded.
Args:
name: Prefix for application specific environment variables
root: The default root directory.
"""
def __init__(self, name, root_dir=os.path.dirname(__file__)):
self.name = name
        # load environment variables from .env file
app_env = environs.Env()
app_env.read_env()
with app_env.prefixed("{}_".format(name)):
self.ROOT_PATH = app_env("ROOT_PATH", False) or root_dir
self.ENV = app_env("ENV", False)
self.CONFIG_PATH = app_env("CONFIG_PATH", False) or os.path.join(
self.ROOT_PATH, "config"
)
self.OVERRIDE_CONFIG_PATH = app_env("OVERRIDE_CONFIG_PATH", False)
self.CACHE_PATH = app_env("CACHE_PATH", False) or os.path.join(
self.ROOT_PATH, "cache"
)
self.IMPORT_PATH = app_env("IMPORT_PATH", False) or os.path.join(
self.ROOT_PATH
)
self.OUTPUT_PATH = app_env("OUTPUT_PATH", False) or os.path.join(
self.ROOT_PATH, "export"
)
# TODO(ccollett): Refactor this to output path
self.EXPORT_PATH = app_env("EXPORT_PATH", False) or os.path.join(
self.ROOT_PATH, "export"
)
# Load application config
config = AppEnv._load_config(self.CONFIG_PATH)
# Used in testing, config files in test data will override local config files
if self.OVERRIDE_CONFIG_PATH is not None and os.path.isdir(
self.OVERRIDE_CONFIG_PATH
):
config = AppEnv._load_config(self.OVERRIDE_CONFIG_PATH, config)
self.CONFIG = config
@staticmethod
    def _load_config(path, config=None):
"""Load configuration files in the configuration directory
into a unified configuration dictionary.
Notes: Configuration files must be yaml files. The names
of the files become the top-level keys in the dictionary.
Args:
path: Path to a configuration directory.
config: An existing dictionary of configuration values.
Returns:
A configuration dictionary populated with the contents of the
configuration files.
"""
        if config is None:  # avoid a shared mutable default argument
            config = {}
        for entry in os.scandir(path):
if entry.is_file() and entry.name.endswith(".yml"):
section = os.path.splitext(entry.name)[0]
with open(entry, "r") as ymlfile:
config[section] = {}
config[section].update(yaml.safe_load(ymlfile))
return config
class DatabaseHelper:
"""Database Helper stores and manages database configurations to
a relational database. The class provides methods for instantiating database
connections in different packages (sqlalchemy, mysql.connector)
Notes: These strings depend on the sqlalchemy package.
Args:
config: A dictionary of database configuration values.
env_prefix: A prefix used to identify environment variables for script
Returns:
A database connection string compatable with sqlalchemy.
"""
def __init__(self, config, env_prefix):
self.drivername = os.environ.get(
"{}_DB_DRIVERNAME".format(env_prefix)
) or config.get("drivername")
self.username = os.environ.get(
"{}_DB_USERNAME".format(env_prefix)
) or config.get("username")
self.password = os.environ.get(
"{}_DB_PASSWORD".format(env_prefix)
) or config.get("password")
self.host = os.environ.get("{}_DB_HOST".format(env_prefix)) or config.get(
"host"
)
self.port = os.environ.get("{}_DB_PORT".format(env_prefix)) or config.get(
"port"
)
self.database = os.environ.get(
"{}_DB_DATABASE".format(env_prefix)
) or config.get("database")
self.socket = os.environ.get("{}_DB_SOCKET".format(env_prefix)) or config.get(
"socket"
)
def connection_url(self):
"""
Returns:
            A database connection string compatible with sqlalchemy.
"""
url = str(
sqlalchemy.engine.url.URL(
self.drivername,
self.username,
self.password,
self.host,
self.port,
self.database,
)
)
# if using mysql, add the socket to the URL
if self.drivername == "mysql+mysqlconnector" and self.socket is not None:
url = "{}?unix_socket={}".format(url, self.socket)
return url
def connection_args(self):
"""
Returns:
            Database connection arguments compatible with mysql.connector.
"""
args = {
"user": self.username,
"password": <PASSWORD>,
"host": self.host,
"database": self.database,
"unix_socket": self.socket,
}
return args
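# Illustrative usage sketch: building a connection URL from a plain config
# dict; the values are dummies and the env prefix 'DEMO' is hypothetical.
if __name__ == "__main__":
    demo_helper = DatabaseHelper({"drivername": "sqlite", "username": None,
                                  "password": None, "host": None, "port": None,
                                  "database": "/tmp/demo.db", "socket": None},
                                 env_prefix="DEMO")
    print(demo_helper.connection_url())  # sqlite:////tmp/demo.db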
class ConsoleMessenger:
"""ConsoleMessenger Class provides utility functions for outputing
messages to the console, which can be configured for verbosity.
This eliminates having to track these conditional logic to know when to print
specific messages.
Args:
app: The name of the application (to prepend stderr messages)
verbosity: verbosity level of application
* -1: quiet [No stdout, ERROR stderr]
* 0: default [stdout, ERROR stderr]
* 1: verbose [stdout, INFO stderr]
* 2: very_verbose [stdout, DEBUG stderr]
"""
def __init__(self, app=None, verbosity=0):
self.app = app
self.verbosity = verbosity
# verbose diagnostic messages only
def info(self, message):
if self.verbose():
self.send_error(message, level="INFO")
# very verbose debug messages only
def debug(self, message):
if self.very_verbose():
self.send_error(message, level="DEBUG")
# concise error handling messages
def error(self, message):
self.send_error(message, level="ERROR")
# standard output for use by chained applications
def out(self, message):
if not self.quiet():
click.secho(message, file=sys.stdout)
def send_error(self, message, level=None):
line = ""
if self.very_verbose():
line += datetime.datetime.today().strftime("%Y-%m-%d %H:%M:%S.%f ")
if level and self.very_verbose():
line += level + " "
if self.app:
line += self.app + ": "
line += message
click.secho(line, file=sys.stderr)
def quiet(self):
return self.verbosity == -1
def default(self):
return self.verbosity == 0
def verbose(self):
return self.verbosity >= 1
def very_verbose(self):
return self.verbosity >= 2
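# Illustrative sketch of the verbosity levels: with verbosity=1, info()
# reaches stderr while debug() stays silent.
if __name__ == "__main__":
    messenger = ConsoleMessenger(app="demo", verbosity=1)
    messenger.out("normal output")           # stdout
    messenger.info("diagnostic detail")      # stderr, shown at verbosity >= 1
    messenger.debug("hidden at this level")  # suppressed below verbosity 2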
| 2.765625 | 3 |
release/stubs.min/Autodesk/Revit/UI/__init___parts/DockablePanes.py | YKato521/ironpython-stubs | 0 | 12761850 | class DockablePanes(object):
""" Provides a container of all Revit built-in DockablePaneId instances. """
BuiltInDockablePanes = None
__all__ = [
"BuiltInDockablePanes",
]
| 1.40625 | 1 |