| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
akatsuki/thanos/views.py | raun/vigilant-system | 0 | 12775951 |
<reponame>raun/vigilant-system
from thanos import models, serializers
from rest_framework import generics, mixins
# Create your views here.
class FeatureRequestsListAll(generics.ListAPIView):
queryset = models.FeatureRequest.objects.all()
serializer_class = serializers.FeatureRequestsBasicListSerializer
class FeatureRequestsList(generics.CreateAPIView, generics.ListAPIView):
lookup_url_kwarg = 'user_id'
serializer_class = serializers.FeatureRequestsBasicListSerializer
def get_queryset(self):
user_id = self.kwargs.get(self.lookup_url_kwarg)
owned = models.FeatureRequest.objects.filter(creator__id=user_id)
watching = models.FeatureRequest.objects.filter(id__in=models.UserActionsFR.objects.filter(user__id=user_id).filter(action_type=3).values('feature_request'))
return owned.union(watching)
class FeatureRequestsDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = models.FeatureRequest.objects.all()
serializer_class = serializers.FeatureRequestsBasicListSerializer
class FeatureRequestsResponseDetail(generics.RetrieveAPIView):
lookup_url_kwarg = 'feature_request_id'
serializer_class = serializers.FeatureRequestResponseSerializer
def get_queryset(self):
feature_request_id = self.kwargs.get(self.lookup_url_kwarg)
return models.FeatureRequestResponse.objects.filter(id=feature_request_id)
class UserActionsCreate(generics.CreateAPIView, generics.RetrieveDestroyAPIView):
lookup_url_kwarg = 'feature_request_id'
serializer_class = serializers.UserActionsSerializer
def get_queryset(self):
feature_request_id = self.kwargs.get(self.lookup_url_kwarg)
return models.UserActionsFR.objects.filter(feature_request__id=feature_request_id)
class CommentsCrud(generics.CreateAPIView, generics.RetrieveUpdateDestroyAPIView):
queryset = models.Comment.objects.all()
serializer_class = serializers.CommentSerializer
class CommentsList(generics.ListAPIView):
lookup_url_kwarg = 'feature_request_id'
serializer_class = serializers.CommentSerializer
def get_queryset(self):
feature_request_id = self.kwargs.get(self.lookup_url_kwarg)
return models.Comment.objects.filter(feature_request__id=feature_request_id)
#class Replies
| 2.09375 | 2 |
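The views in the row above depend on the URL configuration supplying `user_id` and `feature_request_id` keyword arguments via `lookup_url_kwarg`. A minimal `urls.py` sketch for wiring them up, with route paths that are illustrative assumptions rather than the repository's actual routes:

```python
# Hypothetical routing for the thanos views above; the paths are assumptions.
from django.urls import path
from thanos import views

urlpatterns = [
    path('feature-requests/', views.FeatureRequestsListAll.as_view()),
    # 'user_id' matches FeatureRequestsList.lookup_url_kwarg
    path('users/<int:user_id>/feature-requests/', views.FeatureRequestsList.as_view()),
    path('feature-requests/<int:pk>/', views.FeatureRequestsDetail.as_view()),
    # the remaining views look objects up by 'feature_request_id'
    path('feature-requests/<int:feature_request_id>/response/',
         views.FeatureRequestsResponseDetail.as_view()),
    path('feature-requests/<int:feature_request_id>/actions/',
         views.UserActionsCreate.as_view()),
    path('feature-requests/<int:feature_request_id>/comments/',
         views.CommentsList.as_view()),
    path('comments/<int:pk>/', views.CommentsCrud.as_view()),
]
```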
girlfriend/util/concurrent.py | chihongze/girlfriend | 83 | 12775952 |
<filename>girlfriend/util/concurrent.py
# coding: utf-8
"""并发工具集
"""
from __future__ import absolute_import
import threading
from girlfriend.exception import InvalidArgumentException
class CountDownLatch(object):
"""基于计数的闭锁实现
"""
def __init__(self, count):
if count <= 0:
raise InvalidArgumentException(u"count must be a positive integer")
self._count = count
self._condition = threading.Condition()
def count_down(self):
with self._condition:
self._count -= 1
if self._count <= 0:
self._condition.notifyAll()
def await(self):
with self._condition:
while self._count > 0:
self._condition.wait()
class CyclicBarrier(object):
"""循环关卡实现
"""
def __init__(self, count):
if count <= 0:
raise InvalidArgumentException(u"count must be a positive integer")
self._count = count
self._awaiting_count = count
self._condition = threading.Condition()
def await(self):
with self._condition:
self._awaiting_count -= 1
if self._awaiting_count <= 0:
self._awaiting_count = self._count # reset the count so the barrier can be reused
self._condition.notifyAll()
else:
self._condition.wait()
| 3.15625 | 3 |
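A brief usage sketch (not from the repository) for the CountDownLatch above. Because `await` became a reserved keyword in Python 3.7, the method has to be looked up with `getattr` on modern interpreters; the class itself targets Python 2.

```python
# Three workers count down; the main thread blocks until all have finished.
import threading

latch = CountDownLatch(3)

def worker(n):
    print("worker %d done" % n)
    latch.count_down()

for i in range(3):
    threading.Thread(target=worker, args=(i,)).start()

# latch.await() is a SyntaxError on Python >= 3.7, so fetch the method by name.
getattr(latch, "await")()
print("all workers finished")
```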
backend/src/deploy/deployCandProp.py | pedromtelho/BlockchainElection-TestNetwork | 0 | 12775953 |
<reponame>pedromtelho/BlockchainElection-TestNetwork<gh_stars>0
import json
from web3 import Web3
from solc import compile_standard
import time
provider_url = "https://kovan.infura.io/v3/175c2cb13956473187db1e38282f6d6c"
web3 = Web3(Web3.HTTPProvider(provider_url))
def receiveFormInformations(ipca, pib, name, privKey):
# Solidity source code
compiled_sol = compile_standard({
"language": "Solidity",
"sources": {
"CandidateProposal.sol": {
"content": '''
pragma solidity >=0.4.22;
contract CandidateProposal {
struct Proposal {
string candidateName;
uint ipca;
uint pib;
}
address candidate;
mapping(address=>Proposal) referenceProposals;
constructor(uint _ipca, uint _pib, string memory _candidateName) public {
candidate = msg.sender;
referenceProposals[candidate].ipca = _ipca;
referenceProposals[candidate].pib = _pib;
referenceProposals[candidate].candidateName = _candidateName;
}
function getName() public view returns (string memory) {
return referenceProposals[candidate].candidateName;
}
function getIpca() public view returns (uint) {
return referenceProposals[candidate].ipca;
}
function getPib() public view returns (uint) {
return referenceProposals[candidate].pib;
}
}
'''
}
},
"settings":
{
"outputSelection": {
"*": {
"*": [
"metadata", "evm.bytecode",
"evm.bytecode.sourceMap"
]
}
}
}
})
# get bytecode
bytecode = compiled_sol['contracts']['CandidateProposal.sol']['CandidateProposal']['evm']['bytecode']['object']
# get abi
abi = json.loads(compiled_sol['contracts']['CandidateProposal.sol']
['CandidateProposal']['metadata'])['output']['abi']
candidateProposal = web3.eth.contract(abi=abi, bytecode=bytecode)
acct = web3.eth.account.privateKeyToAccount(privKey)
tx_hash = candidateProposal.constructor(ipca, pib, name).buildTransaction({
'from': acct.address,
'nonce': web3.eth.getTransactionCount(acct.address),
'gas': 1728712,
'gasPrice': web3.toWei('50', 'gwei')
})
signed = web3.eth.account.signTransaction(
tx_hash, privKey)
result = web3.eth.sendRawTransaction(signed.rawTransaction)
tx_receipt = web3.eth.waitForTransactionReceipt(result)
return tx_receipt.contractAddress, abi
| 2.109375 | 2 |
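receiveFormInformations returns the deployed contract's address together with its ABI, so a caller can read the stored proposal back. A hedged sketch of that round trip, using the same legacy web3.py calls as the deployment code; the private key and field values are placeholders:

```python
# Illustrative only: privKey is a placeholder, not a real key.
address, abi = receiveFormInformations(ipca=4, pib=2, name="Alice", privKey="0x...")

contract = web3.eth.contract(address=address, abi=abi)
print(contract.functions.getName().call())   # "Alice"
print(contract.functions.getIpca().call())   # 4
print(contract.functions.getPib().call())    # 2
```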
jobmon/launcher.py | adamnew123456/jobmon | 2 | 12775954 |
"""
JobMon Launcher
===============
Launches the JobMon supervisor as a daemon - generally, the usage pattern for
this module will be something like the following::
>>> from jobmon import config
>>> config_handler = config.ConfigHandler()
>>> config_handler.load(SOME_FILE)
>>> run_daemon(config_handler)
"""
import logging
import os
import sys
from jobmon import (
daemon, service, command_server, event_server, status_server, ticker, util
)
# Make sure that we get console logging before the supervisor becomes a
# daemon, so if any errors occur before that, they can be seen
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(message)s')
LOGGER = logging.getLogger('jobmon.launcher')
def run_daemon(config_handler, as_daemon=True):
"""
Starts the supervisor daemon, passing to it the appropriate
configuration.
:param config.ConfigHandler config_handler: The configuration to run the \
daemon with.
:param bool as_daemon: If ``True``, then this will launch a daemon and the \
parent process will exit. If ``False``, then this will launch a daemon but \
the parent process will continue.
"""
supervisor_wrapper = SupervisorDaemon(
home_dir=config_handler.working_dir,
kill_parent=as_daemon,
stderr=config_handler.log_file)
logging.info('Sending log messages[%s] to %s',
config_handler.log_level,
config_handler.log_file)
supervisor_wrapper.start(config_handler)
def run_fork(config_handler):
"""
Starts the supervisor as a direct child process, passing to it the appropriate
configuration. This is meant for use during tests, when the child process needs
to be monitored (and possibly killed if it crashes) instead of allowed to
roam free as in the daemon case.
:param config.ConfigHandler config_handler: The configuration to run the \
supervisor with.
:return int: The PID of the child process that was launched.
"""
logging.info('Sending log messages[%s] to %s',
config_handler.log_level,
config_handler.log_file)
pid = os.fork()
if pid == 0:
LOGGER.info('In child: starting processing')
execute_supervisor(config_handler)
else:
return pid
def execute_supervisor(config_handler):
"""
Runs the supervisor according to the given configuration.
:param config.ConfigHandler config_handler: The configuration.
"""
# Read the jobs and start up the supervisor, and then make sure to
# die if we exit
try:
util.reset_loggers()
logging.basicConfig(filename=config_handler.log_file,
level=config_handler.log_level,
format='%(name)s %(asctime)s %(message)s')
supervisor_shim = service.SupervisorShim()
events = event_server.EventServer(config_handler.event_port)
restart_svr = ticker.Ticker(supervisor_shim.on_job_timer_expire)
commands = command_server.CommandServer(
config_handler.control_port, supervisor_shim)
status = status_server.StatusServer(supervisor_shim)
supervisor = service.SupervisorService(
config_handler, events, status, restart_svr)
events.start()
commands.start()
status.start()
restart_svr.start()
supervisor.start()
# This has to be done last, since it starts up the autostart
# jobs and gets the ball rolling
supervisor_shim.set_service(supervisor)
# The event server should be the last to terminate, since it
# has to tell the outside world that we're gone
LOGGER.info('Waiting for events to exit')
events.wait_for_exit()
except Exception as ex:
LOGGER.error('DEAD SUPERVISOR', exc_info=True)
finally:
LOGGER.info('Peace out!')
os._exit(0)
class SupervisorDaemon(daemon.Daemon):
def run(self, config_handler):
"""
Runs the supervisor according to the given configuration.
:param config.ConfigHandler config_handler: The configuration.
"""
LOGGER.info('Done daemonizing, launching supervisor')
execute_supervisor(config_handler)
| 2.953125 | 3 |
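run_fork is documented as returning the child PID so tests can supervise (and if necessary kill) the supervisor themselves. A sketch of that pattern, assuming a ConfigHandler prepared as in the module docstring; the config path is a placeholder:

```python
import os
import signal

from jobmon import config

config_handler = config.ConfigHandler()
config_handler.load('jobs.json')   # hypothetical config file

pid = run_fork(config_handler)
try:
    pass  # exercise the running supervisor here
finally:
    os.kill(pid, signal.SIGTERM)   # stop the child when done
    os.waitpid(pid, 0)             # reap it so no zombie is left behind
```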
code/testing/gaussian/2d_plot.py | MorrisHuang-skipper/Serial-MD | 0 | 12775955 |
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from pylab import cm
mpl.rcParams['font.family'] = 'STIXGeneral'
plt.rcParams['xtick.labelsize'] = 16
plt.rcParams['ytick.labelsize'] = 16
plt.rcParams['font.size'] = 16
plt.rcParams['figure.figsize'] = [5.6, 4]
plt.rcParams['axes.titlesize'] = 16
plt.rcParams['axes.labelsize'] = 16
plt.rcParams['lines.linewidth'] = 2
plt.rcParams['lines.markersize'] = 6
plt.rcParams['legend.fontsize'] = 13
plt.rcParams['mathtext.fontset'] = 'stix'
plt.rcParams['axes.linewidth'] = 1
colors = cm.get_cmap('Set1', 9)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.xaxis.set_tick_params(which='major', size=5, width=1,
direction='in', top=True)
ax.xaxis.set_tick_params(which='minor', size=3, width=1,
direction='in', top=True)
ax.yaxis.set_tick_params(which='major', size=5, width=1,
direction='in', right=True)
ax.yaxis.set_tick_params(which='minor', size=3, width=1,
direction='in', right=True)
e = 1.6e-19
x = np.loadtxt('out.dat', unpack=True)
ax.hist(x, color=colors(0), bins=500, histtype='step', density=True)
x = np.loadtxt('out2.dat', unpack=True)
ax.hist(x, color=colors(1), bins=500, histtype='step', density=True)
x = np.loadtxt('out3.dat', unpack=True)
ax.hist(x, color=colors(2), bins=500, histtype='step', density=True)
plt.tight_layout()
# plt.savefig('../figure/1a.pdf')
plt.show()
| 2.1875 | 2 |
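The plotting script reads `out.dat`, `out2.dat`, and `out3.dat` with `np.loadtxt`, so it expects plain text files with one sample per line; the directory name suggests Gaussian data. A sketch that generates compatible dummy inputs (the distribution parameters are arbitrary assumptions):

```python
# Produce placeholder input files so the histogram script above can run.
import numpy as np

rng = np.random.default_rng(0)
for fname, sigma in [('out.dat', 1.0), ('out2.dat', 2.0), ('out3.dat', 0.5)]:
    np.savetxt(fname, rng.normal(loc=0.0, scale=sigma, size=100_000))
```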
pymenu.py | JessieMB/snake | 0 | 12775956 |
# coding=utf-8
"""
EXAMPLE
Example file, timer clock with in-menu options.
Copyright (C) 2017 <NAME> @ppizarror
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
"""
# Import pygame and libraries
from random import randrange
import datetime
import os
import pygame
from pygame.locals import *
# Import pygameMenu
import pygameMenu
from pygameMenu.locals import *
# Constants and global variables
ABOUT = ['PygameMenu {0}'.format(pygameMenu.__version__),
'Author: {0}'.format(pygameMenu.__author__),
TEXT_NEWLINE,
'Email: {0}'.format(pygameMenu.__email__)]
COLOR_BLUE = (12, 12, 200)
COLOR_BACKGROUND = [128, 0, 128]
COLOR_WHITE = (255, 255, 255)
FPS = 60
H_SIZE = 600 # Height of window size
HELP = ['Press ESC to enable/disable Menu',
'Press ENTER to access a Sub-Menu or use an option',
'Press UP/DOWN to move through Menu',
'Press LEFT/RIGHT to move through Selectors']
W_SIZE = 800 # Width of window size
# Init pygame
pygame.init()
os.environ['SDL_VIDEO_CENTERED'] = '1'
# Write help message on console
for m in HELP:
print(m)
# Create window
surface = pygame.display.set_mode((W_SIZE, H_SIZE))
pygame.display.set_caption('PygameMenu example')
# Main timer and game clock
clock = pygame.time.Clock()
timer = [0.0]
dt = 1.0 / FPS
timer_font = pygame.font.Font(pygameMenu.fonts.FONT_NEVIS, 100)
# Functions
def mainmenu_background():
"""
Background color of the main menu, on this function user can plot
images, play sounds, etc.
"""
surface.fill((40, 0, 40))
def reset_timer():
"""
Reset timer
"""
timer[0] = 0
def change_color_bg(c, **kwargs):
"""
Change background color
:param c: Color tuple
"""
if c == (-1, -1, -1): # If random color
c = (randrange(0, 255), randrange(0, 255), randrange(0, 255))
if kwargs['write_on_console']:
print('New background color: ({0},{1},{2})'.format(*c))
COLOR_BACKGROUND[0] = c[0]
COLOR_BACKGROUND[1] = c[1]
COLOR_BACKGROUND[2] = c[2]
# Timer menu
timer_menu = pygameMenu.Menu(surface,
window_width=W_SIZE,
window_height=H_SIZE,
font=pygameMenu.fonts.FONT_NEVIS,
title='Timer Menu',
# Adds 5px to title vertical position
title_offsety=5,
menu_alpha=85,
menu_width=600,
menu_height=int(H_SIZE / 2),
# If this menu closes (press ESC) back to main
onclose=PYGAME_MENU_RESET,
dopause=False)
timer_menu.add_option('Reset timer', reset_timer)
# Adds a selector (element that can handle functions)
timer_menu.add_selector('Change bgcolor',
# Values of selector, call to change_color_bg
[('Random', (-1, -1, -1)), # Random color
('Default', (128, 0, 128)),
('Black', (0, 0, 0)),
('Blue', COLOR_BLUE)],
# Action when changing element with left/right
onchange=None,
# Action when pressing return on a element
onreturn=change_color_bg,
# Kwargs: optional parameters passed to the change_color_bg function
write_on_console=True)
timer_menu.add_option('Return to Menu', PYGAME_MENU_BACK)
timer_menu.add_option('Close Menu', PYGAME_MENU_CLOSE)
# Help menu
help_menu = pygameMenu.TextMenu(surface,
window_width=W_SIZE,
window_height=H_SIZE,
font=pygameMenu.fonts.FONT_FRANCHISE,
title='Help',
# Pressing ESC button does nothing on this menu
onclose=PYGAME_MENU_DISABLE_CLOSE,
menu_color_title=(120, 45, 30),
# Background color
menu_color=(30, 50, 107),
dopause=False)
help_menu.add_option('Return to Menu', PYGAME_MENU_BACK)
for m in HELP:
help_menu.add_line(m)
# About menu
about_menu = pygameMenu.TextMenu(surface,
window_width=W_SIZE,
window_height=H_SIZE,
font=pygameMenu.fonts.FONT_NEVIS,
font_title=pygameMenu.fonts.FONT_8BIT,
title='About',
# Disable menu close (ESC button)
onclose=PYGAME_MENU_DISABLE_CLOSE,
text_fontsize=20,
font_size_title=30,
menu_color_title=COLOR_BLUE,
dopause=False)
about_menu.add_option('Return to Menu', PYGAME_MENU_BACK)
for m in ABOUT:
about_menu.add_line(m)
about_menu.add_line(TEXT_NEWLINE)
# Main menu, pauses execution of the application
menu = pygameMenu.Menu(surface,
window_width=W_SIZE,
window_height=H_SIZE,
font=pygameMenu.fonts.FONT_NEVIS,
title='Main Menu',
title_offsety=5,
menu_alpha=90,
enabled=False,
bgfun=mainmenu_background,
onclose=PYGAME_MENU_CLOSE)
menu.add_option(timer_menu.get_title(), timer_menu) # Add timer submenu
menu.add_option(help_menu.get_title(), help_menu) # Add help submenu
menu.add_option(about_menu.get_title(), about_menu) # Add about submenu
menu.add_option('Exit', PYGAME_MENU_EXIT) # Add exit function
# Main loop
while True:
# Tick
clock.tick(60)
timer[0] += dt
# Paint background
surface.fill(COLOR_BACKGROUND)
# Application events
events = pygame.event.get()
for event in events:
if event.type == QUIT:
exit()
elif event.type == KEYDOWN:
if event.key == K_ESCAPE:
if menu.is_disabled():
menu.enable()
# Draw timer
time_string = str(datetime.timedelta(seconds=int(timer[0])))
time_blit = timer_font.render(time_string, 1, COLOR_WHITE)
time_blit_size = time_blit.get_size()
surface.blit(time_blit, (
W_SIZE / 2 - time_blit_size[0] / 2, H_SIZE / 2 - time_blit_size[1] / 2))
# Run the main menu loop if the menu is enabled
menu.mainloop(events)
# Flip surface
pygame.display.flip()
| 3.46875 | 3 |
parcv/ResumeParser.py | asimokby/cv-parser-huggingface | 0 | 12775957 |
from parcv.Models import Models
from datetime import datetime
from dateutil import parser
import re
from string import punctuation
from collections import Counter
import math
class ResumeParser:
def __init__(self, ner, ner_dates, zero_shot_classifier, tagger, qa_squad):
self.models = Models()
self.ner, self.ner_dates, self.zero_shot_classifier, self.tagger, self.qa_squad = ner, ner_dates, zero_shot_classifier, tagger, qa_squad
self.parsed_cv = {}
def parse(self, resume_segments):
for segment_name in resume_segments:
resume_segment = resume_segments[segment_name]
if segment_name == "contact_info":
self.new_parse_contact_info(resume_segment)
elif segment_name == "work_and_employment":
self.new_parse_job_history(resume_segment)
elif segment_name == "education_and_training":
self.parse_education_history(resume_segment)
elif segment_name == "skills":
self.parse_skills(resume_segment)
return self.parsed_cv
def parse_skills(self, resume_segment):
splitter = re.compile(r'[{}]+'.format(re.escape(punctuation)))
labels = ['technical skill', 'title', 'other']
skills = []
for item in resume_segment:
for elem in splitter.split(item):
elem_splitted = [i for i in elem.strip().split() if i and not i.isdigit() and i.isalpha()]
capitalized = all([True if i[0].isupper() else False for i in elem_splitted])
if capitalized and elem_splitted and len(elem_splitted) < 4:
candidate_skill = ' '.join(elem_splitted)
if self.belongs_to_label(candidate_skill, 'technical skill', labels):
skills.append(candidate_skill)
self.parsed_cv['Skills'] = skills
def parse_education_history(self, resume_segment):
self.parsed_cv["Education"] = []
education_info = []
questions = ["what is the university's or the school's name?", "what is the field of study?", "what is the qualification?"]
school_names = self.ask_till_stopping(resume_segment, questions[0], 'school name', 10)
school_names = sorted(school_names, key=lambda x: x[1][0])
majors = self.ask_till_stopping(resume_segment, questions[1], 'field of study', len(school_names))
qualifications = self.ask_till_stopping(resume_segment, questions[2], 'qualification', len(school_names))
major_on_right = True
qualification_on_right = True
for idx, school in enumerate(school_names):
education_item = {}
school_name, (idx1, idx2) = school
major, major_on_right = self.get_closest_item_to_school(majors, major_on_right, idx, idx1, idx2)
qualification, qualification_on_right = self.get_closest_item_to_school(qualifications, qualification_on_right, idx, idx1, idx2)
majors.remove(major)
qualifications.remove(qualification)
if major:
major = major[0]
if qualification:
qualification = qualification[0]
if "high school" in school_name.lower():
major, qualification = "", ""
education_item['School Name'] = school_name
education_item['Field of Study'] = major
education_item['Qualification'] = qualification
education_info.append(education_item)
self.parsed_cv["Education"] = education_info
def get_closest_item_to_school(self, items, right_position, idx, idx1, idx2):
closest_left = math.inf
closest_left_item = None
closest_right = math.inf
closest_right_item = None
for item in items:
st_idx, end_idx = item[1]
if end_idx <= idx1:
if idx1 - end_idx < closest_left:
closest_left = idx1 - end_idx
closest_left_item = item
elif st_idx >= idx2:
if st_idx - idx2 < closest_right:
closest_right = st_idx - idx2
closest_right_item = item
if idx == 0:
if closest_right < closest_left: right_position = True
else: right_position = False
if right_position:
if closest_right_item:
return closest_right_item, right_position
elif closest_left_item:
return closest_left_item, right_position
else:
if closest_left_item:
return closest_left_item, right_position
elif closest_right_item:
return closest_right_item, right_position
return "", right_position
def ask_till_stopping(self, resume_segment, question, category, limit):
labels = ['school name', 'field of study', 'degree', "location", "other"]
context = ' , '.join(resume_segment)
answer_idxs = []
if not context.strip(): return answer_idxs
while True:
qa_input = {'question': question, 'context': context}
out = self.qa_squad(qa_input)
start_idx, end_idx, answer = out['start'], out['end'], out['answer']
if not answer:
break
context = context.replace(context[start_idx:end_idx], "")
if not context.strip(): return answer_idxs
splitter = re.compile(r'[\s{}]+'.format(re.escape(punctuation)))
answer_splitted = splitter.split(answer)
answer_splitted = [i for i in answer_splitted if i and not i.isdigit() and i.isalpha() ]
capitalized = all([True if i[0].isupper() else False for i in answer_splitted])
if len(answer_splitted) > 2:
num_of_1 = sum([True if i[0].isupper() else False for i in answer_splitted])
capitalized = num_of_1 > len(answer_splitted)//2
if not capitalized:
break
else:
if category == 'school name':
if self.belongs_to_label(answer, category, labels):
answer_idxs.append([answer, (start_idx, end_idx)])
else:
answer_idxs.append([answer, (start_idx, end_idx)])
if len(answer_idxs) > limit:
break
return answer_idxs
def new_find_person_name(self, contact_info):
context = ' , '.join(contact_info)
qa_input = {'question': "What is the person's name?", 'context': context}
out = self.qa_squad(qa_input)
return out['answer']
def find_school_names(self, resume_segment):
labels = ["institution", "degree", "field of study"]
idx_line = []
for idx, line in enumerate(resume_segment):
splitter = re.compile(r'[\s{}]+'.format(re.escape(punctuation)))
answer_splitted = splitter.split(line)
answer_splitted = [i for i in answer_splitted if i and not i.isdigit() and i.isalpha() ]
capitalized = all([True if i[0].isupper() else False for i in answer_splitted])
if len(answer_splitted) > 2:
num_of_1 = sum([True if i[0].isupper() else False for i in answer_splitted])
capitalized = num_of_1 > len(answer_splitted)//2
if not capitalized: continue
qa_input = {'question': "What is the school's name?", 'context': line}
out = self.qa_squad(qa_input)
answer = out['answer']
if self.belongs_to_label(line, "school", labels):
if answer:
idx_line.append((idx, answer))
return idx_line
def find_job_titles(self, resume_segment):
labels = ["company", "institution", "job title", "details"]
idx_line = []
for idx, line in enumerate(resume_segment):
splitter = re.compile(r'[\s{}]+'.format(re.escape(punctuation)))
answer_splitted = splitter.split(line)
answer_splitted = [i for i in answer_splitted if i and not i.isdigit() and i.isalpha() ]
capitalized = all([True if i[0].isupper() else False for i in answer_splitted])
if len(answer_splitted) > 2:
num_of_1 = sum([True if i[0].isupper() else False for i in answer_splitted])
capitalized = num_of_1 > len(answer_splitted)//2
if not capitalized: continue
qa_input = {'question': "What is the job name?", 'context': line}
out = self.qa_squad(qa_input)
answer = out['answer']
if self.belongs_to_label(line, "job title", labels):
if answer:
idx_line.append((idx, answer))
return idx_line
def belongs_to_label(self, sequence, label, labels):
res = self.zero_shot_classifier(sequence, labels)
class_score = zip(res["labels"], res["scores"])
highest = sorted(class_score, key=lambda x: x[1])[-1]
if highest[0] == label:
return True
return False
def new_parse_contact_info(self, contact_info):
contact_info_dict = {}
name = self.new_find_person_name(contact_info)
email = self.find_contact_email(contact_info)
phone1, phone2 = self.find_phone_numbers(contact_info)
address = self.find_address(contact_info)
contact_info_dict["Email"] = email
contact_info_dict["phone1"] = phone1
contact_info_dict["phone2"] = phone2
contact_info_dict['address'] = address
self.parsed_cv['Name'] = name
self.parsed_cv['Contact Info'] = contact_info_dict
def find_phone_numbers(self, contact_info):
context = ' , '.join(contact_info)
qa_input = {'question': "What is the phone number?", 'context': context}
out = self.qa_squad(qa_input)
answer1 = out['answer']
context = context.replace(answer1, "")
qa_input = {'question': "What is the phone number?", 'context': context}
answer2 = self.qa_squad(qa_input)['answer']
count_nums = lambda x: len([i for i in x if i and i.isdigit()])
if count_nums(answer1) < 7:
answer1 = ""
if count_nums(answer2) < 7:
answer2 = ""
return answer1, answer2
def find_address(self, contact_info):
context = ' , '.join(contact_info)
qa_input = {'question': "What is the address?", 'context': context}
address = self.qa_squad(qa_input)['answer']
labels = ['address', 'email', 'phone number', 'other']
if self.belongs_to_label(address, "address",labels):
return address
else:
return ""
def parse_contact_info(self, contact_info):
contact_info_dict = {}
name = self.find_person_name(contact_info)
email = self.find_contact_email(contact_info)
self.parsed_cv['Name'] = name
contact_info_dict["Email"] = email
self.parsed_cv['Contact Info'] = contact_info_dict
def find_person_name(self, items):
class_score = []
splitter = re.compile(r'[{}]+'.format(re.escape(punctuation.replace("&", "") )))
classes = ["person name", "address", "email", "title"]
for item in items:
elements = splitter.split(item)
for element in elements:
element = ''.join(i for i in element.strip() if not i.isdigit())
if not len(element.strip().split()) > 1: continue
out = self.zero_shot_classifier(element, classes)
highest = sorted(zip(out["labels"], out["scores"]), key=lambda x: x[1])[-1]
if highest[0] == "person name":
class_score.append((element, highest[1]))
if len(class_score):
return sorted(class_score, key=lambda x: x[1], reverse=True)[0][0]
return ""
def find_contact_email(self, items):
for item in items:
match = re.search(r'[\w.+-]+@[\w-]+\.[\w.-]+', item)
if match:
return match.group(0)
return ""
def new_get_job_company(self, line1, line2, resume_segment):
context = resume_segment[line1]
if line2 <= len(resume_segment)-1:
context = context + " , " + resume_segment[line2]
qa_input = {'question': "What is the company's name?", 'context': context}
out = self.qa_squad(qa_input)
return out['answer']
def new_parse_job_history(self, resume_segment):
idx_job_title = self.find_job_titles(resume_segment)
current_and_below = False
if not len(idx_job_title):
self.parsed_cv["Job History"] = []
return
if idx_job_title[0][0] == 0: current_and_below = True
job_history = []
for ls_idx, (idx, job_title) in enumerate(idx_job_title):
job_info = {}
job_info["Job Title"] = job_title
# company
if current_and_below: line1, line2 = idx, idx+1
else: line1, line2 = idx, idx-1
job_info["Company"] = self.new_get_job_company(line1, line2, resume_segment)
if current_and_below: st_span = idx
else: st_span = idx-1
# Dates
if ls_idx == len(idx_job_title) - 1: end_span = len(resume_segment)
else: end_span = idx_job_title[ls_idx+1][0]
start, end = self.get_job_dates(st_span, end_span, resume_segment)
job_info["Start Date"] = start
job_info["End Date"] = end
job_history.append(job_info)
self.parsed_cv["Job History"] = job_history
def parse_job_history(self, resume_segment):
idx_job_title = self.get_job_titles(resume_segment)
current_and_below = False
if not len(idx_job_title):
self.parsed_cv["Job History"] = []
return
if idx_job_title[0][0] == 0: current_and_below = True
job_history = []
for ls_idx, (idx, job_title) in enumerate(idx_job_title):
job_info = {}
job_info["Job Title"] = self.filter_job_title(job_title)
# company
if current_and_below: line1, line2 = idx, idx+1
else: line1, line2 = idx, idx-1
job_info["Company"] = self.get_job_company(line1, line2, resume_segment)
if current_and_below: st_span = idx
else: st_span = idx-1
# Dates
if ls_idx == len(idx_job_title) - 1: end_span = len(resume_segment)
else: end_span = idx_job_title[ls_idx+1][0]
start, end = self.get_job_dates(st_span, end_span, resume_segment)
job_info["Start Date"] = start
job_info["End Date"] = end
job_history.append(job_info)
self.parsed_cv["Job History"] = job_history
def get_job_titles(self, resume_segment):
classes = ["organization", "institution", "company", "job title", "work details"]
idx_line = []
for idx, line in enumerate(resume_segment):
has_verb = False
line_modifed = ''.join(i for i in line if not i.isdigit())
sentence = self.models.get_flair_sentence(line_modifed)
self.tagger.predict(sentence)
tags = []
for entity in sentence.get_spans('pos'):
tags.append(entity.tag)
if entity.tag.startswith("V"):
has_verb = True
most_common_tag = max(set(tags), key=tags.count)
if most_common_tag == "NNP":
if not has_verb:
out = self.zero_shot_classifier(line, classes)
class_score = zip(out["labels"], out["scores"])
highest = sorted(class_score, key=lambda x: x[1])[-1]
if highest[0] == "job title":
idx_line.append((idx, line))
return idx_line
def get_job_dates(self, st, end, resume_segment):
search_span = resume_segment[st:end]
dates = []
for line in search_span:
for dt in self.get_ner_in_line(line, "DATE"):
if self.isvalidyear(dt.strip()):
dates.append(dt)
if len(dates): first = dates[0]
exists_second = False
if len(dates) > 1:
exists_second = True
second = dates[1]
if len(dates) > 0:
if self.has_two_dates(first):
d1, d2 = self.get_two_dates(first)
return self.format_date(d1), self.format_date(d2)
elif exists_second and self.has_two_dates(second):
d1, d2 = self.get_two_dates(second)
return self.format_date(d1), self.format_date(d2)
else:
if exists_second:
st = self.format_date(first)
end = self.format_date(second)
return st, end
else:
return (self.format_date(first), "")
else: return ("", "")
def filter_job_title(self, job_title):
job_title_splitter = re.compile(r'[{}]+'.format(re.escape(punctuation.replace("&", "") )))
job_title = ''.join(i for i in job_title if not i.isdigit())
tokens = job_title_splitter.split(job_title)
tokens = [''.join([i for i in tok.strip() if (i.isalpha() or i.strip()=="")]) for tok in tokens if tok.strip()]
classes = ["company", "organization", "institution", "job title", "responsibility", "details"]
new_title = []
for token in tokens:
if not token: continue
res = self.zero_shot_classifier(token, classes)
class_score = zip(res["labels"], res["scores"])
highest = sorted(class_score, key=lambda x: x[1])[-1]
if highest[0] == "job title":
new_title.append(token.strip())
if len(new_title):
return ', '.join(new_title)
else: return ', '.join(tokens)
def has_two_dates(self, date):
years = self.get_valid_years()
count = 0
for year in years:
if year in str(date):
count+=1
return count == 2
def get_two_dates(self, date):
years = self.get_valid_years()
idxs = []
for year in years:
if year in date:
idxs.append(date.index(year))
min_idx = min(idxs)
first = date[:min_idx+4]
second = date[min_idx+4:]
return first, second
def get_valid_years(self):
current_year = datetime.today().year
years = [str(i) for i in range(current_year-100, current_year)]
return years
def format_date(self, date):
out = self.parse_date(date)
if out:
return out
else:
date = self.clean_date(date)
out = self.parse_date(date)
if out:
return out
else:
return date
def clean_date(self, date):
try:
date = ''.join(i for i in date if i.isalnum() or i =='-' or i == '/')
return date
except:
return date
def parse_date(self, date):
try:
date = parser.parse(date)
return date.strftime("%m-%Y")
except:
try:
date = datetime(date)
return date.strftime("%m-%Y")
except:
return 0
def isvalidyear(self, date):
current_year = datetime.today().year
years = [str(i) for i in range(current_year-100, current_year)]
for year in years:
if year in str(date):
return True
return False
def get_ner_in_line(self, line, entity_type):
if entity_type == "DATE": ner = self.ner_dates
else: ner = self.ner
return [i['word'] for i in ner(line) if i['entity_group'] == entity_type]
def get_job_company(self, idx, idx1, resume_segment):
job_title = resume_segment[idx]
if not idx1 <= len(resume_segment)-1: context = ""
else:context = resume_segment[idx1]
candidate_companies = self.get_ner_in_line(job_title, "ORG") + self.get_ner_in_line(context, "ORG")
classes = ["organization", "company", "institution", "not organization", "not company", "not institution"]
scores = []
for comp in candidate_companies:
res = self.zero_shot_classifier(comp, classes)['scores']
scores.append(max(res[:3]))
sorted_cmps = sorted(zip(candidate_companies, scores), key=lambda x: x[1], reverse=True)
if len(sorted_cmps): return sorted_cmps[0][0]
return context
| 2.40625 | 2 |
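ResumeParser's constructor takes five ready-made models: two NER pipelines, a zero-shot classifier, a flair POS tagger, and a SQuAD-style QA pipeline. The exact checkpoints used in the original project are not shown here, so the sketch below wires in generic Hugging Face pipelines and a flair tagger as stand-ins, with dummy resume segments:

```python
# Illustrative wiring only; the concrete model checkpoints are assumptions.
from transformers import pipeline
from flair.models import SequenceTagger

from parcv.ResumeParser import ResumeParser

ner = pipeline("ner", aggregation_strategy="simple")        # entity_group output, as get_ner_in_line expects
ner_dates = pipeline("ner", aggregation_strategy="simple")  # a date-aware NER model would go here
zero_shot = pipeline("zero-shot-classification")
qa_squad = pipeline("question-answering")
tagger = SequenceTagger.load("pos")

resume_parser = ResumeParser(ner, ner_dates, zero_shot, tagger, qa_squad)

# Segment keys follow the names checked in parse(); the content is dummy text.
segments = {
    "contact_info": ["Jane Doe", "jane.doe@example.com", "+1 555 123 4567"],
    "skills": ["Python, SQL, Docker"],
}
print(resume_parser.parse(segments))
```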
clearData.py | thesociallions/geotweetclusters | 0 | 12775958 |
<reponame>thesociallions/geotweetclusters
# clearData.py
# <NAME> (s2497867)
# <NAME> (s2580861)
import pickle
import sys
def main():
tweets = {}
for line in sys.stdin:
datalist = line.rstrip().split(' ')
tweetID = datalist[0]
tweetUser = datalist[1]
tweetText = datalist[2]
try:
tweetGEO = datalist[3]
Province = tweetGEO.split(', ')[1]
search = tweets.get(Province,None)
if search is None:
tweets[Province] = [(tweetID, tweetUser, tweetText, tweetGEO)]
else:
search.append((tweetID, tweetUser, tweetText, tweetGEO))
tweets[Province] = search
except IndexError:
pass  # line has no geo field or no province component; skip it
with open('tweets.pickle','wb') as f:
pickle.dump(tweets,f)
if __name__ == "__main__":
main()
| 2.8125 | 3 |
airnetSNL/dataset/rand_shapes.py | dennis-j-lee/AirNet-SNL | 1 | 12775959 |
<gh_stars>1-10
import numpy as np
from skimage.draw import random_shapes
import torch
from torch.utils.data import Dataset
from torch_radon import Radon
class RandomShapeDataset(Dataset):
"""
Generate random shapes for training and testing.
Args:
* imgSize (int): Number of rows / cols in image
* maxShapes (int): Number of shapes in image
* nImg (int): Number of images in dataset
* angles (torch.tensor): View angles
* idxOffset (int): Seed for random shape generation
* scaleFactor (int): Scale of pixel values
"""
def __init__(self,
angles: np.array,
imgSize: int = 128,
maxShapes: int = 10,
nImg: int = 260,
idxOffset: int = 0,
scaleFactor: int = 1000):
self.imgSize = imgSize
self.maxShapes = maxShapes
self.radon = Radon(imgSize,
angles,
clip_to_circle=True,
det_count=imgSize)
self.nImg = nImg
self.angles = angles
self.idxOffset = idxOffset
self.scaleFactor = scaleFactor
def __len__(self):
return self.nImg
def __getitem__(self, idx):
"""
Args:
idx (int): Index number of image to generate
Returns:
sinogram (torch.tensor): Array of shape (nPixels, nViews)
img (torch.tensor): Array of shape (nRows, nCols)
"""
if torch.is_tensor(idx):
idx = idx.tolist()
if idx >= self.nImg:
raise ValueError(f'Exceeded {self.nImg} images')
seed_idx = idx + self.idxOffset
img, _ = random_shapes((self.imgSize, self.imgSize),
max_shapes=self.maxShapes,
shape=None,
multichannel=False,
random_seed=seed_idx,
allow_overlap=False)
img = torch.tensor(img).unsqueeze(0).unsqueeze(0)
img = img.type(torch.cuda.FloatTensor)
img = invertAndZero(img, invert=True)
if torch.max(img) != 0:
img = img / torch.max(img) * self.scaleFactor
sinogram = self.radon.forward(img)
return sinogram.squeeze(0).type(torch.cuda.FloatTensor), \
img.squeeze(0).type(torch.cuda.FloatTensor)
def invertAndZero(img, invert=True):
'''
Calculate (1 - img), assuming image values in [0, 1].
Zero out the image outside of an inscribed circle.
'''
dtype = torch.cuda.FloatTensor
TENSOR_SCALE_FACTOR = 255
n = img.shape[2]
if n % 2 == 0:
begin = -n // 2
end = n // 2 - 1
else:
begin = -(n - 1) // 2
end = (n - 1) // 2
mask = -1 * torch.ones(img.shape).type(dtype)
x = torch.arange(begin, end + 1).type(dtype)
X1, X2 = torch.meshgrid(x, x)
X1 = X1.float()
X2 = X2.float()
distance = torch.sqrt(X1 ** 2 + X2 ** 2)
distance[distance > end] = -1
nSamples = img.shape[0]
for ss in range(nSamples):
mask[ss, 0, :, :] = distance
if invert:
zeroed = TENSOR_SCALE_FACTOR - img
else:
zeroed = img
zeroed_tensor = zeroed.masked_fill(mask == -1, 0)
return zeroed_tensor
| 2.5625 | 3 |
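A usage sketch for RandomShapeDataset. It assumes a CUDA-capable machine (the class hard-codes `torch.cuda.FloatTensor`) and an evenly spaced set of view angles; the number of views is an arbitrary choice:

```python
import numpy as np
from torch.utils.data import DataLoader

angles = np.linspace(0, np.pi, 180, endpoint=False)  # 180 views over half a rotation
dataset = RandomShapeDataset(angles, imgSize=128, nImg=16)
loader = DataLoader(dataset, batch_size=4)

sinograms, images = next(iter(loader))
print(sinograms.shape, images.shape)
```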
Python/interview/review/MeanMedianDataStruct.py | darrencheng0817/AlgorithmLearning | 2 | 12775960 |
<filename>Python/interview/review/MeanMedianDataStruct.py
'''
Created on 2016-02-29
@author: Darren
'''
class My_DS:
def __init__(self):
self.sum=0
self.count=0
self.data=[0]*1001
def add(self,num):
self.sum+=num
self.count+=1
self.data[num]+=1
def get_mean(self):
if self.count==0:
return "Empty"
return self.sum/self.count
def get_median(self):
if self.count==0:
return "Empty"
num1,num2=-1,-1
count,index=0,0
while index<len(self.data):
count+=self.data[index]
if count>=self.count//2 and num1==-1:
num1=index
if count>=self.count//2+1 and num2==-1:
num2=index
if num1!=-1 and num2!=-1:
break
index+=1
if self.count&1==0:
return (num1+num2)/2
else:
return num2
from heapq import *
class MedianFinder:
def __init__(self):
self.heaps = [], []
def addNum(self, num):
small, large = self.heaps
heappush(small, -heappushpop(large, num))
if len(large) < len(small):
heappush(large, -heappop(small))
def findMedian(self):
small, large = self.heaps
if len(large) > len(small):
return float(large[0])
return (large[0] - small[0]) / 2.0
nums=[5,15,1,3]
so=My_DS()
for index,num in enumerate(nums):
so.add(num)
print(index)
print("mean",so.get_mean())
print("median",so.get_median())
| 3.25 | 3 |
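A short check of the two-heap MedianFinder defined above: `small` keeps the lower half as negated values (so its root is the largest of the lower half), `large` keeps the upper half, and the median is either the root of `large` or the average of the two roots.

```python
# Worked example for the two-heap median structure above.
mf = MedianFinder()
for n in [5, 15, 1, 3]:
    mf.addNum(n)
print(mf.findMedian())   # 4.0 -> average of 3 and 5

mf.addNum(8)
print(mf.findMedian())   # 5.0 -> middle element of [1, 3, 5, 8, 15]
```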
example_images.py | BerenMillidge/Theory_Associative_Memory | 3 | 12775961 |
# quick scripts to generate example images for figures
import matplotlib.pyplot as plt
import seaborn as sns
import torch
import numpy as np
from functions import *
from data import *
from copy import deepcopy
import pickle
def plot_threshold_value_examples(savename):
with open(savename, 'rb') as handle:
data = pickle.load(handle)
print("LOADED")
images = data["images"]
reconstructions = data["reconstructions"]
sqdiffs = data["sqdiffs"]
print(images.shape)
print(reconstructions.shape)
print(sqdiffs.shape)
# get buckets
bucket_edges = [1,10,25,50,75,100,200,500]
bucket_elements = [[] for i in range(len(bucket_edges))]
for i,sqdiff in enumerate(sqdiffs):
# first 0th bucket
if sqdiff >=bucket_edges[0] and sqdiff <= bucket_edges[1]:
bucket_elements[0].append(i)
for j in range(len(bucket_edges)-2):
j = j+1
if sqdiff >= bucket_edges[j] and sqdiff <= bucket_edges[j+1]:
bucket_elements[j].append(i)
# final bucket
if sqdiff > bucket_edges[-1]:
bucket_elements[-1].append(i)
for b in bucket_elements:
print(len(b))
first_indices = [bucket_elements[i][0] for i in range(len(bucket_elements))]
#print(first_indices)
#setup figure
nrow = 2
ncol = len(bucket_elements)
fig, ax_array = plt.subplots(nrow, ncol, figsize=(ncol+1,nrow+1), gridspec_kw = {'wspace':0, 'hspace':0, 'top':1.-0.5/(nrow+1), 'bottom': 0.5/(nrow+1), 'left': 0.5/(ncol+1), 'right' :1-0.5/(ncol+1)})
for i,ax_row in enumerate(ax_array):
for j,axes in enumerate(ax_row):
idx = first_indices[j]
if i == 0:
axes.imshow(images[idx].transpose(1,2,0))
if i == 1:
axes.imshow(reconstructions[idx])
#axes.set_aspect("auto")
axes.set_yticklabels([])
axes.set_xticklabels([])
axes.set_xticks([])
axes.set_yticks([])
#fig.suptitle("Cifar10 Fraction Masked")
fig.subplots_adjust(wspace=0, hspace=0)
plt.subplots_adjust(wspace=0, hspace=0)
#plt.tight_layout()
plt.savefig("example_images/threshold_examples_cifar10.jpg", format="jpeg",bbox_inches = "tight", pad_inches = 0)
plt.show()
if __name__ == '__main__':
trainset_cifar, testset_cifar = get_cifar10(10000)
imgs = trainset_cifar[0][0]
print(imgs.shape)
for i in range(5):
fig = plt.figure()
plt.imshow(imgs[i].reshape(3,32,32).permute(1,2,0))
plt.xticks([])
plt.yticks([])
plt.tight_layout()
plt.savefig("example_images/img_3_" + str(i) + ".jpg")
plt.show()
# query img
img = imgs[0]
print(img.shape)
img = img.reshape(32 * 32 * 3)
halved = halve_continuous_img(img)
print(halved.shape)
fig = plt.figure()
plt.imshow(halved.permute(1,2,0))
plt.xticks([])
plt.yticks([])
plt.tight_layout()
plt.savefig("example_images/query_img_3" + str(i) + ".jpg")
plt.show()
plot_threshold_value_examples("example_reconstructions_thresholds_saved_3")
| 2.515625 | 3 |
savman/cli.py | stratts/savman | 0 | 12775962 |
'''A utility for backing up and restoring saved games.
Usage:
savman list [--backups]
savman scan [--nocache]
savman update
savman load <directory>
savman backup <directory> [<game>] [options]
savman restore <game> [<directory>] [options]
savman -h | --help
Commands:
list Show a list of games or backups
scan Perform a scan for games
update Check for a database update
load Load backups from directory
backup Backup all games to directory, or single game if specified
restore Restore game to either save location or specified directory
Options:
-h --help Display this screen
--scan Perform a scan for games
--nocache Scan without cache, slower but can find games the
regular scan missed
--update Check for database update
--max <count> Maximum number of versions to keep (default: 10)
--min <count> Number of versions to trim to when max is exceeded (default: 5)
--source <num> Game location to restore or backup from
--target <num> Game location to restore to
'''
from savman import databaseman, gameman, datapath, __version__
import sys
import os
import logging
import shutil
import time
from docopt import docopt
from threading import Thread
def run():
print('savman', __version__)
if '--debug' in sys.argv:
sys.argv.remove('--debug')
log = logging.getLogger()
log.setLevel(logging.DEBUG)
args = docopt(__doc__, version='savman {}'.format(__version__))
if args['backup'] and args['<directory>'] and not os.path.isdir(args['<directory>']):
try:
os.mkdir(args['<directory>'])
except FileNotFoundError:
path = os.path.normpath(args['<directory>'])
logging.error("Could not create '{}' as directory '{}' does not exist".format(
path, os.path.dirname(path)
))
sys.exit(1)
dbman = databaseman.Manager()
dbman.latesturl = 'http://strata.me/latestdb.json'
dbname = datapath('gamedata')
#dbname = datapath('dummydataz')
if not os.path.isfile(dbname) and hasattr(sys, 'frozen'):
shutil.copy(os.path.join(sys._MEIPASS, 'gamedata'), dbname)
dbman.load(dbname)
if args['update'] or args['--update']: dbman.check_update()
if dbman.update: dbman.download(dbname)
gman = gameman.GameMan(dbman.db)
gman.cachefile = datapath('cache')
gman.customfile = datapath('custom.txt')
gman.load_custom()
gman.load_cache(dircache=not args['--nocache'], cleargames=dbman.update)
# Clear cache and rescan when database updated
if args['scan'] or args['--scan'] or dbman.update: gman.find_games()
if args['load']:
gman.load_backups(args['<directory>'])
if args['restore']:
try:
gman.restore_game(args['<game>'], args['<directory>'], args['--source'],
args['--target'])
except gameman.InvalidIdError as e:
logging.error("Could not restore '{}': {}".format(args['<game>'], e))
sys.exit(1)
gman.save_cache()
if args['list'] and gman.games:
maxname = max([len(game.name) for game in gman.games.values()])
maxid = max([len(game.id) for game in gman.games.values()])
print('\nName', ' '*(maxname-4), 'ID', ' '*(maxid-2), 'Locations')
print('-'*(maxname)+' ', '-'*(maxid)+' ', '-'*(maxid))
for item, data in sorted(gman.games.items()):
locnum = len(data.locations)
for index, location in enumerate(data.locations):
#bak = Backup()
#bak.build(location.path, location.include, location.exclude)
#size = bak.curver.size/1000
#if size < 1000: sizet = ' ({} KB)'.format(round(size))
#else: sizet = ' ({} MB)'.format(round((size/1000), 1))
namelen = len(data.name)
idlen = len(data.id)
if locnum > 1: prefix = '[{}] '.format(index+1)
else: prefix = ''
if index == 0:
print(data.name, ' '*((maxname-namelen)+2), data.id,
' '*((maxid-idlen)+2), prefix, location.path, sep='')
else: print(' '*(maxname+2), ' '*(maxid+2), prefix, location.path, sep='')
#print('*', location.path, sizet)
print('\n{} games in total.\n'.format(len(gman.games)))
if args['backup'] and args['<directory>']:
if args['<game>']: game = [args['<game>']]
else: game = None
minver = 5
maxver = 10
try:
if args['--min']: minver = int(args['--min'])
if args['--max']: maxver = int(args['--max'])
except ValueError:
logging.error("Argument for '--max' and '--min' must be a number")
sys.exit(1)
if minver >= maxver:
logging.error("Value for '--min' must be under '--max' (min: {}, max: {})".format(
minver, maxver
))
sys.exit(1)
gman.backup_games(args['<directory>'], games=game, trim_min=minver, trim_max=maxver)
logging.info('Finished!')
| 2.921875 | 3 |
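Since the whole command-line interface is driven by the docopt usage string at the top of the module, the mapping from a command line to the `args` dictionary used in `run()` can be checked in isolation. A small sketch with made-up arguments:

```python
# Exercise the usage string only; nothing is scanned or backed up here.
from docopt import docopt
from savman import cli

args = docopt(cli.__doc__, argv=['backup', '/tmp/saves', '--max', '10', '--min', '5'])
print(args['backup'], args['<directory>'], args['--max'])   # True /tmp/saves 10
```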
bsm/operation/detect_package.py | bsmsoft/bsm | 3 | 12775963 |
from bsm.config.util import detect_package
from bsm.operation import Base
class DetectPackage(Base):
def execute(self, directory):
return detect_package(directory, self._config['package_runtime'])
| 1.960938 | 2 |
faker/providers/person/vi_VN/__init__.py | nkthanh98/faker | 0 | 12775964 |
<gh_stars>0
# coding=utf-8
from __future__ import unicode_literals
from collections import OrderedDict
from .. import Provider as PersonProvider
class Provider(PersonProvider): # Data from https://github.com/duyetdev/vietnamese-namedb
"""Provider for Vietnamese person generator"""
formats_female = OrderedDict((
('{{first_name_female}} {{mid_name_female}} {{last_name_female}}',
0.75),
('{{first_name_female}} {{last_name_female}}', 0.25),
))
formats_male = OrderedDict((
('{{first_name_male}} {{mid_name_male}} {{last_name_male}}', 0.75),
('{{first_name_male}} {{last_name_male}}', 0.25),
))
formats = formats_male.copy()
formats.update(formats_female)
first_names_female = OrderedDict((
('Quỳnh', 0.017834394904458598),
('Quân', 0.09426751592356689),
('Phượng', 0.006369426751592357),
('Samuel', 0.0012738853503184713),
('Sang', 0.06369426751592357),
('Quý', 0.033121019108280254),
('Quyền', 0.01910828025477707),
('Sơn', 0.14394904458598726),
('Qúy', 0.0025477707006369425),
('Phương', 0.11464968152866242),
('Quốc', 0.03949044585987261),
('Quyên', 0.008917197452229299),
('Sanh', 0.0025477707006369425),
('Quang', 0.0840764331210191),
('Sĩ', 0.008917197452229299),
('Tâm', 0.11847133757961784),
('Tân', 0.07133757961783439),
('Thái', 0.06751592356687898),
('Tài', 0.09426751592356689),
('Sa', 0.0025477707006369425),
('Sương', 0.005095541401273885),
('Tấn', 0.01910828025477707),
('Thân', 0.007643312101910828),
('Thanh', 0.08025477707006369),
('Thành', 0.13885350318471337),
('Thắng', 0.11210191082802548),
('Thảo', 0.06624203821656051),
('Thiện', 0.06496815286624204),
('Thạnh', 0.005095541401273885),
('Thi', 0.03949044585987261),
('Thao', 0.005095541401273885),
('Thế', 0.005095541401273885),
('Thịnh', 0.10955414012738854),
('Thông', 0.05859872611464968),
('Thọ', 0.02802547770700637),
('Thư', 0.016560509554140127),
('Thu', 0.01019108280254777),
('Khiêm', 0.017834394904458598),
('Sinh', 0.012738853503184714),
('Thuận', 0.048407643312101914),
('Thuý', 0.005095541401273885),
('Trường', 0.07261146496815286),
('Thưởng', 0.008917197452229299),
('Dö', 0.0012738853503184713),
('Thuần', 0.007643312101910828),
('An', 0.09044585987261146),
('Hợp', 0.005095541401273885),
('Hoàng', 0.17452229299363056),
('Huy', 0.24076433121019108),
('Anh', 0.25605095541401274),
('Thức', 0.015286624203821656),
('Duyên', 0.014012738853503185),
('Chức', 0.0025477707006369425),
('Ân', 0.030573248407643312),
('Bảo', 0.08789808917197452),
('Ảnh', 0.0012738853503184713),
('Bằng', 0.016560509554140127),
('Bình', 0.07898089171974522),
('Chánh', 0.01019108280254777),
('Biên', 0.003821656050955414),
('Biển', 0.005095541401273885),
('Châu', 0.03184713375796178),
('Ca', 0.0012738853503184713),
('Chi', 0.011464968152866241),
('Chinh', 0.017834394904458598),
('Chiến', 0.02038216560509554),
('Chương', 0.025477707006369428),
('Công', 0.044585987261146494),
('Cương', 0.012738853503184714),
('Cúc', 0.003821656050955414),
('Đại', 0.044585987261146494),
('Cường', 0.14904458598726114),
('Danh', 0.045859872611464965),
('Đạt', 0.15159235668789808),
('Dung', 0.035668789808917196),
('Đoàn', 0.008917197452229299),
('Đức', 0.1197452229299363),
('Điền', 0.01019108280254777),
('Định', 0.024203821656050957),
('Giang', 0.04713375796178344),
('Dũng', 0.1554140127388535),
('Điệp', 0.01019108280254777),
('Đông', 0.022929936305732482),
('Hải', 0.13630573248407643),
('Hạnh', 0.02802547770700637),
('Dương', 0.07261146496815286),
('Hiếu', 0.1554140127388535),
('Hân', 0.015286624203821656),
('Hào', 0.015286624203821656),
('Hàm', 0.0012738853503184713),
('Hà', 0.05605095541401274),
('Hanh', 0.0012738853503184713),
('Hòa', 0.04331210191082802),
('Hiển', 0.02802547770700637),
('Hóa', 0.006369426751592357),
('Hiến', 0.007643312101910828),
('Hoàn', 0.01910828025477707),
('Hiệp', 0.045859872611464965),
('Hoa', 0.011464968152866241),
('Hiệu', 0.007643312101910828),
('Hổ', 0.005095541401273885),
('Huân', 0.011464968152866241),
('Hoạch', 0.0012738853503184713),
('Hội', 0.005095541401273885),
('Huệ', 0.006369426751592357),
('Hùng', 0.1464968152866242),
('Hưng', 0.09681528662420383),
('Kha', 0.033121019108280254),
('Hương', 0.024203821656050957),
('Khánh', 0.09554140127388536),
('Khang', 0.03821656050955414),
('Khương', 0.02038216560509554),
('Kiệt', 0.02929936305732484),
('Khoa', 0.10828025477707007),
('Lâm', 0.07770700636942675),
('Lập', 0.016560509554140127),
('Lai', 0.005095541401273885),
('Linh', 0.12356687898089172),
('Kiều', 0.007643312101910828),
('Long', 0.13248407643312102),
('Loan', 0.022929936305732482),
('Lin', 0.0012738853503184713),
('Lợi', 0.030573248407643312),
('Lê', 0.008917197452229299),
('Luân', 0.05605095541401274),
('Liêm', 0.015286624203821656),
('Lộc', 0.07133757961783439),
('Lục', 0.0012738853503184713),
('Mạnh', 0.049681528662420385),
('Minh', 0.1592356687898089),
('Khải', 0.02929936305732484),
('Mai', 0.02165605095541401),
('Mẫn', 0.017834394904458598),
('Mỹ', 0.015286624203821656),
('Lưu', 0.011464968152866241),
('Nam', 0.17452229299363056),
('Ngân', 0.045859872611464965),
('Nghĩa', 0.0840764331210191),
('Nghi', 0.007643312101910828),
('Phát', 0.06496815286624204),
('Nghiệp', 0.006369426751592357),
('Nghị', 0.007643312101910828),
('Nhã', 0.014012738853503185),
('Nhựt', 0.022929936305732482),
('Phong', 0.08789808917197452),
('Nguyên', 0.12101910828025478),
('Phúc', 0.11464968152866242),
('Phi', 0.03184713375796178),
('Nhân', 0.0840764331210191),
('Nhật', 0.03949044585987261),
('Nhu', 0.0025477707006369425),
('Phước', 0.05477707006369427),
('Phú', 0.06751592356687898),
('Nhàn', 0.007643312101910828),
('Duy', 0.18980891719745224),
('Duyệt', 0.01019108280254777),
('Hiền', 0.04203821656050955),
('Hi', 0.0012738853503184713),
('Hoà', 0.008917197452229299),
('Hoài', 0.01910828025477707),
('Hậu', 0.03949044585987261),
('Hữu', 0.011464968152866241),
('Hường', 0.008917197452229299),
('Huỳnh', 0.007643312101910828),
('Huyền', 0.022929936305732482),
('Kiêm', 0.0012738853503184713),
('Khôi', 0.025477707006369428),
('Khuyên', 0.003821656050955414),
('Kiên', 0.034394904458598725),
('Kỵ', 0.0012738853503184713),
('Kim', 0.01019108280254777),
('Kin', 0.0012738853503184713),
('Lân', 0.012738853503184714),
('La', 0.0012738853503184713),
('Luật', 0.008917197452229299),
('Lan', 0.007643312101910828),
('Lý', 0.012738853503184714),
('Mãn', 0.0012738853503184713),
('Mùi', 0.0012738853503184713),
('Ngọc', 0.06369426751592357),
('Nguyệt', 0.005095541401273885),
('Nhẫn', 0.006369426751592357),
('Nhơn', 0.008917197452229299),
('Như', 0.015286624203821656),
('Pha', 0.0025477707006369425),
('Đăng', 0.044585987261146494),
('Vân', 0.02038216560509554),
('Ý', 0.007643312101910828),
('Em', 0.007643312101910828),
('Tuấn', 0.2229299363057325),
('Tính', 0.015286624203821656),
('Trung', 0.1375796178343949),
('Vi', 0.005095541401273885),
('Kỷ', 0.0012738853503184713),
('Chính', 0.014012738853503185),
('Đồng', 0.01019108280254777),
('Duân', 0.0025477707006369425),
('Hitachi', 0.0012738853503184713),
('Khả', 0.0025477707006369425),
('Hoan', 0.007643312101910828),
('Lĩnh', 0.0012738853503184713),
('Nguyện', 0.005095541401273885),
('Ninh', 0.01019108280254777),
('Tùng', 0.14522292993630573),
('Quy', 0.003821656050955414),
('Tú', 0.0751592356687898),
('Vũ', 0.12484076433121019),
('Thuật', 0.003821656050955414),
('Văn', 0.0267515923566879),
('Đan', 0.003821656050955414),
('Vương', 0.048407643312101914),
('Việt', 0.08280254777070063),
('Viên', 0.008917197452229299),
('Tiến', 0.11719745222929936),
('Chỉnh', 0.0025477707006369425),
('Bích', 0.005095541401273885),
('Tín', 0.06624203821656051),
('Tin', 0.0025477707006369425),
('Thùy', 0.011464968152866241),
('Tịnh', 0.008917197452229299),
('Thúy', 0.014012738853503185),
('Toàn', 0.08789808917197452),
('Toan', 0.0025477707006369425),
('Trí', 0.0929936305732484),
('Tri', 0.003821656050955414),
('Trà', 0.006369426751592357),
('Toản', 0.01019108280254777),
('Trọng', 0.025477707006369428),
('Trực', 0.015286624203821656),
('Trình', 0.012738853503184714),
('Trữ', 0.0012738853503184713),
('Triết', 0.01019108280254777),
('Tòng', 0.0025477707006369425),
('Vinh', 0.09936305732484077),
('Tưởng', 0.006369426751592357),
('Tường', 0.015286624203821656),
('Tuyển', 0.007643312101910828),
('Student', 0.005095541401273885),
('Vy', 0.008917197452229299),
('Vượng', 0.014012738853503185),
('Trang', 0.04203821656050955),
('Vỉnh', 0.0012738853503184713),
('Vỹ', 0.0025477707006369425),
('Doanh', 0.005095541401273885),
('Đắc', 0.003821656050955414),
('Đôn', 0.003821656050955414),
('Giáp', 0.008917197452229299),
('Hành', 0.0012738853503184713),
('Ất', 0.003821656050955414),
('Hinh', 0.0025477707006369425),
('Khanh', 0.016560509554140127),
('Thương', 0.033121019108280254),
('Tầy', 0.0012738853503184713),
('Thơ', 0.006369426751592357),
('Thổ', 0.0012738853503184713),
('Tuyễn', 0.0025477707006369425),
('Trân', 0.007643312101910828),
('Triều', 0.02038216560509554),
('Trúc', 0.011464968152866241),
('Tuân', 0.015286624203821656),
('Thủy', 0.016560509554140127),
('Uy', 0.007643312101910828),
('Hảo', 0.01019108280254777),
('Viễn', 0.01019108280254777),
('Khueâ', 0.0012738853503184713),
('Taâm', 0.0025477707006369425),
('Taân', 0.0012738853503184713),
('Âu', 0.005095541401273885),
('Bé', 0.003821656050955414),
('Bốn', 0.0012738853503184713),
('Chung', 0.02165605095541401),
('Diễm', 0.005095541401273885),
('Diệu', 0.007643312101910828),
('Du', 0.008917197452229299),
('Điển', 0.0012738853503184713),
('Đạo', 0.006369426751592357),
('Đẩu', 0.0012738853503184713),
('Đường', 0.0025477707006369425),
('Lăng', 0.0025477707006369425),
('Đô', 0.0025477707006369425),
('Lãm', 0.0025477707006369425),
('Lực', 0.014012738853503185),
('Ni', 0.0025477707006369425),
('Oanh', 0.008917197452229299),
('Ngôn', 0.0025477707006369425),
('Nhung', 0.01019108280254777),
('Nhiên', 0.0025477707006369425),
('Pháp', 0.005095541401273885),
('Thuyên', 0.0025477707006369425),
('My', 0.014012738853503185),
('Thoa', 0.007643312101910828),
('Thạch', 0.014012738853503185),
('Thào', 0.0012738853503184713),
('Thiên', 0.02165605095541401),
('Kiển', 0.0012738853503184713),
('Tiệp', 0.008917197452229299),
('Vui', 0.0025477707006369425),
('Ánh', 0.016560509554140127),
('Cảnh', 0.02165605095541401),
('Xuyên', 0.0025477707006369425),
('Ăn', 0.0012738853503184713),
('Cân', 0.0025477707006369425),
('Chí', 0.005095541401273885),
('Cơ', 0.0012738853503184713),
('Dân', 0.003821656050955414),
('Di', 0.0012738853503184713),
('Khuê', 0.007643312101910828),
('Do', 0.0012738853503184713),
('Diệp', 0.007643312101910828),
('Kỳ', 0.007643312101910828),
('Qua', 0.0012738853503184713),
('ThĂ´ng', 0.0012738853503184713),
('Ru', 0.0012738853503184713),
('Thiều', 0.0025477707006369425),
('Trai', 0.0025477707006369425),
('Tuyền', 0.01019108280254777),
('Thiệu', 0.005095541401273885),
('Thực', 0.0025477707006369425),
('Mi', 0.0025477707006369425),
('Hướng', 0.005095541401273885),
('Phục', 0.005095541401273885),
('Phụng', 0.014012738853503185),
('Quyết', 0.017834394904458598),
('Phứơc', 0.0012738853503184713),
('Quất', 0.0012738853503184713),
('Sử', 0.0025477707006369425),
('Sỹ', 0.011464968152866241),
('Toán', 0.003821656050955414),
('Tiên', 0.017834394904458598),
('Tiền', 0.003821656050955414),
('Tình', 0.005095541401273885),
('Sầu', 0.0025477707006369425),
('Triễn', 0.0012738853503184713),
('Sự', 0.0025477707006369425),
('Thể', 0.003821656050955414),
('Thìn', 0.005095541401273885),
('Thống', 0.007643312101910828),
('Thường', 0.005095541401273885),
('Toại', 0.0012738853503184713),
('Sáng', 0.007643312101910828),
('Huyên', 0.0012738853503184713),
('Nông', 0.0025477707006369425),
('Nương', 0.005095541401273885),
('Chiểu', 0.0025477707006369425),
('Nưng', 0.0012738853503184713),
('Truùc', 0.0012738853503184713),
('Döông', 0.003821656050955414),
('Dieäu', 0.0012738853503184713),
('Ñoàng', 0.0012738853503184713),
('Ñoä', 0.0012738853503184713),
('Khaùnh', 0.006369426751592357),
('Cöôøng', 0.0025477707006369425),
('Hoaønh', 0.0012738853503184713),
('Höõu', 0.0012738853503184713),
('Huøng', 0.0025477707006369425),
('Haäu', 0.0012738853503184713),
('Giaõ', 0.0012738853503184713),
('KHoài', 0.0012738853503184713),
('Hieàn', 0.0012738853503184713),
('Kieân', 0.0012738853503184713),
('Laâm', 0.0012738853503184713),
('Laäp', 0.0012738853503184713),
('Laøo', 0.0012738853503184713),
('Luaân', 0.0012738853503184713),
('Löông', 0.0012738853503184713),
('Nguyeân', 0.0012738853503184713),
('Löûa', 0.0012738853503184713),
('Ngoan', 0.005095541401273885),
('Löïc', 0.0012738853503184713),
('Nghóa', 0.0012738853503184713),
('Nga', 0.025477707006369428),
('Ngaân', 0.0012738853503184713),
('Nguyeän', 0.0012738853503184713),
('Phuïc', 0.0012738853503184713),
('Myõ', 0.0012738853503184713),
('Nghò', 0.0012738853503184713),
('Saét', 0.0012738853503184713),
('Ngon', 0.0012738853503184713),
('Phùc', 0.0025477707006369425),
('Maïnh', 0.0012738853503184713),
('Sôn', 0.0012738853503184713),
('Só', 0.0012738853503184713),
('Teøo', 0.0012738853503184713),
('Thaéng', 0.0012738853503184713),
('Quaân', 0.0012738853503184713),
('Quí', 0.006369426751592357),
('Thö', 0.0012738853503184713),
('Thöông', 0.0012738853503184713),
('Trị', 0.0012738853503184713),
('Tư', 0.0025477707006369425),
('Vĩ', 0.012738853503184714),
('Xuyến', 0.003821656050955414),
('Giao', 0.0025477707006369425),
('Song', 0.003821656050955414),
('Đủ', 0.0025477707006369425),
('Giới', 0.0012738853503184713),
('Trieàu', 0.0012738853503184713),
('Tuaân', 0.0012738853503184713),
('Tuaán', 0.0025477707006369425),
('Vuõ', 0.0025477707006369425),
('UÙt', 0.0025477707006369425),
('Tuyeån', 0.0012738853503184713),
('Vieät', 0.0012738853503184713),
('Chiêu', 0.0012738853503184713),
('Cao', 0.0025477707006369425),
('Dự', 0.005095541401273885),
('Đảo', 0.0025477707006369425),
('Đình', 0.003821656050955414),
('Thuỷ', 0.005095541401273885),
('Uyên', 0.01019108280254777),
('Xuân', 0.016560509554140127),
('Yến', 0.022929936305732482),
('Cát', 0.0012738853503184713),
('Hằng', 0.02165605095541401),
('Thăng', 0.005095541401273885),
('Ước', 0.0012738853503184713),
('Trâm', 0.015286624203821656),
('Tháp', 0.0012738853503184713),
('Mến', 0.003821656050955414),
('Niệm', 0.0012738853503184713),
('Nhuận', 0.005095541401273885),
('Qui', 0.0012738853503184713),
('Tuyên', 0.011464968152866241),
('Trứ', 0.0012738853503184713),
('Tự', 0.006369426751592357),
('Tước', 0.0012738853503184713),
('Viển', 0.0012738853503184713),
('Pô', 0.0012738853503184713),
('Giảng', 0.0012738853503184713),
('Hàng', 0.0012738853503184713),
('Chuẩn', 0.003821656050955414),
('Dạng', 0.0012738853503184713),
('Kết', 0.0025477707006369425),
('Ba', 0.003821656050955414),
('Vàng', 0.0012738853503184713),
('Binh', 0.0012738853503184713),
('Bách', 0.005095541401273885),
('Duẫn', 0.0012738853503184713),
('Dy', 0.0012738853503184713),
('Huynh', 0.003821656050955414),
('Huấn', 0.01019108280254777),
('Hông', 0.0012738853503184713),
('Háťng', 0.0012738853503184713),
('Hưởng', 0.006369426751592357),
('Luyện', 0.0025477707006369425),
('Nghệ', 0.0012738853503184713),
('Nghịch', 0.0012738853503184713),
('Ngọ', 0.0025477707006369425),
('Nguyễn', 0.003821656050955414),
('Nhi', 0.022929936305732482),
('Thừa', 0.0012738853503184713),
('Cẩm', 0.0025477707006369425),
('Kông', 0.0025477707006369425),
('Lương', 0.005095541401273885),
('Ái', 0.008917197452229299),
('Bào', 0.0012738853503184713),
('Đa', 0.0012738853503184713),
('Huần', 0.0012738853503184713),
('Kê', 0.0012738853503184713),
('Lừng', 0.0012738853503184713),
('Luận', 0.015286624203821656),
('Nèo', 0.0012738853503184713),
('Nhanh', 0.0025477707006369425),
('Trong', 0.0012738853503184713),
('Thoảng', 0.0012738853503184713),
('Nhường', 0.0012738853503184713),
('Vẵn', 0.0012738853503184713),
('Út', 0.0012738853503184713),
('Tới', 0.003821656050955414),
('Phưáťc', 0.0012738853503184713),
('Tây', 0.003821656050955414),
('Sâm', 0.003821656050955414),
('Xí', 0.0012738853503184713),
('Háşu', 0.0012738853503184713),
('Tuy', 0.0012738853503184713),
('Yên', 0.0025477707006369425),
('Sứng', 0.0012738853503184713),
('Vĩnh', 0.008917197452229299),
('Bão', 0.0025477707006369425),
('Tuyến', 0.007643312101910828),
('Lanh', 0.005095541401273885),
('Mận', 0.0012738853503184713),
('Nội', 0.0012738853503184713),
('Quế', 0.0025477707006369425),
('Oai', 0.0012738853503184713),
('Rinh', 0.0012738853503184713),
('Tam', 0.0025477707006369425),
('Thoan', 0.0012738853503184713),
('Tho', 0.0012738853503184713),
('Truyên', 0.0012738853503184713),
('Thoại', 0.006369426751592357),
('<NAME>', 0.0012738853503184713),
('Trưởng', 0.003821656050955414),
('Xoan', 0.0012738853503184713),
('Ty', 0.0012738853503184713),
('Ktla', 0.0012738853503184713),
('Niê', 0.0025477707006369425),
('Adrơng', 0.0012738853503184713),
('Êban', 0.0012738853503184713),
('Báşąng', 0.0012738853503184713),
('Bắc', 0.006369426751592357),
('Thụ', 0.0025477707006369425),
('Thuấn', 0.003821656050955414),
('Cầu', 0.003821656050955414),
('Lã', 0.0012738853503184713),
('Tẩn', 0.0012738853503184713),
('Vọng', 0.0025477707006369425),
('Được', 0.0012738853503184713),
('Độ', 0.005095541401273885),
('Nhâm', 0.0012738853503184713),
('Hệ', 0.0012738853503184713),
('Tám', 0.0012738853503184713),
('Học', 0.0025477707006369425),
('Đang', 0.0012738853503184713),
('học', 0.0012738853503184713),
('Mừng', 0.0012738853503184713),
('Lịch', 0.003821656050955414),
('Đảng', 0.0012738853503184713),
('Lượng', 0.005095541401273885),
('Võ', 0.0012738853503184713),
('Hớn', 0.0012738853503184713),
('Khiết', 0.0025477707006369425),
('Bạo', 0.0012738853503184713),
('Đời', 0.0012738853503184713),
('Mỉnh', 0.0012738853503184713),
('TĂ i', 0.0012738853503184713),
('Nhất', 0.007643312101910828),
('Tuyết', 0.007643312101910828),
('Thú', 0.0012738853503184713),
('Duõng', 0.0012738853503184713),
('Töù', 0.0012738853503184713),
('Töôøng', 0.0012738853503184713),
('YÙ', 0.0012738853503184713),
('Böûu', 0.0012738853503184713),
('Giaøu', 0.0012738853503184713),
('Khoâi', 0.0012738853503184713),
('Kieàu', 0.0012738853503184713),
('Lieân', 0.0012738853503184713),
('Chaún', 0.0012738853503184713),
('Dieåm', 0.0012738853503184713),
('Nhaân', 0.0012738853503184713),
('Lang', 0.0012738853503184713),
('Baûo', 0.0012738853503184713),
('Baïn', 0.0012738853503184713),
('Thoïai', 0.0012738853503184713),
('Thaûo', 0.0012738853503184713),
('Nhả', 0.0012738853503184713),
('Lỗi', 0.0012738853503184713),
('Phù', 0.0012738853503184713),
('Điềm', 0.0012738853503184713),
('Đal', 0.0012738853503184713),
('Hồng', 0.01019108280254777),
('Khoát', 0.0012738853503184713),
('Thơm', 0.0025477707006369425),
('Thạo', 0.0012738853503184713),
('Đượm', 0.0012738853503184713),
('Thinh', 0.0025477707006369425),
('Khởi', 0.0025477707006369425),
('Mười', 0.0025477707006369425),
('Rôn', 0.0012738853503184713),
('Vụ', 0.0012738853503184713),
('Thy', 0.0025477707006369425),
('Chăm', 0.0012738853503184713),
('Lữ', 0.0025477707006369425),
('Trưng', 0.0012738853503184713),
('Ẩn', 0.0025477707006369425),
('Mảnh', 0.0012738853503184713),
('Dĩnh', 0.0012738853503184713),
('Tú', 0.0012738853503184713),
('Hữu', 0.0012738853503184713),
('Kiếm', 0.0012738853503184713),
('Trắng', 0.0025477707006369425),
('Phiệt', 0.0012738853503184713),
('Quả', 0.0012738853503184713),
('Y', 0.0025477707006369425),
('Lay', 0.003821656050955414),
('Liên', 0.005095541401273885),
('Soái', 0.0012738853503184713),
('Thiêm', 0.0012738853503184713),
('Thuân', 0.0012738853503184713),
('Thỏa', 0.0012738853503184713),
('Chân', 0.0025477707006369425),
('Cự', 0.0012738853503184713),
('Chất', 0.0012738853503184713),
('Cấp', 0.0012738853503184713),
('Tăng', 0.003821656050955414),
('Thắm', 0.005095541401273885),
('Thấch', 0.0012738853503184713),
('Thiết', 0.0025477707006369425),
('Thụy', 0.0025477707006369425),
('Trinh', 0.011464968152866241),
('Truyền', 0.003821656050955414),
('Lành', 0.0025477707006369425),
('Liền', 0.0012738853503184713),
('San', 0.005095541401273885),
('Cần', 0.0012738853503184713),
('Thời', 0.0025477707006369425),
('Khiếu', 0.0012738853503184713),
('Khuyến', 0.0012738853503184713),
('Chuyền', 0.0012738853503184713),
('Chuyên', 0.0012738853503184713),
('Hai', 0.0025477707006369425),
('Hiên', 0.0025477707006369425),
('Quyến', 0.0012738853503184713),
('Sen', 0.0025477707006369425),
('Tá', 0.0012738853503184713),
('Sung', 0.0025477707006369425),
('Điệu', 0.0012738853503184713),
('Đỏ', 0.0012738853503184713),
('Dềng', 0.0012738853503184713),
('Dững', 0.0025477707006369425),
('Gia', 0.0025477707006369425),
('Ấng', 0.0025477707006369425),
('Hồi', 0.0012738853503184713),
('Đoan', 0.0025477707006369425),
('Hài', 0.0025477707006369425),
('Vin', 0.003821656050955414),
('Ly', 0.011464968152866241),
('Thuyền', 0.0012738853503184713),
('Trương', 0.0012738853503184713),
('Hoøa', 0.0012738853503184713),
('Đảm', 0.0012738853503184713),
('Gẩm', 0.0012738853503184713),
('Lam', 0.003821656050955414),
('Soát', 0.0012738853503184713),
('Quảng', 0.0012738853503184713),
('Tỉnh', 0.005095541401273885),
('Kachu', 0.0012738853503184713),
('Kiện', 0.0025477707006369425),
('Nghiêm', 0.0012738853503184713),
('Tuất', 0.0025477707006369425),
('Ayun', 0.0012738853503184713),
('BÄng', 0.0012738853503184713),
('Bông', 0.003821656050955414),
('Bộ', 0.0012738853503184713),
('Chình', 0.0012738853503184713),
('Đầy', 0.0012738853503184713),
('Diện', 0.0025477707006369425),
('Na', 0.0012738853503184713),
('Nha', 0.0012738853503184713),
('Nhiệm', 0.0012738853503184713),
('Ngự', 0.0012738853503184713),
('Thặng', 0.0012738853503184713),
('Thầm', 0.0012738853503184713),
('Lên', 0.0012738853503184713),
('Triệu', 0.003821656050955414),
('Tý', 0.003821656050955414),
('Đào', 0.0025477707006369425),
('Luyến', 0.0012738853503184713),
('Dậu', 0.0012738853503184713),
('Chư', 0.0012738853503184713),
('Vươn', 0.0012738853503184713),
('Lịnh', 0.0025477707006369425),
('Nhớ', 0.0025477707006369425),
('Nhạn', 0.0012738853503184713),
('Điều', 0.0012738853503184713),
('Riếp', 0.0012738853503184713),
('Hạc', 0.0012738853503184713),
('Ai', 0.0012738853503184713),
('Vấn', 0.0025477707006369425),
('Hởi', 0.0012738853503184713),
('Keng', 0.0012738853503184713),
('Lẹ', 0.0012738853503184713),
('Nên', 0.0012738853503184713),
('Quan', 0.0012738853503184713),
('Đặng', 0.0025477707006369425),
('HIếu', 0.0012738853503184713),
('Kbuôr', 0.0012738853503184713),
('Khoái', 0.0012738853503184713),
('Lệ', 0.005095541401273885),
('Thang', 0.0012738853503184713),
('Thảo', 0.0012738853503184713),
('Liễu', 0.0012738853503184713),
('Tĩnh', 0.0012738853503184713),
('Trạng', 0.0025477707006369425),
('Truờng', 0.0012738853503184713),
('TĂş', 0.0012738853503184713),
('Tất', 0.0025477707006369425),
('Nhưỡng', 0.0012738853503184713),
('DONA', 0.0012738853503184713),
('Giàu', 0.003821656050955414),
('Nhiều', 0.0025477707006369425),
('Qúi', 0.0012738853503184713),
('Một', 0.0012738853503184713),
('Tần', 0.0012738853503184713),
('Bồng', 0.0012738853503184713),
('Lưỡng', 0.0012738853503184713),
('Mộng', 0.0025477707006369425),
('Ngộ', 0.0012738853503184713),
('Hiáťn', 0.0012738853503184713),
('Hoành', 0.0012738853503184713),
('Ưng', 0.0012738853503184713),
('Hạo', 0.0012738853503184713),
('Dư', 0.0012738853503184713),
('Lũng', 0.0012738853503184713),
('Khiển', 0.0012738853503184713),
('Tôn', 0.0025477707006369425),
('Úy', 0.0012738853503184713),
('Nào', 0.0012738853503184713),
('Túc', 0.0012738853503184713),
('Tỵ', 0.0012738853503184713),
('Ponl', 0.0012738853503184713),
('Sáu', 0.0012738853503184713),
('Sứ', 0.0012738853503184713),
('Tánh', 0.0012738853503184713),
('Bửu', 0.0025477707006369425),
('Cửu', 0.0012738853503184713),
('Đến', 0.0012738853503184713),
('Thanh.', 0.0012738853503184713),
('Thiệt', 0.0012738853503184713),
('Xứng', 0.0012738853503184713),
('Ban', 0.0025477707006369425),
('Bác', 0.0012738853503184713),
('Năng', 0.0012738853503184713),
('Ngữ', 0.0012738853503184713),
('Nhãn', 0.0012738853503184713),
('Nhuần', 0.0012738853503184713),
('Núi', 0.0012738853503184713),
('Phan', 0.0012738853503184713),
('Nhuân', 0.0012738853503184713),
('Bảy', 0.0012738853503184713),
('Bạch', 0.0012738853503184713),
('Can', 0.0012738853503184713),
('Cẩn', 0.0025477707006369425),
('Chiên', 0.0012738853503184713),
('Dinh', 0.0012738853503184713),
('Dũ', 0.0012738853503184713),
('Dụng', 0.0012738853503184713),
('Lil', 0.0012738853503184713),
('Mẩn', 0.0012738853503184713),
('Miên', 0.0012738853503184713),
('Lài', 0.0012738853503184713),
('Vạn', 0.0012738853503184713),
('Yêm', 0.0012738853503184713),
('Ny', 0.0012738853503184713),
('Ngàn', 0.0012738853503184713),
('Nhờ', 0.0012738853503184713),
('Nhứt', 0.0012738853503184713),
('Đích', 0.0012738853503184713),
('Hạ', 0.0025477707006369425),
('Cang', 0.0012738853503184713),
('Bàng', 0.0012738853503184713),
('Pích', 0.0012738853503184713),
('Thủ', 0.0012738853503184713),
('Thứ', 0.0012738853503184713),
('Thục', 0.0012738853503184713),
('Thượng', 0.0012738853503184713),
('Phiên', 0.0012738853503184713),
('Phùng', 0.0012738853503184713),
('Vị', 0.0012738853503184713),
('Cừ', 0.0012738853503184713),
('Bá', 0.0012738853503184713),
('Khôn', 0.0012738853503184713),
('Hoạt', 0.0012738853503184713),
('Khoẻ', 0.0012738853503184713),
('Thám', 0.0012738853503184713),
('Đẳng', 0.0012738853503184713),
('Thuyết', 0.0012738853503184713),
('Thôn', 0.0012738853503184713),
('Rồng', 0.0012738853503184713),
('Thiệp', 0.0012738853503184713),
('Tạo', 0.0012738853503184713),
('Thuỳ', 0.0012738853503184713),
('Ngà', 0.0012738853503184713),
('Tráng', 0.0012738853503184713),
('Hwing', 0.0012738853503184713),
('Hy', 0.0025477707006369425),
('Kế', 0.0012738853503184713),
('Lại', 0.0012738853503184713),
('Lễ', 0.0012738853503184713),
('Dưỡng', 0.0012738853503184713),
('Hãn', 0.0012738853503184713),
('Hiền.', 0.0012738853503184713),
('Hoá', 0.0012738853503184713),
('Hình', 0.0012738853503184713),
('Hợi', 0.0012738853503184713),
('Hưáťng', 0.0012738853503184713),
('Hỷ', 0.0012738853503184713),
('Hận', 0.0012738853503184713),
('Ấn', 0.0012738853503184713),
('ẩn', 0.0012738853503184713),
('Táťnh', 0.0012738853503184713),
('Trác', 0.0012738853503184713),
('Trịnh', 0.0025477707006369425),
('Tẩm', 0.0012738853503184713),
))
mid_names_female = OrderedDict((
('Phương', 0.20441988950276244),
('An', 0.03867403314917127),
('Anh', 0.06077348066298342),
('Ban', 0.0055248618784530384),
('Bình', 0.011049723756906077),
('Bích', 0.19337016574585636),
('Băng', 0.011049723756906077),
('Bạch', 0.06077348066298342),
('Bảo', 0.11602209944751381),
('Bội', 0.0055248618784530384),
('Cam', 0.0055248618784530384),
('Chi', 0.011049723756906077),
('Chiêu', 0.0055248618784530384),
('Cát', 0.027624309392265192),
('Cẩm', 0.06629834254143646),
('Di', 0.0055248618784530384),
('Diên', 0.0055248618784530384),
('Diễm', 0.12154696132596685),
('Diệp', 0.011049723756906077),
('Diệu', 0.10497237569060773),
('Duy', 0.016574585635359115),
('Duyên', 0.022099447513812154),
('Dã', 0.016574585635359115),
('Dạ', 0.03314917127071823),
('Gia', 0.027624309392265192),
('Giang', 0.011049723756906077),
('Giao', 0.016574585635359115),
('Giáng', 0.016574585635359115),
('Hiếu', 0.022099447513812154),
('Hiền', 0.03314917127071823),
('Hiểu', 0.011049723756906077),
('Hoa', 0.027624309392265192),
('Hoài', 0.03867403314917127),
('Hoàn', 0.011049723756906077),
('Hoàng', 0.06629834254143646),
('Hoạ', 0.0055248618784530384),
('Huyền', 0.055248618784530384),
('Huệ', 0.055248618784530384),
('Huỳnh', 0.0055248618784530384),
('Hà', 0.04419889502762431),
('Hàm', 0.022099447513812154),
('Hương', 0.08287292817679558),
('Hướng', 0.0055248618784530384),
('Hạ', 0.03314917127071823),
('Hạc', 0.0055248618784530384),
('Hạnh', 0.06077348066298342),
('Hải', 0.12154696132596685),
('Hảo', 0.0055248618784530384),
('Hằng', 0.011049723756906077),
('Họa', 0.0055248618784530384),
('Hồ', 0.0055248618784530384),
('Hồng', 0.20441988950276244),
('Khiết', 0.011049723756906077),
('Khuê', 0.0055248618784530384),
('Khánh', 0.10497237569060773),
('Khúc', 0.0055248618784530384),
('Khả', 0.016574585635359115),
('Khải', 0.016574585635359115),
('Kim', 0.20994475138121546),
('Kiết', 0.011049723756906077),
('Kiều', 0.09392265193370165),
('Kỳ', 0.016574585635359115),
('Lam', 0.027624309392265192),
('Lan', 0.055248618784530384),
('Linh', 0.06629834254143646),
('Liên', 0.03314917127071823),
('Liễu', 0.0055248618784530384),
('Loan', 0.0055248618784530384),
('Ly', 0.0055248618784530384),
('Lâm', 0.022099447513812154),
('Lê', 0.0055248618784530384),
('Lưu', 0.0055248618784530384),
('Lệ', 0.0718232044198895),
('Lộc', 0.011049723756906077),
('Lục', 0.0055248618784530384),
('Mai', 0.13259668508287292),
('Minh', 0.19889502762430938),
('Mậu', 0.0055248618784530384),
('Mộc', 0.0055248618784530384),
('Mộng', 0.08287292817679558),
('Mỹ', 0.18232044198895028),
('Nghi', 0.016574585635359115),
('Nguyên', 0.011049723756906077),
('Nguyết', 0.0055248618784530384),
('Nguyệt', 0.06077348066298342),
('Ngân', 0.022099447513812154),
('Ngọc', 0.30939226519337015),
('Nhan', 0.0055248618784530384),
('Nhã', 0.06629834254143646),
('Như', 0.08839779005524862),
('Nhất', 0.0055248618784530384),
('Nhật', 0.049723756906077346),
('Oanh', 0.011049723756906077),
('Phi', 0.027624309392265192),
('Phong', 0.0055248618784530384),
('Phước', 0.011049723756906077),
('Phượng', 0.055248618784530384),
('Phụng', 0.0055248618784530384),
('Quế', 0.03314917127071823),
('Quỳnh', 0.13812154696132597),
('Sao', 0.011049723756906077),
('Song', 0.022099447513812154),
('Sông', 0.011049723756906077),
('Sơn', 0.011049723756906077),
('Sương', 0.0055248618784530384),
('Thanh', 0.2596685082872928),
('Thi', 0.027624309392265192),
('Thiên', 0.11049723756906077),
('Thiếu', 0.0055248618784530384),
('Thiều', 0.0055248618784530384),
('Thiện', 0.011049723756906077),
('Thu', 0.1878453038674033),
('Thuần', 0.0055248618784530384),
('Thy', 0.022099447513812154),
('Thái', 0.049723756906077346),
('Thùy', 0.06629834254143646),
('Thúy', 0.13812154696132597),
('Thơ', 0.0055248618784530384),
('Thư', 0.011049723756906077),
('Thương', 0.016574585635359115),
('Thường', 0.0055248618784530384),
('Thạch', 0.0055248618784530384),
('Thảo', 0.08287292817679558),
('Thục', 0.0718232044198895),
('Thụy', 0.06077348066298342),
('Thủy', 0.049723756906077346),
('Tinh', 0.0055248618784530384),
('Tiên', 0.0055248618784530384),
('Tiểu', 0.016574585635359115),
('Trang', 0.027624309392265192),
('Triều', 0.011049723756906077),
('Triệu', 0.0055248618784530384),
('Trung', 0.0055248618784530384),
('Trà', 0.011049723756906077),
('Trâm', 0.011049723756906077),
('Trân', 0.0055248618784530384),
('Trúc', 0.08839779005524862),
('Trầm', 0.0055248618784530384),
('Tuyết', 0.12154696132596685),
('Tuyền', 0.0055248618784530384),
('Tuệ', 0.016574585635359115),
('Tâm', 0.0718232044198895),
('Tùng', 0.016574585635359115),
('Tùy', 0.011049723756906077),
('Tú', 0.049723756906077346),
('Túy', 0.0055248618784530384),
('Tường', 0.022099447513812154),
('Tịnh', 0.027624309392265192),
('Tố', 0.03314917127071823),
('Từ', 0.011049723756906077),
('Uyên', 0.049723756906077346),
('Uyển', 0.03314917127071823),
('Vi', 0.0055248618784530384),
('Vinh', 0.0055248618784530384),
('Việt', 0.055248618784530384),
('Vy', 0.011049723756906077),
('Vàng', 0.0055248618784530384),
('Vành', 0.0055248618784530384),
('Vân', 0.11049723756906077),
('Vũ', 0.0055248618784530384),
('Xuyến', 0.0055248618784530384),
('Xuân', 0.16574585635359115),
('Yên', 0.027624309392265192),
('Yến', 0.08287292817679558),
('Ái', 0.04419889502762431),
('Ánh', 0.06629834254143646),
('Ðan', 0.016574585635359115),
('Ðinh', 0.0055248618784530384),
('Ðoan', 0.011049723756906077),
('Ðài', 0.0055248618784530384),
('Ðông', 0.03314917127071823),
('Ðồng', 0.0055248618784530384),
('Ý', 0.016574585635359115),
('Đan', 0.03314917127071823),
('Đinh', 0.0055248618784530384),
('Đoan', 0.011049723756906077),
('Đài', 0.0055248618784530384),
('Đông', 0.022099447513812154),
('Đơn', 0.0055248618784530384),
('Đức', 0.0055248618784530384),
('Ấu', 0.0055248618784530384),
))
last_names_female = OrderedDict((
('Chi', 0.11049723756906077),
('Bình', 0.03314917127071823),
('Di', 0.011049723756906077),
('Hạ', 0.022099447513812154),
('Hằng', 0.0718232044198895),
('Khê', 0.011049723756906077),
('Nhiên', 0.022099447513812154),
('Nhàn', 0.011049723756906077),
('Hương', 0.143646408839779),
('Mai', 0.16022099447513813),
('Phương', 0.16574585635359115),
('Thi', 0.049723756906077346),
('Thy', 0.03314917127071823),
('Thơ', 0.04419889502762431),
('Thư', 0.06077348066298342),
('Thảo', 0.1270718232044199),
('Vũ', 0.016574585635359115),
('Ðào', 0.04419889502762431),
('Minh', 0.055248618784530384),
('Yên', 0.011049723756906077),
('Chiêu', 0.0055248618784530384),
('Châu', 0.0718232044198895),
('Duyên', 0.055248618784530384),
('Hiền', 0.06629834254143646),
('Huệ', 0.03867403314917127),
('Hà', 0.1270718232044199),
('Hạnh', 0.09392265193370165),
('Hải', 0.0055248618784530384),
('Hảo', 0.016574585635359115),
('Hậu', 0.016574585635359115),
('Hồng', 0.11602209944751381),
('Hợp', 0.0055248618784530384),
('Lam', 0.049723756906077346),
('Liên', 0.09392265193370165),
('Loan', 0.11602209944751381),
('Nga', 0.10497237569060773),
('Ngà', 0.027624309392265192),
('Ngân', 0.049723756906077346),
('Ngọc', 0.11049723756906077),
('Như', 0.049723756906077346),
('Phượng', 0.06629834254143646),
('Quyên', 0.08287292817679558),
('Quân', 0.03314917127071823),
('San', 0.027624309392265192),
('Thoa', 0.016574585635359115),
('Thu', 0.07734806629834254),
('Thủy', 0.06077348066298342),
('Trang', 0.16574585635359115),
('Trâm', 0.055248618784530384),
('Ty', 0.0055248618784530384),
('Vân', 0.14917127071823205),
('Ðiệp', 0.022099447513812154),
('Băng', 0.027624309392265192),
('Tâm', 0.11602209944751381),
('Cúc', 0.016574585635359115),
('Hoa', 0.08839779005524862),
('Kim', 0.016574585635359115),
('Quỳnh', 0.09944751381215469),
('Trà', 0.027624309392265192),
('Tuyết', 0.03867403314917127),
('Yến', 0.09392265193370165),
('Anh', 0.18232044198895028),
('Hân', 0.027624309392265192),
('Lan', 0.14917127071823205),
('Lễ', 0.0055248618784530384),
('Thúy', 0.04419889502762431),
('Tiên', 0.0718232044198895),
('Trân', 0.022099447513812154),
('Trúc', 0.03867403314917127),
('Uyên', 0.12154696132596685),
('Vy', 0.10497237569060773),
('Linh', 0.17679558011049723),
('Dương', 0.027624309392265192),
('Cát', 0.011049723756906077),
('Ly', 0.06629834254143646),
('Tường', 0.0055248618784530384),
('Hường', 0.022099447513812154),
('Nhi', 0.20994475138121546),
('Nhung', 0.03867403314917127),
('Tú', 0.022099447513812154),
('Vỹ', 0.011049723756906077),
('Khuê', 0.03867403314917127),
('Kiều', 0.027624309392265192),
('Lộc', 0.0055248618784530384),
('My', 0.08287292817679558),
('Phúc', 0.011049723756906077),
('Phước', 0.0055248618784530384),
('Trinh', 0.0718232044198895),
('Huyền', 0.055248618784530384),
('Nương', 0.04419889502762431),
('Thiện', 0.0055248618784530384),
('Ái', 0.016574585635359115),
('Mỹ', 0.03314917127071823),
('Lâm', 0.09944751381215469),
('Nguyệt', 0.055248618784530384),
('Khanh', 0.09944751381215469),
('Thanh', 0.12154696132596685),
('Thiên', 0.016574585635359115),
('Hưởng', 0.0055248618784530384),
('Giang', 0.09944751381215469),
('Chung', 0.0055248618784530384),
('Hòa', 0.0055248618784530384),
('Thục', 0.0055248618784530384),
('Lý', 0.016574585635359115),
('Tranh', 0.0055248618784530384),
('An', 0.022099447513812154),
('Thương', 0.03314917127071823),
('Vi', 0.03867403314917127),
('Miên', 0.022099447513812154),
('Nguyên', 0.022099447513812154),
('Oanh', 0.07734806629834254),
('Sa', 0.016574585635359115),
('Xuân', 0.0718232044198895),
('Mi', 0.03314917127071823),
('Diệu', 0.016574585635359115),
('Thoại', 0.0055248618784530384),
('Ân', 0.016574585635359115),
('Nghi', 0.03867403314917127),
('Ý', 0.016574585635359115),
('Dung', 0.06077348066298342),
('Nhơn', 0.0055248618784530384),
('Sinh', 0.0055248618784530384),
('Thụy', 0.0055248618784530384),
('Ðường', 0.0055248618784530384),
('Diệp', 0.011049723756906077),
('Diễm', 0.027624309392265192),
('Khôi', 0.011049723756906077),
('Nhạn', 0.011049723756906077),
('Quế', 0.022099447513812154),
('Thắm', 0.0055248618784530384),
('Đăng', 0.0055248618784530384),
('Trung', 0.0055248618784530384),
('Giao', 0.011049723756906077),
('Ca', 0.011049723756906077),
('Cương', 0.0055248618784530384),
('Khuyên', 0.016574585635359115),
('Khánh', 0.022099447513812154),
('Thông', 0.0055248618784530384),
('Tuyến', 0.0055248618784530384),
('Tuyền', 0.049723756906077346),
('Xuyến', 0.0055248618784530384),
('Ánh', 0.027624309392265192),
('Đan', 0.027624309392265192),
('Ðan', 0.016574585635359115),
('Uyển', 0.016574585635359115),
('Khai', 0.0055248618784530384),
('Tuệ', 0.0055248618784530384),
('Liễu', 0.016574585635359115),
('Hiệp', 0.0055248618784530384),
('Hoàn', 0.011049723756906077),
('Lệ', 0.027624309392265192),
('Lợi', 0.0055248618784530384),
('Nhân', 0.011049723756906077),
('Phụng', 0.011049723756906077),
('Thuần', 0.011049723756906077),
('Thuận', 0.011049723756906077),
('Cầm', 0.016574585635359115),
('Bích', 0.011049723756906077),
('Hoan', 0.0055248618784530384),
('Nữ', 0.0055248618784530384),
('Sương', 0.03314917127071823),
('Ðàn', 0.0055248618784530384),
('Bảo', 0.011049723756906077),
('Dạ', 0.0055248618784530384),
('Phi', 0.011049723756906077),
('Thùy', 0.0055248618784530384),
('Dao', 0.011049723756906077),
('Kê', 0.0055248618784530384),
('Dân', 0.0055248618784530384),
('Hiếu', 0.0055248618784530384),
('Mẫn', 0.016574585635359115),
('Nhã', 0.016574585635359115),
('Ngôn', 0.0055248618784530384),
('Thêu', 0.0055248618784530384),
('Hoài', 0.0055248618784530384),
('Phong', 0.0055248618784530384),
('Việt', 0.0055248618784530384),
('Vọng', 0.0055248618784530384),
('Ðoan', 0.011049723756906077),
('Ðình', 0.0055248618784530384),
('Du', 0.011049723756906077),
('Ðài', 0.0055248618784530384),
('Đào', 0.0055248618784530384),
('Trầm', 0.0055248618784530384),
('Chinh', 0.0055248618784530384),
('Thường', 0.0055248618784530384),
('xanh', 0.0055248618784530384),
('Bằng', 0.0055248618784530384),
('Lăng', 0.0055248618784530384),
))
first_names_male = first_names_female.copy()
mid_names_male = OrderedDict((
('An', 0.03825136612021858),
('Ân', 0.01092896174863388),
('Anh', 0.09836065573770492),
('Bá', 0.060109289617486336),
('Bách', 0.01092896174863388),
('Bằng', 0.00546448087431694),
('Bảo', 0.12021857923497267),
('Bích', 0.00546448087431694),
('Bình', 0.060109289617486336),
('Bửu', 0.01092896174863388),
('Bữu', 0.00546448087431694),
('Cảnh', 0.00546448087431694),
('Cao', 0.060109289617486336),
('Cát', 0.01092896174863388),
('Chấn', 0.01639344262295082),
('Chánh', 0.00546448087431694),
('Chế', 0.00546448087431694),
('Chí', 0.07103825136612021),
('Chiến', 0.00546448087431694),
('Chiêu', 0.01639344262295082),
('Chính', 0.01639344262295082),
('Chuẩn', 0.00546448087431694),
('Chung', 0.00546448087431694),
('Công', 0.1092896174863388),
('Cường', 0.01092896174863388),
('Cương', 0.01092896174863388),
('Ðắc', 0.03825136612021858),
('Ðại', 0.02185792349726776),
('Dân', 0.01092896174863388),
('Ðan', 0.01092896174863388),
('Ðăng', 0.0273224043715847),
('Đăng', 0.01092896174863388),
('Danh', 0.02185792349726776),
('Ðạt', 0.01092896174863388),
('Ðình', 0.13114754098360656),
('Ðinh', 0.00546448087431694),
('Ðịnh', 0.01092896174863388),
('Ðoàn', 0.00546448087431694),
('Ðồng', 0.01092896174863388),
('Ðông', 0.03825136612021858),
('Ðức', 0.2185792349726776),
('Đức', 0.00546448087431694),
('Dũng', 0.01092896174863388),
('Dương', 0.01092896174863388),
('Duy', 0.15300546448087432),
('Gia', 0.16939890710382513),
('Giang', 0.02185792349726776),
('Hà', 0.00546448087431694),
('Hải', 0.07650273224043716),
('Hán', 0.00546448087431694),
('Hạnh', 0.00546448087431694),
('Hào', 0.00546448087431694),
('Hạo', 0.00546448087431694),
('Hiền', 0.00546448087431694),
('Hiệp', 0.03278688524590164),
('Hiếu', 0.03278688524590164),
('Hiểu', 0.00546448087431694),
('Hồ', 0.01092896174863388),
('Hòa', 0.03278688524590164),
('Hoài', 0.04371584699453552),
('Hoàn', 0.01092896174863388),
('Hoàng', 0.11475409836065574),
('Hồng', 0.09289617486338798),
('Huân', 0.00546448087431694),
('Hùng', 0.04371584699453552),
('Hưng', 0.00546448087431694),
('Hướng', 0.02185792349726776),
('Hữu', 0.22950819672131148),
('Huy', 0.09289617486338798),
('Khắc', 0.07103825136612021),
('Khải', 0.02185792349726776),
('Khai', 0.00546448087431694),
('Khang', 0.00546448087431694),
('Khánh', 0.07650273224043716),
('Khoa', 0.00546448087431694),
('Khôi', 0.01092896174863388),
('Khởi', 0.00546448087431694),
('Khương', 0.00546448087431694),
('Khuyến', 0.00546448087431694),
('Kiên', 0.0273224043715847),
('Kiến', 0.01639344262295082),
('Kiệt', 0.00546448087431694),
('Kim', 0.04918032786885246),
('Kỳ', 0.00546448087431694),
('Lạc', 0.01092896174863388),
('Lâm', 0.03278688524590164),
('Lam', 0.01092896174863388),
('Lập', 0.01092896174863388),
('Liên', 0.00546448087431694),
('Long', 0.01639344262295082),
('Lương', 0.02185792349726776),
('Mạnh', 0.07650273224043716),
('Minh', 0.3005464480874317),
('Mộng', 0.02185792349726776),
('Nam', 0.07650273224043716),
('Nghị', 0.01092896174863388),
('Nghĩa', 0.01092896174863388),
('Ngọc', 0.1366120218579235),
('Nguyên', 0.07103825136612021),
('Nhân', 0.02185792349726776),
('Nhật', 0.08743169398907104),
('Nhất', 0.00546448087431694),
('Như', 0.00546448087431694),
('Niệm', 0.00546448087431694),
('Phi', 0.03825136612021858),
('Phong', 0.01639344262295082),
('Phú', 0.04918032786885246),
('Phúc', 0.060109289617486336),
('Phục', 0.00546448087431694),
('Phụng', 0.00546448087431694),
('Phước', 0.03278688524590164),
('Phượng', 0.00546448087431694),
('Phương', 0.0273224043715847),
('Quân', 0.00546448087431694),
('Quang', 0.22950819672131148),
('Quảng', 0.01639344262295082),
('Quốc', 0.21311475409836064),
('Quý', 0.01092896174863388),
('Quyết', 0.00546448087431694),
('Sĩ', 0.00546448087431694),
('Sơn', 0.04918032786885246),
('Song', 0.00546448087431694),
('Sỹ', 0.02185792349726776),
('Tạ', 0.00546448087431694),
('Tài', 0.01092896174863388),
('Tâm', 0.00546448087431694),
('Tân', 0.0273224043715847),
('Tấn', 0.0546448087431694),
('Tất', 0.01639344262295082),
('Thạch', 0.01092896174863388),
('Thái', 0.06557377049180328),
('Thắng', 0.01092896174863388),
('Thăng', 0.00546448087431694),
('Thành', 0.11475409836065574),
('Thanh', 0.14754098360655737),
('Thất', 0.01639344262295082),
('Thế', 0.1092896174863388),
('Thiên', 0.0546448087431694),
('Thiện', 0.07650273224043716),
('Thiếu', 0.01092896174863388),
('Thiệu', 0.00546448087431694),
('Thịnh', 0.00546448087431694),
('Thời', 0.00546448087431694),
('Thông', 0.01639344262295082),
('Thống', 0.00546448087431694),
('Thụ', 0.00546448087431694),
('Thu', 0.00546448087431694),
('Thuận', 0.03278688524590164),
('Thượng', 0.03278688524590164),
('Thường', 0.01092896174863388),
('Thụy', 0.02185792349726776),
('Tích', 0.01092896174863388),
('Tiến', 0.0273224043715847),
('Tiền', 0.00546448087431694),
('Tiểu', 0.00546448087431694),
('Toàn', 0.00546448087431694),
('Tôn', 0.00546448087431694),
('Trí', 0.04371584699453552),
('Triển', 0.00546448087431694),
('Triệu', 0.00546448087431694),
('Triều', 0.00546448087431694),
('Trọng', 0.08196721311475409),
('Trúc', 0.01092896174863388),
('Trung', 0.08196721311475409),
('Trường', 0.09836065573770492),
('Từ', 0.00546448087431694),
('Tuấn', 0.12568306010928962),
('Tùng', 0.03278688524590164),
('Tường', 0.04371584699453552),
('Tuyền', 0.00546448087431694),
('Uy', 0.01092896174863388),
('Vạn', 0.02185792349726776),
('Văn', 0.01092896174863388),
('Vân', 0.00546448087431694),
('Viễn', 0.02185792349726776),
('Việt', 0.17486338797814208),
('Viết', 0.01639344262295082),
('Vĩnh', 0.04371584699453552),
('Vinh', 0.01092896174863388),
('Vũ', 0.01092896174863388),
('Vương', 0.02185792349726776),
('Xuân', 0.14754098360655737),
('Yên', 0.01639344262295082),
))
last_names_male = OrderedDict((
('Cơ', 0.0035460992907801418),
('Khang', 0.04964539007092199),
('Lai', 0.0070921985815602835),
('Nam', 0.05319148936170213),
('Nguyên', 0.05319148936170213),
('Ninh', 0.01773049645390071),
('Tâm', 0.031914893617021274),
('Thiện', 0.05673758865248227),
('Tường', 0.0425531914893617),
('Ðức', 0.04609929078014184),
('Dũng', 0.0851063829787234),
('Duy', 0.03900709219858156),
('Hoàng', 0.05673758865248227),
('Khải', 0.02127659574468085),
('Khoa', 0.01773049645390071),
('Khôi', 0.024822695035460994),
('Minh', 0.1099290780141844),
('Quân', 0.0425531914893617),
('Quốc', 0.02127659574468085),
('Sơn', 0.09574468085106383),
('Tài', 0.024822695035460994),
('Thái', 0.03546099290780142),
('Tú', 0.02127659574468085),
('Tuấn', 0.04964539007092199),
('Tùng', 0.01773049645390071),
('Việt', 0.05673758865248227),
('Vũ', 0.04964539007092199),
('Cường', 0.05319148936170213),
('Kỳ', 0.014184397163120567),
('Lộc', 0.031914893617021274),
('Long', 0.0673758865248227),
('Phước', 0.01773049645390071),
('Thành', 0.06382978723404255),
('Thịnh', 0.03900709219858156),
('Thúc', 0.0035460992907801418),
('Trúc', 0.0035460992907801418),
('Du', 0.0070921985815602835),
('Nhân', 0.06028368794326241),
('An', 0.04609929078014184),
('Bảo', 0.03546099290780142),
('Chấn', 0.0035460992907801418),
('Ðịnh', 0.014184397163120567),
('Giang', 0.06382978723404255),
('Hiển', 0.010638297872340425),
('Hoa', 0.0035460992907801418),
('Huy', 0.0425531914893617),
('Huynh', 0.0035460992907801418),
('Huỳnh', 0.0035460992907801418),
('Khánh', 0.0425531914893617),
('Lâm', 0.0425531914893617),
('Pháp', 0.0035460992907801418),
('Thạch', 0.014184397163120567),
('Tín', 0.010638297872340425),
('Toàn', 0.031914893617021274),
('Nhã', 0.0035460992907801418),
('Dân', 0.010638297872340425),
('Ðạt', 0.028368794326241134),
('Dương', 0.0425531914893617),
('Hòa', 0.0673758865248227),
('Thuận', 0.02127659574468085),
('Yên', 0.0035460992907801418),
('Chưởng', 0.0035460992907801418),
('Diệp', 0.0035460992907801418),
('Toại', 0.0035460992907801418),
('Nghiệp', 0.010638297872340425),
('Phong', 0.06382978723404255),
('Sĩ', 0.0070921985815602835),
('Sỹ', 0.0070921985815602835),
('Thọ', 0.024822695035460994),
('Tiến', 0.02127659574468085),
('Uy', 0.0070921985815602835),
('Hùng', 0.05673758865248227),
('Hưng', 0.03546099290780142),
('Phương', 0.03546099290780142),
('Anh', 0.0673758865248227),
('Công', 0.010638297872340425),
('Hiếu', 0.028368794326241134),
('Khiêm', 0.024822695035460994),
('Kiên', 0.024822695035460994),
('Thanh', 0.02127659574468085),
('Thắng', 0.04964539007092199),
('Trực', 0.0035460992907801418),
('Thủy', 0.0035460992907801418),
('Án', 0.0035460992907801418),
('Ân', 0.03546099290780142),
('Bằng', 0.01773049645390071),
('Hải', 0.07092198581560284),
('Hào', 0.01773049645390071),
('Hậu', 0.0070921985815602835),
('Hoán', 0.0035460992907801418),
('Lập', 0.0070921985815602835),
('Luận', 0.010638297872340425),
('Luật', 0.0035460992907801418),
('Lý', 0.014184397163120567),
('Phụng', 0.0035460992907801418),
('Sinh', 0.031914893617021274),
('Tráng', 0.0035460992907801418),
('Nghị', 0.014184397163120567),
('Quyết', 0.0070921985815602835),
('Di', 0.0035460992907801418),
('Lộ', 0.0035460992907801418),
('Lực', 0.01773049645390071),
('Trọng', 0.010638297872340425),
('Hành', 0.0035460992907801418),
('Ngọc', 0.014184397163120567),
('Thống', 0.0070921985815602835),
('Hiệp', 0.028368794326241134),
('Quế', 0.0035460992907801418),
('Khương', 0.010638297872340425),
('Quang', 0.03900709219858156),
('Văn', 0.02127659574468085),
('Chiểu', 0.0070921985815602835),
('Chương', 0.0070921985815602835),
('Diệu', 0.0070921985815602835),
('Ðôn', 0.0035460992907801418),
('Hảo', 0.0035460992907801418),
('Hợp', 0.0070921985815602835),
('Kim', 0.0035460992907801418),
('Ngân', 0.0035460992907801418),
('Phú', 0.01773049645390071),
('Phúc', 0.02127659574468085),
('Quảng', 0.0070921985815602835),
('Sang', 0.010638297872340425),
('Siêu', 0.0070921985815602835),
('Trung', 0.04609929078014184),
('Tụ', 0.0035460992907801418),
('Bình', 0.06028368794326241),
('Chính', 0.014184397163120567),
('Hạnh', 0.01773049645390071),
('Mạnh', 0.014184397163120567),
('Phi', 0.014184397163120567),
('Quyền', 0.01773049645390071),
('Toản', 0.010638297872340425),
('Trí', 0.02127659574468085),
('Tuệ', 0.0070921985815602835),
('Cẩn', 0.0070921985815602835),
('Hiền', 0.014184397163120567),
('Kính', 0.0035460992907801418),
('Ngôn', 0.010638297872340425),
('Nhượng', 0.0035460992907801418),
('Tân', 0.01773049645390071),
('Thông', 0.03900709219858156),
('Tiếp', 0.0035460992907801418),
('Tuyền', 0.0070921985815602835),
('Bạch', 0.0035460992907801418),
('Cần', 0.0035460992907801418),
('Cảnh', 0.02127659574468085),
('Ðạo', 0.010638297872340425),
('Huấn', 0.010638297872340425),
('Kiệt', 0.01773049645390071),
('Vinh', 0.03546099290780142),
('Lam', 0.010638297872340425),
('Thiên', 0.010638297872340425),
('Ðăng', 0.0070921985815602835),
('Hà', 0.02127659574468085),
('Thụy', 0.010638297872340425),
('Nhiên', 0.0070921985815602835),
('Dinh', 0.0070921985815602835),
('Dụng', 0.0035460992907801418),
('Học', 0.0070921985815602835),
('Liêm', 0.010638297872340425),
('Nghĩa', 0.01773049645390071),
('Bắc', 0.0070921985815602835),
('Lạc', 0.010638297872340425),
('Vỹ', 0.0035460992907801418),
('Kiếm', 0.0035460992907801418),
('Duệ', 0.0035460992907801418),
('Linh', 0.014184397163120567),
('Mỹ', 0.0070921985815602835),
('Phát', 0.01773049645390071),
('Xuân', 0.010638297872340425),
('Đức', 0.0035460992907801418),
('Lân', 0.014184397163120567),
('Lĩnh', 0.010638297872340425),
('Nhật', 0.014184397163120567),
('Nhuận', 0.0035460992907801418),
('Quý', 0.010638297872340425),
('Võ', 0.01773049645390071),
('Tiền', 0.0035460992907801418),
('Bào', 0.0035460992907801418),
('Canh', 0.0035460992907801418),
('Châu', 0.01773049645390071),
('Chiến', 0.0035460992907801418),
('Cương', 0.01773049645390071),
('Khanh', 0.010638297872340425),
('Khoát', 0.0035460992907801418),
('Lương', 0.010638297872340425),
('Thực', 0.010638297872340425),
('Trác', 0.0035460992907801418),
('Từ', 0.0070921985815602835),
('Vĩnh', 0.0070921985815602835),
('Vượng', 0.0070921985815602835),
('Kha', 0.0035460992907801418),
('Trân', 0.0035460992907801418),
('Kỷ', 0.0035460992907801418),
('Triệu', 0.014184397163120567),
('Ca', 0.0035460992907801418),
('Kiện', 0.0035460992907801418),
('Ðan', 0.01773049645390071),
('Hoàn', 0.0070921985815602835),
('Hội', 0.0035460992907801418),
('Trưởng', 0.0035460992907801418),
('Vĩ', 0.0035460992907801418),
('Ðồng', 0.0035460992907801418),
('Trường', 0.01773049645390071),
('Viên', 0.0035460992907801418),
('Vịnh', 0.0035460992907801418),
('Ðình', 0.0035460992907801418),
('Nghiêm', 0.0035460992907801418),
('Quỳnh', 0.0035460992907801418),
('Tấn', 0.010638297872340425),
('Trình', 0.0070921985815602835),
('Danh', 0.014184397163120567),
('Hiên', 0.0035460992907801418),
('Hỷ', 0.0035460992907801418),
('Khiếu', 0.0035460992907801418),
('Mẫn', 0.0035460992907801418),
('Nhu', 0.0035460992907801418),
('Thạc', 0.0035460992907801418),
('Triết', 0.0035460992907801418),
('Vu', 0.0035460992907801418),
('Vương', 0.0035460992907801418),
('Giác', 0.0035460992907801418),
('Ẩn', 0.0035460992907801418),
('Ðoàn', 0.0070921985815602835),
('Ngạn', 0.0035460992907801418),
('Tiển', 0.0035460992907801418),
('Trụ', 0.0070921985815602835),
('Bổng', 0.0035460992907801418),
('Giáp', 0.0035460992907801418),
('Sử', 0.0035460992907801418),
('Sâm', 0.0035460992907801418),
('Hồng', 0.0070921985815602835),
('Ðiệp', 0.0035460992907801418),
('Nhạn', 0.0035460992907801418),
('Ðộ', 0.0035460992907801418),
('Thời', 0.0035460992907801418),
('Ðiền', 0.0070921985815602835),
('Lễ', 0.0070921985815602835),
('Thể', 0.0035460992907801418),
('Trạch', 0.0035460992907801418),
('Triều', 0.010638297872340425),
('Bửu', 0.0070921985815602835),
('Ðại', 0.0070921985815602835),
('Hữu', 0.0070921985815602835),
('Sáng', 0.0035460992907801418),
('Hoài', 0.0035460992907801418),
('Trang', 0.0035460992907801418),
('Lợi', 0.010638297872340425),
('Trương', 0.0035460992907801418),
('San', 0.0035460992907801418),
('Tổ', 0.0035460992907801418),
('Ðệ', 0.0035460992907801418),
('Doanh', 0.0070921985815602835),
('Thế', 0.0035460992907801418),
('Tịnh', 0.0070921985815602835),
('Ý', 0.0035460992907801418),
('Duyệt', 0.0035460992907801418),
('Năng', 0.0070921985815602835),
('Luân', 0.0070921985815602835),
('Tính', 0.0035460992907801418),
('Nhiệm', 0.0035460992907801418),
('Nhất', 0.0035460992907801418),
('Liệt', 0.0035460992907801418),
('Thuật', 0.0035460992907801418),
('Miên', 0.0035460992907801418),
('Hoạt', 0.0035460992907801418),
('Liên', 0.0070921985815602835),
('Chuyên', 0.0035460992907801418),
('Chinh', 0.0035460992907801418),
('Phu', 0.0035460992907801418),
('Sa', 0.0035460992907801418),
('Ðông', 0.0070921985815602835),
('Khoan', 0.0035460992907801418),
('Tuyển', 0.0035460992907801418),
('Thương', 0.0035460992907801418),
('Gia', 0.0035460992907801418),
('Cao', 0.0035460992907801418),
('Cung', 0.0035460992907801418),
('Hàm', 0.0035460992907801418),
('Hãn', 0.0035460992907801418),
('Thuyết', 0.0035460992907801418),
))
def mid_name_female(self):
"""Generate middle name for female"""
return self.random_element(Provider.mid_names_female)
def mid_name_male(self):
"""Generate middle name for male"""
return self.random_element(Provider.mid_names_male)
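    # --- Usage sketch (illustrative, not part of the original provider) ---
    # Assuming this module is used as a Faker locale provider (the methods above
    # already reference a `Provider` class), it could be wired up roughly like
    # this; the exact registration depends on how the surrounding project
    # bootstraps Faker:
    #
    #   from faker import Faker
    #   fake = Faker()
    #   fake.add_provider(Provider)
    #   fake.mid_name_female()   # e.g. 'Phương', drawn from mid_names_female
    #   fake.mid_name_male()     # e.g. 'Minh', drawn from mid_names_male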
| 2.265625
| 2
|
server/opendp_apps/terms_of_access/migrations/0001_initial.py
|
mikephelan/opendp-ux
| 6
|
12775965
|
# Generated by Django 3.1.12 on 2021-07-28 18:28
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='TermsOfAccess',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('object_id', models.UUIDField(default=uuid.uuid4, editable=False)),
('name', models.CharField(max_length=256)),
('active', models.BooleanField(default=True)),
('description', models.TextField()),
('version', models.FloatField()),
('notes', models.TextField(blank=True)),
],
options={
'verbose_name': 'Terms of Access',
'verbose_name_plural': 'Terms of Access',
'ordering': ('active', 'name'),
},
),
migrations.CreateModel(
name='TermsOfAccessLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('object_id', models.UUIDField(default=uuid.uuid4, editable=False)),
('terms_of_access', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='terms_of_access.termsofaccess')),
],
options={
'abstract': False,
},
),
]
| 1.773438
| 2
|
envs/CartPole/play.py
|
Rhushabh1/Mini-AI-Games
| 0
|
12775966
|
import gym
env_name = "CartPole-v0"
env_name = "Ant-v2"
env = gym.make(env_name)
class Agent:
def __init__(self, env):
self.action_space = env.action_space
    def get_action(self, obs):
        # Random policy: the observation is ignored and an action is sampled uniformly.
        return self.action_space.sample()
env.reset()
agent = Agent(env)
for i_episode in range(10):
state = env.reset()
for t in range(100):
env.render()
action = agent.get_action(state)
        next_state, reward, done, info = env.step(action)
        state = next_state  # carry the new observation forward for the next action
if done:
print("Episode finished after {} timesteps".format(t+1))
break
env.close()
| 2.671875
| 3
|
Labs/Topic06-Functions/Lab06-Walkthrough.py
|
conor1982/Labs_Practice
| 0
|
12775967
|
<filename>Labs/Topic06-Functions/Lab06-Walkthrough.py<gh_stars>0
import json
students = []
filename = "/Users/Oriordanc/Desktop/HDip/Programming/Python_Module/students.json"
def writedict(obj):
with open(filename,'wt') as f:
json.dump(obj,f)
def readdict():
with open(filename) as f:
return json.load(f)
def displaymenu():
print("MENU")
print("\ta) Add Student")
print("\tv) View Student")
print("\ts) Save Student")
print("\tl) Load File")
print("\tq) Quit")
choice = input("Select One:")
return choice
def readmodules():
modules = []
currentname = input("\t\tEnter Module Name: ")
    while currentname.strip() != "":
module = {}
module["Name"] = currentname
module["Grade"] = int(input("\t\tEnter Grade: "))
modules.append(module)
currentname = input("\t\tEnter new Module Name: (Blank to Quit)")
return modules
def doAdd():
student = {}
student["Name"] = input("Enter Student name: ")
student["Modules"] = readmodules()
students.append(student)
def displaymodules(modules):
print("\t\tName \tGrade")
for module in modules:
print("\t\t{}\t{}".format(module['Name'],module['Grade']))
def doView():
print("All Students")
for student in students:
print("\t{}".format(student["Name"]))
displaymodules(student["Modules"])
def doSave():
writedict(students)
print("students saved")
def doload():
global students
students = readdict()
print("Students Loaded")
#main
choice = displaymenu()
while choice != "q":
if choice == "a":
doAdd()
elif choice == "v":
doView()
elif choice == "s":
doSave()
elif choice == "l":
doload()
elif choice == "q":
pass
else:
print("please select a, v or q")
choice = displaymenu()
print(students)
| 3.703125
| 4
|
view/board.py
|
Joao360/Python-2048
| 0
|
12775968
|
import curses
import os
import sys
import time
class Board:
column_width = 6
blank_column_line = "{}|".format(" " * column_width)
column_divider = "{}+".format("-" * column_width)
def __init__(self, boardSupplier):
self.boardSupplier = boardSupplier
board = boardSupplier()
self.divider = "\r+{0}".format(self.column_divider * len(board))
self.column_separators = "\r|{0}".format(self.blank_column_line * len(board))
self.width = len(self.column_separators)
curses.initscr()
self.window = curses.newwin(20, self.width, 0, 0)
self.window.keypad(1)
curses.noecho()
def draw_board(self):
""" It will (re)print the string representation of the board """
x = 1
for _, columns in enumerate(self.boardSupplier()):
self.window.addstr(x, 0, self.divider)
self.window.addstr(x + 1, 0, self.column_separators)
self.draw_board_line_with_value(x + 2, columns)
self.window.addstr(x + 3, 0, self.column_separators)
x += 4
self.window.addstr(x, 0, self.divider)
self.window.addstr(x + 2, 0, "Q - exit; Arrows for movement")
    def draw_board_line_with_value(self, x, columns):
        """ Render one row of tile values, centring each number within its column """
        line = "|"
for num in columns:
if num == 0:
line += self.blank_column_line
else:
space_remainder = self.column_width - len(str(num))
line += "{0}{1}{2}|".format(" " * (space_remainder//2 + space_remainder % 2), num, " " * (space_remainder//2))
self.window.addstr(x, 0, line)
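# Illustrative usage sketch (not part of the original file): Board expects a
# callable returning the current grid as a list of rows of ints, e.g. a 4x4
# 2048 board; drawing must happen inside a live curses session in a terminal.
#
#   board_state = [[0, 2, 0, 4], [0, 0, 8, 0], [2, 0, 0, 0], [0, 0, 0, 16]]
#   board = Board(lambda: board_state)
#   board.draw_board()
#   board.window.refresh()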
| 3.59375
| 4
|
eogrow/utils/meta.py
|
sentinel-hub/eo-grow
| 17
|
12775969
|
"""
Utilities for solving different problems in the `eo-grow` package structure, which are mostly pure Python magic.
"""
from __future__ import annotations
import importlib
import inspect
from typing import TYPE_CHECKING, Any, Dict, Type
if TYPE_CHECKING:
from ..core.pipeline import Pipeline
from ..core.schemas import BaseSchema
_PIPELINE_PARAM_NAME = "pipeline"
def load_pipeline_class(config: dict) -> Type[Pipeline]:
"""Given a config object it loads the pipeline class referenced in the config"""
pipeline_class_name = config.get(_PIPELINE_PARAM_NAME)
if pipeline_class_name is None:
raise ValueError(f"Config file is missing '{_PIPELINE_PARAM_NAME}' parameter, don't know which pipeline to use")
pipeline_class = import_object(pipeline_class_name)
return pipeline_class
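# Illustrative sketch of how `load_pipeline_class` is meant to be called; the
# pipeline path below is hypothetical and only shows the expected config shape:
#
#   config = {"pipeline": "my_project.pipelines.MyPipeline", "other_param": 1}
#   pipeline_cls = load_pipeline_class(config)   # imports and returns MyPipeline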
def collect_schema(object_with_schema: Any) -> Type[BaseSchema]:
"""A utility that collects a schema from the given object.
The object is expected to hold a unique internal class which inherits from `BaseSchema`. Example:
class MyObject:
class Schema(BaseSchema):
...
    This utility would provide `MyObject.Schema`. It also works if `MyObject` inherits from a class that holds the schema.
"""
class_with_schema = object_with_schema if inspect.isclass(object_with_schema) else object_with_schema.__class__
try:
return class_with_schema.Schema
except AttributeError as exception:
raise SyntaxError(
f"Class {class_with_schema} is missing a schema. Each EOGrowObject class needs to contain a pydantic "
"model named `Schema`."
) from exception
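# Illustrative call sketch for `collect_schema` (hypothetical class, not from
# this repo): any class, or instance of it, that nests a `Schema` yields that
# nested class.
#
#   class MyObject:
#       class Schema(BaseSchema):
#           some_param: int = 1
#
#   collect_schema(MyObject)     # -> MyObject.Schema
#   collect_schema(MyObject())   # instances work as well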
def import_object(import_path: str) -> Any:
"""Imports an object from a given import path"""
if "." not in import_path:
raise ValueError(f"Import path {import_path} doesn't reference an object in a module.")
module_name, object_name = import_path.rsplit(".", 1)
try:
module = importlib.import_module(module_name)
except ModuleNotFoundError as exception:
raise ModuleNotFoundError(f"{exception}. Given import path '{import_path}' is invalid.") from exception
if hasattr(module, object_name):
return getattr(module, object_name)
raise ImportError(
f"Cannot import name '{object_name}' from {module_name} ({module.__file__}). Given import path "
f"'{import_path}' is invalid."
)
def get_os_import_path(import_path: str) -> str:
"""For a Python import path it provides OS import path.
E.g. `eogrow.utils.meta` -> `/home/ubuntu/.../eogrow/utils/meta.py`
"""
module_spec = importlib.util.find_spec(import_path)
if module_spec is not None and module_spec.origin is not None:
return module_spec.origin
raise ValueError(f"Given import path {import_path} not found")
def get_package_versions() -> Dict[str, str]:
"""A utility function that provides dependency package versions
    At the moment it is an experimental utility. Everything is wrapped in a try-except in case something goes wrong.
:return: A dictionary with versions
"""
try:
import pkg_resources
dependency_packages = ["eogrow"] + [
requirement.name for requirement in pkg_resources.working_set.by_key["eogrow"].requires() # type: ignore
]
return {name: pkg_resources.get_distribution(name).version for name in dependency_packages}
except BaseException as ex:
return {"error": repr(ex)}
| 2.640625
| 3
|
server/accession/namebuilder.py
|
coll-gate/collgate
| 2
|
12775970
|
<reponame>coll-gate/collgate<filename>server/accession/namebuilder.py<gh_stars>1-10
# -*- coding: utf-8; -*-
#
# @file batchnamebuilder
# @brief Construct a new batch name using a specific convention and some constraints
# @author <NAME> (INRA UMR1095)
# @date 2018-01-08
# @copyright Copyright (c) 2018 INRA/CIRAD
# @license MIT (see LICENSE file)
# @details
import time
from datetime import datetime
from django.db import connection
from organisation.models import GRC
class NamingType(object):
"""
Base naming type class.
"""
    SEQUENCE = 0  # Integer auto-increment based sequence (only one is possible)
STATIC = 1 # Static string part
CONSTANT = 2 # Constant string (can be used anywhere, prefix, middle, suffix)
VARIABLE = 3 # Variable string (from a choice)
MDAY = 4 # Day of the month 1..31
MONTH = 5 # Month of the year 1..12
YEAR = 6 # Four digits year
GRC_ID = 7 # GRC name identifier
HASH = 8 # Hash string generation based on a sequence as seed and CRC-15
def __init__(self, naming_type, pos):
self._type = naming_type
self._pos = pos
@property
def type(self):
return self._type
def value(self, variables, constants):
return ""
class NamingTypeSequence(NamingType):
"""
Sequence naming type
"""
def __init__(self, pos, sequence_name, digits=6):
super().__init__(NamingType.SEQUENCE, pos)
self.sequence_name = sequence_name
self.format = "%%.0%ii" % digits
def value(self, variables, constants):
acc_seq = "SELECT nextval('%s')" % self.sequence_name
with connection.cursor() as cursor:
cursor.execute(acc_seq)
v = cursor.fetchone()[0]
return self.format % v
class NamingTypeHash(NamingType):
"""
Hash naming type
"""
SYMBOLS = []
@classmethod
def init(cls):
cls.SYMBOLS = []
# 10 digits
for i in range(0, 10):
cls.SYMBOLS.append(chr(ord('0') + i))
# 22 letters
for i in range(0, 26):
# ignore I,L,O,U
if i not in (8, 11, 14, 20):
cls.SYMBOLS.append(chr(ord('A') + i))
@classmethod
def crc15(cls, seed):
# nanoseconds time 64 bits
now = int(time.time() * 1000 * 1000)
v = [
(seed & 0xff00000000000000) >> 7,
(seed & 0x00ff000000000000) >> 6,
(seed & 0x0000ff0000000000) >> 5,
(seed & 0x000000ff00000000) >> 4,
(seed & 0x00000000ff000000) >> 3,
(seed & 0x0000000000ff0000) >> 2,
(seed & 0x000000000000ff00) >> 1,
(seed & 0x00000000000000ff),
(now & 0xff00000000000000) >> 7,
(now & 0x00ff000000000000) >> 6,
(now & 0x0000ff0000000000) >> 5,
(now & 0x000000ff00000000) >> 4,
(now & 0x00000000ff000000) >> 3,
(now & 0x0000000000ff0000) >> 2,
(now & 0x000000000000ff00) >> 1,
(now & 0x00000000000000ff)
]
crc = 0
for i in range(0, 16):
crc ^= v[i] << 7
for j in range(0, 8):
crc <<= 1
if crc & 0x8000:
crc ^= 0xC599
crc &= 0x7fff
return crc
@classmethod
def to_base32(cls, x):
"""
Crockford's base 32 plus 1 bits
"""
res = ""
if x == 0:
return ""
if x & 0x8000:
res += "1"
if x > 0x03E0:
x1 = (x & 0x7C00) >> 10
res += cls.SYMBOLS[x1]
if x > 0x001F:
x1 = (x & 0x03E0) >> 5
res += cls.SYMBOLS[x1]
x1 = x & 0x001F
res += cls.SYMBOLS[x1]
return res
def __init__(self, pos, sequence_name, length=3):
super().__init__(NamingType.HASH, pos)
self.sequence_name = sequence_name
self.length = length
if length != 3:
raise ValueError("Only max length of 3 is supported")
def value(self, variables, constants):
acc_seq = "SELECT nextval('%s')" % self.sequence_name
with connection.cursor() as cursor:
cursor.execute(acc_seq)
v = cursor.fetchone()[0]
# generate a crc-15 based on the current time and unique seed
crc15 = NamingTypeHash.crc15(v)
# return a 3 chars max string from the crc15
return NamingTypeHash.to_base32(crc15)
class NamingTypeStatic(NamingType):
"""
    Static naming type
"""
def __init__(self, pos, text):
super().__init__(NamingType.STATIC, pos)
self.text = text
def value(self, variables, constants):
return self.text
class NamingTypeConstant(NamingType):
"""
Constant string naming type
"""
def __init__(self, pos, index):
super().__init__(NamingType.CONSTANT, pos)
self._index = index
def value(self, variables, constants):
if self._index < len(constants):
return constants[self._index]
else:
raise ValueError("Missing constant")
class NamingTypeVariable(NamingType):
"""
Variable (from a choice) string naming type
"""
def __init__(self, pos, var_name):
super().__init__(NamingType.VARIABLE, pos)
if var_name not in ('GRC_CODE', 'ACCESSION_CODE', 'ACCESSION_NAME'):
raise ValueError("Unsupported variable name " + var_name)
self._var_name = var_name
def value(self, variables, constants):
v = variables.get(self._var_name, "")
if v is not None:
return v
else:
raise ValueError("Missing variable")
class NamingTypeMonthDay(NamingType):
"""
Day of the month naming type
"""
def __init__(self, pos):
super().__init__(NamingType.MDAY, pos)
def value(self, variables, constants):
day = datetime.today().day
return "%.2i" % day
class NamingTypeMonth(NamingType):
"""
Month naming type
"""
def __init__(self, pos):
super().__init__(NamingType.MONTH, pos)
def value(self, variables, constants):
month = datetime.today().month
return "%.2i" % month
class NamingTypeYear(NamingType):
"""
    Year naming type
"""
def __init__(self, pos):
super().__init__(NamingType.YEAR, pos)
def value(self, variables, constants):
year = datetime.today().year
return "%.4i" % year
class NamingTypeGRCCode(NamingType):
"""
GRC name identifier string naming type
"""
def __init__(self, pos):
super().__init__(NamingType.GRC_ID, pos)
def value(self, variables, constants):
return GRC.objects.get_unique_grc().identifier
class NameBuilder(object):
# Some examples of naming
SIMPLE_SERIAL = "{SEQ.6}"
PREFIXED_SERIAL = "{CONST}_{SERIAL.6}"
PREFIXED_SERIAL_WITH_DATE = "{CONST}_{SEQ.6}_{YEAR}{MONTH}{MDAY}"
def __init__(self, sequence_name, builder_format=None):
if not builder_format:
self._naming_format = NameBuilder.PREFIXED_SERIAL_WITH_DATE
else:
self._naming_format = builder_format
        # count the number of {CONST} placeholders that must be supplied
self._num_constants = self._naming_format.count("{CONST}")
self._recipe = []
sp = -1
i = 0
pos = 0
const_idx = 0
st = ""
np = ""
for c in self._naming_format:
            if c == '{':
if len(st) > 0:
self._recipe.append(NamingTypeStatic(pos, st))
st = ""
pos += 1
sp = i
np = ""
            elif c == '}' and sp >= 0:
sp = -1
parts = np.split('.')
if parts[0] == "SEQ":
if len(parts) == 1:
self._recipe.append(NamingTypeSequence(pos, sequence_name, -1))
elif len(parts) == 2:
width = int(parts[1])
self._recipe.append(NamingTypeSequence(pos, sequence_name, width))
elif parts[0] == "CONST":
self._recipe.append(NamingTypeConstant(pos, const_idx))
const_idx += 1
elif parts[0] == "VAR":
if len(parts) == 1:
raise ValueError("Missing variable name")
self._recipe.append(NamingTypeVariable(pos, parts[1]))
elif parts[0] == "MDAY":
self._recipe.append(NamingTypeMonthDay(pos))
elif parts[0] == "MONTH":
self._recipe.append(NamingTypeMonth(pos))
elif parts[0] == "YEAR":
self._recipe.append(NamingTypeYear(pos))
elif parts[0] == "GRC_CODE":
self._recipe.append(NamingTypeGRCCode(pos))
elif parts[0] == "HASH":
if len(parts) == 1:
self._recipe.append(NamingTypeHash(pos, sequence_name))
elif len(parts) == 2:
max_length = int(parts[1])
self._recipe.append(NamingTypeHash(pos, sequence_name, max_length))
else:
pass
pos += 1
elif sp >= 0:
np += c
else:
st += c
i += 1
# last suffix
if len(st) > 0:
self._recipe.append(NamingTypeStatic(pos, st))
@property
def num_constants(self):
"""
Return the number of necessary constants parameters.
"""
return self._num_constants
def pick(self, variables=None, constants=None):
"""
Pick the next name.
:param variables: Named standardized variable dict.
:param constants: List of ordered constants string.
        :return: A newly generated name; if a sequence is used, it is incremented as a side effect.
"""
if variables is None:
variables = {}
if constants is None:
constants = []
name = ""
for p in self._recipe:
name += p.value(variables, constants)
return name
class NameBuilderManager(object):
GLOBAL_ACCESSION = "accession"
GLOBAL_BATCH = "batch"
builders = {}
@classmethod
def init(cls):
NamingTypeHash.init()
@classmethod
def register(cls, name, builder):
if name in cls.builders:
raise ValueError("Already defined name builder for this name")
cls.builders[name] = builder
@classmethod
def get(cls, name):
return cls.builders.get(name)
@classmethod
def has(cls, name):
return name in cls.builders
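# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# Illustrates the naming mini-language parsed above; "accession" is a hypothetical
# sequence name, and pick() assumes the sequence/GRC backends used by
# NamingTypeSequence and NamingTypeGRCCode are available.
#
#   builder = NameBuilder("accession", "{CONST}_{SEQ.6}_{YEAR}{MONTH}{MDAY}")
#   assert builder.num_constants == 1
#   name = builder.pick(constants=["ACC"])  # e.g. "ACC_000042_20240115"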
| 2.359375
| 2
|
mapperpy/object_mapper.py
|
lgrech/MapperPy
| 2
|
12775971
|
from enum import Enum
from mapperpy.one_way_mapper import OneWayMapper
__author__ = 'lgrech'
class MappingDirection(Enum):
left_to_right = 1
right_to_left = 2
class ObjectMapper(object):
def __init__(self, from_left_mapper, from_right_mapper):
"""
:param from_left_mapper:
:type from_left_mapper: OneWayMapper
:param from_right_mapper:
:type from_right_mapper: OneWayMapper
"""
self.__from_left_mapper = from_left_mapper
self.__from_right_mapper = from_right_mapper
@classmethod
def from_class(cls, left_class, right_class):
return ObjectMapper(
OneWayMapper.for_target_class(right_class),
OneWayMapper.for_target_class(left_class))
@classmethod
def from_prototype(cls, left_proto_obj, right_proto_obj):
return ObjectMapper(
OneWayMapper.for_target_prototype(right_proto_obj),
OneWayMapper.for_target_prototype(left_proto_obj))
@classmethod
def for_dict(cls, left_proto_obj):
return ObjectMapper(
OneWayMapper.for_target_prototype(left_proto_obj.__dict__),
OneWayMapper.for_target_prototype(left_proto_obj))
def map(self, obj):
if isinstance(obj, self.__from_right_mapper.target_class):
return self.__from_left_mapper.map(obj)
elif isinstance(obj, self.__from_left_mapper.target_class):
return self.__from_right_mapper.map(obj)
raise ValueError("This mapper does not support {} class".format(obj.__class__.__name__))
def map_attr_name(self, attr_name):
"""
:type attr_name: basestring
:rtype: basestring
"""
mapped_name = self.__get_mapped_name(self.__from_left_mapper, attr_name)
if mapped_name and self.__get_mapped_name(self.__from_right_mapper, mapped_name) == attr_name:
return mapped_name
mapped_name = self.__get_mapped_name(self.__from_right_mapper, attr_name)
if mapped_name and self.__get_mapped_name(self.__from_left_mapper, mapped_name) == attr_name:
return mapped_name
raise ValueError("Can't find mapping for attribute name: {}".format(attr_name))
def map_attr_value(self, attr_name, attr_value, mapping_direction=None, target_class=None):
"""
:type attr_name: basestring
:type attr_value: object
:type mapping_direction: MappingDirection
:type target_class: type
:rtype: object
"""
if mapping_direction is not None and target_class is not None\
or mapping_direction is None and target_class is None:
raise ValueError("Either mapping direction or target class has to be set (not both)")
if mapping_direction and mapping_direction == MappingDirection.left_to_right \
or target_class and target_class == self.__from_left_mapper.target_class:
mapped_name = self.__get_mapped_name(self.__from_left_mapper, attr_name)
if mapped_name and self.__get_mapped_name(self.__from_right_mapper, mapped_name) == attr_name:
return self.__from_left_mapper.map_attr_value(attr_name, attr_value)
elif mapping_direction and mapping_direction == MappingDirection.right_to_left \
or target_class and target_class == self.__from_right_mapper.target_class:
mapped_name = self.__get_mapped_name(self.__from_right_mapper, attr_name)
if mapped_name and self.__get_mapped_name(self.__from_left_mapper, mapped_name) == attr_name:
return self.__from_right_mapper.map_attr_value(attr_name, attr_value)
raise ValueError(
"Can't find mapping for attribute name: {}, direction: {}, target class: {}".format(
attr_name, mapping_direction, target_class.__name__ if target_class else None))
def custom_mappings(self, mapping_dict):
mapping, rev_mapping = self.__get_explicit_mapping(mapping_dict)
self.__from_left_mapper.custom_mappings(mapping)
self.__from_right_mapper.custom_mappings(rev_mapping)
return self
def nested_mapper(self, mapper):
if not isinstance(mapper, ObjectMapper):
raise ValueError("Nested mapper has to be an instance of {}, {} found".format(
ObjectMapper.__name__, mapper.__class__.__name__))
left_type = mapper.__from_right_mapper.target_class
self.__from_left_mapper.nested_mapper(mapper.__from_left_mapper, left_type)
right_type = mapper.__from_left_mapper.target_class
self.__from_right_mapper.nested_mapper(mapper.__from_right_mapper, right_type)
return self
def left_initializers(self, initializers_dict):
self.__from_right_mapper.target_initializers(initializers_dict)
return self
def right_initializers(self, initializers_dict):
self.__from_left_mapper.target_initializers(initializers_dict)
return self
def value_converters(self, converters_dict):
to_right_converters, to_left_converters = self.__split_converters(converters_dict)
self.__from_left_mapper.target_value_converters(to_right_converters)
self.__from_right_mapper.target_value_converters(to_left_converters)
return self
def options(self, option):
self.__from_left_mapper.options(option)
self.__from_right_mapper.options(option)
return self
def __repr__(self):
return "{}->{}".format(self.__from_right_mapper.target_class, self.__from_left_mapper.target_class)
@classmethod
def __get_mapped_name(cls, one_way_mapper, attr_name):
try:
return one_way_mapper.map_attr_name(attr_name)
except ValueError:
return None
@classmethod
def __get_explicit_mapping(cls, input_mapping):
mapping = {}
rev_mapping = {}
for left, right in input_mapping.items():
if right is None:
# user requested to suppress implicit mapping for this attribute
mapping[left] = rev_mapping[left] = None
else:
mapping[left] = right
rev_mapping[right] = left
return mapping, rev_mapping
def __split_converters(self, converters_dict):
to_right_converters = {}
to_left_converters = {}
for left_attr_name, converters_tuple in converters_dict.iteritems():
if not isinstance(converters_tuple, tuple) or len(converters_tuple) != 2:
raise ValueError("Converters for {} should be provided in a 2-element tuple".format(left_attr_name))
to_right_converters[left_attr_name] = converters_tuple[0]
to_left_converters[self.__from_left_mapper.map_attr_name(left_attr_name)] = converters_tuple[1]
return to_right_converters, to_left_converters
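# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# Two hypothetical classes mapped in both directions; exact matching rules depend
# on OneWayMapper (not shown here).
#
#   class Person(object):
#       def __init__(self): self.name = None
#   class PersonDto(object):
#       def __init__(self): self.full_name = None
#
#   mapper = ObjectMapper.from_class(Person, PersonDto).custom_mappings({"name": "full_name"})
#   dto = PersonDto(); dto.full_name = "Ada"
#   person = mapper.map(dto)  # direction is chosen from the instance type -> returns a Person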
| 2.84375
| 3
|
server.py
|
martinezpl/STARTHACK21-SBB-backend
| 0
|
12775972
|
<gh_stars>0
# import main Flask class and request object
from flask import Flask, request
from logic import Logic
# create the Flask app
app = Flask("SBB-backend")
log = Logic()
@app.route('/detail')
def detail():
facility = request.args.get('facility')
date = request.args.get('date')
return log.detail(facility, date)
@app.route('/rafcik')
def rafcik():
return log.rafcik()
@app.route('/home')
def home():
return log.home()
@app.route('/form-example')
def form_example():
return 'Form Data Example'
@app.route('/json-example')
def json_example():
return 'JSON Object Example'
if __name__ == '__main__':
# run app in debug mode on port 5000
app.run(host='127.0.0.1', debug=True, port=5000)
| 2.6875
| 3
|
benri/pytorch/rnn.py
|
MaxOSmith/benri
| 0
|
12775973
|
""" Configurable recurrent cell. """
import copy
from pydoc import locate
import torch
from torch.autograd import Variable
import torch.nn as nn
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
from benri.configurable import Configurable
class RNN(nn.Module, Configurable):
def __init__(self, rnn=None, params={}):
nn.Module.__init__(self)
Configurable.__init__(self, params=params)
# Check for unimplemented conditions.
if self.params["bidirectional"]:
raise ValueError("Bidirectional not implemented.")
if self.params["n_layers"] != 1:
raise ValueError("More than 1 layer not implemented.")
if rnn is not None:
# use the externally provided recurrent cell
self.rnn = rnn
else:
# Locate and build the cell.
cell_ctor = locate("torch.nn.{}".format(self.params["cell_type"]))
if cell_ctor is None:
raise ValueError("Unknown RNN cell: {}".format(self.params["cell_type"]))
self.rnn = cell_ctor(
input_size=self.params["input_size"],
hidden_size=self.params["hidden_size"],
num_layers=self.params["n_layers"],
batch_first=True)
def forward(self, x, state):
""" Wraps the RNN's forward call.
:param x: PackedSequence, or [B, S, E].
:param state: [B, H]
:return: Tuple
- Outputs:
- State:
"""
assert isinstance(x, PackedSequence) or x.shape[0] == state.shape[0]
# Add the sequence dimension to the hidden state. [B, E] -> [S, B, E].
state = state.unsqueeze(0)
if self.params["cell_type"] == "LSTM":
state = torch.split(state, self.params["hidden_size"], dim=2)
y, state = self.rnn(x, state)
if self.params["cell_type"] == "LSTM":
state = torch.cat(state, dim=2)
# Remove the N-layers/bidirectional dimension from the hidden state.
state = state.squeeze(0)
return y, state
def init_state(self, batch_size):
""" Get a an initial zero state.
:param batch_size: Number of examples in the batch.
:return: Initial RNN state of zeros.
"""
if self.params["cell_type"] == "LSTM":
state_shape = [batch_size, self.params["hidden_size"] * 2]
else:
state_shape = [batch_size, self.params["hidden_size"]]
state = Variable(torch.zeros(state_shape), requires_grad=False).float()
return state
@property
def hidden_size(self):
if self.params["cell_type"] == "LSTM":
return 2 * self.params["hidden_size"]
else:
return self.params["hidden_size"]
@property
def output_size(self):
return self.params["hidden_size"]
@staticmethod
def default_params():
return {
"cell_type": "LSTM",
"input_size": 100,
"hidden_size": 100,
"n_layers": 1,
"bidirectional": False}
| 2.5625
| 3
|
cycles/utils/loadShader.py
|
em-yu/BlenderToolbox
| 3
|
12775974
|
import bpy
import os
# pwd = os.getcwd()
pwd = os.path.dirname(os.path.realpath(__file__))
def loadShader(shaderName, mesh):
# switch to different shader names
if shaderName == "EeveeToon":
bpy.context.scene.render.engine = 'BLENDER_EEVEE'
bpy.context.scene.render.alpha_mode = 'TRANSPARENT'
matName = "ToonShade_EV"
blenderFileName = 'EeveeToon.blend'
elif shaderName == "ColoredSteel":
matName = "Blued_Steel"
blenderFileName = 'ColoredSteel.blend'
elif shaderName == "Wax":
matName = "Wax_PBR_SSS"
blenderFileName = 'Wax.blend'
elif shaderName == "Wood":
matName = "UCP wood-v-1-1"
blenderFileName = 'UCPWood.blend' # created by Elbriga
else:
raise ValueError("Unsupported shader name: " + shaderName)
# load shaders to the mesh
path = pwd + '/../../shaders/' + blenderFileName + "\\Material\\"
bpy.ops.wm.append(filename=matName, directory=path)
mat = bpy.data.materials.get(matName)
mesh.data.materials.append(mat)
mesh.active_material = mat
tree = mat.node_tree
matNode = tree.nodes[-1]
return matNode
| 2.515625
| 3
|
Base/views.py
|
yorlysoro/INCOLARA
| 0
|
12775975
|
<reponame>yorlysoro/INCOLARA<gh_stars>0
from django.urls import reverse_lazy
from django.views.generic import TemplateView, UpdateView, ListView, CreateView, DetailView, DeleteView
from .forms import FormularioSectores, FormularioCuenta
from .models import Cuenta, Sectores
# Create your views here.
class Inicio(TemplateView):
template_name = 'index.html'
class MiCuenta(UpdateView):
model = Cuenta
form_class = FormularioCuenta
template_name = 'Base/mi_cuenta.html'
success_url = reverse_lazy('inicio')
class SectoresListar(ListView):
model = Sectores
context_object_name = 'Sectores_List'
template_name = 'Base/sector_list.html'
class SectoresCrear(CreateView):
model = Sectores
form_class = FormularioSectores
context_object_name = 'Sector'
template_name = 'Base/sector_crear.html'
success_url = reverse_lazy('Base:lista_sectores')
class SectoresEditar(UpdateView):
model = Sectores
form_class = FormularioSectores
template_name = 'Base/sector_editar.html'
def get_success_url(self):
return reverse_lazy('Base:sector_detalle', kwargs={ 'pk' : self.object.id })
class SectoresDetalle(DetailView):
model = Sectores
form_class = FormularioSectores
context_object_name = 'sector'
template_name = 'Base/sector_detalle.html'
class SectoresBorrar(DeleteView):
model = Sectores
template_name = 'Base/sector_borrar.html'
success_url = reverse_lazy('Base:lista_sectores')
| 2.234375
| 2
|
collector_service_sdk/api/template/update_template_with_yaml_pb2.py
|
easyopsapis/easyops-api-python
| 5
|
12775976
|
<reponame>easyopsapis/easyops-api-python<filename>collector_service_sdk/api/template/update_template_with_yaml_pb2.py
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: update_template_with_yaml.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from collector_service_sdk.model.collector_service import collector_template_pb2 as collector__service__sdk_dot_model_dot_collector__service_dot_collector__template__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='update_template_with_yaml.proto',
package='template',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x1fupdate_template_with_yaml.proto\x12\x08template\x1a\x46\x63ollector_service_sdk/model/collector_service/collector_template.proto\"p\n&UpdateCollectorTemplateWithYamlRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x10\n\x08objectId\x18\x03 \x01(\t\x12\x12\n\nconfigYaml\x18\x04 \x01(\t\"\x96\x01\n.UpdateCollectorTemplateWithYamlResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12\x32\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32$.collector_service.CollectorTemplateb\x06proto3')
,
dependencies=[collector__service__sdk_dot_model_dot_collector__service_dot_collector__template__pb2.DESCRIPTOR,])
_UPDATECOLLECTORTEMPLATEWITHYAMLREQUEST = _descriptor.Descriptor(
name='UpdateCollectorTemplateWithYamlRequest',
full_name='template.UpdateCollectorTemplateWithYamlRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='instanceId', full_name='template.UpdateCollectorTemplateWithYamlRequest.instanceId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='template.UpdateCollectorTemplateWithYamlRequest.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='objectId', full_name='template.UpdateCollectorTemplateWithYamlRequest.objectId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='configYaml', full_name='template.UpdateCollectorTemplateWithYamlRequest.configYaml', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=117,
serialized_end=229,
)
_UPDATECOLLECTORTEMPLATEWITHYAMLRESPONSEWRAPPER = _descriptor.Descriptor(
name='UpdateCollectorTemplateWithYamlResponseWrapper',
full_name='template.UpdateCollectorTemplateWithYamlResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='template.UpdateCollectorTemplateWithYamlResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='template.UpdateCollectorTemplateWithYamlResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='template.UpdateCollectorTemplateWithYamlResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='template.UpdateCollectorTemplateWithYamlResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=232,
serialized_end=382,
)
_UPDATECOLLECTORTEMPLATEWITHYAMLRESPONSEWRAPPER.fields_by_name['data'].message_type = collector__service__sdk_dot_model_dot_collector__service_dot_collector__template__pb2._COLLECTORTEMPLATE
DESCRIPTOR.message_types_by_name['UpdateCollectorTemplateWithYamlRequest'] = _UPDATECOLLECTORTEMPLATEWITHYAMLREQUEST
DESCRIPTOR.message_types_by_name['UpdateCollectorTemplateWithYamlResponseWrapper'] = _UPDATECOLLECTORTEMPLATEWITHYAMLRESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
UpdateCollectorTemplateWithYamlRequest = _reflection.GeneratedProtocolMessageType('UpdateCollectorTemplateWithYamlRequest', (_message.Message,), {
'DESCRIPTOR' : _UPDATECOLLECTORTEMPLATEWITHYAMLREQUEST,
'__module__' : 'update_template_with_yaml_pb2'
# @@protoc_insertion_point(class_scope:template.UpdateCollectorTemplateWithYamlRequest)
})
_sym_db.RegisterMessage(UpdateCollectorTemplateWithYamlRequest)
UpdateCollectorTemplateWithYamlResponseWrapper = _reflection.GeneratedProtocolMessageType('UpdateCollectorTemplateWithYamlResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _UPDATECOLLECTORTEMPLATEWITHYAMLRESPONSEWRAPPER,
'__module__' : 'update_template_with_yaml_pb2'
# @@protoc_insertion_point(class_scope:template.UpdateCollectorTemplateWithYamlResponseWrapper)
})
_sym_db.RegisterMessage(UpdateCollectorTemplateWithYamlResponseWrapper)
# @@protoc_insertion_point(module_scope)
| 1.53125
| 2
|
instacart-basket-analysis/pipelines/models/previous_order.py
|
yasserglez/kaggle_titanic
| 2
|
12775977
|
<reponame>yasserglez/kaggle_titanic
import luigi
import ujson
from ..models import PredictModel
class PredictPreviousOrder(PredictModel):
products = luigi.ChoiceParameter(choices=['all', 'reordered'], default='all')
@property
def model_name(self):
return 'previous_order_{}'.format(self.products)
def run(self):
orders = self.requires()['orders'].read()
predictions = {}
for user_data in orders:
user_products = []
for product in user_data['prior_orders'][-1]['products']:
if self.products == 'all':
user_products.append(product['product_id'])
elif self.products == 'reordered' and product['reordered']:
user_products.append(product['product_id'])
order_id = user_data['last_order']['order_id']
predictions[order_id] = user_products
with self.output().open('w') as fd:
ujson.dump(predictions, fd)
if __name__ == '__main__':
luigi.run(local_scheduler=True)
| 2.421875
| 2
|
ogle/lexer/language_spec.py
|
yshrdbrn/ogle
| 0
|
12775978
|
tokens = {
'ID': r'[A-Za-z][A-Za-z_0-9]*',
'FLOATNUM': r'(([0-9]+\.[0-9]*|[0-9]*\.[0-9]+)([Ee][+-]?[0-9]+)?|[0-9]+[Ee][+-]?[0-9]+)',
'INTNUM': r'[0-9]+',
# Multi-character operators
'==': r'==',
'<=': r'<=',
'>=': r'>=',
'<>': r'<>',
'::': r'::',
}
special_characters = '<>+-*/=(){}[];,.:'
reserved_keywords = {
'if': 'IF',
'then': 'THEN',
'else': 'ELSE',
'while': 'WHILE',
'class': 'CLASS',
'integer': 'INTEGER',
'float': 'FLOAT',
'do': 'DO',
'end': 'END',
'public': 'PUBLIC',
'private': 'PRIVATE',
'or': 'OR',
'and': 'AND',
'not': 'NOT',
'read': 'READ',
'write': 'WRITE',
'return': 'RETURN',
'main': 'MAIN',
'inherits': 'INHERITS',
'local': 'LOCAL',
'void': 'VOID'
}
comments = r'(/\*(.|\n)*?\*/)|(//.*(\n[ \t]*//.*)*)'
whitespace = ' \t'
line_end = r'\n+'
| 2.5625
| 3
|
cb_news/news_extractor/views/report_handler.py
|
astandre/cb_news_extractor
| 0
|
12775979
|
from flakon import JsonBlueprint
from cb_news.news_extractor.database import *
from flask import request
import logging
report_handler = JsonBlueprint('report_handler', __name__)
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
@report_handler.route('/report', methods=["POST"])
def report_view():
"""Home view.
This view will return an empty JSON mapping.
"""
data = request.get_json()
logger.info("Appending new report %s", data)
new_report = Report()
message = ""
if "id" in data:
new_report.id = data["id"]
if "description" in data:
new_report.description = data["description"]
message += "\nDescription updated"
if "author" in data:
new_report.author = data["author"]
message += "\nAuthor updated"
if "attachment" in data:
att_list = []
for att in data["attachment"]:
att_list.append(Attachment(url=att["url"]))
logger.debug("Received attachments: %s", att_list)
report = get_or_create_report(new_report)
return {"id": report.id, "message": message}
@report_handler.route('/report/all', methods=["GET"])
def all_reports():
return {"reports": get_all_saved_reports()}
| 2.34375
| 2
|
api/admin.py
|
surajsonee/Ionic-Django
| 0
|
12775980
|
from django.contrib import admin
from .models import Team, Kpi, KpiValue, Organization
# Register your models here.
@admin.register(Organization)
class OrganizationAdmin(admin.ModelAdmin):
pass
@admin.register(Team)
class TeamAdmin(admin.ModelAdmin):
pass
@admin.register(Kpi)
class KpiAdmin(admin.ModelAdmin):
pass
@admin.register(KpiValue)
class KpiValueAdmin(admin.ModelAdmin):
pass
| 1.6875
| 2
|
src/simple_io.py
|
lucascimeca/Robotics_Palpation
| 0
|
12775981
|
from pathlib import Path
from os import path as pt
def file_exist_query(filename):
path = Path(filename)
if path.is_file():
res = None
while res not in ['y', 'Y', 'n', 'N']:
res = input("\nThe file in '{}' already exists, do you really wish to re-write its contents? [y/n]".format(filename))
if res not in ['y', 'Y', 'n', 'N']:
print("Please reply with 'y' or 'n'")
if res in ['n', 'N']:
return False
return True
def file_exists(filename):
path = Path(filename)
if path.is_file():
return True
return False
def folder_exists(folder_name):
return pt.isdir(folder_name)
def folder_create(folder_name, exist_ok=False, parents=True):
path = Path(folder_name)
try:
path.mkdir(parents=parents, exist_ok=exist_ok)
except FileExistsError:
raise OSError("Trying to create an already existing folder!")
return True
| 3.71875
| 4
|
v7.0/map_analyze.py
|
jsstwright/osumapper
| 296
|
12775982
|
# -*- coding: utf-8 -*-
#
# JSON osu! map analysis
#
import numpy as np;
def get_map_timing_array(map_json, length=-1, divisor=4):
if length == -1:
length = map_json["obj"][-1]["time"] + 1000; # it has an extra time interval after the last note
if map_json["obj"][-1]["type"] & 8: # spinner end
length = map_json["obj"][-1]["spinnerEndTime"] + 1000;
uts_a = map_json["timing"]["uts"];
out = [];
for i, uts in enumerate(uts_a):
begin_time = uts["beginTime"];
mspb = uts["tickLength"];
if i < len(uts_a)-1:
end_time = uts_a[i+1]["beginTime"];
else:
end_time = length;
arr = np.floor(np.arange(begin_time, end_time, mspb / divisor));
out = out + list(map(lambda f: int(f), arr));
return out;
def get_tick_len(map_json, tick):
uts_a = map_json["timing"]["uts"];
if tick < uts_a[0]["beginTime"]:
return uts_a[0]["tickLength"];
_out = 600;
for uts in uts_a:
if tick >= uts["beginTime"]:
_out = uts["tickLength"];
else:
return _out;
return _out;
def get_slider_len(map_json, tick):
ts_a = map_json["timing"]["ts"];
if tick < ts_a[0]["beginTime"]:
return ts_a[0]["sliderLength"];
_out = 100;
for ts in ts_a:
if tick >= ts["beginTime"]:
_out = ts["sliderLength"];
else:
return _out;
return _out;
def get_slider_len_ts(ts_a, tick):
if tick < ts_a[0]["beginTime"]:
return ts_a[0]["sliderLength"];
_out = 100;
for ts in ts_a:
if tick >= ts["beginTime"]:
_out = ts["sliderLength"];
else:
return _out;
return _out;
def get_end_time(note):
if note["type"] & 8:
return note["spinnerEndTime"];
elif note["type"] & 2:
return note["sliderData"]["endTime"];
#elif note["type"] & 128:
# return note["holdEndTime"];
else:
return note["time"];
# edited from uts to ts
def get_all_ticks_and_lengths_from_ts(uts_array, ts_array, end_time, divisor=4):
# Returns array of all timestamps, ticklens and sliderlens.
endtimes = ([uts["beginTime"] for uts in uts_array] + [end_time])[1:];
timestamps = [np.arange(uts["beginTime"], endtimes[i], uts["tickLength"] / divisor) for i, uts in enumerate(uts_array)];
ticks_from_uts = [list(range(len(timestamp_group))) for timestamp_group in timestamps];
tick_len = [[uts["tickLength"]] * len(np.arange(uts["beginTime"], endtimes[i], uts["tickLength"] / divisor)) for i, uts in enumerate(uts_array)];
# slider_len = [[ts["sliderLength"]] * len(np.arange(ts["beginTime"], endtimes[i], ts["tickLength"] / divisor)) for i, ts in enumerate(ts_array)];
slider_len = [get_slider_len_ts(ts_array, timestamp) for timestamp in np.concatenate(timestamps)];
return np.concatenate(ticks_from_uts), np.round(np.concatenate(timestamps)).astype(int), np.concatenate(tick_len), np.array(slider_len);
def get_end_point(note):
if note["type"] & 8:
return np.array([256, 192]);
elif note["type"] & 2:
return np.array(note["sliderData"]["endpoint"]);
else:
return np.array([note["x"], note["y"]]);
def get_input_vector(note, prev_note):
if note["type"] & 8:
return None;
#elif note["type"] & 2:
# return np.array(note["sliderData"]["dIn"]);
else:
vec = np.array([note["x"], note["y"]]) - get_end_point(prev_note);
return vec / max(0.001, np.sqrt(vec.dot(vec)));
def get_output_vector(note, prev_note):
if note["type"] & 8:
return None;
elif note["type"] & 2:
return np.array(note["sliderData"]["dOut"]);
else:
vec = np.array([note["x"], note["y"]]) - get_end_point(prev_note);
return vec / max(0.001, np.sqrt(vec.dot(vec)));
def get_momentum(note, prev_note, slider_len):
"""
momentum = distance snap (distance / slider length).
for sliders, takes small value between from slider end or slider start to next note.
"""
v1 = np.array([note["x"], note["y"]]);
v0 = get_end_point(prev_note);
v = v1 - v0;
if note["time"] - get_end_time(prev_note) == 0 or note["time"] - prev_note["time"] == 0:
# the note starts at the same time the previous one ends: either a bugged slider end or a double note
return 0;
end_type_momentum = np.sqrt(v.dot(v)) / (note["time"] - get_end_time(prev_note)) / slider_len;
# Since slider jumps in maps cause parameters to be learned too high
# we try to deal with slider leniency by using the beginning of slider
v2 = np.array([prev_note["x"], prev_note["y"]]);
v3 = v1 - v2;
start_type_momentum = np.sqrt(v3.dot(v3)) / (note["time"] - prev_note["time"]) / slider_len;
return np.min([end_type_momentum, start_type_momentum]);
def is_uts_begin(map_json, tick):
uts_a = map_json["timing"]["uts"];
begin_times = [uts["beginTime"] for uts in uts_a];
for t in begin_times:
if tick > t - 1 and tick < t + 5:
return True
return False
def get_map_notes(map_json, **kwargs):
"""
Reads JSON map data and creates a list for every tick
Returns:
data = list of data array: [TICK, TIME, NOTE, NOTE_TYPE, SLIDING, SPINNING, MOMENTUM, Ex1, Ex2, Ex3]
flow_data = list of data array: [i, tick, note_type, x, y, vec_in_x, vec_in_y, vec_out_x, vec_out_y, end_x, end_y]
Ex1, Ex2, Ex3 = tickLength/500, BPM/120, sliderLength/150
"""
length = kwargs.get("length", -1);
divisor = kwargs.get("divisor", 4);
tick_times = get_map_timing_array(map_json, length = length, divisor = divisor);
objs = map_json["obj"];
obj_times = list(map(lambda obj: obj["time"], objs));
# 1 for circle, 2 for slider, 3 for spinner
def get_note_type(obj):
if not obj:
return 0;
if obj["type"] & 2:
return 2;
elif obj["type"] & 8:
return 3;
return 1;
po = 0;
note_max_wait_time = kwargs.get("note_max_wait_time", 1000);
start_time = obj_times[0] - note_max_wait_time;
last_obj_time = start_time;
sliding = 0;
slider_end_time = 0;
spinning = 0;
spinner_end_time = 0;
data = [];
flow_data = [];
# constant multipliers and subtractions
tlen_mp = 1/500;
tlen_s = 1;
bpm_mp = 1/120;
bpm_s = 1;
slen_mp = 1/150;
slen_s = 1;
# tick count from start of uninherited timing section
uts_i = 0;
# tick is timestamp here
for i, tick in enumerate(tick_times):
if is_uts_begin(map_json, tick):
uts_i = 0;
else:
uts_i += 1;
# Attach extra vars at the end of each note data row
tlen = get_tick_len(map_json, tick);
bpm = 60000 / tlen;
slen = get_slider_len(map_json, tick);
ex1 = tlen * tlen_mp - tlen_s;
ex2 = bpm * bpm_mp - bpm_s;
ex3 = slen * slen_mp - slen_s;
while obj_times[po] < tick - 5 and po < len(obj_times) - 1:
po += 1;
if obj_times[po] >= tick - 5 and obj_times[po] <= tick + 5: # found note
last_obj_time = tick;
note_type = get_note_type(objs[po]);
# calculate momentum
if po >= 1:
momentum = get_momentum(objs[po], objs[po-1], slen/tlen);
else:
momentum = 0;
# flow data
if po >= 1:
input_vector = get_input_vector(objs[po], objs[po-1]);
output_vector = get_output_vector(objs[po], objs[po-1]);
else:
input_vector = [0, 0];
output_vector = [0, 0];
if input_vector is None or input_vector[0] is None or input_vector[1] is None:
input_vector = [0, 0];
if output_vector is None or output_vector[0] is None or output_vector[1] is None:
output_vector = [0, 0];
# end point
endpoint = get_end_point(objs[po]);
flow_data.append([uts_i, tick, note_type, objs[po]["x"], objs[po]["y"], input_vector[0], input_vector[1], output_vector[0], output_vector[1], endpoint[0], endpoint[1]]);
# put data
if note_type == 1:
spinning = 0;
sliding = 0;
elif note_type == 2:
sliding = 1;
slider_end_time = objs[po]["sliderData"]["endTime"];
elif note_type == 3:
spinning = 1;
spinner_end_time = objs[po]["spinnerEndTime"];
# because the spinner sometimes get over 3 secs
last_obj_time = spinner_end_time;
# TICK, TIME, NOTE, NOTE_TYPE, SLIDING, SPINNING, MOMENTUM, Ex1, Ex2, Ex3
data.append([uts_i, tick, 1, note_type, sliding, spinning, momentum, ex1, ex2, ex3]);
elif spinning == 1:
if tick >= spinner_end_time - 5:
spinning = 0;
data.append([uts_i, tick, 1, 5, 0, 0, 0, ex1, ex2, ex3]);
else:
data.append([uts_i, tick, 0, 0, 0, 1, 0, ex1, ex2, ex3]);
elif sliding == 1:
if tick >= slider_end_time - 5:
sliding = 0;
data.append([uts_i, tick, 1, 4, 0, 0, 0, ex1, ex2, ex3]);
else:
data.append([uts_i, tick, 0, 0, 1, 0, 0, ex1, ex2, ex3]);
else: # not found
if tick - last_obj_time < note_max_wait_time and tick >= start_time:
data.append([uts_i, tick, 0, 0, 0, 0, 0, ex1, ex2, ex3]);
return data, flow_data;
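# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# Minimal map_json structure inferred from the readers above: one uninherited timing
# section at 120 BPM (tickLength 500 ms) and a single circle at t=1000 ms.
#
#   example_map = {
#       "timing": {"uts": [{"beginTime": 0, "tickLength": 500}],
#                  "ts": [{"beginTime": 0, "sliderLength": 100}]},
#       "obj": [{"time": 1000, "type": 1, "x": 256, "y": 192}],
#   }
#   ticks = get_map_timing_array(example_map, divisor=4)  # one timestamp every 125 ms
#   data, flow_data = get_map_notes(example_map, divisor=4)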
| 2.375
| 2
|
views.py
|
vsantiago113/Flask-API-Boilerplate
| 1
|
12775983
|
from flask import Flask, request, make_response, jsonify, Response
from flask_restx import Resource, Api, abort, reqparse
from flask_jwt_extended import JWTManager
from flask_jwt_extended import (create_access_token, create_refresh_token, jwt_required,
jwt_refresh_token_required, get_jwt_identity, get_raw_jwt)
from datetime import timedelta
import random
app = Flask(__name__)
app.secret_key = 'mysupersecretkey'
api = Api(app, version='1.0', title='My API Boilerplate',
description='My API Boilerplate',
)
ns = api.namespace('api/v1', description='Example.')
app.config['JWT_SECRET_KEY'] = 'jwt-secret-string'
app.config['JWT_TOKEN_LOCATION'] = 'headers'
app.config['JWT_HEADER_NAME'] = 'X-Example-access-token'
app.config['JWT_HEADER_TYPE'] = ''
app.config['JWT_ACCESS_TOKEN_EXPIRES'] = timedelta(minutes=15)
app.config['JWT_REFRESH_TOKEN_EXPIRES'] = timedelta(minutes=15)
app.config['JWT_BLACKLIST_ENABLED'] = True
app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = ['access', 'refresh']
jwt = JWTManager(app)
blacklist = set()
devices = {
'routers': {
12345: {
'name': 'RT1',
'ip': '192.168.1.101'
},
123456: {
'name': 'RT2',
'ip': '192.168.1.102'
},
123457: {
'name': 'RT3',
'ip': '192.168.1.103'
},
12345712: {
'name': 'RT4',
'ip': '192.168.1.104'
},
12345752: {
'name': 'RT5',
'ip': '192.168.1.105'
}
}
}
def generate_device_id():
return random.randint(10000, 20000)
@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decrypted_token):
jti = decrypted_token['jti']
return jti in blacklist
@ns.route('/generatetoken')
class GenerateToken(Resource):
@staticmethod
def post():
if request.authorization.username == 'admin' and request.authorization.password == '<PASSWORD>':
access_token = create_access_token(identity=request.authorization.username, fresh=True)
refresh_token = create_refresh_token(identity=request.authorization.username)
return Response(headers={'X-Example-access-token': access_token,
'X-Example-refresh-token': refresh_token},
status=204, mimetype='application/json')
else:
return make_response(jsonify({'message': 'Bad username or password'}), 401)
@ns.route('/refreshtoken')
class RefreshToken(Resource):
@jwt_refresh_token_required
def post(self):
current_user = get_jwt_identity()
access_token = create_access_token(identity=current_user)
return Response(headers={'X-Example-access-token': access_token,
'X-Example-refresh-token': request.headers.get('X-Example-access-token')},
status=204, mimetype='application/json')
@ns.route('/revoketoken')
class RevokeToken(Resource):
@jwt_required
def post(self):
jti = get_raw_jwt()['jti']
blacklist.add(jti)
return '', 204
@ns.route('/lets_get_all_routers')
class TestRouters(Resource):
@jwt_required
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('PageSize', type=int, location='args')
parser.add_argument('Offset', type=int, location='args')
args = parser.parse_args()
if not args.PageSize:
page_size = 10
else:
page_size = args.PageSize
if page_size > 100:
raise reqparse.exceptions.RequestEntityTooLarge('PageSize cannot exceed 100 items!')
if not args.Offset:
offset = 0
elif args.Offset > page_size:
offset = 0
else:
offset = args.Offset
items = []
for k, v in devices['routers'].items():
v.update({'id': str(k)})
items.append({k: v})
data = {'url': request.url,
'items': items[offset:page_size],
'PageSize': page_size,
'Offset': offset,
'count': len(items[offset:page_size])}
return make_response(jsonify(data), 200)
@ns.route('/routers')
class ListRouters(Resource):
@jwt_required
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('PageSize', type=int, location='args')
parser.add_argument('Offset', type=int, location='args')
args = parser.parse_args()
if not args.PageSize:
page_size = 10
else:
page_size = args.PageSize
if page_size > 100:
raise reqparse.exceptions.RequestEntityTooLarge('PageSize cannot exceed 100 items!')
if not args.Offset:
offset = 0
elif args.Offset > page_size:
offset = 0
else:
offset = args.Offset
items = []
for k, v in devices['routers'].items():
v.update({'id': str(k)})
items.append({k: v})
data = {'url': request.url,
'items': items[offset:page_size],
'PageSize': page_size,
'Offset': offset,
'count': len(items[offset:page_size])}
return make_response(jsonify(data), 200)
@jwt_required
def post(self):
data = request.get_json()
while True:
device_id = generate_device_id()
if device_id not in devices['routers']:
break
devices['routers'][device_id] = data
data.update({'id': device_id})
return make_response(jsonify(data), 200)
@ns.route('/routers/<int:device_id>')
class Routers(Resource):
@jwt_required
def get(self, device_id):
try:
device = devices['routers'][device_id]
except KeyError:
abort(404)
else:
return make_response(jsonify(device), 200)
@jwt_required
def put(self, device_id):
data = request.get_json()
devices['routers'][device_id].update(data)
return make_response(jsonify(devices['routers'][device_id]), 200)
@jwt_required
def delete(self, device_id):
devices['routers'].pop(device_id, None)
return make_response(jsonify({'msg': f'Device ID: {device_id} has been deleted!'}), 200)
@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decrypted_token):
jti = decrypted_token['jti']
return jti in blacklist
app.run(debug=True)
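# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# Hypothetical client flow against the endpoints above (the demo password literal
# is redacted in this source):
#
#   curl -i -u admin:<password> -X POST http://127.0.0.1:5000/api/v1/generatetoken
#   # copy the X-Example-access-token response header, then:
#   curl -H "X-Example-access-token: <token>" http://127.0.0.1:5000/api/v1/routers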
| 2.1875
| 2
|
extra/test_multi.py
|
ragnariock/DeepFashion
| 255
|
12775984
|
### IMPORTS
from __future__ import print_function
import os
import fnmatch
import numpy as np
import skimage.data
import cv2
import sys
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from PIL import Image
from keras import applications
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from keras.optimizers import RMSprop, Adagrad
from keras.models import Sequential, Model
from keras.layers import Dropout, Flatten, Dense, Input
from keras.callbacks import ModelCheckpoint, CSVLogger, TensorBoard, EarlyStopping
import logging
FORMAT = "[%(lineno)4s : %(funcName)-30s ] %(message)s"
logging.basicConfig(level=logging.DEBUG, format=FORMAT)
from selective_search import selective_search_bbox
### GLOBALS
# dimensions of our images.
# img_width = 150
# img_height = 150
img_width = 224
img_height = 224
# dataset_path = 'dataset_dogs_cats'
dataset_path = 'dataset'
dataset_train_path=os.path.join(dataset_path, 'train')
dataset_val_path=os.path.join(dataset_path, 'validation')
dataset_test_path=os.path.join(dataset_path, 'test')
# path to the model weights files.
weights_path = 'weights/vgg16_weights.h5'
#top_model_weights_path = 'output/bottleneck_fc_model.h5'
#top_model_weights_path = 'output_6_categ/best-weights-015-0.5636-0.7923.hdf5'
#finetune_model_weights_path = 'output/finetune_bottleneck_fc_model.h5'
#finetune_model_weights_path = 'output_6_categ/best-weights-finetune-000-0.2325-0.9062.hdf5'
#finetune_model_weights_path = 'output_6_categ_crop/best-weights-finetune-008-0.3453-0.8774.hdf5'
#finetune_model_weights_path = 'output/best-weights-finetune-000-1.5646-0.5217.hdf5'
#finetune_model_weights_path = 'results_36categ/best-weights-finetune-000-1.5646-0.5217.hdf5'
finetune_model_weights_path = 'output/finetune_bottleneck_fc_model.h5'
#epochs = 50
epochs = 5
#batch_size = 16
#batch_size = 32
batch_size = 1
# Count no. of images(.jpg) in a directory
def get_images_count_recursive(path):
matches = []
logging.debug('path {}'.format(path))
for root, dirnames, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, '*.jpg'):
matches.append(os.path.join(root, filename))
# logging.debug('matches {}'.format(matches))
images_count = len(matches)
return images_count
nb_test_samples = get_images_count_recursive(dataset_test_path)
logging.debug('nb_test_samples {}'.format(nb_test_samples))
if not os.path.exists('output'):
os.makedirs('output')
if not os.path.exists('logs'):
os.makedirs('logs')
# TODO: HARDCODING - Should be same as used during training VGG; Else error (None, None, 512)
input_shape = (img_width, img_height, 3)
# Sorted subdirectories list
def get_subdir_list(path):
names=[]
for name in sorted(os.listdir(path)):
if os.path.isdir(os.path.join(path, name)):
names.append(name)
logging.debug('names {}'.format(names))
return names
class_names = get_subdir_list(dataset_train_path)
logging.debug('class_names {}'.format(class_names))
# build the VGG16 network
base_model = applications.VGG16(weights='imagenet', include_top=False, input_shape=input_shape)
logging.debug('Model loaded.')
logging.debug('{}'.format(base_model.output_shape)) # (None, None, None, 512) if input_shape not given in applications.VGG16
logging.debug('{}'.format(base_model.output_shape[1:])) # (None, None, 512)
### MODEL 1
# build a classifier model to put on top of the convolutional model
# top_model = Sequential()
# top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
# top_model.add(Dense(256, activation='relu'))
# top_model.add(Dropout(0.5))
# top_model.add(Dense(len(class_names), activation='softmax')) # Binary to Multi classification changes
# #top_model.add(Dense(1, activation='sigmoid'))
# # note that it is necessary to start with a fully-trained
# # classifier, including the top classifier,
# # in order to successfully do fine-tuning
# # top_model.load_weights(top_model_weights_path)
# # add the model on top of the convolutional base
# # base_model.add(top_model) # Not working; AttributeError: 'Model' object has no attribute 'add'
# model = Model(inputs=base_model.input, outputs=top_model(base_model.output))
# logging.debug('{}'.format(model.summary()))
# model.compile(loss='sparse_categorical_crossentropy',
# optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
# metrics=['accuracy'])
### MODEL2
inputs = Input(shape=(base_model.output_shape[1:]))
x_common = Dense(256, activation='relu')(inputs)
## Model Classification
x = Flatten()(x_common)
#x = Dropout(dropout_rate)(x)
predictions_class = Dense(len(class_names), activation='softmax', name='predictions_class')(x)
## Model (Regression) IOU score
x = Flatten()(x_common)
# x = Dense(256, activation='relu')(x)
# x = Dropout(dropout_rate)(x)
predictions_iou = Dense(1, activation='sigmoid', name='predictions_iou')(x)
# This creates a model that includes the Input layer and three Dense layers
#model = Model(inputs=inputs, outputs=[predictions_class(base_model.output), predictions_iou(base_model.output)])
model = Model(inputs=inputs, outputs=[predictions_class(base_model.output), predictions_iou])
logging.debug('model summary {}'.format(model.summary()))
model.compile(optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
loss={'predictions_class': 'sparse_categorical_crossentropy', 'predictions_iou': 'mean_squared_error'},
metrics=['accuracy'])
model.load_weights(finetune_model_weights_path)
logging.debug('weights loaded: {}'.format(finetune_model_weights_path))
def evaluate_test_dataset():
## Test
test_datagen = ImageDataGenerator(rescale=1. / 255)
test_generator = test_datagen.flow_from_directory(
dataset_test_path,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='sparse', # Binary to Multi classification changes
save_to_dir=None,
shuffle=False)
scores = model.evaluate_generator(test_generator, nb_test_samples // batch_size)
logging.debug('model.metrics_names {}'.format(model.metrics_names))
logging.debug('scores {}'.format(scores))
def predict_image_dir():
# Predict
# TODO: Hardcoding
# Put all images in sample_images/test folder
dataset_predict_path='sample_images'
#dataset_predict_path='temp'
logging.debug('dataset_predict_path {}'.format(dataset_predict_path))
predict_datagen = ImageDataGenerator(rescale=1. / 255)
predict_generator = predict_datagen.flow_from_directory(
dataset_predict_path,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='sparse', # Binary to Multi classification changes
save_to_dir=None,
shuffle=False)
nb_predict_samples = get_images_count_recursive(dataset_predict_path)
logging.debug('nb_predict_samples {}'.format(nb_predict_samples))
prediction = model.predict_generator(predict_generator, nb_predict_samples // batch_size, verbose=1)
logging.debug('\n\nprediction \n{}'.format(prediction))
# Display predictions
matches=[]
for root, dirnames, filenames in os.walk(os.path.join(dataset_predict_path,'test')):
for filename in fnmatch.filter(filenames, '*.jpg'):
matches.append(os.path.join(root, filename))
for index,preds in enumerate(prediction):
logging.debug('\n{}'.format((matches[index])))
for index2, pred in enumerate(preds):
logging.debug('class_names {}'.format(class_names[index2]))
logging.debug('pred {0:6f}'.format(float(pred)))
def pad_and_crop_image(old_im, new_width, new_height):
# old_im = Image.open('someimage.jpg')
old_size = old_im.size
new_size = (new_width, new_height)
new_im = Image.new("RGB", new_size) # this is already black!
new_im.paste(old_im, ((new_size[0]-old_size[0])//2,
(new_size[1]-old_size[1])//2))
# new_im.show()
# new_im.save('someimage.jpg')
return new_im
def predict_image_name(image_path_name):
logging.debug('image_path_name {}'.format(image_path_name))
candidates = selective_search_bbox(image_path_name)
logging.debug('candidates {}'.format(candidates))
image_name = image_path_name.split('/')[-1].split('.')[0]
logging.debug('image_name {}'.format(image_name))
# img = Image.open(image_path_name)
# logging.debug('{} {} {}'.format(img.format, img.size, img.mode))
#img2 = img.crop((0, 0, 100, 100))
# img2.save("img2.jpg")
# img2.show()
#crop_img = img[200:400, 100:300] # Crop from x, y, w, h -> 100, 200, 300, 400
# NOTE: its img[y: y + h, x: x + w] and *not* img[x: x + w, y: y + h]
# img = cv2.imread(image_path_name)
# fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
img_read = Image.open(image_path_name)
logging.debug('{} {} {}'.format(img_read.format, img_read.size, img_read.mode))
# img_read.show()
i=0
for x, y, w, h in (candidates):
# left, upper, right, and lower pixel; The cropped section includes the left column and
# the upper row of pixels and goes up to (but doesn't include) the right column and bottom row of pixels
img_crop = img_read.crop((y, x, y+w, x+h))
img_crop.save('temp/test/'+ image_name + '_' + str(i) + '_cropped_' + '.jpg')
logging.debug('img_crop {} {} {}'.format(img_crop.format, img_crop.size, img_crop.mode))
img_crop_resize = img_crop.resize((img_width, img_height))
img_crop_resize.save('temp/test/'+ image_name + '_' + str(i) + '_cropped_resize' + '.jpg')
logging.debug('img_crop_resize {} {} {}'.format(img_crop_resize.format, img_crop_resize.size, img_crop_resize.mode))
i=i+1
# crop_img = img[x:y, w:h] # Crop from x, y, w, h -> 100, 200, 300, 400
# logging.debug('crop_img {}'.format(crop_img.shape))
# ax.imshow(crop_img)
# # cv2.imshow('cropped', crop_img)
# # cv2.waitKey(0)
# plt.show()
# # Convert Image to array
# img = PIL.Image.open("foo.jpg").convert("L")
# arr = numpy.array(img)
# # Convert array to Image
# img = PIL.Image.fromarray(arr)
# img = cv2.resize(cv2.imread(image_path_name), (224, 224)).astype(np.float32)
# img2.save('temp/test/img_'+str(i)+'.jpg')
# img3 = img2.thumbnail((img_width, img_height))
# logging.debug('img3 {}'.format(type(img3)))
# # img3.save('temp/test/img_'+str(i)+'_resized.jpg')
# logging.debug('{} {} {}'.format(img3.format, img3.size, img3.mode))
# img4 = pad_and_crop_image(img3, img_width, img_height)
# logging.debug('{} {} {}'.format(img4.format, img4.size, img4.mode))
# img4.save('temp/test/img_'+str(i)+'_resized1.jpg')
img=np.array(img_crop_resize).astype(np.float32)
img[:,:,0] -= 103.939
img[:,:,1] -= 116.779
img[:,:,2] -= 123.68
#img = img.transpose((2,0,1))
img = np.expand_dims(img, axis=0)
prediction = model.predict(img, batch_size, verbose=1)
logging.debug('\n\nprediction \n{}'.format(prediction))
for index,preds in enumerate(prediction):
for pred in preds:
logging.debug('pred {0:6f}'.format(float(pred)))
### MAIN ###
#evaluate_test_dataset()
#predict_image_dir()
# #image='dataset/test/Jeans/img_Distressed_Skinny_Jeans_img_00000004.jpg'
# #image='sample_images/test/img_Distressed_Denim_Jeans_img_00000001.jpg'
# image='sample_images/test/img_Acid_Wash_Denim_Romper_img_00000070.jpg'
image='sample_images/test/img_Acid_Wash_-_Skinny_Jeans_img_00000005.jpg'
#image='sample_images/test/img_Boxy_Faux_Fur_Jacket_img_00000001.jpg'
#image='sample_images/test/img_Athletic_Marled_Knit_Joggers_img_00000009.jpg'
predict_image_name(image)
| 1.992188
| 2
|
utils/del_dummydirs.py
|
shleee47/shleee47
| 0
|
12775985
|
import os
import glob
import shutil
def del_dummydirs(rootpath, targets):
"""Walk rootpath and delete every directory or file whose name is in targets."""
for root, subdirs, files in os.walk(rootpath):
# delete matching directories
for s in subdirs:
if s in targets:
shutil.rmtree(os.path.join(root, s))
print("deleted - ", os.path.join(root, s))
# delete matching files
for f in files:
if f in targets:
os.remove(os.path.join(root, f))
print("deleted - ", os.path.join(root, f))
if __name__ == '__main__':
# del_dummydirs('/home/nas/DB/DB_video-nonlocal-light/400_val', ['@eaDir', 'Thumbs.db'])
del_dummydirs('/home/sangbuem/MARS/dataset/Kinetics', ['@e', 'Thumb'])
| 3.046875
| 3
|
train/inflammation-classifier.py
|
JorisRoels/mri-inflammation-prediction
| 0
|
12775986
|
'''
This script illustrates training of an inflammation classifier for patches along SI joints
'''
import argparse
import os
import shutil
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from neuralnets.util.io import print_frm
from neuralnets.util.tools import set_seed
from neuralnets.util.augmentation import *
from pytorch_lightning.callbacks import ModelCheckpoint
from data.datasets import SPARCCDataset
from models.sparcc_cnn import Inflammation_CNN
from util.constants import *
factor = {INFLAMMATION_MODULE: 64, DEEP_INFLAMMATION_MODULE: 12, SPARCC_MODULE: 1, JOINT: 1}
def _train_module(net, train_data, val_data, args):
train_data.mode = INFLAMMATION_MODULE
val_data.mode = INFLAMMATION_MODULE
train_loader = DataLoader(train_data, batch_size=factor[INFLAMMATION_MODULE]*args.train_batch_size,
num_workers=args.num_workers, pin_memory=True, shuffle=True)
val_loader = DataLoader(val_data, batch_size=factor[INFLAMMATION_MODULE]*args.test_batch_size,
num_workers=args.num_workers, pin_memory=True)
checkpoint_callback = ModelCheckpoint(save_top_k=5, verbose=True, monitor='val/roc-auc', mode='max')
trainer = pl.Trainer(max_epochs=args.epochs, gpus=args.gpus, accelerator=args.accelerator,
default_root_dir=args.log_dir, flush_logs_every_n_steps=args.log_freq,
log_every_n_steps=args.log_freq, callbacks=[checkpoint_callback],
progress_bar_refresh_rate=args.log_refresh_rate, num_sanity_val_steps=0, deterministic=True)
trainer.fit(net, train_loader, val_loader)
return trainer
def _test_module(trainer, net, test_data, args):
test_data.mode = INFLAMMATION_MODULE
net.load_state_dict(torch.load(trainer.checkpoint_callback.best_model_path)['state_dict'])
test_loader = DataLoader(test_data, batch_size=factor[INFLAMMATION_MODULE]*args.test_batch_size,
num_workers=args.num_workers, pin_memory=True)
trainer.test(net, test_loader)
return trainer
if __name__ == '__main__':
# parse all the arguments
parser = argparse.ArgumentParser()
parser.add_argument("--data-dir", help="Path to the directory that contains a preprocessed dataset", type=str,
required=True)
parser.add_argument("--si-joint-model", help="Path to the SI joint detection checkpoint", type=str, required=True)
parser.add_argument("--model-checkpoint-illium", help="Path to the illium U-Net checkpoint", type=str,
required=True)
parser.add_argument("--model-checkpoint-sacrum", help="Path to the sacrum U-Net checkpoint", type=str,
required=True)
parser.add_argument("--repetitions", help="Number of repetitions", type=int, default=1)
parser.add_argument("--folds", help="Number of folds (overrides repetitions parameter if provided)", type=int,
default=None)
# network parameters
parser.add_argument("--train_val_test_split", help="Train/validation/test split", type=str, default="0.50,0.75")
parser.add_argument("--backbone", help="Backbone feature extractor of the model", type=str, default='ResNet18')
parser.add_argument("--omit_t1_input", help="Boolean flag that omits usage of T1 slices", action='store_true',
default=False)
parser.add_argument("--omit_t2_input", help="Boolean flag that omits usage of T1 slices", action='store_true',
default=False)
parser.add_argument("--omit_weighting", help="Boolean flag that specifies ROI masking", action='store_true',
default=False)
# optimization parameters
parser.add_argument("--epochs", help="Number of training epochs", type=int, default=400)
parser.add_argument("--lr", help="Learning rate for the optimization", type=float, default=1e-3)
# compute parameters
parser.add_argument("--train_batch_size", help="Batch size during training", type=int, default=1)
parser.add_argument("--test_batch_size", help="Batch size during testing", type=int, default=1)
parser.add_argument("--num_workers", help="Amount of workers", type=int, default=12)
parser.add_argument("--gpus", help="Devices available for computing", type=str, default='0')
parser.add_argument("--accelerator", help="Acceleration engine for computations", type=str, default='dp')
# logging parameters
parser.add_argument("--log_dir", help="Logging directory", type=str, default='logs')
parser.add_argument("--log_freq", help="Frequency to log results", type=int, default=50)
parser.add_argument("--log_refresh_rate", help="Refresh rate for logging", type=int, default=1)
parser.add_argument("--seed", help="Seed for reproducibility", type=int, default=0)
parser.add_argument("--clean-up", help="Boolean flag that specifies ROI masking", action='store_true', default=False)
args = parser.parse_args()
args.train_val_test_split = [float(item) for item in args.train_val_test_split.split(',')]
metrics = []
if args.folds is not None:
reps = args.folds
range_split = ((0, 1), (0, 1))
else:
reps = args.repetitions
f = None
split = args.train_val_test_split
range_split = ((0, split[1]), (0, split[1]), (split[1], 1))
for i in range(reps):
rep_str = 'fold' if args.folds is not None else 'repetition'
print_frm('')
print_frm('Start processing %s %d/%d ...' % (rep_str, i+1, reps))
print_frm('')
"""
Fix seed (in case of cross validation), or increment if repetitive training
"""
if args.folds is not None:
set_seed(args.seed)
else:
args.seed = args.seed + 1
set_seed(args.seed)
"""
Load the data
"""
print_frm('Loading data')
transform = Compose([Rotate90(), Flip(prob=0.5, dim=0), Flip(prob=0.5, dim=1), RandomDeformation(),
AddNoise(sigma_max=0.05)])
train = SPARCCDataset(args.data_dir, args.si_joint_model, args.model_checkpoint_illium,
args.model_checkpoint_sacrum, range_split=range_split[0], folds=args.folds, f=i,
train=True, transform=transform, seed=args.seed, mode=INFLAMMATION_MODULE,
use_t1_input=not args.omit_t1_input, use_t2_input=not args.omit_t2_input,
apply_weighting=not args.omit_weighting)
val = SPARCCDataset(args.data_dir, args.si_joint_model, args.model_checkpoint_illium,
args.model_checkpoint_sacrum, range_split=range_split[1], folds=args.folds, f=i,
train=False, seed=args.seed, mode=INFLAMMATION_MODULE, use_t1_input=not args.omit_t1_input,
use_t2_input=not args.omit_t2_input, apply_weighting=not args.omit_weighting)
print_frm('Train data distribution: Infl: %.2f - Non-infl: %.2f' % (100*np.mean(train.q_scores),
100*np.mean(1-train.q_scores)))
print_frm('Val data distribution: Infl: %.2f - Non-infl: %.2f' % (100*np.mean(val.q_scores),
100*np.mean(1-val.q_scores)))
if args.folds is None:
test = SPARCCDataset(args.data_dir, args.si_joint_model, args.model_checkpoint_illium,
args.model_checkpoint_sacrum, range_split=range_split[2], seed=args.seed,
mode=INFLAMMATION_MODULE, use_t1_input=not args.omit_t1_input,
use_t2_input=not args.omit_t2_input, apply_weighting=not args.omit_weighting)
print_frm('Test data distribution: Infl: %.2f - Non-infl: %.2f' % (100*np.mean(test.q_scores),
100*np.mean(1-test.q_scores)))
"""
Build the network
"""
print_frm('Building the network')
weights = train.score_weights[0]
net = Inflammation_CNN(backbone=args.backbone, lr=args.lr, use_t1_input=not args.omit_t1_input,
use_t2_input=not args.omit_t2_input, weights=weights)
print_frm('Balancing weights for loss function: %s' % (weights))
"""
Train the inflammation network
"""
print_frm('Starting training of the inflammation network')
trainer = _train_module(net, train, val, args)
print_frm('Testing network')
_test_module(trainer, net, val if args.folds is not None else test, args)
metrics.append([float(trainer.logged_metrics['test/' + m].cpu()) for m in METRICS])
"""
Save the final model
"""
print_frm('Saving final model')
shutil.copyfile(trainer.checkpoint_callback.best_model_path, os.path.join(trainer.log_dir, OPTIMAL_CKPT))
"""
Clean up
"""
print_frm('Cleaning up')
if args.clean_up:
os.system('rm -r ' + os.path.join(trainer.log_dir, 'checkpoints'))
"""
Report final performance results
"""
metrics = np.asarray(metrics)
metrics_avg = np.mean(metrics, axis=0)
print_frm('Final performance report:')
print_frm('=========================')
for i, m in enumerate(METRICS):
print_frm(' %s: %f' % (m, metrics_avg[i]))
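# --- Hedged usage sketch (editorial addition, not part of the original script) ---
# Example invocation; every path below is a placeholder, not a real checkpoint.
#
#   python inflammation-classifier.py --data-dir /path/to/preprocessed \
#       --si-joint-model si_joint.ckpt --model-checkpoint-illium illium.ckpt \
#       --model-checkpoint-sacrum sacrum.ckpt --folds 5 --gpus 0 --epochs 100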
| 2.296875
| 2
|
algorithm/16-DFS.py
|
LeeBeral/python
| 0
|
12775987
|
# DFS: Depth First Search - starting from the root, descend along the leftmost branch first; keep going down while the current node has unvisited children, backtrack when none remain, and stop once the traversal returns to the root.
nums = [2, 0, 3, 1, 3, 4]
pst = 3
def dfs(nums, p, t=0, visited=None):
# from index p we may jump nums[p] steps left or right; search depth-first
# for an index holding the target value t, tracking visited indices to avoid cycles
if visited is None:
visited = set()
if p < 0 or p >= len(nums) or p in visited:
return False
if nums[p] == t:
return True
visited.add(p)
step = nums[p]
return dfs(nums, p - step, t, visited) or dfs(nums, p + step, t, visited)
print(dfs(nums, pst))
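# Trace for the sample input: start at index 3 (nums[3] = 1) -> index 2 -> index 5
# -> index 1, where nums[1] == 0 == t, so the call above prints True.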
| 3.875
| 4
|
day-1/range-type.py
|
anishLearnsToCode/python-workshop-3
| 2
|
12775988
|
<reponame>anishLearnsToCode/python-workshop-3
"""
Range
range(stop)
range(start, stop)
range(start, stop, step)
default start = 0
default step = 1
"""
r = range(5, 10, 2)
print(r.start)
print(r.stop)
print(r.step)
print(type(r))
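# For reference, the defaults described in the docstring in action:
#   list(range(5, 10, 2)) -> [5, 7, 9]
#   list(range(5))        -> [0, 1, 2, 3, 4]  (start defaults to 0, step to 1)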
| 3.734375
| 4
|
apps/lti_app/middleware.py
|
PremierLangage/premierlangage
| 8
|
12775989
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# middleware.py
#
# Authors:
# - <NAME> <<EMAIL>>
#
import logging
from django.contrib import auth
from django.core.exceptions import ImproperlyConfigured
from django.shortcuts import redirect, get_object_or_404
from django.urls import resolve, reverse
from django.utils.deprecation import MiddlewareMixin
from lti_app.models import ActivityOutcome
from activity.models import Activity
logger = logging.getLogger(__name__)
class LTIAuthMiddleware(MiddlewareMixin):
"""
Middleware for authenticating users via an LTI launch URL.
If the request is an LTI launch request, then this middleware attempts to
authenticate the username and signature passed in the POST data.
If authentication is successful, the user is automatically logged in to
persist the user in the session.
The LTI launch parameter dict is stored in the session keyed with the
resource_link_id to uniquely identify LTI launches of the LTI producer.
The LTI launch parameter dict is also set as the 'LTI' attribute on the
current request object to simplify access to the parameters.
The current request object is set as a thread local attribute so that the
monkey-patching of django's reverse() function (see ./__init__.py) can access
it in order to retrieve the current resource_link_id.
"""
def process_request(self, request):
# AuthenticationMiddleware is required so that request.user exists.
if not hasattr(request, 'user'): # pragma: no cover
logger.debug('improperly configured: request has no user attr')
raise ImproperlyConfigured(
"The Django LTI auth middleware requires the"
" authentication middleware to be installed. Edit your"
" MIDDLEWARE_CLASSES setting to insert"
" 'django.contrib.auth.middleware.AuthenticationMiddleware'"
" before the LTIAuthMiddleware class.")
# These parameters should exist outside of session
request.lti_initial_request = False
request.lti_authentication_successful = False
if request.method == 'POST' \
and request.POST.get('lti_message_type') == 'basic-lti-launch-request':
request.lti_initial_request = True
# authenticate and log the user in
user = auth.authenticate(request=request)
if user is not None:
# User is valid. Set request.user and persist user in the session
# by logging the user in.
request.lti_authentication_successful = True
request.user = user
auth.login(request, user)
resource_link_id = request.POST.get('resource_link_id')
lti_launch = {
'context_id': request.POST.get('context_id'),
'context_label': request.POST.get('context_label'),
'context_title': request.POST.get('context_title'),
'context_type': request.POST.get('context_type'),
'custom_canvas_account_id': request.POST.get(
'custom_canvas_account_id'),
'custom_canvas_account_sis_id': request.POST.get(
'custom_canvas_account_sis_id'),
'custom_canvas_api_domain': request.POST.get(
'custom_canvas_api_domain'),
'custom_canvas_course_id': request.POST.get(
'custom_canvas_course_id'),
'custom_canvas_enrollment_state': request.POST.get(
'custom_canvas_enrollment_state'),
'custom_canvas_membership_roles': request.POST.get(
'custom_canvas_membership_roles', '').split(','),
'custom_canvas_user_id': request.POST.get(
'custom_canvas_user_id'),
'custom_canvas_user_login_id': request.POST.get(
'custom_canvas_user_login_id'),
'launch_presentation_css_url': request.POST.get(
'launch_presentation_css_url'),
'launch_presentation_document_target': request.POST.get(
'launch_presentation_document_target'),
'launch_presentation_height': request.POST.get(
'launch_presentation_height'),
'launch_presentation_locale': request.POST.get(
'launch_presentation_locale'),
'launch_presentation_return_url': request.POST.get(
'launch_presentation_return_url'),
'launch_presentation_width': request.POST.get(
'launch_presentation_width'),
'lis_course_offering_sourcedid': request.POST.get(
'lis_course_offering_sourcedid'),
'lis_outcome_service_url': request.POST.get(
'lis_outcome_service_url'),
'lis_result_sourcedid': request.POST.get(
'lis_result_sourcedid'),
'lis_person_contact_email_primary': request.POST.get(
'lis_person_contact_email_primary'),
'lis_person_name_family': request.POST.get(
'lis_person_name_family'),
'lis_person_name_full': request.POST.get(
'lis_person_name_full'),
'lis_person_name_given': request.POST.get(
'lis_person_name_given'),
'lis_person_sourcedid': request.POST.get(
'lis_person_sourcedid'),
'lti_message_type': request.POST.get('lti_message_type'),
'oauth_consumer_key': request.POST.get(
'oauth_consumer_key'),
'resource_link_description': request.POST.get(
'resource_link_description'),
'resource_link_id': resource_link_id,
'resource_link_title': request.POST.get(
'resource_link_title'),
'roles': request.POST.get('roles', '').split(
','),
'selection_directive': request.POST.get(
'selection_directive'),
'tool_consumer_info_product_family_code': request.POST.get(
'tool_consumer_info_product_family_code'),
'tool_consumer_info_version': request.POST.get(
'tool_consumer_info_version'),
'tool_consumer_instance_contact_email': request.POST.get(
'tool_consumer_instance_contact_email'),
'tool_consumer_instance_description': request.POST.get(
'tool_consumer_instance_description'),
'tool_consumer_instance_guid': request.POST.get(
'tool_consumer_instance_guid'),
'tool_consumer_instance_name': request.POST.get(
'tool_consumer_instance_name'),
'tool_consumer_instance_url': request.POST.get(
'tool_consumer_instance_url'),
'user_id': request.POST.get('user_id'),
'user_image': request.POST.get('user_image'),
}
# Creating and updating data according to lti_launch
user.profile.set_role_lti(lti_launch)
urlmatch = resolve(request.path)
if not urlmatch.app_name or not urlmatch.url_name:
urlmatch = None
if urlmatch and urlmatch.app_name + ":" + urlmatch.url_name == "activity:play":
activity = get_object_or_404(Activity, id=urlmatch.kwargs['activity_id'])
is_course = activity.activity_type == "course"
if not is_course:
Activity.get_or_create_course_from_lti(user, lti_launch)
activity, _ = Activity.get_or_update_from_lti(request, lti_launch)
if not is_course:
ActivityOutcome.get_or_create_from_lti(user, lti_launch)
return redirect(reverse('activity:play', args=[activity.id]))
else:
# User could not be authenticated!
logger.warning('LTI authentication failed')
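# Settings sketch implied by the ImproperlyConfigured message above (a minimal
# example, assuming this module is importable as lti_app.middleware):
#
#   MIDDLEWARE = [
#       ...,
#       'django.contrib.auth.middleware.AuthenticationMiddleware',
#       'lti_app.middleware.LTIAuthMiddleware',
#   ]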
| 2.171875
| 2
|
docker_registry_client/Repository.py
|
agrrh/docker-registry-client
| 49
|
12775990
|
<filename>docker_registry_client/Repository.py
from __future__ import absolute_import
from .Image import Image
class BaseRepository(object):
def __init__(self, client, repository, namespace=None):
self._client = client
self.repository = repository
self.namespace = namespace
@property
def name(self):
if self.namespace:
return "{self.namespace}/{self.repository}".format(self=self)
return self.repository
class RepositoryV1(BaseRepository):
def __init__(self, client, repository, namespace=None):
if namespace is None:
namespace = 'library'
super(RepositoryV1, self).__init__(client, repository,
namespace=namespace)
self._images = None
def __repr__(self):
return 'RepositoryV1({name})'.format(name=self.name)
def refresh(self):
self._images = self._client.get_repository_tags(self.namespace,
self.repository)
def tags(self):
if self._images is None:
self.refresh()
if type(self._images) is list:
return list(taginfo['name'] for taginfo in self._images)
else:
return list(self._images.keys())
def data(self, tag):
return self._client.get_tag_json(self.namespace, self.repository, tag)
def image(self, tag):
if self._images is None:
self.refresh()
image_id = self._images[tag]
return Image(image_id, self._client)
def untag(self, tag):
return self._client.delete_repository_tag(self.namespace,
self.repository, tag)
def tag(self, tag, image_id):
return self._client.set_tag(self.namespace, self.repository,
tag, image_id)
def delete_repository(self):
# self._client.delete_repository(self.namespace, self.repository)
raise NotImplementedError()
class RepositoryV2(BaseRepository):
def __init__(self, client, repository, namespace=None):
super(RepositoryV2, self).__init__(client, repository,
namespace=namespace)
self._tags = None
def __repr__(self):
return 'RepositoryV2({name})'.format(name=self.name)
def tags(self):
if self._tags is None:
self.refresh()
return self._tags
def manifest(self, tag):
"""
Return a tuple, (manifest, digest), for a given tag
"""
return self._client.get_manifest_and_digest(self.name, tag)
def delete_manifest(self, digest):
return self._client.delete_manifest(self.name, digest)
def refresh(self):
response = self._client.get_repository_tags(self.name)
self._tags = response['tags']
def Repository(client, *args, **kwargs):
if client.version == 1:
return RepositoryV1(client, *args, **kwargs)
else:
assert client.version == 2
return RepositoryV2(client, *args, **kwargs)
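# Usage sketch: `client` is any object exposing a `version` attribute plus the
# get_repository_tags()/get_manifest_and_digest() methods used above, e.g. the
# base client built by this package's DockerRegistryClient; the exact wiring is
# an assumption, not something this module defines.
#
#   repo = Repository(client, 'ubuntu', namespace='library')
#   print(repo.tags())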
| 2.34375
| 2
|
code_examples/tensorflow/mcmc/mcmc_tfp.py
|
xihuaiwen/chinese_bert
| 0
|
12775991
|
# Copyright 2020 Graphcore Ltd.
import argparse
import os
import time as time
import numpy as np
import tensorflow as tf
from tensorflow.python.ipu import ipu_compiler, ipu_infeed_queue, loops, utils
from tensorflow.python.ipu.scopes import ipu_scope, ipu_shard
import tensorflow_probability as tfp
# Model and sampling parameters
# Note: increasing model size, number of steps, or dataset size may cause out of memory errors
first_layer_size = 40
num_burnin_steps = 100
num_ipus = 2
num_results = 400
num_leapfrog_steps = 1000
useful_features = 22
num_skip_columns = 2
output_file = "output_samples.txt"
parser = argparse.ArgumentParser()
parser.add_argument(
"--dataset-dir",
type=str,
default=".",
help="Path to datasets"
)
args = parser.parse_args()
input_file = os.path.join(
args.dataset_dir, "returns_and_features_for_mcmc.txt"
)
# Print the about message
print("\nMCMC sampling example with TensorFlow Probability\n"
" Single precision\n"
f" Number of IPUs {num_ipus} (one MCMC chain per IPU)\n"
f" Number of results per IPU {num_results}\n"
f" Number of burn-in steps {num_burnin_steps}\n"
f" Number of leapfrog steps {num_leapfrog_steps}\n"
f" First layer size {first_layer_size}")
# Load data
raw_data = np.genfromtxt(input_file, skip_header=1,
delimiter="\t", dtype='float32')
# Pre-process data
observed_return_ = raw_data[:, num_skip_columns]
observed_features_ = raw_data[:, num_skip_columns+1:]
num_features = raw_data.shape[1] - num_skip_columns - 1
if useful_features < num_features:
num_features = useful_features
observed_features_ = observed_features_[:, :num_features]
# Model is an MLP with num_features input dims and layer sizes: first_layer_size, 1, 1
num_model_parameters = num_features * first_layer_size + \
first_layer_size + first_layer_size + 3
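# For example, with useful_features = 22 and first_layer_size = 40 this is
# 22 * 40 + 40 + 40 + 3 = 963 parameters, matching the slices unpacked in bdnn below.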
# Print dataset parameters
print(" Number of data items {}\n"
" Number of features per data item {}\n"
" Number of model parameters {}\n"
.format(raw_data.shape[0],
num_features,
num_model_parameters
))
# Import TensorFlow modules
tfd = tfp.distributions
# Suppress warnings
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
# Initialize TensorFlow graph and session
tf.reset_default_graph()
config = tf.ConfigProto()
sess = tf.Session(config=config)
# Build the neural network
def bdnn(x, p):
nf = num_features
nt = first_layer_size
# Unpack model parameters
w1 = tf.reshape(p[nt+1:nt+nf*nt+1], [nf, nt])
w2 = tf.reshape(p[1:nt+1], [nt, 1])
w3 = p[0]
b1 = p[nt+nf*nt+3:]
b2 = tf.expand_dims(p[nt+nf*nt+2], 0)
b3 = p[nt+nf*nt+1]
# Build layers
x = tf.tanh(tf.nn.xw_plus_b(x, w1, b1))
x = tf.nn.xw_plus_b(x, w2, b2)
x = x * w3 + b3
return tf.squeeze(x)
# Model posterior log probability
def model_log_prob(ret, feat, p):
# Parameters of distributions
prior_scale = 200
studentT_scale = 100
# Features normalization
def normalize_features(f):
return 0.001 * f
# Prior probability distributions on model parameters
rv_p = tfd.Independent(tfd.Normal(loc=0. * tf.ones(shape=[num_model_parameters], dtype=tf.float32),
scale=prior_scale * tf.ones(shape=[num_model_parameters], dtype=tf.float32)),
reinterpreted_batch_ndims=1)
# Likelihood
alpha_bp_estimate = bdnn(normalize_features(feat), p)
rv_observed = tfd.StudentT(
df=2.2, loc=alpha_bp_estimate, scale=studentT_scale)
# Sum of logs
return (rv_p.log_prob(p) +
tf.reduce_sum(rv_observed.log_prob(ret)))
def build_graph(scope_id):
with tf.variable_scope('scope'+scope_id, use_resource=True, reuse=tf.AUTO_REUSE):
# Data items
observed_return = tf.cast(observed_return_, 'float32')
observed_features = tf.cast(observed_features_, 'float32')
# Initial chain state
initial_chain_state = [
0.0 * tf.ones(shape=[num_model_parameters], dtype=tf.float32)
]
# Bijectors
unconstraining_bijectors = [
tfp.bijectors.Identity()
]
# Initialize the step_size
with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
step_size = tf.get_variable(
name='step_size',
initializer=tf.constant(.01, dtype=tf.float32),
trainable=False,
use_resource=True
)
# Put the graph into a function so it can be compiled for running on IPU
def hmc_graph():
# Target log probability function
def target_log_prob_fn(*args):
return model_log_prob(observed_return, observed_features, *args)
# Hamiltonian Monte Carlo kernel
hmc_kernel = tfp.mcmc.TransformedTransitionKernel(
inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn,
num_leapfrog_steps=num_leapfrog_steps,
step_size=step_size,
step_size_update_fn=tfp.mcmc.make_simple_step_size_update_policy(
target_rate=0.2,
num_adaptation_steps=num_burnin_steps,
decrement_multiplier=0.1),
state_gradients_are_stopped=False),
bijector=unconstraining_bijectors)
# Graph to sample from the chain
return tfp.mcmc.sample_chain(
num_results=num_results,
num_burnin_steps=num_burnin_steps,
current_state=initial_chain_state,
kernel=hmc_kernel)
# Compile the graph
[p], kernel_results = ipu_compiler.compile(hmc_graph, [])
return (p, kernel_results)
# Place the graphs on IPUs
ops = []
for i in range(num_ipus):
with ipu_scope('/device:IPU:'+str(i)):
ops.append(build_graph(scope_id=str(i)))
# Configure IPU
config = utils.create_ipu_config()
# Create num_chips TF devices, with 1 IPU per device
config = utils.auto_select_ipus(config, [1]*num_ipus)
utils.configure_ipu_system(config)
utils.move_variable_initialization_to_cpu()
# Initialize variables
init_g = tf.global_variables_initializer()
sess.run(init_g)
# Warm up
print("\nWarming up...")
sess.run(ops)
print("Done\n")
# Sample
print("Sampling...")
start_time = time.time()
results = sess.run(ops)
end_time = time.time()
print("Done\n")
# Concatenate samples from separate MCMC chains
samples = np.concatenate(list(map(lambda x: x[0], results)), axis=0)
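# With the settings above this is a (num_ipus * num_results, num_model_parameters)
# array, i.e. 2 * 400 = 800 rows of 963 parameters each (assuming 22 useful features).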
# Write samples to file
np.savetxt(output_file, samples, delimiter='\t')
print("Written {} samples to {}".format(samples.shape[0], output_file))
# Print run time
print("Completed in {0:.2f} seconds\n".format(end_time - start_time))
| 2.3125
| 2
|
Matplotlib/Matplotlib-PieChart.py
|
H2oPtic/Codecademy
| 0
|
12775992
|
<gh_stars>0
from matplotlib import pyplot as plt
import numpy as np
payment_method_names = ["Card Swipe", "Cash", "Apple Pay", "Other"]
payment_method_freqs = [270, 77, 32, 11]
plt.pie(payment_method_freqs)
plt.axis('equal')
plt.show()
| 2.71875
| 3
|
tensorflow_graphics/rendering/tests/splat_with_opengl_test.py
|
sarvex/graphics
| 2,759
|
12775993
|
<filename>tensorflow_graphics/rendering/tests/splat_with_opengl_test.py<gh_stars>1000+
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for rasterize than splat functionality with opengl rasterization."""
from tensorflow_graphics.rendering import rasterization_backend
from tensorflow_graphics.rendering.tests import splat_test
from tensorflow_graphics.util import test_case
class SplatWithOpenGLTest(splat_test.SplatTest):
def setUp(self):
super().setUp()
    # This pattern was chosen instead of a parametrized test to facilitate
# running the test cases in pure CPU mode on machines that do not have a
# GPU. In this case the opengl rasterizer cannot be added as dependency to
# the binary as CPU only machines do not have the required libEGL.so
# available. This pattern provides a separate build target for the opengl
# rasterizer version.
self._backend = rasterization_backend.RasterizationBackends.OPENGL
if __name__ == '__main__':
test_case.main()
| 1.984375
| 2
|
sdtest-wip.py
|
benhastings/SeGrid_EC2
| 0
|
12775994
|
from selenium import webdriver
import selenium.webdriver.support.ui as ui
from selenium.webdriver.common.keys import Keys
#from selenium.webdriver.common.action_chains import ActionChains
import time
import datetime
import csv
import random
import sys
import urllib2
import socket
#from metricsCollect import metricsCollect
#------------------------------------------------------------
#--- Get Interactive Input for number of loops to execute ---
#numLoops = int(sys.argv[1])
timeToRun=int(sys.argv[1])
endTime=int(time.time()+timeToRun)
#--- Browser definition for Grid usage ----------
browser = sys.argv[2]
#--- SeGrid Hub designation --------------------
hub = sys.argv[3]
instID = sys.argv[4]
l=[]
#statsDHost='ec2-54-80-6-76.compute-1.amazonaws.com'
statsDHost='statsd.elsst.com'
"""
Define UDP connection to send data to statsD
"""
UDPSock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
## statsd host & port
addr=(statsDHost,8125)
#--- Read List of PIIs -----------------
PII=[]
try:
csvRd = csv.reader(open('/home/ubuntu/PIIs_250k.csv','rb'))
#csvRd = csv.reader(open('/home/ubuntu/PIIs_30k.csv','rb'))
piiCount = 29000
except:
csvRd = csv.reader(open('./PIIs_250k.csv','rb'))
piiCount = 29000
for j in csvRd:
PII.append(j)
#--- Read List of Journals -----------------
JRNL=[]
try:
csvRd = csv.reader(open('/home/ubuntu/Journals.csv','rb'))
except:
csvRd = csv.reader(open('./Journals.csv','rb'))
for j in csvRd:
JRNL.append(j)
#--- Read List of Search Terms -----------------
SRCH=[]
try:
csvRd = csv.reader(open('/home/ubuntu/SDSrchTerms.csv','rb'))
except:
csvRd = csv.reader(open('./SDSrchTerms.csv','rb'))
for j in csvRd:
SRCH.append(j)
#---------------------------------------
# Function to gracefully exit the browser
# after incrementing loop variables
#-----------
def egress():
try:
driver.quit()
# except WindowsError:
# print ("****WindowsError - pass? ****")
# pass
except urllib2.URLError:
# print ("----URLError - pass? ----")
pass
#------------------------------------------------------
# Function to send error details for tracking
#------------------------------------------------------
def errorReport(hName,titlN,msg):
# l.append('sd.Selenium.error.'+base+'.'+titlN+':1|c\n')
try:
stats
stats+='sd.Selenium.error.'+base+'.'+titlN+':1|c\n'
except:
stats='sd.Selenium.error.'+base+'.'+titlN+':1|c\n'
try:
print('error - '+msg+' '+titlN+' '+driver.title)
except:
print('error - '+msg+' '+titlN)
#------------------------------------------------------
# Function to send error details for tracking
#------------------------------------------------------
def newBrowser(base):
    l.append('sd.Selenium.'+base+'.newBrowser:1|c\n')
print('new Browser - '+base)
#------------------------------------------------------
# Gather Performance data to send
#------------------------------------------------------
def metricsCollect(dtitl,d,base):
# mets=''
# metrics=['responseStart','responseEnd','domInteractive','loadEventEnd','domContentLoadedEventEnd']
metrics={'ttfb':'responseStart','html':'responseEnd','pgi':'domInteractive','pgl':'loadEventEnd','startRender':'domContentLoadedEventEnd'}
# print(dtitl+' - trying metricsCollect')
try:
# print('try some script execute')
navS = d.execute_script('return performance.timing.navigationStart')
# print('navS: '+str(navS))
# print('try getting other metrics')
for i in metrics:
compVal=int(d.execute_script('return performance.timing.'+metrics[i])-navS)
if(compVal>0):
l.append('sd.Selenium.'+base+'.'+dtitl+'.'+str(i)+':'+str(compVal)+'|ms\n')
if (dtitl.find('Content_Delivery') != -1):
try:
# print('try return prs.abs_end')
pcrT=d.execute_script("return prs.abs_end")
except:
pcrT=0
elif(dtitl.find('Category_Home') != -1):
try:
prs=d.execute_script('return prs')
prsT=[]
prsT.append(prs['pcr'])
prsT.append(prs['pcr_nav'])
pcrT=sorted(prsT)[1]
except:
pcrT=0
else:
# print('found a different page! - '+dtitl)
            try:
                pcrT=d.execute_script("return prs['pcr']")
            except:
                try:
                    prs=d.execute_script('return prs')
                    pcrT=prs['pcr']
                except:
                    pcrT=0
if pcrT > navS:
l.append('sd.Selenium.'+base+'.'+dtitl+'.pcr:'+str(int(pcrT-navS))+'|ms\n')
# print l
# print UDPSock.sendto(mets,addr)
# print('l '+l)
except:
# print('scripts no workie')
pass
return l
#------------------------------------------------------
# Function to execute a request or page interaction
# handles associated error conditions
# Makes call to collect page timing
#-------------
def getPage(resource):
try:
#driver.get("http://"+baseURL)
resource
if 'Unable to process' in driver.title:
# print 'Error - Unable to process, wait 60 seconds'
errorReport(base,titl,'Unable to Process')
time.sleep(60)
exit
elif 'ScienceDirect Error' in driver.title:
dt = datetime.datetime.now()
dTm = str(dt.strftime("%Y/%m/%d %H:%M:%S%Z"))
# print 'SD-00x Error'+dTm
errorReport(base,titl,'SD-00x')
time.sleep(1)
exit
elif 'Error' in driver.title:
# print 'Error, wait 60 seconds'
time.sleep(10)
exit
else:
# l.append('sd.Selenium.'+base+'.'+titl+'.pass:1|c\n')
time.sleep(.5)
# print('trying metricsCollect')
try:
# print('try to append to stats')
metricsCollect(titl,driver,base)
# print(testHolder)
# stats +=''.join(testHolder)
# print(stats)
except:
# print('no append to stats, create instead')
# stats=''.join(metricsCollect(titl,driver,base))
pass
except urllib2.URLError:
# print 'URLError'
errorReport(base,titl,'URLError')
pass
except:
# print (titl+' fail')
errorReport(base,titl,'Other')
pass
#=============================================================
#-------------------------------------------------------------
# Script Begins Here
#-------------------------------------------------------------
#=============================================================
#--- Define static Article Value for looping
idx=0
while endTime > time.time():
"""
Define capabilities of remote webdriver
Specifically: assign browser type
"""
try:
# stats=''
# print('loading browser')
driver=webdriver.Remote("http://"+hub+":4200/wd/hub",desired_capabilities={"browserName": browser})
#driver=webdriver.Chrome()
# print('wait for it...')
# print datetime.datetime.now()
time.sleep(.25)
# Initialize array for holding metrics to send to graphite
# l = []
#-------------------------------------------------
# Define baseURL for following transactions
#-------------------------------------------------
baseIDX=int(random.random()*300)
if (baseIDX%3==0):
baseURL = 'cdc311-www.sciencedirect.com'
base='cdc311'
if (baseIDX%3==1):
baseURL = 'cdc314-www.sciencedirect.com'
base='cdc314'
if (baseIDX%3==2):
baseURL = 'cdc318-www.sciencedirect.com'
base='cdc318'
baseURL = 'cdc311-www.sciencedirect.com'
base='cdc311'
try:
newBrowser(base)
except:
pass
#-------------------------------------------------
# Load Home Page & Authenticate x% of iterations
#-------------------------------------------------
login = int(random.random()*100)
if (login%100 < 50):
#--- Request Home Page ----------------------------------------
titl='Home_Page'
getPage(driver.get("http://"+baseURL))
#--- Find Login Form & Fill in data ---------------------------
try:
driver.find_element_by_id("loginPlusScript").click()
driver.find_element_by_id('username').send_keys('Webmetrics')
driver.find_element_by_id('password').send_keys('<PASSWORD>')
#--- Submit the form based on element ID ----------------
titl='U/P Auth to Home Page'
driver.find_element_by_name("arrow").click()
#--- If choose Org screen displayed, select appropriate value
if 'Choose Organization' in driver.title:
titl='Choose Org to Home Page'
try:
driver.find_element_by_id('1').click()
driver.find_element_by_class_name('button').click()
#metricsCollect(titl)
except:
pass
except:
egress()
exit
#-------------------------------------------------
# Add looping structure to minimize browser churn
#-------------------------------------------------
browserLoop=2
while(browserLoop > 0):
#-------------------------------------------------
# View Article(s) with scrolling where possible
# View multiple articles in same session 33%
#-------------------------------------------------
artLoop = 5
"""
if (login%3==0):
artLoop=8
else:
artLoop=4
"""
# print ('artLoop: '+str(artLoop))
#Comment out for sequential evaluation of articles
#idx = int(random.random()*499000)
while artLoop > 0:
#--- Define Random Value ---------------
idx = int(random.random()*piiCount)
idxPii=idx
# print('articleIDX:'+str(idx))
Pii=str(PII[idxPii]).strip('[\']')
titl = 'Content_Delivery'
#sStart = time.time()
try:
print('try to get: '+"http://"+baseURL+"/science/article/pii/"+Pii)
getPage(driver.get("http://"+baseURL+"/science/article/pii/"+Pii))
except urllib2.URLError:
time.sleep(.25)
pass
try:
dtitl=driver.title[:50]
# print(dtitl[:50])
except:
egress()
exit
"""
if artLoop > 0:
artLoop = artLoop-1
idx = idx+1
"""
try:
#if (login%6 == 0):
if (artLoop%5 < 2):
# if (artLoop%5 < 6):
titl='Search_Results'
SrIdx = int(random.random()*100)%100
# print('trying search')
srString=str(SRCH[SrIdx]).strip('[\']').decode('string_escape')
# print srString
try:
dtitl=driver.title#[:50]
# print 'dtitl: '+dtitl
# Article Page Search
s=driver.find_element_by_css_selector('input#quickSearch')
s.send_keys(srString)
getPage(driver.find_element_by_css_selector('input.submit').click())
# # Other Pages
# s=d.find_element_by_id("qs_all")
# >>> s.send_keys('berries')
# >>> d.find_element_by_id("submit_search").click()
except:
# print ('Search form not found '+baseURL)
time.sleep(.5)
pass
#if (login%6 > 4):
if (artLoop%5 > 2):
#--- Load Browse List - "Category List" -------------
titl='Category_List'
# print('trying browse')
getPage(driver.get("http://"+baseURL+"/science/journals"))
#--- Load Journal Home Pages - "Category Home" ------
jrnLoop = 2
while jrnLoop > 0:
titl='Category_Home'
idx=idx+jrnLoop
jIdx=idx%120
# print('trying journal')
getPage(driver.get("http://"+baseURL+"/science/journal/"+str(JRNL[jIdx]).strip('[\']')))
jrnLoop=jrnLoop-1
except:
egress()
exit
if artLoop > 0:
artLoop = artLoop-1
idx = idx+1
browserLoop=browserLoop-1
# print(browserLoop)
print 'join statsDdata'
statsDdata=''.join(l)
print('here is statsDdata')
print(statsDdata)
try:
print('try to send UDP message')
print UDPSock.sendto(statsDdata,addr)
except:
print('UDP send failed')
pass
l=[]
idx=idx+1
egress()
except:
# print('loading browser failed')
# print time.time()
# print titl
errorReport(base,titl,'Start Browser Fail')
#print(statsDdata)
time.sleep(5)
pass
| 2.453125
| 2
|
Python/Stepik/Beginner/Exam-loops/review-9.py
|
SergeyOcheretenko/PythonLearning
| 0
|
12775995
|
count = 0
maximum = (-10) ** 9
for _ in range(4):
x = int(input())
if x % 2 == 1:
count += 1
if x > maximum:
maximum = x
if count > 0:
print(count)
print(maximum)
else:
print('NO')
| 3.390625
| 3
|
zipfileworker.py
|
Lazymindz/AzDevopsAPIWrapper
| 0
|
12775996
|
<reponame>Lazymindz/AzDevopsAPIWrapper
import logging
from logging import NullHandler
import zipfile
import os, os.path
from os import walk
import time
# Set default logging handler to avoid "No handler found" warnings.
logging.getLogger(__name__).addHandler(NullHandler())
def task_getfilenames(rootdir):
filenames = []
for dir in rootdir:
if len(dir[-1]) > 0:
for zipfilename in dir[-1]:
file = zipfile.ZipFile(os.path.join(dir[0], zipfilename))
filenames.append(file.namelist())
return filenames
def task_writeouput(list_filenames):
with open('./BuildOutput/results_output_{date}.txt'.format(date=time.strftime("%Y%m%d-%H")), 'w') as output:
for filetree in list_filenames:
for file in filetree[1:]:
output.write('{buildname}|{filepath}\n'.format(buildname=filetree[0], filepath= file))
def dojob(zippath):
#Task 1 get the root directory
rootdir = walk('./BuildArtifacts/')
if not os.path.exists("./BuildOutput/"):
os.makedirs("./BuildOutput/")
# Task 2 get filenames from Zip
list_filenames = task_getfilenames(rootdir)
# Task 3 : write the output data
task_writeouput(list_filenames)
| 2.328125
| 2
|
api/routes/guilds.py
|
vcokltfre/Raptor
| 3
|
12775997
|
<gh_stars>1-10
from fastapi import APIRouter, HTTPException
from tortoise.exceptions import DoesNotExist
from ..config import get_guild_config as get_config
from ..models import GuildConfigResponse
router = APIRouter(prefix="/guilds")
@router.get("/{id}/config")
async def get_guild_config(id: int) -> GuildConfigResponse:
try:
data = await get_config(id)
except (FileNotFoundError, DoesNotExist):
raise HTTPException(404, "Invalid guild.")
return GuildConfigResponse(config=data)
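# Example exchange against this router (assuming it is included at the app root):
#   GET /guilds/1234/config  ->  200 with {"config": ...}, or 404 "Invalid guild."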
| 2.3125
| 2
|
DebrisFromExercises/06/Assemble-py/Assemble_py.py
|
it-depends/CPSG-Nand2Tetris
| 0
|
12775998
|
<filename>DebrisFromExercises/06/Assemble-py/Assemble_py.py
import os
import re
import sys
symbolTable = {
"SP": 0,
"LCL": 1,
"ARG": 2,
"THIS": 3,
"THAT": 4,
"SCREEN": 16384,
"KBD": 24576,
"R0": 0,
"R1": 1,
"R2": 2,
"R3": 3,
"R4": 4,
"R5": 5,
"R6": 6,
"R7": 7,
"R8": 8,
"R9": 9,
"R10": 10,
"R11": 11,
"R12": 12,
"R13": 13,
"R14": 14,
"R15": 15,
}
computes = {
"0":"101010",
"1":"111111",
"-1":"111010",
"D":"001100",
"A":"110000",
"!D":"001101",
"!A":"110001",
"-D":"001111",
"-A":"110011",
"D+1":"011111",
"A+1":"110111",
"D-1":"001110",
"A-1":"110010",
"D+A":"000010",
"D-A":"010011",
"A-D":"000111",
"D&A":"000000",
"D|A":"010101"
}
jumps = {
"null":"000",
"JGT":"001",
"JEQ":"010",
"JGE":"011",
"JLT":"100",
"JNE":"101",
"JLE":"110",
"JMP":"111",
}
asmPath=sys.argv[1]
hackPath = re.sub("\.asm$", ".hack", sys.argv[1])
hackFile = open(hackPath, 'w')
commands = map(str.strip, open(asmPath).readlines())
commands = [command for command in commands if re.match("^(?!/)\S", command)]
references = set()
nextInstruction = 0
for command in commands:
if command[0] == "(":
symbolTable[command[1:-1]] = nextInstruction
continue
if re.match("^@\D", command):
references.add(command[1:])
nextInstruction += 1
nextVariable = 16
variables = references - set(symbolTable)
for variable in variables:
symbolTable[variable] = (nextVariable)
nextVariable += 1
for command in commands:
if command[0] == "(":
continue
if command[0] == "@":
address = int(command[1:]) if re.match("^@\d", command) else symbolTable[command[1:]]
hackFile.write("{0:b}".format(address).zfill(16) + '\n')
continue
dest = ""
jump = "null"
if "=" in command:
dest, compJump = command.split("=")
else:
dest = "000"
compJump = command
if ";" in compJump:
comp, jump = compJump.split(";")
else:
jump="null"
comp=compJump
instruction = "111" + \
("1" if "M" in comp else "0") + \
computes[comp.replace("M", "A")] + \
("1" if "A" in dest else "0") + \
("1" if "D" in dest else "0") + \
("1" if "M" in dest else "0") + \
jumps[jump]
    instruction = instruction.zfill(16)
hackFile.write(instruction + '\n')
hackFile.close()
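# Worked encoding examples derived from the tables above:
#   @21    -> 0000000000010101 (A-instruction: 21 in binary, zero-padded to 16 bits)
#   D=M+1  -> 111 1 110111 010 000 = 1111110111010000
#             (a-bit set because comp uses M, dest bits A/D/M = 0/1/0, jump = null)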
| 2.140625
| 2
|
demo/custom_extensions/cachebust_static_assets/main.py
|
uk-gov-mirror/LandRegistry.hmlr-design-system
| 6
|
12775999
|
import hashlib
import os
from flask import current_app, url_for
cache_busting_values = {}
class CachebustStaticAssets(object):
def __init__(self, app=None):
self.app = app
if app is not None:
self.init_app(app)
def init_app(self, app):
@app.context_processor
def override_url_for():
return dict(url_for=hashed_url_for)
def hashed_url_for(endpoint, **values):
"""Cachebusting
Use the md5 hash of the file on disk to perform cachebusting duties.
This forces browsers to download new versions of files when they change.
"""
if endpoint == "static":
filename = values.get("filename", None)
if filename:
file_path = os.path.join(current_app.root_path, current_app.static_folder, filename)
if os.path.isfile(file_path):
# Store the hashes in a dict so that on subsequent
# requests we don't have to md5 the file every time
cached_hash = cache_busting_values.get(file_path)
if cached_hash:
values["cache"] = cached_hash
else:
file_hash = md5_for_file(file_path, hexdigest=True)
cache_busting_values[file_path] = file_hash
values["cache"] = file_hash
return url_for(endpoint, **values)
def md5_for_file(path, block_size=256 * 128, hexdigest=False):
"""Calculate an md5 hash for a file
Block size directly depends on the block size of your filesystem
    to avoid performance issues
    Here the default block size is 256 * 128 = 32,768 octets per read
"""
md5 = hashlib.md5()
with open(path, "rb") as f:
for chunk in iter(lambda: f.read(block_size), b""):
md5.update(chunk)
if hexdigest:
return md5.hexdigest()
return md5.digest()
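# Usage sketch (minimal, assuming a standard Flask app; adjust the import to
# wherever this extension module actually lives):
#
#   from flask import Flask
#   app = Flask(__name__)
#   CachebustStaticAssets(app)
#   # url_for('static', filename='app.css') now carries ?cache=<md5 of the file>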
| 2.828125
| 3
|
test/test_commandmanager.py
|
lietu/twitch-bot
| 6
|
12776000
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
# coding: utf-8
# coding=utf-8
import os
import bot.commandmanager
from bot.chat import Chat
from unittest import TestCase
from mock import Mock
class FakeBot(object):
settings = None
def set_command(self, channel, command, want_user, user_level, code):
pass
class CommandManagerTest(TestCase):
UTILS = """
--- Convert vararg ... to a normal table
function table.pack(...)
return { n = select("#", ...), ... }
end
"""
def setUp(self):
os.environ["LUA_PATH"] = "lua/lib/?.lua;lua/lib/?/?.lua"
def test_functions(self):
chat = Chat(None, None)
chat.message = Mock()
cm = bot.commandmanager.CommandManager("#tmp", FakeBot(), chat=chat)
cm.load_lua(self.UTILS)
# Some test command definitions
def_commands = [
"-ul=user -a=value test_func return tonumber(value) + 1",
"-ul=reg --args=value test_func2 return __chat__test_func(value)"
" + 10",
"--args=... test_args local s = 0; for k, v in ipairs("
"{...}) do s = s + tonumber(v); end; return s"
]
for line in def_commands:
cm.add_command(line.split(" "))
retval = cm.run_command("username", "mod", "test_func2", ["10"],
threaded=False)
self.assertEquals(retval, 21)
retval = cm.run_command(
"username", "mod", "test_args", ["1", "2", "3"], threaded=False
)
self.assertEquals(retval, 6)
def test_simple_functions(self):
chat = Chat(None, None)
chat.message = Mock()
cm = bot.commandmanager.CommandManager("#tmp", FakeBot(), chat=chat)
cm.load_lua(self.UTILS)
# Some test command definitions
def_commands = [
"test_func Hello there, {user}",
"-ul=reg test_func2 Hello, {0}",
]
for line in def_commands:
cm.add_simple_command(line.split(" "))
retval = cm.run_command("username", "mod", "test_func", [],
threaded=False)
self.assertEquals(retval, "Hello there, username")
retval = cm.run_command(
"username", "reg", "test_func2", ["target"], threaded=False
)
self.assertEquals(retval, "Hello, target")
self.assertRaises(
bot.commandmanager.CommandPermissionError,
cm.run_command,
"username",
"user",
"test_func2"
)
def test_want_user(self):
chat = Chat(None, None)
chat.message = Mock()
cm = bot.commandmanager.CommandManager("#tmp", FakeBot(), chat=chat)
cm.load_lua(self.UTILS)
# Some test command definitions
line = "-w test return user"
cm.add_command(line.split(" "))
retval = cm.run_command("fakeuser", "mod", "test", threaded=False)
self.assertEquals(retval, "fakeuser")
def test_quoted(self):
chat = Chat(None, None)
chat.message = Mock()
cm = bot.commandmanager.CommandManager("#tmp", FakeBot(), chat=chat)
cm.load_lua(self.UTILS)
# Some test command definitions
def_commands = [
"-q -a=name,job test_quoted return name .. ': ' .. job",
"-a=name,job test_not_quoted return name .. ': ' .. job",
]
for line in def_commands:
cm.add_command(line.split(" "))
args = '"<NAME>" "<NAME>"'.split(" ")
retval = cm.run_command("fakeuser", "mod", "test_quoted", args,
threaded=False)
self.assertEquals(retval, "<NAME>: <NAME>")
retval = cm.run_command("fakeuser", "mod", "test_not_quoted", args,
threaded=False)
self.assertEquals(retval, '"John: Doe"')
def test_cooldown(self):
chat = Chat(None, None)
chat.message = Mock()
cm = bot.commandmanager.CommandManager("#tmp", FakeBot(), chat=chat)
cm.load_lua(self.UTILS)
# Some test command definitions
def_commands = [
"-c=5 cd_test Cooldown test",
]
for line in def_commands:
cm.add_simple_command(line.split(" "))
def run_cmd(timestamp):
def _run():
return cm.run_command(
"username", "mod", "cd_test", [], timestamp=timestamp,
threaded=False
)
return _run
retval = run_cmd(1)()
self.assertEquals(retval, "Cooldown test")
self.assertRaises(
bot.commandmanager.CommandCooldownError, run_cmd(2)
)
retval = run_cmd(6)()
self.assertEquals(retval, "Cooldown test")
def test_permissions(self):
chat = Chat(None, None)
chat.message = Mock()
cm = bot.commandmanager.CommandManager("#tmp", FakeBot(),
chat=chat)
cm.load_lua(self.UTILS)
# Some test command definitions
def_commands = [
"-ul=owner owner_func return 0",
"-ul=mod mod_func return 1",
"-ul=reg reg_func return 2",
"-ul=user user_func return 3"
]
for line in def_commands:
cm.add_command(line.split(" "))
# owner_func
self.assertRaises(
bot.commandmanager.CommandPermissionError,
cm.run_command,
"username",
"mod",
"owner_func"
)
cm.run_command("username", "owner", "owner_func", threaded=False)
chat.message.assert_called_with(0)
# mod_func
self.assertRaises(
bot.commandmanager.CommandPermissionError,
cm.run_command,
"username",
"reg",
"mod_func"
)
cm.run_command("username", "mod", "mod_func", threaded=False)
chat.message.assert_called_with(1)
# reg_func
self.assertRaises(
bot.commandmanager.CommandPermissionError,
cm.run_command,
"username",
"user",
"reg_func"
)
cm.run_command("username", "reg", "reg_func", threaded=False)
chat.message.assert_called_with(2)
cm.run_command("username", "owner", "reg_func", threaded=False)
chat.message.assert_called_with(2)
# user_func
cm.run_command("username", "user", "user_func", threaded=False)
chat.message.assert_called_with(3)
def test_unicode(self):
chat = Chat(None, None)
chat.message = Mock()
cm = bot.commandmanager.CommandManager("#tmp", FakeBot(), chat=chat)
cm.load_lua(self.UTILS)
line = u"test ヽ༼ຈل͜ຈ༽ノ AMENO ヽ༼ຈل͜ຈ༽ノ"
cm.add_simple_command(line.split(" "))
cm.run_command("username", "mod", "test", threaded=False)
chat.message.assert_called_with(u"ヽ༼ຈل͜ຈ༽ノ AMENO ヽ༼ຈل͜ຈ༽ノ")
| 2.640625
| 3
|
Python/neon_numbers.py
|
MjCode01/DS-Algo-Point
| 1,148
|
12776001
|
# Neon number --> If the sum of the digits of the squared number is equal to the original number, the number is said to be a Neon number. Example: 9
ch=int(input("Enter 1 to do it with loop and 2 without loop :\n"))
n= int(input("Enter the number :\n"))
def number(n):
sq= n**2
digisum=0
while sq>0:
r=sq%10
digisum = digisum + r
sq=sq//10
if (n==digisum):
print("The number is neon number")
else:
print("Not a neon mumber")
# Without Loop
def number2(n):
sq=n*n
r=sq%10
q=sq//10
tocheck=r+q
if n==tocheck:
print("It is a Neon Number")
else:
print("Not a neon number")
if ch==1:
number(n)
elif ch==2:
number2(n)
else:
print("Enter correct choice")
"""
Time complexity - O(1)
Space complexity - O(1)
I/o--
Enter 1 to do it with loop and 2 without loop :
2
Enter the number :
9
It is a Neon Number
Explanation
Input n: 9
sq=81
r=1
q=8
tocheck=8+1 =>9
Output
if 9 == 9 ==> Neon number
"""
| 4.28125
| 4
|
valheim_server/log_dog.py
|
wchesley/discord_bot.py
| 0
|
12776002
|
<filename>valheim_server/log_dog.py
## TODO:
# set log file location (Config.json?)
# Read file up to present
# Read any new lines written to file
# Pass information from within log files to log_parser.py
# avoid duplicates?
import os
import discord
import logging
import time
import asyncio
import random
import json
import steam.webapi
from utils import default, http
from data.mongoDB import MongoDB_Context
from .log_parser import LogLine
from datetime import datetime
# from . import steam_api
SLEEP_SECONDS = 1
class ValheimLogDog:
def __init__(self, bot):
self.config = default.config()
self.bot = bot
self.data = {
'SteamID':'',
'SteamName':'',
'ZDOID':'',
'steam_login_time':'',
'ZDOID_login_time':'',
'online':False,
}
async def start(self):
default.s_print(f"Fetching log file at: {self.config['log_file']}")
while True:
with open(self.config['log_file'], 'r', os.O_NONBLOCK) as log_file:
default.s_print(f'opened context for log file at: {log_file}')
async for new_lines in self.line_watcher(log_file):
default.s_print(f'Processing the lines found...')
new_lines = self.filter_lines(new_lines)
default.s_print(f'\t> Processed lines: {new_lines}')
for line in new_lines:
#parse lines read here:
default.s_print(f'OG Line: {line}')
log_line = LogLine.remove_text_inside_brackets(line)
default.s_print(f'log_line?: {log_line}')
date, message = LogLine.remove_date(log_line)
default.s_print(f'DATE: {date}\nMESSAGE: {message}')
await self.extract_log_parts(message, date)
default.s_print(f'****Processed log lines COMPLETE*****\nBEGIN DATA CHECK:')
if self.data['steam_login_time'] and self.data['ZDOID_login_time']:
default.s_print('Have login times! ')
if self.compare_login_time(self.data['steam_login_time'], self.data['ZDOID_login_time']):
default.s_print('login times are within two minutes, close enough for a match!')
MongoDB_Context.update_player(self.data)
default.s_print(f'data obj with player: {self.data}')
default.s_print(f'added or updated player!')
self.clear_data()
default.s_print(f'data cleared')
else:
default.s_print(f'Times arent close enough')
else:
default.s_print(f'do not have one or the other login time. ')
default.s_print('closing log file')
log_file.close()
async def line_watcher(self, file):
"""Generator function that returns the new line entered."""
file.seek(0, os.SEEK_END)
while True:
# Reads last line
new_lines = file.readlines()
# sleep if file hasn't been updated
if not new_lines:
# default.s_print(f'No new lines. Sleeping {SLEEP_SECONDS}') # Too spammy for testing
#time.sleep(SLEEP_SECONDS) # sleep handled by asyncio event loop in /cogs/valheim_log_cog.py
await asyncio.sleep(SLEEP_SECONDS)
continue
default.s_print('New line(s) found!')
for l in new_lines:
default.s_print('\t> {}'.format(l.replace('\n', '')))
yield new_lines
def filter_lines(self, lines):
"""Filters the log lines to only return the ones that have actual values."""
return [l for l in lines if l != '\n' and len(l) > 1]
async def extract_log_parts(self, message, date):
"""Get the goods from the valheim log message"""
# Trailing space on the end is intentional, needed to remove that part of the log message
# Return messages are used to verify data in the tests
steam_connect_msg = 'Got connection SteamID '
zDOID_connect = 'Got character ZDOID from '
current_connections = 'Connections'
disconnect = "Closing Socket "
default.s_print(f'message: {message}')
default.s_print(f'Date: {date}')
# if/elif block to determine what we found in log message:
if steam_connect_msg in message:
self.data['SteamID'] = message.replace(steam_connect_msg, '')
self.data['steam_login_time'] = date
steam_name = 'no async error maybe?'
try:
steam_name = self.get_steam_persona(self.data['SteamID'])
default.s_print(f'RECEIVED: {steam_name} FROM STEAM')
except Exception as e:
default.s_print(f'ASYNC ERROR: {e}')
return self.data['SteamID']
elif zDOID_connect in message:
# Death message: Got character ZDOID from Bytes : 0:0
if message[-1] == "0":
split = message.split(' ')
toon = split[4] # Should be ZDOID (in game toon name)
# Don't want to update database while testing...
new_death_count = MongoDB_Context.update_death_count()
default.s_print(f'new death count: {new_death_count}')
death_event = 'no async error maybe?'
try:
MongoDB_Context.update_player_death_count(toon)
# object NoneType can't be used in 'await' expression:
#await self.bot.dispatch('on_death', new_death_count, toon) ## Emmit death event: Not working atm? Dunnow why?
#
await self.manual_on_death_event(toon, new_death_count)
except Exception as e:
default.s_print(f'ASYNC ERROR: {e}')
return f'{toon} death!'
else:
full_message = message.replace(zDOID_connect,'')
full_message = full_message.split(' ')
self.data['ZDOID'] = full_message[0]
self.data['ZDOID_login_time'] = date
return self.data['ZDOID']
elif current_connections in message:
default.s_print(f'current connections: {message}')
connections = message.split(' ')
# log message should look like: 'Connections 1 ZDOS:130588 sent:0 recv:422'
return connections[1]
elif disconnect in message:
default.s_print(f'Disconnect message received!: {message}')
disconnection = message.replace(disconnect,'') # Should be steamID of player who disconnected
MongoDB_Context.player_disconnect(disconnection)
return disconnection
def compare_login_time(self, steam_login_time, zdoid_login_time):
# TODO: Convert strings to datetime objects here:
# Reasoning: It's easier to deal with strings, only need actual datetime objects for this calculation:
default.s_print(f'*****Comparing: {steam_login_time} to {zdoid_login_time}')
steam_dto = ''
zdoid_dto = ''
try:
steam_dto = datetime.strptime(steam_login_time, "%m/%d/%Y %H:%M:%S")
except Exception as e:
default.s_print(f'could not parse steam login time:{steam_login_time}\nERROR: {e}')
return False
try:
zdoid_dto = datetime.strptime(zdoid_login_time, "%m/%d/%Y %H:%M:%S")
except Exception as e:
default.s_print(f'could not parse zdoid login time:{zdoid_login_time}\nERROR: {e}')
return False
if isinstance(steam_dto, str) or isinstance(zdoid_dto, str):
default.s_print(f'Someone was a string? ')
return False
else:
time_diff = steam_dto - zdoid_dto
time_diff = abs(time_diff.total_seconds()) # steam login comes first, value SHOULD be negative.
if time_diff < (60 * 2): # 5 minute timeout
self.data['steam_login_time'] = steam_dto
return True
else:
return False
def get_steam_persona(self, steamID):
# More import errors I don't feel like debugging anymore:
        # Instead I'll just utilize the http class and make requests the old-fashioned way
# SteamWebAPI: https://developer.valvesoftware.com/wiki/Steam_Web_API
# try:
# self.data['SteamName'] = SteamIDToVanityName(steamID)
# default.s_print(f"Found Steam Name: {self.data['SteamName']}")
# except Exception as e:
# default.s_print(f'Error getting Steam Name: {e}')
steam_api = steam.webapi.WebAPI(key=self.config['steam_api_key'])
default.s_print(f'Getting steam name for {steamID}')
        if steamID is None or steamID == " ":
steamID = self.data['SteamID']
steam_url = f"http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/?key={self.config['steam_api_key']}&steamids={steamID}"
try:
# response = await http.get(steam_url)
response = steam_api.call('ISteamUser.GetPlayerSummaries',steamids=steamID)
default.s_print(f'RESPONSE OBJECT: {response}')
#response = json.loads(response)
first_player = response['response']['players'][0]
self.data['SteamName'] = first_player['personaname']
except Exception as e:
default.s_print(f'Error {e}')
default.s_print(f'SteamName?: {self.data["SteamName"]}')
return self.data['SteamName']
async def manual_on_death_event(self, player_name, death_count):
""" Announce death of player in valheim Server """
death_message = [
"was squased by a troll",
"fell victim to gredwarves",
"ascended to the 10th dead world",
"was fondled by greylings",
"took a deathsquito from behind",
"was collected by the Valkyrie",
"failed Odin's test",
"In Soviet Russia, tree fell you!"
]
rng_death_msg = random.choice(death_message)
# Knights of Ni Bot Spam Channel ID: 831250902470885406
default.s_print(f'MANUAL Death event for {player_name} {rng_death_msg}')
bot_spam = self.bot.get_channel(831250902470885406)
await bot_spam.send(f'RIP {player_name} {rng_death_msg}\nTotal Vikings lost: {death_count}')
def clear_data(self):
""" Clear out self.data store after flushing data to DB """
self.data = {
'SteamID':'',
'SteamName':'',
'ZDOID':'',
'steam_login_time':'',
'ZDOID_login_time':'',
'online':False,
}
default.s_print(f'data obj: {self.data}')
| 2.546875
| 3
|
src/predict.py
|
HariWu1995/miRACL
| 0
|
12776003
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
# os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import json
import time
import argparse
from pathlib import Path
import random
import numpy as np
import tensorflow as tf
tf.autograph.set_verbosity(3) # 0: debug, 1: info, 2: warning, 3: error
from src.models.encoder import Encoder
from src.models.RACL import RACL
from src.utils import (
load_config,
split_documents, read_data, reverse_unk,
decode_results, format_results, dict2html
)
def load_basic_arguments(parser):
# Define arguments
parser.add_argument('--model', default='racl', type=str, help='model name')
parser.add_argument('--max_sentence_len', default=156, type=int, help='maximum number of words in sentence')
parser.add_argument('--embedding_dim', default=768, type=int, help='embedding dimension')
parser.add_argument('--n_interactions', default=6, type=int, help='number of RACL blocks to interact')
parser.add_argument('--n_filters', default=96, type=int, help='number of filters in convolution')
parser.add_argument('--kernel_size', default=11, type=int, help='kernel size in convolution')
parser.add_argument('--random_seed', default=4_10_20, type=int, help='random seed')
parser.add_argument('--include_opinion', default=True, type=bool, help='whether to use opinion for model')
parser.add_argument('--random_type', default='normal', type=str, help='random type: uniform or normal (default)')
parser.add_argument('--ckpt', default=798, type=int, help='checkpoint id to load weights')
opt = parser.parse_args()
opt.n_classes = 3
opt.is_training = False
opt.is_evaluating = False
opt.label_smoothing = False
opt.keep_prob_1, opt.keep_prob_2 = 1., 1.
random.seed(opt.random_seed)
np.random.seed(opt.random_seed)
tf.random.set_seed(opt.random_seed)
return opt
# Samples for prediction
documents = [
# 'dessert was also to die for',
# 'sushi so fresh that it crunches in your mouth',
# 'in fact , this was not a nicoise salad and was barely eatable',
# "the two waitress 's looked like they had been sucking on lemons",
"the absence of halal food - not even for room service",
"la foresto de halalaj manĝaĵoj - eĉ ne por ĉambroservo",
"عدم وجود الطعام الحلال - ولا حتى لخدمة الغرف",
"អវត្ដមាននៃអាហារហាឡាល់ - មិនសូម្បីតែសម្រាប់សេវាកម្មបន្ទប់",
"ການຂາດອາຫານຮາລານ - ບໍ່ແມ່ນແຕ່ ສຳ ລັບການບໍລິການຫ້ອງ",
"халал тағамның болмауы - тіпті бөлме қызметтері үшін де емес",
"отсутствие халяльной еды - даже для обслуживания номеров",
"die afwesigheid van halal-kos - nie eens vir kamerdiens nie",
"l'assenza di cibo halal - nemmeno per il servizio in camera",
"ハラルフードがない-ルームサービスでもない",
"할랄 음식의 부재-룸 서비스조차도",
"la ausencia de comida halal, ni siquiera para el servicio de habitaciones",
"sự vắng mặt của thức ăn halal - thậm chí không có dịch vụ ăn uống tại phòng",
# "Have to travel out in order to get food",
# "Smell of the pillows... smelt like someone odour",
# " Very noisy outside the room, found a cockroaches in bathroom, the condition did not works whole nights, very hot can't sleep",
# "I had to stay here due to holiday inn transferring me here because they were closed for renovations. First I am pist because this hotel stinks of weed, my room was not very clean and due to Covid you would think the room would be super clean but nope wrappers all over the place towels had stains, to top it off I even found bugs in my room. I am disgusted. The service is horrible. “There was never a manager on duty” I even reached out to them in email and still no reply from them so they clearly don’t care. Avoid this hotel there are so many other options by the airport that this one poor excuse for cleanliness and bugs they do not deserve a dime. They don’t fix their problems and a manager is never reachable",
# "First impression is the hotel seem to be in need of an upgrade. The grounds did not feel welcoming on the exterior. The interior had carpet coming up in the hallway, I was on the third floor. It had a bad smell that hits you in the face as soon as you get off the elevator. The rooms was decent with a nice size television, desk and a refrigerator but lacked cleanliness. We couldn't shower because the tubes were GROSS. It looked as if it hadn't been properly cleaned for months! You can see the filth buildup YUCK! This is very concerning considering the month I traveled was during the covid-19 pandemic. If this hotel is not properly cleaning guest rooms than are they really practicing safe measures during a global coronavirus pandemic?",
# "Small rooms, restaurant offers the best of microwaved food and wifi is poor. Staff set engaged, but this establishment needs investment and attention to the the customer experience. Plenty of examples where the site could use a goos cleaning - including the restaurant.",
# "I had a horrible check-in experience at this crown plaza. The manager at night shift was exceptionally rude. Just because it was night and I was tired, I stayed there. I checked out next day and went to The Renaissance across the street.",
# "DIRTY FILTHY DISGUSTING!!! Hair and mold in the bathroom, DIRTY carpeting, smells of cigarette smoke and my daughter woke up with bug bites all over her legs!!! Front desk was an absolute joke! Unprofessional rude and lazy!! Travelers BEWARE!!",
# "Called to say my flight is cancelled because of weather ,can you change to next day or refund.before I could complete the sentence they cancelled my reservation and hung up.i know the hotel room was given to somebody else.i cannot believe the service was from very reputable company like yours",
# "The value for the room and the service was very good but the Furnishings in the room is very outdated and more out. The carpet has been replaced and the linen and the bathtub was spotless. Restaurant bar",
# "The Crowne Plaza is located near the newark airport. The hotel offers a transfer ( i got it on my way back). The rooms are small but the bed is very comfortable. Bathroom regular. Also offers a transfer to the outlet nearby but only in 2 specific times a day.",
# "We stayed one night (thankfully) as there was a lot of noise from airplanes taking off and landing and from traffic on the road nearby. The room was very nice with comfortable bed. The shower was over the bath",
# "I visited this hotel with 6 family members in jan 2020. we reached jetlagged early in the morning to be greeted by an extremely rude lady whose name started with Q. I saw her even mocking a few clients. Rooms were clean. Sleep quality was nice Not many eating options around hotel for breakfast, except the hotel itself. In evening one can walk out towards quay and be delighted with so many restaurants. over all a an average hotel BUT the RUDEST STAFF i have ever seen. STAY AWAY IF YOU ANYOTHER OPTION.",
# "Hotel was very crowded and so called club lounge was so crowded that we couldn't use 20 minute wait for breakfast in main restaurant Hotel room small and basic - not luxury Pool good and hotel location excellent",
# "The hotel is actually <NAME> not <NAME> as the name claims. I had booked a room with a king size bed but they could only give me twin beds on the first night so I had to move rooms on the second day. All of the rooms I saw were tired with very bland decor and badly in need of a refresh. I also experienced a lot of noise from neighbouring rooms",
# "I do no understand why you are charging me USD 100 (66% of original room charge) because I have Netherlands nationality but booked my room stating my residential address in Thailand, where I have lived for the last 13 years",
# "Check in was appalling ! Checked into a deluxe room but was given two single beds!! Went downstairs to speak to reception and they told me only room they have is a smoking room which was not practical!!! Then had to sleep there and next day await a room change!!! Which was chased by us as no one remembered the next day!!",
# "I would not recommend this hotel, it is seriously understaffed the restaurant is small for the size of the hotel which results in the tables being too close together. The restaurant staff tried their best but there just weren't enough of them",
# "nice bar and front desk staff members happy faces they made me feel like a vip. update! hotel is dark and old. bathroom was tiny, dark and poor design. elevator was slow. hotel facilities and staff were excellent",
]
def predict(parser, args):
"""
Predict from command line and return response output as html + json
Parameters
----------
args :
args.config_path : str
path to config yml e.g. /production/model_config.yml
args.log_level: str
'debug', 'info', or 'warning' level for root logger and all handlers
"""
config = load_config(Path(args.config_path))
opt = load_basic_arguments(parser)
for key, value in config["model_params"].items():
print(f"Key: {key} - Value: {value}")
        setattr(opt, key, value)  # copy each model parameter onto opt
# Define useful directories
predicts_dir = config["paths"]["predictions"]
artefacts_dir = config["paths"]["artefacts"]
checkpoint_dir = config["paths"]["checkpoint"]
opt.ckpt_path = os.path.join(checkpoint_dir, f"RACL-epoch={opt.ckpt:03d}.h5")
# Split document into sentences
sentences, sent2doc = split_documents(documents)
opt.batch_size = len(sentences)
# Load Tokenizer and Encoder
print(f"\n\n\nLoading Encoder ...")
sbert_version = 'distilUSE'
sbert_dir = os.path.join(artefacts_dir, sbert_version)
encoder = Encoder(sbert_dir)
# Tokenize
start_time = time.time()
embeddings, sentences_mask, position_matrices, tokens_in_doc = read_data(sentences, opt, encoder)
embeddings = np.reshape(embeddings, (opt.batch_size, opt.max_sentence_len, opt.embedding_dim))
tokens_in_doc = reverse_unk(tokens_in_doc, sentences)
end_time = time.time()
time_running = end_time - start_time
run_time = f'\n\n\nTokenize {len(sentences)} samples in {time_running:.2f}s'
print(run_time)
# Load model
model = RACL(opt)
model.load_weights(opt.ckpt_path)
# Predict
start_time = time.time()
aspect_probs, opinion_probs, sentiment_probs = model.predict(
sentence=embeddings,
word_mask=sentences_mask.reshape((opt.batch_size, opt.max_sentence_len)),
position_att=position_matrices.reshape((opt.batch_size, opt.max_sentence_len, opt.max_sentence_len))
)
end_time = time.time()
time_running = end_time - start_time
run_time = f'\n\n\nPredict {len(sentences)} samples in {time_running:.2f}s'
print(run_time)
# Feed results into DataFrame
results_df = decode_results(tokens_in_doc, sent2doc,
aspect_probs, opinion_probs, sentiment_probs)
# Write logs
output_file = os.path.join(predicts_dir, f'case_study_{opt.task}')
print(f'\n\nWriting result to \n\t{output_file}.json\n\t{output_file}.html ...')
doc_results = format_results(results_df)
with open(output_file+'.json', 'w') as f_writer:
json.dump(doc_results, f_writer, indent=4)
dict2html(doc_results, output_file+'.html')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Model Prediction')
parser.add_argument('-c', '--config-path', default='production/model_config.yml', type=str, help='Config path')
args, unk_args = parser.parse_known_args()
predict(parser, args)
##########################################
# Execution Time on Local Machine:      #
# Tokenize 13 samples in 0.22s #
# Predict 13 samples in 2.27s #
##########################################
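# A minimal usage sketch (assumption: this script is saved as predict.py; the
# config path below matches the argument default above):
#
#   python predict.py --config-path production/model_config.yml
#
# This writes case_study_<task>.json and case_study_<task>.html into the
# predictions directory configured in the yml file.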
| 2.03125
| 2
|
passwordgenerator/app.py
|
aminbeigi/Password-Generator-Rest-API
| 0
|
12776004
|
<reponame>aminbeigi/Password-Generator-Rest-API
from flask import Flask, request
from flask_restful import Api, Resource
from .data_generator import DataGenerator
from webargs import fields, validate
from webargs.flaskparser import use_args, use_kwargs, parser, abort
"""The Password-Generator Restful API
This API will take in words as a parameter or will randomly generate them and
a response limit or will be defaulted to DEFAULT_LIMIT. The response will have
the given paramters, related words and a string of text created by concatenating
and altering the related words to make them appear more cryptic.
"""
app = Flask(__name__)
api = Api(app)
data_generator = DataGenerator()
DEFAULT_LIMIT = 5
API_RESPONSE_LIMIT = 20
ERROR_MESSAGE_422 = f"input is greater than the maximum {API_RESPONSE_LIMIT}"
class RandomPassword(Resource):
args = {
'limit': fields.Int(required=False)
}
@use_args(args, location="query")
def get(self, args):
# if user didn't specify limit default to DEFAULT_LIMIT
limit = DEFAULT_LIMIT if 'limit' not in args else args['limit']
if limit > API_RESPONSE_LIMIT:
abort(422, message = ERROR_MESSAGE_422)
data = data_generator.generate_random(limit)
return data
class CustomPassword(Resource):
args = {
'words': fields.List(fields.Str()),
'limit': fields.Int(required=False)
}
@use_args(args, location="query")
def get(self, args):
# user went to ./api/password
if 'words' not in args:
word_lst = ['cat', 'dog', 'mouse']
else:
word_lst = args['words']
# if user didn't specify limit default to DEFAULT_LIMIT
limit = DEFAULT_LIMIT if 'limit' not in args else args['limit']
if limit > API_RESPONSE_LIMIT:
abort(422, message = ERROR_MESSAGE_422)
data = data_generator.generate_custom(word_lst, limit)
return data
# This error handler is necessary for usage with Flask-RESTful
@parser.error_handler
def handle_request_parsing_error(err, req, schema, *, error_status_code, error_headers):
"""webargs error handler that uses Flask-RESTful's abort function to return
a JSON error response to the client.
"""
abort(error_status_code, errors=err.messages)
# create endpoints
api.add_resource(RandomPassword, '/api/password/random')
api.add_resource(CustomPassword, '/api/password')
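# A minimal usage sketch (assumption: the app is served locally on port 5000,
# e.g. via `flask run`; the endpoint paths are the ones registered above):
#
#   GET http://localhost:5000/api/password/random?limit=3
#       -> 3 randomly seeded password suggestions
#   GET http://localhost:5000/api/password?words=cat&words=dog&limit=5
#       -> up to 5 suggestions built from the words "cat" and "dog"
#
# Requests with limit > API_RESPONSE_LIMIT (20) are rejected with a 422 error.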
| 3.21875
| 3
|
tf/begin.py
|
rishuatgithub/MLPy
| 0
|
12776005
|
<reponame>rishuatgithub/MLPy
# Tensor Flow basic - <NAME>
#import the tf canonical lib
import tensorflow as tf
#from __future__ import print_function
#A computational graph is a series of TensorFlow operations arranged into a graph of nodes.
#Let's build a simple computational graph. Each node takes zero or more tensors as inputs and produces a tensor as an output.
#One type of node is a constant. Like all TensorFlow constants, it takes no inputs, and it outputs a value it stores internally.
node1 = tf.constant(3.0, dtype=tf.float32)
node2 = tf.constant(4.0) # also tf.float32 implicitly
print(node1, node2)
# creating a session and printing the computational nodes
sess = tf.Session()
print(sess.run([node1, node2]))
# adding two nodes
node3 = tf.add(node1, node2)
print("node3:", node3)
print("sess.run(node3):", sess.run(node3))
# Using placeholder
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
adder_node = a + b # + provides a shortcut for tf.add(a, b)
# lambda function sort of output using placeholder
print(sess.run(adder_node, {a: 3, b: 4.5}))
print(sess.run(adder_node, {a: [1, 3], b: [2, 4]}))
# More trivial operations using placeholder
add_and_triple = adder_node * 3.
print(sess.run(add_and_triple, {a: 3, b: 4.5}))
# variables
W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)
x = tf.placeholder(tf.float32)
linear_model = W*x + b
# initialize the variables. Without which the variables will not be executed
init = tf.global_variables_initializer()
sess.run(init) #runs the Session
#Since x is a placeholder, we can evaluate linear_model for several values of x simultaneously
print(sess.run(linear_model, {x: [1, 2, 3, 4]}))
# calculate the loss in linear_model - reduce_sum
y = tf.placeholder(tf.float32)
squared_deltas = tf.square(linear_model - y)
loss = tf.reduce_sum(squared_deltas)
print(sess.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))
# Adjust the variables to have a Zero loss
fixW = tf.assign(W, [-1.])
fixb = tf.assign(b, [1.])
sess.run([fixW, fixb])
print(sess.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))
# Gradient descent using TF
# TensorFlow provides optimizers that slowly change each variable
# in order to minimize the loss function. The simplest optimizer is
# gradient descent
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)
sess.run(init) # reset values to incorrect defaults.
for i in range(1000):
sess.run(train, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]})
print(sess.run([W, b]))
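# A short follow-up sketch (not part of the original walkthrough): after the
# 1000 gradient-descent steps above, W should be close to -1 and b close to 1,
# so the loss on the training data is near zero.
curr_W, curr_b, curr_loss = sess.run([W, b, loss],
                                     {x: [1, 2, 3, 4], y: [0, -1, -2, -3]})
print("W: %s b: %s loss: %s" % (curr_W, curr_b, curr_loss))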
| 4.03125
| 4
|
medium/Palindrome Partitioning/palindrome.py
|
yujiecong/LeetCode-learning
| 0
|
12776006
|
class Solution(object):
def partition(self, s):
self.isPalindrome = lambda s : s == s[::-1]
res = []
self.backtrack(s, res, [])
return res
def backtrack(self, s, res, path):
print(path)
        if not s:  # empty string: every character has been consumed into path
            res.append(path)
            return
        for i in range(1, len(s) + 1):  # note the start and end positions of the slice
            if self.isPalindrome(s[:i]):  # recurse only when the prefix is a palindrome
                self.backtrack(s[i:], res, path + [s[:i]])
Solution().partition("aab")
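# Expected behaviour (a brief usage note): Solution().partition("aab") returns
# [['a', 'a', 'b'], ['aa', 'b']], i.e. every way to split "aab" into
# palindromic substrings; the print(path) call above only traces the recursion.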
| 3.328125
| 3
|
gb_chat/common/thread_executor.py
|
Cerzon/gb_chat
| 0
|
12776007
|
from queue import Empty, SimpleQueue
from typing import Any, Callable, Optional, cast
from PyQt5.QtCore import QEvent, QObject
from PyQt5.QtWidgets import QApplication
from ..log import get_logger
Function = Callable[[], None]
class IoThreadExecutor:
def __init__(self) -> None:
self._queue: SimpleQueue[Function] = SimpleQueue()
self._logger: Any = get_logger("IoThreadExecutor")
def schedule(self, fun: Function) -> None:
self._queue.put(fun)
self._logger.debug("Schedule task", qsize=self._queue.qsize())
def execute_all(self) -> None:
try:
while True:
fun = self._queue.get_nowait()
self._logger.debug(
"Execute task",
qsize=self._queue.qsize(),
)
fun()
except Empty:
return
class _FunctionEvent(QEvent):
EVENT_TYPE: QEvent.Type = QEvent.Type.User
def __init__(self, fun: Function) -> None:
super().__init__(self.EVENT_TYPE)
self.fun = fun
class UiThreadExecutor(QObject):
def __init__(self, app: QApplication) -> None:
super().__init__(parent=None)
self._app = app
self._logger: Any = get_logger("UiThreadExecutor")
def schedule(self, fun: Function) -> None:
self._logger.debug("Schedule task")
self._app.postEvent(self, _FunctionEvent(fun))
def event(self, e: QEvent) -> bool:
if e.type() != _FunctionEvent.EVENT_TYPE:
return super().event(e)
fun_event = cast(_FunctionEvent, e)
self._logger.debug("Execute task")
fun_event.fun()
return True
| 2.375
| 2
|
sitenco/config/code_browser.py
|
Kozea/sitenco
| 3
|
12776008
|
<reponame>Kozea/sitenco
"""
Code browser tools.
"""
import abc
from docutils import nodes
from flask import request
from .tool import Tool, Role, Directive
class CodeBrowser(Tool):
"""Abstract class for code browser tools."""
__metaclass__ = abc.ABCMeta
def __init__(self, project_name, ribbon=None):
self.project_name = project_name
self.ribbon = ribbon
super(CodeBrowser, self).__init__()
def update(self):
"""Nothing has to be done to update code browser tools."""
@abc.abstractproperty
def base_url(self):
"""Base URL of the code browser service."""
raise NotImplementedError
@property
def code_link(self):
"""Link to the code browser interface."""
return self.base_url + self.project_name
class Github(CodeBrowser):
"""GitHub code browser tool."""
base_url = 'https://github.com/'
class Gitorious(CodeBrowser):
"""Gitorious code browser tool."""
base_url = 'https://gitorious.org/'
class Redmine(CodeBrowser):
"""Redmine code browser tool."""
def __init__(self, project_name, base_url):
super(Redmine, self).__init__(project_name)
self._base_url = base_url
@property
def base_url(self):
return self._base_url
class CodeLink(Role):
"""Link tag to the code browser."""
def run(self, name, rawtext, text, lineno, inliner, options=None,
content=None):
return [nodes.reference('', text, refuri=self.tool.code_link)], []
class Editable(Directive):
"""Add a link to page source."""
def run(self):
# TODO: fix the link for code browsers other than GitHub
content = (
'<aside class="editable">'
'<a id="editable" title="Edit this page" href="%s">'
'Edit this page</a></aside>' % (
self.tool.code_link + '/tree/website/pages/' +
request.path.strip('/') + '.rst'))
return [nodes.raw('', content, format='html')]
| 2.421875
| 2
|
components/reports/report_list/report_list.py
|
Sitelink3D-v2-Developer/sitelink3dv2-examples
| 1
|
12776009
|
<reponame>Sitelink3D-v2-Developer/sitelink3dv2-examples<filename>components/reports/report_list/report_list.py
#!/usr/bin/python
import argparse
import json
import logging
import os
import sys
import requests
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..", "tokens"))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..", "utils"))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..", "metadata", "metadata_list"))
from get_token import *
from utils import *
from args import *
from metadata_list import *
session = requests.Session()
def main():
# >> Arguments
arg_parser = argparse.ArgumentParser(description="Report Listing")
# script parameters:
arg_parser = add_arguments_logging(arg_parser, logging.INFO)
# server parameters:
arg_parser = add_arguments_environment(arg_parser)
arg_parser = add_arguments_auth(arg_parser)
arg_parser = add_arguments_pagination(arg_parser)
# request parameters:
arg_parser.add_argument("--site_id", default="", help="Site Identifier", required=True)
arg_parser.set_defaults()
args = arg_parser.parse_args()
logging.basicConfig(format=args.log_format, level=args.log_level)
# << Arguments
server = ServerConfig(a_environment=args.env, a_data_center=args.dc)
logging.info("Running {0} for server={1} dc={2} site={3}".format(os.path.basename(os.path.realpath(__file__)), server.to_url(), args.dc, args.site_id))
headers = headers_from_jwt_or_oauth(a_jwt=args.jwt, a_client_id=args.oauth_id, a_client_secret=args.oauth_secret, a_scope=args.oauth_scope, a_server_config=server)
report_list_url = "{0}/reporting/v1/{1}/longterms/?order=issued_at&filter=e30".format(server.to_url(), args.site_id)
response = session.get(report_list_url, headers=headers)
response.raise_for_status()
report_list = response.json()
logging.info(json.dumps(report_list, indent=4))
if __name__ == "__main__":
main()
| 2.453125
| 2
|
ADE-20k_Dataset/Scripts/Script1_anotaions_to_custom_anotations.py
|
bilals08/F-20-09-R-BA
| 0
|
12776010
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 29 17:41:44 2020
@author: salman
"""
from PIL import Image
import pandas as pd
import numpy as np
import cv2
import os
d={}
data = pd.read_csv('E:\\fyp data\\ADEK-20\\new_se_new\\new.txt', sep="\t")
arr=np.zeros(151)
print(arr)
for point in data.values:
(key,name,val)=point[0],point[-2],point[-1]
arr[key]=val
print(arr)
print(arr)
train_file= pd.read_csv('E:\\fyp data\\ADEK-20\\validation_images.txt', sep="\t")
train_lst=list(train_file["images"])
path="E:\\fyp data\\ADEK-20\\ADEChallengeData2016\\ADEChallengeData2016\\annotations\\validation\\"
saved="E:\\fyp data\\ADEK-20\\new_se_new\\adk_annotations\\validation\\"
for img in train_lst:
imgPath=path+img+'.png'
image=np.array(cv2.imread(imgPath,0))
image=arr[image]
uniques=np.unique(image)
    if len(uniques) > 0:
cv2.imwrite(saved+img+'.png',image)
print("Done")
| 2.3125
| 2
|
RegressionModels/SimpleLinearRegression.py
|
nicohm/Machine-Learning
| 0
|
12776011
|
<filename>RegressionModels/SimpleLinearRegression.py
"""
SIMPLE LINEAR REGRESSION
------------------------
@author: <NAME>
We'll learn to fit a simple linear regression model using the scikit-learn library.
"""
# Import packages
import pandas as pd
import numpy as np
import pylab as pl
import matplotlib.pyplot as plt
from sklearn import linear_model
# Import data
df = pd.read_csv("data1.csv")
# view top 5
df.head(5)
# Select some features of the data
sub_df = df[['GASEXP', 'POP', 'INCOME', 'PUC']]
sub_df.head(5)
# Graph histograms
sub_df.hist()
plt.show()
# Scatterplot
plt.scatter(sub_df.INCOME, sub_df.GASEXP, color='red')
plt.xlabel('Income')
plt.ylabel('Gasexp')
plt.show()
# Divide of dataset in training and test data
rul = np.random.rand(len(sub_df)) < 0.6
train = sub_df[rul]
test = sub_df[~rul]
# Simple regression model
reg = linear_model.LinearRegression()
train_x = np.asanyarray(train[['INCOME']])
train_y = np.asanyarray(train[['GASEXP']])
reg.fit(train_x, train_y)
# The coefficients
print('Coefficients: ', reg.coef_)
print('Intercept: ', reg.intercept_)
#Plot
plt.scatter(train.INCOME, train.GASEXP, color='red')
plt.plot(train_x, reg.coef_[0][0]*train_x + reg.intercept_[0], '-g')
plt.xlabel('Income')
plt.ylabel('Gasexp')
plt.show()
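# A minimal follow-up sketch (not in the original script): evaluate the fitted
# model on the held-out test split created above, using scikit-learn's metrics.
from sklearn.metrics import mean_squared_error, r2_score

test_x = np.asanyarray(test[['INCOME']])
test_y = np.asanyarray(test[['GASEXP']])
test_pred = reg.predict(test_x)
print('Mean squared error: %.2f' % mean_squared_error(test_y, test_pred))
print('R2 score: %.2f' % r2_score(test_y, test_pred))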
| 4.15625
| 4
|
src/marion/marion/tests/test_views.py
|
OmenApps/marion
| 7
|
12776012
|
"""Tests for the marion application views"""
import json
import tempfile
from pathlib import Path
from django.urls import reverse
import pytest
from pytest_django import asserts as django_assertions
from rest_framework import exceptions as drf_exceptions
from rest_framework import status
from rest_framework.test import APIClient
from marion import defaults, models
from marion.issuers import DummyDocument
client = APIClient()
def count_documents(root):
"""Return the number of generated PDF files in the root directory"""
return len(list(root.glob("*.pdf")))
@pytest.mark.django_db
def test_document_request_viewset_post(monkeypatch):
"""Test the DocumentRequestViewSet create view"""
monkeypatch.setattr(defaults, "DOCUMENTS_ROOT", Path(tempfile.mkdtemp()))
url = reverse("documentrequest-list")
assert count_documents(defaults.DOCUMENTS_ROOT) == 0
# Request payload required parameters
data = {}
response = client.post(url, data, format="json")
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert isinstance(response.data.get("context_query")[0], drf_exceptions.ErrorDetail)
assert response.data.get("context_query")[0].code == "required"
assert isinstance(response.data.get("issuer")[0], drf_exceptions.ErrorDetail)
assert response.data.get("issuer")[0].code == "required"
assert models.DocumentRequest.objects.count() == 0
assert count_documents(defaults.DOCUMENTS_ROOT) == 0
# Invalid issuer
data = {
"issuer": "marion.issuers.DumberDocument",
"context_query": json.dumps({"fullname": "<NAME>"}),
}
response = client.post(url, data, format="json")
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response.data.get("issuer")[0].code == "invalid_choice"
assert models.DocumentRequest.objects.count() == 0
assert count_documents(defaults.DOCUMENTS_ROOT) == 0
# Perform standard request
data = {
"issuer": "marion.issuers.DummyDocument",
"context_query": json.dumps({"fullname": "<NAME>"}),
}
response = client.post(url, data, format="json")
assert response.status_code == status.HTTP_201_CREATED
assert models.DocumentRequest.objects.count() == 1
assert (
models.DocumentRequest.objects.get().context.get("fullname")
== "<NAME>"
)
assert count_documents(defaults.DOCUMENTS_ROOT) == 1
@pytest.mark.django_db
def test_document_request_viewset_post_context_query_pydantic_model_validation(
monkeypatch,
):
"""Test the DocumentRequestViewSet create view context_query pydantic model
validation.
"""
monkeypatch.setattr(defaults, "DOCUMENTS_ROOT", Path(tempfile.mkdtemp()))
url = reverse("documentrequest-list")
assert count_documents(defaults.DOCUMENTS_ROOT) == 0
# Refuse extra fields in context query
data = {
"issuer": "marion.issuers.DummyDocument",
"context_query": json.dumps({"fullname": "<NAME>", "friends": 2}),
}
response = client.post(url, data, format="json")
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert "extra fields not permitted" in str(response.data.get("error"))
assert models.DocumentRequest.objects.count() == 0
assert count_documents(defaults.DOCUMENTS_ROOT) == 0
# Input types checking
data = {
"issuer": "marion.issuers.DummyDocument",
"context_query": json.dumps({"fullname": None}),
}
response = client.post(url, data, format="json")
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert "none is not an allowed value" in str(response.data.get("error"))
assert models.DocumentRequest.objects.count() == 0
assert count_documents(defaults.DOCUMENTS_ROOT) == 0
    # Input constraints checking (short fullname)
data = {
"issuer": "marion.issuers.DummyDocument",
"context_query": json.dumps({"fullname": "D"}),
}
response = client.post(url, data, format="json")
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert "ensure this value has at least 2 characters" in str(
response.data.get("error")
)
assert models.DocumentRequest.objects.count() == 0
assert count_documents(defaults.DOCUMENTS_ROOT) == 0
    # Input constraints checking (too long fullname)
data = {
"issuer": "marion.issuers.DummyDocument",
"context_query": json.dumps({"fullname": "F" * 256}),
}
response = client.post(url, data, format="json")
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert "ensure this value has at most 255 characters" in str(
response.data.get("error")
)
assert models.DocumentRequest.objects.count() == 0
assert count_documents(defaults.DOCUMENTS_ROOT) == 0
@pytest.mark.django_db
def test_document_request_viewset_post_context_pydantic_model_validation(
monkeypatch,
):
"""Test the DocumentRequestViewSet create view context pydantic model
validation.
"""
# pylint: disable=unused-argument,function-redefined
monkeypatch.setattr(defaults, "DOCUMENTS_ROOT", Path(tempfile.mkdtemp()))
url = reverse("documentrequest-list")
data = {
"issuer": "marion.issuers.DummyDocument",
"context_query": json.dumps({"fullname": "<NAME>"}),
}
# Refuse extra fields in context
def mock_fetch_context(*args, **kwargs):
"""A mock that returns invalid context"""
return {
"fullname": "<NAME>",
"identifier": "0a1c3ccf-c67d-4071-ab1f-3b27628db9b1",
"friends": 2,
}
monkeypatch.setattr(DummyDocument, "fetch_context", mock_fetch_context)
response = client.post(url, data, format="json")
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert "extra fields not permitted" in response.data.get("error")
assert models.DocumentRequest.objects.count() == 0
assert count_documents(defaults.DOCUMENTS_ROOT) == 0
# Types checking
def mock_fetch_context(*args, **kwargs):
"""A mock that returns invalid context"""
return {"fullname": None, "identifier": "0a1c3ccf-c67d-4071-ab1f-3b27628db9b1"}
monkeypatch.setattr(DummyDocument, "fetch_context", mock_fetch_context)
response = client.post(url, data, format="json")
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert "none is not an allowed value" in response.data.get("error")
assert models.DocumentRequest.objects.count() == 0
assert count_documents(defaults.DOCUMENTS_ROOT) == 0
# Missing identifier
def mock_fetch_context(*args, **kwargs):
"""A mock that returns invalid context"""
return {"fullname": "<NAME>"}
monkeypatch.setattr(DummyDocument, "fetch_context", mock_fetch_context)
response = client.post(url, data, format="json")
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert "identifier\n field required" in response.data.get("error")
assert models.DocumentRequest.objects.count() == 0
assert count_documents(defaults.DOCUMENTS_ROOT) == 0
# Constraints checking (short fullname)
def mock_fetch_context(*args, **kwargs):
"""A mock that returns invalid context"""
return {"fullname": "D", "identifier": "0a1c3ccf-c67d-4071-ab1f-3b27628db9b1"}
monkeypatch.setattr(DummyDocument, "fetch_context", mock_fetch_context)
response = client.post(url, data, format="json")
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert "ensure this value has at least 2 characters" in response.data.get("error")
assert models.DocumentRequest.objects.count() == 0
assert count_documents(defaults.DOCUMENTS_ROOT) == 0
# Constraints checking (too long fullname)
def mock_fetch_context(*args, **kwargs):
"""A mock that returns invalid context"""
return {
"fullname": "F" * 256,
"identifier": "0a1c3ccf-c67d-4071-ab1f-3b27628db9b1",
}
monkeypatch.setattr(DummyDocument, "fetch_context", mock_fetch_context)
response = client.post(url, data, format="json")
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert "ensure this value has at most 255 characters" in response.data.get("error")
assert models.DocumentRequest.objects.count() == 0
assert count_documents(defaults.DOCUMENTS_ROOT) == 0
def test_document_template_debug_view_is_only_active_in_debug_mode(settings):
"""Test if the document_template_debug view is active when not in debug mode"""
settings.DEBUG = False
url = reverse("documents-template-debug")
response = client.get(url)
assert response.status_code == 403
def test_document_template_debug_view(settings):
"""Test the document_template_debug view"""
settings.DEBUG = True
settings.MARION_DOCUMENT_ISSUER_CHOICES_CLASS = (
"marion.default.DocumentIssuerChoices"
)
url = reverse("documents-template-debug")
response = client.get(url)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert b"You should provide an issuer." in response.content
response = client.get(url, {"issuer": "foo.bar.baz"})
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert b"Unknown issuer foo.bar.baz" in response.content
response = client.get(url, {"issuer": "marion.issuers.DummyDocument"})
assert response.status_code == 200
# pylint: disable=no-member
django_assertions.assertContains(response, "<h1>Dummy document</h1>")
| 2.546875
| 3
|
code/test.py
|
JJBUP/yolov3_pytorch
| 1
|
12776013
|
<filename>code/test.py
# -*- coding: utf-8 -*-
import argparse
import tqdm
import torch
from torch.utils.data import DataLoader
from torch.autograd import Variable
import numpy as np
from terminaltables import AsciiTable
from Yolo3Body import YOLOV3
from utils.util import get_classes_name, xywh2xyxy, non_max_suppression, get_batch_statistics, ap_per_class
from config import anchors_mask_list
from utils.datasets import ListDataSet
from utils.transforms import DEFAULT_TRANSFORMS
def _create_validation_data_loader(label_path, input_shape, batch_size, num_workers):
dataset = ListDataSet(labels_file=label_path, input_shape=input_shape, transform=DEFAULT_TRANSFORMS)
dataloader = DataLoader(
dataset,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
pin_memory=True,
collate_fn=dataset.collate_fn
)
return dataloader,len(dataset)
def print_eval_stats(metrics_output, class_names, verbose):
if metrics_output is not None:
precision, recall, AP, f1, ap_class = metrics_output
if verbose:
# Prints class AP and mean AP
ap_table = [["Index", "Class", "AP"]]
for i, c in enumerate(ap_class):
ap_table += [[c, class_names[c], "%.5f" % AP[i]]]
print(AsciiTable(ap_table).table)
print(f"---- mAP {AP.mean():.5f} ----")
else:
print("---- mAP not measured (no detections found by model) ----")
def _evaluate(model, dataloader, class_names, img_size, iou_thres, conf_thres, nms_thres, verbose):
"""Evaluate model on validation dataset.
:param model: Model to evaluate
:type model: models.Darknet
:param dataloader: Dataloader provides the batches of images with targets
:type dataloader: DataLoader
:param class_names: List of class names
:type class_names: [str]
:param img_size: Size of each image dimension for yolo
:type img_size: int
:param iou_thres: IOU threshold required to qualify as detected
:type iou_thres: float
:param conf_thres: Object confidence threshold
:type conf_thres: float
:param nms_thres: IOU threshold for non-maximum suppression
:type nms_thres: float
:param verbose: If True, prints stats of model
:type verbose: bool
:return: Returns precision, recall, AP, f1, ap_class
"""
model.eval() # Set model to evaluation mode
Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
labels = []
sample_metrics = [] # List of tuples (TP, confs, pred)
for imgs, targets in tqdm.tqdm(dataloader, desc="Validating"):
# Extract labels
labels += targets[:, 1].tolist()
# Rescale target
targets[:, 2:] = xywh2xyxy(targets[:, 2:])
targets[:, 2:] *= img_size
imgs = Variable(imgs.type(Tensor), requires_grad=False)
with torch.no_grad():
outputs = model(imgs)
outputs = non_max_suppression(outputs, conf_thres=conf_thres, iou_thres=nms_thres)
sample_metrics += get_batch_statistics(outputs, targets, iou_threshold=iou_thres)
if len(sample_metrics) == 0: # No detections over whole validation set.
print("---- No detections over whole validation set ----")
return None
# Concatenate sample statistics
true_positives, pred_scores, pred_labels = [
np.concatenate(x, 0) for x in list(zip(*sample_metrics))]
metrics_output = ap_per_class(
true_positives, pred_scores, pred_labels, labels)
print_eval_stats(metrics_output, class_names, verbose)
return metrics_output
def run():
parser = argparse.ArgumentParser(description="Evaluate validation data.")
parser.add_argument("-w", "--weight_path", type=str, default="/Users/weimingan/work/weights/yolov3_vocc_50 (1).pth",
help="权重文件")
parser.add_argument("-c", "--classes", type=str, default="../config/voc_names.txt", help="类别文件")
parser.add_argument("--label_path", type=str, default="../data/annotation/voc2007_test.txt", help="标签文件")
parser.add_argument("-b", "--batch_size", type=int, default=8)
parser.add_argument("--img_size", type=int, default=416, help="输入Yolo的图片尺度")
parser.add_argument('--input_shape', type=list, default=[416, 416], help="输入图片的尺寸 w h")
parser.add_argument("--num_workers", type=int, default=1, help="dataloader的线程")
parser.add_argument("--iou_thres", type=float, default=0.5, help="IOU threshold required to qualify as detected")
parser.add_argument("--conf_thres", type=float, default=0.01, help="Object confidence threshold")
parser.add_argument("--nms_thres", type=float, default=0.4, help="IOU threshold for non-maximum suppression")
args = parser.parse_args()
print(f"Command line arguments: {args}")
    # Load the class list defined when the dataset was built
    classes = get_classes_name(args.classes)
    # Load the test dataset
    # Load the model
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Build the model in inference (non-training) mode
    model = YOLOV3(cls_num=len(classes), anchors=anchors_mask_list, img_size=args.img_size, training=False).to(device)
    # Load the model weights
    model.load_state_dict(torch.load(args.weight_path, map_location=device))
    # Create the dataloader
    dataloader,dataset_len = _create_validation_data_loader(args.label_path, args.input_shape, args.batch_size, args.num_workers)
    # Run detection and evaluation
metrics_output = _evaluate(
model,
dataloader,
classes,
args.img_size,
args.iou_thres,
args.conf_thres,
args.nms_thres,
verbose=True)
    if metrics_output is not None:
        precision, recall, AP, f1, ap_class = metrics_output
if __name__ == '__main__':
run()
| 2.171875
| 2
|
verticapy/tests/vModel/test_svd.py
|
vertica/vertica_ml_python
| 7
|
12776014
|
<filename>verticapy/tests/vModel/test_svd.py<gh_stars>1-10
# (c) Copyright [2018-2022] Micro Focus or one of its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Pytest
import pytest
# VerticaPy
from verticapy import drop, set_option
from verticapy.connect import current_cursor
from verticapy.datasets import load_winequality
from verticapy.learn.decomposition import SVD
set_option("print_info", False)
@pytest.fixture(scope="module")
def winequality_vd():
winequality = load_winequality()
yield winequality
drop(name="public.winequality",)
@pytest.fixture(scope="module")
def model(winequality_vd):
model_class = SVD("SVD_model_test",)
model_class.drop()
model_class.fit("public.winequality", ["citric_acid", "residual_sugar", "alcohol"])
yield model_class
model_class.drop()
class TestSVD:
def test_repr(self, model):
assert "SVD" in model.__repr__()
model_repr = SVD("model_repr")
model_repr.drop()
assert model_repr.__repr__() == "<SVD>"
def test_deploySQL(self, model):
expected_sql = 'APPLY_SVD("citric_acid", "residual_sugar", "alcohol" USING PARAMETERS model_name = \'SVD_model_test\', match_by_pos = \'true\', cutoff = 1)'
result_sql = model.deploySQL()
assert result_sql == expected_sql
def test_deployInverseSQL(self, model):
expected_sql = 'APPLY_INVERSE_SVD("citric_acid", "residual_sugar", "alcohol" USING PARAMETERS model_name = \'SVD_model_test\', match_by_pos = \'true\')'
result_sql = model.deployInverseSQL()
assert result_sql == expected_sql
def test_plot(self, model):
result = model.plot()
assert len(result.get_default_bbox_extra_artists()) == 8
result = model.plot(dimensions=(2, 3))
assert len(result.get_default_bbox_extra_artists()) == 8
def test_plot_scree(self, model):
result = model.plot_scree()
assert len(result.get_default_bbox_extra_artists()) == 14
def test_plot_circle(self, model):
result = model.plot_circle()
assert len(result.get_default_bbox_extra_artists()) == 16
result = model.plot_circle(dimensions=(2, 3))
assert len(result.get_default_bbox_extra_artists()) == 16
def test_drop(self):
current_cursor().execute("DROP MODEL IF EXISTS SVD_model_test_drop")
model_test = SVD("SVD_model_test_drop",)
model_test.fit("public.winequality", ["alcohol", "quality"])
current_cursor().execute(
"SELECT model_name FROM models WHERE model_name = 'SVD_model_test_drop'"
)
assert current_cursor().fetchone()[0] == "SVD_model_test_drop"
model_test.drop()
current_cursor().execute(
"SELECT model_name FROM models WHERE model_name = 'SVD_model_test_drop'"
)
assert current_cursor().fetchone() is None
def test_get_attr(self, model):
m_att = model.get_attr()
assert m_att["attr_name"] == [
"columns",
"singular_values",
"right_singular_vectors",
"counters",
"call_string",
]
assert m_att["attr_fields"] == [
"index, name",
"index, value, explained_variance, accumulated_explained_variance",
"index, vector1, vector2, vector3",
"counter_name, counter_value",
"call_string",
]
assert m_att["#_of_rows"] == [3, 3, 3, 3, 1]
m_att_details = model.get_attr(attr_name="singular_values")
assert m_att_details["value"][0] == pytest.approx(968.964362586858, abs=1e-6)
assert m_att_details["value"][1] == pytest.approx(354.585184720344, abs=1e-6)
assert m_att_details["value"][2] == pytest.approx(11.7281921567471, abs=1e-6)
def test_get_params(self, model):
assert model.get_params() == {"method": "lapack", "n_components": 0}
def test_to_python(self, model):
current_cursor().execute(
"SELECT APPLY_SVD(citric_acid, residual_sugar, alcohol USING PARAMETERS model_name = '{}', match_by_pos=True) FROM (SELECT 3.0 AS citric_acid, 11.0 AS residual_sugar, 93. AS alcohol) x".format(
model.name
)
)
prediction = current_cursor().fetchone()
assert prediction == pytest.approx(
model.to_python(return_str=False)([[3.0, 11.0, 93.0]])[0]
)
def test_to_sql(self, model):
current_cursor().execute(
"SELECT APPLY_SVD(citric_acid, residual_sugar, alcohol USING PARAMETERS model_name = '{}', match_by_pos=True) FROM (SELECT 3.0 AS citric_acid, 11.0 AS residual_sugar, 93. AS alcohol) x".format(
model.name
)
)
prediction = [float(elem) for elem in current_cursor().fetchone()]
current_cursor().execute(
"SELECT {} FROM (SELECT 3.0 AS citric_acid, 11.0 AS residual_sugar, 93. AS alcohol) x".format(
", ".join(model.to_sql())
)
)
prediction2 = [float(elem) for elem in current_cursor().fetchone()]
assert prediction == pytest.approx(prediction2)
def test_to_memmodel(self, model):
current_cursor().execute(
"SELECT APPLY_SVD(citric_acid, residual_sugar, alcohol USING PARAMETERS model_name = '{}', match_by_pos=True) FROM (SELECT 3.0 AS citric_acid, 11.0 AS residual_sugar, 93. AS alcohol) x".format(
model.name
)
)
prediction = [float(elem) for elem in current_cursor().fetchone()]
current_cursor().execute(
"SELECT {} FROM (SELECT 3.0 AS citric_acid, 11.0 AS residual_sugar, 93. AS alcohol) x".format(
", ".join(
model.to_memmodel().transform_sql(
["citric_acid", "residual_sugar", "alcohol"]
)
)
)
)
prediction2 = [float(elem) for elem in current_cursor().fetchone()]
assert prediction == pytest.approx(prediction2)
prediction3 = model.to_memmodel().transform([[3.0, 11.0, 93.0]])
assert prediction == pytest.approx(list(prediction3[0]))
def test_get_transform(self, winequality_vd, model):
winequality_trans = model.transform(
winequality_vd, X=["citric_acid", "residual_sugar", "alcohol"]
)
assert winequality_trans["col1"].mean() == pytest.approx(
0.0121807874344058, abs=1e-6
)
assert winequality_trans["col2"].mean() == pytest.approx(
-0.00200082024084619, abs=1e-6
)
assert winequality_trans["col3"].mean() == pytest.approx(
0.000194341623203586, abs=1e-6
)
def test_get_inverse_transform(self, winequality_vd, model):
winequality_trans = model.transform(
winequality_vd, X=["citric_acid", "residual_sugar", "alcohol"]
)
winequality_trans = model.inverse_transform(
winequality_trans, X=["col1", "col2", "col3"]
)
assert winequality_trans["citric_acid"].mean() == pytest.approx(
winequality_vd["citric_acid"].mean(), abs=1e-6
)
assert winequality_trans["residual_sugar"].mean() == pytest.approx(
winequality_vd["residual_sugar"].mean(), abs=1e-6
)
assert winequality_trans["alcohol"].mean() == pytest.approx(
winequality_vd["alcohol"].mean(), abs=1e-6
)
def test_svd_score(self, model):
result = model.score()
assert result["Score"][0] == pytest.approx(0.0, abs=1e-6)
assert result["Score"][1] == pytest.approx(0.0, abs=1e-6)
assert result["Score"][2] == pytest.approx(0.0, abs=1e-6)
def test_set_params(self, model):
model.set_params({"n_components": 3})
assert model.get_params()["n_components"] == 3
def test_model_from_vDF(self, winequality_vd):
current_cursor().execute("DROP MODEL IF EXISTS SVD_vDF")
model_test = SVD("SVD_vDF",)
model_test.fit(winequality_vd, ["alcohol", "quality"])
current_cursor().execute(
"SELECT model_name FROM models WHERE model_name = 'SVD_vDF'"
)
assert current_cursor().fetchone()[0] == "SVD_vDF"
model_test.drop()
| 2.109375
| 2
|
src/Cells/Gol_Cell.py
|
eniallator/Game-of-Life-Workshop
| 0
|
12776015
|
from src.Cells.Base_Cell import Base_Cell
class Gol_Cell(Base_Cell):
_neighbour_radius = 1
@classmethod
def try_spawn(cls, neighbours):
gol_cell_count = 0
for row in neighbours:
for cell in row:
if cell.__class__ == Gol_Cell:
gol_cell_count += 1
if gol_cell_count == 3:
return Gol_Cell()
def update(self, neighbours):
gol_cell_count = 0
for row in neighbours:
for cell in row:
if cell.__class__ == Gol_Cell:
gol_cell_count += 1
if gol_cell_count < 2 or gol_cell_count > 3:
self._dead = True
def draw(self, graphics, bounding_box):
graphics.rect((0, 255, 0), bounding_box)
| 3.171875
| 3
|
codeSheets/SEAS6401/PGAProject/Exploratory_Analysis.py
|
kylearbide/kylearbide.github.io
| 0
|
12776016
|
# Databricks notebook source
import pandas as pd
import math
import matplotlib.pyplot as plt
import numpy as np
# COMMAND ----------
roundsDf = pd.read_csv("/dbfs/FileStore/karbide/Rounds.txt")
holesDf = pd.read_csv("/dbfs/FileStore/karbide/Holes.txt")
holesDf.drop(["Score", "ToPar", "Unnamed: 0"],axis=1,inplace = True)
roundsDf.drop(["Unnamed: 0"],axis=1,inplace = True)
# COMMAND ----------
roundsDf.head(10)
# COMMAND ----------
print(roundsDf.shape)
print(roundsDf["PlayerID"].nunique())
print(roundsDf["TournamentID"].nunique())
# COMMAND ----------
holesDf.head(10)
# COMMAND ----------
print(holesDf.shape)
print(holesDf["Player_ID"].nunique())
print(holesDf["Tournament_ID"].nunique())
# COMMAND ----------
# MAGIC %md
# MAGIC For our exploratory analysis, let's go through one dataset at a time
# MAGIC
# MAGIC **ROUNDS**
# COMMAND ----------
roundsDf["RoundScore"].describe()
# COMMAND ----------
roundsDf.describe()
# I want to see the round where there were no Pars
# COMMAND ----------
roundsDf.loc[roundsDf["Pars"]==0]
#they are mostly from tournament 448, let's bring in the tournament names and find this one
# COMMAND ----------
Tournaments = pd.read_csv("/dbfs/FileStore/karbide/Last_Season.txt")
# COMMAND ----------
Tournaments.loc[Tournaments["TournamentID"] == 448]
# COMMAND ----------
# the Barracuda Championship uses a different scoring format than the rest of the tour, so it is easiest to remove it from our data sets
roundsDf = roundsDf.loc[roundsDf["TournamentID"] != 448]
holesDf = holesDf.loc[holesDf["Tournament_ID"] != 448]
# COMMAND ----------
# let's also remove rounds that were not completed
roundsDf["Total_Holes"] = roundsDf["DoubleEagles"] + roundsDf["Eagles"] + roundsDf["Birdies"]+ roundsDf["Pars"]+ roundsDf["Bogeys"]+ roundsDf["DoubleBogeys"]+ roundsDf["WorseThanDoubleBogeys"]
roundsDf = roundsDf.loc[roundsDf["Total_Holes"] == 18]
# COMMAND ----------
#roundsDf.to_csv("/dbfs/FileStore/karbide/RoundsReg.txt")
# COMMAND ----------
roundsDf.describe()
# COMMAND ----------
display(roundsDf["RoundScore"].hist(bins = 27))
#looks symmetrical and normally distributed
# COMMAND ----------
Tournaments = pd.read_csv("/dbfs/FileStore/karbide/Last_Season.txt")
TournamentNames = Tournaments[["TournamentID","Name"]]
# COMMAND ----------
Players = pd.read_csv("/dbfs/FileStore/karbide/PlayerStats.txt")
PlayerNames = Players[["PlayerID","PLAYER NAME"]]
# COMMAND ----------
roundsDf = roundsDf.merge(PlayerNames, how = "left", left_on = "PlayerID", right_on = "PlayerID")
# COMMAND ----------
roundsDf = roundsDf.merge(TournamentNames, how = "left", on = "TournamentID")
# COMMAND ----------
# average round score by tournament
tournamentAverages = roundsDf.groupby("Name").agg({"RoundScore" : ['mean']})
tournamentAverages.reset_index(inplace = True)
tournamentAverages.columns = ["TournamentName","Mean"]
tournamentAverages.sort_values("Mean", inplace = True)
# COMMAND ----------
# MAGIC %matplotlib inline
# MAGIC fig, ax = plt.subplots(figsize=(5, 13))
# MAGIC ax.barh(tournamentAverages["TournamentName"],tournamentAverages["Mean"])
# MAGIC
# MAGIC plt.autoscale(enable=True, axis='y', tight=False)
# MAGIC plt.xlabel("Average Round Score (to Par)")
# MAGIC plt.ylabel("Tournament")
# MAGIC plt.title("Average Round Score by Tournament")
# MAGIC plt.grid(axis='x')
# MAGIC
# MAGIC display(fig)
# COMMAND ----------
PlayerAverages = roundsDf.groupby(["PLAYER NAME","PlayerID"]).agg({"RoundScore" : ['mean']})
PlayerAverages.reset_index(inplace = True)
PlayerAverages.columns = ["PlayerName","PlayerID","Mean"]
PlayerAverages.sort_values("Mean", inplace = True)
PlayerAveragesTop = PlayerAverages[0:10]
PlayerAveragesBottom = PlayerAverages[-10:]
PlayersChart = pd.concat([PlayerAveragesTop,PlayerAveragesBottom])
PlayersChart.sort_values("Mean", inplace = True, ascending = False)
# COMMAND ----------
# MAGIC %matplotlib inline
# MAGIC fig, ax = plt.subplots(figsize=(5, 8))
# MAGIC ax.barh(PlayersChart["PlayerName"],PlayersChart["Mean"], color = ["Red","Red","Red","Red","Red","Red","Red","Red","Red","Red","Green","Green","Green","Green","Green","Green","Green","Green","Green","Green"])
# MAGIC
# MAGIC #Design
# MAGIC plt.autoscale(enable=True, axis='y', tight=False)
# MAGIC plt.xlabel("Average Round Score (to Par)")
# MAGIC plt.ylabel("Player")
# MAGIC plt.title("Top and Bottom 10 Players for Average Round Score")
# MAGIC plt.grid(axis='x')
# MAGIC
# MAGIC display(fig)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC Now let's look at the Hole by Hole dataframe.
# MAGIC Barracuda Tournament has already been removed
# MAGIC
# MAGIC There are also a few player statistics I want to calculate and store for later (Par3Average, Par4Average, Par5Average, HolesPerBirdie, HolesPerBogey)
# COMMAND ----------
holesDf.head(20)
# COMMAND ----------
holesDf.describe()
# COMMAND ----------
sum(holesDf["HoleInOne"])
# 29 hole in ones
# COMMAND ----------
holesDf["Hole_ScoreNum"].hist(bins = 7)
# COMMAND ----------
holeNumberScores = holesDf.groupby("Number").agg({"Hole_ScoreNum" : ["mean"]})
holeNumberScores.reset_index(inplace = True)
holeNumberScores.columns = ["HoleNumber","Mean"]
holeNumberScores["Mean_adj"] = holeNumberScores["Mean"]*10
holeNumberScores.sort_values("HoleNumber",inplace = True)
holeNumberScores = holeNumberScores.astype({"HoleNumber": str})
# COMMAND ----------
# MAGIC %matplotlib inline
# MAGIC fig, ax = plt.subplots(figsize=(10, 7))
# MAGIC ax.scatter(holeNumberScores["HoleNumber"], holeNumberScores['Mean'])
# MAGIC
# MAGIC #Design
# MAGIC plt.autoscale(enable=True, axis='y', tight=False)
# MAGIC plt.ylabel("Average Score to Par")
# MAGIC plt.xlabel("Hole Number")
# MAGIC plt.title("Average Score by Hole Number")
# MAGIC plt.grid(axis='y')
# MAGIC
# MAGIC display(fig)
# COMMAND ----------
holeParScores = holesDf.groupby("Par").agg({"Hole_ScoreNum" : ["mean"]})
holeParScores.reset_index(inplace = True)
holeParScores.columns = ["HolePar","Mean"]
holeParScores["Mean_adj"] = holeParScores["Mean"]*10
holeParScores.sort_values("HolePar",inplace = True)
xdist = holeParScores["HolePar"].tolist()
holeParScores = holeParScores.astype({"HolePar": str})
labels = holeParScores["Mean"].tolist()
lables = list(map(str, labels))
# COMMAND ----------
# MAGIC %matplotlib inline
# MAGIC fig, ax = plt.subplots(figsize=(5, 7))
# MAGIC ax.bar(holeParScores["HolePar"], holeParScores['Mean'], color = ["Red","Gray","Green"])
# MAGIC
# MAGIC #Design
# MAGIC plt.autoscale(enable=True, axis='y', tight=False)
# MAGIC plt.ylabel("Average Score (to Par)")
# MAGIC plt.xlabel("Par")
# MAGIC plt.title("Average Score by <NAME>")
# MAGIC plt.grid(axis='y')
# MAGIC ## Labels
# MAGIC #for i in range(len(labels)):
# MAGIC #plt.text(x = xdist[i] - 3.5,y=holeParScores["Mean"][i] +0.01, s = lables[i], size = 6)
# MAGIC
# MAGIC display(fig)
# COMMAND ----------
# MAGIC %md
# MAGIC **DISTANCE MATRICES**
# MAGIC 
# MAGIC I want to create distance matrices for both players and courses
# MAGIC
# MAGIC The conclusions I hope to draw through this are: *Which courses play similarly*, *Which Players perform similarly*, *Which Players play similarly*
# MAGIC
# MAGIC We will attack each of these questions one at a time
# COMMAND ----------
# Courses
# we are going to compute 4 distance variants: euclidean and cosine, each with missing values filled with zero and with the column mean
CourseMatrix = roundsDf.groupby(["PlayerID","TournamentID"]).agg({"RoundShots": "sum"})
CourseMatrix.reset_index(inplace = True)
CourseMatrix = CourseMatrix.pivot(columns="TournamentID",index = "PlayerID",values="RoundShots").fillna(0)
CoursesList = CourseMatrix.columns
# COMMAND ----------
from sklearn.metrics.pairwise import euclidean_distances, cosine_distances
from scipy.cluster.hierarchy import dendrogram, linkage
# COMMAND ----------
dist = pd.DataFrame(euclidean_distances(CourseMatrix.transpose()))
dist.columns = CoursesList
dist.index = CoursesList
# COMMAND ----------
z = linkage(CourseMatrix.transpose(), metric = 'euclidean')
dendrogram(z, leaf_rotation = 90, color_threshold = 1250, labels= dist.index)
plt.show()
# COMMAND ----------
z = linkage(CourseMatrix.transpose(), metric = 'cosine')
dendrogram(z, leaf_rotation = 90, color_threshold = 0.2, labels= dist.index, orientation = 'right')
plt.show()
# COMMAND ----------
# now let's try filling missing values with the column mean
CourseMatrixMean = roundsDf.groupby(["PlayerID","TournamentID"]).agg({"RoundShots": "sum"})
CourseMatrixMean.reset_index(inplace = True)
CourseMatrixMean = CourseMatrixMean.pivot(columns="TournamentID",index = "PlayerID",values="RoundShots")
CourseMatrixMean.fillna(CourseMatrixMean.mean(), inplace = True)
# COMMAND ----------
z = linkage(CourseMatrixMean.transpose(), metric = 'euclidean')
dendrogram(z, leaf_rotation = 90, color_threshold = 300, labels= dist.index)
plt.show()
# COMMAND ----------
z = linkage(CourseMatrixMean.transpose(), metric = 'cosine')
plt.figure()
dendrogram(z, leaf_rotation = 90, color_threshold = 0.01, labels= dist.index)
plt.xlabel("Tournament ID")
plt.show()
# COMMAND ----------
SimilarTournaments = Tournaments.loc[(Tournaments["TournamentID"] == 452) | (Tournaments["TournamentID"] == 410 )| (Tournaments["TournamentID"] == 420 )| (Tournaments["TournamentID"] == 447)]
SimilarTournaments["Name"]
# the 4 tournaments that are grouped the closest together are BMW Championship, Olympic Men's Golf Competition, Sentry Tournament of Champions, The ZOZO CHAMPIONSHIP
# COMMAND ----------
playerCounts = roundsDf.groupby("TournamentID").agg({"PlayerID":"nunique"})
#playerCounts
playerCounts.head()
# COMMAND ----------
# It's grouping together those tournaments that don't have cuts. Hence the values are all closer and they are being compared as matching tournaments. Let's try to remove players that missed the cut
CourseMatrixCuts = roundsDf.groupby(["PlayerID","TournamentID"]).agg({"RoundShots": "sum"})
CourseMatrixCuts.reset_index(inplace = True)
CourseMatrixCuts = CourseMatrixCuts.loc[CourseMatrixCuts["RoundShots"] >= 230]
CourseMatrixCuts = CourseMatrixCuts.pivot(columns="TournamentID",index = "PlayerID",values="RoundShots")
CourseMatrixCuts.fillna(CourseMatrixCuts.mean(), inplace = True)
# COMMAND ----------
z = linkage(CourseMatrixCuts.transpose(), metric = 'cosine')
plt.figure(figsize = [10,10])
dendrogram(z, leaf_rotation = 90, color_threshold = 0.00004, labels= dist.index)
plt.xlabel("Tournament ID")
plt.show()
# COMMAND ----------
z = linkage(CourseMatrixCuts.transpose(), metric = 'euclidean')
plt.figure(figsize = [10,10])
#dendrogram(z, leaf_rotation = 90, color_threshold = 50, labels= dist.index)
dendrogram(z, leaf_rotation = 90, color_threshold = 45, labels= dist.index)
plt.xlabel("Tournament ID")
plt.ylim(37,68)
plt.show()
#now we get some real groups
# COMMAND ----------
# Player performance
PlayerPMatrix = roundsDf.groupby(["PLAYER NAME","TournamentID"]).agg({"RoundShots": "sum"})
PlayerPMatrix.reset_index(inplace = True)
PlayerPMatrix = PlayerPMatrix.loc[PlayerPMatrix["RoundShots"] >= 230]
PlayerPMatrix = PlayerPMatrix.pivot(columns="<NAME>",index = "TournamentID",values="RoundShots")
PlayerPMatrix.fillna(PlayerPMatrix.mean(), inplace = True)
# COMMAND ----------
#I split this into 3 to highlight the close relationships
pLabels = PlayerPMatrix.columns
z = linkage(PlayerPMatrix.transpose(), metric = 'euclidean')
plt.figure(figsize = [10,10])
#dendrogram(z, leaf_rotation = 90, color_threshold = 50, labels= dist.index)
dendrogram(z, leaf_rotation = 90, color_threshold = 15, labels= pLabels)
plt.xlabel("Player ID")
#plt.ylim(8, 17.)
#plt.xlim(1650,1850)
plt.show()
# COMMAND ----------
pLabels = PlayerPMatrix.columns
z = linkage(PlayerPMatrix.transpose(), metric = 'euclidean')
plt.figure(figsize = [10,10])
#dendrogram(z, leaf_rotation = 90, color_threshold = 50, labels= dist.index)
dendrogram(z, leaf_rotation = 90, color_threshold = 15, labels= pLabels)
plt.xlabel("Player Name")
plt.ylabel("Euclidian Distance")
plt.title("Player Euclidian Distances")
plt.ylim(8, 17.)
plt.xlim(1650,1850)
plt.xticks(fontsize = 10, rotation = 45, ha = "right")
plt.show()
# COMMAND ----------
plt.figure(figsize = [10,10])
#dendrogram(z, leaf_rotation = 90, color_threshold = 50, labels= dist.index)
dendrogram(z, leaf_rotation = 90, color_threshold = 15, labels= pLabels)
plt.xlabel("Player ID")
plt.ylim(2.5, 6)
plt.xlim(1200,1225)
plt.show()
# COMMAND ----------
pLabels = PlayerPMatrix.columns
z = linkage(PlayerPMatrix.transpose(), metric = 'cosine')
plt.figure(figsize = [10,10])
#dendrogram(z, leaf_rotation = 90, color_threshold = 50, labels= dist.index)
dendrogram(z, leaf_rotation = 90, color_threshold = 0.00001, labels= pLabels)
plt.xlabel("Player ID")
plt.ylim(0, 0.0000175)
plt.xlim(1675,1830)
plt.show()
# COMMAND ----------
# Player Statistics
playerStats = pd.read_csv("/dbfs/FileStore/karbide/PlayerStats.txt")
playerStats.drop(["Unnamed: 0"], axis = 1, inplace = True)
# COMMAND ----------
playerStats.columns
# COMMAND ----------
playerStats.describe()
# COMMAND ----------
# I'm going to create 2 visuals for each stat category
# Greens in Regulation
playerStats["GIR_PCT_OVERALL"].hist(bins = 20)
# COMMAND ----------
playerStatsBunker = playerStats[["PLAYER NAME", "GIR_PCT_FAIRWAY_BUNKER"]]
playerStatsBunker.sort_values(["GIR_PCT_FAIRWAY_BUNKER"], inplace = True)
playerStatsBunkerTop = playerStatsBunker[0:10]
playerStatsBunkerBottom = playerStatsBunker[-10:]
playerStatsBunker = pd.concat([playerStatsBunkerTop,playerStatsBunkerBottom])
# COMMAND ----------
#best and worst performers out of the bunker
%matplotlib inline
fig, ax = plt.subplots(figsize=(5, 8))
ax.barh(playerStatsBunker["PLAYER NAME"],playerStatsBunker["GIR_PCT_FAIRWAY_BUNKER"], color = ["Red","Red","Red","Red","Red","Red","Red","Red","Red","Red","Green","Green","Green","Green","Green","Green","Green","Green","Green","Green"])
#Design
plt.autoscale(enable=True, axis='y', tight=False)
plt.xlabel("Greens in Regulation % (fairway bunker)")
plt.ylabel("Player")
plt.title("Top and Bottom 10 Players from Fairway Bunker")
plt.grid(axis='x')
display(fig)
# COMMAND ----------
import seaborn as sns
GIRSTATS = playerStats[['GIR_PCT_FAIRWAY_BUNKER', 'GIR_PCT_FAIRWAY',
'GIR_PCT_OVERALL', 'GIR_PCT_OVER_100', 'GIR_PCT_OVER_200',
'GIR_PCT_UNDER_100', 'GREEN_PCT_SCRAMBLE_SAND',
'GREEN_PCT_SCRAMBLE_ROUGH']]
GIRcorr = GIRSTATS.corr()
GIRcorr
# COMMAND ----------
sns.heatmap(abs(GIRcorr),
xticklabels = GIRcorr.columns,
yticklabels = GIRcorr.columns,
annot = True,
cmap = "jet")
# COMMAND ----------
# Tee Shots
playerStats["TEE_AVG_DRIVING_DISTANCE"].hist(bins = 15)
# COMMAND ----------
playerStatsTee = playerStats[["PLAYER NAME", "TEE_AVG_BALL_SPEED"]]
playerStatsTee.sort_values(["TEE_AVG_BALL_SPEED"], inplace = True)
playerStatsTeeTop = playerStatsTee[0:10]
playerStatsTeeBottom = playerStatsTee[-10:]
playerStatsTee = pd.concat([playerStatsTeeTop,playerStatsTeeBottom])
# COMMAND ----------
# MAGIC %matplotlib inline
# MAGIC fig, ax = plt.subplots(figsize=(5, 8))
# MAGIC ax.barh(playerStatsTee["PLAYER NAME"],playerStatsTee["TEE_AVG_BALL_SPEED"], color = ["Red","Red","Red","Red","Red","Red","Red","Red","Red","Red","Green","Green","Green","Green","Green","Green","Green","Green","Green","Green"])
# MAGIC
# MAGIC #Design
# MAGIC plt.autoscale(enable=True, axis='y', tight=False)
# MAGIC plt.xlabel("Ball Velocity (mph)")
# MAGIC plt.ylabel("Player")
# MAGIC plt.title("Top and Bottom 10 Players Tee Ball Velocity")
# MAGIC plt.grid(axis='x')
# MAGIC
# MAGIC display(fig)
# COMMAND ----------
# Are the tee off stats coorelated
playerTeeStats = playerStats[['TEE_AVG_BALL_SPEED',
'TEE_AVG_DRIVING_DISTANCE', 'TEE_DRIVING_ACCURACY_PCT',
'TEE_AVG_LAUNCH_ANGLE', 'TEE_AVG_LEFT_ROUGH_TENDENCY_PCT',
'TEE_AVG_RIGHT_ROUGH_TENDENCY_PCT', 'TEE_AVG_SPIN_RATE']]
# COMMAND ----------
Teecorr = playerTeeStats.corr()
abs(Teecorr)
# COMMAND ----------
sns.heatmap(abs(Teecorr),
xticklabels = Teecorr.columns,
yticklabels = Teecorr.columns,
annot = True,
cmap = "jet")
# COMMAND ----------
# Putting
plt.figure(figsize = (8,6))
plt.hist(playerStats["PUTTING_AVG_ONE_PUTTS"], label = "One Putts/round")
plt.hist(playerStats["PUTTING_AVG_TWO_PUTTS"], label = "Two Putts/round")
plt.xlabel("Putts (per Round)")
plt.ylabel("Frequency")
plt.title("Comparing # of One and Two Putts")
plt.legend(loc = "upper right")
# COMMAND ----------
playerStatsPutt = playerStats[["PLAYER NAME", "PUTTING_AVG_PUTTS"]]
playerStatsPutt.sort_values(["PUTTING_AVG_PUTTS"], inplace = True)
playerStatsPuttTop = playerStatsPutt[0:10]
playerStatsPuttBottom = playerStatsPutt[-10:]
playerStatsPutt = pd.concat([playerStatsPuttTop,playerStatsPuttBottom])
# COMMAND ----------
# MAGIC %matplotlib inline
# MAGIC fig, ax = plt.subplots(figsize=(5, 8))
# MAGIC ax.barh(playerStatsPutt["PLAYER NAME"],playerStatsPutt["PUTTING_AVG_PUTTS"], color = ["Green","Green","Green","Green","Green","Green","Green","Green","Green","Green","Red","Red","Red","Red","Red","Red","Red","Red","Red","Red"])
# MAGIC
# MAGIC #Design
# MAGIC plt.autoscale(enable=True, axis='y', tight=False)
# MAGIC plt.xlabel("Putts (per Round)")
# MAGIC plt.ylabel("Player")
# MAGIC plt.title("Top and Bottom 10 Players Putts per Round")
# MAGIC plt.grid(axis='x')
# MAGIC
# MAGIC display(fig)
# COMMAND ----------
playerPuttStats = playerStats[[ 'PUTTING_AVG_ONE_PUTTS', 'PUTTING_AVG_TWO_PUTTS', 'PUTTING_AVG_PUTTS',
'PUTTING_AVG_DIST_BIRDIE_INCH']]
# COMMAND ----------
Puttcorr = playerPuttStats.corr()
abs(Puttcorr)
# COMMAND ----------
sns.heatmap(abs(Puttcorr),
xticklabels = Puttcorr.columns,
yticklabels = Puttcorr.columns,
annot = True,
cmap = "jet")
# COMMAND ----------
# MAGIC %md
# MAGIC # Make my own stats!
# MAGIC
# MAGIC **I want to make 5 stats using the hole dataframe:**
# MAGIC
# MAGIC *Average Par 3,4,and 5 Scores, Holes/Birdie, and Holes/Bogey*
# COMMAND ----------
#Average scores
AverageScores = holesDf.groupby(["Player_ID","Par"]).agg({"Hole_ScoreNum" : "mean"})
AverageScores.reset_index(inplace = True)
AverageScorePar3 = AverageScores.loc[AverageScores["Par"] == 3]
AverageScorePar4 = AverageScores.loc[AverageScores["Par"] == 4]
AverageScorePar5 = AverageScores.loc[AverageScores["Par"] == 5]
AverageScorePar3.columns = ["PlayerID","Par","Par3Average"]
AverageScorePar4.columns = ["PlayerID","Par","Par4Average"]
AverageScorePar5.columns = ["PlayerID","Par","Par5Average"]
AverageScorePar3.drop(["Par"], axis = 1, inplace = True)
AverageScorePar4.drop(["Par"], axis = 1, inplace = True)
AverageScorePar5.drop(["Par"], axis = 1, inplace = True)
# COMMAND ----------
playerStats = playerStats.merge(AverageScorePar3, how = "left", on = "PlayerID")
playerStats = playerStats.merge(AverageScorePar4, how = "left", on = "PlayerID")
playerStats = playerStats.merge(AverageScorePar5, how = "left", on = "PlayerID")
# COMMAND ----------
def holesPerResult(data,result):
results = data.groupby("Player_ID").agg({result: ["sum", 'count']})
results.reset_index(inplace = True)
results.columns = ["PlayerID","Sum","Total"]
results[f"HolesPer{result}"] = results["Total"] / results["Sum"]
results.drop(["Sum","Total"], axis = 1, inplace = True)
return(results)
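# Worked example (hypothetical numbers): a player who recorded 18 birdies over 72 holes
# gets HolesPerBirdie = Total / Sum = 72 / 18 = 4.0 from the ratio above.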
# COMMAND ----------
h_birdie = holesPerResult(holesDf,"Birdie")
h_bogey = holesPerResult(holesDf,"Bogey")
# COMMAND ----------
playerStats = playerStats.merge(h_birdie, how = "left", on = "PlayerID")
playerStats = playerStats.merge(h_bogey, how = "left", on = "PlayerID")
# COMMAND ----------
plt.figure(figsize = (8,6))
plt.hist(playerStats["HolesPerBirdie"], alpha = 0.5, label = "Birdie", bins = 20)
plt.hist(playerStats["HolesPerBogey"], alpha = 0.5, label = "Bogey", bins = 20)
plt.xlabel("Holes per Result")
plt.ylabel("Frequency")
plt.title("Holes Per Birdie vs Holes Per Bogey")
plt.legend(loc = "upper right")
# COMMAND ----------
resultStats = playerStats[["Par3Average","Par4Average","Par5Average","HolesPerBirdie","HolesPerBogey"]]
# COMMAND ----------
Resultscorr = resultStats.corr()
abs(Resultscorr)
# COMMAND ----------
sns.heatmap(abs(Resultscorr),
xticklabels = Resultscorr.columns,
yticklabels = Resultscorr.columns,
annot = True,
cmap = "jet")
# COMMAND ----------
#playerStats.to_csv("/dbfs/FileStore/karbide/PlayerStatsComplete.txt")
# COMMAND ----------
playerStats.columns
# COMMAND ----------
# MAGIC %md
# MAGIC **Strokes Gained**
# MAGIC
# MAGIC Strokes gained is the most popular statistic for predicting golf results
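# COMMAND ----------
# Illustrative sketch only (numbers below are hypothetical, not from this dataset): strokes gained
# for a single shot is usually defined as the expected strokes to hole out from the starting position,
# minus the expected strokes from the finishing position, minus one for the shot taken.
expected_before = 2.8   # e.g. expected strokes from ~150 yards in the fairway
expected_after = 1.7    # e.g. expected strokes from ~10 feet on the green
strokes_gained_example = expected_before - expected_after - 1   # 0.1 strokes gained on this shot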
# COMMAND ----------
StrokesGained = pd.read_csv("/dbfs/FileStore/karbide/StrokesGainedIDs.txt")
StrokesGained.drop(["Unnamed: 0"], axis =1, inplace = True)
# COMMAND ----------
StrokesGained.corr()
# COMMAND ----------
plt.figure(figsize = (8,6))
plt.hist(StrokesGained["AVERAGE"], bins = 18)
plt.xlabel("SG")
plt.ylabel("Frequency")
plt.title("Average Stroked Gained (per Shot)")
#plt.legend(loc = "upper right")
# COMMAND ----------
SP2 = playerStats[['GREEN_PCT_SCRAMBLE_SAND','GREEN_PCT_SCRAMBLE_ROUGH', 'PlayerID']]
SP1 = SP2.merge(StrokesGained, how = "inner", on = "PlayerID")
SP1 = SP1.sort_values("TOTAL SG:T")
SP1["Inv_RankTee"] = [x for x in range(len(SP1["PlayerID"]))]
SP1 = SP1.sort_values("TOTAL SG:T2G")
SP1["Inv_RankFairway"] = [x for x in range(len(SP1["PlayerID"]))]
SP1 = SP1.sort_values("TOTAL SG:P")
SP1["Inv_RankPutt"] = [x for x in range(len(SP1["PlayerID"]))]
SP1 = SP1.sort_values('GREEN_PCT_SCRAMBLE_SAND')
SP1["Inv_RankSand"] = [x for x in range(len(SP1["PlayerID"]))]
SP1 = SP1.sort_values('GREEN_PCT_SCRAMBLE_ROUGH')
SP1["Inv_RankRough"] = [x for x in range(len(SP1["PlayerID"]))]
# COMMAND ----------
def rankedStat(c,name,data):
data[name] = (data[c]/len(data[c]))*100
return(data)
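# For instance, with a field of 150 players (hypothetical size) the worst Inv_Rank of 0 maps to 0
# and the best Inv_Rank of 149 maps to (149/150)*100 = ~99.3 on the 0-100 percentile scale above.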
# COMMAND ----------
SP1 = rankedStat("Inv_RankTee","Driving (SG)",SP1)
SP1 = rankedStat("Inv_RankFairway","Fairway (SG)",SP1)
SP1 = rankedStat("Inv_RankPutt","Putting (SG)",SP1)
SP1 = rankedStat("Inv_RankRough","Rough (Green Scramble %)",SP1)
SP1 = rankedStat("Inv_RankSand","Bunker (Green Scramble %)",SP1)
# COMMAND ----------
SP3 = SP1[["Driving (SG)","Fairway (SG)","Putting (SG)","Rough (Green Scramble %)","Bunker (Green Scramble %)","PLAYER NAME"]]
# COMMAND ----------
from math import pi
# COMMAND ----------
SP3
# COMMAND ----------
values=SP3.loc[17].drop('PLAYER NAME').values.flatten().tolist()
values += values[:1]
values2 =SP3.loc[130].drop('PLAYER NAME').values.flatten().tolist()
values2 += values2[:1]
angles = [n / float(5) * 2 * pi for n in range(5)]
angles += angles[:1]
categories = ["Driving","Fairway","Putting","Rough","Bunker"]
# COMMAND ----------
ax = plt.subplot(111, polar=True)
plt.xticks(angles[:-1], categories , color='grey', size=8)
ax.set_rlabel_position(0)
plt.yticks([25,50,75], ["25","50","75"], color="grey", size=7)
plt.ylim(0,100)
ax.plot(angles, values, linewidth=1, linestyle='solid', label = "<NAME>", color = "Green")
ax.fill(angles, values, 'g', alpha=0.1)
ax.plot(angles, values2, linewidth=1, linestyle='solid', label="<NAME>", color = "Orange")
ax.fill(angles, values2, 'r', alpha=0.1)
plt.legend(loc='upper right', bbox_to_anchor=(1.5, 1))
font1 = {'family':'serif','color':'grey','size':7}
plt.title("Player Comparison")
plt.xlabel("*Putting, Driving, and Fairway from Strokes Gained. Rough and Bunker from Green Scramble % ", fontdict = font1)
plt.show()
# COMMAND ----------
| 2.859375
| 3
|
parasite/doc.py
|
SGevorg/parasite
| 9
|
12776017
|
import os
import numpy as np
from glob import glob
from textwrap import wrap
from tabulate import tabulate
from collections import defaultdict
from typing import List, Union, Iterator, Iterable, Tuple, Dict
from typing import TypeVar, Generic
from .applicator import Applicator
T = TypeVar('T', bound='BiText')
class BatchDocs(Generic[T]):
def __init__(self,
docs: Iterable[Tuple[str, T]],
num_docs: int = None):
self.docs = docs
if isinstance(docs, list):
num_docs = len(docs)
self.num_docs = num_docs
def __iter__(self):
return iter(self.docs)
def __len__(self):
return self.num_docs
def __str__(self):
str_repr = ""
prefix: str
doc: T
for prefix, doc in self.docs:
str_repr += f"{prefix}\n{doc}\n\n"
return str_repr
def to_files(self,
output_dir: str,
suffix: str = ''):
prefix: str
doc: T
for prefix, doc in self.docs:
dirname = os.path.dirname(prefix)
basename = os.path.basename(prefix)
path_prefix = os.path.join(output_dir, basename)
doc.to_files(path_prefix, suffix=suffix)
def apply(self,
applicator_type: Union['Applicator', str],
applicator: Union['Applicator', str] = None,
*args,
only_src: bool = False,
only_tgt: bool = False,
progress: str = None,
**kwargs) -> T:
if not isinstance(applicator_type, str):
assert applicator is None
fn = applicator_type
else:
applicator_cls = Applicator.by_name(applicator_type).by_name(applicator)
fn = applicator_cls(*args, **kwargs)
return fn.batch_apply(self,
only_src=only_src,
only_tgt=only_tgt,
progress=progress)
def split(self,
mapping_path: str):
subsets: Dict[str, str] = dict()
with open(mapping_path, 'r') as f:
for line in f:
line = line.rstrip('\n')
prefix, _, subset = line.partition('\t')
subsets[prefix] = subset
docs = list(self.docs)
cls, = set(type(doc) for _, doc in docs)
src_lang, = set(doc.src_lang for _, doc in docs)
tgt_lang, = set(doc.tgt_lang for _, doc in docs)
src_lines: Dict[str, List[str]] = defaultdict(list)
tgt_lines: Dict[str, List[str]] = defaultdict(list)
for prefix, doc in docs:
basename = os.path.basename(prefix)
subset = subsets[basename]
src_lines[subset] += doc.src_lines
tgt_lines[subset] += doc.tgt_lines
merged_docs: List[Tuple[str, T]] = []
for subset in src_lines.keys() | tgt_lines.keys():
doc = cls(src=src_lines[subset],
tgt=tgt_lines[subset],
src_lang=src_lang,
tgt_lang=tgt_lang)
prefix = f'{subset}.{src_lang}-{tgt_lang}.'
merged_docs.append((prefix, doc))
return BatchDocs(merged_docs)
class BiText:
__slots__ = ('src_lang', 'tgt_lang',
'src_lines', 'tgt_lines')
def __init__(self,
src: Union[str, List[str]],
tgt: Union[str, List[str]],
*,
src_lang: str,
tgt_lang: str):
self.src_lang = src_lang
self.tgt_lang = tgt_lang
self.src_lines: List[str]
self.tgt_lines: List[str]
if isinstance(src, str):
self.src_lines = src.split('\n')
else:
self.src_lines = src
if isinstance(tgt, str):
self.tgt_lines = tgt.split('\n')
else:
self.tgt_lines = tgt
@classmethod
def read_lines(cls,
file_path: str) -> Iterator[str]:
with open(file_path, 'r') as f:
for line in f:
line = line.rstrip('\n')
if not line:
continue
yield line
@classmethod
def write_lines(cls,
lines: Iterable[str],
file_path: str):
with open(file_path, 'w') as f:
for line in lines:
f.write(f'{line}\n')
@classmethod
def batch_from_files(cls,
*prefixes: str,
src_lang: str,
tgt_lang: str,
suffix: str = ''):
resolved_prefixes: List[str] = []
for prefix in prefixes:
if not prefix.endswith(f"{src_lang}{suffix}"):
prefix = f"{prefix}{src_lang}{suffix}"
for path in glob(prefix):
suffix_offset = len(src_lang) + len(suffix)
resolved_prefix = path[:-suffix_offset]
resolved_prefixes.append(resolved_prefix)
generator = (
(prefix, cls.from_files(prefix,
src_lang=src_lang,
tgt_lang=tgt_lang,
suffix=suffix))
for prefix in resolved_prefixes
)
return BatchDocs(
generator,
            num_docs=len(resolved_prefixes)
)
@classmethod
def from_files(cls,
prefix: str,
*,
src_lang: str,
tgt_lang: str,
suffix: str = ''):
src_path = f'{prefix}{src_lang}{suffix}'
tgt_path = f'{prefix}{tgt_lang}{suffix}'
src_lines = list(cls.read_lines(src_path))
tgt_lines = list(cls.read_lines(tgt_path))
return cls(src=src_lines,
tgt=tgt_lines,
src_lang=src_lang,
tgt_lang=tgt_lang)
def to_files(self,
prefix: str,
suffix: str = ''):
src_path = f'{prefix}{self.src_lang}{suffix}'
os.makedirs(os.path.dirname(src_path), exist_ok=True)
tgt_path = f'{prefix}{self.tgt_lang}{suffix}'
os.makedirs(os.path.dirname(tgt_path), exist_ok=True)
self.write_lines(self.src_lines, src_path)
self.write_lines(self.tgt_lines, tgt_path)
def segment(self, segmenter: Union['Applicator', str],
*args,
only_src: bool = False,
only_tgt: bool = False,
**kwargs) -> 'BiText':
if isinstance(segmenter, str):
applicator_cls = Applicator.by_name('segmenter').by_name(segmenter)
applicator = applicator_cls(**kwargs)
else:
assert not kwargs
applicator = segmenter
return applicator(self,
only_src=only_src,
only_tgt=only_tgt)
def encode(self, encoder: Union['Applicator', str],
*args,
**kwargs):
if isinstance(encoder, str):
applicator_cls = Applicator.by_name('encoder').by_name(encoder)
applicator = applicator_cls(**kwargs)
else:
assert not kwargs
applicator = encoder
return applicator(self)
@classmethod
def wrap_row(cls, *cols: str, **kwargs) -> List[str]:
return [
'\n'.join(wrap(col, **kwargs))
for col in cols
]
def __str__(self):
src_lines = self.src_lines
tgt_lines = self.tgt_lines
num_src_lines = len(src_lines)
num_tgt_lines = len(tgt_lines)
num_rows = max(num_src_lines, num_tgt_lines)
src_lines = [''] * (num_rows - num_src_lines) + src_lines
tgt_lines = [''] * (num_rows - num_tgt_lines) + tgt_lines
rows = [
self.wrap_row(src_line, tgt_line)
for src_line, tgt_line
in zip(src_lines, tgt_lines)
]
return tabulate(
rows,
headers=[self.src_lang, self.tgt_lang],
tablefmt='grid',
showindex='always'
)
class AlignedBiText(BiText):
def __init__(self,
src: Union[str, List[str]],
tgt: Union[str, List[str]],
*,
src_lang: str,
tgt_lang: str):
super().__init__(src, tgt, src_lang=src_lang, tgt_lang=tgt_lang)
assert len(self.src_lines) == len(self.tgt_lines)
class EncodedBiText(BiText):
__slots__ = ('src_lang', 'tgt_lang',
'src_lines', 'tgt_lines',
'_src_embeddings', '_tgt_embeddings',
'_src_lines', '_tgt_lines',
'num_src_lines', 'num_tgt_lines',
'num_src_tokens', 'num_tgt_tokens')
def __init__(self,
src: Union[str, List[str]],
tgt: Union[str, List[str]],
*,
src_lang: str,
tgt_lang: str,
# num_src_tokens: int,
# num_tgt_tokens: int,
src_embeddings: np.ndarray,
tgt_embeddings: np.ndarray,
num_src_lines: int = None,
num_tgt_lines: int = None):
if num_src_lines is None:
num_src_lines = len(src)
self.num_src_lines = num_src_lines
if num_tgt_lines is None:
num_tgt_lines = len(tgt)
self.num_tgt_lines = num_tgt_lines
super().__init__(src[:self.num_src_lines],
tgt[:self.num_tgt_lines],
src_lang=src_lang, tgt_lang=tgt_lang)
self._src_embeddings = src_embeddings
self._tgt_embeddings = tgt_embeddings
self._src_lines = src
self._tgt_lines = tgt
# self.num_src_tokens = num_src_tokens
# self.num_tgt_tokens = num_tgt_tokens
@property
def src_embeddings(self):
return self._src_embeddings[:self.num_src_lines]
@property
def tgt_embeddings(self):
return self._tgt_embeddings[:self.num_tgt_lines]
def src_windows_embeddings(self):
return self._src_embeddings
def tgt_windows_embeddings(self):
return self._tgt_embeddings
@property
def src_windows_lines(self):
return self._src_lines
@property
def tgt_windows_lines(self):
return self._tgt_lines
| 2.3125
| 2
|
captain_hook/services/pagerduty/__init__.py
|
brantje/captain_hook
| 1
|
12776018
|
<reponame>brantje/captain_hook
from __future__ import absolute_import
from .pagerduty import PagerdutyService
| 1.109375
| 1
|
dashboard/views.py
|
yahyasaadi/jirani
| 0
|
12776019
|
<reponame>yahyasaadi/jirani
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .models import Post, Neighborhood, Business, Contact
from .forms import HoodForm, PostForm
# Create your views here.
@login_required
def home(request):
hood = Neighborhood.objects.filter(user=request.user)
posts = Post.objects.all().order_by('-date_posted')
context ={
'posts':posts,
'hood':hood,
}
return render(request,'dashboard/home.html',context)
@login_required
def hoods(request):
hoods = Neighborhood.objects.all()
context ={
'hoods':hoods,
}
return render(request,'dashboard/hoods.html',context)
# Creating a hood
@login_required
def add_hood(request):
if request.method == 'POST':
hood_name = request.POST['hood_name']
hood_location = request.POST['hood_location']
new_hood = Neighborhood(hood_name=hood_name, hood_location=hood_location, user=request.user)
# new_form = HoodForm()
hood_form = HoodForm(request.POST, instance=new_hood)
if hood_form.is_valid():
hood_form.save()
messages.success(request, f'Your neighborhood has been added.')
return redirect('home')
else:
hood_form = HoodForm(instance=request.user)
return render(request, 'dashboard/add_hood.html', context={'hood_form':hood_form})
@login_required
def post(request, hood_id):
if request.method == 'POST':
title = request.POST['title']
post = request.POST['post']
new_post = Post(title=title, post=post, user=request.user, hood_id=hood_id)
post_form = PostForm(request.POST, instance=new_post)
if post_form.is_valid():
post_form.save()
messages.success(request, f'Your post has been created!')
return redirect('home')
else:
post_form = PostForm(instance=request.user)
return render(request, 'dashboard/new_post.html', context={'post_form':post_form})
# Hood Detail View
@login_required
def hood_detail(request,id):
hood = get_object_or_404(Neighborhood,id=id)
businesses = Business.objects.filter(hood=hood)
contacts = Contact.objects.filter(hood=hood)
# posts = Post.objects.filter(hood_id=id)
context = {
'hood':hood,
'businesses': businesses,
'contacts':contacts
}
return render(request,'dashboard/hood_detail.html', context)
# Individual Hood Posts
@login_required
def hood_posts(request,id):
posts = Post.objects.filter(hood=id).order_by('-date_posted')
return render(request, 'dashboard/hood_posts.html', {'posts':posts})
| 2.125
| 2
|
abstract-factory-pattern/motorcycle/MotorcycleImpl.py
|
lcarnevale/software-pattern-python
| 0
|
12776020
|
from .Motorcycle import Motorcycle
class MotorcycleImpl(Motorcycle):
def useful_function_b(self) -> str:
return "The result of implementing Motorcycle."
| 2.703125
| 3
|
kisn_pylab/kilosort.py
|
Whitlock-Group/KISN-PyLab
| 0
|
12776021
|
# -*- coding: utf-8 -*-
"""
@author: bartulem
Run Kilosort2 through Python.
As it stands (spring/summer 2020), to use Kilosort2 one still requires Matlab. To ensure it works,
one needs a specific combination of Matlab, the GPU driver version and CUDA compiler files.
On the lab computer, I set it up to work on Matlab R2019b, driver version 10.2. (GeForce RTX 2080 Ti)
and v10.1 CUDA. !!! NB: a different Matlab or driver version would require different CUDA files !!!
Additionally, since I don't change the config file from session to session, I wrote the script
below to run Matlab code through Python, such that the whole processing pipeline would remain Pythonic.
Apart from Matlab, in order for this to run, you need to install the "matlab engine"; further instructions
can be found here: https://www.mathworks.com/help/matlab/matlab_external/install-the-matlab-engine-for-python.html
However, if you need to modify the config file, you either need to change the code below accordingly
or go to Matlab and run Kilosort2 the old school way.
"""
import time
import matlab.engine
import os
import sys
def run_kilosort(file_dir, kilosort2_dir):
# check that the data directory is there
if not os.path.exists(file_dir):
print('Could not find data directory {}, try again.'.format(file_dir))
sys.exit()
# check that the Kilosort2 directory is there
if not os.path.exists(kilosort2_dir):
print('Could not find Kilosort directory {}, try again.'.format(kilosort2_dir))
sys.exit()
print('Kilosort2 to be run on file: {}.'.format(file_dir))
# run Kilosort2
print('Running Kilosort2, please be patient - this could take >1 hour.')
start_time = time.time()
eng = matlab.engine.start_matlab()
eng.cd(kilosort2_dir, nargout=0)
eng.ls(nargout=0)
eng.master_kilosort(file_dir, nargout=0)
eng.quit()
print('Finished! Running Kilosort2 took {:.2f} minutes.\n'.format((time.time() - start_time) / 60))
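# Usage sketch (not part of the original module): both paths below are hypothetical and
# machine-specific. run_kilosort expects the recording directory and the local Kilosort2
# checkout that contains the master_kilosort.m entry point called above.
if __name__ == '__main__':
    run_kilosort(file_dir='D:/recordings/session_01',
                 kilosort2_dir='C:/code/Kilosort2')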
| 3
| 3
|
src/3_pd_model_gradient_boosting.py
|
pegodk/lending_club
| 0
|
12776022
|
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, GradientBoostingClassifier
from sklearn.metrics import mean_absolute_error, accuracy_score, roc_curve, roc_auc_score
from src.utils import calc_annual_return_vec, print_test_results
from config import basedir
if __name__ == "__main__":
# Read the datasets
train = pd.read_csv(os.path.join(basedir, 'data', 'processed', 'PD_train_continuous.csv'), sep=";")
test = pd.read_csv(os.path.join(basedir, 'data', 'processed', 'PD_test_continuous.csv'), sep=";")
X_train = np.array(train.drop(columns="good_bad"))
y_train = np.array(train["good_bad"])
X_test = np.array(test.drop(columns="good_bad"))
y_test = np.array(test["good_bad"])
print('Length of training set:', len(y_train))
print('Length of testing set: ', len(y_test))
####################################################################################################################
    ###################################### Gradient Boosting Classification ######################################
####################################################################################################################
reg = GradientBoostingClassifier()
reg.fit(X_train, y_train)
y_train_predict = np.round(reg.predict(X_train), 2)
y_test_predict = np.round(reg.predict(X_test), 2)
y_hat_test = reg.predict(X_test)
print("Accuracy:", accuracy_score(y_test, y_hat_test))
y_hat_test_proba = reg.predict_proba(X_test)[:][:, 1]
predictions = pd.concat([pd.DataFrame(y_test), pd.DataFrame(y_hat_test_proba)], axis=1)
predictions.columns = ["y_test", "y_hat_test_proba"]
fpr, tpr, thresholds = roc_curve(y_test, y_hat_test_proba)
auc = roc_auc_score(y_test, y_hat_test_proba)
plt.figure()
plt.plot(fpr, tpr)
plt.plot(fpr, fpr, linestyle="--", color="k")
plt.xlabel("False positive rate")
plt.ylabel("True positive rate")
plt.title(f"ROC curve (AUC = {np.round(auc, 2)})")
plt.savefig('../results/PD_GradientBoosting_model_auc.png')
plt.savefig(os.path.join(basedir, 'results', 'roc', 'PD_GradientBoosting.png'))
plt.show()
scores = mean_absolute_error(y_test_predict, y_test)
print('Mean Abs Error: {:.2f}'.format(scores))
####################################################################################################################
########################################### Feature Importance ###########################################
####################################################################################################################
print_FeatureImportance = False
if print_FeatureImportance:
importances = reg.feature_importances_
        std = np.std([tree.feature_importances_ for tree in reg.estimators_.ravel()], axis=0)
indices = np.flip(np.argsort(importances), axis=0)
xaxis = np.linspace(0, len(indices) - 1, len(indices))
names = []
for idx in indices:
names.append(train.columns[idx])
ax = plt.figure()
plt.title("Feature Importance")
plt.bar(xaxis, importances[indices] * 100, color="r", yerr=std[indices] * 100, align="center")
plt.xticks(xaxis, names, rotation=90)
plt.ylabel('%')
plt.tight_layout()
plt.savefig(os.path.join(basedir, 'results', 'roc', 'PD_GradientBoosting_FeatureImportance.png'))
####################################################################################################################
####################################### Evaluating Output Results ########################################
####################################################################################################################
print_results = False
if print_results:
idx = y_test_predict > 15.0
print_test_results(f"Yield (15% < predict):", test[idx])
idx = np.logical_and(y_test_predict > 10.0, y_test_predict < 15.0)
print_test_results(f"Yield (10% < predict < 15%):", test[idx])
idx = np.logical_and(y_test_predict > 5.0, y_test_predict < 10.0)
print_test_results(f"Yield (5% < predict < 10%):", test[idx])
idx = np.logical_and(y_test_predict > 0.0, y_test_predict < 5.0)
print_test_results(f"Yield (0% < predict < 5%):", test[idx])
idx = np.logical_and(y_test_predict > -10.0, y_test_predict < 0.0)
print_test_results(f"Yield (-10% < predict < 0%):", test[idx])
idx = np.logical_and(y_test_predict > -20.0, y_test_predict < -10.0)
print_test_results(f"Yield (-20% < predict < -10%):", test[idx])
idx = y_test_predict < -20.0
print_test_results(f"Yield (-20% > predict):", test[idx])
plt.show(block=True)
| 2.734375
| 3
|
rlutils/pytorch/functional.py
|
vermouth1992/rlutils
| 0
|
12776023
|
import numpy as np
import torch
import torch.nn as nn
def soft_update(target: nn.Module, source: nn.Module, tau):
with torch.no_grad():
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)
def hard_update(target: nn.Module, source: nn.Module):
with torch.no_grad():
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(param.data)
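# Usage sketch (network names and the tau value are assumptions): after each gradient step on the
# online network, the target network is typically nudged toward it with e.g.
#   soft_update(target_q_net, online_q_net, tau=0.005)
# while hard_update(target_q_net, online_q_net) copies the weights outright, e.g. at initialization.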
def compute_target_value(reward, gamma, done, next_q):
q_target = reward + gamma * (1.0 - done) * next_q
return q_target
def to_numpy_or_python_type(tensors):
"""Converts a structure of `Tensor`s to `NumPy` arrays or Python scalar types.
For each tensor, it calls `tensor.numpy()`. If the result is a scalar value,
it converts it to a Python type, such as a float or int, by calling
`result.item()`.
Numpy scalars are converted, as Python types are often more convenient to deal
with. This is especially useful for bfloat16 Numpy scalars, which don't
support as many operations as other Numpy values.
Args:
tensors: A structure of tensors.
Returns:
`tensors`, but scalar tensors are converted to Python types and non-scalar
tensors are converted to Numpy arrays.
"""
def _to_single_numpy_or_python_type(t):
if isinstance(t, torch.Tensor):
x = t.detach().cpu().numpy()
return x.item() if np.ndim(x) == 0 else x
return t # Don't turn ragged or sparse tensors to NumPy.
import tensorflow as tf
return tf.nest.map_structure(_to_single_numpy_or_python_type, tensors)
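# Usage sketch (inputs are assumptions): nested containers are converted in one call, e.g.
#   to_numpy_or_python_type({'loss': torch.tensor(0.5), 'q': torch.zeros(3)})
# returns {'loss': 0.5, 'q': array([0., 0., 0.], dtype=float32)}.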
| 2.984375
| 3
|
projects/zfssa_projects.py
|
aldenso/zfssa-scripts
| 1
|
12776024
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @CreateTime: Jun 18, 2017 1:13 PM
# @Author: <NAME>
# @Contact: <EMAIL>
# @Last Modified By: <NAME>
# @Last Modified Time: Jun 18, 2017 3:45 PM
# @Description: Modify Here, Please
from __future__ import print_function, division
import re
import json
import csv
from datetime import datetime
import ast
import argparse
import logging
import requests
from requests.exceptions import HTTPError, ConnectionError
from urllib3.exceptions import InsecureRequestWarning
import yaml
# to disable warning
# InsecureRequestWarning: Unverified HTTPS request is being made.
# Adding certificate verification is strongly advised. See:
# https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
requests.urllib3.disable_warnings(InsecureRequestWarning)
START = datetime.now()
ZFSURL = "" # API URL (https://example:215/api)
ZAUTH = () # API Authentication tuple (username, password)
HEADER = {"Content-Type": "application/json"}
LOGFILE = "projects_output.log"
def create_parser():
"""Get Arguments"""
parser = argparse.ArgumentParser(
description="Script to handle projects in ZFS Storage Appliance")
parser.add_argument(
"-s", "--server", type=str, help="Server config file (YAML)", required=True)
parser.add_argument(
"-f", "--file", type=str, help="projects file (CSV)", required=True)
parser.add_argument(
"-p", "--progress", action="store_true", help="progress bar and logging to file",
required=False)
group = parser.add_mutually_exclusive_group()
group.add_argument("-c", "--create", action="store_true",
help="Create projects specified in csv file")
group.add_argument("-d", "--delete", action="store_true",
help="Delete projects specified in csv file")
group.add_argument("-l", "--list", action="store_true",
help="List/Check projects specified in csv file")
return parser
def read_project_file(filename):
"""Read projects csv file and return the list."""
projectlist = []
with open(filename, 'r') as cvsfile:
filereader = csv.reader(cvsfile, delimiter=',')
for row in filereader:
projectlist.append(row)
del projectlist[0]
return projectlist
def read_yaml_file(configfile):
"""Read config file and return credentials in json."""
config = {}
with open(configfile, 'r') as configuration:
try:
            config = yaml.load(configuration, Loader=yaml.SafeLoader)
        except yaml.YAMLError as error:
            print("Error in configuration file: {}".format(error))
return config
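# Example server config file (YAML), based on the keys read in main(); values are hypothetical:
#   ip: 192.168.1.10
#   username: admin
#   password: <PASSWORD>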
suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
def response_size(nbytes):
i = 0
while nbytes >= 1024 and i < len(suffixes)-1:
nbytes /= 1024.
i += 1
f = ('%.2f' % nbytes).rstrip('0').rstrip('.')
return '%s %s' % (f, suffixes[i])
def create_project(fileline):
"""Create Project from csv file. (err, msg)"""
if len(fileline) != 20:
return True, "CREATE - FAIL - Error in line {} It needs to be 20 columns long"\
.format(fileline)
pool, project, mountpoint, quota, reservation, compression, dedup, logbias, nodestroy,\
recordsize, readonly, atime, default_sparse, default_user, default_group, default_permissions,\
default_volblocksize, default_volsize, sharenfs, sharesmb = fileline
fullurl = ZFSURL + "/storage/v1/pools/{}/projects"\
.format(pool)
# converted_size = get_real_size(size, size_unit)
# real_blocksize = get_real_blocksize(blocksize)
try:
data = {"name": project,
"mountpoint": mountpoint,
"quota": quota,
"reservation": reservation,
"compression": compression,
"dedup": dedup,
"logbias": logbias,
"nodestroy": nodestroy,
"recordsize": recordsize,
"readonly": readonly,
"atime": atime,
"default_sparse": default_sparse,
"default_user": default_user,
"default_group": default_group,
"default_permissions": default_permissions,
"default_volblocksize": default_volblocksize,
"default_volsize": default_volsize,
"sharenfs": sharenfs,
"sharesmb": sharesmb}
if quota == 'None' and reservation == 'None':
del data["quota"]
del data["reservation"]
elif quota == 'None':
del data["quota"]
elif reservation == 'None':
del data["reservation"]
req = requests.post(fullurl, data=json.dumps(data),
auth=ZAUTH, verify=False, headers=HEADER)
j = json.loads(req.text)
if 'fault' in j:
if 'message' in j['fault']:
return True, "CREATE - FAIL - project '{}' pool '{}' - Error {}"\
.format(project, pool, j['fault']['message'])
req.close()
req.raise_for_status()
return False, "CREATE - SUCCESS - project '{}' pool '{}'".format(project, pool)
except HTTPError as error:
if error.response.status_code == 401:
exit("CREATE - FAIL - project '{}' pool '{}' - Error {}".format(project, pool,
error.message))
else:
return True, "CREATE - FAIL - project '{}' pool '{}' - Error {}"\
.format(project, pool, error.message)
except ConnectionError as error:
return True, "CREATE - FAIL - project '{}' pool '{}' - Error {}"\
.format(project, pool, error.message)
def delete_project(fileline):
"""Delete project specified in csv file (err, msg)"""
if len(fileline) != 2:
return True, "DELETE - FAIL - Error in line {} It needs to be 2 columns long"\
.format(fileline)
pool, project = fileline
fullurl = ZFSURL + "/storage/v1/pools/{}/projects/{}".format(pool, project)
try:
req = requests.delete(fullurl, auth=ZAUTH, verify=False, headers=HEADER)
req.close()
req.raise_for_status()
return False, "DELETE - SUCCESS - project '{}' pool '{}'".format(project, pool)
except HTTPError as error:
if error.response.status_code == 401:
exit("DELETE - FAIL - project '{}' pool '{}' - Error {}".format(project, pool,
error.message))
else:
return True, "DELETE - FAIL - project '{}' pool '{}' - Error {}"\
.format(project, pool, error.message)
except ConnectionError as error:
return True, "DELETE - FAIL - project '{}' pool '{}' - Error {}"\
.format(project, pool, error.message)
def list_projects(fileline):
"""List/Show projects specified in csv file (err, msg)"""
pool = project = None
if len(fileline) == 2:
pool, project = fileline
elif len(fileline) == 20:
pool, project, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = fileline
else:
return True, "LIST - FAIL - Error in line {} It needs to be 2 or 20 columns long"\
.format(fileline)
fullurl = ZFSURL + "/storage/v1/pools/{}/projects/{}".format(pool, project)
try:
req = requests.get(fullurl, auth=ZAUTH, verify=False, headers=HEADER)
j = json.loads(req.text)
req.close()
req.raise_for_status()
return False, "LIST - PRESENT - project '{}' pool '{}' mountpoint '{}' quota '{}' "\
"reservation '{}' compression '{}' dedup '{}' logbias '{}' nodestroy '{}' "\
"recordsize '{}' readonly '{}' atime '{}' def_sparse '{}' def_user '{}' "\
"def_group '{}' def_perms '{}' def_volblocksize '{}' def_volsize '{}' "\
"sharenfs '{}' sharesmb '{}'"\
.format(j["project"]["name"],
j["project"]["pool"],
j["project"]["mountpoint"],
response_size(j["project"]["quota"]),
response_size(j["project"]["reservation"]),
j["project"]["compression"],
j["project"]["dedup"],
j["project"]["logbias"],
j["project"]["nodestroy"],
response_size(j["project"]["recordsize"]),
j["project"]["readonly"],
j["project"]["atime"],
j["project"]["default_sparse"],
j["project"]["default_user"],
j["project"]["default_group"],
j["project"]["default_permissions"],
response_size(j["project"]["default_volblocksize"]),
response_size(j["project"]["default_volsize"]),
j["project"]["sharenfs"],
j["project"]["sharesmb"])
except HTTPError as error:
if error.response.status_code == 401:
exit("LIST - FAIL - project '{}', pool '{}' - Error {}".format(project, pool,
error.message))
else:
return True, "LIST - FAIL - project '{}' pool '{}' - Error {}"\
.format(project, pool, error.message)
except ConnectionError as error:
return True, "LIST - FAIL - project '{}' pool '{}' - Error {}"\
.format(project, pool, error.message)
def createprogress(count):
"""Return Bar class with max size specified"""
progressbar = Bar(message='Processing',
suffix='%(index)d/%(max)d - remain: %(remaining)d'
' - %(percent).1f%% - %(eta)ds',
max=count)
return progressbar
def createlogger():
"""Return logger"""
# create logger with 'progress bar'
logger = logging.getLogger('projects')
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler(LOGFILE)
# create formatter and add it to the handler
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
# add the handler to logger
logger.addHandler(fh)
return logger
def main(args):
"""Run all projects actions"""
csvfile = args.file
listprojects = args.list
createproject = args.create
deleteproject = args.delete
projectlistfromfile = read_project_file(csvfile)
configfile = args.server
config = read_yaml_file(configfile)
global ZFSURL, ZAUTH
ZFSURL = "https://{}:215/api".format(config['ip'])
ZAUTH = (config['username'], config['password'])
if createproject:
if args.progress:
progbar = createprogress(len(projectlistfromfile))
logger = createlogger()
for entry in projectlistfromfile:
err, msg = create_project(entry)
if err:
                    logger.warning(msg)
else:
logger.info(msg)
progbar.next()
progbar.finish()
else:
print("#" * 79)
print("Creating projects")
print("#" * 79)
for entry in projectlistfromfile:
print(create_project(entry)[1])
print("=" * 79)
elif deleteproject:
if args.progress:
progbar = createprogress(len(projectlistfromfile))
logger = createlogger()
for entry in projectlistfromfile:
err, msg = delete_project(entry)
if err:
                    logger.warning(msg)
else:
logger.info(msg)
progbar.next()
progbar.finish()
else:
print("#" * 79)
print("Deleting projects")
print("#" * 79)
for entry in projectlistfromfile:
print(delete_project(entry)[1])
print("=" * 79)
elif listprojects:
if args.progress:
progbar = createprogress(len(projectlistfromfile))
logger = createlogger()
for entry in projectlistfromfile:
err, msg = list_projects(entry)
if err:
                    logger.warning(msg)
else:
logger.info(msg)
progbar.next()
progbar.finish()
else:
print("#" * 79)
print("Listing projects")
print("#" * 79)
for entry in projectlistfromfile:
print(list_projects(entry)[1])
print("=" * 79)
else:
print("#" * 79)
print("You need to specify an option (--list, --create, --delete)")
print("#" * 79)
delta = datetime.now() - START
print("Finished in {} seconds".format(delta.seconds))
if __name__ == "__main__":
parser = create_parser()
args = parser.parse_args()
if args.progress:
try:
from progress.bar import Bar
except ImportError as err:
print("You need to install progress: pip install progress - Error: {}".format(err))
exit(1)
main(args)
| 2.28125
| 2
|
plugins/sort_by_article_count/__init__.py
|
julianespinel/website
| 0
|
12776025
|
from pelican import signals
from . import count
def add_filter(pelican):
"""Add count_elements filter to Pelican."""
pelican.env.filters.update(
{'sort_by_article_count': count.sort_by_article_count})
def register():
"""Plugin registration."""
signals.generator_init.connect(add_filter)
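# In a Pelican/Jinja2 template the registered filter can then be applied to an iterable,
# e.g. (hypothetical usage): {{ categories|sort_by_article_count }}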
| 1.867188
| 2
|
event_evaluator.py
|
aria-jpl/coseismic_usgs_neic_evaluator
| 0
|
12776026
|
#!/usr/bin/env python
'''
Takes in the USGS NEIC event object, then determines whether it passes the
input filter criteria. If it does, an AOI for the event is created and
submitted via create_aoi.
'''
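# Example invocation (the event path and threshold values below are hypothetical):
#   python event_evaluator.py -e /data/events/us7000abcd -m 6.0 -t 70 -w 100 -r master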
from __future__ import division
from builtins import range
from past.utils import old_div
import os
import re
import json
import argparse
import datetime
import dateutil.parser
import requests
import submit_create_aoi
import submit_slack_notification
import track_displacement_evaluator
import pytz
import math
from shapely import wkt
import geojson
from shapely.geometry import shape, Point, Polygon
from shapely.ops import nearest_points
import constants
def main(event_path, depth_filter=None, mag_filter=None, alertlevel_filter=None, polygon_filter=None, slack_notification=None, water_filter=False, dynamic_threshold=False, create_aoi_version='master', days_pre_event=30, days_post_event=30, distance_from_land=50):
'''runs the filters on the input event. If it passes it generates appropriate metadata and submits the aoi'''
event = get_event(event_path)
print('found event: {0}'.format(event))
# calculate relevant event information such as mag, extent, etc
event_info = calculate_event_info(event)
# determine if the event passes the requisite filters
#if not pass_filters(event_info, depth_filter, mag_filter, alertlevel_filter, polygon_filter, water_filter, dynamic_threshold, distance_from_land):
if not pass_filters(event_info, depth_filter, mag_filter, alertlevel_filter, polygon_filter, water_filter, dynamic_threshold):
print("Event failed to pass filters....not generating AOI.")
return
# call displacement code
event_tracks = track_displacement_evaluator.main(event['location']['coordinates'], event_info['location']['coordinates'])
# submit job for event AOI
#params = build_params(event, event_info, days_pre_event, days_post_event, aoi, False)
# submit the aoi
#submit_create_aoi.main(params, create_aoi_version, 'factotum-job_worker-small', '8', 'create_neic_event_aoi')
for event_track in event_tracks:
# set the end time for the AOITRACKs 5 years into the future so they remain active for long-term analysis
days_post_event = 365 * 5
# process the aoi params
params = build_params(event, event_info, days_pre_event, days_post_event, event_track, True)
print(params)
# submit the aoi
submit_create_aoi.main(params, create_aoi_version, 'factotum-job_worker-small', '8', 'create_neic_event_aoi')
# run slack notification
# mlucas if slack_notification:
# mlucas run_slack_notification(event, slack_notification)
#def pass_filters(event_info, depth_filter, mag_filter, alertlevel_filter, polygon_filter, water_filter, dynamic_threshold, distance_from_land):
def pass_filters(event_info, depth_filter, mag_filter, alertlevel_filter, polygon_filter, water_filter, dynamic_threshold):
'''runs all requisite filters, returning true if it needs to process, false if not'''
# if it's a test, just pass it
if event_info['id'] == 'USGS_NEIC_us1000test':
return True
# run polygon filter
if polygon_filter:
if not run_polygon_filter(event_info, polygon_filter):
print("Event failed polygon filter.")
return False
# run distance filter
# if distance_from_land:
# if not run_distance_filter(event_info, distance_from_land):
# print("Event failed distance filter.")
# return False
# run depth filter
if depth_filter:
if not run_depth_filter(event_info, float(depth_filter)):
print('Event failed depth filter.')
return False
# run water filter
if water_filter:
if not run_water_filter(event_info, float(water_filter)):
print('Event failed water mask filter.')
return False
print('Event passed water mask filter.')
# run dynamic thresholding
# if dynamic_threshold:
# if run_dynamic_threshold(event_info):
# print('event meets dynamic threshold, submitting event.')
# return True
# else:
# print('event does not meet dynamic threshold. not submitting event.')
# return False
if mag_filter: # run magnitude filter
        if event_info['mag'] >= float(mag_filter):
print('Event passed magnitude filter, processing')
return True
else:
print('Event failed magnitude filter, not processing')
return False
if alertlevel_filter: # run alertlevel filter
if alertlevel_reaches(event_info['alert'], alertlevel_filter):
print('Event passes alertlevel filter, processing')
return True
else:
print('Event fails alertlevel filter, not processing.')
return False
print('Event has not been excluded by filters, processing.')
return True
def calculate_event_info(event):
'''builds a dict of relevant event information, such as magnitude, region, etc, returns it as a dict'''
event_id = get_met(event, 'id')
event_mag = float(get_met(event, 'mag'))
event_alertlevel = get_met(event, 'alert')
event_location = get_met(event, 'epicenter')
event_lat = event_location['coordinates'][1]
event_lon = event_location['coordinates'][0]
event_depth = float(event['metadata']['geometry']['coordinates'][2])
# determine event extent
event_geojson = determine_extent(event_lat, event_lon, event_mag)
# call displacement_evaluator here
return {'id': event_id, 'mag': event_mag, 'depth': event_depth, 'alertlevel': event_alertlevel, 'location': event_geojson, 'lat': event_lat, 'lon': event_lon}
def run_water_filter(event_info, amount):
'''returns True if it passes the mask or fails to load/run the mask'''
try:
# lazy loading
import lightweight_water_mask
print("Geojson being processed: {}".format(event_info['location']))
land_area = lightweight_water_mask.get_land_area(event_info['location'])
print("Land area is: {}".format(land_area))
if land_area > amount:
print("Land area of event is {}".format(land_area))
print("Threshold: {}".format(amount))
return True
else:
print("Land area less than {}".format(amount))
except Exception as err:
print('Failed on water masking: {}'.format(err))
return True
return False
def run_depth_filter(event_info, depth_filter):
'''returns True if it passes the mask, False otherwise. True == depth < depth_filter'''
depth = float(event_info['depth'])
if depth >= depth_filter:
return False
return True
def run_distance_filter(event_info, distance_from_land):
''' Returns True if the event epicenter is within the specified distance from land; otherwise, False'''
print("Running distance filter...")
nearest_distance = None
# Read config file that defines region geojson and region-specific params
try:
f = open('/home/ops/verdi/ops/usgs_neic_evaluator/config/regions.json')
except Exception as e:
print(e)
data = json.load(f)
for region in data:
# If a distance_from_land parameter is specified in the region, pull it; if not, use default
print("Evaluating region: ".format(region['region_name']))
region_distance_from_land = region.get('distance_from_land')
if isValid(region_distance_from_land):
print("Distance from land parameter specified within region config; overwriting default value to {}".format(region_distance_from_land))
tmp_distance_from_land = int(region_distance_from_land)
else:
print("Distance from land parameter NOT specified within region config; using default value of {}".format(distance_from_land))
tmp_distance_from_land = distance_from_land
try:
# Create shape objects from region geojson defined in config
s = json.dumps(region['region_geojson'])
p = shape(geojson.loads(s))
polygon = wkt.loads(str(p))
# Create point object from event epicenter
#lng = event_info["event_location"]["coordinates"][1]
#lat = event_info["event_location"]["coordinates"][0]
lng = event_info["lon"]
lat = event_info["lat"]
point = Point(lng, lat)
# If event overlaps with region, no need to calculate distance; event will be processed
if point.within(polygon):
print("Event epicenter is within a defined region. Processing event.")
return True
np1, np2 = nearest_points(polygon, point)
nearest_distance = haversine(np1.y, np1.x, point.y, point.x)
except Exception as e:
print(e)
if nearest_distance <= tmp_distance_from_land:
print("Event will be processed. The distance between the two closest is: {}".format(nearest_distance))
return True
else:
print("Event distance from this region is too great. The distance between the two closest is: {}".format(nearest_distance))
f.close()
return False
def isValid(region_distance_from_land):
''' Make sure the distance from land value is valid '''
try:
if (region_distance_from_land is not None and
region_distance_from_land != "" and
                (int(region_distance_from_land) >= 0)):
return True
except:
return False
def haversine(lat1, lon1, lat2, lon2):
'''
Calculate the distance between two points
'''
lat1 = math.radians(lat1)
lon1 = math.radians(lon1)
lat2 = math.radians(lat2)
lon2 = math.radians(lon2)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2
c = 2 * math.asin(math.sqrt(a))
return constants.EARTH_RADIUS * c
def get_coord(lat, lng):
lat_r = math.radians(lat)
lng_r = math.radians(lng)
x = constants.EARTH_RADIUS * math.cos(lat_r) * math.cos(lng_r)
y = constants.EARTH_RADIUS * math.cos(lat_r) * math.sin(lng_r)
return x, y
def run_polygon_filter(event_info, polygon_filter):
'''runs the event through a spatial filter. returns True if it passes, False otherwise.'''
if type(polygon_filter) is str:
polygon_filter = json.loads(polygon_filter)
event_geojson = event_info['location']['coordinates']
if is_overlap(polygon_filter, event_geojson):
return True
return False
def is_overlap(geojson1, geojson2):
'''returns True if there is any overlap between the two geojsons. The geojsons
are just a list of coordinate tuples'''
p1 = Polygon(geojson1)
p2 = Polygon(geojson2)
return p1.intersects(p2)
def run_dynamic_threshold(event_info):
'''runs a series of filters, designed to pick up relevant events'''
event_mag = event_info['mag']
event_alertlevel = event_info['alertlevel']
if event_mag >= 7.0:
return True
if event_mag >= 6.0 and alertlevel_reaches(event_alertlevel, 'yellow'):
return True
if alertlevel_reaches(event_alertlevel, 'red'):
return True
return False
def alertlevel_reaches(event_level, comparison_level):
    '''looks to see if the event alert level is at or above the comparison level, returns True if it is,
    False otherwise'''
if event_level is None:
return False
alert_dict = {'green': 1, 'yellow': 2, 'orange': 3, 'red': 4}
if alert_dict[event_level] < alert_dict[comparison_level]:
return False
return True
def get_event(event_path):
'''loads the event json as a dict'''
event_filename = os.path.basename(event_path)
event_ds_path = os.path.join(event_path, event_filename + '.dataset.json')
event_met_path = os.path.join(event_path, event_filename + '.met.json')
cwd = os.getcwd()
event_object = {}
with open(event_ds_path) as f:
event_object = json.load(f)
with open(event_met_path) as f:
event_object['metadata'] = json.load(f)
return event_object
def get_met(product, key):
if key in list(product.keys()):
return product[key]
if '_source' in list(product.keys()) and key in list(product['_source'].keys()):
return product['_source'][key]
if '_source' in list(product.keys()) and 'metadata' in list(product['_source'].keys()) and key in list(product['_source']['metadata'].keys()):
return product['_source']['metadata'][key]
if 'metadata' in list(product.keys()) and key in list(product['metadata'].keys()):
return product['metadata'][key]
if 'metadata' in list(product.keys()) and 'properties' in list(product['metadata'].keys()) and key in list(product['metadata']['properties'].keys()):
return product['metadata']['properties'][key]
if 'properties' in list(product.keys()) and key in product['properties']:
return product['properties'][key]
return False
def shift(lat, lon, bearing, distance):
R = 6378.1 # Radius of the Earth
bearing = old_div(math.pi * bearing, 180) # convert degrees to radians
lat1 = math.radians(lat) # Current lat point converted to radians
lon1 = math.radians(lon) # Current long point converted to radians
lat2 = math.asin(math.sin(lat1) * math.cos(old_div(distance, R)) +
math.cos(lat1) * math.sin(old_div(distance, R)) * math.cos(bearing))
lon2 = lon1 + math.atan2(math.sin(bearing) * math.sin(old_div(distance, R)) * math.cos(lat1),
math.cos(old_div(distance, R)) - math.sin(lat1) * math.sin(lat2))
lat2 = math.degrees(lat2)
lon2 = math.degrees(lon2)
return [lon2, lat2]
def determine_extent(lat, lon, mag):
lat = float(lat)
lon = float(lon)
mag = float(mag)
distance = (mag - 5.0) / 2.0 * 150
l = list(range(0, 361, 20))
coordinates = []
for b in l:
coords = shift(lat, lon, b, distance)
coordinates.append(coords)
return {"coordinates": [coordinates], "type": "Polygon"}
def build_params(event, event_info, days_pre_event, days_post_event, event_track, isTrack):
'''builds parameters for a job submission from the event, which creates the aoi,
and returns those parameters'''
# loads the config json
current_dir = os.path.dirname(os.path.realpath(__file__))
params_path = os.path.join(current_dir, 'config', 'aoi_params.json')
params = json.load(open(params_path, 'r'))
aoi_name = build_aoi_name(event, event_info, isTrack)
# geojson_polygon = event_info['location']
aoi_event_time = get_met(event, 'starttime')
starttime = determine_time(aoi_event_time, -1 * float(days_pre_event))
eventtime = get_met(event, 'starttime')
endtime = determine_time(aoi_event_time, float(days_post_event))
aoi_image_url = parse_browse_url(event)
event_metadata = build_event_metadata(event, event_info) # builds additional metadata to be displayed
params['starttime'] = starttime
params['eventtime'] = eventtime
params['endtime'] = endtime
params['additional_metadata']['image_url'] = aoi_image_url
params['additional_metadata']['event_metadata'] = event_metadata
if isTrack:
params['name'] = aoi_name + "_" + str(event_track[0])
params['geojson_polygon'] = json.loads(event_track[1])
params['additional_metadata']['event_metadata']['track_number'] = event_track[0]
params['additional_metadata']['event_metadata']['orbit_direction'] = event_track[2]
params['track_number'] = event_track[0]
params['orbit_direction'] = event_track[2]
#params['water_masked_geojson_polygon'] = event_track[3]
params['additional_metadata']['event_metadata']['water_masked_geojson_polygon'] = json.loads(event_track[3])
else:
params['name'] = aoi_name
params['geojson_polygon'] = event_track
params['track_number'] = ""
params['orbit_direction'] = ""
# load account and username from context
context = load_json('_context.json')
params['account'] = context['account']
params['username'] = context['username']
return params
def load_json(file_path):
'''load the file path into a dict and return the dict'''
with open(file_path, 'r') as json_data:
json_dict = json.load(json_data)
json_data.close()
return json_dict
def build_event_metadata(event, event_info):
'''builds info that goes into the aoi met, event_metadata field, that is displayed'''
event_met = {}
event_met['event id'] = event_info['id']
event_met['magnitude'] = event_info['mag']
event_met['depth'] = event_info['depth']
event_met['location'] = get_met(event, 'place')
event_met['latitude'] = event_info['lat']
event_met['longitude'] = event_info['lon']
event_met['label'] = get_met(event, 'title')
try:
event_met['time'] = convert_epoch_time_to_utc(get_met(event, 'time'))
except:
pass
event_met['pager_status'] = event_info['alertlevel']
event_met['tsunami warning'] = get_met(event, 'tsunami')
event_met['usgs information'] = 'https://earthquake.usgs.gov/earthquakes/eventpage/{0}'.format(event_info['id'])
return event_met
def build_aoi_name(event, event_info, isTrack):
'''attempts to build a readable event name'''
if isTrack:
try:
id_str = get_met(event, 'id')
place = get_met(event, 'place')
regex = re.compile(' of (.*)[,]? (.*)')
match = re.search(regex, place)
location_str = '{0}_{1}'.format(match.group(1), match.group(2))
location_str = location_str.replace(',', '')
mag = get_met(event, 'mag')
mag_str = "{0:0.1f}".format(float(mag))
return 'AOITRACK_eq_usgs_neic_pdl_{0}_{1}_{2}'.format(id_str, mag_str, location_str)
except:
return 'AOITRACK_eq_usgs_neic_pdl_{0}'.format(event_info['id'])
else:
try:
id_str = get_met(event, 'id')
place = get_met(event, 'place')
regex = re.compile(' of (.*)[,]? (.*)')
match = re.search(regex, place)
location_str = '{0}_{1}'.format(match.group(1), match.group(2))
location_str = location_str.replace(',', '')
mag = get_met(event, 'mag')
mag_str = "{0:0.1f}".format(float(mag))
return 'AOI_monitoring_{0}_{1}_{2}'.format(id_str, mag_str, location_str)
except:
return 'AOI_monitoring_{0}'.format(event_info['id'])
def convert_epoch_time_to_utc(epoch_timestring):
dt = datetime.datetime.utcfromtimestamp(epoch_timestring).replace(tzinfo=pytz.UTC)
return dt.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] # use microseconds and convert to milli
def determine_time(time_string, offset):
initial_time = dateutil.parser.parse(time_string).replace(tzinfo=pytz.UTC)
final_time = initial_time + datetime.timedelta(days=offset)
return final_time.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3]
def parse_browse_url(event):
'''Pull the event detail json from the feed and attempt to extract the shakemap. Return None if fails.'''
try:
url = event['properties']['detail']
session = requests.session()
response = session.get(url)
json_data = json.loads(response.text)
browse_url = json_data['properties']['products']['shakemap'][0]['contents']['download/tvmap.jpg']['url']
return browse_url
except:
print('Failed to parse browse url')
return None
def build_longlabel(event):
estr = get_met(event, 'place') # ex: "69km WSW of Kirakira, Solomon Islands"
regex = re.compile(' of (.*)[,]? (.*)')
match = re.search(regex, estr)
if match:
product_name = '%s %s' % (match.group(1), match.group(2))
else:
product_name = estr
product_name = product_name.replace(' ', '_')
return product_name.replace(',', '')
def run_slack_notification(event, slack_notification):
'''submit slack webhook, requires slack notification key'''
event_vals = get_met(event, 'metadata')
submit_slack_notification.slack_notify(event_vals, slack_notification)
def parser():
'''
Construct a parser to parse arguments
@return argparse parser
'''
parse = argparse.ArgumentParser(description="Run PAGER query with given parameters")
parse.add_argument("-e", "--event_path", required=True, help="path to the event file", dest="event_path")
parse.add_argument("-t", "--depth_filter", required=False, default=None, help="Maximum depth filter in km", dest="depth_filter")
parse.add_argument("-m", "--mag_filter", required=False, default=None, help="Minimum magnitude filter", dest="mag_filter")
parse.add_argument("-a", "--alertlevel_filter", required=False, default=None, help="Minium pager alert level filter", choices=['green', 'yellow', 'orange', 'red'], dest="alertlevel_filter")
parse.add_argument("-p", "--polygon_filter", required=False, default=None, help="Geojson polygon filter", dest="polygon_filter")
parse.add_argument("-s", "--slack_notification", required=False, default=False, help="Key for slack notification, will notify via slack if provided.", dest="slack_notification")
parse.add_argument("-w", "--water_filter", required=False, default=False, help="Water filter. If provided, use minimum number of square kilometers in the aoi required to pass the filter.", dest="water_filter")
parse.add_argument("-d", "--dynamic_threshold", required=False, default=False, action='store_true', help="Flag for whether a dynamic threshold is used. Takes priority over pager & mag filters.", dest="dynamic_threshold")
parse.add_argument("-r", "--create_aoi_version", required=False, default='master', help="Version of create_aoi to submit", dest="create_aoi_version")
parse.add_argument("--days_pre_event", required=False, default=30, help="Days for the AOI to span pre-event", dest="days_pre_event")
parse.add_argument("--days_post_event", required=False, default=30, help="Days for the AOI to span post-event", dest="days_post_event")
parse.add_argument("--distance_from_land", required=False, default=50, help="Distance from land (km)", dest="distance_from_land")
return parse
if __name__ == '__main__':
args = parser().parse_args()
main(event_path=args.event_path, depth_filter=args.depth_filter, mag_filter=args.mag_filter, alertlevel_filter=args.alertlevel_filter, polygon_filter=args.polygon_filter, slack_notification=args.slack_notification, water_filter=args.water_filter, dynamic_threshold=args.dynamic_threshold,
create_aoi_version=args.create_aoi_version, days_pre_event=args.days_pre_event, days_post_event=args.days_post_event, distance_from_land=args.distance_from_land)
| 2.3125
| 2
|
sklearn_pmml_model/linear_model/__init__.py
|
iamDecode/sklearn-pmml-model
| 62
|
12776027
|
"""
The :mod:`sklearn_pmml_model.linear_model` module implements generalized linear models.
"""
# License: BSD 2-Clause
from .implementations import PMMLLinearRegression, PMMLLogisticRegression, PMMLRidge, \
PMMLRidgeClassifier, PMMLLasso, PMMLElasticNet
__all__ = [
'PMMLLinearRegression',
'PMMLLogisticRegression',
'PMMLRidge',
'PMMLRidgeClassifier',
'PMMLLasso',
'PMMLElasticNet'
]
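# Usage sketch (the PMML file path is an assumption): these estimators are constructed from an
# exported PMML document and then behave like regular scikit-learn models, e.g.
#   clf = PMMLLogisticRegression(pmml='models/logistic-regression.pmml')
#   clf.predict(X_test)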
| 1.234375
| 1
|
services/__init__.py
|
S4CH/discord-bot
| 1
|
12776028
|
from .group import GroupMeet
| 1.070313
| 1
|
shapeft/views.py
|
dksivagis/shpescape
| 1
|
12776029
|
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import time
from django.conf import settings
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden, Http404
from django.contrib.gis.geos import fromstr, LineString
from django.contrib.gis.models import SpatialRefSys
from django.contrib.gis.gdal import DataSource, OGRGeometry
from django.utils.datastructures import SortedDict
import simplejson
from shapes.forms import UploadForm
from ft_auth.views import *
from shapeft.models import shapeUpload
#@cache_page(60*5)
def static(request, template):
if not template:
template = "index.html"
return render_to_response(template, RequestContext(request,{}))
def generic_import(request):
"""
accept an uploaded file and create associated shapeUpload obj
"""
token = get_token(request)
if not token:
return HttpResponseRedirect('/auth/FTVerify')
if request.method == 'POST':
form = UploadForm(request.POST, request.FILES)
if form.is_valid():
form.handle(request.FILES['file_obj'])
create_simplify = request.POST.get('create_simplify', False);
create_centroid = request.POST.get('create_centroid', False);
create_centroid_poly = request.POST.get('create_centroid_poly', False);
#save form info in a model, and run from cron
uids = []
for shapefile in form.shapefiles:
upload = shapeUpload()
upload.auth_token = token
upload.shapefile = shapefile
upload.status = 1
                upload.create_simplify = bool(create_simplify)
                upload.create_centroid = bool(create_centroid)
                upload.create_centroid_poly = bool(create_centroid_poly)
                # save after the processing flags are set so they persist for the cron worker
                upload.save()
uids.append(upload.uid)
url = '/uploads/%s/' % 'g'.join(uids)
return HttpResponseRedirect(url)
else:
form = UploadForm()
return render_to_response('upload.html', RequestContext(request,{
'form': form}))
def upload_detail(request, upload_ids):
"""
display status of one or more shapeUploads
"""
uids = upload_ids.split('g')
uploads = shapeUpload.objects.filter(uid__in=uids).order_by('id')
#upload = get_object_or_404(shapeUpload, id=upload_id)
return render_to_response('upload_detail.html', RequestContext(request,{
'uploads': uploads}))
def import_from_shape(upload,
start_row=0,
max_rows=200000,
create_int_style_cols=True):
"""
    upload - a shapeUpload object
max_rows - any more than this is ignored
centroid - if it's a (multi)polygon, should we also create a geometry_centroid field
"""
upload.status = 2 #set this right away so it doesn't get reprocessed
upload.save()
ds = DataSource(upload.shapefile)
layer = ds[0]
fields = layer.fields
num_features = len(layer)
#set max # of _style features
max_distinct_style_vals = max(min(num_features / 100, 50),10)
print 'there are %d features' % num_features
upload.total_rows = num_features
if not num_features:
print 'no rows, returning'
upload.status = 6
upload.save()
return
rows = []
#get field types
field_map = {
'OFTString':'STRING',
'OFTReal':'NUMBER',
'OFTInteger':'NUMBER',
'OFTDate':'DATETIME'
}
field_types = [field_map[f.__name__] for f in layer.field_types]
field_layers = layer.fields
#insert geometry layers first
field_layers.insert(0,'geometry')
field_types.insert(0,'LOCATION')
field_layers.insert(1,'geometry_vertex_count')
field_types.insert(1,'NUMBER')
if upload.create_simplify:
field_layers.insert(0,'geometry_simplified')
field_types.insert(0,'LOCATION')
field_layers.insert(1,'geometry_simplified_vertex_count')
field_types.insert(1,'NUMBER')
#use sorted dict so we can ensure table has geom columns upfront
field_dict = SortedDict(zip(field_layers, field_types))
#set up extra fields if creating int/style cols
if create_int_style_cols:
int_style_dict = {}
for field,field_type in field_dict.items():
if field_type == 'STRING':
field_dict[field + '_ft_style'] = 'NUMBER'
int_style_dict[field] = {}
print field_dict
#add some custom import fields
field_dict['import_notes'] = 'STRING'
print 'FIELD DICT', field_dict
print 'starting to process'
for i, feat in enumerate(layer):
if i > max_rows:
continue
if start_row and i < start_row:
continue
upload.rows_processed = i + 1
if not i % ((num_features / 50) or 5):
print upload.rows_processed,'rp'
upload.save()
upload.save()
rd = {}
#geom = fromstr(feat.geom.wkt,srid=srid)
if layer.srs:
try:
geom = OGRGeometry(feat.geom.wkt, layer.srs.proj4)
geom.transform(4326)
except Exception, e:
print 'FAIL GEOM'
print e,
geom = None
else:
geom = OGRGeometry(feat.geom.wkt)
if geom:
geom = fromstr(geom.wkt)
#create optional centroid for polys
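            # 'oly' matches both 'Polygon' and 'MultiPolygon' geometry type names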
if upload.create_centroid and 'oly' in geom.geom_type:
field_dict['geometry_pos'] = 'LOCATION'
rd['geometry_pos'] = geom.point_on_surface.kml
if upload.create_centroid_poly and 'oly' in geom.geom_type:
field_dict['geometry_pos_poly_2'] = 'LOCATION'
field_dict['geometry_pos_poly_3'] = 'LOCATION'
rd['geometry_pos_poly_2'] = geom.point_on_surface.buffer(.0001,10).kml
rd['geometry_pos_poly_3'] = geom.point_on_surface.buffer(.0005,10).kml
#if it's > 1M characters, we need to simplify it for FT
simplify_tolerance = .0001
while len(geom.kml) > 1000000:
geom = geom.simplify(simplify_tolerance)
print 'simplified to %f' % simplify_tolerance
                rd['import_notes'] = 'simplified to %f DD' % simplify_tolerance
simplify_tolerance = simplify_tolerance * 1.5
            if not geom.valid:
                rd['import_notes'] = rd.get('import_notes', '') + '<br>Geometry not valid'
kml = geom.kml
rd['geometry'] = kml
rd['geometry_vertex_count'] = geom.num_coords
if upload.create_simplify and not 'oint' in geom.geom_type:
amt = .002
if 'oly' in geom.geom_type:
buffer_geom = geom.buffer(amt)
buffer_geom = buffer_geom.buffer(amt * -1)
simple_geom = buffer_geom.simplify(amt)
else:
simple_geom = geom.simplify(amt)
rd['geometry_simplified'] = simple_geom.kml
rd['geometry_simplified_vertex_count'] = simple_geom.num_coords
for f in fields:
val = feat.get(f)
#make sure we have proper null type for diff fields
if val == '<Null>':
continue
if not val:
continue
if field_dict[f] == 'DATETIME':
val = val.isoformat().split('T')[0]
if field_dict[f] == 'STRING' \
and create_int_style_cols \
and field_dict.has_key(f + '_ft_style'):
#check to see if we have a number for this yet
                try:
                    rd[f + '_ft_style'] = int_style_dict[f][val]
                except KeyError:
                    int_style_dict[f][val] = len(int_style_dict[f])
                    rd[f + '_ft_style'] = int_style_dict[f][val]
#however if we have too many distinct vals, let's just not do this anymore
if len(int_style_dict[f]) > max_distinct_style_vals:
print 'DELETING FD %s' % f
del field_dict[f + '_ft_style']
del rd[f + '_ft_style']
#sucks, but now we should just remove all these fields from previous rows
for srow in rows:
                            try:
                                del srow[f + '_ft_style']
                            except KeyError:
                                pass  # probably a null value, so the key was never set for this row
rd[f] = val
rows.append(rd)
#let's process 10k rows at a time.. not keep everything in memory
if len(rows) > 10000:
uploadRows(upload, field_dict, rows)
rows = []
uploadRows(upload, field_dict, rows)
def uploadRows(upload, field_dict, rows):
if not upload.ft_table_id:
upload = createTable(upload, field_dict)
upload.status = 3
upload.save()
print 'inserting %d rows' % len(rows)
insertData(upload, field_dict, rows)
upload.status = 4
upload.save()
def insertSql(client, sql, attempt_no=0):
    try:
        resp = client.query(sql)
except:
print 'unable to query sql %s' % sql
resp = client.query(sql)
print resp[:50]
if 'Unable' in resp:
if attempt_no > 3:
return 'Error - failed after 3 attempts' + resp
#print sql
print resp
time.sleep(1)
print 'len: %d, attempt: %d' % (len(sql), attempt_no)
        return insertSql(client, sql, attempt_no + 1)
return resp
def getClient(upload):
ftClient = OAuthFTClient(
FT_OAUTH['key'],
FT_OAUTH['secret'],
upload.auth_token.ft_token,
upload.auth_token.ft_token_secret)
print 'client created'
return ftClient
def createTable(upload, field_dict):
ftClient = getClient(upload)
table_dictionary = {upload.get_title() : field_dict}
results = ftClient.query(SQL().createTable(table_dictionary))
table_id = results.split("\n")[1]
print 'new table: %s' % results
upload.ft_table_id = table_id
upload.save()
return upload
def insertData(upload, field_dict, rows):
ftClient = getClient(upload)
#insert rows
sql = []
sql_len = 0
for i, row in enumerate(rows):
upload.rows_imported = i + 1
if sql_len > 500000 or len(sql) > 100: # max upload is 1MB?
insertSql(ftClient, ';'.join(sql))
sql = []
sql_len = 0
upload.save()
try:
insert_statement = SQL().insert(upload.ft_table_id, row)
except Exception, e:
print 'FAIL SQL', row
print e
continue
sql.append(insert_statement)
sql_len += len( insert_statement)
insertSql(ftClient, ';'.join(sql))
upload.save()
| 1.9375
| 2
|
tests/unit/utils.py
|
satra/hdmf
| 0
|
12776030
|
<filename>tests/unit/utils.py
import tempfile
from hdmf.utils import docval, getargs
from hdmf.container import Container
CORE_NAMESPACE = 'test_core'
class Foo(Container):
@docval({'name': 'name', 'type': str, 'doc': 'the name of this Foo'},
{'name': 'my_data', 'type': ('array_data', 'data'), 'doc': 'some data'},
{'name': 'attr1', 'type': str, 'doc': 'an attribute'},
{'name': 'attr2', 'type': int, 'doc': 'another attribute'},
{'name': 'attr3', 'type': float, 'doc': 'a third attribute', 'default': 3.14})
def __init__(self, **kwargs):
name, my_data, attr1, attr2, attr3 = getargs('name', 'my_data', 'attr1', 'attr2', 'attr3', kwargs)
super().__init__(name=name)
self.__data = my_data
self.__attr1 = attr1
self.__attr2 = attr2
self.__attr3 = attr3
def __eq__(self, other):
attrs = ('name', 'my_data', 'attr1', 'attr2', 'attr3')
return all(getattr(self, a) == getattr(other, a) for a in attrs)
def __str__(self):
attrs = ('name', 'my_data', 'attr1', 'attr2', 'attr3')
return '<' + ','.join('%s=%s' % (a, getattr(self, a)) for a in attrs) + '>'
@property
def my_data(self):
return self.__data
@property
def attr1(self):
return self.__attr1
@property
def attr2(self):
return self.__attr2
@property
def attr3(self):
return self.__attr3
def __hash__(self):
return hash(self.name)
class FooBucket(Container):
@docval({'name': 'name', 'type': str, 'doc': 'the name of this bucket'},
{'name': 'foos', 'type': list, 'doc': 'the Foo objects in this bucket', 'default': list()})
def __init__(self, **kwargs):
name, foos = getargs('name', 'foos', kwargs)
super().__init__(name=name)
self.__foos = {f.name: f for f in foos} # note: collections of groups are unordered in HDF5
for f in foos:
f.parent = self
def __eq__(self, other):
return self.name == other.name and self.foos == other.foos
def __str__(self):
return 'name=%s, foos=%s' % (self.name, self.foos)
@property
def foos(self):
return self.__foos
def remove_foo(self, foo_name):
foo = self.__foos.pop(foo_name)
if foo.parent is self:
self._remove_child(foo)
return foo
def get_temp_filepath():
# On Windows, h5py cannot truncate an open file in write mode.
# The temp file will be closed before h5py truncates it and will be removed during the tearDown step.
temp_file = tempfile.NamedTemporaryFile()
temp_file.close()
return temp_file.name
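# A minimal usage sketch of these fixtures (illustrative only, not part of the
# original helpers):
#   >>> foo = Foo('foo1', [1, 2, 3], attr1='a', attr2=5)
#   >>> bucket = FooBucket('bucket1', [foo])
#   >>> bucket.foos['foo1'] is foo
#   True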
| 2.3125
| 2
|
detector_mxnet/videoflow_contrib/detector_mxnet/__init__.py
|
videoflow/videoflow-contrib
| 12
|
12776031
|
from contextlib import suppress
with suppress(ImportError):
from .mxnet_object_detector import MxnetObjectDetector
| 1.210938
| 1
|
abstract_scorm_xblock/abstract_scorm_xblock/utils.py
|
Abstract-Tech/abstract-scorm-xblock
| 5
|
12776032
|
<reponame>Abstract-Tech/abstract-scorm-xblock
# -*- coding: utf-8 -*-
import pkg_resources
from django.template import Context, Template
def gettext(text):
"""Dummy `gettext` replacement to make string extraction
tools scrape strings marked for translation """
return text
def resource_string(path):
"""Handy helper for getting resources from our kit."""
return pkg_resources.resource_string(__name__, path).decode("utf8")
def render_template(template_path, context):
template_str = resource_string(template_path)
template = Template(template_str)
return template.render(Context(context))
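# A minimal usage sketch (the template path and context below are hypothetical):
#   >>> render_template("static/html/example.html", {"title": "Example SCORM unit"})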
| 2.359375
| 2
|
python/pex/value.py
|
JiveHelix/pex
| 0
|
12776033
|
<filename>python/pex/value.py
##
# @file value.py
#
# @brief Synchronizes a value between the interface and the model, with
# both ends allowed to register callbacks to be notified when the value has
# changed.
#
# @author <NAME> (<EMAIL>)
# @date 06 Jun 2020
# @copyright <NAME>
# Licensed under the MIT license. See LICENSE file.
from __future__ import annotations
from typing import (
ClassVar,
Generic,
TypeVar,
Type,
Optional,
cast,
Callable,
NamedTuple,
Any,
List)
from types import TracebackType
import abc
from .manifold import ValueManifold, ValueCallbackManifold
from .types import (
ValueCallback,
ValueType,
NodeType,
Reference)
from .tube import Tube, HasDisconnectAll
from .proxy import FilterProxy
ModelType = TypeVar("ModelType")
InterfaceType = TypeVar("InterfaceType")
class ValueBase(Generic[ModelType, InterfaceType], Tube, HasDisconnectAll):
"""
Manages a synchronized value between model and the interface.
A value callback has a single argument with a type parameterized
as ModelType.
"""
value_: ModelType
# Private singleton manifolds to connect the interface to the model.
interfaceManifold_: ClassVar[ValueManifold] = ValueManifold()
modelManifold_: ClassVar[ValueManifold] = ValueManifold()
valueCallbacks_: ValueCallbackManifold[InterfaceType]
def __init__(
self,
name: str,
nodeType: NodeType,
initialValue: ModelType) -> None:
Tube.__init__(self, name, nodeType)
if nodeType == NodeType.model:
# Model nodes connect to the model manifold and publish to the
# interface manifold
self.output_ = ValueBase.interfaceManifold_
self.input_ = ValueBase.modelManifold_
else:
assert nodeType == NodeType.interface
# Interface nodes connect to the interface manifold and publish
# to the model manifold
self.output_ = ValueBase.modelManifold_
self.input_ = ValueBase.interfaceManifold_
self.value_ = initialValue
self.valueCallbacks_ = ValueCallbackManifold()
self.input_.Connect(self.name_, self.OnValueChanged_)
def OnValueChanged_(self, value: ModelType) -> None:
self.value_ = value
self.valueCallbacks_(cast(InterfaceType, value))
def Connect(self, callback: ValueCallback[InterfaceType]) -> None:
self.valueCallbacks_.Add(callback)
def Disconnect(self, callback: ValueCallback) -> None:
self.valueCallbacks_.Disconnect(callback)
def DisconnectAll(self) -> None:
self.valueCallbacks_.Clear()
def __repr__(self) -> str:
return "{}({}: {})".format(
type(self).__name__,
self.name_,
self.value_)
ModelClass = TypeVar('ModelClass', bound='ModelValueBase')
class ModelValueBase(
Generic[ModelType],
ValueBase[ModelType, ModelType]):
@classmethod
def Create(
class_: Type[ModelClass],
name: str,
value: ModelType) -> ModelClass:
return class_(name, NodeType.model, value)
def OnValueChanged_(self, value: ModelType) -> None:
super(ModelValueBase, self).OnValueChanged_(value)
# There may be multiple interface listeners for this model node.
# When one of them sends a new value, the others should be notified.
# Interface nodes do not echo, or we would find ourselves in an
# infinite loop!
self.output_.Publish(self.name_, self.value_)
def Get(self) -> ModelType:
return self.value_
def Set(self, value: ModelType) -> None:
self.value_ = value
self.output_.Publish(self.name_, value)
def SetWithoutNotify_(self, value: ModelType) -> None:
self.value_ = value
def Notify_(self) -> None:
self.output_.Publish(self.name_, self.value_)
T = TypeVar('T')
class Interface(Generic[T], HasDisconnectAll):
"""
All read-write interface nodes implement these functions.
"""
@abc.abstractmethod
def Get(self) -> T:
...
@abc.abstractmethod
def Set(self, value: T) -> None:
...
@abc.abstractmethod
def Connect(self, callback: ValueCallback[T]) -> None:
...
InterfaceClass = TypeVar('InterfaceClass', bound='ReadableValue')
class ReadableValue(
Generic[ModelType],
ValueBase[ModelType, ModelType]):
model_: ModelValueBase[ModelType]
def __init__(self, modelValue: ModelValueBase[ModelType]) -> None:
super(ReadableValue, self).__init__(
modelValue.name_,
NodeType.interface,
modelValue.value_)
self.model_ = modelValue
@classmethod
def Create(
class_: Type[InterfaceClass],
modelValue: ModelValueBase[ModelType]) -> InterfaceClass:
return cast(
InterfaceClass,
ReadableValue(modelValue))
def Get(self) -> ModelType:
return self.model_.Get()
class InterfaceValue(ReadableValue[ModelType], Interface[ModelType]):
""" Adds write access through Set method. """
def Set(self, value: ModelType) -> None:
self.output_.Publish(self.name_, cast(ModelType, value))
@classmethod
def Create(
class_: Type[InterfaceClass],
modelValue: ModelValueBase[ModelType]) \
-> InterfaceClass:
return cast(
InterfaceClass,
InterfaceValue(modelValue))
def DefaultFilterOnSet(value: InterfaceType) -> ModelType:
""" Default No-op filter. """
return cast(ModelType, value)
def DefaultFilterOnGet(value: ModelType) -> InterfaceType:
""" Default No-op filter. """
return cast(InterfaceType, value)
FilteredInterface = TypeVar('FilteredInterface', bound='FilteredReadOnlyValue')
class FilteredReadOnlyValue(ValueBase[ModelType, InterfaceType]):
"""
Use AttachFilter to assign a function that will filter any call
to Set and any value received from the manifold.
"""
model_: ModelValueBase[ModelType]
filterOnGet_: FilterProxy[ModelType, InterfaceType]
def __init__(self, modelValue: ModelValueBase[ModelType]) -> None:
super(FilteredReadOnlyValue, self).__init__(
modelValue.name_,
NodeType.interface,
modelValue.value_)
self.model_ = modelValue
self.filterOnGet_ = FilterProxy.Create(DefaultFilterOnGet, None)
@classmethod
def Create(
class_: Type[FilteredInterface],
modelValue: ModelValueBase[ModelType]) -> FilteredInterface:
return cast(
FilteredInterface,
FilteredReadOnlyValue(modelValue))
def AttachFilterOnGet(
self,
filterOnGet: Callable[[ModelType], InterfaceType]) -> None:
self.filterOnGet_ = \
FilterProxy.Create(filterOnGet, self.RestoreDefaultFilterOnGet_)
def RestoreDefaultFilterOnGet_(
self,
ignored: Reference[Callable[[ModelType], InterfaceType]]) -> None:
self.filterOnGet_ = FilterProxy.Create(DefaultFilterOnGet, None)
def OnValueChanged_(self, value: ModelType) -> None:
""" Overrides the method in ModelValueBase to insert filterOnGet_ """
self.valueCallbacks_(self.filterOnGet_(value))
def Get(self) -> InterfaceType:
return self.filterOnGet_(self.model_.Get())
class FilteredInterfaceValue(
Generic[ModelType, InterfaceType],
FilteredReadOnlyValue[ModelType, InterfaceType],
Interface[InterfaceType]):
"""
Use AttachFilter to assign a function that will filter any call
to Set and any value received from the manifold.
"""
filterOnSet_: FilterProxy[InterfaceType, ModelType]
def __init__(self, modelValue: ModelValueBase[ModelType]) -> None:
super(FilteredInterfaceValue, self).__init__(modelValue)
self.filterOnSet_ = FilterProxy.Create(DefaultFilterOnSet, None)
self.filterOnGet_ = FilterProxy.Create(DefaultFilterOnGet, None)
def AttachFilterOnSet(
self,
filterOnSet: Callable[[InterfaceType], ModelType]) -> None:
self.filterOnSet_ = \
FilterProxy.Create(filterOnSet, self.RestoreDefaultFilterOnSet_)
def RestoreDefaultFilterOnSet_(
self,
ignored: Reference[Callable[[InterfaceType], ModelType]]) -> None:
self.filterOnSet_ = FilterProxy.Create(DefaultFilterOnSet, None)
def Set(self, value: InterfaceType) -> None:
self.output_.Publish(self.name_, self.filterOnSet_(value))
@classmethod
def Create(
class_: Type[FilteredInterface],
modelValue: ModelValueBase[ModelType]) -> FilteredInterface:
return cast(
FilteredInterface,
FilteredInterfaceValue[ModelType, InterfaceType](modelValue))
class ModelValue(Generic[ModelType], ModelValueBase[ModelType]):
@classmethod
def Create(
class_: Type[ModelClass],
name: str,
value: ModelType) -> ModelClass:
return cast(
ModelClass,
ModelValue(name, NodeType.model, value))
def GetInterfaceNode(self) -> InterfaceValue[ModelType]:
""" The default interface type is InterfaceValue.
Read-only or filtered interface values will have to be created manually.
"""
return InterfaceValue.Create(self)
class FilteredModelValue(Generic[ModelType], ModelValue[ModelType]):
"""
Use AttachFilter to assign a function that will filter any call
to Set. Only interface nodes can filter on Get.
"""
filterOnSet_: FilterProxy[ModelType, ModelType]
def __init__(self, name: str, initialValue: ModelType) -> None:
super(FilteredModelValue, self).__init__(
name,
NodeType.model,
initialValue)
self.filterOnSet_ = FilterProxy.Create(DefaultFilterOnSet, None)
def OnValueChanged_(self, value: ModelType) -> None:
super(FilteredModelValue, self).OnValueChanged_(
self.filterOnSet_(value))
def AttachFilterOnSet(
self,
filterOnSet: Callable[[ModelType], ModelType]) -> None:
self.filterOnSet_ = \
FilterProxy.Create(filterOnSet, self.RestoreDefaultFilterOnSet_)
def RestoreDefaultFilterOnSet_(
self,
ignored: Reference[Callable[[ModelType], ModelType]]) -> None:
        self.filterOnSet_ = FilterProxy.Create(DefaultFilterOnSet, None)
def Set(self, value: ModelType) -> None:
super(FilteredModelValue, self).Set(self.filterOnSet_(value))
def SetWithoutNotify_(self, value: ModelType) -> None:
super(FilteredModelValue, self).SetWithoutNotify_(
self.filterOnSet_(value))
def SetUnfiltered(self, value: ModelType) -> None:
ModelValue.Set(self, value)
@classmethod
def Create(
class_: Type[ModelClass],
name: str,
value: ModelType) -> ModelClass:
return cast(
ModelClass,
FilteredModelValue(name, value))
class ValueContext(Generic[ModelClass]):
    modelValue_: ModelClass
def __init__(self, modelValue: ModelClass) -> None:
self.modelValue_ = modelValue
self.originalValue_ = modelValue.Get()
def Set(self, value: ModelType) -> None:
self.modelValue_.SetWithoutNotify_(value)
def __enter__(self) -> ValueContext:
return self
def __exit__(
self,
exceptionType: Optional[Type[BaseException]],
exceptionValue: Optional[BaseException],
exceptionTraceback: Optional[TracebackType]) -> None:
if exceptionType is None:
self.modelValue_.Notify_()
else:
# An exception was raised.
# Revert to the previous value and do not Notify.
self.modelValue_.SetWithoutNotify_(self.originalValue_)
class ChangedNode(NamedTuple):
node: ModelValueBase
originalValue: Any
class MultipleValueContext:
changedNodes_: List[ChangedNode]
def __init__(self) -> None:
self.changedNodes_ = []
def Set(self, node: ModelValueBase[ModelType], value: ModelType) -> None:
self.changedNodes_.append(ChangedNode(node, node.Get()))
node.SetWithoutNotify_(value)
def __enter__(self) -> MultipleValueContext:
return self
def __exit__(
self,
exceptionType: Optional[Type[BaseException]],
exceptionValue: Optional[BaseException],
exceptionTraceback: Optional[TracebackType]) -> None:
if exceptionType is None:
# Operation was successful.
# Notify all of the changed nodes.
for node, _ in self.changedNodes_:
node.Notify_()
else:
# An exception was raised.
# Revert to the previous value and do not Notify.
for node, originalValue in self.changedNodes_:
node.SetWithoutNotify_(originalValue)
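# A minimal usage sketch (illustrative only; it assumes the sibling modules
# imported above behave as their use in this file implies):
#   >>> model = ModelValue.Create("wavelength", 500.0)
#   >>> interface = model.GetInterfaceNode()
#   >>> interface.Connect(lambda value: print("interface saw", value))
#   >>> interface.Set(550.0)   # routed through the model manifold
#   >>> model.Get()            # 550.0; the callback above has fired
#   >>> with MultipleValueContext() as context:
#   ...     context.Set(model, 525.0)   # notification deferred until __exit__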
| 2.578125
| 3
|
tests/otus/snapshots/snap_test_api.py
|
ColeVoelpel/virtool
| 1
|
12776034
|
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import GenericRepr, Snapshot
snapshots = Snapshot()
snapshots['TestCreate.test[True-uvloop-None-True] history'] = {
'_id': '9pfsom1b.0',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Created Tobacco mosaic virus',
'diff': {
'_id': '9pfsom1b',
'abbreviation': '',
'isolates': [
],
'last_indexed_version': None,
'lower_name': 'tobacco mosaic virus',
'name': 'Tobacco mosaic virus',
'reference': {
'id': 'foo'
},
'schema': [
],
'verified': False,
'version': 0
},
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'create',
'otu': {
'id': '9pfsom1b',
'name': 'Tobacco mosaic virus',
'version': 0
},
'reference': {
'id': 'foo'
},
'user': {
'id': 'test'
}
}
snapshots['TestCreate.test[True-uvloop-None-True] otu'] = {
'_id': '9pfsom1b',
'abbreviation': '',
'isolates': [
],
'last_indexed_version': None,
'lower_name': 'tobacco mosaic virus',
'name': 'Tobacco mosaic virus',
'reference': {
'id': 'foo'
},
'schema': [
],
'verified': False,
'version': 0
}
snapshots['TestCreate.test[True-uvloop-TMV-True] history'] = {
'_id': '9pfsom1b.0',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Created Tobacco mosaic virus (TMV)',
'diff': {
'_id': '9pfsom1b',
'abbreviation': 'TMV',
'isolates': [
],
'last_indexed_version': None,
'lower_name': 'tobacco mosaic virus',
'name': 'Tobacco mosaic virus',
'reference': {
'id': 'foo'
},
'schema': [
],
'verified': False,
'version': 0
},
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'create',
'otu': {
'id': '9pfsom1b',
'name': 'Tobacco mosaic virus',
'version': 0
},
'reference': {
'id': 'foo'
},
'user': {
'id': 'test'
}
}
snapshots['TestCreate.test[True-uvloop-TMV-True] otu'] = {
'_id': '9pfsom1b',
'abbreviation': 'TMV',
'isolates': [
],
'last_indexed_version': None,
'lower_name': 'tobacco mosaic virus',
'name': 'Tobacco mosaic virus',
'reference': {
'id': 'foo'
},
'schema': [
],
'verified': False,
'version': 0
}
snapshots['TestCreate.test[True-uvloop-True] history'] = {
'_id': '9pfsom1b.0',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Created Tobacco mosaic virus',
'diff': {
'_id': '9pfsom1b',
'abbreviation': '',
'isolates': [
],
'last_indexed_version': None,
'lower_name': 'tobacco mosaic virus',
'name': 'Tobacco mosaic virus',
'reference': {
'id': 'foo'
},
'schema': [
],
'verified': False,
'version': 0
},
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'create',
'otu': {
'id': '9pfsom1b',
'name': 'Tobacco mosaic virus',
'version': 0
},
'reference': {
'id': 'foo'
},
'user': {
'id': 'test'
}
}
snapshots['TestCreate.test[True-uvloop-True] otu'] = {
'_id': '9pfsom1b',
'abbreviation': '',
'isolates': [
],
'last_indexed_version': None,
'lower_name': 'tobacco mosaic virus',
'name': 'Tobacco mosaic virus',
'reference': {
'id': 'foo'
},
'schema': [
],
'verified': False,
'version': 0
}
snapshots['TestSetAsDefault.test[True-uvloop] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Set Isolate b as default',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
[
'isolates',
0,
'default'
],
[
True,
False
]
],
[
'change',
[
'isolates',
1,
'default'
],
[
False,
True
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'set_as_default',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestSetAsDefault.test[True-uvloop] joined'] = {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': False,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'isolate'
},
{
'default': True,
'id': 'test',
'sequences': [
],
'source_name': 'b',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestSetAsDefault.test_no_change[True-uvloop] joined'] = {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'isolate'
},
{
'default': False,
'id': 'test',
'sequences': [
],
'source_name': 'b',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 0
}
snapshots['TestSetAsDefault.test_no_change[True-uvloop] response'] = {
'default': True,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'isolate'
}
snapshots['test_get[uvloop-None] 1'] = {
'abbreviation': 'PVF',
'id': '6116cba1',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'sequences': [
{
'definition': 'Prunus virus F isolate 8816-s2 segment RNA2 polyprotein 2 gene, complete cds.',
'host': 'sweet cherry',
'id': 'KX269872',
'segment': None,
'sequence': 'TGTTTAAGAGATTAAACAACCGCTTTC'
}
],
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'issues': None,
'last_indexed_version': 0,
'most_recent_change': None,
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 0
}
snapshots['TestEdit.test[True-uvloop-data0-TMV-Changed name to Tobacco mosaic otu] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Changed name to Tobacco mosaic otu',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
'lower_name',
[
'prunus virus f',
'tobacco mosaic otu'
]
],
[
'change',
'name',
[
'Prunus virus F',
'Tobacco mosaic otu'
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'edit',
'otu': {
'id': '<KEY>',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestEdit.test[True-uvloop-data1-PVF-Changed name to Tobacco mosaic otu and changed abbreviation to TMV] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Changed name to Tobacco mosaic otu and changed abbreviation to TMV',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
'abbreviation',
[
'PVF',
'TMV'
]
],
[
'change',
'lower_name',
[
'prunus virus f',
'tobacco mosaic otu'
]
],
[
'change',
'name',
[
'Prunus virus F',
'Tobacco mosaic otu'
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestEdit.test[True-uvloop-data2-PVF-Changed abbreviation to TMV] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Changed abbreviation to TMV',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
'abbreviation',
[
'PVF',
'TMV'
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestEdit.test[True-uvloop-data3-TMV-Changed name to Tobacco mosaic otu] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Changed name to Tobacco mosaic otu',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
'lower_name',
[
'prunus virus f',
'tobacco mosaic otu'
]
],
[
'change',
'name',
[
'Prunus virus F',
'Tobacco mosaic otu'
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestEdit.test[True-uvloop-data4-TMV-Changed name to Tobacco mosaic otu and removed abbreviation TMV] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Changed name to Tobacco mosaic otu and removed abbreviation TMV',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
'abbreviation',
[
'TMV',
''
]
],
[
'change',
'lower_name',
[
'prunus virus f',
'tobacco mosaic otu'
]
],
[
'change',
'name',
[
'Prunus virus F',
'Tobacco mosaic otu'
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestEdit.test[True-uvloop-data5--Changed name to Tobacco mosaic otu and added abbreviation TMV] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Changed name to Tobacco mosaic otu and added abbreviation TMV',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
'abbreviation',
[
'',
'TMV'
]
],
[
'change',
'lower_name',
[
'prunus virus f',
'tobacco mosaic otu'
]
],
[
'change',
'name',
[
'Prunus virus F',
'Tobacco mosaic otu'
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestEdit.test[True-uvloop-data6-PVF-Changed abbreviation to TMV] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Changed abbreviation to TMV',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
'abbreviation',
[
'PVF',
'TMV'
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestEdit.test[True-uvloop-data7-PVF-Changed abbreviation to TMV] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Changed abbreviation to TMV',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
'abbreviation',
[
'PVF',
'TMV'
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': '<NAME>',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestEdit.test[True-uvloop-data8--Added abbreviation TMV] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Added abbreviation TMV',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
'abbreviation',
[
'',
'TMV'
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': '<NAME>',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestEdit.test[True-uvloop-data9-TMV-Removed abbreviation TMV] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Removed abbreviation TMV',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
'abbreviation',
[
'TMV',
''
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestEdit.test_no_change[True-uvloop-Tobacco mosaic otu-TMV-data0] 1'] = {
'abbreviation': 'TMV',
'id': 'test',
'isolates': [
],
'issues': {
'empty_isolate': False,
'empty_otu': True,
'empty_sequence': False,
'isolate_inconsistency': False
},
'most_recent_change': None,
'name': 'Tobacco mosaic otu',
'reference': {
'id': 'foo'
}
}
snapshots['TestEdit.test_no_change[True-uvloop-Tobacco mosaic otu-TMV-data1] 1'] = {
'abbreviation': 'TMV',
'id': 'test',
'isolates': [
],
'issues': {
'empty_isolate': False,
'empty_otu': True,
'empty_sequence': False,
'isolate_inconsistency': False
},
'most_recent_change': None,
'name': 'Tobacco mosaic otu',
'reference': {
'id': 'foo'
}
}
snapshots['TestEdit.test_no_change[True-uvloop-Tobacco mosaic otu-TMV-data2] 1'] = {
'abbreviation': 'TMV',
'id': 'test',
'isolates': [
],
'issues': {
'empty_isolate': False,
'empty_otu': True,
'empty_sequence': False,
'isolate_inconsistency': False
},
'most_recent_change': None,
'name': 'Tobacco mosaic otu',
'reference': {
'id': 'foo'
}
}
snapshots['test_remove[True-uvloop--True] history'] = {
'_id': '6116cba1.removed',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Removed Prunus virus F',
'diff': {
'_id': '6116cba1',
'abbreviation': '',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 0
},
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'remove',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 'removed'
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['test_remove[True-uvloop-PVF-True] history'] = {
'_id': '6116cba1.removed',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Removed Prunus virus F (PVF)',
'diff': {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 0
},
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'remove',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 'removed'
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['test_get_isolate[uvloop-None] 1'] = {
'default': True,
'id': 'cab8b360',
'sequences': [
{
'definition': 'Prunus virus F isolate 8816-s2 segment RNA2 polyprotein 2 gene, complete cds.',
'host': 'sweet cherry',
'id': 'KX269872',
'segment': None,
'sequence': 'TGTTTAAGAGATTAAACAACCGCTTTC'
}
],
'source_name': '8816-v2',
'source_type': 'isolate'
}
snapshots['TestEditIsolate.test[True-uvloop-data0-Renamed Isolate b to Variant b] json'] = {
'default': False,
'id': 'test',
'sequences': [
],
'source_name': 'b',
'source_type': 'variant'
}
snapshots['TestEditIsolate.test[True-uvloop-data0-Renamed Isolate b to Variant b] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Renamed Isolate b to Variant b',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
[
'isolates',
1,
'source_type'
],
[
'isolate',
'variant'
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'edit_isolate',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestEditIsolate.test[True-uvloop-data1-Renamed Isolate b to Variant b] json'] = {
'default': False,
'id': 'test',
'sequences': [
],
'source_name': 'b',
'source_type': 'variant'
}
snapshots['TestEditIsolate.test[True-uvloop-data1-Renamed Isolate b to Variant b] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Renamed Isolate b to Variant b',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
[
'isolates',
1,
'source_type'
],
[
'isolate',
'variant'
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'edit_isolate',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestEditIsolate.test[True-uvloop-data2-Renamed Isolate b to Variant A] json'] = {
'default': False,
'id': 'test',
'sequences': [
],
'source_name': 'A',
'source_type': 'variant'
}
snapshots['TestEditIsolate.test[True-uvloop-data2-Renamed Isolate b to Variant A] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Renamed Isolate b to Variant A',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
[
'isolates',
1,
'source_name'
],
[
'b',
'A'
]
],
[
'change',
[
'isolates',
1,
'source_type'
],
[
'isolate',
'variant'
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'edit_isolate',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestEditIsolate.test[True-uvloop-data3-Renamed Isolate b to Isolate A] json'] = {
'default': False,
'id': 'test',
'sequences': [
],
'source_name': 'A',
'source_type': 'isolate'
}
snapshots['TestEditIsolate.test[True-uvloop-data3-Renamed Isolate b to Isolate A] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Renamed Isolate b to Isolate A',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
[
'isolates',
1,
'source_name'
],
[
'b',
'A'
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'edit_isolate',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestEditIsolate.test_force_case[True-uvloop] json'] = {
'default': True,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'variant'
}
snapshots['TestEditIsolate.test_force_case[True-uvloop] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Renamed Isolate 8816-v2 to Variant 8816-v2',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
[
'isolates',
0,
'source_type'
],
[
'isolate',
'variant'
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'edit_isolate',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestCreate.test[True-uvloop-None-True] json'] = {
'abbreviation': '',
'id': '9pfsom1b',
'isolates': [
],
'issues': {
'empty_isolate': False,
'empty_otu': True,
'empty_sequence': False,
'isolate_inconsistency': False
},
'last_indexed_version': None,
'most_recent_change': {
'created_at': '2015-10-06T20:00:00Z',
'description': 'Created Tobacco mosaic virus',
'diff': {
'_id': '9pfsom1b',
'abbreviation': '',
'isolates': [
],
'last_indexed_version': None,
'lower_name': 'tobacco mosaic virus',
'name': 'Tobacco mosaic virus',
'reference': {
'id': 'foo'
},
'schema': [
],
'verified': False,
'version': 0
},
'id': '9pfsom1b.0',
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'create',
'otu': {
'id': '9pfsom1b',
'name': 'Tobacco mosaic virus',
'version': 0
},
'reference': {
'id': 'foo'
},
'user': {
'id': 'test'
}
},
'name': 'Tobacco mosaic virus',
'reference': {
'id': 'foo'
},
'schema': [
],
'verified': False,
'version': 0
}
snapshots['TestCreate.test[True-uvloop-True] json'] = {
'abbreviation': '',
'id': '9pfsom1b',
'isolates': [
],
'issues': {
'empty_isolate': False,
'empty_otu': True,
'empty_sequence': False,
'isolate_inconsistency': False
},
'last_indexed_version': None,
'most_recent_change': {
'created_at': '2015-10-06T20:00:00Z',
'description': 'Created Tobacco mosaic virus',
'diff': {
'_id': '9pfsom1b',
'abbreviation': '',
'isolates': [
],
'last_indexed_version': None,
'lower_name': 'tobacco mosaic virus',
'name': 'Tobacco mosaic virus',
'reference': {
'id': 'foo'
},
'schema': [
],
'verified': False,
'version': 0
},
'id': '9pfsom1b.0',
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'create',
'otu': {
'id': '9pfsom1b',
'name': 'Tobacco mosaic virus',
'version': 0
},
'reference': {
'id': 'foo'
},
'user': {
'id': 'test'
}
},
'name': 'Tobacco mosaic virus',
'reference': {
'id': 'foo'
},
'schema': [
],
'verified': False,
'version': 0
}
snapshots['TestCreate.test[True-uvloop-TMV-True] json'] = {
'abbreviation': 'TMV',
'id': '9pfsom1b',
'isolates': [
],
'issues': {
'empty_isolate': False,
'empty_otu': True,
'empty_sequence': False,
'isolate_inconsistency': False
},
'last_indexed_version': None,
'most_recent_change': {
'created_at': '2015-10-06T20:00:00Z',
'description': 'Created Tobacco mosaic virus (TMV)',
'diff': {
'_id': '9pfsom1b',
'abbreviation': 'TMV',
'isolates': [
],
'last_indexed_version': None,
'lower_name': 'tobacco mosaic virus',
'name': 'Tobacco mosaic virus',
'reference': {
'id': 'foo'
},
'schema': [
],
'verified': False,
'version': 0
},
'id': '9pfsom1b.0',
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'create',
'otu': {
'id': '9pfsom1b',
'name': 'Tobacco mosaic virus',
'version': 0
},
'reference': {
'id': 'foo'
},
'user': {
'id': 'test'
}
},
'name': 'Tobacco mosaic virus',
'reference': {
'id': 'foo'
},
'schema': [
],
'verified': False,
'version': 0
}
snapshots['TestEdit.test[True-uvloop-data0-TMV-Changed name to Tobacco mosaic otu] json'] = {
'abbreviation': 'TMV',
'id': '6116cba1',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'issues': {
'empty_isolate': [
'cab8b360'
],
'empty_otu': False,
'empty_sequence': False,
'isolate_inconsistency': False
},
'last_indexed_version': 0,
'most_recent_change': {
'created_at': '2015-10-06T20:00:00Z',
'description': 'Changed name to Tobacco mosaic otu',
'id': '6116cba1.1',
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'user': {
'id': 'test'
}
},
'name': 'Tobacco mosaic otu',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data0-TMV-Changed name to Tobacco mosaic otu] otu'] = {
'_id': '6116cba1',
'abbreviation': 'TMV',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'tobacco mosaic otu',
'name': 'Tobacco mosaic otu',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data1-PVF-Changed name to Tobacco mosaic otu and changed abbreviation to TMV] json'] = {
'abbreviation': 'TMV',
'id': '6116cba1',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'issues': {
'empty_isolate': [
'cab8b360'
],
'empty_otu': False,
'empty_sequence': False,
'isolate_inconsistency': False
},
'last_indexed_version': 0,
'most_recent_change': {
'created_at': '2015-10-06T20:00:00Z',
'description': 'Changed name to Tobacco mosaic otu and changed abbreviation to TMV',
'id': '6116cba1.1',
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'user': {
'id': 'test'
}
},
'name': 'Tobacco mosaic otu',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data1-PVF-Changed name to Tobacco mosaic otu and changed abbreviation to TMV] otu'] = {
'_id': '6116cba1',
'abbreviation': 'TMV',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'tobacco mosaic otu',
'name': 'Tobacco mosaic otu',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data2-PVF-Changed abbreviation to TMV] json'] = {
'abbreviation': 'TMV',
'id': '6116cba1',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'issues': {
'empty_isolate': [
'cab8b360'
],
'empty_otu': False,
'empty_sequence': False,
'isolate_inconsistency': False
},
'last_indexed_version': 0,
'most_recent_change': {
'created_at': '2015-10-06T20:00:00Z',
'description': 'Changed abbreviation to TMV',
'id': '6116cba1.1',
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'user': {
'id': 'test'
}
},
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data2-PVF-Changed abbreviation to TMV] otu'] = {
'_id': '6116cba1',
'abbreviation': 'TMV',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data3-TMV-Changed name to Tobacco mosaic otu] json'] = {
'abbreviation': 'TMV',
'id': '6116cba1',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'issues': {
'empty_isolate': [
'cab8b360'
],
'empty_otu': False,
'empty_sequence': False,
'isolate_inconsistency': False
},
'last_indexed_version': 0,
'most_recent_change': {
'created_at': '2015-10-06T20:00:00Z',
'description': 'Changed name to Tobacco mosaic otu',
'id': '6116cba1.1',
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'user': {
'id': 'test'
}
},
'name': 'Tobacco mosaic otu',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data3-TMV-Changed name to Tobacco mosaic otu] otu'] = {
'_id': '6116cba1',
'abbreviation': 'TMV',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'tobacco mosaic otu',
'name': 'Tobacco mosaic otu',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data4-TMV-Changed name to Tobacco mosaic otu and removed abbreviation TMV] json'] = {
'abbreviation': '',
'id': '6116cba1',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'issues': {
'empty_isolate': [
'cab8b360'
],
'empty_otu': False,
'empty_sequence': False,
'isolate_inconsistency': False
},
'last_indexed_version': 0,
'most_recent_change': {
'created_at': '2015-10-06T20:00:00Z',
'description': 'Changed name to Tobacco mosaic otu and removed abbreviation TMV',
'id': '6116cba1.1',
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'user': {
'id': 'test'
}
},
'name': 'Tobacco mosaic otu',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data4-TMV-Changed name to Tobacco mosaic otu and removed abbreviation TMV] otu'] = {
'_id': '6116cba1',
'abbreviation': '',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'tobacco mosaic otu',
'name': 'Tobacco mosaic otu',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data5--Changed name to Tobacco mosaic otu and added abbreviation TMV] json'] = {
'abbreviation': 'TMV',
'id': '6116cba1',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'issues': {
'empty_isolate': [
'cab8b360'
],
'empty_otu': False,
'empty_sequence': False,
'isolate_inconsistency': False
},
'last_indexed_version': 0,
'most_recent_change': {
'created_at': '2015-10-06T20:00:00Z',
'description': 'Changed name to Tobacco mosaic otu and added abbreviation TMV',
'id': '6116cba1.1',
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'user': {
'id': 'test'
}
},
'name': 'Tobacco mosaic otu',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data5--Changed name to Tobacco mosaic otu and added abbreviation TMV] otu'] = {
'_id': '6116cba1',
'abbreviation': 'TMV',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'tobacco mosaic otu',
'name': 'Tobacco mosaic otu',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data6-PVF-Changed abbreviation to TMV] json'] = {
'abbreviation': 'TMV',
'id': '6116cba1',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'issues': {
'empty_isolate': [
'cab8b360'
],
'empty_otu': False,
'empty_sequence': False,
'isolate_inconsistency': False
},
'last_indexed_version': 0,
'most_recent_change': {
'created_at': '2015-10-06T20:00:00Z',
'description': 'Changed abbreviation to TMV',
'id': '6116cba1.1',
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'user': {
'id': 'test'
}
},
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data6-PVF-Changed abbreviation to TMV] otu'] = {
'_id': '6116cba1',
'abbreviation': 'TMV',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data7-PVF-Changed abbreviation to TMV] json'] = {
'abbreviation': 'TMV',
'id': '6116cba1',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'issues': {
'empty_isolate': [
'cab8b360'
],
'empty_otu': False,
'empty_sequence': False,
'isolate_inconsistency': False
},
'last_indexed_version': 0,
'most_recent_change': {
'created_at': '2015-10-06T20:00:00Z',
'description': 'Changed abbreviation to TMV',
'id': '6116cba1.1',
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'user': {
'id': 'test'
}
},
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data7-PVF-Changed abbreviation to TMV] otu'] = {
'_id': '6116cba1',
'abbreviation': 'TMV',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data8--Added abbreviation TMV] json'] = {
'abbreviation': 'TMV',
'id': '6116cba1',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'issues': {
'empty_isolate': [
'cab8b360'
],
'empty_otu': False,
'empty_sequence': False,
'isolate_inconsistency': False
},
'last_indexed_version': 0,
'most_recent_change': {
'created_at': '2015-10-06T20:00:00Z',
'description': 'Added abbreviation TMV',
'id': '6116cba1.1',
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'user': {
'id': 'test'
}
},
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data8--Added abbreviation TMV] otu'] = {
'_id': '6116cba1',
'abbreviation': 'TMV',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data9-TMV-Removed abbreviation TMV] json'] = {
'abbreviation': '',
'id': '6116cba1',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'issues': {
'empty_isolate': [
'cab8b360'
],
'empty_otu': False,
'empty_sequence': False,
'isolate_inconsistency': False
},
'last_indexed_version': 0,
'most_recent_change': {
'created_at': '2015-10-06T20:00:00Z',
'description': 'Removed abbreviation TMV',
'id': '6116cba1.1',
'method_name': 'edit',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'user': {
'id': 'test'
}
},
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEdit.test[True-uvloop-data9-TMV-Removed abbreviation TMV] otu'] = {
'_id': '6116cba1',
'abbreviation': '',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['test_list_isolates[uvloop-None] json'] = [
{
'default': True,
'id': 'cab8b360',
'sequences': [
],
'source_name': '8816-v2',
'source_type': 'isolate'
},
{
'default': False,
'id': 'bcb9b352',
'sequences': [
],
'source_name': '7865',
'source_type': 'isolate'
}
]
snapshots['TestAddIsolate.test_first[True-uvloop] json'] = {
'default': True,
'id': '9pf',
'sequences': [
],
'source_name': 'b',
'source_type': 'isolate'
}
snapshots['TestAddIsolate.test_first[True-uvloop] otu'] = {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': True,
'id': '9pf',
'source_name': 'b',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestAddIsolate.test_first[True-uvloop] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Added Isolate b as default',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'add',
'isolates',
[
[
0,
{
'default': True,
'id': '9pf',
'sequences': [
],
'source_name': 'b',
'source_type': 'isolate'
}
]
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'add_isolate',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestAddIsolate.test_force_case[True-uvloop] json'] = {
'default': False,
'id': '9pf',
'sequences': [
],
'source_name': 'Beta',
'source_type': 'isolate'
}
snapshots['TestAddIsolate.test_force_case[True-uvloop] otu'] = {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
},
{
'default': False,
'id': '9pf',
'source_name': 'Beta',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestAddIsolate.test_force_case[True-uvloop] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Added Isolate Beta',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'add',
'isolates',
[
[
1,
{
'default': False,
'id': '9pf',
'sequences': [
],
'source_name': 'Beta',
'source_type': 'isolate'
}
]
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'add_isolate',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestAddIsolate.test_empty[True-uvloop] json'] = {
'default': False,
'id': '9pf',
'sequences': [
],
'source_name': '',
'source_type': ''
}
snapshots['TestAddIsolate.test_empty[True-uvloop] otu'] = {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
},
{
'default': False,
'id': '9pf',
'source_name': '',
'source_type': ''
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestAddIsolate.test_empty[True-uvloop] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Added Unnamed Isolate',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'add',
'isolates',
[
[
1,
{
'default': False,
'id': '9pf',
'sequences': [
],
'source_name': '',
'source_type': ''
}
]
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'add_isolate',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestEditIsolate.test[True-uvloop-data0-Renamed Isolate b to Variant b] otu'] = {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
},
{
'default': False,
'id': 'test',
'source_name': 'b',
'source_type': 'variant'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEditIsolate.test[True-uvloop-data1-Renamed Isolate b to Variant b] otu'] = {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
},
{
'default': False,
'id': 'test',
'source_name': 'b',
'source_type': 'variant'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEditIsolate.test[True-uvloop-data2-Renamed Isolate b to Variant A] otu'] = {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
},
{
'default': False,
'id': 'test',
'source_name': 'A',
'source_type': 'variant'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEditIsolate.test[True-uvloop-data3-Renamed Isolate b to Isolate A] otu'] = {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
},
{
'default': False,
'id': 'test',
'source_name': 'A',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestEditIsolate.test_force_case[True-uvloop] otu'] = {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'variant'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestSetAsDefault.test[True-uvloop] json'] = {
'default': True,
'id': 'test',
'sequences': [
],
'source_name': 'b',
'source_type': 'isolate'
}
snapshots['TestRemoveIsolate.test_change_default[True-uvloop] otu'] = {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': True,
'id': 'bcb9b352',
'source_name': '7865',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestRemoveIsolate.test_change_default[True-uvloop] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Removed Isolate 8816-v2 and set Isolate 7865 as default',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
[
'isolates',
0,
'id'
],
[
'cab8b360',
'bcb9b352'
]
],
[
'change',
[
'isolates',
0,
'source_name'
],
[
'8816-v2',
'7865'
]
],
[
'remove',
[
'isolates',
0,
'sequences'
],
[
[
0,
{
'_id': 'KX269872',
'definition': 'Prunus virus F isolate 8816-s2 segment RNA2 polyprotein 2 gene, complete cds.',
'host': 'sweet cherry',
'isolate_id': 'cab8b360',
'otu_id': '6116cba1',
'segment': None,
'sequence': 'TGTTTAAGAGATTAAACAACCGCTTTC'
}
]
]
],
[
'remove',
'isolates',
[
[
1,
{
'default': False,
'id': 'bcb9b352',
'sequences': [
],
'source_name': '7865',
'source_type': 'isolate'
}
]
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'remove_isolate',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['test_list_sequences[uvloop-None] json'] = [
{
'definition': 'Prunus virus F isolate 8816-s2 segment RNA2 polyprotein 2 gene, complete cds.',
'host': 'sweet cherry',
'id': 'KX269872',
'segment': None,
'sequence': 'TGTTTAAGAGATTAAACAACCGCTTTC'
}
]
snapshots['test_get_sequence[uvloop-None] json'] = {
'definition': 'Prunus virus F isolate 8816-s2 segment RNA2 polyprotein 2 gene, complete cds.',
'host': 'sweet cherry',
'id': 'KX269872',
'isolate_id': 'cab8b360',
'otu_id': '6116cba1',
'segment': None,
'sequence': 'TGTTTAAGAGATTAAACAACCGCTTTC'
}
snapshots['test_create_sequence[True-uvloop-None] json'] = {
'accession': 'foobar',
'definition': 'A made up sequence',
'host': 'Plant',
'id': '9pfsom1b',
'isolate_id': 'cab8b360',
'otu_id': '6116cba1',
'reference': {
'id': 'hxn167'
},
'segment': None,
'sequence': 'ATGCGTGTACTG'
}
snapshots['test_create_sequence[True-uvloop-None] otu'] = {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': True,
'version': 1
}
snapshots['test_create_sequence[True-uvloop-None] sequence'] = {
'_id': '9pfsom1b',
'accession': 'foobar',
'definition': 'A made up sequence',
'host': 'Plant',
'isolate_id': 'cab8b360',
'otu_id': '6116cba1',
'reference': {
'id': 'hxn167'
},
'segment': None,
'sequence': 'ATGCGTGTACTG'
}
snapshots['test_create_sequence[True-uvloop-None] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Created new sequence foobar in Isolate 8816-v2',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'add',
[
'isolates',
0,
'sequences'
],
[
[
0,
{
'_id': '9pfsom1b',
'accession': 'foobar',
'definition': 'A made up sequence',
'host': 'Plant',
'isolate_id': 'cab8b360',
'otu_id': '6116cba1',
'reference': {
'id': 'hxn167'
},
'segment': None,
'sequence': 'ATGCGTGTACTG'
}
]
]
],
[
'change',
'verified',
[
False,
True
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'create_sequence',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['test_edit_sequence[True-uvloop-None] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Edited sequence KX269872 in Isolate 8816-v2',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
[
'isolates',
0,
'sequences',
0,
'definition'
],
[
'Prunus virus F isolate 8816-s2 segment RNA2 polyprotein 2 gene, complete cds.',
'A made up sequence'
]
],
[
'change',
[
'isolates',
0,
'sequences',
0,
'host'
],
[
'sweet cherry',
'Grapevine'
]
],
[
'change',
[
'isolates',
0,
'sequences',
0,
'sequence'
],
[
'TGTTTAAGAGATTAAACAACCGCTTTC',
'ATGCGTGTACTG'
]
],
[
'change',
'verified',
[
False,
True
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'edit_sequence',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['test_edit_sequence[True-uvloop-None] otu'] = {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': True,
'version': 1
}
snapshots['test_edit_sequence[True-uvloop-None] sequence'] = {
'_id': 'KX269872',
'definition': 'A made up sequence',
'host': 'Grapevine',
'isolate_id': 'cab8b360',
'otu_id': '6116cba1',
'segment': None,
'sequence': 'ATGCGTGTACTG'
}
snapshots['test_remove_sequence[True-uvloop-None] otu'] = {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['test_remove_sequence[True-uvloop-None] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Removed sequence KX269872 from Isolate 8816-v2',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'remove',
[
'isolates',
0,
'sequences'
],
[
[
0,
{
'_id': 'KX269872',
'definition': 'Prunus virus F isolate 8816-s2 segment RNA2 polyprotein 2 gene, complete cds.',
'host': 'sweet cherry',
'isolate_id': 'cab8b360',
'otu_id': '6116cba1',
'segment': None,
'sequence': 'TGTTTAAGAGATTAAACAACCGCTTTC'
}
]
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'remove_sequence',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['test_edit_sequence[True-uvloop-None] json'] = {
'definition': 'A made up sequence',
'host': 'Grapevine',
'id': 'KX269872',
'isolate_id': 'cab8b360',
'otu_id': '6116cba1',
'segment': None,
'sequence': 'ATGCGTGTACTG'
}
snapshots['TestAddIsolate.test_default[True-uvloop-True] json'] = {
'default': True,
'id': '9pf',
'sequences': [
],
'source_name': 'b',
'source_type': 'isolate'
}
snapshots['TestAddIsolate.test_default[True-uvloop-True] otu'] = {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': False,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
},
{
'default': True,
'id': '9pf',
'source_name': 'b',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestAddIsolate.test_default[True-uvloop-True] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Added Isolate b as default',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'change',
[
'isolates',
0,
'default'
],
[
True,
False
]
],
[
'add',
'isolates',
[
[
1,
{
'default': True,
'id': '9pf',
'sequences': [
],
'source_name': 'b',
'source_type': 'isolate'
}
]
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'add_isolate',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestAddIsolate.test_default[True-uvloop-False] json'] = {
'default': False,
'id': '9pf',
'sequences': [
],
'source_name': 'b',
'source_type': 'isolate'
}
snapshots['TestAddIsolate.test_default[True-uvloop-False] otu'] = {
'_id': '6116cba1',
'abbreviation': 'PVF',
'imported': True,
'isolates': [
{
'default': True,
'id': 'cab8b360',
'source_name': '8816-v2',
'source_type': 'isolate'
},
{
'default': False,
'id': '9pf',
'source_name': 'b',
'source_type': 'isolate'
}
],
'last_indexed_version': 0,
'lower_name': 'prunus virus f',
'name': 'Prunus virus F',
'reference': {
'id': 'hxn167'
},
'schema': [
],
'verified': False,
'version': 1
}
snapshots['TestAddIsolate.test_default[True-uvloop-False] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Added Isolate b',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'add',
'isolates',
[
[
1,
{
'default': False,
'id': '9pf',
'sequences': [
],
'source_name': 'b',
'source_type': 'isolate'
}
]
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'add_isolate',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestRemoveIsolate.test[True-uvloop] history'] = {
'_id': '6116cba1.1',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Removed Isolate 8816-v2',
'diff': [
[
'change',
'version',
[
0,
1
]
],
[
'remove',
'isolates',
[
[
0,
{
'default': True,
'id': 'cab8b360',
'sequences': [
{
'_id': 'KX269872',
'definition': 'Prunus virus F isolate 8816-s2 segment RNA2 polyprotein 2 gene, complete cds.',
'host': 'sweet cherry',
'isolate_id': 'cab8b360',
'otu_id': '6116cba1',
'segment': None,
'sequence': 'TGTTTAAGAGATTAAACAACCGCTTTC'
}
],
'source_name': '8816-v2',
'source_type': 'isolate'
}
]
]
]
],
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'remove_isolate',
'otu': {
'id': '6116cba1',
'name': 'Prunus virus F',
'version': 1
},
'reference': {
'id': 'hxn167'
},
'user': {
'id': 'test'
}
}
snapshots['TestCreate.test[True-uvloop--True] json'] = {
'abbreviation': '',
'id': '9pfsom1b',
'isolates': [
],
'issues': {
'empty_isolate': False,
'empty_otu': True,
'empty_sequence': False,
'isolate_inconsistency': False
},
'last_indexed_version': None,
'most_recent_change': {
'created_at': '2015-10-06T20:00:00Z',
'description': 'Created Tobacco mosaic virus',
'diff': {
'_id': '9pfsom1b',
'abbreviation': '',
'isolates': [
],
'last_indexed_version': None,
'lower_name': 'tobacco mosaic virus',
'name': 'Tobacco mosaic virus',
'reference': {
'id': 'foo'
},
'schema': [
],
'verified': False,
'version': 0
},
'id': '9pfsom1b.0',
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'create',
'otu': {
'id': '9pfsom1b',
'name': 'Tobacco mosaic virus',
'version': 0
},
'reference': {
'id': 'foo'
},
'user': {
'id': 'test'
}
},
'name': 'Tobacco mosaic virus',
'reference': {
'id': 'foo'
},
'schema': [
],
'verified': False,
'version': 0
}
snapshots['TestCreate.test[True-uvloop--True] otu'] = {
'_id': '9pfsom1b',
'abbreviation': '',
'isolates': [
],
'last_indexed_version': None,
'lower_name': 'tobacco mosaic virus',
'name': 'Tobacco mosaic virus',
'reference': {
'id': 'foo'
},
'schema': [
],
'verified': False,
'version': 0
}
snapshots['TestCreate.test[True-uvloop--True] history'] = {
'_id': '9pfsom1b.0',
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'description': 'Created Tobacco mosaic virus',
'diff': {
'_id': '9pfsom1b',
'abbreviation': '',
'isolates': [
],
'last_indexed_version': None,
'lower_name': 'tobacco mosaic virus',
'name': 'Tobacco mosaic virus',
'reference': {
'id': 'foo'
},
'schema': [
],
'verified': False,
'version': 0
},
'index': {
'id': 'unbuilt',
'version': 'unbuilt'
},
'method_name': 'create',
'otu': {
'id': '9pfsom1b',
'name': 'Tobacco mosaic virus',
'version': 0
},
'reference': {
'id': 'foo'
},
'user': {
'id': 'test'
}
}
| 1.703125
| 2
|
3vvrsto.py
|
BlackPhoenixSlo/3vVrsto
| 0
|
12776035
|
<gh_stars>0
import bottle
import model
import random
igra = model.Igra()
second = True
@bottle.get('/AI_learns/')
def leanAI():
igra.learn()
return bottle.redirect('http://127.0.0.1:8080/new')
@bottle.get('/new')
def newgame():
    global second  # needed so the assignments below rebind the module-level flag instead of creating a local
    igra.polje = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
if random.randint(0, 1) == 1:
igra.polje = igra.bot1.nasledni(igra.polje, "l1")
second = False
else:
second = True
return bottle.redirect('http://127.0.0.1:8080/')
@bottle.get('/place/<mesto>')
def place2(mesto):
print(mesto)
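    # Board cells are numbered 0-8 in row-major order, e.g. mesto=5 maps to polje[1][2] (row 1, column 2).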
if igra.polje[int(mesto) // 3][int(mesto) % 3] != 0:
return bottle.redirect('http://127.0.0.1:8080/')
igra.dodaj(int(mesto))
info = igra.konec(igra.polje)
if info == 1:
place("Zmagal si")
igra.izpisi(igra.polje, 1)
if info == 2 :
place("Izenačeno je")
igra.izpisi(igra.polje, 1)
if info == -1:
place("Izgubil")
igra.izpisi(igra.polje, 1)
if second:
igra.polje = igra.bot1.nasledni(igra.polje, "l2")
else:
igra.polje = igra.bot1.nasledni(igra.polje, "l1")
info = igra.konec(igra.polje)
if info == 2 :
place("Izenačeno je")
igra.izpisi(igra.polje, 1)
if info == -1:
place("Izgubil si")
igra.izpisi(igra.polje, 1)
igra.izpisi(igra.polje, 1)
return bottle.redirect('http://127.0.0.1:8080/')
@bottle.get('/')
def place(end=0):
win = model.countratio[1]
lose = model.countratio[-1]
pat = model.countratio[2]
ar = {}
for x in range(9):
ar[x] = igra.polje[x // 3][x % 3]
if ar[x] == 0:
ar[x] = "empty.png"
if ar[x] == 1:
ar[x] = "x.png"
if ar[x] == -1:
ar[x] = "o.png"
if end == 0:
return bottle.template('view/igra.tpl', win=win, lose=lose, pat=pat, ime0=ar[0], ime1=ar[1], ime2=ar[2], ime3=ar[3], ime4=ar[4], ime5=ar[5], ime6=ar[6], ime7=ar[7], ime8=ar[8])
else:
print("jaaaaa")
return bottle.redirect(f'http://127.0.0.1:8080/end/{end}')
@bottle.get('/end/<end>')
def placement2(end):
ar = {}
for x in range(9):
ar[x] = igra.polje[x // 3][x % 3]
if ar[x] == 0:
ar[x] = "empty.png"
if ar[x] == 1:
ar[x] = "x.png"
if ar[x] == -1:
ar[x] = "o.png"
return bottle.template('view/konec.tpl', wl=end, ime0=ar[0], ime1=ar[1], ime2=ar[2], ime3=ar[3], ime4=ar[4], ime5=ar[5], ime6=ar[6], ime7=ar[7], ime8=ar[8])
@bottle.get("/static/<filename>")
def server_static(filename):
return bottle.static_file(filename, root="./images")
bottle.run(reloader=True, debug=True)
| 2.46875
| 2
|
backend/modules/dataprep/PandasPreprocessor.py
|
FeelsBright/SmartMed
| 0
|
12776036
|
<filename>backend/modules/dataprep/PandasPreprocessor.py
from typing import Dict
import pandas as pd
import os
import logging

# logging does not expand "~" in file paths, so expand it explicitly before configuring the log file
logging.basicConfig(filename=os.path.expanduser('~/../logs/start.log'), level=logging.DEBUG)
def debug(fn):
'''logging decorator'''
def wrapper(*args, **kwargs):
logging.debug("Entering {:s}...".format(fn.__name__))
result = fn(*args, **kwargs)
logging.debug("Finished {:s}.".format(fn.__name__))
return result
return wrapper
class PandasPreprocessor:
'''Class to preprocessing any datasets'''
def __init__(self, settings: Dict):
self.settings = settings
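

# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the original module):
# the module is truncated after __init__, so the settings keys below are
# assumptions. The sketch also shows the @debug decorator logging entry and
# exit of a wrapped helper function.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    @debug
    def drop_empty_rows(df: pd.DataFrame) -> pd.DataFrame:
        # Drop rows whose values are all missing; entry and exit are logged by @debug.
        return df.dropna(how="all")

    prep = PandasPreprocessor({"source": "data.csv", "fillna": 0})  # hypothetical settings keys
    cleaned = drop_empty_rows(pd.DataFrame({"a": [1, None], "b": [2, None]}))
    print(prep.settings, len(cleaned))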
| 2.734375
| 3
|
lwm2m/MdsNotificationDemo.py
|
jefforeilly/django-rest-framework-iot
| 3
|
12776037
|
<gh_stars>1-10
'''
Created on October 25th, 2014
Subscribe to a resource, connect to the notification channel of an mDS instance and receive
notifications from the subscribed resource
Process the notifications and filter a set of endpoints and a particular resource path. Index the
resource value from the notification and use it to actuate an indicator.
@author: mjkoster
'''
if __name__ == '__main__' :
import httplib
import json
from urlparse import urlparse
import base64
httpServer = 'http://barista2.cloudapp.net:8080'
httpDomain = 'domain'
resourcePathBase = '/' + httpDomain + '/endpoints'
subscribeURI = '/3302/0/5500'
actuateURI = '/11101/0/5901'
baseURL = httpServer + resourcePathBase
username = 'admin'
password = '<PASSWORD>'
auth = base64.encodestring('%s:%s' % (username, password)).replace('\n', '')
ep_names = []
def discoverEndpoints(basePath):
uriObject = urlparse(basePath)
print 'discoverEP : ' + basePath
httpConnection = httplib.HTTPConnection(uriObject.netloc)
httpConnection.request('GET', uriObject.path, headers= \
{"Accept" : "application/json", "Authorization": ("Basic %s" % auth) })
response = httpConnection.getresponse()
print response.status, response.reason
if response.status == 200:
endpoints = json.loads(response.read())
httpConnection.close()
for endpoint in endpoints:
if endpoint['type'] == 'DEMO' and discoverResources(endpoint['name'], subscribeURI):
ep_names.append(endpoint['name'])
print 'endpoint: ' + endpoint['name']
return ep_names
def discoverResources(endpoint, uri_path):
resources = []
uriObject = urlparse(baseURL + '/' + endpoint)
print 'discoverRES : ' + endpoint
httpConnection = httplib.HTTPConnection(uriObject.netloc)
httpConnection.request('GET', uriObject.path, headers= \
{"Accept" : "application/json", "Authorization": ("Basic %s" % auth) })
response = httpConnection.getresponse()
print response.status, response.reason
if response.status == 200:
resources = json.loads(response.read())
httpConnection.close()
        for resource in resources:
            if resource['uri'] == uri_path:
                print 'resource: ' + resource['uri']
                return resource['uri']
        # Only report a miss after every resource has been checked.
        return 0
def subscribe(resourceURI):
for ep in ep_names:
path = httpServer + '/' + httpDomain + '/subscriptions' + '/' + ep + subscribeURI + '?sync=true'
print "subscribe: " + path
uriObject = urlparse(path)
httpConnection = httplib.HTTPConnection(uriObject.netloc)
httpConnection.request('PUT', uriObject.path + '?' + uriObject.query, "", \
{"Content-Type" : "application/json", "Authorization": ("Basic %s" % auth)})
response = httpConnection.getresponse()
print response.status, response.reason
httpConnection.close()
def longPoll(channelPath):
print 'poll: ' + channelPath
uriObject = urlparse(channelPath)
httpConnection = httplib.HTTPConnection(uriObject.netloc)
while 1:
httpConnection.request('GET', uriObject.path, headers= \
{"Accept" : "application/json", "Authorization": ("Basic %s" % auth) })
response = httpConnection.getresponse()
print response.status, response.reason
if response.status == 200:
httpBody = response.read()
if len(httpBody) > 0:
handleNotifications(json.loads(httpBody))
def handleNotifications(events):
if 'notifications' in events:
for notification in events['notifications']:
if (notification['ep'] in ep_names) and (notification['path'] == subscribeURI):
process_payload(notification)
def process_payload(notification):
value = base64.b64decode(notification['payload']) #notification payloads are base64 encoded
print "value: ", value
ledBarString = ""
for led in range(10):
if float(value)/10 > led:
ledBarString += '1'
else:
ledBarString += '0'
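        # Worked example: a decoded value of 37.0 gives 37.0/10 = 3.7, so segments 0 through 3 light up
        # and ledBarString ends up as '1111000000'.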
actuateLEDbar(ledBarString)
def actuateLEDbar(ledString = '0000000000'):
for ep in ep_names:
path = baseURL + '/' + ep + actuateURI
print "actuating: " + path + ", value=" + ledString
uriObject = urlparse(path)
httpConnection = httplib.HTTPConnection(uriObject.netloc)
httpConnection.request('PUT', uriObject.path + '?' + uriObject.query, ledString, \
{"Content-Type" : "application/json", "Authorization": ("Basic %s" % auth)})
response = httpConnection.getresponse()
print response.status, response.reason
httpConnection.close()
"""
Start
"""
print "Started"
discoverEndpoints(baseURL)
subscribe(subscribeURI)
try:
longPoll(httpServer + '/' + httpDomain + '/notification/pull')
except KeyboardInterrupt: pass
print 'got KeyboardInterrupt'
print 'closed'
| 2.578125
| 3
|
raciocinio_algoritmico/6- 22-04-2020/02.py
|
PedroMoreira87/python
| 0
|
12776038
|
# [[1, 2, 3],
# [4, 5, 6],
# [7, 8, 9]]
#
# print(((1-4) ** 2 + (4-4) ** 2 + (7-4) ** 2)/3)
def constroi_matriz(n, m):
mat = []
for i in range(n):
linha = []
for j in range(m):
linha.append(0)
mat.append(linha)
return mat
def popula_matriz(mat):
for i in range(len(mat)):
for j in range(len(mat[i])):
mat[i][j] = float(input(f'Digite o valor para a posição ({i}, {j}): '))
return mat
def pega_coluna(mat, c):
col = []
for i in range(len(mat)):
col.append(mat[i][c])
return col
def media(lista):
soma = 0
for i in range(len(lista)):
soma += lista[i]
return soma / len(lista)
def variancia(lista):
mu = media(lista)
var = 0
for i in range(len(lista)):
var += (lista[i] - mu) ** 2
var /= len(lista)
return var
def moda(lista):
    # count the numbers
    # dictionary in <key, value> format, where
    # the key is a number and the value is how many times it occurs
    contagem = {}
    for i in range(len(lista)):
        if lista[i] in contagem: # value already seen?
            # increment the count of a value we have seen before
            contagem[lista[i]] += 1
        else:
            # first time we see this particular value
            contagem[lista[i]] = 1
    # find which numbers repeat the most
    # 1, 2, 3, 4 -> amodal (no mode)
    # 1, 1, 2, 3, 4 -> 1 is the mode
    # 1, 1, 2, 2, 3, 4 -> bimodal (both 1 and 2 are modes)
    modas = []
    vezes = 0
    for num, qtd in contagem.items():
        if qtd > vezes: # the mode changed
            modas = [num]
            vezes = qtd
        elif qtd == vezes: # one more mode
            modas.append(num)
    # check whether the data set is amodal (has no mode)
    if vezes == 1:
        modas = []
    return modas
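# Quick illustration (comment only): moda([1, 1, 2, 3]) -> [1];
# moda([1, 2, 3]) -> [] (amodal, no mode); moda([1, 1, 2, 2]) -> [1, 2] (bimodal).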
mat = constroi_matriz(3, 3)
mat = popula_matriz(mat)
col_0 = pega_coluna(mat, 0)
print(f'Media coluna 0: {media(col_0)}')
print(f'Moda coluna 0: {moda(col_0)}')
print(f'Variância coluna 0: {variancia(col_0)}')
| 3.453125
| 3
|
src/python/nimbusml/linear_model/symsgdbinaryclassifier.py
|
michaelgsharp/NimbusML
| 134
|
12776039
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------------------------
# - Generated by tools/entrypoint_compiler.py: do not edit by hand
"""
SymSgdBinaryClassifier
"""
__all__ = ["SymSgdBinaryClassifier"]
from sklearn.base import ClassifierMixin
from ..base_predictor import BasePredictor
from ..internal.core.linear_model.symsgdbinaryclassifier import \
SymSgdBinaryClassifier as core
from ..internal.utils.utils import trace
class SymSgdBinaryClassifier(
core,
BasePredictor,
ClassifierMixin):
"""
    Train a symbolic SGD model.
.. remarks::
Stochastic gradient descent (SGD) is a well known method for
regression and classification
tasks, and is primarily a sequential algorithm. The
``SymSgdBinaryClassifier`` is an
implementation of a parallel SGD algorithm that, to a first-order
approximation, retains the
        sequential semantics of SGD. Each thread learns a local model as well
        as a `model combiner`
        which allows local models to be combined to produce what a
sequential model would have
produced.
**Reference**
`Parallel Stochastic Gradient Descent with Sound Combiners
<https://arxiv.org/pdf/1705.08030.pdf>`_
:param feature: see `Columns </nimbusml/concepts/columns>`_.
:param label: see `Columns </nimbusml/concepts/columns>`_.
:param normalize: Specifies the type of automatic normalization used:
* ``"Auto"``: if normalization is needed, it is performed
automatically. This is the default choice.
* ``"No"``: no normalization is performed.
* ``"Yes"``: normalization is performed.
* ``"Warn"``: if normalization is needed, a warning
message is displayed, but normalization is not performed.
Normalization rescales disparate data ranges to a standard scale.
Feature
        scaling ensures the distances between data points are proportional
and
enables various optimization methods such as gradient descent to
converge
much faster. If normalization is performed, a ``MaxMin`` normalizer
is
used. It normalizes values in an interval [a, b] where ``-1 <= a <=
0``
and ``0 <= b <= 1`` and ``b - a = 1``. This normalizer preserves
sparsity by mapping zero to zero.
:param caching: Whether trainer should cache input training data.
:param number_of_iterations: Number of passes over the data.
:param learning_rate: Determines the size of the step taken in the
direction of the gradient in each step of the learning process. This
determines how fast or slow the learner converges on the optimal
solution. If the step size is too big, you might overshoot the optimal
solution. If the step size is too small, training takes longer to
converge to the best solution.
:param l2_regularization: L2 regularization.
:param number_of_threads: Degree of lock-free parallelism. Determinism not
guaranteed. Multi-threading is not supported currently.
:param tolerance: Tolerance for difference in average loss in consecutive
passes.
:param update_frequency: The number of iterations each thread learns a
local model until combining it with the global model. Low value means
more updated global model and high value means less cache traffic.
:param memory_size: Memory size for L-BFGS. Lower=faster, less accurate.
The technique used for optimization here is L-BFGS, which uses only a
limited amount of memory to compute the next step direction. This
parameter indicates the number of past positions and gradients to store
for the computation of the next step. Must be greater than or equal to
``1``.
:param shuffle: Shuffle data?.
:param positive_instance_weight: Apply weight to the positive class, for
imbalanced data.
:param params: Additional arguments sent to compute engine.
.. seealso::
:py:class:`LogisticRegressionBinaryClassifier
<nimbusml.linear_model.LogisticRegressionBinaryClassifier>`,
:py:class:`SgdBinaryClassifier
<nimbusml.linear_model.SgdBinaryClassifier>`,
:py:class:`FastLinearBinaryClassifier
<nimbusml.linear_model.FastLinearBinaryClassifier>`
.. index:: models, parallel, SGD, symbolic
Example:
.. literalinclude:: /../nimbusml/examples/SymSgdBinaryClassifier.py
:language: python
"""
@trace
def __init__(
self,
normalize='Auto',
caching='Auto',
number_of_iterations=50,
learning_rate=None,
l2_regularization=0.0,
number_of_threads=None,
tolerance=0.0001,
update_frequency=None,
memory_size=1024,
shuffle=True,
positive_instance_weight=1.0,
feature=None,
label=None,
**params):
if 'feature_column_name' in params:
raise NameError(
"'feature_column_name' must be renamed to 'feature'")
if feature:
params['feature_column_name'] = feature
if 'label_column_name' in params:
raise NameError(
"'label_column_name' must be renamed to 'label'")
if label:
params['label_column_name'] = label
BasePredictor.__init__(self, type='classifier', **params)
core.__init__(
self,
normalize=normalize,
caching=caching,
number_of_iterations=number_of_iterations,
learning_rate=learning_rate,
l2_regularization=l2_regularization,
number_of_threads=number_of_threads,
tolerance=tolerance,
update_frequency=update_frequency,
memory_size=memory_size,
shuffle=shuffle,
positive_instance_weight=positive_instance_weight,
**params)
self.feature = feature
self.label = label
@trace
def predict_proba(self, X, **params):
'''
Returns probabilities
'''
return self._predict_proba(X, **params)
@trace
def decision_function(self, X, **params):
'''
Returns score values
'''
return self._decision_function(X, **params)
def get_params(self, deep=False):
"""
Get the parameters for this operator.
"""
return core.get_params(self)
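

# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative appendix, not part of the generated file):
# a minimal fit/predict_proba run on a tiny made-up pandas DataFrame. The
# column names and values are invented for illustration, and running it
# requires a working nimbusml installation with its native ML.NET binaries.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import pandas

    train = pandas.DataFrame({
        'f1': [0.1, 0.9, 0.2, 0.8],
        'f2': [1.0, 0.0, 1.0, 0.0],
        'y': [0, 1, 0, 1],
    })
    model = SymSgdBinaryClassifier(number_of_iterations=10, shuffle=False)
    model.fit(train[['f1', 'f2']], train['y'])
    print(model.predict_proba(train[['f1', 'f2']]))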
| 2.75
| 3
|
hive/utils/registry.py
|
chandar-lab/RLHive
| 81
|
12776040
|
import argparse
import inspect
from copy import deepcopy
from functools import partial, update_wrapper
from typing import List, Mapping, Sequence, _GenericAlias
import yaml
class Registrable:
"""Class used to denote which types of objects can be registered in the RLHive
Registry. These objects can also be configured directly from the command line, and
recursively built from the config, assuming type annotations are present.
"""
@classmethod
def type_name(cls):
"""This should represent a string that denotes the which type of class you are
creating. For example, "logger", "agent", or "env".
"""
raise ValueError
class CallableType(Registrable):
"""A wrapper that allows any callable to be registered in the RLHive Registry.
Specifically, it maps the arguments and annotations of the wrapped function to the
resulting callable, allowing any argument names and type annotations of the
    underlying function to be present for the outer wrapper. When called with some
arguments, this object returns a partial function with those arguments assigned.
By default, the type_name is "callable", but if you want to create specific types
of callables, you can simply create a subclass and override the type_name method.
See :py:class:`hive.utils.utils.OptimizerFn`.
"""
def __init__(self, fn):
"""
Args:
fn: callable to be wrapped.
"""
self._fn = fn
update_wrapper(self, self._fn)
def __call__(self, *args, **kwargs):
return partial(self._fn, *args, **kwargs)
@classmethod
def type_name(cls):
return "callable"
def __repr__(self):
return f"<{type(self).__name__} {repr(self._fn)}>"
class Registry:
"""This is the Registry class for RLHive. It allows you to register different types
of :py:class:`Registrable` classes and objects and generates constructors for those
classes in the form of `get_{type_name}`.
These constructors allow you to construct objects from dictionary configs. These
configs should have two fields: `name`, which corresponds to the name used when
registering a class in the registry, and `kwargs`, which corresponds to the keyword
arguments that will be passed to the constructor of the object. These constructors
can also build objects recursively, i.e. if a config contains the config for
another `Registrable` object, this will be automatically created before being
passed to the constructor of the original object. These constructors also allow you
    to specify or override arguments for object constructors directly from the
command line. These parameters are specified in dot notation. They also are able
to handle lists and dictionaries of Registrable objects.
For example, let's consider the following scenario:
Your agent class has an argument `arg1` which is annotated to be `List[Class1]`,
`Class1` is `Registrable`, and the `Class1` constructor takes an argument `arg2`.
In the passed yml config, there are two different Class1 object configs listed.
the constructor will check to see if both `--agent.arg1.0.arg2` and
`--agent.arg1.1.arg2` have been passed.
The parameters passed in the command line will be parsed according to the type
annotation of the corresponding low level constructor. If it is not one of
`int`, `float`, `str`, or `bool`, it simply loads the string into python using a
yaml loader.
    Each constructor returns the object, as well as a dictionary config with all the
parameters used to create the object and any Registrable objects created in the
process of creating this object.
"""
def __init__(self) -> None:
self._registry = {}
def register(self, name, constructor, type):
"""Register a Registrable class/object with RLHive.
Args:
name (str): Name of the class/object being registered.
constructor (callable): Callable that will be passed all kwargs from
configs and be analyzed to get type annotations.
type (type): Type of class/object being registered. Should be subclass of
Registrable.
"""
if not issubclass(type, Registrable):
raise ValueError(f"{type} is not Registrable")
if type.type_name() not in self._registry:
self._registry[type.type_name()] = {}
def getter(self, object_or_config, prefix=None):
if object_or_config is None:
return None, {}
elif isinstance(object_or_config, type):
return object_or_config, {}
name = object_or_config["name"]
kwargs = object_or_config.get("kwargs", {})
expanded_config = deepcopy(object_or_config)
if name in self._registry[type.type_name()]:
object_class = self._registry[type.type_name()][name]
parsed_args = get_callable_parsed_args(object_class, prefix=prefix)
kwargs.update(parsed_args)
kwargs, kwargs_config = construct_objects(
object_class, kwargs, prefix
)
expanded_config["kwargs"] = kwargs_config
return object_class(**kwargs), expanded_config
else:
raise ValueError(f"{name} class not found")
setattr(self.__class__, f"get_{type.type_name()}", getter)
self._registry[type.type_name()][name] = constructor
def register_all(self, base_class, class_dict):
"""Bulk register function.
Args:
base_class (type): Corresponds to the `type` of the register function
class_dict (dict[str, callable]): A dictionary mapping from name to
constructor.
"""
for cls in class_dict:
self.register(cls, class_dict[cls], base_class)
def __repr__(self):
return str(self._registry)
def construct_objects(object_constructor, config, prefix=None):
"""Helper function that constructs any objects specified in the config that
are registrable.
Returns the object, as well a dictionary config with all the parameters used to
create the object and any Registrable objects created in the process of creating
this object.
Args:
object_constructor (callable): constructor of object that corresponds to
config. The signature of this function will be analyzed to see if there
are any :py:class:`Registrable` objects that might be specified in the
config.
config (dict): The kwargs for the object being created. May contain configs for
other `Registrable` objects that need to be recursively created.
prefix (str): Prefix that is attached to the argument names when looking for
command line arguments.
"""
signature = inspect.signature(object_constructor)
prefix = "" if prefix is None else f"{prefix}."
expanded_config = deepcopy(config)
for argument in signature.parameters:
if argument not in config:
continue
expected_type = signature.parameters[argument].annotation
if isinstance(expected_type, type) and issubclass(expected_type, Registrable):
config[argument], expanded_config[argument] = registry.__getattribute__(
f"get_{expected_type.type_name()}"
)(config[argument], f"{prefix}{argument}")
if isinstance(expected_type, _GenericAlias):
origin = expected_type.__origin__
args = expected_type.__args__
if (
(origin == List or origin == list)
and len(args) == 1
and isinstance(args[0], type)
and issubclass(args[0], Registrable)
and isinstance(config[argument], Sequence)
):
objs = []
expanded_config[argument] = []
for idx, item in enumerate(config[argument]):
obj, obj_config = registry.__getattribute__(
f"get_{args[0].type_name()}"
)(item, f"{prefix}{argument}.{idx}")
objs.append(obj)
expanded_config[argument].append(obj_config)
config[argument] = objs
elif (
origin == dict
and len(args) == 2
and isinstance(args[1], type)
and issubclass(args[1], Registrable)
and isinstance(config[argument], Mapping)
):
objs = {}
expanded_config[argument] = {}
for key, val in config[argument].items():
obj, obj_config = registry.__getattribute__(
f"get_{args[1].type_name()}"
)(val, f"{prefix}{argument}.{key}")
objs[key] = obj
expanded_config[argument][key] = obj_config
config[argument] = objs
return config, expanded_config
def get_callable_parsed_args(callable, prefix=None):
"""Helper function that extracts the command line arguments for a given function.
Args:
callable (callable): function whose arguments will be inspected to extract
arguments from the command line.
prefix (str): Prefix that is attached to the argument names when looking for
command line arguments.
"""
signature = inspect.signature(callable)
arguments = {
argument: signature.parameters[argument]
for argument in signature.parameters
if argument != "self"
}
return get_parsed_args(arguments, prefix)
def get_parsed_args(arguments, prefix=None):
"""Helper function that takes a dictionary mapping argument names to types, and
extracts command line arguments for those arguments. If the dictionary contains
a key-value pair "bar": int, and the prefix passed is "foo", this function will
look for a command line argument "\-\-foo.bar". If present, it will cast it to an
int.
If the type for a given argument is not one of `int`, `float`, `str`, or `bool`,
it simply loads the string into python using a yaml loader.
Args:
arguments (dict[str, type]): dictionary mapping argument names to types
prefix (str): prefix that is attached to each argument name before searching
for command line arguments.
"""
prefix = "" if prefix is None else f"{prefix}."
parser = argparse.ArgumentParser()
for argument in arguments:
parser.add_argument(f"--{prefix}{argument}")
parsed_args, _ = parser.parse_known_args()
parsed_args = vars(parsed_args)
# Strip the prefix from the parsed arguments and remove arguments not present
parsed_args = {
(key[len(prefix) :] if key.startswith(prefix) else key): parsed_args[key]
for key in parsed_args
if parsed_args[key] is not None
}
for argument in parsed_args:
expected_type = arguments[argument]
if isinstance(expected_type, inspect.Parameter):
expected_type = expected_type.annotation
if expected_type in [int, str, float]:
parsed_args[argument] = expected_type(parsed_args[argument])
elif expected_type is bool:
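            # Any prefix of "false" (including the empty string) or "0" parses as False; anything else is True.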
value = str(parsed_args[argument]).lower()
parsed_args[argument] = not ("false".startswith(value) or value == "0")
else:
parsed_args[argument] = yaml.safe_load(parsed_args[argument])
return parsed_args
registry = Registry()
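

# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the original module):
# registers a toy Registrable class and builds it from a dict config of the
# {"name": ..., "kwargs": ...} shape described in the Registry docstring.
# ToyLogger and its "level" argument are made-up names for the example.
# ---------------------------------------------------------------------------
if __name__ == "__main__":

    class ToyLogger(Registrable):
        def __init__(self, level: int = 0):
            self.level = level

        @classmethod
        def type_name(cls):
            return "toy_logger"

    # Registering adds a get_toy_logger constructor to the registry instance.
    registry.register("toy", ToyLogger, ToyLogger)
    obj, expanded = registry.get_toy_logger(
        {"name": "toy", "kwargs": {"level": 3}}, prefix="toy"
    )
    # Passing --toy.level 5 on the command line would override the configured value.
    print(obj.level, expanded)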
| 2.890625
| 3
|
linuxmachinebeta/review/signals.py
|
linux-machine/linuxmachinebeta
| 0
|
12776041
|
from django.dispatch import receiver
from django.db.models.signals import post_delete
from linuxmachinebeta.review.models import ServiceReview
@receiver(post_delete, sender=ServiceReview)
def update_rating_after_delete(sender, instance, **kwargs):
instance.service.update_rating()
| 1.648438
| 2
|
users/admin.py
|
mixnix/subject_rate
| 0
|
12776042
|
# users/admin.py
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .forms import ReviewsUserCreationForm, ReviewsUserChangeForm
from .models import ReviewsUser
class ReviewUserAdmin(UserAdmin):
add_form = ReviewsUserCreationForm
form = ReviewsUserChangeForm
list_display = ['email', 'username', 'created_reviews']
model = ReviewsUser
admin.site.register(ReviewsUser, ReviewUserAdmin)
| 1.648438
| 2
|
air_pollution_death_rate_related/interactive_map/interactive_map.py
|
nghitrampham/air_pollution_death_rate_related
| 0
|
12776043
|
<gh_stars>0
'''
Interactive map of repiratory deaths and air pollution across U.S. counties
Note: naming conventions confirmed by pylint
'''
import json
from urllib.request import urlopen
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
from plotly.callbacks import Points, InputDeviceState
import plotly.graph_objects as go
import load_data
POINTS, STATE = Points(), InputDeviceState()
def county(counties, fip_curr):
'''
Determine county name
Input: county information and fip ID
Output: county name
'''
    # find county name associated with fip; the default below keeps the lookup safe for an unknown fip
    county_name = 'Unknown'
    for name in range(0, len(counties['features'])):
if fip_curr == counties['features'][name]['id']:
county_name = counties['features'][name]['properties']['NAME']
else:
pass
return county_name
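# Hedged example: with the plotly counties GeoJSON loaded below, county(COUNTIES, '25025') is expected
# to resolve to Suffolk County, MA, the same FIP that is used as the initial selection further down.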
# Load in county geographic data
with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response:
COUNTIES = json.load(response)
# load all datasets
# predicted aqi
DF_ALL_AQI = load_data.load_predicted_aqi(COUNTIES)
# correlation between respiratory deaths and air pollution
DF_CORR_CORRECTED = load_data.load_correlation(COUNTIES)
# calculate mean correlation
DF_MEAN_CORR = load_data.calc_mean_corr(COUNTIES, DF_CORR_CORRECTED)
# respiratory death rate
DF_DEATH_RATE = load_data.load_deathrate(COUNTIES)
# air pollution
DF_AIR_CORRECTED = load_data.load_air_pollution(COUNTIES)
# choose an initial county
FIP = '25025'
# filter data by that county
DF_CORR_COUNTY = DF_CORR_CORRECTED[DF_CORR_CORRECTED['fips'] == FIP]
# correlation figure
FIG_CORR = go.Figure(data=go.Scatter(x=DF_CORR_COUNTY['year'], y=DF_CORR_COUNTY['correlation'], mode='lines+markers', name='lines+markers'))
# find county name associated with fip
COUNTY_NAME = county(COUNTIES, FIP)
TITLE_NAME = 'Correlation between respiratory deaths and air pollution, County: ' + COUNTY_NAME
FIG_CORR.update_layout(title=TITLE_NAME, xaxis_title='Year', yaxis_title='Correlation')
FIG_CORR.update_yaxes(range=[-1, 1])
# respiratory deaths figure
COUNTY_DEATH_RATE = []
COUNTY_YEAR = []
for ID in range(0, len(DF_DEATH_RATE['fips'])):
if FIP == DF_DEATH_RATE['fips'][ID]:
COUNTY_DEATH_RATE.append(DF_DEATH_RATE['death rate'][ID])
COUNTY_YEAR.append(DF_DEATH_RATE['year'][ID])
else:
pass
DF_ADJUSTED_DEATH_RATE = pd.DataFrame({'year': COUNTY_YEAR, 'death rate': COUNTY_DEATH_RATE})
DF_SORTED_DEATH_RATE = DF_ADJUSTED_DEATH_RATE.sort_values(by=['year'])
TITLE_NAME = 'Respiratory Deaths, County: ' + COUNTY_NAME
FIG_DEATHRATE = go.Figure(data=go.Scatter(x=DF_SORTED_DEATH_RATE['year'], y=DF_SORTED_DEATH_RATE['death rate'], mode='lines+markers', name='lines+markers'))
FIG_DEATHRATE.update_layout(title=TITLE_NAME, xaxis_title='Year', yaxis_title='% of Total Deaths')
# air pollution figure
DF_COUNTY_AIR_POLLUTION = DF_AIR_CORRECTED[DF_AIR_CORRECTED['fips'] == FIP]
# create the air pollution figure
FIG_AIR_POLLUTION = go.Figure(data=go.Scatter(x=DF_COUNTY_AIR_POLLUTION['year'], y=DF_COUNTY_AIR_POLLUTION['AQI'], mode='lines+markers', name='lines+markers'))
TITLE_NAME = 'Air Pollution, County: ' + COUNTY_NAME
FIG_AIR_POLLUTION.update_layout(title=TITLE_NAME, xaxis_title='Year', yaxis_title='Air Quality Index')
# Display mean correlation across U.S. counties
F = go.FigureWidget([go.Choroplethmapbox(geojson=COUNTIES, locations=DF_MEAN_CORR['fips'], z=DF_MEAN_CORR['mean_correlation'], colorscale="Reds", zmin=min(DF_MEAN_CORR['mean_correlation']), zmax=max(DF_MEAN_CORR['mean_correlation']), marker_opacity=0.5, marker_line_width=0, colorbar_title="Mean Correlation")])
F.update_layout(mapbox_style="carto-positron", mapbox_zoom=3, mapbox_center={"lat": 37.0902, "lon": -95.7129}, title_text='Correlation Between Air Pollution and Respiratory Deaths', geo_scope='usa')
# Interactive interface global parameters
STYLES = {'pre': {'border': 'thin lightgrey solid', 'overflowX': 'scroll'}}
EXTERNAL_STYLESHEETS = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
APP = dash.Dash(__name__, external_stylesheets=EXTERNAL_STYLESHEETS)
COLORS = {'background': '#111111', 'text': '#ffffff'}
# Base Text for initialized county for predicted aqi
# Get the AQI for that county
for ID in range(0, len(DF_ALL_AQI['fips'])):
if FIP == DF_ALL_AQI['fips'][ID]:
AQI_VAL = str(DF_ALL_AQI['aqi'][ID])
else:
pass
UPDATED_TEXT = '''
#### Predicted Air Quality Index (AQI)
###### A Keras Sequential model predicted the air quality index for one day into the future given the prior air pollution and respiratory deaths.
###### AQI Ranges: (0-50: Good, 51-100: Moderate, 101-150: Unhealthy for sensitive groups, 151-200: Unhealthy, 201-300: Very unhealthy, 301-500: Hazardous)
County: ''' + COUNTY_NAME +'''
\n Predicted AQI: ''' + AQI_VAL +'''
\n Predicted Date: 03/19/2019 '''
APP_TEXT = '''
Instructions: locate a U.S. county by panning and zooming on the U.S. map that displays the average correlation between respiratory deaths and air pollution in the upper left. Note that counties that did not have data available are not included in the map.
Once a county is located, click on that county to display 1) the correlation between respiratory deaths and air pollution since 2000 (upper right), 2) the predicted air quality index given the prior respiratory deaths and air pollution (text under interactive map),
3) the air quality index (measure of air pollution) since 2000 (lower left), and 4) the percent of respiratory deaths since 2000 (lower right) for that county.
'''
APP.layout = html.Div(style={'backgroundColor': COLORS['background']}, children=[
# Title of interactive interface
html.H1(
children='Correlation and predictive modeling of air pollution and respiratory deaths',
style={'textAlign': 'center', 'color': COLORS['text']}
),
# Sub heading describing the interface
html.Div(children=APP_TEXT, style={
'textAlign': 'center',
'color': COLORS['text'],
'font-size': 14
}),
# Interactive map of the mean correlation across U.S. counties
html.Div([
dcc.Graph(id='correlation map', figure=F)
], style={'width': '50%', 'display': 'inline-block', 'padding': '0 20', 'height':'200%'}),
# Time series of the correlation of a U.S. country
html.Div([
dcc.Graph(id='correlation', figure=FIG_CORR)
], style={'width': '50%', 'display': 'inline-block', 'padding': '0 20', 'height':'200%'}),
# Descriptive text of the predicted AQI
html.Div([
dcc.Markdown(id='predicted_aqi', children=UPDATED_TEXT)
], style={'width': '100%', 'display': 'inline-block', 'padding': '0 20', 'textAlign': 'center', 'color': COLORS['text']}),
# Time Series of the air pollution of a U.S. county
html.Div([
dcc.Graph(id='air_pollution', figure=FIG_AIR_POLLUTION),
], style={'width': '50%', 'display': 'inline-block', 'padding': '0 20'}),
# Time series of the respiratory death rate of a U.S. county
html.Div([
dcc.Graph(id='respiratory_death_rate', figure=FIG_DEATHRATE),
], style={'width': '50%', 'display': 'inline-block', 'padding': '0 20'})
])
# callback for updating air pollution graph
@APP.callback(dash.dependencies.Output('air_pollution', 'figure'), [dash.dependencies.Input('correlation map', 'clickData')])
def update_air_graph(click_data):
'''
Update air pollution graph upon clicking on a U.S. county
Input: FIP ID
Output: Updated county name and air pollution time series
'''
# clicked upon fip id
fip_curr = str(click_data['points'][0]['location'])
df_air = DF_AIR_CORRECTED[DF_AIR_CORRECTED['fips'] == fip_curr]
# create the air pollution figure
fig_air = go.Figure(data=go.Scatter(x=df_air['year'], y=df_air['AQI'], mode='lines+markers', name='lines+markers'))
county_name = county(COUNTIES, fip_curr)
title_name = 'Air Pollution, County: ' + county_name
fig_air.update_layout(title=title_name, xaxis_title='Year', yaxis_title='Air Quality Index')
return fig_air
# Update the predicted aqi for a county
@APP.callback(dash.dependencies.Output('predicted_aqi', 'children'), [dash.dependencies.Input('correlation map', 'clickData')])
def update_aqi_graph(click_data):
'''
Update predicted AQI text
Input: FIP ID
Output: Updated county name and predicted AQI value
'''
# clicked upon fip id
fip_curr = str(click_data['points'][0]['location'])
# get the county name
county_name = county(COUNTIES, fip_curr)
# Get the AQI for that county
for curr_id in range(0, len(DF_ALL_AQI['fips'])):
if fip_curr == DF_ALL_AQI['fips'][curr_id]:
aqi_val = str(DF_ALL_AQI['aqi'][curr_id])
else:
pass
txt = '''
#### Predicted Air Quality Index (AQI)
###### A Keras Sequential model predicted the air quality index for one day into the future given the prior air pollution and respiratory deaths.
###### AQI Ranges: (0-50: Good, 51-100: Moderate, 101-150: Unhealthy for sensitive groups, 151-200: Unhealthy, 201-300: Very unhealthy, 301-500: Hazardous)
County: ''' + county_name +'''
\n Predicted AQI: ''' + aqi_val +'''
\n Predicted Date: 03/19/2019 '''
return txt
# call back for updating the correlation graph
@APP.callback(dash.dependencies.Output('correlation', 'figure'), [dash.dependencies.Input('correlation map', 'clickData')])
def update_corr_graph(click_data):
'''
Update time series correlation graph
Input: FIP ID
Output: Updated county name and correlation
'''
# clicked upon fip id
fip_curr = str(click_data['points'][0]['location'])
# filter data by that county
# filter data by that county
df_corr = DF_CORR_CORRECTED[DF_CORR_CORRECTED['fips'] == fip_curr]
fig_corr = go.Figure(data=go.Scatter(x=df_corr['year'], y=df_corr['correlation'], mode='lines+markers', name='lines+markers'))
county_name = county(COUNTIES, fip_curr)
title_name = 'Correlation between respiratory deaths and air pollution, County: ' + county_name
fig_corr.update_layout(title=title_name, xaxis_title='Year', yaxis_title='Correlation')
fig_corr.update_yaxes(range=[-1, 1])
return fig_corr
# call back for updating the death rate graph
@APP.callback(dash.dependencies.Output('respiratory_death_rate', 'figure'), [dash.dependencies.Input('correlation map', 'clickData')])
def update_death_graph(click_data):
'''
Update time series of respiratory death rates graph
Input: FIP ID
Output: Updated county name and percent deaths time series
'''
# clicked upon fip id
fip_curr = str(click_data['points'][0]['location'])
# Use a fip code to filter the data by county
death_rate = []
county_year = []
for curr_id in range(0, len(DF_DEATH_RATE['fips'])):
if fip_curr == DF_DEATH_RATE['fips'][curr_id]:
death_rate.append(DF_DEATH_RATE['death rate'][curr_id])
county_year.append(DF_DEATH_RATE['year'][curr_id])
else:
pass
adjusted_dr = pd.DataFrame({'year': county_year, 'death rate': death_rate})
sorted_dr = adjusted_dr.sort_values(by=['year'])
fig_dr = go.Figure(data=go.Scatter(x=sorted_dr['year'], y=sorted_dr['death rate'], mode='lines+markers', name='lines+markers'))
county_name = county(COUNTIES, fip_curr)
title_name = 'Respiratory Deaths, County: ' + county_name
fig_dr.update_layout(title=title_name, xaxis_title='Year', yaxis_title='% of Total Deaths')
return fig_dr
if __name__ == '__main__':
APP.run_server(debug=False)
| 2.859375
| 3
|
movie/app/models.py
|
zhangzhibo123/flask---movie
| 0
|
12776044
|
<filename>movie/app/models.py
# -*- coding:utf-8 -*-
from datetime import datetime
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired
__author__ = 'zhangzhibo'
__date__ = '202018/5/18 10:36'
from app import db
class UserInfo(db.Model):
tablename__ = "userinfo"
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(50), unique=True)
email = db.Column(db.String(50), unique=True)
address = db.Column(db.String(100))
def __init__(self, username, email,address):
self.username = username
self.email = email
self.address = address
def __repr__(self):
return '<User %r>' % self.username
# Member (user) model
class User(db.Model):
__tablename__ = "user"
id = db.Column(db.Integer, primary_key=True) #编号
name = db.Column(db.String(100), unique=True)
pwd = db.Column(db.String(100))
email = db.Column(db.String(100), unique=True)
phone = db.Column(db.String(11), unique=True)
info = db.Column(db.Text)
face = db.Column(db.String(255))
addtime = db.Column(db.DateTime, index=True, default=datetime.now)
def __repr__(self):
return "<User %r>" % self.name
    # Verify the password; passwords are stored as sha256-based hashes
def check_pwd(self, pwd):
from werkzeug.security import check_password_hash
return check_password_hash(self.pwd, pwd)
# Administrator model
class Admin(db.Model):
    __tablename__ = "admin"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(100), unique=True)
pwd = db.Column(db.String(100))
is_super = db.Column(db.SmallInteger)
addtime = db.Column(db.DateTime, index=True, default=datetime.now)
def __repr__(self):
return "<User %r>" % self.name
def check_pwd(self, pwd):
from werkzeug.security import check_password_hash
return check_password_hash(self.pwd, pwd)
class Tag(db.Model):
__tablename__ = "tag"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(100), unique=True)
addtime = db.Column(db.DateTime, index=True, default=datetime.now)
def __repr__(self):
return "<Tag %r>" % self.name
if __name__ == '__main__':
db.create_all()
| 2.75
| 3
|
pjproject_android/pjsip-apps/src/python/samples/presence.py
|
WachterJud/qaul.net_legacy
| 4
|
12776045
|
<filename>pjproject_android/pjsip-apps/src/python/samples/presence.py
# $Id: presence.py 2171 2008-07-24 09:01:33Z bennylp $
#
# Presence and instant messaging
#
# Copyright (C) 2003-2008 <NAME> <<EMAIL>>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import sys
import pjsua as pj
LOG_LEVEL = 3
pending_pres = None
pending_uri = None
def log_cb(level, str, len):
print str,
class MyAccountCallback(pj.AccountCallback):
def __init__(self, account=None):
pj.AccountCallback.__init__(self, account)
def on_incoming_subscribe(self, buddy, from_uri, contact_uri, pres):
global pending_pres, pending_uri
# Allow buddy to subscribe to our presence
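        # Returning (200, None) accepts the request outright; returning (202, None) below leaves it
        # pending until the operator presses "A" (accept) or "R" (reject) in the menu loop.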
if buddy:
return (200, None)
print 'Incoming SUBSCRIBE request from', from_uri
print 'Press "A" to accept and add, "R" to reject the request'
pending_pres = pres
pending_uri = from_uri
return (202, None)
class MyBuddyCallback(pj.BuddyCallback):
def __init__(self, buddy=None):
pj.BuddyCallback.__init__(self, buddy)
def on_state(self):
print "Buddy", self.buddy.info().uri, "is",
print self.buddy.info().online_text
def on_pager(self, mime_type, body):
print "Instant message from", self.buddy.info().uri,
print "(", mime_type, "):"
print body
def on_pager_status(self, body, im_id, code, reason):
if code >= 300:
print "Message delivery failed for message",
print body, "to", self.buddy.info().uri, ":", reason
def on_typing(self, is_typing):
if is_typing:
print self.buddy.info().uri, "is typing"
else:
print self.buddy.info().uri, "stops typing"
lib = pj.Lib()
try:
# Init library with default config and some customized
# logging config.
lib.init(log_cfg = pj.LogConfig(level=LOG_LEVEL, callback=log_cb))
# Create UDP transport which listens to any available port
transport = lib.create_transport(pj.TransportType.UDP,
pj.TransportConfig(0))
print "\nListening on", transport.info().host,
print "port", transport.info().port, "\n"
# Start the library
lib.start()
# Create local account
acc = lib.create_account_for_transport(transport, cb=MyAccountCallback())
acc.set_basic_status(True)
my_sip_uri = "sip:" + transport.info().host + \
":" + str(transport.info().port)
buddy = None
# Menu loop
while True:
print "My SIP URI is", my_sip_uri
print "Menu: a=add buddy, d=delete buddy, t=toggle", \
" online status, i=send IM, q=quit"
input = sys.stdin.readline().rstrip("\r\n")
if input == "a":
# Add buddy
print "Enter buddy URI: ",
input = sys.stdin.readline().rstrip("\r\n")
if input == "":
continue
buddy = acc.add_buddy(input, cb=MyBuddyCallback())
buddy.subscribe()
elif input == "t":
acc.set_basic_status(not acc.info().online_status)
elif input == "i":
if not buddy:
print "Add buddy first"
continue
buddy.send_typing_ind(True)
print "Type the message: ",
input = sys.stdin.readline().rstrip("\r\n")
if input == "":
buddy.send_typing_ind(False)
continue
buddy.send_pager(input)
elif input == "d":
if buddy:
buddy.delete()
buddy = None
else:
print 'No buddy was added'
elif input == "A":
if pending_pres:
acc.pres_notify(pending_pres, pj.SubscriptionState.ACTIVE)
buddy = acc.add_buddy(pending_uri, cb=MyBuddyCallback())
buddy.subscribe()
pending_pres = None
pending_uri = None
else:
print "No pending request"
elif input == "R":
if pending_pres:
acc.pres_notify(pending_pres, pj.SubscriptionState.TERMINATED,
"rejected")
pending_pres = None
pending_uri = None
else:
print "No pending request"
elif input == "q":
break
    # Reject any still-pending presence request before deleting the account
    if pending_pres:
        acc.pres_notify(pending_pres, pj.SubscriptionState.TERMINATED,
                        "rejected")
    # Shutdown the library
    acc.delete()
    acc = None
transport = None
lib.destroy()
lib = None
except pj.Error, e:
print "Exception: " + str(e)
lib.destroy()
lib = None
| 2.28125
| 2
|
plot.py
|
thepushkarp/Analyze-IMDB-Top-250
| 4
|
12776046
|
<reponame>thepushkarp/Analyze-IMDB-Top-250
import os
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import pandas as pd
print('Reading data from csv...')
# Read data from csv file
movieRatings = pd.read_csv('movieRatings.csv', header = 0)
# Add decade column to Movie Ratings DataFrame
movieRatings['Decade'] = ((movieRatings['Year']//10).astype(int)*10)
# Number of movies released and average IMDB ratings segregated by decade
moviesByDecade = pd.DataFrame({
'Decade' : movieRatings['Decade'].value_counts().index,
'Movies' : movieRatings['Decade'].value_counts(),
'Average IMDB Rating' : None
}).sort_values('Decade').reset_index(drop = True)
# Calculate Average IMDB Ratings
for i in range(len(moviesByDecade)):
decadeFilter = movieRatings['Decade'] == moviesByDecade.iloc[i, 0]
filteredMovies = movieRatings[decadeFilter]
moviesByDecade.iloc[i, 2] = filteredMovies['IMDB Rating'].mean()
# Set of unique genres
genreList = set()
# List of genre for each movie
genres = []
# Add genres to genreList
for i in range(len(movieRatings)):
genre = movieRatings.iloc[i, 2].strip('[]').split(', ')
genre = [genreName.strip('\'') for genreName in genre]
genres.append(genre)
genreList.update(set(genre))
# Change Genre Column to list from string
movieRatings['Genre'] = pd.Series(genres)
# Number of movies segregated by Genre
moviesByGenre = pd.DataFrame({
'Genres' : list(genreList),
'Movies' : 0
})
# Add number of movies for each genre
for i in range(len(moviesByGenre)):
for j in range(len(movieRatings)):
if moviesByGenre.iloc[i, 0] in movieRatings.iloc[j, 2]:
moviesByGenre.iloc[i, 1]+=1
print('Plotting graphs of the data...')
# Plot movies by genre
moviesByGenre.plot(x = 'Genres', y = 'Movies', kind = 'bar', figsize=(17, 17), \
fontsize = 15)
plt.title('Number of movies in IMDB Top 250 by genre', fontsize=30)
plt.legend(['Number of movies'], fontsize = 15)
plt.xlabel('Genres', fontsize=20)
plt.ylabel('Number of movies', fontsize=20)
plt.savefig(os.path.join('plots', 'genre.png'))
# Plot movies by decade
moviesByDecade.plot(x = 'Decade', y = 'Movies', kind = 'bar', figsize=(17, 17), \
fontsize = 15)
plt.title('Number of movies in IMDB Top 250 by decade', fontsize=30)
plt.legend(['Number of movies'], fontsize = 15)
plt.xlabel('Decade', fontsize=20)
plt.ylabel('Number of movies', fontsize=20)
plt.savefig(os.path.join('plots', 'decade.png'))
# Plot average IMDB rating
moviesByDecade.plot(x = 'Decade', y = 'Average IMDB Rating', kind = 'line', \
figsize=(17, 17), fontsize = 15)
plt.title('Average IMDB ratings of IMDB Top 250 movies by decade', fontsize=30)
plt.legend(['Average IMDB rating'], fontsize = 15)
plt.xlabel('Decade', fontsize=20)
plt.ylabel('IMDB rating', fontsize=20)
plt.savefig(os.path.join('plots', 'average.png'))
print('Plots saved in \'plots\' folder')
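# Alternative sketch (assumes pandas >= 0.25, where DataFrame.explode exists):
# the per-genre counts built with the nested loop above can also be obtained by
# exploding the Genre lists into one row per genre and counting.
genreCountsAlt = movieRatings.explode('Genre')['Genre'].value_counts()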
| 3.359375
| 3
|
module/utils.py
|
XuanMaoSecLab/shockwave
| 8
|
12776047
|
import os
from itertools import product
# get_files(loadrules,["[F.3]","[A.1]"],[".yaml"])
def get_files(_path, _startwith=None, _endwith=None):
'''
    Get all files under `_path` whose names match any prefix in `_startwith` and any suffix in `_endwith`.
:param _startwith : ["str1","str2"]
:param _endwith : [".sol",".py"]
'''
if not _startwith: _startwith = [""]
if type(_startwith) is str :
if os.path.isfile(_startwith):
return [_startwith]
else:
_startwith = [_startwith]
if not _endwith: _endwith = [""]
if type(_endwith) is str : _endwith = [_endwith]
all_files = []
def checkwith(_fp,_fn):
for x,y in list(product(_startwith, _endwith)):
if _fn.startswith(x) and _fn.endswith(y):
path_name = os.path.join(_fp,_fn)
all_files.append(path_name)
for fpath, dirname, fnames in os.walk(_path):
for filename in fnames:
checkwith(fpath,filename)
return all_files
def filestartwith(_file):
_startwith = []
if '.' in _file:
for t in _file.split(","):
_startwith.append("[{0}]".format(t))
else:
_startwith = [""]
return _startwith
def filenamewith(_file):
_startwith = []
if _file:
for t in _file.split(","):
_startwith.append(t)
else:
_startwith = [""]
return _startwith
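# Example sketch, mirroring the comment at the top of this module; the "loadrules"
# directory and the "[F.3]"/"[A.1]" prefixes are placeholder values.
if __name__ == "__main__":
    print(get_files("loadrules", ["[F.3]", "[A.1]"], [".yaml"]))
    # filestartwith() wraps comma-separated tags in brackets for prefix matching
    print(filestartwith("F.3,A.1"))  # -> ['[F.3]', '[A.1]']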
| 2.9375
| 3
|
freqtools/freq_models.py
|
bleykauf/freqtools
| 0
|
12776048
|
"""Submodule containing frequency-based models."""
from freqtools.freq_data import OscillatorNoise
import numpy as np
import matplotlib.pyplot as plt
class FreqModel:
"""
Base class for frequency based models, i.e. values (y axis) as a function of
    frequency (x axis). Its functionality is purposefully kept simple and its main
purpose is to implement basic behaviour.
Parameters
----------
*args :
        Placeholder, not used. The respective subclasses have to implement the
        behaviour of positional arguments.
**kwargs :
        All keyword arguments are added as attributes.
"""
def __init__(self, *args, **kwargs):
del args
for key, value in kwargs.items():
setattr(self, key, value)
def values(self, freqs):
raise NotImplementedError("Subclasses have to implement this method.")
def plot(self, freqs, ax=None, xscale="log", yscale="log", ylabel=""):
"""
Plot the model.
Parameters
----------
ax : Axis (optional)
If axis is provided, they will be used for the plot. if not provided, a new
plot will automatically be created.
xscale : {"log" or "linear"}
Scaling of the x axis.
yscale : {"log" or "linear"}
Scaling for the y axis.
ylabel : str
Label for the y axis.
Returns
-------
fig, ax : Figure, Axis
The Figure and Axis handles of the plot that was used.
"""
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.figure
ax.plot(freqs, self.values(freqs), label=self.label)
ax.set_xscale(xscale)
ax.set_yscale(yscale)
ax.set_ylabel(ylabel)
ax.set_xlabel("Frequency / Hz")
plt.grid(True, which="both", ls="-")
return fig, ax
class OscillatorNoiseModel(FreqModel):
"""
A base class holding models of spectral densities of oscillator noise, i.e.
frequency or phase noise. Its main purpose is to make it easy to convert between
    ASD(f), PSD(f) and L(f) in terms of both frequency and phase noise. The data is
    provided in one of these representations, and the class makes all other
    representations available.
Parameters
----------
*args :
        Placeholder, not used. The respective subclasses have to implement the
        behaviour of positional arguments.
n_sided : 1 (optional)
placeholder, for now only one-sided distributions are supported.
label : str
Optional label used for plotting.
**kwargs :
        All keyword arguments are added as attributes.
Attributes
----------
n_sided
label : str
Optional label used for plotting
representation
unit
ylabel
"""
def __init__(self, n_sided=1, label="", representation=None, **kwargs):
_allowed_representations = [
"asd_freq",
"asd_phase",
"psd_freq",
"psd_phase",
"script_L",
]
super().__init__(
label=label,
n_sided=n_sided,
_allowed_representations=list(_allowed_representations),
representation=representation,
**kwargs
)
self._unit_dict = {
"asd_freq": "Hz/$\\sqrt{\\mathrm{Hz}}$",
"asd_phase": "$\\mathrm{rad}/\\sqrt{\\mathrm{Hz}}$",
"psd_freq": "Hz${}^2$/Hz",
"psd_phase": "rad${}^2$/Hz",
"script_L": "dBc/Hz",
}
self._ylabel_dict = {
"asd_freq": "{}-sided ASD",
"asd_phase": "{}-sided ASD",
"psd_freq": "{}-sided PSD",
"psd_phase": "{}-sided PSD",
"script_L": "L(f)",
}
@property
def ylabel(self):
"""y axis label used for plotting; doesn't contain the unit.""" # noqa: D403
return self._ylabel_dict[self.representation].format(self.n_sided)
@property
def unit(self):
"""String containing the unit of `values`"""
return self._unit_dict[self.representation]
@property
def representation(self):
"""The representation of `values`."""
return self._representation
@representation.setter
def representation(self, representation):
assert (
representation in self._allowed_representations
), "representation must be one of {}".format(self._allowed_representations)
self._representation = representation
@property
def n_sided(self):
"""Currently only one-sided distribtuions are supported."""
return self._n_sided
@n_sided.setter
def n_sided(self, new_n):
# FIXME: support for two-sided distributions.
assert new_n == 1, "Only 1-sided distributions are supported as of yet."
self._n_sided = new_n
def values(self, freqs):
"""
Array containing the values of the spectral density model. Maps to one
representation, depending on `representation` attribute.
"""
method = getattr(self, self.representation)
return method(freqs)
def asd_freq(self, freqs):
"""
Amplitude spectral density of the frequency noise.
Parameters
----------
freqs : list_like
Frequencies where the model is evaluated
Returns
-------
1darray
"""
return np.array(freqs) * self.asd_phase(freqs)
def asd_phase(self, freqs):
"""
Amplitude spectral density of the phase noise.
Parameters
----------
freqs : list_like
Frequencies where the model is evaluated
Returns
-------
1darray
"""
return np.sqrt(self.psd_phase(freqs))
def psd_freq(self, freqs):
"""
Power spectral density of the frequency noise.
Parameters
----------
freqs : list_like
Frequencies where the model is evaluated
Returns
-------
1darray
"""
return self.asd_freq(freqs) ** 2
def psd_phase(self, freqs):
"""
Power spectral density of the phase noise.
Parameters
----------
freqs : list_like
Frequencies where the model is evaluated
Returns
-------
1darray
"""
# psd_phase can either be derived from psd_freq or script_L
try:
# convert to linear scale, factor 1/10 in exponent because dBc are used
psd_phase = 10 ** (self.script_L(freqs) / 10)
if self.n_sided == 1:
# one-sided distributions have a factor 2, see Table A1 in [1]
psd_phase *= 2
except AttributeError:
psd_phase = self.psd_freq(freqs) / np.array(freqs) ** 2
return psd_phase
def script_L(self, freqs):
"""
The phase noise L(f) (pronounced "script ell of f").
Parameters
----------
freqs : list_like
Frequencies where the model is evaluated
Returns
-------
1darray
"""
# see Table A.1 in [1] for the conversion from S_phi(f) and L(f)
L = self.psd_phase(freqs)
if self.n_sided == 1:
L /= 2
L = 10 * np.log10(L) # convert to dBc/Hz
return L
def plot(self, freqs, ax=None, xscale="log", yscale="log", ylabel=""):
"""
Plot the spectral density model.
Parameters
----------
freqs : list_like
Frequencies where the model is evaluated.
ax : matplotlib.axes.Axes (optional)
The axes to plot on. If not given, a new figure is created.
xscale : str {"log", "linear"} (optional)
The scale of the x-axis.
yscale : str {"log", "linear"} (optional)
The scale of the y-axis.
ylabel : str (optional)
The label of the y-axis.
"""
if not ylabel:
# automatically create ylabel
ylabel = self.ylabel + " / " + self.unit
fig, ax = super().plot(
freqs, ax=ax, xscale=xscale, yscale=yscale, ylabel=ylabel
)
if not self.representation == "script_L":
ax.set_yscale("log")
return fig, ax
def to_oscillator_noise(self, freqs):
"""
Convert the noise model to a `OscillatorNoise` object.
Parameters
----------
freqs : 1d-array
The Fourier frequencies in Hz.
Returns
-------
oscillator_noise : OscillatorNoise
The model represented as an `OscillatorNoise` object.
"""
oscillator_noise = OscillatorNoise(
freqs,
self.values(freqs),
representation=self.representation,
n_sided=self.n_sided,
divide_by=1,
)
return oscillator_noise
class PowerLawNoise(OscillatorNoiseModel):
r"""
Power law phase and frequency noise models [1] for common noise types:
.. math:: S_\phi = b_{i} \cdot f^{i}
or
.. math:: S_\phi = d_{i} \cdot f^{i}
Parameters
----------
coeff : float or list of floats
Coefficient b_i (for phase noise) or d_i (for frequency noise), cp. [1]. Has to
        be a list if `edge_freqs` is set.
exponent : int or list of ints
The coefficient of the power law noise. The noise type depends on the `base`
for a given exponent, cp. [1]. Has to be a list if `edge_freqs` is set.
edge_freqs : list of floats (optional)
Allows to construct composite models that have different noise types for
different frequency ranges. In this case, `coeff` and `exponent` have to be
lists of length `len(edge_freqs) + 1`. The edge frequencies are the frequencies
where the noise type changes.
Allowed coefficients for phase noise:
- -4 : random walk frequency
- -3 : flicker frequency
- -2 : white frequency
- -1 : flicker phase
- 0 : white phase
Allowed coefficients for frequency noise:
- -2 : random walk frequency
- -1 : flicker frequency
- 0 : white frequency
- 1 : flicker phase
- 2 : white phase
base : {'phase', 'freq'}:
        determines whether the exponent and coefficient are given in terms of phase or
frequency.
References
----------
[1] <NAME> - Enrico's Chart of Phase Noise and Two-Sample Variances
(http://rubiola.org/pdf-static/Enrico%27s-chart-EFTS.pdf)
"""
def __init__(
self,
coeff=1,
exponent=0,
base="phase",
representation="psd_phase",
edge_freqs=None,
):
assert base in ["phase", "freq"]
if base == "freq":
# express everything in terms of psd_phase
if type(exponent) == list:
exponent = np.array(exponent)
exponent = exponent - 2
_label_dict = {
-4: "random walk frequency",
-3: "flicker frequency",
-2: "white frequency",
-1: "flicker phase",
0: "white phase",
}
try:
label = _label_dict[exponent] + " noise"
except (KeyError, TypeError):
label = "noise model"
super().__init__(
coeff=coeff, exponent=exponent, label=label, representation=representation
)
if edge_freqs:
self.edge_freqs = list(edge_freqs)
self.edge_freqs.append(np.inf)
def psd_phase(self, freqs):
"""
Power spectral density of the phase noise.
Parameters
----------
freqs : list_like
Frequencies where the model is evaluated.
Returns
-------
1darray :
The power spectral density of the phase noise.
"""
# Implement PSD of phase, all other representations can be calculated by virtue
# of subclassing OscillatorNoiseModel.
# FIXME: Improve the cases
if type(self.coeff) == list:
previous_f_edge = 0
freqs = np.array(freqs)
values = []
for f_edge, coeff, exp in zip(self.edge_freqs, self.coeff, self.exponent):
idx = np.where(np.logical_and(freqs > previous_f_edge, freqs <= f_edge))
new_vals = coeff * freqs[idx] ** exp
values.append(new_vals)
previous_f_edge = f_edge
# flatten the list of lists
values = [item for sublist in values for item in sublist]
if len(values) < len(freqs):
# add the last value
values.append(coeff * freqs[-1] ** exp)
values = np.array(values)
else:
values = self.coeff * freqs**self.exponent
return values
class JohnsonNoise(OscillatorNoiseModel):
"""
Johnson Noise model.
Parameters
----------
signal_power : float
Carrier signal power in dBm / Hz
temperature : float (default 300.)
Temperature in kelvin
Attributes
----------
signal_power : float
temperature : float
References
----------
[1] Wikipedia: Johnson–Nyquist noise
(https://en.wikipedia.org/wiki/Johnson%E2%80%93Nyquist_noise)
"""
def __init__(
self,
signal_power,
temperature=300.0,
label="Johnson Noise",
representation=None,
):
super().__init__(temperature=temperature, label=label, n_sided=1)
self.signal_power = signal_power
def script_L(self, freqs):
"""
Calculate the script_L representation of the Johnson noise.
Parameters
----------
freqs : list_like
Frequencies where the model is evaluated.
Returns
-------
1darray :
The script_L representation of the Johnson noise.
"""
# Implement L(f), all other representations can be calculated by virtue of
# subclassing OscillatorNoiseModel.
kb = 1.380649e-23 # Boltzmann constant in J/K
freqs = np.ones(len(freqs))
        # 1e-3 because power is referenced to mW; normalize to the signal power; freqs sets the output length
noise = (
10 * np.log10(4 * kb * self.temperature / 1e-3) * freqs - self.signal_power
)
# subtract 3 dB since above quantity is defined as one-sided according to [1]
noise -= 3
return noise
class PhotonShotNoise(OscillatorNoiseModel):
"""
Shot noise of an optical beatnote
Parameters
----------
signal_power : float
Signal power in dBm / Hz
radiant_sensitivity : float (default 0.3)
Radiant sensitivity of the photodiode in A/W. Default taken for Hamamatsu G4176.
optical_power : float (default 1e-3)
optical power in W
    resistivity : float (default 50)
resistivity in Ohm.
"""
def __init__(
self,
signal_power,
optical_power=1e-3,
radiant_sensitivity=0.3,
representation=None,
resistivity=50,
label="Photon shot noise",
):
super().__init__(
radiant_sensitivity=radiant_sensitivity,
resistivity=resistivity,
label=label,
optical_power=optical_power,
n_sided=1,
)
self.signal_power = signal_power
def script_L(self, freqs):
"""
Calculate the script_L representation of the Johnson noise.
Parameters
----------
freqs : list_like
Frequencies where the model is evaluated.
Returns
-------
1darray :
The script_L representation of the photon shot noise.
"""
e = 1.6e-19 # electron charge in C
freqs = np.ones(len(freqs))
noise = (
10
* np.log10(
2
* e
* self.radiant_sensitivity
* self.optical_power
* self.resistivity
/ 1e-3
)
* freqs
- self.signal_power
)
# FIXME: Assume above expression is a one-sided distribution, but didn't check.
noise -= 3
return noise
class NoiseFloor(OscillatorNoiseModel):
"""
    Used for converting a spectrum analyzer measurement to an oscillator noise model
    of the noise floor by dividing the detection noise by the carrier signal amplitude.
Parameters
----------
signal_power : float
Signal power in dBm / Hz
noise_floor : float
measured noise floor in dBm / Hz
divide_by : int (optional)
        divide-by factor if a prescaler was used for the measurements
Attributes
----------
signal_power : float
Signal power in dBm / Hz
noise_floor : float
measured noise floor in dBm / Hz
divide_by : int
        divide-by factor if a prescaler was used for the measurements
"""
def __init__(
self,
signal_power,
noise_floor,
representation=None,
divide_by=1,
label="Detection noise",
):
super().__init__(label=label, divide_by=divide_by, n_sided=1)
self.signal_power = signal_power
self.noise_floor = noise_floor
def script_L(self, freqs):
"""
Calculate the script_L representation of the noise floor.
Parameters
----------
freqs : list_like
Frequencies where the model is evaluated.
Returns
-------
1darray :
The script_L representation of the noise floor.
"""
freqs = np.ones(len(freqs))
noise = (
freqs * self.noise_floor + 20 * np.log10(self.divide_by) - self.signal_power
)
noise -= 3 # is measured as one-sided distribution
return noise
class BetaLine(OscillatorNoiseModel):
"""
The beta separation line as a function of frequency. It is originally defined for
the single-sided spectral density (in Hz²/Hz).
References
----------
[1] <NAME>., <NAME>., & <NAME>. (2010). Simple approach to the
relation between laser frequency noise and laser line shape.
Applied Optics, 49(25), 4801.
https://doi.org/10.1364/AO.49.004801
"""
def __init__(self, representation="psd_freq", **kwargs):
super().__init__(
representation=representation, label=r"$\beta$ separation line", **kwargs
)
def psd_freq(self, freqs):
"""
The values of the beta separation line in Hz²/Hz as a function of frequency
Parameters
----------
freqs : float or list_like
Frequency in Hz
Returns
-------
1d array :
The values of the beta separation line.
"""
return 8 * np.log(2) * np.array(freqs) / np.pi**2
def intersection(self, density, which="first"):
"""
        Returns the frequency where the PSD and the beta separation line intersect.
Parameters
----------
density : OscillatorNoise
            An OscillatorNoise object. The correct representation (PSD of frequency) will
automatically be used.
which : {'first', 'last'}
if there are more intersections between beta separation line and PSD, this
argument determines whether the lowest (first, default) or highest (last)
intersection frequency should be returned.
Returns
-------
float :
the frequency where the two lines intersect in Hz
"""
psd_vals = density.psd_freq
beta_vals = self.values(density.freqs)
# indices of the intersections, i.e. where the sign of the difference between
# the PSD and the beta separation line switches.
idx = np.argwhere(np.diff(np.sign(psd_vals - beta_vals))).flatten()
first_or_last = {"first": 0, "last": -1}
if idx.size == 0: # array is empty
return np.inf
return density.freqs[idx][first_or_last[which]]
def linewidth(self, density, f_min=1e3, which="first"):
"""
The FWHM linewidth according to equation (10) in [1].
Parameters
----------
density : OscillatorNoise
            An OscillatorNoise object. The correct representation (PSD of frequency) will
automatically be used.
f_min : float
minimum values of the frequency that should be considered in Hz. The
            default value for f_min (1e3 Hz) corresponds to a time of 1 ms.
which : {'first', 'last'}
if there are more intersections between beta separation line and PSD, this
argument determines whether the lowest (first, default) or highest (last)
intersection frequency should be returned.
"""
f_max = self.intersection(density, which=which)
idx = np.where(np.logical_and(density.freqs <= f_max, density.freqs >= f_min))
freqs = density.freqs[idx]
psd_vals_over_line = density.values[idx]
# equation (10) in [1]
area = np.trapz(psd_vals_over_line, x=freqs)
fwhm = np.sqrt(8 * np.log(2) * area) # equation (9) in [1]
return fwhm
class AtomShotNoise(FreqModel):
"""
Atomic shot noise of an atom interferometer gravimeter.
Parameters
----------
n_atoms : float
Number of atoms.
contrast : float
Peak-to-peak contrast of the fringe.
T : float
Interferometer time in seconds.
keff : float
Effective wavevector of the atom interferometer in 1/m.
"""
def __init__(self, n_atoms, contrast, T, keff, **kwargs):
        super().__init__(n_atoms=n_atoms, contrast=contrast, T=T, keff=keff, **kwargs)
def values(self, freqs):
"""Shot noise limit in m/s²."""
sigma_p = 1 / np.sqrt(self.n_atoms) # atomic shot noise
sigma_g = 2 * sigma_p / (self.contrast * self.keff * self.T**2) # in m/s**2
return sigma_g
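# Example sketch: evaluating a hypothetical white-frequency-noise model in a few
# representations, together with the beta separation line, on one frequency grid.
# The coefficient is a placeholder, not a measured value.
def _example_noise_models():
    freqs = np.logspace(0, 6, 200)
    # b_-2 * f**-2 phase-noise PSD, i.e. white frequency noise
    model = PowerLawNoise(coeff=100.0, exponent=-2, base="phase")
    psd_phase = model.psd_phase(freqs)      # rad^2/Hz
    script_l = model.script_L(freqs)        # dBc/Hz, one-sided
    beta_psd = BetaLine().psd_freq(freqs)   # Hz^2/Hz
    fig, ax = model.plot(freqs)             # plots the default psd_phase representation
    return psd_phase, script_l, beta_psd, fig, ax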
| 3.1875
| 3
|
homeassistant/components/derivative/__init__.py
|
domwillcode/home-assistant
| 22,481
|
12776049
|
"""The derivative component."""
| 1.148438
| 1
|
desafios/Mundo 2/Ex043IMC.py
|
duartecgustavo/Python---Estudos-
| 6
|
12776050
|
<filename>desafios/Mundo 2/Ex043IMC.py
# Challenge 43 - Lesson 12: Program that computes the BMI (Body Mass Index) and classifies it:
# A/ Below 18.5 - UNDERWEIGHT.
# B/ Between 18.5 and 25 - IDEAL WEIGHT.
# C/ From 25 up to 30 - OVERWEIGHT.
# D/ From 30 up to 50 - OBESITY.
# E/ Above 50 - MORBID OBESITY.
# FORMULA - WEIGHT / HEIGHT²
print('='*50)
nome = 'BMI (BODY MASS INDEX) FREE'
print(f'{nome:^50}')
print('='*50)
altura = float(input('Tell me your height: (m) '))
peso = float(input('Now tell me your current weight: (kg) '))
imc = peso/(altura**2)
if imc < 18.5:
    print(f'Your BMI is {imc:.1f}!\nYou are UNDERWEIGHT! Eat a bit more.')
elif 18.5 <= imc < 25:
    print(f'Your BMI is {imc:.1f}!\nYou are at your IDEAL WEIGHT. Keep it up!')
elif 25 <= imc <= 30:
    print(f'Your BMI is {imc:.1f}!\nYou are OVERWEIGHT! Take care of yourself and exercise.')
elif 30 < imc <= 50:
    print(f'Your BMI is {imc:.1f}!\nYou are OBESE; exercise, watch your diet and, if possible, see a doctor.')
else:
    print(f'Your BMI is {imc:.1f}!\nWARNING! See a specialist doctor for guidance about your weight. Your health is at risk!')
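# Sketch of the same classification as a reusable function; the thresholds mirror
# the chained conditions above, with no gaps between the ranges.
def classify_bmi(bmi):
    if bmi < 18.5:
        return 'UNDERWEIGHT'
    elif bmi < 25:
        return 'IDEAL WEIGHT'
    elif bmi <= 30:
        return 'OVERWEIGHT'
    elif bmi <= 50:
        return 'OBESITY'
    return 'MORBID OBESITY'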
| 3.4375
| 3
|