blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
317dad88744c1269052736adadbc85b1b3cbd13d | 27ff304d692664a492987904f195a58a342d5864 | /python/scripts/blast_checker.py | 63b24b0f557d0400194c36cbb2f21a8a56c57d2f | [] | no_license | grmwld/crg | b252387b01fc77fa31bae0c2b9824cd86f583e36 | 330382f7734e7af38e43f64594ed119eebad0cad | refs/heads/master | 2021-05-27T23:20:02.386205 | 2010-06-24T21:45:56 | 2010-06-24T21:45:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,568 | py | #! /usr/bin/python
import sys
import os
import optparse
import shutil
import logging
from cStringIO import StringIO
sys.path.append('/users/rg/agrimaldi/Code/python/python/lib/')
from AGBio.ncbi.BlastWrappers import *
from AGBio.Utilities import *
sys.path.append('/users/rg/mmariotti/libraries')
sys.path.append('/users/rg/mmariotti/scripts')
def main():
parser = optparse.OptionParser()
parser.add_option('-d', '--database_check',
dest='dbc',
help='location of the database that should be used for checking.',
metavar='DB')
parser.add_option('-i', '--input',
dest='fasta_query',
help='Full path to a .seq file inside a selenoprofiles output folder.',
metavar='FILE')
parser.add_option('-a', '--ncore',
dest='ncore',
type='int',
help='number of cores to use for the blast.',
metavar='INT')
parser.add_option('-b', '--blast_flavour',
dest='blast_flavour',
help='what kind of blast should be performed ?',
metavar='BLAST')
parser.add_option('-v', '--verbosity',
dest='verbosity', action='count',
help='set verbosity level')
parser.add_option('-T', '--temp',
dest='temp',
help='temporary folder.',
metavar='DIR')
parser.set_defaults(temp = '/home/agrimaldi/temp',
ncore = 1,
blast_flavour = 'blastp')
(opts, args) = parser.parse_args()
sp_res = opts.fasta_query
blast_output = os.path.join(opts.temp, 'tmpblast.xml')
blaster = BlastAllWrapper(sp_res, blast_output,
flavour=opts.blast_flavour,
db=opts.dbc, gis=True, ncore=opts.ncore)
print blaster.cline
blaster.run()
gis_file = os.path.join(opts.temp, 'tmpgis')
os.system("read_blast_xml -n 50 -i " + blast_output + " | grep S_ID | gawk -F'|' '{print $2}' > " + gis_file)
blast_res = os.path.join(opts.temp, 'blast_res.fasta')
os.system("fastacmd -i "+ gis_file +" -o "+ blast_res+ " -d " + opts.db)
blast_res_aln = os.path.join(opts.temp, 'blast_res.faln')
os.system("custom_align.py -i "+blast_res + " -o " + blast_res_aln + " -m tcoffee -r gi -a 4 -u U:C -t " + opts.temp)
if __name__ == '__main__':
    # Run the checker only when invoked as a script.
    main()
| [
"sopo.dev@gmail.com"
] | sopo.dev@gmail.com |
f4f6ff305e1272bdd3cbe21061c7609ed4be5b08 | 9cef360429d6a5c7eee4e223a132760a11d955dc | /ex1.py | bdfee7d05f3b168680bb2c66e53b3d7dac09e02d | [] | no_license | JIANG09/LearnPythonTheHardWay | 12df059ad02e3d413b0fb8701fb718ce8dcb25c7 | 6f5dc3554e6f02e74b227b89254fb47158d227f0 | refs/heads/master | 2021-07-10T19:00:05.313674 | 2018-12-11T14:11:07 | 2018-12-11T14:11:07 | 128,955,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | # coding=utf-8
# Exercise 1 ("Learn Python the Hard Way" style): print a fixed series of
# lines demonstrating single- and double-quoted string literals.
for message in (
    "Hello World!",
    "Hello Again",
    "I like typing this.",
    "This is fun.",
    'Yay!Printing!',
    "I'd much rather you 'not'",
    'I "said" do not touch this.',
    '希望爸爸胃口好心情好!',
):
    print(message)
"2907411054@qq.com"
] | 2907411054@qq.com |
a9d72cd94acdd3bc036f28fd6640c9053f57bcc8 | e366e330fcf6c4f35e337056f73e65fb38bf351a | /coordenadas.py | 077436ae877391917839124eec331bd88a8839a4 | [] | no_license | CBGO2/Mision-02 | 5df5b7c67df6f2d62a24211ded1c5be67415ba9e | d5fa407d7466d8b731044d34f01cfd26b0b14202 | refs/heads/master | 2020-03-26T22:50:08.595899 | 2018-08-22T03:52:31 | 2018-08-22T03:52:31 | 145,490,598 | 0 | 0 | null | 2018-08-21T01:42:40 | 2018-08-21T01:42:39 | null | UTF-8 | Python | false | false | 456 | py | # Autor: Carlos Badillo García, A01377618
# Description: read two points from the user and print the distance
# between them (Euclidean distance in the plane).
x1 = int(input("¿Cuál es el valor de x1?"))
y1 = int(input("¿Cuál es el valor de y1?"))
x2 = int(input("¿Cuál es el valor de x2?"))
y2 = int(input("¿Cuál es el valor de y2?"))
# sqrt((x2-x1)^2 + (y2-y1)^2)
distancia = ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5
print("La distancia entre los dos puntos es:", distancia)
| [
"noreply@github.com"
] | noreply@github.com |
07750ee3854043de3103fc109c88235690b26540 | 234f1b4fadd5253837e8acfb134a4330be783862 | /alien_invasion/files/ship.py | 09b5f8ad9818597cbbe6faf455c17ab0a3a482df | [] | no_license | mozoku24/Python-Crash-Course | cad2b433a9aef8fdeb4be1650d6d012df389ae87 | 3590801c9cfe358edb8b9d0df591710794c65f13 | refs/heads/master | 2022-12-03T00:04:26.962319 | 2020-08-19T07:18:40 | 2020-08-19T07:18:40 | 286,694,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,719 | py | """
ship模块,包含ship类,负责管理飞船的大部分行为。
"""
import pygame
from pygame.sprite import Sprite
class Ship(Sprite):
    """Player ship sprite: loads its image and manages position/movement."""
    def __init__(self, ai_settings, screen):
        """Initialise the ship and place it at its starting position."""
        super(Ship, self).__init__()
        self.screen = screen
        self.ai_settings = ai_settings
        # Load the ship image and get its bounding rectangle.
        # NOTE(review): hard-coded absolute Windows path — breaks on any
        # other machine; consider a path relative to the project root.
        self.image = pygame.image.load(r'E:\Temp\alien_invasion\images\ship.bmp')
        self.rect = self.image.get_rect()
        self.screen_rect = screen.get_rect()
        # Start each new ship at the bottom centre of the screen.
        self.rect.centerx = self.screen_rect.centerx
        self.rect.bottom = self.screen_rect.bottom
        # Keep a float copy of the horizontal centre so sub-pixel speed
        # factors accumulate correctly between frames.
        self.center = float(self.rect.centerx)
        # Movement flags toggled by the input handler.
        self.moving_right = False
        self.moving_left = False
    def update(self):
        """Adjust the ship's position based on the movement flags."""
        # Update the float center value, not the rect, so fractional
        # speeds are not truncated every frame.
        if self.moving_right and self.rect.right < self.screen_rect.right:
            self.center += self.ai_settings.ship_speed_factor
        if self.moving_left and self.rect.left > 0:
            self.center -= self.ai_settings.ship_speed_factor
        # Propagate the float position back into the integer rect.
        self.rect.centerx = self.center
    def blitme(self):
        """Draw the ship at its current position."""
        self.screen.blit(self.image, self.rect)
    def center_ship(self):
        """Re-centre the ship horizontally on the screen."""
        self.center = self.screen_rect.centerx
"noreply@github.com"
] | noreply@github.com |
f0128317036c9b966541e24a1e1efe172ad2fce5 | cc5eb8eb50d64ffbca780c42a908053ec549f295 | /python-in-a-day-scripts/ch12 program/script_002.py | 43129ebbb2a9f5b3ad633d6fc7d93d8accaedfbb | [] | no_license | bemagee/LearnPython | 328b1f7a9d5046fe1503aece8a5134a7dd2727d2 | a42565f8fb45f9e2ebbcdcf359ebb9092bf837c2 | refs/heads/master | 2020-12-13T02:45:30.308604 | 2016-10-24T03:09:12 | 2016-10-24T03:09:12 | 10,793,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | # Our epic programmer dict from before
# Mapping of programmer name -> [email address, numeric id].
# NOTE(review): the final line uses the Python 2 `print` statement; this
# file will not run under Python 3 without changing it to print(...).
epic_programmer_dict = {
    'Tim Berners-Lee' : ['tbl@gmail.com', 111],
    'Guido van Rossum' : ['gvr@gmail.com', 222],
    'Linus Torvalds': ['lt@gmail.com', 333],
    'Larry Page' : ['lp@gmail.com', 444],
    'Sergey Brin' : ['sb@gmail.com', 555]
}
print epic_programmer_dict
| [
"bemagee@gmail.com"
] | bemagee@gmail.com |
1612efa81047e7a20e0dadd4e871ca67fee01b1b | f879be78003d04f5332ea18373ef0de1a17f5817 | /ververica_sdk/models/delete_api_token_response.py | 06f93515c8b1b040224e70273134aed534c4b518 | [] | no_license | justlikemikezz/ververica-sdk | 8228b1d1e9bb9c0530842162f771f7708d1b1555 | b946aa879cc80ad25b8c746b8c2cdc6bde086cbb | refs/heads/master | 2020-12-22T15:58:27.469611 | 2020-01-29T00:33:21 | 2020-01-29T00:33:21 | 236,849,548 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,484 | py | # coding: utf-8
"""
Ververica Platform API
The Ververica Platform APIs, excluding Application Manager. # noqa: E501
OpenAPI spec version: 2.0.0
Contact: platform@ververica.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DeleteApiTokenResponse(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Both maps are empty: the delete-token response carries no payload.
    swagger_types = {
    }
    attribute_map = {
    }
    def __init__(self):  # noqa: E501
        """DeleteApiTokenResponse - a model defined in Swagger"""  # noqa: E501
        self.discriminator = None
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Serialize every declared attribute, recursing into nested models
        # (anything exposing to_dict), lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # If the generated model ever subclasses dict, include its items too.
        if issubclass(DeleteApiTokenResponse, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DeleteApiTokenResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"michael.handria@bird.co"
] | michael.handria@bird.co |
4038aed1a3543a7584fd17318abc90b7b2ac4ea7 | e3932a292695efbac57ae7adf5bd9c5554a88e39 | /binding/python/setup.py | dd77ed90fa69813d54e55eef7604a48e1e43a0f5 | [
"MIT"
] | permissive | makiolo/Calculate | 1356e00628ef7f90423880de8190660667a407d1 | 81dcfcd7ce256abe1d5924329dd0ae8320fe749f | refs/heads/master | 2021-07-03T07:02:44.673841 | 2017-09-20T22:51:18 | 2017-09-20T22:51:18 | 103,838,273 | 0 | 0 | null | 2017-09-17T15:10:14 | 2017-09-17T15:10:14 | null | UTF-8 | Python | false | false | 1,611 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import sys
import os
import os.path as path
from platform import system
from setuptools import setup
from setuptools.dist import Distribution
# Refuse to run on interpreters older than Python 2.7.
if sys.version_info < (2, 7):
    sys.exit('Only Python versions superior or equal than 2.7 supported')
# Work from the directory containing this setup.py so relative paths resolve.
os.chdir(path.abspath(path.dirname(path.realpath(__file__))))
class BinaryDistribution(Distribution):
    """Distribution that always reports binary (platform-specific) contents.

    Forces setuptools/wheel to build a platform wheel instead of a "pure
    Python" one, since this package ships a prebuilt shared library.
    """
    def has_ext_modules(self):  # fixed: first parameter was misnamed `foo`
        """Pretend extension modules exist so a platform wheel is produced."""
        return True
    def is_pure(self):
        """Report the distribution as not pure Python."""
        return False
library = 'calculate'
# Scrape dunder metadata (e.g. __version__ = '...') out of the package's
# __init__.py by splitting each line on '='.
# NOTE(review): eval() on file content — acceptable for a trusted local
# file, but ast.literal_eval would be safer; confirm before reuse.
with open('{}/__init__.py'.format(library), 'r') as file:
    metadata = {entry.split('=')[0].strip(' '): eval(entry.split('=')[-1])
                for entry in file.read().split('\n') if '=' in entry}
# Platform-specific shared-library extension ('' when the OS is unknown).
extensions = {'Linux': 'so', 'Darwin': 'dylib', 'Windows': 'dll'}
extension = extensions.get(system(), '')
library_name = 'lib' + library
# Locate lib<name>.<ext> inside the package directory next to this file.
basedir = path.realpath(__file__).replace(path.basename(__file__), '')
basedir = path.join(basedir, library)
library_path = path.join(basedir, library_name + '.' + extension)
# The prebuilt shared library must already exist; this package only wraps it.
if not path.lexists(library_path):
    raise EnvironmentError('Missing shared library')
setup(
    name=library,
    distclass=BinaryDistribution,
    version=metadata['__version__'],
    license=metadata['__license__'],
    author=metadata['__author__'],
    author_email=metadata['__email__'],
    home_page=metadata['__site__'],
    description=metadata['__description__'],
    install_requires=[
        'cffi>=1.0.1'
    ],
    packages=[library],
    # Ship the shared library alongside the Python sources.
    package_data={library: [library_name + '.' + extension]}
)
| [
"alorenzo.md@gmail.com"
] | alorenzo.md@gmail.com |
0e5fee26994b35cd5bd788f42ce452bc9d810c6b | 28dbb450fd31e1c7541668c5bbb0248edc13ea6f | /class 2708/exercise1.py | 29e740efc54847881b22b64e1df04ae32ae57797 | [
"MIT"
] | permissive | Gabriel-Fernandes1917/lab-the-python | 1cfcebeadb944295c92361abe2ddc62318c4975c | 0ed4fe7cf5e6c5447d3f021e50d390fc3af1b0d7 | refs/heads/main | 2023-07-23T21:19:26.716328 | 2021-09-10T11:23:20 | 2021-09-10T11:23:20 | 395,613,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py |
dictionary ={None:None}
loop = "sim"
while(loop == "sim"):
dictionary[0]=input('informe o nome\n')
dictionary[1]=input('informe o cpf\n')
loop = input('deseja continuar ?')
print(dictionary)
| [
"gabgui2001@gmail.com"
] | gabgui2001@gmail.com |
917fb6ce87aa5c3a0330750def2f94634054364c | d890ba28b707966d05dbb40bf8e73f9e07f85c78 | /new.py | d6ed5ce88af972ab15c54bd90d62db1ca2172104 | [] | no_license | abir0205/Mission-to-Mars | 7ed6e2aa5c80f8d52995eef71f3b43988e752e48 | e9628204a30d193d4c4934e79121b96f3144f5ce | refs/heads/main | 2023-06-07T01:35:48.592289 | 2021-07-01T03:28:14 | 2021-07-01T03:28:14 | 380,420,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
# 1. Use browser to visit the URL
url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(url)
hemisphere_image_urls = []
main_url = 'https://astrogeology.usgs.gov'
# 3. Write code to retrieve the image urls and titles for each hemisphere.
html = browser.html
html_soup = soup(html, 'html.parser')
image_finder = html_soup.find("div", class_='collapsible results')
images = image_finder.find_all('a')
partial_urls = set([image['href'] for image in images])
for partial_url in partial_urls:
hemispheres = {}
full_url = f'{main_url}{partial_url}'
browser.visit(full_url)
browser.links.find_by_text('Open').click()
html = browser.html
url_soup = soup(html, 'html.parser')
download_div = url_soup.find('div', class_ = 'collapsible results')
img_anchor = url_soup.find_all('a')
title_elem = url_soup.select_one('div.content')
title = title_elem.find("h2", class_='title').get_text()
hemispheres = {
'img_url': img_anchor,
'title': title,
}
hemisphere_image_urls.append(hemispheres)
print(hemisphere_image_urls)
| [
"Ahossai7@binghamton.edu"
] | Ahossai7@binghamton.edu |
625ca5383313c0120b33009c1c2af442f7029425 | 78d9b2f3d3c67a9bd66014ea1fd98729268e8343 | /src/accounts/views.py | af54b9f22f8836e6ee6fa4b06daff38fbffd6cea | [] | no_license | pedrozan/blog | 9ea69e4ef1c82d9815f7f801c07aadb34175a3db | e360672e88325db82c4f07874b14809e77e332f7 | refs/heads/master | 2022-11-13T04:15:52.165296 | 2020-07-04T16:21:06 | 2020-07-04T16:21:06 | 276,705,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | from django.contrib.auth.forms import UserCreationForm
from django.urls import reverse
from django.views.generic import CreateView
class UserRegistrationView(CreateView):
form_class = UserCreationForm
template_name = 'user_registration.html'
def get_successfull_url(self):
return reverse('login')
| [
"pedro.schleder@gmail.com"
] | pedro.schleder@gmail.com |
3ab8865d156fd4539ee009f877d33e4d2f16b8ae | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-sblp-obt/sblp_ut=3.5_rd=1_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=13/params.py | 28bdc367d387d98fbf09079da0322b1eedc608ea | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | {'cpus': 4,
'duration': 30,
'final_util': '3.628952',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '1',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'RUN',
'trial': 13,
'utils': 'uni-medium-3'}
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
86118937a3c5da7d22eb06c3ed34e49f7cfa2f11 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2711/47774/305852.py | a8c9c3680c535404ce3caf423c50014ec1f95130 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,535 | py | def isSimilar(s1, s2):
diff, l = 0, len(s1)
for i in range(l):
if (s1[i] != s2[i]):
diff += 1
if (diff > 2):
return False
return True
def find(f, x):
return f[x] if x == f[x] else find(f, f[x])
def merge(f, x, y):
rx = find(f, f[x])
ry = find(f, f[y])
f[ry] = rx
def solve(A):
A = list(set(A))
l,w = len(A), len(A[0])
res = 0
f = [i for i in range(l)]
if l <= w*w:
for i in range(l):
for j in range(i + 1, l):
if (find(f, i) != find(f,j)):
isS = isSimilar(A[i], A[j])
if (isS):
merge(f, i, j)
else:
dict = {}
for i in range(l):
if (A[i] in dict):
dict[A[i]].add(i)
else:
dict[A[i]] = {i}
word = list(A[i])
for i0 in range(w):
for j0 in range(i0+1, w):
if (word[i0] != word[j0]):
word[i0],word[j0] = word[j0],word[i0]
neighbor = ''.join(word)
if (neighbor in dict):
dict[neighbor].add(i)
else:
dict[neighbor] = {i}
word[i0],word[j0] = word[j0],word[i0]
for i in range(l):
for j in dict[A[i]]:
merge(f,i,j)
for i in range(l):
if (i == f[i]):
res += 1
return res
# Read the test input (a Python list literal of strings) from stdin.
# SECURITY FIX: ast.literal_eval only parses literals, unlike eval(),
# which would execute arbitrary expressions read from the input stream.
import ast
s = ast.literal_eval(input())
print(solve(s))
"1069583789@qq.com"
] | 1069583789@qq.com |
0dd9e260541d802e91f2058473d8baff323b757c | 60a29068e369900bd1de946bdbc7f9cf61a23127 | /manage.py | c7299db7505f33e3eb96f61001d69f0bc600b78c | [] | no_license | crowdbotics-apps/circuit-web-version-22188 | 4e44be1fb2d6ded8e87f731fd3a2c85e3cfec19e | 7eed4cb920846adf871057b150d0eb72b794a7aa | refs/heads/master | 2023-01-21T08:59:50.677549 | 2020-11-21T22:43:33 | 2020-11-21T22:43:33 | 309,153,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks via Django's command-line utility."""
    # Point Django at this project's settings module before it is imported.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'circuit_22188.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as import_error:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from import_error
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    # Execute only when run as a script (e.g. `python manage.py runserver`).
    main()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
7bc8ce68bc7ce2b233b0fd4db87796b82fa4e26d | bc6df4755d7b82e49e2ce1bd7276bdd6fba6f511 | /source/inputters/dataset.py | 4f172b558ad2986a64dfd2863c5318289027e138 | [] | no_license | laihuiyuan/knowledge-driven-dialogue | 9ae9d16c770cb57283cba6d99c46b3f73ed50090 | c1df20d66f8a391f4d93bc8fceaa8fe9b08906bc | refs/heads/master | 2020-05-15T21:40:28.758151 | 2019-05-26T08:16:35 | 2019-05-26T08:16:35 | 182,506,013 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,368 | py | # -*- coding: UTF-8 -*-
"""
The original version comes from Baidu.com, https://github.com/baidu/knowledge-driven-dialogue
File: source/inputters/dataset.py
"""
import torch
from torch.utils.data import DataLoader
from source.utils.misc import Pack
from source.utils.misc import list2tensor
class Dataset(torch.utils.data.Dataset):
    """List-backed dataset yielding pre-processed dialogue examples."""
    def __init__(self, data):
        self.data = data
    def __len__(self):
        return len(self.data)
    def __getitem__(self, idx):
        return self.data[idx]
    @staticmethod
    def collate_fn(device=-1):
        """Build a collate function that packs a list of examples into one
        batch Pack, optionally moving it onto the given CUDA device.
        """
        def collate(data_list):
            batch = Pack()
            first = data_list[0]
            for key in first.keys():
                batch[key] = list2tensor([example[key] for example in data_list])
            return batch.cuda(device=device) if device >= 0 else batch
        return collate
    def create_batches(self, batch_size=1, shuffle=False, device=-1):
        """Wrap this dataset in a DataLoader using the packing collate_fn."""
        return DataLoader(dataset=self,
                          batch_size=batch_size,
                          shuffle=shuffle,
                          collate_fn=self.collate_fn(device),
                          pin_memory=False)
| [
"huiyuanlai.l@gmail.com"
] | huiyuanlai.l@gmail.com |
bfbb3c1be656c0a35f38cde384ddf61d7a4de0c4 | 95f22aa6d38a959eb9d870bbf83afa54b0f619ab | /plugin5/main.py | deeef38195e68dccaffb223ffbb17bec777a43d6 | [] | no_license | tfmoraes/inv_plugin_test1 | 3be54c9c6fbec00a0b964196410b9f7a04c4f6eb | 4ccd65ef5e85b256b62d903adf61162186b47e30 | refs/heads/master | 2023-04-06T00:39:45.064523 | 2021-05-04T21:36:58 | 2021-05-04T21:36:58 | 201,117,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 945 | py | from pubsub import pub as Publisher
import vtk
from skimage.segmentation import felzenszwalb, slic, quickshift, watershed
from skimage.segmentation import mark_boundaries, find_boundaries
from skimage.util import img_as_float
from invesalius.data import styles
from invesalius.data import imagedata_utils as iu
from invesalius.data import slice_
def preprocess_image(image, window, level):
    """Apply window/level to map the image to 0-255, then rescale to floats."""
    windowed = iu.get_LUT_value_255(image, window, level)
    return img_as_float(windowed)
def load():
    # Plugin entry point: creates a new mask whose voxels mark the
    # boundaries of Felzenszwalb superpixels computed on every slice of
    # the current window/level-adjusted volume.
    print("Loading plugin")
    slc = slice_.Slice()
    image = slc.matrix
    window = slc.window_width
    level = slc.window_level
    image = preprocess_image(image, window, level)
    mask = slc.create_new_mask()
    mask.was_edited = True
    # First plane/row/column set to 1 — presumably the mask buffer carries
    # a one-voxel border ahead of the image data (note the i+1 / 1:
    # offsets below); TODO confirm against the invesalius mask layout.
    mask.matrix[0, :, :] = 1
    mask.matrix[:, 0, :] = 1
    mask.matrix[:, :, 0] = 1
    for i in range(image.shape[0]):
        mask.matrix[i+1, 1:, 1:] = find_boundaries(felzenszwalb(image[i])) * 255
| [
"totonixsame@gmail.com"
] | totonixsame@gmail.com |
0d3a6376d60f3cb52921f20e2916d6a0084c3025 | 9d460ddab78d26322419285575395edc850d76d8 | /account/urls.py | 36e4cbc4d6fa9edc617dc85151e04276ff858461 | [] | no_license | nhatvmgcs18725/My_first_project | 53d929a56dfaae6c29ef2657b3e1678068ed6edf | 50e4fa66c16204ce8cc75c734c200efa23ac93f8 | refs/heads/main | 2023-06-10T06:44:28.551520 | 2021-06-28T12:37:26 | 2021-06-28T12:37:26 | 381,025,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | from django.urls import path
from . import views
urlpatterns=[
path('register',views.register,name="register"),
path('login',views.login,name="login"),
path('logout',views.logout,name="logout"),
path('cart',views.cart,name="cart"),
path('order1',views.order1,name="order1"),
path('home',views.home,name="home"),
path('final',views.final,name="final"),
path('download',views.download,name='download')
] | [
"nhatvmgcs18725@fpt.edu.vn"
] | nhatvmgcs18725@fpt.edu.vn |
5245bc11bfacf34d092a6630efd1e6ec7b5948a9 | 32809f6f425bf5665fc19de2bc929bacc3eeb469 | /src/1096-Brace-Expansion-II/1096.py | 78067156acba02fd1f032327859403cee51255d5 | [] | no_license | luliyucoordinate/Leetcode | 9f6bf01f79aa680e2dff11e73e4d10993467f113 | bcc04d49969654cb44f79218a7ef2fd5c1e5449a | refs/heads/master | 2023-05-25T04:58:45.046772 | 2023-05-24T11:57:20 | 2023-05-24T11:57:20 | 132,753,892 | 1,575 | 569 | null | 2023-05-24T11:57:22 | 2018-05-09T12:30:59 | C++ | UTF-8 | Python | false | false | 723 | py | import itertools
class Solution:
def braceExpansionII(self, expression):
groups = [[]]
level = 0
for i, c in enumerate(expression):
if c == '{':
if level == 0:
start = i+1
level += 1
elif c == '}':
level -= 1
if level == 0:
groups[-1].append(self.braceExpansionII(expression[start:i]))
elif level == 0:
if c == ",":
groups.append([])
else:
groups[-1].append([c])
return sorted(set().union(*[set(map(''.join, itertools.product(*group))) for group in groups])) | [
"luliyucoordinate@outlook.com"
] | luliyucoordinate@outlook.com |
5f7fbca5b07a1829d557e292fde130d12362f1c9 | 8a925a66777993cc0caf4085a9c309cbd59fad7d | /mdp/gridworld1d.py | 4f142791692f8e6672a0d7b49fe98838f1c88406 | [
"MIT"
] | permissive | yrlu/irl-imitation | 96de0b435ef2ebd25f42694a90496f9e0dcbeb8a | c2f2f57d0d7cb98d4cca125579d68c411ed2cbe9 | refs/heads/master | 2023-05-22T16:20:35.389623 | 2022-07-05T07:23:56 | 2022-07-05T07:23:56 | 92,340,121 | 364 | 96 | null | null | null | null | UTF-8 | Python | false | false | 5,163 | py | # 1D Gridworld
#
# ---
# @author Yiren Lu
# @email luyiren [at] seas [dot] upenn [dot] edu
#
# MIT License
import numpy as np
from utils import *
class GridWorld1D(object):
  """
  1D grid world environment (without terminal states)
  """
  def __init__(self, rewards, terminals, move_rand=0.0):
    """
    inputs:
      rewards     1d float array - contains rewards
      terminals   a set of all the terminal states
      move_rand   float - probability mass spread uniformly over the three
                  neighbouring cells (left/stay/right); 0.0 means moves
                  are deterministic
    """
    self.n_states = len(rewards)
    self.rewards = rewards
    self.terminals = terminals
    # Action 0 moves left (-1), action 1 moves right (+1).
    self.actions = [-1, 1]
    self.n_actions = len(self.actions)
    self.move_rand = move_rand
  def get_reward(self, state):
    # Reward depends on the state only.
    return self.rewards[state]
  def get_transition_states_and_probs(self, state, action):
    """
    inputs:
      state   int - state
      action  int - action index into self.actions
    returns
      a list of (state, probability) pair
    """
    if action < 0 or action >= self.n_actions:
      # invalid input
      return []
    if self.is_terminal(state):
      return [(state, 1.0)]
    if self.move_rand == 0:
      # Deterministic case: bounce off the grid boundaries.
      if state+self.actions[action] < 0 or state+self.actions[action] >= self.n_states:
        return [(state, 1.0)]
      return [(state+self.actions[action], 1.0)]
    else:
      # Stochastic case: indices 0/1/2 of mov_probs correspond to
      # state-1 / state / state+1.
      mov_probs = np.zeros(3)
      mov_probs[1+self.actions[action]] += 1 - self.move_rand
      for i in range(3):
        mov_probs[i] += self.move_rand/3
      # At the edges, fold the out-of-grid probability into "stay".
      if state == 0:
        mov_probs[1] += mov_probs[0]
        mov_probs[0] = 0
      if state == self.n_states - 1:
        mov_probs[1] += mov_probs[2]
        mov_probs[2] = 0
      res = []
      for i in range(3):
        if mov_probs[i] != 0:
          res.append((state-1+i, mov_probs[i]))
      return res
  def is_terminal(self, state):
    if state in self.terminals:
      return True
    else:
      return False
  ##############################################
  # Stateful Functions For Model-Free Leanring #
  ##############################################
  def reset(self, start_pos):
    # Set the agent's internal state for episodic interaction.
    self._cur_state = start_pos
  def get_current_state(self):
    return self._cur_state
  def step(self, action):
    """
    Step function for the agent to interact with gridworld
    inputs:
      action        action taken by the agent
    returns
      current_state current state (the state BEFORE the transition)
      action        input action
      next_state    next_state
      reward        reward on the next state
      is_done       True/False - if the agent is already on the terminal states
    """
    if self.is_terminal(self._cur_state):
      # NOTE(review): _is_done is set here but never initialised or read
      # elsewhere in this class — confirm whether callers depend on it.
      self._is_done = True
      return self._cur_state, action, self._cur_state, self.get_reward(self._cur_state), True
    st_prob = self.get_transition_states_and_probs(self._cur_state, action)
    # Sample the successor state according to the transition probabilities.
    rand_idx = np.random.choice(np.arange(0, len(st_prob)), p=[prob for st, prob in st_prob])
    last_state = self._cur_state
    next_state = st_prob[rand_idx][0]
    reward = self.get_reward(last_state)
    self._cur_state = next_state
    return last_state, action, next_state, reward, False
  #######################
  # Some util functions #
  #######################
  def get_transition_mat(self):
    """
    get transition dynamics of the gridworld
    return:
      P_a         NxNxN_ACTIONS transition probabilities matrix -
                    P_a[s0, s1, a] is the transition prob of
                    landing at state s1 when taking action
                    a at state s0
    """
    N_STATES = self.n_states
    N_ACTIONS = len(self.actions)
    P_a = np.zeros((N_STATES, N_STATES, N_ACTIONS))
    for si in range(N_STATES):
      for a in range(N_ACTIONS):
        probs = self.get_transition_states_and_probs(si, a)
        for sj, prob in probs:
          # Prob of si to sj given action a
          P_a[si, sj, a] = prob
    return P_a
  def generate_demonstrations(self, policy, n_trajs=100, len_traj=20, rand_start=False, start_pos=0):
    """gatheres expert demonstrations
    inputs:
    gw          Gridworld - the environment
    policy      Nx1 matrix
    n_trajs     int - number of trajectories to generate
    rand_start  bool - randomly picking start position or not
    start_pos   int - set start position, default 0
    returns:
    trajs       a list of trajectories - each element in the list is a list of Steps representing an episode
    """
    # NOTE(review): step() returns (last_state, ...), so `cur_state` below
    # lags the environment's internal state by one transition; from the
    # second step onward the policy is queried on a stale state. It looks
    # like `next_state` was intended — confirm before relying on these
    # demonstrations.
    trajs = []
    for i in range(n_trajs):
      if rand_start:
        # override start_pos
        start_pos = np.random.randint(0, self.n_states)
      episode = []
      self.reset(start_pos)
      cur_state = start_pos
      cur_state, action, next_state, reward, is_done = self.step(int(policy[cur_state]))
      episode.append(Step(cur_state=cur_state, action=self.actions[action], next_state=next_state, reward=reward, done=is_done))
      # while not is_done:
      for _ in range(1,len_traj):
        cur_state, action, next_state, reward, is_done = self.step(int(policy[cur_state]))
        episode.append(Step(cur_state=cur_state, action=self.actions[action], next_state=next_state, reward=reward, done=is_done))
        if is_done:
          break
      trajs.append(episode)
    return trajs
| [
"luyiren@seas.upenn.edu"
] | luyiren@seas.upenn.edu |
e23f2edbeae333f052ce6c3b881ea6c9b93be27a | 02f315c71fc653d9523607a6777ff8d89851ced2 | /walky/client/common.py | 175b55ad08cca8644d58ab2b39c1c48bb4966a39 | [
"MIT"
] | permissive | amimoto/walky | 8655d7b386a56e9f0a2e59dd2a7c45489070ed71 | bf2971ccb86a8db58599aa7d8857f467a9f5fc9b | refs/heads/master | 2021-01-10T22:06:32.612248 | 2015-07-02T02:46:44 | 2015-07-02T02:46:44 | 32,905,805 | 0 | 1 | null | 2015-04-06T21:23:10 | 2015-03-26T03:36:07 | Python | UTF-8 | Python | false | false | 1,360 | py | from __future__ import absolute_import
import weakref
import threading
import asyncore
import socket
from walky.objects import *
from walky.port import *
from walky.engine import *
class Client(object):
    """Client-side facade wiring a walky Engine to a single connection.
    NOTE(review): __init__ reads `self.port_class`, but no `port_class`
    attribute is defined on this class (only engine_class/object_class),
    so instantiating Client directly raises AttributeError unless a
    subclass supplies it — confirm the intended default.
    """
    engine = None
    settings = None
    connection = None
    port = None
    engine_class = Engine
    object_class = ObjectStub
    def __init__( self,
                  **settings ):
        # Fill in default classes; callers may override any of them.
        settings.setdefault('engine_class',self.engine_class)
        settings.setdefault('port_class',self.port_class)
        settings.setdefault('object_class',self.object_class)
        # `port` is later invoked as a zero-arg callable — see sendline().
        self.port = settings.get('port')
        self.settings = settings
        self.reset()
    def reset(self):
        # Discard any running engine and build a fresh one from settings.
        if self.engine: self.engine.shutdown()
        self.engine = self.settings['engine_class']()
    def connect(self,*args,**kwargs):
        """ Start the engine and the asyncore
        """
        self.engine.start()
        self.connection = self.engine.connection_new(*args,**kwargs)
    def run(self):
        # Placeholder hook: subclasses may drive the client loop here.
        pass
    def on_readline(self,line):
        # NOTE(review): empty try/except skeleton — incoming lines are
        # currently ignored entirely.
        try:
            pass
        except Exception as ex:
            pass
    def sendline(self,line):
        # self.port is called before use, so it must be a zero-arg callable
        # (e.g. a weakref.ref) returning the port object — TODO confirm.
        self.port().sendline(line)
    def object_get(self,reg_obj_id):
        # Return a stub proxy for the remote object with the given id.
        return self.object_class(self.connection,reg_obj_id)
    def close(self):
        self.engine.shutdown()
"aki@zaber.com"
] | aki@zaber.com |
92bcd6a9e32f3b20e70dab4cd65ec40d73a7a67a | a2f80a041964940132b4e5c4a1b63d6df444c7a2 | /geokey_wegovnow/tests/test_urls.py | 844703ef6f2574f409ca69684adf61e51b217da5 | [
"MIT"
] | permissive | ExCiteS/geokey-wegovnow | c41774c9c2c9aa2bae9827af5b25f83cc48339bc | 66df7e17fa3eb2d8da2e56e39236b019f98a2a08 | refs/heads/master | 2021-01-17T18:32:19.711898 | 2018-10-08T15:20:38 | 2018-10-08T15:20:38 | 71,469,698 | 0 | 0 | MIT | 2018-11-07T16:39:34 | 2016-10-20T14:09:02 | Python | UTF-8 | Python | false | false | 1,216 | py | """Test all URLs."""
from django.test import TestCase
from django.core.urlresolvers import reverse, resolve
from geokey_wegovnow import views
class UrlPatternsTests(TestCase):
    """Tests for URL patterns."""
    # NOTE(review): `resolved_url.func.func_name` is the Python 2 function
    # attribute; under Python 3 it is `__name__`. As written these
    # assertions only run on Python 2 — confirm the target interpreter.
    # ###########################
    # TEST FOR ADMIN VIEWS
    # ###########################
    def test_uwum_profile(self):
        """Test admin page url for UWUM profile settings."""
        view = views.UWUMProfileSettingsView
        reversed_url = reverse('geokey_wegovnow:uwum_profile_settings')
        self.assertEqual(reversed_url, '/admin/profile/settings/')
        resolved_url = resolve('/admin/profile/settings/')
        self.assertEqual(resolved_url.func.func_name, view.__name__)
    # ###########################
    # TESTS FOR PUBLIC API
    # ###########################
    def test_api_uwum_navigation(self):
        """Test API url for UWUM navigation."""
        view = views.UWUMNavigationAPIView
        reversed_url = reverse('geokey_wegovnow:api_uwum_navigation')
        self.assertEqual(reversed_url, '/api/wegovnow/navigation/')
        resolved_url = resolve('/api/wegovnow/navigation/')
        self.assertEqual(resolved_url.func.func_name, view.__name__)
| [
"j.osokinas@mappingforchange.org.uk"
] | j.osokinas@mappingforchange.org.uk |
1b33fef7283eefd3083d4c28ff5834eaadea6816 | 3573668d5401f909f8196fccf5f0cbd18b25c7c2 | /work/w08/d3/01-02-uploading-images-django/completed-code/catcollector/main_app/migrations/0005_photo.py | 75da4a08c9d30814cd91ddd81127376416c1b877 | [] | no_license | LordSnoosh/SEIR2222 | 8a015941a5585e8c167ff51f38cfc04b1f3f7d3a | 516d684ca01c46094c95ecef2a2af62e1aaa622f | refs/heads/master | 2023-04-11T16:30:41.260438 | 2021-05-14T15:22:49 | 2021-05-14T15:22:49 | 368,380,443 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | # Generated by Django 3.1.4 on 2020-12-22 17:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration (see the "Generated by Django" header).
    # Adds a Photo model: auto PK, a url CharField, and a FK to main_app.cat
    # that cascade-deletes photos when their cat is removed.
    # NOTE(review): applied migrations should not be hand-edited.
    dependencies = [
        ('main_app', '0004_cat_toys'),
    ]
    operations = [
        migrations.CreateModel(
            name='Photo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('url', models.CharField(max_length=200)),
                ('cat', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main_app.cat')),
            ],
        ),
    ]
| [
"jim.clark@generalassemb.ly"
] | jim.clark@generalassemb.ly |
77f2a108100941515d525b082674a52ef5e8f944 | 3d8b5343a272a8fb145def54f941a4005993365e | /week8/autoenc.py | e3db9fe5b9ad95909ba1d460b0b6543a28256700 | [] | no_license | gronlund/au_ml18 | 1725481c22f4877b10bb294371f32677b1c1cbd0 | 2c00d43b69f12780a9bef55842b0a273cce1715b | refs/heads/master | 2020-03-26T21:16:50.278360 | 2018-10-22T11:48:09 | 2018-10-22T11:48:09 | 145,378,775 | 1 | 33 | null | 2018-09-17T16:28:27 | 2018-08-20T06:58:36 | Jupyter Notebook | UTF-8 | Python | false | false | 2,739 | py | import os
import urllib
import urllib.request
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
# First we create a torch data loader - we have made that for you
def load_au_data(filename):
    """Load the AU digits archive *filename*, downloading it on first use.

    Pixel values are remapped via ``* 2 - 1``; returns (digits, labels).
    """
    if not os.path.exists(filename):
        # Fetch the archive once and cache it next to the script.
        print('file not exists - downloading')
        remote = "http://users-cs.au.dk/jallan/ml/data/{0}".format(filename)
        with open(filename, 'wb') as cache:
            cache.write(urllib.request.urlopen(remote).read())
    archive = np.load(filename)
    digits = archive['digits'] * 2 - 1
    labels = archive['labels']
    print('data shape, type, min, max', digits.shape, digits.dtype, digits.min(), digits.max())
    print('labels shape and type', labels.shape, labels.dtype, labels.min(), labels.max())
    return digits, labels
def load_digits_train_data():
    """Load and return the AU digits training split."""
    return load_au_data('auTrain.npz')
def load_digits_test_data():
    """Load and return the AU digits test split."""
    return load_au_data('auTest.npz')
class DigitsDataset(Dataset):
    """Minimal ``torch.utils.data.Dataset`` over paired (X, y) arrays.

    Within this class the labels *y* are used only to define the dataset
    length; ``__getitem__`` yields just the image as a float32 tensor.
    """

    def __init__(self, X, y):
        self.X = X
        self.y = y

    def __len__(self):
        return len(self.y)

    def __getitem__(self, idx):
        sample = self.X[idx]
        return torch.from_numpy(sample).float()
# Build the training pipeline: load the AU digits, wrap them in the torch
# Dataset defined above, and batch them for training.
X_train, y_train = load_digits_train_data()
train_data = DigitsDataset(X_train, y_train)
train_loader = DataLoader(train_data, batch_size=32, shuffle=True)
# you can scan over train_loader to get data
### YOUR CODE HERE
# (exercise template: build and train the autoencoder here. The plotting
# code below expects a variable named `net` with a `forward` method and a
# linear layer attribute `W1` -- see the assumption comments below.)
### END CODE
print('Finished Training - lets plot some encodings')
# Assumes you have a class named net that supports forward to evaluate the neural net - if not fix the names etc. to make it work
# Top row: the first eight inputs; bottom row: their reconstructions.
fig, ax = plt.subplots(2, 8, figsize=(20, 16))
vis_loader = DataLoader(train_data, batch_size=1)
for i, timg in enumerate(vis_loader):
    with torch.no_grad():
        ax[0, i].imshow(timg.reshape(28, 28), cmap='gray')
        dec = net.forward(timg)
        ax[1, i].imshow(dec.reshape(28, 28), cmap='gray')
    if i >= 7:
        print('break man')
        break
# Assumes you have a class named net has a linear layer named W1 - if not rename
# Visualize the first 32 rows (4x8 grid) of W1 as 28x28 filters, each
# L2-normalized so the grayscale range is comparable across filters.
fig2, waxes = plt.subplots(4, 8, figsize=(20, 16))
with torch.no_grad():
    W1 = net.W1.weight.detach().numpy()
    print('W1 shape', W1.shape)
    for i, wax in enumerate(waxes.flat):
        w = W1[i, :]
        w = w/np.linalg.norm(w) # normalize
        wax.imshow(w.reshape(28, 28), cmap='gray')
plt.show()
| [
"jallan@cs.au.dk"
] | jallan@cs.au.dk |
636fe7f33650c3bd29921d6bf95425a2aeeaef48 | d09fd96bbc931fbb8522e5c991973f064a4ded50 | /baxter/devel/.private/baxter_maintenance_msgs/lib/python2.7/dist-packages/baxter_maintenance_msgs/msg/_UpdateStatus.py | dcdfcbd9e5d9bc1182afd40950d3c1c371b7df12 | [] | no_license | rymonyu/EE4-Robotics | b3827ba0dff5bdfdd1e47fe07a40e955c5226f38 | 6cf9272abd7fe8a074dc74a032f6e0b35edb8548 | refs/heads/master | 2020-08-22T15:09:39.706809 | 2019-12-15T23:35:45 | 2019-12-15T23:35:45 | 216,420,098 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,704 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from baxter_maintenance_msgs/UpdateStatus.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class UpdateStatus(genpy.Message):
  # NOTE(review): genpy-generated ROS message class (see module header,
  # "Do not edit") -- the canonical source is the UpdateStatus.msg file.
  _md5sum = "74e246350421569590252c39e8aa7b85"
  _type = "baxter_maintenance_msgs/UpdateStatus"
  _has_header = False #flag to mark the presence of a Header object
  _full_text = """# See the class UpdateRunner()
# status: One-word description of the current action being performed
# long_description: Details pertaining to status if any. Used for verbose error messages.
uint16 status
float32 progress
string long_description
uint16 STS_IDLE = 0
uint16 STS_INVALID = 1
uint16 STS_BUSY = 2
uint16 STS_CANCELLED = 3
uint16 STS_ERR = 4
uint16 STS_MOUNT_UPDATE = 5
uint16 STS_VERIFY_UPDATE = 6
uint16 STS_PREP_STAGING = 7
uint16 STS_MOUNT_STAGING = 8
uint16 STS_EXTRACT_UPDATE = 9
uint16 STS_LOAD_KEXEC = 10
"""
  # Pseudo-constants
  STS_IDLE = 0
  STS_INVALID = 1
  STS_BUSY = 2
  STS_CANCELLED = 3
  STS_ERR = 4
  STS_MOUNT_UPDATE = 5
  STS_VERIFY_UPDATE = 6
  STS_PREP_STAGING = 7
  STS_MOUNT_STAGING = 8
  STS_EXTRACT_UPDATE = 9
  STS_LOAD_KEXEC = 10
  # Message fields and their ROS wire types.
  __slots__ = ['status','progress','long_description']
  _slot_types = ['uint16','float32','string']
  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.
    The available fields are:
       status,progress,long_description
    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(UpdateStatus, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.status is None:
        self.status = 0
      if self.progress is None:
        self.progress = 0.
      if self.long_description is None:
        self.long_description = ''
    else:
      self.status = 0
      self.progress = 0.
      self.long_description = ''
  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types
  # Wire format: '<Hf' (uint16 status, float32 progress) followed by a
  # uint32-length-prefixed UTF-8 string (long_description).
  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      _x = self
      buff.write(_get_struct_Hf().pack(_x.status, _x.progress))
      _x = self.long_description
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
      end = 0
      _x = self
      start = end
      end += 6
      (_x.status, _x.progress,) = _get_struct_Hf().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.long_description = str[start:end].decode('utf-8')
      else:
        self.long_description = str[start:end]
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
  # The *_numpy variants are generated for messages with array fields; this
  # message has none, so they mirror serialize/deserialize exactly.
  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      _x = self
      buff.write(_get_struct_Hf().pack(_x.status, _x.progress))
      _x = self.long_description
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      end = 0
      _x = self
      start = end
      end += 6
      (_x.status, _x.progress,) = _get_struct_Hf().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.long_description = str[start:end].decode('utf-8')
      else:
        self.long_description = str[start:end]
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
# genpy-generated accessors: lazily create and reuse the struct.Struct
# objects used by the message (de)serialization code.
def _get_struct_I():
    global _struct_I
    return _struct_I
_struct_Hf = None
def _get_struct_Hf():
    # '<Hf' = little-endian uint16 (status) + float32 (progress).
    global _struct_Hf
    if _struct_Hf is None:
        _struct_Hf = struct.Struct("<Hf")
    return _struct_Hf
| [
"rymonyu@gmail.com"
] | rymonyu@gmail.com |
297f0900fb895a2b9d47e22dba06bc0e49e89095 | fd4b63792f0aa44acba8e656f2c71f6e4dd61377 | /web/changelly.py | 48c2dafe3e756c46132bdb2f0567c64e46bf7c80 | [] | no_license | procaff3inator/changepark | 03869a4ebe7c789bc516672ebb1fa6475cb22550 | fc55a4492c4084f92031de26ae87050d045d9e94 | refs/heads/master | 2020-03-19T01:07:07.462694 | 2018-04-29T05:35:10 | 2018-04-29T05:35:10 | 135,524,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,879 | py | import copy
import hashlib
import hmac
import json
import requests
from functools import wraps
from uuid import uuid4
def api_method(f):
    """Decorate a payload-builder so calling it performs the API request.

    The wrapped function must return a dict with ``url``, ``headers`` and
    ``payload`` keys (see ``Changelly._prepare_payload``); the wrapper POSTs
    that payload and returns the ``requests`` response object.

    :param f: A function/method returning the request payload dict
    """
    @wraps(f)
    def d(*args, **kwargs):
        payload = f(*args, **kwargs)
        # NOTE: a debug print(payload) was removed here -- it leaked the
        # signed headers (api-key and HMAC signature) to stdout on every call.
        return requests.post(
            payload['url'],
            headers=payload['headers'],
            data=payload['payload']
        )
    return d
class Changelly(object):
    """Thin client for the Changelly JSON-RPC exchange API.

    Each public method builds a signed JSON-RPC payload dict; the module's
    ``api_method`` decorator POSTs it and returns the ``requests`` response.
    """

    def __init__(self, url, key, secret):
        self.url = url
        self.key = key
        self.secret = secret

    def _prepare_payload(self, params):
        """Wrap *params* in a JSON-RPC 2.0 envelope with a random id,
        serialize it, and sign the serialized body with HMAC-SHA512 using
        the API secret.

        Returns the dict consumed by ``api_method``: url, payload, headers.
        """
        json_pl = {'jsonrpc': '2.0', 'id': str(uuid4())}
        json_pl.update(params)
        serialized_data = json.dumps(json_pl)
        sign = hmac.new(
            self.secret.encode('utf-8'),
            serialized_data.encode('utf-8'),
            hashlib.sha512
        ).hexdigest()
        headers = {
            'api-key': self.key,
            'sign': sign,
            'Content-type': 'application/json',
        }
        return {'url': self.url, 'payload': serialized_data, 'headers': headers}

    @api_method
    def get_currencies(self):
        """Fetch a list of supported currencies from the server."""
        return self._prepare_payload({
            'method': 'getCurrencies',
            'params': [],
        })

    @api_method
    def get_min_amount(self, fromcurr, tocurr):
        """Get min amount that can be exchanged between
        two different currencies.

        :param fromcurr: Currency to change from
        :param tocurr: Currency to change to
        """
        return self._prepare_payload({
            'method': 'getMinAmount',
            'params': {
                'from': fromcurr,
                'to': tocurr,
            },
        })

    @api_method
    def get_exchange_amount(self, fromcurr, tocurr, amount):
        """Get the exchange amount between two different
        currencies.

        :param fromcurr: Currency to change from
        :param tocurr: Currency to change to
        :param amount: Amount to be exchanged
        """
        return self._prepare_payload({
            'method': 'getExchangeAmount',
            'params': {
                'from': fromcurr,
                'to': tocurr,
                'amount': amount,
            },
        })

    @api_method
    def get_status(self, transaction_id):
        """Get the status of a transaction.

        :param transaction_id: Id of the transaction
        """
        return self._prepare_payload({
            "method": "getStatus",
            "params": {
                "id": transaction_id
            },
        })

    @api_method
    def create_transaction(self, fromcurr, tocurr, address, amount, **kwargs):
        """Create a transaction to convert from one currency to another.

        :param fromcurr: From Currency
        :param tocurr: To Currency
        :param address: Address to send the amount to
        :param amount: Amount to send
        :param extraid: Required for XRP, STEEM/SBD, XLM, DCT, XEM
        :param refundextraid: Required for XRP, STEEM/SBD, XLM, DCT, XEM
        """
        params = {
            'from': fromcurr,
            'to': tocurr,
            'address': address,
            'amount': amount,
            # 'refundAddress': address, # for now let's have no refund!
        }
        if 'extraid' in kwargs:
            params['extraId'] = kwargs['extraid']
        if 'refundextraid' in kwargs:
            params['refundExtraId'] = kwargs['refundextraid']
        return self._prepare_payload({
            'method': 'createTransaction',
            'params': params,
        })

    @api_method
    def get_transactions(self, **kwargs):
        """Get a list of transactions according to the filter
        params passed.

        :param currency: Currency to filter from
        :param address: Address to filter by
        :param extraId: Extra id needed by some currencies
        :param limit: Result limit
        :param offset: Result offset
        """
        # BUGFIX: kwargs were previously discarded ('params': {}), so the
        # documented filters had no effect; they are now forwarded.
        # Calls with no kwargs behave exactly as before.
        return self._prepare_payload({
            'method': 'getTransactions',
            'params': kwargs,
        })

    @api_method
    def find_transactions(self, **kwargs):
        """Filter transactions by the given params (alias of
        get_transactions, kept for backward compatibility).
        """
        return self._prepare_payload({
            'method': 'getTransactions',
            'params': kwargs,
        })
if __name__ == '__main__':
    # Manual smoke test: build a client from local credentials and exercise
    # one endpoint. NOTE(review): the live call below creates a REAL
    # btc->ltc transaction against the configured API -- run deliberately.
    from config import api_creds
    c = Changelly(
        api_creds['url'],
        api_creds['key'],
        api_creds['secret']
    )
    # Previously-exercised endpoints, kept for reference:
    # print("Foo: {}".format(c.get_currencies().text))
    # print("Foo: {}".format(c.get_min_amount('btc', 'ltc').text))
    # print("Foo: {}".format(c.get_exchange_amount("btc", "eth", "100").text))
    # print("Foo: {}".format(c.get_status('f6e0c6a5bb05').text))
    # print("Foo: {}".format(c.get_status('4bb51c2cca9b').text))
    # print("Foo: {}".format(c.get_transactions().text))
    # def create_transaction(self, fromcurr, tocurr, address, amount, **kwargs):
    address = 'LhNXzB2AWQ1Q2ArLPwefvrwY9cCENtDz47'
    print("Foo: {}".format(c.create_transaction('btc', 'ltc', address, '0.00359353', extraid=None).text))
| [
"procaff3inator@gmail.com"
] | procaff3inator@gmail.com |
70ba1e98be58094fefb1b3ad1735ac0b7a6c9499 | 80ce9b73a0447c13838de64e89fa95f24852f95a | /get_textf_from_lsi.py | fdf0f4ecb0ceed992bdde759286d4cb904a8443c | [] | no_license | thomason-jesse/synpol | 17458cd05484e783d3fe84d6c951b27728286b91 | 2edd89d88fbab50bcaa7d77a9e83e3cfe4627a14 | refs/heads/master | 2021-01-20T10:00:36.555994 | 2017-10-20T16:39:34 | 2017-10-20T16:39:34 | 90,314,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,484 | py | #!/usr/bin/env python
__author__ = 'jesse'
''' takes a wnid graph, a set of wnid -> text observation maps, and a serialized lsi model and
calculates the textual features for the wnid observations given the observed text and outputs
a map from wnid -> text features
'''
import argparse
import pickle
import os
import time
def main():
# read infiles
print "reading in urls, observations, lsi model, and dictionary..."
f = open(FLAGS_wnid_urls, 'rb')
wnid_urls = pickle.load(f)
wnids = wnid_urls.keys()
f.close()
print "... read graph"
if FLAGS_text_obs_unified > 0:
f = open(FLAGS_text_obs_infile, 'rb')
wnid_text = pickle.load(f)
f.close()
print "... read text observations"
print "... done"
# calculate lsi textual features from text corpus observations
print "launching jobs to calculate lsi textual features from text observations..."
remaining_wnid_jobs = []
for wnid_idx in range(0, len(wnids)):
wnid = wnids[wnid_idx]
launch_job = False
if FLAGS_text_obs_unified > 0:
wnid_text_obs = FLAGS_text_obs_infile
if wnid in wnid_text and len(wnid_text[wnid]) > 0:
launch_job = True
else:
wnid_text_obs = str(wnid_idx) + "_" + FLAGS_text_obs_infile
try:
with open(wnid_text_obs, 'rb') as pf:
_ = pickle.load(pf)
launch_job = True
except (IOError, EOFError):
print "... WARNING: missing pickle for wnid " + str(wnid_idx) + "; cannot get features for it"
if launch_job:
outf = str(wnid_idx) + "_lsi_temp.pickle" if FLAGS_text_obs_unified > 0 else str(wnid_idx) + "_" + FLAGS_outfile
cmd = ("condorify_gpu_email python get_textf_from_lsi_for_wnid.py " +
"--target_wnid " + wnid + " " +
"--text_obs_infile " + wnid_text_obs + " " +
"--lsi_dictionary " + FLAGS_lsi_dictionary + " " +
"--lsi_dictionary " + FLAGS_lsi_dictionary + " " +
"--tfidf_model " + FLAGS_tfidf_model + " " +
"--lsi_model " + FLAGS_lsi_model + " " +
"--lsi_fsize " + str(FLAGS_lsi_fsize) + " " +
"--outfile " + outf +
str(wnid_idx) + "_lsi_temp")
os.system(cmd)
remaining_wnid_jobs.append(wnid_idx)
print "... done"
# poll for jobs finished and build merged duplicates structure
if FLAGS_text_obs_unified > 0:
print "merging textf results into map as they become available..."
wnid_textf = {}
while len(remaining_wnid_jobs) > 0:
time.sleep(10) # poll for finished scripts every 10 seconds
newly_finished_jobs = []
for wnid_idx in remaining_wnid_jobs:
log_fn = str(wnid_idx) + "_lsi_temp"
lsi_fn = log_fn + ".pickle"
if os.path.isfile(lsi_fn):
try:
with open(lsi_fn, 'rb') as pf:
new_textf = pickle.load(pf)
except (IOError, EOFError, ValueError, KeyError):
continue
newly_finished_jobs.append(wnid_idx)
os.system("rm " + lsi_fn)
os.system("rm err." + log_fn)
os.system("rm " + log_fn)
wnid_textf[wnids[wnid_idx]] = new_textf
remaining_wnid_jobs = [wnid_idx for wnid_idx in remaining_wnid_jobs if wnid_idx not in newly_finished_jobs]
if len(newly_finished_jobs) > 0:
print ("... " + str(len(remaining_wnid_jobs)) + " wnids remain after adding wnids: " +
str(newly_finished_jobs))
whether_to_continue = raw_input("continue checks(Y/n)? ") # handle this weird shit
if whether_to_continue == 'n':
break
# write textf
print "writing wnid -> textf observations to file..."
with open(FLAGS_outfile, 'wb') as f:
d = wnid_textf
pickle.dump(d, f)
print "... done"
if __name__ == '__main__':
    # Command-line interface: every flag is required. Parsed values are
    # exposed to main() as module-level FLAGS_<name> globals.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--wnid_urls', type=str, required=True,
                            help="wnid urls used when getting text observations")
    arg_parser.add_argument('--text_obs_infile', type=str, required=True,
                            help="wnid text observations file")
    arg_parser.add_argument('--text_obs_unified', type=int, required=True,
                            help="whether wnid text observations are in one file or one per wnid (1 for one file)")
    arg_parser.add_argument('--lsi_dictionary', type=str, required=True,
                            help="dictionary of words used in lsi model")
    arg_parser.add_argument('--tfidf_model', type=str, required=True,
                            help="tfidf model used by lsi")
    arg_parser.add_argument('--lsi_model', type=str, required=True,
                            help="serialized lsi model")
    arg_parser.add_argument('--lsi_fsize', type=int, required=True,
                            help="number of features in lsi")
    arg_parser.add_argument('--outfile', type=str, required=True,
                            help="output text features from w2v")
    parsed = arg_parser.parse_args()
    for flag_name, flag_value in vars(parsed).items():
        globals()['FLAGS_%s' % flag_name] = flag_value
    main()
| [
"jesse@cs.utexas.edu"
] | jesse@cs.utexas.edu |
faac087b45aa6ee29a57ab65290e48b37be927c5 | 03143da0cf99ea92d372feca0954d22d64858d86 | /Approach 4/EMNIST/EMNIST-4/utils/mnistutil.py | a7b7435b0cbd75973bfc88d7ab188e5b1eaa0596 | [] | no_license | rangeetpan/moduleDecompose | ff31732a878e5f9d5e79c3bba9fd9f051c7a5d16 | 508c2a87e00c4e1b616bc29515fc1de2aba55c4e | refs/heads/master | 2021-02-20T08:49:55.157544 | 2020-03-06T04:50:43 | 2020-03-06T04:50:43 | 245,332,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,184 | py | '''
Created on Feb 8, 2019
@author: mislam
'''
from keras.datasets import mnist
from skimage.transform import resize
import numpy as np
from keras import backend as K
import keras
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
class MNISTUitl:
    """Data loading and training helpers for MNIST / EMNIST digit experiments.

    The train* methods share identical preprocessing and differ only in the
    model built on top; the former copy-pasted bodies are factored into the
    private helpers ``_prepare`` and ``_fit_dense`` (same prints, same order).
    NOTE(review): the class name's spelling ("Uitl") is preserved because
    external code imports it under this name.
    """

    def __init__(self):
        # Placeholder attribute; no method reads it.
        self.name = None

    def load(self, f):
        """Return the array stored under key 'arr_0' in the .npz file *f*."""
        return np.load(f)['arr_0']

    def _filter_resize(self, images, labels, keep, img_rows, img_cols):
        """Select samples whose label is in *keep*, resizing each image to
        (img_rows, img_cols); returns (images, labels) as numpy arrays."""
        kept_x = []
        kept_y = []
        for i in range(len(labels)):
            if labels[i] in keep:
                kept_x.append(resize(images[i], (img_rows, img_cols), mode='constant'))
                kept_y.append(labels[i])
        return np.array(kept_x), np.array(kept_y)

    def getdata(self, a, b, img_rows=28, img_cols=28):
        """Return (x_train, y_train, x_test, y_test) restricted to MNIST
        digits *a* and *b*, each image resized to (img_rows, img_cols)."""
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        x_zo, y_zo = self._filter_resize(x_train, y_train, (a, b), img_rows, img_cols)
        xt_zo, yt_zo = self._filter_resize(x_test, y_test, (a, b), img_rows, img_cols)
        return x_zo, y_zo, xt_zo, yt_zo

    def getdata2(self, a, b, img_rows=28, img_cols=28):
        """Return (x_train, y_train, x_test, y_test) for EMNIST classes 0-9.

        NOTE(review): *a* and *b* are kept only for signature compatibility
        with getdata() -- selection has always been hard-coded to labels 0-9
        after shifting EMNIST's 1-based labels down by one.
        """
        x_train = self.load('emnist-train-imgs.npz')
        x_test = self.load('emnist-test-imgs.npz')
        y_train = self.load('emnist-train-labels.npz') - 1  # 1-based -> 0-based
        y_test = self.load('emnist-test-labels.npz') - 1
        keep = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
        x_zo, y_zo = self._filter_resize(x_train, y_train, keep, img_rows, img_cols)
        xt_zo, yt_zo = self._filter_resize(x_test, y_test, keep, img_rows, img_cols)
        return x_zo, y_zo, xt_zo, yt_zo

    def _prepare(self, x_zo, y_zo, xt_zo, yt_zo, img_rows, img_cols):
        """Reshape images to single-channel 4D tensors (honoring the keras
        image_data_format), scale pixels by 1/255, and echo the shapes.
        Labels pass through unchanged (sparse integer targets)."""
        if K.image_data_format() == 'channels_first':
            x_zo = x_zo.reshape(x_zo.shape[0], 1, img_rows, img_cols)
            xt_zo = xt_zo.reshape(xt_zo.shape[0], 1, img_rows, img_cols)
        else:
            x_zo = x_zo.reshape(x_zo.shape[0], img_rows, img_cols, 1)
            xt_zo = xt_zo.reshape(xt_zo.shape[0], img_rows, img_cols, 1)
        x_train = x_zo.astype('float32')
        x_test = xt_zo.astype('float32')
        x_train /= 255
        x_test /= 255
        print('x_train shape:', x_train.shape)
        print(x_zo.shape,x_train.shape[0], 'train samples', y_zo.shape)
        print(x_test.shape[0], 'test samples')
        y_train = y_zo
        y_test = yt_zo
        print(y_zo.shape,y_train.shape)
        return x_train, y_train, x_test, y_test

    def _fit_dense(self, x_train, y_train, hidden_names, units, numclass,
                   img_rows, img_cols, ep, show_summary):
        """Build Flatten -> Dense(units, relu) per name in *hidden_names* ->
        softmax(numclass), compile (adam, sparse categorical crossentropy),
        optionally print the summary, fit for *ep* epochs, return the model."""
        layers = [keras.layers.Flatten(input_shape=(img_rows, img_cols, 1), name="Input")]
        for layer_name in hidden_names:
            layers.append(keras.layers.Dense(units, activation=tf.nn.relu, name=layer_name))
        layers.append(keras.layers.Dense(numclass, activation=tf.nn.softmax, name="output"))
        nm = keras.Sequential(layers)
        nm.compile(optimizer='adam',
                   loss='sparse_categorical_crossentropy',
                   metrics=['accuracy'])
        if show_summary:
            print(nm.summary())
        nm.fit(x_train, y_train, epochs=ep)
        return nm

    def train(self, x_zo, y_zo, xt_zo, yt_zo, img_rows=28, img_cols=28, numclass=2):
        """Train a 7-unit single-hidden-layer classifier for 10 epochs.
        Returns (model, x_test, y_test)."""
        x_train, y_train, x_test, y_test = self._prepare(x_zo, y_zo, xt_zo, yt_zo, img_rows, img_cols)
        nm = self._fit_dense(x_train, y_train, ["H"], 7, numclass, img_rows, img_cols, 10, False)
        return nm, x_test, y_test

    def train2(self, x_zo, y_zo, xt_zo, yt_zo, img_rows=28, img_cols=28, numclass=10, ep=20):
        """Train a 49-unit single-hidden-layer classifier for *ep* epochs.
        Returns (model, x_test, y_test)."""
        x_train, y_train, x_test, y_test = self._prepare(x_zo, y_zo, xt_zo, yt_zo, img_rows, img_cols)
        nm = self._fit_dense(x_train, y_train, ["H"], 49, numclass, img_rows, img_cols, ep, True)
        return nm, x_test, y_test

    def trainDense2(self, x_zo, y_zo, xt_zo, yt_zo, img_rows=28, img_cols=28, numclass=10, ep=20):
        """Train a 2-hidden-layer (49 units each) classifier.
        Returns (model, x_test, y_test)."""
        x_train, y_train, x_test, y_test = self._prepare(x_zo, y_zo, xt_zo, yt_zo, img_rows, img_cols)
        nm = self._fit_dense(x_train, y_train, ["H1", "H2"], 49, numclass, img_rows, img_cols, ep, True)
        return nm, x_test, y_test

    def trainDense4(self, x_zo, y_zo, xt_zo, yt_zo, img_rows=28, img_cols=28, numclass=10, ep=20):
        """Train a 4-hidden-layer (49 units each) classifier.
        Returns (model, x_test, y_test)."""
        x_train, y_train, x_test, y_test = self._prepare(x_zo, y_zo, xt_zo, yt_zo, img_rows, img_cols)
        nm = self._fit_dense(x_train, y_train, ["H1", "H2", "H3", "H4"], 49, numclass, img_rows, img_cols, ep, True)
        return nm, x_test, y_test

    def trainDense6(self, x_zo, y_zo, xt_zo, yt_zo, img_rows=28, img_cols=28, numclass=10, ep=20):
        """Train a 6-hidden-layer (49 units each) classifier.
        Returns (model, x_test, y_test)."""
        x_train, y_train, x_test, y_test = self._prepare(x_zo, y_zo, xt_zo, yt_zo, img_rows, img_cols)
        nm = self._fit_dense(x_train, y_train, ["H1", "H2", "H3", "H4", "H5", "H6"], 49, numclass, img_rows, img_cols, ep, True)
        return nm, x_test, y_test

    def trainData(self, x_zo, y_zo, xt_zo, yt_zo, img_rows=28, img_cols=28, numclass=10, ep=20):
        """Run only the shared preprocessing (no model is trained).
        *numclass* and *ep* are unused but kept for signature compatibility.
        Returns (x_test, y_test, x_train, y_train) -- note the order."""
        x_train, y_train, x_test, y_test = self._prepare(x_zo, y_zo, xt_zo, yt_zo, img_rows, img_cols)
        return x_test, y_test, x_train, y_train

    def train3(self, x_zo, y_zo, xt_zo, yt_zo, img_rows=28, img_cols=28, numclass=10, ep=20):
        """Train a small conv net on one-hot labels for *ep* epochs.
        Returns (model, x_test, y_test)."""
        input_shape = (img_rows, img_cols, 1)
        x_zo = x_zo.reshape(x_zo.shape[0], img_rows, img_cols, 1)
        xt_zo = xt_zo.reshape(xt_zo.shape[0], img_rows, img_cols, 1)
        x_train = x_zo.astype('float32')
        x_test = xt_zo.astype('float32')
        x_train /= 255
        x_test /= 255
        y_train = keras.utils.to_categorical(y_zo, numclass)
        y_test = keras.utils.to_categorical(yt_zo, numclass)
        # NOTE(review): the output layer has always been hard-coded to 10
        # classes even though labels use *numclass*; kept as-is to preserve
        # behavior -- confirm before calling with numclass != 10.
        num_classes = 10
        model = Sequential()
        model.add(Conv2D(32, kernel_size=(3, 3),
                         activation='relu',
                         input_shape=input_shape))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dense(128, activation='relu'))
        model.add(Dense(num_classes, activation='softmax'))
        model.compile(loss=keras.losses.categorical_crossentropy,
                      optimizer=keras.optimizers.Adadelta(),
                      metrics=['accuracy'])
        model.fit(x_train, y_train, epochs=ep)
        return model, x_test, y_test
"rangeet@iastate.edu"
] | rangeet@iastate.edu |
c10986ff491244041f84595f91b37372f02b12ba | 22cfcaff5b211ddcf447a37a7409d9097b198d96 | /ex4301.py | 941aa165787cbff286f512a93bd0f98e63303f5d | [] | no_license | CasCoy/LPTHW | 03920c696db4a2423c9b2705b2d7ee5a7c08c460 | 3d0ed0a4ffc914e474cd7abb7d6cef15bf65d4ce | refs/heads/master | 2021-08-30T11:25:37.100736 | 2017-12-17T18:28:09 | 2017-12-17T18:28:09 | 109,824,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,435 | py | # -*- coding: utf-8 -*-
from sys import exit
from random import randint
class Scene(object):
def enter(self):
print "This scene is not yet configured. Subclass it and implement enter()"
exit(1)
class Engine(object):
    """Drives the game loop over a scene map.

    Repeatedly enters scenes -- each enter() returns the name of the next
    scene -- until the map's 'finished' scene is reached, then enters that
    final scene once.
    """

    def __init__(self, scene_map):
        # The map must supply opening_scene() and next_scene(name).
        self.scene_map = scene_map

    def play(self):
        current = self.scene_map.opening_scene()
        final = self.scene_map.next_scene("finished")
        while current != final:
            current = self.scene_map.next_scene(current.enter())
        # Give the closing scene its turn as well.
        current.enter()
class Delay(Scene):
quips = [
"Your flight will is delayed.",
"I think you will have to live here forever.",
"HAHA you will never be comfortable again."
]
def enter(self):
print Delay.quips[randint(0, len(self.quips)-1)]
exit(1)
class Airport(Scene):
    """Opening scene: the player must get past a school group to check in.

    enter() returns the name of the next scene: 'delay' (run or wait),
    'going_through_security' (fight), or 'airport' to retry on bad input.
    """
    def enter(self):
        # Scene narration, then a free-text prompt with three valid choices.
        print "Oh how exciting, you are going home for the Christmas Holidays"
        print "That sounds just like a lovely time, it is too bad you are so far away"
        print "If you want to have a nice holiday, you will first need to arrive,"
        print "you need to get on the airplane before it leaves "
        print "and do not forget your luggage"
        print "\n"
        print "You are rushing out of your taxi to get to the check in desk when"
        print "a group of grade school kids line up for their field trip."
        print "They are all standing in front of the check in desk blocking you."
        print "They are about to check in one after one when a new desk opens."
        print "They are fast, small, agile, and ahead of you...you (run, wait, fight)"
        action = raw_input("> ")
        # 'run' and 'wait' both lose (-> Delay); only 'fight' advances.
        if action == "run":
            print "Fast thinking!"
            print "Who told you it was a good idea to run through small children?"
            print "Don't you know that you could injure one of them, or worse, yourself."
            print "they are shuffeling around now"
            print "you weave in and out of them and toppel over one of their hello kitty backpacks"
            print "you suffer a concussion and have to go to the hospital. missin your flight. oops. "
            return 'delay'
        elif action == "wait":
            print "Use your Buddhist training"
            print "take a deep breath"
            print "In"
            print "out"
            print "in"
            print "out"
            print "you wait forever."
            return 'delay'
        elif action == "fight":
            print "you drop your suitcase"
            print "allowing both of your hands to be free "
            print "you reach in your pockets"
            print "find your smart phone and play a video"
            print "all the kids are distracted and subdued"
            print "you actually do note need to fight at all, just pick up your luggage and walk calmly to the desk"
            return 'going_through_security'
        else:
            # Unrecognized input: loop back into this same scene.
            print "DOES NOT COMPUTE!"
            return 'airport'
class going_through_security(Scene):
    """Keypad scene: the player must enter the flight number.

    NOTE(review): the prose says "wrong 3 times" but the guard
    (guesses < 10) actually allows up to 11 attempts - confirm intended.
    """
    def enter(self):
        """Play the scene and return the next scene's name."""
        print "Boarding pass in hand"
        print "time to get on the plane"
        print "just another obstacle in the way, this time not as small as children"
        print "Hopefully you know your flight number"
        print "you have to tell the agent what it is to get through"
        print "wrong 3 times and you won't get through"
        print "get it right, otherwise you will not make it"
        #code = "%d%d%d" % (randint(1,9), randint(1,9), randint(1,9))
        # The flight number is fixed (random generation left commented out).
        code = ("069")
        guess = raw_input("[keypad]> ")
        guesses = 0
        while guess != code and guesses < 10:
            print "I am sorry, that flight doesn't exist, are you sure?!"
            guesses += 1
            guess = raw_input("[keypad]> ")
        if guess == code:
            print "Thank you"
            print "You better hurry, your flight is leaving soon"
            print "please find your terminal"
            return 'terminal'
        else:
            print "It is 2017, what kind of world are you living in"
            print "that you think you can get on a plane without your flight number"
            print "Ridiculous. You will freeze to death in this airport by the time you rememeber"
            return 'delay'
class Terminal(Scene):
    """Document-order scene: hand over boarding pass and passport in the
    right order to board the plane."""
    def enter(self):
        """Play the scene and return the next scene's name."""
        print "You managed to get through security relatively unharmed"
        print "Ecxept for that body search, but you are fine"
        print "You have to make it though the terminal before check in is over"
        print "you run through the long hallways looking for your gate"
        print "you finally find it and you greet the ticket agent"
        print "She would like to see your documents."
        print "You serach through your bag"
        print "Your passport, boarding pass, are there"
        print "You have to give them to her in the right order do you give her them as:"
        print "first option: 1.passport 2.boarding pass"
        print "or"
        print "second option: 1.boarding pass 2.passport"
        action = raw_input("> ")
        if action == "first option":
            # Losing branch.
            print "Hurridly you make the quick decision in your head"
            print "that of course she wants your passport first"
            print "a.nd you would be wrong"
            print "She does not accept your documents"
            print "and you have to sit inside the airport"
            print "watching the plane take off"
            return 'delay'
        elif action == "second option":
            # Winning branch: advance to the airplane.
            print "You take a risk giving her your boarding pass first"
            print "Would she even accept it not knowing who you are??"
            print "She cracks you a smile and you feel like maybe it is working"
            print "she takes your passports and compares the content"
            print "she scans both"
            print "you wait...and wait....and wait...for the red laser to beep"
            print "it finally does. you can board."
            print "you board the airplane"
            return 'airplane'
        else:
            print "DOES NOT COMPUTE!"
            # Unrecognized input replays this scene.
            return "terminal"
class Airplane(Scene):
    """Seat-guessing scene: pick the correct seat out of five.

    NOTE(review): int(guess) raises ValueError on non-numeric input,
    which would crash the game - confirm whether that is acceptable.
    """
    def enter(self):
        """Play the scene and return the next scene's name."""
        print "You go through the long tunnel"
        print "waiting behind the heard of people who also want to see their families"
        print "you enter the aircraft, seach for your seat"
        print "the ink on your boarding pass is smudged from your sweat"
        print "you need to decide which of the five free seats is yours"
        print "you have to take a risk"
        print "which one do you take?"
        #good_pod = randint(1,5)
        # The winning seat is fixed (random choice left commented out).
        right_seat = 1
        guess = raw_input("[pod #]> ")
        if int(guess) != right_seat:
            print "OH OH OH! %s is not yours" % guess
            print "That is definitely the wrong seat..."
            print "The flight attendant comes and throws you off the plane for "
            print "disorderly conduct. "
            return 'delay'
        else:
            print "Nervous you wait for everyone else to board the plane, "
            print " as you sit in seat %s" % guess
            print "You can see the last passengers board"
            print "none of them are coming for your seat"
            print "you sight a deep sigh of relief, finally you can breath"
            print "Now you can focus on seeing your family."
            return 'arrival'
class arrival(Scene):
    """Winning scene: print the ending and terminate the program."""
    def enter(self):
        print "You fly 12 hours home without any more stress"
        print "Your family waits at your gate to pick you up."
        print "After 3 years, you can finally celebrate Christmas"
        print "With your family again. Merry Christmas."
        exit(1)
class Map(object):
    """Registry resolving scene-name strings to shared scene instances.

    Keys must match the strings returned by the scenes' enter() methods.
    NOTE(review): Airport.enter() can return 'airport' (lowercase), which
    has no entry here and would resolve to None - confirm intended.
    """
    # Scene instances are created once at class definition time.
    scenes = {
        'Airport': Airport(),
        'going_through_security': going_through_security(),
        'terminal': Terminal(),
        'airplane': Airplane(),
        'delay': Delay(),
        'arrival': arrival(),
    }
    def __init__(self, start_scene):
        # Name of the scene the game opens with.
        self.start_scene = start_scene
    def next_scene(self, scene_name):
        # dict.get: unknown names resolve to None rather than raising.
        val = Map.scenes.get(scene_name)
        return val
    def opening_scene(self):
        return self.next_scene(self.start_scene)
#a_map = Map("Airport")
#a_game = Engine(a_map)
#a_game.play()
| [
"noreply@github.com"
] | noreply@github.com |
00b4d530809478ba8d5e470aab29216b7d93a01e | 6e781205dfb2aa7cf43709b4a9c208f4bb7117b7 | /victory.py | 45dca785110c60f4f79650b494f1ec19b5518a3f | [] | no_license | timofeyegorov/Python-developer-5-Console_file_manager | d04458e6c9750dbb5c98e039facc362b3c6e41a6 | a83bc9d79b6345aef14e54f6a983f9447264950b | refs/heads/master | 2023-06-16T23:23:44.563669 | 2021-07-14T07:32:22 | 2021-07-14T07:32:22 | 384,894,594 | 0 | 0 | null | 2021-07-14T07:32:23 | 2021-07-11T08:18:08 | Python | UTF-8 | Python | false | false | 1,912 | py | import random
def victory_game():
    """Console quiz: guess footballers' birth dates in dd.mm.yyyy format.

    Each round picks 5 random players, reads the answers from stdin,
    prints the score, and offers another round until the user types the
    stop word.  All user-facing text is Russian and kept verbatim.
    """
    # The reference data is constant, so build it once instead of
    # rebuilding the whole dict on every round of the while-loop.
    # Each entry: name -> [date as dd.mm.yyyy, date spelled out in Russian].
    data = {'Рубен Диаш': ['14.05.1997', 'Четырнадцатое мая 1997 года'],
            'Мохаммед Салах': ['15.06.1992', 'Пятнадцатое июня 1992 года'],
            'Кевин Де Брюйне': ['28.06.1991', 'Двадцать восьмое июня 1991 года'],
            'Неймар': ['05.02.1992', 'Пятое февраля 1992 года'],
            'Эрлинг Холланд': ['21.07.2000', 'Двадцать первое июля 2000 года'],
            'Килиан Мбаппе': ['20.12.1998', 'Двадцатое декабря 1998 года'],
            'Бруну Фернандеш': ['08.09.1994', 'Восьмое сентября 1994 года'],
            'Лионель Месси': ['24.06.1987', 'Двадцать четвертое июня 1987 года'],
            'Криштиану Роналду': ['05.02.1985', 'Пятое февраля 1985 года'],
            'Роберт Левандовски': ['21.08.1988', 'Двадцать первое августа 1988 года']
            }
    while True:
        # Five distinct random players per round.
        filtered_data = random.sample(list(data.keys()), 5)
        wrong, right = 0, 0
        for name in filtered_data:
            answer = input(f'Введите дату рождения {name} в формате dd.mm.yyyy: ')
            if answer != data[name][0]:
                print(f'Неверно, дата рождения {name} - {data[name][1]}')
                wrong += 1
            else:
                right += 1
        print('Правильных ответов: ', right)
        print('Ошибок: ', wrong)
        game = input('Сыграть еще? (введите нет, чтобы завершить игру): ')
        if game == 'нет':
            break
| [
"timofeyegorov48@gmail.com"
] | timofeyegorov48@gmail.com |
7908b1e1cfb79c9a9aed81d50ee32663279e5be3 | 81e14734e111a91a37888dfcc85e6d0f30dce56c | /closet palindrome number/find_palindrome.py | 72b8108fbf4ce69b3de58da9c3e0f7bfa0ae2217 | [] | no_license | arunvemana/pythontasks | b96ad4325c40df9c844403717e174ef2d84f7889 | 2d6f76bb48efcc4e718efaf88a50ec33b1a0c652 | refs/heads/master | 2023-05-24T17:00:27.912830 | 2022-11-30T12:06:32 | 2022-11-30T12:06:32 | 193,498,172 | 3 | 0 | null | 2023-05-23T05:16:05 | 2019-06-24T12:09:23 | Python | UTF-8 | Python | false | false | 452 | py |
def closest_palindrome(num):
    """Return the palindrome obtained by mirroring the left half of *num*.

    The left half (including the middle digit for odd lengths) is kept
    and reflected onto the right half, e.g. 123 -> 121, 1222 -> 1221,
    12345 -> 12321.  Note this is the nearest "mirror" palindrome, not
    necessarily the numerically closest one.

    Fixes over the original:
    - ``length // 2`` instead of ``length / 2``: on Python 3 the true
      division produced a float, which is not a valid slice index.
    - parity is tested on the number's length, not on the half-length;
      the old test produced wrong-length output for 5-, 6-digit inputs etc.
    """
    digits = str(num)
    length = len(digits)
    half = length // 2
    if length % 2 == 0:
        # Even length: mirror the whole left half.
        return int(digits[:half] + digits[:half][::-1])
    else:
        # Odd length: keep the middle digit, mirror the digits before it.
        return int(digits[:half + 1] + digits[:half][::-1])
if __name__ == '__main__':
    # Demo run: one odd-length and one even-length input.
    for number in [123, 1222]:
        print(closest_palindrome(number))
| [
"avemana@loginsoft.com"
] | avemana@loginsoft.com |
69e96d91f1e97b1e4777741ed5926f0e3ffe5d96 | d37ab0fa7dd0026425fc15a13288847ae0954f48 | /src/helixweb/billing/forms_filters.py | dd3d23578a55a833025b34264b3fabe186615716 | [] | no_license | sand8080/helixweb | 4fd84e3df8add42996684a288c16148f8582297b | 5f08b4cc41d6bd72f54382ebe5e9b45c428fac4b | refs/heads/master | 2020-12-24T15:23:16.944216 | 2014-02-17T10:56:45 | 2014-02-17T10:56:45 | 1,048,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,846 | py | from django import forms
from django.utils.translation import ugettext_lazy as _
from helixweb.core.widgets import ConstInput
from helixweb.core.forms_filters import (FilterForm, AbstractFilterActionLogsForm,
AbstractFilterAllActionLogsForm, AbstractFilterSelfActionLogsForm,
AbstractFilterUserActionLogsForm)
from helixweb.billing.forms import BillingForm
class FilterBillingForm(FilterForm, BillingForm):
    """Base class for billing filter forms: filtering/paging behaviour
    from FilterForm, billing-service plumbing from BillingForm."""
    pass
class AbstractBillingFilterActionLogsForm(AbstractFilterActionLogsForm, FilterBillingForm):
    """Base form for filtering billing action logs by action name.

    Supplies the selectable billing API action names; subclasses only
    pick the scope (all users / self / one user).
    """
    action = 'get_action_logs'
    def __init__(self, *args, **kwargs):
        # Choice values must equal the action names recorded in the action
        # log ('' means "any action").  The bonus entry was previously
        # misspelled 'add_bounus' and could never match a logged action.
        kwargs['choices'] = (('', ''),
            ('add_balance', _('add balance')),
            ('modify_balance', _('modify balance')),
            ('add_receipt', _('add receipt')),
            ('add_bonus', _('add bonus')),
            ('lock', _('lock')),
            ('unlock', _('unlock')),
            ('charge_off', _('charge off')),
            ('modify_used_currencies', _('modify currencies')),
        )
        super(AbstractBillingFilterActionLogsForm, self).__init__(*args, **kwargs)
class FilterAllActionLogsForm(AbstractBillingFilterActionLogsForm, AbstractFilterAllActionLogsForm):
    """Action-log filter over all users' logs."""
    pass
class FilterSelfActionLogsForm(AbstractBillingFilterActionLogsForm, AbstractFilterSelfActionLogsForm):
    """Action-log filter scoped to the requesting user's own logs."""
    pass
class FilterUserActionLogsForm(AbstractBillingFilterActionLogsForm, AbstractFilterUserActionLogsForm):
    """Action-log filter scoped to a specific user's logs."""
    pass
class FilterCurrenciesForm(FilterBillingForm):
    """Lists all currencies known to the billing service, newest code first."""
    action = 'get_currencies'
    ordering_param = '-code'
class FilterUsedCurrenciesForm(FilterBillingForm):
    """Lists only the currencies enabled for this environment."""
    action = 'get_used_currencies'
    ordering_param = '-code'
class FilterBalanceForm(FilterBillingForm):
    """Filter form for the get_balances billing API call.

    Offers id/user/currency filters, from/to ranges over the balance
    amounts, and an active/inactive/all switch.
    """
    action = 'get_balances'
    def __init__(self, *args, **kwargs):
        # 'currencies' feeds the currency_code choice field; it must be
        # popped before the base Form constructor sees the kwargs.
        currencies = kwargs.pop('currencies', [])
        super(FilterBalanceForm, self).__init__(*args, **kwargs)
        self.fields['id'] = forms.IntegerField(label=_('balance id'), required=False)
        self.fields['user_id'] = forms.IntegerField(label=_('user id'), required=False)
        self.fields['currency_code'] = self._gen_currency_code(currencies, required=False)
        self.fields['from_real_amount'] = forms.DecimalField(label=_('real amount from'),
            required=False)
        self.fields['to_real_amount'] = forms.DecimalField(label=_('real amount to'),
            required=False)
        self.fields['from_virtual_amount'] = forms.DecimalField(label=_('virtual amount from'),
            required=False)
        self.fields['to_virtual_amount'] = forms.DecimalField(label=_('virtual amount to'),
            required=False)
        self.fields['from_overdraft_limit'] = forms.DecimalField(label=_('overdraft limit from'),
            required=False)
        self.fields['to_overdraft_limit'] = forms.DecimalField(label=_('overdraft limit to'),
            required=False)
        self.fields['from_locked_amount'] = forms.DecimalField(label=_('locked amount from'),
            required=False)
        self.fields['to_locked_amount'] = forms.DecimalField(label=_('locked amount to'),
            required=False)
        self.fields['is_active'] = forms.ChoiceField(label=_('is active'), required=False, widget=forms.widgets.RadioSelect(),
            choices=(('all', _('all')), ('1', _('active')), ('0', _('inactive'))),
            initial='all')
    def as_helix_request(self):
        """Build the helix API request dict: drop empty filter params and
        convert is_active ('0'/'1') to a boolean, or remove it for 'all'."""
        d = super(FilterBalanceForm, self).as_helix_request()
        self._strip_filter_param(d, 'id')
        self._strip_filter_param(d, 'user_id')
        self._strip_filter_param(d, 'currency_code')
        self._strip_filter_param(d, 'from_real_amount')
        self._strip_filter_param(d, 'to_real_amount')
        self._strip_filter_param(d, 'from_virtual_amount')
        self._strip_filter_param(d, 'to_virtual_amount')
        self._strip_filter_param(d, 'from_overdraft_limit')
        self._strip_filter_param(d, 'to_overdraft_limit')
        self._strip_filter_param(d, 'from_locked_amount')
        self._strip_filter_param(d, 'to_locked_amount')
        if (not d['filter_params']['is_active'] or
            d['filter_params']['is_active'] == 'all'):
            d['filter_params'].pop('is_active')
        else:
            # '1' -> True, '0' -> False
            val = bool(int(d['filter_params']['is_active']))
            d['filter_params']['is_active'] = val
        return d
class AbstractFilterLocksForm(FilterBillingForm):
    """Base form for the get_locks billing API call.

    Subclasses add the user_id/balance_id fields in their preferred
    widget style and then call _add_common_fields().
    """
    action = 'get_locks'
    def _add_common_fields(self):
        # Fields shared by every locks filter variant.
        self.fields['order_id'] = forms.CharField(label=_('order id'),
            max_length=64, required=False)
        self.fields['from_creation_date'] = forms.DateField(label=_('from'), required=False)
        self.fields['to_creation_date'] = forms.DateField(label=_('to'), required=False)
    def as_helix_request(self):
        """Drop empty filter params and normalize the date range bounds."""
        d = super(AbstractFilterLocksForm, self).as_helix_request()
        self._strip_filter_param(d, 'user_id')
        self._strip_filter_param(d, 'order_id')
        self._strip_filter_param(d, 'balance_id')
        self._strip_from_date_param(d, 'from_creation_date')
        self._strip_to_date_param(d, 'to_creation_date')
        return d
class FilterLocksForm(AbstractFilterLocksForm):
    """Locks filter with freely editable user_id/balance_id fields."""
    def __init__(self, *args, **kwargs):
        super(FilterLocksForm, self).__init__(*args, **kwargs)
        self.fields['user_id'] = forms.IntegerField(label=_('user id'),
            required=False)
        self.fields['balance_id'] = forms.IntegerField(label=_('balance id'),
            required=False)
        self._add_common_fields()
class FilterUserBalanceLocksForm(AbstractFilterLocksForm):
    """Locks filter for a fixed user/balance: the ids are rendered
    read-only (ConstInput) and cannot be edited."""
    def __init__(self, *args, **kwargs):
        super(FilterUserBalanceLocksForm, self).__init__(*args, **kwargs)
        self.fields['user_id'] = forms.IntegerField(label=_('user id'),
            widget=ConstInput, required=False)
        self.fields['balance_id'] = forms.IntegerField(label=_('balance id'),
            widget=ConstInput, required=False)
        self._add_common_fields()
class FilterSelfLocksForm(AbstractFilterLocksForm):
    """Locks filter scoped to the requesting user (no id fields)."""
    action = 'get_locks_self'
    def __init__(self, *args, **kwargs):
        super(FilterSelfLocksForm, self).__init__(*args, **kwargs)
        self._add_common_fields()
class AbstractFilterTransactionsForm(FilterBillingForm):
    """Base form for the get_transactions billing API call.

    Subclasses add id/user_id/balance_id fields and then call
    _add_common_fields().
    """
    action = 'get_transactions'
    def _add_common_fields(self):
        # Fields shared by every transactions filter variant.
        self.fields['order_id'] = forms.CharField(label=_('order id'),
            max_length=64, required=False)
        # Choice values must match the transaction type names stored by
        # the billing service; None stands for "any type".
        self.fields['type'] = forms.ChoiceField(label=_('type'), required=False,
            widget=forms.widgets.Select(),
            choices=((None, _('all')), ('receipt', _('receipt')), ('bonus', _('bonus')),
            ('lock', _('lock')), ('unlock', _('unlock')), ('charge_off', _('charge off'))),
            initial='all')
        self.fields['from_creation_date'] = forms.DateField(label=_('from'), required=False)
        self.fields['to_creation_date'] = forms.DateField(label=_('to'), required=False)
    def as_helix_request(self):
        """Drop empty filter params and normalize the date range bounds."""
        d = super(AbstractFilterTransactionsForm, self).as_helix_request()
        self._strip_filter_param(d, 'id')
        self._strip_filter_param(d, 'user_id')
        self._strip_filter_param(d, 'order_id')
        self._strip_filter_param(d, 'type')
        self._strip_filter_param(d, 'balance_id')
        self._strip_from_date_param(d, 'from_creation_date')
        self._strip_to_date_param(d, 'to_creation_date')
        return d
class FilterTransactionsForm(AbstractFilterTransactionsForm):
    """Transactions filter with freely editable id fields."""
    def __init__(self, *args, **kwargs):
        super(FilterTransactionsForm, self).__init__(*args, **kwargs)
        self.fields['user_id'] = forms.IntegerField(label=_('user id'),
            required=False)
        self.fields['balance_id'] = forms.IntegerField(label=_('balance id'),
            required=False)
        self.fields['id'] = forms.IntegerField(label=_('id'),
            required=False)
        self._add_common_fields()
class FilterUserTransactionsForm(AbstractFilterTransactionsForm):
    """Transactions filter for a fixed user/balance: those ids are
    rendered read-only (ConstInput); the transaction id stays editable."""
    def __init__(self, *args, **kwargs):
        super(FilterUserTransactionsForm, self).__init__(*args, **kwargs)
        self.fields['user_id'] = forms.IntegerField(label=_('user id'),
            widget=ConstInput, required=False)
        self.fields['balance_id'] = forms.IntegerField(label=_('balance id'),
            widget=ConstInput, required=False)
        self.fields['id'] = forms.IntegerField(label=_('id'),
            required=False)
        self._add_common_fields()
class FilterSelfTransactionsForm(AbstractFilterTransactionsForm):
action = 'get_transactions_self'
def __init__(self, *args, **kwargs):
super(FilterSelfTransactionsForm, self).__init__(*args, **kwargs)
self.fields['id'] = forms.IntegerField(label=_('id'),
required=False)
self._add_common_fields() | [
"sand8080@gmail.com"
] | sand8080@gmail.com |
f6e1dbcd885565e82d2661115d0d44d5701a04c0 | 1dff43a4fd4a8e84f8ce104dbb3a0d79185d9744 | /jekyde/tests/test_jekyde.py | 50005df25c5e0d44d9e7104a73ee31ab9e2912fb | [
"MIT"
] | permissive | devilicecream/jekyde | 090134b060baffc4d75e7248b5113d4ee2a445b5 | 2440c183db00e97bf67ead6a00974a2bd9bc8fac | refs/heads/master | 2020-05-19T07:56:36.774930 | 2019-05-04T17:01:20 | 2019-05-04T17:01:20 | 184,909,369 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,061 | py | import sqlalchemy as sa
from ming import schema as s
from ming.odm import MappedClass
from jekyde.drivers import Driver
from jekyde.meta import JekydeModel
from sqlalchemy.orm import configure_mappers
from .conftest import BaseModel
def test_change_type(sql_session, ming_session):
    """Switching a JekydeModel's active driver (Ming -> SQLAlchemy)
    routes creates and queries to the newly selected backend.

    sql_session / ming_session are pytest fixtures from conftest.
    """
    # Mongo-side mapping (Ming ODM) for collection "amodel".
    class AMingModel(MappedClass):
        class __mongometa__:
            session = ming_session
            name = "amodel"
        _id = s.ObjectId()
        value = s.String()
    # SQL-side mapping for table "amodel".
    class ASQLModel(BaseModel):
        __tablename__ = "amodel"
        id = sa.Column(sa.Integer(), primary_key=True)
        value = sa.Column(sa.String(length=32))
    # Facade model that can switch between the two backends.
    class AModel(JekydeModel):
        _driver = Driver.Ming
        _models = {Driver.Ming: AMingModel, Driver.SQLAlchemy: ASQLModel}
    configure_mappers()
    # Create through the default (Ming) driver and verify it is queryable.
    AModel(value="test")
    ming_session.flush()
    assert AModel.query.find({}).first()
    # Flip to SQLAlchemy: creates/queries must now hit the SQL backend.
    AModel.use(Driver.SQLAlchemy)
    with sql_session() as session:
        new_obj = AModel(value="test2")
        session.add(new_obj)
        session.flush()
        created = session.query(AModel).get(new_obj.id)
        assert created
def test_migration(sql_session, ming_session):
    """A document created via Ming can be migrated to the SQLAlchemy
    backend with migrate_to() and then read back through SQL.

    sql_session / ming_session are pytest fixtures from conftest.
    """
    # Mongo-side mapping (Ming ODM) for collection "bmodel".
    class BMingModel(MappedClass):
        class __mongometa__:
            session = ming_session
            name = "bmodel"
        _id = s.ObjectId()
        value = s.String()
    # SQL-side mapping for table "bmodel".
    class BSQLModel(BaseModel):
        __tablename__ = "bmodel"
        id = sa.Column(sa.Integer(), primary_key=True)
        value = sa.Column(sa.String(length=32))
    # Facade model that can switch between the two backends.
    class BModel(JekydeModel):
        _driver = Driver.Ming
        _models = {Driver.Ming: BMingModel, Driver.SQLAlchemy: BSQLModel}
    configure_mappers()
    # Create one document through the Ming driver.
    BModel(value="test")
    ming_session.flush()
    doc = BModel.query.find({}).first()
    assert doc
    # Migrate the Ming document to the SQL backend and persist it.
    with sql_session() as session:
        new_obj = BModel.migrate_to(doc, Driver.SQLAlchemy)
        session.add(new_obj)
        session.flush()
        obj_id = new_obj.id
    # A fresh session must see the migrated row via the SQL mapping.
    with sql_session() as session:
        assert session.query(BSQLModel).get(obj_id)
| [
"walterdangalante@gmail.com"
] | walterdangalante@gmail.com |
3b02deb956c2887c303ede1abb8311eb09a6c91f | 8cdcf99f1e63c380967a294653f7dc0e1d8e8291 | /tests/test_reverse_polish.py | 443cf4e27e21f32aa64c196dd4147db3b9d3a0ec | [] | no_license | dIgor93/Sandbox | 1092f8e27dd57a4eb04676f7e445165f20df9ee7 | a7ffec83ee4036a6394a8f95e30923523e1d05f8 | refs/heads/master | 2022-02-23T19:46:28.589014 | 2019-10-27T13:31:06 | 2019-10-27T13:31:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,053 | py | from unittest import TestCase
from polish import reverse_polish
class TestReverse_polish(TestCase):
    """Unit tests for reverse_polish(), a reverse-Polish-notation evaluator.

    NOTE(review): the meaning of the first argument (-1) is not visible
    here - confirm against polish.reverse_polish's signature.  Judging by
    the expected values below, '?' is a ternary select driven by the
    boolean produced by a preceding '<' comparison.
    """
    def test_reverse_polish_0(self):
        # 2 + 3
        res = reverse_polish(-1, [2, 3, '+'])
        print(res)
        assert res == 5
    def test_reverse_polish_1(self):
        # 2 * (2 + 2)
        res = reverse_polish(-1, [2, 2, 2, '+', '*'])
        print(res)
        assert res == 8
    def test_reverse_polish_2(self):
        # 3 * (2 + 1)
        res = reverse_polish(-1, [3, 2, 1, '+', '*'])
        print(res)
        assert res == 9
    def test_reverse_polish_3(self):
        # (3 + 2) * (1 + 3)
        res = reverse_polish(-1, [3, 2, '+', 1, 3, '+', '*'])
        print(res)
        assert res == 20
    def test_reverse_polish_4(self):
        # 10 < 5 is false -> the second alternative (14) is selected
        res = reverse_polish(-1, [10, 5, '<', 5, 14, '?'])
        print(res)
        assert res == 14
    def test_reverse_polish_5(self):
        # 5 < 10 is true -> the first alternative (5) is selected
        res = reverse_polish(-1, [5, 10, '<', 5, 14, '?'])
        print(res)
        assert res == 5
    def test_reverse_polish_6(self):
        # 5 - 14
        res = reverse_polish(-1, [5, 14, '-'])
        print(res)
        assert res == -9
| [
"wildig1014@yandex.ru"
] | wildig1014@yandex.ru |
33ca51a5d85d8352fdbdea328351d34be314446f | c7b1282a1d66eb2756a63909320f515d03917deb | /day00/ex05/kata03.py | 5c2e7ccc39d7df276537cc565f9161a3e8a4fd7a | [] | no_license | AdrianWR/PythonBootcamp | 5603f2108f4ff80292e8964f75af44223fe0febc | 9dc6ef196b3bc51c079efcd19ea421561ceb8a6d | refs/heads/master | 2022-12-23T00:26:22.833866 | 2020-09-23T12:36:14 | 2020-09-23T12:36:14 | 259,933,367 | 1 | 2 | null | 2020-04-29T13:35:31 | 2020-04-29T13:31:00 | Python | UTF-8 | Python | false | false | 107 | py | phrase = "The right format"
if __name__ == "__main__":
    # Pad the phrase on the left with dashes to a total width of 42
    # characters and print it without a trailing newline.
    print(phrase.rjust(42, '-'), end='')
| [
"adrian.w.roque@gmail.com"
] | adrian.w.roque@gmail.com |
a4f91d131845e9d72b46556179c3d49e831e15f0 | c7bb2f583148c3851720e1f535520cfb5f3df32e | /lib/models/removal_model.py | 8ded2eec4bfcab124b17f5aacaa9ac33ddc74fd1 | [] | no_license | PeterZhouSZ/Shadow-aware-portrait-relight | e248d8afee96a812997044b7f51d0a0616fce070 | 119fb975de77c58a1049074195bc009660700575 | refs/heads/main | 2023-03-24T06:53:08.161122 | 2021-03-28T10:55:35 | 2021-03-28T10:55:35 | 374,818,414 | 1 | 0 | null | 2021-06-07T22:43:55 | 2021-06-07T22:43:55 | null | UTF-8 | Python | false | false | 4,347 | py |
from lib.networks.base_network import MsImageDis
from lib.networks.removal_network import Gen
from lib.utils.utils import weights_init, get_model_list, vgg_preprocess, load_vgg16, get_scheduler,ssim
from lib.models.base_model import BaseModels
from torch.autograd import Variable
import torch
import torch.nn as nn
import os
import sys
# x y is subject
# a b is illumination
class Models(BaseModels):
    """Shadow-removal GAN trainer: a generator producing an image plus a
    depth-like map, and a multi-scale discriminator, with one update
    step for each network."""
    def __init__(self, hyperparameters):
        """Build generator/discriminator, their Adam optimizers and LR
        schedulers, and (optionally) a frozen VGG16 for perceptual loss.

        hyperparameters: config dict; required keys include 'lr',
        'models_name', 'gen', 'dis', 'beta1', 'beta2', 'weight_decay',
        'init', 'display_size'.
        """
        super(Models, self).__init__()
        lr = hyperparameters['lr']
        self.model_name = hyperparameters['models_name']
        # Initiate the networks
        if(self.model_name=='removal'):
            self.gen = Gen(hyperparameters['input_dim_a'], hyperparameters['gen'])
            self.dis = MsImageDis(hyperparameters['input_dim_a'],
                                  hyperparameters['dis']) # discriminator for domain a
        else:
            sys.exit('error on models')
        self.instancenorm = nn.InstanceNorm2d(512, affine=False)
        self.style_dim = hyperparameters['gen']['style_dim']
        # Wrap both networks for multi-GPU data parallelism.
        self.gen = nn.DataParallel(self.gen)
        self.dis = nn.DataParallel(self.dis)
        # fix the noise used in sampling
        # NOTE(review): display_size is computed but never used below.
        display_size = int(hyperparameters['display_size'])
        # Setup the optimizers
        beta1 = hyperparameters['beta1']
        beta2 = hyperparameters['beta2']
        gen_params = list(self.gen.parameters())
        self.gen_opt = torch.optim.Adam([p for p in gen_params if p.requires_grad],
                                        lr=lr, betas=(beta1, beta2), weight_decay=hyperparameters['weight_decay'])
        self.gen_scheduler = get_scheduler(self.gen_opt, hyperparameters)
        dis_params = list(self.dis.parameters())
        self.dis_opt = torch.optim.Adam([p for p in dis_params if p.requires_grad],
                                        lr=lr, betas=(beta1, beta2), weight_decay=hyperparameters['weight_decay'])
        self.dis_scheduler = get_scheduler(self.dis_opt, hyperparameters)
        # Network weight initialization (discriminator gets its own scheme).
        self.apply(weights_init(hyperparameters['init']))
        self.dis.apply(weights_init('gaussian'))
        # Load VGG model if needed; it is frozen and only provides features.
        if 'vgg_w' in hyperparameters.keys() and hyperparameters['vgg_w'] > 0:
            self.vgg = load_vgg16(hyperparameters['vgg_model_path'] + '/models')
            self.vgg.eval()
            for param in self.vgg.parameters():
                param.requires_grad = False
            self.vgg = nn.DataParallel(self.vgg)
    def gen_update(self, out_data,hyperparameters):
        """One generator step on a batch.

        out_data unpacks to (input image, shadow-free target, mask, depth);
        presumably all batched tensors with mask broadcastable over the
        image channels - TODO confirm shapes against the data loader.
        Also assembles self.image_display, a side-by-side visualization.
        """
        Xa_out, X_removal, mask, depth=out_data
        self.gen_opt.zero_grad()
        # encode
        out_x,p_x = self.gen.forward(torch.mul(Xa_out, mask))
        # main relight loss
        self.loss_gen_prime_x_b = self.recon_criterion_mask(out_x, X_removal, mask)
        # main p loss (depth branch, compared under a single mask channel)
        self.loss_gen_prime_x_p = self.recon_criterion_mask(p_x, depth, mask[:,0:1,:,:])
        # GAN loss
        self.loss_gen_adv = self.calc_gen_loss(self.dis.forward(torch.mul(out_x, mask)))
        # Weighted sum of the three terms; weights come from the config.
        self.loss_gen_total = hyperparameters['relight'] * self.loss_gen_prime_x_b + \
                              hyperparameters['x_p'] * self.loss_gen_prime_x_p + hyperparameters['gan_w']* self.loss_gen_adv
        self.loss_gen_total.backward()
        self.gen_opt.step()
        # Visualization strip: first sample of the batch, first 3 channels.
        image_anchor = Xa_out[0:1].detach().cpu()[:3]
        image_recons = torch.mul(out_x, mask)[0:1].detach().cpu()[:3]
        image_gt = X_removal[0:1].detach().cpu()[:3]
        depth_gt = (depth[0:1].detach().cpu()[:3]).repeat(1,3,1,1)
        depth = (p_x[0:1].detach().cpu()[:3]).repeat(1,3,1,1)
        self.image_display = torch.cat((image_anchor, image_recons, image_gt,depth_gt,depth),dim=3)
    def dis_update(self, x_a,gt_xb,x_mask,hyperparameters):
        """One discriminator step: real = masked ground truth, fake =
        masked generator output (detached so the generator is not updated)."""
        self.dis_opt.zero_grad()
        out_x,_ = self.gen.forward(torch.mul(x_a, x_mask))
        # D loss
        out_x = torch.mul(out_x, x_mask)
        gt_xb = torch.mul(gt_xb, x_mask)
        self.loss_dis = self.calc_dis_loss(self.dis.forward(out_x.detach()),self.dis.forward(gt_xb))
        #self.loss_dis = self.dis.calc_dis_loss(out_x.detach(), gt_xb)
        self.loss_dis_total = hyperparameters['gan_w'] * self.loss_dis
        self.loss_dis_total.backward()
        self.dis_opt.step()
| [
"guoxian.song@bytedance.com"
] | guoxian.song@bytedance.com |
1c2030fad87f3a2f73634809687e3293afdb9848 | 79991f71b68590de24a5f8bb904054e30d38e77f | /main.py | cc71bc69feec84cef3cb93add31bf5d67ab407b7 | [] | no_license | ottinger/housing-price-prediction-web | 7d745d335b5253d27396ffe3a31c148bd0e11423 | 12d574159274d8afc0345be5bc97868fc7c753ca | refs/heads/master | 2020-09-08T13:33:16.744118 | 2019-11-28T07:57:38 | 2019-11-28T07:57:38 | 221,148,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,467 | py | from flask import Flask, send_from_directory, request
import pandas as pd
import pickle
import json
app = Flask(__name__)
@app.route('/predict', methods=['POST','GET'])
def predict():
    """Predict a price for the house described by the request's JSON body.

    The JSON object maps feature names to values; string values address
    one-hot encoded columns named "<field>-<value>".  When the request
    carries no JSON body, sample_item.json is used instead.  Returns a
    JSON string {"prediction": <value>} from the module-level model.
    """
    json_ = request.get_json()
    print(json_)
    if json_:
        predict_data = json_
    else:
        with open("sample_item.json", "rb") as file:
            predict_data = json.loads(file.read())
    # One all-NaN row whose columns match the model's training features;
    # fields supplied by the request are filled in below, the rest stay
    # NaN.  (Replaces DataFrame.append(pd.Series(), ...) and iteritems(),
    # both removed in pandas 2.0, and the unreliable chained df[col][0]
    # assignment.)
    predict_df = pd.DataFrame(float('nan'), index=[0], columns=field_names)
    for field, value in predict_data.items():
        if not isinstance(value, str):
            predict_df.at[0, field] = value
        else:
            # String value - stored as one-hot columns: set the matching
            # "<field>-<value>" column to 1 and its siblings to 0.
            for col_name in predict_df.columns:
                if col_name.startswith(field):
                    predict_df.at[0, col_name] = 1 if col_name == field + "-" + value else 0
    prediction = model.predict(predict_df)
    return_dict = {"prediction": prediction[0]}
    return json.dumps(return_dict)
@app.route('/<path:path>')
def send_static(path):
    """Serve any other URL path as a file from the static/ directory."""
    return send_from_directory('static', path)
@app.route('/')
def send_root():
    """Serve the single-page app entry point."""
    return app.send_static_file('index.html')
if __name__ == '__main__':
    # Load the model artifacts once at startup.  Context managers close
    # the files promptly; the original bare open() calls leaked the
    # handles.  NOTE: pickle.load must only be used on trusted files.
    with open("column_names.pkl", "rb") as f:
        field_names = pickle.load(f)
    with open("model.pkl", "rb") as f:
        model = pickle.load(f)
    app.run(port=8000)
| [
"michael@ottinger.net"
] | michael@ottinger.net |
658c5b53a50e59b89e284ab9c792c46081f0daeb | d20626ef3b9ae6b9a702d67333209d678e27105d | /robot_booking/freebusy/FreeBusyEvent_Builder.py | 7690f3f737a404f94c5917cfcd0c9e42d122705a | [] | no_license | geleazar1000111/bebop | e3d95b641b036847e50913690b07d0eb12958693 | 435f134248079f369a8f0004a0e07b8d73e0ce21 | refs/heads/master | 2022-11-24T01:46:37.334014 | 2020-07-26T17:54:27 | 2020-07-26T17:54:27 | 263,472,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,707 | py | """This class builds free time slots based on the specified time range and events that are booked in that time range.
First, days are generated as nested dictionaries that are stored in the ``days_in_range`` attribute. The workday is
defined as the value for the 'range' key. 'booked' is another key that gets filled in later, if there are any booked
events in that specified time range. Once any booked events are appended to the 'booked' list, free time slots are
then created. If there are no booked events, then the whole day is created as a free event."""
from datetime import datetime, timedelta
import re
class FreeBusyEventBuilder:
    """Builds the free (bookable) time slots for one robot.

    Work days run 09:00-17:00.  ``days_in_range`` maps each date to
    ``{'range': {...workday bounds...}, 'booked': [...booked events...]}``;
    ``free_events`` collects the gaps between booked events.
    """
    def __init__(self, robot_id):
        self.robot_id = robot_id
        # 0 = not evaluated yet, 1 = has free slots, -1 = fully booked.
        self.is_available = 0
        self.days_in_range = {}
        self.free_events = []
    def construct_free_events(self):
        """Derive the free slots of every day from its booked events.

        Booked events are assumed chronological and inside the workday
        (TODO confirm - overlapping events would produce negative gaps,
        which make_free_event silently drops).
        """
        for day in self.days_in_range:
            day_info = self.days_in_range[day]
            curstart = day_info['range']['start']
            if day_info['booked']:
                # Gap before each booked event, then the tail of the day.
                for event in day_info['booked']:
                    self.make_free_event(curstart, event['start'])
                    curstart = event['end']
                self.make_free_event(curstart, day_info['range']['end'])
            else:
                # No bookings: the whole workday is one free slot.
                self.make_free_event(day_info['range']['start'], day_info['range']['end'])
    def initialize_free_events(self, booked, min_year, min_month, min_day, max_year, max_month, max_day):
        """Build days for the given date range, register the booked
        events (Google-calendar style dicts with 'start'/'end' strings),
        and return the resulting list of free events."""
        curstart = datetime(min_year, min_month, min_day, hour=9)
        curend = datetime(max_year, max_month, max_day, hour=17)
        self.fill_days(curstart, curend)
        for event in booked:
            event_start = convert_google_datetime(event['start'])
            event_end = convert_google_datetime(event['end'])
            self.days_in_range[event_start.date()]['booked'].append({'start': event_start, 'end': event_end})
        self.construct_free_events()
        return self.free_events
    def fill_days(self, start_range, end_range):
        """Create one 09:00-17:00 workday entry per calendar day between
        start_range and end_range (inclusive)."""
        delta = end_range - start_range
        for i in range(delta.days + 1):
            date = start_range + timedelta(days=i)
            date_start = date.replace(hour=9)
            date_end = date.replace(hour=17)
            self.days_in_range[date.date()] = {'range': {'start': date_start, 'end': date_end, 'duration': 8},
                                               'booked': []}
    def calculate_gap(self, prev_event, next_event):
        """Return the gap between two datetimes in whole hours (floored,
        as a float)."""
        diff = next_event - prev_event
        diff_hours = diff.total_seconds() // 3600
        return diff_hours
    def make_free_event(self, start, end):
        """Record [start, end] as a free slot if the gap is big enough.

        calculate_gap floors to whole hours, so in practice only gaps of
        one hour or more pass the 0.5 threshold.  The gap is computed
        once (the original called calculate_gap twice).
        """
        gap = self.calculate_gap(start, end)
        if gap >= 0.5:
            self.free_events.append({'start': start, 'end': end, 'duration': gap})
    def check_if_free(self, booked):
        """Set is_available: 1 when free slots exist, -1 when there are
        bookings but no free slots; otherwise leave it unchanged."""
        if self.free_events:
            self.is_available = 1
        elif booked:
            # free_events is empty on this branch, so every slot is taken.
            self.is_available = -1
"""Helper Functions"""
def is_single_day_event(datetime_str):
    """Return True when the string carries a time component
    (YYYY-MM-DDTHH:MM:SS...), False for a bare date."""
    pattern = re.compile(r'.*-.*-.*T.*:.*:.*')
    return pattern.match(datetime_str) is not None
def convert_google_datetime(datetime_str):
if not is_single_day_event(datetime_str[:datetime_str.rfind("-")]):
return datetime.strptime(datetime_str, '%Y-%m-%d')
else:
return datetime.strptime(datetime_str[:datetime_str.rfind("-")], '%Y-%m-%dT%H:%M:%S%f') | [
"geraldine@osaro.com"
] | geraldine@osaro.com |
20bcbefc778347fd1c292368763549aed1dee9e5 | d51c769deeb16ea0e2f17275e269f6a9e59b1d28 | /WOS + MDM/mapeo_colores.py | 309e292a8fda3c29c516dfd44a5ee3badfa6cd3a | [] | no_license | MDG99/Rubiks-Cube-Solver | 16cd97330987328f10d9e612e5b865acb6c13275 | f17ac944e352478e5b50d9aef6f034476f8c7663 | refs/heads/main | 2023-02-25T20:39:05.111517 | 2021-02-05T11:22:39 | 2021-02-05T11:22:39 | 330,028,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,782 | py | import numpy as np
# Reference palette: one (R, G, B) row per cube colour; the last row is
# the board/background colour.  Row order matters - it maps onto the
# colour letters returned by mapeo() below.
M = [[4, 52, 81],      # blue
     [198, 51, 3],     # red
     [39, 169, 43],    # pale green
     [228, 109, 6],    # orange
     [153, 163, 30],   # yellow
     [147, 165, 120],  # white
     [27, 55, 17]      # background
     ]
def mapeo(img):
    """Classify a sticker image into one of the cube colour letters.

    img: an H x W x 3 pixel array in BGR channel order (channel 2 is
    compared against the palette's R component); pixels are matched by
    exact equality against the rows of M.  Returns 'B', 'R', 'G', 'O',
    'Y', 'W' or 'k' (background).

    Vectorized with NumPy: the original triple Python loop over every
    pixel was O(w*h*7) interpreted operations.
    """
    contador = np.zeros(7)
    blue, green, red = img[:, :, 0], img[:, :, 1], img[:, :, 2]
    for idx, (r, g, b) in enumerate(M):
        matches = np.count_nonzero((red == r) & (green == g) & (blue == b))
        if idx == 6:
            # Background is only flagged (0/1), never counted per pixel,
            # exactly as in the original per-pixel loop.
            contador[6] = 1 if matches else 0
        else:
            contador[idx] = matches
    # Original selection rule preserved: the LAST index reaching the
    # maximum wins, so ties (including the all-zero case) fall through
    # to later palette entries - an all-background tile yields 'k'.
    moda = contador.max()
    indice = int(np.flatnonzero(contador == moda)[-1])
    letters = ['B', 'R', 'G', 'O', 'Y', 'W', 'k']
    return letters[indice]
| [
"ignacioisaac30@gmail.com"
] | ignacioisaac30@gmail.com |
ca7114b9431d0b12bef71d9fd7e5c95a095770dd | a1c08cca122d580994e6a495095fbfeb3e7120d6 | /bert4keras/__init__.py | e559963fc6776e35aaaee73347b221f421fde74d | [
"Apache-2.0"
] | permissive | meissenzheng/bert4keras | cc4c4b25a52521612b9e460e3356e38ad4158d93 | 93c1b8c78a8efac7843e634ee7d5b3b1ea631f5d | refs/heads/master | 2023-04-17T14:14:57.103530 | 2021-04-25T05:10:56 | 2021-04-25T05:10:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | #! -*- coding: utf-8 -*-
__version__ = '0.10.5'
| [
"noreply@github.com"
] | noreply@github.com |
3ecdd02380aca40b278797ea18ccfd30972aae5d | 2d768a41df277b92ca8a15407f24ff01eafb3de6 | /4_Database_Flask_FastAPI/2_FastAPI/main.py | 23468e2a732f7a0ab686277e55b99de58d1a6f3e | [] | no_license | morganpartee/docker_class | 1a2de1e252373f71efa548fe8cccff8911f4b2cd | b9b5977a91133533b144dbcbd889cc25bb4d6070 | refs/heads/master | 2023-03-08T01:41:54.892533 | 2021-02-21T23:46:50 | 2021-02-21T23:46:50 | 284,179,958 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 526 | py | from fastapi import FastAPI
from pickle import load
import numpy as np
import uvicorn
app = FastAPI()
with open("model.pkl", "rb") as f:
clf = load(f)
@app.get("/")
def root():
    """Root endpoint returning a static JSON greeting (simple liveness check)."""
    return {"Hello": "World"}
@app.get("/predict")
def predict(sep_len: int, sep_wid: int, ped_len: int, ped_wid: int):
    """Run the pickled classifier on one four-feature sample.

    Returns {"result": <predicted label>} for the single row
    [sep_len, sep_wid, ped_len, ped_wid].
    """
    features = np.array([[sep_len, sep_wid, ped_len, ped_wid]])
    prediction = clf.predict(features)[0]
    return {"result": prediction}
if __name__ == "__main__":
uvicorn.run("main:app", host="0.0.0.0", port=80)
| [
"morganpartee@gmail.com"
] | morganpartee@gmail.com |
cf0e83ceacc341a163c92016f970b462250aa926 | 4d58c58165792bf22986c3cf53990ba17b28085a | /maya/python/lib/mpcJob.py | 4e27b28d3f9522420d08f0b94fcfcab4e47d8368 | [] | no_license | esernaalonso/dev | aabb54827db0f8507a0abbc24de5e1f7d27fe6c9 | 4fa41b8007e44dc0b574e372617ae65d198e27e9 | refs/heads/master | 2021-01-21T14:02:00.113420 | 2016-06-04T11:56:41 | 2016-06-04T11:56:41 | 47,589,157 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,041 | py | #######################################
# imports
import maya.cmds as cmds
import maya.OpenMayaUI as apiUI
import sys
import time
from mpc import jobtools as jobTls
#######################################
# functionality
def getJob():
    """ Return the job from the environment.

    Thin wrapper around mpc.jobtools.jobName().

    Returns:
        (str or None): Job, or None if no job is found.
    """
    return jobTls.jobName()
def getScene():
    """ Returns the scene from the environment.

    Thin wrapper around mpc.jobtools.sceneName().

    Returns:
        (str or None): Scene, or None if no scene is found.
    """
    return jobTls.sceneName()
def getShot():
    """ Returns the shot from the environment.

    Thin wrapper around mpc.jobtools.shotName().

    Returns:
        (str or None): Shot, or None if no shot is found.
    """
    return jobTls.shotName()
def getPlayblastPath():
    """ Returns the playblast directory for the current job/scene/shot.

    Returns:
        (str or None): '/jobs/<job>/<scene>/<shot>/maya/playblasts/', or
        None when any component is missing from the environment.
    """
    jobName = jobTls.jobName()
    sceneName = jobTls.sceneName()
    shotName = jobTls.shotName()
    # Guard clause: all three components must be present.
    if not (jobName and sceneName and shotName):
        return None
    return "/jobs/" + jobName + "/" + sceneName + "/" + shotName + "/maya/playblasts/"
#######################################
# execution
if __name__ == "__main__":
pass | [
"edusernalonso@gmail.com"
] | edusernalonso@gmail.com |
406e7aae33727bb43762bd69aaa12abd355c309f | fd85b477888a3061233d9ef4814ce45953145618 | /demo/SimplePlot.py | b679ed0e3a0dee702055abc760d3149382a038d9 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | anondroid5/Tkinter | 799f318006c886b592dc340d1d08f34870b5e998 | 3556224ec2e30b31da9f4a472dfe978c639c2c9f | refs/heads/master | 2021-01-17T07:43:32.166211 | 2016-07-18T09:10:12 | 2016-07-18T09:10:12 | 37,201,096 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,202 | py | #coding: utf-8
from Tkinter import *
def main():
root = Tk()
root.title('Simple Plot')
try:
canvas = Canvas(root, width=450, height=300, bg = 'white')
canvas.pack()
Button(root, text='Quit', command=root.quit).pack()
canvas.create_line(100,250,400,250, width=2)
canvas.create_line(100,250,100,50, width=2)
for i in range(11):
x = 100 + (i * 30)
canvas.create_line(x,250,x,245, width=2)
canvas.create_text(x,254, text='%d'% (10*i), anchor=N)
for i in range(6):
x = 250 - (i + 40)
canvas.create_line(100,y,105,y, width=2)
canvas.create_text(96,y, text='%5.1f'% (50.*i), anchor=E)
scaled = []
for x,y in [(12, 56), (20, 94), (33, 98), (45, 120), (61, 180),
(75, 160), (98, 223)]:
scaled.append(100 + 3*x, 250 - (4*y)/5)
canvas.create_line(scaled, fill='black', smooth=1)
for xs,ys in scaled:
canvas.create_oval(x-6,y-6,x+6,y+6, width=1,
outline='black', fill='SkyBlue2')
except:
print 'An error has occured!'
root.mainloop()
main() | [
"boy11922960shooping@gmail.com"
] | boy11922960shooping@gmail.com |
502d4b86495a11eb9b80fc09726abe7faa363290 | 93de0276028ee893ef25e269f25c0492fec6d0d8 | /Client-bl.py | c540da59914e76373be612207e2c7ccaf3d50520 | [] | no_license | antarasargam/Lab2 | dcbdee7cf8c460773df558f39dc1b1541150cfc4 | 3854a175dff5ffde136d213db54c01b2e93c2a37 | refs/heads/master | 2021-08-24T13:19:56.899452 | 2017-11-21T07:32:31 | 2017-11-21T07:32:31 | 105,047,853 | 0 | 3 | null | 2017-11-21T07:32:31 | 2017-09-27T17:24:39 | Python | UTF-8 | Python | false | false | 19,464 | py | #Client
import asyncio
import playground
import random, logging
from .Servera import PEEPpacket
from playground.network.packet import PacketType
from playground.network.packet.fieldtypes import UINT32, STRING, UINT16, UINT8, BUFFER
from playground.network.packet.fieldtypes.attributes import Optional
from playground.network.common.Protocol import StackingProtocol, StackingProtocolFactory, StackingTransport
import zlib
'''class PEEPpacket(PacketType):
DEFINITION_IDENTIFIER = "PEEP.Packet"
DEFINITION_VERSION = "1.0"
FIELDS = [
("Type", UINT8),
("SequenceNumber", UINT32({Optional: True})),
("Checksum", UINT16),
("Acknowledgement", UINT32({Optional: True})),
("Data", BUFFER({Optional: True}))
]'''
class PeepClientTransport(StackingTransport):
    """Transport exposed to the layer above PEEP.

    Delegates write/close/connection_lost to the owning PEEP client protocol
    so application data is segmented and tracked by the PEEP layer first.
    """
    def __init__(self,protocol,transport):
        # Keep both the PEEP protocol (for delegation) and the raw lower
        # transport (handed to StackingTransport).
        self.protocol = protocol
        self.transport = transport
        self.exc = None  # last error; forwarded by connection_lost()
        super().__init__(self.transport)
    def write(self, data):
        # Route writes through the PEEP protocol so data is packetized.
        self.protocol.write(data)
    def close(self):
        # Delegate so the PEEP layer can run its RIP shutdown handshake
        # instead of closing the raw transport directly.
        self.protocol.close()
    def connection_lost(self):
        self.protocol.connection_lost(self.exc)
class PEEPClient(StackingProtocol):
global_number_seq = 0
global_number_ack = 0
count_of_function_call = 0
first_data_seq_number = 0
count_of_function_call_ack = 0
global_packet_size = 0
number_of_packs = 0
recv_window = {}
prev_sequence_number = 0
expected_ackno = 0
sending_window = {}
sending_window_count = 0
global_pig = 0
keylist1= []
t = {}
n = 0
global_received_ack = 0
prev_ack_number = 0
backlog_window = []
def __init__(self, loop):
self.transport = None
self.loop = loop
self._state = 0
def calculateChecksum(self, c):
self.c = c
self.c.Checksum = 0
checkbytes = self.c.__serialize__()
return zlib.adler32(checkbytes) & 0xffff
def checkChecksum(self, instance):
self.instance = instance
pullChecksum = self.instance.Checksum
instance.Checksum = 0
bytes = self.instance.__serialize__()
if pullChecksum == zlib.adler32(bytes) & 0xffff :
return True
else:
return False
async def syn_timeout(self):
while self._state < 2:
await asyncio.sleep(1)
if self._state < 2:
self.transport.write(self.syn)
async def ack_timeout(self):
while self._state < 3:
await asyncio.sleep(0.9)
if self._state < 3:
self.transport.write(self.clientpacketbytes)
'''async def data_timeout(self):
packets = list(self.t.values())
for each_packet in packets:
while each_packet.packet.Acknowledgement < each_packet.packet.SequenceNumber:
await asyncio.sleep(0.3)
for each_packet in packets:
if each_packet.packet.SequenceNumber == each_packet.packet.Acknowledgement:
print("Inside Data Timer")
self.transport.write(each_packet.packet.__serialize__())'''
async def data_timeout(self):
print("Inside Data Timer")
packets = list(self.t.values())
while self.global_received_ack < self.global_number_seq:
await asyncio.sleep(0.1)
for each_packet in packets:
await asyncio.sleep(0.1)
if self.global_received_ack < self.global_number_seq:
if each_packet.packet.SequenceNumber == self.global_received_ack and each_packet.flag<10:
self.transport.write(each_packet.packet.__serialize__())
each_packet.flag += 1
print("Packet Retransmitted.")
def connection_made(self, transport):
print("=============== PEEP Client Connection_made CALLED =========\n")
self.transport = transport
self.protocol = self
if self._state == 0:
packet = PEEPpacket()
packet.Type = 0
packet.SequenceNumber = random.randrange(1, 1000, 1)
packet.Acknowledgement = 0
packet.Data = b"Piggy"
self._state += 1
print("Value of actual state is",self._state)
print("=============== Sending SYN packet ==================\n")
packet.Checksum = self.calculateChecksum(packet)
self.syn = packet.__serialize__()
self.transport.write(self.syn)
self.ta = Timerx(0.1, self.syn_timeout, self.syn)
def data_received(self, data):
print("=============== PEEP Client Data_Received CALLED =============\n")
self.deserializer = PEEPpacket.Deserializer()
self.deserializer.update(data)
for packet in self.deserializer.nextPackets():
checkvalue = self.checkChecksum(packet)
if self._state == 1 and packet.Type == 1:
if checkvalue:
print("SYN-ACK Received. Seqno= ", packet.SequenceNumber, " Ackno=", packet.Acknowledgement)
self.ta.cancel()
#Sending ACK
if packet.Data == b"Piggy":
self.global_pig = 56
print(self.global_pig)
print("Choosing Piggybacking")
else:
print ("Choosing Selective")
ack = PEEPpacket()
ack.Type = 2
ack.SequenceNumber = packet.Acknowledgement
self.global_number_seq = ack.SequenceNumber
ack.Acknowledgement = packet.SequenceNumber + 1
if self.global_pig == 56:
ack.Data = b"Piggy"
self.global_number_ack = ack.Acknowledgement
self._state += 1
ack.Checksum = self.calculateChecksum(ack)
self.clientpacketbytes = ack.__serialize__()
print ("\n=================== Sending ACK =================\n")
self.transport.write(self.clientpacketbytes)
self.tb = Timerx(0.1, self.ack_timeout, self.clientpacketbytes)
peeptransport = PeepClientTransport(self, self.transport)
self.higherProtocol().connection_made(peeptransport)
else:
print("Corrupt SYN-ACK packet received. Please check on server end.")
elif packet.Type == 5:
if checkvalue:
if self._state == 2:
self.tb.cancel()
print("====================Got Encapasulated Packet and Deserialized==================")
#print(packet.Data)
self._state +=1
self.global_received_ack = packet.Acknowledgement
self.global_packet_size = len(packet.Data)
print("The size of packet is:", self.global_packet_size)
print("Seq number of incoming packet", packet.SequenceNumber)
print("Ack Number of incoming packet", packet.Acknowledgement)
self.receive_window(packet)
#if self.global_pig != 56:
# self.sendack(self.update_ack(packet.SequenceNumber, self.global_packet_size))
#self.higherProtocol().data_received(packet.Data)
else:
print("Corrupt Data packet received. Please check on server end.")
elif packet.Type == 2:
if checkvalue:
'''self.return_value = self.check_if_ack_received_before(packet)
if self.return_value == 1:
self.prev_ack_number = 0
else:'''
self.prev_ack_number = packet.Acknowledgement
self.pop_sending_window(packet.Acknowledgement)
#self.prev_ack_number = packet.Acknowledgement
print("ACK Received from the server. Removing data from buffer.", packet.Acknowledgement)
self.global_received_ack = packet.Acknowledgement
#self.pop_sending_window(packet.Acknowledgement)
elif packet.Type == 3:
if checkvalue:
print("RIP Received from Server. Sending RIP-ACK")
# RIPack
ripack = PEEPpacket()
self.exc = 0
ripack.Type = 4
ripack.Acknowledgement = packet.SequenceNumber + 1
ripack.SequenceNumber = 5555
calcChecksum = PEEPClient(self.loop)
ripack.Checksum = calcChecksum.calculateChecksum(ripack)
ripz = ripack.__serialize__()
self.transport.write(ripz)
else:
print("Corrupt RIP packet received. Please check on server end.")
elif packet.Type == 4:
if checkvalue:
print("RIP-ACK Received from Server. Closing down the connection.")
self.exc = 0
self.connection_lost(self.exc)
else:
print("Corrupt RIP-ACK packet received. Please check on server end.")
else:
print("======== Incorrect packet received. Closing connection!=========\n")
self.transport.close()
def sendack(self, ackno):
print ("================== Sending ACK ================\n")
ack = PEEPpacket()
calcChecksum = PEEPClient(self.loop)
ack.Type = 2
ack.Acknowledgement = ackno
print ("ACK No:" + str(ack.Acknowledgement))
# For debugging
ack.Checksum = calcChecksum.calculateChecksum(ack)
#print(ack.Checksum)
bytes = ack.__serialize__()
self.transport.write(bytes)
'''def check_if_ack_received_before(self, packet):
keylist = list(self.sending_window)
self.keylist1 = sorted(keylist)
if self.prev_ack_number == packet.Acknowledgement:
print ("REceived two acks of the same value")
print ("33333333333",self.keylist1)
for key in self.keylist1:
if key == packet.Acknowledgement:
print ("found a key that equals the acknow received")
packet_to_be_retrans = self.sending_window[self.keylist1[0]]
print("So far so goood!")
packet_to_be_retrans.Acknowledgment = self.global_number_ack
bytes_retrans = packet_to_be_retrans.__serialize__()
self.transport.write(bytes_retrans)
print ("ready to return")
return 1'''
def write(self,data):
print ("=================== Writing Data down to wire from Client ================\n")
self.backlog_window.append(data)
print("Post appending BL window in client", self.backlog_window)
if self.sending_window_count <= 100:
print("About to pop backlog in client")
data_from_BL = self.backlog_window.pop(0)
self.encapsulating_packet(data_from_BL)
def encapsulating_packet(self,data_from_BL_1):
udata = data_from_BL_1
self.data_from_BL = data_from_BL_1
i = 0
l = 1
while i < len(udata):
# print("Chunk {}". format(l))
chunk, self.data_from_BL = self.data_from_BL[:1024], self.data_from_BL[1024:]
self.Cencap = PEEPpacket()
self.n += 1
calcChecksum = PEEPClient(self.loop)
self.Cencap.Type = 5
self.Cencap.SequenceNumber = self.update_sequence(chunk)
self.prev_sequence_number = self.Cencap.SequenceNumber # prev_sequence_number is the seq number of the packet sent by client
print("SEQ No:" + str(self.Cencap.SequenceNumber))
self.Cencap.Acknowledgement = self.global_number_ack #
print("ACK No:" + str(self.Cencap.Acknowledgement))
self.Cencap.Data = chunk
# print ("Data is", chunk)
print("Size of data", len(chunk))
self.Cencap.Checksum = calcChecksum.calculateChecksum(self.Cencap)
if self.sending_window_count <= 100:
# print (" Entered count ")
self.Cencap = self.update_sending_window(self.Cencap)
self.bytes = self.Cencap.__serialize__()
i += 1024
l += 1
self.transport.write(self.bytes)
# Creating timer for each data packet
self.timer = PEEPClient(loop)
self.tx = Timerx(0.1, self.data_timeout, self.Cencap)
self.chabi = self.global_number_seq
self.t[self.chabi] = self.tx
else:
print(" Sorry, window is full. ")
i += 1024
#### Put some return statement to handle this exception. Code shouldn't hang. ###
def receive_window(self, pkt):
self.number_of_packs += 1
self.packet = pkt
if self.packet.SequenceNumber == self.global_number_ack:
self.global_number_ack = self.update_ack(self.packet.SequenceNumber, self.global_packet_size) #It's actually updating the expected Seq Number
self.sendack(self.update_ack(self.packet.SequenceNumber, self.global_packet_size))
self.higherProtocol().data_received(self.packet.Data)
self.check_receive_window()
elif self.number_of_packs <= 100:
#and self.packet.SequenceNumber <= self.global_number_ack + (1024*1000):
self.recv_window[self.packet.SequenceNumber] = self.packet.Data
self.sendack(self.global_number_ack)
else:
print ("Receive window is full or the packet has already been received!")
def check_receive_window(self):
sorted_list = []
sorted_list = self.recv_window.keys()
for k in sorted_list:
if k == self.global_number_ack:
self.packet_to_be_popped = self.recv_window[k]
self.sendack(self.update_ack(self.packet_to_be_popped.SequenceNumber, self.global_packet_size))
self.higherProtocol().data_received(self.packet_to_be_popped.Data)
else:
return
prev_packet_size = 0
def calculate_length(self, data):
self.prev_packet_size = len(data)
def update_sequence(self, data):
if self.count_of_function_call == 0:
self.count_of_function_call = 1
self.calculate_length(data)
return self.global_number_seq #for first packet this is equal to synack.ackno
else:
self.global_number_seq = self.prev_sequence_number + self.prev_packet_size
self.calculate_length(data)
return self.global_number_seq
def update_ack(self, received_seq_number, size):
self.received_seq_number = received_seq_number
self.global_number_ack = self.received_seq_number + size
return self.global_number_ack
def update_sending_window(self, packet):
self.packet = packet
self.sending_window_count += 1
self.key = self.global_number_seq
#self.key = self.prev_sequence_number + self.prev_packet_size #removed this because it is redundant to the previous line.
self.sending_window[self.key] = self.packet
#for k,v in self.sending_window.items():
#print ("Key is: ",k, "Packet is: ", v)
#self.sending_window = (sorted(self.sending_window.items()))
keylist = self.sending_window.keys()
self.keylist1 = sorted(keylist)
print("###########################################", self.keylist1)
#print("Sorted keys list is", keylist)
#print("dic type is", type(self.sending_window))
return self.packet
def pop_sending_window(self, AckNum):
#print (" Entered Popping Values ")
self.AckNum = AckNum
print (" Ack Number is: ", self.AckNum)
#self.sending_window = OrderedDict(sorted(self.sending_window.items()))
#print("Keylist1 is", self.keylist1)
for key in self.keylist1:
#print ("Key is: ", key)
if (self.AckNum > key):
#print("Inside Acknum loo.")
#print("The current Dictionary is", self.sending_window)
#Finishing off timers for the packets with ACKs received
'''
seqs = list(self.t.keys())
for chabi in seqs:
if self.AckNum > chabi:
(self.t[chabi]).cancel()
self.t.pop(chabi)
'''
#print("Key value to pop is", key)
self.sending_window.pop(key)
print ("sending window count is",self.sending_window_count)
self.sending_window_count = self.sending_window_count - 1
if self.sending_window_count <= 100:
print("About to pop backlog")
data_from_BL = self.backlog_window.pop(0)
self.encapsulating_packet(data_from_BL)
#else:
#print (" Popped all packets ")
#self.k
self.keylist1 = []
return
def close(self):
#RIPpacket
rip = PEEPpacket()
rip.Type = 3
rip.Acknowledgement = 0
rip.SequenceNumber = 9999
calcChecksum = PEEPClient(self.loop)
rip.Checksum = calcChecksum.calculateChecksum(rip)
ripz = rip.__serialize__()
self.transport.write(ripz)
def connection_lost(self,exc):
print ("============== PEEPClient Closing connection ===========\n")
self.transport.close()
self.loop.stop()
#Timer Function code block starts here
class Timerx():
    """One-shot retransmission timer: waits *timeout* seconds on the event
    loop, then awaits *callback*; cancel() aborts the pending task.

    ``packet`` and ``flag`` are public bookkeeping fields read/written by the
    PEEP protocol (flag counts retransmission attempts of the stored packet).
    """
    def __init__(self, timeout, callback, packet):
        self._timeout = timeout    # delay in seconds before firing
        self._callback = callback  # async callable invoked on expiry
        self.packet = packet       # packet guarded by this timer
        self.flag = 0              # retransmission attempts so far
        self._task = asyncio.ensure_future(self._job())
    async def _job(self):
        # Sleep, then fire the async callback once; cancel() interrupts the
        # sleep so the callback never runs.
        await asyncio.sleep(self._timeout)
        await self._callback()
    def cancel(self):
        self._task.cancel()
loop = asyncio.get_event_loop()
#logging.getLogger().setLevel(logging.NOTSET) # this logs *everything*
#logging.getLogger().addHandler(logging.StreamHandler()) # logs to stderr
Clientfactory = StackingProtocolFactory(lambda: PEEPClient(loop))
'''if __name__ == "__main__":
loop = asyncio.get_event_loop()
logging.getLogger().setLevel(logging.NOTSET) # this logs *everything*
logging.getLogger().addHandler(logging.StreamHandler()) # logs to stderr
Clientfactory = StackingProtocolFactory(lambda: PEEPClient(loop))
ptConnector = playground.Connector(protocolStack=Clientfactory)
playground.setConnector("passthrough", ptConnector)
go = initiate(loop)
coro = playground.getConnector('passthrough').create_playground_connection(go.send_first_packet, '20174.1.1.1', 8888)
loop.run_until_complete(coro)
loop.run_forever()
loop.close()'''
| [
"noreply@github.com"
] | noreply@github.com |
d0fd73ad51f76303fa3a3baf786e05f599ca263c | 59f8e9d5c273974adcac14ccbd410c1e2689c4e1 | /setup.py | a58a482d26006affe9310b191c0521ccebe38755 | [
"MIT"
] | permissive | andrewzeneski/pylodge | 74a9f8c33af4e7d509ca0dc91013ad966ca3b685 | f5fdc470ef986228f8a44ad1c9d2d45ea221a95b | refs/heads/master | 2020-12-27T03:52:11.185337 | 2016-06-02T16:50:55 | 2016-06-02T16:50:55 | 59,770,088 | 1 | 1 | null | 2016-05-26T17:33:06 | 2016-05-26T17:33:06 | null | UTF-8 | Python | false | false | 833 | py | __author__ = 'ashwin'
"""A Test Lodge based pylodge module.
"""
from setuptools import setup
setup(
name='pylodge',
version='0.2.8',
description='Test Automation framework for TestLodge',
url='https://github.com/gettalent/pylodge',
# Author details
author='Ashwin Kondapalli',
author_email='ashwin@gettalent.com',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Quality Assurance',
'Programming Language :: Python',
'License :: OSI Approved :: MIT License',
],
keywords='TestLodge, test automation, pylodge',
packages=['pylodge'],
install_requires=['requests'],
)
| [
"ashwin@gettalent.com"
] | ashwin@gettalent.com |
e50e159516d02e4f28151196e8e7cffa1b2abc36 | 454f5318d68aded03b7ce43371a68f02d51b5e5e | /playlist_website/playlist_website/wsgi.py | 7de9f26408b4609b7b162d9966286c5d1a8c81af | [] | no_license | issaitorres/Portfolio | 5ffd6c3e9c9e59af010a51960e3e116c57557cf9 | b3324b28a0d1c8b8b5f4a1b5fc10428530f18336 | refs/heads/master | 2023-06-18T07:02:43.400662 | 2021-07-15T19:31:13 | 2021-07-15T19:31:13 | 306,566,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | """
WSGI config for playlist_website project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'playlist_website.settings')
application = get_wsgi_application()
| [
"issaitorres@gmail.com"
] | issaitorres@gmail.com |
25a31c75346e71626fe953790437618a840aa0b7 | 3a4860c05baa2791986747bf1d73f6b8da0d7fa6 | /list_append.py | 7387fee0899f55dac4231dc81b7576f89d3b8ca3 | [] | no_license | Raun551/pythontraining | dcdb0acf966d555b7a7ab7c242031a9cff1ac354 | 65e9b32bd92c5e096f8c11fadfca21ed6ea38553 | refs/heads/master | 2020-03-26T14:21:03.761316 | 2018-08-30T14:38:31 | 2018-08-30T14:38:31 | 144,983,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | #Append a list to the second list
list1 = [1, 2, 3, 0]
list2 = ['Red', 'Green', 'Black']
# Iterable unpacking joins the two lists; equivalent to list1 + list2.
final_list = [*list1, *list2]
print(final_list)
| [
"raunaq.malik23@gmail.com"
] | raunaq.malik23@gmail.com |
6b19da70918b7711aee9f2fda10eb6fbec50ba0d | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/126/usersdata/191/29517/submittedfiles/ap2.py | c8f2da701341911eecf630c83018954555844586 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | # -*- coding: utf-8 -*-
# Read four numbers, then print the largest followed by the smallest.
a = float(input('digite a:'))
b = float(input('digite b:'))
c = float(input('digite c:'))
d = float(input('digite d:'))
# BUG FIXED: the original if/elif chains compared the wrong pairs
# (e.g. 'a>=b and b>=c' instead of 'a>=c', and 'c<=d' where 'b<=d' was
# needed), so some orderings printed a value that was not the max/min.
# The built-ins handle every ordering correctly.
valores = [a, b, c, d]
print(max(valores))
print(min(valores))
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
ae12d6fd85aad0110b05be6b293b0a3b5ce40ede | c9fea70a3dc5efafc718b5ede8e9971804cd5265 | /venv/Scripts/pip3-script.py | 1e4799b5e7995b569d60518ccbce4aa11d7c1567 | [] | no_license | zekrihicham/creche | b4de8df3b92d9080e6d5a8ecf2dd4bdfa5759d64 | c23ca4095777cbb9c7d5ccc25377eff63af460f4 | refs/heads/master | 2020-04-22T01:38:33.513119 | 2019-02-10T20:13:48 | 2019-02-10T20:13:48 | 170,020,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | #!C:\Users\zekri\PycharmProjects\Creche\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.1', 'console_scripts', 'pip3')()
)
| [
"36763072+zekrihicham@users.noreply.github.com"
] | 36763072+zekrihicham@users.noreply.github.com |
1c3d00acafd76a610342ab1ef712ad250ee8870c | b2bdd5997ac84b0e19071c1ddc1c1a4d2f4fab58 | /catkin_ws/devel/.private/p2/lib/python2.7/dist-packages/p2/msg/_Ackermann.py | 0dff4e208b8c08e4de290b065cd192a52bee173e | [] | no_license | hbtslys01/RosCodingProject | 860d18531dabe4a969278deff5dbad8a8703ea83 | 226feda08724e92fd94191e123b9442c028283dd | refs/heads/master | 2020-04-11T09:16:17.808626 | 2018-12-13T17:30:08 | 2018-12-13T17:30:08 | 161,671,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,768 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from p2/Ackermann.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class Ackermann(genpy.Message):
_md5sum = "61c7e29a36f91d9c196a9722234d7472"
_type = "p2/Ackermann"
_has_header = False #flag to mark the presence of a Header object
_full_text = """float64 steering_angle
float64 vel
"""
__slots__ = ['steering_angle','vel']
_slot_types = ['float64','float64']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
steering_angle,vel
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(Ackermann, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.steering_angle is None:
self.steering_angle = 0.
if self.vel is None:
self.vel = 0.
else:
self.steering_angle = 0.
self.vel = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_2d().pack(_x.steering_angle, _x.vel))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 16
(_x.steering_angle, _x.vel,) = _get_struct_2d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_2d().pack(_x.steering_angle, _x.vel))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
_x = self
start = end
end += 16
(_x.steering_angle, _x.vel,) = _get_struct_2d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_2d = None
def _get_struct_2d():
global _struct_2d
if _struct_2d is None:
_struct_2d = struct.Struct("<2d")
return _struct_2d
| [
"907098316@qq.com"
] | 907098316@qq.com |
1ca7d40ffa698cb26fa3c02af75ca221a34029a0 | 8112bdea83d0a51c12720f751fb7759ff6741d9d | /tests/test_friends.py | 7d1912acdcf1a75a28e20aa396d280b9cc08ffcf | [] | no_license | dushyantpatel/smap_api | e5ea67a1c28645a7ebfa74576a9be7f848fd9769 | 263e44efd4a976c80ad71413fa05279b4a4efbe0 | refs/heads/master | 2020-03-17T07:39:57.776872 | 2018-06-10T02:37:15 | 2018-06-10T02:37:15 | 133,407,881 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,116 | py | import unittest
import pymysql
import rds_config
import sys
from tests.event import Event
from gateway import main_handler
# rds settings
rds_host = rds_config.db_host
name = rds_config.db_username
password = rds_config.db_password
db_name = rds_config.db_name
connection = None
# resource settings
path = 'friends'
context = None
class TestUsers(unittest.TestCase):
# @classmethod
# def setUpClass(cls):
# global connection
# # connect to database
# try:
# connection = pymysql.connect(rds_host, user=name, passwd=password, db=db_name, connect_timeout=5)
# except pymysql.err.Error as ex:
# template = "ERROR: {0} - Could not connect to MySql instance \n{1!r}"
# message = template.format(type(ex).__name__, ex.args)
# print(message)
# sys.exit()
#
# # setup test users in the database
# add_usr_template = 'INSERT INTO user (display_name, email, first_name, last_name, missionCurator) ' \
# 'VALUES ("{0}", "{1}", "{2}", "{3}", {4});'
# add_usr1 = add_usr_template.format('test_user', 'test1.email@smap.com', 'Test1', 'Case1', 0)
# add_usr2 = add_usr_template.format('test_user', 'test2.email@smap.com', 'Test2', 'Case2', 0)
# add_usr3 = add_usr_template.format('test_user', 'test3.email@smap.com', 'Test3', 'Case3', 0)
# add_usr4 = add_usr_template.format('test_user', 'test4.email@smap.com', 'Test4', 'Case4', 0)
# with connection.cursor() as cur:
# cur.execute(add_usr1)
# cur.execute(add_usr2)
# cur.execute(add_usr3)
# cur.execute(add_usr4)
# connection.commit()
#
# @classmethod
# def tearDownClass(cls):
# # clean up the database
# with connection.cursor() as cur:
# cur.execute('SELECT * FROM user WHERE display_name="test_user"')
# li = cur.fetchall()
# for row in li:
# cur.execute('DELETE FROM user WHERE user_id=' + str(row[0]))
# connection.commit()
# connection.close()
#
# def setUp(self):
# self.event = Event()
# self.event.setPath(path)
# self.req_body = dict()
#
# def tearDown(self):
# return
#
# def test_add_new_user(self):
# self.event.setHttpMethod('POST')
#
# # user 1 sends friend request to user 2
# self.req_body['user_1'] = 'test1.email@smap.com'
# self.req_body['user_2'] = 'test2.email@smap.com'
# self.event.setBody(str(self.req_body))
#
# response = main_handler(self.event.getEvent(), context)
# resp_body = response['body']
# status_code = response['statusCode']
# headers = response['headers']
# print(headers['message'])
# print(headers['details'])
#
# # check for correct status code
# self.assertEqual(201, status_code)
#
# # check for correct body
# self.assertEqual(resp_body, str(None))
def test_dummy(self):
return
| [
"dushyantpatel_r@outlook.com"
] | dushyantpatel_r@outlook.com |
e26f787cdac7bccc03eebca71b634c965ab28d9b | 8bf4243abe4f5f66d0068d96dc09e5cf975430d5 | /Selenium_lessons/neskolko_brauserov/auth_data.py | 92e50613b48ae3cde7e5b5defba74be75c6c227c | [] | no_license | Azhdar1990/Parsers | 899776388b4dbe257eb5b6d08dce3f0ef5ea8190 | aa39fd3f651b92405c12caf1bd784ed3afa4f3fd | refs/heads/master | 2023-09-05T12:45:03.604726 | 2021-10-31T10:51:56 | 2021-10-31T10:51:56 | 404,690,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 56 | py | login = "maniyev@rambler.ru"
# SECURITY(review): plaintext credential committed to source control; anyone
# with repository access can read it.  Rotate this password and load it from an
# environment variable or a secrets manager instead of hard-coding it here.
password = "@kmNom@k!k12@"
| [
"amaniyev@gmail.com"
] | amaniyev@gmail.com |
673bb5c0289c9335d604b2d187d96c5573ad2619 | 6ecf6e0545709592996acbe7d7752870a7e7d179 | /GUI/gui1studentinfo.py | 5e0203cf72c384c5532a7ad01a3639ddf854c513 | [] | no_license | namujagtap/Namrata | 9bf152882703ca6d6819c6ddf915d88d47c9a3e2 | f90faabc142238e305511fdd44011e5db9767903 | refs/heads/main | 2023-08-01T22:44:22.360108 | 2021-09-16T05:05:00 | 2021-09-16T05:05:00 | 396,077,357 | 0 | 0 | null | 2021-08-14T17:26:58 | 2021-08-14T17:19:37 | null | UTF-8 | Python | false | false | 1,661 | py | #create student information from using Tkinter
#name,address,email,schooltype,stding in year ,dob,,gender,schooltype,marathi mediunm,english medium,english,convent,semi
import tkinter as tk
from tkinter import *

win = tk.Tk(className="StudentInfoForm......")
win.geometry("1000x1000")

# Form heading.
tk.Label(win, text="***** STUDENT INFORMATION FORM *****").place(x=380, y=10)

# BUG FIX: tkinter's .place() returns None, so chaining it onto the constructor
# (e.g. `e1 = tk.Entry(win).place(...)`) bound every saved name to None and made
# the entry contents unreadable on submit.  Widgets are now created first and
# placed afterwards so the references survive.
tk.Label(win, text="NAME").place(x=35, y=60)
e1 = tk.Entry(win)
e1.place(x=170, y=60)

tk.Label(win, text="ADDRESS").place(x=35, y=90)
e2 = tk.Entry(win)
e2.place(x=170, y=90)

tk.Label(win, text="EMAIL").place(x=35, y=120)
e3 = tk.Entry(win)
e3.place(x=170, y=120)

tk.Label(win, text="SCHOOL NAME ").place(x=35, y=150)
e4 = tk.Entry(win)
e4.place(x=170, y=150)

tk.Label(win, text="STUDING IN YEAR ").place(x=35, y=180)
e5 = tk.Entry(win)
e5.place(x=170, y=180)

tk.Label(win, text="DATE OF BIRTH ").place(x=35, y=210)
e6 = tk.Entry(win)
e6.place(x=170, y=210)

# BUG FIX: the radio buttons originally passed no `variable=`, so all six
# shared tk's implicit default variable, and three school options reused
# value=0 — the two questions interfered and choices were ambiguous.  Each
# group now has its own IntVar and every option a distinct value.
gender_var = tk.IntVar(value=-1)  # -1 = nothing selected yet
tk.Label(win, text=" 1] SELECT GENDER ").place(x=35, y=280)
radio1 = tk.Radiobutton(win, text="Male", variable=gender_var, value=0)
radio1.place(x=120, y=310)
radio2 = tk.Radiobutton(win, text="Female", variable=gender_var, value=1)
radio2.place(x=320, y=310)

school_var = tk.IntVar(value=-1)  # -1 = nothing selected yet
tk.Label(win, text=" 2] SCHOOL TYPE ").place(x=35, y=360)
radio3 = tk.Radiobutton(win, text="Marathi Medium", variable=school_var, value=0)
radio3.place(x=120, y=390)
radio4 = tk.Radiobutton(win, text="English Medium", variable=school_var, value=1)
radio4.place(x=320, y=390)
radio5 = tk.Radiobutton(win, text="Convent Medium", variable=school_var, value=2)
radio5.place(x=520, y=390)
radio6 = tk.Radiobutton(win, text="Semi-English Medium", variable=school_var, value=3)
radio6.place(x=720, y=390)

# Submit button (no command was wired up in the original; kept as-is).
submit = tk.Button(win, text="SUBMIT", activebackground="pink", activeforeground="purple")
submit.place(x=420, y=500)
win.mainloop() | [
"noreply@github.com"
] | noreply@github.com |
2daf9cca1251a8f201f5a3437dc55c28c447f373 | c54896ba0703fc4de01d19ed9e7eecfdfc4aa810 | /Python_Projects/Python_MySQL/Foreign_Trade_Data_Pipe_Delimination/scripts/merchandise_trade_exports.py | e5ba7c2739bc71535561f9073a09b9bc30fd063e | [
"MIT"
] | permissive | YangLiu928/NDP_Projects | f4d1c3d45161ff353f89dff2ebc36fabde28db62 | d2ebfa7b95a0003481dde1361c6ab563ac94f2e6 | refs/heads/master | 2021-01-10T14:39:17.141520 | 2016-04-28T19:57:48 | 2016-04-28T19:57:48 | 45,988,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | import process_data
data_folder = '../data/merchandise_trade_exports/cdromtxt/'
output_folder = '../output/merchandise_trade_exports/'
process_data.process_concord(data_folder, output_folder)
process_data.process_country(data_folder, output_folder)
process_data.process_district(data_folder, output_folder)
process_data.process_enduse(data_folder, output_folder)
process_data.process_exp_comm(data_folder, output_folder)
process_data.process_exp_cty(data_folder, output_folder)
process_data.process_exp_detl(data_folder, output_folder)
process_data.process_exp_dist(data_folder, output_folder)
process_data.process_hitech(data_folder, output_folder)
process_data.process_hsdesc(data_folder, output_folder)
process_data.process_naics(data_folder, output_folder)
process_data.process_sitc(data_folder, output_folder)
print 'work completed'
| [
"yangliu1989@gwu.edu"
] | yangliu1989@gwu.edu |
3a3429686a4f84cd642ada4255092bcd590d5cb0 | 9a0704ef8a2ccd3aff58af8a9623578c5d4f7353 | /app.py | 86919f4ec581d3240bd567d28ff58e685ff4fefc | [] | no_license | rahulagrawal01/KNN-and-Naive-base- | d29396d6d886455ce9d62d844ac9c74d31e0c5f0 | d62318410106fa0a707b39785b5514f083319cb5 | refs/heads/main | 2023-04-14T16:53:53.494660 | 2021-04-24T10:42:59 | 2021-04-24T10:42:59 | 361,135,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,929 | py | import streamlit as st
from PIL import Image
import pickle
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
st.set_option('deprecation.showfileUploaderEncoding', False)
# Load the pickled model
# NOTE(review): the open() handles are never closed; wrap them in `with` when
# next touching this code.  Unpickling also executes arbitrary code — only load
# model files shipped with the app.
model = pickle.load(open('Knearestneighborclassifier.pkl', 'rb'))
model_naive = pickle.load(open('naivebayesclassifier.pkl', 'rb'))
dataset= pd.read_csv('titanic.csv')
# Feature frame.  This column order — Age, SibSp, Parch, Fare, Sex, Pclass —
# is the contract for every sc.transform() call in the predict_* helpers below.
X=dataset[["Age","SibSp","Parch","Fare","Sex","Pclass"]]
from sklearn.preprocessing import LabelEncoder
labelencoder_X = LabelEncoder()
# Encode the Sex strings to integers in place.
# NOTE(review): X is a slice of `dataset`; this assignment can trigger pandas'
# SettingWithCopyWarning.
X["Sex"] = labelencoder_X.fit_transform(X["Sex"])
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
# Fit the scaler once at import time; the UI reuses `sc` for every prediction.
X = sc.fit_transform(X)
def predict_note_authentication(Age,SibSp,Parch,Sex,Fare,Pclass):
  # Classify one passenger with the pickled KNN model; returns the verdict
  # string that the UI displays.
  #
  # NOTE(review): the parameter order here is (..., Sex, Fare, ...) while the
  # caller in main() passes (..., Fare, Sex, ...) positionally.  The two swaps
  # cancel out — the list below therefore reaches sc.transform in the
  # training-column order [Age, SibSp, Parch, Fare, Sex, Pclass] — but the
  # naming is misleading: `Sex` actually holds the fare and `Fare` the sex
  # code.  Do not "fix" one side without the other.
  output= model.predict(sc.transform([[Age,SibSp,Parch,Sex,Fare,Pclass]]))
  # Raw 0/1 model output; the label in this print is misleading (it is printed
  # for survivors too).
  print("Passenger will die =", output)
  if output==[1]:
    prediction="Passanger will survive"
  else:
    prediction="Passanger will die"
  print(prediction)
  return prediction
def predict_naive(Age,SibSp,Parch,Fare,Sex,Pclass):
    """Score one passenger with the pickled naive-Bayes model.

    The argument order mirrors the training frame columns
    (Age, SibSp, Parch, Fare, Sex, Pclass); the row is standardised with the
    module-level ``sc`` scaler before prediction.  Returns the verdict string
    that the UI displays.
    """
    features = sc.transform([[Age, SibSp, Parch, Fare, Sex, Pclass]])
    output = model_naive.predict(features)
    print("Passenger will die =", output)
    prediction = "Passanger will survive" if output == [1] else "Passanger will die"
    print(prediction)
    return prediction
def main():
    """Render the Streamlit page: banner, passenger-detail inputs, and one
    button per classifier.  Predictions are delegated to the module-level
    predict_* helpers and echoed back via st.success."""
    # Static page banner (raw HTML).  NOTE(review): the third <p ...> tag is
    # missing the closing '>' after its style attribute, so that line's text
    # may not render; fix the markup when next editing this string.
    html_temp = """
   <div class="" style="background-color:green;" >
   <div class="clearfix">
   <div class="col-md-12">
   <center><p style="font-size:35px;color:white;margin-top:10px;">Poornima Institute of Engineering & Technology</p></center>
   <center><p style="font-size:29px;color:white;margin-top:10px;">Department of Computer Engineering</p></center>
   <center><p style="font-size:26px;color:white;margin-top:10px;"Machine Learning Lab Experiment 4 KNN and Naive base Algo By Rahul Kr. Agrawal PIET18CS116 section 1</p></center>
   </div>
   </div>
   </div>
   """
    st.markdown(html_temp,unsafe_allow_html=True)
    st.header("Passenger Survived Prediction using KNN And NB By Rahul 116")
    # Passenger features.  NOTE(review): training encoded Sex with a
    # LabelEncoder (presumably 0/1), but this input collects 1-3 — confirm the
    # mapping before trusting predictions.
    Sex = st.number_input('Insert 1 for Male 2 for Female 3 Others',1,3)
    Age = st.number_input('Insert a Age',18,60)
    SibSp = st.number_input('Insert a SibSp',0,10)
    Parch = st.number_input('Insert a Parch',1,10)
    Pclass = st.number_input('Insert a Pclass',1,8)
    Fare = st.number_input("Insert Fare",1,15000)
    resul=""  # unused (typo of `result`); kept as-is to avoid behaviour drift
    if st.button("Predict by KNN "):
      result=predict_note_authentication(Age,SibSp,Parch,Fare,Sex,Pclass)
      st.success('KNN Model has predicted {}'.format(result))
    if st.button("Predict by Naive Bayes "):
      result=predict_naive(Age,SibSp,Parch,Fare,Sex,Pclass)
      st.success('Naive Bayes Model has predicted {}'.format(result))
    if st.button("About"):
      st.subheader("Developed by Rahul kumar agrawal PIET18CS116")
      st.subheader("Department of Computer Engineering Section C1")
# Standard entry point: run the Streamlit app when executed as a script.
if __name__=='__main__':
    main()
| [
"noreply@github.com"
] | noreply@github.com |
210f7ba427b184848d5fb3c985f326bb4dde8314 | d3ac52556da9f8a8c0dddbd4b30a81d67579ed5f | /options.py | 34091e25f6dd787bd663ad5ffd4c92ef9d6de41a | [] | no_license | mustaphaasbbar/Silaty | 59b06eaa614e8d202089a13088fe84c522a4fb79 | f8a4ddc2e7e351b51030759b72bfe9f6b9d3fc77 | refs/heads/master | 2020-04-29T08:16:19.144501 | 2015-04-22T17:59:19 | 2015-04-22T17:59:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,474 | py | import configparser
import os
import datetime
class Calendar(object):
    """Enumeration of the supported prayer-time calculation authorities.

    Plain integer class attributes (0-4), matching the order the original
    ``range(5)`` unpacking produced.
    """
    UmmAlQuraUniv = 0
    EgyptianGeneralAuthorityOfSurvey = 1
    UnivOfIslamicSciencesKarachi = 2
    IslamicSocietyOfNorthAmerica = 3
    MuslimWorldLeague = 4
class Madhab(object):
    """Enumeration of the supported madhahib (juristic schools)."""
    Default = 0
    Hanafi = 1
class Options:
    """Persistent user settings for Silaty, backed by the INI-style file
    ``~/.silaty`` (read with :mod:`configparser`, rewritten by
    :meth:`save_options`).

    All values are stored internally as strings, exactly as read from the
    file; the typed properties below convert on access (e.g. ``latitude`` ->
    float, ``iconlabel`` -> bool).  The ``*_num`` / ``*_name`` properties
    expose the raw string form for code that writes the config back out.
    """
    def __init__(self):
        # Load ~/.silaty.  If the file is missing/incomplete (NoOptionError —
        # note configparser.read() silently yields an empty config when the
        # file does not exist) or holds an unparsable value (ValueError), fall
        # back to built-in defaults and write a fresh settings file.
        print ("DEBUG: Initializing the Options module @", (str(datetime.datetime.now())))
        cparse = configparser.ConfigParser()
        cparse.read([os.path.expanduser('~/.silaty')])
        try:
            self._city = cparse.get('DEFAULT', 'city')
            self._country = cparse.get('DEFAULT', 'country')
            self._calcmethodname = cparse.get('DEFAULT', 'calculation-method')
            self._madhab = cparse.get('DEFAULT', 'madhab')
            self._clockformat = cparse.get('DEFAULT', 'clock-format')
            self._latitude = cparse.get('DEFAULT', 'latitude')
            self._longitude = cparse.get('DEFAULT', 'longitude')
            self._timezone = cparse.get('DEFAULT', 'timezone')
            self._notif = cparse.get('DEFAULT', 'notif')
            self._iconlabel = cparse.get('DEFAULT', 'iconlabel')
            self._startminimized = cparse.get('DEFAULT', 'minimized')
            self._fajradhan = cparse.get('DEFAULT', 'fajr-adhan')
            self._normaladhan = cparse.get('DEFAULT', 'normal-adhan')
            self._audionotifications = cparse.get('DEFAULT', 'audio-notifications')
        except configparser.NoOptionError:
            # Defaults: Makkah coordinates, Umm al-Qura method, 24h clock.
            print ("DEBUG: No configration file using default settings")
            self._city = 'Makkah'
            self._country = 'Saudi Arabia'
            self._latitude = '21.25'
            self._longitude = '39.49'
            self._timezone = '3'
            self._calcmethodname = 'Makkah'
            self._madhab = 'Default'
            self._clockformat = '24h'
            self._notif = '10'
            self._iconlabel = '1'
            self._startminimized = '1'
            self._fajradhan = (self.get_fajr_adhans())[0]
            self._normaladhan = (self.get_normal_adhans())[0]
            self._audionotifications = '1'
            self.save_options()
        except ValueError:
            # A stored value could not be parsed: discard the corrupt file and
            # recreate it with the same defaults as above.
            # NOTE(review): shells out to `rm`; os.remove() would be safer and
            # portable.
            print ("DEBUG: Problem while reading setting file, using the default settings")
            os.system("rm ~/.silaty")
            self._city = 'Makkah'
            self._country = 'Saudi Arabia'
            self._latitude = '21.25'
            self._longitude = '39.49'
            self._timezone = '3'
            self._calcmethodname = 'Makkah'
            self._madhab = 'Default'
            self._clockformat = '24h'
            self._notif = '10'
            self._iconlabel = '1'
            self._startminimized = '1'
            self._fajradhan = (self.get_fajr_adhans())[0]
            self._normaladhan = (self.get_normal_adhans())[0]
            self._audionotifications = '1'
            self.save_options()
    ##Functions with lists for the Buttons
    # Choice lists presented by the preferences UI.
    def get_cal_methods(self):
        # Names here must match the strings handled by `calculation_method`.
        return ['Makkah', 'Egypt', 'Karachi', 'ISNA', 'MWL']
    def get_madhahed(self):
        # Names here must match the strings handled by `madhab`.
        return ['Hanafi','Default']
    def get_clock_formats(self):
        return ['12h', '24h']
    def get_fajr_adhans(self):
        # Adhan audio choices for Fajr: basenames of the .ogg files bundled in
        # audio/Fajr next to this module.  (The `wavfiles` name is historical —
        # the filter matches .ogg, not .wav.)
        dirfiles = os.listdir( os.path.dirname(os.path.realpath(__file__))+"/audio/Fajr/")
        wavfiles = filter(lambda song: song.endswith(".ogg"), dirfiles)
        adhans = list(map(lambda x: os.path.splitext(x)[0], wavfiles))
        return adhans
    def get_normal_adhans(self):
        # Same as get_fajr_adhans(), but for the audio/Normal directory.
        dirfiles = os.listdir( os.path.dirname(os.path.realpath(__file__))+"/audio/Normal/")
        wavfiles = filter(lambda song: song.endswith(".ogg"), dirfiles)
        adhans = list(map(lambda x: os.path.splitext(x)[0], wavfiles))
        return adhans
    ##Functions to get and set settings
    # Raw '0'/'1' string form, as written to the settings file.
    @property
    def audio_notifications_num(self):
        return self._audionotifications
    @audio_notifications_num.setter
    def audio_notifications_num(self, value):
        self._audionotifications = value
    # Boolean view over audio_notifications_num.
    # NOTE: the DEBUG text says "icon label" — a copy/paste leftover; these
    # log audio-notification access.
    @property
    def audio_notifications(self):
        print ("DEBUG: getting icon label settings @", (str(datetime.datetime.now())))
        if self.audio_notifications_num == '1':
            return True
        else:
            return False
    @audio_notifications.setter
    def audio_notifications(self, data):
        print ("DEBUG: setting icon label settings @", (str(datetime.datetime.now())))
        if data == True:
            self.audio_notifications_num = '1'
        else:
            self.audio_notifications_num = '0'
    # Selected adhan audio clip names (no path, no extension).
    @property
    def fajr_adhan(self):
        return self._fajradhan
    @fajr_adhan.setter
    def fajr_adhan(self, value):
        self._fajradhan = value
    @property
    def normal_adhan(self):
        return self._normaladhan
    @normal_adhan.setter
    def normal_adhan(self, value):
        self._normaladhan = value
    @property
    def city(self):
        print ("DEBUG: getting city settings @", (str(datetime.datetime.now())))
        return self._city
    @city.setter
    def city(self, data):
        print ("DEBUG: setting city settings @", (str(datetime.datetime.now())))
        self._city = data
    @property
    def country(self):
        print ("DEBUG: getting country settings @", (str(datetime.datetime.now())))
        return self._country
    @country.setter
    def country(self, value):
        print ("DEBUG: setting country settings @", (str(datetime.datetime.now())))
        self._country = value
    # String name of the calculation method ('Makkah', 'Egypt', ...).
    @property
    def calculation_method_name(self):
        return self._calcmethodname
    @calculation_method_name.setter
    def calculation_method_name(self, value):
        self._calcmethodname = value
    # Maps the stored method name to the Calendar enumeration value.
    # Returns None implicitly for unknown names.
    @property
    def calculation_method(self):
        print ("DEBUG: getting calculation method settings @", (str(datetime.datetime.now())))
        if self.calculation_method_name == 'Makkah':
            return Calendar.UmmAlQuraUniv
        elif self.calculation_method_name == 'Egypt':
            return Calendar.EgyptianGeneralAuthorityOfSurvey
        elif self.calculation_method_name == 'Karachi':
            return Calendar.UnivOfIslamicSciencesKarachi
        elif self.calculation_method_name == 'ISNA':
            return Calendar.IslamicSocietyOfNorthAmerica
        elif self.calculation_method_name == 'MWL':
            return Calendar.MuslimWorldLeague
    @calculation_method.setter
    def calculation_method(self, data):
        # NOTE: despite the name, the setter expects the *name string*, which
        # it stores via calculation_method_name.
        print ("DEBUG: setting calculation method settings @", (str(datetime.datetime.now())))
        self.calculation_method_name = data
    @property
    def madhab_name(self):
        return self._madhab
    @madhab_name.setter
    def madhab_name(self, value):
        self._madhab = value
    # Maps the stored madhab name to the Madhab enumeration value.
    @property
    def madhab(self):
        print ("DEBUG: getting madhab settings @", (str(datetime.datetime.now())))
        if self.madhab_name == 'Default':
            return Madhab.Default
        if self.madhab_name == 'Hanafi':
            return Madhab.Hanafi
    @madhab.setter
    def madhab(self, data):
        # NOTE: like calculation_method, this stores the raw value into the
        # name slot (_madhab); callers pass the name string.
        print ("DEBUG: setting madhab settings @", (str(datetime.datetime.now())))
        self._madhab = data
    # Location values are stored as strings; converted to float on read.
    @property
    def latitude(self):
        print ("DEBUG: getting latitude settings @", (str(datetime.datetime.now())))
        return float(self._latitude)
    @latitude.setter
    def latitude(self, data):
        print ("DEBUG: setting latitude settings @", (str(datetime.datetime.now())))
        self._latitude = str(data)
    @property
    def longitude(self):
        print ("DEBUG: getting longitude settings @", (str(datetime.datetime.now())))
        return float(self._longitude)
    @longitude.setter
    def longitude(self, data):
        print ("DEBUG: setting longitude settings @", (str(datetime.datetime.now())))
        self._longitude = str(data)
    @property
    def timezone(self):
        print ("DEBUG: getting timezone settings @", (str(datetime.datetime.now())))
        return float(self._timezone)
    @timezone.setter
    def timezone(self, data):
        print ("DEBUG: setting timezone settings @", (str(datetime.datetime.now())))
        self._timezone = str(data)
    # Minutes before prayer to notify (stored as string, read as float).
    @property
    def notification_time(self):
        print ("DEBUG: getting notification time settings @", (str(datetime.datetime.now())))
        return float(self._notif)
    @notification_time.setter
    def notification_time(self, data):
        print ("DEBUG: setting notification time settings @", (str(datetime.datetime.now())))
        self._notif = str(data)
    # Raw '0'/'1' string form of the icon-label flag.
    @property
    def iconlabel_num(self):
        return self._iconlabel
    @iconlabel_num.setter
    def iconlabel_num(self, value):
        self._iconlabel = value
    # Boolean view over iconlabel_num.
    @property
    def iconlabel(self):
        print ("DEBUG: getting icon label settings @", (str(datetime.datetime.now())))
        if self.iconlabel_num == '1':
            return True
        else:
            return False
    @iconlabel.setter
    def iconlabel(self, data):
        print ("DEBUG: setting icon label settings @", (str(datetime.datetime.now())))
        if data == True:
            self.iconlabel_num = '1'
        else:
            self.iconlabel_num = '0'
    # Raw '0'/'1' string form of the start-minimized flag.
    @property
    def start_minimized_num(self):
        return self._startminimized
    @start_minimized_num.setter
    def start_minimized_num(self, value):
        self._startminimized = value
    # Boolean view over start_minimized_num.
    # NOTE: the DEBUG text says "icon label" — copy/paste leftover.
    @property
    def start_minimized(self):
        print ("DEBUG: getting icon label settings @", (str(datetime.datetime.now())))
        if self.start_minimized_num == '1':
            return True
        else:
            return False
    @start_minimized.setter
    def start_minimized(self, data):
        print ("DEBUG: setting icon label settings @", (str(datetime.datetime.now())))
        if data == True:
            self.start_minimized_num = '1'
        else:
            self.start_minimized_num = '0'
    # '12h' or '24h'; see get_clock_formats().
    @property
    def clock_format(self):
        print ("DEBUG: getting clock format settings @", (str(datetime.datetime.now())))
        return self._clockformat
    @clock_format.setter
    def clock_format(self, data):
        print ("DEBUG: setting clock format settings @", (str(datetime.datetime.now())))
        self._clockformat = data
    ## Function to save the options
    def save_options(self):
        # Serialise the current settings into ~/.silaty, overwriting the file.
        # The %s slots are filled in the same order the keys appear in the
        # template (notif, iconlabel, audio-notifications, minimized, ...).
        # NOTE(review): the file handle is opened/closed manually; a `with`
        # block would be safer against exceptions mid-write.
        print ("DEBUG: saving settings file @", (str(datetime.datetime.now())))
        config = open(os.path.expanduser('~/.silaty'), 'w')
        Text='''# Silaty Settings File
[DEFAULT]
# Location Information
city = %s
country = %s
latitude = %s
longitude = %s
timezone = %s
# Possible Values for Calculation Methods
# Makkah
# Egypt
# Karachi
# ISNA
# MWL
calculation-method = %s
# Possible Values for Madhaheb
# Default
# Hanafi
madhab = %s
# Possible Values for Clock Format
# 24h
# 12h
clock-format = %s
# Time before prayer for notification
notif = %s
# Display Time by the indicator icon
iconlabel = %s
# Should audio notifications be enabled
audio-notifications = %s
# Should the application state minimized
minimized = %s
# Paths to the audio files
fajr-adhan = %s
normal-adhan = %s
''' % (self.city, self.country, self.latitude, self.longitude, self.timezone,\
self.calculation_method_name, self.madhab_name, self.clock_format, \
self.notification_time, self.iconlabel_num, self.audio_notifications_num,self.start_minimized_num, \
self.fajr_adhan, self.normal_adhan)
        config.write(Text)
        config.close()
| [
"www.jwb@gmail.com"
] | www.jwb@gmail.com |
f4eb52622028a08e0dec011b2b776b1650007f4e | 00c33337f4023c8d257a7da1c47db4bf36441b94 | /events/migrations/0002_auto_20190807_1713.py | c16f45334c7935886774babcf5dea0f26385fdd3 | [] | no_license | ECellNitrr/EcellWeb2k19 | 694d733737b1ac26c136994ee631f36904ca942d | 75d53aad788d6deac8c5dda72cabf34b8d4bcb15 | refs/heads/master | 2021-06-24T06:23:09.097667 | 2021-01-12T08:37:48 | 2021-01-12T08:37:48 | 189,839,122 | 4 | 23 | null | 2021-01-12T08:24:49 | 2019-06-02T11:39:44 | JavaScript | UTF-8 | Python | false | false | 828 | py | # Generated by Django 2.2.2 on 2019-08-07 17:13
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add user foreign keys to the ``events`` app models.

    Auto-generated by ``makemigrations`` (Django 2.2.2); avoid hand-editing
    the operations — the migration graph depends on this file's name and
    dependencies.
    """
    # NOTE(review): `initial = True` alongside a dependency on the app's own
    # '0001_initial' is unusual — presumably a squash artefact; verify before
    # relying on --fake-initial behaviour.
    initial = True
    dependencies = [
        ('events', '0001_initial'),
        # The user model is swappable, so depend on settings.AUTH_USER_MODEL
        # rather than a hard-coded app.Model label.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # EventRegister.user: required link; deleting the user cascades and
        # deletes the registration row.
        migrations.AddField(
            model_name='eventregister',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        # Event.ecell_user: optional link; deleting the user NULLs the column.
        migrations.AddField(
            model_name='event',
            name='ecell_user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"naveennvrgup@gmail.com"
] | naveennvrgup@gmail.com |
19c873ebd90585623d912f2794ba9b06bb70f3d7 | 0634aed371b1c08888bd0b69be98c02779a49889 | /Chapters/code/appendices3.py | 3917a26bbd1e5e3bb99cbfa0dfeba4f77b3a73f8 | [] | no_license | samiarja/Thesis2 | e3918c3ea2b5ad15412e79368f7a1c6f82d544e8 | 4954b36c9f24881b4a724fdefcd29cd03aa0ec1b | refs/heads/master | 2020-06-01T10:39:41.384465 | 2019-12-21T03:01:52 | 2019-12-21T03:01:52 | 190,751,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,251 | py | #Make the code running on both python2 and python3
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
# Imports Libraries and dependencies
import os
import cv2
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
raw_data = load_files(os.getcwd() + r'/Data',
shuffle=False)
files = raw_data['filenames']
targets = raw_data['target']
train_files,
test_files,
train_targets,
test_targets = train_test_split(files,
targets,
test_size=1/3,
random_state=191)
# Taking ~25% of the training data for validation
valid_files = train_files[300:]
valid_targets = train_targets[300:]
# Remaining data will be used for training the model
train_files = train_files[:300]
train_targets = train_targets[:300]
# Generic details about the data
print('Total number of videos:', len(files))
print('\nNumber of videos in training data:',
train_files.shape[0])
print('Number of videos in validation data:',
valid_files.shape[0])
print('Number of videos in test data:',
test_files.shape[0])
| [
"sami18040571@outlook.com"
] | sami18040571@outlook.com |
01d0564258a79bdc181836896a6e65794b1dbcee | 6b94e0aba8e1bd3daf4e6ca4ab472007ab13bf97 | /Py 16.py | 8dc56e4adaa822d846e38227617dfeb958671ec6 | [] | no_license | Dre-AsiliVentures/Python-Programming-scripts | 92ccdb29fb302387386bc3f1714918ba2f97c07f | 6ea0408965276afee5b5a1603e56703853075023 | refs/heads/master | 2023-05-10T10:09:56.460191 | 2020-03-02T09:46:52 | 2020-03-02T09:46:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | numbers=[12,13,14,15,18]
# `numbers` was initialised to [12, 13, 14, 15, 18] on the previous line.
# numbers[2]*2 - 6 == 14*2 - 6 == 22, so the list becomes [22, 13, 14, 15, 18].
numbers[0]=numbers[2]*2-6
if 20 in numbers:
    print(numbers[3])
else:
    # 20 is not in [22, 13, 14, 15, 18], so this branch runs and prints
    # numbers[4] == 18.
    print(numbers[4])
| [
"noreply@github.com"
] | noreply@github.com |
294e27c16b37a8cdf7816d7381f0bd726197257e | 86a85d81ae484939694b76bda29057e744022016 | /src/tf_transformers/models/roberta.py | 9ea34f9f5a0da63614f2727587e8f64514ed8c91 | [
"Apache-2.0"
] | permissive | Vibha111094/tf-transformers | 903f16224434d4ded9174f6fe2de8bb6bead1fd9 | f26d440a4de0557e0e481279bfd70a732aaa8825 | refs/heads/main | 2023-07-27T16:11:22.101410 | 2021-07-23T15:33:39 | 2021-07-23T15:33:39 | 405,870,934 | 1 | 0 | Apache-2.0 | 2021-09-13T07:15:31 | 2021-09-13T07:15:31 | null | UTF-8 | Python | false | false | 49,343 | py | from __future__ import absolute_import, division, print_function
import tensorflow as tf
from absl import logging
from tf_transformers.activations import get_activation
from tf_transformers.core import LegacyLayer
from tf_transformers.layers import MLMLayer, OnDeviceEmbedding, SimplePositionEmbedding
from tf_transformers.layers.mask import CausalMask, CrossAttentionMask, SelfAttentionMask, prefix_mask
from tf_transformers.layers.transformer import TransformerBERT
logging.set_verbosity("INFO")
class ROBERTAEncoder(LegacyLayer):
"""RoBERTa based encoder / Decoder .
RoBERTa: A Robustly Optimized BERT Pretraining Approach
Authors: Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi,
Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov
Implementation of Roberta in TF2.0
Paper: https://arxiv.org/abs/1907.11692
Official Code: https://github.com/pytorch/fairseq/tree/master/examples/roberta
"""
    def __init__(
        self,
        config,
        mask_mode="user_defined",
        name=None,
        use_dropout=False,
        is_training=None,
        batch_size=None,
        sequence_length=None,
        use_type_embeddings=True,
        use_positonal_embeddings=True,
        pipeline_mode=None,
        is_decoder=False,
        cross_attention_inside_encoder=False,
        share_attention_layers=True,
        share_encoder_embeddings=False,
        encoder_embedding_layer=None,
        encoder_type_embedding_layer=None,
        encoder_positional_embedding_layer=None,
        use_mlm_layer=False,
        return_all_layer_token_embeddings=True,
        attention_type="full_attention",
        **kwargs,
    ):
        """Build the RoBERTa encoder/decoder stack.

        Args:
            config: dict of hyper-parameters (vocab_size, num_hidden_layers,
                num_attention_heads, hidden/intermediate sizes, dropout rates,
                activations, layer_norm_epsilon, ...).
            mask_mode: str. `user_defined` (mask PAD/MLM positions via an
                `input_mask` input), `causal`, or `prefix`.
            name: str, model name. Must not be left as None — it is used with
                .startswith() below and prefixed with "tf_transformers/".
            use_dropout: bool, strictly optional; per the original docs it is
                forced to False when `is_training` is False.
            is_training: bool or None, training-mode flag passed to layers.
            batch_size: int or None, static batch size.
            sequence_length: int or None, static sequence length.
            use_type_embeddings: bool, add token-type embeddings (BERT has
                them, GPT2 does not).
            use_positonal_embeddings: bool, add positional embeddings (T5 does
                not use them; parameter spelling kept for config compat).
            pipeline_mode: presumably selects the serving pipeline (handled in
                LegacyLayer helpers) — confirm against LegacyLayer.
            is_decoder: bool, if True build in decoder mode (as in Seq2Seq).
            cross_attention_inside_encoder: bool, Encoder-Decoder cross
                attention inside each layer.
            share_attention_layers: bool, forwarded to TransformerBERT.
            share_encoder_embeddings: bool, when decoding, reuse the three
                encoder embedding layers passed in via the next arguments.
            encoder_embedding_layer: word-embedding layer from the encoder
                (used only when share_encoder_embeddings is True).
            encoder_type_embedding_layer: type-embedding layer from encoder.
            encoder_positional_embedding_layer: positional-embedding layer
                from encoder.
            use_mlm_layer: bool, add an MLM head plus a trainable output bias.
            return_all_layer_token_embeddings: bool, return token embeddings
                of every layer, not just the final one.
            attention_type: str, e.g. "full_attention"; forwarded to
                TransformerBERT.
            **kwargs: forwarded to the LegacyLayer/keras base (the "name" key
                is overwritten below).
        """
        # Copy hyper-parameters out of the config dict onto the instance.
        # Because saved_model causes some serialization problems here
        # self.config = config
        self.vocab_size = config["vocab_size"]
        self.type_vocab_size = config["type_vocab_size"]
        self.num_hidden_layers = config["num_hidden_layers"]
        self.num_attention_heads = config["num_attention_heads"]
        self.attention_head_size = config["attention_head_size"]
        self.max_position_embeddings = config["max_position_embeddings"]
        self.intermediate_size = config["intermediate_size"]
        self.embedding_size = config["embedding_size"]
        self.initializer_range = config["initializer_range"]
        self.hidden_act = config["hidden_act"]
        self.hidden_dropout_prob = config["hidden_dropout_prob"]
        self.attention_probs_dropout_prob = config["attention_probs_dropout_prob"]
        self.intermediate_act = config["intermediate_act"]
        self.layer_norm_epsilon = config["layer_norm_epsilon"]
        # Get activation and initiliazers
        self.activation = get_activation(self.hidden_act)
        self.intermediate_activation = get_activation(self.intermediate_act)
        initializer = tf.keras.initializers.TruncatedNormal(stddev=self.initializer_range)
        self.initializer = tf.keras.initializers.get(initializer)
        self.mask_mode = mask_mode
        # If we use self.name , its a conflict with keras property
        self.model_name = name
        self.pipeline_mode = pipeline_mode
        self.is_decoder = is_decoder
        # self._self_setattr_tracking = False
        # NOTE: mask_mode is assigned a second time here — harmless duplicate
        # of the assignment a few lines above.
        self.mask_mode = mask_mode
        self.use_dropout = use_dropout
        self.is_training = is_training
        self.batch_size = batch_size
        self.sequence_length = sequence_length
        self.use_type_embeddings = use_type_embeddings
        self.use_positonal_embeddings = use_positonal_embeddings
        self.share_encoder_embeddings = share_encoder_embeddings
        self.share_attention_layers = share_attention_layers
        self.use_mlm_layer = use_mlm_layer
        self.cross_attention_inside_encoder = cross_attention_inside_encoder
        self.return_all_layer_token_embeddings = return_all_layer_token_embeddings
        self.attention_type = attention_type
        # Namespace the keras layer name under "tf_transformers/".
        # NOTE(review): name=None (the default) would raise AttributeError on
        # the startswith call — callers must supply a name.
        if not name.startswith("tf_transformers"):
            kwargs["name"] = "tf_transformers/" + self.model_name
        else:
            kwargs["name"] = self.model_name
        # Inherited from LegacyLayer (not visible in this file) — presumably
        # validates/normalises the input configuration.
        self.validate_and_set_inputs()
        super(ROBERTAEncoder, self).__init__(is_training=self.is_training, use_dropout=self.use_dropout, **kwargs)
        # Serializable snapshot of the effective configuration; `config`
        # entries override the values captured here.
        self._config_dict = {
            "initializer": tf.keras.initializers.serialize(initializer),
            "is_training": self.is_training,
            "use_dropout": self.use_dropout,
            "batch_size": self.batch_size,
            "sequence_length": self.sequence_length,
            "name": kwargs["name"],
            "use_type_embeddings": self.use_type_embeddings,
            "use_positonal_embeddings": self.use_positonal_embeddings,
            "is_decoder": self.is_decoder,
            "share_encoder_embeddings": self.share_encoder_embeddings,
            "share_attention_layers": self.share_attention_layers,
            "cross_attention_inside_encoder": cross_attention_inside_encoder,
            "return_all_layer_token_embeddings": self.return_all_layer_token_embeddings,
        }
        # Update config dict with passed config
        self._config_dict.update(config)
        # Call embedding layers
        self._embedding_layer, self._type_embeddings, self._position_embedding_layer = self.get_embedding_layers()
        if self.is_decoder:
            # If embedding has to shared from the encoder
            if self.share_encoder_embeddings:
                self._embedding_layer = encoder_embedding_layer
                self._type_embeddings = encoder_type_embedding_layer
                self._position_embedding_layer = encoder_positional_embedding_layer
        # Embedding Norm
        self._embedding_norm = tf.keras.layers.LayerNormalization(
            name="embeddings/layer_norm",
            axis=-1,
            epsilon=self.layer_norm_epsilon,
            dtype=tf.float32,
        )
        # Embedding dropout Layer
        self._embedding_dropout = tf.keras.layers.Dropout(rate=self.hidden_dropout_prob)
        # Transformer Layer
        # One TransformerBERT block per hidden layer.
        self._transformer_layers = []
        for i in range(self.num_hidden_layers):
            layer = TransformerBERT(
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                intermediate_activation=self.activation,
                dropout_rate=self.hidden_dropout_prob,
                attention_dropout_rate=self.attention_probs_dropout_prob,
                kernel_initializer=self.initializer,
                is_training=self.is_training,
                use_dropout=self.use_dropout,
                is_decoder=is_decoder,
                share_attention_layers=share_attention_layers,
                layer_norm_epsilon=self.layer_norm_epsilon,
                cross_attention_inside_encoder=self.cross_attention_inside_encoder,
                attention_type=self.attention_type,
                name="transformer/layer_%d" % i,
            )
            self._transformer_layers.append(layer)
        # BERT-style pooler over the first token's hidden state.
        self._pooler_layer = tf.keras.layers.Dense(
            units=self.embedding_size,
            activation="tanh",
            kernel_initializer=self.initializer,
            name="pooler_transform",
        )
        if self.use_mlm_layer:
            # Masked-LM head plus a per-vocab-entry bias added to the logits.
            self.mlm_layer = MLMLayer(
                self.embedding_size,
                self.layer_norm_epsilon,
                self.hidden_act,
                name="mlm_layer",
            )
            self._last_logits_bias = self.add_weight(
                "tf_transformers/last_logits_bias",
                shape=(self.vocab_size,),
                dtype=tf.float32,
                trainable=True,
            )
        # Pick the forward implementation (train vs predict/caching path);
        # defined on LegacyLayer, not visible in this file.
        self.call_fn = self.get_call_method()
        # Initialize model
        self.model_inputs, self.model_outputs = self.get_model(initialize_only=True)
        logging.info("Initialized Variables")
    def call_predict(self, inputs):
        """Forward pass used when ``is_training = False`` (auto-regressive decoding).

        The past `key` and `value` tensors of every transformer layer are
        cached and re-used across decoding steps, so only the new tokens are
        processed — this makes inference/NLG much faster for auto-regressive
        decoding.

        Args:
            inputs: dict with keys ``input_ids``, ``all_cache_key``,
                ``all_cache_value``, ``past_length`` and, depending on
                ``mask_mode`` / ``use_type_embeddings``, ``input_mask`` and
                ``input_type_ids``. For variable-batch decoding, ``input_ids``
                may be right-padded with ``-1``.

        Returns:
            dict with ``cls_output``, ``token_logits``, ``token_embeddings``,
            ``last_token_logits``, ``past_length`` (2D), and the updated
            ``all_cache_key`` / ``all_cache_value`` stacked as
            num_layers x batch_size x num_heads x sequence_length x
            (hidden_dimension / num_heads).
        """
        input_ids_mod = inputs["input_ids"]
        all_cache_key = inputs["all_cache_key"]
        all_cache_value = inputs["all_cache_value"]
        past_length = inputs["past_length"]
        # Come from kwargs
        if self.mask_mode in ["user_defined", "prefix"]:
            input_mask = inputs["input_mask"]
        if self.use_type_embeddings:
            input_type_ids = inputs["input_type_ids"]
        # Convert past_length 2D to 1D
        past_length = tf.squeeze(past_length, 0)
        # In case of variable batch decoding, we will pad the inputs with -1
        # So, we will replace -1 with 0, because -1 is not a valid index in word embeddings
        # >> input_ids_mod = [[ 1, 5, 7, 8, 10],
        # 2, 3, -1, -1, -1]]
        #
        # >> input_ids = [[1, 5, 7, 8,10],
        # 2, 3, 0, 0, 0]]
        input_ids = input_ids_mod * tf.cast(tf.not_equal(input_ids_mod, -1), tf.int32)
        sequence_length = tf.shape(input_ids)[1]
        # Asserting: one cache entry per transformer layer
        tf.assert_equal(tf.shape(all_cache_value)[0], self.num_hidden_layers)
        # Step 0 of inference. For step0, we do not have valid cache. We pass zero tensor
        def step_0(input_ids):
            sequence_length = tf.shape(input_ids)[1]
            position_embeddings = self._position_embedding_layer(tf.range(sequence_length))
            return sequence_length, position_embeddings
        # From step_1 (autoregressive mode starts) onwards, we need to account for
        # `past_length` of previous words (inputs + generated) . Due to our logic,
        # we need to take a transpose of `position_embeddings` in this specific setting
        def step_other(input_ids):
            sequence_length = tf.shape(input_ids)[1]
            # Because past_length varies with batch
            position_embeddings = self._position_embedding_layer(past_length + sequence_length)
            position_embeddings = tf.transpose(position_embeddings, [1, 0, 2])
            return sequence_length, position_embeddings
        # Condition to switch functions
        # if `sum(past_length) = 0` , means no outputs has been generated.
        # the given inputs is the first input
        sequence_length, positional_embeddings = tf.cond(
            tf.equal(tf.reduce_sum(past_length), 0),
            lambda: step_0(input_ids),
            lambda: step_other(input_ids),
        )
        # Unstack the stacked caches into one (batch, heads, seq, dim) tensor per layer
        all_cache_key = [
            tf.squeeze(item, axis=0)
            for item in tf.split(all_cache_key, num_or_size_splits=self.num_hidden_layers, axis=0)
        ]
        all_cache_value = [
            tf.squeeze(item, axis=0)
            for item in tf.split(all_cache_value, num_or_size_splits=self.num_hidden_layers, axis=0)
        ]
        word_embeddings = self._embedding_layer(input_ids)
        embeddings = word_embeddings
        # Add word_embeddings + position_embeddings + type_embeddings
        if self.use_type_embeddings:
            type_embeddings = self._type_embeddings(input_type_ids)
            embeddings = embeddings + type_embeddings
        if self.use_positonal_embeddings:
            embeddings = embeddings + positional_embeddings
        # Norm + dropout
        embeddings = self._embedding_norm(embeddings)
        embeddings = self._embedding_dropout(embeddings, training=self.use_dropout)
        # Initialize `attention_mask` as empty list
        attention_mask = []
        if self.mask_mode == "user_defined":
            attention_mask = SelfAttentionMask()([embeddings, input_mask])
        if self.mask_mode == "prefix":
            attention_mask = tf.map_fn(prefix_mask, input_mask, fn_output_signature=tf.float32)
        if self.mask_mode == "causal":
            attention_mask = CausalMask()(embeddings)
        encoder_outputs = []
        # Make all -1 positions to 0 (as -1 represents padding in the input)
        mask_values = tf.cast(tf.not_equal(input_ids_mod, -1), tf.float32)
        # We want zero values , where embeddings inputs where 0 (by replacing PAD -1)
        # So we use the mask and multiply it with embeddings
        embeddings = embeddings * tf.expand_dims(mask_values, -1)
        for i in range(self.num_hidden_layers):
            layer = self._transformer_layers[i]
            # Fetching this layer's cached key/value
            cache_value = all_cache_value[i]
            cache_key = all_cache_key[i]
            embeddings, cache_key, cache_value = layer(
                [embeddings, attention_mask],
                cache_key=cache_key,
                cache_value=cache_value,
            )
            # Updating the cache in place for the next decoding step
            all_cache_key[i] = cache_key
            all_cache_value[i] = cache_value
            # Mask next layer embedding (PAD positions to 0)
            embeddings = tf.identity(
                embeddings * tf.expand_dims(mask_values, -1),
                name="encoder_outputs_{}".format(i),
            )
            encoder_outputs.append(embeddings)
        # First word of last layer outputs [CLS]
        cls_token_tensor = tf.keras.layers.Lambda(lambda x: tf.squeeze(x[:, 0:1, :], axis=1))(encoder_outputs[-1])
        # batch_size x embedding_size
        cls_output = self._pooler_layer(cls_token_tensor)
        # batch_size x sequence_length x embedding_size
        token_embeddings = encoder_outputs[-1]
        # MLM Projection
        if self.use_mlm_layer:
            token_embeddings = self.mlm_layer(token_embeddings)
            # token --> vocab ( batch_size x sequence_length x vocab_size)
            token_logits = (
                tf.matmul(
                    token_embeddings,
                    self.get_embedding_table(),
                    transpose_b=True,
                    name="token_logits",
                )
                + self._last_logits_bias
            )
        else:
            # token --> vocab ( batch_size x sequence_length x vocab_size)
            token_logits = tf.matmul(
                token_embeddings,
                self.get_embedding_table(),
                transpose_b=True,
                name="token_logits",
            )
        # Step 0: each row's "last" real token sits before its -1 padding,
        # so gather per-row using the count of non-padding tokens.
        def step_0_gather(past_length, token_embeddings):
            cache_length = tf.reduce_sum(tf.cast(tf.not_equal(input_ids_mod, -1), tf.int32), axis=1) - 1
            # Getting corresponding last token tensor and last token logits
            last_token_tensor = tf.gather_nd(token_embeddings, tf.expand_dims(cache_length, axis=1), batch_dims=1)
            past_length = past_length + cache_length
            return past_length, last_token_tensor
        # Later steps: exactly one new token per row, so the last position works.
        def step_other_gather(past_length, token_embeddings):
            past_length = past_length + sequence_length
            last_token_tensor = tf.keras.layers.Lambda(lambda x: x[:, -1, :])(token_embeddings)
            return past_length, last_token_tensor
        # Condition to switch functions (When batch_size > 1,
        # past_length will be different for each entry)
        # if `sum(past_length) = 0` , means no outputs has been generated.
        # the given inputs is the first input
        past_length, last_token_tensor = tf.cond(
            tf.equal(tf.reduce_sum(past_length), 0),
            lambda: step_0_gather(past_length, token_embeddings),
            lambda: step_other_gather(past_length, token_embeddings),
        )
        # token --> vocab ( batch_size x sequence_length x vocab_size)
        last_token_logits = tf.matmul(
            last_token_tensor,
            self.get_embedding_table(),
            transpose_b=True,
            name="token_logits",
        )
        # Expand dims of past_length back to 2D
        past_length = tf.expand_dims(past_length, 0, name="past_length")
        # Stack all layers key and value together
        # num_layers x batch_size x num_heads x sequence_length x (hidden_dimension/num_heads)
        all_cache_key = tf.stack(all_cache_key, axis=0, name="all_cache_key")
        all_cache_value = tf.stack(all_cache_value, axis=0, name="all_cache_value")
        return {
            "cls_output": cls_output,
            "token_logits": token_logits,
            "token_embeddings": token_embeddings,
            "last_token_logits": last_token_logits,
            "past_length": past_length,
            "all_cache_key": all_cache_key,
            "all_cache_value": all_cache_value,
        }
    def call_training(self, inputs):
        """Forward Pass for BERT (training / non-cached pipeline).

        Args:
            inputs: dict
                inputs is a dict with keys [`input_ids` , `input_mask`, `input_type_ids`].
                These keys might or might not be present based on `mask_mode` and other criterias

        Returns:
            dict with `cls_output` (pooled [CLS] vector), `token_embeddings`
            (last-layer hidden states), `token_logits` (projection onto the
            vocabulary), `last_token_logits`, plus
            `all_layer_token_embeddings` / `all_layer_cls_output` when
            `return_all_layer_token_embeddings` is enabled.
        """
        input_ids = inputs["input_ids"]
        # When `mask_mode` is `causal` , input_mask is not required
        if self.mask_mode in ["user_defined", "prefix"]:
            input_mask = inputs["input_mask"]
        # Default True in BERT
        if self.use_type_embeddings:
            input_type_ids = inputs["input_type_ids"]
        sequence_length = tf.shape(input_ids)[1]
        word_embeddings = self._embedding_layer(input_ids)
        embeddings = word_embeddings
        # Add word_embeddings + position_embeddings + type_embeddings
        if self.use_type_embeddings:
            type_embeddings = self._type_embeddings(input_type_ids)
            embeddings = embeddings + type_embeddings
        if self.use_positonal_embeddings:
            positional_embeddings = self._position_embedding_layer(tf.range(sequence_length))
            embeddings = embeddings + positional_embeddings
        # Norm + dropout
        embeddings = self._embedding_norm(embeddings)
        embeddings = self._embedding_dropout(embeddings, training=self.use_dropout)
        # block/bigbird attention consumes the raw 2D input_mask directly
        if self.attention_type == "block_attention" or self.attention_type == "bigbird":
            attention_mask = input_mask
        else:
            # Initialize `attention_mask` as empty list
            attention_mask = []
            if self.mask_mode == "user_defined":
                attention_mask = SelfAttentionMask()([embeddings, input_mask])
            if self.mask_mode == "prefix":
                attention_mask = tf.map_fn(prefix_mask, input_mask, dtype=tf.float32)
            if self.mask_mode == "causal":
                attention_mask = CausalMask()(embeddings)
        encoder_outputs = []
        for i in range(self.num_hidden_layers):
            layer = self._transformer_layers[i]
            embeddings, _, _ = layer([embeddings, attention_mask])
            encoder_outputs.append(embeddings)
        # First word of last layer outputs [CLS]
        cls_token_tensor = tf.keras.layers.Lambda(lambda x: tf.squeeze(x[:, 0:1, :], axis=1))(encoder_outputs[-1])
        # batch_size x embedding_size
        cls_output = self._pooler_layer(cls_token_tensor)
        # batch_size x sequence_length x embedding_size
        token_embeddings = encoder_outputs[-1]
        # Pooled [CLS] output from every layer, not only the last one
        all_cls_output = []
        for per_layer_token_embeddings in encoder_outputs:
            per_cls_token_tensor = tf.keras.layers.Lambda(lambda x: tf.squeeze(x[:, 0:1, :], axis=1))(
                per_layer_token_embeddings
            )
            all_cls_output.append(self._pooler_layer(per_cls_token_tensor))
        # MLM Projection
        if self.use_mlm_layer:
            token_embeddings = self.mlm_layer(token_embeddings)
            # token --> vocab ( batch_size x sequence_length x vocab_size)
            token_logits = (
                tf.matmul(
                    token_embeddings,
                    self.get_embedding_table(),
                    transpose_b=True,
                    name="token_logits",
                )
                + self._last_logits_bias
            )
        else:
            # token --> vocab ( batch_size x sequence_length x vocab_size)
            token_logits = tf.matmul(
                token_embeddings,
                self.get_embedding_table(),
                transpose_b=True,
                name="token_logits",
            )
        last_token_logits = tf.keras.layers.Lambda(lambda x: x[:, -1, :])(token_logits)
        result = {
            "cls_output": cls_output,
            "token_embeddings": token_embeddings,
            "token_logits": token_logits,
            "last_token_logits": last_token_logits,
        }
        if self.return_all_layer_token_embeddings:
            result["all_layer_token_embeddings"] = encoder_outputs
            result["all_layer_cls_output"] = all_cls_output
        return result
    def call_cross_attention_encoder(self, inputs):
        """Training forward pass for a shared encoder-decoder model.

        The same embedding layer and the same stack of transformer layers are
        run twice: once in "encoder" mode over the encoder inputs, then once
        in "decoder" mode over the decoder inputs with cross attention onto
        the final encoder hidden states.

        Args:
            inputs: dict with `encoder_input_ids`, `decoder_input_ids` and,
                depending on `use_type_embeddings` / `mask_mode`,
                `encoder_input_type_ids`, `decoder_input_type_ids` and
                `encoder_input_mask`.

        Returns:
            dict with `cls_output`, `token_embeddings`, `token_logits`,
            `last_token_logits` (all computed from the decoder outputs) and,
            when `return_all_layer_token_embeddings` is set,
            `all_layer_token_embeddings`.
        """
        encoder_input_ids = inputs["encoder_input_ids"]
        decoder_input_ids = inputs["decoder_input_ids"]
        encoder_input_type_ids = None
        decoder_input_type_ids = None
        if self.use_type_embeddings:
            encoder_input_type_ids = inputs["encoder_input_type_ids"]
            decoder_input_type_ids = inputs["decoder_input_type_ids"]
        encoder_input_mask = None
        if self.mask_mode in ["user_defined", "prefix"]:
            encoder_input_mask = inputs["encoder_input_mask"]
        def get_embeddings(input_ids, input_type_ids):
            """Embed ids (word + optional type/position), then norm + dropout.

            Shared by the encoder and the decoder inputs.
            """
            embeddings = self._embedding_layer(input_ids)
            sequence_length = tf.shape(input_ids)[1]
            # Add word_embeddings + position_embeddings + type_embeddings
            if self.use_type_embeddings:
                type_embeddings = self._type_embeddings(input_type_ids)
                embeddings = embeddings + type_embeddings
            if self.use_positonal_embeddings:
                positional_embeddings = self._position_embedding_layer(tf.range(sequence_length))
                embeddings = embeddings + positional_embeddings
            # Norm + dropout
            embeddings = self._embedding_norm(embeddings)
            embeddings = self._embedding_dropout(embeddings, training=self.use_dropout)
            return embeddings
        encoder_embeddings = get_embeddings(encoder_input_ids, encoder_input_type_ids)
        decoder_embeddings = get_embeddings(decoder_input_ids, decoder_input_type_ids)
        # Initialize `encoder_attention_mask` as empty list
        encoder_attention_mask = []
        if self.mask_mode == "user_defined":
            encoder_attention_mask = SelfAttentionMask()([encoder_embeddings, encoder_input_mask])
        if self.mask_mode == "prefix":
            encoder_attention_mask = tf.map_fn(prefix_mask, encoder_input_mask, dtype=tf.float32)
        if self.mask_mode == "causal":
            encoder_attention_mask = CausalMask()(encoder_embeddings)
        # Decoder mask is always None
        decoder_attention_mask = CausalMask()(decoder_embeddings)
        decoder_encoder_mask = CrossAttentionMask()([decoder_input_ids, encoder_input_mask])
        decoder_outputs = []
        encoder_outputs = []
        # Encoder Layer
        for i in range(self.num_hidden_layers):
            layer = self._transformer_layers[i]
            encoder_embeddings, _, _ = layer(
                [
                    encoder_embeddings,
                    encoder_attention_mask,
                    decoder_encoder_mask,  # dummy decoder_encoder_mask
                    encoder_embeddings,  # dummy encoder_hidden_states
                ],
                mode="encoder",
            )
            encoder_outputs.append(encoder_embeddings)
        # Decoder Layer: cross-attends onto the last encoder layer's output
        encoder_hidden_states = encoder_outputs[-1]
        for i in range(self.num_hidden_layers):
            layer = self._transformer_layers[i]
            decoder_embeddings, _, _ = layer(
                [decoder_embeddings, decoder_attention_mask, decoder_encoder_mask, encoder_hidden_states],
                mode="decoder",
            )
            decoder_outputs.append(decoder_embeddings)
        # First word of last layer outputs [CLS]
        cls_token_tensor = tf.keras.layers.Lambda(lambda x: tf.squeeze(x[:, 0:1, :], axis=1))(decoder_outputs[-1])
        # batch_size x embedding_size
        cls_output = self._pooler_layer(cls_token_tensor)
        # batch_size x sequence_length x embedding_size
        token_embeddings = decoder_outputs[-1]
        # MLM Projection
        if self.use_mlm_layer:
            token_embeddings = self.mlm_layer(token_embeddings)
            # token --> vocab ( batch_size x sequence_length x vocab_size)
            token_logits = (
                tf.matmul(
                    token_embeddings,
                    self.get_embedding_table(),
                    transpose_b=True,
                    name="token_logits",
                )
                + self._last_logits_bias
            )
        else:
            # token --> vocab ( batch_size x sequence_length x vocab_size)
            token_logits = tf.matmul(
                token_embeddings,
                self.get_embedding_table(),
                transpose_b=True,
                name="token_logits",
            )
        last_token_logits = tf.keras.layers.Lambda(lambda x: x[:, -1, :])(token_logits)
        result = {
            "cls_output": cls_output,
            "token_embeddings": token_embeddings,
            "token_logits": token_logits,
            "last_token_logits": last_token_logits,
        }
        if self.return_all_layer_token_embeddings:
            result["all_layer_token_embeddings"] = decoder_outputs
        return result
    def call_cross_attention_encoder_predict(self, inputs):
        """Inference forward pass for the shared encoder-decoder model.

        Caches the decoder's per-layer `key`/`value` tensors and the encoder
        hidden states so that after decoding step 0 only the newly generated
        decoder token needs to be processed. An all-zero cache (and an
        all-zero `encoder_hidden_states`) marks step 0.

        Args:
            inputs: dict with `encoder_input_ids`, `decoder_input_ids`,
                `encoder_hidden_states`, `decoder_all_cache_key`,
                `decoder_all_cache_value` and, depending on configuration,
                `encoder_input_type_ids`, `decoder_input_type_ids` and
                `encoder_input_mask`.

        Returns:
            dict with refreshed `encoder_hidden_states`,
            `decoder_all_cache_key`, `decoder_all_cache_value`, plus
            `cls_output`, `token_embeddings`, `token_logits` and
            `last_token_logits` from the decoder.
        """
        encoder_input_ids = inputs["encoder_input_ids"]
        decoder_input_ids = inputs["decoder_input_ids"]
        encoder_input_type_ids = None
        decoder_input_type_ids = None
        if self.use_type_embeddings:
            encoder_input_type_ids = inputs["encoder_input_type_ids"]
            decoder_input_type_ids = inputs["decoder_input_type_ids"]
        encoder_input_mask = None
        if self.mask_mode in ["user_defined", "prefix"]:
            encoder_input_mask = inputs["encoder_input_mask"]
        # self.num_hidden_layers, batch_size, sequence_length, embeddingd_imension
        encoder_hidden_states = inputs["encoder_hidden_states"]
        all_cache_key = inputs["decoder_all_cache_key"]
        all_cache_value = inputs["decoder_all_cache_value"]
        def get_encoder_embeddings(input_ids, input_type_ids):
            """Embed ids with a full `tf.range(sequence_length)` position index.

            Used for the encoder inputs (and for the decoder at step 0).
            """
            embeddings = self._embedding_layer(input_ids)
            sequence_length = tf.shape(input_ids)[1]
            # Add word_embeddings + position_embeddings + type_embeddings
            if self.use_type_embeddings:
                type_embeddings = self._type_embeddings(input_type_ids)
                embeddings = embeddings + type_embeddings
            if self.use_positonal_embeddings:
                positional_embeddings = self._position_embedding_layer(tf.range(sequence_length))
                embeddings = embeddings + positional_embeddings
            # Norm + dropout
            embeddings = self._embedding_norm(embeddings)
            embeddings = self._embedding_dropout(embeddings, training=self.use_dropout)
            return embeddings
        # this function is slightly different from the other function
        # because, we do not need tf.range(sequence_length)
        # we need it for (one word) from, step 1 onwards, as we decode
        # word by word. So we use all_cache_key for getting the past_length
        def get_decoder_embeddings_step_other(input_ids, input_type_ids):
            """Embed one new decoder token, using the cache to find its position."""
            def step_0_cache_length(_):
                return tf.constant(0, dtype=tf.int32)
            def step_other_cache_length(all_cache_key):
                past_length = tf.shape(all_cache_key)[3]
                # Why -1, because When iter 2 (our positional
                # embedding should be 1 not 2 and so on)
                sequence_length = tf.shape(input_ids)[1] + past_length - 1
                return sequence_length
            sequence_length = tf.cond(
                tf.equal(tf.reduce_sum(all_cache_key), 0),
                lambda: step_0_cache_length(all_cache_key),
                lambda: step_other_cache_length(all_cache_key),
            )
            embeddings = self._embedding_layer(input_ids)
            # Add word_embeddings + position_embeddings + type_embeddings
            if self.use_type_embeddings:
                type_embeddings = self._type_embeddings(input_type_ids)
                embeddings = embeddings + type_embeddings
            if self.use_positonal_embeddings:
                positional_embeddings = self._position_embedding_layer(sequence_length)
                # Make it 3D for sum ( For decoder we decode one at a time)
                positional_embeddings = tf.expand_dims(positional_embeddings, 0)
                embeddings = embeddings + positional_embeddings
            # Norm + dropout
            embeddings = self._embedding_norm(embeddings)
            embeddings = self._embedding_dropout(embeddings, training=self.use_dropout)
            return embeddings
        # Encoder embeddings remains same throughout the decoding process
        # so we have to calculate it only once
        # So , we check if cache_key == 0, if its 0 its step 0
        # else, pass a dummy encoder_embeddings, as we dont have to use it from step1
        # because, what we need from encoder is encoder_hidden_states_batch
        encoder_embeddings = tf.cond(
            tf.equal(tf.reduce_sum(all_cache_key), 0.0),
            lambda: get_encoder_embeddings(encoder_input_ids, encoder_input_type_ids),
            lambda: tf.zeros_like(encoder_hidden_states),  # dummy
        )
        decoder_embeddings = tf.cond(
            tf.equal(tf.reduce_sum(all_cache_key), 0.0),
            lambda: get_encoder_embeddings(decoder_input_ids, decoder_input_type_ids),
            lambda: get_decoder_embeddings_step_other(decoder_input_ids, decoder_input_type_ids),
        )
        # Initialize `encoder_attention_mask` as empty list
        encoder_attention_mask = []
        if self.mask_mode == "user_defined":
            encoder_attention_mask = SelfAttentionMask()([encoder_embeddings, encoder_input_mask])
        if self.mask_mode == "prefix":
            encoder_attention_mask = tf.map_fn(prefix_mask, encoder_input_mask, dtype=tf.float32)
        if self.mask_mode == "causal":
            encoder_attention_mask = CausalMask()(encoder_embeddings)
        # Decoder mask is always None
        decoder_attention_mask = CausalMask()(decoder_embeddings)
        decoder_encoder_mask = CrossAttentionMask()([decoder_input_ids, encoder_input_mask])
        # Unstack the stacked caches into one tensor per layer
        all_cache_key = [
            tf.squeeze(item, axis=0)
            for item in tf.split(all_cache_key, num_or_size_splits=self.num_hidden_layers, axis=0)
        ]
        all_cache_value = [
            tf.squeeze(item, axis=0)
            for item in tf.split(all_cache_value, num_or_size_splits=self.num_hidden_layers, axis=0)
        ]
        def calculate_encoder_hidden_state(encoder_embeddings):
            """Run the full encoder stack; only needed at decoding step 0."""
            # Encoder Layer
            encoder_outputs = []
            for i in range(self.num_hidden_layers):
                layer = self._transformer_layers[i]
                cache_key = all_cache_key[i]
                cache_value = all_cache_value[i]
                encoder_embeddings, _, _ = layer(
                    [
                        encoder_embeddings,
                        encoder_attention_mask,
                        decoder_encoder_mask,  # decoder_encoder_mask
                        encoder_embeddings,
                    ],
                    mode="encoder",
                    cache_key=cache_key,
                    cache_value=cache_value,
                )
                encoder_outputs.append(encoder_embeddings)
            encoder_hidden_states = encoder_outputs[-1]
            return encoder_hidden_states
        # While decoding we have to calculate it only once
        def use_cache_encoder():
            return tf.identity(inputs["encoder_hidden_states"])
        encoder_hidden_states = tf.cond(
            tf.equal(tf.reduce_sum(inputs["encoder_hidden_states"]), 0.0),
            lambda: calculate_encoder_hidden_state(encoder_embeddings),
            lambda: use_cache_encoder(),
        )
        # Decoder layer
        decoder_outputs = []
        for i in range(self.num_hidden_layers):
            layer = self._transformer_layers[i]
            # Fetching this layer's cached key/value
            cache_value = all_cache_value[i]
            cache_key = all_cache_key[i]
            decoder_embeddings, cache_key, cache_value = layer(
                [
                    decoder_embeddings,
                    decoder_attention_mask,
                    decoder_encoder_mask,
                    encoder_hidden_states,
                ],
                mode="decoder",
                cache_key=cache_key,
                cache_value=cache_value,
            )
            # Updating the cache for the next decoding step
            all_cache_key[i] = cache_key
            all_cache_value[i] = cache_value
            decoder_outputs.append(decoder_embeddings)
        # Stack all layers key and value together
        # num_layers x batch_size x num_heads x sequence_length x
        # (hidden_dimension/num_heads) # noqa
        all_cache_key = tf.stack(all_cache_key, axis=0, name="decoder_all_cache_key")
        all_cache_value = tf.stack(all_cache_value, axis=0, name="decoder_all_cache_value")
        # First word of last layer outputs [CLS]
        cls_token_tensor = tf.keras.layers.Lambda(lambda x: tf.squeeze(x[:, 0:1, :], axis=1))(decoder_outputs[-1])
        # batch_size x embedding_size
        cls_output = self._pooler_layer(cls_token_tensor)
        # batch_size x sequence_length x embedding_size
        token_embeddings = decoder_outputs[-1]
        # MLM Projection
        if self.use_mlm_layer:
            token_embeddings = self.mlm_layer(token_embeddings)
            # token --> vocab ( batch_size x sequence_length x vocab_size)
            token_logits = (
                tf.matmul(
                    token_embeddings,
                    self.get_embedding_table(),
                    transpose_b=True,
                    name="token_logits",
                )
                + self._last_logits_bias
            )
        else:
            # token --> vocab ( batch_size x sequence_length x vocab_size)
            token_logits = tf.matmul(
                token_embeddings,
                self.get_embedding_table(),
                transpose_b=True,
                name="token_logits",
            )
        last_token_logits = tf.keras.layers.Lambda(lambda x: x[:, -1, :])(token_logits)
        return {
            "encoder_hidden_states": encoder_hidden_states,
            "decoder_all_cache_key": all_cache_key,
            "decoder_all_cache_value": all_cache_value,
            "cls_output": cls_output,
            "token_embeddings": token_embeddings,
            "token_logits": token_logits,
            "last_token_logits": last_token_logits,
        }
    def call_decoder_predict(self, inputs):
        """Inference forward pass when ``is_training = False`` and ``is_decoder = True``.

        The past `key` and `value` tensors for the decoder layers are cached
        while predicting, to make the inference/NLG faster in case of
        AutoRegressive Decoding. An all-zero `all_cache_key` marks decoding
        step 0.

        Args:
            inputs: dict with `input_ids`, `encoder_hidden_states`,
                `decoder_encoder_mask`, `all_cache_key`, `all_cache_value`
                and, when `use_type_embeddings` is set, `input_type_ids`.

        Returns:
            dict with updated `all_cache_key` / `all_cache_value` (stacked
            over layers), `token_embeddings`, `token_logits` and
            `last_token_logits`.
        """
        input_ids = inputs["input_ids"]
        encoder_hidden_state = inputs["encoder_hidden_states"]
        decoder_encoder_mask = inputs["decoder_encoder_mask"]
        all_cache_key = inputs["all_cache_key"]
        all_cache_value = inputs["all_cache_value"]
        # Decoder don't need this
        # # When `mask_mode` is `causal` , input_mask is not required
        # if self.mask_mode in ['user_defined']:
        # input_mask = inputs['input_mask']
        if self.use_type_embeddings:
            input_type_ids = inputs["input_type_ids"]
        # cache_length = tf.constant(0, dtype=tf.int32)
        # Step 0: no valid cache yet, position index starts at 0.
        def step_0_cache_length(_):
            return tf.constant(0, dtype=tf.int32)
        # Later steps: derive the current position from the cached sequence length.
        def step_other_cache_length(all_cache_key):
            past_length = tf.shape(all_cache_key)[3]
            # Why -1, because When iter 2 (our positional embedding should be 1 not 2 and so on)
            sequence_length = tf.shape(input_ids)[1] + past_length - 1
            return sequence_length
        sequence_length = tf.cond(
            tf.equal(tf.reduce_sum(all_cache_key), 0),
            lambda: step_0_cache_length(all_cache_key),
            lambda: step_other_cache_length(all_cache_key),
        )
        # Unstack the stacked caches into one tensor per layer
        all_cache_key = [
            tf.squeeze(item, axis=0)
            for item in tf.split(all_cache_key, num_or_size_splits=self.num_hidden_layers, axis=0)
        ]
        all_cache_value = [
            tf.squeeze(item, axis=0)
            for item in tf.split(all_cache_value, num_or_size_splits=self.num_hidden_layers, axis=0)
        ]
        # If decoder is not sharing embeddings
        word_embeddings = self._embedding_layer(input_ids)
        embeddings = word_embeddings
        # Add word_embeddings + position_embeddings + type_embeddings
        if self.use_type_embeddings:
            type_embeddings = self._type_embeddings(input_type_ids)
            embeddings = embeddings + type_embeddings
        if self.use_positonal_embeddings:
            positional_embeddings = self._position_embedding_layer(sequence_length)
            # Make it 3D for sum ( For decoder we decode one at a time)
            positional_embeddings = tf.expand_dims(positional_embeddings, 0)
            embeddings = embeddings + positional_embeddings
        # Norm + dropout
        embeddings = self._embedding_norm(embeddings)
        embeddings = self._embedding_dropout(embeddings, training=self.use_dropout)
        # Initialize `attention_mask` as empty list
        attention_mask = []
        if self.mask_mode == "causal":
            attention_mask = CausalMask()(embeddings)
        decoder_outputs = []
        for i in range(self.num_hidden_layers):
            layer = self._transformer_layers[i]
            # Fetching this layer's cached key/value
            cache_value = all_cache_value[i]
            cache_key = all_cache_key[i]
            embeddings, cache_key, cache_value = layer(
                [
                    embeddings,
                    attention_mask,
                    encoder_hidden_state,
                    decoder_encoder_mask,
                ],
                cache_key=cache_key,
                cache_value=cache_value,
            )
            # Updating the cache for the next decoding step
            all_cache_key[i] = cache_key
            all_cache_value[i] = cache_value
            decoder_outputs.append(embeddings)
        # Stack all layers key and value together
        # num_layers x batch_size x num_heads x sequence_length x (hidden_dimension/num_heads)
        all_cache_key = tf.stack(all_cache_key, axis=0, name="all_cache_key")
        all_cache_value = tf.stack(all_cache_value, axis=0, name="all_cache_value")
        # batch_size x sequence_length x embedding_size
        token_embeddings = decoder_outputs[-1]
        # MLM Projection
        if self.use_mlm_layer:
            token_embeddings = self.mlm_layer(token_embeddings)
            # token --> vocab ( batch_size x sequence_length x vocab_size)
            token_logits = (
                tf.matmul(
                    token_embeddings,
                    self.get_embedding_table(),
                    transpose_b=True,
                    name="token_logits",
                )
                + self._last_logits_bias
            )
        else:
            # token --> vocab ( batch_size x sequence_length x vocab_size)
            token_logits = tf.matmul(
                token_embeddings,
                self.get_embedding_table(),
                transpose_b=True,
                name="token_logits",
            )
        last_token_logits = tf.keras.layers.Lambda(lambda x: x[:, -1, :])(token_logits)
        return {
            "all_cache_key": all_cache_key,
            "all_cache_value": all_cache_value,
            "token_embeddings": token_embeddings,
            "token_logits": token_logits,
            "last_token_logits": last_token_logits,
        }
    def call_decoder(self, inputs):
        """Forward Pass for Decoder (training / non-cached pipeline).

        Args:
            inputs: dict
                inputs is a dict with keys [`input_ids` , `input_mask`, `input_type_ids`,
                `encoder_hidden_states`, `decoder_encoder_mask`].
                These keys might or might not be present based on `mask_mode` and other criterias

        Returns:
            dict with `token_embeddings`, `token_logits`, `last_token_logits`
            and optionally `all_layer_token_embeddings`.
        """
        input_ids = inputs["input_ids"]
        encoder_output = inputs["encoder_hidden_states"]
        decoder_encoder_mask = inputs["decoder_encoder_mask"]
        if self.mask_mode in ["user_defined"]:
            input_mask = inputs["input_mask"]
        if self.use_type_embeddings:
            input_type_ids = inputs["input_type_ids"]
        sequence_length = tf.shape(input_ids)[1]
        # If decoder is not sharing embeddings
        word_embeddings = self._embedding_layer(input_ids)
        embeddings = word_embeddings
        # Add word_embeddings + position_embeddings + type_embeddings
        if self.use_type_embeddings:
            type_embeddings = self._type_embeddings(input_type_ids)
            embeddings = embeddings + type_embeddings
        if self.use_positonal_embeddings:
            positional_embeddings = self._position_embedding_layer(tf.range(sequence_length))
            # positional_embeddings = self._position_embedding_layer(sequence_length)
            # # Make it 3D for sum ( For decoder we decode one at a time)
            # positional_embeddings = tf.expand_dims(positional_embeddings, 0)
            embeddings = embeddings + positional_embeddings
        # Norm + dropout
        embeddings = self._embedding_norm(embeddings)
        embeddings = self._embedding_dropout(embeddings, training=self.use_dropout)
        # Initialize `attention_mask` as empty list
        attention_mask = []
        if self.mask_mode == "user_defined":
            attention_mask = SelfAttentionMask()([embeddings, input_mask])
        if self.mask_mode == "causal":
            attention_mask = CausalMask()(embeddings)
        decoder_outputs = []
        for i in range(self.num_hidden_layers):
            layer = self._transformer_layers[i]
            embeddings, _key, _value = layer([embeddings, attention_mask, encoder_output, decoder_encoder_mask])
            decoder_outputs.append(embeddings)
        # batch_size x sequence_length x embedding_size
        token_embeddings = decoder_outputs[-1]
        # MLM Projection
        if self.use_mlm_layer:
            token_embeddings = self.mlm_layer(token_embeddings)
            # token --> vocab ( batch_size x sequence_length x vocab_size)
            token_logits = (
                tf.matmul(
                    token_embeddings,
                    self.get_embedding_table(),
                    transpose_b=True,
                    name="token_logits",
                )
                + self._last_logits_bias
            )
        else:
            # token --> vocab ( batch_size x sequence_length x vocab_size)
            token_logits = tf.matmul(
                token_embeddings,
                self.get_embedding_table(),
                transpose_b=True,
                name="token_logits",
            )
        last_token_logits = tf.keras.layers.Lambda(lambda x: x[:, -1, :])(token_logits)
        result = {
            "token_embeddings": token_embeddings,
            "token_logits": token_logits,
            "last_token_logits": last_token_logits,
        }
        if self.return_all_layer_token_embeddings:
            result["all_layer_token_embeddings"] = decoder_outputs
        return result
def call(self, inputs):
"""Forward Pass.
We have 2 pipelines . Training pipeline is relatively simpler
Testing pipeline has few changes to accomodate caching
of `key` and `value` for Transformer.
Caching is significant for AutoRegressive modeling.
Also, minor changes to make use of variable batch decoding
Args: inputs, dict
if self.is_training:
self.call_training(inputs)
else:
self.call_predict(inputs)
"""
outputs = self.call_fn(inputs)
return outputs
def extend_positional_embeddings(self, factor):
"""Extends positional embeddings, by a factor.
If factor = 2, we replicate the positional embeddings.
If matrix is 512 x 768 , we convert it into 1024 x 768.
Args:
factor: int
Returns:
a new object of the class method
"""
if not isinstance(factor, int):
raise ValueError(" `factor` must be an int with value > 1")
# Squeeze is used to convert 3D to 2D
updated_pos_embeddings = tf.squeeze(tf.repeat(self._position_embedding_layer.variables, factor, axis=1), 0)
self.config["max_position_embeddings"] = 2 * self.config["max_position_embeddings"]
tf.keras.backend.clear_session()
new_layer = self.__class__(
config=self.config,
mask_mode=self.mask_mode,
name=self.model_name,
use_dropout=self.use_dropout,
is_training=self.is_training,
batch_size=self.batch_size,
sequence_length=self.sequence_length,
use_type_embeddings=self.use_type_embeddings,
pipeline_mode=self.pipeline_mode,
)
# layer to model to instantiate variables
new_model = new_layer.get_model()
del new_model
model_new_dict = {}
for var in self.variables:
if "positional_embedding" in var.name:
# Add the replicated previous embeddings to this embeddings
model_new_dict[var.name] = updated_pos_embeddings
else:
model_new_dict[var.name] = var
# Re assign it to model_new
for var in new_layer.variables:
var.assign(model_new_dict[var.name])
# Release the memory
del model_new_dict
logging.info("Succesfully changed position_embeddings to {}".format(updated_pos_embeddings.shape))
return new_layer
def get_embedding_table(self):
return self._embedding_layer.embeddings
def get_config(self):
return self._config_dict
@property
def transformer_layers(self):
"""List of Transformer layers in the encoder."""
return self._transformer_layers
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
| [
"legacyai.org@gmail.com"
] | legacyai.org@gmail.com |
5cc1e828fb014c43f7ef00b0a93ab7e27a5e0eb4 | 5ee6858e60d1065c797a105710e9d6c835f3b7ad | /app/user/migrations/0008_alter_customuser_avatar.py | 0d17298ad2e47e51857959c13560f1c37e7deca3 | [] | no_license | kiyoshion/django-rest-framework | 19260d1b864939a69fe6668062fdea12a1b1b1d5 | 8f81064511b9db066ef2475fad78629a5839c213 | refs/heads/main | 2023-07-08T08:29:32.483651 | 2021-08-09T11:36:13 | 2021-08-09T11:36:13 | 394,266,414 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | # Generated by Django 3.2.4 on 2021-06-10 02:09
from django.db import migrations
import imagekit.models.fields
import user.models
class Migration(migrations.Migration):
    """Re-declare ``CustomUser.avatar`` as an imagekit ``ProcessedImageField``
    with a default placeholder image (auto-generated by Django 3.2.4)."""
    dependencies = [
        ('user', '0007_alter_customuser_avatar'),
    ]
    operations = [
        migrations.AlterField(
            model_name='customuser',
            name='avatar',
            field=imagekit.models.fields.ProcessedImageField(blank=True, default='img/avatar.svg', null=True, upload_to=user.models.CustomUser.user_directory_path),
        ),
    ]
| [
"kiyoshion@gmail.com"
] | kiyoshion@gmail.com |
6f5709bc731865f387e0060495401727486d9ca7 | 16fe56a5fd3beea997bea716f103f4165f2f9dfc | /reviewsMapper.py | e9073ec498b7cb4b7ab9568ba1f0b9a46bdf53f0 | [] | no_license | jgnguy/yelp_dataset_project | 2299b1054a0e284762ef3b9c9ab49cc0c6213bd8 | a4099978374053cae2b7702675121ab4051fc16f | refs/heads/master | 2021-01-08T15:12:37.079944 | 2019-12-11T19:55:00 | 2019-12-11T19:55:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,234 | py | #!/usr/bin/python
#REVIEWS mapper.py
#Evan Yao, Jonathan Nguyen, Richard Pham
#get all the stars and businessID of all businesses within a specified number of years ago
import datetime
import json
import sys
# Build the cutoff: only reviews newer than `numYearsAgo` years ago
# (approximated as numYearsAgo * 365 days) are emitted.
numYearsAgo = 2
today = datetime.datetime.now()
# get a year ago from today
yearAgo = today - datetime.timedelta(numYearsAgo * 365)
# Truncate to midnight so the comparison works on whole days
# (replaces the old strftime -> split -> re-parse round-trip).
pastYearDate = datetime.datetime(yearAgo.year, yearAgo.month, yearAgo.day)
# Emit "<business_id>\t<stars>" for every sufficiently recent review on stdin.
for line in sys.stdin:
    line = line.strip()
    review = json.loads(line)
    # check if review exists
    if review is not None:
        try:
            reviewDate = review['date']
            reviewYear, reviewMonth, reviewDay = reviewDate.split(' ')[0].split('-')
            refReviewDate = datetime.datetime(int(reviewYear), int(reviewMonth), int(reviewDay))
            # check if review is relevant and current
            if pastYearDate < refReviewDate:
                # print the business ID the review is for and the amount of stars they gave
                print(review['business_id'] + '\t' + str(review['stars']))
        except Exception:
            # Skip malformed records (missing keys, bad date format) without
            # swallowing KeyboardInterrupt/SystemExit like the old bare `except:`.
            continue
| [
"noreply@github.com"
] | noreply@github.com |
650c945ba471ee76b6ba900217fb8ee31eea82ae | ca98a533d53da95249df7aed710a2db9424cdcdc | /pyspark_proxy/sql/udf.py | f95753ccc8abdd05912b9ce41ea60fcbe0d2eeec | [
"Apache-2.0"
] | permissive | abronte/PysparkProxy | 9b3b0cc5dd9c826ca01b1562c639e9ebfc163c84 | cc28bacb0d4ee6fb87ced763a73e9ea791612414 | refs/heads/master | 2021-08-06T12:46:55.562589 | 2018-12-12T21:57:01 | 2018-12-12T21:57:01 | 147,398,292 | 4 | 0 | NOASSERTION | 2018-12-12T21:57:03 | 2018-09-04T19:07:00 | Python | UTF-8 | Python | false | false | 1,101 | py | import base64
import imp
import sys
from pyspark_proxy.proxy import Proxy
from pyspark_proxy.sql.types import DataType
from pyspark_proxy.sql.column import Column
class UDFRegistration(Proxy):
def __init__(self, context_id):
self._context_id = context_id
def register(self, name, f, returnType=None):
if returnType != None:
returnType = {'_PROXY_ID': returnType._id}
self._call(
self._context_id,
'udf.register',
[(name, f), {'returnType': returnType}])
class UserDefinedFunction(Proxy):
def __init__(self, f, returnType=None):
if isinstance(returnType, DataType):
returnType = {'_PROXY_ID': returnType._id}
result = self._call(
'pyspark',
'sql.functions.udf',
[(f, returnType), {}])
self._id = result['id']
def __call__(self, *args, **kwargs):
result = self._call(
self._id,
None,
[args, kwargs])
result._name = args[0]
return result
| [
"adam.bronte@coupa.com"
] | adam.bronte@coupa.com |
b2f59b138ab429063808e0cfaee6e1c099c3be50 | 7313698d76e4606e290702463464fc8eeeff32c0 | /Intermo/migrations/versions/20210512_184835_.py | d140577d9f0f34af9c85ed83b0e26419b21e7c29 | [] | no_license | NathanFrank285/Intermo | 53e55b576b4fcc2741b06f387e706fa6e8a43702 | 69ca818376dbeb22b100cc4a1e5873210fe52ce4 | refs/heads/master | 2023-06-23T05:54:07.452573 | 2021-07-20T21:14:27 | 2021-07-20T21:14:27 | 364,291,740 | 0 | 0 | null | 2021-07-20T21:14:27 | 2021-05-04T14:54:36 | Python | UTF-8 | Python | false | false | 701 | py | """empty message
Revision ID: 271520156ed4
Revises: 591c56ed2dac
Create Date: 2021-05-12 18:48:35.174597
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '271520156ed4'
down_revision = '591c56ed2dac'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('trades_uniqueTradeId_key', 'trades', type_='unique')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_unique_constraint('trades_uniqueTradeId_key', 'trades', ['uniqueTradeId'])
# ### end Alembic commands ###
| [
"nathansfrank@yahoo.com"
] | nathansfrank@yahoo.com |
b26444ad2d6f2216e041816a9cd9a0238f7491e6 | 6d493d09085d4d398132204925078a179774f138 | /melgan_vocoder.py | 2ec8f713892afcce0d01ff4faa4f26ebc87935ea | [
"MIT"
] | permissive | zongxiangli/CycleGAN-VC3 | 6a41f843b430fd307d9ea0b43aa5910816fba450 | 431b332fa17638391ca913e6821b526456fd874f | refs/heads/main | 2023-02-21T02:19:39.058010 | 2021-01-25T09:49:00 | 2021-01-25T09:49:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,448 | py | #!python
# -*- coding: utf-8 -*-
import os
import yaml
from pathlib import Path
import torch
import torch.nn as nn
from torch.nn.utils import weight_norm
from feature_utils import Audio2Mel
def weights_init(m):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find("BatchNorm2d") != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def WNConv1d(*args, **kwargs):
return weight_norm(nn.Conv1d(*args, **kwargs))
def WNConvTranspose1d(*args, **kwargs):
return weight_norm(nn.ConvTranspose1d(*args, **kwargs))
class ResnetBlock(nn.Module):
def __init__(self, dim, dilation=1):
super().__init__()
self.block = nn.Sequential(
nn.LeakyReLU(0.2),
nn.ReflectionPad1d(dilation),
WNConv1d(dim, dim, kernel_size=3, dilation=dilation),
nn.LeakyReLU(0.2),
WNConv1d(dim, dim, kernel_size=1),
)
self.shortcut = WNConv1d(dim, dim, kernel_size=1)
def forward(self, x):
return self.shortcut(x) + self.block(x)
class Generator(nn.Module):
def __init__(self, input_size, ngf, n_residual_layers):
super().__init__()
ratios = [8, 8, 2, 2]
self.hop_length = np.prod(ratios)
mult = int(2 ** len(ratios))
model = [
nn.ReflectionPad1d(3),
WNConv1d(input_size, mult * ngf, kernel_size=7, padding=0),
]
# Upsample to raw audio scale
for i, r in enumerate(ratios):
model += [
nn.LeakyReLU(0.2),
WNConvTranspose1d(
mult * ngf,
mult * ngf // 2,
kernel_size=r * 2,
stride=r,
padding=r // 2 + r % 2,
output_padding=r % 2,
),
]
for j in range(n_residual_layers):
model += [ResnetBlock(mult * ngf // 2, dilation=3 ** j)]
mult //= 2
model += [
nn.LeakyReLU(0.2),
nn.ReflectionPad1d(3),
WNConv1d(ngf, 1, kernel_size=7, padding=0),
nn.Tanh(),
]
self.model = nn.Sequential(*model)
self.apply(weights_init)
def forward(self, x):
return self.model(x)
def get_default_device():
if torch.cuda.is_available():
return "cuda"
else:
return "cpu"
def load_model(mel2wav_path, device=get_default_device()):
"""
Args:
mel2wav_path (str or Path): path to the root folder of dumped text2mel
device (str or torch.device): device to load the model
"""
root = Path(mel2wav_path)
with open(root / "args.yml", "r") as f:
args = yaml.load(f, Loader=yaml.FullLoader)
netG = Generator(args.n_mel_channels, args.ngf, args.n_residual_layers).to(device)
netG.load_state_dict(torch.load(root / "best_netG.pt", map_location=device))
return netG
class MelVocoder:
def __init__(
self,
path,
device=get_default_device(),
github=False,
model_name="multi_speaker",
):
self.fft = Audio2Mel().to(device)
if github:
netG = Generator(80, 32, 3).to(device)
root = Path(os.path.dirname(__file__)).parent
netG.load_state_dict(
torch.load(root / f"models/{model_name}.pt", map_location=device)
)
self.mel2wav = netG
else:
self.mel2wav = load_model(path, device)
self.device = device
def __call__(self, audio):
"""
Performs audio to mel conversion (See Audio2Mel in mel2wav/modules.py)
Args:
audio (torch.tensor): PyTorch tensor containing audio (batch_size, timesteps)
Returns:
torch.tensor: log-mel-spectrogram computed on input audio (batch_size, 80, timesteps)
"""
return self.fft(audio.unsqueeze(1).to(self.device))
def inverse(self, mel):
"""
Performs mel2audio conversion
Args:
mel (torch.tensor): PyTorch tensor containing log-mel spectrograms (batch_size, 80, timesteps)
Returns:
torch.tensor: Inverted raw audio (batch_size, timesteps)
"""
with torch.no_grad():
return self.mel2wav(mel.to(self.device)).squeeze(1)
| [
"jackaduma@gmail.com"
] | jackaduma@gmail.com |
b1bfb061db1148311e785c410fd9730250806673 | cf954be6c93a3dcd81f5b094faf005af371336f0 | /src/utils/__init__.py | 43d3f48b46546235749fa29c39d05ced1e1c54ac | [] | no_license | urielsinger/text2Mol | 819e20ea2638ddc97ecf7d57944ee1faaa8c97ff | 2d8b0cfb17414f43904856944e3d6f0a11fd2b96 | refs/heads/master | 2020-07-05T18:53:33.064530 | 2019-08-16T14:04:05 | 2019-08-16T14:04:05 | 202,737,161 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | from utils.datetime_utils import *
from utils.general_utils import *
from utils.graph_utils import *
from utils.visualize_utils import *
from utils.text_utils import *
from utils.molecule_utils import * | [
"urielsinger@gmail.com"
] | urielsinger@gmail.com |
e4c55d2046bfa5e009793218e4f1236cc147abc2 | b58bf20946b2f0e43ccacd4d8206d513ba5ce6dd | /vecap/Churches/models.py | b57041b56fd66a90dc10f7f4f9bfb22b3af229de | [] | no_license | ifeoluwaDeterminas/vecap | fb5540838ef170fbd91b2075e66a1d19a7df6ade | cc7bc941d82ee3c8852da6fd946756703629b62e | refs/heads/main | 2023-08-16T01:55:45.949424 | 2021-10-11T08:20:19 | 2021-10-11T08:20:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | from django.db import models
from django_tenants.models import TenantMixin, DomainMixin
# Create your models here.
class Client(TenantMixin):
name = models.CharField(max_length=50)
description = models.CharField(max_length=255)
on_trial = models.BooleanField()
created_on = models.DateField(auto_now_add=True)
auto_create_schema = True
class Domain(DomainMixin):
pass | [
"jotunbade@gmail.com"
] | jotunbade@gmail.com |
979b4aba56fd5ea83870647b5611677ae5844785 | ce1b0adade9b0cab3bfa5299f985a3d0d59a40cd | /session_key_test.py | 9a478a1e37167f80b5b494705f84137d584b036a | [
"Apache-2.0"
] | permissive | xyhlk520/GPG-Decrypt | 5e5f8522429df2aaf6026b603ef848849f439396 | 43efec3890b5ff9c905180eef6778a94f10bb6b0 | refs/heads/main | 2023-03-17T20:55:19.215318 | 2021-03-14T09:37:10 | 2021-03-14T09:37:10 | 347,595,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | passphrase = "test"
session_key = b"\x09\x8f\x6b\xcd\x46\x21\xd3\x73\xca\xde\x4e\x83" + \
b"\x26\x27\xb4\xf6\x5f\x8f\x8e\x05\xef\xdc\x22\xe8" | [
"noreply@github.com"
] | noreply@github.com |
a14e1188bdfc65d5b6c7835a865851cf2b468dce | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/srt.py | fe6f0681dbf56a25b61e42d88d931b804baf7678 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 82 | py | ii = [('ShawHDE.py', 1), ('AubePRP.py', 1), ('FerrSDO2.py', 1), ('ClarGE3.py', 1)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
c98f587d3f300afa98366ee38ee38743ef8a8905 | 025ad48264afd44ccc9a3ec820a74711631a77d7 | /ludo.py | 5195e20168f60670e6bcdfa1552868a8d865f136 | [] | no_license | buffosens/LudoRGB | 1939f6588b624162500ee74dadef7efbd22a5fd9 | 72a940c4e309e3ea128d332528303f246d3bc2de | refs/heads/master | 2023-03-27T20:33:08.731595 | 2021-03-29T06:50:27 | 2021-03-29T06:50:27 | 347,163,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,273 | py | # Simple demo of of the WS2801/SPI-like addressable RGB LED lights.
import time
import RPi.GPIO as GPIO
# Import the WS2801 module.
import Adafruit_WS2801
import Adafruit_GPIO.SPI as SPI
# Configure the count of pixels:
PIXEL_COUNT = 100
# Alternatively specify a hardware SPI connection on /dev/spidev0.0:
SPI_PORT = 0
SPI_DEVICE = 0
pixels = Adafruit_WS2801.WS2801Pixels(PIXEL_COUNT, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE), gpio=GPIO)
# Define the wheel function to interpolate between different hues.
def wheel(pos):
if pos < 85:
return Adafruit_WS2801.RGB_to_color(pos * 3, 255 - pos * 3, 0)
elif pos < 170:
pos -= 85
return Adafruit_WS2801.RGB_to_color(255 - pos * 3, 0, pos * 3)
else:
pos -= 170
return Adafruit_WS2801.RGB_to_color(0, pos * 3, 255 - pos * 3)
# Define rainbow cycle function to do a cycle of all hues.
def rainbow_cycle_successive(pixels, wait=0.1):
for i in range(pixels.count()):
# tricky math! we use each pixel as a fraction of the full 96-color wheel
# (thats the i / strip.numPixels() part)
# Then add in j which makes the colors go around per pixel
# the % 96 is to make the wheel cycle around
pixels.set_pixel(i, wheel(((i * 256 // pixels.count())) % 256) )
pixels.show()
if wait > 0:
time.sleep(wait)
def rainbow_cycle(pixels, wait=0.005):
for j in range(256): # one cycle of all 256 colors in the wheel
for i in range(pixels.count()):
pixels.set_pixel(i, wheel(((i * 256 // pixels.count()) + j) % 256) )
pixels.show()
if wait > 0:
time.sleep(wait)
def rainbow_colors(pixels, wait=0.05):
for j in range(256): # one cycle of all 256 colors in the wheel
for i in range(pixels.count()):
pixels.set_pixel(i, wheel(((256 // pixels.count() + j)) % 256) )
pixels.show()
if wait > 0:
time.sleep(wait)
def brightness_decrease(pixels, wait=0.01, step=1):
for j in range(int(256 // step)):
for i in range(pixels.count()):
r, g, b = pixels.get_pixel_rgb(i)
r = int(max(0, r - step))
g = int(max(0, g - step))
b = int(max(0, b - step))
pixels.set_pixel(i, Adafruit_WS2801.RGB_to_color( r, g, b ))
pixels.show()
if wait > 0:
time.sleep(wait)
def blink_color(pixels, blink_times=5, wait=0.5, color=(255,0,0)):
for i in range(blink_times):
# blink two times, then wait
pixels.clear()
for j in range(2):
for k in range(pixels.count()):
pixels.set_pixel(k, Adafruit_WS2801.RGB_to_color( color[0], color[1], color[2] ))
pixels.show()
time.sleep(0.08)
pixels.clear()
pixels.show()
time.sleep(0.08)
time.sleep(wait)
def appear_from_back(pixels, color=(255, 0, 0)):
pos = 0
for i in range(pixels.count()):
for j in reversed(range(i, pixels.count())):
pixels.clear()
# first set all pixels at the begin
for k in range(i):
pixels.set_pixel(k, Adafruit_WS2801.RGB_to_color( color[0], color[1], color[2] ))
# set then the pixel at position j
pixels.set_pixel(j, Adafruit_WS2801.RGB_to_color( color[0], color[1], color[2] ))
pixels.show()
time.sleep(0.02)
def test_led(pixels):
for i in range(pixels.count()):
pixels.set_pixel(i, Adafruit_WS2801.RGB_to_color( 255,255,255 ))
pixels.show()
if __name__ == "__main__":
# Clear all the pixels to turn them off.
pixels.clear()
pixels.show() # Make sure to call show() after changing any pixels!
#rainbow_cycle_successive(pixels, wait=1.0)
#rainbow_cycle(pixels, wait=0.01)
#brightness_decrease(pixels)
#appear_from_back(pixels)
#for i in range(3):
#blink_color(pixels, blink_times = 1, color=(255, 0, 0))
#blink_color(pixels, blink_times = 1, color=(0, 255, 0))
#blink_color(pixels, blink_times = 1, color=(0, 0, 255))
#rainbow_colors(pixels)
#brightness_decrease(pixels)
test_led(pixels)
| [
"volker.weber@swarco.de"
] | volker.weber@swarco.de |
2159710ed19f6e6b65f4a46e2509a0dbadb81e3b | 119c716206804aef3eb99c5ca24e8e16eed3473a | /openaddr/tests/coverage.py | 8693f0645fb3ed0e3777f5f06456c426eb04111b | [
"ISC"
] | permissive | enterstudio/machine-1 | d190db7cd8fceb409e25232b65507ec21c1b7009 | 43d4d3b41e1ad3410c2442b8220e6e1a9fe2255f | refs/heads/master | 2021-08-30T08:06:05.055323 | 2017-12-16T23:30:56 | 2017-12-16T23:30:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,950 | py | import unittest
import unittest.mock
import os
import psycopg2
from httmock import HTTMock, response
DATABASE_URL = os.environ.get('DATABASE_URL', 'postgres:///hooked_on_sources')
from ..ci import recreate_db
from ..ci.coverage import calculate
class TestCalculate (unittest.TestCase):
def setUp(self):
'''
'''
recreate_db.recreate(DATABASE_URL)
with psycopg2.connect(DATABASE_URL) as conn:
with conn.cursor() as db:
db.execute("insert into cb_2013_us_state_20m (gid, name, usps_code, geom) values (1, 'Kansas', 'KS', ST_SetSRID('MULTIPOLYGON(((-102.0472 40.0033, -94.6143 40.0033, -94.6143 36.9985, -102.0472 36.9985, -102.0472 40.0033)))'::geometry, 4326))")
db.execute("insert into ne_50m_admin_0_countries (gid, name, name_long, iso_a2, iso_a3, geom) values (1, 'Null Is.', 'Null Island', 'XX', 'XXX', '0106000020E610000002000000010300000001000000270000008EB1135E82533FBF691D554D1075EF3E90BE49D3A0683EBF664CC11A67D3F13EA208A9DBD9573EBF3FABCC94D6DFE23EEC3026FDBD143EBF8DEDB5A0F7C6E03E3659A31EA2D13DBF664CC11A67D3F13E13D21A834E083DBF54E41071732AE93EDCF3FC69A33A3DBF8DEDB5A0F7C6D03E0188BB7A15193DBF0000000000000000CCB6D3D688603CBF54E41071732AC93E0395F1EF332E3CBFF168E388B5F8E4BE7044F7AC6BB43CBF05A227655243EBBE7044F7AC6BB43CBF8DEDB5A0F7C6F0BE2829B000A60C3CBF664CC11A67D3F1BE16DF50F86C1D3CBF180AD80E46EC03BF7044F7AC6BB43CBF180AD80E46EC03BFA72215C616823CBF3677F4BF5C8B06BFEE3D5C72DC293DBF05A2276552430BBFA515DF50F86C3DBFC093162EABB009BF3659A31EA2D13DBF2D431CEBE2360ABF3659A31EA2D13DBF240F441669E20DBF344C6DA983BC3EBF240F441669E20DBF344C6DA983BC3EBFC093162EABB009BFD7D9907F66103FBFC093162EABB009BF691D554D10753FBF4BB0389CF9D50CBF61C1FD80070640BFE7340BB43BA408BFD5CC5A0A48FB3FBFA226FA7C941107BF213EB0E3BF4040BF664CC11A67D301BF4F779E78CE1640BFB75F3E59315CFDBE4F779E78CE1640BF05A227655243FBBEFCA9F1D24D6240BF54E41071732AF9BE213EB0E3BF4040BFF168E388B5F8E4BE46D26EF4311F40BFF168E388B5F8E4BE46D26EF4311F40BF54E41071732AD9BE2AE3DF675C3840BF00000000000000801FF5D72B2CB83FBF3FABCC94D6DFE23E56D3F544D7853FBF54E41071732AD93EA0FB7266BB423FBFB75F3E59315CDD3EA0FB7266BB423FBFA226FA7C9411E73E8EB1135E82533FBF691D554D1075EF3E010300000003000000C7010000C9C7EE022505163F7BA35698BED7303F2829B000A60C1C3F7689EAAD81AD323F1FF5D72B2CB81F3F41B8020AF5F4313F86C613419C87233F0ADAE4F04927323FA019C40776FC273FD2FBC6D79E59323F7D923B6C2233273F7689EAAD81AD323F9E0C8E9257E7283F613255302AA9333FE527D53E1D8F293FAB5AD2510E66333F99F221A81ABD2A3F3D9E961FB8CA333F72512D228AC92B3F2A5437177FDB333F261C7A8B87F72C3F3D9E961FB8CA333F240F441669E22D3FE02BBAF59A1E343FFA60191BBAD92F3FF5824F73F222333F8EB1135E82532F3F3FABCC94D6DF323FFA60191BBAD92F3FF78F85E81038323FFA60191BBAD92F3FE7525C55F65D313F0EF450DB8651303F
1F317A6EA12B313FB345D26EF4312F3FD5CC5A0A48FB2F3F6B2A8BC22E8A2E3F46D26EF4311F303F92CB7F48BF7D2D3F4489963C9E962F3FBC79AA436E862B3F691D554D10752F3FE31A9FC9FE792A3F1FF5D72B2CB82F3FC093162EABB0293FD7D9907F66102F3FA019C40776FC273F33880FECF82F303F315D88D51F61283FE95F92CA1473303F6859F78F85E8203F46D26EF4311F303F664CC11A67D3213F581CCEFC6A0E303FF5824F73F222233F6B2A8BC22E8A2E3F3FABCC94D6DF223F48A30227DBC02D3FAB5AD2510E66233F92CB7F48BF7D2D3F3D9E961FB8CA233F95D8B5BDDD922C3FA94D9CDCEF50243F95D8B5BDDD922C3FF168E388B5F8243FDCF3FC69A33A2D3FEC4E779E78CE263FB96C74CE4F712C3FEA4141295AB9273F261C7A8B87F72C3F56F146E6913F283F261C7A8B87F72C3F0CC9C9C4AD82283FDE0033DFC14F2C3F97E5EB32FCA72B3FB96C74CE4F712C3FDE0033DFC14F2C3F4DBD6E1118EB2B3F240F441669E22D3FBE86E0B88C9B2A3F8EB1135E82532F3FBE86E0B88C9B2A3F213EB0E3BF40303F52D7DAFB54152A3F6859F78F85E8303F52D7DAFB54152A3F560F98874CF9303FC093162EABB0293F6859F78F85E8303F9E0C8E9257E7283F317BD976DA1A313F56F146E6913F283F1F317A6EA12B313F13F06B2409C2253F6859F78F85E8303F3A9160AA99B5243F8EB1135E82532F3F3A9160AA99B5243FB345D26EF4312F3F3FABCC94D6DF223F1FF5D72B2CB82F3FAE6708C72C7B223F46D26EF4311F303F1A170E846401233FE95F92CA1473303F3FABCC94D6DF223FC5CBD3B9A294303FF78F85E81038223F7BA35698BED7303FAE6708C72C7B223F1F317A6EA12B313FF78F85E81038223F6859F78F85E8303FFA9CBB5D2F4D213F560F98874CF9303FB28174B169A5203FE7525C55F65D313FFCA9F1D24D62203F8BE07F2BD9B1313F1FF5D72B2CB81F3F41B8020AF5F4313F213EB0E3BF40203FC0B167CF656A323FFCA9F1D24D62203F7689EAAD81AD323F691D554D10751F3FF5824F73F222333F691D554D10751F3FAB5AD2510E66333F90BE49D3A0681E3FAB5AD2510E66333F2829B000A60C1C3F4FE8F527F1B9333F2829B000A60C1C3F180AD80E46EC333F4BB0389CF9D51C3FBB97FBE42840343F4BB0389CF9D51C3F4DDBBFB2D2A4343F4FCAA48636001B3F3A9160AA99B5343F9E0C8E9257E7183FB98AC56F0A2B353FC5AD8218E8DA173F5D18E945ED7E353FE7340BB43BA4183F01A60C1CD0D2353FC5AD8218E8DA173FB77D8FFAEB15363F3677F4BF5C8B163F3677F4BF5C8B363F5D18E945ED7E153FEC4E779E78CE363F5D18E945ED7E153FC7BAB88D06F0363F3A9160AA99B5143FB5705985CD00373FAB5AD2510E66133F
58FE7C5BB054373FF5824F73F222133F58FE7C5BB054373FCEE15AED612F143F21205F420587373FCEE15AED612F143FD7F7E12021CA373F3FABCC94D6DF123FD7F7E12021CA373FD2FBC6D79E59123F1F1329CDE671383FD2FBC6D79E59123FB056ED9A90D6383F664CC11A67D3113FB056ED9A90D6383F8DEDB5A0F7C6103F693BA6EECA2E383F05A2276552430B3F315D88D51F61383FC093162EABB0093F315D88D51F61383F3677F4BF5C8B063F7B8505F7031E383F180AD80E46EC033FD7F7E12021CA373F664CC11A67D3F13E6B48DC63E943373F180AD80E46ECF33EFE98D6A6B1BD363F8DEDB5A0F7C6E03EDC114E0B5EF4353F691D554D1075EF3E263ACB2C42B1353F54E41071732AE93E263ACB2C42B1353F54E41071732AD93E94F6065F984C353F54E41071732AD93E4ACE893DB48F353F8DEDB5A0F7C6E0BECCD42478431A353FA226FA7C9411E7BECCD42478431A353F180AD80E46ECF3BE15FDA19927D7343FF168E388B5F8F4BE15FDA19927D7343FB75F3E59315CFDBE84B9DDCB7D72343F90BE49D3A068FEBEF37519FED30D343F54E41071732AF9BE2A5437177FDB333F2D431CEBE236FABE05C078060DFD333FAB5AD2510E6603BF99107349D576333F180AD80E46EC03BF99107349D576333FA226FA7C941107BF7689EAAD81AD323FA226FA7C941107BF7689EAAD81AD323F54E41071732A09BFE54526E0D748323FC093162EABB009BF1C2444F98216323F4BB0389CF9D50CBFF78F85E81038323F691D554D10750FBFF78F85E81038323FB0743E3C4B9011BFC0B167CF656A323FFA9CBB5D2F4D11BF7689EAAD81AD323FD2FBC6D79E5912BFBEA4315A4755333F8DEDB5A0F7C610BF86C613419C87333F89D349B6BA9C12BF180AD80E46EC333FD2FBC6D79E5912BFF37519FED30D343FAB5AD2510E6613BF180AD80E46EC333F3A9160AA99B514BFCEE15AED612F343F3A9160AA99B514BFA94D9CDCEF50343F315D88D51F6118BF05C078060DFD333FE7340BB43BA418BF05C078060DFD333F766B990CC7F319BF86C613419C87333FBC79AA436E861BBF86C613419C87333F4BB0389CF9D51CBF613255302AA9333F4BB0389CF9D51CBF180AD80E46EC333F72512D228AC91BBFA94D9CDCEF50343F95D8B5BDDD921CBFF37519FED30D343F90BE49D3A0681EBFA94D9CDCEF50343FFCA9F1D24D6220BF97033DD4B661343F6859F78F85E820BFBB97FBE42840343FD508FD4CBD6E21BF4DDBBFB2D2A4343FD2FBC6D79E5922BFF168E388B5F8343FD2FBC6D79E5922BFCCD42478431A353F3FABCC94D6DF22BFF168E388B5F8343F86C613419C8723BF94F6065F984C353F5F251FBB0B9424BFC9C7EE022505363F5F251FBB0B9424BFB77D8FFAEB15363F
F168E388B5F824BF01A60C1CD0D2353F38842A357BA025BF01A60C1CD0D2353FA43330F2B22626BFC9C7EE022505363F11E335AFEAAC26BFDC114E0B5EF4353FA226FA7C941127BF13F06B2409C2353F58FE7C5BB05427BF13F06B2409C2353FC5AD8218E8DA27BF4ACE893DB48F353FA019C40776FC27BF5D18E945ED7E353F0CC9C9C4AD8228BFEE5BAD1397E3353F2F505260014C29BFEE5BAD1397E3353F745E6397A8DE2ABF263ACB2C42B1353F4FCAA48636002BBF38842A357BA0353F7044F7AC6BB42CBF03B34291EEE7343F4BB0389CF9D52CBFF168E388B5F8343FDAE6C6F484252EBF284701A260C6343FB55208E412472EBF3A9160AA99B5343FD7D9907F66102FBF84B9DDCB7D72343FD7D9907F66102FBF97033DD4B661343F0EF450DB865130BFCEE15AED612F343FA03715A930B630BF3D9E961FB8CA333FC5CBD3B9A29430BF99107349D576333F560F98874CF930BF643F8BA548BE323F43C5387F130A31BFAE6708C72C7B323FC5CBD3B9A29430BF41B8020AF5F4313FC5CBD3B9A29430BF8BE07F2BD9B1313F46D26EF4311F30BF317BD976DA1A313FA03715A930B630BF6859F78F85E8303FB28174B169A530BF8DEDB5A0F7C6303F213EB0E3BF4030BFC5CBD3B9A294303F213EB0E3BF4030BFC5CBD3B9A294303FB28174B169A530BF0EF450DB8651303F317BD976DA1A31BFFCA9F1D24D62303FB0743E3C4B9031BF581CCEFC6A0E303F664CC11A67D331BFB345D26EF4312F3F41B8020AF5F431BFD7D9907F66102F3FD2FBC6D79E5932BF6B2A8BC22E8A2E3FD2FBC6D79E5932BF240F441669E22D3F1C2444F9821632BF4BB0389CF9D52C3FE54526E0D74832BF7044F7AC6BB42C3F78962023A0C231BF0395F1EF332E2C3F9D2ADF3312A131BF0395F1EF332E2C3F41B8020AF5F431BFE31A9FC9FE792A3F41B8020AF5F431BF766B990CC7F3293FE54526E0D74832BF54E41071732A293F1C2444F9821632BFE7340BB43BA4283FE54526E0D74832BF7B8505F7031E283F2F6EA301BC0532BFEA4141295AB9273F540262122EE431BFEA4141295AB9273FB0743E3C4B9031BFEA4141295AB9273F43C5387F130A31BFEA4141295AB9273F8DEDB5A0F7C630BFEC4E779E78CE263F7BA35698BED730BF5D18E945ED7E253F33880FECF82F30BF5D18E945ED7E253F1FF5D72B2CB82FBF15FDA19927D7243FD5CC5A0A48FB2FBF84B9DDCB7D72243FD5CC5A0A48FB2FBFA94D9CDCEF50243FB345D26EF4312FBF180AD80E46EC233FD7D9907F66102FBF3D9E961FB8CA233FB55208E412472EBF3A9160AA99B5243F240F441669E22DBF3A9160AA99B5243F4BB0389CF9D52CBFA7406667D13B253F4DBD6E1118EB2BBFCCD42478431A253FE10D6954E0642BBF
CCD42478431A253F745E6397A8DE2ABF180AD80E46EC233FE10D6954E0642BBF3D9E961FB8CA233F99F221A81ABD2ABF1A170E846401233F2A36E675C4212BBF643F8BA548BE223F4FCAA48636002BBFD2FBC6D79E59223FE527D53E1D8F29BF664CC11A67D3213FE527D53E1D8F29BF1F317A6EA12B213FE7340BB43BA428BF1C2444F98216223F336ABE4A3E7627BFF78F85E81038223F809F71E1404826BF89D349B6BA9C223FC9C7EE02250526BF89D349B6BA9C223F38842A357BA025BF8BE07F2BD9B1213FC9C7EE02250526BFD508FD4CBD6E213FEE5BAD1397E325BF8DEDB5A0F7C6203F5B0BB3D0CE6926BF1FF5D72B2CB81F3F82ACA7565F5D25BF240F441669E21D3F38842A357BA025BF240F441669E21D3F15FDA19927D724BF72512D228AC91B3FF168E388B5F824BF72512D228AC91B3FA94D9CDCEF5024BF05A2276552431B3F3D9E961FB8CA23BFBC79AA436E861B3FD0EE9062804423BFDE0033DFC14F1C3F3FABCC94D6DF22BFDE0033DFC14F1C3FF78F85E8103822BFE31A9FC9FE791A3F89D349B6BA9C22BF2D431CEBE2361A3F5F251FBB0B9424BFE31A9FC9FE791A3FA7406667D13B25BFC093162EABB0193F38842A357BA025BF315D88D51F61183F82ACA7565F5D25BFC5AD8218E8DA173F3A9160AA99B524BF7B8505F7031E083F3A9160AA99B524BF7B8505F7031E083F82ACA7565F5D25BF05A2276552430B3F38842A357BA025BFFA9CBB5D2F4D113FA43330F2B22626BF84B9DDCB7D72143FA226FA7C941127BF58FE7C5BB054173F58FE7C5BB05427BF54E41071732A193F0FD6FF39CC9727BF0ABC934F8F6D193F7B8505F7031E28BF0FD6FF39CC97173F7978CF81E50829BF13F06B2409C2153F2F505260014C29BF3FABCC94D6DF123F99F221A81ABD2ABF43C5387F130A113FE10D6954E0642BBFC093162EABB0093FDE0033DFC14F2CBF213EB0E3BF40003FDCF3FC69A33A2DBF54E41071732AC93E240F441669E22DBFA226FA7C9411F7BEDAE6C6F484252EBF7B8505F7031E08BFDAE6C6F484252EBFCEE15AED612F14BFDAE6C6F484252EBFA7406667D13B15BFBE86E0B88C9B2ABFA7406667D13B15BF7B8505F7031E28BF13F06B2409C215BF315D88D51F6128BF0FD6FF39CC9717BF56F146E6913F28BFE7340BB43BA418BFE7340BB43BA428BFE7340BB43BA418BF2F505260014C29BF0FD6FF39CC9717BF9BFF571D39D229BFC5AD8218E8DA17BF2D431CEBE2362ABF9E0C8E9257E718BF08AF5DDA70582ABF0ABC934F8F6D19BFE10D6954E0642BBF2D431CEBE2361ABF0395F1EF332E2CBF2829B000A60C1CBF4DBD6E1118EB2BBF95D8B5BDDD921CBF95D8B5BDDD922CBF4696CCB1BCAB1EBF7044F7AC6BB42CBFD5CC5A0A48FB1FBF
0395F1EF332E2CBFB28174B169A520BFDE0033DFC14F2CBFD508FD4CBD6E21BF7044F7AC6BB42CBF41B8020AF5F421BFB96C74CE4F712CBF1C2444F9821622BF4DBD6E1118EB2BBF3FABCC94D6DF22BF4DBD6E1118EB2BBF1A170E84640123BFE10D6954E0642BBF613255302AA923BF2A36E675C4212BBFCEE15AED612F24BFBC79AA436E862BBF5F251FBB0B9424BF4FCAA48636002BBF82ACA7565F5D25BF4FCAA48636002BBF13F06B2409C225BFBC79AA436E862BBF5B0BB3D0CE6926BFBC79AA436E862BBF3677F4BF5C8B26BF99F221A81ABD2ABF11E335AFEAAC26BF2D431CEBE2362ABF7D923B6C223327BFE527D53E1D8F29BFA019C40776FC27BFC093162EABB029BF0CC9C9C4AD8228BF54E41071732A29BF54E41071732A29BF54E41071732A29BFE527D53E1D8F29BFC3A04CA3C9C528BFE527D53E1D8F29BFA019C40776FC27BF766B990CC7F329BFC5AD8218E8DA27BF766B990CC7F329BFC7BAB88D06F026BFE527D53E1D8F29BFEE5BAD1397E325BF766B990CC7F329BF13F06B2409C225BF08AF5DDA70582ABFC7BAB88D06F026BF4FCAA48636002BBFA226FA7C941127BF05A2276552432BBF336ABE4A3E7627BF97E5EB32FCA72BBF0FD6FF39CC9727BF0395F1EF332E2CBF7D923B6C223327BF261C7A8B87F72CBFA226FA7C941127BF0188BB7A15192DBF809F71E1404826BF6D37C1374D9F2DBFC9C7EE02250526BF6D37C1374D9F2DBF84B9DDCB7D7224BFDAE6C6F484252EBF84B9DDCB7D7224BFDAE6C6F484252EBF613255302AA923BFB345D26EF4312FBFAE6708C72C7B22BFB345D26EF4312FBFB0743E3C4B9021BFFC6D4F90D8EE2EBF1F317A6EA12B21BFB345D26EF4312FBF6859F78F85E820BFB345D26EF4312FBF46D26EF4311F20BFFA60191BBAD92FBF46D26EF4311F20BF46D26EF4311F30BF43C5387F130A21BFFCA9F1D24D6230BF43C5387F130A21BFD71533C2DB8330BFB28174B169A520BF8DEDB5A0F7C630BFB28174B169A520BF6859F78F85E830BF46D26EF4311F20BF7BA35698BED730BFDAE6C6F484251EBFFA9CBB5D2F4D31BF95D8B5BDDD921CBF78962023A0C231BF240F441669E21DBF41B8020AF5F431BF0188BB7A15191DBF540262122EE431BFBC79AA436E861BBF0ADAE4F0492732BF4FCAA48636001BBF1C2444F9821632BF766B990CC7F319BF643F8BA548BE32BF2D431CEBE2361ABF1A170E84640133BF05A2276552431BBFBEA4315A475533BFC093162EABB019BFBEA4315A475533BF9E0C8E9257E718BF613255302AA933BFE7340BB43BA418BFBB97FBE4284034BF0FD6FF39CC9717BF726F7EC3448334BF54E41071732A19BFB98AC56F0A2B35BF315D88D51F6118BF6F62484E266E35BFA226FA7C941117BF
C9C7EE02250536BF315D88D51F6118BF92E9D0E9793736BF7B8505F7031E18BF92E9D0E9793736BF13F06B2409C215BF48C153C8957A36BF5D18E945ED7E15BF48C153C8957A36BF180AD80E46EC13BF5B0BB3D0CE6936BFAB5AD2510E6613BF5B0BB3D0CE6936BF89D349B6BA9C12BFFE98D6A6B1BD36BF89D349B6BA9C12BF232D95B7239C36BF90BE49D3A0680EBFB5705985CD0037BF99F221A81ABD0ABFC7BAB88D06F036BF3677F4BF5C8B06BF7D923B6C223337BFF168E388B5F804BF21205F42058737BF3677F4BF5C8B06BFEA4141295AB937BFAB5AD2510E6603BF7B8505F7031E38BF180AD80E46EC03BF44A7E7DD585038BFD2FBC6D79E5902BF44A7E7DD585038BF05A227655243FBBE0CC9C9C4AD8238BF2D431CEBE236FABEB056ED9A90D638BF2D431CEBE236FABEC3A04CA3C9C538BF05A227655243EBBE419AB1683A3B39BFA226FA7C9411E7BE419AB1683A3B39BF00000000000000807978CF81E50839BF8DEDB5A0F7C6C03E7978CF81E50839BF54E41071732AE93EF8713447567E39BF54E41071732AE93ED3DD7536E49F39BF691D554D1075EF3EC093162EABB039BFC9C7EE022505F63E662E7079AC1939BFFA9CBB5D2F4D013F54E41071732A39BF180AD80E46EC033FFA7E6ABC749338BF05A2276552430B3F7B8505F7031E38BFDE0033DFC14F0C3F693BA6EECA2E38BF691D554D10750F3FC5AD8218E8DA37BF8DEDB5A0F7C6103F0FD6FF39CC9737BF8DEDB5A0F7C6103F90DC9A745B2237BF8DEDB5A0F7C6103F48C153C8957A36BF1C2444F98216123FA43330F2B22636BF1C2444F98216123F13F06B2409C235BFD2FBC6D79E59123F6F62484E266E35BFCEE15AED612F143F6F62484E266E35BFC9C7EE022505163F15FDA19927D734BF58FE7C5BB054173F97033DD4B66134BF0FD6FF39CC97173FF37519FED30D34BFE7340BB43BA4183F99107349D57633BFE7340BB43BA4183F86C613419C8733BF05A2276552431B3F2C616D8C9DF032BF4BB0389CF9D51C3F9B1DA9BEF38B32BF72512D228AC91B3F2F6EA301BC0532BF4FCAA48636001B3F1C2444F9821632BF90BE49D3A0681E3F7689EAAD81AD32BF4696CCB1BCAB1E3F7689EAAD81AD32BF213EB0E3BF40203FF78F85E8103832BF8DEDB5A0F7C6203FF78F85E8103832BF643F8BA548BE223F1C2444F9821632BF1A170E846401233F8BE07F2BD9B131BFF5824F73F222233FFA9CBB5D2F4D31BFF168E388B5F8243F1F317A6EA12B31BF38842A357BA0253FD71533C2DB8330BFA7406667D13B253F46D26EF4311F30BF11E335AFEAAC263F691D554D10752FBF3677F4BF5C8B263FFF7A8505F7032EBF0FD6FF39CC97273FDCF3FC69A33A2DBF0FD6FF39CC97273F4BB0389CF9D52CBF
7978CF81E508293F0395F1EF332E2CBF2F505260014C293F97E5EB32FCA72BBFBE86E0B88C9B2A3F9BFF571D39D229BF0395F1EF332E2C3FE527D53E1D8F29BF0188BB7A15192D3FEA4141295AB927BFB96C74CE4F712C3FC5AD8218E8DA27BF72512D228AC92B3FE7340BB43BA428BFE31A9FC9FE792A3F315D88D51F6128BF9BFF571D39D2293F315D88D51F6128BF2F505260014C293FE7340BB43BA428BFA019C40776FC273FA019C40776FC27BFA019C40776FC273FA226FA7C941127BFEC4E779E78CE263F3677F4BF5C8B26BFEC4E779E78CE263F82ACA7565F5D25BF56F146E6913F283F15FDA19927D724BF315D88D51F61283FCEE15AED612F24BF9E0C8E9257E7283FAB5AD2510E6623BF7B8505F7031E283F1C2444F9821622BF7978CF81E508293FD71533C2DB8320BFC3A04CA3C9C5283FFCA9F1D24D6220BFC5AD8218E8DA273FFC6D4F90D8EE1EBFEA4141295AB9273F4696CCB1BCAB1EBF9E0C8E9257E7283FBC79AA436E861BBF7978CF81E508293FBC79AA436E861BBF97E5EB32FCA72B3FDE0033DFC14F1CBF95D8B5BDDD922C3F4FCAA48636001BBF4BB0389CF9D52C3F99F221A81ABD1ABFDAE6C6F484252E3FC5AD8218E8DA17BF21020EA14ACD2E3FEC4E779E78CE16BF6B2A8BC22E8A2E3FEC4E779E78CE16BF691D554D10752F3F13F06B2409C215BF581CCEFC6A0E303F613255302AA913BF33880FECF82F303F43C5387F130A11BFB345D26EF4312F3FFC6D4F90D8EE0EBFD7D9907F66102F3F90BE49D3A0680EBF4696CCB1BCAB2E3FF168E388B5F804BF6B2A8BC22E8A2E3F84B9DDCB7D7204BF4489963C9E962F3F691D554D1075FFBEFA60191BBAD92F3F691D554D1075FFBEFC6D4F90D8EE2E3F05A227655243FBBEFC6D4F90D8EE2E3F3FABCC94D6DFF2BE0188BB7A15192D3FF168E388B5F8E4BE0188BB7A15192D3FB75F3E59315CDDBEB96C74CE4F712C3F54E41071732AC9BEB96C74CE4F712C3F54E41071732AC93E97E5EB32FCA72B3FF168E388B5F8E43E0395F1EF332E2C3F54E41071732AE93E0188BB7A15192D3F664CC11A67D3F13E0188BB7A15192D3F691D554D1075FF3E6B2A8BC22E8A2E3F664CC11A67D3013F90BE49D3A0682E3F3FABCC94D6DF023F4489963C9E962F3F43C5387F130A113F8EB1135E82532F3F1C2444F98216123FFA60191BBAD92F3FD2FBC6D79E59123F213EB0E3BF40303F8DEDB5A0F7C6103F213EB0E3BF40303FDE0033DFC14F0C3FA03715A930B6303F240F441669E20D3F43C5387F130A313F240F441669E20D3F2F6EA301BC05323F691D554D10750F3F0ADAE4F04927323F8DEDB5A0F7C6103F0CE71A66683C313F1C2444F98216123F1F317A6EA12B313FF5824F73F222133F7BA35698BED7303F
C9C7EE022505163F7BA35698BED7303F10000000F5824F73F22213BFC7BAB88D06F026BFFA9CBB5D2F4D11BF11E335AFEAAC26BF3677F4BF5C8B06BFEE5BAD1397E325BF664CC11A67D3F1BE5D18E945ED7E25BF8DEDB5A0F7C6F0BE5F251FBB0B9424BF240F441669E20DBF5F251FBB0B9424BF90BE49D3A0680EBF5D18E945ED7E25BF213EB0E3BF4010BF13F06B2409C225BFFA9CBB5D2F4D11BF5D18E945ED7E25BFB0743E3C4B9011BF180AD80E46EC23BF3FABCC94D6DF12BF180AD80E46EC23BF3FABCC94D6DF12BF5F251FBB0B9424BF664CC11A67D311BFF168E388B5F824BFD2FBC6D79E5912BF38842A357BA025BFD2FBC6D79E5912BF5B0BB3D0CE6926BFF5824F73F22213BFC7BAB88D06F026BF06000000B75F3E59315CDDBE5F251FBB0B9424BFB75F3E59315CDDBEA7406667D13B25BF691D554D1075EF3ECCD42478431A25BFAB5AD2510E66033FA7406667D13B25BFAB5AD2510E66033F3A9160AA99B524BFB75F3E59315CDDBE5F251FBB0B9424BF')")
db.execute("insert into ne_50m_admin_0_countries (gid, name, name_long, iso_a2, iso_a3, geom) values (2, 'USA', 'United States', 'US', 'USA', ST_SetSRID('MULTIPOLYGON(((-123.6 49.6, -65.3 49.6, -65.3 24.0, -123.6 24.0, -123.6 49.6)))'::geometry, 4326))")
db.execute("insert into boxes (id, lon, lat, size, geom) values (1, 0, 0, 1, st_setsrid('polygon(( 0 0, 0 1, 1 1, 1 0, 0 0))'::geometry, 4326))")
db.execute("insert into boxes (id, lon, lat, size, geom) values (2, 0, -1, 1, st_setsrid('polygon(( 0 -1, 0 0, 1 0, 1 -1, 0 -1))'::geometry, 4326))")
db.execute("insert into boxes (id, lon, lat, size, geom) values (3, -1, -1, 1, st_setsrid('polygon((-1 -1, -1 0, 0 0, 0 -1, -1 -1))'::geometry, 4326))")
db.execute("insert into boxes (id, lon, lat, size, geom) values (4, -1, 0, 1, st_setsrid('polygon((-1 0, -1 1, 0 1, 0 0, -1 0))'::geometry, 4326))")
db.execute("insert into boxes (id, lon, lat, size, geom) values (5, -99, 39, 1, st_setsrid('polygon((-99 38, -99 39, -98 39, -98 38, -99 38))'::geometry, 4326))")
db.execute("insert into gpwv4_2015 (iso_a2, box_id, population, area) values ('XX', 1, 2000, 800)")
db.execute("insert into gpwv4_2015 (iso_a2, box_id, population, area) values ('XX', 2, 4000, 600)")
db.execute("insert into gpwv4_2015 (iso_a2, box_id, population, area) values ('XX', 3, 6000, 400)")
db.execute("insert into gpwv4_2015 (iso_a2, box_id, population, area) values ('XX', 4, 8000, 200)")
db.execute("insert into gpwv4_2015 (iso_a2, box_id, population, area) values ('US', 5, 17907, 9540)")
db.execute("insert into acs5yr_2015 (usps_code, box_id, population, area) values ('KS', 5, 17907, 9540)")
def test_guess_iso_a2(self):
get_iso3166 = lambda n: 'XX' if (n == 'ISO 3166') else None
get_iso3166_2 = lambda n: 'YY-YY' if (n == 'ISO 3166-2') else None
get_us_census = lambda n: '06001' if (n == 'US Census GEOID') else None
get_intl_src_path = lambda n: 'sources/xx/yy.json' if (n == 'source paths') else None
get_us_src_path = lambda n: 'sources/us/ca/oakland.json' if (n == 'source paths') else None
feature = unittest.mock.Mock()
feature.GetField = get_iso3166
self.assertEqual(calculate.guess_iso_a2(feature), 'XX')
feature.GetField = get_iso3166_2
self.assertEqual(calculate.guess_iso_a2(feature), 'YY')
feature.GetField = get_us_census
self.assertEqual(calculate.guess_iso_a2(feature), 'US')
feature.GetField = get_intl_src_path
self.assertEqual(calculate.guess_iso_a2(feature), 'XX')
feature.GetField = get_us_src_path
self.assertEqual(calculate.guess_iso_a2(feature), 'US')
def test_guess_state_abbrev(self):
get_us_census = lambda n: '06001' if (n == 'US Census GEOID') else None
get_intl_src_path = lambda n: 'sources/xx/yy.json' if (n == 'source paths') else None
get_us_src_path = lambda n: 'sources/us/ca/oakland.json' if (n == 'source paths') else None
feature = unittest.mock.Mock()
feature.GetField = get_us_census
self.assertEqual(calculate.guess_state_abbrev(feature), 'CA')
feature.GetField = get_intl_src_path
self.assertIsNone(calculate.guess_state_abbrev(feature))
feature.GetField = get_us_src_path
self.assertEqual(calculate.guess_state_abbrev(feature), 'CA')
def test_calculate(self):
def response_geojson(url, request):
if (request.method, url.hostname, url.path) == ('GET', 'results.openaddresses.io', '/index.json'):
return response(200, b'{"render_geojson_url": "http://data.openaddresses.io/render-world.geojson"}', headers={'Content-Type': 'application/json'})
if (request.method, url.hostname, url.path) == ('GET', 'data.openaddresses.io', '/render-world.geojson'):
null_geojson = '''{\n"type": "FeatureCollection",\n"features": [\n{ "type": "Feature", "properties": {"source count": 1, "name": "Null Island", "source dates": "2017-03-12 21:54:49.107291+00:00", "source paths": "sources/xx/countrywide.json", "ISO 3166": "XX", "ISO 3166-2": null, "US Census GEOID": null, "status": "good", "address count": 9990}, "geometry": { "type": "MultiPolygon", "coordinates": [ [ [ [ -0.000478, 0.000015 ], [ -0.000464, 0.000017 ], [ -0.000463, 0.000009 ], [ -0.000459, 0.000008 ], [ -0.000455, 0.000017 ], [ -0.000443, 0.000012 ], [ -0.000446, 0.000004 ], [ -0.000444, 0.0 ], [ -0.000433, 0.000003 ], [ -0.00043, -0.00001 ], [ -0.000438, -0.000013 ], [ -0.000438, -0.000016 ], [ -0.000428, -0.000017 ], [ -0.000429, -0.000038 ], [ -0.000438, -0.000038 ], [ -0.000435, -0.000043 ], [ -0.000445, -0.000052 ], [ -0.000449, -0.000049 ], [ -0.000455, -0.00005 ], [ -0.000455, -0.000057 ], [ -0.000469, -0.000057 ], [ -0.000469, -0.000049 ], [ -0.000474, -0.000049 ], [ -0.00048, -0.000055 ], [ -0.000489, -0.000047 ], [ -0.000488, -0.000044 ], [ -0.000496, -0.000034 ], [ -0.000491, -0.000028 ], [ -0.000491, -0.000026 ], [ -0.0005, -0.000024 ], [ -0.000496, -0.00001 ], [ -0.000492, -0.00001 ], [ -0.000492, -0.000006 ], [ -0.000495, -0.0 ], [ -0.000484, 0.000009 ], [ -0.000481, 0.000006 ], [ -0.000477, 0.000007 ], [ -0.000477, 0.000011 ], [ -0.000478, 0.000015 ] ] ], [ [ [ 0.000084, 0.000257 ], [ 0.000107, 0.000285 ], [ 0.000121, 0.000274 ], [ 0.000149, 0.000277 ], [ 0.000183, 0.00028 ], [ 0.000177, 0.000285 ], [ 0.00019, 0.0003 ], [ 0.000195, 0.000296 ], [ 0.000204, 0.000302 ], [ 0.000212, 0.000303 ], [ 0.000221, 0.000302 ], [ 0.000228, 0.000307 ], [ 0.000243, 0.000292 ], [ 0.000239, 0.000288 ], [ 0.000243, 0.000278 ], [ 0.000243, 0.000265 ], [ 0.000249, 0.000262 ], [ 0.000238, 0.000244 ], [ 0.000233, 0.000246 ], [ 0.000225, 0.000241 ], [ 0.00021, 0.00024 ], [ 0.000202, 0.000242 ], [ 0.000196, 0.000237 ], [ 0.000183, 0.000247 ], [ 0.000186, 0.000251 ], [ 
0.000129, 0.000246 ], [ 0.000136, 0.000245 ], [ 0.000146, 0.000233 ], [ 0.000144, 0.000227 ], [ 0.000148, 0.000225 ], [ 0.000151, 0.000218 ], [ 0.000155, 0.000218 ], [ 0.00016, 0.000223 ], [ 0.000174, 0.000217 ], [ 0.000181, 0.000221 ], [ 0.000185, 0.000221 ], [ 0.000187, 0.000216 ], [ 0.000211, 0.000217 ], [ 0.000216, 0.000213 ], [ 0.000228, 0.000203 ], [ 0.000239, 0.000203 ], [ 0.000248, 0.000199 ], [ 0.000258, 0.000199 ], [ 0.000259, 0.000196 ], [ 0.000258, 0.00019 ], [ 0.000261, 0.000185 ], [ 0.000262, 0.000166 ], [ 0.000258, 0.000158 ], [ 0.000239, 0.000158 ], [ 0.000238, 0.000144 ], [ 0.000242, 0.000141 ], [ 0.000246, 0.000145 ], [ 0.000251, 0.000144 ], [ 0.000253, 0.000139 ], [ 0.000257, 0.000141 ], [ 0.000262, 0.000139 ], [ 0.000258, 0.000132 ], [ 0.000259, 0.000127 ], [ 0.000265, 0.000125 ], [ 0.00027, 0.000121 ], [ 0.000274, 0.000124 ], [ 0.000281, 0.000125 ], [ 0.000285, 0.00012 ], [ 0.000292, 0.00012 ], [ 0.000296, 0.000116 ], [ 0.000296, 0.000107 ], [ 0.000301, 0.000107 ], [ 0.000304, 0.00011 ], [ 0.000309, 0.00011 ], [ 0.000315, 0.000103 ], [ 0.000316, 0.000095 ], [ 0.000323, 0.000091 ], [ 0.000328, 0.000094 ], [ 0.000333, 0.000091 ], [ 0.000337, 0.000086 ], [ 0.000344, 0.000082 ], [ 0.000348, 0.000082 ], [ 0.00035, 0.000079 ], [ 0.000351, 0.000074 ], [ 0.000356, 0.000073 ], [ 0.000356, 0.000077 ], [ 0.000359, 0.000077 ], [ 0.000363, 0.000072 ], [ 0.000363, 0.00007 ], [ 0.000373, 0.00007 ], [ 0.000379, 0.000068 ], [ 0.000379, 0.000064 ], [ 0.000369, 0.000052 ], [ 0.000372, 0.000049 ], [ 0.000372, 0.000043 ], [ 0.000368, 0.000038 ], [ 0.000363, 0.000017 ], [ 0.000355, 0.000019 ], [ 0.000347, 0.000008 ], [ 0.000335, 0.000015 ], [ 0.000331, 0.000012 ], [ 0.000331, 0.000006 ], [ 0.000325, 0.000006 ], [ 0.000329, -0.000008 ], [ 0.000322, -0.000011 ], [ 0.000322, -0.000019 ], [ 0.000318, -0.00002 ], [ 0.000318, -0.000028 ], [ 0.000312, -0.000029 ], [ 0.000306, -0.000024 ], [ 0.000303, -0.000025 ], [ 0.000305, -0.000037 ], [ 0.000297, -0.000038 ], [ 
0.000297, -0.000044 ], [ 0.000285, -0.000044 ], [ 0.000285, -0.000048 ], [ 0.000279, -0.000049 ], [ 0.000276, -0.000055 ], [ 0.000278, -0.00006 ], [ 0.000278, -0.000067 ], [ 0.000281, -0.000066 ], [ 0.000285, -0.00007 ], [ 0.000295, -0.000064 ], [ 0.000298, -0.000071 ], [ 0.000304, -0.00007 ], [ 0.000306, -0.000074 ], [ 0.000304, -0.000079 ], [ 0.000308, -0.000079 ], [ 0.00031, -0.000093 ], [ 0.000305, -0.000094 ], [ 0.000305, -0.000099 ], [ 0.000298, -0.000105 ], [ 0.000298, -0.00011 ], [ 0.0003, -0.00011 ], [ 0.000304, -0.000106 ], [ 0.00031, -0.000109 ], [ 0.000306, -0.000116 ], [ 0.00031, -0.000125 ], [ 0.000311, -0.000129 ], [ 0.000309, -0.000133 ], [ 0.000315, -0.00014 ], [ 0.00032, -0.00014 ], [ 0.000322, -0.000144 ], [ 0.00032, -0.000149 ], [ 0.000325, -0.000157 ], [ 0.000336, -0.000157 ], [ 0.000337, -0.00016 ], [ 0.000333, -0.000165 ], [ 0.000333, -0.000169 ], [ 0.000336, -0.000173 ], [ 0.000335, -0.000176 ], [ 0.000332, -0.000178 ], [ 0.000332, -0.000182 ], [ 0.000329, -0.000183 ], [ 0.000328, -0.000187 ], [ 0.000334, -0.000193 ], [ 0.000334, -0.000205 ], [ 0.000331, -0.000206 ], [ 0.00033, -0.000219 ], [ 0.000319, -0.00022 ], [ 0.00032, -0.00023 ], [ 0.000317, -0.000231 ], [ 0.000316, -0.000237 ], [ 0.000312, -0.000237 ], [ 0.000311, -0.000249 ], [ 0.000308, -0.000255 ], [ 0.000302, -0.000253 ], [ 0.000297, -0.000259 ], [ 0.000286, -0.00026 ], [ 0.000282, -0.000253 ], [ 0.000274, -0.000253 ], [ 0.00027, -0.000246 ], [ 0.000261, -0.000255 ], [ 0.000258, -0.000254 ], [ 0.000256, -0.000248 ], [ 0.000253, -0.000248 ], [ 0.000253, -0.000254 ], [ 0.000249, -0.000261 ], [ 0.00025, -0.000268 ], [ 0.000245, -0.000272 ], [ 0.000238, -0.000274 ], [ 0.000237, -0.00028 ], [ 0.000233, -0.00028 ], [ 0.000228, -0.000276 ], [ 0.00022, -0.000279 ], [ 0.000219, -0.000271 ], [ 0.000215, -0.000269 ], [ 0.000215, -0.000274 ], [ 0.000202, -0.000274 ], [ 0.000198, -0.000279 ], [ 0.000192, -0.000276 ], [ 0.000188, -0.000279 ], [ 0.000184, -0.000275 ], [ 0.000181, -0.000273 ], [ 
0.000181, -0.000268 ], [ 0.000181, -0.00026 ], [ 0.000181, -0.000256 ], [ 0.000174, -0.000257 ], [ 0.000164, -0.000247 ], [ 0.000164, -0.000242 ], [ 0.000159, -0.000244 ], [ 0.000156, -0.000244 ], [ 0.000155, -0.000238 ], [ 0.000152, -0.000237 ], [ 0.000151, -0.000231 ], [ 0.000158, -0.000228 ], [ 0.000158, -0.00022 ], [ 0.000162, -0.000213 ], [ 0.000161, -0.000209 ], [ 0.000161, -0.000205 ], [ 0.000152, -0.000209 ], [ 0.000151, -0.000204 ], [ 0.000145, -0.000207 ], [ 0.000143, -0.000206 ], [ 0.00014, -0.000195 ], [ 0.000136, -0.000195 ], [ 0.000131, -0.000188 ], [ 0.000138, -0.000179 ], [ 0.000139, -0.00017 ], [ 0.000142, -0.000168 ], [ 0.000142, -0.000165 ], [ 0.000135, -0.000168 ], [ 0.000133, -0.000167 ], [ 0.000128, -0.000171 ], [ 0.000121, -0.000163 ], [ 0.000114, -0.000165 ], [ 0.000114, -0.000159 ], [ 0.000106, -0.00016 ], [ 0.000106, -0.000155 ], [ 0.000104, -0.000151 ], [ 0.000105, -0.000147 ], [ 0.000108, -0.000144 ], [ 0.000108, -0.000139 ], [ 0.000101, -0.000142 ], [ 0.0001, -0.000157 ], [ 0.000101, -0.000162 ], [ 0.000098, -0.000165 ], [ 0.000093, -0.000163 ], [ 0.000091, -0.000158 ], [ 0.000046, -0.000158 ], [ 0.000046, -0.000163 ], [ 0.000052, -0.000165 ], [ 0.000066, -0.000169 ], [ 0.000078, -0.000176 ], [ 0.000089, -0.000178 ], [ 0.000096, -0.00018 ], [ 0.000097, -0.000184 ], [ 0.00009, -0.000191 ], [ 0.000083, -0.000193 ], [ 0.000072, -0.000204 ], [ 0.000065, -0.000209 ], [ 0.000049, -0.000216 ], [ 0.000031, -0.000223 ], [ 0.000003, -0.000228 ], [ -0.000022, -0.00023 ], [ -0.000046, -0.00023 ], [ -0.000077, -0.00023 ], [ -0.000081, -0.000203 ], [ -0.000081, -0.000184 ], [ -0.000083, -0.000186 ], [ -0.00009, -0.000185 ], [ -0.000094, -0.000188 ], [ -0.000094, -0.000193 ], [ -0.00009, -0.000197 ], [ -0.000091, -0.0002 ], [ -0.000095, -0.000201 ], [ -0.000097, -0.000209 ], [ -0.0001, -0.000215 ], [ -0.000107, -0.000213 ], [ -0.000109, -0.000218 ], [ -0.000117, -0.000219 ], [ -0.000122, -0.000215 ], [ -0.000127, -0.000216 ], [ -0.000133, -0.000219 ], 
[ -0.000137, -0.000217 ], [ -0.000138, -0.000213 ], [ -0.000144, -0.000213 ], [ -0.000145, -0.000209 ], [ -0.00015, -0.000207 ], [ -0.000154, -0.00021 ], [ -0.000157, -0.000206 ], [ -0.000163, -0.000206 ], [ -0.000166, -0.00021 ], [ -0.000171, -0.00021 ], [ -0.000172, -0.000204 ], [ -0.000173, -0.0002 ], [ -0.000177, -0.000195 ], [ -0.000183, -0.000196 ], [ -0.000187, -0.000192 ], [ -0.000192, -0.000192 ], [ -0.000195, -0.000189 ], [ -0.000195, -0.000183 ], [ -0.000198, -0.000182 ], [ -0.000198, -0.000175 ], [ -0.000195, -0.000167 ], [ -0.000198, -0.000166 ], [ -0.000201, -0.000175 ], [ -0.000206, -0.000176 ], [ -0.000208, -0.000179 ], [ -0.000211, -0.00018 ], [ -0.000215, -0.000177 ], [ -0.000221, -0.000176 ], [ -0.000222, -0.00017 ], [ -0.000226, -0.000168 ], [ -0.000226, -0.000156 ], [ -0.00023, -0.000156 ], [ -0.00023, -0.00015 ], [ -0.000238, -0.000141 ], [ -0.000238, -0.000134 ], [ -0.000236, -0.000131 ], [ -0.000238, -0.000129 ], [ -0.000238, -0.000123 ], [ -0.000243, -0.000123 ], [ -0.000246, -0.00013 ], [ -0.00025, -0.00013 ], [ -0.000252, -0.000127 ], [ -0.000256, -0.000127 ], [ -0.000258, -0.000123 ], [ -0.000257, -0.000115 ], [ -0.000264, -0.000109 ], [ -0.000271, -0.000114 ], [ -0.000274, -0.000111 ], [ -0.000273, -0.000105 ], [ -0.000277, -0.000103 ], [ -0.000276, -0.000099 ], [ -0.000286, -0.0001 ], [ -0.00029, -0.000104 ], [ -0.000295, -0.000098 ], [ -0.000295, -0.000095 ], [ -0.0003, -0.000094 ], [ -0.000309, -0.00009 ], [ -0.000313, -0.000096 ], [ -0.000323, -0.000093 ], [ -0.000327, -0.000088 ], [ -0.000336, -0.000093 ], [ -0.000339, -0.000092 ], [ -0.000339, -0.000083 ], [ -0.000343, -0.000082 ], [ -0.000343, -0.000076 ], [ -0.000342, -0.000074 ], [ -0.000342, -0.000071 ], [ -0.000347, -0.000071 ], [ -0.000345, -0.000058 ], [ -0.000351, -0.000051 ], [ -0.00035, -0.000043 ], [ -0.000354, -0.00004 ], [ -0.000359, -0.000043 ], [ -0.000362, -0.000037 ], [ -0.000368, -0.000038 ], [ -0.000371, -0.000035 ], [ -0.000371, -0.000026 ], [ -0.000374, 
-0.000025 ], [ -0.000379, -0.000025 ], [ -0.000378, -0.000013 ], [ -0.000385, -0.000011 ], [ -0.000385, -0.0 ], [ -0.000382, 0.000002 ], [ -0.000382, 0.000012 ], [ -0.000389, 0.000012 ], [ -0.000391, 0.000015 ], [ -0.000392, 0.000021 ], [ -0.000383, 0.000033 ], [ -0.000384, 0.000038 ], [ -0.000375, 0.000052 ], [ -0.000368, 0.000054 ], [ -0.000369, 0.00006 ], [ -0.000364, 0.000064 ], [ -0.00036, 0.000064 ], [ -0.000353, 0.000064 ], [ -0.000343, 0.000069 ], [ -0.000338, 0.000069 ], [ -0.000332, 0.00007 ], [ -0.000327, 0.000077 ], [ -0.000327, 0.000084 ], [ -0.000318, 0.000089 ], [ -0.000311, 0.00009 ], [ -0.000306, 0.000094 ], [ -0.000297, 0.000094 ], [ -0.000298, 0.000104 ], [ -0.000289, 0.00011 ], [ -0.000283, 0.000106 ], [ -0.000275, 0.000103 ], [ -0.000276, 0.000116 ], [ -0.000285, 0.000117 ], [ -0.000285, 0.000124 ], [ -0.000278, 0.000128 ], [ -0.000278, 0.000143 ], [ -0.000276, 0.000145 ], [ -0.00027, 0.000146 ], [ -0.000264, 0.00016 ], [ -0.000262, 0.000165 ], [ -0.000252, 0.000162 ], [ -0.000246, 0.000173 ], [ -0.00024, 0.000172 ], [ -0.000229, 0.00018 ], [ -0.000223, 0.00018 ], [ -0.00022, 0.000191 ], [ -0.000215, 0.000193 ], [ -0.000211, 0.000203 ], [ -0.000197, 0.000215 ], [ -0.000195, 0.000222 ], [ -0.000181, 0.000217 ], [ -0.000182, 0.000212 ], [ -0.000188, 0.000202 ], [ -0.000186, 0.000197 ], [ -0.000186, 0.000193 ], [ -0.000188, 0.000183 ], [ -0.000183, 0.000183 ], [ -0.000176, 0.000174 ], [ -0.000172, 0.000174 ], [ -0.000163, 0.000185 ], [ -0.000159, 0.000186 ], [ -0.000154, 0.00019 ], [ -0.000148, 0.000184 ], [ -0.000138, 0.000191 ], [ -0.000126, 0.000189 ], [ -0.000125, 0.000182 ], [ -0.000118, 0.000181 ], [ -0.000117, 0.00019 ], [ -0.000105, 0.000191 ], [ -0.000105, 0.000211 ], [ -0.000108, 0.000218 ], [ -0.000103, 0.00022 ], [ -0.000102, 0.00023 ], [ -0.000091, 0.000235 ], [ -0.000087, 0.000233 ], [ -0.000087, 0.00024 ], [ -0.000083, 0.000245 ], [ -0.000075, 0.000247 ], [ -0.000065, 0.000238 ], [ -0.000059, 0.000237 ], [ -0.000058, 0.000234 ], [ 
-0.00004, 0.000233 ], [ -0.000039, 0.000241 ], [ -0.00003, 0.000243 ], [ -0.00003, 0.000236 ], [ -0.000026, 0.000236 ], [ -0.000018, 0.000222 ], [ -0.00001, 0.000222 ], [ -0.000007, 0.000217 ], [ -0.000003, 0.000217 ], [ 0.000003, 0.000211 ], [ 0.00001, 0.000215 ], [ 0.000012, 0.000222 ], [ 0.000017, 0.000222 ], [ 0.00003, 0.000233 ], [ 0.000034, 0.000232 ], [ 0.000036, 0.000241 ], [ 0.000065, 0.000239 ], [ 0.000069, 0.000243 ], [ 0.00007, 0.000248 ], [ 0.000064, 0.000248 ], [ 0.000054, 0.000255 ], [ 0.000057, 0.00026 ], [ 0.000057, 0.000275 ], [ 0.00006, 0.000277 ], [ 0.000064, 0.000263 ], [ 0.000069, 0.000262 ], [ 0.000073, 0.000257 ], [ 0.000084, 0.000257 ] ], [ [ -0.000073, -0.000175 ], [ -0.000066, -0.000173 ], [ -0.000043, -0.000167 ], [ -0.000017, -0.000164 ], [ -0.000016, -0.000157 ], [ -0.000057, -0.000157 ], [ -0.000058, -0.000164 ], [ -0.000062, -0.000166 ], [ -0.000066, -0.000164 ], [ -0.000067, -0.000152 ], [ -0.000072, -0.000152 ], [ -0.000072, -0.000157 ], [ -0.000068, -0.00016 ], [ -0.00007, -0.000165 ], [ -0.00007, -0.000171 ], [ -0.000073, -0.000175 ] ], [ [ -0.000007, -0.000157 ], [ -0.000007, -0.000162 ], [ 0.000015, -0.000161 ], [ 0.000037, -0.000162 ], [ 0.000037, -0.000158 ], [ -0.000007, -0.000157 ] ] ] ] } }, { "type": "Feature", "properties": {"source count": 1, "name": "Null Ranch", "source dates": "2017-03-12 21:54:49.107291+00:00", "source paths": "sources/us/ks/null-ranch.json", "ISO 3166": null, "ISO 3166-2": null, "US Census GEOID": null, "status": "good", "address count": 9}, "geometry": { "type": "Polygon", "coordinates": [[[-99, 38], [-99, 39], [-98, 39], [-98, 38], [-99, 38]]] } }\n]\n}\n'''
return response(200, null_geojson.encode('utf8'), headers={'Content-Type': 'application/json'})
raise Exception()
with HTTMock(response_geojson):
calculate.calculate(DATABASE_URL)
with psycopg2.connect(DATABASE_URL) as conn:
with conn.cursor() as db:
db.execute('select iso_a2, addr_count, area_total, area_pct, pop_total, pop_pct from areas order by iso_a2')
(row1, row2) = db.fetchall()
self.assertEqual(row1, ('US', 9, 9540, 1.0, 17907, 1.0))
self.assertEqual(row2, ('XX', 9990, 2000, 1.0, 20000, 1.0))
| [
"mike@teczno.com"
] | mike@teczno.com |
03d238f1b9ccba256f82bd150d054bb5982e40d6 | c4362b27e969f5ffb1b39c85fc212198de91b646 | /extractLegalOpinionsCstmMetadata.py | 048f239378095a6344d1b548accb795462af0859 | [
"MIT"
] | permissive | OpenLawsGR/judgments2AKN | af8be80f931571b1c67fa957dff6c0ae76c67f9f | 0c6217349cde36058d5599800e289fdf0d3eaf23 | refs/heads/master | 2020-09-05T00:34:53.860129 | 2020-01-08T15:35:49 | 2020-01-08T15:35:49 | 219,934,916 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,896 | py | # -*- coding: utf-8 -*-
import os
import sys
import fnmatch
import argparse
from functions import extract_data_from_nsk
from variables import NSK_METADATA, NSK_CSTM_METADATA, NSK, TXT_EXT
from variables import LEGAL_TEXTS
from lxml import etree
program_description = 'A module for downloading specific data (e.g. keywords) '
program_description += 'from Legal Council of State official website. '
program_description += 'Extracted data may be used later to build some '
program_description += 'appropriate Akoma Ntoso metadata nodes.'
parser = argparse.ArgumentParser(
description = program_description
)
parser.add_argument(
'-fn',
metavar = 'FILENAME',
help = 'choose a specific legal opinion to extract data'
)
# create a namespace object
args = parser.parse_args()
if __name__ == '__main__':
source_path = os.path.join(
os.getcwd(),
os.path.join(
LEGAL_TEXTS,
NSK
)
)
if args.fn is not None:
file_pattern = '*' + args.fn
else:
file_pattern = '*' + TXT_EXT
#print(source_path)
# Create custom metadata folder if it does not exist
if not os.path.exists(source_path.replace(NSK, NSK_CSTM_METADATA)):
os.makedirs(source_path.replace(NSK, NSK_CSTM_METADATA))
for root, dirs, files in os.walk(source_path):
for name in files:
#print name
if fnmatch.fnmatch(name, file_pattern):
print name
# check metadata folder if meta_file exists
# open and get post parameters
meta_file_exists = os.path.isfile(
os.path.join(
source_path.replace(NSK, NSK_METADATA),
name
)
)
if meta_file_exists:
with open(
os.path.join(
source_path.replace(NSK, NSK_METADATA),
name
),
'r') as fin:
XML = etree.parse(fin)
#print XML.getroot().nsmap
XML_root = XML.getroot()
#print list(root.nsmap.values())[0]
try:
ns = list(XML_root.nsmap.values())[0]
protocolNumber = XML.findtext(
'//ns:protocolNumber',
namespaces = {'ns' : ns}
)
issueDate = XML.findtext(
'//ns:issueDate',
namespaces = {'ns' : ns}
)
except IndexError:
protocolNumber = XML.findtext(
'//protocolNumber'
)
issueDate = XML.findtext(
'//issueDate'
)
try:
issueYear = protocolNumber.split('/')[1]
except IndexError:
issueYear = issueDate.split('-')[0]
decisionNumber = protocolNumber.split('/')[0]
#print issueYear
#print decisionNumber
# Create POST url (based in NSK search form) and POST data
post_url ='http://www.nsk.gr/web/nsk/'
post_url +='anazitisi-gnomodoteseon'
post_url +='?p_p_id=nskconsulatories_WAR_nskplatformportlet'
post_url +='&p_p_lifecycle=0&p_p_state=normal&p_p_mode=view'
post_url +='&p_p_col_id=column-4&p_p_col_pos=2'
post_url +='&p_p_col_count=3'
#print post_url
post_data = {
"_nskconsulatories_WAR_nskplatformportlet_isSearch" : "1",
"_nskconsulatories_WAR_nskplatformportlet_inputSuggestionNo" : decisionNumber,
"_nskconsulatories_WAR_nskplatformportlet_inputDatefrom" : issueYear,
"_nskconsulatories_WAR_nskplatformportlet_consulState":"null"
}
extracted_data = extract_data_from_nsk(post_url, post_data)
#print extracted_data
if extracted_data:
# Create a custom element that will hold extracted data
custom_metadata = etree.Element("customMetadata")
keywords = etree.SubElement(custom_metadata, 'keywords')
cnt = 0
for keyword in extracted_data['keywords']:
# If its is not empty string
if keyword:
cnt += 1
keyword_elem = etree.SubElement(
keywords,
'keyword_' + str(cnt)
)
keyword_elem.text = keyword.strip()
chairman = etree.SubElement(custom_metadata, 'chairman')
chairman.text = extracted_data['chairman']
rapporteur = etree.SubElement(custom_metadata, 'rapporteur')
rapporteur.text = extracted_data['rapporteur']
status = etree.SubElement(custom_metadata, 'status')
status.text = extracted_data['status']
#print etree.tostring(
# custom_metadata,
# pretty_print=True,
# encoding="UTF-8"
# )
XmlTree = etree.ElementTree(custom_metadata)
# Write ElementTree to file
with open(
os.path.join(
source_path.replace(NSK, NSK_CSTM_METADATA),
name),
'w') as fin:
fin.write(
etree.tostring(
XmlTree,
pretty_print = True,
encoding = "UTF-8",
xml_declaration = True
)
)
| [
"plessas@ceid.upatras.gr"
] | plessas@ceid.upatras.gr |
81e8a9bdb7e89305a5eb4d8e265ea9bbcb1ec8e1 | d96764c85f86c580029232f226f8cc6bc5fde1e8 | /EyeQ1_33/utils/trainer.py | c2223c6bf323127c7378fecb83c0e624bddf8b1d | [] | no_license | QuantumZhang/FIQA_training_module | b93f775487b11ad172f4d1df57d59a8ad189d035 | 789c11e23d0a120d2f04006b3187f47d4300415d | refs/heads/main | 2023-02-09T14:01:51.707809 | 2021-01-04T06:31:03 | 2021-01-04T06:31:03 | 326,200,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,724 | py | import time
import torch
import numpy as np
import pandas as pd
from sklearn.metrics import cohen_kappa_score, confusion_matrix, accuracy_score, recall_score
from tqdm import tqdm
import torch.nn as nn
import csv
Label_Merge = ['fundus_image_qualified', 'fi_unqualified_disc-position',
'fi_unqualified_macular-position',
'fi_unqualified_focus-clearness', 'fi_unqualified_readable-range',
'fi_unqualified_others']
def show_message(y_tru, y_p, model='train'):
message = []
y_p[y_p > 0.5] = 1
y_p[y_p <= 0.5] = 0
acc = 0
kappa = 0
kappa_weight = [1.0, 1.0, 0.8, 1.2, 0.8, 1.2]
for i in range(y_p.shape[1]):
kappa_t = cohen_kappa_score(y_tru[:, i], y_p[:, i])
kappa += kappa_weight[i] * kappa_t
acc_t = accuracy_score(y_tru[:, i], y_p[:, i])
acc += acc_t
confu_t = confusion_matrix(y_tru[:, i], y_p[:, i])
recall_t = recall_score(y_tru[:, i], y_p[:, i])
recall0_t = recall_score(y_tru[:, i], y_p[:, i], pos_label=0)
message.append(kappa_t)
message.append(acc_t)
message.append(recall_t)
message.append(recall0_t)
print('label==>%s<==>%s<==的信息:'%(Label_Merge[i], model))
print('kappa:', kappa_t, '-----acc:', acc_t, '-----recall:', recall_t, '---recall0', recall0_t)
print('confusion_matrix:\n', confu_t)
return acc / y_p.shape[1], kappa / y_p.shape[1], message
# 定义训练类
def train_step(train_loader, model, epoch, optimizer, criterion, epochs, log_csv, cycle_scheduler= None):
# switch to train mode
model.train()
epoch_loss = 0.0
iters_per_epoch = len(train_loader)
y_tru = None
y_p = None
for step, (imagesA, imagesB, imagesC, labels) in enumerate(train_loader):
imagesA = imagesA.cuda()
imagesB = imagesB.cuda()
imagesC = imagesC.cuda()
if y_tru is None:
y_tru = np.array(labels)
else:
y_tru = np.vstack((y_tru, np.array(labels)))
labels = labels.float().cuda()
# labels = labels.cuda().long()
# labels = torch.tensor(labels).reshape(4,-1)
# labels = labels.reshape(labels.shape[0],1)
# labels = torch.zeros(labels.shape[0], 2).scatter_(1, labels, 1).cuda()
combine = model(imagesA)
combine = torch.sigmoid(combine)
# out_A, out_B, out_C, out_F, combine = model(imagesA, imagesB, imagesC)
# loss_x = criterion(out_A, labels)
# loss_y = criterion(out_B, labels)
# loss_z = criterion(out_C, labels)
# loss_c = criterion(out_F, labels)
loss_f = criterion(combine, labels)
lossValue = loss_f
# lossValue = loss_w[0]*loss_x+loss_w[1]*loss_y+loss_w[2]*loss_z+loss_w[3]*loss_c+loss_w[4]*loss_f
# writer.add_scalar('/epoch_loss', lossValue, step)
# pre = torch.cat((pre, combine), 0)
# tru = torch.cat((tru, labels.float()), 0)
y_pre = combine.detach().cpu().numpy()
if y_p is None:
y_p = np.array(y_pre)
else:
y_p = np.vstack((y_p, np.array(y_pre)))
optimizer.zero_grad()
lossValue.backward()
optimizer.step()
if cycle_scheduler is not None:
cycle_scheduler.batch_step()
epoch_loss += lossValue.item()
acc, kappa, message = show_message(y_tru, y_p, model='train')
with open(log_csv, 'a+') as f:
if epoch == 0:
csv_write = csv.writer(f)
data_row = ['epoch',
'qua_kappa', 'qua_acc', 'qua_recall', 'qua_recall0',
'disc_kappa', 'disc_acc', 'disc_recall', 'disc_recall0',
'macular_kappa', 'macular_acc', 'macular_recall', 'macular_recall0',
'clear_kappa', 'clear_acc', 'clear_recall', 'clear_recall0',
'read_kappa', 'read_acc', 'read_recall', 'read_recall0',
'others_kappa', 'others_acc', 'others_recall', 'others_recall0',
]
csv_write.writerow(data_row)
csv_write = csv.writer(f)
data_row = [epoch] + message
csv_write.writerow(data_row)
epoch_loss = epoch_loss / iters_per_epoch
return epoch_loss, acc, kappa
def validation_step(train_loader, model, epoch, optimizer, criterion, epochs, log_csv, cycle_scheduler=None):
# switch to train mode
model.eval()
epoch_loss = 0.0
iters_per_epoch = len(train_loader)
y_tru = None
y_p = None
for step, (imagesA, imagesB, imagesC, labels) in enumerate(train_loader):
imagesA = imagesA.cuda()
imagesB = imagesB.cuda()
imagesC = imagesC.cuda()
if y_tru is None:
y_tru = np.array(labels)
else:
y_tru = np.vstack((y_tru, np.array(labels)))
labels = labels.float().cuda()
# labels = labels.cuda().long()
# labels = torch.tensor(labels).reshape(4,-1)
# labels = labels.reshape(labels.shape[0],1)
# labels = torch.zeros(labels.shape[0], 2).scatter_(1, labels, 1).cuda()
with torch.no_grad():
combine = model(imagesA)
combine = torch.sigmoid(combine)
# out_A, out_B, out_C, out_F, combine = model(imagesA, imagesB, imagesC)
# loss_x = criterion(out_A, labels)
# loss_y = criterion(out_B, labels)
# loss_z = criterion(out_C, labels)
# loss_c = criterion(out_F, labels)
loss_f = criterion(combine, labels)
lossValue = loss_f
# lossValue = loss_w[0]*loss_x+loss_w[1]*loss_y+loss_w[2]*loss_z+loss_w[3]*loss_c+loss_w[4]*loss_f
# writer.add_scalar('/epoch_loss', lossValue, step)
# pre = torch.cat((pre, combine), 0)
# tru = torch.cat((tru, labels.float()), 0)
y_pre = combine.detach().cpu().numpy()
if y_p is None:
y_p = np.array(y_pre)
else:
y_p = np.vstack((y_p, np.array(y_pre)))
# optimizer.zero_grad()
# lossValue.backward()
# optimizer.step()
# cycle_scheduler.batch_step()
epoch_loss += lossValue.item()
acc, kappa, message = show_message(y_tru, y_p, model='val')
with open(log_csv, 'a+') as f:
if epoch == 0:
csv_write = csv.writer(f)
data_row = ['epoch',
'qua_kappa', 'qua_acc', 'qua_recall', 'qua_recall0',
'disc_kappa', 'disc_acc', 'disc_recall', 'disc_recall0',
'macular_kappa', 'macular_acc', 'macular_recall', 'macular_recall0',
'clear_kappa', 'clear_acc', 'clear_recall', 'clear_recall0',
'read_kappa', 'read_acc', 'read_recall', 'read_recall0',
'others_kappa', 'others_acc', 'others_recall', 'others_recall0',
]
csv_write.writerow(data_row)
csv_write = csv.writer(f)
data_row = [epoch] + message
csv_write.writerow(data_row)
epoch_loss = epoch_loss / iters_per_epoch
return epoch_loss, acc, kappa
#
# def validation_step(val_loader, model, criterion):
#
# # switch to train mode
# model.eval()
# epoch_loss = 0
# iters_per_epoch = len(val_loader)
# y_tru = None
# y_p = None
# for step, (imagesA, imagesB, imagesC, labels) in enumerate(val_loader):
# imagesA = imagesA.cuda()
# imagesB = imagesB.cuda()
# imagesC = imagesC.cuda()
#
# if y_tru is None:
# y_tru = np.array(labels)
# else:
# y_tru = np.vstack((y_tru, np.array(labels)))
#
# labels = labels.float().cuda()
# # _, _, _, _, outputs = model(imagesA, imagesB, imagesC)
# combine = model(imagesA)
# combine = torch.sigmoid(combine)
# with torch.no_grad():
# loss = criterion(combine, labels)
# epoch_loss += loss.item()
#
#
# y_pre = combine.detach().cpu().numpy()
# if y_p is None:
# y_p = np.array(y_pre)
# else:
# y_p = np.vstack((y_p, np.array(y_pre)))
#
# y_tru = y_tru.reshape((-1))
# y_p = y_p.reshape((-1))
# acc = show_message(y_tru, y_p)
# epoch_loss = epoch_loss / iters_per_epoch
# return epoch_loss, acc
def save_output(label_test_file, dataPRED, label_idx, save_file):
label_list = label_idx
n_class = len(label_list)
datanpPRED = np.squeeze(dataPRED.cpu().numpy())
df_tmp = pd.read_csv(label_test_file)
image_names = df_tmp["image"].tolist()
result = {label_list[i]: datanpPRED[:, i] for i in range(n_class)}
result['image_name'] = image_names
out_df = pd.DataFrame(result)
name_older = ['image_name']
for i in range(n_class):
name_older.append(label_list[i])
out_df.to_csv(save_file, columns=name_older)
def acc_mol(val_loader, model):
model.eval()
iters_per_epoch = len(val_loader)
pre_all = []
y_all = []
for step, (imagesA, imagesB, imagesC, labels) in enumerate(val_loader):
imagesA = imagesA.cuda()
imagesB = imagesB.cuda()
imagesC = imagesC.cuda()
labels = labels.cuda()
_, _, _, _, outputs = model(imagesA, imagesB, imagesC)
pre = outputs.argmax(dim=1)
pre_all.append(pre)
y_all.append(labels)
return pre_all, y_all
def save_file(model,loss, kappa, acc, recall,recall0,model_save_file, epoch):
torch.save({'state_dict': model.state_dict(), 'loss':loss,'kappa':kappa,
'acc': acc, 'recall':recall,'recall0':recall0,'epoch': epoch + 1}, model_save_file)
print('已保存模型至:',model_save_file)
| [
"noreply@github.com"
] | noreply@github.com |
4eb1da69c4b7f74b682db604e23967cb55b2f970 | e62606922d4820e90f5f8e59de1a3812cb05f14f | /Centralized BlockChain/secondClient.py | 578a7f8f85479375e6e0685023b8e976e51310f0 | [] | no_license | ozgunozerk/DistributedApplications | 8d0d647020a2856ede5938507d8a5b571d1df0a6 | f9d21174c62e037242a53ca5a6ba66e0d82d8193 | refs/heads/main | 2023-03-04T22:13:15.943790 | 2021-02-12T14:43:54 | 2021-02-12T14:43:54 | 338,344,208 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | import Pyro4
BTC = Pyro4.Proxy("PYRONAME:BTC")
acc1 = BTC.createAccount(100)
print(acc1)
bal = BTC.calculateBalance(acc1)
if bal > 20:
BTC.transfer(acc1, 1, -60)
BTC.printChain()
ETH = Pyro4.Proxy("PYRONAME:ETH")
e1 = ETH.createAccount(30)
print(e1)
bal = ETH.calculateBalance(e1)
if bal > 20:
ETH.transfer(e1, 1, -20)
ETH.printChain()
BTC.exchange(acc1, e1, ETH, 50)
BTC.printChain()
ETH.printChain() | [
"noreply@github.com"
] | noreply@github.com |
befaa9ced886f7bb04ea7ea29f60e173dddb39b8 | 3c660571c9c2b028a88b844bd8c376970bea9f4b | /loginRegister/models.py | 5098e0f9afd5240a604226763323970cbcd8771a | [] | no_license | felipesma/almacen | c1914b30b63d43ca2ed7bc87db4d63a8c0abfd44 | ba2ddbff635db0ae1be7becc1a4ef6520cdaebbf | refs/heads/main | 2023-08-18T14:00:12.886588 | 2021-10-26T15:52:00 | 2021-10-26T15:52:00 | 420,873,047 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,388 | py | from django.db import models
# Create your models here.
class UserManager(models.Manager):
def basic_validator(self, postData):
errors = {}
if postData['password'] != postData['password_confirmation']:
errors['password_match'] = "Las constraseñas no coinciden, favor reintente."
if len(postData['password']) < 8:
errors['len_password'] = "La contraseña debe tener al menos 8 carácteres."
return errors
class Usuario(models.Model):
nombre = models.CharField(max_length=255)
email = models.EmailField(max_length=255)
direccion = models.CharField(max_length=255)
telefono = models.CharField(max_length=15)
password = models.CharField(max_length=255)
objects = UserManager()
nivel = models.IntegerField(default=1)
class Categoria(models.Model):
nombre = models.CharField(max_length=50)
class Producto(models.Model):
producto = models.CharField(max_length=50)
precio = models.IntegerField()
categoria = models.ForeignKey(Categoria, related_name="productos", on_delete=models.CASCADE)
class Pedido(models.Model):
cliente = models.ForeignKey(Usuario, related_name="pedidos", on_delete=models.CASCADE)
productos = models.TextField(default='null')
total = models.IntegerField(default=0)
estado = models.IntegerField(default=1)
pago = models.IntegerField(default=1) | [
"felipem39@gmail.com"
] | felipem39@gmail.com |
434fad0eaa4c453385b4cd6adfaddfc88e0bb4e4 | 4db7e83f27a07c7838b80ab5cb25a01da4db7199 | /main.py | acea4071a2692d7355b771ff79fc5c632c99b704 | [
"MIT"
] | permissive | knowmetoowell/PUBG-API | 8f01a0bd65134f98e3d619bb25a27cdcba8a7022 | d26c4ebc054750cb2a28eba8feff00674e34f263 | refs/heads/main | 2023-06-09T18:26:04.271997 | 2021-07-02T13:35:34 | 2021-07-02T13:35:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,388 | py | import asyncio
import pymysql
import os
import sys
import json
import aiohttp
import importlib
import datetime
from math import trunc
from sanic import Sanic
from pytz import timezone
import sanic.response as response
from log_config import LOGGING_CONFIG
app = Sanic(__name__, log_config=LOGGING_CONFIG)
# Index-aligned platform tables: a platform is referenced everywhere by its
# integer index 0-4 (Steam, Kakao, XBOX, PS/PSN, Stadia).
platform_name = ["Steam","Kakao","XBOX","PS","Stadia"]
platform_site = ["steam","kakao","xbox","psn","stadia"]
DB_platform = ["Steam","Kakao","XBOX","PSN","Stadia"]
directory = os.path.dirname(os.path.abspath(__file__)).replace("\\","/")
# Load MySQL credentials from data/database.json.
db_f = open(f"{directory}/data/database.json",mode='r')
db = db_f.read()
db_f.close()
db_json = json.loads(db)
db_ip = db_json["mysql"]["ip"]
db_user = db_json["mysql"]["user"]
db_pw = db_json["mysql"]["password"]
db_name = db_json["mysql"]["database"]
connect = pymysql.connect(host=db_ip, user=db_user, password=db_pw,db=db_name, charset='utf8') # Load the client's PUBG API key.
cur = connect.cursor()
cur.execute("SELECT * from PUBG_BOT")
client_list = cur.fetchall()
pubg_token = client_list[0][2]
connect.close()
sys.path.append(directory + "/modules") # Make helper modules importable from other files.
p_info = importlib.import_module("player")
s_info = importlib.import_module("status")
# Default headers for every call to the official PUBG API.
header = {
    "Authorization": "Bearer " + pubg_token,
    "Accept": "application/vnd.api+json"
}
# Placeholder "last update" payload stored for newly-registered players.
sample_f = open(f"{directory}/data/last_update_sample.json",mode='r')
sample1 = json.loads(sample_f.read())
sample_f.close()
async def get_season(pubg_platform):
    """Return the id of the current PUBG season for the given platform index.

    Reads the cached season JSON from the SEASON_STATUS table (one column
    per platform) and picks the entry flagged as the current season.
    """
    conn = pymysql.connect(host=db_ip, user=db_user, password=db_pw, db='PUBG_BOT', charset='utf8')
    cursor = conn.cursor()
    cursor.execute(f"SELECT {DB_platform[pubg_platform]} FROM SEASON_STATUS")
    row = cursor.fetchone()
    seasons = json.loads(row[0])['data']
    for season in seasons:
        if season['attributes']['isCurrentSeason']:
            current_season = season
    return current_season['id']
def time_num(f_playtime):
    """Format a duration given in seconds as a Korean human-readable string.

    The seconds are interpreted as an offset from the UTC epoch, so the
    resulting datetime's month/day fields are 1-based — hence the ``-1``
    offsets below.  Durations of a year or more are not expected.

    Uses the stdlib ``datetime.timezone.utc`` instead of the third-party
    pytz package (pytz is deprecated and unnecessary for plain UTC).
    """
    playtime = datetime.datetime.fromtimestamp(f_playtime, datetime.timezone.utc)
    if playtime.month == 1:
        if playtime.day == 1:
            if playtime.hour == 0:
                if playtime.minute == 0: return f"{playtime.second}초"
                return f"{playtime.minute}분 {playtime.second}초"
            return f"{playtime.hour}시간 {playtime.minute}분 {playtime.second}초"
        return f"{playtime.day-1}일 {playtime.hour}시간 {playtime.minute}분 {playtime.second}초"
    return f"{playtime.month-1}달 {playtime.day-1}일 {playtime.hour}시간 {playtime.minute}분 {playtime.second}초"
@app.route("/api/PUBG/")
async def main(request):
return response.redirect("https://github.com/team-alpha-kr/PUBG-API")
@app.route("/api/PUBG/player")
async def player(request):
args = request.get_args(keep_blank_values=True)
if not ("nickname" in args): return response.json({'code':'01', 'msg':"Please write your nickname."}, status=400)
else: nickname = args['nickname'][0]
connect = pymysql.connect(host=db_ip, user=db_user, password=db_pw,db=db_name, charset='utf8')
cur = connect.cursor()
exist_nickname = pymysql.escape_string("SELECT EXISTS (SELECT name FROM player WHERE name=%s) as succees;")
cur.execute(exist_nickname,(nickname))
exist = cur.fetchone()
if exist[0]:
command = pymysql.escape_string("SELECT id, name, platform, last_update FROM player WHERE name=%s")
cur.execute(command,(nickname))
fetch = cur.fetchone()
connect.close()
data = {
"id":fetch[0],
"nickname":fetch[1],
"platform":fetch[2],
"lastupdate":json.loads(fetch[3])
}
return response.json(data,status=200)
else:
if not ("platform" in args): return response.json({"code":"02","msg":"The value is not stored in DB, so you need to create a platform."},status=400)
else:
try: platform = int(args['platform'][0])
except ValueError: return response.json({'code':'06', 'msg':"Platform values can only contain numbers."}, status=400)
if not (platform >= 0 and platform < 5): return response.json({'code':'07', 'msg':"Platform values can contain only 0-4 values."}, status=400)
async with aiohttp.ClientSession() as session:
async with session.get(f"https://api.pubg.com/shards/{platform_site[platform]}/players?filter[playerNames]={nickname}", headers=header) as resp:
if resp.status == 200:
json_data = await resp.json()
else:
e_resp = s_info.response_num(resp.status)
print(await resp.json(),resp.status)
return response.json({'code': e_resp[1], 'msg': e_resp[2]}, status=e_resp[0])
data = {
"id":json_data["data"][0]["id"],
"nickname":json_data["data"][0]["attributes"]["name"],
"platform":platform,
"lastupdate":sample1
}
command = pymysql.escape_string("insert into player(id,name,last_update,platform) value(%s,%s,%s,%s)")
cur.execute(command,(json_data["data"][0]["id"],json_data["data"][0]["attributes"]["name"],json.dumps(sample1),platform))
connect.commit()
connect.close()
return response.json(data,status=200)
@app.route("/api/PUBG/normal")
async def normal_status(request):
args = request.get_args(keep_blank_values=True)
if not ("id" in args): return response.json({'code':'01', 'msg':"Please write your id."}, status=400)
else: pubg_id = args['id'][0]
connect = pymysql.connect(host=db_ip, user=db_user, password=db_pw,db=db_name, charset='utf8')
cur = connect.cursor()
exist_nickname = pymysql.escape_string("SELECT EXISTS (SELECT name FROM player WHERE id=%s) as succees;")
cur.execute(exist_nickname,(pubg_id))
fetch1 = cur.fetchone()
if fetch1[0]:
command = pymysql.escape_string("SELECT platform FROM player WHERE id=%s")
cur.execute(command, (pubg_id))
platform_info = cur.fetchone()[0]
else: return response.json({'code':'05', 'msg':"No information about the user was found. Please proceed with \"/PUBG/player\" first."}, status=400)
if ("season" in args):
try: season = int(args['season'][0])
except ValueError: return response.json({'code':'08', 'msg':"Season values can only contain numbers."}, status=400)
if platform_info >= 0 and platform_info <= 1: type_season = "pc-2018"
else: type_season = "console"
if len(str(season)) < 2: season = f"division.bro.official.{type_season}-0{season}"
else: season = f"division.bro.official.{type_season}-{season}"
else: season = await get_season(platform_info)
status, html = await s_info.season_status(pubg_id,platform_info,season)
if not status:
return response.json({'code': html[1], 'msg': html[2]}, status=html[0])
else:
data = {
"id":pubg_id,
"gameMode":{}
}
gamestat = html['data']['attributes']['gameModeStats']
for i in ['solo','solo-fpp','duo','duo-fpp','squad','squad-fpp']:
modestat = gamestat[i]
losses = modestat['losses']
if losses == 0:
losses = 1
KDA_point = round((modestat['assists'] + modestat['kills']) / losses,2)
KD_point = round(modestat['kills'] / losses,2)
i_data = {
i:{
"assists":modestat['assists'],
"boosts": modestat['boosts'],
"dBNOs": modestat['dBNOs'],
"dailyKills": modestat['dailyKills'],
"dailyWins": modestat['dailyWins'],
"damageDealt": modestat['damageDealt'],
"days": modestat['days'],
"headshotKills": modestat['headshotKills'],
"heals": modestat['heals'],
"KDA_point": KDA_point,
"KD_point": KD_point,
"kills": modestat['kills'],
"longestKill": modestat['longestKill'],
"longestTimeSurvived": modestat['longestTimeSurvived'],
"longestTimeSurvivedAnswer": time_num(modestat['longestTimeSurvived']),
"losses": modestat['losses'],
"maxKillStreaks": modestat['maxKillStreaks'],
"mostSurvivalTime": modestat['mostSurvivalTime'],
"revives": modestat['revives'],
"rideDistance": modestat['rideDistance'],
"roadKills": modestat['roadKills'],
"roundMostKills": modestat['roundMostKills'],
"roundsPlayed": modestat['roundsPlayed'],
"suicides": modestat['suicides'],
"swimDistance": modestat['swimDistance'],
"teamKills": modestat['teamKills'],
"timeSurvived": modestat['timeSurvived'],
"timeSurvivedAnswer": time_num(modestat['timeSurvived']),
"top10s": modestat['top10s'],
"vehicleDestroys": modestat['vehicleDestroys'],
"walkDistance": modestat['walkDistance'],
"weaponsAcquired": modestat['weaponsAcquired'],
"weeklyKills": modestat['weeklyKills'],
"weeklyWins": modestat['weeklyWins'],
"wins": modestat['wins']
}
}
data['gameMode'].update(i_data)
return response.json(data, status=200)
@app.route("/api/PUBG/normal/update")
async def update_normal_status(request):
args = request.get_args(keep_blank_values=True)
if not ("id" in args): return response.json({'code':'01', 'msg':"Please write your id."}, status=400)
else: pubg_id = args['id'][0]
connect = pymysql.connect(host=db_ip, user=db_user, password=db_pw,db=db_name, charset='utf8')
cur = connect.cursor()
exist_nickname = pymysql.escape_string("SELECT EXISTS (SELECT name FROM player WHERE id=%s) as succees;")
cur.execute(exist_nickname,(pubg_id))
fetch1 = cur.fetchone()
if fetch1[0]:
command = pymysql.escape_string("SELECT platform FROM player WHERE id=%s")
cur.execute(command, (pubg_id))
platform_info = cur.fetchone()[0]
else: return response.json({'code':'05', 'msg':"No information about the user was found. Please proceed with \"/PUBG/player\" first."}, status=400)
if ("season" in args):
try: season = int(args['season'][0])
except ValueError: return response.json({'code':'08', 'msg':"Season values can only contain numbers."}, status=400)
if platform_info >= 0 and platform_info <= 1: type_season = "pc-2018"
else: type_season = "console"
if len(str(season)) < 2: season = f"division.bro.official.{type_season}-0{season}"
else: season = f"division.bro.official.{type_season}-{season}"
else: season = await get_season(platform_info)
await s_info.season_status_update(pubg_id, platform_info, season)
return response.json({
"code":"00",
"msg":"Updated successfully."
},status=200)
@app.route("/api/PUBG/ranked")
async def ranked_status(request):
args = request.get_args(keep_blank_values=True)
if not ("id" in args): return response.json({'code':'01', 'msg':"Please write your id."}, status=400)
else: pubg_id = args['id'][0]
connect = pymysql.connect(host=db_ip, user=db_user, password=db_pw,db=db_name, charset='utf8')
cur = connect.cursor()
exist_nickname = pymysql.escape_string("SELECT EXISTS (SELECT name FROM player WHERE id=%s) as succees;")
cur.execute(exist_nickname,(pubg_id))
fetch1 = cur.fetchone()
if fetch1[0]:
command = pymysql.escape_string("SELECT platform FROM player WHERE id=%s")
cur.execute(command, (pubg_id))
platform_info = cur.fetchone()[0]
else: return response.json({'code':'05', 'msg':"No information about the user was found. Please proceed with \"/PUBG/player\" first."}, status=400)
if ("season" in args):
try: season = int(args['season'][0])
except ValueError: return response.json({'code':'08', 'msg':"Season values can only contain numbers."}, status=400)
if platform_info >= 0 and platform_info <= 1: type_season = "pc-2018"
else: type_season = "console"
if len(str(season)) < 2: season = f"division.bro.official.{type_season}-0{season}"
else: season = f"division.bro.official.{type_season}-{season}"
else: season = await get_season(platform_info)
status, html = await s_info.ranked_status(pubg_id,platform_info,season)
if not status:
return response.json({'code': html[1], 'msg': html[2]}, status=html[0])
else:
data = {
"id":pubg_id,
"gameMode":{}
}
gamestat = html['data']['attributes']['rankedGameModeStats']
for i in ['solo','solo-fpp','squad','squad-fpp']:
if not (i in gamestat):
i_data = {
i: {
"assists": 0,
"avgRank": 0,
"currentRank":{
"tier":"Unranked",
"subTier":"1"
},
"currentRankAnswer":"Unranked",
"currentRankPoint":0,
"bestRank":{
"tier":"Unranked",
"subTier":"1"
},
"bestRankAnswer":"Unranked",
"bestRankPoint": 0,
"damageDealt": 0,
"deaths": 0,
"dBNOs": 0,
"KDA_point": 0,
"KD_point": 0,
"kills": 0,
"top10s": 0,
"top10_point": 0,
"wins": 0,
"win_point": 0
}
}
data['gameMode'].update(i_data)
continue
modestat = gamestat[i]
losses = modestat['deaths']
if losses == 0:
losses = 1
KD_point = round(modestat['kills'] / losses,2)
currentTier1 = modestat["currentTier"]["tier"]
currentTier2 = modestat["currentTier"]["subTier"]
bestTier1 = modestat["bestTier"]["tier"]
bestTier2 = modestat["bestTier"]["subTier"]
if currentTier1 == "Unranked" or currentTier1 == "Master": tier_name1 = currentTier1
else: tier_name1 = f"{currentTier1} {currentTier2}"
if bestTier1 == "Unranked" or bestTier1 == "Master": tier_name2 = bestTier1
else: tier_name2 = f"{bestTier1} {bestTier2}"
i_data = {
i:{
"assists": modestat['assists'],
"avgRank": modestat['avgRank'],
"currentRank":modestat['currentTier'],
"currentRankAnswer":tier_name1,
"currentRankPoint":modestat['currentRankPoint'],
"bestRank":modestat['bestTier'],
"bestRankAnswer":tier_name2,
"bestRankPoint": modestat['bestRankPoint'],
"damageDealt": modestat['damageDealt'],
"deaths": modestat['deaths'],
"dBNOs": modestat['dBNOs'],
"KDA_point": modestat['kda'],
"KD_point": KD_point,
"kills": modestat['kills'],
"roundsPlayed": modestat['roundsPlayed'],
"top10s": trunc(modestat['top10Ratio'] * modestat['roundsPlayed']),
"top10_point": modestat['top10Ratio'],
"wins": modestat['wins'],
"win_point": modestat['winRatio']
}
}
data['gameMode'].update(i_data)
return response.json(data, status=200)
@app.route("/api/PUBG/ranked/update")
async def update_ranked_status(request):
args = request.get_args(keep_blank_values=True)
if not ("id" in args): return response.json({'code':'01', 'msg':"Please write your id."}, status=400)
else: pubg_id = args['id'][0]
connect = pymysql.connect(host=db_ip, user=db_user, password=db_pw,db=db_name, charset='utf8')
cur = connect.cursor()
exist_nickname = pymysql.escape_string("SELECT EXISTS (SELECT name FROM player WHERE id=%s) as succees;")
cur.execute(exist_nickname,(pubg_id))
fetch1 = cur.fetchone()
if fetch1[0]:
command = pymysql.escape_string("SELECT platform FROM player WHERE id=%s")
cur.execute(command, (pubg_id))
platform_info = cur.fetchone()[0]
else: return response.json({'code':'05', 'msg':"No information about the user was found. Please proceed with \"/PUBG/player\" first."}, status=400)
if ("season" in args):
try: season = int(args['season'][0])
except ValueError: return response.json({'code':'08', 'msg':"Season values can only contain numbers."}, status=400)
if platform_info >= 0 and platform_info <= 1: type_season = "pc-2018"
else: type_season = "console"
if len(str(season)) < 2: season = f"division.bro.official.{type_season}-0{season}"
else: season = f"division.bro.official.{type_season}-{season}"
else: season = await get_season(platform_info)
await s_info.ranked_status_update(pubg_id, platform_info, season)
return response.json({
"code":"00",
"msg":"Updated successfully."
},status=200)
@app.route("/api/PUBG/player/change_platform")
async def change_platform(request):
args = request.get_args(keep_blank_values=True)
if not ("nickname" in args): return response.json({'code':'01', 'msg':"Please write your nickname."}, status=400)
else: nickname = args['nickname'][0]
connect = pymysql.connect(host=db_ip, user=db_user, password=db_pw,db=db_name, charset='utf8')
cur = connect.cursor()
exist_nickname = pymysql.escape_string("SELECT EXISTS (SELECT name FROM player WHERE name=%s) as succees;")
cur.execute(exist_nickname,(nickname))
exist = cur.fetchone()
if exist[0]:
if not ("platform" in args): return response.json({'code':'02', 'msg':"Please write your platform."}, status=400)
else:
try: platform = int(args['platform'][0])
except ValueError: return response.json({'code':'06', 'msg':"Platform values can only contain numbers."}, status=400)
if not (platform >= 0 and platform < 5): return response.json({'code':'07', 'msg':"Platform values can contain only 0-4 values."}, status=400)
command = pymysql.escape_string("UPDATE player SET platform=%s WHERE name=%s")
cur.execute(command,(platform,nickname))
connect.commit()
connect.close()
return response.json({
"code":"00",
"msg":"Updated successfully."
},status=200)
else:
connect.close()
return response.json({'code': '05','msg': "No information about the user was found. Please proceed with \"/PUBG/player\" first."},status=400)
app.run('127.0.0.1', 3200) | [
"gunyu1019@gmail.com"
] | gunyu1019@gmail.com |
b2751e339d9962f161e087abd2904dce3feee3f8 | 029fe3c82ace900aeeb6b5624aea9f02cccee571 | /dtlearn/nbayes/gaussian.py | e258251abd78c8921142c223696dca2fb7342347 | [
"MIT"
] | permissive | i-agrawal/dtlearn | 5b86bad7d67d84ee5f0ccfb130e142ce6a099748 | 5e925aece613a0402cecf9ee869dfd41e0e18bce | refs/heads/master | 2020-03-19T23:55:28.603406 | 2018-12-27T00:58:37 | 2018-12-27T00:58:37 | 137,024,986 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | import numpy as np
from .. import Model
class Gaussian(Model):
def train(self, x, y):
self.labels = np.unique(y)
groups = [x[y == label] for label in self.labels]
self.means = np.array([np.mean(group, axis=0) for group in groups])
self.varis = np.array([np.var(group, axis=0) for group in groups])
self.fracs = np.array([len(group) for group in groups]) / len(y)
def predict(self, x):
diff = x[:, np.newaxis] - self.means[np.newaxis]
prob = np.exp(-np.multiply(diff, diff) / (2 * self.varis))
chosen = np.argmax(np.prod(prob, axis=2) * self.fracs, axis=1)
return self.labels[chosen]
| [
"ishan.agrawal97@gmail.com"
] | ishan.agrawal97@gmail.com |
09e9bd7c4d26cbef87fec0324296b7b0e260ef3a | 5453a23a6c59bd354e2b07f87336b8fb3a618741 | /Growth curve/doubleing_time.py | 31edccbc56f2420eaf0340889d88101573cd5f5b | [] | no_license | MinTTT/pycode | bfe7d311f7bd14c9775a8d8c5b6a4969b0d1ec57 | 7658b1460d0ca7bef50937bbc18f6bd4bcdb1b4e | refs/heads/master | 2020-04-01T04:09:32.787824 | 2018-10-13T08:54:41 | 2018-10-13T08:54:41 | 152,852,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | # -*- coding: utf-8 -*-
'''
This module need three parameters
x: time sequence, y: ABS or OD
max: logmarthtic midpoint
'''
import numpy as np
from scipy import optimize
def doutime(x, y, max, win_start=38, win_end=10):
    """Estimate the doubling time from the exponential phase of a growth curve.

    Parameters
    ----------
    x : numpy array of time points (minutes).
    y : pandas Series of ABS/OD readings aligned with *x*.
    max : int
        Index of the logarithmic midpoint of the curve.  (Name kept for
        backward compatibility even though it shadows the builtin.)
    win_start, win_end : int, optional
        Offsets defining the fitted window ``[max - win_start, max - win_end)``;
        defaults reproduce the original hard-coded 28-point window.

    Returns
    -------
    tuple
        (doubling time in minutes, window time points, fitted OD values).
    """
    window = np.arange(max - win_start, max - win_end, 1).tolist()
    logtime = x[window]
    logod = y.iloc[window]

    def residuals(p):
        # Residuals of a straight line in log space: ln(OD) ~ k*t + b.
        k, b = p
        return np.log(logod) - (k * logtime + b)

    r = optimize.leastsq(residuals, [0, -10])
    k, b = r[0]
    k = np.log(2) / k   # slope -> doubling time (minutes)
    b = np.e ** b       # intercept -> OD at t = 0
    print("Doubling time =", k, "min.")
    fity = b * (2 ** (logtime / k))
    return k, logtime, fity
| [
"34529833+MinTTT@users.noreply.github.com"
] | 34529833+MinTTT@users.noreply.github.com |
c439a364452d8c084ee3f038cccde1007cb39e21 | 560d0c8a59b7933d91e4e37ed3302abfd195395c | /test/player_p2.py | b8fb82f201cb18d9905a079ae85572c4034d03bd | [
"Apache-2.0"
] | permissive | kakao/pycon2016apac-gawibawibo | b56043ca0cdfcfe0375a1d86a85a4534f762420d | b74862562c51f0d027f20030346a8a3e8cb081b6 | refs/heads/master | 2020-09-26T22:01:45.387401 | 2016-08-22T09:01:09 | 2016-08-22T09:01:09 | 66,256,293 | 9 | 3 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | def show_me_the_hand(records):
return 'gawi'
| [
"iolothebard@gmail.com"
] | iolothebard@gmail.com |
0121ed472138d492c9faefbd6f0c04308f5c2a4a | 69cbb90a54ef4c312d7939c31d1da4060776306a | /airWritingPrediction.py | d0716f98214463884e6f81fe5825557eb6af1cd4 | [] | no_license | priyankagohil/AirWritingRecognition | 602d08131e7908b6fc13204dc4d0e9f27ca6e041 | 5228798157f7f21b59e6ef688e8f33e076657416 | refs/heads/master | 2023-08-24T10:48:51.588127 | 2021-01-06T17:52:56 | 2021-01-06T17:52:56 | 312,326,541 | 3 | 0 | null | 2021-10-18T05:43:02 | 2020-11-12T16:01:41 | Python | UTF-8 | Python | false | false | 8,577 | py | import cv2
import numpy as np
import copy
import torch
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
# Top-left corners (x, y) of the five on-screen sampling rectangles used to
# capture the user's skin tone for the colour histogram.
rect1_tl = (320, 140)
rect2_tl = (320, 240)
rect3_tl = (320, 340)
rect4_tl = (240, 270)
rect5_tl = (400, 270)
# Size of each sampling rectangle, in pixels.
height = 30
width = 30
"""CNN architecture of the model"""
class Cnn(nn.Module):
    """Convolutional classifier for 28x28 single-channel letter images (26 classes).

    NOTE: the attribute names (conv1..conv3, fc1, fc2) are the keys of the
    saved state_dict ('model_emnist.pt') and must not be renamed.
    """
    def __init__(self):
        super(Cnn, self).__init__()
        # Three 3x3 convolutional layers, each followed by 2x2 max pooling:
        # 28x28 -> 14x14 -> 7x7 -> 3x3 spatial size.
        self.conv1 = nn.Conv2d(1, 16, 3, padding=1)
        self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
        self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(64 * 3 * 3, 256)
        self.fc2 = nn.Linear(256, 26)
        self.dropout = nn.Dropout(0.25)

    def forward(self, x):
        """Return raw class logits for input batch *x* of shape (N, 1, 28, 28)."""
        # Adding sequence of convolutional and max pooling layers
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        # Flatten to (N, 64*3*3) for the fully-connected head.
        x = x.view(-1, 64 * 3 * 3)
        x = self.dropout(x)
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.fc2(x)
        return x
def get_histogram(frame):
    """Build a hue/saturation histogram of the skin sampled from the five
    on-screen rectangles of *frame* (BGR image).

    The five patches are stacked vertically and converted to HSV before the
    2-D histogram (180 hue bins x 256 saturation bins) is computed.
    """
    roi1 = frame[rect1_tl[1]:rect1_tl[1] + width, rect1_tl[0]:rect1_tl[0] + height]
    roi2 = frame[rect2_tl[1]:rect2_tl[1] + width, rect2_tl[0]:rect2_tl[0] + height]
    roi3 = frame[rect3_tl[1]:rect3_tl[1] + width, rect3_tl[0]:rect3_tl[0] + height]
    roi4 = frame[rect4_tl[1]:rect4_tl[1] + width, rect4_tl[0]:rect4_tl[0] + height]
    roi5 = frame[rect5_tl[1]:rect5_tl[1] + width, rect5_tl[0]:rect5_tl[0] + height]
    roi = np.concatenate((roi1, roi2, roi3, roi4, roi5), axis=0)
    roi_hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    return cv2.calcHist([roi_hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
def draw_rectangles(frame=0):
    """Draw the five red skin-sampling rectangles onto *frame* and return it.

    NOTE(review): the default ``frame=0`` looks accidental — callers always
    pass a frame; calling without one would fail inside cv2.rectangle.
    """
    frame_with_rect = frame
    cv2.rectangle(frame_with_rect, rect1_tl, tuple(np.array(rect1_tl) + np.array((height, width))), (0, 0, 255), 1)
    cv2.rectangle(frame_with_rect, rect2_tl, tuple(np.array(rect2_tl) + np.array((height, width))), (0, 0, 255), 1)
    cv2.rectangle(frame_with_rect, rect3_tl, tuple(np.array(rect3_tl) + np.array((height, width))), (0, 0, 255), 1)
    cv2.rectangle(frame_with_rect, rect4_tl, tuple(np.array(rect4_tl) + np.array((height, width))), (0, 0, 255), 1)
    cv2.rectangle(frame_with_rect, rect5_tl, tuple(np.array(rect5_tl) + np.array((height, width))), (0, 0, 255), 1)
    return frame_with_rect
def get_mask(frame, histogram):
    """Return a binary skin mask for *frame* using histogram back-projection.

    The back-projection is thresholded and cleaned with elliptical filtering,
    morphological open/close, and a bilateral filter to smooth edges.
    """
    frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.calcBackProject([frame_hsv], [0, 1], histogram, [0, 180, 0, 256], 1)
    _, mask = cv2.threshold(mask, 10, 255, cv2.THRESH_BINARY)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
    mask = cv2.filter2D(mask, -1, kernel)
    kernel1 = np.ones((7, 7), np.uint8)
    # Open removes small specks; close fills small holes in the hand blob.
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel1)
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
    mask = cv2.bilateralFilter(mask, 5, 75, 75)
    return mask
def get_max_contour(mask):
    """Return the largest contour (by area) found in the binary *mask*.

    Bug fix: the original kept the *last* contour whose area exceeded the
    1500 px threshold rather than the largest one, so a small late contour
    could shadow the hand.  The maximum area is now tracked explicitly;
    the 1500 px threshold is retained as a noise filter.
    """
    contours = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[0]
    max_area = 0
    mi = 0
    for i in range(len(contours)):
        area = cv2.contourArea(contours[i])
        if area > 1500 and area > max_area:
            max_area = area
            mi = i
    # Falls back to the first contour if none exceeds the threshold.
    return contours[mi]
def draw_defects(frame_with_rect, max_contour, hull):
    """Draw the convexity defects of *max_contour* onto *frame_with_rect*.

    For each defect, lines are drawn from the start/end hull points to the
    defect's farthest point, which is marked with a red dot.
    """
    defects = cv2.convexityDefects(max_contour, hull)
    for i in range(defects.shape[0]):
        s, e, f, d = defects[i, 0]
        start = tuple(max_contour[s][0])
        end = tuple(max_contour[e][0])
        far = tuple(max_contour[f][0])
        cv2.line(frame_with_rect, start, far, [255, 0, 0], 2)
        cv2.line(frame_with_rect, far, end, [0, 255, 0], 2)
        cv2.circle(frame_with_rect, far, 5, [0, 0, 255], -1)
def get_centroid(contour):
    """Return the (x, y) centroid of *contour* from its image moments.

    Raises ZeroDivisionError if the contour has zero area (m00 == 0).
    """
    m = cv2.moments(contour)
    cx = int(m['m10'] / m['m00'])
    cy = int(m['m01'] / m['m00'])
    return cx, cy
def get_farthest_point(defects, contour, centroid):
    """Return the defect start point of *contour* farthest from *centroid*.

    Used as the fingertip estimate for drawing.  Returns None when defects
    or centroid are missing (implicitly, via falling off the end) or when
    the argmax index is out of range.
    """
    if defects is not None and centroid is not None:
        s = defects[:, 0][:, 0]
        cx, cy = centroid
        x = np.array(contour[s][:, 0][:, 0], dtype=np.float)
        y = np.array(contour[s][:, 0][:, 1], dtype=np.float)
        # Euclidean distance of every defect start point from the centroid.
        xp = cv2.pow(cv2.subtract(x, cx), 2)
        yp = cv2.pow(cv2.subtract(y, cy), 2)
        dist = cv2.sqrt(cv2.add(xp, yp))
        dist_max_i = np.argmax(dist)
        if dist_max_i < len(s):
            farthest_defect = s[dist_max_i]
            farthest_point = tuple(contour[farthest_defect][0])
            return farthest_point
        else:
            return None
def get_ROI(canvas):
    """Extract the bounding-box region of the drawn character from *canvas*.

    The canvas is inverted and thresholded, contours are sorted by bounding-
    box area, and the second-largest is taken (the largest is assumed to be
    the full-canvas border contour).

    NOTE(review): raises IndexError if fewer than two contours are found
    (i.e. nothing was drawn) — callers should draw before calling this.
    """
    gray = cv2.bitwise_not(canvas)
    ret, thresh = cv2.threshold(gray, 5, 255, cv2.THRESH_BINARY_INV)
    _, ctrs, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    areas = []
    for i in range(len(ctrs)):
        x, y, w, h = cv2.boundingRect(ctrs[i])
        areas.append((w * h, i))

    def sort_second(val):
        # Sort key: the bounding-box area stored at index 0.
        return val[0]

    areas.sort(key=sort_second, reverse=True)
    x, y, w, h = cv2.boundingRect(ctrs[areas[1][1]])
    cv2.rectangle(canvas, (x, y), (x + w, y + h), (255, 255, 0), 1)
    roi = gray[y:y + h, x:x + w]
    return roi
def character_prediction(roi, model):
    """Predicts character written with image processing"""
    # Resize to the network's 28x28 input and smooth before tensor conversion.
    img = cv2.resize(roi, (28, 28))
    img = cv2.GaussianBlur(img, (3, 3), 0)
    img = Image.fromarray(img)
    # NOTE(review): 3-channel mean/std applied to what looks like a 1-channel
    # grayscale image — confirm Normalize broadcasts as intended here.
    normalize = transforms.Normalize(
        mean=[0.5, 0.5, 0.5],
        std=[0.5, 0.5, 0.5]
    )
    preprocess = transforms.Compose([
        transforms.Resize((28, 28)),
        transforms.ToTensor(),
        normalize
    ])
    p_img = preprocess(img)
    model.eval()
    p_img = p_img.reshape([1, 1, 28, 28]).float()
    # Transpose H/W — presumably because EMNIST images are stored transposed.
    output = model(torch.transpose(p_img, 2, 3))
    _, preds_tensor = torch.max(output, 1)
    # 0-25 class index (later mapped to 'A'-'Z' via chr(preds + 65)).
    preds = np.squeeze(preds_tensor.numpy())
    return preds
def main():
    """Run the air-writing capture loop on the default webcam.

    Keys: 's' sample skin histogram, 'd' start drawing, 'c' clear,
    'p' predict the drawn letter, 'q' quit.
    """
    video = cv2.VideoCapture(0)
    canvas = np.zeros((720, 1280), np.uint8)
    far_points = []          # trail of fingertip positions being drawn
    pressed = False          # True once the skin histogram has been sampled
    is_drawing = False
    made_prediction = False
    # Creating the model
    model = Cnn()
    model.load_state_dict(torch.load('model_emnist.pt', map_location='cpu'))
    # Actions to perform with each key
    while True:
        _, frame = video.read()
        frame = cv2.flip(frame, flipCode=1)
        original_frame = copy.deepcopy(frame)
        original_frame = draw_rectangles(original_frame)
        canvas[:, :] = 255
        key = cv2.waitKey(1)
        # ready to draw
        # NOTE(review): 'd'/'p' before 's' would use `histogram`/`far` before
        # assignment — the expected key order is s, d, p.
        if key & 0xFF == ord('s'):
            pressed = True
            histogram = get_histogram(frame)
        # To start drawing
        if key & 0xFF == ord('d'):
            is_drawing = True
        # To clear drawing
        if key & 0xFF == ord('c'):
            canvas[:, :] = 255
            is_drawing = False
            far_points.clear()
            made_prediction = False
        if is_drawing:
            # Keep only the most recent 100 fingertip positions.
            if len(far_points) > 100:
                far_points.pop(0)
            far_points.append(far)
            for i in range(len(far_points) - 1):
                cv2.line(original_frame, far_points[i], far_points[i + 1], (255, 5, 255), 20)
                cv2.line(canvas, far_points[i], far_points[i + 1], (0, 0, 0), 20)
        # To predict the character drawn
        if key & 0xFF == ord('p'):
            is_drawing = False
            roi = get_ROI(canvas)
            prediction = character_prediction(roi, model)
            print(prediction)
            made_prediction = True
            name = str(prediction) + '.jpg'
            cv2.imwrite(name, roi)
        if pressed:
            # Track the hand and estimate the fingertip each frame.
            mask = get_mask(frame, histogram)
            max_contour = get_max_contour(mask)
            hull = cv2.convexHull(max_contour, returnPoints=False)
            draw_defects(original_frame, max_contour, hull)
            defects = cv2.convexityDefects(max_contour, hull)
            far = get_farthest_point(defects, max_contour, get_centroid(max_contour))
            cv2.circle(original_frame, far, 10, [0, 200, 255], -1)
        if made_prediction:
            # Class index 0-25 maps to 'A'-'Z'.
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(original_frame, 'Character written : ' + chr(prediction + 65), (10, 500), font, 4,
                        (255, 255, 255), 2, cv2.LINE_AA)
        # To quit the drawing
        if key & 0xFF == ord('q'):
            break
        cv2.imshow('frame', original_frame)
    video.release()
    cv2.destroyAllWindows()
if __name__ == '__main__':
main()
| [
"priyanka@onlinedegree@iitm.ac.in"
] | priyanka@onlinedegree@iitm.ac.in |
5532be931e0b45443ac49288433d6dbcf3e6db85 | ad297efe52cb150997f6ee7828bb9fbd8512d60b | /scout/Miner/amazon_scraper.py | e2ef13a8379a6122cffd3765cc16b0aad7b9db73 | [] | no_license | happyxiaoxu/zonmine | 8b43f5bbb4dbadda324140bddd04aa603f7f51e8 | e016cbc77c4be05ffdcd74bd98f8da898956a419 | refs/heads/master | 2022-12-16T09:53:04.427931 | 2019-04-03T02:02:38 | 2019-04-03T02:02:38 | 179,192,426 | 1 | 0 | null | 2022-12-08T01:21:41 | 2019-04-03T02:11:17 | CSS | UTF-8 | Python | false | false | 13,415 | py | # from .config import Config
# from .product import Product
from scout.Miner.config import Config
from scout.Miner.product import Product
from scout.Miner.grepwords_api import KeywordsApi
import lxml.html
import re
# from fuzzywuzzy import fuzz
from difflib import SequenceMatcher
from urllib.parse import quote_plus
from collections import OrderedDict
import warnings
warnings.filterwarnings("ignore")
#####################################################################################
# Custom method to get first element or empty element by xpath
def xpath_first(self, elm_xpath):
elements = self.xpath(elm_xpath)
empty_elm = lxml.html.HtmlElement()
empty_elm.text = "-"
return next(iter(elements), empty_elm)
def xpath_get_text(self, elm_xpath):
# gets the first element and trims the text
element = self.xpath_first(elm_xpath)
return " ".join(element.text_content().split())
lxml.html.HtmlElement.xpath_first = xpath_first
lxml.html.HtmlElement.xpath_get_text = xpath_get_text
#####################################################################################
def check_similarity(a, b):
    """Return the difflib similarity ratio between *a* and *b* (0.0 - 1.0)."""
    return SequenceMatcher(None, a, b).ratio()
class AmazonScraper(object):
    def __init__(self, scraper_obj):
        """Store the request-making *scraper_obj* and a keywords API client."""
        # Scraper.__init__(self)
        self.scraper_obj = scraper_obj
        self.kw_api = KeywordsApi()
        return None
#
    def asin_search(self, product_obj):
        """Scrape the Amazon product page for *product_obj.asin* and fill the
        product's fields in place (title metadata, BSR, dimensions, weight,
        fulfilment flags, features, images, review breakdown).

        Returns *product_obj* — unchanged when the page request fails.
        """
        asin = product_obj.asin
        print("ASIN : {}".format(asin))
        # https://www.amazon.com/dp/B075QDGZX9
        asin_url = Config.amazon_asin_url.format(asin)
        # asin_resp = self.session.get(asin_url)
        asin_resp = self.scraper_obj.make_request(asin_url)
        if asin_resp.status_code != 200:
            print("Skipping ASIN Search as status code is: {}".format(asin_resp.status_code))
            return product_obj
        asin_xml = lxml.html.fromstring(asin_resp.content)
        print(asin_resp)
        print(asin_resp.url)
        product_obj.asin = asin
        product_obj.url = asin_url
        # product_obj.title = asin_xml.xpath_get_text('//span[@id="productTitle"]')
        product_obj.bread_crumbs = asin_xml.xpath_get_text('//div[contains(@class, "a-breadcrumb")]')
        # brand = asin_xml.xpath_first('//a[@id="bylineInfo"] | //a[@id="brand"]').attrib.get("href")
        # if brand:
        # product_obj.brand = brand.split("/")[1]
        product_obj.brand = asin_xml.xpath_get_text('//a[@id="bylineInfo"] | //a[@id="brand"]')
        product_obj.description = asin_xml.xpath_get_text('//div[@id="productDescription" and contains(@class, "a-section")]//p')
        # product_obj.weight = asin_xml.xpath_get_text('''
        # //th[contains(text(), "Weight")]/following-sibling::td | //text()[contains(.,'Weight')]/ancestor::li
        # ''')
        product_obj.item_model_number = asin_xml.xpath_get_text('''
        //*[contains(text(), "Item model number")]/following-sibling::td | //text()[contains(.,'Item model number')]/ancestor::li
        ''')
        # product_obj.item_dimensions = asin_xml.xpath_get_text('''
        # //th[contains(text(), "Product Dimensions")]/following-sibling::td | //th[contains(text(), "Item Dimensions")]/following-sibling::td
        # ''')
        # BSR Data
        bsr_data = asin_xml.xpath_first('''
        //*[contains(text(), "Best Sellers Rank")]/following-sibling::td
        ''').text.strip("(").strip().split("in") + ["-", "-"]
        product_obj.bsr = bsr_data[0]
        product_obj.bsr_category = bsr_data[1]
        #
        # Dimensions, in inches
        # //th[contains(text(), "Dimensions")]/following-sibling::td | //text()[contains(.,'Dimensions')]/ancestor::li
        dimensions_text = asin_xml.xpath_get_text('''
        //*[contains(text(), "Dimensions")]/following-sibling::td | //text()[contains(.,'Dimensions')]/ancestor::li
        ''').split("inches")[0]
        dimensions = re.findall(r"[-+]?\d*\.\d+|\d+", dimensions_text)
        if dimensions:
            # Sort descending so length >= width >= thickness; pad with "-".
            dimensions = [float(i) for i in sorted(dimensions, reverse=True)]
        dimensions = (dimensions + ["-", "-", "-"])[:3]
        product_obj.item_dimensions_length = dimensions[0]
        product_obj.item_dimensions_width = dimensions[1]
        product_obj.item_dimensions_thickness = dimensions[2]
        ########
        # Item Weight, convert ounces to pounds by dividing with 16
        # Alt. Xpaths:
        # //th[contains(text(), "Shipping Weight")]/following-sibling::td | //th[contains(text(), "Item Weight")]/following-sibling::td
        # //*[contains(text(), "Item Weight")]/following-sibling::td | //*[contains(text(), "Shipping Weight")]/following-sibling::td
        item_weight_text = asin_xml.xpath_get_text('''
        //*[contains(text(), "Weight")]/following-sibling::td
        ''')
        item_weight = re.search(r"[-+]?\d*\.\d+|\d+", item_weight_text)
        if item_weight:
            item_weight = item_weight.group()
            if "ounce" in item_weight_text:
                print("Converting ounce to pound")
                item_weight = float(item_weight) / 16
            product_obj.item_weight = item_weight
        # Fullfillment
        # print(result.xpath_first('.//span[contains(@class, "s-sponsored-info-icon")]').tag)
        if asin_xml.xpath('//text()[contains(., "sold by Amazon")]'):
            product_obj.is_amz = True
        elif asin_xml.xpath('//text()[contains(., "Fulfilled by Amazon")]'):
            product_obj.is_fba = True
        else:
            product_obj.is_fbm = True
        ############################
        # /html/body//text()[matches(.,'test', 'i')]
        product_obj.manufacturer = asin_xml.xpath_get_text('//th[contains(text(), "Manufacturer")]/following-sibling::td')
        # product_obj.sold_by = asin_xml.xpath_get_text('//div[@id="merchant-info"]//a[1] | //div[@id="merchant-info"]')
        # product_obj.sold_by = asin_xml.xpath_get_text('//*[@id="merchant-info"]//a[1] | //*[@id="merchant-info"]')
        product_obj.sold_by = asin_xml.xpath_get_text('//*[@id="merchant-info"]//a')
        # product_obj.in_stock = asin_xml.xpath_get_text('//div[@id="availability"]')
        product_obj.in_stock = asin_xml.xpath_get_text('//*[@id="availability"]')
        # Extracting Features
        features_elm = asin_xml.xpath('//div[@id="feature-bullets"]//li//span[not(descendant:: a) and contains(@class, "a-list-item")]/text()') #not complete
        # Pad to five entries so feature1..feature5 always exist.
        features = [" ".join(each_feature.split()) for each_feature in features_elm] + ["-", "-", "-", "-", "-"]
        features = features[:5]
        product_obj.feature1 = features[0]
        product_obj.feature2 = features[1]
        product_obj.feature3 = features[2]
        product_obj.feature4 = features[3]
        product_obj.feature5 = features[4]
        #####
        # Number of sellers appears in parentheses in the offer-listing block.
        sellers_count = re.search(r'\((.*?)\)', asin_xml.xpath_first('//div[@id="olp_feature_div"]').text_content())
        if sellers_count:
            product_obj.sellers_count = sellers_count.group(1)
            print(product_obj.sellers_count)
        #Extracting Images
        # images_elm = asin_xml.xpath_first('//script[contains(text(), "ImageBlockATF") and not(contains(text(), "imageBlockATF"))]')[0]
        thumb_images = asin_xml.xpath('//li[contains(@class,"item")]//span[contains(@class,"a-button-thumbnail")]//img/@src')
        # Strip the thumbnail size suffix to get full-size image URLs; pad to 7.
        images = [re.sub(pattern = r'_.+_.', string=img, repl = "") for img in thumb_images] + ["-", "-", "-", "-", "-", "-", "-", "-"]
        images = images[:7]
        product_obj.image1 = images[0]
        product_obj.image2 = images[1]
        product_obj.image3 = images[2]
        product_obj.image4 = images[3]
        product_obj.image5 = images[4]
        product_obj.image6 = images[5]
        product_obj.image7 = images[6]
        # product_obj.image_8 = images[7]
        # Review Percentage
        product_obj.five_star_percentage = asin_xml.xpath_get_text('//a[contains(@class, "5star histogram-review-count")]')
        product_obj.four_star_percentage = asin_xml.xpath_get_text('//a[contains(@class, "4star histogram-review-count")]')
        product_obj.three_star_percentage = asin_xml.xpath_get_text('//a[contains(@class, "3star histogram-review-count")]')
        product_obj.two_star_percentage = asin_xml.xpath_get_text('//a[contains(@class, "2star histogram-review-count")]')
        product_obj.one_star_percentage = asin_xml.xpath_get_text('//a[contains(@class, "1star histogram-review-count")]')
        del([asin, asin_url, asin_resp, asin_xml, features_elm, thumb_images, images])
        return product_obj
#
    def keyword_search(self, keyword,job,db_handler):
        """Search Amazon for *keyword*, scrape every result, and persist it.

        For each search result a Product is built from the listing tile,
        enriched via asin_search(), saved through *db_handler*, and the
        job's result counter is incremented.  Returns the list of scraped
        Product objects (empty on a non-200 search response or no results).
        """
        job.refresh_from_db()
        # print("Searching Amazon")
        products = []
        print(keyword)
        print(quote_plus(keyword))
        print("Current Keyword {}".format(keyword))
        # Keyword is URL-encoded into the configured search-URL template.
        search_url = Config.amazon_search_url.format(quote_plus(keyword))
        print(search_url)
        search_resp = self.scraper_obj.make_request(search_url)
        print(search_resp)
        print(search_resp.url)
        if search_resp.status_code != 200:
            print("Skipping keywords as status code is: {}".format(search_resp.status_code))
            return products
        search_xml = lxml.html.fromstring(search_resp.content)
        print(search_xml)
        print('///////////////////')
        # Keyword metrics (CPC, volume, competition) are fetched once per
        # keyword and copied onto every Product scraped for it.
        kw_stats = self.kw_api.get_stats([keyword])
        cpc = kw_stats[keyword][0]
        monthly_search_volume = kw_stats[keyword][1]
        competition = kw_stats[keyword][2]
        # Edit xpath to include sponsored listings
        # search_results = search_xml.xpath('//div[@id="resultsCol"]//li[not(.//h5) and contains(@class, "s-result-item")]')
        # search_results = search_xml.xpath('//div[contains(@class, "s-result-list")]//div[@data-asin]')[:1]
        search_results = search_xml.xpath('//div[contains(@class, "s-result-list")]//div[@data-asin]')
        # search_results_1 = search_xml.xpath('//div[@id="resultsCol"]//li[@data-asin]//h2')
        # search_results = search_xml.xpath('//div[contains(@class, "s-result-list")]//div[@data-asin]')
        print(len(search_results))
        # if not search_results:
        #     with open("test.html", "wb") as oo:
        #         oo.write(search_resp.content)
        if not search_results:
            print("No results found for keyword : {}".format(keyword))
            return products
        for result in search_results:
            product_obj = Product()
            # Basic listing-tile fields; title may come from the link text
            # or from the product image's alt attribute.
            url = result.xpath('.//span[contains(@class, "a-text-normal")]/parent::a/@href')
            title = result.xpath_first('.//span[contains(@class, "a-text-normal")]//text() | .//img[contains(@class, "s-image")]//@alt')
            print(title.encode())
            asin = result.attrib.get("data-asin")
            price = result.xpath_get_text('.//span[@class="a-offscreen"]')
            primary_image = result.xpath_first('.//img[@srcset and @data-image-load]/@src')
            review_count = result.xpath_first('.//a[contains(@href, "customerReviews")]').text_content().strip()
            review_score = result.xpath_first('.//i[contains(@class,"a-icon-star")]//span').text_content().split(" ")[0]
            # Listing badges: Prime eligibility, add-on item, sponsored ad.
            is_prime = False
            is_addon = False
            is_sponsored = False
            # print(result.xpath_first('.//span[contains(@class, "s-sponsored-info-icon")]').tag)
            if result.xpath('.//i[contains(@class,"a-icon-prime")]'):
                is_prime = True
            if result.xpath('.//i[contains(@class,"a-icon-addon")]'):
                is_addon = True
            if result.xpath('.//div[@data-component-type="sp-sponsored-result"]'):
                print("Inside is sponsored")
                is_sponsored = True
            #Updating Counter to rotate proxies
            # Get details from ASIN
            # self.update_counter()
            product_obj.search_keyword = keyword
            # 1-based position of the tile within the result page.
            product_obj.search_rank = search_results.index(result) + 1
            product_obj.url = url
            product_obj.asin = asin
            product_obj.title = title
            product_obj.price = price
            product_obj.primary_image = primary_image
            product_obj.review_count = review_count
            product_obj.review_score = review_score
            product_obj.is_prime = is_prime
            product_obj.is_addon = is_addon
            product_obj.sponsored = is_sponsored
            product_obj.cpc = cpc
            product_obj.monthly_search_volume = monthly_search_volume
            product_obj.competition = competition
            print(is_sponsored)
            # products.append(product_obj)
            # asin_search() enriches the product with detail-page data.
            products.append(self.asin_search(product_obj))
            db_handler.save_product(product_obj,job)
            job.scout_results_counter += 1
            job.save()
            job.refresh_from_db()
            del([product_obj])
            # break
        # with open("test.html", "wb") as oo:
        #     oo.write(search_resp.content)
return products | [
"happyxiaoxu@outlook.com"
] | happyxiaoxu@outlook.com |
3cb54764d75b5f50045410295526124deb9a2ad6 | eda865165f43083ea3281c0b55e8118147caa604 | /mysite/views.py | 654659a8306ec411683d4eaf29189841974542b2 | [] | no_license | MrPathak21/TextMagic | d86aa75c23881b2d28b1b3fb1efa9b708cf413c5 | 92b9a6950d7c36eedb48c6751f7e4dff75ea9f63 | refs/heads/main | 2023-05-08T19:02:02.339862 | 2021-05-28T21:01:11 | 2021-05-28T21:01:11 | 371,816,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,412 | py | # I have created this file - Shikhar
from django.http import HttpResponse
from django.shortcuts import render
'''
def index(request):
return HttpResponse('<a href = "http://127.0.0.1:8000/navigation"> Click here to go to navigation page</a>')
def about(request):
return HttpResponse('This is about page')
def navigation(request):
return HttpResponse('<h1>Navigation</h1><a href = "http://www.google.com"> Google</a> <a href = "http://www.facebook.com"> Facebook</a> <a href = "http://www.twitter.com">Twitter</a>')
'''
def index(request):
    """Render the landing page containing the text-analysis form."""
    template = 'index.html'
    return render(request, template)
def analyze(request):
    """Apply the user-selected transformations to the submitted text.

    Reads the text and the on/off checkboxes from the POST data, applies
    the transformations in a fixed order, and renders the result page.
    """
    raw_text = request.POST.get('text', 'default')
    option_names = ('removepunc', 'uppercase', 'newlineremover',
                    'spaceremover', 'charcount')
    flags = {name: request.POST.get(name, 'off') for name in option_names}

    transformed = raw_text
    if flags['removepunc'] == "on":
        transformed = removepunc(transformed)
    if flags['uppercase'] == "on":
        transformed = uppercase(transformed)
    if flags['newlineremover'] == 'on':
        transformed = lineremover(transformed)
    if flags['spaceremover'] == 'on':
        transformed = spacerem(transformed)

    # Character counts before/after; placeholder text when not requested.
    old_count = "Option Didn't selected "
    new_count = "Option Didn't selected "
    if flags['charcount'] == 'on':
        old_count = charcounter(raw_text)
        new_count = charcounter(transformed)

    params = {
        'purpose': 'Removed Punctuations',
        'analyzedtext': transformed,
        'charcountold': old_count,
        'charcountnew': new_count,
    }
    return render(request, 'analyze.html', params)
def about(request):
    """Render the static About Us page."""
    template = 'AboutUs.html'
    return render(request, template)
def contact(request):
    """Render the static Contact Us page."""
    template = 'ContactUs.html'
    return render(request, template)
def removepunc(s):
    """Return *s* with the characters in the punctuation set removed.

    Uses a generator with str.join instead of the previous quadratic
    string concatenation loop.
    """
    punctuation = '''`~!@#$%^&*()[{}]|:;"'<,>.?/'''
    return ''.join(ch for ch in s if ch not in punctuation)
def uppercase(s):
    """Return an upper-cased copy of *s*."""
    result = s.upper()
    return result
def lineremover(s):
    """Return *s* with all newline and carriage-return characters removed.

    Replaces the previous quadratic ``+=`` loop with a single join.
    """
    return ''.join(ch for ch in s if ch not in '\n\r')
def spacerem(s):
    """Collapse runs of consecutive spaces in *s* into a single space.

    Fixes an IndexError the previous version raised on empty input
    (it unconditionally read ``s[0]``).  For non-empty input the output
    is identical to before: the first character is always kept, and a
    space is dropped only when it follows another space.
    """
    if not s:
        return ''
    pieces = [s[0]]
    for prev, cur in zip(s, s[1:]):
        if cur == ' ' and prev == ' ':
            continue
        pieces.append(cur)
    return ''.join(pieces)
def charcounter(s):
    """Return the number of characters in *s*.

    Equivalent to the built-in ``len``; the previous hand-rolled
    counting loop was O(n) for no benefit.
    """
    return len(s)
"66026814+MrPathak21@users.noreply.github.com"
] | 66026814+MrPathak21@users.noreply.github.com |
72e036decfd9852b8b0e9d8170a5cb7d4a854ab6 | efd5d310c1a43335a70fbcb89a2480aa7aa1f423 | /goto.py | f3a86a748de3a5238c0a2cc5d452c397e6c070cc | [
"Unlicense"
] | permissive | tianjinghai1978/python-goto | aee690c59ef4e5fecedd742a7db8a44e76de5f52 | acbe736221d2238df3d09beab457d0bb19d05812 | refs/heads/master | 2023-03-19T08:27:22.300630 | 2019-12-15T12:48:22 | 2019-12-15T13:21:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,676 | py | import dis
import struct
import array
import types
import functools
try:
_array_to_bytes = array.array.tobytes
except AttributeError:
_array_to_bytes = array.array.tostring
class _Bytecode:
    """Probes the running interpreter's bytecode layout at import time.

    Compiles a tiny lambda containing a conditional jump and inspects the
    resulting code object to decide between the pre-3.6 variable-width
    format and the 3.6+ fixed 16-bit "wordcode" format.
    """

    def __init__(self):
        # The lambda is guaranteed to contain a POP_JUMP_IF_FALSE; its
        # position in co_code reveals the instruction encoding in use.
        code = (lambda: x if x else y).__code__.co_code
        opcode, oparg = struct.unpack_from('BB', code, 2)

        # Starting with Python 3.6, the bytecode format has changed, using
        # 16-bit words (8-bit opcode + 8-bit argument) for each instruction,
        # as opposed to previously 24 bit (8-bit opcode + 16-bit argument)
        # for instructions that expect an argument and otherwise 8 bit.
        # https://bugs.python.org/issue26647
        if dis.opname[opcode] == 'POP_JUMP_IF_FALSE':
            self.argument = struct.Struct('B')
            self.have_argument = 0
            # As of Python 3.6, jump targets are still addressed by their
            # byte unit. This is matter to change, so that jump targets,
            # in the future might refer to code units (address in bytes / 2).
            # https://bugs.python.org/issue26647
            self.jump_unit = 8 // oparg
        else:
            self.argument = struct.Struct('<H')
            self.have_argument = dis.HAVE_ARGUMENT
            self.jump_unit = 1

    @property
    def argument_bits(self):
        # Width of a single (non-extended) argument, in bits.
        return self.argument.size * 8


# Module-level singleton describing this interpreter's bytecode format.
_BYTECODE = _Bytecode()
def _make_code(code, codestring):
    """Return a copy of *code* with its bytecode replaced by *codestring*.

    All other code-object attributes are carried over unchanged.
    """
    # types.CodeType takes positional arguments only; keep declaration order.
    args = [
        code.co_argcount, code.co_nlocals, code.co_stacksize,
        code.co_flags, codestring, code.co_consts,
        code.co_names, code.co_varnames, code.co_filename,
        code.co_name, code.co_firstlineno, code.co_lnotab,
        code.co_freevars, code.co_cellvars
    ]

    try:
        # Python 3 code objects carry an extra keyword-only-argument count
        # right after co_argcount; Python 2 has no such attribute.
        args.insert(1, code.co_kwonlyargcount)  # PY3
    except AttributeError:
        pass

    return types.CodeType(*args)
def _parse_instructions(code):
    """Yield ``(opname, oparg, offset)`` triples decoded from raw bytecode.

    EXTENDED_ARG prefixes are folded into the following instruction's
    argument; in that case the reported offset is the offset of the first
    prefix byte, so callers can overwrite the whole logical instruction.
    """
    extended_arg = 0
    extended_arg_offset = None
    pos = 0

    while pos < len(code):
        offset = pos
        if extended_arg_offset is not None:
            offset = extended_arg_offset

        opcode = struct.unpack_from('B', code, pos)[0]
        pos += 1

        oparg = None
        if opcode >= _BYTECODE.have_argument:
            oparg = extended_arg | _BYTECODE.argument.unpack_from(code, pos)[0]
            pos += _BYTECODE.argument.size

        if opcode == dis.EXTENDED_ARG:
            # Accumulate the high bits and defer to the next instruction.
            extended_arg = oparg << _BYTECODE.argument_bits
            extended_arg_offset = offset
            continue

        extended_arg = 0
        extended_arg_offset = None
        yield (dis.opname[opcode], oparg, offset)
def _get_instruction_size(opname, oparg=0):
    """Return the encoded size in bytes of one instruction.

    Includes any EXTENDED_ARG prefixes needed for a large *oparg*.
    """
    total = 1  # the opcode byte itself

    # Arguments too wide for one slot need recursive EXTENDED_ARG prefixes.
    high_bits = oparg >> _BYTECODE.argument_bits
    if high_bits != 0:
        total += _get_instruction_size('EXTENDED_ARG', high_bits)

    if dis.opmap[opname] >= _BYTECODE.have_argument:
        total += _BYTECODE.argument.size

    return total
def _get_instructions_size(ops):
    """Return the total encoded size of a sequence of instructions.

    Each element of *ops* is either a bare opname string or an
    ``(opname, oparg)`` tuple.
    """
    return sum(
        _get_instruction_size(op) if isinstance(op, str)
        else _get_instruction_size(*op)
        for op in ops
    )
def _write_instruction(buf, pos, opname, oparg=0):
    """Encode one instruction into *buf* at *pos*.

    Emits EXTENDED_ARG prefixes recursively when *oparg* exceeds the
    basic argument width.  Returns the offset just past what was written.
    """
    extended_arg = oparg >> _BYTECODE.argument_bits
    if extended_arg != 0:
        pos = _write_instruction(buf, pos, 'EXTENDED_ARG', extended_arg)
        # Only the low bits remain for the instruction itself.
        oparg &= (1 << _BYTECODE.argument_bits) - 1

    opcode = dis.opmap[opname]
    buf[pos] = opcode
    pos += 1

    if opcode >= _BYTECODE.have_argument:
        _BYTECODE.argument.pack_into(buf, pos, oparg)
        pos += _BYTECODE.argument.size

    return pos
def _write_instructions(buf, pos, ops):
    """Write each instruction in *ops* into *buf*; return the new offset.

    Accepts the same mixed string / ``(opname, oparg)`` elements as
    ``_get_instructions_size``.
    """
    for op in ops:
        args = (op,) if isinstance(op, str) else tuple(op)
        pos = _write_instruction(buf, pos, *args)
    return pos
def _find_labels_and_gotos(code):
    """Scan *code* for ``label .x`` / ``goto .x`` attribute accesses.

    Uses a 4-instruction sliding window to recognize the pattern
    LOAD_GLOBAL/LOAD_NAME -> LOAD_ATTR -> POP_TOP, while tracking the
    SETUP_*/POP_BLOCK nesting so jumps across block boundaries can be
    validated later.

    Returns ``(labels, gotos)`` where labels maps the label's name index
    to ``(start_offset, end_offset, block_stack)`` and gotos is a list of
    ``(start_offset, end_offset, label_name_index, block_stack)``.
    """
    labels = {}
    gotos = []

    block_stack = []
    block_counter = 0

    # Sliding window over four consecutive instructions.
    opname1 = oparg1 = offset1 = None
    opname2 = oparg2 = offset2 = None
    opname3 = oparg3 = offset3 = None

    for opname4, oparg4, offset4 in _parse_instructions(code.co_code):
        if opname1 in ('LOAD_GLOBAL', 'LOAD_NAME'):
            if opname2 == 'LOAD_ATTR' and opname3 == 'POP_TOP':
                name = code.co_names[oparg1]
                if name == 'label':
                    if oparg2 in labels:
                        raise SyntaxError('Ambiguous label {0!r}'.format(
                            code.co_names[oparg2]
                        ))
                    # offset4 marks the first instruction after the pattern;
                    # everything in [offset1, offset4) will be NOP-ed out.
                    labels[oparg2] = (offset1,
                                      offset4,
                                      tuple(block_stack))
                elif name == 'goto':
                    gotos.append((offset1,
                                  offset4,
                                  oparg2,
                                  tuple(block_stack)))
        elif opname1 in ('SETUP_LOOP',
                         'SETUP_EXCEPT', 'SETUP_FINALLY',
                         'SETUP_WITH', 'SETUP_ASYNC_WITH'):
            # Entering a new block: record a unique id so two stacks
            # compare equal only for the very same block instances.
            block_counter += 1
            block_stack.append(block_counter)
        elif opname1 == 'POP_BLOCK' and block_stack:
            block_stack.pop()

        # Shift the window forward by one instruction.
        opname1, oparg1, offset1 = opname2, oparg2, offset2
        opname2, oparg2, offset2 = opname3, oparg3, offset3
        opname3, oparg3, offset3 = opname4, oparg4, offset4

    return labels, gotos
def _inject_nop_sled(buf, pos, end):
    """Fill ``buf[pos:end]`` with NOP instructions."""
    cursor = pos
    while cursor < end:
        cursor = _write_instruction(buf, cursor, 'NOP')
def _patch_code(code):
    """Rewrite *code* so that ``goto``/``label`` pseudo-statements work.

    Label sites are replaced by NOPs; goto sites become the POP_BLOCK
    instructions needed to unwind into the target's block nesting plus a
    JUMP_ABSOLUTE to the label.  If the replacement does not fit in the
    original goto's footprint, the jump sequence is appended at the end
    of the bytecode and reached via an intermediate jump.
    """
    labels, gotos = _find_labels_and_gotos(code)
    buf = array.array('B', code.co_code)

    for pos, end, _ in labels.values():
        _inject_nop_sled(buf, pos, end)

    for pos, end, label, origin_stack in gotos:
        try:
            _, target, target_stack = labels[label]
        except KeyError:
            raise SyntaxError('Unknown label {0!r}'.format(
                code.co_names[label]
            ))

        # A goto may only jump out of blocks, never into one it is not
        # already inside of.
        target_depth = len(target_stack)
        if origin_stack[:target_depth] != target_stack:
            raise SyntaxError('Jump into different block')

        ops = []
        for i in range(len(origin_stack) - target_depth):
            ops.append('POP_BLOCK')
        ops.append(('JUMP_ABSOLUTE', target // _BYTECODE.jump_unit))

        if pos + _get_instructions_size(ops) > end:
            # not enough space, add code at buffer end and jump there
            buf_end = len(buf)

            go_to_end_ops = [('JUMP_ABSOLUTE', buf_end // _BYTECODE.jump_unit)]

            if pos + _get_instructions_size(go_to_end_ops) > end:
                # not sure if reachable
                raise SyntaxError('Goto in an incredibly huge function')

            pos = _write_instructions(buf, pos, go_to_end_ops)
            _inject_nop_sled(buf, pos, end)

            buf.extend([0] * _get_instructions_size(ops))
            _write_instructions(buf, buf_end, ops)
        else:
            pos = _write_instructions(buf, pos, ops)
            _inject_nop_sled(buf, pos, end)

    return _make_code(code, _array_to_bytes(buf))
def with_goto(func_or_code):
    """Decorator enabling ``goto .name`` / ``label .name`` in a function.

    Accepts either a function or a raw code object; for a function, a new
    function wrapping the patched code object is returned with metadata
    copied over via functools.update_wrapper.
    """
    if isinstance(func_or_code, types.CodeType):
        return _patch_code(func_or_code)

    return functools.update_wrapper(
        types.FunctionType(
            _patch_code(func_or_code.__code__),
            func_or_code.__globals__,
            func_or_code.__name__,
            func_or_code.__defaults__,
            func_or_code.__closure__,
        ),
        func_or_code
    )
| [
"sebastian.noack@gmail.com"
] | sebastian.noack@gmail.com |
04a6497aa4fb2f2caa9082dd44f7be71664ea415 | 1f96e1101cf253a94efa824a0406d807fd93acc1 | /archive/MyBot_213.py | a4963ca3dc631aa58ab5ddfd54bfe1a6dc0445eb | [] | no_license | sp00/google_ai_bot | 228f48302e556271ed6714ebad9c0e449e992f25 | 1f3f77a00dc8289ad8e5ae9378b748fb099bc756 | refs/heads/master | 2020-12-25T00:49:59.968563 | 2010-11-30T03:14:16 | 2010-11-30T03:14:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,352 | py | from planetwars import BaseBot, Game
from planetwars.universe3 import Universe3
from planetwars.planet import Planet
from planetwars.player import PLAYER1, PLAYER2, NOBODY
from planetwars.planet2 import Planet2, getLogger
from planetwars.universe import player, Fleet
from logging import getLogger
import planetwars.planet
from math import ceil
from copy import copy
import random
# Tunable strategy constants.
HORIZON_FIRST = 40  # planning horizon (turns) used when scoring the opening attack
HORIZON = 40  # planning horizon used for subsequent attack scoring
ATTACK_SCORE_THRESHOLD_FIRST = 0  # minimum score to attack a planet on turn one
ATTACK_SCORE_THRESHOLD = 140  # minimum score to attack a planet later on
ATTACK_SCORE_ENEMY_MULTIPLIER = 2  # extra weight for taking enemy-owned planets
log = getLogger(__name__)
def zeros(rows, cols):
    """Return a ``rows`` x ``cols`` matrix (list of lists) filled with zeros.

    Uses a comprehension so each row is guaranteed to be an independent
    list (the previous version achieved this with manual ``row[:]`` copies).
    """
    return [[0] * cols for _ in range(rows)]
# v = list of item values or profit
# w = list of item weight or cost (non-negative integers)
# W = max weight or max cost for the knapsack (non-negative integer)
def zeroOneKnapsack(v, w, W):
    """Solve the 0/1 knapsack problem by dynamic programming.

    Returns ``[best_value, marked]`` where ``marked[i] == 1`` iff item i
    is part of the optimal selection (see getUsedItems).
    """
    # c is the cost matrix
    c = []
    n = len(v)
    c = zeros(n,W+1)
    for i in range(0,n):
        #for ever possible weight
        for j in range(0,W+1):
            #can we add this item to this?
            if (w[i] > j):
                c[i][j] = c[i-1][j]
            else:
                c[i][j] = max(c[i-1][j],v[i] +c[i-1][j-w[i]])
                # NOTE(review): for i == 0, c[i-1] wraps around to the last
                # row, which is still all zeros at that point — this gives
                # the correct base case only because rows are filled in
                # order.  Fragile, but correct as written.
    return [c[n-1][W], getUsedItems(w,c)]
# w = list of item weight or cost
# c = the cost matrix created by the dynamic programming solution
def getUsedItems(w,c):
    """Backtrack through the DP table *c* to recover which items are used.

    Returns a 0/1 list of length ``len(c)``.
    """
    # item count
    i = len(c)-1
    currentW = len(c)[0:1] and len(c[0])-1
    # set everything to not marked
    marked = []
    for i in range(i+1):
        marked.append(0)
    # NOTE: the loop above reuses `i`; afterwards i == len(c) - 1 again,
    # which is exactly the row backtracking starts from.
    while (i >= 0 and currentW >=0):
        # Item i is included when its row's value differs from the row
        # above (or, on row 0, when any value remains).
        if (i==0 and c[i][currentW] >0 )or c[i][currentW] != c[i-1][currentW]:
            marked[i] =1
            currentW = currentW-w[i]
        i = i-1
    return marked
class Move(object):
    """A planned fleet launch: source planet, target planet, launch turn, size."""

    def __init__(self, source, target, turn, ship_count):
        self.source, self.target = source, target
        self.turn = turn
        # Ship counts are whole ships; truncate any float arithmetic result.
        self.ship_count = int(ship_count)

    def __repr__(self):
        return "Move from %s to %s at turn %s with %s ships" % (self.source, self.target, self.turn, self.ship_count)
class MyBot(BaseBot):
    def __init__(self, universe):
        # Handle on the shared game universe, plus a schedule of delayed
        # moves keyed by the turn number on which they should be launched
        # (turn -> [Move]); doScheduled() drains the current turn's entry.
        self.universe = universe
        self.scheduled_moves_at_turn= {}
def total_fleet_ship_count(self, owner):
return sum( [ fleet.ship_count for fleet in self.universe.find_fleets(owner) ] )
def get_neutrals_under_player_attack(self, player):
result = []
for planet in self.nobodies_planets:
if sum( [ 1 for fleet in planet.attacking_fleets if fleet.owner == player ] ) > 0:
result.append(planet)
return result
def get_available_ships_within_distance(self, planet_to_attack, player, distance):
result = 0
for planet in (list(self.universe.find_planets(player)) + self.get_neutrals_under_player_attack(player)):
if planet.id != planet_to_attack.id and planet.distance(planet_to_attack) <= distance and self.ships_needed[planet] == 0:
ships_avail = self.ships_available_at_turn[planet][distance-planet.distance(planet_to_attack)]
# if planet_to_attack.id == 0:
# log.info("get avail from %s = %s" % (planet, ships_avail))
result += ships_avail
return result
def get_attack_score(self, planet_to_attack, future_owner, distance):
turns = self.max_distance_between_planets - distance + HORIZON
attack_score = turns * planet_to_attack.growth_rate
if future_owner in player.ENEMIES:
attack_score *= ATTACK_SCORE_ENEMY_MULTIPLIER
return attack_score
def get_attack_score_200(self, planet_to_attack, future_owner, distance):
profit_turns = max(200 - self.current_turn - distance, 0)
attack_score = profit_turns * planet_to_attack.growth_rate
if future_owner in player.ENEMIES:
attack_score *= 2
return attack_score
def get_scheduled_fleets_to(self, planet):
result = []
for moves in self.scheduled_moves_at_turn.values():
for move in moves:
if move.target == planet:
distance = move.source.distance(move.target)
turns_remaining = distance + (move.turn - self.universe.game.turn_count)
fleet = Fleet(self.universe,random.randint(1,1000000),1, move.ship_count, move.source.id, move.target.id, distance, turns_remaining)
result.append(fleet)
return result
def get_scheduled_fleets_from(self, planet):
result = []
for moves in self.scheduled_moves_at_turn.values():
for move in moves:
if move.source == planet:
turns_remaining = move.turn - self.universe.game.turn_count
fleet = Fleet(self.universe,random.randint(1,1000000),1, move.ship_count, move.source.id, move.target.id, turns_remaining, turns_remaining)
result.append(fleet)
return result
def get_scheduled_fleets_shipcount_from_within_distance(self, planet, turns):
result = 0
for moves in self.scheduled_moves_at_turn.values():
for move in moves:
if move.source == planet:
turns_remaining = move.turn - self.universe.game.turn_count
if turns_remaining == turns:
result += move.ship_count
return result
def get_attack_ship_count_first_turn(self, planet_to_attack, my_home, enemy_home):
my_dist = my_home.distance(planet_to_attack)
enemy_dist = enemy_home.distance(planet_to_attack)
if my_dist < enemy_dist:
return planet_to_attack.ship_count+1
if my_dist == enemy_dist and planet_to_attack.ship_count <= planet_to_attack.growth_rate:
return planet_to_attack.ship_count+1
return 1000000
def closest_enemy_planet(self, p):
if len(self.enemy_planets) == 0:
return None
sorted_planets = sorted(self.enemy_planets, key=lambda ep : p.distance(ep) + ep.id/1000000.0)
return sorted_planets[0]
def closest_enemy_planet_distance(self, p):
return min((lambda ep:ep.distance(p))(ep) for ep in self.enemy_planets)
def my_fleets_attacking(self, planet):
return sum( [ 1 for fleet in planet.attacking_fleets if fleet.owner == player.ME] )
def closest_to_enemy_neutral_under_my_attack(self):
best_distance = 1000000
result_planet = None
for planet in self.nobodies_planets:
if self.my_fleets_attacking(planet) > 0:
distance = self.enemy_com.distance(planet)
if distance < best_distance:
best_distance = distance
result_planet = planet
return result_planet
def decrease_ships_available(self, planet, start_turn, ship_count):
for turn in range(start_turn, self.max_distance_between_planets + 21):
self.ships_available_at_turn[planet][turn] -= ship_count
def send_fleet(self, source, target, ship_count):
if source.owner == PLAYER1 and ship_count > 0 and ship_count <= source.ship_count:
source.send_fleet(target, ship_count)
else:
log.info("Error sending fleet from %s to %s with % ships" % (source, target, ship_count))
    def doScheduled(self):
        """Launch every move scheduled for the current turn, then drop the entry.

        A move is executed only if its source is still ours and both the
        raw ship count and the reserved-ship bookkeeping allow it;
        otherwise the failure is logged.  (Uses dict.has_key — Python 2.)
        """
        log.info("Scheduled move phase")
        # execute delayed moves first
        if self.scheduled_moves_at_turn.has_key(self.current_turn):
            for move in self.scheduled_moves_at_turn[self.current_turn]:
                #if move.ship_count <= move.source.ship_count and move.ship_count > 0 and move.source.owner == PLAYER1 and self.ships_available_at_turn[move.source][0] >= move.ship_count:
                #if move.ship_count <= move.source.ship_count and move.ship_count > 0 and move.source.owner == PLAYER1 and move.source.ship_count >= move.ship_count:
                if move.ship_count <= move.source.ship_count and move.ship_count > 0 and move.source.owner == PLAYER1 and move.source.ship_count >= move.ship_count and self.ships_available_at_turn[move.source][0] >= move.ship_count:
                    self.send_fleet(move.source, move.target, move.ship_count)
                    # Keep the availability bookkeeping in sync with the launch.
                    self.decrease_ships_available(move.source, 0, move.ship_count)
                    #self.cumulative_ships_sent += move.ship_count
                    #self.ships_available[move.source] -= move.ship_count
                else:
                    log.info("Can't execute move: %s, ships avail: %s" % (move, self.ships_available_at_turn[move.source][0]))
            del self.scheduled_moves_at_turn[self.current_turn]
    def doPrep(self):
        """Per-turn bookkeeping: timelines, availability, threat, and aid tables.

        Computes, for every planet, a simulated future timeline (including
        our scheduled moves), how many ships it needs to survive, how many
        it can spare per turn, the enemy center of mass, and the maximum
        aid each player could route to each planet per turn.
        """
        log.info("Prep phase")
        if self.current_turn == 1:
            # One-time setup: home planets and the map's diameter.
            self.my_home = list(self.my_planets)[0]
            self.enemy_home = list(self.enemy_planets)[0]
            self.max_distance_between_planets = 0
            for p1 in self.all_planets:
                for p2 in self.all_planets:
                    self.max_distance_between_planets = max(self.max_distance_between_planets, p1.distance(p2))
            #log.info("Max distance: %s" % self.max_distance_between_planets)
        # calculate current high level metrics
        self.total_ships = {PLAYER1:0, PLAYER2:0}
        self.total_growth_rate = {PLAYER1:0, PLAYER2:0}
        self.ships_available_at_turn = {}
        self.ships_needed = {}
        self.ships_needed_at_turn = {}
        self.ships_needed_timeline = {}
        self.planet_timeline = {}
        for planet in self.all_planets:
            self.ships_available_at_turn[planet] = {}
            # Simulate the planet's future, treating our scheduled (not yet
            # launched) moves as if they were real fleets.
            scheduled_fleets_to_planet = self.get_scheduled_fleets_to(planet)
            scheduled_fleets_from_planet = self.get_scheduled_fleets_from(planet)
            self.planet_timeline[planet] = planet.in_future_timeline(self.max_distance_between_planets + 20, scheduled_fleets_to_planet, scheduled_fleets_from_planet)
            need_help = False
            # if planet.id == 7:
            #     log.info("timeline for %s: %s" % (planet, self.planet_timeline[planet]))
            #log.info("attacking fleets by me: %s" % (self.universe.find_fleets(PLAYER1, destination=planet)))
            prev_owner = planet.owner
            # Detect the first turn at which a planet we own flips owner;
            # that defines its reinforcement need and deadline.
            for step in self.planet_timeline[planet]:
                owner = step[0]
                ship_count = step[1]
                if owner != prev_owner and prev_owner == planet.owner and prev_owner != NOBODY and not need_help:
                    self.ships_needed[planet] = ship_count
                    self.ships_needed_at_turn[planet] = self.planet_timeline[planet].index(step) + 1
                    need_help = True
                    self.ships_needed_timeline[planet] = [ship_count]
                    #log.info("Planet %s needs help %s at %s" % (planet, ship_count, self.ships_needed_at_turn[planet]))
                if need_help and owner == prev_owner:
                    delta = self.planet_timeline[planet].index(step) + 1 - self.ships_needed_at_turn[planet]
                    ships_needed_delta = ship_count - delta * 2 * planet.growth_rate
                    self.ships_needed_timeline[planet].append(ships_needed_delta)
                prev_owner = owner
            if not need_help:
                self.ships_needed[planet] = 0
                # Walk the timeline backwards: the ships spare at turn t are
                # bounded by the minimum garrison at any later turn.
                min_available = 1000000
                step_index = len(self.planet_timeline[planet])
                for step in reversed(self.planet_timeline[planet]):
                    ship_count = step[1]
                    min_available = min(min_available, ship_count)
                    if step[0] == NOBODY:
                        min_available = 0
                    if min_available < 0:
                        log.info("Negative min_available: %s for %s" % (min_available, planet))
                        min_available = 0
                    self.ships_available_at_turn[planet][step_index] = min_available
                    #log.info("avail for %s at %s: %s" % (planet, step_index, min_available))
                    step_index -= 1
                self.ships_available_at_turn[planet][0] = max(0,min(planet.ship_count, self.ships_available_at_turn[planet][1] - planet.growth_rate))
            else:
                # A threatened planet can spare nothing.
                for step_index in range(0, len(self.planet_timeline[planet])+1):
                    self.ships_available_at_turn[planet][step_index] = 0
            if planet.owner != NOBODY:
                self.total_ships[planet.owner] += planet.ship_count
                self.total_growth_rate[planet.owner] += planet.growth_rate
            # if planet.id == 14:
            #     log.info("avail timeline for %s is: %s" % (planet, self.ships_available_at_turn[planet]))
        self.total_ships[PLAYER1] += self.total_fleet_ship_count(PLAYER1)
        self.total_ships[PLAYER2] += self.total_fleet_ship_count(PLAYER2)
        # Reserve ships on our home against an all-in from the enemy home.
        for my_planet in [self.my_home]:
            for enemy_planet in [self.enemy_home]:
                # if self.ships_available_at_turn[enemy_planet][0] < self.ships_available_at_turn[my_planet][0]:
                #     continue
                if my_planet.owner != PLAYER1 or enemy_planet.owner != PLAYER2:
                    continue
                max_enemy_fleet = self.ships_available_at_turn[enemy_planet][0]
                distance = my_planet.distance(enemy_planet)
                ships_needed_for_safety = max_enemy_fleet-(self.planet_timeline[my_planet][distance-1][1] - my_planet.ship_count) - enemy_planet.growth_rate
                #ships_needed_for_safety = max_enemy_fleet-(self.planet_timeline[my_planet][distance-1][1] - my_planet.ship_count)
                if ships_needed_for_safety > (my_planet.ship_count - self.ships_available_at_turn[my_planet][0]):
                    deficit = ships_needed_for_safety - (my_planet.ship_count - self.ships_available_at_turn[my_planet][0])
                    #log.info("deficit for %s: %s, max enemy fleet %s" % (my_planet, deficit, max_enemy_fleet))
                    if deficit > self.ships_available_at_turn[my_planet][0]:
                        deficit = self.ships_available_at_turn[my_planet][0]
                    self.decrease_ships_available(my_planet, 0, deficit)
        # calculate enemy's center of mass
        weighted_x = 0
        weighted_y = 0
        div = 0
        for planet in self.enemy_planets:
            weighted_x += planet.position.x * (self.ships_available_at_turn[planet][0] + planet.growth_rate)
            weighted_y += planet.position.y * (self.ships_available_at_turn[planet][0] + planet.growth_rate)
            div += self.ships_available_at_turn[planet][0] + planet.growth_rate
        if div == 0:
            div = 1
        # Synthetic planet (id 666) marking the enemy's weighted centroid.
        self.enemy_com = Planet(self.universe, 666, weighted_x/div, weighted_y/div, 2, 0, 0)
        # For every planet, and every turn, calculate how many ships each player can send to it
        # TODO should we use ships_available_at_turn here?
        self.max_aid_at_turn = {PLAYER1:{}, PLAYER2:{}}
        for player in (PLAYER1 | PLAYER2):
            source_planets = list(self.universe.find_planets(player)) + self.get_neutrals_under_player_attack(player)
            for planet in self.all_planets:
                self.max_aid_at_turn[player][planet] = {}
                for turn in range(1, self.max_distance_between_planets+21):
                    max_aid = 0
                    for source_planet in source_planets:
                        if source_planet.id != planet.id and planet.distance(source_planet) < turn:
                            source_planet_time_step = self.planet_timeline[source_planet][turn - planet.distance(source_planet) - 1]
                            if (source_planet_time_step[0] == player):
                                #log.info("Max aid by %s for %s from %s at %s: %s" % (player.id, planet.id, source_planet.id, turn, source_planet_time_step[1]))
                                max_aid += source_planet_time_step[1]
                        else:
                            if source_planet.id != planet.id and planet.distance(source_planet) == turn:
                                if (source_planet.owner == player):
                                    max_aid += source_planet.ship_count
                    self.max_aid_at_turn[player][planet][turn] = max_aid
                    #log.info("Max aid by %s for %s at %s: %s" % (player.id, planet.id, turn, self.max_aid_at_turn[player][planet][turn]))
        log.info("MY STATUS: %s/%s" % (self.total_ships[PLAYER1], self.total_growth_rate[PLAYER1]))
        log.info("ENEMY STATUS: %s/%s" % (self.total_ships[PLAYER2], self.total_growth_rate[PLAYER2]))
    def doDefense(self):
        """Reinforce threatened planets, highest growth rate first.

        For each planet needing help, tries progressively smaller required
        ship counts and progressively longer delivery windows until some
        combination of source planets can cover the need; sends fleets
        immediately or schedules them so they all arrive together.
        """
        log.info("Defense phase")
        for planet_to_defend in sorted(self.my_planets, key=lambda p: p.growth_rate + p.id/1000000.0, reverse=True):
            ships_to_send = self.ships_needed[planet_to_defend]
            if ships_to_send <= 0:
                continue
            min_distance = self.max_distance_between_planets
            # Help after ships_needed_at_turn would arrive too late.
            max_distance = self.ships_needed_at_turn[planet_to_defend]
            for my_planet in self.my_planets:
                distance = my_planet.distance(planet_to_defend)
                min_distance = min(min_distance, distance)
            min_distance = max(min_distance, 1)
            # Distinct positive need levels, tried from largest to smallest.
            timeline = [elem for elem in self.ships_needed_timeline[planet_to_defend] if elem > 0]
            ship_counts_to_attempt = sorted(list(set(timeline)), key=lambda p : p, reverse=True)
            #log.info("evaluating defense for %s needed %s" % (planet_to_defend, ship_counts_to_attempt))
            defended = False
            avail_ships_within_distance = {}
            for ships_to_send in ship_counts_to_attempt:
                for distance in range(min_distance, max_distance+1):
                    # calculate if we can get enough ships from my planets to planet_to_defend within 'distance' turns
                    ships_avail_to_defend = 0
                    if avail_ships_within_distance.has_key((planet_to_defend, distance)):
                        ships_avail_to_defend = avail_ships_within_distance[(planet_to_defend, distance)]
                    else:
                        ships_avail_to_defend = self.get_available_ships_within_distance(planet_to_defend, PLAYER1, distance)
                        avail_ships_within_distance[(planet_to_defend, distance)] = ships_avail_to_defend
                    #log.info("Ships avail to defend %s within %s dist: %s" % (planet_to_defend, distance, ships_avail_to_defend))
                    if ships_avail_to_defend >= ships_to_send:
                        ships_left_to_send = ships_to_send
                        # Gather from nearest sources first (deterministic tie-break by id).
                        for source_planet in sorted(list(self.my_planets) + self.get_neutrals_under_player_attack(PLAYER1), key=lambda p : p.distance(planet_to_defend) + p.id/1000000.0):
                            if self.ships_needed[source_planet] > 0:
                                continue
                            #log.info("evaluating for D: %s" % (source_planet))
                            current_distance = source_planet.distance(planet_to_defend)
                            ships_avail = self.ships_available_at_turn[source_planet][distance-current_distance]
                            if source_planet.id != planet_to_defend.id and ships_avail > 0:
                                #log.info("Ships avail from %s: %s at dist %s, dist = %s" % (source_planet, ships_avail, current_distance, distance))
                                ships_to_send = min(ships_left_to_send, ships_avail)
                                if current_distance == distance:
                                    # This source must launch now to arrive in time.
                                    #log.info("defending avail from %s: %s at dist %s" % (source_planet, ships_to_send, current_distance))
                                    self.send_fleet(source_planet, planet_to_defend, ships_to_send)
                                    #self.cumulative_ships_sent += ships_to_send
                                if current_distance < distance:
                                    # Closer sources launch later so all help lands together.
                                    future_turn = self.current_turn + (distance - current_distance)
                                    future_move = Move(source_planet, planet_to_defend, future_turn, ships_to_send)
                                    log.info("Scheduled move: %s" % future_move)
                                    if not self.scheduled_moves_at_turn.has_key(future_turn):
                                        self.scheduled_moves_at_turn[future_turn] = []
                                    self.scheduled_moves_at_turn[future_turn].append(future_move)
                                ships_left_to_send -= ships_to_send
                                self.decrease_ships_available(source_planet, 0, ships_to_send)
                                if ships_left_to_send == 0:
                                    defended = True
                                    break
                        if defended:
                            break
                if defended:
                    break
    def doFirstTurnOffense(self):
        """Opening move: pick the optimal set of neutrals via 0/1 knapsack.

        Candidates are neutrals we can reach before (or tie with) the
        enemy; item weight is the ships needed to capture, item value the
        growth-based attack score, and the budget is what the home planet
        can spare.
        """
        candidates = []
        candidate_map = {}
        home_planet_distance = self.my_home.distance(self.enemy_home)
        # Budget: cap spending by what home regrows before contact.
        ships_available = min(self.my_home.ship_count, self.my_home.growth_rate * (home_planet_distance+0))
        i = 0
        max_attack_distance=0
        for p in sorted(self.nobodies_planets, key=lambda p : self.get_attack_ship_count_first_turn(p, self.my_home, self.enemy_home) + p.id/1000000.0):
            if p.distance(self.my_home) < p.distance(self.enemy_home) or p.distance(self.my_home) == p.distance(self.enemy_home):
                # Equidistant planets are contested only when cheap.
                if p.distance(self.my_home) == p.distance(self.enemy_home) and p.ship_count > 10:
                    continue
                candidates.append(p)
                candidate_map[i] = p
                max_attack_distance = max(max_attack_distance, p.distance(self.my_home))
                i += 1
        weights = []
        profits = []
        for c in candidates:
            weight = self.get_attack_ship_count_first_turn(c, self.my_home, self.enemy_home)
            # Score = growth over the remaining horizon minus capture cost.
            attack_score = (self.max_distance_between_planets - c.distance(self.my_home) + HORIZON_FIRST) * c.growth_rate - (weight - 1)
            if attack_score < ATTACK_SCORE_THRESHOLD_FIRST:
                attack_score = 0
            weights.append(weight)
            profits.append(attack_score)
            #log.info("candidate %s: score %s, weight %s" % (c, attack_score, weight))
        best_planets_to_attack = zeroOneKnapsack(profits,weights,ships_available)
        #log.info("best planets: %s, ships_avail: %s" % (best_planets_to_attack,ships_available))
        sorted_moves = []
        # best_planets_to_attack[1] is the 0/1 "item used" vector.
        for i in range(len(best_planets_to_attack[1])):
            if (best_planets_to_attack[1][i] != 0):
                planet_to_attack = candidate_map[i]
                self.send_fleet(self.my_home, planet_to_attack, planet_to_attack.ship_count+1)
def doOffense(self):
    """Main attack phase: repeatedly pick the single best planet to attack
    (simulating future ownership/garrisons via self.planet_timeline and the
    max-aid tables), then route ships from the nearest sources, scheduling
    moves from sources that are closer than the chosen strike distance.
    """
    log.info("Offense phase")
    if self.current_turn == 1:
        # Turn 1 uses the dedicated knapsack-based opener.
        self.doFirstTurnOffense()
        return
    planets_attacked = []
    best_planet_to_attack = None
    while True:
        best_planet_to_attack = None
        best_planet_to_attack_score = 0
        best_planet_to_attack_distance = 0
        best_planet_to_attack_ships_to_send = 0
        for planet_to_attack in self.all_planets:
            if planet_to_attack in planets_attacked:
                continue
            # Distance window to evaluate: from our closest planet up to the
            # farthest of (our planets, incoming enemy fleets).
            min_distance = self.max_distance_between_planets
            max_distance = 0
            for my_planet in self.my_planets:
                distance = my_planet.distance(planet_to_attack)
                min_distance = min(min_distance, distance)
                max_distance = max(max_distance, distance)
            for fleet in self.universe.find_fleets(owner=PLAYER2, destination=planet_to_attack):
                max_distance = max(max_distance, fleet.turns_remaining)
            #log.info("Max distance for %s: %s" % (planet_to_attack, max_distance))
            min_distance = max(min_distance, 1)
            for distance in range(min_distance, max_distance+1):
                # calculate how many ships we need to get from my planets to planet_to_attack within 'distance' turns
                planet_to_attack_future = self.planet_timeline[planet_to_attack][distance-1]
                planet_to_attack_future_owner = planet_to_attack_future[0]
                if planet_to_attack_future_owner == PLAYER1:
                    # Already ours by then; no attack needed at this or any
                    # later distance.
                    break
                cost_to_conquer = 0 if planet_to_attack_future_owner == PLAYER2 else -1
                time_to_profit = 0
                if planet_to_attack_future_owner == player.NOBODY:
                    # Neutral: pay the garrison, then wait for growth to
                    # amortise it.
                    cost_to_conquer = planet_to_attack_future[1]
                    time_to_profit = int(ceil((cost_to_conquer+0.001)/planet_to_attack.growth_rate)) if planet_to_attack.growth_rate > 0 else 1000000
                # Don't race the enemy's centre of mass to a neutral.
                if planet_to_attack_future_owner == NOBODY and self.enemy_com.distance(planet_to_attack) < distance:
                    break
                #log.info("Time to profit for %s is %s" % (planet_to_attack, time_to_profit))
                # if (distance+time_to_profit) >= self.max_distance_between_planets:
                #     break
                # Simulate whether we can hold the planet until it pays off.
                can_hold = True
                for turn in range(distance, min(distance+time_to_profit+1, self.max_distance_between_planets + 20)):
                    enemy_max_aid = self.max_aid_at_turn[PLAYER2][planet_to_attack][turn]
                    if planet_to_attack_future_owner == player.PLAYER2:
                        enemy_max_aid += self.planet_timeline[planet_to_attack][turn+time_to_profit-1][1]
                    my_max_aid = self.max_aid_at_turn[PLAYER1][planet_to_attack][turn] - cost_to_conquer + planet_to_attack.growth_rate * (turn-distance) - self.cumulative_ships_sent
                    if enemy_max_aid > my_max_aid:
                        can_hold = False
                        #log.info("can't hold %s at turn %s, enemy %s, me %s" % (planet_to_attack, turn, enemy_max_aid, my_max_aid))
                        break
                if not can_hold:
                    continue
                simulation_distance = min(distance+time_to_profit, self.max_distance_between_planets + 20)
                if simulation_distance <= 0:
                    continue
                # Size the strike: cover the garrison plus the enemy's best
                # possible reinforcement surplus, plus one to take ownership.
                enemy_max_aid = self.max_aid_at_turn[PLAYER2][planet_to_attack][simulation_distance]
                if planet_to_attack_future_owner == player.PLAYER2:
                    enemy_max_aid += self.planet_timeline[planet_to_attack][simulation_distance-1][1]
                my_max_aid = self.max_aid_at_turn[PLAYER1][planet_to_attack][simulation_distance] - (cost_to_conquer + 1) - self.cumulative_ships_sent if planet_to_attack_future_owner == NOBODY else 0
                ships_to_send = cost_to_conquer + max(enemy_max_aid - my_max_aid, 0) + 1
                #log.info("aids for %s at distance %s: enemy %s , me %s, cost %s" % (planet_to_attack, distance, enemy_max_aid, my_max_aid, cost_to_conquer))
                # calculate if we can get enough ships from my planets to planet_to_attack within 'distance' turns
                ships_avail_to_attack = self.get_available_ships_within_distance(planet_to_attack, PLAYER1, distance)
                #log.info("avail to attack: %s, need to send %s" % (ships_avail_to_attack, ships_to_send))
                if ships_avail_to_attack >= ships_to_send:
                    # Avoid sniping a neutral the enemy is about to take --
                    # we'd pay the garrison for them.
                    if self.planet_timeline[planet_to_attack][distance-1][0] in player.ENEMIES and self.planet_timeline[planet_to_attack][distance-2][0] == player.NOBODY:
                        continue
                    attack_score = self.get_attack_score(planet_to_attack, planet_to_attack_future_owner, distance)
                    log.info("Attack score of %s at dist %s is: %s - %s ships, cost %s" % (planet_to_attack, distance, attack_score, ships_to_send, cost_to_conquer))
                    # Enemy planets are always worth considering; neutrals
                    # must clear the score threshold net of capture cost.
                    if planet_to_attack_future_owner in player.ENEMIES or (attack_score-cost_to_conquer) >= ATTACK_SCORE_THRESHOLD:
                        if attack_score > best_planet_to_attack_score:
                            best_planet_to_attack_score = attack_score
                            best_planet_to_attack = planet_to_attack
                            best_planet_to_attack_distance = distance
                            best_planet_to_attack_ships_to_send = ships_to_send
                        break
        if best_planet_to_attack is None:
            # No viable target left this turn.
            return
        log.info("Best planet to attack: %s at dist %s with score %s" % (best_planet_to_attack, best_planet_to_attack_distance, best_planet_to_attack_score))
        ships_left_to_send = best_planet_to_attack_ships_to_send
        # Sources: our planets plus neutrals we are already capturing.
        source_planets = list(self.my_planets) + self.get_neutrals_under_player_attack(PLAYER1)
        for source_planet in sorted(source_planets, key=lambda p : p.distance(best_planet_to_attack) + p.id/1000000.0):
            distance = source_planet.distance(best_planet_to_attack)
            if distance > best_planet_to_attack_distance:
                continue
            ships_avail = self.ships_available_at_turn[source_planet][best_planet_to_attack_distance-distance]
            #log.info("ships avail to attack from %s at dist %s: %s" % (source_planet, best_planet_to_attack_distance-distance, ships_avail))
            if self.ships_needed[source_planet] > 0:
                # Planet is earmarked for defense; contribute nothing.
                ships_avail = 0
            if source_planet.id != best_planet_to_attack.id and ships_avail > 0:
                ships_to_send = min(ships_left_to_send, ships_avail)
                #log.info("ships to send from %s: %s" % (source_planet, ships_to_send))
                if distance == best_planet_to_attack_distance and source_planet.owner == PLAYER1:
                    # Source at exactly the strike distance: launch now.
                    self.send_fleet(source_planet, best_planet_to_attack, ships_to_send)
                    #self.cumulative_ships_sent += ships_to_send
                if distance < best_planet_to_attack_distance:
                    # Closer sources launch later so all fleets land together.
                    future_turn = self.current_turn + (best_planet_to_attack_distance - distance)
                    future_move = Move(source_planet, best_planet_to_attack, future_turn, ships_to_send)
                    log.info("Scheduled move: %s" % future_move)
                    if not self.scheduled_moves_at_turn.has_key(future_turn):
                        self.scheduled_moves_at_turn[future_turn] = []
                    self.scheduled_moves_at_turn[future_turn].append(future_move)
                ships_left_to_send -= ships_to_send
                self.decrease_ships_available(source_planet, 0, ships_to_send)
                if ships_left_to_send == 0:
                    break
        planets_attacked.append(best_planet_to_attack)
def doPostOffense2(self):
    """Consolidation: forward idle ships from each of our planets toward the
    front line, hopping to the friendly planet that is closest to the
    nearest enemy planet while still being closer than the enemy itself.
    """
    log.info("Post-Offense phase")
    if len(self.enemy_planets) == 0:
        return
    planets_to_send_to = copy(self.my_planets)
    # Also allow forwarding to a neutral we are already capturing, if any.
    neutral_candidate = self.closest_to_enemy_neutral_under_my_attack()
    if neutral_candidate is not None:
        planets_to_send_to = planets_to_send_to | neutral_candidate
    for source_planet in self.my_planets:
        closest_enemy_planet = self.closest_enemy_planet(source_planet)
        #log.info("Eval Post-Offense for %s: closest enemy is %s" % (source_planet, closest_enemy_planet))
        min_distance_to_enemy = 1000000
        dest_planet = None
        # Pick the forwarding hop: must be closer to us than the enemy is
        # (safe hop) and as close to the enemy as possible. The sort key
        # pushes the source planet itself to the end deterministically.
        for planet_to_send_to in sorted(planets_to_send_to, key=lambda p : p.id if p.id != source_planet.id else 1000000):
            if source_planet.distance(planet_to_send_to) < source_planet.distance(closest_enemy_planet) \
               and planet_to_send_to.distance(closest_enemy_planet) < min_distance_to_enemy:
                min_distance_to_enemy = planet_to_send_to.distance(closest_enemy_planet)
                dest_planet = planet_to_send_to
        if dest_planet is not None and source_planet.id != dest_planet.id and self.ships_available_at_turn[source_planet][0] > 0:
            # Send whatever is free right now (never more than the garrison).
            ships_to_send = min(self.ships_available_at_turn[source_planet][0], source_planet.ship_count)
            self.send_fleet(source_planet, dest_planet, ships_to_send)
            self.decrease_ships_available(source_planet, 0, ships_to_send)
def doPostOffense(self):
    """Alternative consolidation (superseded by doPostOffense2 in do_turn):
    forwards idle ships toward the enemy's centre of mass, using cached
    distance maps for both the closest enemy planet and the enemy COM.
    """
    log.info("Post-Offense phase")
    if len(self.enemy_planets) == 0:
        return
    planets_to_send_to = copy(self.my_planets)
    neutral_candidate = self.closest_to_enemy_neutral_under_my_attack()
    if neutral_candidate is not None:
        planets_to_send_to = planets_to_send_to | neutral_candidate
    # cache closest and com enemy planet distances
    closest_enemy_planet_distance_map = {}
    com_enemy_planet_distance_map = {}
    for planet in planets_to_send_to:
        closest_enemy_planet_distance_map[planet] = self.closest_enemy_planet_distance(planet)
        com_enemy_planet_distance_map[planet] = self.enemy_com.distance(planet)
    # Candidates ordered by proximity to the enemy centre of mass; the
    # p.id/1000000.0 term is a deterministic tie-breaker.
    my_nearest_to_enemy_planets = sorted(planets_to_send_to, key=lambda p : p.distance(self.enemy_com) + p.id/1000000.0)
    for source_planet in self.my_planets:
        # Only planets with no defensive obligations and free ships forward.
        if self.ships_needed[source_planet] == 0 and self.ships_available_at_turn[source_planet][0] > 0:
            #log.info("Post-Offense for %s" % source_planet)
            for dest_planet in my_nearest_to_enemy_planets:
                distance = source_planet.distance(dest_planet)
                # Hop must be shorter than our distance to the enemy COM, and
                # the destination strictly closer to the enemy on both maps.
                if distance > 0 and distance < com_enemy_planet_distance_map[source_planet]:
                    if com_enemy_planet_distance_map[dest_planet] < com_enemy_planet_distance_map[source_planet] and \
                       closest_enemy_planet_distance_map[dest_planet] <= closest_enemy_planet_distance_map[source_planet]:
                        self.send_fleet(source_planet, dest_planet, self.ships_available_at_turn[source_planet][0])
                        self.decrease_ships_available(source_planet, 0, self.ships_available_at_turn[source_planet][0])
                        break
def do_turn(self):
    """Per-turn driver: refresh the cached universe views, then run the
    phases in order (prep, scheduled moves, defense, offense,
    post-offense consolidation). Bails out once we own no planets."""
    universe = self.universe
    self.all_planets = universe.all_planets
    self.my_planets = universe.my_planets
    self.enemy_planets = universe.enemy_planets
    self.nobodies_planets = universe.nobodies_planets
    self.not_my_planets = universe.not_my_planets
    self.current_turn = universe.game.turn_count
    if len(self.my_planets) == 0:
        return
    self.cumulative_ships_sent = 0
    for phase in (self.doPrep, self.doScheduled, self.doDefense,
                  self.doOffense, self.doPostOffense2):
        phase()
# Module entry point: hand control to the framework's game loop, wiring in
# our bot together with the customised universe/planet implementations.
Game(MyBot, universe_class=Universe3, planet_class=Planet2)
| [
"apinkin@apinkin-lappy.(none)"
] | apinkin@apinkin-lappy.(none) |
61181ee6c9b296d449fba0d588a715d3307c4d76 | 5fca0fc6fcada38c91227ee8b10bfe5ae39d374d | /manage.py | c2404b6994bc86aba007914f31ff62ad12e5e8b2 | [] | no_license | Lex3528/Polling | 4d1712993afa8a2a0516f524862455900135bf3a | bdfa8da16367de518162ca87d8d9aa14951b2368 | refs/heads/master | 2022-12-16T04:51:46.528666 | 2020-09-24T18:13:39 | 2020-09-24T18:13:39 | 298,347,378 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Configure the Django settings module and dispatch to Django's CLI.

    Raises a chained ImportError with a helpful hint when Django cannot be
    imported (typically a missing install or inactive virtualenv).
    """
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'apps_conf.settings')
    try:
        from django.core.management import execute_from_command_line as run_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    run_command_line(sys.argv)
# Standard script guard: run the management CLI only when executed directly.
if __name__ == '__main__':
    main()
| [
"mikhaylov611@mail.ru"
] | mikhaylov611@mail.ru |
18b774f735198bcefe1ca5a4e8ffe56b83d8dfd2 | 120622dd09db3aa677e0c0100ea6380b04883aa7 | /.envPy3/lib/python3.6/site-packages/pip/_vendor/distlib/wheel.py | 9a09509ac907b10ca6b57882f7e41d100c2bf407 | [] | no_license | jeffonmac/MacGyver | bf72beacb14a5ddc2aed5787b616d85871b7f3ba | 9b9db6b79164bad7874ba134695174792e2d9a28 | refs/heads/master | 2021-01-18T13:26:01.335442 | 2017-09-13T13:35:37 | 2017-09-13T13:35:37 | 100,375,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39,119 | py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2016 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import unicode_literals
import base64
import codecs
import datetime
import distutils.util
from email import message_from_file
import hashlib
import imp
import json
import logging
import os
import posixpath
import re
import shutil
import sys
import tempfile
import zipfile
from . import __version__, DistlibException
from .compat import sysconfig, ZipFile, fsdecode, text_type, filter
from .database import InstalledDistribution
from .metadata import Metadata, METADATA_FILENAME
from .util import (FileOperator, convert_path, CSVReader, CSVWriter, Cache,
cached_property, get_cache_base, read_exports, tempdir)
from .version import NormalizedVersion, UnsupportedVersionError
logger = logging.getLogger(__name__)

cache = None    # created when needed

# Interpreter implementation prefix used in wheel compatibility tags:
# pp (PyPy), jy (Jython), ip (IronPython), cp (CPython).
if hasattr(sys, 'pypy_version_info'):
    IMP_PREFIX = 'pp'
elif sys.platform.startswith('java'):
    IMP_PREFIX = 'jy'
elif sys.platform == 'cli':
    IMP_PREFIX = 'ip'
else:
    IMP_PREFIX = 'cp'

# Version suffix without a dot, e.g. '27' or '34'; fall back to building it
# from sys.version_info when sysconfig doesn't provide it.
VER_SUFFIX = sysconfig.get_config_var('py_version_nodot')
if not VER_SUFFIX:   # pragma: no cover
    VER_SUFFIX = '%s%s' % sys.version_info[:2]
PYVER = 'py' + VER_SUFFIX
IMPVER = IMP_PREFIX + VER_SUFFIX

# Platform tag, e.g. 'linux_x86_64' ('-' and '.' are not tag-safe).
ARCH = distutils.util.get_platform().replace('-', '_').replace('.', '_')

# ABI tag: prefer the SOABI config value; otherwise derive a CPython-style
# tag from the debug/pymalloc/wide-unicode build flags.
ABI = sysconfig.get_config_var('SOABI')
if ABI and ABI.startswith('cpython-'):
    ABI = ABI.replace('cpython-', 'cp')
else:
    def _derive_abi():
        # Build e.g. 'cp27mu' from the interpreter's compile-time options.
        parts = ['cp', VER_SUFFIX]
        if sysconfig.get_config_var('Py_DEBUG'):
            parts.append('d')
        if sysconfig.get_config_var('WITH_PYMALLOC'):
            parts.append('m')
        if sysconfig.get_config_var('Py_UNICODE_SIZE') == 4:
            parts.append('u')
        return ''.join(parts)
    ABI = _derive_abi()
    del _derive_abi
# Full wheel filename: name-version(-build)?-pyver-abi-arch.whl (PEP 427).
FILENAME_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?
-(?P<py>\w+\d+(\.\w+\d+)*)
-(?P<bi>\w+)
-(?P<ar>\w+(\.\w+)*)
\.whl$
''', re.IGNORECASE | re.VERBOSE)

# Just the name-version(-build)? prefix, without tags or extension.
NAME_VERSION_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?$
''', re.IGNORECASE | re.VERBOSE)

# Shebang handling for scripts packaged in wheels (bytes-level patterns):
# SHEBANG_RE matches any leading '#!...' line; SHEBANG_DETAIL_RE splits the
# interpreter (possibly quoted) from its trailing arguments.
SHEBANG_RE = re.compile(br'\s*#![^\r\n]*')
SHEBANG_DETAIL_RE = re.compile(br'^(\s*#!("[^"]+"|\S+))\s+(.*)$')
SHEBANG_PYTHON = b'#!python'
SHEBANG_PYTHONW = b'#!pythonw'

# Normalise OS paths to forward-slash archive paths on non-POSIX systems.
if os.sep == '/':
    to_posix = lambda o: o
else:
    to_posix = lambda o: o.replace(os.sep, '/')
class Mounter(object):
    """Import hook resolving C extensions contained in mounted wheels.

    ``impure_wheels`` maps a wheel pathname to the (name, path) pairs of the
    extensions it contributed; ``libs`` is the flattened name -> path index
    consulted by the finder/loader methods.
    """
    def __init__(self):
        self.impure_wheels = {}
        self.libs = {}

    def add(self, pathname, extensions):
        """Register the (name, path) extension pairs for the wheel at *pathname*."""
        self.impure_wheels[pathname] = extensions
        self.libs.update(extensions)

    def remove(self, pathname):
        """Forget the wheel at *pathname* and drop its extensions from the index."""
        for name, _ in self.impure_wheels.pop(pathname):
            self.libs.pop(name, None)

    def find_module(self, fullname, path=None):
        """Return self as the loader when *fullname* is a known extension."""
        return self if fullname in self.libs else None

    def load_module(self, fullname):
        """Load (or return the already-imported) extension module *fullname*."""
        try:
            return sys.modules[fullname]
        except KeyError:
            pass
        if fullname not in self.libs:
            raise ImportError('unable to find extension for %s' % fullname)
        result = imp.load_dynamic(fullname, self.libs[fullname])
        result.__loader__ = self
        # Dotted names get their package attribute set from the parent part.
        package, _, _ = fullname.rpartition('.')
        if package:
            result.__package__ = package
        return result
# Singleton import hook shared by all mounted wheels (see Wheel.mount/unmount).
_hook = Mounter()
class Wheel(object):
"""
Class to build and install from Wheel files (PEP 427).
"""
wheel_version = (1, 1)
hash_kind = 'sha256'
def __init__(self, filename=None, sign=False, verify=False):
    """
    Initialise an instance using a (valid) filename.

    *filename* may be None (a dummy wheel), a 'name-version(-build)?'
    prefix, or a complete wheel filename (optionally with a directory);
    the three cases are distinguished below. *sign*/*verify* toggle
    signing and verification behaviour on later operations.
    """
    self.sign = sign
    self.should_verify = verify
    # Defaults for a pure, untagged wheel; overridden when a full
    # filename supplies explicit tags.
    self.buildver = ''
    self.pyver = [PYVER]
    self.abi = ['none']
    self.arch = ['any']
    self.dirname = os.getcwd()
    if filename is None:
        self.name = 'dummy'
        self.version = '0.1'
        self._filename = self.filename
    else:
        m = NAME_VERSION_RE.match(filename)
        if m:
            # 'name-version(-build)?' prefix form.
            info = m.groupdict('')
            self.name = info['nm']
            # Reinstate the local version separator
            self.version = info['vn'].replace('_', '-')
            self.buildver = info['bn']
            self._filename = self.filename
        else:
            # Full wheel filename, possibly with a leading directory.
            dirname, filename = os.path.split(filename)
            m = FILENAME_RE.match(filename)
            if not m:
                raise DistlibException('Invalid name or '
                                       'filename: %r' % filename)
            if dirname:
                self.dirname = os.path.abspath(dirname)
            self._filename = filename
            info = m.groupdict('')
            self.name = info['nm']
            self.version = info['vn']
            self.buildver = info['bn']
            # Compound tags are dot-separated, e.g. 'py2.py3'.
            self.pyver = info['py'].split('.')
            self.abi = info['bi'].split('.')
            self.arch = info['ar'].split('.')
@property
def filename(self):
"""
Build and return a filename from the various components.
"""
if self.buildver:
buildver = '-' + self.buildver
else:
buildver = ''
pyver = '.'.join(self.pyver)
abi = '.'.join(self.abi)
arch = '.'.join(self.arch)
# replace - with _ as a local version separator
version = self.version.replace('-', '_')
return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver,
pyver, abi, arch)
@property
def exists(self):
path = os.path.join(self.dirname, self.filename)
return os.path.isfile(path)
@property
def tags(self):
for pyver in self.pyver:
for abi in self.abi:
for arch in self.arch:
yield pyver, abi, arch
@cached_property
def metadata(self):
    """Return the distribution metadata parsed from the wheel's .dist-info.

    The WHEEL file's Wheel-Version decides which file holds the metadata:
    pre-1.1 wheels use 'METADATA', newer ones use METADATA_FILENAME
    (pydist.json). Raises ValueError when the expected file is absent.
    """
    pathname = os.path.join(self.dirname, self.filename)
    name_ver = '%s-%s' % (self.name, self.version)
    info_dir = '%s.dist-info' % name_ver
    wrapper = codecs.getreader('utf-8')
    with ZipFile(pathname, 'r') as zf:
        wheel_metadata = self.get_wheel_metadata(zf)
        wv = wheel_metadata['Wheel-Version'].split('.', 1)
        file_version = tuple([int(i) for i in wv])
        if file_version < (1, 1):
            fn = 'METADATA'
        else:
            fn = METADATA_FILENAME
        try:
            metadata_filename = posixpath.join(info_dir, fn)
            with zf.open(metadata_filename) as bf:
                wf = wrapper(bf)
                result = Metadata(fileobj=wf)
        except KeyError:
            # ZipFile.open raises KeyError for a missing member.
            raise ValueError('Invalid wheel, because %s is '
                             'missing' % fn)
    return result
def get_wheel_metadata(self, zf):
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
metadata_filename = posixpath.join(info_dir, 'WHEEL')
with zf.open(metadata_filename) as bf:
wf = codecs.getreader('utf-8')(bf)
message = message_from_file(wf)
return dict(message)
@cached_property
def info(self):
    """Return the WHEEL metadata headers (as a dict) read from the wheel
    file on disk; cached after the first access."""
    pathname = os.path.join(self.dirname, self.filename)
    with ZipFile(pathname, 'r') as zf:
        result = self.get_wheel_metadata(zf)
    return result
def process_shebang(self, data):
    """Normalise the shebang of a script's contents (*data*, bytes).

    An existing shebang is rewritten to the '#!python' / '#!pythonw'
    placeholder (keeping any interpreter arguments) so installers can later
    substitute the target interpreter; when there is no shebang, one is
    prepended using the file's own line-ending convention. Returns the
    adjusted bytes.
    """
    m = SHEBANG_RE.match(data)
    if m:
        end = m.end()
        shebang, data_after_shebang = data[:end], data[end:]
        # Preserve any arguments after the interpreter
        if b'pythonw' in shebang.lower():
            shebang_python = SHEBANG_PYTHONW
        else:
            shebang_python = SHEBANG_PYTHON
        m = SHEBANG_DETAIL_RE.match(shebang)
        if m:
            # Last group is the argument tail after the interpreter path.
            args = b' ' + m.groups()[-1]
        else:
            args = b''
        shebang = shebang_python + args
        data = shebang + data_after_shebang
    else:
        # No shebang present: detect the line terminator (\n, \r\n or bare
        # \r) from the first occurrence and prepend a matching shebang line.
        cr = data.find(b'\r')
        lf = data.find(b'\n')
        if cr < 0 or cr > lf:
            term = b'\n'
        else:
            if data[cr:cr + 2] == b'\r\n':
                term = b'\r\n'
            else:
                term = b'\r'
        data = SHEBANG_PYTHON + term + data
    return data
def get_hash(self, data, hash_kind=None):
    """Digest *data* (bytes) with *hash_kind* (default: self.hash_kind)
    and return (algorithm_name, digest) where the digest is urlsafe
    base64 with the '=' padding stripped, as used in RECORD files."""
    if hash_kind is None:
        hash_kind = self.hash_kind
    hasher_factory = getattr(hashlib, hash_kind, None)
    if hasher_factory is None:
        raise DistlibException('Unsupported hash algorithm: %r' % hash_kind)
    raw_digest = hasher_factory(data).digest()
    encoded = base64.urlsafe_b64encode(raw_digest).rstrip(b'=').decode('ascii')
    return hash_kind, encoded
def write_record(self, records, record_path, base):
    """Write the RECORD CSV at *record_path*.

    *records* is a sequence of (archive_path, digest, size) rows; a row for
    the RECORD file itself (relative to *base*, with empty digest/size) is
    appended, and the output is sorted so it is deterministic. The caller's
    sequence is not mutated.
    """
    rows = list(records)  # copy: don't mutate the caller's list
    own_entry = to_posix(os.path.relpath(record_path, base))
    rows.append((own_entry, '', ''))
    with CSVWriter(record_path) as writer:
        for row in sorted(rows):
            writer.writerow(row)
def write_records(self, info, libdir, archive_paths):
    """Compute the RECORD rows for every archive entry and write the file.

    *info* is a (distinfo_dir, info_dir_archive_name) pair; for each
    (archive_path, fs_path) in *archive_paths* a (path, digest, size) row
    is produced, RECORD is written into the .dist-info directory via
    write_record(), and RECORD itself is appended to *archive_paths* so it
    ends up inside the wheel too.
    """
    records = []
    distinfo, info_dir = info
    # NOTE: an unused 'hasher = getattr(hashlib, self.hash_kind)' local was
    # removed here; get_hash() performs the hashing itself.
    for ap, p in archive_paths:
        with open(p, 'rb') as f:
            data = f.read()
        digest = '%s=%s' % self.get_hash(data)
        size = os.path.getsize(p)
        records.append((ap, digest, size))
    p = os.path.join(distinfo, 'RECORD')
    self.write_record(records, p, libdir)
    ap = to_posix(os.path.join(info_dir, 'RECORD'))
    archive_paths.append((ap, p))
def build_zip(self, pathname, archive_paths):
    """Create a deflate-compressed zip at *pathname* containing each
    (archive_name, filesystem_path) pair from *archive_paths*."""
    with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as archive:
        for archive_name, source_path in archive_paths:
            logger.debug('Wrote %s to %s in wheel', source_path, archive_name)
            archive.write(source_path, archive_name)
def build(self, paths, tags=None, wheel_version=None):
    """
    Build a wheel from files in specified paths, and use any specified tags
    when determining the name of the wheel.

    *paths* maps location keys ('purelib' or 'platlib', plus optionally
    'data', 'headers', 'scripts') to directories whose contents go into
    the wheel. Returns the path of the wheel file written.
    """
    if tags is None:
        tags = {}
    # Exactly one of purelib/platlib is expected; platlib implies an
    # impure (platform-specific) wheel with implementation-specific tags.
    libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0]
    if libkey == 'platlib':
        is_pure = 'false'
        default_pyver = [IMPVER]
        default_abi = [ABI]
        default_arch = [ARCH]
    else:
        is_pure = 'true'
        default_pyver = [PYVER]
        default_abi = ['none']
        default_arch = ['any']
    self.pyver = tags.get('pyver', default_pyver)
    self.abi = tags.get('abi', default_abi)
    self.arch = tags.get('arch', default_arch)
    libdir = paths[libkey]
    name_ver = '%s-%s' % (self.name, self.version)
    data_dir = '%s.data' % name_ver
    info_dir = '%s.dist-info' % name_ver
    archive_paths = []
    # First, stuff which is not in site-packages
    for key in ('data', 'headers', 'scripts'):
        if key not in paths:
            continue
        path = paths[key]
        if os.path.isdir(path):
            for root, dirs, files in os.walk(path):
                for fn in files:
                    p = fsdecode(os.path.join(root, fn))
                    rp = os.path.relpath(p, path)
                    ap = to_posix(os.path.join(data_dir, key, rp))
                    archive_paths.append((ap, p))
                    # Scripts get their shebang normalised in place (real
                    # .exe launchers are left untouched).
                    if key == 'scripts' and not p.endswith('.exe'):
                        with open(p, 'rb') as f:
                            data = f.read()
                        data = self.process_shebang(data)
                        with open(p, 'wb') as f:
                            f.write(data)
    # Now, stuff which is in site-packages, other than the
    # distinfo stuff.
    path = libdir
    distinfo = None
    for root, dirs, files in os.walk(path):
        if root == path:
            # At the top level only, save distinfo for later
            # and skip it for now
            for i, dn in enumerate(dirs):
                dn = fsdecode(dn)
                if dn.endswith('.dist-info'):
                    distinfo = os.path.join(root, dn)
                    del dirs[i]
                    break
            assert distinfo, '.dist-info directory expected, not found'
        for fn in files:
            # comment out next suite to leave .pyc files in
            if fsdecode(fn).endswith(('.pyc', '.pyo')):
                continue
            p = os.path.join(root, fn)
            rp = to_posix(os.path.relpath(p, path))
            archive_paths.append((rp, p))
    # Now distinfo. Assumed to be flat, i.e. os.listdir is enough.
    # RECORD/INSTALLER/SHARED/WHEEL are regenerated or installer-owned,
    # so they are excluded here.
    files = os.listdir(distinfo)
    for fn in files:
        if fn not in ('RECORD', 'INSTALLER', 'SHARED', 'WHEEL'):
            p = fsdecode(os.path.join(distinfo, fn))
            ap = to_posix(os.path.join(info_dir, fn))
            archive_paths.append((ap, p))
    # Write the WHEEL metadata file with version, generator and tags.
    wheel_metadata = [
        'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version),
        'Generator: distlib %s' % __version__,
        'Root-Is-Purelib: %s' % is_pure,
    ]
    for pyver, abi, arch in self.tags:
        wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch))
    p = os.path.join(distinfo, 'WHEEL')
    with open(p, 'w') as f:
        f.write('\n'.join(wheel_metadata))
    ap = to_posix(os.path.join(info_dir, 'WHEEL'))
    archive_paths.append((ap, p))
    # Now, at last, RECORD.
    # Paths in here are archive paths - nothing else makes sense.
    self.write_records((distinfo, info_dir), libdir, archive_paths)
    # Now, ready to build the zip file
    pathname = os.path.join(self.dirname, self.filename)
    self.build_zip(pathname, archive_paths)
    return pathname
def install(self, paths, maker, **kwargs):
    """
    Install a wheel to the specified paths. If kwarg ``warner`` is
    specified, it should be a callable, which will be called with two
    tuples indicating the wheel version of this software and the wheel
    version in the file, if there is a discrepancy in the versions.
    This can be used to issue any warnings to raise any exceptions.
    If kwarg ``lib_only`` is True, only the purelib/platlib files are
    installed, and the headers, scripts, data and dist-info metadata are
    not written.

    The return value is a :class_mod:`InstalledDistribution` instance unless
    ``options.lib_only`` is True, in which case the return value is ``None``.
    """
    dry_run = maker.dry_run
    warner = kwargs.get('warner')
    lib_only = kwargs.get('lib_only', False)
    pathname = os.path.join(self.dirname, self.filename)
    name_ver = '%s-%s' % (self.name, self.version)
    data_dir = '%s.data' % name_ver
    info_dir = '%s.dist-info' % name_ver
    metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
    wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
    record_name = posixpath.join(info_dir, 'RECORD')
    wrapper = codecs.getreader('utf-8')
    with ZipFile(pathname, 'r') as zf:
        # Parse WHEEL to learn the format version and purity.
        with zf.open(wheel_metadata_name) as bwf:
            wf = wrapper(bwf)
            message = message_from_file(wf)
        wv = message['Wheel-Version'].split('.', 1)
        file_version = tuple([int(i) for i in wv])
        if (file_version != self.wheel_version) and warner:
            warner(self.wheel_version, file_version)
        if message['Root-Is-Purelib'] == 'true':
            libdir = paths['purelib']
        else:
            libdir = paths['platlib']
        # Load RECORD: archive path -> (path, digest, size) row, used to
        # verify each member as it is installed.
        records = {}
        with zf.open(record_name) as bf:
            with CSVReader(stream=bf) as reader:
                for row in reader:
                    p = row[0]
                    records[p] = row
        data_pfx = posixpath.join(data_dir, '')
        info_pfx = posixpath.join(info_dir, '')
        script_pfx = posixpath.join(data_dir, 'scripts', '')
        # make a new instance rather than a copy of maker's,
        # as we mutate it
        fileop = FileOperator(dry_run=dry_run)
        fileop.record = True    # so we can rollback if needed
        bc = not sys.dont_write_bytecode    # Double negatives. Lovely!
        outfiles = []   # for RECORD writing
        # for script copying/shebang processing
        workdir = tempfile.mkdtemp()
        # set target dir later
        # we default add_launchers to False, as the
        # Python Launcher should be used instead
        maker.source_dir = workdir
        maker.target_dir = None
        try:
            for zinfo in zf.infolist():
                arcname = zinfo.filename
                if isinstance(arcname, text_type):
                    u_arcname = arcname
                else:
                    u_arcname = arcname.decode('utf-8')
                # The signature file won't be in RECORD,
                # and we don't currently don't do anything with it
                if u_arcname.endswith('/RECORD.jws'):
                    continue
                row = records[u_arcname]
                # Verify size and digest against the RECORD row.
                if row[2] and str(zinfo.file_size) != row[2]:
                    raise DistlibException('size mismatch for '
                                           '%s' % u_arcname)
                if row[1]:
                    kind, value = row[1].split('=', 1)
                    with zf.open(arcname) as bf:
                        data = bf.read()
                    _, digest = self.get_hash(data, kind)
                    if digest != value:
                        raise DistlibException('digest mismatch for '
                                               '%s' % arcname)
                if lib_only and u_arcname.startswith((info_pfx, data_pfx)):
                    logger.debug('lib_only: skipping %s', u_arcname)
                    continue
                # Scripts (except .exe launchers) are regenerated via the
                # maker instead of being copied verbatim.
                is_script = (u_arcname.startswith(script_pfx)
                             and not u_arcname.endswith('.exe'))
                if u_arcname.startswith(data_pfx):
                    # <name>.data/<where>/... entries map onto paths[where].
                    _, where, rp = u_arcname.split('/', 2)
                    outfile = os.path.join(paths[where], convert_path(rp))
                else:
                    # meant for site-packages.
                    if u_arcname in (wheel_metadata_name, record_name):
                        continue
                    outfile = os.path.join(libdir, convert_path(u_arcname))
                if not is_script:
                    with zf.open(arcname) as bf:
                        fileop.copy_stream(bf, outfile)
                    outfiles.append(outfile)
                    # Double check the digest of the written file
                    if not dry_run and row[1]:
                        with open(outfile, 'rb') as bf:
                            data = bf.read()
                        _, newdigest = self.get_hash(data, kind)
                        if newdigest != digest:
                            raise DistlibException('digest mismatch '
                                                   'on write for '
                                                   '%s' % outfile)
                    if bc and outfile.endswith('.py'):
                        try:
                            pyc = fileop.byte_compile(outfile)
                            outfiles.append(pyc)
                        except Exception:
                            # Don't give up if byte-compilation fails,
                            # but log it and perhaps warn the user
                            logger.warning('Byte-compilation failed',
                                           exc_info=True)
                else:
                    # Stage the script in the workdir, then let the maker
                    # produce the launcher(s) in the final location.
                    fn = os.path.basename(convert_path(arcname))
                    workname = os.path.join(workdir, fn)
                    with zf.open(arcname) as bf:
                        fileop.copy_stream(bf, workname)
                    dn, fn = os.path.split(outfile)
                    maker.target_dir = dn
                    filenames = maker.make(fn)
                    fileop.set_executable_mode(filenames)
                    outfiles.extend(filenames)
            if lib_only:
                logger.debug('lib_only: returning None')
                dist = None
            else:
                # Generate scripts
                # Try to get pydist.json so we can see if there are
                # any commands to generate. If this fails (e.g. because
                # of a legacy wheel), log a warning but don't give up.
                commands = None
                file_version = self.info['Wheel-Version']
                if file_version == '1.0':
                    # Use legacy info
                    ep = posixpath.join(info_dir, 'entry_points.txt')
                    try:
                        with zf.open(ep) as bwf:
                            epdata = read_exports(bwf)
                        commands = {}
                        for key in ('console', 'gui'):
                            k = '%s_scripts' % key
                            if k in epdata:
                                commands['wrap_%s' % key] = d = {}
                                for v in epdata[k].values():
                                    s = '%s:%s' % (v.prefix, v.suffix)
                                    if v.flags:
                                        s += ' %s' % v.flags
                                    d[v.name] = s
                    except Exception:
                        logger.warning('Unable to read legacy script '
                                       'metadata, so cannot generate '
                                       'scripts')
                else:
                    try:
                        with zf.open(metadata_name) as bwf:
                            wf = wrapper(bwf)
                            commands = json.load(wf).get('extensions')
                            if commands:
                                commands = commands.get('python.commands')
                    except Exception:
                        logger.warning('Unable to read JSON metadata, so '
                                       'cannot generate scripts')
                if commands:
                    console_scripts = commands.get('wrap_console', {})
                    gui_scripts = commands.get('wrap_gui', {})
                    if console_scripts or gui_scripts:
                        script_dir = paths.get('scripts', '')
                        if not os.path.isdir(script_dir):
                            raise ValueError('Valid script path not '
                                             'specified')
                        maker.target_dir = script_dir
                        for k, v in console_scripts.items():
                            script = '%s = %s' % (k, v)
                            filenames = maker.make(script)
                            fileop.set_executable_mode(filenames)
                        if gui_scripts:
                            options = {'gui': True }
                            for k, v in gui_scripts.items():
                                script = '%s = %s' % (k, v)
                                filenames = maker.make(script, options)
                                fileop.set_executable_mode(filenames)
                p = os.path.join(libdir, info_dir)
                dist = InstalledDistribution(p)
                # Write SHARED
                paths = dict(paths)     # don't change passed in dict
                del paths['purelib']
                del paths['platlib']
                paths['lib'] = libdir
                p = dist.write_shared_locations(paths, dry_run)
                if p:
                    outfiles.append(p)
                # Write RECORD
                dist.write_installed_files(outfiles, paths['prefix'],
                                           dry_run)
            return dist
        except Exception:    # pragma: no cover
            logger.exception('installation failed.')
            fileop.rollback()
            raise
        finally:
            shutil.rmtree(workdir)
def _get_dylib_cache(self):
    """Return (creating on first use) the module-level Cache used to hold
    extension modules extracted from mounted wheels.

    The cache directory is versioned per interpreter minor version.
    """
    global cache
    if cache is None:
        # Use native string to avoid issues on 2.x: see Python #20140.
        # Use sys.version_info rather than sys.version[:3]: the slice
        # truncates two-digit minors (e.g. Python 3.10 -> '3.1').
        base = os.path.join(get_cache_base(), str('dylib-cache'),
                            '%s.%s' % sys.version_info[:2])
        cache = Cache(base)
    return cache
def _get_extensions(self):
    """Return a list of (name, extracted_path) pairs for the C extensions
    listed in the wheel's EXTENSIONS file.

    Each extension is extracted into the dylib cache when it is missing or
    older than the corresponding wheel entry. Wheels without an EXTENSIONS
    file yield an empty list.
    """
    pathname = os.path.join(self.dirname, self.filename)
    name_ver = '%s-%s' % (self.name, self.version)
    info_dir = '%s.dist-info' % name_ver
    arcname = posixpath.join(info_dir, 'EXTENSIONS')
    wrapper = codecs.getreader('utf-8')
    result = []
    with ZipFile(pathname, 'r') as zf:
        try:
            with zf.open(arcname) as bf:
                wf = wrapper(bf)
                # EXTENSIONS maps module name -> relative path in the wheel.
                extensions = json.load(wf)
                cache = self._get_dylib_cache()
                prefix = cache.prefix_to_dir(pathname)
                cache_base = os.path.join(cache.base, prefix)
                if not os.path.isdir(cache_base):
                    os.makedirs(cache_base)
                for name, relpath in extensions.items():
                    dest = os.path.join(cache_base, convert_path(relpath))
                    if not os.path.exists(dest):
                        extract = True
                    else:
                        # Re-extract when the wheel member is newer than
                        # the cached copy (mtime vs. zip date_time).
                        file_time = os.stat(dest).st_mtime
                        file_time = datetime.datetime.fromtimestamp(file_time)
                        info = zf.getinfo(relpath)
                        wheel_time = datetime.datetime(*info.date_time)
                        extract = wheel_time > file_time
                    if extract:
                        zf.extract(relpath, cache_base)
                    result.append((name, dest))
        except KeyError:
            # No EXTENSIONS member: a pure wheel; nothing to extract.
            pass
    return result
def is_compatible(self):
    """
    Determine if a wheel is compatible with the running system.
    """
    # Delegates to the module-level is_compatible() helper (defined later
    # in this module), which checks the wheel's tags against the system's.
    return is_compatible(self)
def is_mountable(self):
    """
    Determine if a wheel is asserted as mountable by its metadata.
    """
    return True # for now - metadata details TBD
def mount(self, append=False):
    """Make the wheel's contents importable by adding it to sys.path.

    *append* controls whether the wheel goes at the end or the front of
    sys.path. When the wheel ships C extensions, the shared Mounter hook
    is installed on sys.meta_path and told about them. Raises
    DistlibException for incompatible or non-mountable wheels.
    """
    pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
    if not self.is_compatible():
        raise DistlibException(
            'Wheel %s not compatible with this Python.' % pathname)
    if not self.is_mountable():
        raise DistlibException(
            'Wheel %s is marked as not mountable.' % pathname)
    if pathname in sys.path:
        logger.debug('%s already in path', pathname)
        return
    position = len(sys.path) if append else 0
    sys.path.insert(position, pathname)
    extensions = self._get_extensions()
    if extensions:
        if _hook not in sys.meta_path:
            sys.meta_path.append(_hook)
        _hook.add(pathname, extensions)
def unmount(self):
    """Undo mount(): remove the wheel from sys.path and detach its
    extensions from the import hook, removing the hook itself from
    sys.meta_path once no mounted wheel needs it."""
    pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
    if pathname not in sys.path:
        logger.debug('%s not in path', pathname)
        return
    sys.path.remove(pathname)
    if pathname in _hook.impure_wheels:
        _hook.remove(pathname)
    if not _hook.impure_wheels and _hook in sys.meta_path:
        sys.meta_path.remove(_hook)
    def verify(self):
        """
        Verify the wheel archive against its RECORD file: every entry's
        size and digest must match what RECORD declares for it.

        :raises DistlibException: on a size or digest mismatch, or if any
                                  archive path contains ``..``.
        """
        pathname = os.path.join(self.dirname, self.filename)
        name_ver = '%s-%s' % (self.name, self.version)
        data_dir = '%s.data' % name_ver  # NOTE(review): computed but unused here
        info_dir = '%s.dist-info' % name_ver

        metadata_name = posixpath.join(info_dir, METADATA_FILENAME)  # NOTE(review): unused here
        wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
        record_name = posixpath.join(info_dir, 'RECORD')

        wrapper = codecs.getreader('utf-8')

        with ZipFile(pathname, 'r') as zf:
            with zf.open(wheel_metadata_name) as bwf:
                wf = wrapper(bwf)
                message = message_from_file(wf)
            wv = message['Wheel-Version'].split('.', 1)
            file_version = tuple([int(i) for i in wv])
            # TODO version verification

            # Map each archive path to its RECORD row: (path, digest, size).
            records = {}
            with zf.open(record_name) as bf:
                with CSVReader(stream=bf) as reader:
                    for row in reader:
                        p = row[0]
                        records[p] = row

            for zinfo in zf.infolist():
                arcname = zinfo.filename
                if isinstance(arcname, text_type):
                    u_arcname = arcname
                else:
                    u_arcname = arcname.decode('utf-8')
                # Reject path-traversal entries outright.
                if '..' in u_arcname:
                    raise DistlibException('invalid entry in '
                                           'wheel: %r' % u_arcname)

                # The signature file won't be in RECORD,
                # and we don't currently do anything with it
                if u_arcname.endswith('/RECORD.jws'):
                    continue
                row = records[u_arcname]

                # Check the size, then (if a digest was recorded) the hash.
                if row[2] and str(zinfo.file_size) != row[2]:
                    raise DistlibException('size mismatch for '
                                           '%s' % u_arcname)
                if row[1]:
                    kind, value = row[1].split('=', 1)
                    with zf.open(arcname) as bf:
                        data = bf.read()
                    _, digest = self.get_hash(data, kind)
                    if digest != value:
                        raise DistlibException('digest mismatch for '
                                               '%s' % arcname)
    def update(self, modifier, dest_dir=None, **kwargs):
        """
        Update the contents of a wheel in a generic way. The modifier should
        be a callable which expects a dictionary argument: its keys are
        archive-entry paths, and its values are absolute filesystem paths
        where the contents the corresponding archive entries can be found. The
        modifier is free to change the contents of the files pointed to, add
        new entries and remove entries, before returning. This method will
        extract the entire contents of the wheel to a temporary location, call
        the modifier, and then use the passed (and possibly updated)
        dictionary to write a new wheel. If ``dest_dir`` is specified, the new
        wheel is written there -- otherwise, the original wheel is overwritten.

        The modifier should return True if it updated the wheel, else False.
        This method returns the same value the modifier returns.
        """

        # Read the version recorded in METADATA (or legacy PKG-INFO), if any.
        def get_version(path_map, info_dir):
            version = path = None
            key = '%s/%s' % (info_dir, METADATA_FILENAME)
            if key not in path_map:
                key = '%s/PKG-INFO' % info_dir
            if key in path_map:
                path = path_map[key]
                version = Metadata(path=path).version
            return version, path

        # Bump the local version segment (PEP 440 "+N") so a modified wheel
        # is distinguishable from the original release.
        def update_version(version, path):
            updated = None
            try:
                v = NormalizedVersion(version)
                i = version.find('-')
                if i < 0:
                    updated = '%s+1' % version
                else:
                    parts = [int(s) for s in version[i + 1:].split('.')]
                    parts[-1] += 1
                    updated = '%s+%s' % (version[:i],
                                         '.'.join(str(i) for i in parts))
            except UnsupportedVersionError:
                logger.debug('Cannot update non-compliant (PEP-440) '
                             'version %r', version)
            if updated:
                md = Metadata(path=path)
                md.version = updated
                legacy = not path.endswith(METADATA_FILENAME)
                md.write(path=path, legacy=legacy)
                logger.debug('Version updated from %r to %r', version,
                             updated)

        pathname = os.path.join(self.dirname, self.filename)
        name_ver = '%s-%s' % (self.name, self.version)
        info_dir = '%s.dist-info' % name_ver
        record_name = posixpath.join(info_dir, 'RECORD')
        with tempdir() as workdir:
            with ZipFile(pathname, 'r') as zf:
                path_map = {}
                for zinfo in zf.infolist():
                    arcname = zinfo.filename
                    if isinstance(arcname, text_type):
                        u_arcname = arcname
                    else:
                        u_arcname = arcname.decode('utf-8')
                    # RECORD is regenerated later; never hand it to the modifier.
                    if u_arcname == record_name:
                        continue
                    if '..' in u_arcname:
                        raise DistlibException('invalid entry in '
                                               'wheel: %r' % u_arcname)
                    zf.extract(zinfo, workdir)
                    path = os.path.join(workdir, convert_path(u_arcname))
                    path_map[u_arcname] = path

            # Remember the version.
            original_version, _ = get_version(path_map, info_dir)
            # Files extracted. Call the modifier.
            modified = modifier(path_map, **kwargs)
            if modified:
                # Something changed - need to build a new wheel.
                current_version, path = get_version(path_map, info_dir)
                if current_version and (current_version == original_version):
                    # Add or update local version to signify changes.
                    update_version(current_version, path)
                # Decide where the new wheel goes.
                if dest_dir is None:
                    fd, newpath = tempfile.mkstemp(suffix='.whl',
                                                   prefix='wheel-update-',
                                                   dir=workdir)
                    os.close(fd)
                else:
                    if not os.path.isdir(dest_dir):
                        raise DistlibException('Not a directory: %r' % dest_dir)
                    newpath = os.path.join(dest_dir, self.filename)
                archive_paths = list(path_map.items())
                distinfo = os.path.join(workdir, info_dir)
                info = distinfo, info_dir
                self.write_records(info, workdir, archive_paths)
                self.build_zip(newpath, archive_paths)
                # In-place update: copy over the original only on success.
                if dest_dir is None:
                    shutil.copyfile(newpath, pathname)
        return modified
def compatible_tags():
    """
    Return (pyver, abi, arch) tuples compatible with this Python.

    The result covers the current interpreter version plus every earlier
    minor version of the same major release, each known ABI (plus 'none'),
    and -- on macOS -- the family of fat/universal architecture aliases.
    """
    versions = [VER_SUFFIX]
    major = VER_SUFFIX[0]
    for minor in range(sys.version_info[1] - 1, - 1, -1):
        versions.append(''.join([major, str(minor)]))

    abis = []
    for suffix, _, _ in imp.get_suffixes():
        if suffix.startswith('.abi'):
            abis.append(suffix.split('.', 2)[1])
    abis.sort()
    if ABI != 'none':
        abis.insert(0, ABI)
    abis.append('none')
    result = []

    arches = [ARCH]
    if sys.platform == 'darwin':
        # Fix: use a raw string so '\w' and '\d' are regex escapes rather
        # than invalid string escape sequences (a SyntaxWarning on modern
        # Python and an error in the future).
        m = re.match(r'(\w+)_(\d+)_(\d+)_(\w+)$', ARCH)
        if m:
            name, major, minor, arch = m.groups()
            minor = int(minor)
            matches = [arch]
            if arch in ('i386', 'ppc'):
                matches.append('fat')
            if arch in ('i386', 'ppc', 'x86_64'):
                matches.append('fat3')
            if arch in ('ppc64', 'x86_64'):
                matches.append('fat64')
            if arch in ('i386', 'x86_64'):
                matches.append('intel')
            if arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):
                matches.append('universal')
            # Every equal-or-lower macOS minor version is also compatible.
            while minor >= 0:
                for match in matches:
                    s = '%s_%s_%s_%s' % (name, major, minor, match)
                    if s != ARCH:   # already there
                        arches.append(s)
                minor -= 1

    # Most specific - our Python version, ABI and arch
    for abi in abis:
        for arch in arches:
            result.append((''.join((IMP_PREFIX, versions[0])), abi, arch))

    # where no ABI / arch dependency, but IMP_PREFIX dependency
    for i, version in enumerate(versions):
        result.append((''.join((IMP_PREFIX, version)), 'none', 'any'))
        if i == 0:
            result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any'))

    # no IMP_PREFIX, ABI or arch dependency
    for i, version in enumerate(versions):
        result.append((''.join(('py', version)), 'none', 'any'))
        if i == 0:
            result.append((''.join(('py', version[0])), 'none', 'any'))
    return set(result)
# Tag set for the running interpreter, computed once at import time; the
# helper is deleted afterwards so only the constant remains public.
COMPATIBLE_TAGS = compatible_tags()
del compatible_tags
def is_compatible(wheel, tags=None):
    """Return True if *wheel* (a Wheel or a wheel filename) matches at least
    one of the supplied (pyver, abi, arch) tags.

    :param wheel: A :class:`Wheel` instance, or a filename to wrap in one.
    :param tags: Iterable of candidate tags; defaults to COMPATIBLE_TAGS.
    """
    if not isinstance(wheel, Wheel):
        wheel = Wheel(wheel)    # assume it's a filename
    candidates = COMPATIBLE_TAGS if tags is None else tags
    return any(ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch
               for ver, abi, arch in candidates)
| [
"cjoffrey@gmail.com"
] | cjoffrey@gmail.com |
2961d204760a2520fcea0110dbca3da1a4547114 | 19e263ce69087e386b5bb92a534f69dc39f46877 | /main.py | d961541aca3d03c63940ce20ef6e66755e29c7f0 | [] | no_license | Jyuukun/PS5-Alerts | 055b855bde75540b8534d7c713c0edf04dbcb807 | f13deb7353f11ea486c204e52e7e4a38a1544161 | refs/heads/main | 2023-04-04T16:51:57.826709 | 2020-11-26T15:44:20 | 2021-04-12T11:36:30 | 316,273,706 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,507 | py | # -*- coding: utf-8 -*-
import logging
import os
import sys
import signal
import smtplib
import tempfile
import time
from configparser import ConfigParser
from weboob.tools.log import createColoredFormatter, getLogger
from amazon import AmazonBrowser
from auchan import AuchanBrowser
from boulanger import BoulangerBrowser
from cdiscount import CdiscountBrowser
from carrefour import CarrefourBrowser
from cultura import CulturaBrowser
from darty import DartyBrowser
from fnac import FnacBrowser
from leclerc import LeclercBrowser
from micromania import MicromaniaBrowser
def get_config():
    """Load the INI-style settings file named 'config' that sits next to
    this script (the script directory is resolved via ``sys.argv[0]``)."""
    settings = ConfigParser()
    script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
    settings.read(script_dir + '/config')
    return settings
def send_mail(subject, text):
    """Send a notification e-mail through Gmail's SMTP server.

    Credentials and addresses come from the [mail] section of the config
    file (keys: login, password, sender, receiver).  Any failure is logged
    and swallowed so a mail outage never kills the watcher loop.

    :param subject: Subject line of the message.
    :param text: Plain-text body of the message.
    """
    config = get_config()
    try:
        with smtplib.SMTP('smtp.gmail.com', 587) as server:
            server.starttls()
            # BUG FIX: this is a module-level function, so there is no
            # 'self'; the original 'self.config[...]' raised a NameError
            # that the broad except below silently logged, meaning no mail
            # was ever sent.  Use the local 'config' loaded above.
            server.login(
                config['mail']['login'], config['mail']['password']
            )
            message = 'Subject: {}\n\n{}'.format(subject, text)
            server.sendmail(
                config['mail']['sender'], config['mail']['receiver'],
                message.encode('utf-8')
            )
    except Exception as e:
        logger = getLogger('send-mail')
        logger.error("Something went wrong while sending mail : %s" % str(e))
def create_colored_handler():
    """Build and return a stderr StreamHandler with a colorizing formatter."""
    # stderr logger
    log_format = '%(asctime)s:%(levelname)s:%(lineno)d:%(funcName)s %(message)s'
    stderr_handler = logging.StreamHandler(sys.stderr)
    stderr_handler.setFormatter(createColoredFormatter(sys.stderr, log_format))
    return stderr_handler
def signal_handler(signal, frame):
    """SIGINT handler: terminate the process with exit status 0.

    Note: the first parameter shadows the 'signal' module inside this
    function, but the body never touches it.
    """
    raise SystemExit(0)
def main():
    """Poll every supported retailer forever; e-mail when a PS5 is in stock.

    Runs an infinite loop: each cycle instantiates every browser class,
    checks availability, mails on a hit, then sleeps (30 min normally,
    60 min after a hit was found).
    """
    # Exit cleanly on Ctrl-C instead of dumping a KeyboardInterrupt traceback.
    signal.signal(signal.SIGINT, signal_handler)
    # create colored logger
    logging.root.setLevel(logging.DEBUG)
    logging.root.addHandler(create_colored_handler())
    logger = getLogger('ps5-availability')
    # create temporary directory
    responses_dirname = tempfile.mkdtemp(prefix='ps5_availability_session_')
    logger.info('Debug data will be saved in this directory: %s' % responses_dirname)
    # One browser class per supported retailer.
    browsers = (
        AmazonBrowser, AuchanBrowser, BoulangerBrowser, CarrefourBrowser, CdiscountBrowser,
        CulturaBrowser, DartyBrowser, FnacBrowser, LeclercBrowser, MicromaniaBrowser,
    )
    while True:
        waiting_time = 60 * 30
        for browser in browsers:
            # Rebind the class name to a fresh instance for this cycle.
            browser = browser(logger=logger, responses_dirname=responses_dirname)
            logger.warning("Now trying on %s" % browser.BASEURL)
            try:
                is_available = browser.is_available
            except Exception as e:
                # A broken provider must not stop the other checks.
                logger.error("Something went wrong : %s" % str(e))
                continue
            if is_available:
                logger.warning("Playstation 5 is AVAILABLE !!")
                send_mail(
                    "Playstation 5 Available !",
                    "Vite mec ! La PS5 est disponible sur %s ! Va dépenser toute ta tune ! :')" % browser.BASEURL
                )
                # we found one, no need to check to much now
                waiting_time = 60 * 60
                break
            else:
                logger.warning("Playstation 5 is not available on %s, so sad. :(" % browser.BASEURL)
        else:
            # for/else: reached only when no provider triggered the break.
            logger.critical("No PS5 found on any providers")
        logger.critical("waiting %s seconds to check again" % waiting_time)
        time.sleep(waiting_time)
# Run the watcher only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| [
"mail@elambert.me"
] | mail@elambert.me |
c72d9299bc10665a4db3242dbdca70d84cf13520 | 68ea05d0d276441cb2d1e39c620d5991e0211b94 | /2714.py | c816933a2eed56ec8282d45061a5d42bbd7766f2 | [] | no_license | mcavalca/uri-python | 286bc43aa157d3a6880dc222e0136c80cf079565 | e22875d2609fe7e215f9f3ed3ca73a1bc2cf67be | refs/heads/master | 2021-11-23T08:35:17.614443 | 2021-10-05T13:26:03 | 2021-10-05T13:26:03 | 131,339,175 | 50 | 27 | null | 2021-11-22T12:21:59 | 2018-04-27T19:54:09 | Python | UTF-8 | Python | false | false | 221 | py | n = int(input())
# URI Online Judge 2714: validate student registration codes read from stdin.
# NOTE(review): 'n' (the number of test cases) is read via int(input()) on the
# first line of this script, just above this loop.
while n > 0:
    n -= 1
    ra = input()
    saida = 'INVALID DATA'
    # A valid code is exactly 20 characters: the literal prefix 'RA'
    # followed by 18 digits; anything else keeps the default message.
    if len(ra) == 20:
        if ra[0:2] == 'RA':
            if ra[2:].isdigit():
                saida = int(ra[2:])
    print(saida)
| [
"m.cavalca@hotmail.com"
] | m.cavalca@hotmail.com |
c22cd593f5f83ae3732d104ca10c62e681b4363f | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_159/609.py | 1d4cb7959af1112bc540d578dcf82f9dfd5fc3ae | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 784 | py | f = open('A-large.in')
#f = open('test.in')
# Google Code Jam solution (Python 2: note xrange).  NOTE(review): the input
# file handle 'f' is opened on the first line of this script, just above.
# For each case: case1 = sum of the positive drops between consecutive
# values; case2 = sum over values of min(value, largest drop seen).
count = int(f.readline())
output = ''
for x in xrange(1, count + 1):
    platesCount = int(f.readline())
    arr = f.readline().split()
    case1 = 0
    case2 = 0
    case2MaxGap = 0
    # First pass: accumulate every positive drop and remember the largest.
    for i in xrange(0, platesCount - 1):
        curPlate = int(arr[i])
        nextPlate = int(arr[i+1])
        gap = curPlate - nextPlate
        case2MaxGap = max(case2MaxGap, gap)
        if gap > 0:
            case1 += gap
    # Second pass: each value contributes at most the largest drop.
    for j in xrange(0, platesCount - 1):
        curPlate = int(arr[j])
        if curPlate < case2MaxGap:
            case2 += curPlate
        else:
            case2 += case2MaxGap
    output += 'Case #' + str(x) + ': ' + str(case1) + ' ' + str(case2) + '\n'
print(output)
newf = open('output.txt','w')
newf.write(output)
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
ea09bbbb1e03af80da8927d9c004f5b2689e5ce9 | 31abd09a08ebbec64951b7724fb94b178546bfd0 | /Classifier/binary_classificationTree.py | 287452ddfad30ccab4cb3b759930b3a3cb1bbde9 | [] | no_license | juancho618/TAI_Project | 9839ab014a14cc5c030217f12e72ab164eada297 | 63d55659cc1e4e7f36461762e99aaaa5f4b2b500 | refs/heads/master | 2021-01-23T02:18:58.628859 | 2017-05-31T07:27:33 | 2017-05-31T07:27:33 | 92,917,385 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,551 | py | import pandas as pd
import matplotlib.pyplot as plt # plot library
from sklearn import tree
from dataProcessing import * # dummy encoding
from sklearn.preprocessing import OneHotEncoder # best codification for categorical data
# Train and compare a decision tree vs. multinomial naive Bayes on the
# dummy-encoded medical dataset, reporting precision/recall/F-beta for both.
df = pd.read_csv('../finalDS.csv', header=0)
# print df # preprocess dataset with original values
# Convert all the nominal values to integers (dummy version)
df2 = encodeColumnDummy(df,0) # changing health insurance
df2 = encodeColumnDummy(df2, 2) # changing Diagnosis
df2 = encodeColumnDummy(df2, 3) # changing Speciality
df2 = encodeColumnDummy(df2, 4) # changing Gender
df2 = encodeColumnDummy(df2, 5) # changing Day
# ------- functional way to get the data --------
x = df2.iloc[:,:6] # data with the attributes
y = df2.iloc[:,6] # results
# dataset spliter
from sklearn.model_selection import train_test_split # import split and test functionality
# NOTE(review): test_size=.8 keeps only 20% of the rows for training.
x_train, x_test, y_train, y_test = train_test_split(x,y, test_size = .8)
# tree classifier algorithm
clf = tree.DecisionTreeClassifier() # calling the decision tree clasifier
# Naive Bayes classifier algorithm
from sklearn.naive_bayes import MultinomialNB # import gaussian classi
nb_clf = MultinomialNB()
# --- Trying one hot encoder ------
# NOTE(review): the categorical_features argument only exists in older
# scikit-learn releases (< 0.22); confirm the pinned version.
enc = OneHotEncoder(categorical_features =[0, 2, 3, 4, 5]) # One Hot encoder Specifying the categorical attributes.
enc.fit(x) #fit the encoder to the data
clf.fit(enc.transform(x_train), y_train) # create the learninf instance
nb_clf.fit(enc.transform(x_train), y_train) # Nive Bayes - Multinomial model
# prediction
predictions = clf.predict(enc.transform(x_test))
prediction_NB = nb_clf.predict(enc.transform(x_test))
# import the result metrics
from sklearn import metrics
print("Tree Classifier Precision", metrics.precision_score(y_test, predictions))
print("Tree Classifier Recall", metrics.recall_score(y_test, predictions))
print("Tree Classifier Beta Score 1", metrics.f1_score(y_test, predictions))
print("Tree Classifier Beta Score 0.5", metrics.fbeta_score(y_test, predictions, beta=0.5))
print("Tree Classifier Beta Score 2", metrics.fbeta_score(y_test, predictions, beta=2))
print("Naive Bayes Classifier Precision", metrics.precision_score(y_test,prediction_NB ))
print("Naive Bayes Recall", metrics.recall_score(y_test, prediction_NB))
print("Naive Bayes Beta Score 1", metrics.f1_score(y_test, prediction_NB))
print("Naive Bayes Beta Score 0.5", metrics.fbeta_score(y_test, prediction_NB, beta=0.5))
print("Naive Bayes Beta Score 2", metrics.fbeta_score(y_test, prediction_NB, beta=2)) | [
"jjsorianoe@gmail.com"
] | jjsorianoe@gmail.com |
f725e3ee7f1631ff7c08050264fac3d3468ddd14 | 859951d9fc8ec1377c9fdadbe5e21128879d89b0 | /unit21/a214_simple_window1_AC.py | 288aa54aa9b7e8dd1c0ab955b32a339df182ac98 | [] | no_license | Axis-Denied/csp_files | d61ff240306de8114e40e030b92582b962850b73 | 579a57e56bc616f42eac529de106f603aebc427a | refs/heads/master | 2020-12-06T08:01:10.765190 | 2020-01-09T19:13:03 | 2020-01-09T19:13:03 | 232,401,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | ##############################################################################
# a113_TR_simple_window1.py
# Example solution: Change its size to 200 by 100 pixels.
##############################################################################
import tkinter as tk
# main window
root = tk.Tk()
# Resize the root window to 200x100 pixels (width x height).
root.wm_geometry("200x100")
root.mainloop() | [
"cokera01@student.psdr3.org"
] | cokera01@student.psdr3.org |
1116093189d5bbdecb2913592b931a0012e07283 | 4501c310488b7d019098858e8577b020b6346d79 | /Code/PythagoreGUI.spec | 0e1160a49ef72111528452fa78fd7e07d45c8cdb | [] | no_license | nathanbegin/Pythaface | 13d4b030fd9809dd59715f1b0a6cb79db499e012 | 0660421f62b67653a833f31d0fcd8864de7508ae | refs/heads/master | 2020-04-06T04:27:56.778429 | 2017-02-23T01:35:50 | 2017-02-23T01:35:50 | 82,733,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 767 | spec | # -*- mode: python -*-
# PyInstaller build specification for the PythagoreGUI application.
# No bytecode encryption is used.
block_cipher = None
# Analysis: collect the entry script, its imports and data files.
a = Analysis(['PythagoreGUI.py'],
             pathex=['C:\\Users\\natha\\Desktop\\Pythagore\\Code'],
             binaries=[],
             datas=[],
             hiddenimports=[],
             hookspath=[],
             runtime_hooks=[],
             excludes=[],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher)
# PYZ: bundle the pure-Python modules into a compressed archive.
pyz = PYZ(a.pure, a.zipped_data,
             cipher=block_cipher)
# EXE: windowed build (console=False) with a custom icon.
exe = EXE(pyz,
          a.scripts,
          a.binaries,
          a.zipfiles,
          a.datas,
          name='PythagoreGUI',
          debug=False,
          strip=False,
          upx=True,
          console=False , icon='Logo.ico')
| [
"nathanbegin@gmail.com"
] | nathanbegin@gmail.com |
afbade6cf7187497d35c8ebf245bc2caf3732775 | c0e41a88ff45bdb7bbdc8e4276427b276eec5d13 | /kyo/python/1_function/1_args.py | a93d7b955d358025108b2595479cdd21473a90fb | [] | no_license | sunzhqiiang888/KingSun | 7440748825ed72c411f8ff63c478ad412efa51ac | 7473cc30d398f1ae96cbc7175198052d518fef94 | refs/heads/master | 2020-03-08T08:17:17.284210 | 2018-04-15T03:15:25 | 2018-04-15T03:15:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 858 | py | #!/usr/bin/env python3
def test(name='未知', age=17, sex='男', phone="13602578723"):
    """Print one student record, using the defaults for omitted fields."""
    record = "学生信息: %s(%d) %s %s" % (name, age, sex, phone)
    print(record)
def testV(name, *args, phone="13877777777", **kwargs):
# print(args, type(args), kwargs, type(kwargs))
print(name, args, phone, kwargs)
# Demo driver: exercises the variadic testV(); note that testArgs() is
# defined but never invoked below.
if __name__ == "__main__":
    def testArgs():
        # Positional/keyword/default-argument combinations for test().
        test()
        test("李四")
        test("张三", 23)
        test("王二", 17, '女')
        test("马六", 19, '未知', '13877669900')
        test("李小四", phone='110')
        test(phone='119')
    def testVargs():
        # *args / keyword-only / **kwargs combinations for testV().
        # testV()
        testV("李四")
        testV("张三", 23, en=34, cn=56)
        testV("王二", 17, '女')
        testV("马六", 19, '未知', '13877669900')
        testV("李小四", phone='110')
        testV("王五", phone='119')
    testVargs()
| [
"354340684@qq.com"
] | 354340684@qq.com |
788ffc9f63b276da6cc4ebcab3f6f95eb32e6321 | 81384c792f7c8d85e08318abb0b79b1ae8768718 | /controllers/draft.py | 29ddfaecee941213b208aec42a7aac05f804e5a8 | [
"LicenseRef-scancode-public-domain"
] | permissive | drewbeller/team_drafter | b2037afed29268b253a4a03a6dd6122396ad89ae | 152f421d947c6c33442261f229dac4535b1b7bc2 | refs/heads/master | 2021-01-01T17:17:20.900147 | 2015-08-16T20:52:59 | 2015-08-16T20:52:59 | 23,207,245 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,589 | py | # coding: utf8
import ast
def generate_picks():
    """web2py action: wipe the pick table and regenerate every pick for the
    configured number of rounds, honouring 'snake' draft order (team order
    reversed on even rounds), then redirect back to the league admin page."""
    # load draft parameters
    draft_order = db(db.draft_parameter.draft_parameter == 'Draft Order').select().first()['draft_parameter_value']
    rounds = int(db(db.draft_parameter.draft_parameter == 'Rounds').select().first()['draft_parameter_value'])
    # pick_times = db(db.draft_parameter.draft_parameter == 'Pick Time').select().first()['draft_parameter_value']
    # pick_times = ast.literal_eval(pick_times)
    # select all teams and add to dict with id and order
    teams = db(db.team).select(orderby=db.team.team_order)
    # Start from a clean slate: remove every existing pick.
    db(db.pick).delete()
    pick_num = 0
    for round in range(rounds):
        round_num = round + 1 # round starts at zero
        if (round_num % 2 == 0) and (draft_order == 'snake'):
            teams = teams.sort(lambda team: team.team_order, reverse=True)
        else:
            teams = teams.sort(lambda team: team.team_order)
        # Debug output (Python 2 print statement).
        print round_num
        for team in teams:
            pick_num = pick_num + 1
            pick_id = db.pick.insert(pick_num=pick_num, pick_round=round_num, pick_team=team)
    session.flash = "Draft Board is ready for %s teams and %s rounds!" % (len(teams), rounds)
    redirect(URL('appadmin', 'manage/league')) ##table-pick
def manage_draft():
    """web2py action: render the draft board plus a CRUD form for the pick
    selected by the 'pick' query variable (defaults to 1); a successful
    save redirects to the next pick number."""
    #picks = db(db.pick).select(orderby=db.pick.id)
    fields = [db.pick.id, db.pick.pick_num, db.pick.pick_round, db.pick.pick_team, db.pick.pick_owner, db.pick.pick_player,
              db.player.player_first_name, db.player.player_last_name, db.player.player_position, db.player.player_pro_team]
    # Left-join players so picks with no selected player still appear.
    picks = db(db.pick).select(*fields, left=[db.player.on(db.pick.pick_player == db.player.id)])
    teams = db(db.team).select(orderby=db.team.team_order)
    rounds = int(db(db.draft_parameter.draft_parameter == 'Rounds').select().first()['draft_parameter_value'])
    if request.get_vars['pick'] is None:
        request.get_vars['pick'] = 1
    pick_id = db(db.pick.pick_num == request.get_vars['pick']).select().first().id
    form=crud.update(db.pick, pick_id, deletable=False)
    if form.accepts(request.vars, session):
        redirect(URL('manage_draft', vars=dict(pick=int(request.get_vars['pick'])+1)))
    # Pick clock: 180s for picks 1-10, 90s for 11-130, 30s afterwards.
    if (int(request.get_vars['pick'])<11):
        pick_time = 180
    elif (int(request.get_vars['pick'])<131):
        pick_time = 90
    else:
        pick_time = 30
    return dict(picks=picks, teams=teams, rounds=rounds, form=form, pick_time=pick_time)
def trade_pick():
    """web2py action placeholder: pick trading is not implemented yet."""
    return {'message': T('Hello World')}
def trade_player():
    """web2py action placeholder: player trading is not implemented yet."""
    return {'message': T('Hello World')}
| [
"Drew@drews-mbp.bellerhome"
] | Drew@drews-mbp.bellerhome |
081d7e5abd1e6534a8b35c34749bb42b37674a75 | 41e59ef6eed28c5c89fc5e5ad54605077819f812 | /mnist_archs.py | e8337f2311dd87a1c7e47e738a2387ba43589f6c | [] | no_license | zueigung1419/IntrinsicDimDeep | 498185a6bf27d61eb2073c5dfa1ba0514611e142 | 2c5140009eb3efccca1d774dc66cb043c04b26d8 | refs/heads/master | 2022-04-11T11:50:42.203540 | 2020-03-13T11:38:19 | 2020-03-13T11:38:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,795 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
# F.Chollet architecture
class Net(nn.Module):
    """Small MNIST convnet (after F. Chollet's Keras example).

    Two 3x3 convolutions, each followed by 2x2 max-pooling and ReLU, then
    two fully connected layers ending in log-softmax over 10 classes.
    fc1 expects 1600 = 64 * 5 * 5 features, which corresponds to 28x28
    single-channel inputs.
    """
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        # dropout 0.25 in the original Keras model (omitted here)
        self.fc1 = nn.Linear(1600, 128)
        # dropout 0.5 in the original Keras model (omitted here)
        self.fc2 = nn.Linear(128, 10)
    def forward(self, x):
        """Return log-probabilities over the 10 digit classes."""
        hidden = F.max_pool2d(self.conv1(x), 2).relu()
        hidden = F.max_pool2d(self.conv2(hidden), 2).relu()
        hidden = hidden.view(-1, 1600)
        hidden = self.fc1(hidden).relu()
        logits = self.fc2(hidden)
        return F.log_softmax(logits, dim=1)
    def extract(self, x, verbose=False):
        """Return the activations after the four main stages:
        (pool1+relu, pool2+relu, fc1+relu, log-softmax output)."""
        act1 = F.max_pool2d(self.conv1(x), 2).relu()
        act2 = F.max_pool2d(self.conv2(act1), 2).relu()
        flat = act2.view(-1, 1600)
        act3 = self.fc1(flat).relu()
        act4 = F.log_softmax(self.fc2(act3), dim=1)
        if verbose == True:
            for activation in (act1, act2, act3, act4):
                print(activation.size())
        return act1, act2, act3, act4
    def extract_all(self, x, verbose=False):
        """Return six intermediate activations: conv1 pre-activation,
        pool1+relu, conv2 pre-activation, pool2+relu, fc1+relu, and the
        final log-softmax output."""
        pre1 = self.conv1(x)
        post1 = F.max_pool2d(pre1, 2).relu()
        pre2 = self.conv2(post1)
        post2 = F.max_pool2d(pre2, 2).relu()
        flat = post2.view(-1, 1600)
        dense = self.fc1(flat).relu()
        scores = F.log_softmax(self.fc2(dense), dim=1)
        if verbose == True:
            for activation in (pre1, post1, pre2, post2, dense, scores):
                print(activation.size())
        return pre1, post1, pre2, post2, dense, scores
| [
"alessioansuini@gmail.com"
] | alessioansuini@gmail.com |
ec7a051e8f0312346a58fd5aba93972c0e87435a | 3a517a9b62e24eccfa44baf8a93857f14e1042bc | /Infy Assigns/Virus.py | d38330ed11c02ce27a4527cce963040597b38385 | [] | no_license | milindaj/codingground | d20831323603e040f3c1285f222a11eb9e653662 | ee9035d539d33e673df54c681102307a41e93585 | refs/heads/master | 2016-09-06T00:46:05.624018 | 2015-01-22T02:29:19 | 2015-01-22T02:29:19 | 29,096,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,737 | py | # Hello World program in Python
print "Hello World!\n"
# Python 2 script: classify travellers as Infected/NotInfected from a
# hard-coded string of labelled (temperature, pressure) training samples.
# The '#' separates labelled training triples from unlabelled test pairs.
#input = "90 120 Infected,90 150 NotInfected,100 140 Infected,80 130 NotInfected#95 125,95 145,75 160"
input = "80 120 Infected,70 145 Infected,90 100 Infected,80 150 NotInfected,80 80 NotInfected,100 120 NotInfected#120 148,75 148,60 90"
split = input.split('#')
catVals = split[0].split(",")
#catVals = [int(cat) for cat in catVals]
trvVals = split[1].split(",")
#trvVals = [int(trv) for trv in trvVals]
#inpArray = dict()
inTempList = [] #infected temp
inPrsList = [] # infected pressure
unTempList = [] # uninfected temp
unPrsList = [] # uninfected pressure
# Split the training samples into per-label temperature/pressure lists.
for cat in catVals:
    catTmp = cat.split(" ")
    if catTmp[2] == "Infected":
        inTempList.append(int(catTmp[0]))
        inPrsList.append(int(catTmp[1]))
    else:
        unTempList.append(int(catTmp[0]))
        unPrsList.append(int(catTmp[1]))
# Sort so index 0 is the minimum and the last index is the maximum.
inTempList.sort()
inPrsList.sort()
unTempList.sort()
unPrsList.sort()
tmpRangeType = ""
prsRangeType = ""
rangeType = ""
# Which class owns the lower temperatures...
if inTempList[0] < unTempList[0]:
    tmpRangeType = "Infected"
elif inTempList[0] > unTempList[0]:
    tmpRangeType = "NotInfected"
# ...and which class owns the higher pressures.
if inPrsList[len(inPrsList)-1] > unPrsList[len(unPrsList)-1]:
    prsRangeType = "Infected"
elif inPrsList[len(inPrsList)-1] < unPrsList[len(unPrsList)-1]:
    prsRangeType = "NotInfected"
# rangeType is usable for extrapolation only when both signals agree.
if tmpRangeType == prsRangeType:
    rangeType = prsRangeType
else:
    rangeType = "Unknown"
print( "range type is :" + rangeType)
ansList = []
# Classify each test pair: inside a class's [min, max] box -> that class;
# otherwise extrapolate via rangeType; else "Unknown".
for trv in trvVals:
    trvTmp = trv.split(" ")
    tmp = int(trvTmp[0])
    prs = int(trvTmp[1])
    if (tmp >= inTempList[0] and tmp <= inTempList[len(inTempList)-1]) and (prs >= inPrsList[0] and prs <= inPrsList[len(inPrsList)-1]):
        ansList.append("Infected")
    elif (tmp >= unTempList[0] and tmp <= unTempList[len(unTempList)-1]) and (prs >= unPrsList[0] and prs <= unPrsList[len(unPrsList)-1]):
        ansList.append("Notinfected")
    else:
        if rangeType != "Unknown":
            if rangeType == "Infected":
                # NOTE(review): indexes inPrsList with len(inTempList); the two
                # lists always have equal length here, but this reads like a typo.
                if tmp < inTempList[0] and prs > inPrsList[len(inTempList)-1]:
                    ansList.append("Infected")
                else:
                    ansList.append("Unknown")
            else:
                if tmp < unTempList[0] and prs > unPrsList[len(unTempList)-1]:
                    ansList.append("Notinfected")
                else:
                    ansList.append("Unknown")
        else:
            ansList.append("Unknown")
print(catVals)
print(trvVals)
print("\n")
print("infected List :")
print(inTempList)
print(inPrsList)
print("\n")
print("Not infected List :")
print(unTempList)
print(unPrsList)
print("\n")
print(ansList)
| [
"milindaj@gmail.com"
] | milindaj@gmail.com |
79397345c631e26cb88ef3385cd94d8a3e22ec70 | 678b5e4378d809f5048a9284de1f845a3c4540f6 | /src/scenes/cards/Card.py | 266f093ce35443c5e4d2b72124060ef11cd7b63b | [
"MIT"
] | permissive | codingblocks/get-outta-here | 2fccc4c97104078fdd70c2a9b5ea8cfbaf6a7706 | 8d37047876a93a050f0116086a470df123421cb9 | refs/heads/main | 2023-02-23T14:52:04.986791 | 2021-01-24T22:32:08 | 2021-01-24T22:32:08 | 332,569,558 | 0 | 1 | MIT | 2021-03-25T06:33:31 | 2021-01-24T22:24:59 | Python | UTF-8 | Python | false | false | 6,676 | py | import pygame
from src.config import (CARD_FRONT_BACKGROUND_IMAGE, CHARACTER_SPRITE_PATH, MAP_TILE_HEIGHT,
MAP_TILE_WIDTH, MAIN_FONT_FILE, MAP_TILE_SCALE, CARD_TEXT_SIZE, CARD_TEXT_COLOR,
CARD_IMAGE_SIZE, CARD_TITLE_SIZE, CARD_TITLE_IMAGE, CARD_TEXT_IMAGE, CARD_TYPE_IMAGE,
CARD_TYPE_SIZE, CARD_TYPE_COLOR, PERSONNEL_CARD_DIR, EFFECT_CARD_DIR, SPECIAL_CARD_DIR)
from src.scenes.text_writer import draw_text
import src.scenes.globals as g
class Card(pygame.sprite.Sprite):
character_sheet = None
def __init__(self, config: dict):
if self.character_sheet is None:
self.character_sheet = pygame.image.load(CHARACTER_SPRITE_PATH).convert_alpha()
pygame.sprite.Sprite.__init__(self)
self.config = config
if self.config['type'].lower() == "special":
self.card_dir = f"{SPECIAL_CARD_DIR}"
elif self.config['type'].lower() == "effect":
self.card_dir = f"{EFFECT_CARD_DIR}"
else:
self.card_dir = f"{PERSONNEL_CARD_DIR}"
card_background_path = f"{self.card_dir}\\{CARD_FRONT_BACKGROUND_IMAGE}"
print(card_background_path)
card_background = pygame.image.load(card_background_path).convert_alpha()
self._draw_portrait(card_background)
self._draw_title(card_background)
self._draw_text(card_background)
self._draw_type(card_background)
self.image = card_background
self.rect = self.image.get_rect()
self.rect.x = -1000
self.rect.y = -1000
def update(self, position: tuple):
self.rect = self.image.get_rect()
self.rect.x = position[0]
self.rect.y = position[1]
def can_be_played(self):
can_be_played = True
modifiers = self.config.get("modifiers", {})
if modifiers.get("energy", 0) and modifiers["energy"] + g.resources.energy <= 0:
can_be_played = False
if modifiers.get("fuel", 0) and modifiers["fuel"] + g.resources.fuel <= 0:
can_be_played = False
if modifiers.get("shields", 0) and modifiers["shields"] + g.resources.shields <= 0:
can_be_played = False
return can_be_played
def play(self):
modifiers = self.config.get("modifiers", {})
for k,v in modifiers.items():
g.resources.modify(k, v)
def is_single_use(self):
return self.config['type'].lower() == "personnel"
def _draw_portrait(self, card_background):
if "sheet" in self.config['sprite']:
self._draw_item_portrait(card_background)
else:
self._draw_character_portrait(card_background)
def _draw_item_portrait(self, card_background):
config = self.config
sheet = pygame.image.load(config['sprite']['sheet']).convert_alpha()
tile_size = self.config['sprite']['sheet_tile_size']
sprite_position_on_sheet = (config['sprite']['x'], config['sprite']['y'])
sprite_rect = pygame.Rect(sprite_position_on_sheet[0] * tile_size, sprite_position_on_sheet[1] * tile_size, tile_size, tile_size)
portrait_image = pygame.Surface((tile_size, tile_size), pygame.SRCALPHA)
portrait_image.blit(sheet, (0, 0), sprite_rect)
scaled_portrait = pygame.transform.scale(portrait_image, (MAP_TILE_WIDTH * 3, MAP_TILE_HEIGHT * 3))
card_center = card_background.get_rect().center
card_center = (card_center[0] - scaled_portrait.get_rect().w // 2, card_center[1] - 190)
card_background.blit(scaled_portrait, card_center)
pass
def _draw_character_portrait(self, card_background):
config = self.config
portrait_position = (config['sprite']['x'], config['sprite']['y'])
blank_image = pygame.Surface((MAP_TILE_WIDTH // MAP_TILE_SCALE, MAP_TILE_HEIGHT // MAP_TILE_SCALE), pygame.SRCALPHA)
sprite_rect = pygame.Rect(portrait_position[0] * MAP_TILE_WIDTH // MAP_TILE_SCALE, portrait_position[1] * MAP_TILE_HEIGHT // MAP_TILE_SCALE, MAP_TILE_WIDTH // MAP_TILE_SCALE, MAP_TILE_HEIGHT // MAP_TILE_SCALE)
blank_image.blit(self.character_sheet, (0, 0), sprite_rect)
scaled_portrait = pygame.transform.scale(blank_image, (MAP_TILE_WIDTH * 3, MAP_TILE_HEIGHT * 3))
portrait_rect = scaled_portrait.get_rect()
card_center = card_background.get_rect().center
card_center = (card_center[0] - scaled_portrait.get_rect().w // 2, card_center[1] - 190)
card_background.blit(scaled_portrait, card_center, portrait_rect)
def _draw_title(self, card_background):
plate_image = pygame.image.load(f"{self.card_dir}\\{CARD_TITLE_IMAGE}").convert_alpha()
font = pygame.font.Font(MAIN_FONT_FILE, CARD_TITLE_SIZE)
font_render = font.render(self.config['title'], True, CARD_TEXT_COLOR)
font_render.get_rect().center = card_background.get_rect().center
font_left = plate_image.get_rect().center[0] - font_render.get_rect().w // 2
font_top = plate_image.get_rect().center[1] - font_render.get_rect().h // 2
font_pos = (font_left, font_top - 8)
plate_image.blit(font_render, font_pos)
card_background.blit(plate_image, (31, 214))
def _draw_type(self, card_background):
plate_image = pygame.image.load(f"{self.card_dir}\\{CARD_TYPE_IMAGE}").convert_alpha()
font = pygame.font.Font(MAIN_FONT_FILE, CARD_TYPE_SIZE)
font_render = font.render(self.config['type'], True, CARD_TYPE_COLOR)
font_render.get_rect().center = card_background.get_rect().center
font_left = plate_image.get_rect().center[0] - font_render.get_rect().w // 2
font_top = plate_image.get_rect().center[1] - font_render.get_rect().h // 2
font_pos = (font_left, font_top)
plate_image.blit(font_render, font_pos)
card_background.blit(plate_image, (73, 259))
def _draw_text(self, card_background):
    """Blit the body-text plate onto the card and draw the card's rules text
    inside it via the external draw_text helper."""
    plate_image = pygame.image.load(f"{self.card_dir}\\{CARD_TEXT_IMAGE}").convert_alpha()
    card_background.blit(plate_image, (25, 276))
    font = pygame.font.Font(MAIN_FONT_FILE, CARD_TEXT_SIZE)
    bg_rect = card_background.get_rect()
    margin = 70
    # Text box: inset horizontally by the margin, starting 80px below the
    # card's vertical midpoint, 200px tall.
    text_area = pygame.Rect(bg_rect.x + margin, bg_rect.center[1] + 80,
                            CARD_IMAGE_SIZE[0] - margin * 2, 200)
    draw_text(card_background, self.config['text'], CARD_TEXT_COLOR, text_area, font)
# Card title, cost | [
"me@joezack.com"
] | me@joezack.com |
05502558d8297623342bf39c101073a7049393f0 | 140003818ac3cf9a7d427ee5f0e87b1bc4f770ec | /metrics/mAP.py | cf8920732faacba76ba21e41f2c07bd950366558 | [
"MIT"
] | permissive | chisyliu/RADDet | e495a80a7bdbd3ecd473130386c00809df5ff4f0 | 5c0020dc24024f5b87e864554769f5926b73593a | refs/heads/main | 2023-04-18T22:21:57.054785 | 2021-05-02T14:16:51 | 2021-05-02T14:16:51 | 364,338,015 | 1 | 0 | MIT | 2021-05-04T17:40:57 | 2021-05-04T17:40:57 | null | UTF-8 | Python | false | false | 4,450 | py | import tensorflow as tf
import tensorflow.keras as K
import numpy as np
import util.helper as helper
def getTruePositive(pred, gt, input_size, iou_threshold=0.5, mode="3D"):
    """Greedy TP/FP assignment for score-sorted detections of a single class.

    Args:
        pred: detections sorted by descending score; rows are
              [box..., score, class] with 6 box coords in "3D" mode, 4 in "2D".
        gt: ground-truth rows [box..., class].
        input_size: passed through to helper.iou3d.
        iou_threshold: minimum IoU for a detection to claim a ground truth.
        mode: "3D" or "2D" box format.

    Returns:
        (tp, fp): complementary 0/1 arrays of length len(pred).
    """
    assert mode in ["3D", "2D"]
    num_coords = 6 if mode == "3D" else 4
    tp = np.zeros(len(pred))
    matched_gt_ids = []
    for idx, detection in enumerate(pred):
        # Stop once every ground-truth box has been claimed.
        if len(matched_gt_ids) == len(gt):
            break
        box = detection[:num_coords]
        gt_boxes = gt[..., :num_coords]
        if mode == "3D":
            iou = helper.iou3d(box[np.newaxis, ...], gt_boxes, input_size)
        else:
            iou = helper.iou2d(box[np.newaxis, ...], gt_boxes)
        best = np.argmax(iou)
        # A detection is a true positive when its best IoU clears the
        # threshold and that ground truth has not been matched already.
        if iou[best] >= iou_threshold and best not in matched_gt_ids:
            tp[idx] = 1.
            matched_gt_ids.append(best)
    fp = 1. - tp
    return tp, fp
def computeAP(tp, fp, num_gt_class):
    """VOC-style Average Precision from per-detection TP/FP indicators.

    Args:
        tp, fp: 0/1 arrays over detections sorted by descending score.
        num_gt_class: number of ground-truth boxes for this class.

    Returns:
        (ap, mrec, mpre): the AP value plus the padded recall curve and the
        monotone (max-envelope) precision curve it was integrated over.
    """
    tp_acc = np.cumsum(tp).astype(np.float32)
    fp_acc = np.cumsum(fp).astype(np.float32)
    recall = tp_acc / (num_gt_class + 1e-16)
    precision = tp_acc / (tp_acc + fp_acc)
    # Pad both curves with sentinel endpoints (recall 0 -> 1, precision -> 0),
    # following the reference implementation this port is based on.
    mrec = np.append(np.insert(recall, 0, 0.0), 1.0)
    mpre = np.append(np.insert(precision, 0, 0.0), 0.0)
    # Make precision monotonically non-increasing, sweeping right to left.
    for j in range(len(mpre) - 2, -1, -1):
        mpre[j] = max(mpre[j], mpre[j + 1])
    # Integrate precision over every recall step (points where recall changes).
    ap = 0.0
    for j in range(1, len(mrec)):
        if mrec[j] != mrec[j - 1]:
            ap += (mrec[j] - mrec[j - 1]) * mpre[j]
    return ap, mrec, mpre
def mAP(predictions, gts, input_size, ap_each_class, tp_iou_threshold=0.5, mode="3D"):
    """Mean average precision over 3D boxes.

    Args:
        predictions -> [num_pred, 6 + score + class]
        gts -> [num_gt, 6 + class]
        ap_each_class: list of per-class AP lists, appended to in place.

    Returns:
        (mean_ap, ap_each_class).
    """
    # Drop all-zero (padding) ground-truth rows.
    gts = gts[gts[..., :6].any(axis=-1)]
    per_class_ap = []
    for class_id in np.unique(gts[:, 6]):
        # Predictions of this class, sorted by descending confidence score.
        class_preds = predictions[predictions[..., 7] == class_id]
        class_preds = class_preds[np.argsort(class_preds[..., 6])[::-1]]
        # Ground truths of this class.
        class_gts = gts[gts[..., 6] == class_id]
        tp, fp = getTruePositive(class_preds, class_gts, input_size,
                                 iou_threshold=tp_iou_threshold, mode=mode)
        ap, _, _ = computeAP(tp, fp, len(class_gts))
        per_class_ap.append(ap)
        ap_each_class[int(class_id)].append(ap)
    return np.mean(per_class_ap), ap_each_class
def mAP2D(predictions, gts, input_size, ap_each_class, tp_iou_threshold=0.5, mode="2D"):
    """Mean average precision over 2D boxes.

    Args:
        predictions -> [num_pred, 4 + score + class]
        gts -> [num_gt, 4 + class]
        ap_each_class: list of per-class AP lists, appended to in place.

    Returns:
        (mean_ap, ap_each_class).
    """
    # Drop all-zero (padding) ground-truth rows.
    gts = gts[gts[..., :4].any(axis=-1)]
    per_class_ap = []
    for class_id in np.unique(gts[:, 4]):
        # Predictions of this class, sorted by descending confidence score.
        class_preds = predictions[predictions[..., 5] == class_id]
        class_preds = class_preds[np.argsort(class_preds[..., 4])[::-1]]
        # Ground truths of this class.
        class_gts = gts[gts[..., 4] == class_id]
        tp, fp = getTruePositive(class_preds, class_gts, input_size,
                                 iou_threshold=tp_iou_threshold, mode=mode)
        ap, _, _ = computeAP(tp, fp, len(class_gts))
        per_class_ap.append(ap)
        ap_each_class[int(class_id)].append(ap)
    return np.mean(per_class_ap), ap_each_class
| [
"zhangaocanada@gmail.com"
] | zhangaocanada@gmail.com |
class SortedSums:
    """Fenwick tree (binary indexed tree) keyed by integer value.

    Index 0 cannot be addressed by a Fenwick tree, so values added at key 0
    are kept separately in arr[0]; rank(x) is then the inclusive prefix sum
    over keys 0..x.
    """

    def __init__(self, size):
        # size: exclusive upper bound on keys that may be added.
        self.size = size
        self.arr = [0] * self.size

    def add(self, x, val):
        """Add `val` at key `x`."""
        if x == 0:
            self.arr[0] += val
            return
        while x < self.size:
            self.arr[x] += val
            x += x & (-x)

    def rank(self, x):
        """Return the inclusive prefix sum of all values added at keys 0..x."""
        if x < 0:
            return 0
        total = self.arr[0]
        while x > 0:
            total += self.arr[x]
            x &= x - 1
        return total
def sortedSum(a):
    """HackerRank "Sorted Sums": for each prefix a[:i+1], let f(prefix) be the
    sum of position * value over the sorted prefix (1-based positions); return
    the sum of f over all prefixes, modulo 10**9 + 7.

    Two Fenwick trees indexed by value are used:
      - `pre` counts occurrences, giving each new element's 1-based rank in
        the sorted prefix;
      - `post` sums the values themselves, giving the total of previously
        seen elements greater than a[i].
    """
    pre = SortedSums(10 ** 6 + 1)
    post = SortedSums(10 ** 6 + 1)
    # temp: f of the current prefix; total: plain sum of elements so far;
    # ans: accumulated answer (both temp and ans kept modulo 10**9 + 7).
    temp = total = ans = 0
    n = len(a)
    for i in range(n):
        # 1-based position a[i] takes in the sorted prefix a[:i+1].
        pos = pre.rank(a[i]) + 1
        # Sum of previously seen elements strictly greater than a[i]; each of
        # them shifts one position right, so f grows by g beyond pos * a[i].
        g = total - post.rank(a[i])
        temp = (temp + pos * a[i] + g) % (10 ** 9 + 7)
        ans = (ans + temp) % (10 ** 9 + 7)
        total += a[i]
        pre.add(a[i], 1)
        post.add(a[i], a[i])
    return ans
if __name__ == '__main__':
    # Smoke test on a small sample input.
    print(sortedSum([9, 5, 8]))
| [
"webdasturuz@gmail.com"
] | webdasturuz@gmail.com |
4d71975bc09e3c0a6e6ee256fd6840bf15111f68 | cf0ab8503d4d704045070deea1e2125375711e86 | /apps/sockets/tests/test_importer.py | d4c7afc7997e84b3c918c6e4dc2634f31084008f | [] | no_license | faierbol/syncano-platform | c3c6468600115752fd9fa5e46a0ad59f75f6bc9c | 879111874d1ef70418b4890cf970720b0a2be4d8 | refs/heads/master | 2023-07-20T10:13:40.066127 | 2021-02-08T15:01:13 | 2021-02-08T15:01:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,405 | py | # coding=UTF8
from unittest import mock
from django.test import TestCase
from django.utils import timezone
from apps.sockets.exceptions import ObjectProcessingError, SocketConfigValidationError, SocketMissingFile
from apps.sockets.importer import INTERVAL_REGEX, SocketImporter
from apps.sockets.models import Socket
from apps.sockets.validators import CustomSocketConfigValidator
@mock.patch('apps.sockets.signal_handlers.SocketProcessorTask', mock.MagicMock())
@mock.patch('apps.sockets.download_utils.ZipDownloadFileHandler.get_socket_spec')
class TestSocketImporter(TestCase):
    """Validation tests for the socket YAML importer.

    get_socket_spec is patched class-wide, so every test method receives
    `download_mock` and feeds an inline YAML socket spec to the importer.
    """

    # Importer class under test; an attribute so subclasses could swap it.
    importer_class = SocketImporter

    @mock.patch('apps.sockets.download_utils.ZipDownloadFileHandler.read_file',
                mock.Mock(side_effect=SocketMissingFile('error')))
    def process_socket(self, download_mock, socket_source, **kwargs):
        """Run `socket_source` (YAML text) through the importer for a fresh Socket.

        read_file is patched to always raise SocketMissingFile, so the importer
        must rely solely on the inline spec.  Returns (socket, import result).
        """
        socket = Socket(created_at=timezone.now(), **kwargs)
        download_mock.return_value = socket_source
        return socket, self.importer_class(socket).process()

    def assert_validation(self, download_mock, error_msg, socket_source, line=None):
        """Assert importing `socket_source` raises ObjectProcessingError with
        `error_msg`; when `line` is given, also check the reported line number."""
        with self.assertRaisesMessage(ObjectProcessingError, error_msg) as cm:
            self.process_socket(download_mock, socket_source)
        if line is not None:
            self.assertEqual(cm.exception.lineno, line,
                             'Lines not equal for: "{}"; Expected: {}, got: {}.'.format(str(cm.exception),
                                                                                       line, cm.exception.lineno))

    def assert_validation_with_config(self, download_mock, error_msg, socket_source, config=None):
        """Assert that importing and validating the socket's config raises
        SocketConfigValidationError containing `error_msg`."""
        with self.assertRaisesMessage(SocketConfigValidationError, error_msg):
            # Either the import itself or the explicit config validation below
            # may raise; both are covered by the assertRaises context.
            socket, _ = self.process_socket(download_mock, socket_source, config=config or {})
            CustomSocketConfigValidator().validate(socket_config=socket.config,
                                                   meta_config=socket.metadata.get('config') or {})

    def test_serializer_validation(self, download_mock):
        # An endpoint spec with only a script reference is rejected: 'No calls defined'.
        self.assert_validation(download_mock, 'No calls defined',
                               """
endpoints:
    my_endpoint_#1:
        script: script_endpoint_1
""", line=3)

    def test_basic_validation(self, download_mock):
        # Too many top-level keys is rejected.
        self.assert_validation(download_mock, 'Too many properties',
                               '\n'.join(['name{}: name'.format(i)
                                          for i in range(self.importer_class.max_number_of_keys + 1)]))
        # Non-mapping YAML root is rejected.
        self.assert_validation(download_mock, 'Wrong format',
                               '- wrong format')

    def test_endpoints_validation(self, download_mock):
        # An empty endpoint definition has no calls.
        self.assert_validation(download_mock, 'No calls defined',
                               """
endpoints:
    endpoint1: {}
""", line=3)

    def test_cache_validation(self, download_mock):
        # Cache value above the allowed maximum is rejected.
        self.assert_validation(download_mock, 'Invalid cache value',
                               """
endpoints:
    endpoint1:
        cache: 100000
        source: |
            print 1
""", line=3)

    def test_timeout_validation(self, download_mock):
        # Timeout value above the allowed maximum is rejected.
        self.assert_validation(download_mock, 'Invalid timeout value',
                               """
endpoints:
    endpoint1:
        timeout: 100000
        source: |
            print 1
""", line=3)

    def test_script_endpoints_format_validation(self, download_mock):
        # endpoints must be a mapping, not a list.
        self.assert_validation(download_mock, 'Wrong format',
                               """
endpoints:
    - endpoint1
""", line=3)
        # An endpoint body must be a mapping/string, not a list.
        self.assert_validation(download_mock, 'Wrong format',
                               """
endpoints:
    endpoint1:
        - script
""", line=4)
        # `file` must be a scalar path, not a list.
        self.assert_validation(download_mock, 'Wrong format',
                               """
endpoints:
    endpoint1:
        file:
            - script.py
""", line=5)
        # Invalid characters in the source file path are rejected.
        self.assert_validation(download_mock, 'Source file path contains invalid characters',
                               """
endpoints:
    endpoint1:
        file: <script.py
""", line=3)
        # Overlong source file paths are rejected.
        self.assert_validation(download_mock, 'Source file path is too long',
                               """
endpoints:
    endpoint1:
        file: {}
""".format('a' * 500), line=3)
        # A per-method (POST) body must not be a list.
        self.assert_validation(download_mock, 'Wrong format',
                               """
endpoints:
    endpoint1:
        POST:
            - script
""", line=5)

    def test_channel_endpoints_format_validation(self, download_mock):
        # `channel` must be a scalar, not a list.
        self.assert_validation(download_mock, 'Wrong format',
                               """
endpoints:
    endpoint1:
        channel:
            - script
""", line=5)
        # Channel placeholders with invalid characters are rejected.
        self.assert_validation(download_mock, 'Wrong format',
                               """
endpoints:
    endpoint1:
        channel: something.{a!bc}.{user}
""", line=4)
        # Valid placeholder names import cleanly.
        self.process_socket(download_mock, """
endpoints:
    endpoint1:
        channel: something.{ABC}.{user}
""")
        # An inline script body also imports cleanly.
        self.process_socket(download_mock, """
endpoints:
    endpoint1: |
        channels.publish("a")
""")

    def test_config_validation(self, download_mock):
        # A required config key without a provided value fails validation.
        self.assert_validation_with_config(
            download_mock,
            'Error validating socket config. "user_key" is required.',
            """
config:
    secret_key:
        value: some value
    user_key:
        required: true
        value: some value
""")

        # Null entries and non-mapping config bodies are both 'Wrong format'.
        for socket_yml in (
                """
config:
    key: null
""",
                """
config:
    - value
"""):
            self.assert_validation_with_config(
                download_mock,
                'Error validating socket config. Wrong format.',
                socket_yml)

    def test_event_handlers_validation(self, download_mock):
        # event_handlers must be a mapping, not a list.
        self.assert_validation(download_mock, 'Wrong format',
                               """
event_handlers:
    - eh
""", line=3)
        # A handler body must not be a list.
        self.assert_validation(download_mock, 'Wrong format',
                               """
event_handlers:
    data.user.create:
        - src
""", line=4)
        # Unknown handler prefixes are rejected.
        self.assert_validation(download_mock, 'Unsupported event handler type',
                               """
event_handlers:
    something.bla.bla: |
        print 1
""", line=3)

    def test_data_event_handlers_validation(self, download_mock):
        # A data handler name missing a dotted segment is rejected.
        self.assert_validation(download_mock, 'Wrong format for data event handler',
                               """
event_handlers:
    data.usercreate: |
        print 1
""", line=3)

    def test_schedule_event_handlers_validation(self, download_mock):
        # '#' instead of '.' in the handler name is rejected.
        self.assert_validation(download_mock, 'Wrong format for schedule event handler',
                               """
event_handlers:
    schedule.interval#5_minutes: |
        print 1
""", line=3)
        # An unparseable interval unit ('zonks') is rejected.
        self.assert_validation(download_mock, 'Wrong format for schedule interval',
                               """
event_handlers:
    schedule.interval.5_zonks: |
        print 1
""", line=3)
        # 'intercal' is a deliberate typo: unknown schedule type is rejected.
        self.assert_validation(download_mock, 'Wrong type of schedule event handler',
                               """
event_handlers:
    schedule.intercal.5_minutes: |
        print 1
""", line=3)

    def test_custom_event_handlers_validation(self, download_mock):
        # A bare 'events' handler name is rejected.
        self.assert_validation(download_mock, 'Wrong format for event handler',
                               """
event_handlers:
    events: |
        print 1
""", line=3)
        # Too many dotted segments is also rejected.
        self.assert_validation(download_mock, 'Wrong format for event handler',
                               """
event_handlers:
    events.socket1.event2.suffix: |
        print 1
""", line=3)
class TestSocketEventHandler(TestCase):
    """Checks for the schedule-interval regex used by the socket importer."""

    def calculate_interval(self, interval_str):
        """Parse `interval_str` with INTERVAL_REGEX.

        Returns the total number of seconds, or None when the regex does not
        match.  Missing groups default to 0 via groupdict(0).
        """
        match = INTERVAL_REGEX.match(interval_str)
        if match is None:
            return None
        parts = match.groupdict(0)
        hours = int(parts['hours'])
        minutes = int(parts['minutes'])
        seconds = int(parts['seconds'])
        return hours * 3600 + minutes * 60 + seconds

    def test_schedule_interval_regex(self):
        cases = (
            ('5h', 5 * 60 * 60),
            ('5m', 5 * 60),
            ('5s', 5),
            ('5_hours_10_minutes_30_seconds', 5 * 60 * 60 + 10 * 60 + 30),
            ('1_hour_1_minute_1_second', 1 * 60 * 60 + 1 * 60 + 1),
            ('1h_2m_3s', 1 * 60 * 60 + 2 * 60 + 3),
            ('1h_2m_3s', 1 * 60 * 60 + 2 * 60 + 3),
            # Units out of order or misspelled must not match.
            ('3s_2m', None),
            ('2m_1h', None),
            ('1_hor', None),
        )
        for raw, expected in cases:
            self.assertEqual(self.calculate_interval(raw), expected)
| [
"rk@23doors.com"
] | rk@23doors.com |
d667c4b00b34a3dea6dc5a59897ad12547204d3a | 62f723af2777f987ed25ff4026aa2ad1ef9e5ce7 | /model/modeling.py | 8ca4d7e742ceae0d99665ca8af9b33e4998a5447 | [] | no_license | renjie-liang/Flatten_Net | 0c311b7625323c848d3aac72a14d97647b8ca6fe | fe8ff7932cf05fb36a8bfb674d8061e52a6027ab | refs/heads/master | 2022-12-21T08:02:40.841405 | 2019-10-06T19:09:42 | 2019-10-06T19:09:42 | 211,687,528 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | import torch
import torch.nn as nn
from model.net import *
from lib.config import cfg
class get_Model(nn.Module):
    """Thin nn.Module wrapper that instantiates the network class named by
    cfg.MODEL.NET_NAME and delegates the forward pass to it."""

    def __init__(self):
        super(get_Model, self).__init__()
        # SECURITY NOTE: eval() on a config value executes arbitrary code if
        # the config is untrusted; a registry/getattr lookup would be safer.
        self.net = eval(cfg.MODEL.NET_NAME)()

    def forward(self, x):
        """Run the wrapped network on input x and return its output."""
        x = self.net(x)
        return x

    def to_string(self):
        """Return a human-readable description of the wrapped network."""
        return '{}'.format(str(self.net))

    # Backward-compatible alias for the original (misspelled) method name.
    to_stirng = to_string
# BASE_Decoder
| [
"allen_rj@163.com"
] | allen_rj@163.com |
0f3ce92a2ff9742a1df0452ef3c71ce7e361bd2b | f8ad6963bfc851657ea50c6a036cfad29cdd7f60 | /Books/LearningTensorFlow/Chapter5_Text_Sequence_Tensorboard/scan_example.py | 4cf7e1f4fa42316220ed1621d22dc6ddfdcbd77a | [] | no_license | foru120/PythonRepository | e1ab0265c0f50ef2e9acdf7447237c913560692b | db6b6be0f9fb91b0a81a3b6a2ec5631daab10f98 | refs/heads/master | 2021-01-01T06:53:11.728109 | 2019-04-25T13:52:50 | 2019-04-25T13:52:50 | 97,541,222 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | import numpy as np
import tensorflow as tf

# The characters of "Tensor Flow"; tf.scan folds them left to right with +,
# emitting every running concatenation (a cumulative "sum" over the elements).
elems = np.array(['T', 'e', 'n', 's', 'o', 'r', ' ', 'F', 'l', 'o', 'w'])
running_concat = tf.scan(lambda acc, ch: acc + ch, elems)

session = tf.InteractiveSession()
print(session.run(running_concat))
session.close()
"broodsky1122@hanmail.net"
] | broodsky1122@hanmail.net |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.