text stringlengths 8 6.05M |
|---|
import numpy as np
class DataScaler():
    """
    Pack floating-point data into fixed-width integers and back.

    The smallest representable integer of the chosen type is reserved as the
    missing-value sentinel; the remaining range [min+1, max] is mapped
    linearly onto the data's [min, max] via a scale factor and add offset.
    """

    def __init__(self, nbytes=2, signed=True):
        """
        Initialize scaler for given byte-depth and signed data type

        Arguments:
            nbytes (int) : Number of bytes to scale data to
            signed (bool) : If set, signed integers are used, if False, unsigned
        """
        self._nbytes = None
        self._dfmt = None
        self._dtype = None
        self._oMin = None
        self._oMax = None
        self._miss = None
        self._signed = signed
        self.nbytes = nbytes  # run the nbytes setter to populate derived fields

    @property
    def nbytes(self):
        """int : Byte depth of the packed integers."""
        return self._nbytes

    @nbytes.setter
    def nbytes(self, val):
        """
        Updates various information used for scaling when byte-depth is changed

        When the nbytes attribute is changed, the min/max of the packed data
        must be updated, along with the numpy data type to use. All that
        is handled in this setter method
        """
        self._nbytes = val
        # Build a numpy type string such as 'i2' (signed) or 'u2' (unsigned).
        # NOTE: read the private _signed flag directly; going through the
        # property during __init__ would be order-dependent.
        self._dfmt = f'i{val}' if self._signed else f'u{val}'
        self._dtype = np.dtype(self._dfmt)
        info = np.iinfo(self._dtype)
        self._miss = info.min        # sentinel reserved for missing data
        self._oMin = self._miss + 1  # smallest packed value used for valid data
        self._oMax = info.max        # largest packed value used for valid data

    @property
    def signed(self):
        """bool : Whether packed integers are signed."""
        return self._signed

    @signed.setter
    def signed(self, val):
        """
        Updates signed attribute along with range of output values

        Re-runs the nbytes setter so the dtype, missing value, and output
        minimum/maximum stay consistent with the new signedness.

        BUG FIX: this setter was previously decorated with @nbytes.setter,
        which replaced the 'signed' property with one whose getter returned
        self._nbytes — reading .signed gave the byte count, and the
        signed=False constructor argument was silently ignored.
        """
        if not isinstance(val, bool):
            # TypeError (an Exception subclass) keeps existing handlers working.
            raise TypeError('Must be of type bool!')
        self._signed = val
        self.nbytes = self._nbytes  # recompute dtype and packed-value range

    @property
    def missing_value(self):
        """Read only value for missing_value; set by nbytes.setter"""
        return self._miss

    @property
    def _FillValue(self):
        """Read only value for _FillValue; set by nbytes.setter"""
        return self._miss

    @property
    def dtype(self):
        """Read only value for data type; set by nbytes.setter"""
        return self._dtype

    def computeScale(self, dataMin, dataMax):
        """
        Compute scale factor based on data minimum/maximum

        Arguments:
            dataMin (int,float) : Minimum value of data to scale
            dataMax (int,float) : Maximum value of data to scale

        Returns:
            int,float : Scale factor for packing data
        """
        if dataMax == dataMin:  # Constant data: any scale works; use 1
            return 1
        return (dataMax - dataMin) / (self._oMax - self._oMin)

    def computeOffset(self, dataMin, scale):
        """
        Compute add offset based on data minimum and scale factor

        Arguments:
            dataMin (int,float) : Minimum value of data to scale
            scale (int,float) : Scale factor for packing data; computed by computeScale()

        Returns:
            int,float : Add offset for packing data
        """
        return -(self._oMin * scale - dataMin)

    def packData(self, data, scale, offset):
        """
        Pack the data in the specified number of bytes

        Arguments:
            data (numpy.ndarray, numpy.ma.MaskedArray) : Data to pack
            scale (int,float) : Scale factor for packing data; computed by computeScale()
            offset (int,float) : Add offset for packing data

        Returns:
            numpy.ndarray : Packed data, with masked/non-finite elements set
                to the missing-value sentinel
        """
        if isinstance(data, np.ma.core.MaskedArray):
            # getmaskarray always yields a full boolean array, even when the
            # mask is the scalar False (plain data.mask would mis-index then).
            index = np.ma.getmaskarray(data)
            values = data.filled(offset)  # neutral fill; overwritten below
        else:
            index = ~np.isfinite(data)  # NaN/Inf become missing values
            # Replace non-finite entries before the integer cast: casting
            # NaN/Inf to an integer dtype is undefined behavior in numpy.
            values = np.where(index, offset, data)
        packed = np.round((values - offset) / scale).astype(self._dtype)
        packed[index] = self._miss
        return packed

    def unpackData(self, data, scale, offset):
        """
        Unpack the data into usable values

        Arguments:
            data (numpy.ndarray, numpy.ma.MaskedArray) : Packed data values to unpack
            scale (int,float) : Scale factor for unpacking data; computed by computeScale()
            offset (int,float) : Add offset for unpacking data

        Returns:
            numpy.ndarray : Unpacked (float) data with sentinel values as NaN
        """
        index = data == self._miss
        # Force a float result so NaN can be stored even when the inputs and
        # scale/offset are all integers.
        out = np.asarray(data, dtype='f8') * scale + offset
        out[index] = np.nan
        return out

    def scaleData(self, data):
        """
        Scale data to integer type

        Will scale the input data to a n byte integer. Smallest value
        of integer type is reserved for missing data.

        Arguments:
            data (np.ndarray) : Numpy array of data to scale

        Returns:
            tuple : Packed data, scale factor, add offset (scale and offset
                are cast to the input array's dtype)
        """
        dtype = data.dtype
        dataMin = np.nanmin(data)  # Minimum, ignoring NaN
        dataMax = np.nanmax(data)  # Maximum, ignoring NaN
        scale = self.computeScale(dataMin, dataMax)
        offset = self.computeOffset(dataMin, scale)
        packed = self.packData(data, scale, offset)
        return packed, dtype.type(scale), dtype.type(offset)
|
from api.resources import course_methods, question_methods, exam_methods, token_methods, token_validation, submission_methods, grade_methods
from django.http import HttpResponseNotAllowed
def course(request, course_id=None):
    """Dispatch course CRUD requests to the matching handler.

    GET reads a course (optionally by id), POST creates, PUT updates,
    DELETE removes; any other verb gets a 405 response.
    """
    method = request.method
    if method == 'GET':
        return course_methods.get_course(request, course_id)
    if method == 'POST':
        return course_methods.create_course(request)
    if method == 'PUT':
        return course_methods.update_course(request)
    if method == 'DELETE':
        return course_methods.delete_course(request)
    return HttpResponseNotAllowed(['GET', 'PUT', 'POST', 'DELETE'])
def exam(request):
    """Dispatch exam CRUD requests to the matching handler.

    Unsupported verbs receive a 405 response listing the allowed methods.
    """
    method = request.method
    if method == 'GET':
        return exam_methods.get_exam(request)
    if method == 'POST':
        return exam_methods.create_exam(request)
    if method == 'PUT':
        return exam_methods.update_exam(request)
    if method == 'DELETE':
        return exam_methods.delete_exam(request)
    return HttpResponseNotAllowed(['GET', 'PUT', 'POST', 'DELETE'])
def question(request):
    """Dispatch question requests: POST upserts, GET reads, DELETE removes.

    There is no PUT handler; updates go through the POST upsert.
    """
    method = request.method
    if method == 'GET':
        return question_methods.get_question(request)
    if method == 'POST':
        return question_methods.upsert_question(request)
    if method == 'DELETE':
        return question_methods.delete_question(request)
    return HttpResponseNotAllowed(['GET', 'POST', 'DELETE'])
def token(request):
    """Dispatch token requests; note tokens are write-only (no GET)."""
    method = request.method
    if method == 'POST':
        return token_methods.create_token(request)
    if method == 'PUT':
        return token_methods.update_token(request)
    if method == 'DELETE':
        return token_methods.delete_token(request)
    return HttpResponseNotAllowed(['PUT', 'POST', 'DELETE'])
def validate_token(request):
    # Thin pass-through: validation logic lives in token_validation.
    return token_validation.validate_token(request)
def submission(request):
    """Dispatch submissions: POST submits answers, PUT applies a manual grade."""
    handlers = {
        'POST': submission_methods.submit,
        'PUT': submission_methods.manual_grade,
    }
    handler = handlers.get(request.method)
    if handler is not None:
        return handler(request)
    return HttpResponseNotAllowed(['PUT', 'POST'])
def grade(request):
    # Thin pass-through: grading logic lives in grade_methods.
    return grade_methods.grade(request)
# -*- coding:utf-8 -*-
from src.sentence_embedding.sentence_emb import UsableEncoder
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
import jieba
model_path = './saved_models/skip-best'
dict_path = './data/wiki_clean_cn.txt.pkl'

# Sentence-embedding encoder (project-local; presumably uses the paths above
# internally — confirm against UsableEncoder's implementation).
usable_encoder = UsableEncoder()

standard_q = './data/standard_q.txt'
similar_q = './data/similar_q.txt'

# One question per line (newlines kept, matching the original append loop).
# 'with' closes the handles — the original left both files open — and utf-8
# is stated explicitly for the Chinese question files.
with open(standard_q, 'r', encoding='utf-8') as f_in_stand:
    stand_q_list = list(f_in_stand)
with open(similar_q, 'r', encoding='utf-8') as f_in_simi:
    simi_q_list = list(f_in_simi)

print(len(stand_q_list), len(simi_q_list))

stand_q_emb = []
simi_q_emb = []
for sentence in stand_q_list:
|
from common.run_method import RunMethod
import allure
@allure.step("通用/消息通知/删除某则消息")
def notices_noticeId_delete(noticeId, params=None, body=None, header=None, return_json=True, **kwargs):
    """Delete a single notification.

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param return_json: return the JSON-decoded response when True (default)
    :param header: request headers
    :param kwargs: extra options (e.g. target host/environment) forwarded to the runner
    :return: JSON response by default; raw response when return_json=False
    """
    name = "通用/消息通知/删除某则消息"
    return RunMethod.run_request(
        "DELETE",
        f"/service-public/notices/{noticeId}",
        params=params,
        body=body,
        header=header,
        return_json=return_json,
        name=name,
        **kwargs,
    )
@allure.step("通用/消息通知/通知概况")
def notices_overviews_get(params=None, header=None, return_json=True, **kwargs):
    """Fetch the notifications overview.

    :param params: query-string parameters appended to the URL
    :param return_json: return the JSON-decoded response when True (default)
    :param header: request headers
    :param kwargs: extra options (e.g. target host/environment) forwarded to the runner
    :return: JSON response by default; raw response when return_json=False
    """
    name = "通用/消息通知/通知概况"
    return RunMethod.run_request(
        "GET",
        f"/service-public/notices/overviews",
        params=params,
        header=header,
        return_json=return_json,
        name=name,
        **kwargs,
    )
@allure.step("通用/消息通知/更新通知作业、发布内容详情")
def notices_noticeId_patch(noticeId, params=None, body=None, header=None, return_json=True, **kwargs):
    """Update the details of a homework/announcement notification.

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param return_json: return the JSON-decoded response when True (default)
    :param header: request headers
    :param kwargs: extra options (e.g. target host/environment) forwarded to the runner
    :return: JSON response by default; raw response when return_json=False
    """
    name = "通用/消息通知/更新通知作业、发布内容详情"
    return RunMethod.run_request(
        "PATCH",
        f"/service-public/notices/{noticeId}",
        params=params,
        body=body,
        header=header,
        return_json=return_json,
        name=name,
        **kwargs,
    )
@allure.step("通用/消息通知/作业、发布内容的阅读详情")
def notices_noticeId_reading_get(noticeId, params=None, header=None, return_json=True, **kwargs):
    """Fetch the reading details for a homework/announcement notification.

    :param params: query-string parameters appended to the URL
    :param return_json: return the JSON-decoded response when True (default)
    :param header: request headers
    :param kwargs: extra options (e.g. target host/environment) forwarded to the runner
    :return: JSON response by default; raw response when return_json=False
    """
    name = "通用/消息通知/作业、发布内容的阅读详情"
    return RunMethod.run_request(
        "GET",
        f"/service-public/notices/{noticeId}/reading",
        params=params,
        header=header,
        return_json=return_json,
        name=name,
        **kwargs,
    )
@allure.step("通用/消息通知/获取拒收规则")
def notices_reject_get(params=None, header=None, return_json=True, **kwargs):
    """Fetch the message-rejection rules.

    :param params: query-string parameters appended to the URL
    :param return_json: return the JSON-decoded response when True (default)
    :param header: request headers
    :param kwargs: extra options (e.g. target host/environment) forwarded to the runner
    :return: JSON response by default; raw response when return_json=False
    """
    name = "通用/消息通知/获取拒收规则"
    return RunMethod.run_request(
        "GET",
        f"/service-public/notices/reject",
        params=params,
        header=header,
        return_json=return_json,
        name=name,
        **kwargs,
    )
@allure.step("通用/消息通知/拒收某类消息")
def notices_reject_post(params=None, body=None, header=None, return_json=True, **kwargs):
    """Reject (opt out of) a category of messages.

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param return_json: return the JSON-decoded response when True (default)
    :param header: request headers
    :param kwargs: extra options (e.g. target host/environment) forwarded to the runner
    :return: JSON response by default; raw response when return_json=False
    """
    name = "通用/消息通知/拒收某类消息"
    return RunMethod.run_request(
        "POST",
        f"/service-public/notices/reject",
        params=params,
        body=body,
        header=header,
        return_json=return_json,
        name=name,
        **kwargs,
    )
@allure.step("通用/消息通知/提醒学生阅读作业、发布内容")
def notices_noticeId_reminds_post(noticeId, params=None, body=None, header=None, return_json=True, **kwargs):
    """Remind students to read a homework/announcement notification.

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param return_json: return the JSON-decoded response when True (default)
    :param header: request headers
    :param kwargs: extra options (e.g. target host/environment) forwarded to the runner
    :return: JSON response by default; raw response when return_json=False
    """
    name = "通用/消息通知/提醒学生阅读作业、发布内容"
    return RunMethod.run_request(
        "POST",
        f"/service-public/notices/{noticeId}/reminds",
        params=params,
        body=body,
        header=header,
        return_json=return_json,
        name=name,
        **kwargs,
    )
|
"""Tests for the 'zero' plugin"""
import unittest
from test.helper import TestHelper, control_stdin
from beets.library import Item
from beetsplug.zero import ZeroPlugin
from mediafile import MediaFile
from beets.util import syspath
class ZeroPluginTest(unittest.TestCase, TestHelper):
    """Integration tests for the beets 'zero' plugin.

    Each test configures the plugin through ``self.config['zero']`` BEFORE
    loading it, writes an item's tags, then inspects the on-disk MediaFile
    and/or the library database to verify which fields were zeroed.
    """

    def setUp(self):
        # Fresh beets sandbox with an empty 'zero' configuration per test.
        self.setup_beets()
        self.config['zero'] = {
            'fields': [],
            'keep_fields': [],
            'update_database': False,
        }

    def tearDown(self):
        # Clear plugin event listeners so state does not leak between tests,
        # then dismantle the sandbox and unload plugins.
        ZeroPlugin.listeners = None
        self.teardown_beets()
        self.unload_plugins()

    def test_no_patterns(self):
        # With no per-field patterns, listed fields are zeroed unconditionally
        # while unlisted fields are left intact.
        self.config['zero']['fields'] = ['comments', 'month']
        item = self.add_item_fixture(
            comments='test comment',
            title='Title',
            month=1,
            year=2000,
        )
        item.write()
        self.load_plugins('zero')
        item.write()
        mf = MediaFile(syspath(item.path))
        self.assertIsNone(mf.comments)
        self.assertIsNone(mf.month)
        self.assertEqual(mf.title, 'Title')
        self.assertEqual(mf.year, 2000)

    def test_pattern_match(self):
        # A field is zeroed when its value matches the configured pattern.
        self.config['zero']['fields'] = ['comments']
        self.config['zero']['comments'] = ['encoded by']
        item = self.add_item_fixture(comments='encoded by encoder')
        item.write()
        self.load_plugins('zero')
        item.write()
        mf = MediaFile(syspath(item.path))
        self.assertIsNone(mf.comments)

    def test_pattern_nomatch(self):
        # A field is kept when its value does NOT match the pattern.
        self.config['zero']['fields'] = ['comments']
        self.config['zero']['comments'] = ['encoded by']
        item = self.add_item_fixture(comments='recorded at place')
        item.write()
        self.load_plugins('zero')
        item.write()
        mf = MediaFile(syspath(item.path))
        self.assertEqual(mf.comments, 'recorded at place')

    def test_do_not_change_database(self):
        # update_database defaults to False (setUp): the library keeps the value.
        self.config['zero']['fields'] = ['year']
        item = self.add_item_fixture(year=2000)
        item.write()
        self.load_plugins('zero')
        item.write()
        self.assertEqual(item['year'], 2000)

    def test_change_database(self):
        # With update_database=True the zeroing is mirrored into the library.
        self.config['zero']['fields'] = ['year']
        self.config['zero']['update_database'] = True
        item = self.add_item_fixture(year=2000)
        item.write()
        self.load_plugins('zero')
        item.write()
        self.assertEqual(item['year'], 0)

    def test_album_art(self):
        # Embedded images can be stripped like any other field.
        self.config['zero']['fields'] = ['images']
        path = self.create_mediafile_fixture(images=['jpg'])
        item = Item.from_path(path)
        self.load_plugins('zero')
        item.write()
        mf = MediaFile(syspath(path))
        self.assertFalse(mf.images)

    def test_auto_false(self):
        # auto=False disables zeroing on write; the tag survives untouched.
        self.config['zero']['fields'] = ['year']
        self.config['zero']['update_database'] = True
        self.config['zero']['auto'] = False
        item = self.add_item_fixture(year=2000)
        item.write()
        self.load_plugins('zero')
        item.write()
        self.assertEqual(item['year'], 2000)

    def test_subcommand_update_database_true(self):
        # The `zero` CLI subcommand (confirmed via stdin 'y') zeroes the file
        # tag and, with update_database=True, the library field as well.
        item = self.add_item_fixture(
            year=2016,
            day=13,
            month=3,
            comments='test comment'
        )
        item.write()
        item_id = item.id
        self.config['zero']['fields'] = ['comments']
        self.config['zero']['update_database'] = True
        self.config['zero']['auto'] = False
        self.load_plugins('zero')
        with control_stdin('y'):
            self.run_command('zero')
        mf = MediaFile(syspath(item.path))
        item = self.lib.get_item(item_id)
        self.assertEqual(item['year'], 2016)
        self.assertEqual(mf.year, 2016)
        self.assertEqual(mf.comments, None)
        self.assertEqual(item['comments'], '')

    def test_subcommand_update_database_false(self):
        # Same subcommand, but update_database=False: file tag cleared while
        # the library keeps the original comment.
        item = self.add_item_fixture(
            year=2016,
            day=13,
            month=3,
            comments='test comment'
        )
        item.write()
        item_id = item.id
        self.config['zero']['fields'] = ['comments']
        self.config['zero']['update_database'] = False
        self.config['zero']['auto'] = False
        self.load_plugins('zero')
        with control_stdin('y'):
            self.run_command('zero')
        mf = MediaFile(syspath(item.path))
        item = self.lib.get_item(item_id)
        self.assertEqual(item['year'], 2016)
        self.assertEqual(mf.year, 2016)
        self.assertEqual(item['comments'], 'test comment')
        self.assertEqual(mf.comments, None)

    def test_subcommand_query_include(self):
        # A query argument that matches the item: its comment is zeroed.
        item = self.add_item_fixture(
            year=2016,
            day=13,
            month=3,
            comments='test comment'
        )
        item.write()
        self.config['zero']['fields'] = ['comments']
        self.config['zero']['update_database'] = False
        self.config['zero']['auto'] = False
        self.load_plugins('zero')
        self.run_command('zero', 'year: 2016')
        mf = MediaFile(syspath(item.path))
        self.assertEqual(mf.year, 2016)
        self.assertEqual(mf.comments, None)

    def test_subcommand_query_exclude(self):
        # A query that matches nothing: the item is left completely untouched.
        item = self.add_item_fixture(
            year=2016,
            day=13,
            month=3,
            comments='test comment'
        )
        item.write()
        self.config['zero']['fields'] = ['comments']
        self.config['zero']['update_database'] = False
        self.config['zero']['auto'] = False
        self.load_plugins('zero')
        self.run_command('zero', 'year: 0000')
        mf = MediaFile(syspath(item.path))
        self.assertEqual(mf.year, 2016)
        self.assertEqual(mf.comments, 'test comment')

    def test_no_fields(self):
        # With no fields configured the subcommand is a no-op.
        item = self.add_item_fixture(year=2016)
        item.write()
        mediafile = MediaFile(syspath(item.path))
        self.assertEqual(mediafile.year, 2016)
        item_id = item.id
        self.load_plugins('zero')
        with control_stdin('y'):
            self.run_command('zero')
        item = self.lib.get_item(item_id)
        self.assertEqual(item['year'], 2016)
        self.assertEqual(mediafile.year, 2016)

    def test_whitelist_and_blacklist(self):
        # Setting both fields and keep_fields is contradictory; the plugin
        # then changes nothing.
        item = self.add_item_fixture(year=2016)
        item.write()
        mf = MediaFile(syspath(item.path))
        self.assertEqual(mf.year, 2016)
        item_id = item.id
        self.config['zero']['fields'] = ['year']
        self.config['zero']['keep_fields'] = ['comments']
        self.load_plugins('zero')
        with control_stdin('y'):
            self.run_command('zero')
        item = self.lib.get_item(item_id)
        self.assertEqual(item['year'], 2016)
        self.assertEqual(mf.year, 2016)

    def test_keep_fields(self):
        # keep_fields whitelist: everything except 'year' is zeroed. Calls the
        # plugin's write_event hook directly on a raw tags dict.
        item = self.add_item_fixture(year=2016, comments='test comment')
        self.config['zero']['keep_fields'] = ['year']
        self.config['zero']['fields'] = None
        self.config['zero']['update_database'] = True
        tags = {
            'comments': 'test comment',
            'year': 2016,
        }
        self.load_plugins('zero')
        z = ZeroPlugin()
        z.write_event(item, item.path, tags)
        self.assertEqual(tags['comments'], None)
        self.assertEqual(tags['year'], 2016)

    def test_keep_fields_removes_preserved_tags(self):
        # 'id' must never be zeroed, even in keep_fields (whitelist) mode.
        self.config['zero']['keep_fields'] = ['year']
        self.config['zero']['fields'] = None
        self.config['zero']['update_database'] = True
        z = ZeroPlugin()
        self.assertNotIn('id', z.fields_to_progs)

    def test_fields_removes_preserved_tags(self):
        # 'id' must never be zeroed, even when explicitly listed in fields.
        self.config['zero']['fields'] = ['year id']
        self.config['zero']['update_database'] = True
        z = ZeroPlugin()
        self.assertNotIn('id', z.fields_to_progs)

    def test_empty_query_n_response_no_changes(self):
        # Answering 'n' at the confirmation prompt aborts: nothing changes
        # on disk or in the library.
        item = self.add_item_fixture(
            year=2016,
            day=13,
            month=3,
            comments='test comment'
        )
        item.write()
        item_id = item.id
        self.config['zero']['fields'] = ['comments']
        self.config['zero']['update_database'] = True
        self.config['zero']['auto'] = False
        self.load_plugins('zero')
        with control_stdin('n'):
            self.run_command('zero')
        mf = MediaFile(syspath(item.path))
        item = self.lib.get_item(item_id)
        self.assertEqual(item['year'], 2016)
        self.assertEqual(mf.year, 2016)
        self.assertEqual(mf.comments, 'test comment')
        self.assertEqual(item['comments'], 'test comment')
def suite():
    """Build this module's test suite (entry point for suite-based runners)."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
if __name__ == '__main__':
    # Allow running this test module directly with `python <file>`.
    unittest.main(defaultTest='suite')
|
# coding: utf-8
#CURRENCY CONVERTER
# Parallel lists: s_currency[i] is the symbol whose hard-coded USD exchange
# rate is s_curr_val[i]; index 0 is a placeholder for the "quit" option.
#list of currency symbols
s_currency=["0","XAF","ARS","AUD","BSD","BRL","BGN","CAD","CLP","CNY","COP","HRK","CYP","CZK","DKK","LTC","BTC","XCD","EEK","EUR","FJD","XPF","GHS","GTQ","HNL","HKD","HUF","ISK","INR","IDR","ILS","JMD","JPY","LVL","LTL","MYR","MXN","MAD","MMK","ANG","NZD","NOK","PKR","PAB","PEN","PHP","PLN","GOLD","QAR","RON","RUB","SAR","RSD","SGD","ZAR","KRW","LKR","SEK","CHF","TWD","THB","TTD","TND","TRY","AED","GBP","USD","VND","VEF"]
#list of currency values
# NOTE(review): rates are static snapshots, presumably USD->currency
# multipliers -- confirm against the source of these numbers.
s_curr_val=[0,582.43,14.32,1.30,1.0,3.74,1.73,1.2835,663.85,6.4825,2991.92,6.6401,0.519911,23.987,6.6060,0.3112,0.002376,2.6898,11.73,0.88,2.0501,105.90,3.8245,7.8792,22.550,7.7564,276.56,124.49,66.573,13232.29,3.7828,121.78,109.14,0.62,3.05,3.91,17.42,9.70,1178.2,1.79,1.45,8.24,104.67,1.0,3.27,46.14,3.82,0.000813,3.64,3.97,66.07,3.75,109.14,1.36,14.56,1151.75,146.25,8.14,0.97,32.41,35.11,6.60,2.02,2.85,3.67,0.71,1.0,22309.50,9.95,]
# Prompt for the amount of USD to convert, then coerce to float (the old
# comment said "int" -- float() is what actually runs). Python 2 raw_input.
firstCurrency=raw_input("How many US Dollars would you like to convert? Enter 0 to quit\n")
firstCurrency=float(firstCurrency)
# Debug print of the value's type name ('float'); Python 2 print statement.
print firstCurrency.__class__.__name__
if firstCurrency==0:
print("Quitting program..")
print firstCurrency.__class__.__name__
#Enter 0 and change the arg for s_currency to see where I left off....
print(len(s_currency))
print(len(s_curr_val))
print(s_currency[1])
print(s_curr_val[1])
elif firstCurrency!=0:
#taking user inout for currency they want to convert USD into
second_currency=raw_input("Enter Symbol of currency to convert USD into. Enter '0' to quit\n")
print second_currency.__class__.__name__
if second_currency=="0":
print("Quitting program..")
print second_currency.__class__.__name__
elif second_currency==s_currency[1]:
total=firstCurrency*s_curr_val[1]
total=float(total)
print second_currency.__class__.__name__
print s_currency[1].__class__.__name__
print s_curr_val[1].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
print("You will get "+str(total)+" "+str(s_currency[1])+"'s for "+str(firstCurrency)+" US Dollars")
elif second_currency==s_currency[2]:
total=firstCurrency*s_curr_val[2]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[2])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[2].__class__.__name__
print s_curr_val[2].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[3]:
total=firstCurrency*s_curr_val[3]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[3])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[3].__class__.__name__
print s_curr_val[3].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[4]:
total=firstCurrency*s_curr_val[4]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[4])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[4].__class__.__name__
print s_curr_val[4].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[5]:
total=firstCurrency*s_curr_val[5]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[5])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[5].__class__.__name__
print s_curr_val[5].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[6]:
total=firstCurrency*s_curr_val[6]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[6])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[6].__class__.__name__
print s_curr_val[6].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[7]:
total=firstCurrency*s_curr_val[7]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[7])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[7].__class__.__name__
print s_curr_val[7].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[8]:
total=firstCurrency*s_curr_val[8]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[8])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[8].__class__.__name__
print s_curr_val[8].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[9]:
total=firstCurrency*s_curr_val[9]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[9])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[9].__class__.__name__
print s_curr_val[9].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[10]:
total=firstCurrency*s_curr_val[10]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[10])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[10].__class__.__name__
print s_curr_val[10].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[11]:
total=firstCurrency*s_curr_val[11]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[11])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[11].__class__.__name__
print s_curr_val[11].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[12]:
total=firstCurrency*s_curr_val[12]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[12])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[12].__class__.__name__
print s_curr_val[12].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[13]:
total=firstCurrency*s_curr_val[13]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[13])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[13].__class__.__name__
print s_curr_val[13].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[14]:
total=firstCurrency*s_curr_val[14]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[14])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[14].__class__.__name__
print s_curr_val[14].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[15]:
total=firstCurrency*s_curr_val[15]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[15])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[15].__class__.__name__
print s_curr_val[15].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[16]:
total=firstCurrency*s_curr_val[16]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[16])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[16].__class__.__name__
print s_curr_val[16].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[17]:
total=firstCurrency*s_curr_val[17]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[17])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[17].__class__.__name__
print s_curr_val[17].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[18]:
total=firstCurrency*s_curr_val[18]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[18])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[18].__class__.__name__
print s_curr_val[18].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[19]:
total=firstCurrency*s_curr_val[19]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[19])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[19].__class__.__name__
print s_curr_val[19].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[20]:
total=firstCurrency*s_curr_val[20]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[20])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[20].__class__.__name__
print s_curr_val[20].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[21]:
total=firstCurrency*s_curr_val[21]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[21])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[21].__class__.__name__
print s_curr_val[21].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[22]:
total=firstCurrency*s_curr_val[22]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[22])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[22].__class__.__name__
print s_curr_val[22].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[23]:
total=firstCurrency*s_curr_val[23]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[23])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[23].__class__.__name__
print s_curr_val[23].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[24]:
total=firstCurrency*s_curr_val[24]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[24])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[24].__class__.__name__
print s_curr_val[24].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[25]:
total=firstCurrency*s_curr_val[3]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[25])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[25].__class__.__name__
print s_curr_val[25].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[26]:
total=firstCurrency*s_curr_val[26]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[26])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[26].__class__.__name__
print s_curr_val[26].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[27]:
total=firstCurrency*s_curr_val[27]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[27])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[27].__class__.__name__
print s_curr_val[27].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[28]:
total=firstCurrency*s_curr_val[28]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[28])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[28].__class__.__name__
print s_curr_val[28].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[29]:
total=firstCurrency*s_curr_val[29]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[29])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[29].__class__.__name__
print s_curr_val[29].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[30]:
total=firstCurrency*s_curr_val[30]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[30])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[30].__class__.__name__
print s_curr_val[30].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[31]:
total=firstCurrency*s_curr_val[31]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[31])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[31].__class__.__name__
print s_curr_val[31].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[32]:
total=firstCurrency*s_curr_val[32]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[32])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[32].__class__.__name__
print s_curr_val[32].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[33]:
total=firstCurrency*s_curr_val[33]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[33])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[33].__class__.__name__
print s_curr_val[33].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[34]:
total=firstCurrency*s_curr_val[34]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[34])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[34].__class__.__name__
print s_curr_val[34].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[35]:
total=firstCurrency*s_curr_val[35]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[35])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[35].__class__.__name__
print s_curr_val[35].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[36]:
total=firstCurrency*s_curr_val[36]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[36])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[36].__class__.__name__
print s_curr_val[36].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[37]:
total=firstCurrency*s_curr_val[37]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[37])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[37].__class__.__name__
print s_curr_val[37].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[37]:
total=firstCurrency*s_curr_val[37]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[37])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[37].__class__.__name__
print s_curr_val[37].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[38]:
total=firstCurrency*s_curr_val[38]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[38])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[38].__class__.__name__
print s_curr_val[38].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[39]:
total=firstCurrency*s_curr_val[39]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[39])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[39].__class__.__name__
print s_curr_val[39].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[40]:
total=firstCurrency*s_curr_val[40]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[40])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[40].__class__.__name__
print s_curr_val[40].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[41]:
total=firstCurrency*s_curr_val[41]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[41])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[41].__class__.__name__
print s_curr_val[41].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[42]:
total=firstCurrency*s_curr_val[42]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[42])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[42].__class__.__name__
print s_curr_val[42].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[43]:
total=firstCurrency*s_curr_val[43]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[43])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[43].__class__.__name__
print s_curr_val[43].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[44]:
total=firstCurrency*s_curr_val[44]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[44])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[44].__class__.__name__
print s_curr_val[44].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[45]:
total=firstCurrency*s_curr_val[45]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[45])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[45].__class__.__name__
print s_curr_val[45].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[46]:
total=firstCurrency*s_curr_val[46]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[46])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[46].__class__.__name__
print s_curr_val[46].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[47]:
total=firstCurrency*s_curr_val[47]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[47])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[47].__class__.__name__
print s_curr_val[47].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[48]:
total=firstCurrency*s_curr_val[48]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[48])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[48].__class__.__name__
print s_curr_val[48].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[49]:
total=firstCurrency*s_curr_val[49]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[49])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[49].__class__.__name__
print s_curr_val[49].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[50]:
total=firstCurrency*s_curr_val[50]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[50])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[50].__class__.__name__
print s_curr_val[50].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[51]:
total=firstCurrency*s_curr_val[51]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[51])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[51].__class__.__name__
print s_curr_val[51].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[52]:
total=firstCurrency*s_curr_val[52]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[52])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[52].__class__.__name__
print s_curr_val[52].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[53]:
total=firstCurrency*s_curr_val[53]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[54])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[54].__class__.__name__
print s_curr_val[54].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[55]:
total=firstCurrency*s_curr_val[55]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[55])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[55].__class__.__name__
print s_curr_val[55].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[56]:
total=firstCurrency*s_curr_val[56]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[56])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[56].__class__.__name__
print s_curr_val[56].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[57]:
total=firstCurrency*s_curr_val[57]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[57])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[57].__class__.__name__
print s_curr_val[57].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[58]:
total=firstCurrency*s_curr_val[58]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[58])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[58].__class__.__name__
print s_curr_val[58].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[58]:
total=firstCurrency*s_curr_val[58]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[58])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[58].__class__.__name__
print s_curr_val[58].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[59]:
total=firstCurrency*s_curr_val[59]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[59])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[59].__class__.__name__
print s_curr_val[59].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[60]:
total=firstCurrency*s_curr_val[60]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[60])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[60].__class__.__name__
print s_curr_val[60].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[61]:
total=firstCurrency*s_curr_val[61]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[61])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[61].__class__.__name__
print s_curr_val[61].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[61]:
total=firstCurrency*s_curr_val[61]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[61])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[61].__class__.__name__
print s_curr_val[61].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[62]:
total=firstCurrency*s_curr_val[62]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[62])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[62].__class__.__name__
print s_curr_val[62].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[63]:
total=firstCurrency*s_curr_val[63]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[63])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[63].__class__.__name__
print s_curr_val[63].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[64]:
total=firstCurrency*s_curr_val[64]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[64])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[64].__class__.__name__
print s_curr_val[64].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[65]:
total=firstCurrency*s_curr_val[65]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[65])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[65].__class__.__name__
print s_curr_val[65].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[66]:
total=firstCurrency*s_curr_val[66]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[66])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[66].__class__.__name__
print s_curr_val[66].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[67]:
total=firstCurrency*s_curr_val[67]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[67])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[67].__class__.__name__
print s_curr_val[67].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[68]:
total=firstCurrency*s_curr_val[68]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[68])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[68].__class__.__name__
print s_curr_val[68].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
elif second_currency==s_currency[69]:
total=firstCurrency*s_curr_val[69]
total=float(total)
print("You will get "+str(total)+" "+str(s_currency[69])+"'s for "+str(firstCurrency)+" US Dollars")
print second_currency.__class__.__name__
print s_currency[69].__class__.__name__
print s_curr_val[69].__class__.__name__
print firstCurrency.__class__.__name__
print total.__class__.__name__
else:
print("Invalid input.. you suck..")
else:
print("Please enter a valid Unit Name or Symbol or enter 'Quit' to quit")
print("This is the closing Else statement that is displaying...")
print firstCurrency.__class__.__name__
print s_currency.__class__.__name__
|
import numpy as np
from scipy.linalg import expm, norm
from skimage import color
class RandomRotation:
    """Randomly rotate each frame of xyz point coordinates.

    Two rotations are composed: a main rotation of up to ``max_theta``
    degrees about ``axis`` (or a random axis when ``axis`` is None),
    followed by a small random "noise" rotation of up to 30 degrees
    about a second random axis.
    """

    def __init__(self, axis=None, max_theta=180):
        self.axis = axis            # fixed rotation axis, or None for a random one
        self.max_theta = max_theta  # maximum rotation angle in degrees

    def _M(self, axis, theta):
        """Rotation matrix for angle ``theta`` (radians) about ``axis``,
        via the matrix exponential of the cross-product matrix."""
        return expm(np.cross(np.eye(3), axis / norm(axis) * theta))

    @staticmethod
    def get_params():
        """Draw (axis_1, factor_1, axis_2, factor_2) uniform in [0, 1)."""
        return np.random.rand(3), np.random.rand(1), np.random.rand(3), np.random.rand(1)

    def __call__(self, frames):
        axis_1, factor_1, axis_2, factor_2 = self.get_params()
        if self.axis is not None:
            axis = self.axis
        else:
            axis = axis_1 - 0.5  # random axis, centered so it can point anywhere
        # Both rotation matrices are constant across frames; build them once
        # instead of per frame as the original did (identical results).
        R = self._M(axis, (np.pi * self.max_theta / 180) * 2 * (factor_1 - 0.5))
        R_n = self._M(axis_2 - 0.5, (np.pi * 30 / 180) * 2 * (factor_2 - 0.5))
        return [xyz @ R @ R_n for xyz in frames]
class RandomScale:
    """Scale every frame by one random factor drawn from [min, max)."""

    def __init__(self, min, max):
        # Store as (range, offset) so a factor in [0, 1) maps onto [min, max).
        self.scale = max - min
        self.bias = min

    @staticmethod
    def get_factor():
        """Uniform random factor in [0, 1), as a shape-(1,) array."""
        return np.random.rand(1)

    def __call__(self, frames):
        factor = self.get_factor()
        multiplier = self.scale * factor + self.bias
        return [xyz * multiplier for xyz in frames]
class RandomShear:
    """Shear frames with a random perturbation of the identity matrix."""

    @staticmethod
    def get_matrix():
        """Standard-normal 3x3 matrix."""
        return np.random.randn(3, 3)

    def __call__(self, frames):
        # T = I + 0.1 * N keeps the shear mild (factor 0.1, as originally tuned).
        T = np.eye(3) + 0.1 * self.get_matrix()
        return [xyz @ T for xyz in frames]
class RandomTranslation:
    """Translate every frame by one random offset scaled by ``scale``."""

    def __init__(self, scale):
        self.scale = scale  # magnitude multiplier for the random offset

    @staticmethod
    def get_factors():
        """Standard-normal offset of shape (1, 3)."""
        return np.random.randn(1, 3)

    def __call__(self, frames):
        offset = self.scale * self.get_factors()
        return [xyz + offset for xyz in frames]
class RandomGaussianNoise:
    """Add i.i.d. Gaussian noise to every color array."""

    def __init__(self, mean=0, var=0.001):
        self.mean = mean  # noise mean
        self.var = var    # noise variance (std = sqrt(var))

    def __call__(self, feats):
        std = self.var ** 0.5
        # Draw a fresh noise array per input (arrays may differ in shape).
        return [colors + np.random.normal(self.mean, std, colors.shape)
                for colors in feats]
class RandomValue:
    """Shift the HSV value (brightness) channel by one random offset."""

    def __init__(self, min=-0.2, max=0.2):
        # (range, offset) mapping of a [0, 1) draw onto [min, max).
        self.scale = max - min
        self.bias = min

    @staticmethod
    def get_offset():
        """Uniform random scalar in [0, 1)."""
        return np.random.rand()

    def __call__(self, feats):
        shift = self.scale * self.get_offset() + self.bias
        out_colors = []
        for colors in feats:
            hsv = color.rgb2hsv(colors)          # RGB -> HSV
            hsv[..., -1] += shift                # shift the value channel
            out_colors.append(color.hsv2rgb(hsv))  # HSV -> RGB
        return out_colors
class RandomSaturation:
    """Shift the HSV saturation channel by one random offset."""

    def __init__(self, min=-0.15, max=0.15):
        # (range, offset) mapping of a [0, 1) draw onto [min, max).
        self.scale = max - min
        self.bias = min

    @staticmethod
    def get_offset():
        """Uniform random scalar in [0, 1)."""
        return np.random.rand()

    def __call__(self, feats):
        shift = self.scale * self.get_offset() + self.bias
        out_colors = []
        for colors in feats:
            hsv = color.rgb2hsv(colors)          # RGB -> HSV
            hsv[:, 1] += shift                   # shift the saturation channel
            out_colors.append(color.hsv2rgb(hsv))  # HSV -> RGB
        return out_colors
|
class LFSR():
    """Linear feedback shift register driven by a tap polynomial.

    ``polyn`` is given with a leading coefficient; internally the taps are
    stored without it and reversed so they align index-for-index with the
    register state. ``set_state`` must be called before stepping.
    """
    def __init__(self, polyn):
        # Drop the leading coefficient, then reverse to align with state.
        self.polyn = polyn[1:][::-1]
    def step(self):
        """Advance one tick: emit the oldest bit, shift in the feedback bit."""
        feedback = 0
        for idx in range(len(self.polyn)):
            if self.polyn[idx] & self.state[idx] == 1:
                feedback ^= 1
        self.state.append(feedback)
        return self.state.pop(0)
    def gener_sequence(self, length):
        """Return the next ``length`` output bits."""
        return [self.step() for _ in range(length)]
    def set_state(self, state):
        """Seed the register contents (list of bits, oldest first)."""
        self.state = state
class GeffeGen():
    """Geffe generator combining three bit generators.

    ``l3`` acts as a selector: its bit chooses whether the output comes
    from ``l1`` (selector 1) or ``l2`` (selector 0).
    """
    def __init__(self, l1, l2, l3):
        self.l1, self.l2, self.l3 = l1, l2, l3
    def step(self):
        """Advance all three generators and return the combined output bit."""
        x = self.l1.step()
        y = self.l2.step()
        s = self.l3.step()
        # Multiplexer: x when s == 1, y when s == 0.
        return (s & x) ^ ((1 ^ s) & y)
    def set_state(self, st1, st2, st3):
        """Seed the states of the three underlying registers."""
        self.l1.state = st1
        self.l2.state = st2
        self.l3.state = st3
    def gen_sequence(self, length):
        """Return the next ``length`` output bits."""
        return [self.step() for _ in range(length)]
"""miniportal URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth import views as auth_views
from project import views as miniportal_views
from handover import views as handover_views
# Branding for the Django admin interface.
admin.site.site_header = "Miniportal Admin"
admin.site.site_title = "Miniportal site admin"
# admin.site.index_title = "Site administration "
# Top-level URL routing: app URLconfs are delegated via include().
urlpatterns = [
    url(r'^$', miniportal_views.home_miniportal , name='home'),  # landing page
    url(r'^project/', include('project.urls')),
    url(r'^handover/', include('handover.urls')),
    url(r'^asset/', include('asset.urls')),
    url(r'^admin/', admin.site.urls),
    # url(r'^accounts/login/$', auth_views.login, {'template_name': 'login.html'}, name='login'),
    url(r'^accounts/login/$', auth_views.login, name='login'),  # built-in auth login view
    url(r'^accounts/logout/$', auth_views.logout, {'next_page': '/'}, name='logout'),  # redirect to home after logout
    url(r'^jet/', include('jet.urls', 'jet')), # Django JET URLS
]
|
"""
These are search result related models.
"""
from dataclasses import dataclass, field
from typing import Optional, List
from .base import BaseModel
from .common import BaseApiResponse, BaseResource, Thumbnails
from .mixins import DatetimeTimeMixin
@dataclass
class SearchResultSnippet(BaseModel, DatetimeTimeMixin):
    """
    A class representing the search result snippet info.
    Refer: https://developers.google.com/youtube/v3/docs/search#snippet
    """
    publishedAt: Optional[str] = field(default=None, repr=False)  # creation time of the matched resource
    channelId: Optional[str] = field(default=None)  # channel that published the resource
    title: Optional[str] = field(default=None)  # title of the matched resource
    description: Optional[str] = field(default=None, repr=False)  # description of the matched resource
    thumbnails: Optional[Thumbnails] = field(default=None, repr=False)  # thumbnail images for the resource
    channelTitle: Optional[str] = field(default=None, repr=False)  # title of the publishing channel
    liveBroadcastContent: Optional[str] = field(default=None, repr=False)  # live-broadcast state of the resource (see API docs)
@dataclass
class SearchResultId(BaseModel):
    """
    A class representing the search result id info.
    Refer: https://developers.google.com/youtube/v3/docs/search#id
    """
    kind: Optional[str] = field(default=None)  # type of the matched resource
    videoId: Optional[str] = field(default=None, repr=False)  # set when the result is a video
    channelId: Optional[str] = field(default=None, repr=False)  # set when the result is a channel
    playlistId: Optional[str] = field(default=None, repr=False)  # set when the result is a playlist
@dataclass
class SearchResult(BaseResource):
    """
    A class representing the search result's info.
    Refer: https://developers.google.com/youtube/v3/docs/search
    """
    id: Optional[SearchResultId] = field(default=None, repr=False)  # identifies the matched resource
    snippet: Optional[SearchResultSnippet] = field(default=None, repr=False)  # basic details about the matched resource
@dataclass
class SearchListResponse(BaseApiResponse):
    """
    A class representing the search list response info.
    Refer: https://developers.google.com/youtube/v3/docs/search/list#response
    """
    regionCode: Optional[str] = field(default=None, repr=False)  # region code used for the search query
    items: Optional[List[SearchResult]] = field(default=None, repr=False)  # list of matched search results
|
# Generated by Django 2.2.4 on 2019-08-15 21:16
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the misspelled ``org_comparision`` field to ``org_comparison``."""

    dependencies = [
        ('responses', '0003_auto_20190815_2113'),
    ]
    operations = [
        # Column rename only; no data transformation is performed.
        migrations.RenameField(
            model_name='response',
            old_name='org_comparision',
            new_name='org_comparison',
        ),
    ]
|
from selenium import webdriver
import time
# Automated UI walkthrough: log in to the local SES app, dismiss the failure
# dialog, then open the exam-record tab and trigger the "add" toolbar action.
driver = webdriver.Chrome()
driver.get("http://localhost:8081/ses/public/index.html")
# Fill in the login credentials.
driver.find_element_by_xpath("//*[@id='login_form']/div[2]/div/input").send_keys("abc")
driver.find_element_by_xpath("//*[@id='login_form']/div[3]/div/input").send_keys("123")
driver.find_element_by_xpath("//*[@id='login_form']/div[6]/input[2]").click()
# NOTE(review): the submit button is clicked a second time -- presumably to
# work around a first-click quirk in the page; confirm this is intentional.
driver.find_element_by_xpath("//*[@id='login_form']/div[6]/input[2]").click()
time.sleep(10)
# Close the failure modal dialog.
driver.find_element_by_xpath("//*[@id='main_fail_modal_dialog']/div/div[3]/button").click()
time.sleep(5)
driver.find_element_by_xpath("//*[@id='exam_record_tab']").click()
time.sleep(5)
driver.find_element_by_xpath("//*[@id='exam_record_table_toolbar_add']").click()
driver.quit()
#!/home/raymond/missing/bin/python
# -*- coding: utf-8 -*-
#
# findDupes.py
#
# Copyright 2015 raymond
import os
import re
import itertools
import threading
import time
import guessit
from operator import itemgetter
from pytvdbapi import api
import itertools
import time
import sys
from pytvdbapi import api
import subprocess
import datetime
import pickle
done = False
def animate():
    """Render a console spinner until the module-level ``done`` flag is set.

    Runs on a worker thread (started by find_dupes_and_missing); the main
    thread signals completion by setting ``done = True``.
    """
    global done
    for c in itertools.cycle(['-', '/', '|', '\\']):
        if done:
            sys.stdout.flush()
            time.sleep(.5)
            sys.stdout.flush()
            break
        sys.stdout.write('\rFinding missing episodes and removing duplicates |' + c + "|")
        sys.stdout.flush()
        time.sleep(.5)
    #sys.stdout.write('\rDone! ')
def find_maks(name,seasonNum):
    """Return the number of the newest already-aired episode of ``name``
    season ``seasonNum`` according to TheTVDB, or -1 on any lookup failure.

    Walks the season's episodes from last to first and returns the first
    episode whose first-aired date is in the past.
    """
    db = api.TVDB('TVDB_API_KEY', ignore_case=True)
    result = db.search(name, "en")
    if len(result) > 0:
        show = result[0]  # default to the first search hit
    else:
        return -1  # show not found on TVDB
    # Hand-picked disambiguations for shows with several TVDB entries.
    if name.upper() == "Once Upon A Time".upper():
        for shows in result:
            if "2011" in shows.SeriesName:
                show = shows
            else:
                continue
    if name.upper() == "Archer".upper():
        for shows in result:
            if "2009" in shows.SeriesName:
                show = shows
            else:
                continue
    if name.upper() == "Legends".upper():
        for shows in result:
            if "2014" in shows.SeriesName:
                show = shows
            else:
                continue
    if name.upper() == "House of cards".upper():
        show = result[1]  # second search hit is assumed correct -- TODO confirm
    if len(show) >= seasonNum:
        season = show[seasonNum]
    else:
        return -1  # requested season does not exist on TVDB
    # Scan from the last episode backwards for the newest aired one.
    for i in range(len(season)):
        epiNumber = len(season)-i
        lastEP = season[epiNumber]
        lastEPDate = lastEP.FirstAired
        if isinstance(lastEPDate, datetime.date):
            today = datetime.datetime.today().date()
        else:
            return -1  # episode has no usable air date
        if today > lastEPDate:
            #print "found date for ",name,season," : ", lastEPDate,lastEP.EpisodeName,epiNumber
            return epiNumber
    print "should never happen"
    return -1
def remove_dupes(numbers, season):
    """Delete duplicate episode files from disk.

    ``numbers`` lists the episode numbers present on disk; ``season`` is a
    list of [series, season, episode, path] entries. For every episode
    number that occurs more than once, all but the last matching file is
    removed (the first count-1 occurrences in ``season`` order).

    Returns the list of deleted file paths.
    """
    counts = {}
    for number in numbers:
        counts[number] = counts.get(number, 0) + 1
    removed = []
    for number, count in counts.items():
        remaining = count - 1  # keep exactly one copy of each episode
        if remaining <= 0:
            continue
        for entry in season:
            if remaining > 0 and entry[2] == number:
                removed.append(entry[3])
                os.remove(entry[3])
                remaining -= 1
    return removed
def findMissingNumber3(array1, array2):
    """Return the elements of ``array1`` that are not present in ``array2``.

    Order (and duplicates) of ``array1`` are preserved. A set is built once
    for O(1) membership tests, replacing the original O(n*m) nested scan;
    results are identical for hashable elements (episode numbers are ints).
    """
    present = set(array2)
    return [number for number in array1 if number not in present]
def sizeof_fmt(num, suffix='B'):
    """Convert a byte count to a human readable string.

    :param num: number to convert to human readable
    :param suffix: unit suffix appended after the binary prefix, bytes default
    :return: human readable size, e.g. ``1.5KiB``
    """
    for prefix in ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi'):
        if abs(num) < 1024.0:
            return "%3.1f%s%s" % (num, prefix, suffix)
        num /= 1024.0
    # Anything past zebibytes falls through to yobibytes.
    return "%.1f%s%s" % (num, 'Yi', suffix)
def getLength(filename):
    """ find playtime of vide """
    # Ask mplayer to identify the file without rendering audio/video, then
    # grep its output for the ID_LENGTH line and parse the seconds value.
    # Returns 0 on any failure (missing mplayer, unparsable output, etc.).
    try:
        p1 = subprocess.Popen(["mplayer", "-vo", "dummy", "-ao", "dummy", "-identify",str(filename).encode('utf-8')],
        stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
        p2 = subprocess.Popen(["grep", "ID_LENGTH"], stdin=p1.stdout, stdout = subprocess.PIPE)
        p1.stdout.close()
        result = p2.communicate()[0]
        p1.wait()
        # ID_LENGTH=1234.56 -> 1234 (whole seconds)
        return int(float(result.split("=")[1].strip()))
    except Exception as e:
        print e
        return 0
def rename(filename):
    """If ``filename`` contains an SxxEyy tag, rename the file on disk to a
    double-episode tag SxxEyy-Ezz where zz = yy + 1.

    Returns ``"<old> : <new>"`` when a rename happened, otherwise ``""``.
    """
    match = re.search('S\d\dE\d\d', filename)
    if not match:
        return ""
    tag = match.group()
    ep_num = int(tag[-2:])
    double = "%02d-E%02d" % (ep_num, ep_num + 1)
    # Replace the SxxEyy tag with SxxEyy-Ezz in the full path.
    pattern = re.compile('S\d\dE\d\d')
    newname = pattern.subn(tag[0:-2] + double, filename)[0]
    #print "renaming to :", newname
    os.rename(filename, newname)
    return filename + " : " + newname
def missingLogic(season,missing):
    """Try to find double episodes of series. Used to go by filesize, now go by playtime"""
    # For each episode whose successor number is missing, compare playtimes:
    # if one file is roughly twice (within 15%) the length of its neighbour,
    # assume it is a double episode -- drop the number from ``missing``
    # (mutated in place) and rename the file to an SxxEyy-Ezz tag.
    # Returns (missing, newName) where newName lists "<old> : <new>" renames.
    newName = []
    for i,episode in enumerate(season,1):
        try:
            if i <= len(season):
                if (episode[2]+1) in missing and i != len(season):
                    ep1 = episode
                    ep2 = season[i]  # the next file in the season
                    len1 = getLength(ep1[3])/60
                    len2 = getLength(ep2[3])/60
                    # 15% tolerance window around double the playtime.
                    min = int((len1*2) - ((15 * len1) / 100.0))
                    max = int((len1*2) + ((15 * len1) / 100.0))
                    min2 = int((len2*2) - ((15 * len2) / 100.0))
                    max2 = int((len2*2) + ((15 * len2) / 100.0))
                    if min2 <= len1 <= max2:
                        missing.remove(episode[2]+1)
                        new = rename(episode[3])
                        if new != "":
                            newName.append(new)
                    elif min <= len2 <= max:
                        #print episode
                        #print season[i-1][2]
                        missing.remove(episode[2]+1)
                        new = rename(episode[3])
                        if new != "":
                            newName.append(new)
                elif (episode[2]+1) in missing and i == len(season):
                    # Last file of the season: compare against the previous one.
                    ep1 = episode
                    ep2 = season[i-2]
                    len1 = getLength(ep1[3])/60
                    len2 = getLength(ep2[3])/60
                    min = int((len1*2) - ((15 * len1) / 100.0))
                    max = int((len1*2) + ((15 * len1) / 100.0))
                    min2 = int((len2*2) - ((15 * len2) / 100.0))
                    max2 = int((len2*2) + ((15 * len2) / 100.0))
                    if min2 <= len1 <= max2:
                        missing.remove(episode[2]+1)
                        new = rename(episode[3])
                        if new != "":
                            newName.append(new)
        except Exception as e:
            print e
            continue
    return missing,newName
def logic(season,maks):
    """Analyse one season folder.

    ``season`` is a list of [series, season, episode, path] entries;
    ``maks`` is the expected number of episodes. Deletes duplicate files,
    then works out which episode numbers are missing (running the double-
    episode heuristic on them).

    Returns (missing, dupes, newName).
    """
    renamed = []
    # Episode numbers actually present on disk.
    present = [entry[2] for entry in season]
    # Expected numbering 1..maks; allow a 0 episode if any entry carries a 0.
    expected = list(range(1, maks + 1))
    if any(0 in entry for entry in season):
        expected.insert(0, 0)
    duplicates = remove_dupes(present, season)
    missing = findMissingNumber3(expected, present)
    if missing:
        missing, renamed = missingLogic(season, missing)
    return missing, duplicates, renamed
def saveFile(missing, path='/home/raymond/missing/missing.save'):
    """Pickle the list of missing episodes to ``path``.

    The default keeps the original hard-coded location, so existing callers
    are unaffected; the new parameter makes the destination configurable
    (and testable).
    """
    with open(path, 'wb') as f:
        pickle.dump(missing, f)
def find_dupes_and_missing():
    """Walk the series library, delete duplicate episode files, detect
    double episodes, and report (and trigger download of) missing episodes.

    Shows a console spinner on a worker thread while scanning.
    """
    # Series to skip entirely (matched case-insensitively).
    lowignore = ["moomi","adventure time with finn and jake","Adventure Time","HJØRDIS"]
    ignore = []
    newName = ""
    for word in lowignore:
        ignore.append(word.upper())
    all_missing = []
    dupes = []
    extensionsToCheck = ("mkv", "mp4", "avi")
    missing = []
    #print "Finding missing episodes and removing duplicates"
    t = threading.Thread(target=animate)
    t.start()
    #go through all files and folders:
    for root, dirs, files in os.walk('PATH/TO/SERIES'):
        missing = []
        newName = []
        if "SEASON" in root.upper():
            #print root
            season = []
            # Build [series, season, episode, path] entries from filenames.
            for fname in files:
                if str(fname).endswith(extensionsToCheck):
                    guess = guessit.guess_episode_info(fname)
                    if "episodeNumber" in guess and str(guess['series']).upper() not in ignore:
                        episode = [guess['series'], guess["season"], guess["episodeNumber"], os.path.join(root, fname)]
                        season.append(episode)
                        # Multi-episode files list extra numbers in episodeList;
                        # add one entry per additional number.
                        if "episodeList" in guess:
                            for number in guess['episodeList']:
                                if number != guess["episodeNumber"]:
                                    newEpisode = list(episode)
                                    newEpisode[2] = number
                                    #print newEpisode
                                    season.append(newEpisode)
            if len(season) > 0:
                season = sorted(season, key=itemgetter(2))
                try:
                    # NOTE(review): ``maks`` is only assigned when the folder
                    # holds more than one file; a single-file season leaves it
                    # carrying the previous folder's value -- confirm intended.
                    if len(season) > 1:
                        maks = find_maks(str(season[0][0]),season[0][1])#season[-1][2] # find number of episodes in a folder
                        actualMaks = find_maks(str(season[0][0]),season[0][1])
                        if actualMaks == -1:
                            actualMaks = maks
                except Exception as e:
                    print e
                if maks != actualMaks:
                    missing,dupes,newName = logic(season,maks)
                    # Episodes beyond ``maks`` up to the aired maximum are missing.
                    for i in range(maks+1,actualMaks+1):
                        missing.append(i)
                if (len(season) != maks):
                    missing,dupes,newName = logic(season,maks)
                if len(missing) > 0:
                    sname = season[0][0]+" S%02d" % (season[0][1])
                    all_missing.append([sname,missing])
    # Stop the spinner thread before printing the report.
    global done
    done = True
    time.sleep(1)
    print
    if len(dupes) > 0:
        print "Found duplicated episode(s): "
        for ep in dupes:
            print "removed ",ep
    if len(newName) > 0:
        print "Found double episode(s): "
        for ep in newName:
            oldnew = ep.split(":")
            print "Rewrote ", oldnew[0]
    if len(all_missing) > 0:
        print "Found missing episodes: "
        for liste in all_missing:
            print liste[0]
            for number in liste[1]:
                print number,
            print
        # Persist the report, then hand off to the downloader script.
        saveFile(all_missing)
        print "Trying to download missing episodes"
        os.system("/path/to/torrent.py")
    else:
        print "No missing episodes found :)"
def main():
    """Entry point: run the scan, translating Ctrl+C into a clean exit."""
    global done
    try:
        find_dupes_and_missing()
    except KeyboardInterrupt:
        # Stop the spinner thread before exiting.
        global done
        done = True
        time.sleep(1)
        print
        print "############ got controll + c, exiting ############"
        sys.exit(1)
    except Exception as e:
        print "Unexpected error:", e
        #time.sleep(2)
        sys.exit(1)
    return 0
if __name__ == '__main__':
    main()
|
"""ImageLocation class.
ImageLocation is the pre-octree Image class's ChunkLocation. When we request
that the ChunkLoader load a chunk, we use this ChunkLocation to identify
the chunk we are requesting and once it's loaded.
"""
import numpy as np
from napari.components.experimental.chunk import ChunkLocation, LayerRef
from napari.layers import Layer
def get_data_id(data) -> int:
    """Return the data_id to use for this layer.

    Parameters
    ----------
    data
        Get the data_id for this data.

    Notes
    -----
    We key on the identity of the data itself rather than the layer, so
    that swapping a layer's data out from under it cannot make us serve
    chunks from the wrong array.
    """
    if not isinstance(data, list):
        return id(data)  # not multi-scale: identify the data directly
    assert data  # data should not be empty for image layers
    return id(data[0])  # multi-scale: identify by the level-0 array
class ImageLocation(ChunkLocation):
    """The hashable location of a chunk within an image layer.

    Attributes
    ----------
    data_id : int
        Identity of the data in the layer (see ``get_data_id``).
    data_level : int
        The level in the data (for multi-scale).
    indices
        The indices of the slice.
    """

    def __init__(self, layer: Layer, indices) -> None:
        super().__init__(LayerRef.from_layer(layer))
        self.data_id: int = get_data_id(layer.data)
        self.data_level: int = layer._data_level
        self.indices = indices

    def __str__(self):
        return f"location=({self.data_id}, {self.data_level}, {self.indices}) "

    def __eq__(self, other) -> bool:
        if not super().__eq__(other):
            return False
        if self.data_id != other.data_id:
            return False
        if self.data_level != other.data_level:
            return False
        return self._same_indices(other)

    def _same_indices(self, other) -> bool:
        """Return True if this location has same indices as the other location.

        Returns
        -------
        bool
            True if indices are the same.
        """
        # TODO_OCTREE: Why is this sometimes ndarray and sometimes not?
        # We should normalize when the ImageLocation is constructed?
        ours = self.indices
        if isinstance(ours, np.ndarray):
            return (ours == other.indices).all()  # elementwise -> single bool
        return ours == other.indices

    def __hash__(self) -> int:
        """Return hash of this location.

        Returns
        -------
        int
            The hash of the location.
        """
        key = (
            self.layer_ref.layer_id,
            self.data_id,
            self.data_level,
            _flatten(self.indices),  # slices are unhashable; flatten first
        )
        return hash(key)
def _flatten(indices) -> tuple:
"""Return a flat tuple of integers to represent the indices.
Slice objects are not hashable, so we convert them.
"""
result = []
for x in indices:
if isinstance(x, slice):
result.extend([x.start, x.stop, x.step])
else:
result.append(x)
return tuple(result)
|
import ast
import socket
import chatlib
import random
SERVER_IP = "127.0.0.1"
SERVER_PORT = 5678
def build_and_send_message(conn, code, data):
    """Build a protocol message with chatlib and write it to the socket."""
    conn.send(chatlib.build_message(code, data).encode())
def recv_message_and_parse(conn):
    """Read up to 1024 bytes from the socket and return (cmd, data)."""
    raw = conn.recv(1024).decode()
    return chatlib.parse_message(raw)
def connect():
    """Open a TCP connection to the trivia server.

    Returns
    -------
    socket.socket
        The connected socket.

    Uses the module-level SERVER_IP / SERVER_PORT constants instead of
    repeating the literals inline (the constants were defined but ignored).
    """
    my_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    my_socket.connect((SERVER_IP, SERVER_PORT))
    return my_socket
def error_and_exit(error_msg):
    """Report *error_msg* and terminate the client with a nonzero status.

    The original called ``error_msg.quit()``, which raises AttributeError
    on a string. SystemExit prints the message to stderr and exits with
    status 1, without needing an extra ``sys`` import.
    """
    raise SystemExit(error_msg)
def login(conn):
    """Prompt for credentials until the server accepts them.

    Returns
    -------
    str
        The username that logged in successfully.
    """
    while True:
        username = input("Please enter username: \n")
        password = input("Please enter your password: \n")
        credentials = username + chatlib.DATA_DELIMITER + password
        build_and_send_message(conn, chatlib.PROTOCOL_CLIENT["login_msg"], credentials)
        cmd, payload = recv_message_and_parse(conn)
        if cmd == chatlib.PROTOCOL_SERVER["login_ok_msg"]:
            print("Logged in!")
            return username
        if cmd == chatlib.PROTOCOL_SERVER["login_failed_msg"]:
            print(payload)  # show the server's failure reason, then re-prompt
def logout(conn):
    """Tell the server we are leaving, then say goodbye locally."""
    build_and_send_message(conn, chatlib.PROTOCOL_CLIENT["logout_msg"], "")
    print("Good Bye!")
def build_send_recv_parse(conn, msg_code, data):
    """Send one request and return the parsed reply.

    Parameters
    ----------
    conn : socket
        The connected socket.
    msg_code : str
        Protocol code identifying the request.
    data : str
        The message payload to send.

    Returns
    -------
    tuple
        (cmd, data) as parsed from the server's reply.
    """
    build_and_send_message(conn, msg_code, data)
    return recv_message_and_parse(conn)
def get_my_score(conn, username):
    """Ask the server for *username*'s score and print it."""
    reply = build_send_recv_parse(conn, chatlib.PROTOCOL_CLIENT["get_score"], username)
    if reply == (None, None):
        print("Error !")
    else:
        print(f"Your score is: {int(reply[1])} ")
def get_high_score(conn):
    """Print the score table received from the server, highest first.

    Parameters
    ----------
    conn : socket
        The connected socket.

    Returns
    -------
    None
    """
    reply = build_send_recv_parse(conn, chatlib.PROTOCOL_CLIENT["get_high_score"], "")
    # The payload is a Python-literal list of (name, score) pairs.
    entries = list(ast.literal_eval(reply[1]))
    for entry in entries:
        print(entry[0], ':', entry[1])
def print_ans(answers):
    """Print the numbered answer options followed by the quit hint.

    Parameters
    ----------
    answers : sequence of str
        The possible answers, displayed as 1..N.
    """
    for i, ans in enumerate(answers, start=1):
        print(i, "-", ans)
    # The original used accidental adjacent-string concatenation
    # (''9''' collapsed into a bare 9); the 9 is meant to be quoted.
    print("\n To quit press '9'")
def play_question(conn, username):
    """Fetch the question bank from the server and quiz the user.

    A correct answer removes the question from the pool and asks the
    server to update the user's score; entering 9 quits the loop.

    Parameters
    ----------
    conn : socket
        The connected socket.
    username : str
        User whose score is updated on correct answers.

    Returns
    -------
    None
    """
    response = build_send_recv_parse(conn, chatlib.PROTOCOL_CLIENT["get_question"], "")
    # ast.literal_eval replaces eval(): the payload arrives over the
    # network, so arbitrary-code evaluation was a code-injection hole.
    questions_dict = ast.literal_eval(response[1])
    questions_list = list(questions_dict.values())
    while True:
        if len(questions_list) == 0:
            print("Sorry, No more question for you genius!")
            break
        current_question = random.choice(questions_list)
        print(current_question['question'])
        print_ans(current_question['answers'])
        user_ans = int(input("Choose your answer [1-4] "))
        if user_ans == 9:
            break
        if user_ans == int(current_question['correct']):
            print("Yes! another question..\n")
            questions_list.remove(current_question)
            build_send_recv_parse(conn, chatlib.PROTOCOL_CLIENT["get_update_score"], username)
        else:
            print(f"Wrong! the answer is: {current_question['correct']}")
def get_logged_users(conn):
    """Ask the server for the logged-in user list and print each entry."""
    reply = build_send_recv_parse(conn, chatlib.PROTOCOL_CLIENT["get_users"], "")
    for user in list(ast.literal_eval(reply[1])):
        print(f'Connected users are: {user}')
def main():
    """Create the socket, log in, and run the menu loop until Quit.

    Returns
    -------
    None
    """
    my_socket = connect()
    username = login(my_socket)
    menu_options = ["Play a trivia question", "My score", "Score Table", "Logged users", "Quit"]
    while True:
        print("\nMAIN MENU:\n")
        for i, item in enumerate(menu_options):
            print(str(i+1) + "." + " " + item)
        print("\n")
        try:
            select = int(input("Please enter your choice: "))
        except ValueError:
            # A non-numeric choice used to crash the client; re-prompt instead.
            continue
        if select == 1:
            play_question(my_socket, username)
        elif select == 2:  # was a bare `if`, inconsistent with the sibling branches
            get_my_score(my_socket, username)
        elif select == 3:
            get_high_score(my_socket)
        elif select == 4:
            get_logged_users(my_socket)
        elif select == 5:
            logout(my_socket)
            break
if __name__ == '__main__':
    main()
|
import test13
from tornado.options import options, define
def aaa():
    # Print the tornado-configured port option, then a literal constant.
    # NOTE(review): options.port must have been define()d elsewhere -- TODO confirm.
    print options.port
    print 12
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Small demo of the abs() and sum() builtins.
x = abs(100)        # abs of a positive int is the int itself
y = abs(-1e-7)      # abs of a tiny negative float
print(x)
print(y)
print(x, y)
summ = sum([1e-4, 2, 3])    # builtin sum over a mixed int/float list
print('sum1,2,3=', summ)
|
from fnp.ml.token_classifier import TokenClassifier
from fnp.ml.token_classifier_multihead import TokenClassifierMultiHead
from argparse import ArgumentParser
def classify_from_args(args):
    """Instantiate the classifier selected by ``args.classifier`` and train it.

    "singlehead" maps to TokenClassifier; anything else gets the
    multi-head variant.
    """
    cls = TokenClassifier if args.classifier == "singlehead" else TokenClassifierMultiHead
    classifier = cls(args)
    classifier.train()
if __name__ == "__main__":
    # CLI entry point: build the argument parser and dispatch to the
    # selected token classifier.
    arg_parser = ArgumentParser()
    # --- data files and runtime basics ---
    arg_parser.add_argument("-t", "--train", dest="train_file",
                            help="path of the train file", required=True)
    arg_parser.add_argument("-v", "--validation", dest="validation_file",
                            help="path of the validation file", required=True)
    arg_parser.add_argument("-d", "--device", dest="device",
                            help="device_name", default="cuda")
    arg_parser.add_argument("-b", "--batch_size", dest="batch_size",
                            help="batch size", default="4", type=int)
    arg_parser.add_argument("-e", "--epochs", dest="epochs",
                            help="number of epochs", default="4", type=int)
    arg_parser.add_argument("-m", "--model_out", dest="model_out",
                            help="model saving path", default=None)
    # --- model selection ---
    arg_parser.add_argument("-c", "--classifier", dest="classifier",
                            help="classifier type name [singlehead, multihead]", default="multihead")
    arg_parser.add_argument("-s", "--seed", dest="seed",
                            help="seed", default=42, type=int)
    arg_parser.add_argument("-l", "--max_len", dest="max_len",
                            help="max length of tokenized instances", default=510, type=int)
    arg_parser.add_argument("-n", "--model_name", dest="model_name",
                            help="name of bert model", default="bert-base-uncased")
    arg_parser.add_argument("-f", "--n_heads", dest="n_heads",
                            help="number of heads", default=1, type=int)
    arg_parser.add_argument("-o", "--out_file", dest="out_file",
                            help="output file path", default=None)
    arg_parser.add_argument("--lt", "--loss_type", dest="loss_type",
                            help="lost function: standard|change_count|change_count_square|change_count_square_rev",
                            default="standard")
    # --- multi-head configuration ---
    arg_parser.add_argument("--n_left_shifted_heads", dest="n_left_shifted_heads",
                            help="number of left shifted heads", default=0, type=int)
    arg_parser.add_argument("--n_right_shifted_heads", dest="n_right_shifted_heads",
                            help="number of right shifted heads", default=0, type=int)
    arg_parser.add_argument("--n_cause_heads", dest="n_cause_heads",
                            help="number of cause heads", default=0, type=int)
    arg_parser.add_argument("--n_effect_heads", dest="n_effect_heads",
                            help="number of effect heads", default=0, type=int)
    arg_parser.add_argument("--dropout_prob", dest="dropout_prob",
                            help="Dropout probability of the heads. "
                            "You can specify independent dropout probability for each head. "
                            "The number of parameters must be one or equal with the number of heads. "
                            "If the number of parameters is one, all head will use that value."
                            "E.g.: --dropout_prob 0.3 \n"
                            "--dropout_prob 0.1 0.3 0.5 0.6 0.7 # if the number of heads is 5",
                            nargs='+', default=[0.1], type=float)
    arg_parser.add_argument("--after_dropout_prob", dest="after_dropout_prob",
                            help="same as in dropout_probs in the top of the heads",
                            nargs='+', default=[0.0], type=float)
    # --- BERT layer selection (the two options are mutually exclusive) ---
    arg_parser.add_argument('--selected_layer', "--sl", dest="selected_layers", nargs='+', type=int,
                            help="use the selected layers of bert."
                            "Layer 0: embedding layer, Layer 12: output of the bert (base)."
                            "Each head will get the concatenation of the selected layers."
                            "Cannot use with --selected_layer_by_heads."
                            "Usage:\n --sl 5 8 10")
    arg_parser.add_argument('--selected_layers_by_heads', "--slh", dest="selected_layers_by_heads", nargs='+', type=int,
                            help="use the selected layers of bert."
                            "Layer 0: embedding layer, Layer 12: output of the bert (base). "
                            "The number of layers should be the same as the number of heads. "
                            "You can add concatenated layers by using the \"_\" character."
                            "Cannot use with --selected_layer."
                            "Usage:\n --slh 6 6 8 12 6_12")
    arg_parser.add_argument("--head_type", dest="head_type",
                            help="type of head: base/base_shared/base_relu/multi_layer", default="base")
    arg_parser.add_argument("--aggregation_type", dest="aggregation_type",
                            help="The method of the aggregation of different heads."
                            "If the sum_independent (default) selected the losses are calculated independently on "
                            "each head. In every other cases the loss is calculated after on the aggregated data."
                            "Possible values: sum_independent/sum/linear/attention/hidden_state_attention", default="sum_independent")
    arg_parser.set_defaults(merge_series=False)
    args = arg_parser.parse_args()
    classify_from_args(args)
|
from spack import *
import sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../common'))
from scrampackage import write_scram_toolfile
class UuidToolfile(Package):
    # Spack package whose only job is to render a SCRAM tool file for
    # libuuid (macOS) or the CMS-packaged uuid (elsewhere).

    # Dummy payload: the package downloads nothing useful.
    url = 'file://' + os.path.dirname(__file__) + '/../../common/junk.xml'
    version('1.0', '68841b7dcbd130afd7d236afe8fd5b949f017615', expand=False)
    # Pick the uuid provider by platform at class-definition time.
    if sys.platform == 'darwin':
        depends_on('libuuid')
    else:
        depends_on('uuid-cms')

    def install(self, spec, prefix):
        # Render the tool-file template with the resolved version/prefix
        # of whichever uuid dependency was selected above.
        values = {}
        if sys.platform == 'darwin':
            values['VER'] = spec['libuuid'].version
            values['PFX'] = spec['libuuid'].prefix
        else:
            values['VER'] = spec['uuid-cms'].version
            values['PFX'] = spec['uuid-cms'].prefix
        fname = 'uuid-cms.xml'
        # $VER/$PFX are substituted from `values`; $$ escapes a literal $.
        contents = str("""<tool name="libuuid" version="$VER">
<lib name="uuid"/>
<client>
<environment name="LIBUUID_BASE" default="$PFX"/>
<environment name="LIBDIR" default="$$LIBUUID_BASE/lib"/>
<environment name="INCLUDE" default="$$LIBUUID_BASE/include"/>
</client>
<runtime name="ROOT_INCLUDE_PATH" value="$$INCLUDE" type="path"/>
<use name="root_cxxdefaults"/>
<use name="sockets"/>
</tool>""")
        write_scram_toolfile(contents, values, fname, prefix)
|
from flask import Blueprint
from flask import jsonify
from shutil import copyfile, move
from google.cloud import storage
from google.cloud import bigquery
from flask import request
import dataflow_pipeline.ucc2.sms_beam as sms_beam
import dataflow_pipeline.ucc2.sms_opc1_beam as sms_opc1_beam
import dataflow_pipeline.ucc2.Apertura_beam as Apertura_beam
import dataflow_pipeline.ucc2.Rebote_beam as Rebote_beam
import dataflow_pipeline.ucc2.Base_correo_beam as Base_correo_beam
import dataflow_pipeline.ucc2.Doble_via_beam as Doble_via_beam
import os
import socket
import time
import procesos.descargas as descargas
# Blueprint grouping all UCC ingestion endpoints.
ucc_api = Blueprint('ucc_api', __name__)
# Base path of the file server: local mount when running on the
# "contentobi" host, otherwise the UNC path to the remote share.
fileserver_baseroute = ("//192.168.20.87", "/media")[socket.gethostname()=="contentobi"]
@ucc_api.route("/mensajes")
def mensajes():
    """Load pending SMS result CSVs into BigQuery via a Dataflow/Beam job.

    For every .csv in the fileserver folder: upload it to Cloud Storage,
    delete any previous BigQuery rows for the same campaign, run the Beam
    job, and archive the file on success.

    Returns a JSON (response, status) pair; 400 if no file was processed.
    """
    response = {"code": 400, "description": "No se encontraron ficheros", "status": False}
    local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Ucc/Mensajes/Resultado/"
    for archivo in os.listdir(local_route):
        if not archivo.endswith(".csv"):
            continue
        mifecha = archivo  # campaign key is the file name (archivo[0:] was a no-op copy)
        # Upload the file to Cloud Storage so Dataflow can read it.
        bucket = storage.Client().get_bucket('ct-ucc')
        bucket.blob('info-sms/' + archivo).upload_from_filename(local_route + archivo)
        # Remove previous rows for this campaign before reloading.
        # NOTE(review): query built by concatenating a file name; prefer
        # BigQuery query parameters if file names are not fully trusted.
        deleteQuery = "DELETE FROM `contento-bi.ucc.SMS_V3` WHERE campana = '" + mifecha + "'"
        client = bigquery.Client()
        client.query(deleteQuery).result()  # block until the delete finishes (was called twice)
        # Run the Beam job against the uploaded copy.
        mensaje = sms_beam.run('gs://ct-ucc/info-sms/' + archivo, mifecha)
        if mensaje == "Corrio Full HD":
            move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Ucc/Mensajes/Resultado/Procesados/"+archivo)
            response["code"] = 200
            response["description"] = "Se realizo el cargue correctamente"
            response["status"] = True
    return jsonify(response), response["code"]
############################## ESTRUCTURA SMS NUEVA ####################################
@ucc_api.route("/sms1")
def sms1():
    """Load pending sms_opc1 CSVs into BigQuery via a Dataflow/Beam job.

    Same upload / delete / run / archive cycle as mensajes(), against the
    sms_opc1 folder and table.
    """
    response = {"code": 400, "description": "No se encontraron ficheros", "status": False}
    local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Ucc/Mensajes/sms_opc1/"
    for archivo in os.listdir(local_route):
        if not archivo.endswith(".csv"):
            continue
        mifecha = archivo  # campaign key is the file name
        # Upload the file to Cloud Storage so Dataflow can read it.
        bucket = storage.Client().get_bucket('ct-ucc')
        bucket.blob('info-sms/' + archivo).upload_from_filename(local_route + archivo)
        # Remove previous rows for this campaign before reloading.
        # NOTE(review): concatenated query -- prefer query parameters.
        deleteQuery = "DELETE FROM `contento-bi.ucc.sms_opc1` WHERE campana = '" + mifecha + "'"
        client = bigquery.Client()
        client.query(deleteQuery).result()  # block until the delete finishes (was called twice)
        # Run the Beam job against the uploaded copy.
        mensaje = sms_opc1_beam.run('gs://ct-ucc/info-sms/' + archivo, mifecha)
        if mensaje == "Corrio Full HD":
            move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Ucc/Mensajes/sms_opc1/Procesados/"+archivo)
            response["code"] = 200
            response["description"] = "Se realizo el cargue correctamente"
            response["status"] = True
    return jsonify(response), response["code"]
########################## ESTRUCTURA NUEVA CORREO APERTURA ###################################
@ucc_api.route("/apertura")
def apertura():
    """Load mail-open (apertura) CSVs into BigQuery via a Dataflow/Beam job.

    Same upload / delete / run / archive cycle as mensajes(), against the
    Email/Apertura folder and Correo_Apertura table.
    """
    response = {"code": 400, "description": "No se encontraron ficheros", "status": False}
    local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Ucc/Email/Apertura/"
    for archivo in os.listdir(local_route):
        if not archivo.endswith(".csv"):
            continue
        mifecha = archivo  # campaign key is the file name
        # Upload the file to Cloud Storage so Dataflow can read it.
        bucket = storage.Client().get_bucket('ct-ucc')
        bucket.blob('Mailing/' + archivo).upload_from_filename(local_route + archivo)
        # Remove previous rows for this campaign before reloading.
        # NOTE(review): concatenated query -- prefer query parameters.
        deleteQuery = "DELETE FROM `contento-bi.ucc.Correo_Apertura` WHERE campana = '" + mifecha + "'"
        client = bigquery.Client()
        client.query(deleteQuery).result()  # block until the delete finishes (was called twice)
        # Run the Beam job against the uploaded copy.
        mensaje = Apertura_beam.run('gs://ct-ucc/Mailing/' + archivo, mifecha)
        if mensaje == "Corrio Full HD":
            move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Ucc/Email/Apertura/Procesados/"+archivo)
            response["code"] = 200
            response["description"] = "Se realizo el cargue correctamente"
            response["status"] = True
    return jsonify(response), response["code"]
########################## ESTRUCTURA NUEVA CORREO REBOTE ###################################
@ucc_api.route("/rebote")
def rebote():
    """Load mail-bounce (rebote) CSVs into BigQuery via a Dataflow/Beam job.

    Same upload / delete / run / archive cycle as mensajes(), against the
    Email/Rebote folder and Correo_Rebote table.
    """
    response = {"code": 400, "description": "No se encontraron ficheros", "status": False}
    local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Ucc/Email/Rebote/"
    for archivo in os.listdir(local_route):
        if not archivo.endswith(".csv"):
            continue
        mifecha = archivo  # campaign key is the file name
        # Upload the file to Cloud Storage so Dataflow can read it.
        bucket = storage.Client().get_bucket('ct-ucc')
        bucket.blob('Mailing/' + archivo).upload_from_filename(local_route + archivo)
        # Remove previous rows for this campaign before reloading.
        # NOTE(review): concatenated query -- prefer query parameters.
        deleteQuery = "DELETE FROM `contento-bi.ucc.Correo_Rebote` WHERE campana = '" + mifecha + "'"
        client = bigquery.Client()
        client.query(deleteQuery).result()  # block until the delete finishes (was called twice)
        # Run the Beam job against the uploaded copy.
        mensaje = Rebote_beam.run('gs://ct-ucc/Mailing/' + archivo, mifecha)
        if mensaje == "Corrio Full HD":
            move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Ucc/Email/Rebote/Procesados/"+archivo)
            response["code"] = 200
            response["description"] = "Se realizo el cargue correctamente"
            response["status"] = True
    return jsonify(response), response["code"]
########################## ESTRUCTURA NUEVA BASE CORREO ###################################
@ucc_api.route("/correo")
def correo():
    """Load mailing-base CSVs into BigQuery via a Dataflow/Beam job.

    Same upload / delete / run / archive cycle as mensajes(), against the
    Email/Base folder and Base_Correo table.
    """
    response = {"code": 400, "description": "No se encontraron ficheros", "status": False}
    local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Ucc/Email/Base/"
    for archivo in os.listdir(local_route):
        if not archivo.endswith(".csv"):
            continue
        mifecha = archivo  # campaign key is the file name
        # Upload the file to Cloud Storage so Dataflow can read it.
        bucket = storage.Client().get_bucket('ct-ucc')
        bucket.blob('Mailing/' + archivo).upload_from_filename(local_route + archivo)
        # Remove previous rows for this campaign before reloading.
        # NOTE(review): concatenated query -- prefer query parameters.
        deleteQuery = "DELETE FROM `contento-bi.ucc.Base_Correo` WHERE campana = '" + mifecha + "'"
        client = bigquery.Client()
        client.query(deleteQuery).result()  # block until the delete finishes (was called twice)
        # Run the Beam job against the uploaded copy.
        mensaje = Base_correo_beam.run('gs://ct-ucc/Mailing/' + archivo, mifecha)
        if mensaje == "Corrio Full HD":
            move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Ucc/Email/Base/Procesados/"+archivo)
            response["code"] = 200
            response["description"] = "Se realizo el cargue correctamente"
            response["status"] = True
    return jsonify(response), response["code"]
############################### ESTRUCTURA NUEVA MENSAJE DOBLE VIA ##################################
@ucc_api.route("/doble_via")
def doble_via():
    """Load two-way SMS CSVs into BigQuery via a Dataflow/Beam job.

    Same upload / delete / run / archive cycle as mensajes(), against the
    Mensajes/Doble_via folder and sms_doble_via table.
    """
    response = {"code": 400, "description": "No se encontraron ficheros", "status": False}
    local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Ucc/Mensajes/Doble_via/"
    for archivo in os.listdir(local_route):
        if not archivo.endswith(".csv"):
            continue
        mifecha = archivo  # campaign key is the file name
        # Upload the file to Cloud Storage so Dataflow can read it.
        bucket = storage.Client().get_bucket('ct-ucc')
        bucket.blob('sms_doble_via/' + archivo).upload_from_filename(local_route + archivo)
        # Remove previous rows for this campaign before reloading.
        # NOTE(review): concatenated query -- prefer query parameters.
        deleteQuery = "DELETE FROM `contento-bi.ucc.sms_doble_via` WHERE campana = '" + mifecha + "'"
        client = bigquery.Client()
        client.query(deleteQuery).result()  # block until the delete finishes (was called twice)
        # Run the Beam job against the uploaded copy.
        mensaje = Doble_via_beam.run('gs://ct-ucc/sms_doble_via/' + archivo, mifecha)
        if mensaje == "Corrio Full HD":
            move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Ucc/Mensajes/Doble_via/Procesados/"+archivo)
            response["code"] = 200
            response["description"] = "Se realizo el cargue correctamente"
            response["status"] = True
    return jsonify(response), response["code"]
#----------------------------------------------------------------------------------------------------#
#                            DESCARGA DE DATOS DE AGENT SCRIPT                                       #
#----------------------------------------------------------------------------------------------------#
# Blueprint exposing the Agent Script CSV download endpoint below.
descarga_agent_script_blueprint = Blueprint('descarga_agent_script_blueprint', __name__)
@descarga_agent_script_blueprint.route('/descargar', methods =['POST','GET'])
def DescargaDatosAgentScript():
    """Stream the Agent Script rows between ?desde and ?hasta as a CSV download."""
    dateini = request.args.get('desde')
    dateend = request.args.get('hasta')
    myRoute = '/BI_Archivos/GOOGLE/Ucc/Tipificador/'+dateini+'_'+dateend+'.csv'
    # Fixed: the original concatenation produced `BETWEEN"a"AND"b"` with no
    # separating spaces around the keywords.
    # WARNING(review): dateini/dateend come straight from the request and
    # are interpolated into SQL -> injection risk. Validate them as ISO
    # dates or move descargas.descargar_csv to parameterized queries.
    myQuery = ('SELECT * FROM `contento-bi.telefonia_vistas.Vista_Agent_Script_UCC` '
               'WHERE FECHA BETWEEN "{0}" AND "{1}"'.format(dateini, dateend))
    myHeader = ["FECHA","HORA","ID_AGENTE","NOMBRE","INTERES","CANAL","CEDULA","CAMPUS","TIPO_PROGRAMA","PROGRAMA","CELULAR","FIJO","EMAIL","CODIGO_1","CODIGO_2","CODIGO_3","CODIGO_2A","CODIGO_3A","CODIGO_2B","CODIGO_3B","ID_LLAMADA"]
    return descargas.descargar_csv(myRoute, myQuery, myHeader)
import numpy
import argparse
import patchbatch
import glob
import kittitool
import pb_utils as utils
def bench_kitti(images_path, GT_path, model_name, patch_size, batch_size):
    """ Used for easily benchmarking kitti, using kitti's file structure
        images_path - images path, with image pairs looking like: 000000_10.png, 000000_11.png for example
        GT_path - ground truth path, looking like 000000_10.png
        model_name - PatchBatch model to use
        Returns nothing, prints results to screen """
    # Pairs are interleaved in the sorted listing: even indices are first
    # frames (_10), odd indices are second frames (_11).
    images_list = sorted(glob.glob(images_path + '/*.png'))
    gt_list = sorted(glob.glob(GT_path + '/*.png'))
    all_avg_err = []
    all_perc_above_tau = []
    for img1_filename, img2_filename, gt_filename in zip(images_list[::2], images_list[1::2], gt_list):
        print 'Analyzing', img1_filename.split('/')[-1], img2_filename.split('/')[-1]
        gt_flow = kittitool.flow_read(gt_filename)
        # Positional args per patchbatch.calc_flow; last False presumably
        # disables debug output -- TODO confirm against patchbatch.
        pb_flow = patchbatch.calc_flow(img1_filename, img2_filename, model_name, None, patch_size, batch_size, False)
        avg_err, perc_above_tau = utils.benchmark_flow(pb_flow, gt_flow, debug=False)
        all_avg_err.append(avg_err)
        all_perc_above_tau.append(perc_above_tau)
    print '--- Aggregated: avg_err %.2f perc_above_tau %.2f' % (numpy.mean(all_avg_err), numpy.mean(all_perc_above_tau)*100)
def main(patch_size=51, batch_size=256):
    """Parse CLI arguments and run the KITTI benchmark.

    The patch_size/batch_size defaults are overridden for SPCI model
    variants below.
    """
    parser = argparse.ArgumentParser(description = 'PatchBatch KITTI benchmark pipeline')
    parser.add_argument('images_path', help = 'input images path')
    parser.add_argument('gt_path', help = 'ground truth path')
    parser.add_argument('model_name', help = 'PatchBatch Model Name')
    parser = parser.parse_args()  # NOTE: rebinds `parser` to the parsed namespace
    if 'SPCI' in parser.model_name:
        # SPCI variants presumably use a larger patch / smaller batch -- TODO confirm
        patch_size = 71
        batch_size = 255
    bench_kitti(parser.images_path, parser.gt_path, parser.model_name, patch_size, batch_size)
if __name__ == '__main__':
    main()
|
class Hashtabell:
    """Minimal name -> atom mapping backed by a plain dict.

    `antalElement` (expected element count) is accepted for interface
    compatibility but ignored: a Python dict needs no pre-sizing.
    """

    def __init__(self, antalElement):
        self.h = dict()

    def get(self, namn):
        """Return the atom stored under *namn* (raises KeyError if absent)."""
        return self.h[namn]

    def put(self, namn, nyAtom):
        """Store *nyAtom* under the key *namn*, replacing any previous value."""
        self.h[namn] = nyAtom
from __future__ import division
from __future__ import with_statement
import json #or cjson
import re
from stemming.porter2 import stem
from operator import itemgetter
from math import log
from collections import defaultdict
import operator
from Tkinter import *
from PIL import Image, ImageTk
from Tkinter import Tk, RIGHT, BOTH, RAISED
from ttk import Frame, Button, Style
import json #or cjson
import re
import math
import sys
import category_predictor
# Shared module state, mutated by the GUI callbacks and loaders below.
business_list = []        # businesses loaded from the dataset
master = ''               # Tk root window, assigned at startup
submitted_text = ''       # last review text entered by the user
business_index = 0        # index of the currently selected business
star_score = 0            # predicted star rating for the submitted review
read_size1 = 5000         # read limit; not referenced in this chunk -- TODO confirm use
read_size2 = 500          # max reviews tokenized per star file in openfilesLM()
def tokenize(string):
    """Lower-case *string* and return its alphanumeric word tokens as str."""
    matches = re.findall(r'\w*[a-zA-Z0-9]', string.lower())
    return [str(token) for token in matches]
def stopword(a_list_of_words):
    """Return *a_list_of_words* with words from the local 'stop_word' file removed.

    The file is opened with ``with`` so the handle is closed (the
    original leaked it), and the stop list is a set for O(1) membership
    tests instead of a list scan per word.
    """
    with open('stop_word', 'r') as src:
        # Same per-line parsing as before: keep everything before the newline.
        stops = set(line.split('\n')[0] for line in src)
    return [word for word in a_list_of_words if word not in stops]
class Hw1(object):
    """Static helpers for HW1: JSON parsing, tokenizing, stopword removal, stemming."""

    def __init__(self):
        pass

    @staticmethod
    def read_line(a_json_string_from_document):
        """Parse one JSON document line into a Python object."""
        return json.loads(a_json_string_from_document)

    @staticmethod
    def tokenize(string):
        """Return the lower-cased word tokens of *string* as a list of str."""
        return [str(token) for token in re.findall(r'\w+', string.lower())]

    @staticmethod
    def stopword(a_list_of_words):
        """Drop every word that appears in the local 'stop_word' file."""
        stops = []
        for line in open('stop_word', 'r'):
            stops.append(re.split('\n', line)[0])
        return [word for word in a_list_of_words if word not in stops]

    @staticmethod
    def stemming(a_list_of_words):
        """Porter2-stem every word in the list."""
        return [stem(word) for word in a_list_of_words]
# Raw review lines, one list per star rating (filled by openfiles()).
list5 = []
list4 = []
list3 = []
list2 = []
list1 = []
# Tokenized reviews, one list per star rating (filled by openfilesLM()).
tlist5 = []
tlist4 = []
tlist3 = []
tlist2 = []
tlist1 = []
def openfiles():
    """Load the raw review lines of each star file into list1..list5.

    Deduplicates the five copy-pasted loops and uses ``with`` so the
    file handles are closed (the original leaked all five).
    """
    for fname, dest in (('star5.json', list5), ('star4.json', list4),
                        ('star3.json', list3), ('star2.json', list2),
                        ('star1.json', list1)):
        with open(fname, 'r') as src:
            for line in src:
                dest.append(line)
def openfilesLM():
    """Tokenize up to read_size2 reviews from each star file into tlist1..tlist5.

    Deduplicates the five copy-pasted loops, closes the files with
    ``with`` (the original leaked the handles) and stops reading once
    the per-file limit is reached instead of scanning the whole file.
    The appended data is identical to the original's.
    """
    for fname, dest in (('star5.json', tlist5), ('star4.json', tlist4),
                        ('star3.json', tlist3), ('star2.json', tlist2),
                        ('star1.json', tlist1)):
        with open(fname, 'r') as src:
            cnt = 0
            for line in src:
                if cnt >= read_size2:
                    break  # original kept reading but appended nothing past the limit
                dest.append(tokenize(line))
                cnt += 1
class ReviewCategoryClassifier(object):
    # Naive-Bayes-style classifier built from CategoryPredictor output
    # (Python 2 code: iteritems/itervalues).

    @classmethod
    def load_data(cls, input_file):
        # Read the predictor's output file into (category_counts, word_counts).
        # The special 'all' row holds overall category frequencies.
        job = category_predictor.CategoryPredictor()
        category_counts = None
        word_counts = {}
        with open(input_file) as src:
            for line in src:
                category, counts = job.parse_output_line(line)
                if category == 'all':
                    category_counts = counts
                else:
                    word_counts[category] = counts
        return category_counts, word_counts

    @classmethod
    def normalize_counts(cls, counts):
        # Convert raw counts into log-probabilities: log(count) - log(total).
        total = sum(counts.itervalues())
        lg_total = math.log(total)
        return dict((key, math.log(cnt) - lg_total) for key, cnt in counts.iteritems())

    def __init__(self, input_file):
        # Build P(word|category) and the category priors from the data file.
        category_counts, word_counts = self.load_data(input_file)
        self.word_given_cat_prob = {}
        for cat, counts in word_counts.iteritems():
            self.word_given_cat_prob[cat] = self.normalize_counts(counts)
        # filter out categories which have no words
        seen_categories = set(word_counts)
        seen_category_counts = dict((cat, count) for cat, count in category_counts.iteritems() \
            if cat in seen_categories)
        self.category_prob = self.normalize_counts(seen_category_counts)

    def classify(self, text):
        # Return {category: probability} for *text*; probabilities sum to 1.
        lg_scores = self.category_prob.copy()  # start from the log-priors
        for word in category_predictor.words(text):
            for cat in lg_scores:
                cat_probs = self.word_given_cat_prob[cat]
                if word in cat_probs:
                    lg_scores[cat] += cat_probs[word]
                else:
                    lg_scores[cat] += cat_probs['UNK']  # unseen-word fallback bucket
        # Exponentiate and renormalize the log-scores into probabilities.
        scores = dict((cat, math.exp(score)) for cat, score in lg_scores.iteritems())
        total = sum(scores.itervalues())
        return dict((cat, prob / total) for cat, prob in scores.iteritems())
def predictor(query):
    """Classify *query* and return the most probable category as an int.

    itemgetter(1) replaces the tuple-unpacking lambda; the sort order is
    identical (sorted() is stable, key is the probability either way).
    """
    guesses = ReviewCategoryClassifier("review_new.json").classify(query)
    best_guesses = sorted(guesses.iteritems(), key=itemgetter(1), reverse=True)[:5]
    return int(best_guesses[0][0])
def wordcount(list):
    """Return the total number of tokens across all lines in *list*.

    (The parameter name shadows the builtin ``list``; kept unchanged for
    call-compatibility with existing callers.)
    """
    return sum(len(line) for line in list)
def _star_score(docs, total_words, query_list):
    """Mixture language-model log-score of the query against one star class.

    For every (document, query-term) pair, mixes the in-document term
    frequency (weight 0.7) with the collection frequency (weight 0.3),
    exactly as the original five copy-pasted loops did, then returns the
    log of the accumulated sum.
    """
    score = 0
    for d in docs:
        for q in query_list:
            tf = 0
            doc_len = 0
            for token in d:
                doc_len += 1
                if q == token:
                    tf += 1
            if doc_len == 0:
                doc_len = 1  # guard against empty documents
            score += 0.7 * (float(tf) / float(doc_len)) + 0.3 * (float(tf) / float(total_words))
    return log(score)

def lm(query):
    """Return the star rating (1-5) whose language model best matches *query*.

    Behavior matches the original: raises ValueError (math domain error)
    when a class score is zero, and ties are resolved in favor of the
    lowest star.
    """
    query_list = stopword(tokenize(query))
    # One score per star class, in star order 1..5.
    scores = [
        _star_score(tlist1, wordcount(list1), query_list),
        _star_score(tlist2, wordcount(list2), query_list),
        _star_score(tlist3, wordcount(list3), query_list),
        _star_score(tlist4, wordcount(list4), query_list),
        _star_score(tlist5, wordcount(list5), query_list),
    ]
    # index() returns the first (lowest-star) occurrence of the maximum,
    # mirroring the original's tie-breaking.
    return scores.index(max(scores)) + 1
def cosine(tfidf,tf_query):
    """Score the query against each document by dot product of tf-idf vectors.

    Python 2 code (uses dict.iteritems).  Returns the sum of the top-10
    pair similarities divided by 10.
    """
    cosine_similarity=defaultdict(dict)
    rank_dict={}
    for key in tfidf.keys():
        for key1 in tf_query.keys():
            # NOTE(review): both sides use "docN" ids, so a corpus doc whose
            # id equals the query's id is pinned to -1 and never ranked —
            # confirm this exclusion is intentional.
            if key== key1:
                cosine_similarity[key][key1]=-1
            else:
                similarity=0
                a=tfidf[key].keys()
                b=tf_query[key1].keys()
                # Words shared by the document and the query.
                intersect= [val for val in a if val in b]
                for word in intersect:
                    similarity+=tfidf[key][word]*tf_query[key1][word]
                cosine_similarity[key][key1]=similarity
                #Getting the top 10 pairs
                rank_dict[key+' '+key1]=similarity
    # reverse=1 is truthy, i.e. descending sort.
    top10=sorted(rank_dict.iteritems(), key=itemgetter(1),reverse=1)[0:10] #they shown up in pairs, so keep 20.
    sum_1 = 0
    for x in top10:
        sum_1 = sum_1+x[1]
    # NOTE(review): divides by 10 even when fewer than 10 pairs exist.
    return sum_1/10
def tf_calc(list1,my_list):
    """Build length-normalized tf-idf vectors for the documents in list1.

    Only the words of the (single-string) query in my_list are counted.
    Python 2 code (iteritems, integer division).  Returns a
    defaultdict mapping "docN" -> {word: normalized tf-idf}.
    """
    tf=defaultdict(dict)
    idf={}# idf dictionary of terms
    rid_mapper={}# map id number to the line number  (NOTE(review): never used)
    num_line=1
    #my_list = list(["hello hi there"])
    new_list_use = list()
    for word in my_list[0].split(" "):
        new_list_use.append(word)
    #list1 = list(["i live in hello", "my name is hi hi hi hello there there", "hi hello waddup", "flavor is not good"])
    '''
    list_new = list()
    for word in my_list:
        for line in list1:
            if word in line.split(" "):
                print word, " is there "
            else:
                print word, " nope "
    '''
    # Initialize a zero tf slot for every (doc, query-word) pair.
    for word in new_list_use:
        num_line = 0
        for line in list1:
            num_line += 1
            r_id = "doc" + str(num_line)
            # NOTE(review): this tests membership among the dict's VALUES
            # (counts), so it is always true for a string word; it was
            # probably meant to be `word not in tf[r_id]`.  Harmless here
            # because all resets happen before any counting.
            if word not in tf[r_id].values():
                tf[r_id][word] = 0
                idf[word] = 0
    #for word in new_list_use:
    num_line = 0
    # Document frequency: number of documents containing each word.
    for word in idf.keys():
        for line in list1:
            if word in line.split(" "):
                idf[word] += 1
    # Term frequency per document, counted only for query words.
    for line in list1:
        num_line += 1
        r_id = "doc" + str(num_line)
        for word in line.split(" "):
            if word in tf[r_id]:
                tf[r_id][word] += 1
    for key,value in idf.iteritems():
        if value != 0:
            # NOTE(review): under Python 2 this is integer division, so the
            # idf ratio is floored before log — confirm that is intended.
            idf[key]=log(num_line/int(value)) #idf defination:number of document/ number of document has the key
    for key,value in tf.iteritems():
        sum_tfidf=0
        for word,tfreq in value.iteritems():
            tf[key][word]=tfreq*idf[word]
            sum_tfidf+=(tfreq*idf[word])**2
        sum_tfidf=sum_tfidf**0.5
        #normalize the tfidf vector to unit length
        for word,tfidf in value.iteritems():
            if sum_tfidf != 0:
                tf[key][word]=tf[key][word]/sum_tfidf
    return tf
def similarity_score(user_review):
    """Predict a 1-5 star rating for a review by cosine similarity.

    The review is tokenized/stemmed/stopworded via the project module Hw1,
    turned into a tf-idf query vector, and compared against the five
    star-bucket corpora (globals list1..list5, truncated to read_size1).
    Returns the star bucket (1-5) with the highest cosine score.
    """
    #user_review = input("Enter your review: ")
    line1 = Hw1.tokenize(user_review)
    line2 = []
    line3 = []
    #for word in line1:
    line2 = Hw1.stemming(line1)
    line3 = Hw1.stopword(line2)
    my_list1 = " ".join(line3)
    my_list = []
    my_list.append(my_list1)
    #my_list = list(["pre charter terribl talk sell worthless rude servic avoid repres unhelp main compani program accept outag robot plagu servic unreli midst goal"])
    tf1 = tf_calc(list1[0:read_size1],my_list)
    tf2 = tf_calc(list2[0:read_size1],my_list)
    tf3 = tf_calc(list3[0:read_size1],my_list)
    tf4 = tf_calc(list4[0:read_size1],my_list)
    tf5 = tf_calc(list5[0:read_size1],my_list)
    tf_query=defaultdict(dict)
    idf_query={}# idf dictionary of terms
    rid_mapper_query={}# map id number to the line number  (NOTE(review): never used)
    # num_line starts at 1, so the single query document gets id "doc2".
    num_line=1
    for line in my_list:
        num_line+=1
        r_id= "doc"+str(num_line)
        for word in line.split(" "):
            if word in tf_query[r_id].keys():
                tf_query[r_id][word]+=1
            else:
                tf_query[r_id][word]=1
                #if show up first time in a document, count idf++
                if word in idf_query.keys():
                    idf_query[word]+=1
                else:
                    idf_query[word]=1
    for key,value in idf_query.iteritems():
        # NOTE(review): hard-codes a corpus of 2 documents for the query idf.
        idf_query[key]=log(2/value) #idf defination:number of document/ number of document has the key
    for key,value in tf_query.iteritems():
        sum_tfidf=0
        for word,tfreq in value.iteritems():
            tf_query[key][word]=tfreq*idf_query[word]
            sum_tfidf+=(tfreq*idf_query[word])**2
        sum_tfidf=sum_tfidf**0.5
        #normalize the tfidf vector to unit length
        for word,tfidf in value.iteritems():
            if sum_tfidf !=0:
                tf_query[key][word]=tf_query[key][word]/sum_tfidf
    dictionary = {}
    dictionary[1] = cosine(tf1,tf_query)
    dictionary[2] = cosine(tf2,tf_query)
    dictionary[3] = cosine(tf3,tf_query)
    dictionary[4] = cosine(tf4,tf_query)
    dictionary[5] = cosine(tf5,tf_query)
    # Highest-scoring star bucket wins.
    final_dict = sorted(dictionary.items(), key = operator.itemgetter(1), reverse = True)
    return final_dict[0][0]
def get_rating(text):
    """Combine the similarity-based and predictor star scores into one rating.

    Returns the average of the three scores rounded to 0 decimals.
    Python 2 code (print statements).
    """
    score1 = similarity_score(text)
    #score2 = lm(text)
    # lm() is disabled, so score2 simply mirrors score1.
    score2 = score1
    score3 = predictor(text)
    # NOTE(review): the three '==' lines below are comparisons used as
    # statements — they have NO effect.  They look like intended
    # assignments ('score3 = score1', ...), but since score2 is hard-wired
    # to score1 above, "fixing" them would make the predictor score always
    # be overwritten.  Confirm intent before changing.
    if(score1 == score2):
        score3 == score1
    if(score2 == score3):
        score1 == score2
    if(score3 == score1):
        score2 == score3
    rating = (score1+score2+score3)/3.0
    print score1
    print score2
    print score3
    return round(rating,0)
class display1(Frame):
    """First screen: scrollable list of business names plus a review button."""
    def __init__(self,parent):
        self.parent = parent
        self.initUI()
    def initUI(self):
        """Build the listbox of business names and the 'Write a Review' button."""
        w = Label(self, text="This is YelpPD")
        w.pack()
        listbox = Listbox(self, height=40, width = 150)
        last_word = ''
        # business_list is sorted by name, so comparing with the previous
        # entry skips consecutive duplicates.
        # NOTE(review): skipping entries makes listbox indices diverge from
        # business_list indices, yet curselection() is later used to index
        # business_list — verify with duplicate-named businesses.
        for i in business_list:
            if last_word != i['name']:
                listbox.insert(END, i['name'])
                last_word = i['name']
        scrollbar = Scrollbar(self,orient=VERTICAL)
        scrollbar.configure(command=listbox.yview)
        listbox.configure(yscrollcommand=scrollbar.set)
        scrollbar.pack(side=LEFT, fill=Y)
        listbox.pack()
        scrollbar.config(command=listbox.yview)
        reviewbutton = Button(self, text="Write a Review",command=lambda: self.toreview(listbox.curselection()[0]))
        reviewbutton.pack(side=BOTTOM, fill=X)
        self.pack(fill=BOTH, expand=1)
    def toreview(self,text):
        """Remember the selected listbox index and switch to the review screen."""
        global business_index
        business_index = text
        self.pack_forget()
        display2(master)
class display2(Frame):
    """Second screen: business details plus a text box to submit a review."""
    def __init__(self, parent):
        Frame.__init__(self, parent)
        self.parent = parent
        self.initUI()
    def initUI(self):
        """Render the selected business's info and the review entry form."""
        # business_index is a global set by the listbox selection screen.
        information = Label(self,text = " \n Information on "+business_list[int(business_index)]['name']+"\n \n Located at: \n " + business_list[int(business_index)]['full_address'] )
        information.pack()
        num_reviews = Label(self, text = "Number of Reviews : " + str(business_list[int(business_index)]['review_count']) )
        num_reviews.pack()
        # 'type' shadows the builtin, but renaming a local is a code change;
        # left as-is.
        type = Label(self, text = "Type of Restaurant : " + str(business_list[int(business_index)]['type']) )
        type.pack()
        cat = business_list[int(business_index)]['categories']
        text_cat = ''
        for item in cat:
            text_cat = text_cat + ", " + item
        categories = Label(self, text = "Category of the resaurtant "+ text_cat )
        categories.pack()
        w = Label(self, text=" \n Write a Review for "+business_list[int(business_index)]['name'] )
        w.pack()
        e = Text(self, height=20, width=100)
        e.insert(END,"Insert Review Here")
        e.pack()
        b = Button(self, text="Submit Review",command=lambda: self.tostars(e.get(1.0, END)))
        b.pack(side=BOTTOM, fill=BOTH)
        self.pack(fill=BOTH, expand=1)
    def tostars(self,text):
        """Rate the submitted text and switch to the results screen."""
        global submitted_text,star_score
        submitted_text = text
        star = get_rating(text)
        self.pack_forget()
        star_score = star
        display3(master)
class display3(Frame):
    """Third screen: shows the predicted star image and the submitted review."""
    def __init__(self, parent):
        Frame.__init__(self, parent)
        self.parent = parent
        self.initUI()
    def initUI(self):
        """Display the star image matching the global star_score."""
        w = Label(self, text="The Predicted Star Value")
        w.pack()
        global star_score
        # Default image covers any score that matches none of the branches
        # (e.g. a fractional rating).
        image = Image.open("stars_0.jpg")
        star = star_score
        if (star == 1):
            image = Image.open("stars_1.jpg")
        if (star == 2):
            image = Image.open("stars_2.jpg")
        if (star == 3):
            image = Image.open("stars_3.jpg")
        if (star == 4):
            image = Image.open("stars_4.jpg")
        if (star == 5):
            image = Image.open("stars_5.jpg")
        photo = ImageTk.PhotoImage(image)
        label = Label(self,image=photo)
        # Keep a reference so Tk does not garbage-collect the image.
        label.image = photo
        label.pack()
        T = Text(self, height=20, width=100)
        T.pack()
        Textbox = submitted_text
        T.insert(END, Textbox)
        b1 = Button(self, text="Write another review", command=self.toreview)
        b2 = Button(self, text="Change Restaurant", command=self.tolist)
        b1.pack()
        b2.pack()
        self.pack(fill=BOTH, expand=1)
    def toreview(self):
        """Go back to the review entry screen for the same business."""
        self.pack_forget()
        display2(master)
    def tolist(self):
        """Go back to the business list screen."""
        self.pack_forget()
        display1(master)
def loadbusiness():
    """Load the Yelp business dataset (one JSON object per line) into the
    global business_list and sort it by business name.

    Fix: the original opened the file without ever closing it; a ``with``
    block guarantees the handle is released (works on Python 2 and 3).
    """
    global business_list
    infilename = 'yelp_academic_dataset_business.json'
    with open(infilename, 'r') as f:
        for line in f:
            business_list.append(json.loads(line))
    business_list = sorted(business_list, key=lambda x: x['name'])
def main():
    """Build the Tk root window, load all data, then show the business list.

    Call order matters: loadbusiness()/openfiles()/openfilesLM() must run
    before display1 reads the global business_list.
    """
    global master
    master = Tk()
    master.title("YelpPD")
    master.geometry("900x900")
    loadbusiness()
    openfiles()
    openfilesLM()
    display1(master)
    mainloop()
# Script entry point.
if __name__ == '__main__':
    main()
|
a = dict()
# Quiz (translated from Korean): which of the following raises an error?
a['name'] = 'python'
#a[('a',)] = 'python'
#a[[1]] = 'python'
#a[250] = 'python'
# (translated) a[[1]] is invalid: a list is mutable, hence unhashable.
print(a) |
'''
Created on Jul 10, 2013
@author: emma
'''
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
from selenium.webdriver.support import expected_conditions as EC # available since 2.26.0
from selenium.webdriver.common.action_chains import ActionChains
from UnitTesting.page_objects.modals.base_modal import base_modal
class pledge_modal(base_modal):
    """Page object for the pledge modal dialog (Selenium).

    All clicks go through execute_script because the buttons only respond
    after a hover (ActionChains move_to_element).
    """
    def __init__(self, webd_wrap):
        base_modal.__init__(self, webd_wrap)
    def _confirm_modal(self):
        """Wait until the pledge modal container is in the DOM."""
        self.wait.until(EC.presence_of_element_located((By.CLASS_NAME, "l-570px")), 'Pledge modal not present')
    def close_modal(self):
        """Close the fancybox modal and wait for it to disappear."""
        self._confirm_modal()
        _close = self._webd_wrap._driver.find_element_by_class_name('fancybox-skin').find_element_by_xpath('a')
        self._webd_wrap._driver.execute_script("(arguments[0]).click()", _close)
        # NOTE(review): uses self._webd_wrap.wait here but self.wait above —
        # confirm both refer to the same WebDriverWait.
        self._webd_wrap.wait.until(EC.invisibility_of_element_located((By.CLASS_NAME, 'fancybox-inner')))
    ########################################################################
    def detect_modal_element(self):
        """Raise NoSuchElementException if the modal's inner link is absent."""
        self._webd_wrap._driver.find_element_by_xpath("/html/body/div[4]/div/div/div/div/div/div/div/section[2]/div/section[2]/div/div[2]/div/div/a")
        return self
    def modal_should_be_present(self):
        ''' raises AssertionError if modal is not displayed '''
        self.wait.until(EC.presence_of_element_located((By.CLASS_NAME, "l-570px")), 'modal not present')
        return self
    def click_signup_button(self):
        """Hover over and click the sign-up button inside the sign-in modal."""
        signup_button = self._webd_wrap._driver.find_element_by_id('sign-in-modal').find_element_by_xpath('section[2]/div/a')
        hover = ActionChains(self._webd_wrap._driver).move_to_element(signup_button)
        hover.perform()
        self._webd_wrap._driver.execute_script('(arguments[0]).click()', signup_button)
        return self
    def initial_pledge(self):
        """Click the initial pledge image button."""
        pledge_button = self._webd_wrap._driver.find_element_by_class_name("l-710px").find_element_by_xpath("section/p/a/img")
        # find_element raises if missing, so this guard is effectively dead,
        # kept for parity with the other methods.
        if pledge_button is None: raise Exception('pledge button not found')
        element_to_hover_over = pledge_button
        hover = ActionChains(self._webd_wrap._driver).move_to_element(element_to_hover_over)
        hover.perform()
        self._webd_wrap._driver.execute_script('(arguments[0]).click()', pledge_button)
    def pledge_confirm(self):
        """Click the confirmation button in the pledge dialog."""
        #self._driver.find_element_by_class_name("margin-left-250px").find_element_by_xpath("p/label")
        pledge_button2 = self._webd_wrap._driver.find_element_by_class_name("l-modal-800px").find_element_by_xpath("section[2]/a/img")
        if pledge_button2 is None: raise Exception('pledge button not found')
        element_to_hover_over = pledge_button2
        hover = ActionChains(self._webd_wrap._driver).move_to_element(element_to_hover_over)
        hover.perform()
        self._webd_wrap._driver.execute_script('(arguments[0]).click()', pledge_button2)
    def initial_unpledge(self):
        """Click the un-pledge image button."""
        pledge_button = self._webd_wrap._driver.find_element_by_class_name("l-710px").find_element_by_xpath("section/div[2]/a/img")
        if pledge_button is None: raise Exception('pledge button not found')
        element_to_hover_over = pledge_button
        hover = ActionChains(self._webd_wrap._driver).move_to_element(element_to_hover_over)
        hover.perform()
        self._webd_wrap._driver.execute_script('(arguments[0]).click()', pledge_button)
    def unpledge_confirm(self):
        """Click the un-pledge confirmation button."""
        # NOTE(review): the result of this first lookup is discarded — it only
        # acts as an existence check; confirm that is intentional.
        self._webd_wrap._driver.find_element_by_class_name("l-modal-800px").find_element_by_xpath("section[2]/a[2]")
        pledge_button2 = self._webd_wrap._driver.find_element_by_class_name("l-modal-800px").find_element_by_xpath("section[2]/a/input")
        if pledge_button2 is None: raise Exception('pledge button not found')
        element_to_hover_over = pledge_button2
        hover = ActionChains(self._webd_wrap._driver).move_to_element(element_to_hover_over)
        hover.perform()
        self._webd_wrap._driver.execute_script('(arguments[0]).click()', pledge_button2)
|
from django.db import models
# Create your models here.
class Event(models.Model):
    """A club event: either a race or a training session."""
    EVENT_TYPES = (
        ("Race", "Race"),
        ("Training Session", "Training Session"),
    )
    name = models.CharField(max_length=64)
    description = models.TextField()
    event_type = models.CharField(max_length=64, choices=EVENT_TYPES)
    # NOTE(review): camelCase departs from Django/PEP 8 convention; renaming
    # would require a schema migration, so it is only flagged here.
    dateTime = models.DateTimeField()
    def __str__(self):
        return f"{self.name} - {self.dateTime}"
# @author: Bogdan Hlevca 2012
from numpy import zeros, array, prod, diagonal, dot
from gaussElimin import *
from gaussSeidel import *
from thomas import *
import timeit
# Gauss Elimination test
# Python 2 demo script exercising gaussElimin, gaussSeidel and thomas.
# NOTE(review): names like numpy.zeros rely on `numpy` being exported by one
# of the star-imported helper modules — confirm, since only
# `from numpy import ...` appears in this file's header.
print "Gauss Elimination:"
print "------------------"
print "A =             b = "
print "8.0, 1.0, 6.0   1 "
print "3.0, 5.0, 7.0   2 "
print "4.0, 9.0, 2.0   3 "
a = zeros((3, 3))
a[0] = array([8.0, 1.0, 6.0])
a[1] = array([3.0, 5.0, 7.0])
a[2] = array([4.0, 9.0, 2.0])
b = array([1.0, 2.0, 3.0])
aOrig = a.copy()  # Save original matrix
bOrig = b.copy()  # and the constant vector
# gaussElimin works in place: afterwards `a` is upper-triangular, so the
# determinant is the product of its diagonal.
x = gaussElimin(a, b)
det = prod(diagonal(a))
print 'Solution: x =\n', x
print '\ndet =', det
print '\nCheck result: [a]{x} - b =\n', dot(aOrig, x) - bOrig
print "\n\n-----------------------"
print "Gauss Seidel:"
print "------------------"
print "A =             b = "
print "8.0, 1.0, 6.0   1 "
print "3.0, 5.0, 7.0   2 "
print "4.0, 9.0, 2.0   3 "
#Gauss Seidel test
t = timeit.Timer("print 'initialize + solve:'", "print 'Python Gauss-Seidel'")
a = numpy.zeros((3, 3))
a[0] = numpy.array([8.0, 1.0, 6.0])
a[1] = numpy.array([3.0, 5.0, 7.0])
a[2] = numpy.array([4.0, 9.0, 2.0])
b = numpy.array([1.0, 2.0, 3.0])
#a[0] = numpy.array([12.0, 3.0, -5.0])
#a[1] = numpy.array([1.0, 5.0, 3.0])
#a[2] = numpy.array([3.0, 7.0, 13.0])
#b = numpy.array([1.0, 28.0, 76.0])
x0 = numpy.array([1.0, 0.0, 1.0])
y, kiter = gaussSeidel(a, b, x0, 1.0, 20)
print "Timer:", t.timeit(1)
print "\nSolution:", y
print "-----------------------"
print "Number of iterations: %d" % kiter
print "\n\n-----------------------"
print "Gauss Elim & Gauss Seidel:"
# 5x5 diagonally-dominant system, solved by both methods for comparison.
print '''
a[0] = numpy.array([20, -5, 0, 0, 0])
a[1] = numpy.array([-5, 15, -5, 0, 0])
a[2] = numpy.array([0, -5, 15, -5, 0])
a[3] = numpy.array([0, 0, -5, 15, -5])
a[4] = numpy.array([0, 0, 0, -5, 10])
b = numpy.array([1100, 100.0, 100.0, 100, 100])
'''
a = zeros((5, 5))
a[0] = numpy.array([20, -5, 0, 0, 0])
a[1] = numpy.array([-5, 15, -5, 0, 0])
a[2] = numpy.array([0, -5, 15, -5, 0])
a[3] = numpy.array([0, 0, -5, 15, -5])
a[4] = numpy.array([0, 0, 0, -5, 10])
b = numpy.array([1100, 100.0, 100.0, 100, 100])
aOrig = a.copy()  # Save original matrix
bOrig = b.copy()  # and the constant vector
x = gaussElimin(a, b)
det = prod(diagonal(a))
print 'Gauss Elim: x =\n', x
print '\ndet =', det
print '\nCheck result: [a]{x} - b =\n', dot(aOrig, x) - bOrig
x0 = numpy.array([1.0, 0.0, 1.0, 0.0, 1.0])
y, kiter = gaussSeidel(aOrig, bOrig, x0, 1.0, 500)
print "\n-----------------------"
print "Gauss Seidel Solution:", y
print "-----------------------"
print "Number of iterations: %d" % kiter
#Tridiagonal
print ''' \n\n Tridiagonal:
----------------------
    3 -1  0  0        5
A = 2 -3  2  0   b =  5
    0  1  2  5       10
    0  0  1 -1        1
a2,a3, a4 = 2,1,1
b1,b2,b3,b4 = 3,-3,2,-1
c1,c2,c3= -1,2,5
solution:2,1,2,1
'''
t = timeit.Timer("print 'initialize + solve:'", "print 'Python TDMA QUICK'")
bb = (2., 1., 1.)
aa = (3., -3., 2., -1.)
cc = (-1., 2., 5.)
dd = (5, 5, 10, 1)
# NOTE(review): the four assignments below immediately OVERWRITE the system
# printed above, so the banner does not match what is actually solved —
# confirm which system was intended.
bb = (-0.7, -0.675, -0.675, -0.817)
aa = (2.175, 1.075, 1.075, 1.075, 1.925)
cc = (-0.592, -0.425, -0.425, -0.425)
dd = (1.583, -0.05, 0, 0, 0)
a = numpy.array(aa)
b = numpy.array(bb)
c = numpy.array(cc)
d = numpy.array(dd)
x = thomas(a.size, b, a, c, d)
print "Timer:", t.timeit(1)
print "Solution:",
print x
print "\n\n\n"
t = timeit.Timer("print 'initialize + solve:'", "print 'Python Gauss-Seidel'")
# 1-D heat-conduction style system with boundary temperatures TA/TB.
a = zeros((5, 5))
TA = 100
TB = 500
a[0] = numpy.array([300, -100, 0, 0, 0])
a[1] = numpy.array([-100, 200, -100, 0, 0])
a[2] = numpy.array([0, -100, 200, -100, 0])
a[3] = numpy.array([0, 0, -100, 200, -100])
a[4] = numpy.array([0, 0, 0, -100, 300])
b = numpy.array([200 * TA, 0, 0, 0, 200 * TB])
# NOTE(review): `x` below is assigned but never used; the solver is seeded
# with the earlier `x0` instead — probably `x` was the intended start vector.
x = numpy.array([0, 0, 0, 0, 0])
y, kiter = gaussSeidel(a, b, x0, 1.0, 200)
print "Timer:", t.timeit(1)
print "\nSolution:", y
print "-----------------------"
print "Number of iterations: %d" % kiter
|
from django.contrib.gis.db import models
class WorldBorder(models.Model):
    """Country border polygon with attributes from the world borders shapefile."""
    # Regular Django fields corresponding to the attributes in the
    # world borders shapefile.
    name = models.CharField(max_length=50)
    area = models.IntegerField()
    pop2005 = models.IntegerField('Population 2005')
    fips = models.CharField('FIPS Code', max_length=2)
    iso2 = models.CharField('2 Digit ISO', max_length=2)
    iso3 = models.CharField('3 Digit ISO', max_length=3)
    un = models.IntegerField('United Nations Code')
    region = models.IntegerField('Region Code')
    subregion = models.IntegerField('Sub-Region Code')
    lon = models.FloatField()
    lat = models.FloatField()
    # GeoDjango-specific: a geometry field (MultiPolygonField)
    mpoly = models.MultiPolygonField()
    # Returns the string representation of the model.
    def __str__(self):              # __unicode__ on Python 2
        return self.name
class Marker(models.Model):
    """Generic map marker tied to a neighborhood by plain integer id."""
    neighborhood_id = models.IntegerField()
    name = models.CharField(max_length=50)
    lon = models.FloatField()
    lat = models.FloatField()
    marker_type = models.CharField(default='House', max_length=50)
    # Returns the string representation of the model.
    def __str__(self):              # __unicode__ on Python 2
        return self.name
    def as_dict(self):
        """Serialize the marker for JSON map responses."""
        return {
            'name': self.name,
            'lat': self.lat,
            'lon': self.lon,
            'type': self.marker_type,
        }
class YardSaleMarker(models.Model):
    """Map marker for a yard sale on a given date and optional time window."""
    name = models.CharField(max_length=50)
    create_date = models.DateTimeField(auto_now=True)
    date = models.DateField()
    start_time = models.TimeField(null=True)
    end_time = models.TimeField(null=True)
    lon = models.FloatField()
    lat = models.FloatField()
    # Returns the string representation of the model.
    def __str__(self):              # __unicode__ on Python 2
        return self.name
class ConstructionMarker(models.Model):
    """Construction-work notice with a start/end date range.

    NOTE(review): unlike the sibling marker models this one has no lon/lat
    fields and no __str__ — confirm whether that is intentional.
    """
    name = models.CharField(max_length=200)
    create_date = models.DateTimeField(auto_now=True)
    start_date = models.DateField()
    end_date = models.DateField()
class TheftMarker(models.Model):
    """Map marker reporting a theft incident."""
    time_of_incident = models.DateTimeField(verbose_name='Time/Date of Incident')
    description = models.CharField(max_length=400, verbose_name='Description')
    lon = models.FloatField()
    lat = models.FloatField()
class TrashMarker(models.Model):
    """Map marker reporting dumped trash / litter."""
    create_date = models.DateTimeField(auto_now=True)
    description = models.CharField(max_length=400, verbose_name='Description')
    lon = models.FloatField()
    lat = models.FloatField()
class ExpenseMarker(models.Model):
    """Map marker linked to an expense record by plain integer id."""
    create_date = models.DateTimeField(auto_now=True)
    expense_id = models.IntegerField()
    lon = models.FloatField()
    lat = models.FloatField()
class PoolMarker(models.Model):
    """Map marker for a pool with opening hours.

    NOTE(review): 'open' shadows the builtin; as a model field this is legal
    but renaming would need a migration, so it is only flagged.
    """
    create_date = models.DateTimeField(auto_now=True)
    open = models.TimeField()
    close = models.TimeField()
    lon = models.FloatField()
    lat = models.FloatField()
class SpaMarker(models.Model):
    """Map marker for a spa with opening hours."""
    create_date = models.DateTimeField(auto_now=True)
    open = models.TimeField()
    close = models.TimeField()
    lon = models.FloatField()
    lat = models.FloatField()
class HazardMarker(models.Model):
    """Map marker reporting a hazard."""
    create_date = models.DateTimeField(auto_now=True)
    description = models.CharField(max_length=400, verbose_name='Description')
    lon = models.FloatField()
    lat = models.FloatField()
|
"""
Tests for thumbnails.
"""
import io
from .base import BaseTestCase
from pyyoutube.media import Media
class TestThumbnailsResource(BaseTestCase):
    """Tests for the thumbnails.set (upload) endpoint."""
    RESOURCE = "thumbnails"
    def test_set(self, authed_cli):
        """Starting a thumbnail upload reports zero bytes transferred."""
        video_id = "zxTVeyG1600"
        # NOTE(review): a real JPEG upload would use binary data (io.BytesIO);
        # StringIO works here only because the transport is mocked/recorded —
        # confirm.
        media = Media(fd=io.StringIO("jpeg content"), mimetype="image/jpeg")
        upload = authed_cli.thumbnails.set(
            video_id=video_id,
            media=media,
        )
        assert upload.resumable_progress == 0
|
turmas = {}
def adicionarTurma():
    """Prompt for a class name and register it with an empty student map."""
    nome_da_turma = str(input("Nome da turma: "))
    turmas[nome_da_turma] = {}
def adicionarAlunoNotas():
    """Prompt for a class, a student id, and a series of grades; store them.

    At least one grade is read; more are read while the user answers 'Sim'.
    """
    turma = str(input("Nome da turma:"))
    aluno = str(input("Matricula: "))
    notas = []
    while True:
        notas.append(float(input("Digite a nota: ")))
        if str(input("Deseja inserir mais notas? ")) != 'Sim':
            break
    turmas[turma][aluno] = notas
def calcularMedia(notas):
    """Return the arithmetic mean of the grades in *notas*."""
    return sum(notas) / len(notas)
def calcularMediaTurma():
    """Prompt for a class name and return the mean of its students' averages."""
    soma = 0
    # NOTE(review): despite the name, this counts students in the chosen
    # class, not classes.
    quantidadeDeTurmas = 0
    nomeTurma = str(input("Nome da turma: "))
    for x in turmas[nomeTurma]:
        soma += calcularMedia(turmas[nomeTurma][x])
        quantidadeDeTurmas += 1
    # Raises ZeroDivisionError for a class with no students.
    return soma/quantidadeDeTurmas
def consulta():
    """Interactive lookup: print all classes, one class, or one student's grades."""
    op = int(input("1. Turmas;\n2. Alunos;\n3. Aluno;\n"))
    if(op == 1):
        print(turmas)
    elif(op == 2):
        nomeTurma= str(input("Nome da turma: "))
        print(turmas[nomeTurma])
    elif(op == 3):
        nomeTurma= str(input("Nome da turma: "))
        matr = str(input("Matricula do aluno: "))
        print(turmas[nomeTurma][matr])
def Menu():
    """Interactive menu loop; dispatches to the data-entry and report helpers.

    Fixes to the menu text only: option 4 repeated option 3's label
    ("Calcular média de um aluno") although it computes the CLASS average,
    and "Consutar" was a typo for "Consultar".  Logic is unchanged.
    """
    continuar = 'Sim'
    while (continuar == 'Sim'):
        opcao = int(input("O que deseja fazer?\n 1: Adicionar turma;\n 2: Adicionar aluno e notas;\n 3: Calcular média de um aluno;\n 4: Calcular média de uma turma;\n 5: Consultar os dados inseridos;\n 6: Sair.\n"))
        if (opcao == 1):
            adicionarTurma()
        elif (opcao == 2):
            adicionarAlunoNotas()
        elif (opcao == 3):
            turma = str(input("Turma: "))
            matricula = str(input("Matricula: "))
            print(calcularMedia(turmas[turma][matricula]))
        elif (opcao == 4):
            print(calcularMediaTurma())
        elif (opcao == 5):
            consulta()
        elif (opcao == 6):
            continuar = 'Não'
        else:
            print("Opção inválida! Tente Novamente.\n")
Menu() |
import tkinter
from tkinter import filedialog
import os
from datetime import datetime
today = datetime.now().date()
print('today is ', today)
# Ask the user to pick a folder; every file in it is XOR-obfuscated and
# written to save_path with a date-stamped '.enc' name.
directory = tkinter.filedialog.askdirectory()
print(directory)
os.chdir(directory)
files = os.listdir(directory)
# NOTE(review): listdir also returns subdirectories; open() on one raises —
# confirm the chosen folders only contain regular files.
for file in files:
    in_file = open(file, 'rb')
    image = in_file.read()
    in_file.close()
    image = bytearray(image)
    # Single-byte XOR with a fixed key: reversible obfuscation, NOT real
    # encryption.
    key = 46
    for index, value in enumerate(image):
        image[index] = value ^ key
    # NOTE(review): hard-coded Windows output folder; must already exist.
    save_path = r'C:/test/'
    s = file.split("/")
    file = s[-1]
    # os.chdir(save_path)
    out_filename = os.path.join(save_path, file + ' ' + str(today) + '.enc')
    print(out_filename)
    out_file = open(out_filename, 'wb')
    out_file.write(image)
    # os.startfile(save_path)
    out_file.close()
print(directory)
print(files) |
import numpy as np
from scipy import stats
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.graphics.api import qqplot
import seaborn as sns
# Notebook transcript: fit an ARMA(2,0) to the statsmodels sunspots data.
# NOTE(review): this is IPython-exported code, not a runnable module —
# '%matplotlib' is a magic, `ts.spots` and `autocorrelation_plot` are never
# defined here, and line "print arma_mod20.params" is Python 2 syntax amid
# Python 3 prints.
%matplotlib
df = sm.datasets.sunspots.load()
dta = pd.DataFrame(df.data['SUNACTIVITY'], index = sm.tsa.datetools.dates_from_range('1700', '2008'), columns = ['SUNACTIVITY'])
dta.plot()
# ACF/PACF of the series to choose the ARMA order.
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(211)
fig = sm.graphics.tsa.plot_acf(ts.spots, lags=40, ax=ax1)
ax2 = fig.add_subplot(212)
fig = sm.graphics.tsa.plot_pacf(ts.spots, lags=40, ax=ax2)
autocorrelation_plot(ts.spots)
# ARMA (2,0)
arma_mod20 = sm.tsa.ARMA(dta, (2,0)).fit()
print arma_mod20.params
print(arma_mod20.aic)
print(arma_mod20.bic)
print(arma_mod20.hqic)
# Durbin Watson
# We will use the Durbin-Watson test for autocorrelation. The Durbin-Watson statistic ranges in value from 0 to 4.
# A value near 2 indicates non-autocorrelation; a value toward 0 indicates positive autocorrelation; a value toward 4 indicates negative autocorrelation.
# analysis of residuals
sm.stats.durbin_watson(arma_mod20.resid)
# => no autocorrelation
stats.normaltest(arma_mod20.resid)
# qq plot or residuals
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
fig = qqplot(arma_mod20.resid, line='q', ax=ax, fit=True)
# Predict
# dynamic=True: forecasts feed back into subsequent predictions.
predict_sunspots20 = arma_mod20.predict('1990', '2012', dynamic=True)
# plot:
ax = dta.ix['1950':].plot(figsize=(12,8))
ax = predict_sunspots20.plot(ax=ax, style='r--', label='Dynamic Prediction');
ax.legend();
ax.axis((-20.0, 38.0, -4.0, 200.0));
# Metric
def mean_forecast_err(y, yhat):
    """Mean forecast error: average of the element-wise residuals y - yhat."""
    residuals = y - yhat
    return residuals.mean()
|
#!/usr/bin/env python3
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__app__ = "mbus2mqtt Adapter"
__VERSION__ = "0.70"
__DATE__ = "10.05.2020"
__author__ = "Markus Schiesser"
__contact__ = "M.Schiesser@gmail.com"
__copyright__ = "Copyright (C) 2020 Markus Schiesser"
__license__ = 'GPL v3'
import json
import logging
import sys
import time

import meterbus
import serial
from configobj import ConfigObj

from library.logger import loghandler
from library.mqttclient import mqttclient
class manager(object):
    """Reads water-meter values over M-Bus (serial) and publishes them to MQTT.

    Bug fixes relative to the original:
      * readMbus referenced an undefined name ``ser`` (the serial handle is
        ``self._if``) and an undefined name ``x`` (the decoded telegram), and
        clobbered its result dict with the telegram JSON.
      * run() called a nonexistent ``startMeasure``; it now opens the bus and
        reads it via the existing methods.
    """
    def __init__(self, configfile='mbus2mqtt.cfg'):
        self._configfile = configfile
        self._general = None
        self._mqttbroker = None
        self._ultrasonic = None
        self._msg = {}
    def readConfig(self):
        """Load the ConfigObj file; exit the process if missing or empty."""
        _config = ConfigObj(self._configfile)
        if bool(_config) is False:
            print('ERROR config file not found', self._configfile)
            sys.exit()
        self._loggerCfg = _config.get('LOGGING', None)
        self._mqttCfg = _config.get('BROKER', None)
        self._interfaceCfg = _config.get('INTERFACE', None)
        return True
    def startLogger(self):
        """Configure the root log handler and this class's logger."""
        self._root_logger = loghandler(self._loggerCfg.get('NAME', 'MBUS2MQTT'))
        self._root_logger.handle(self._loggerCfg.get('LOGMODE', 'PRINT'), self._loggerCfg)
        self._root_logger.level(self._loggerCfg.get('LOGLEVEL', 'DEBUG'))
        self._rootLoggerName = self._loggerCfg.get('NAME', 'MBUS2MQTT')
        self._log = logging.getLogger(self._rootLoggerName + '.' + self.__class__.__name__)
        return True
    def startMbus(self):
        """Open the serial port for M-Bus (8E1, short timeout)."""
        _device = self._interfaceCfg.get('PORT', '/dev/ttyUSB0')
        _baudrate = self._interfaceCfg.get('BAUDRATE', 2400)
        self._if = serial.Serial(_device, _baudrate, 8, 'E', 1, 0.5)
        self._log.debug('Serial Port configuration %s' % (_device))
        self._if.flush()
    def readMbus(self):
        """Poll every slave section of the INTERFACE config.

        Returns {slave_id: {'WATER_CONSUMPTION': value}}.
        """
        _data = {}
        for k, v in self._interfaceCfg.items():
            if isinstance(v, dict):
                _slaveId = k
                _offset = v.get('OFFSET', 0)
                meterbus.send_ping_frame(self._if, _slaveId)
                # FIX: was recv_frame(ser, 1) — 'ser' was never defined.
                frame = meterbus.load(meterbus.recv_frame(self._if, 1))
                assert isinstance(frame, meterbus.TelegramACK)
                meterbus.send_request_frame(self._if, _slaveId)
                frame = meterbus.load(meterbus.recv_frame(self._if, meterbus.FRAME_DATA_LENGTH))
                assert isinstance(frame, meterbus.TelegramLong)
                # FIX: keep the telegram JSON separate from the result dict
                # (the original overwrote _data and then read undefined 'x').
                _telegram = json.loads(frame.to_JSON())
                _payload = {}
                _payload['WATER_CONSUMPTION'] = _telegram['body']['records'][0]['value']
                print(_payload)
                _data[_slaveId] = _payload
        return (_data)
    def publishData(self, data):
        """Publish one MQTT message per slave under the configured topic."""
        self._log.debug('Methode: publishData(%s)', data)
        mqttpush = mqttclient(self._rootLoggerName)
        mqttpush.pushclient(self._mqttCfg)
        for k, v in data.items():
            print('send', k, v)
            _topic = self._mqttCfg.get('PUBLISH', 'SMARTHOME/CH/BE/SENSOR01/MBUS01')
            _topic = _topic + "/" + k
            mqttpush.publish(_topic, json.dumps(v))
            time.sleep(1)
        mqttpush.disconnect()
        return True
    def run(self):
        """Config -> logging -> M-Bus read -> MQTT publish."""
        self.readConfig()
        self.startLogger()
        self._log.info('Startup, %s %s %s' % (__app__, __VERSION__, __DATE__))
        # FIX: was self.startMeasure(), which does not exist.
        self.startMbus()
        data = self.readMbus()
        print('DAten', data)
        self._log.info(data)
        self.publishData(data)
        return True
import os, time, serial, meterbus
import json
import paho.mqtt.publish as publish
|
from pymongo.cursor import Cursor
from models import PostModel, UserModel
import motor.motor_asyncio
from dotenv import dotenv_values
import os
# Database connection: .env supplies the URI, a real environment variable
# overrides it when present.
config = dotenv_values(".env")
DATABASE_URI = config.get("DATABASE_URI")
if os.getenv("DATABASE_URI"): DATABASE_URI = os.getenv("DATABASE_URI")
client = motor.motor_asyncio.AsyncIOMotorClient(DATABASE_URI)
database = client.SocMedia
usersCollection = database.users
postsCollection = database.posts
async def fetch_all_users():
    """Return every user document in the collection as a UserModel."""
    return [UserModel(**document) async for document in usersCollection.find()]
async def fetch_all_posts():
    """Return every post document in the collection as a PostModel."""
    return [PostModel(**document) async for document in postsCollection.find()]
async def fetch_all_posts_from_user(user: str):
    """Return all posts whose ``user`` field equals *user*.

    Improvement: the filter is applied server-side via the find() query
    instead of streaming every post and filtering in Python.  The result
    set is identical (documents lacking a ``user`` field are excluded
    either way for a string argument).
    """
    posts = []
    cursor = postsCollection.find({"user": user})
    async for document in cursor:
        posts.append(PostModel(**document))
    return posts
async def fetch_one_post(nanoid: str):
    """Fetch a single post by nanoid, excluding Mongo's _id field."""
    return await postsCollection.find_one({"nanoid": nanoid}, {"_id": 0})
async def fetch_one_user(nanoid: str):
    """Fetch a single user by nanoid, excluding Mongo's _id field."""
    return await usersCollection.find_one({"nanoid": nanoid}, {"_id": 0})
async def create_user(user: UserModel):
    """Insert *user* and return the stored document (without _id).

    Fix: Motor's insert_one is a coroutine and must be awaited — without
    the await the insert is never executed, so the follow-up fetch could
    return None.
    """
    document = user.dict()
    await usersCollection.insert_one(document)
    result = await fetch_one_user(user.nanoid)
    return result
async def create_post(post: PostModel):
    """Insert *post* and return the stored document (without _id).

    Fix: Motor's insert_one is a coroutine and must be awaited — without
    the await the insert is never executed.
    """
    document = post.dict()
    await postsCollection.insert_one(document)
    result = await fetch_one_post(post.nanoid)
    return result
async def create_comment(reply: PostModel, nanoid: str):
    """Insert a reply post and return the stored document (without _id).

    Fix: Motor's insert_one is a coroutine and must be awaited.
    The ``nanoid`` parameter is unused but kept for interface
    compatibility with existing callers.
    """
    await postsCollection.insert_one(reply.dict())
    result = await fetch_one_post(reply.nanoid)
    return result
async def fetch_all_replies(nanoid: str):
    """Return all posts that are replies to the post identified by *nanoid*.

    Improvement: each post was serialized with .dict() up to three times
    per iteration; it is now serialized once.
    """
    replies = []
    postList = await fetch_all_posts()
    for post in postList:
        data = post.dict()
        if data.get("isReply") and data.get("replyTo") == nanoid:
            replies.append(PostModel(**data))
    return replies
from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework import generics
from .models import UserList, FileList
from rest_framework.response import Response
from .serailzers import UserListSerializer, UserListInfoSerializer, FileListInfoSerializer, FileListSerializer
from django.contrib.auth.hashers import make_password, check_password
import os
# Create your views here.
class UserAll(generics.ListAPIView):
    """List every UserList row, wrapped in a {code, msg, data} envelope."""
    queryset = UserList.objects.all()
    serializer_class = UserListSerializer
    def get(self, request, *args, **kwargs):
        """Return the (optionally paginated) user list."""
        queryset = self.filter_queryset(self.get_queryset())
        page = self.paginate_queryset(queryset)
        if page is not None:
            serializer = self.get_serializer(page, many=True)
            return self.get_paginated_response(serializer.data)
        serializer = self.get_serializer(queryset, many=True)
        return Response(data={
            "code":200,
            "msg":"成功获取",
            "data":serializer.data
        })
class UserRegister(generics.CreateAPIView):
    """Register a user; the password is hashed before saving."""
    serializer_class = UserListSerializer
    def post(self, request, *args, **kwargs):
        """Create a UserList row from the request body.

        Always responds with HTTP 200; success vs failure is signalled
        only through the 'msg' field.
        """
        data = request.data
        try:
            rec_data = {
                "code":200,
                "msg": "ok",
            }
            # NOTE(review): a fixed salt ('abc') makes every hash
            # deterministic; Django would generate a random salt if the
            # second argument were omitted.  Login depends on this, so it
            # cannot be changed in isolation.
            data['user_password'] = make_password(data['user_password'], 'abc', 'pbkdf2_sha256')
            UserList(**data).save()
            return Response(data=rec_data)
        except Exception as e:
            # NOTE(review): broad catch hides the failure cause from the
            # client; only the server log (print) sees it.
            print(e)
            rec_data = {
                "code":200,
                "msg": 'error',
            }
            return Response(data=rec_data)
class UserLogin(generics.CreateAPIView):
    """Log in by re-hashing the submitted password and matching the row.

    Works only because registration hashed with the same fixed salt 'abc',
    making the hash deterministic.
    """
    serializer_class = UserListSerializer
    def post(self, request, *args, **kwargs):
        user_info = request.data
        r_name = user_info['user_name']
        r_psk = make_password(user_info['user_password'], 'abc', 'pbkdf2_sha256')
        c_res = UserList.objects.filter(user_name=r_name, user_password=r_psk)
        if len(c_res) == 0:
            # NOTE(review): key "coade" is likely a typo for "code" — it is a
            # client-facing key, so confirm consumers before renaming.
            return Response(data={"coade":200,"msg": "用户名密码错误", 'result': False})
        # res = check_password(r_psk, c_res[0].user_password)
        return Response(data={"code":200,"msg":"登录成功",'result':True})
class CreateDir(generics.CreateAPIView):
    """Create a named directory for a user, both on disk and in FileList."""
    serializer_class = FileListSerializer
    def post(self, request, *args, **kwargs):
        """Create the directory unless the name already exists for this user.

        Always responds with HTTP 200; outcome is carried in 'msg'.
        """
        data = request.data
        dirname = data['File_name']
        # Raises UserList.DoesNotExist (-> 500) for an unknown user id.
        user_id = UserList.objects.get(id=data['File_User'])
        try:
            check = FileList.objects.filter(File_name=dirname, File_User=user_id)
            if len(check) > 0:
                return Response(data={
                    'code':200,
                    'msg':'名称重复',
                })
            # NOTE(review): hard-coded Windows base path; move to settings.
            os.mkdir(os.path.join("F:\\A测试测试", dirname))
            data['File_User'] = user_id
            FileList(**data).save()
            return Response(data={
                "code":200,
                "msg":"创建成功",
            })
        except Exception as e:
            return Response(data={
                "code":200,
                "msg":"输入正确的创建信息",
                "error_info":str(e)
            })
class DirList(generics.ListAPIView):
    """List the FileList entries belonging to one user (URL kwarg user_id)."""
    serializer_class = FileListSerializer
    def get(self, request, *args, **kwargs):
        # NOTE(review): 'user' is unused except as an existence check —
        # an unknown id raises DoesNotExist (-> 500) before the query runs.
        user = UserList.objects.get(id=kwargs['user_id'])
        queryset = FileList.objects.filter(File_User=kwargs['user_id'])
        page = self.paginate_queryset(queryset)
        if page is not None:
            serializer = self.get_serializer(page, many=True)
            return self.get_paginated_response(serializer.data)
        serializer = self.get_serializer(queryset, many=True)
        # Leftover debug output.
        print(type(queryset))
        return Response(data={
            "code":200,
            "msg":"ok",
            "data": serializer.data
            }
        )
class DirDetial(generics.CreateAPIView):
    """Stub endpoint: logs the posted payload and acknowledges it."""

    def post(self, request, *args, **kwargs):
        print(request.data)
        payload = {'code': 200, 'msg': 'ok'}
        return Response(data=payload)
|
# Generated by Django 3.2.3 on 2021-06-02 05:03
import datetime
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial auto-generated schema for the credit application app:
    # creates the Client and Application tables. Do not edit field
    # definitions here; make model changes and generate a new migration.
    initial = True
    dependencies = [
    ]
    operations = [
        # Client: personal details keyed by a unique 10-digit phone number.
        migrations.CreateModel(
            name='Client',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(blank=True, max_length=30, null=True, validators=[django.core.validators.RegexValidator(message='Имена могут содержать только Алфавитные символы', regex='^([А-Я]*[а-яё]*|[A-Z]*[a-z]*)$')], verbose_name='имя')),
                ('last_name', models.CharField(blank=True, max_length=30, null=True, validators=[django.core.validators.RegexValidator(message='Имена могут содержать только Алфавитные символы', regex='^([А-Я]*[а-яё]*|[A-Z]*[a-z]*)$')], verbose_name='фамилия')),
                ('phone', models.CharField(max_length=10, unique=True, validators=[django.core.validators.RegexValidator(message='Номер телефона должен быть введен в соответствии со следующим форматом: "0000000000".', regex='^[0-9]{10}$')], verbose_name='телефон')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Application: a credit request linked to a Client, with product
        # choice and an optional decision.
        migrations.CreateModel(
            name='Application',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_created', models.DateField(default=datetime.date.today, null=True, verbose_name='дата заявки')),
                ('product', models.CharField(choices=[('CS', 'Потреб'), ('CR', 'Авто'), ('PL', 'Залог'), ('MG', 'Ипотека')], max_length=8, verbose_name='продукт')),
                ('decision', models.CharField(blank=True, choices=[('AP', 'Одобренно'), ('DE', 'Отказано'), ('TD', 'Временный отказ')], max_length=11, null=True, verbose_name='решение')),
                ('decision_comment', models.TextField(blank=True, null=True, verbose_name='комментарий к решению')),
                ('client', models.ForeignKey(max_length=10, on_delete=django.db.models.deletion.CASCADE, related_name='applications', to='credit_app.client', verbose_name='клиент (номер телефона)')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
|
import contextlib
import torch
import torch.nn as nn
import torch.nn.functional as F
@contextlib.contextmanager
def _disable_tracking_bn_stats(model):
def switch_attr(m):
if hasattr(m, "track_running_stats"):
m.track_running_stats ^= True
model.apply(switch_attr)
yield
model.apply(switch_attr)
def _l2_normalize(d):
d_reshaped = d.view(d.shape[0], -1, *(1 for _ in range(d.dim() - 2)))
d /= torch.norm(d_reshaped, dim=1, keepdim=True) + 1e-8
return d
class VATLoss(nn.Module):
    """Virtual Adversarial Training loss (local distributional smoothness)."""

    def __init__(self, xi=10.0, eps=1.0, ip=1, alpha=1.0, mode="multiclass"):
        """VAT loss

        :param xi: hyperparameter of VAT (default: 10.0)
        :param eps: hyperparameter of VAT (default: 1.0)
        :param ip: iteration times of computing adv noise (default: 1)
        :param alpha: scaling factor applied to the returned LDS term
        :param mode: "multiclass" (softmax link) or "binary" (sigmoid link)
        """
        super(VATLoss, self).__init__()
        self.xi = xi
        self.eps = eps
        self.ip = ip
        self.alpha = alpha
        if mode == "multiclass":
            self.link_func = F.softmax
            # Fixed: F.logsoftmax does not exist (raised AttributeError here);
            # the correct name is F.log_softmax.
            self.log_link_func = F.log_softmax
        elif mode == "binary":
            # Fixed: forward() calls these with a ``dim`` keyword, which
            # sigmoid does not accept. Wrap the elementwise functions so the
            # extra argument is accepted and ignored.
            self.link_func = lambda t, dim=None: torch.sigmoid(t)
            self.log_link_func = lambda t, dim=None: F.logsigmoid(t)
        else:
            raise ValueError(f"Invalid mode: {mode}")

    def forward(self, model, x):
        """Return alpha * LDS for batch ``x`` under ``model``."""
        with torch.no_grad():
            # Reference prediction on the clean input, treated as a constant target.
            pred = self.link_func(model(x), dim=1)

        # prepare random unit tensor
        d = torch.rand(x.shape).sub(0.5).to(x.device)
        d = _l2_normalize(d)

        with _disable_tracking_bn_stats(model):
            # calc adversarial direction via power iteration
            for _ in range(self.ip):
                d.requires_grad_()
                pred_hat = model(x + self.xi * d)
                logp_hat = self.log_link_func(pred_hat, dim=1)
                adv_distance = F.kl_div(
                    logp_hat, pred, reduction="batchmean", log_target=False
                )
                adv_distance.backward()
                d = _l2_normalize(d.grad)
                model.zero_grad()

            # calc LDS at the adversarial perturbation of magnitude eps
            r_adv = d * self.eps
            pred_hat = model(x + r_adv)
            logp_hat = self.log_link_func(pred_hat, dim=1)
            lds = F.kl_div(logp_hat, pred, reduction="batchmean", log_target=False)
        return lds * self.alpha
|
import json
# ToDO: Create users function
# ToDo: Update user information function
#
class User(object):
    """Lightweight record built dynamically from a JSON configuration dict."""

    def __init__(self, d):
        # Adopt the mapping directly as the instance namespace so every
        # key becomes an attribute.
        self.__dict__ = d
class Config(object):
    """Read-only wrapper around a configuration mapping."""

    def __init__(self, config=None):
        """Store the configuration mapping.

        Fixed: the original referenced an undefined global ``config``
        (NameError on every instantiation). The mapping is now accepted as
        an optional parameter, so the previous no-arg call still works.
        """
        self._config = {} if config is None else config

    def get_property(self, property_name):
        """Return the value for ``property_name``, or None if absent."""
        # dict.get gives the same "None if missing" behavior without KeyError.
        return self._config.get(property_name)
class UserConfig(Config):
    """Typed accessors for the per-user configuration keys."""
    @property
    def Languages(self):
        """Languages configured for this user, or None if unset."""
        return self.get_property('Languages')
    @property
    def PhoneNumbers(self):
        """Phone numbers configured for this user, or None if unset."""
        return self.get_property('PhoneNumbers')
    @property
    def Provider(self):
        """Mobile provider configured for this user, or None if unset."""
        return self.get_property('Provider')
def get_contact():
    """Interactively prompt for contact details and echo them back.

    Returns:
        (name, phone_number, provider) tuple of raw input strings.
    """
    name = input('What is your name?')
    phone_number = input('I hate to ask like this, but what\'s your phone number?')
    provider = input('Who is your phone provider?')
    print(('So you are {}, your phone number is {}, and provider is {}').format(name, phone_number, provider))
    print('Your daily translations are being set up')
    return name, phone_number, provider
def get_config(path="C:\Python\PycharmProjects\quoteDetect\config.json"):
    """Load and return the "Users" section of the JSON config file at ``path``."""
    with open(path, 'r') as handle:
        return json.load(handle).get("Users")
def get_users(self, config):
    """Return the list of user names (top-level keys) in ``config``.

    ``self`` is unused; it is kept only so existing two-argument calls keep
    working (the original module-level function mistakenly took it).

    Fixed: ``list(self, config.keys)`` raised TypeError, and the loop then
    returned only the first user instead of the full list.
    """
    return list(config.keys())
#
#def get_user_details(user, config=get_config()):
# # Return details for specific user -- Use to instantiate class
# return (config[user][0])
def get_user_details(user, path="C:\Python\PycharmProjects\quoteDetect\config.json"):
    """Return the first details record for ``user`` from the JSON config file."""
    with open(path, 'r') as handle:
        users_section = json.load(handle).get("Users")
    return users_section[user][0]
def add_user(config=None):
    """Placeholder for creating a new user entry (not yet implemented).

    Fixed: the default was ``config=get_config()``, which performs file I/O
    at import time and crashes module import when the config file is
    missing. The config is now loaded lazily on first use.
    """
    if config is None:
        config = get_config()
    print("hello")
|
import translator
from substitutiontranslator import *
from .. import utils
class VigenereTranslator(translator.Translator):
    """Adds perpetually key letters to text (Caesar with longer keys)."""

    def __init__(self, key="A", ignore_nonletters=True):
        self.key = key
        self.ignore_nonletters = ignore_nonletters

    def parseInput(self, cipher):
        """Normalize input to uppercase before translating."""
        return cipher.upper()

    def translate(self, cipher, a_is_one = True):
        """Shift each letter by the matching key letter; pass others through."""
        alphabet = utils.alphabet
        pieces = []
        key_pos = 0
        for ch in self.parseInput(cipher):
            idx = alphabet.find(ch)
            if idx != -1:
                shift = alphabet.find(self.key[key_pos % len(self.key)]) + int(a_is_one)
                pieces.append(alphabet[(idx + shift) % len(alphabet)])
                key_pos += 1
            else:
                if not self.ignore_nonletters:
                    # Non-letters still consume a key position in this mode.
                    key_pos += 1
                pieces.append(ch)
        return "".join(pieces)

    def encode(self, cipher):
        """Encode by temporarily inverting the key via SubstitutionTranslator."""
        subs = SubstitutionTranslator()
        self.key = subs.translate(self.key)
        result = self.translate(cipher, False)
        self.key = subs.encode(self.key)
        return result
# def setKey(self, key):
# self.key = "".join(key).upper() |
#only/just Monika
from random import randint
from time import sleep
def main():
    """Endlessly type out a randomly chosen Monika line."""
    lines = ("Only Monika", "Just Monika")
    while True:
        # randint(0, 1): 1 -> "Just Monika", 0 -> "Only Monika".
        lines_index = randint(0, 1)
        writeLine(lines[lines_index])
def writeLine(string):
    """Print ``string`` one character at a time with a short delay.

    NOTE(review): each character is followed by a carriage return, so the
    cursor jumps back after every character and only the last one stays
    visible -- confirm this display effect is intended.
    """
    for ch in string:
        print(ch, end='\r', flush=True)
        sleep(0.01)
# Fixed: guard the entry point so importing this module does not start the
# infinite print loop.
if __name__ == "__main__":
    main()
|
from aws_cdk import (
core,
aws_s3,
aws_lambda,
aws_apigateway,
aws_iam
)
class InfraStack(core.Stack):
    """CDK stack: S3 upload bucket, upload/list Lambdas, and a REST API."""
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        # env: core.Environment,
        **kwargs,
    ) -> None:
        super().__init__(scope, id, **kwargs)
        # Versioned bucket that receives uploaded objects.
        bucket = aws_s3.Bucket(self,
            "S3Upload",
            versioned=True,)
        # Lambda handling uploads; code is loaded from the local 'lambda' dir.
        # NOTE(review): Code.asset is the legacy name for Code.from_asset -- confirm CDK version.
        lambda_upload = aws_lambda.Function(
            self, 'LambdaUpload',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            code=aws_lambda.Code.asset('lambda'),
            handler='upload.handler',
        )
        # Allow the upload Lambda to write objects into the bucket (and only that).
        lambda_upload.add_to_role_policy(
            aws_iam.PolicyStatement(
                actions=["s3:PutObject"],
                resources=[
                    "{}/*".format(bucket.bucket_arn)
                ]
            )
        )
        # Lambda that lists images; no extra permissions granted here --
        # NOTE(review): it likely needs s3:ListBucket to work, confirm.
        lambda_listimages = aws_lambda.Function(
            self, 'LambdaListImages',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            code=aws_lambda.Code.asset('lambda'),
            handler='listimages.handler',
        )
        # create REST API
        api = aws_apigateway.RestApi(self, 'RestAPI',
                                     rest_api_name='ELS API')
        # adding a method to list images: GET /list -> lambda_listimages
        list_images_resources = api.root.add_resource("list")
        list_images_integration = aws_apigateway.LambdaIntegration(lambda_listimages, proxy=True)
        list_images_method = list_images_resources.add_method(
            "GET",
            list_images_integration,
            api_key_required=False
        )
        # adding a method to initiate image upload: POST /upload -> lambda_upload
        upload_image_resources = api.root.add_resource("upload")
        upload_image_integration = aws_apigateway.LambdaIntegration(lambda_upload, proxy=True)
        upload_image_method = upload_image_resources.add_method(
            "POST",
            upload_image_integration,
            api_key_required=False
        )
|
# Name: Taidgh Murray
# Student ID: 15315901
# File: triangle_area.py
############################################################################
import math
def sides():
    """Prompt for the triangle's three side lengths into globals a, b, c."""
    global a
    global b
    global c
    # NOTE(review): int() rejects decimal input; float() may be intended -- confirm.
    a=int(input("Please enter the first side: "))
    b=int(input("Please enter the second side: "))
    c=int(input("Please enter the third side: "))
def perimeter():
    """Compute the semi-perimeter s = (a + b + c) / 2 into global p.

    Fixed: the printed label said "perimeter", but the value stored here
    (and consumed by heron()) is the semi-perimeter used by Heron's formula.
    """
    global p
    p=(a+b+c)/2
    print("The semi-perimeter is", p)
def heron():
    """Print the triangle's area via Heron's formula using globals p, a, b, c."""
    # p is the semi-perimeter set by perimeter(); a, b, c are set by sides().
    area=math.sqrt(p*(p-a)*(p-b)*(p-c))
    print("The area of the triangle is", area)
# Fixed: guard the script body so importing this module does not prompt
# for input.
if __name__ == "__main__":
    sides()
    perimeter()
    heron()
|
# EXERCISE_9 WORK OF THE BOOK :
# Print "Shivam" prefixed by each index from 0 to 99.
for i in range(100):
    print(f"{i} Shivam")
# -*- coding: utf-8 -*-
import sys
import log
from os import listdir
from os.path import join, isfile, splitext
JAVA_FILE_EXT = ".java"
def get_root_path():
    """Return the first CLI argument, falling back to the script path (argv[0])."""
    argv = sys.argv
    return argv[1] if len(argv) > 1 else argv[0]
def is_java_file(file_path):
    """Return True when ``file_path`` has a .java extension."""
    _, file_ext = splitext(file_path)
    return file_ext == JAVA_FILE_EXT
def ignore_hide_file(file_name):
    """Return True for entries to skip: dotfiles and the "build" directory."""
    return file_name.startswith(".") or file_name == "build"
def list_dir_and_java_file(dir_path):
    """Split the entries of ``dir_path`` into (subdirectories, java files)."""
    sub_dirs = []
    java_sources = []
    for entry in listdir(dir_path):
        if ignore_hide_file(entry):
            continue
        full_path = join(dir_path, entry)
        if not isfile(full_path):
            sub_dirs.append(full_path)
        elif is_java_file(full_path):
            java_sources.append(full_path)
    return sub_dirs, java_sources
def list_all_java_file(root_path):
    """Walk the tree under ``root_path`` and collect every .java file path."""
    pending = [root_path]
    collected = []
    # Iterative depth-first traversal using an explicit stack.
    while pending:
        current = pending.pop()
        sub_dirs, java_files = list_dir_and_java_file(current)
        pending.extend(sub_dirs)
        collected.extend(java_files)
    return collected
|
import scapy.all as scapy
import optparse
def get_arg():
    """Parse -t/--target from the command line and return the options object."""
    parser = optparse.OptionParser()
    parser.add_option("-t", "--target", dest="target", help="Target IP / IP range")
    options, _ = parser.parse_args()
    return options
def scan(ip):
    """ARP-scan ``ip`` (single host or range) and return [{"ip":..., "mac":...}]."""
    arp_request = scapy.ARP(pdst=ip)
    # Broadcast frame so every host on the segment sees the who-has query.
    ether = scapy.Ether(dst="ff:ff:ff:ff:ff:ff")
    answered = scapy.srp(ether / arp_request, timeout=5, verbose=False)[0]
    # Each answer pair is (sent, received); the reply carries the responder's
    # IP (psrc) and MAC (hwsrc).
    return [{"ip": reply[1].psrc, "mac": reply[1].hwsrc} for reply in answered]
def print_res(res_lst):
    """Print the scan results as a two-column IP / MAC table."""
    print("IP\t\t\tMAC Address\n----------------------------------------------------------------------")
    for entry in res_lst:
        print(entry["ip"] + "\t\t" + entry["mac"])
# Fixed: guard the script body so importing this module does not trigger a
# network scan.
if __name__ == "__main__":
    options = get_arg()
    scan_res = scan(options.target)
    print_res(scan_res)
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
from rabbitmq import RabbitMQ
import time
import sys
def callback(ch, method, properties, body):
    """RabbitMQ delivery handler: log the received message body."""
    print(" [x] Received %r" % body)
def main():
    """Consume messages from the queue named by argv[1] (default "vin")."""
    queue = sys.argv[1] if len(sys.argv) >= 2 else "vin"
    RabbitMQ(queue=queue).start_consuming(callback)
# Only start consuming when executed as a script, not on import.
if __name__ == '__main__':
    main()
|
# Generated by Django 3.1.7 on 2021-07-07 09:20
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial auto-generated schema for the M-Pesa integration app: OAuth
    # token storage plus request/response tables for the B2C, C2B, and STK
    # (online checkout) flows. Do not edit field definitions here; change
    # the models and generate a new migration instead.
    initial = True
    dependencies = [
    ]
    operations = [
        # Cached OAuth access token and its lifetime.
        migrations.CreateModel(
            name='AuthToken',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('access_token', models.CharField(max_length=40)),
                ('type', models.CharField(max_length=3)),
                ('expires_in', models.BigIntegerField()),
            ],
            options={
                'db_table': 'tbl_access_token',
            },
        ),
        # Outgoing business-to-customer payment requests.
        migrations.CreateModel(
            name='B2CRequest',
            fields=[
                ('id', models.BigAutoField(primary_key=True, serialize=False)),
                ('phone', models.BigIntegerField()),
                ('amount', models.DecimalField(decimal_places=2, max_digits=20)),
                ('conversation_id', models.CharField(blank=True, max_length=40, null=True)),
                ('originator_conversation_id', models.CharField(blank=True, max_length=40, null=True)),
                ('response_code', models.CharField(blank=True, max_length=5, null=True)),
                ('response_description', models.TextField(blank=True, null=True)),
                ('request_id', models.CharField(blank=True, max_length=20, null=True)),
                ('error_code', models.CharField(blank=True, max_length=20, null=True)),
                ('error_message', models.TextField(blank=True, null=True)),
                ('date_added', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name_plural': 'B2C Requests',
                'db_table': 'tbl_b2c_requests',
            },
        ),
        # Asynchronous results returned for B2C payments.
        migrations.CreateModel(
            name='B2CResponse',
            fields=[
                ('id', models.BigAutoField(primary_key=True, serialize=False)),
                ('phone', models.BigIntegerField(blank=True, null=True)),
                ('amount', models.DecimalField(blank=True, decimal_places=2, max_digits=20, null=True)),
                ('conversation_id', models.CharField(blank=True, max_length=40, null=True)),
                ('originator_conversation_id', models.CharField(blank=True, max_length=40, null=True)),
                ('result_type', models.CharField(blank=True, max_length=5, null=True)),
                ('result_code', models.CharField(blank=True, max_length=5, null=True)),
                ('result_description', models.TextField(blank=True, null=True)),
                ('transaction_id', models.CharField(blank=True, max_length=20, null=True)),
                ('transaction_receipt', models.CharField(blank=True, max_length=20, null=True)),
                ('transaction_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=20, null=True)),
                ('working_funds', models.DecimalField(blank=True, decimal_places=2, max_digits=20, null=True)),
                ('utility_funds', models.DecimalField(blank=True, decimal_places=2, max_digits=20, null=True)),
                ('paid_account_funds', models.DecimalField(blank=True, decimal_places=2, max_digits=20, null=True)),
                ('transaction_date', models.DateTimeField(blank=True, null=True)),
                ('mpesa_user_name', models.CharField(blank=True, max_length=100, null=True)),
                ('is_registered_customer', models.CharField(blank=True, max_length=1, null=True)),
            ],
            options={
                'verbose_name_plural': 'B2C Responses',
                'db_table': 'tbl_b2c_response',
            },
        ),
        # Incoming customer-to-business payment notifications.
        migrations.CreateModel(
            name='C2BRequest',
            fields=[
                ('id', models.BigAutoField(primary_key=True, serialize=False)),
                ('transaction_type', models.CharField(blank=True, max_length=20, null=True)),
                ('transaction_id', models.CharField(max_length=20, unique=True)),
                ('transaction_date', models.DateTimeField(blank=True, null=True)),
                ('amount', models.DecimalField(blank=True, decimal_places=2, max_digits=20, null=True)),
                ('business_short_code', models.CharField(blank=True, max_length=20, null=True)),
                ('bill_ref_number', models.CharField(blank=True, max_length=50, null=True)),
                ('invoice_number', models.CharField(blank=True, max_length=50, null=True)),
                ('org_account_balance', models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=20, null=True)),
                ('third_party_trans_id', models.CharField(blank=True, max_length=50, null=True)),
                ('phone', models.BigIntegerField(blank=True, null=True)),
                ('first_name', models.CharField(blank=True, max_length=50, null=True)),
                ('middle_name', models.CharField(blank=True, max_length=50, null=True)),
                ('last_name', models.CharField(blank=True, max_length=50, null=True)),
                ('is_validated', models.BooleanField(default=False)),
                ('is_completed', models.BooleanField(default=False)),
                ('date_added', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name_plural': 'C2B Requests',
                'db_table': 'tbl_c2b_requests',
            },
        ),
        # STK-push (Lipa na M-Pesa online) checkout requests.
        migrations.CreateModel(
            name='OnlineCheckout',
            fields=[
                ('id', models.BigAutoField(primary_key=True, serialize=False)),
                ('phone', models.BigIntegerField()),
                ('amount', models.DecimalField(decimal_places=2, max_digits=20)),
                ('is_paybill', models.BooleanField(default=True)),
                ('checkout_request_id', models.CharField(default='', max_length=50)),
                ('account_reference', models.CharField(default='', max_length=50)),
                ('transaction_description', models.CharField(blank=True, max_length=50, null=True)),
                ('customer_message', models.CharField(blank=True, max_length=100, null=True)),
                ('merchant_request_id', models.CharField(blank=True, max_length=50, null=True)),
                ('response_code', models.CharField(blank=True, max_length=5, null=True)),
                ('response_description', models.CharField(blank=True, max_length=100, null=True)),
                ('date_added', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name_plural': 'Online Checkout Requests',
                'db_table': 'tbl_online_checkout_requests',
            },
        ),
        # Callback results for STK-push checkouts.
        migrations.CreateModel(
            name='OnlineCheckoutResponse',
            fields=[
                ('id', models.BigAutoField(primary_key=True, serialize=False)),
                ('merchant_request_id', models.CharField(blank=True, max_length=50, null=True)),
                ('checkout_request_id', models.CharField(default='', max_length=50)),
                ('result_code', models.CharField(blank=True, max_length=5, null=True)),
                ('result_description', models.CharField(blank=True, max_length=100, null=True)),
                ('mpesa_receipt_number', models.CharField(blank=True, max_length=50, null=True)),
                ('transaction_date', models.DateTimeField(blank=True, null=True)),
                ('phone', models.BigIntegerField(blank=True, null=True)),
                ('amount', models.DecimalField(blank=True, decimal_places=2, max_digits=20, null=True)),
                ('date_added', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name_plural': 'Online Checkout Responses',
                'db_table': 'tbl_online_checkout_responses',
            },
        ),
    ]
|
from fastapi import APIRouter
from config.db import conn
from models.user import users
from schemas.user import User
from cryptography.fernet import Fernet
# NOTE(review): a fresh Fernet key is generated on every process start, so
# passwords encrypted in earlier runs can never be decrypted again -- confirm
# whether the key should instead be loaded from persistent configuration.
key = Fernet.generate_key()
f = Fernet(key)
# Router collecting the /users endpoints defined below.
user = APIRouter()
@user.get("/users")
def get_users():
    """Fetch and return every row from the users table."""
    query = users.select()
    return conn.execute(query).fetchall()
@user.post("/users")
def create_user(user: User):
    """Insert a new user row, storing the Fernet-encrypted password."""
    new_user = {
        "name": user.name,
        "email": user.email,
        "password": f.encrypt(user.password.encode("utf-8")),
    }
    result = conn.execute(users.insert().values(new_user))
    print(result)
    return "hola"
# Fixed: three byte-identical ``helloworld`` handlers were registered on the
# same "/users" path; the later defs silently rebound the module name and the
# duplicate routes could never match. One definition remains.
@user.get("/users")
def helloworld():
    """Placeholder handler; note it still duplicates the GET /users route above."""
    return "hola"
# Driver for microwave source HP_83650A
#
# Written by Bruno Buijtendorp (brunobuijtendorp@gmail.com)
import logging
from qcodes import VisaInstrument
from qcodes import validators as vals
log = logging.getLogger(__name__)
def parsestr(v):
    """Strip surrounding whitespace and double quotes from an instrument reply."""
    trimmed = v.strip()
    return trimmed.strip('"')
class HP_83650A(VisaInstrument):
    """QCoDeS driver for the HP 83650A synthesized microwave sweeper."""
    def __init__(self, name, address, verbose=1, reset=False, server_name=None, **kwargs):
        """ Driver for HP_83650A

        Args:
            name: QCoDeS instrument name.
            address: VISA resource address.
            verbose: verbosity level stored on the instance.
            reset: NOTE(review): accepted but never acted upon -- call
                self.reset() explicitly if a reset is wanted; confirm.
            server_name: accepted for backward compatibility; unused here.
        """
        self.verbose = verbose
        log.debug('Initializing instrument')
        super().__init__(name, address, **kwargs)

        # CW output frequency; post_delay gives the source time to settle.
        self.add_parameter('frequency',
                           label='Frequency',
                           get_cmd='FREQ:CW?',
                           set_cmd='FREQ:CW {}',
                           vals=vals.Numbers(10e6, 40e9),
                           docstring='Microwave frequency, ....',
                           get_parser=float,
                           unit='Hz',
                           post_delay = 0.05 )
        self.add_parameter('freqmode',
                           label='Frequency mode',
                           get_cmd='FREQ:MODE?',
                           set_cmd='FREQ:MODE {}',
                           vals=vals.Strings(),
                           get_parser=parsestr,
                           docstring='Microwave frequency mode, ....')
        self.add_parameter('power',
                           label='Power',
                           get_cmd='SOUR:POW?',
                           set_cmd='SOUR:POW {}',
                           vals=vals.Numbers(-20, 20),
                           get_parser=float,
                           unit='dBm',
                           docstring='Microwave power, ....')
        # On/off style parameters map 'on'/'off' to the instrument's '1'/'0'.
        self.add_parameter('rfstatus',
                           label='RF status',
                           get_cmd=':POW:STAT?',
                           set_cmd=':POW:STAT {}',
                           val_mapping={'on': '1', 'off': '0'},
                           vals=vals.Strings(),
                           get_parser=parsestr,
                           docstring='Status, ....')
        self.add_parameter('fmstatus',
                           label='FM status',
                           get_cmd=':FM:STAT?',
                           set_cmd=':FM:STAT {}',
                           val_mapping={'on': '1', 'off': '0'},
                           vals=vals.Strings(),
                           get_parser=parsestr,
                           docstring='FM status, ....')
        self.add_parameter('fmcoup',
                           label='FM coupling',
                           get_cmd=':FM:COUP?',
                           set_cmd=':FM:COUP {}',
                           vals=vals.Strings(),
                           get_parser=parsestr,
                           docstring='FM coupling, ....')
        self.add_parameter('amstatus',
                           label='AM status',
                           get_cmd=':AM:STAT?',
                           set_cmd=':AM:STAT {}',
                           val_mapping={'on': '1', 'off': '0'},
                           vals=vals.Strings(),
                           get_parser=parsestr,
                           docstring='AM status, ....')
        self.add_parameter('pulsestatus',
                           label='Pulse status',
                           get_cmd=':PULS:STAT?',
                           set_cmd=':PULS:STAT {}',
                           val_mapping={'on': '1', 'off': '0'},
                           vals=vals.Strings(),
                           get_parser=parsestr,
                           docstring='Pulse status, ....')
        self.add_parameter('pulsesource',
                           label='Pulse source',
                           get_cmd=':PULS:SOUR?',
                           set_cmd=':PULS:SOUR {}',
                           vals=vals.Strings(),
                           get_parser=parsestr,
                           docstring='Pulse source, ....')
    def reset(self):
        """Send *RST to the instrument and print the resulting settings."""
        log.debug('Resetting instrument')
        self.write('*RST')
        self.print_all()
    def print_all(self):
        """Read and print all main settings from the instrument."""
        log.debug('Reading all settings from instrument')
        print(self.rfstatus.label + ':', self.rfstatus.get())
        print(self.power.label + ':', self.power.get(), self.power.unit)
        print(self.frequency.label +
              ': %e' % self.frequency.get(), self.frequency.unit)
        print(self.freqmode.label + ':', self.freqmode.get())
        self.print_modstatus()
    def print_modstatus(self):
        """Print the modulation (FM/AM/pulse) related settings."""
        print(self.fmstatus.label + ':', self.fmstatus.get())
        print(self.fmcoup.label + ':', self.fmcoup.get())
        print(self.amstatus.label + ':', self.amstatus.get())
        print(self.pulsestatus.label + ':', self.pulsestatus.get())
        print(self.pulsesource.label + ':', self.pulsesource.get())
|
# app/api.py
from django.contrib.auth.models import User
from rest_framework.response import Response
from rest_framework.views import APIView
class ListUsers(APIView):
    """API view returning the usernames of every Django auth user."""
    # authentication_classes = [authentication.TokenAuthentication]
    # permission_classes = [permissions.IsAdminUser]

    def get(self, request, format=None):
        """
        Return a list of all users.
        """
        return Response([account.username for account in User.objects.all()])
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# in order to get table_2 from eurosport, first have to get the link list for all 380 matches during a year
import urllib2
from bs4 import BeautifulSoup
import re
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
# Python 2 script: collect links for all matches of one season by iterating
# the Eurosport round ids (5171..5208).
for j in range(5171,5209):
    # AJAX results endpoint returning the fixtures for round j.
    url = "http://www.eurosport.com/_ajax_/results_v8_5/results_teamsports_v8_5.zone?O2=1&langueid=0&domainid=135&sportid=22&revid=323&seasonid=88&mime=text%2fxml&site=&roundid="+str(j)
    request = urllib2.urlopen(url)
    response = request.read()
    soup = BeautifulSoup(response,"html.parser")
    # Every anchor in the round results links to an individual match page.
    hrefs_matches = soup.find_all("a")
    for href_match in hrefs_matches:
        url_match = "http://www.eurosport.com"+href_match['href']
        print url_match
|
import datetime
import random
import sdl2.ext
from sdl2 import *
from sdl2.ext.compat import byteify
from sdl2.sdlmixer import *
import tetris.configuration.Colors
from GameEntities import *
class GameRenderer(sdl2.ext.SoftwareSpriteRenderSystem):
    """Sprite renderer that clears the window to black before each frame."""

    def __init__(self, window):
        super(GameRenderer, self).__init__(window)

    def render(self, components, x=None, y=None):
        # Blank the whole surface, then let the base class draw the sprites.
        black = sdl2.ext.Color(0, 0, 0)
        sdl2.ext.fill(self.surface, black)
        super(GameRenderer, self).render(components)
class BlockSpriteUpdateSystem(sdl2.ext.Applicator):
    """Keeps each block sprite's pixel position in sync with its board cell."""

    def __init__(self):
        super(BlockSpriteUpdateSystem, self).__init__()
        self.componenttypes = BoardCoordinates, sdl2.ext.Sprite

    def process(self, world, componentsets):
        cell_height = window_height() / (num_rows())
        cell_width = window_width() / num_columns()
        for coords, sprite in componentsets:
            # The top two board rows are hidden spawn rows, hence the -2 offset.
            sprite.position = (coords.pos[0] * cell_width,
                               (coords.pos[1] - 2) * cell_height)
class AudioSystem(sdl2.ext.Applicator):
    """Plays the soft-drop and line-clear sound effects when flagged."""

    def __init__(self, resources="C:\\Users\\Ben\\PycharmProjects\\TetrisDemo\\resources\\"):
        """Initialize SDL audio and load the two samples.

        :param resources: directory containing the .wav assets. Defaults to
            the previously hard-coded path for backward compatibility; pass
            your own directory to make the game portable.
        """
        super(AudioSystem, self).__init__()
        self.resources = resources
        SDL_Init(SDL_INIT_AUDIO)
        Mix_OpenAudio(44100, MIX_DEFAULT_FORMAT, 2, 1024)
        soft_drop_file = self.resources + "soft-drop.wav"
        line_clear_file = self.resources + "line-clear.wav"
        self.soft_drop_sample = Mix_LoadWAV(byteify(soft_drop_file, "utf-8"))
        self.line_clear_sample = Mix_LoadWAV(byteify(line_clear_file, "utf-8"))
        self.componenttypes = MovementInput, AudioState

    def process(self, world, componentsets):
        """Play any queued sounds, then clear the audio-state flags."""
        for movement_input, audio_state in componentsets:
            if audio_state.play_soft_drop is True:
                Mix_PlayChannel(-1, self.soft_drop_sample, 0)
            if audio_state.play_line_clear is True:
                Mix_PlayChannel(-1, self.line_clear_sample, 0)
            audio_state.reset()
class MovementSystem(sdl2.ext.Applicator):
    """Applies player input and gravity to the currently controlled piece."""
    def __init__(self):
        super(MovementSystem, self).__init__()
        self.componenttypes = MovementInput, MovementState, BlockCollection, GameBoard, AudioState
        self.last_drop = datetime.datetime.now()
        # Gravity interval in microseconds (500 ms).
        self.drop_timer = 500*1000
    def process(self, world, componentsets):
        """Handle rotations, horizontal moves, soft/hard drops, and gravity."""
        curr_time = datetime.datetime.now()
        for movement_input, movement_state, block_collection, game_board, audio_state in componentsets:
            if movement_input.rotate_cw and self.check_rotate(game_board, block_collection):
                self.rotate(block_collection)
            if movement_input.rotate_ccw and self.check_rotate(game_board, block_collection, True):
                self.rotate(block_collection, True)
            if movement_input.soft_drop and self.check_move(game_board, block_collection, (0, 1)):
                # A soft drop resets the gravity timer and triggers its sound.
                self.last_drop = curr_time
                audio_state.play_soft_drop = True
                self.move(block_collection, (0, 1))
            if movement_input.move_left and self.check_move(game_board, block_collection, (-1, 0)):
                self.move(block_collection, (-1, 0))
            if movement_input.move_right and self.check_move(game_board, block_collection, (1, 0)):
                self.move(block_collection, (1, 0))
            if movement_input.hard_drop:
                # Slam straight down and lock immediately.
                while self.check_move(game_board, block_collection, (0, 1)):
                    self.move(block_collection, (0, 1))
                movement_state.locked = True
            movement_input.reset()
            # NOTE(review): .microseconds is only the sub-second component of
            # the timedelta, so gaps longer than one second wrap around --
            # .total_seconds() may be intended; confirm.
            if (curr_time - self.last_drop).microseconds > self.drop_timer:
                self.last_drop = curr_time
                if self.check_move(game_board, block_collection, (0, 1)):
                    self.move(block_collection, (0, 1))
                else:
                    # Could not fall any further: the piece settles here.
                    movement_state.locked = True
            if movement_state.locked is True:
                # Transfer the locked blocks into the static board grid.
                for block in block_collection.blocks:
                    game_board.board[block.boardcoordinates.pos[1]][block.boardcoordinates.pos[0]] = block
    def check_rotate(self, game_board, block_collection, ccw=False):
        """Return True when every block's rotated cell is in bounds and free."""
        for block in block_collection.blocks:
            coords = block.boardcoordinates.get_cw_coordinates()
            if ccw is True:
                coords = block.boardcoordinates.get_ccw_coordinates()
            if self.is_pos_valid(game_board, coords) is False:
                return False
        return True
    def rotate(self, block_collection, ccw=False):
        """Apply the (already validated) rotation to every block."""
        for block in block_collection.blocks:
            coords = block.boardcoordinates.get_cw_coordinates()
            offset = block.boardcoordinates.get_cw_offset()
            if ccw is True:
                coords = block.boardcoordinates.get_ccw_coordinates()
                offset = block.boardcoordinates.get_ccw_offset()
            block.boardcoordinates.pos = (coords[0], coords[1])
            block.boardcoordinates.offset = offset
    def is_pos_valid(self, game_board, pos):
        """True when pos lies on the board (incl. 2 hidden top rows) and is empty."""
        if pos[0] < 0 or pos[0] >= num_columns() or pos[1] < 0 or pos[1] >= (num_rows()+2) or game_board.board[pos[1]][pos[0]] is not None:
            return False
        return True
    def check_move(self, game_board, block_collection, position_modifier):
        """Return True when shifting every block by position_modifier is legal."""
        for block in block_collection.blocks:
            check_y = block.boardcoordinates.pos[1] + position_modifier[1]
            check_x = block.boardcoordinates.pos[0] + position_modifier[0]
            if self.is_pos_valid(game_board, (check_x, check_y)) is False:
                return False
        return True
    def move(self, block_collection, position_modifier):
        """Shift every block in the collection by position_modifier (dx, dy)."""
        for block in block_collection.blocks:
            block.boardcoordinates.pos = (block.boardcoordinates.pos[0] + position_modifier[0], block.boardcoordinates.pos[1] + position_modifier[1])
class LineClearSystem(sdl2.ext.Applicator):
    """Clears completed rows once the controlled piece locks in place."""
    def __init__(self):
        super(LineClearSystem, self).__init__()
        self.componenttypes = MovementState, BlockCollection, GameBoard, AudioState
    def process(self, world, componentsets):
        """Detect and clear full rows, shifting everything above them down."""
        for movement_state, block_collection, game_board, audio_state in componentsets:
            #if we've locked the block we're controlling, we might need to clear lines
            if movement_state.locked:
                row_dict = {}
                line_cleared = False
                # for each block in our collection of controlled blocks, if an entire row is full, it needs
                # to be cleared. these will be marked True in the row_dict
                for block in block_collection.blocks:
                    row = block.boardcoordinates.pos[1]
                    if row not in row_dict:
                        row_dict[row] = True
                        for space in game_board.board[row]:
                            if space is None:
                                row_dict[row] = False
                                break
                # we want to clear them from top to bottom.
                for row in sorted(row_dict.keys()):
                    # if we need to clear this row
                    if row_dict[row] is True:
                        line_cleared = True
                        # delete all entities in the row thats being cleared
                        for space in game_board.board[row]:
                            space.delete()
                        # shuffle the rest of the rows down one starting from the row above the one
                        # that was just cleared
                        for y in range(row-1, -1, -1):
                            # we're going from left to right
                            for x in range(0, len(game_board.board[y])):
                                space = game_board.board[y][x]
                                # if there's an entity in this space,
                                # adjust its block coordinates (these are used for rendering)
                                if space is not None:
                                    space.boardcoordinates.pos = (space.boardcoordinates.pos[0], space.boardcoordinates.pos[1]+1)
                                # set the old location to None, set the new location to the block
                                game_board.board[y][x] = None
                                game_board.board[y+1][x] = space
                if line_cleared is True:
                    # Signal the audio system to play the line-clear sound.
                    audio_state.play_line_clear = True
class EndGameSystem(sdl2.ext.Applicator):
    """Flags game over when a settled block occupies the hidden spawn rows."""

    def __init__(self):
        super(EndGameSystem, self).__init__()
        self.componenttypes = GameState, GameBoard

    def process(self, world, componentsets):
        for game_state, game_board in componentsets:
            # Rows 0 and 1 sit above the visible board; any occupied cell
            # there means the stack has reached the top.
            for hidden_row in (game_board.board[0], game_board.board[1]):
                for space in hidden_row:
                    if space is not None:
                        game_state.game_over = True
class Generator():
    """Factory for tetromino pieces with randomly assigned colors."""

    def __init__(self, world, game_board):
        self.world = world
        self.game_board = game_board
        # Index -> piece class, in the same slot order as before.
        self.piece_map = [IPiece, JPiece, LPiece, OPiece, SPiece, TPiece, ZPiece]
        # Index -> color constant.
        self.color_map = [tetris.configuration.Colors.RED,
                          tetris.configuration.Colors.GREEN,
                          tetris.configuration.Colors.BLUE]

    def create_piece(self):
        """Spawn a uniformly random piece with a random color."""
        return self.create_specific_piece(random.randrange(7))

    def create_specific_piece(self, piece_num):
        """Spawn the piece type at index ``piece_num`` with a random color."""
        color = self.color_map[random.randrange(3)]
        piece_class = self.piece_map[piece_num]
        return piece_class(self.world, self.game_board, color)
|
from django.db import models
class ChatMessageManager(models.Manager):
    """Default model manager for chat messages; no custom query methods yet."""
    pass
|
"""
publish
=======
A tool to build and publish certain artifacts at certain times.
`publish` was desgined specifically for the automatic publication of course
materials, such as homeworks, lecture slides, etc.
Terminology
-----------
An **artifact** is a file -- usually one that is generated by some build process.
A **publication** is a coherent group of one or more artifacts and their metadata.
A **collection** is a group of publications which all satisfy the same **schema**.
A **schema** is a set of constraints on a publication's artifacts and metadata.
This establishes a **collection -> publication -> artifact hierarchy**: each
artifact belongs to exactly one publication, and each publication belongs to
exactly one collection.
An example of such a hierarchy is the following: all homeworks in a course form
a collection. Each publication within the collection is an individual
homework. Each publication may have several artifacts, such as the PDF of the
problem set, the PDF of the solutions, and a .zip containing the homework's
data.
An artifact may have a **release time**, before which it will not be built or published.
Likewise, entire publications can have release times, too.
Discovering, Building, and Publishing
-------------------------------------
When run as a script, this package follows a three step process of discovering,
building, and publishing artifacts.
In the **discovery** step, the script constructs a collection -> publication ->
artifact hierarchy by recursively searching an input directory for artifacts.
In the **build** step, the script builds every artifact whose release time has passed.
In the **publish** step, the script copies every released artifact to an output
directory.
Discovery
~~~~~~~~~
In the discovery step, the **input directory** is recursively searched for collections,
publications, and artifacts.
A collection is defined by creating a file named ``collection.yaml`` in a directory.
The contents of the file describe the artifacts and metadata that are required
of each of the publications within the collection. For instance:
.. code-block:: yaml
# <input_directory>/homeworks/collection.yaml
schema:
required_artifacts:
- homework.pdf
- solution.pdf
optional_artifacts:
- template.zip
metadata_schema:
name:
type: string
due:
type: datetime
released:
type: date
The file above specifies that publications must have ``homework.pdf`` and
``solution.pdf`` artifacts, and may or may not have a ``template.zip``
artifact. The publications must also have *name*, *due*, and *released* fields
in their metadata with the listed types. The metadata specification is given in a form
recognizable by the *cerberus* Python package.
A publication and its artifacts are defined by creating a ``publish.yaml`` file
in the directory containing the publication. For instance, the file below
describes how and when to build two artifacts named ``homework.pdf`` and ``solution.pdf``,
along with metadata:
.. code-block:: yaml
# <input_directory>/homeworks/01-intro/publish.yaml
metadata:
name: Homework 01
due: 2020-09-04 23:59:00
released: 2020-09-01
artifacts:
homework.pdf:
recipe: make homework
solution.pdf:
file: ./build/solution.pdf
recipe: make solution
release_time: 1 day after metadata.due
ready: false
missing_ok: false
The ``file`` field tells *publish* where the file will appear when the recipe
is run. If ``file`` is omitted, its value is assumed to be the artifact's key -- for
instance, ``homework.pdf``'s ``file`` field is simply ``homework.pdf``.
The ``release_time`` field provides the artifact's release time. It can be a
specific datetime in ISO 8601 format, like ``2020-09-18 17:00:00``, or a
*relative* date of the form "<number> (hour|day)[s]{0,1} (before|after)
metadata.<field>", in which case the date will be calculated relative to the
metadata field. The field it refers to must be a datetime.
The ``ready`` field is a manual override which prevents the artifact from
being built and published before it is ready. If not provided, the artifact
is assumed to be ready.
The ``missing_ok`` field is a boolean which, if ``false``, causes an error to
be raised if the artifact's file is missing after the build. This is the
default behavior. If set to ``true``, no error is raised. This can be useful
when the artifact file is manually placed in the directory and it is
undesirable to repeatedly edit ``publish.yaml`` to add the artifact.
Publications may also have ``release_time`` and ``ready`` attributes. If these
are provided they will take precedence over the attributes of an individual
artifact in the publication. The release time of the publication can be used
to control when its metadata becomes available -- before the release time,
the publication in effect does not exist.
The file hierarchy determines which publications belong to which collections.
If a publication file is placed in a directory that is a descendent of a
directory containing a collection file, the publication will be placed in that
collection and its contents will be validated against the collection's schema.
Publications which are not under a directory containing a ``collection.yaml``
are placed into a "default" collection with no schema. They may contain any
number of artifacts and metadata keys.
Collections, publications, and artifacts all have **keys** which locate them
within the hierarchy. These keys are inferred from their position in the
filesystem. For example, a collection file placed at
``<input_directory>/homeworks/collection.yaml`` will create a collection keyed
"homeworks". A publication within the collection at
``<input_directory>/homeworks/01-intro/publish.yaml`` will be keyed "01-intro".
The keys of the artifacts are simply their keys within the ``publish.yaml``
file.
Building
~~~~~~~~
Once all collections, publications, and artifacts have been discovered, the
script moves to the build phase.
Artifacts are built by running the command given in the artifact's `recipe`
field within the directory containing the artifact's ``publication.yaml`` file.
Different artifacts should have "orthogonal" build processes so that the order
in which the artifacts are built is inconsequential.
If an error occurs during any build the entire process is halted and the
program returns without continuing on to the publish phase. An error is
considered to occur if the build process returns a nonzero error code, or if
the artifact file is missing after the recipe is run.
Publishing
~~~~~~~~~~
In the publish phase, all published artifacts -- that is, those which are ready
and whose release date has passed -- are copied to an **output directory**.
Additionally, a JSON file containing information about the collection ->
publication -> artifact hierarchy is placed at the root of the output
directory.
Artifacts are copied to a location within the output directory according to the
following "formula":
.. code-block:: text
<output_directory>/<collection_key>/<publication_key>/<artifact_key>
For instance, an artifact keyed ``homework.pdf`` in the ``01-intro`` publication
of the ``homeworks`` collection will be copied to::
<output_directory>/homeworks/01-intro/homework.pdf
An artifact which has not been released will not be copied, even if the
artifact file exists.
*publish* will create a JSON file named ``<output_directory>/published.json``.
This file contains nested dictionaries describing the structure of the
collection -> publication -> artifact hierarchy.
For example, the below code will load the JSON file and print the path of a published
artifact relative to the output directory, as well as a publication's metadata.
.. code-block:: python
>>> import json
>>> d = json.load(open('published.json'))
>>> d['collections']['homeworks']['publications']['01-intro']['artifacts']['homework.pdf']['path']
homeworks/01-intro/homework.pdf
>>> d['collections']['homeworks']['publications']['01-intro']['metadata']['due']
2020-09-10 23:59:00
Only those publications and artifacts which have been published appear in the
JSON file. In particular, if an artifact has not reached its release time, it
will be missing from the JSON representation entirely.
"""
from .types import *
from .exceptions import *
from ._discover import *
from ._validate import *
from ._build import *
from ._publish import *
from ._filter import *
from ._serialize import *
from ._cli import cli
from ._smartdates import resolve_smart_dates
__version__ = (0, 2, 1)
|
class punto:
    """Binary search tree node: a value plus optional left/right children."""

    def __init__(self, valor, izq=None, der=None):
        # Value stored at this node.
        self.valor = valor
        # Left subtree; insertar() places smaller values here.
        self.izq = izq
        # Right subtree; insertar() places values >= valor here.
        self.der = der
def inorden(arbol):
    """Print the tree's values via an in-order traversal, one per line."""
    if arbol is None:
        return
    inorden(arbol.izq)
    print(arbol.valor)
    inorden(arbol.der)
def buscar(arbol, valor):
    """Return True if *valor* is stored in the BST rooted at *arbol*.

    Idiom fix: compare against None with ``is`` rather than ``==``.
    """
    if arbol is None:
        return False
    if arbol.valor == valor:
        return True
    if valor < arbol.valor:
        return buscar(arbol.izq, valor)
    return buscar(arbol.der, valor)
def insertar(arbol, valor):
    """Insert *valor* into the BST rooted at *arbol*, returning a new tree.

    The original tree is not mutated: each node on the insertion path is
    rebuilt (persistent-style insertion).

    Idiom fix: compare against None with ``is`` rather than ``==``.
    """
    if arbol is None:
        return punto(valor)
    if valor < arbol.valor:
        return punto(arbol.valor, insertar(arbol.izq, valor), arbol.der)
    return punto(arbol.valor, arbol.izq, insertar(arbol.der, valor))
def lista(arbol, valores):
    """Insert every value of *valores* into *arbol* and return the new tree.

    Bug fix: the second parameter was previously also named ``lista``,
    shadowing the function itself, so the recursive call raised
    TypeError: 'list' object is not callable.
    """
    if valores == []:
        return arbol
    else:
        return lista(insertar(arbol, valores[0]), valores[1:])
inorden(lista((punto(5,punto(10,punto(15)),punto(55, punto(25)))),[5,50,20,30,35,45,40]))
|
from django.test import TestCase
from django.test import Client
from django.urls import reverse
class CreatePostTestCase(TestCase):
    """Authentication tests for the post-creation view."""

    def test_blog_not_authenicated(self):
        """An anonymous request to the new-post page redirects to the login page."""
        client = Client()
        client.logout()
        url = reverse('post_new')
        # Bug fix: the request previously went through self.client, leaving the
        # explicitly created (logged-out) client above unused.
        response = client.get(url)
        self.assertRedirects(response, "/registration/login", status_code=302)
|
# Generated by Django 2.2.13 on 2020-07-10 06:30
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Add product detail fields to products_women; relax several fields to blank=True.

    Bug fix: three AddField operations used ``default=django.utils.timezone.now``
    for a FloatField/ImageField one-off default; timezone.now returns a datetime,
    which is not a valid float or file value. Replaced with 0.0 / '' one-off
    defaults (``preserve_default=False`` keeps them out of the model state).
    NOTE(review): only safe if this migration has not yet been applied in
    production; otherwise add a follow-up migration instead.
    NOTE(review): ``max_length`` on FloatField is ignored by Django; left as
    generated to match the recorded model state.
    """

    dependencies = [
        ('shop', '0043_auto_20200710_1153'),
    ]

    operations = [
        migrations.AddField(
            model_name='products_women',
            name='active',
            field=models.BooleanField(default=True),
        ),
        migrations.AddField(
            model_name='products_women',
            name='category',
            field=models.CharField(blank=True, max_length=50),
        ),
        migrations.AddField(
            model_name='products_women',
            name='description',
            field=models.TextField(blank=True),
        ),
        migrations.AddField(
            model_name='products_women',
            name='discount_price',
            # One-off default for existing rows (was timezone.now -- a datetime).
            field=models.FloatField(blank=True, default=0.0, max_length=50),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='products_women',
            name='featured',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='products_women',
            name='image',
            # One-off default for existing rows (was timezone.now -- a datetime).
            field=models.ImageField(default='', upload_to='women'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='products_women',
            name='price',
            # One-off default for existing rows (was timezone.now -- a datetime).
            field=models.FloatField(blank=True, default=0.0, max_length=50),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='contactmessage',
            name='ip',
            field=models.CharField(blank=True, max_length=20),
        ),
        migrations.AlterField(
            model_name='contactmessage',
            name='name',
            field=models.CharField(blank=True, max_length=255),
        ),
        migrations.AlterField(
            model_name='order',
            name='address',
            field=models.CharField(blank=True, max_length=500),
        ),
        migrations.AlterField(
            model_name='order',
            name='city',
            field=models.CharField(blank=True, max_length=50),
        ),
        migrations.AlterField(
            model_name='order',
            name='email',
            field=models.CharField(blank=True, max_length=50),
        ),
        migrations.AlterField(
            model_name='order',
            name='items',
            field=models.CharField(blank=True, max_length=500),
        ),
        migrations.AlterField(
            model_name='order',
            name='name',
            field=models.CharField(blank=True, max_length=50),
        ),
        migrations.AlterField(
            model_name='order',
            name='state',
            field=models.CharField(blank=True, max_length=50),
        ),
        migrations.AlterField(
            model_name='order',
            name='total',
            field=models.FloatField(blank=True, max_length=200),
        ),
        migrations.AlterField(
            model_name='order',
            name='zipcode',
            field=models.CharField(blank=True, max_length=10),
        ),
        migrations.AlterField(
            model_name='products_men',
            name='category',
            field=models.CharField(blank=True, max_length=50),
        ),
        migrations.AlterField(
            model_name='products_men',
            name='description',
            field=models.TextField(blank=True),
        ),
        migrations.AlterField(
            model_name='products_men',
            name='discount_price',
            field=models.FloatField(blank=True, max_length=50),
        ),
        migrations.AlterField(
            model_name='products_men',
            name='price',
            field=models.FloatField(blank=True, max_length=50),
        ),
        migrations.AlterField(
            model_name='products_men',
            name='title',
            field=models.CharField(blank=True, max_length=50),
        ),
        migrations.AlterField(
            model_name='products_women',
            name='title',
            field=models.CharField(blank=True, max_length=50),
        ),
    ]
|
from Tkinter import *
import random
def draw_square(can, color, len, cent):
    """Draw a len x len square of *color* on canvas *can*, centered at *cent*.

    NOTE: the parameter named ``len`` shadows the builtin, which this function
    does not use.
    """
    half = len / 2
    left = cent[0] - half
    top = cent[1] - half
    right = cent[0] + half + 1
    bottom = cent[1] + half + 1
    can.create_rectangle(left, top, right, bottom, fill=color, outline=color)
def random_size(low_bound, up_bound):
    """Return a random even integer in [low_bound, up_bound].

    Both bounds must be non-negative even integers with low_bound < up_bound.

    Bug fix: the halved bounds were computed with true division (``/``),
    which produces floats; ``random.randint`` rejects float arguments on
    Python 3.11+. Floor division is equivalent for even non-negative ints.
    """
    assert low_bound >= 0
    assert up_bound >= 0
    assert low_bound % 2 == 0
    assert up_bound % 2 == 0
    assert low_bound < up_bound
    out = random.randint(low_bound // 2, up_bound // 2)
    out *= 2
    assert out % 2 == 0
    return out
def random_position(max_x, max_y):
    """Return a uniformly random (x, y) pair with 0 <= x <= max_x, 0 <= y <= max_y.

    Both bounds must be >= 0.
    """
    assert max_x >= 0
    assert max_y >= 0
    # x is drawn first, then y (tuple elements evaluate left to right).
    return (random.randint(0, max_x), random.randint(0, max_y))
def random_color():
    """Return a random color string in '#RRGGBB' form (lowercase hex digits).

    Idiom: draw from a hex-digit string constant and join, instead of
    appending choices from a hand-built 16-element list. The random draws
    are identical (6 choices from a 16-element sequence).
    """
    hex_digits = '0123456789abcdef'
    return '#' + ''.join(random.choice(hex_digits) for _ in range(6))
if __name__ == '__main__':
    # NOTE(review): this script uses the Python 2 module name `Tkinter`
    # (imported at the top of the file) and so runs under Python 2 only.
    root = Tk()
    canvas = Canvas(root, width = 800, height = 800)
    canvas.pack()
    # Draw 50 squares with random color, random even size 20-150, and a
    # random center within the 800x800 canvas.
    for a in range(50):
        draw_square(canvas, random_color(), random_size(20, 150),\
                    random_position(800, 800))
    # Pressing 'q' invokes the builtin quit (the event object is passed as
    # its optional argument), exiting the program.
    root.bind('<q>', quit)
    root.mainloop()
|
from taiga.requestmaker import RequestMaker
from taiga.models import Issue, Issues, IssueAttributes, IssueAttribute
from taiga.exceptions import TaigaException
import unittest
from mock import patch
from .tools import create_mock_json
from .tools import MockResponse
import six
if six.PY2:
import_open = '__builtin__.open'
else:
import_open = 'builtins.open'
class TestCustomAttributes(unittest.TestCase):
    """Tests for reading, editing, and creating issue custom attributes."""

    @patch('taiga.requestmaker.RequestMaker.get')
    @patch('taiga.requestmaker.RequestMaker.patch')
    def test_edit_issue_custom_attribute(self, mock_requestmaker_patch, mock_requestmaker_get):
        """set_attribute() PATCHes custom-attributes-values with the new value and version."""
        mock_requestmaker_get.return_value = MockResponse(200,
            create_mock_json('tests/resources/issue_customattr_success.json'))
        mock_requestmaker_patch.return_value = MockResponse(200,
            create_mock_json('tests/resources/issue_customattr_success.json'))
        rm = RequestMaker('/api/v1', 'fakehost', 'faketoken')
        issue = Issue(rm, id=1, project=1)
        new_attribute = issue.set_attribute(1, 13)
        self.assertTrue('attributes_values' in new_attribute)
        # The attribute id arrives as a string key ('1') in the payload.
        mock_requestmaker_patch.assert_called_with(
            '/{endpoint}/custom-attributes-values/{id}',
            endpoint=Issue.endpoint, id=issue.id,
            payload={
                'attributes_values': {u'1': 13},
                'version': 1
            }
        )

    @patch('taiga.requestmaker.RequestMaker.get')
    def test_get_issue_custom_attributes(self, mock_requestmaker_get):
        """get_attributes() GETs the custom-attributes-values endpoint uncached."""
        mock_requestmaker_get.return_value = MockResponse(200,
            create_mock_json('tests/resources/issue_customattr_success.json'))
        rm = RequestMaker('/api/v1', 'fakehost', 'faketoken')
        issue = Issue(rm, id=1, project=1)
        my_attributes = issue.get_attributes()
        self.assertTrue('attributes_values' in my_attributes)
        mock_requestmaker_get.assert_called_with(
            '/{endpoint}/custom-attributes-values/{id}',
            endpoint=Issue.endpoint, id=issue.id, cache=False
        )

    @patch('taiga.requestmaker.RequestMaker.post')
    def test_issue_attribute_creation(self, mock_requestmaker_post):
        """IssueAttributes.create() returns an IssueAttribute instance."""
        mock_requestmaker_post.return_value = MockResponse(200,
            create_mock_json('tests/resources/issue_details_success.json'))
        rm = RequestMaker('/api/v1', 'fakehost', 'faketoken')
        issue_attribute = IssueAttributes(rm).create(1, 'new attribute')
        self.assertTrue(isinstance(issue_attribute, IssueAttribute))
|
import os

# Parameter sweep: OpenCL work-group sizes and element counts to benchmark.
work_sizes = [32, 64, 128, 256]
elements = [1024, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216]

if __name__ == '__main__':
    for s in work_sizes:
        for t in elements:
            # Compile third.cpp with the sweep parameters baked in as macros.
            cmd = f"g++ -DNUM_ELEMENTS={t} -DLOCAL_SIZE={s} -o third third.cpp /usr/local/apps/cuda/10.1/lib64/libOpenCL.so.1.1 -lm -fopenmp"
            # Bug fix: the compile status was ignored, so a failed build would
            # silently re-run the previous (stale) binary.
            if os.system(cmd) != 0:
                raise SystemExit(f"compile failed for LOCAL_SIZE={s}, NUM_ELEMENTS={t}")
            cmd = "./third"
            os.system(cmd)
# Print all primes below 1000, space-separated, using naive trial division.
# (Original header comment translated from Chinese.)
i = 2
while i < 1000:
    # Count every divisor of i in [1, i]; a prime has exactly two (1 and i).
    # Fix: removed the dead `j = 2` assignment (immediately overwritten by
    # the for loop below).
    count = 0
    for j in range(1, i + 1):
        if i % j == 0:
            count += 1
    if count == 2:
        print(i, end=" ")
    i += 1
|
from track import Track
from car import Car
def lets_race(drivers=None) -> str:
    """Race the given cars on a fresh Track until one wins.

    Args:
        drivers: cars to race; defaults to a fresh pair of Cars. Bug fix:
            previously a mutable default list built at import time and shared
            (including its accumulated state) across calls.

    Returns:
        A winner-announcement string.
    """
    if drivers is None:
        drivers = [Car("Rarri"), Car("Tesla")]
    t = Track()
    done = False
    while not done:
        for d in drivers:
            print(d)
            d.accelerate()
            t.check_winner(d)
            if t.winner:
                done = True
    return "And the winner is: {}".format(t.winner)
def main():
    """Run a race with the default drivers and print the result."""
    print(lets_race())

if __name__ == "__main__":
    main()
|
# import the necessary packages
from matplotlib import pyplot as plt
import argparse
import imutils
import cv2

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
    help="path to the image")
args = vars(ap.parse_args())

# load the input image from disk
# NOTE(review): cv2.imread returns None for a missing/unreadable path --
# consider validating before use.
image = cv2.imread(args["image"])

# split the image into its respective channels, then initialize the
# tuple of channel names along with our figure for plotting
chans = cv2.split(image)
colors = ("b", "g", "r")  # OpenCV stores channels in BGR order
plt.figure()
plt.title("'Flattened' Color Histogram")
plt.xlabel("Bins")
plt.ylabel("# of Pixels")

# loop over the image channels
for (chan, color) in zip(chans, colors):
    # create a histogram for the current channel (256 bins over 0-255) and plot it
    hist = cv2.calcHist([chan], [0], None, [256], [0, 256])
    plt.plot(hist, color=color)
    plt.xlim([0, 256])

# display the original image (converted BGR->RGB for matplotlib) without axes
plt.figure()
plt.axis("off")
plt.imshow(imutils.opencv2matplotlib(image))
# show our plots
plt.show()
|
#!/usr/bin/env python
"""
Load and plot the MockSensorAngularEfficiencyTable test output.

::

    ipython -i MockSensorAngularEfficiencyTable.py
"""
import os, numpy as np

path = os.path.expandvars("/tmp/$USER/opticks/opticksgeo/tests/MockSensorAngularEfficiencyTableTest.npy")
a = np.load(path)
# Expected array layout: (category, theta, phi).
assert len(a.shape) == 3
ctx = dict(name=os.path.basename(path),shape=a.shape,num_cat=a.shape[0],num_theta=a.shape[1],num_phi=a.shape[2])
title = "{name} {shape!r} num_theta:{num_theta} num_phi:{num_phi}".format(**ctx)
print(title)

# matplotlib is optional: fall back to printing only when it is unavailable.
try:
    import matplotlib.pyplot as plt
except ImportError:
    plt = None
    pass

if plt:
    # One image subplot per sensor category.
    fig, axs = plt.subplots(ctx["num_cat"])
    fig.suptitle(title)
    for i in range(ctx["num_cat"]):
        # plt.subplots returns a bare Axes (not an array) when num_cat == 1.
        ax = axs[i] if ctx["num_cat"] > 1 else axs
        ax.imshow(a[i])
    pass
    plt.ion()
    plt.show()
pass
|
import random

# Problem size: N nodes and Q queries.
N = 10000
Q = 10000

# Write a random test case: a "N Q" header line followed by Q random pairs,
# each component drawn uniformly from [1, N]. (A disabled variant once also
# emitted a path-graph edge list here.)
with open('long.in', 'w') as fout:
    fout.write('{} {}\n'.format(N, Q))
    for _ in range(Q):
        u = random.randint(1, N)
        v = random.randint(1, N)
        fout.write('{} {}\n'.format(u, v))
|
# Accepted yes/no answers for the interactive prompts in start().
affirm = ['y', 'yes', 'ok', 'ys', 'sure', 'fine', 'good', 'hella', 'aye', 'yea', 'yeah']
negate = ['n', 'no', 'nope', 'nah', 'naw', 'na', 'never', 'nay', 'bad']
# Valid blog topics offered by pyvars().
topics = ['thoughts', 'gaym', 'dev', 'dead']
# 0 while the interactive session runs; flips to 1 before HTML generation.
starter = 0
blogs = []   # blog file paths read from blogs/localist.txt
varies = []  # NOTE(review): never used in this file
obs = []     # JS object names read from blogs/objlist.txt
# HTML fragment: the navigation bar written after the <body> tag.
midz = [
    '<table align=center id="navbar">\n',
    '<tr>\n',
    '<td><a href=\'index.html\'>HOME</a></td>\n',
    '<td>*</td>\n',
    '<td><a href=\'blog.html\'>BLOG</a></td><td>*</td>\n',
    '<td><a href=\'dead.html\'>GRATEFULDEAD</a></td><td>*</td>\n',
    '<td><a href=\'tarot.html\'>TAROT</a></td><td>*</td>\n',
    '<td><a href=\'game.html\'>GAME</a></td><td>*</td>\n',
    '<td><a href=\'contact.html\'>CONTACT</a></td>\n',
    '</tr>\n',
    '</table>\n<br><br><br>\n']
# HTML fragment: doctype, CSS, and the opening <script> tag.
# NOTE(review): '<html>\n' has no trailing comma, so it is implicitly
# concatenated with '<head>\n' into a single list element; the written
# output is unchanged, but the list has one fewer element than it appears.
introz = ['<!DOCTYPE html>\n',
          '<html>\n'
          '<head>\n',
          '<style>\n\n',
          'body {\n',
          'background-image: url(\'picss/fractalbg.jpg\');\n}\n\n\n',
          'h1 { \n text-decoration: underline;\n}\n\n\n',
          '#navbar {\n',
          'color: #0066ff;\n',
          'width: 80%;\n',
          'background-color: white;\n',
          'text-align: center;\n\n}\n\n',
          '#blogtable {\n',
          'width: 80%;\n',
          'text-align: center;\n',
          'border: 2px solid black;\n',
          '\n}\n\n',
          '</style>\n\n\n',
          '<title>BLOG</title>\n',
          '<script language=\"JavaScript\">\n\n'
          ]
def pyvars():
    """Interactively collect location, topic, and timestamps for a new blog post.

    Prompts until a valid topic and in-range numeric timestamps are supplied,
    then delegates to pywriter() to persist the entry. Does nothing when a
    post was already recorded this run (starter == 1).
    """
    if starter == 1:
        return
    print("What is the location of this file?\nEnter the file location!\nEX: blogs/test.html")
    loc = input(": ")
    print(topics[:])
    # Sentinel values chosen to force each validation loop to run at least once.
    toppy = '69'
    kron = 420
    pubg = 710
    while toppy.lower() not in topics:
        toppie = input("Topic:\n: ")
        # NOTE(review): input() always returns str, so this check never fails
        # and the else branch is unreachable.
        if type(toppie) is str:
            toppy = toppie
        else:
            print('ahem, STR')
            return
    while toppy.lower() in topics and starter == 0:
        # Accept a 6-digit number for the displayed date.
        while 99999 >= int(kron) or int(kron) >= 1000000:
            kroner = input("6 digit time:\n ")
            # NOTE(review): int() raises ValueError on bad input before the
            # type check below, so the else branch is unreachable.
            krone = int(kroner)
            if type(krone) is int:
                kron = krone
            else:
                print('INT only please')
                return
        # Accept a 10-digit number used for ordering.
        while 999999999 >= int(pubg) or int(pubg) >= 10000000000:
            puber = input("10 digit time:\n ")
            # NOTE(review): this re-checks krone (not puber) and stores the raw
            # string, so pubg may end up as str -- verify intended behavior.
            if type(krone) is int:
                pubg = puber
            else:
                print('INT only please')
                return
        pywriter(loc, toppy, kron, pubg)
        break
def pywriter(loc, toppy, kron, pubg):
    """Prompt for a blurb and title, then prepend the new entry to the blog files.

    Prepends the uppercased title to blogs/objlist.txt, the location to
    blogs/localist.txt, and a JavaScript object literal to blogs/blogvars.txt.
    Sets the module-level `starter` flag to 1 when done.

    Fixes: files are now opened with `with` so they are closed even if a
    write fails; removed the dead `global titles` declaration (never used).

    Args:
        loc: relative path of the blog's HTML file.
        toppy: topic string (one of `topics`).
        kron: 6-digit date/time number shown on the blog page.
        pubg: 10-digit timestamp used for ordering.
    """
    global starter
    gud = input("Enter a short blurb - BEWARE quotation marks must be preceded by \:\n ")
    naym = input("Title you blog. Do NOT use spaces or special characters.\nPlease name your blog:\n ")
    name = naym.upper()
    # Each file is prepended to: read old content, rewind, write new + old.
    with open('blogs/objlist.txt', 'r+') as obies:
        obiescontent = obies.read()
        obies.seek(0, 0)
        obies.write(name + ' \n' + obiescontent)
    with open('blogs/localist.txt', 'r+') as listloc:
        loccontent = listloc.read()
        listloc.seek(0, 0)
        listloc.write(loc + ' \n' + loccontent)
    with open('blogs/blogvars.txt', 'r+') as pyvar:
        pyvarcontent = pyvar.read()
        pyvar.seek(0, 0)
        pyvar.write('var ' + name + ' = {')
        pyvar.write(' title: \'' + name + '\',\n')
        pyvar.write(' locat: \'' + loc + '\',\n')
        pyvar.write(' topic: \'' + toppy + '\',\n')
        pyvar.write(' chron: ' + str(kron) + ',\n')
        pyvar.write(' pub: ' + str(pubg) + ',\n')
        pyvar.write(' blurb: \'' + gud + '...\',\n')
        pyvar.write('};\n\n' + pyvarcontent)
    starter = 1
    return
def start():
    """Entry prompt: ask whether there is a new post and run pyvars() if so.

    Sets the module-level `starter` flag to 1 when the user declines, which
    ends the prompt loop at module level.
    """
    global starter
    if starter == 1:
        return
    print('-pyblog 200501-')
    enter = input('Press ENTER TO BEGIN')
    # Sentinel answer so the validation loop below runs at least once.
    entry = 'xanadu'
    while entry.lower() not in affirm and entry.lower() not in negate:
        entry = input('Do you have a new post?')
    if entry.lower() in affirm:
        pyvars()
        return
    else:
        starter = 1
        return
# Run the interactive entry prompt until the user is done (starter flips to 1).
while starter == 0:
    start()
print('started up')
# Regenerate blog.html from the location list, object list, and JS vars file.
bloghtml = open('blog.html', 'w')
bloglist = open('blogs/localist.txt', 'r')
objs = open('blogs/objlist.txt', 'r')
introz_read = 0
varcounter = 0
midz_read = 0
# Emit the static doctype/CSS/script prologue.
while introz_read < len(introz):
    bloghtml.write(introz[introz_read])
    introz_read += 1
# Copy the JS blog-variable definitions verbatim into the <script> block.
varyy = open('blogs/blogvars.txt', 'r')
for line in varyy:
    bloghtml.write(line)
for line in bloglist:
    # NOTE(review): n is written then discarded each iteration -- dead code.
    n = 1
    blogs.append(line.strip())
    n += 1
for line in objs:
    obs.append(line.strip())
# NOTE(review): bloglist was fully consumed by the loop above, so this
# readline() always returns '' and only the '\n\n' is written.
bloghtml.write('\n\n'+bloglist.readline())
# Emit the JS arrays of blurb-element ids, blog objects, and title ids.
bloghtml.write('var indicies = [')
varlen = len(blogs)
while varlen > varcounter:
    bloghtml.write("\"" + blogs[varcounter] + "\"" + ',')
    varcounter += 1
bloghtml.write('];\n')
obiesread = 0
bloghtml.write('var obies = [')
while obiesread < len(obs):
    bloghtml.write(obs[obiesread]+',')
    obiesread += 1
bloghtml.write('];\n')
bloghtml.write('var titleindex = [')
titleread = 0
while titleread < len(blogs):
    bloghtml.write('\'title'+str(titleread)+'\',')
    titleread += 1
bloghtml.write('];\n\n')
# Emit the display() function that fills titles, dates and blurbs on load.
bloghtml.write('function display() {\n')
bloghtml.write('\n')
bloghtml.write(' var i;\n')
bloghtml.write('\n')
bloghtml.write(' for (i = 0; i < indicies.length; i++) {\n')
bloghtml.write('\n')
bloghtml.write(' var intex = indicies[i];\n')
bloghtml.write(' var title = titleindex[i];\n')
bloghtml.write(' var objecto = obies[i];\n')
bloghtml.write(' document.getElementById(intex).innerHTML = objecto.blurb;\n')
bloghtml.write(' document.getElementById(\'date\'+title).innerHTML = objecto.chron.toString();\n')
bloghtml.write(' document.getElementById(title).innerHTML = objecto.title};\n}\n')
bloghtml.write('\n</script>\n')
bloghtml.write('</head>\n\n')
bloghtml.write('<body onload=\'display();\'>\n')
# Navigation bar, then one table row per blog entry; the placeholder XXXXXX
# cells are replaced client-side by display().
while midz_read < len(midz):
    bloghtml.write(midz[midz_read])
    midz_read += 1
blogsread = 0
bloghtml.write('<table align=center id=\'blogtable\'>\n')
bloghtml.write('<tr><td><h1>TITLE</h1></td><td><h1>-</h1></td><td><h1>DATE</h1></td><td><h1>-</h1></td><td><h1>BLURB</h1></td></tr>\n')
while blogsread < len(blogs):
    currentblog = blogs[blogsread]
    bloghtml.write('<tr>\n<td><a id=\'title'+str(blogsread)+'\'href=\''+currentblog.lower() +'\'>'+ currentblog.upper() +'</a></td><td>-</td> <td><p id=\'datetitle'+str(blogsread)+'\'>XXXXXX XXXXXX</p></td><td>-</td><td><p id=\''+ currentblog.lower() +'\'>XXXXXX XXXXXX</p></td>\n</tr>')
    blogsread += 1
bloghtml.write('</table>\n</body>\n</html>')
# NOTE(review): consider `with` blocks for these files.
bloglist.close()
bloghtml.close()
varyy.close()
objs.close()
|
from microbit import *
from math import pi, sin
# Grid geometry: each LED cell spans `scale` units; a spot influences LEDs
# within `max_dist` units on each axis.
scale = 50
max_dist = 70
maxx = 5 * scale

def brightness(x, y, coords):
    """Return a micro:bit brightness (0-9) for LED (x, y) given the spot at *coords*.

    LEDs farther than max_dist from the spot on either axis are dark (0);
    otherwise brightness follows a sine of the summed axis distances.

    Bug fix: the commented-out lines below previously used C-style ``//``
    comment markers, which are a SyntaxError in Python; changed to ``#``.
    """
    grid_x = x * scale
    grid_y = y * scale
    x_distance = abs(coords[0] - grid_x)
    y_distance = abs(coords[1] - grid_y)
    if (x_distance > max_dist or y_distance > max_dist):
        return 0
    else:
        # TODO: update brightness algorithm
        # dist = sqrt(x_distance*x_distance+y_distance*y_distance)
        # bright = 256 - dist * 256 / max_dist
        angle = pi * ((x_distance + y_distance) // 2) // 15
        return 9 - int(sin(angle) * 9)
def draw_grid(spot):
    """Render all 25 pixels of the 5x5 micro:bit display with their brightness for *spot*."""
    for x in range(0, 5):
        for y in range(0, 5):
            display.set_pixel(x, y, brightness(x, y, spot))
# 5x5 grid * scale
# Bounce a virtual light spot around the grid forever, redrawing each frame.
x = 0
y = 0
# Per-frame velocity components.
dx = 2
dy = 3
while True:
    spot = [x, y]
    draw_grid(spot)
    x += dx
    y += dy
    # Reflect off the edges by negating the velocity and stepping back inside.
    # NOTE(review): the bound is 50 while the grid spans 5*scale = 250 units
    # (and maxx is unused) -- verify whether 50 is intentional.
    if (x >= 50 or x < 0):
        dx = 0 - dx
        x += dx
    if (y >= 50 or y < 0):
        dy = 0 - dy
        y += dy
    sleep(1)
|
from flask import Flask

app = Flask(__name__)

@app.route("/")
def hello():
    """Root endpoint: return a static greeting (Spanish)."""
    return "hola desde Argentina para todo el site de Chile"

if __name__ == "__main__":
    # Bind to all interfaces so the server is reachable from outside the host.
    app.run(host='0.0.0.0')
|
"""empty message
Revision ID: 03c7857df4c3
Revises: 824c7c370d22
Create Date: 2021-09-17 23:58:55.838555
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '03c7857df4c3'
down_revision = '824c7c370d22'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): 'characters' is created with foreign keys to 'planets' and
    # 'vehicles' before those tables exist, and 'planets'/'vehicles' in turn
    # reference 'characters' -- a circular dependency. This only works on
    # backends that don't validate FK targets at CREATE TABLE time (e.g.
    # SQLite); on stricter backends the FKs would need to be added in a
    # second step. Verify against the target database.
    op.create_table('characters',
        sa.Column('character_id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=200), nullable=True),
        sa.Column('height', sa.Integer(), nullable=True),
        sa.Column('mass', sa.Integer(), nullable=True),
        sa.Column('hair_color', sa.String(length=200), nullable=True),
        sa.Column('skin_color', sa.String(length=200), nullable=True),
        sa.Column('eye_color', sa.String(length=200), nullable=True),
        sa.Column('birth_year', sa.Integer(), nullable=True),
        sa.Column('gender', sa.String(length=200), nullable=True),
        sa.Column('homeworld', sa.Integer(), nullable=True),
        sa.Column('vehicles_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['homeworld'], ['planets.planet_id'], ),
        sa.ForeignKeyConstraint(['vehicles_id'], ['vehicles.vehicle_id'], ),
        sa.PrimaryKeyConstraint('character_id')
    )
    op.create_table('planets',
        sa.Column('planet_id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=200), nullable=True),
        sa.Column('climate', sa.String(length=200), nullable=True),
        sa.Column('terrain', sa.String(length=200), nullable=True),
        sa.Column('population', sa.Integer(), nullable=True),
        sa.Column('diameter', sa.Integer(), nullable=True),
        sa.Column('rotation_period', sa.Integer(), nullable=True),
        sa.Column('orbital_period', sa.Integer(), nullable=True),
        sa.Column('surface_water', sa.Integer(), nullable=True),
        sa.Column('residents', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['residents'], ['characters.character_id'], ),
        sa.PrimaryKeyConstraint('planet_id')
    )
    op.create_table('vehicles',
        sa.Column('vehicle_id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=200), nullable=True),
        sa.Column('model', sa.String(length=200), nullable=True),
        sa.Column('manufacturer', sa.String(length=200), nullable=True),
        sa.Column('cost_in_credits', sa.Integer(), nullable=True),
        sa.Column('crew', sa.Integer(), nullable=True),
        sa.Column('passengers', sa.Integer(), nullable=True),
        sa.Column('cargo_capacity', sa.Integer(), nullable=True),
        sa.Column('vehicle_class', sa.String(length=200), nullable=True),
        sa.Column('pilots', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['pilots'], ['characters.character_id'], ),
        sa.PrimaryKeyConstraint('vehicle_id')
    )
    # 'favorite' references all three tables above plus 'user'.
    op.create_table('favorite',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('user_id', sa.Integer(), nullable=True),
        sa.Column('fav_planet_id', sa.Integer(), nullable=True),
        sa.Column('fav_character_id', sa.Integer(), nullable=True),
        sa.Column('fav_vehicle_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['fav_character_id'], ['characters.character_id'], ),
        sa.ForeignKeyConstraint(['fav_planet_id'], ['planets.planet_id'], ),
        sa.ForeignKeyConstraint(['fav_vehicle_id'], ['vehicles.vehicle_id'], ),
        sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # 'favorite' is dropped first since it references the other three tables.
    op.drop_table('favorite')
    op.drop_table('vehicles')
    op.drop_table('planets')
    op.drop_table('characters')
    # ### end Alembic commands ###
|
from selenium import webdriver
import time
import logging
PATH = "C:\Program Files (x86)\chromedriver.exe"
logging.basicConfig(level=logging.INFO, filename="xboxStock.log")
#patches will return availability
def amazonPatch(link: str):
    """Check an Amazon product page and report whether the item appears in stock.

    Args:
        link: URL of the Amazon product page.

    Returns:
        False when the availability text is exactly "Currently unavailable.",
        True otherwise.
    """
    logging.info("================>Checking AMAZON patch")
    driverAmazonPatch = webdriver.Chrome(PATH)
    try:
        driverAmazonPatch.get(link)
        time.sleep(10)  # crude render wait; consider WebDriverWait instead
        availabilityAmazon = driverAmazonPatch.find_element_by_id("availability_feature_div")
        temp = availabilityAmazon.find_element_by_css_selector(".a-size-medium")
        # Capture the text once while the page is still live.
        availability_text = temp.text
        logging.info(f"================>Amazon availability on amazonPatch: {availability_text}")
        time.sleep(3)
        return False if availability_text == "Currently unavailable." else True
    finally:
        # Bug fix: the browser was never closed, leaking a Chrome process
        # (and its WebDriver) on every call.
        driverAmazonPatch.quit()
# -*- coding: utf-8 -*-
#==============================================================================
# Name: pubsub
# Purpose: Simple publish & subscribe in pure python
# Author: Zhen Wang
# Created: 23 Oct 2012
# Licence: MIT License
#==============================================================================
from six.moves.queue import Queue as queue, Empty
from threading import Lock as lock
from functools import partial
# Maximum queued messages per subscriber before publish() force-unsubscribes it.
MAX_QUEUE = 100
# Per-channel message ids wrap around at this bound.
MAX_ID = 2 ** 31
# When True, each published message carries a sequential per-channel id.
PUBLISH_ID = True

# channel name -> list of subscriber queues
channels = {}
# channel name -> id of the most recent message
count = {}
channels_lock = lock()
count_lock = lock()

class UnsubscribeException(Exception):
    """Raised by listen() when the queue received the force-unsubscribe sentinel."""
    pass
def subscribe(channel):
    """Register a new subscriber queue on *channel* and return it.

    The returned queue is augmented with ``listen``, ``unsubscribe`` and
    ``name`` convenience attributes.

    Raises:
        ValueError: if *channel* is falsy.
    """
    if not channel:
        raise ValueError('channel')
    if channel not in channels:
        # Fix: hold the lock with `with` so it is released even if an
        # exception occurs between acquire() and release().
        with channels_lock:
            # Double-check: another thread may have created it meanwhile.
            if channel not in channels:
                channels[channel] = []
    msg_q = queue()
    channels[channel].append(msg_q)
    msg_q.listen = partial(listen, msg_q)
    msg_q.unsubscribe = partial(unsubscribe, channel, msg_q)
    msg_q.name = channel
    return msg_q
def unsubscribe(channel, msg_q):
    """Remove *msg_q* from *channel*'s subscriber list.

    Removing a queue that is already gone is a no-op; a KeyError propagates
    for a channel that was never created.

    Raises:
        ValueError: if *channel* or *msg_q* is falsy.
    """
    if not channel:
        raise ValueError('channel')
    if not msg_q:
        raise ValueError('msg_q')
    try:
        channels[channel].remove(msg_q)
    except ValueError:
        # Queue already removed -- ignore.
        pass
def listen(msg_q, block=True, timeout=None):
    """Yield messages from *msg_q*; stop when the queue runs empty.

    The generator ends normally when get() raises Empty (non-blocking or
    timed-out wait).

    Raises:
        UnsubscribeException: on a None sentinel, which publish() enqueues
            just before force-unsubscribing an unconsumed queue.
    """
    while True:
        try:
            data = msg_q.get(block=block, timeout=timeout)
        except Empty:
            return
        if data is None:
            raise UnsubscribeException()
        yield data
def publish(channel, data):
    """
    Publish *data* to every subscriber of *channel*.

    Queues that have grown beyond MAX_QUEUE are assumed dead: they receive a
    None termination sentinel and are unsubscribed.

    :raises ValueError: if channel or data is falsy
    """
    if not channel:
        raise ValueError('channel')
    if not data:
        # NOTE(review): falsy payloads (0, '', {}) are rejected too — kept
        # as-is for backward compatibility.
        raise ValueError('data')
    if channel not in channels:
        # BUG FIX: use `with` so the lock is released even on exception.
        with channels_lock:
            # Double-checked creation: re-test under the lock
            if channel not in channels:
                channels[channel] = []
    # Update message counts
    if PUBLISH_ID:
        with count_lock:
            if channel not in count:
                count[channel] = 0
            else:
                count[channel] = (count[channel] + 1) % MAX_ID
    else:
        count[channel] = 0
    # ID of current message
    _id = count[channel]
    # BUG FIX: iterate over a snapshot — unsubscribe() removes entries from
    # the live list, which made the original loop skip the subscriber that
    # followed every removed queue.
    for q in list(channels[channel]):
        # Remove queues that are not being consumed
        if q.qsize() > MAX_QUEUE:
            # Send termination msg and unsub
            q.put(None, block=False)
            unsubscribe(channel, q)
            continue
        q.put({'data': data, 'id': _id}, block=False)
# genqueue.py
#
# Generate a sequence of items that put onto a queue
def sendto_queue(source, thequeue):
    """Drain *source* into *thequeue*, then push the StopIteration sentinel
    to signal completion to genfrom_queue()."""
    for element in source:
        thequeue.put(element)
    thequeue.put(StopIteration)
def genfrom_queue(thequeue):
    """Yield items from *thequeue* until the StopIteration sentinel arrives."""
    item = thequeue.get()
    while item is not StopIteration:
        yield item
        item = thequeue.get()
# Example
if __name__ == '__main__':

    # A consumer. Prints out 404 records.
    def print_r404(log_q):
        log = genfrom_queue(log_q)
        r404 = (r for r in log if r['status'] == 404)
        for r in r404:
            # BUG FIX: Python 2 `print x, y` statement -> print() function
            print(r['host'], r['datetime'], r['request'])

    # BUG FIX: the Queue module was renamed to `queue` in Python 3
    import queue, threading
    from follow import *
    from apachelog import *

    log_q = queue.Queue()
    log_thr = threading.Thread(target=print_r404,
                               args=(log_q,))
    # setDaemon() is deprecated; assign the daemon attribute instead
    log_thr.daemon = True
    log_thr.start()

    # Feed the consumer thread
    lines = follow(open("run/foo/access-log"))
    log = apache_log(lines)
    sendto_queue(log, log_q)
|
#!/usr/bin/env python
# coding=utf-8
import subprocess
import time
class BrightnessScale:
    """Adjust screen brightness/backlight via xrandr depending on AC power,
    for the monitor reported by `xrandr -q`."""

    def __init__(self):
        # get active monitor and current brightness
        self.monitor = self.getActiveMonitor()
        self.currB = self.getCurrentBrightness()

    def initStatus(self):
        """Return True when both monitor name and brightness were detected."""
        if self.monitor == "" or self.currB == "":
            return False
        return True

    def getActiveMonitor(self):
        """Return the name of the first connected monitor, or "" if none."""
        # BUG FIX: check_output returns bytes on Python 3, which broke the
        # str comparison and split below; universal_newlines=True decodes it.
        monitor = subprocess.check_output(
            "xrandr -q | grep ' connected' | cut -d ' ' -f1",
            shell=True, universal_newlines=True)
        if monitor != "":
            monitor = monitor.split('\n')[0]
        return monitor

    def getCurrentBrightness(self):
        """Return the current brightness as an integer percentage, or ""."""
        currB = subprocess.check_output(
            "xrandr --verbose | grep -i brightness | cut -f2 -d ' '",
            shell=True, universal_newlines=True)
        if currB != "":
            currB = currB.split('\n')[0]
            currB = int(float(currB) * 100)
        else:
            currB = ""
        return currB

    def ac_daemon(self):
        """Return 1 when acpi reports AC power 'on-line', else 0."""
        estado = subprocess.check_output("acpi -a | cut -d ':' -f2",
                                         shell=True, universal_newlines=True)
        if estado.find("on-line") != -1:
            ac_on = 1
        else:
            ac_on = 0
        return ac_on

    def bjr_brillo(self, op):
        """Apply a brightness preset: op==1 dims (battery), anything else
        brightens (AC power)."""
        if op == 1:
            newBrightness = float(90) / 100
            newBacklight = int(25)
        else:
            newBrightness = float(100) / 100
            newBacklight = int(92)
        cmdb = "xrandr --output %s --brightness %.2f" % (self.monitor, newBrightness)
        cmdB = "xrandr --output %s --set BACKLIGHT %d" % (self.monitor, newBacklight)
        cmdbStatus = subprocess.check_output(cmdb, shell=True)
        cmdBStatus = subprocess.check_output(cmdB, shell=True)
if __name__ == "__main__":
brcontrol=BrightnessScale ()
while brcontrol.initStatus():
while True:
if brcontrol.ac_daemon() == 0:
brcontrol.bjr_brillo(1)
break
else:
brcontrol.bjr_brillo(2)
|
import numpy as np
def calculate(lista):
    """
    Compute summary statistics of a 3x3 matrix built from *lista*.

    :param lista: sequence of exactly nine numbers (row-major order)
    :return: dict mapping each statistic name to
             [per-column values (axis 0), per-row values (axis 1), flattened value]
    :raises ValueError: if lista does not contain nine numbers
    """
    if len(lista) != 9:
        raise ValueError("List must contain nine numbers.")
    matrix = np.reshape(lista, (3, 3))

    def _stats(func):
        # [column-wise, row-wise, flattened] — same ordering the original
        # produced with its hand-written per-row/per-column lists.
        return [func(matrix, axis=0).tolist(),
                func(matrix, axis=1).tolist(),
                func(matrix).item()]

    return {
        'mean': _stats(np.mean),
        'variance': _stats(np.var),
        'standard deviation': _stats(np.std),
        'max': _stats(np.max),
        'min': _stats(np.min),
        'sum': _stats(np.sum),
    }
# Smoke-test invocation; the returned dict is discarded (presumably leftover
# from development — TODO confirm whether this top-level call is still needed).
calculate([0,1,2,3,4,5,6,7,8])
|
"""
Bithumb Auto Trading Program
with GUI
Byunghyun Ban
https://github.com/needleworm
"""
import sys
from PyQt5 import QtGui
from PyQt5 import QtWidgets as Q
from PyQt5.QtCore import *
import time
from pybithumb import Bithumb as B
doing_job = False
from ui import Ui_Dialog
ui_class = Ui_Dialog
coin_list = ["-"] + B.get_tickers()
class autoTrader(QThread):
    """Worker thread that buys below buyPrice and sells above sellPrice on
    Bithumb while the module-global doing_job flag is set.

    Emits human-readable status lines on the text_out signal.
    """
    text_out = pyqtSignal(str)
    def __init__(self, access_token, secret_key, coin, buyPrice, sellPrice):
        super().__init__()
        self.access_token = access_token
        self.secret_key = secret_key
        self.coin = coin
        self.buyPrice = buyPrice
        self.sellPrice = sellPrice
    def sell_all(self, trader):
        """Place a limit sell for the whole coin balance at the current price.

        Returns (status message, total KRW value), or (None, None) when the
        price is below the sell threshold or there is nothing to sell.
        """
        qty = trader.get_balance(self.coin)
        price = B.get_current_price(self.coin)
        if price < self.sellPrice:
            return None, None
        if qty <= 0:
            return None, None
        trader.sell_limit_order(self.coin, price, qty)
        QtGui.QGuiApplication.processEvents()
        # Format quantity with at most 6 fractional digits for display
        splt = str(qty).split(".")
        qtyStr = splt[0] + "." + splt[-1][:6]
        return "TRY> Coin Limit sell\t" + str(time.ctime()) + "\nPrice: " + str(price) + "\tQuantity: " + qtyStr + "\n", price * qty
    def buy_all(self, trader):
        """Place a limit buy spending the whole KRW balance at the current price.

        Returns (status message, total KRW value), or (None, None) when the
        price is above the buy threshold or the resulting quantity is zero.
        """
        krw = trader.get_balance("KRW")
        price = B.get_current_price(self.coin)
        if price > self.buyPrice:
            return None, None
        qty = krw / price
        # Truncate to the exchange's 0.0001 order-size granularity
        qty -= qty % 0.0001
        if qty <= 0:
            return None, None
        trader.buy_limit_order(self.coin, price, qty)
        QtGui.QGuiApplication.processEvents()
        splt = str(qty).split(".")
        qtyStr = splt[0] + "." + splt[-1][:6]
        return "TRY> Coin Limit Buy\t" + str(time.ctime()) + "\nPrice: " + str(price) + "\tQuantity: " + qtyStr + "\n", price * qty
    def run(self):
        """Main polling loop; runs while the global doing_job flag is True.

        latest_message holds the first 20 chars of the last emitted status so
        repeated identical messages are suppressed.
        """
        global doing_job, latest_message
        if doing_job:
            self.text_out.emit("Auto Trading Bot Initiated.")
            self.text_out.emit("Target Coin : " + self.coin + "\n")
            latest_message = ""
            bithumb = B(self.access_token, self.secret_key)
        else:
            self.text_out.emit("Stop Auto Trading.\n\n")
        while doing_job:
            QtGui.QGuiApplication.processEvents()
            lastBuyWon = None
            lastSellWon = None
            coinPrice = B.get_current_price(self.coin)
            if coinPrice < self.buyPrice:
                message, lastBuyWon = self.buy_all(bithumb)
                if not message:
                    continue
                elif message[:20] == latest_message:
                    # Duplicate of the previous status: skip emitting
                    continue
                elif message:
                    self.text_out.emit(message)
                    latest_message = message[:20]
                    QtGui.QGuiApplication.processEvents()
            elif coinPrice > self.sellPrice:
                message, lastSellWon = self.sell_all(bithumb)
                if not message:
                    continue
                elif message[:20] == latest_message:
                    continue
                elif message:
                    self.text_out.emit(message)
                    QtGui.QGuiApplication.processEvents()
                    latest_message = message[:20]
            if lastSellWon and lastBuyWon:
                self.text_out.emit("Income : " + str(lastSellWon - lastBuyWon) + "₩\n\n")
                QtGui.QGuiApplication.processEvents()
            time.sleep(0.5)
class SetCoin(QThread):
    """Tiny QThread helper that pushes a price string to a Qt line-edit via
    the text_out signal (used to pre-fill suggested buy/sell prices)."""
    text_out = pyqtSignal(str)
    def __init__(self):
        super().__init__()
    def change(self, price):
        """Emit *price* (already formatted as a string) on text_out."""
        self.text_out.emit(price)
        QtGui.QGuiApplication.processEvents()
class WindowClass(Q.QMainWindow, ui_class):
    """Main window: collects API credentials and target prices, and
    starts/stops the autoTrader worker thread."""

    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.doing_job = False
        self.coin = ""
        self.access_token = ""
        self.secret_key = ""
        self.buyPrice = 0
        self.sellPrice = 0
        # Populate the coin combo box
        self.comboBox.addItems(coin_list)
        # Refresh the selected coin (and suggested prices) when it changes
        self.comboBox.currentIndexChanged.connect(self.set_coin)
        # Toggle trading when the button is pressed
        self.pushButton.clicked.connect(self.button_pushed)

    def button_pushed(self):
        """Validate the form, toggle the global doing_job flag and launch a
        new autoTrader worker."""
        if self.coin == "-":
            return
        # Read user inputs
        self.access_token = self.lineEdit.text().strip()
        self.secret_key = self.lineEdit_2.text().strip()
        self.buyPrice = int(self.lineEdit_3.text())
        self.sellPrice = int(self.lineEdit_4.text())
        self.checked = self.checkBox.isChecked()
        if not (self.access_token and self.secret_key and self.buyPrice and self.sellPrice and self.coin and self.checked):
            return
        global doing_job
        doing_job = not doing_job
        if doing_job:
            self.pushButton.setText("Stop Auto Trading")
        else:
            self.pushButton.setText("Start Auto Trading")
        # Run the trader on its own thread
        self.Bot = autoTrader(self.access_token, self.secret_key, self.coin, self.buyPrice, self.sellPrice)
        self.Bot.text_out.connect(self.textBrowser.append)
        QtGui.QGuiApplication.processEvents()
        # BUG FIX: the original called self.Bot.run(), which executed the
        # trading loop synchronously on the GUI thread and froze the window.
        # start() schedules run() on the QThread's own thread.
        self.Bot.start()

    def set_coin(self):
        """Update the selected coin and pre-fill suggested buy (-0.4%) and
        sell (+0.4%) prices from the current market price."""
        self.coin = self.comboBox.currentText()
        if self.coin != "-":
            currentPrice = B.get_current_price(self.coin)
        else:
            currentPrice = 0
        self.coinsetter1 = SetCoin()
        self.coinsetter1.text_out.connect(self.lineEdit_3.setText)
        self.coinsetter1.change(str(int(currentPrice * 0.996)))
        self.coinsetter2 = SetCoin()
        self.coinsetter2.text_out.connect(self.lineEdit_4.setText)
        self.coinsetter2.change(str(int(currentPrice * 1.004)))
if __name__ == "__main__":
app = Q.QApplication(sys.argv)
myWindow = WindowClass()
myWindow.show()
app.exec_()
sys.exit(app.exec_)
|
num=int(input("Enter the value:"))
if num>1:
for i in range(2,num):
if (num%i==0):
print("It is not a prime number.")
else:
print("It is a prime number.")
else:
print(num,'is not a prime number') |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def list_productor(mean, dis, number):
    """Draw *number* samples from a normal distribution centred on *mean*.

    NOTE(review): np.random.normal's second argument is the standard
    deviation, yet *dis* is squared here — looks like variance/std-dev were
    conflated; behavior kept as-is.
    """
    scale = dis * dis
    return np.random.normal(mean, scale, number)
# Generate four synthetic score samples and compare them with a boxplot.
hausdorff = list_productor(8531, 956, 100)
city_block = list_productor(8631, 656, 100)
wasserstein = list_productor(8731, 1056, 100)
kl_divergence = list_productor(8831, 756, 100)

data = pd.DataFrame({
    "Hausdorff": hausdorff,
    "City-block": city_block,
    "Wasserstein": wasserstein,
    "KL-divergence": kl_divergence,
})

data.boxplot()
plt.ylabel("ARI")
plt.xlabel("Dissimilarity Measures")
plt.show()
import uuid
class MessageTalk():
    """
    Object to make parsing talk messages easier, where talk messages are
    defined as custom messages published to a set of topics
    """
    # pylint: disable=too-few-public-methods
    def __init__(self, from_id, origin_id, topics, data, message_id):
        # pylint: disable=too-many-arguments
        self.msg_type = "talk"
        self.from_id = from_id
        self.origin_id = origin_id
        self.topics = topics
        self.data = data
        self.message_id = message_id

    def to_str(self):
        """
        Convert to string: type, from, origin and id each on their own line,
        then the comma-joined topics, then the payload.
        :return: MessageTalk object in string representation
        """
        header = [self.msg_type, self.from_id, self.origin_id, self.message_id]
        return '\n'.join(header) + '\n' + ','.join(self.topics) + '\n' + self.data
class MessageSub():
    """
    Object to make parsing subscription messages easier, where subscription
    messages are defined as indicating the topics a node wishes to subscribe to
    or unsubscribe from
    """
    # pylint: disable=too-few-public-methods
    def __init__(self, from_id, origin_id, subs_map, message_id):
        self.msg_type = "subscription"
        self.from_id = from_id
        self.origin_id = origin_id
        self.subs_map = subs_map
        self.message_id = message_id

    def to_str(self):
        """
        Convert to string: type, from, origin and id each on their own line,
        then one "sub:<topic>" or "unsub:<topic>" line per map entry.
        :return: MessageSub object in string representation
        """
        lines = [self.msg_type, self.from_id, self.origin_id, self.message_id]
        for topic, wants_sub in self.subs_map.items():
            prefix = "sub:" if wants_sub else "unsub:"
            lines.append(prefix + topic)
        return '\n'.join(lines)
def create_message_talk(msg_talk_as_str):
    """
    Create a MessageTalk object from a MessageTalk string representation
    :param msg_talk_as_str: a MessageTalk object in its string representation
    :return: MessageTalk object
    """
    lines = msg_talk_as_str.split('\n')
    # Line 0 is the message type; lines 1-3 are from/origin/id, line 4 the
    # comma-joined topics and line 5 the payload.
    topics = lines[4].split(',')
    return MessageTalk(lines[1], lines[2], topics, lines[5], lines[3])
def create_message_sub(msg_sub_as_str):
    """
    Create a MessageSub object from a MessageSub string representation
    :param msg_sub_as_str: a MessageSub object in its string representation
    :return: MessageSub object
    """
    lines = msg_sub_as_str.split('\n')
    subs_map = {}
    # Lines from index 4 onward look like "sub:<topic>" or "unsub:<topic>"
    for entry in lines[4:]:
        parts = entry.split(":")
        subs_map[parts[1]] = (parts[0] == "sub")
    return MessageSub(lines[1], lines[2], subs_map, lines[3])
def generate_message_id():
    """
    Generate a unique message id
    :return: message id (stringified UUID1)
    """
    return str(uuid.uuid1())
|
from sqlalchemy.orm import Session
from sqlalchemy.sql.expression import null
from sqlalchemy.sql.sqltypes import Boolean
from . import models
from ..schemas import schemas
def get_job(db: Session, job_id: int):
    """Fetch a single Job by primary key, or None if it does not exist."""
    job_query = db.query(models.Job).filter(models.Job.id == job_id)
    return job_query.first()
def get_jobs(db: Session):
    """Return every Job row in the table."""
    jobs_query = db.query(models.Job)
    return jobs_query.all()
def get_jobs_by_job_done(db: Session, job_done: Boolean):
    """Return all Jobs whose job_done flag matches *job_done*."""
    filtered = db.query(models.Job).filter(models.Job.job_done == job_done)
    return filtered.all()
def create_job(db: Session, jobIn: schemas.JobCreate):
    """Persist a new Job named after *jobIn* and return the refreshed row."""
    new_job = models.Job(name=jobIn.name)
    db.add(new_job)
    db.commit()
    db.refresh(new_job)
    return new_job
def update_job(db: Session, job: schemas.JobUpdate):
    """
    Apply a partial update to the Job identified by job.job_id.

    Only fields present (non-None) in *job* are applied. When a job is
    marked done, its employee assignment is released.
    """
    j = db.query(models.Job).filter(models.Job.id == job.job_id).first()
    # PEP 8: compare to None with `is not`, not `!=`
    if job.employee_id is not None:
        e = db.query(models.Employee).filter(models.Employee.id == job.employee_id).first()
        j.employee = e
    if job.job_done is not None:
        j.job_done = job.job_done
        if job.job_done:
            # A finished job no longer needs an assignee
            j.employee = None
    db.add(j)
    db.commit()
    db.refresh(j)
    return j
import json
import os
from django.conf import settings
from django.template.loader import render_to_string
URL_ATTR = 'urlName'
CONFIG_ATTR = 'config'
ROUTE_ATTR = 'route'
JSON_TEMPLATE = getattr(settings, 'ROUTES_JSON_TEMPLATE', 'routes.js')
JS_TEMPLATE = getattr(settings, 'ROUTES_FULL_TEMPLATE', 'full-routes.js')
STATIC_FILE = getattr(settings, 'ROUTES_STATIC', 'routing.js')
def build_js_route_map():
    """
    Generates a mapping of the declared routes from
    the template 'js_routes.json' with their config
    parameters. Used by js_routing.
    """
    rendered = render_to_string(JSON_TEMPLATE)
    parsed = json.loads(rendered.strip())
    mapping = {}
    for routes in (entry["routes"] for entry in parsed if "routes" in entry):
        for obj in routes:
            # Prefer the urlName key when present, otherwise the route key
            key = URL_ATTR if URL_ATTR in obj else ROUTE_ATTR
            mapping[obj[key]] = obj[CONFIG_ATTR]
    return mapping
def get_routing_js():
    """
    Renders the full JS template
    """
    context = {'json_template': JSON_TEMPLATE}
    return render_to_string(JS_TEMPLATE, context)
def build_js_file():
    """
    Renders the full JS template and writes the output to the static file;
    silently does nothing when the static js directory is absent.
    """
    content = get_routing_js()
    js_dir = os.path.join(settings.DEV_STATIC_ROOT, 'js')
    if not os.path.exists(js_dir):
        return
    target = os.path.join(js_dir, STATIC_FILE)
    with open(target, 'w') as fp:
        fp.write(content)
|
num1 = int(input("Digite o Primeiro Numero: "))
num2 = int(input("Digite o Segundo Numero: "))
soma = num1+num2
print("A soma = ", soma) |
"""
DCC XML Generator Functions
H3A MUX
"""
import os
import dcc
import dccxml
import shutil
wdr_mode = 0
def GenH3AMUXParams(handle, h3amux_params, cls_id):
    """Write the H3A-mux LUT struct body (enable flag, LUT count and three
    LUT includes) to *handle*, choosing the include file from the
    module-global wdr_mode (1 = WDR 16->10-bit, 0 = linear 12->10-bit)."""
    handle.write(' 1, //enable\n')
    handle.write(' 1, //number of LUTs\n')
    handle.write(' {\n')
    # NOTE(review): if wdr_mode is anything other than 0 or 1 no LUT rows are
    # emitted at all — confirm that cannot happen upstream.
    if(wdr_mode == 1):
        handle.write(' {#include "lut_h3a_16b_to_10b_g07.txt"},\n')
        handle.write(' {#include "lut_h3a_16b_to_10b_g07.txt"},\n')
        handle.write(' {#include "lut_h3a_16b_to_10b_g07.txt"},\n')
    elif(wdr_mode == 0):
        handle.write(' {#include "lut_h3a_12b_to_10b.txt"},\n')
        handle.write(' {#include "lut_h3a_12b_to_10b.txt"},\n')
        handle.write(' {#include "lut_h3a_12b_to_10b.txt"},\n')
    handle.write(' },\n')
def GenH3AMUXXML(directory, filebase, params, h3amux_params):
    """Generate the H3A-mux DCC XML file for a sensor and copy the matching
    LUT table into *directory*."""
    if (os.path.exists(directory) == False):
        print ('Creating directory: %s\n' %directory)
        try:
            os.makedirs(directory)
        except OSError as err:
            # BUG FIX: the original called utils.error() but `utils` is never
            # imported in this module, so the error path raised NameError.
            # Report and continue, matching the intended skip=True behavior.
            print('ERROR: %s: %s' % (err.strerror, err.filename))
    filename = os.path.join(directory, '%s_%s.xml' %(params['SENSOR'], filebase))
    print ('Creating XML File: %s\n' %filename)
    # Select the LUT set for the whole module (also read by GenH3AMUXParams)
    global wdr_mode
    if(h3amux_params['WDR_MODE'] == 'wdr'):
        wdr_mode = 1
    else:
        wdr_mode = 0
    module_params = {}
    module_params['NAME'] = 'VISS_H3A_MUX_LUTS_CFG'
    module_params['STRUCT_NAME'] = 'iss_h3a_mux_luts'
    module_params['DCC_ID'] = dcc.DCC_ID_H3A_MUX
    module_params['FUNC_GENPARAMS'] = GenH3AMUXParams
    handle = dccxml.OpenFile(filename)
    dccxml.GenHeader(handle, params, module_params)
    # Create the DCC Structure definition
    handle.write(' <!--=======================================================================-->\n')
    handle.write(' <typedef>\n')
    handle.write(' <%s type="struct">\n' %module_params['STRUCT_NAME'])
    handle.write(' <enable type="uint16"> </enable> <!-- enable -->\n')
    handle.write(' <num_luts type="uint16"> </num_luts> <!-- number of LUTs (0 ~ 3) -->\n')
    handle.write(' <h3a_mux_luts type="uint16[3][639]"> </h3a_mux_luts> <!-- H3A LUTs -->\n')
    handle.write(' </%s>\n' %module_params['STRUCT_NAME'])
    handle.write(' </typedef>\n')
    handle.write(' <!--=======================================================================-->\n')
    # Create a DCC Use Case per configured entry
    for i in range(h3amux_params['NUM_USE_CASE']):
        dccxml.AddUseCase(handle, module_params, h3amux_params['USE_CASE'][i])
    # Ship the LUT table matching the WDR mode next to the generated XML
    if(wdr_mode==1):
        r_table = '../tables/lut_h3a_16b_to_10b_g07.txt'
    else:
        r_table = '../tables/lut_h3a_12b_to_10b.txt'
    shutil.copy(r_table, directory)
    dccxml.GenFooter(handle, module_params)
    dccxml.CloseFile(handle)
|
# Common variables used in the scripts
import os
import datetime
import sys
import subprocess
import shutil
def get_parent_path(path):
    """Return the absolute path of *path*'s parent directory."""
    return os.path.dirname(os.path.abspath(path))
root = get_parent_path(get_parent_path(os.path.realpath(__file__)))
def python():
    """Name of the default python interpreter used to launch scripts."""
    return "python"
def python3():
    """Name of the python3 interpreter used to launch scripts."""
    return "python3"
# scripts
scripts_root = os.path.join(root, "scripts")
#tools
tools_root = os.path.join(root, "tools")
rf_distance_tool = os.path.join(tools_root, "trees", "rf_distance.py")
rf_cells_tool = os.path.join(tools_root, "families", "rf_cells.py")
treedist_R_script = os.path.join(tools_root, "families", "treedist.R")
getrootedkf_R_script = os.path.join(tools_root, "trees", "get_rooted_kf.R")
# programs
programs_root = os.path.join(root, "programs")
# datasets
datasets_root = os.path.join(root, "datasets")
# results
results_root = os.path.join(root, "results")
# install
installer_root = os.path.join(root, "installer")
historic = os.path.join(root, "historic.txt")
fast_github_root = "/hits/fast/cme/benoit/github"
fast_dataset_archive = os.path.join(fast_github_root, "BenoitDatasets", "families")
# externals
github_root = os.path.join(root, "..")
benoit_datasets_root = os.path.join(fast_github_root, "BenoitDatasets")
#benoit_datasets_root = os.path.join(github_root, "BenoitDatasets")
families_datasets_root = os.path.join(benoit_datasets_root, "families")
benoit_datasets_root_no_fast = os.path.join(github_root, "BenoitDatasets")
families_datasets_root_no_fast = os.path.join(benoit_datasets_root_no_fast, "families")
raw_datasets_root = os.path.join(benoit_datasets_root, "raw_data")
dna4_model_samples = os.path.join(benoit_datasets_root, "DNA_models_sample")
fasttree_exec = os.path.join(github_root, "FastTree", "FastTree")
plausiblerax_exec = os.path.join(github_root, "PlausibleRax", "build", "bin", "plausiblerax")
quartet_counter_exec = os.path.join(github_root, "PlausibleRax", "build", "bin", "quartet_counter")
fastme_exec = os.path.join(github_root, "FastME", "tarball", "fastme-2.1.6.3", "binaries", "fastme-2.1.6.2-linux64")
noah_exec = os.path.join(github_root, "neighbo-rs", "target", "release", "neighbo-rs")
mad_exec = os.path.join(github_root, "MADroot", "madRoot")
freegenetree_exec = os.path.join(github_root, "FreeTree", "build", "bin", "freegenetree")
treerecs_root = os.path.join(github_root, "Treerecs")
treerecs_exec = os.path.join(treerecs_root, "build", "bin", "Treerecs")
joint_likelihood_evaluator_exec = os.path.join(treerecs_root, "build", "bin", "misc", "JointLikelihoodEvaluator")
treerecs_joint_search_exec = os.path.join(treerecs_root, "build", "bin", "misc", "jointTreeSearch")
treefix_exec = "treefixDTL"
joint_search_root = os.path.join(github_root, "GeneRax")
workshop_root = os.path.join(github_root, "GeneRaxWorkshop")
alerax_root = os.path.join(github_root, "AleRax")
joint_search_clone_root = os.path.join(github_root, "GeneRaxClone")
generax_exec = os.path.join(joint_search_root, "build", "bin", "generax")
generaxclone_exec = os.path.join(joint_search_clone_root, "build", "bin", "generax")
treecombine_exec = os.path.join(joint_search_root, "build", "bin", "treecombination")
splitsearch_exec = os.path.join(joint_search_root, "build", "bin", "speciessplitsearch")
minibme_exec = os.path.join(joint_search_root, "build", "bin", "asteroid")
asteroid_root = os.path.join(github_root, "Asteroid")
asteroid_exec = os.path.join(asteroid_root, "build", "bin", "asteroid")
bipstep_exec = os.path.join(asteroid_root, "build", "bin", "bipstep")
concasteroid_exec = os.path.join(asteroid_root, "build", "bin", "concasteroid")
alegenerax_exec = os.path.join(alerax_root, "build", "bin", "alerax")
njrax_exec = os.path.join(joint_search_root, "build", "bin", "njrax")
speciesrax_exec = os.path.join(joint_search_root, "build", "bin", "speciesrax")
generax_gprof_exec = os.path.join(joint_search_root, "gprof_build", "bin", "generax")
generax_scalasca_exec = os.path.join(joint_search_root, "scalasca_build", "bin", "generax")
joint_search_exec = os.path.join(joint_search_root, "build", "bin", "JointSearch")
joint_search_gprof_exec = os.path.join(joint_search_root, "gprof_build", "bin", "JointSearch")
joint_search_scalasca_exec = os.path.join(joint_search_root, "scalasca_build", "bin", "JointSearch")
joint_search_lib = os.path.join(joint_search_root, "build_lib", "src", "JointSearch", "libJointSearch.so")
pargenes_root = os.path.join(github_root, "pargenes")
pargenes_script = os.path.join(pargenes_root, "pargenes", "pargenes-hpc.py")
pargenes_script_laptop = os.path.join(pargenes_root, "pargenes", "pargenes.py")
dicotree_script = os.path.join(github_root, "DiCoTree", "dicotree", "dicotree.py")
pargenes_script_debug = os.path.join(pargenes_root, "pargenes", "pargenes-hpc-debug.py")
mpischeduler_root = os.path.join(github_root, "MPIScheduler")
mpischeduler_exec = os.path.join(mpischeduler_root, "build", "mpi-scheduler")
rfdistance_exec = os.path.join(github_root, "RaxmlRFDistance", "bin", "rfdistance-mpi")
raxml_root = os.path.join(github_root, "raxml-ng")
raxml_exec = os.path.join(raxml_root, "bin", "raxml-ng-mpi")
raxml_exec_no_mpi = os.path.join(raxml_root, "bin", "raxml-ng")
gensamples_exec = os.path.join(github_root, "DeepRax", "build", "bin", "gensamples")
deeprax_exec = os.path.join(github_root, "DeepRax", "build", "bin", "deeprax")
rf_root = os.path.join(github_root, "MPIRaxmlRFDistance")
rf_exec = os.path.join(rf_root, "bin", "rfdistance")
raxml_nompi_exec = os.path.join(raxml_root, "bin", "raxml-ng")
oldraxml_root = os.path.join(github_root, "standard-RAxML")
oldraxml_exec = os.path.join(oldraxml_root, "raxmlHPC-AVX")
bigdatasets_root = os.path.join(github_root, "datasets")
phyldog_root = os.path.join(github_root, "PHYLDOG")
phyldog_light_exec = os.path.join(phyldog_root, "build", "bin", "phyldog_light")
phyldog_exec = os.path.join(phyldog_root, "build", "bin", "phyldog")
zombi_script = os.path.join(github_root, "ZOMBI", "Zombi.py")
zombi_sample_script = os.path.join(github_root, "ZOMBI", "SpeciesSampler.py")
zombi_ratecusto_script = os.path.join(github_root, "ZOMBI", "RateCustomizer.py")
jprime_jar = os.path.join(github_root, "jprime", "jprime-0.3.6.jar")
jprime_deleterious_jar = os.path.join(github_root, "jprime", "jprime_0.3.5c.jar")
ale_root = os.path.join(github_root, "ALE")
ale_simul_exec = os.path.join(ale_root, "build", "bin", "simulation")
seq_gen_exec = os.path.join(github_root, "Seq-Gen-1.3.4", "source", "seq-gen")
notung_jar = os.path.join(github_root, "Notung-2.9", "Notung-2.9.jar")
mafft_exec = os.path.join(github_root, "mafft", "bin", "mafft")
stag_script = os.path.join(github_root, "STAG", "stag", "stag.py")
phylobayes_exec = os.path.join(github_root, "phylobayes4.1c", "data", "pb")
pythia_exec = "pythia"
exabayes_exec = os.path.join(github_root, "exabayes-1.5", "yggdrasil")
mrbayes_exec = os.path.join(github_root, "MrBayes", "src", "mb")
ale_observe_exec = os.path.join(github_root, "ALE", "build", "bin", "ALEobserve")
ale_ml_exec = os.path.join(github_root, "ALE", "build", "bin", "ALEml_undated")
ale_ml_dated_exec = os.path.join(github_root, "ALE", "build", "bin", "ALEml")
raxmlgrove_exec = os.path.join(github_root, "RAxMLGroveScripts", "org_script.py")
deco_exec = os.path.join(github_root, "DeCo", "DeCo")
decostar_exec = os.path.join(github_root, "DeCoSTAR", "bin/DeCoSTAR")
guenomu_exec = os.path.join(github_root, "guenomu", "src", "guenomu")
eccetera_root = os.path.join(github_root, "ecceTERA")
eccetera_exec = os.path.join(eccetera_root, "build", "bin", "ecceTERA")
lensim_exec = os.path.join(github_root, "lenard", "dtl_simulator.py")
simphy_exec = os.path.join(github_root, "SimPhy_1.0.2", "bin", "simphy")
simphy_indelible_wrapper = os.path.join(github_root, "SimPhy_1.0.2", "scripts", "INDELIble_wrapper.pl")
njstm_script = os.path.join(github_root, "NJstM", "njstm.r")
duptree_exec = os.path.join(github_root, "duptree", "duptree")
stride_script = os.path.join(github_root, "STRIDE", "stride", "stride.py")
fastmulrfs_preprocess = os.path.join(github_root, "fastmulrfs", "python-tools", "preprocess_multrees_v3.py")
fastrfs_exec = os.path.join(github_root, "FastRFS", "build", "FastRFS")
disco_script = os.path.join(github_root, "DISCO", "disco.py")
astral_jar = os.path.join(github_root, "ASTRAL", "Astral", "astral.jar")
astralmp_jar = os.path.join(github_root, "ASTRAL-MP", "Astral", "astral.jar")
astralpro_root = os.path.join(github_root, "A-pro")
astralpro_jar = os.path.join(github_root, "A-pro", "ASTRAL-MP", "astral.1.1.2.jar")
astral2_exec = os.path.join(github_root, "ASTER", "bin", "astral")
astralpro2_exec = os.path.join(github_root, "ASTER", "bin", "astral-pro")
astrid_exec = os.path.join(github_root, "ASTRID", "bazel-bin", "src", "ASTRID")
stells_exec = os.path.join(github_root, "STELLS2", "STELLSv2.1.0.1", "stells")
iqtree_root = os.path.join(github_root, "IQ-TREE")
iqtree_exec = os.path.join(iqtree_root, "terragen", "build", "iqtree2")
genetrius_exec = os.path.join(iqtree_root, "terragen", "build", "iqtree2")
#astrid_exec = os.path.join(github_root, "ASTRID", "bazel-bin", "src", "ASTRID-phydstar")
prepare_fastrfs_script = os.path.join(tools_root, "rfs", "prepareTrees.py")
rangerdtl_exec = os.path.join(github_root, "RangerDTL", "ranger-dtl-U.linux")
paul_exec = os.path.join(github_root, "MasterThesis", "njst", "main")
stamatak_tests_dir = os.path.join(github_root, "test-Datasets")
grf_eval = os.path.join(github_root, "GiRaF", "build", "bin", "eval")
# constants
mpi_scheduler_heuristic = "--split-scheduler"
generax_selector_candidates = os.path.join(tools_root, "families", "candidates.txt")
ensembl_plants_species_dict = os.path.join(tools_root, "families", "ensembl_plant_dict.txt")
ensembl_plants_species_blacklist = os.path.join(tools_root, "families", "ensembl_plant_blacklist.txt")
# utils
def get_git_info(repo_path):
    """Return a human-readable summary (remote URL, branch, commit, diff)
    of the git repository at *repo_path*."""
    initial_dir = os.getcwd()
    os.chdir(repo_path)
    try:
        # str(...)[2:-3] strips the b'...\n' wrapper from the bytes repr
        repo = str(subprocess.check_output(["git", "config", "--get", "remote.origin.url"]))[2:-3]
        branch = str(subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"]))[2:-3]
        commit = str(subprocess.check_output(["git", "rev-parse", "HEAD"]))[2:-3]
        diff = "\t\t" + subprocess.check_output(["git", "diff"]).decode("ASCII").replace("\n", "\n\t\t")
    finally:
        # BUG FIX: the original left the process in repo_path if any git
        # command failed; always restore the working directory.
        os.chdir(initial_dir)
    result = "Git repo: " + repo + "\nGit branch: " + branch + "\nGit commit: " + commit + "\n"
    result += "diff:\n " + diff + "\n"
    return result
def write_results_info(resultsdir, msg):
    """Write a results.info file into *resultsdir* recording start time,
    command line, git state of the current directory, and *msg*."""
    filename = os.path.join(resultsdir, "results.info")
    sections = [
        "Start: " + str(datetime.datetime.now()) + "\n",
        "Command: " + ' '.join(sys.argv) + "\n",
        "\n",
        "Experiment git: \n",
        get_git_info("."),
        "\n",
        msg,
    ]
    with open(filename, "w") as writer:
        writer.writelines(sections)
def display_warning_file(warning_filename):
    """Print the content of *warning_filename* framed by banner lines;
    silently does nothing when the file is missing."""
    if not os.path.isfile(warning_filename):
        return
    with open(warning_filename) as f:
        content = f.read()
    print("##########################")
    print("# Warning file content: #")
    print("##########################")
    print("")
    print(content)
    print("########################")
    print("# End of warning file #")
    print("########################")
def create_result_dir(suffix, additional_args=None):
    """Create and return the absolute path of a fresh directory under
    results_root, named "<suffix>[_<arg>...]_<i>" for the first free index i.

    BUG FIX: the mutable default argument `additional_args=[]` was replaced
    by the None sentinel (same behavior, no shared-state hazard).
    Returns None if all 10000 candidate indices are taken.
    """
    base = os.path.join(results_root, suffix)
    for arg in (additional_args or []):
        base += "_" + arg
    base += "_"
    for i in range(0, 10000):
        result_dir = base + str(i)
        if not os.path.isdir(result_dir):
            os.makedirs(result_dir)
            #open(historic, "a+").write("Results directory: " + result_dir + "\n")
            #print("Results directory: " + result_dir)
            return os.path.abspath(result_dir)
def redirect_logs(result_dir):
    """Redirect this process's stdout/stderr to logs.txt and err.txt under
    *result_dir*. The file handles are intentionally kept open for the
    lifetime of the process."""
    logs_path = os.path.join(result_dir, "logs.txt")
    err_path = os.path.join(result_dir, "err.txt")
    print("logs redirected to " + logs_path)
    sys.stdout = open(logs_path, 'w')
    sys.stderr = open(err_path, 'w')
def submit_cascade(submit_file_path, command, threads, debug):
    """Write an SBATCH script for the cascade partition (20 cores/node) and
    submit it with sbatch, appending submission info to the historic file.

    :param submit_file_path: path where the generated sbatch script is written
    :param command: shell command(s) to run inside the job
    :param threads: total MPI ranks to request
    :param debug: if True, request the 2h debug QOS instead of 24h
    """
    threads = int(threads)
    # One node per 20 cores, rounded up
    nodes = str((int(threads) - 1) // 20 + 1)
    logfile = os.path.join(os.path.dirname(submit_file_path), "logs.out")
    with open(submit_file_path, "w") as f:
        f.write("#!/bin/bash\n")
        f.write("#SBATCH -o " + logfile + "\n")
        #f.write("#SBATCH -B 2:8:1\n")
        f.write("#SBATCH -N " + str(nodes) + "\n")
        f.write("#SBATCH -n " + str(threads) + "\n")
        f.write("#SBATCH --threads-per-core=1\n")
        f.write("#SBATCH --cpus-per-task=20\n")
        f.write("#SBATCH --hint=compute_bound\n")
        if (debug):
            f.write("#SBATCH -t 2:00:00\n")
        else:
            f.write("#SBATCH -t 24:00:00\n")
        f.write("\n")
        f.write(command)
    # Reuse the name for the sbatch invocation argv
    command = []
    command.append("sbatch")
    if (debug):
        command.append("--qos=debug")
    command.append("-s")
    command.append(submit_file_path)
    out = open(historic, "a+")
    subprocess.check_call(command, stdout = out)
    out.write("Output in " + logfile + "\n")
    # Echo the sbatch confirmation line just appended to the historic file
    print(open(historic).readlines()[-1][:-1])
    out.write("\n")
def submit_haswell(submit_file_path, command, threads, debug):
    """Write an SBATCH script for the haswell partition (16 cores/node) and
    submit it with sbatch, appending submission info to the historic file.

    :param submit_file_path: path where the generated sbatch script is written
    :param command: shell command(s) to run inside the job
    :param threads: total MPI ranks to request
    :param debug: if True, request the 2h debug QOS instead of 24h
    """
    threads = int(threads)
    # One node per 16 cores, rounded up
    nodes = str((int(threads) - 1) // 16 + 1)
    logfile = os.path.join(os.path.dirname(submit_file_path), "logs.out")
    with open(submit_file_path, "w") as f:
        f.write("#!/bin/bash\n")
        f.write("#SBATCH -o " + logfile + "\n")
        f.write("#SBATCH -B 2:8:1\n")
        f.write("#SBATCH -N " + str(nodes) + "\n")
        f.write("#SBATCH -n " + str(threads) + "\n")
        f.write("#SBATCH --threads-per-core=1\n")
        f.write("#SBATCH --cpus-per-task=1\n")
        f.write("#SBATCH --hint=compute_bound\n")
        if (debug):
            f.write("#SBATCH -t 2:00:00\n")
        else:
            f.write("#SBATCH -t 24:00:00\n")
        f.write("\n")
        f.write(command)
    # Reuse the name for the sbatch invocation argv
    command = []
    command.append("sbatch")
    if (debug):
        command.append("--qos=debug")
    command.append("-s")
    command.append(submit_file_path)
    out = open(historic, "a+")
    subprocess.check_call(command, stdout = out)
    out.write("Output in " + logfile + "\n")
    # Echo the sbatch confirmation line just appended to the historic file
    print(open(historic).readlines()[-1][:-1])
    out.write("\n")
def submit_magny(submit_file_path, command, threads):
    """Write an SGE (qsub) submit script for the magny/bridge queue and
    submit it. Unlike the SLURM variants, output goes wherever qsub defaults
    to and nothing is logged to the historic file."""
    #nodes = str((int(threads) - 1) // 16 + 1)
    #logfile = os.path.join(os.path.dirname(submit_file_path), "logs.out")
    with open(submit_file_path, "w") as f:
        f.write("#!/bin/bash\n")
        f.write("#$ -cwd -V\n") # Shift directories and export variables
        f.write("#$ -q bridge.q\n") # Select the queue
        f.write("#$ -pe mvapich16 " + str(threads) + "\n") # Set the parallel environment
        f.write("#$ -l h_rt=24:00:00\n") # Request the time for the job
        f.write("#$ -N cyano_bridge\n")
        f.write("\n")
        f.write(command)
    # Reuse the name for the qsub invocation argv
    command = []
    command.append("qsub")
    #if (int(threads) <= 32):
    # command.append("--qos=debug")
    #command.append("-s")
    command.append(submit_file_path)
    subprocess.check_call(command)
def submit_normal(submit_file_path, command, log_cout):
    """Run each newline-separated subcommand locally through the shell.

    :param submit_file_path: only used to locate the log directory
    :param command: newline-separated shell commands
    :param log_cout: if True, let output go to the console; otherwise append
        stdout and stderr of each subcommand to logs.out next to
        submit_file_path
    :raises subprocess.CalledProcessError: if any subcommand exits non-zero
    """
    commands_list = command.split("\n")
    logfile = os.path.join(os.path.dirname(submit_file_path), "logs.out")
    for subcommand in commands_list:
        if log_cout:
            subprocess.check_call(subcommand, shell=True)
        else:
            # ">> file 2>&1" is POSIX-sh compatible; the original "&>>" is a
            # bashism that /bin/sh parses as backgrounding the command.
            subprocess.check_call(subcommand + " >> " + logfile + " 2>&1", shell=True)
def submit(submit_file_path, command, threads, cluster):
    """Dispatch a job submission to the handler matching `cluster`.

    Known names: normal/normald (local, the "d" suffix logs to console),
    haswell/haswelld, cascade/cascaded (the "d" suffix selects debug QOS),
    and magny. Exits with status 1 on an unknown cluster name.
    """
    # Lambdas keep the lookups lazy: only the matching handler is resolved.
    handlers = {
        "normal": lambda: submit_normal(submit_file_path, command, False),
        "normald": lambda: submit_normal(submit_file_path, command, True),
        "haswell": lambda: submit_haswell(submit_file_path, command, threads, False),
        "haswelld": lambda: submit_haswell(submit_file_path, command, threads, True),
        "cascade": lambda: submit_cascade(submit_file_path, command, threads, False),
        "cascaded": lambda: submit_cascade(submit_file_path, command, threads, True),
        "magny": lambda: submit_magny(submit_file_path, command, threads),
    }
    handler = handlers.get(cluster)
    if handler is None:
        print("unknown cluster " + cluster)
        sys.exit(1)
    handler()
def try_make_dir(dir_name):
    """Create dir_name (and parents) if possible; ignore filesystem errors.

    Best-effort: an already-existing directory, permission problem, or any
    other OSError is silently ignored. The original bare `except:` also
    swallowed SystemExit/KeyboardInterrupt; narrowed to OSError.
    """
    try:
        os.makedirs(dir_name)
    except OSError:
        pass
def mkdir(dir_name):
    """Alias for try_make_dir(); kept for backward-compatible call sites."""
    try_make_dir(dir_name)
def relative_symlink(src, dest):
    """Create a symlink at dest pointing to src via a relative path.

    The link is first created under a temporary name (dest + ".sym") and
    then moved into place, so an existing dest is replaced in one step.
    """
    target = os.path.relpath(src, os.path.dirname(dest))
    staging = dest + ".sym"
    os.symlink(target, staging)
    shutil.move(staging, dest)
def reset_dir(dir_name):
    """Delete dir_name (ignoring removal errors) and recreate it empty."""
    shutil.rmtree(dir_name, ignore_errors=True)
    os.makedirs(dir_name)
def checkAndDelete(arg, arguments):
    """Remove the first occurrence of `arg` from `arguments` (in place).

    :return: True when `arg` was present (and removed), False otherwise
    """
    if arg not in arguments:
        return False
    arguments.remove(arg)
    return True
def getAndDelete(arg, arguments, default_value):
    """Pop the value following flag `arg` from `arguments` (in place).

    Both the flag and its value are removed. Returns default_value when the
    flag is absent.
    """
    print ("looking for " + arg + " in " + str(arguments))
    if arg not in arguments:
        return default_value
    index = arguments.index(arg)
    value = arguments[index + 1]
    # Delete flag and value in one slice (equivalent to two single dels).
    del arguments[index:index + 2]
    return value
def getArg(arg, arguments, default_value):
    """Return the value following flag `arg` without modifying `arguments`.

    Returns default_value when the flag is absent.
    """
    print ("looking for " + arg + " in " + str(arguments))
    if arg not in arguments:
        return default_value
    return arguments[arguments.index(arg) + 1]
def run_with_scheduler(executable, command_file, parallelization, cores, scheduler_output_dir, logname=None):
    """Run `executable` over `command_file` through the MPI scheduler wrapper.

    :param parallelization: scheduler mode; "onecore" and "split" run under
        mpirun, any other value runs the scheduler binary directly
    :param cores: worker count, also passed to the scheduler itself
    :param logname: optional log file name inside scheduler_output_dir;
        when None, output goes to stdout
    :raises subprocess.CalledProcessError: if the scheduler exits non-zero
    """
    out = sys.stdout
    if logname is not None:
        out = open(os.path.join(scheduler_output_dir, logname), "w")
    try:
        command = ""
        isMPI = (parallelization == "onecore") or (parallelization == "split")
        if isMPI:
            command += "mpirun -np " + str(cores) + " "
        # `mpischeduler_exec` is a module-level path defined elsewhere in
        # this file.
        command += mpischeduler_exec + " "
        command += "--" + parallelization + "-scheduler "
        command += str(cores) + " "
        command += executable + " "
        command += command_file + " "
        command += scheduler_output_dir
        print("Running " + command)
        subprocess.check_call(command.split(" "), stdout=out, stderr=out)
    finally:
        # Close the log file we opened (was leaked); never close sys.stdout.
        if out is not sys.stdout:
            out.close()
|
from Board.ActionPanel.ActionPanel import *
from Board.Map.Map import *
from Board.MenuRight.MenuRight import *
from FinalScreen import FinalScreen
from Menu.InGameMenu.InGameMenu import InGameMenu
class Board:
    """Top-level in-game screen composed of the map, the action panel, and
    the right-hand menu. Update() returns the screen for the next frame."""
    def __init__(self, game, actionPanel=None, menuright=None, map=None):
        # Fall back to fresh default widgets when no existing ones are passed;
        # Update() passes the updated widgets when building the next Board.
        self.Map = map if map is not None else Map(game)
        self.ActionPanel = actionPanel if actionPanel is not None else DefaultActionPanel(game)
        self.MenuRight = menuright if menuright is not None else MenuRight(game)
    def Update(self, game: Game):
        """Advance one frame.

        Returns a FinalScreen when a winner exists, an InGameMenu when ESC
        is pressed, and otherwise a new Board carrying the updated widgets.
        """
        if game.Logic.CheckWinner():
            return FinalScreen(game.Settings.Resolution, None, None, game.Logic.CheckWinner().Name)
        def onSelectedTileChanged(lTile):
            # Pick the action panel matching the newly selected tile: the
            # current player's building or unit gets its dedicated panel,
            # anything else gets the info panel.
            if lTile is None:
                self.ActionPanel = DefaultActionPanel(game)
            elif lTile.Building is not None and lTile.Building.Owner == game.Logic.PlayingPlayer:
                self.ActionPanel = BarrackActionPanel(game, lTile)
            elif lTile.Unit is not None and lTile.Unit.Owner == game.Logic.PlayingPlayer:
                self.ActionPanel = UnitActionPanel(game, lTile)
            else:
                self.ActionPanel = InfoActionTile(game, lTile)
        new_map = self.Map.Update(game, onSelectedTileChanged)
        actionPanel = self.ActionPanel.Update(game)
        # check to change selected tile
        if type(actionPanel) is DefaultActionPanel:
            new_map.SetActiveTile(None)
        elif actionPanel.NewSelection is not None:
            new_map.SetActiveTile(actionPanel.NewSelection)
            onSelectedTileChanged(game.Logic.Map.GetTile(actionPanel.NewSelection))
            # onSelectedTileChanged replaced self.ActionPanel; use that panel
            # for the next frame instead of the stale updated one.
            actionPanel = self.ActionPanel
        for event in game.Events:
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    return InGameMenu(game.Settings.Resolution, self)
        return Board(game, actionPanel, self.MenuRight.Update(game), new_map)
    def Draw(self, game: Game):
        """Draw all widgets; background is tinted by the current player."""
        # Turn background color in color of current player
        if game.Logic.PlayingPlayer.Character.Id == 0:
            game.Settings.GetScreen().fill(Colors.PLAYER_GREEN)
        elif game.Logic.PlayingPlayer.Character.Id == 1:
            game.Settings.GetScreen().fill(Colors.PLAYER_BLUE)
        elif game.Logic.PlayingPlayer.Character.Id == 2:
            game.Settings.GetScreen().fill(Colors.PLAYER_YELLOW)
        else:
            game.Settings.GetScreen().fill(Colors.PLAYER_RED)
        self.ActionPanel.Draw(game)
        self.MenuRight.Draw(game)
        self.Map.Draw(game)
|
# Generated by Django 3.2.5 on 2021-07-31 04:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a mandatory `category` foreign key to Product (master_file app)."""

    dependencies = [
        ('master_file', '0015_auto_20210731_1157'),
    ]
    operations = [
        migrations.AddField(
            model_name='product',
            name='category',
            # default='' with preserve_default=False: the default is applied
            # once to populate existing rows, then dropped from the field.
            field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, related_name='product_category', to='master_file.category', verbose_name='Category'),
            preserve_default=False,
        ),
    ]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 1 09:22:38 2020
@author: rajiv
"""
import numpy as np
from skimage.filters import difference_of_gaussians
from PIL import Image
def DoG(image_name, Test=False, perceptron=False):
    """Load an image from one of three fixed dataset folders and apply a
    difference-of-Gaussians (DoG) band-pass filter.

    :param image_name: file name relative to the selected dataset folder
    :param Test: load from the testing set when True
    :param perceptron: load from the labeled training set when True
        (only consulted when Test is False)
    :return: filtered image array with two leading singleton axes,
        i.e. shape (1, 1, 400, 400) for a grayscale input
    """
    train_path = "/home/rajiv/Documents/lectures/BIC/project_conv_sdnn/datasets/TrainingSet/Face/"
    test_path = "/home/rajiv/Documents/lectures/BIC/project_conv_sdnn/datasets/TestingSet/faces_labeled/"
    perceptron_path = "/home/rajiv/Documents/lectures/BIC/project_conv_sdnn/datasets/TrainingSet/faces_labeled/"
    # The three original branches were identical except for the folder.
    if Test:
        base_path = test_path
    elif perceptron:
        base_path = perceptron_path
    else:
        base_path = train_path
    return _load_and_filter(base_path + image_name)

def _load_and_filter(image_path):
    """Resize to 400x400 (bilinear), DoG-filter (sigma=1.5), and prepend
    two singleton axes."""
    image = Image.open(image_path)
    image = np.array(image.resize((400, 400), Image.BILINEAR))
    filtered_image = difference_of_gaussians(image, 1.5)
    filtered_image = np.expand_dims(filtered_image, axis=0)
    filtered_image = np.expand_dims(filtered_image, axis=0)
    return filtered_image
|
# the function takes an indefinite number of arguments (all must be numbers)
def f1(*args):
    """Return the arithmetic mean of the given numbers.

    :raises ValueError: when called with no arguments (the original raised
        an obscure ZeroDivisionError instead)
    """
    if not args:
        raise ValueError("f1() requires at least one argument")
    return sum(args) / len(args)
print(f1(2,4,6,8))
import requests
GITHUB_ROOT = "https://api.github.com"
class Client:
    """Minimal GitHub API client using HTTP basic auth."""
    def __init__(self, user, password):
        self.user = user
        self.password = password
    def set_status(self, status, owner, repo, sha):
        """Set a commit status on owner/repo@sha.

        Valid status: pending, success, error, failure.
        :return: the decoded JSON response
        """
        path = "/repos/%s/%s/statuses/%s" % (owner, repo, sha)
        payload = {"state": status, "context": "Test Coverage", "description": "Coverage passed with 50.06%"}
        url = gen_url(path)
        r = requests.post(url, json=payload, auth=(self.user, self.password))
        return r.json()
    def zen(self):
        """Print GitHub's /zen aphorism (simple connectivity check)."""
        r = requests.get(gen_url("/zen"))
        # Python 3 print call; the original used a Python 2 print statement,
        # which is a SyntaxError under Python 3.
        print("Got response: %s" % r.text)
def gen_url(path):
    """Prefix `path` with the GitHub API root URL."""
    return GITHUB_ROOT + path
|
def push(l):
    """Push a marker value (1) representing an unmatched '(' onto l."""
    l.append(1)
def pop(l):
    """Match a ')' against the stack l.

    Pops one marker when a '(' is available; otherwise appends a -1
    sentinel recording that an unmatched ')' was seen (the sentinel is
    sticky: once present, further pops keep appending -1).
    """
    if not l or l[0] == -1:
        l.append(-1)
    else:
        l.pop()
# Read T test cases; each following line is a parenthesis string to check
# for balance using push()/pop() above.
T = int(input())
for _ in range(T):
    stack = []
    for ch in input():
        if ch == '(':
            push(stack)
        elif ch == ')':
            pop(stack)
    # Balanced iff the stack ended empty: pop() leaves a sticky -1 sentinel
    # whenever a ')' arrives with nothing to match, so any leftover content
    # (markers or sentinels) means "not balanced".
    if len(stack) == 0:
        print("YES")
    else:
        print("NO")
# Done
|
import socket
def send_message(message, host, port):
    """Send `message` over UDP to (host, port) and collect the reply.

    str messages are UTF-8 encoded before sending (Python 3 sockets require
    bytes). Reply datagrams are read until an empty datagram arrives, then
    joined and returned as str.
    """
    if isinstance(message, str):
        message = message.encode("utf-8")
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        sock.sendto(message, (host, port))
        chunks = []
        while True:
            chunk, address = sock.recvfrom(1028)
            # The original compared bytes against '' (str), which is never
            # equal in Python 3, so the loop could not terminate.
            if not chunk:
                break
            chunks.append(chunk)
        # b''.join, since recvfrom returns bytes (the original str-join
        # raised TypeError).
        return b''.join(chunks).decode("utf-8")
    finally:
        sock.close()
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    # Positional arguments. The original passed two names per argument
    # ('m', 'message'), which argparse rejects for positionals, and it
    # never called parse_args(), so `args` was undefined.
    parser.add_argument('message')
    parser.add_argument('host')
    parser.add_argument('port', type=int)
    args = parser.parse_args()
    send_message(args.message, args.host, args.port)
# Generated by Django 2.1.3 on 2019-05-20 17:50
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Job.imagef to Job.image (fixes the field-name typo)."""

    dependencies = [
        ('job', '0002_auto_20190520_2319'),
    ]
    operations = [
        migrations.RenameField(
            model_name='job',
            old_name='imagef',
            new_name='image',
        ),
    ]
|
# ============LICENSE_START=======================================================
# Copyright (c) 2019-2022 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=========================================================
import unittest
import trapd_vb_types
import trapd_settings as tds
class test_trapd_vb_types(unittest.TestCase):
    """
    Tests for trapd_vb_types.pysnmp_to_netsnmp_varbind_convert, which maps
    pysnmp varbind type names to net-snmp type strings.
    """
    @classmethod
    def setUpClass(cls):
        # Initialize shared trap-daemon settings once for the whole suite.
        tds.init()
    def test_trapd_vb_type_conversion_integer32(self):
        """
        Test that pysnmp varbind type Integer32 converts to "integer"
        """
        self.assertEqual(trapd_vb_types.pysnmp_to_netsnmp_varbind_convert("Integer32"), "integer")
    def test_trapd_vb_type_conversion_integer(self):
        """
        Test that pysnmp varbind type Integer converts to "integer"
        """
        self.assertEqual(trapd_vb_types.pysnmp_to_netsnmp_varbind_convert("Integer"), "integer")
    def test_trapd_vb_type_conversion_gauge32(self):
        """
        Test that pysnmp varbind type Gauge32 converts to "unsigned"
        """
        self.assertEqual(trapd_vb_types.pysnmp_to_netsnmp_varbind_convert("Gauge32"), "unsigned")
    def test_trapd_vb_type_conversion_counter32(self):
        """
        Test that pysnmp varbind type Counter32 converts to "counter32"
        """
        self.assertEqual(trapd_vb_types.pysnmp_to_netsnmp_varbind_convert("Counter32"), "counter32")
    def test_trapd_vb_type_conversion_octetstring(self):
        """
        Test that pysnmp varbind type OctetString converts to "octet"
        """
        self.assertEqual(trapd_vb_types.pysnmp_to_netsnmp_varbind_convert("OctetString"), "octet")
    def test_trapd_vb_type_conversion_py_type_5(self):
        """
        Test that pysnmp varbind type py_type_5 converts to "hex"
        """
        self.assertEqual(trapd_vb_types.pysnmp_to_netsnmp_varbind_convert("py_type_5"), "hex")
    def test_trapd_vb_type_conversion_py_type_6(self):
        """
        Test that pysnmp varbind type py_type_6 converts to "decimal"
        """
        self.assertEqual(trapd_vb_types.pysnmp_to_netsnmp_varbind_convert("py_type_6"), "decimal")
    def test_trapd_vb_type_conversion_null(self):
        """
        Test that pysnmp varbind type Null converts to "null"
        """
        self.assertEqual(trapd_vb_types.pysnmp_to_netsnmp_varbind_convert("Null"), "null")
    def test_trapd_vb_type_conversion_objectidentifier(self):
        """
        Test that pysnmp varbind type ObjectIdentifier converts to "oid"
        """
        self.assertEqual(trapd_vb_types.pysnmp_to_netsnmp_varbind_convert("ObjectIdentifier"), "oid")
    def test_trapd_vb_type_conversion_timeticks(self):
        """
        Test that pysnmp varbind type TimeTicks converts to "timeticks"
        """
        self.assertEqual(trapd_vb_types.pysnmp_to_netsnmp_varbind_convert("TimeTicks"), "timeticks")
    def test_trapd_vb_type_conversion_ipaddress(self):
        """
        Test that pysnmp varbind type IpAddress converts to "ipaddress"
        """
        self.assertEqual(trapd_vb_types.pysnmp_to_netsnmp_varbind_convert("IpAddress"), "ipaddress")
    def test_trapd_vb_type_conversion_bits(self):
        """
        Test that pysnmp varbind type Bits converts to "bits"
        """
        self.assertEqual(trapd_vb_types.pysnmp_to_netsnmp_varbind_convert("Bits"), "bits")
    def test_trapd_vb_type_conversion_invalid(self):
        """
        Test that an unknown pysnmp varbind type falls back to "octet"
        """
        # should return default of octet if not defined
        self.assertEqual(trapd_vb_types.pysnmp_to_netsnmp_varbind_convert("noSuchVarbindType"), "octet")
# Script entry point: run the suite with verbose output when executed directly.
if __name__ == "__main__": # pragma: no cover
    unittest.main(verbosity=2)
|
import tensorflow as tf
import numpy as np
# Train an 11-layer fully-connected network on data loaded from xor.txt
# (TensorFlow 1.x graph API). The label column is reshaped to (4, 1), i.e.
# 4 samples -- consistent with the classic 2-input XOR truth table.
xy = np.loadtxt('xor.txt', unpack=True)
x_data = np.transpose(xy[0:-1])   # all columns but the last are features
y_data = np.reshape(xy[-1], (4,1))  # last column is the 0/1 label
# Graph inputs (shapes left unspecified; fed via feed_dict below).
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
# Weights/biases for 10 hidden layers of width 4 plus a 1-unit output layer.
W1 = tf.Variable(tf.random_uniform([2, 4], -1.0,1.0 ), name='w1')
b1 = tf.Variable(tf.zeros([4]), name='Bias1')
W2 = tf.Variable(tf.random_uniform([4, 4], -1.0, 1.0), name='w2')
b2 = tf.Variable(tf.zeros([4]), name='Bias2')
W3 = tf.Variable(tf.random_uniform([4, 4], -1.0, 1.0), name='w3')
b3 = tf.Variable(tf.zeros([4]), name='Bias3')
W4 = tf.Variable(tf.random_uniform([4, 4], -1.0, 1.0), name='w4')
b4 = tf.Variable(tf.zeros([4]), name='Bias4')
W5 = tf.Variable(tf.random_uniform([4, 4], -1.0, 1.0), name='w5')
b5 = tf.Variable(tf.zeros([4]), name='Bias5')
W6 = tf.Variable(tf.random_uniform([4, 4], -1.0, 1.0), name='w6')
b6 = tf.Variable(tf.zeros([4]), name='Bias6')
W7 = tf.Variable(tf.random_uniform([4, 4], -1.0, 1.0), name='w7')
b7 = tf.Variable(tf.zeros([4]), name='Bias7')
W8 = tf.Variable(tf.random_uniform([4, 4], -1.0, 1.0), name='w8')
b8 = tf.Variable(tf.zeros([4]), name='Bias8')
W9 = tf.Variable(tf.random_uniform([4, 4], -1.0, 1.0), name='w9')
b9 = tf.Variable(tf.zeros([4]), name='Bias9')
W10 = tf.Variable(tf.random_uniform([4, 4], -1.0, 1.0), name='w10')
b10 = tf.Variable(tf.zeros([4]), name='Bias10')
W11 = tf.Variable(tf.random_uniform([4, 1], -1.0, 1.0), name='w11')
b11 = tf.Variable(tf.zeros([1]), name='Bias11')
# Disabled histogram summaries kept as an inert string literal.
'''
tf.summary.histogram('w1', W1)
tf.summary.histogram('b1', b1)
tf.summary.histogram('w2', W2)
tf.summary.histogram('b2', b2)
tf.summary.histogram('w3', W3)
tf.summary.histogram('b3', b3)
tf.summary.histogram('w4', W4)
tf.summary.histogram('b4', b4)
'''
# Hidden layers use ReLU (sigmoid variants left commented out).
with tf.name_scope('layer1') :
    #L1 = tf.sigmoid(tf.matmul(X, W1) + b1)
    L1 = tf.nn.relu(tf.matmul(X, W1) + b1)
with tf.name_scope('layer2') :
    #L2 = tf.sigmoid(tf.matmul(L1, W2) + b2)
    L2 = tf.nn.relu(tf.matmul(L1, W2) + b2)
with tf.name_scope('layer3') :
    #L3 = tf.sigmoid(tf.matmul(L2, W3) + b3)
    L3 = tf.nn.relu(tf.matmul(L2, W3) + b3)
with tf.name_scope('layer4') :
    #L4= tf.sigmoid(tf.matmul(L3, W4) + b4)
    L4 = tf.nn.relu(tf.matmul(L3, W4) + b4)
with tf.name_scope('layer5') :
    #L5= tf.sigmoid(tf.matmul(L4, W5) + b5)
    L5 = tf.nn.relu(tf.matmul(L4, W5) + b5)
with tf.name_scope('layer6') :
    #L6= tf.sigmoid(tf.matmul(L5, W6) + b6)
    L6 = tf.nn.relu(tf.matmul(L5, W6) + b6)
with tf.name_scope('layer7') :
    #L7= tf.sigmoid(tf.matmul(L6, W7) + b7)
    L7 = tf.nn.relu(tf.matmul(L6, W7) + b7)
with tf.name_scope('layer8') :
    #L8= tf.sigmoid(tf.matmul(L7, W8) + b8)
    L8 = tf.nn.relu(tf.matmul(L7, W8) + b8)
with tf.name_scope('layer9') :
    #L9 = tf.sigmoid(tf.matmul(L8, W9) + b9)
    L9 = tf.nn.relu(tf.matmul(L8, W9) + b9)
with tf.name_scope('layer10') :
    #L10 = tf.sigmoid(tf.matmul(L9, W10) + b10)
    L10 = tf.nn.relu(tf.matmul(L9, W10) + b10)
''''''
# Output layer: sigmoid probability for binary classification.
with tf.name_scope('layer11') :
    hypothesis = tf.sigmoid(tf.matmul(L10, W11) + b11)
# Binary cross-entropy loss.
with tf.name_scope('cost') :
    cost = -tf.reduce_mean( Y * tf.log(hypothesis) + ( 1 - Y ) * tf.log(1-hypothesis) )
    tf.summary.scalar('cost', cost)
learning_rate = tf.Variable(0.05)
with tf.name_scope('train') :
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    train = optimizer.minimize(cost)
# Training loop: 20000 steps, logging cost/summaries every 2000 steps.
with tf.Session() as sess :
    tf.global_variables_initializer().run()
    merged = tf.summary.merge_all()
    writer = tf.summary.FileWriter("/tmp/mhkim/xor_logs3", sess.graph)
    for step in range(20000) :
        sess.run(train, feed_dict={X: x_data, Y: y_data})
        if step % 2000 == 0 :
            print ( step , sess.run(cost, feed_dict={ X:x_data, Y:y_data }) )
            summary = sess.run(merged, feed_dict={X: x_data, Y: y_data})
            writer.add_summary(summary , step)
    #tf.summary.scalar("accuracy", accuracy)
    # Threshold at 0.5 (floor(h + 0.5)) and compare against labels.
    correct_prediction = tf.equal(tf.floor(hypothesis + 0.5), Y)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    with tf.name_scope('result') :
        tf.summary.scalar('scala', accuracy)
    print ("Accuracy : " , accuracy.eval({X:x_data, Y:y_data }))
    #print ( sess.run([hypothesis, tf.floor(hypothesis + 0.5 ), correct_prediction, accuracy], feed_dict={X:x_data, Y:y_data}))
    #correct_prediction = tf.floor()
    #print ( hypothesis.eval(feed_dict={X:x_data}))
|
#!/usr/bin/python3
import os
import sys
# Parse a repo-manifest-style XML file passed as argv[1]: for every <project>
# line, derive the remote (name) and local (path) directories, then init,
# commit, and push each local directory to the remote as a fresh git repo.
remote = 'git@127.0.0.1:rk3229'
if len(sys.argv) == 1:
    print('错误!请传入 xml 文件')
elif len(sys.argv) > 2:
    print('错误!传入参数太多')
else:
    print('传入的文件是 %s' % sys.argv[1])
    with open(sys.argv[1], 'r') as fin:
        while True:
            linestr = fin.readline()
            if linestr == '': # empty string means end of file
                break
            #print(linestr)
            # analyze the content of this line
            if (('name=' in linestr) or ('name =' in linestr)) and (('project' in linestr) or ('path' in linestr)): # this line carries name info
                #print(linestr)
                # unconditionally extract the name path first
                # (substring between 'name="' and the next '"')
                charistr1 = 'name="'
                charistr2 = '"'
                namestr = linestr[linestr.index(charistr1)+len(charistr1) : linestr.index(charistr1)+len(charistr1)+ linestr[linestr.index(charistr1)+len(charistr1):].index(charistr2)]
                if 'path=' in linestr: # if a path attribute exists, it is the local path
                    charistr1 = 'path="'
                    charistr2 = '"'
                    pathstr = linestr[linestr.index(charistr1)+len(charistr1) : linestr.index(charistr1)+len(charistr1)+ linestr[linestr.index(charistr1)+len(charistr1):].index(charistr2)]
                else: # without a path attribute, the local path equals the name path
                    pathstr = namestr
                #print('name="%s", path="%s"' % (namestr, pathstr))
                # initialize and push the git project
                localpath = sys.path[0] + '/' + pathstr # absolute local path of the git project
                remotepath = remote + '/' + namestr # remote path of the git project
                # if the local directory is empty, create a placeholder file:
                # git cannot commit an empty directory
                if not os.listdir(localpath): # local directory is empty
                    #cmd = 'touch %s/._USELESSFILE_' % (localpath)
                    cmd = 'touch %s/.gitignore' % (localpath)
                    print(cmd)
                    os.system(cmd)
                cmd = 'cd %s && rm -rf .git && git init && git remote add origin %s && git add . -f && git commit -m "init" &&git push -u origin master && cd %s' % (localpath, remotepath, sys.path[0])
                print(cmd)
                os.system(cmd)
|
# ====== main code ====================================== #
# Fill an n x m grid with 1..n*m in a clockwise inward spiral and print it.
n, m = map(int, input().split())
a = [[0] * m for _ in range(n)]
# Direction (dr, dc) starts pointing right; (r, c) is the write cursor.
dr, dc, r, c = 0, 1, 0, 0
for cnt in range(1, n * m + 1):
    a[r][c] = cnt
    # Turn clockwise (right -> down -> left -> up) when the next cell,
    # checked with wrap-around, is already filled.
    if a[(r + dr) % n][(c + dc) % m]:
        dr, dc = dc, -dr
    r += dr
    c += dc
# Print each cell left-aligned in a 3-character column.
for row in a:
    print(*(f'{e:<3}' for e in row), sep='')
# ====== end of code ==================================== #
|
import keras.backend as K
import tensorflow as tf
def categorical_focal_loss(gamma=2.0, alpha=0.25):
    """
    Implementation of Focal Loss from the paper in multiclass classification

    Formula:
        loss = -alpha*((1-p)^gamma)*log(p)

    Parameters:
        alpha -- the same as weighting factor in balanced cross entropy
        gamma -- focusing parameter for modulating factor (1-p)
    Default value:
        gamma -- 2.0 as mentioned in the paper
        alpha -- 0.25 as mentioned in the paper
    """
    def focal_loss(y_true, y_pred):
        # Clip predictions away from 0 and 1 so log/backprop never sees NaN.
        eps = K.epsilon()
        y_pred = K.clip(y_pred, eps, 1.0 - eps)
        # Standard categorical cross entropy per class.
        cross_entropy = -y_true * K.log(y_pred)
        # Modulating factor (1-p)^gamma focuses training on hard examples;
        # alpha re-balances the classes.
        modulating_factor = K.pow(1 - y_pred, gamma)
        # Sum per-class contributions within each sample of the mini-batch.
        return K.sum(alpha * y_true * modulating_factor * cross_entropy, axis=1)
    return focal_loss
def weighted_categorical_crossentropy(weights):
    """
    A weighted version of keras.objectives.categorical_crossentropy

    Variables:
        weights: numpy array of shape (C,) where C is the number of classes

    Usage:
        weights = np.array([0.5,2,10]) # Class one at 0.5, class 2 twice the normal weights, class 3 10x.
        loss = weighted_categorical_crossentropy(weights)
        model.compile(loss=loss,optimizer='adam')
    """
    class_weights = K.variable(weights)
    def loss(y_true, y_pred):
        # Normalize so class probabilities of each sample sum to 1.
        probs = y_pred / K.sum(y_pred, axis=-1, keepdims=True)
        # Clip to avoid NaN/Inf from log(0).
        probs = K.clip(probs, K.epsilon(), 1 - K.epsilon())
        # Per-class weighted cross entropy, summed over the class axis.
        return -K.sum(y_true * K.log(probs) * class_weights, -1)
    return loss
|
from django.db import models
from django.utils.translation import gettext_lazy as _
from delivery.validators import interval_validator, weight_validator
class Region(models.Model):
    """Region model describing a delivery district.

    Parent class -- models.Model.

    Class attributes
    --------
    PK <-- Order, Courier
    code : models.PositiveIntegerField()
        numeric district code.
    """
    code = models.PositiveIntegerField(
        primary_key=True,
        verbose_name='Код района',
    )
class TimeInterval(models.Model):
    """TimeInterval model describing time intervals.

    Parent class -- models.Model.

    Class attributes
    --------
    PK <-- Order, Courier
    name : models.CharField()
        interval name in 'HH:MM-HH:MM' format
    begin : models.PositiveIntegerField()
        interval start, in minutes since 00:00
    end : models.PositiveIntegerField()
        interval end, in minutes since 00:00.

    Class methods
    --------
    __str__() -- returns the string representation of the model.
    save() -- computes the begin and end fields and saves all changes to
    the database.
    """
    name = models.CharField(
        primary_key=True,
        verbose_name='Интервал(HH:MM-HH:MM)',
        max_length=11,
        validators=[interval_validator]
    )
    begin = models.PositiveIntegerField(
        verbose_name='Начало интервала в минутах'
    )
    end = models.PositiveIntegerField(
        verbose_name='Конец интервала в минутах'
    )
    def __str__(self) -> str:
        """Return the interval name as the string representation."""
        return self.name
    def save(self, *args, **kwargs) -> None:
        """Compute the begin and end fields and save all changes to the DB.
        """
        # interval_validator both validates the 'HH:MM-HH:MM' string and
        # returns the parsed (begin, end) minute pair.
        self.begin, self.end = interval_validator(self.name)
        super().save(*args, **kwargs)
class Courier(models.Model):
    """Courier model describing a courier.

    Parent class -- models.Model.

    Class attributes
    --------
    PK <-- Invoice
    courier_id : models.PositiveIntegerField()
        courier identifier
    courier_type : models.CharField()
        courier type
    regions : models.ManyToManyField() FK --> Region
        regions in which the courier works
    working_hours = models.ManyToManyField() FK --> TimeInterval
        time intervals during which the courier works

    Class methods
    --------
    __str__() -- returns the string representation of the model.
    """
    class CourierType(models.TextChoices):
        """CourierType defines the set of allowed courier types."""
        FOOT = 'foot', _('Пеший')
        BIKE = 'bike', _('Велокурьер')
        CAR = 'car', _('Курьер на автомобиле')
    courier_id = models.PositiveIntegerField(
        primary_key=True,
        verbose_name='Идентификатор курьера',
    )
    courier_type = models.CharField(
        max_length=4,
        choices=CourierType.choices,
        verbose_name='Тип курьера',
    )
    regions = models.ManyToManyField(
        Region,
        related_name='couriers',
        verbose_name='Районы доставки',
        db_index=True,
    )
    working_hours = models.ManyToManyField(
        TimeInterval,
        related_name='couriers',
        verbose_name='Часы работы',
        db_index=True,
    )
    def __str__(self) -> str:
        """Return the courier type and identifier as the string
        representation."""
        return f'{self.courier_type} - {self.courier_id}'
class Order(models.Model):
    """Order model describing a delivery order.

    Parent class -- models.Model.

    Class attributes
    --------
    PK <-- InvoiceOrder
    order_id : models.PositiveIntegerField()
        order identifier
    weight : models.DecimalField()
        order weight
    region : models.ForeignKey() FK --> Region
        delivery region of the order
    delivery_hours = models.ManyToManyField() FK --> TimeInterval
        time intervals convenient for receiving the order.

    Class methods
    --------
    __str__() -- returns the string representation of the model.
    """
    order_id = models.PositiveIntegerField(
        primary_key=True,
        verbose_name='Идентификатор заказа',
    )
    weight = models.DecimalField(
        validators=[weight_validator],
        max_digits=4, decimal_places=2
    )
    region = models.ForeignKey(
        Region,
        related_name='orders',
        verbose_name='Район заказа',
        on_delete=models.PROTECT,
        db_index=True,
    )
    delivery_hours = models.ManyToManyField(
        TimeInterval,
        related_name='orders',
        verbose_name='Часы работы',
        db_index=True,
    )
    def __str__(self) -> str:
        """Return the order identifier as the string representation."""
        return f'order_id: {self.order_id}'
class Invoice(models.Model):
    """Invoice model describing a delivery assignment (batch of orders).

    Parent class -- models.Model.

    Class attributes
    --------
    PK <-- InvoiceOrder
    courier : models.ForeignKey() FK --> Courier
        courier assigned to the delivery run
    assign_time : models.DateTimeField()
        time the delivery run was formed
    orders : models.ManyToManyField() FK --> Order
        orders included in the delivery run
    expected_reward = models.PositiveIntegerField()
        expected courier reward for the delivery run.
    """
    courier = models.ForeignKey(
        Courier,
        related_name='invoices',
        verbose_name='Назначенный курьер',
        on_delete=models.CASCADE,
        db_index=True,
    )
    assign_time = models.DateTimeField(
        auto_now_add=True,
        verbose_name='Время выдачи курьеру',
    )
    orders = models.ManyToManyField(
        Order,
        through='InvoiceOrder',
        related_name='invoices',
        verbose_name='Заказы',
        db_index=True,
    )
    expected_reward = models.PositiveIntegerField(
        null=False,
        verbose_name='Ожидаемое вознаграждение',
    )
class InvoiceOrder(models.Model):
    """InvoiceOrder model describing a line item of a delivery run.

    Parent class -- models.Model.

    Class attributes
    --------
    invoice : models.ForeignKey() FK --> Invoice
        delivery-run identifier
    order : models.ForeignKey() FK --> Order
        the order
    complete_time : models.DateTimeField()
        order completion time
    delivery_time : models.PositiveIntegerField()
        order delivery time in seconds.
    """
    invoice = models.ForeignKey(
        Invoice,
        related_name='invoice_orders',
        on_delete=models.CASCADE,
    )
    order = models.ForeignKey(
        Order,
        related_name='invoice_orders',
        on_delete=models.CASCADE
    )
    complete_time = models.DateTimeField(
        null=True,
        verbose_name='Время завершения заказа',
    )
    delivery_time = models.PositiveIntegerField(
        null=True,
        verbose_name='Время доставки в секундах',
    )
|
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import dataclasses
import logging
from dataclasses import dataclass
from typing import Iterable, Mapping
from packaging.utils import canonicalize_name as canonicalize_project_name
from pants.backend.python.subsystems.setup import PythonSetup
from pants.backend.python.target_types import (
MainSpecification,
PexLayout,
PythonRequirementsField,
PythonResolveField,
)
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.local_dists import LocalDistsPex, LocalDistsPexRequest
from pants.backend.python.util_rules.local_dists import rules as local_dists_rules
from pants.backend.python.util_rules.pex import (
CompletePlatforms,
OptionalPex,
OptionalPexRequest,
Pex,
PexPlatforms,
PexRequest,
)
from pants.backend.python.util_rules.pex import rules as pex_rules
from pants.backend.python.util_rules.pex_requirements import (
EntireLockfile,
LoadedLockfile,
LoadedLockfileRequest,
Lockfile,
PexRequirements,
Resolve,
)
from pants.backend.python.util_rules.python_sources import (
PythonSourceFiles,
PythonSourceFilesRequest,
StrippedPythonSourceFiles,
)
from pants.backend.python.util_rules.python_sources import rules as python_sources_rules
from pants.core.goals.generate_lockfiles import NoCompatibleResolveException
from pants.core.goals.package import TraverseIfNotPackageTarget
from pants.core.target_types import FileSourceField
from pants.engine.addresses import Address, Addresses
from pants.engine.collection import DeduplicatedCollection
from pants.engine.fs import Digest, DigestContents, GlobMatchErrorBehavior, MergeDigests, PathGlobs
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import (
Target,
Targets,
TransitiveTargets,
TransitiveTargetsRequest,
targets_with_sources_types,
)
from pants.engine.unions import UnionMembership
from pants.util.docutil import doc_url
from pants.util.frozendict import FrozenDict
from pants.util.logging import LogLevel
from pants.util.pip_requirement import PipRequirement
from pants.util.requirements import parse_requirements_file
from pants.util.strutil import path_safe, softwrap
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class PexFromTargetsRequest:
addresses: Addresses
output_filename: str
internal_only: bool
layout: PexLayout | None
main: MainSpecification | None
inject_args: tuple[str, ...]
inject_env: FrozenDict[str, str]
platforms: PexPlatforms
complete_platforms: CompletePlatforms
additional_args: tuple[str, ...]
additional_lockfile_args: tuple[str, ...]
include_source_files: bool
include_requirements: bool
include_local_dists: bool
additional_sources: Digest | None
additional_inputs: Digest | None
hardcoded_interpreter_constraints: InterpreterConstraints | None
warn_for_transitive_files_targets: bool
# This field doesn't participate in comparison (and therefore hashing), as it doesn't affect
# the result.
description: str | None = dataclasses.field(compare=False)
def __init__(
self,
addresses: Iterable[Address],
*,
output_filename: str,
internal_only: bool,
layout: PexLayout | None = None,
main: MainSpecification | None = None,
inject_args: Iterable[str] = (),
inject_env: Mapping[str, str] = FrozenDict(),
platforms: PexPlatforms = PexPlatforms(),
complete_platforms: CompletePlatforms = CompletePlatforms(),
additional_args: Iterable[str] = (),
additional_lockfile_args: Iterable[str] = (),
include_source_files: bool = True,
include_requirements: bool = True,
include_local_dists: bool = False,
additional_sources: Digest | None = None,
additional_inputs: Digest | None = None,
hardcoded_interpreter_constraints: InterpreterConstraints | None = None,
description: str | None = None,
warn_for_transitive_files_targets: bool = False,
) -> None:
"""Request to create a Pex from the transitive closure of the given addresses.
:param addresses: The addresses to use for determining what is included in the Pex. The
transitive closure of these addresses will be used; you only need to specify the roots.
:param output_filename: The name of the built Pex file, which typically should end in
`.pex`.
:param internal_only: Whether we ever materialize the Pex and distribute it directly
to end users, such as with the `binary` goal. Typically, instead, the user never
directly uses the Pex, e.g. with `lint` and `test`. If True, we will use a Pex setting
that results in faster build time but compatibility with fewer interpreters at runtime.
:param layout: The filesystem layout to create the PEX with.
:param main: The main for the built Pex, equivalent to Pex's `-e` or `-c` flag. If
left off, the Pex will open up as a REPL.
:param inject_args: Command line arguments to freeze in to the PEX.
:param inject_env: Environment variables to freeze in to the PEX.
:param platforms: Which platforms should be supported. Setting this value will cause
interpreter constraints to not be used because platforms already constrain the valid
Python versions, e.g. by including `cp36m` in the platform string.
:param additional_args: Any additional Pex flags.
:param additional_lockfile_args: Any additional Pex flags that should be used with the
lockfile.pex. Many Pex args like `--emit-warnings` do not impact the lockfile, and
setting them would reduce reuse with other call sites. Generally, these should only be
flags that impact lockfile resolution like `--manylinux`.
:param include_source_files: Whether to include source files in the built Pex or not.
Setting this to `False` and loading the source files by instead populating the chroot
and setting the environment variable `PEX_EXTRA_SYS_PATH` will result in substantially
fewer rebuilds of the Pex.
:param include_requirements: Whether to resolve requirements and include them in the Pex.
:param include_local_dists: Whether to build local dists and include them in the built pex.
:param additional_sources: Any additional source files to include in the built Pex.
:param additional_inputs: Any inputs that are not source files and should not be included
directly in the Pex, but should be present in the environment when building the Pex.
:param hardcoded_interpreter_constraints: Use these constraints rather than resolving the
constraints from the input.
:param description: A human-readable description to render in the dynamic UI when building
the Pex.
:param warn_for_transitive_files_targets: If True (and include_source_files is also true),
emit a warning if the pex depends on any `files` targets, since they won't be included.
"""
object.__setattr__(self, "addresses", Addresses(addresses))
object.__setattr__(self, "output_filename", output_filename)
object.__setattr__(self, "internal_only", internal_only)
object.__setattr__(self, "layout", layout)
object.__setattr__(self, "main", main)
object.__setattr__(self, "inject_args", tuple(inject_args))
object.__setattr__(self, "inject_env", FrozenDict(inject_env))
object.__setattr__(self, "platforms", platforms)
object.__setattr__(self, "complete_platforms", complete_platforms)
object.__setattr__(self, "additional_args", tuple(additional_args))
object.__setattr__(self, "additional_lockfile_args", tuple(additional_lockfile_args))
object.__setattr__(self, "include_source_files", include_source_files)
object.__setattr__(self, "include_requirements", include_requirements)
object.__setattr__(self, "include_local_dists", include_local_dists)
object.__setattr__(self, "additional_sources", additional_sources)
object.__setattr__(self, "additional_inputs", additional_inputs)
object.__setattr__(
self, "hardcoded_interpreter_constraints", hardcoded_interpreter_constraints
)
object.__setattr__(self, "description", description)
object.__setattr__(
self, "warn_for_transitive_files_targets", warn_for_transitive_files_targets
)
self.__post_init__()
def __post_init__(self):
    """Validate that mutually-exclusive options were not combined."""
    # Platforms are only usable for PEXes that leave the Pants environment.
    wants_platforms = self.platforms or self.complete_platforms
    if not (self.internal_only and wants_platforms):
        return
    raise AssertionError(
        softwrap(
            """
            PexFromTargetsRequest set internal_only at the same time as setting
            `platforms` and/or `complete_platforms`. Platforms can only be used when
            `internal_only=False`.
            """
        )
    )
def to_interpreter_constraints_request(self) -> InterpreterConstraintsRequest:
    """Derive the interpreter-constraints request for this request's addresses."""
    kwargs = dict(
        addresses=self.addresses,
        hardcoded_interpreter_constraints=self.hardcoded_interpreter_constraints,
    )
    return InterpreterConstraintsRequest(**kwargs)
@dataclass(frozen=True)
class InterpreterConstraintsRequest:
    """Request to compute the interpreter constraints for a set of addresses.

    `hardcoded_interpreter_constraints`, when set, short-circuits the calculation.
    """

    addresses: Addresses
    hardcoded_interpreter_constraints: InterpreterConstraints | None

    def __init__(
        self,
        addresses: Iterable[Address],
        *,
        hardcoded_interpreter_constraints: InterpreterConstraints | None = None,
    ) -> None:
        # Frozen dataclass: bypass the generated __setattr__ so `addresses` can be
        # normalized into an `Addresses` collection.
        for attr, value in (
            ("addresses", Addresses(addresses)),
            ("hardcoded_interpreter_constraints", hardcoded_interpreter_constraints),
        ):
            object.__setattr__(self, attr, value)
@rule
async def interpreter_constraints_for_targets(
    request: InterpreterConstraintsRequest, python_setup: PythonSetup
) -> InterpreterConstraints:
    """Compute interpreter constraints from the transitive closure of the addresses."""
    if request.hardcoded_interpreter_constraints:
        return request.hardcoded_interpreter_constraints

    transitive_targets = await Get(TransitiveTargets, TransitiveTargetsRequest(request.addresses))
    calculated = InterpreterConstraints.create_from_targets(
        transitive_targets.closure, python_setup
    )
    if calculated:
        return calculated
    # If there are no targets, we fall back to the global constraints. This is relevant,
    # for example, when running `./pants repl` with no specs or only on targets without
    # `interpreter_constraints` (e.g. `python_requirement`).
    return InterpreterConstraints(python_setup.interpreter_constraints)
@dataclass(frozen=True)
class ChosenPythonResolve:
    """The resolve chosen for a set of targets, together with its lockfile."""

    # Resolve name, as it appears in `[python].resolves`.
    name: str
    lockfile: Lockfile
@dataclass(frozen=True)
class ChosenPythonResolveRequest:
    """Request to choose the single resolve shared by the given addresses."""

    addresses: Addresses
@rule
async def choose_python_resolve(
    request: ChosenPythonResolveRequest, python_setup: PythonSetup
) -> ChosenPythonResolve:
    """Pick the one resolve used by the transitive closure of the given addresses.

    Raises `NoCompatibleResolveException` when the roots disagree, or when a transitive
    dependency uses a different resolve than the roots.
    """
    transitive_targets = await Get(TransitiveTargets, TransitiveTargetsRequest(request.addresses))

    def maybe_get_resolve(t: Target) -> str | None:
        return (
            t[PythonResolveField].normalized_value(python_setup)
            if t.has_field(PythonResolveField)
            else None
        )

    # The roots determine the resolve; every root must agree on it.
    root_resolves = {
        maybe_get_resolve(root)
        for root in transitive_targets.roots
        if root.has_field(PythonResolveField)
    }
    if not root_resolves:
        # No relevant targets: fall back to the default resolve. This happens, for
        # example, when running `./pants repl` with no specs or only on non-Python targets.
        chosen_resolve = python_setup.default_resolve
    else:
        if len(root_resolves) > 1:
            raise NoCompatibleResolveException.bad_input_roots(
                transitive_targets.roots,
                maybe_get_resolve=maybe_get_resolve,
                doc_url_slug="python-third-party-dependencies#multiple-lockfiles",
                workaround=None,
            )
        (chosen_resolve,) = root_resolves
        # Every transitive dependency must use the same resolve as the roots.
        has_incompatible_dep = any(
            tgt.has_field(PythonResolveField)
            and tgt[PythonResolveField].normalized_value(python_setup) != chosen_resolve
            for tgt in transitive_targets.dependencies
        )
        if has_incompatible_dep:
            raise NoCompatibleResolveException.bad_dependencies(
                maybe_get_resolve=maybe_get_resolve,
                doc_url_slug="python-third-party-dependencies#multiple-lockfiles",
                root_resolve=chosen_resolve,
                root_targets=transitive_targets.roots,
                dependencies=transitive_targets.dependencies,
            )

    return ChosenPythonResolve(
        name=chosen_resolve,
        lockfile=Lockfile(
            url=python_setup.resolves[chosen_resolve],
            url_description_of_origin=(
                f"the resolve `{chosen_resolve}` (from `[python].resolves`)"
            ),
            resolve_name=chosen_resolve,
        ),
    )
class GlobalRequirementConstraints(DeduplicatedCollection[PipRequirement]):
    """Global constraints specified by the `[python].requirement_constraints` setting, if any.

    Empty when that option is not configured.
    """
@rule
async def determine_global_requirement_constraints(
    python_setup: PythonSetup,
) -> GlobalRequirementConstraints:
    """Load and parse the file configured via `[python].requirement_constraints`."""
    constraints_path = python_setup.requirement_constraints
    if not constraints_path:
        return GlobalRequirementConstraints()

    digest_contents = await Get(
        DigestContents,
        PathGlobs(
            [constraints_path],
            # A configured-but-missing constraints file is a hard error.
            glob_match_error_behavior=GlobMatchErrorBehavior.error,
            description_of_origin="the option `[python].requirement_constraints`",
        ),
    )
    file_content = digest_contents[0]
    return GlobalRequirementConstraints(
        parse_requirements_file(file_content.content.decode(), rel_path=file_content.path)
    )
@dataclass(frozen=True)
class _PexRequirementsRequest:
    """Determine the requirement strings used transitively.

    This type is private because callers should likely use `RequirementsPexRequest` or
    `PexFromTargetsRequest` instead.
    """

    # Root addresses whose transitive closure supplies the requirement strings.
    addresses: Addresses
@rule
async def determine_requirement_strings_in_closure(
    request: _PexRequirementsRequest, global_requirement_constraints: GlobalRequirementConstraints
) -> PexRequirements:
    """Build a `PexRequirements` for the transitive closure of the given addresses."""
    addrs = request.addresses
    # Summarize the roots for error messages rather than listing every address.
    if not addrs:
        description_of_origin = ""
    elif len(addrs) == 1:
        description_of_origin = addrs[0].spec
    else:
        description_of_origin = f"{addrs[0].spec} and {len(addrs) - 1} other targets"

    # `constraints_strings` is only populated when `[python].requirement_constraints`
    # is configured, which is mutually exclusive with resolves.
    constraints_strings = (str(constraint) for constraint in global_requirement_constraints)
    return PexRequirements(
        request.addresses,
        constraints_strings=constraints_strings,
        description_of_origin=description_of_origin,
    )
@dataclass(frozen=True)
class _RepositoryPexRequest:
    """Private request for a "repository" PEX resolved from an entire lockfile or
    constraints file, from which smaller PEXes can be subset."""

    addresses: Addresses
    hardcoded_interpreter_constraints: InterpreterConstraints | None
    platforms: PexPlatforms
    complete_platforms: CompletePlatforms
    internal_only: bool
    additional_lockfile_args: tuple[str, ...]

    def __init__(
        self,
        addresses: Iterable[Address],
        *,
        internal_only: bool,
        hardcoded_interpreter_constraints: InterpreterConstraints | None = None,
        platforms: PexPlatforms = PexPlatforms(),
        complete_platforms: CompletePlatforms = CompletePlatforms(),
        additional_lockfile_args: tuple[str, ...] = (),
    ) -> None:
        # Frozen dataclass: bypass the generated __setattr__ so `addresses` can be
        # normalized into an `Addresses` collection.
        for attr, value in (
            ("addresses", Addresses(addresses)),
            ("internal_only", internal_only),
            ("hardcoded_interpreter_constraints", hardcoded_interpreter_constraints),
            ("platforms", platforms),
            ("complete_platforms", complete_platforms),
            ("additional_lockfile_args", additional_lockfile_args),
        ):
            object.__setattr__(self, attr, value)

    def to_interpreter_constraints_request(self) -> InterpreterConstraintsRequest:
        """Derive the interpreter-constraints request for this request's addresses."""
        return InterpreterConstraintsRequest(
            addresses=self.addresses,
            hardcoded_interpreter_constraints=self.hardcoded_interpreter_constraints,
        )
@dataclass(frozen=True)
class _ConstraintsRepositoryPexRequest:
    """Request for a repository PEX built from `[python].requirement_constraints`."""

    repository_pex_request: _RepositoryPexRequest
async def _determine_requirements_for_pex_from_targets(
    request: PexFromTargetsRequest, python_setup: PythonSetup
) -> tuple[PexRequirements | EntireLockfile, Iterable[Pex]]:
    """Decide how third-party requirements for `request` will be provided.

    Returns a pair of (requirements to install, already-resolved PEXes for the pex_path).
    The second element is non-empty only when the entire lockfile is returned as a
    prebuilt repository PEX.
    """
    if not request.include_requirements:
        # Caller explicitly asked for no third-party requirements.
        return PexRequirements(), ()

    requirements = await Get(PexRequirements, _PexRequirementsRequest(request.addresses))
    pex_native_subsetting_supported = False
    if python_setup.enable_resolves:
        # TODO: Once `requirement_constraints` is removed in favor of `enable_resolves`,
        # `ChosenPythonResolveRequest` and `_PexRequirementsRequest` should merge and
        # do a single transitive walk to replace this method.
        chosen_resolve = await Get(
            ChosenPythonResolve, ChosenPythonResolveRequest(request.addresses)
        )
        loaded_lockfile = await Get(LoadedLockfile, LoadedLockfileRequest(chosen_resolve.lockfile))
        # Only a PEX-native lockfile can be subset directly, without a repository PEX.
        pex_native_subsetting_supported = loaded_lockfile.is_pex_native
        if loaded_lockfile.as_constraints_strings:
            requirements = dataclasses.replace(
                requirements,
                constraints_strings=loaded_lockfile.as_constraints_strings,
            )

    should_return_entire_lockfile = (
        python_setup.run_against_entire_lockfile and request.internal_only
    )
    should_request_repository_pex = (
        # The entire lockfile was explicitly requested.
        should_return_entire_lockfile
        # The legacy `resolve_all_constraints`
        or (python_setup.resolve_all_constraints and python_setup.requirement_constraints)
        # A non-PEX-native lockfile was used, and so we cannot directly subset it from a
        # LoadedLockfile.
        or not pex_native_subsetting_supported
    )

    if not should_request_repository_pex:
        if not pex_native_subsetting_supported:
            return requirements, ()

        # Subset the PEX-native lockfile directly via a `Resolve` superset.
        chosen_resolve = await Get(
            ChosenPythonResolve, ChosenPythonResolveRequest(request.addresses)
        )
        return (
            dataclasses.replace(
                requirements, from_superset=Resolve(chosen_resolve.name, use_entire_lockfile=False)
            ),
            (),
        )

    # Else, request the repository PEX and possibly subset it.
    repository_pex_request = await Get(
        OptionalPexRequest,
        _RepositoryPexRequest(
            request.addresses,
            hardcoded_interpreter_constraints=request.hardcoded_interpreter_constraints,
            platforms=request.platforms,
            complete_platforms=request.complete_platforms,
            internal_only=request.internal_only,
            additional_lockfile_args=request.additional_lockfile_args,
        ),
    )
    if should_return_entire_lockfile:
        # The entire lockfile was requested, so a missing repository PEX is an error.
        if repository_pex_request.maybe_pex_request is None:
            raise ValueError(
                softwrap(
                    f"""
                    [python].run_against_entire_lockfile was set, but could not find a
                    lockfile or constraints file for this target set. See
                    {doc_url('python-third-party-dependencies')} for details.
                    """
                )
            )

    repository_pex = await Get(OptionalPex, OptionalPexRequest, repository_pex_request)
    if should_return_entire_lockfile:
        assert repository_pex_request.maybe_pex_request is not None
        assert repository_pex.maybe_pex is not None
        return repository_pex_request.maybe_pex_request.requirements, [repository_pex.maybe_pex]

    # Otherwise install only the subset of the repository PEX that the targets need.
    return dataclasses.replace(requirements, from_superset=repository_pex.maybe_pex), ()
async def _warn_about_any_files_targets(
    addresses: Addresses, transitive_targets: TransitiveTargets, union_membership: UnionMembership
) -> None:
    """Warn when the closure depends on `files` targets, which are not embedded in a PEX.

    Depending on `files` targets is a common gotcha: they are not packaged into the
    built PEX, so filesystem reads resolve against the working directory instead.
    """
    file_tgts = targets_with_sources_types(
        [FileSourceField], transitive_targets.dependencies, union_membership
    )
    if not file_tgts:
        return

    # Include each root's target alias so the user can locate the problematic targets.
    targets = await Get(Targets, Addresses, addresses)
    formatted_addresses = ", ".join(
        f"{a} (`{tgt.alias}`)" for a, tgt in zip(addresses, targets)
    )
    files_addresses = sorted(tgt.address.spec for tgt in file_tgts)
    if len(addresses) == 1:
        targets_text, depend_text = "target", "depends"
    else:
        targets_text, depend_text = "targets", "depend"
    logger.warning(
        f"The {targets_text} {formatted_addresses} transitively {depend_text} "
        "on the below `files` targets, but Pants will not include them in the built package. "
        "Filesystem APIs like `open()` may be not able to load files within the binary "
        "itself; instead, they read from the current working directory."
        f"\n\nInstead, use `resources` targets. See {doc_url('resources')}."
        f"\n\nFiles targets dependencies: {files_addresses}"
    )
@rule(level=LogLevel.DEBUG)
async def create_pex_from_targets(
    request: PexFromTargetsRequest,
    python_setup: PythonSetup,
    union_membership: UnionMembership,
) -> PexRequest:
    """Assemble a `PexRequest` for the given targets.

    Gathers requirements, interpreter constraints, source files, local dists, and any
    additional inputs, and combines them into one `PexRequest`.
    """
    requirements, additional_pexes = await _determine_requirements_for_pex_from_targets(
        request, python_setup
    )
    interpreter_constraints = await Get(
        InterpreterConstraints,
        InterpreterConstraintsRequest,
        request.to_interpreter_constraints_request(),
    )
    sources_digests = []
    if request.additional_sources:
        sources_digests.append(request.additional_sources)
    if request.include_source_files:
        transitive_targets = await Get(
            TransitiveTargets,
            TransitiveTargetsRequest(
                request.addresses,
                # NOTE(review): predicate name suggests traversal stops at package
                # targets below the roots — confirm in TraverseIfNotPackageTarget.
                should_traverse_deps_predicate=TraverseIfNotPackageTarget(
                    roots=request.addresses,
                    union_membership=union_membership,
                ),
            ),
        )
        sources = await Get(PythonSourceFiles, PythonSourceFilesRequest(transitive_targets.closure))
        if request.warn_for_transitive_files_targets:
            # `files` targets will not be embedded in the PEX; warn about any in play.
            await _warn_about_any_files_targets(
                request.addresses, transitive_targets, union_membership
            )
    else:
        sources = PythonSourceFiles.empty()
    additional_inputs_digests = []
    if request.additional_inputs:
        additional_inputs_digests.append(request.additional_inputs)
    additional_args = request.additional_args
    if request.include_local_dists:
        local_dists = await Get(
            LocalDistsPex,
            LocalDistsPexRequest(
                request.addresses,
                internal_only=request.internal_only,
                interpreter_constraints=interpreter_constraints,
                sources=sources,
            ),
        )
        # `remaining_sources` presumably excludes sources now provided by the built
        # dists — confirm against LocalDistsPex. The dists PEX itself rides along as an
        # input and is passed to pex via `--requirements-pex`.
        remaining_sources = local_dists.remaining_sources
        additional_inputs_digests.append(local_dists.pex.digest)
        additional_args += ("--requirements-pex", local_dists.pex.name)
    else:
        remaining_sources = sources
    remaining_sources_stripped = await Get(
        StrippedPythonSourceFiles, PythonSourceFiles, remaining_sources
    )
    sources_digests.append(remaining_sources_stripped.stripped_source_files.snapshot.digest)
    merged_sources_digest, additional_inputs = await MultiGet(
        Get(Digest, MergeDigests(sources_digests)),
        Get(Digest, MergeDigests(additional_inputs_digests)),
    )
    description = request.description
    return PexRequest(
        output_filename=request.output_filename,
        internal_only=request.internal_only,
        layout=request.layout,
        requirements=requirements,
        interpreter_constraints=interpreter_constraints,
        platforms=request.platforms,
        complete_platforms=request.complete_platforms,
        main=request.main,
        inject_args=request.inject_args,
        inject_env=request.inject_env,
        sources=merged_sources_digest,
        additional_inputs=additional_inputs,
        additional_args=additional_args,
        description=description,
        pex_path=additional_pexes,
    )
@rule
async def get_repository_pex(
    request: _RepositoryPexRequest, python_setup: PythonSetup
) -> OptionalPexRequest:
    """Build the request for a repository PEX, or `OptionalPexRequest(None)` if not applicable.

    The repository PEX resolves an entire lockfile (or constraints file) so later PEXes
    can be subset from it.
    """
    # NB: It isn't safe to resolve against an entire lockfile or constraints file if
    # platforms are in use. See https://github.com/pantsbuild/pants/issues/12222.
    if request.platforms or request.complete_platforms:
        return OptionalPexRequest(None)
    if python_setup.requirement_constraints:
        # Legacy constraints-file path: delegate to the constraints repository-pex rule.
        constraints_repository_pex_request = await Get(
            OptionalPexRequest, _ConstraintsRepositoryPexRequest(request)
        )
        return OptionalPexRequest(constraints_repository_pex_request.maybe_pex_request)
    if not python_setup.enable_resolves:
        # Neither constraints nor resolves configured: no repository PEX to build.
        return OptionalPexRequest(None)
    chosen_resolve, interpreter_constraints = await MultiGet(
        Get(ChosenPythonResolve, ChosenPythonResolveRequest(request.addresses)),
        Get(
            InterpreterConstraints,
            InterpreterConstraintsRequest,
            request.to_interpreter_constraints_request(),
        ),
    )
    return OptionalPexRequest(
        PexRequest(
            description=softwrap(
                f"""
                Installing {chosen_resolve.lockfile.url} for the resolve
                `{chosen_resolve.name}`
                """
            ),
            output_filename=f"{path_safe(chosen_resolve.name)}_lockfile.pex",
            internal_only=request.internal_only,
            requirements=EntireLockfile(chosen_resolve.lockfile),
            interpreter_constraints=interpreter_constraints,
            # Monolithic PEXes like this one use the Packed layout (see the
            # constraints repository-pex rule for the same convention).
            layout=PexLayout.PACKED,
            platforms=request.platforms,
            complete_platforms=request.complete_platforms,
            additional_args=request.additional_lockfile_args,
        )
    )
@rule
async def _setup_constraints_repository_pex(
    constraints_request: _ConstraintsRepositoryPexRequest,
    python_setup: PythonSetup,
    global_requirement_constraints: GlobalRequirementConstraints,
) -> OptionalPexRequest:
    """Build a repository-PEX request from `[python].requirement_constraints`.

    Resolves the entire constraints file plus any URL requirements found in the
    transitive closure. Returns `OptionalPexRequest(None)` when
    `[python].resolve_all_constraints` is disabled or when the constraints file does not
    cover every name requirement in the closure.

    Fixes relative to the previous version:
    - the "unconstrained requirements" warning joined a *set*, producing a
      nondeterministic message; the projects are now sorted.
    - removed the `name_reqs` accumulator, which was populated but never read.
    """
    request = constraints_request.repository_pex_request
    if not python_setup.resolve_all_constraints:
        return OptionalPexRequest(None)
    constraints_path = python_setup.requirement_constraints
    assert constraints_path is not None
    transitive_targets = await Get(TransitiveTargets, TransitiveTargetsRequest(request.addresses))
    req_strings = PexRequirements.req_strings_from_requirement_fields(
        tgt[PythonRequirementsField]
        for tgt in transitive_targets.closure
        if tgt.has_field(PythonRequirementsField)
    )
    # In requirement strings, Foo_-Bar.BAZ and foo-bar-baz refer to the same project. We let
    # packaging canonicalize for us.
    # See: https://www.python.org/dev/peps/pep-0503/#normalized-names
    url_reqs = set()  # E.g., 'foobar@ git+https://github.com/foo/bar.git@branch'
    name_req_projects = set()  # Canonicalized project names of name reqs, e.g. foobar>=1.2.3
    constraints_file_reqs = set(global_requirement_constraints)
    for req_str in req_strings:
        req = PipRequirement.parse(req_str)
        if req.url:
            url_reqs.add(req)
        else:
            name_req_projects.add(canonicalize_project_name(req.project_name))
    constraint_file_projects = {
        canonicalize_project_name(req.project_name) for req in constraints_file_reqs
    }
    # Constraints files must only contain name reqs, not URL reqs (those are already
    # constrained by their very nature). See https://github.com/pypa/pip/issues/8210.
    unconstrained_projects = name_req_projects - constraint_file_projects
    if unconstrained_projects:
        logger.warning(
            softwrap(
                f"""
                The constraints file {constraints_path} does not contain
                entries for the following requirements: {', '.join(sorted(unconstrained_projects))}.
                Ignoring `[python].resolve_all_constraints` option.
                """
            )
        )
        return OptionalPexRequest(None)
    interpreter_constraints = await Get(
        InterpreterConstraints,
        InterpreterConstraintsRequest,
        request.to_interpreter_constraints_request(),
    )
    # To get a full set of requirements we must add the URL requirements to the
    # constraints file, since the latter cannot contain URL requirements.
    # NB: We can only add the URL requirements we know about here, i.e., those that
    # are transitive deps of the targets in play. There may be others in the repo.
    # So we may end up creating a few different repository pexes, each with identical
    # name requirements but different subsets of URL requirements. Fortunately since
    # all these repository pexes will have identical pinned versions of everything,
    # this is not a correctness issue, only a performance one.
    all_constraints = {str(req) for req in (constraints_file_reqs | url_reqs)}
    repository_pex = PexRequest(
        description=f"Resolving {constraints_path}",
        output_filename="repository.pex",
        internal_only=request.internal_only,
        requirements=PexRequirements(
            all_constraints,
            constraints_strings=(str(constraint) for constraint in global_requirement_constraints),
            description_of_origin=constraints_path,
        ),
        # Monolithic PEXes like the repository PEX should always use the Packed layout.
        layout=PexLayout.PACKED,
        interpreter_constraints=interpreter_constraints,
        platforms=request.platforms,
        complete_platforms=request.complete_platforms,
        additional_args=request.additional_lockfile_args,
    )
    return OptionalPexRequest(repository_pex)
@dataclass(frozen=True)
class RequirementsPexRequest:
    """Requests a PEX containing only thirdparty requirements for internal/non-portable use.

    Used as part of an optimization to reduce the "overhead" (in terms of both time and space) of
    thirdparty requirements by taking advantage of certain PEX features.
    """

    addresses: tuple[Address, ...]
    hardcoded_interpreter_constraints: InterpreterConstraints | None

    def __init__(
        self,
        addresses: Iterable[Address],
        *,
        hardcoded_interpreter_constraints: InterpreterConstraints | None = None,
    ) -> None:
        # Frozen dataclass: bypass the generated __setattr__ to normalize `addresses`.
        object.__setattr__(self, "addresses", Addresses(addresses))
        object.__setattr__(
            self, "hardcoded_interpreter_constraints", hardcoded_interpreter_constraints
        )
@rule
async def generalize_requirements_pex_request(
    request: RequirementsPexRequest,
) -> PexFromTargetsRequest:
    """Translate a `RequirementsPexRequest` into an equivalent `PexFromTargetsRequest`."""
    kwargs = dict(
        addresses=sorted(request.addresses),
        output_filename="requirements.pex",
        internal_only=True,
        include_source_files=False,
        hardcoded_interpreter_constraints=request.hardcoded_interpreter_constraints,
    )
    return PexFromTargetsRequest(**kwargs)
def rules():
    """Return this module's rules plus those of the modules it builds on."""
    all_rules = [
        *collect_rules(),
        *pex_rules(),
        *local_dists_rules(),
        *python_sources_rules(),
    ]
    return tuple(all_rules)
|
"""
Module that calculates the number of developers that contributed to each
modified file in the repo in a given time range.
See https://dl.acm.org/doi/10.1145/2025113.2025119
"""
from typing import Optional
from pydriller import ModificationType
from pydriller.metrics.process.process_metric import ProcessMetric
class ContributorsCount(ProcessMetric):
    """
    Implements two process metrics over the mined commit range:

    * Contributors Count: the number of contributors who modified a file.
    * Minor Contributors Count: the number of contributors who authored less
      than 5% of the churned lines of a file.
    """

    def __init__(self, path_to_repo: str,
                 since=None,
                 to=None,
                 from_commit: Optional[str] = None,
                 to_commit: Optional[str] = None):
        super().__init__(path_to_repo, since=since, to=to, from_commit=from_commit, to_commit=to_commit)
        self._initialize()

    def _initialize(self):
        """Mine the commits once and populate both metric dictionaries."""
        self.contributors = {}
        self.minor_contributors = {}

        # Maps an old path to its most recent known path, so that a file's
        # history is tracked across renames.
        renamed_files = {}
        for commit in self.repo_miner.traverse_commits():
            for modified_file in commit.modified_files:
                filepath = renamed_files.get(modified_file.new_path,
                                             modified_file.new_path)
                if modified_file.change_type == ModificationType.RENAME:
                    renamed_files[modified_file.old_path] = filepath
                author = commit.author.email.strip()
                # "Authored" lines are counted as added + deleted (churn).
                churn = modified_file.added_lines + modified_file.deleted_lines
                per_author = self.contributors.setdefault(filepath, {})
                per_author[author] = per_author.get(author, 0) + churn

        # Collapse the per-author line tallies into the two counts.
        for path in list(self.contributors):
            contributions = self.contributors[path]
            total = sum(contributions.values())
            if total == 0:
                # No churned lines at all: drop the file from the metric.
                del self.contributors[path]
                continue
            self.contributors[path] = len(contributions)
            self.minor_contributors[path] = sum(
                1 for authored in contributions.values() if authored / total < .05
            )

    def count(self):
        """
        Return the number of contributors who modified a file.
        """
        return self.contributors

    def count_minor(self):
        """
        Return the number of contributors that authored less than
        5% of code of a file.
        """
        return self.minor_contributors
|
# hat -> {dwarf name -> best physics value seen}
dwarfDict = {}

# Read records of the form "name <:> hat <:> physics" until the terminator.
while True:
    line = input()
    if line == 'Once upon a time':
        break
    parts = line.split(' <:> ')
    dwarf_name, hat = parts[0], parts[1]
    physics = int(parts[2])
    hat_group = dwarfDict.setdefault(hat, {})
    # Keep only the strongest physics value per (hat, name) pair.
    if dwarf_name not in hat_group or hat_group[dwarf_name] < physics:
        hat_group[dwarf_name] = physics

# Flatten to [name, physics, group size, hat] rows for sorting.
orderedList = []
for hat, members in dwarfDict.items():
    for dwarf_name, strength in members.items():
        orderedList.append([dwarf_name, strength, len(members), hat])

# Strongest first; ties broken by larger hat group.
orderedList.sort(key=lambda row: (-row[1], -row[2]))
for row in orderedList:
    print(f"({row[3]}) {row[0]} <-> {row[1]}")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.