max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
app.py | dylan0stewart/data-science | 0 | 12767051 | # https://spotipy.readthedocs.io/en/2.13.0/
# pip install spotipy --upgrade
# pipenv install python-dotenv
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
import sys
import time
from flask import Flask, jsonify, Response, render_template, request
from flask_sqlalchemy import SQLAlchemy
import pandas as pd
import numpy as np
from os import getenv
from dotenv import load_dotenv
# Load SPOTIPY_CLIENT_ID / SPOTIPY_CLIENT_SECRET from a local .env file.
load_dotenv()
app = Flask(__name__)
# NOTE(review): `market` is never used below — confirm whether spotify.search
# should receive it as the `market` argument.
market = ["us"]
client_id = getenv('SPOTIPY_CLIENT_ID')
client_secret = getenv('SPOTIPY_CLIENT_SECRET')
credentials = SpotifyClientCredentials(client_id=client_id, client_secret=client_secret)
# NOTE(review): get_access_token() is deprecated in newer spotipy versions (may
# return a dict instead of a string) and the token expires after ~1 hour with no
# refresh here; Spotify(client_credentials_manager=credentials) is the supported
# form — confirm the pinned spotipy version.
token = credentials.get_access_token()
spotify = spotipy.Spotify(auth=token)
@app.route('/')
def index():
    """Serve the landing page containing the song-search form."""
    template_name = 'index.html'
    return render_template(template_name)
@app.route('/output', methods=['POST'])
def output():
    """Handle the search-form POST: look up the submitted song name on
    Spotify and return the raw search result dict (Flask serializes it
    to JSON).
    """
    # connecting html to request
    # User inputs song name here
    user_input_song = request.form['user_input_song']
    # spotify search params
    # NOTE(review): limit=1 returns only the top track; the module-level
    # `market` list is never passed here — confirm intent.
    results = spotify.search(str(user_input_song), type="track", limit=1)
    return results | 2.65625 | 3 |
avista_base/auth/user.py | isu-avista/base-server | 0 | 12767052 | <reponame>isu-avista/base-server
from avista_base.auth import bp
from avista_data.user import User
from flask import request, jsonify, current_app
from avista_base.auth import role_required
from avista_data.role import Role
@bp.route('/api/users', methods=['POST'])
@role_required(Role.ADMIN)
def create_user():
    """Create a new user from the JSON request body (admin only)."""
    # Reject non-JSON requests outright.
    if not request.is_json:
        return jsonify({"msg": "Missing JSON in request"}), 400
    payload = request.get_json()
    if payload is None or payload == {}:
        # Body parsed but empty — nothing to create.
        return jsonify({'status': 'failure', 'message': 'Missing data'}), 400
    print("Post Data: " + str(payload))
    new_user = User(payload)
    current_app.session.add(new_user)
    current_app.session.commit()
    return jsonify({'status': 'success', 'message': 'User added!'}), 200
@bp.route('/api/users', methods=['GET'])
@role_required(Role.ADMIN)
def read_all_users():
    """Return every user as a JSON list of dicts (admin only).

    FIX: dropped 'POST' from the accepted methods — POST /api/users is
    already handled by create_user on the same rule, so listing POST
    here made the two routes overlap ambiguously.
    """
    data = [user.to_dict() for user in current_app.session.query(User).all()]
    return jsonify(data)
@bp.route('/api/users/<int:user_id>', methods=['GET'])
@role_required(Role.USER)
def read_one_user(user_id):
    """Return a single user by id, or 404 when the id is unknown.

    FIX: previously an unknown id made ``user.to_dict()`` raise
    AttributeError on None, producing an HTTP 500.
    """
    user = current_app.session.query(User).filter_by(id=user_id).first()
    if user is None:
        return jsonify({'status': 'failure', 'message': 'User not found'}), 404
    return jsonify(user.to_dict())
@bp.route('/api/users/<int:user_id>', methods=['PUT'])
@role_required(Role.USER)
def update_user(user_id):
    """Update an existing user from the JSON request body.

    FIX: returns 404 instead of crashing (AttributeError on None) when
    the id does not exist.
    """
    post_data = request.get_json()
    user = current_app.session.query(User).filter_by(id=user_id).first()
    if user is None:
        return jsonify({'status': 'failure', 'message': 'User not found'}), 404
    user.update(post_data)
    return jsonify({'status': 'success', 'message': 'User updated!'})
@bp.route('/api/users/<int:user_id>', methods=['DELETE'])
@role_required(Role.ADMIN)
def delete_user(user_id):
    """Delete a user by id (admin only).

    FIXES: use the <int:user_id> converter for consistency with the
    other user routes (the id column is an integer), and return 404
    instead of crashing when the id is unknown
    (session.delete(None) raised previously).
    """
    user = current_app.session.query(User).filter_by(id=user_id).first()
    if user is None:
        return jsonify({'status': 'failure', 'message': 'User not found'}), 404
    current_app.session.delete(user)
    current_app.session.commit()
    return jsonify({'status': 'success', 'message': 'User deleted!'})
| 2.421875 | 2 |
main.py | sainitishkumar/Song-Identifier | 3 | 12767053 | # # -*- coding: utf-8 -*-
# from chatterbot import ChatBot
# bot = ChatBot(
# "Math & Time Bot",
# logic_adapters=[
# "chatterbot.logic.MathematicalEvaluation",
# "chatterbot.logic.TimeLogicAdapter"
# ],
# input_adapter="chatterbot.input.VariableInputTypeAdapter",
# output_adapter="chatterbot.output.OutputAdapter",
# trainer='chatterbot.trainers.ChatterBotCorpusTrainer'
# )
# # Print an example of getting one math based response
# response = bot.get_response("What is 4 + 9?")
# print(response)
# # Print an example of getting one time based response
# response = bot.get_response("What time is it?")
# print(response)
import numpy as np
from matplotlib import pyplot as plt
import scipy.io.wavfile as wav
from numpy.lib import stride_tricks
import sys
import os
import pickle
def stft(sig, frameSize, overlapFac=0.5, window=np.hanning):
    """Short-time Fourier transform of *sig*.

    The signal is zero-padded with frameSize/2 samples in front (so the
    first frame is centred on sample 0) and frameSize samples at the
    end, split into overlapping frames, tapered with *window*, and
    transformed with a real FFT.

    Returns a complex array of shape (num_frames, frameSize // 2 + 1).
    """
    taper = window(frameSize)
    # Hop between consecutive frames; overlapFac=0.5 -> half-frame overlap.
    hop = int(frameSize - np.floor(overlapFac * frameSize))
    padded = np.append(np.zeros(int(np.floor(frameSize / 2.0))), sig)
    num_frames = np.ceil((len(padded) - frameSize) / float(hop)) + 1
    padded = np.append(padded, np.zeros(frameSize))
    # View the 1-D signal as a 2-D array of overlapping frames; copy so
    # the in-place taper below cannot corrupt the shared buffer.
    step = padded.strides[0]
    frames = stride_tricks.as_strided(
        padded,
        shape=(int(num_frames), frameSize),
        strides=(step * hop, step),
    ).copy()
    frames *= taper
    return np.fft.rfft(frames)
def logscale_spec(spec, sr=22000, factor=20.):
    """Rebin an STFT spectrogram onto a (power-law) compressed frequency axis.

    Frequency bins are grouped according to ``linspace(0,1,freqbins)**factor``
    and summed; the centre frequency of each new bin is the mean of the
    original FFT frequencies it covers.

    Returns (newspec, freqs): the rebinned complex spectrogram and the
    list of centre frequencies.
    """
    timebins, freqbins = np.shape(spec)
    # Monotone mapping from new bins to original bin indices.
    scale = np.linspace(0, 1, freqbins) ** factor
    scale *= (freqbins - 1) / max(scale)
    scale = np.unique(np.round(scale))
    newspec = np.complex128(np.zeros([timebins, len(scale)]))
    # Physical frequency of every original FFT bin.
    allfreqs = np.abs(np.fft.fftfreq(freqbins * 2, 1. / sr)[:freqbins + 1])
    freqs = []
    # Single pass fills both the rebinned spectrogram and the centre
    # frequencies (the original used two loops over the same slices).
    for idx in range(len(scale)):
        lo = int(scale[idx])
        hi = int(scale[idx + 1]) if idx < len(scale) - 1 else None
        newspec[:, idx] = np.sum(spec[:, lo:hi], axis=1)
        freqs.append(np.mean(allfreqs[lo:hi]))
    return newspec, freqs
def plotstft(audiopath, binsize=2**10, plotpath=None, colormap="jet"):
    """Compute a dB-scaled spectrogram of a WAV file and a per-frame
    median "fingerprint".

    Returns (ims, arr, fingerprint):
      ims         -- dB spectrogram, shape (timebins, bins)
      arr         -- median dB level of every 3rd time bin
      fingerprint -- NOTE(review): appended unconditionally each iteration,
                     so it ends up identical to arr — confirm whether the
                     append was meant to live inside the min_var branch.
    """
    samplerate, samples = wav.read(audiopath)
    s = stft(samples, binsize)
    sshow, freq = logscale_spec(s, factor=1.0, sr=samplerate)
    # Convert magnitudes to dB relative to 10e-6 (avoids log of tiny values).
    ims = 20.*np.log10(np.abs(sshow)/10e-6)
    timebins, freqbins = np.shape(ims)
    # NOTE(review): freqbins is halved and only printed — never used afterwards.
    freqbins=freqbins/2
    print("timebins: ", timebins)
    print("freqbins: ", freqbins)
    # plt.title('Spectrogram')
    # plt.imshow(np.transpose(ims), origin="lower", aspect="auto", cmap=colormap, interpolation="none")
    arr=[]
    fingerprint = []
    # Track the smallest positive median seen (min_var itself is unused
    # outside the loop — presumably leftover from an earlier design).
    min_var=np.median(ims[0])
    for i in range(0,timebins,3):
        temp=np.median(ims[i])
        arr.append(temp)
        plt.plot(temp)
        if min_var > temp and temp>0:
            min_var = temp
        fingerprint.append(temp)
        if min_var<0:
            min_var = 0
    # plt.colorbar()
    # plt.xlabel("timebins ")
    # plt.ylabel("frequency (hz)")
    # plt.xlim([0, timebins-1])
    # plt.ylim([0, int(freqbins)])
    # plt.plot(arr,'.',color='b')
    # plt.show()
    # xlocs = np.float32(np.linspace(0, timebins-1, 5))
    # plt.xticks(xlocs, ["%.02f" % l for l in ((xlocs*len(samples)/timebins)+(0.5*binsize))/samplerate])
    # ylocs = np.int16(np.round(np.linspace(0, freqbins-1, 10)))
    # plt.yticks(ylocs, ["%.02f" % freq[i] for i in ylocs])
    # if plotpath:
    #     plt.savefig(plotpath, bbox_inches="tight")
    # plt.clf()
    return ims,arr,fingerprint
# Default query file name (reassigned again below, before any use).
filename1='test.wav'
#ims2,arr2,fingerprint2=plotstft('newSong.wav')
def check_song(filename1,ims2,arr2,fingerprint2):
    """Slide fingerprint2 over the fingerprint of *filename1* and score
    the best alignment by median absolute difference.

    Returns (subsong, min_sum): whether any window scored under the
    err threshold, and the best (lowest) score found.

    NOTE(review): this function is never called in this script — the
    logic is duplicated in check_song1 below, which reads fingerprints
    from db.pkl instead.  ims2/arr2 are accepted but unused.
    """
    ims,arr,fingerprint1 = plotstft(filename1)
    # ims2,arr2,fingerprint2 = plotstft(filename2)
    arrBig = fingerprint1
    arrSmall = fingerprint2
    l1 = len(fingerprint1)
    l2 = len(fingerprint2)
    # NOTE(review): err=1000 is a very permissive match threshold — confirm.
    err = 1000
    subsong = False
    sum1=0
    min_sum=20000
    newarr=[]
    # Slide the small fingerprint over every window of the big one.
    for i in range(0,l1-l2+1):
        subArr = np.array(arrBig[i:i+l2])
        for j in range(0,l2):
            dummy = subArr[j]-arrSmall[j]
            if(dummy<0): dummy=dummy*(-1)
            newarr.append(dummy)
        newarr=np.array(newarr)
        # Median absolute difference for this alignment.
        sum1 = np.median(newarr)
        if sum1<=0:
            sum1 = sum1*(-1)
        if sum1<err:
            subsong=True
        newarr=[]
        if(min_sum>sum1):
            min_sum=sum1
    return subsong,min_sum
# NOTE(review): song_files and main_lis are populated here but never used
# below — the fingerprint database is loaded from db.pkl in check_song1.
song_files = os.listdir('./songs')
main_lis={}
#############################
filename1='test.wav'
# Fingerprint the query recording supplied on the command line.
ims2,arr2,fingerprint1=plotstft(sys.argv[1])
# Drop the first 20 samples — presumably to skip leading silence; confirm.
fingerprint1=np.array(fingerprint1[20:])
filename2='db.pkl'
main_dir={}
def check_song1(fingerprint1):
    """Score *fingerprint1* against every fingerprint stored in db.pkl.

    For each database entry, slides fingerprint1 over it and records the
    best (lowest) median absolute difference into the module-level
    ``main_dir`` dict, keyed by the entry's name.

    NOTE(review): duplicates check_song above; ``subsong`` is computed
    but never returned or used; results are delivered via the global
    ``main_dir`` rather than a return value.
    """
    with open(filename2,'rb') as inp:
        main_lis = pickle.load(inp)
        for fprint in main_lis:
            arrBig = main_lis[fprint]
            arrSmall = fingerprint1
            l1 = len(arrBig)
            l2 = len(arrSmall)
            err = 1000
            subsong = False
            sum1=0
            min_sum=20000
            newarr=[]
            # Slide the query over every window of the stored fingerprint.
            for i in range(0,l1-l2+1):
                subArr = np.array(arrBig[i:i+l2])
                for j in range(0,l2):
                    dummy = subArr[j]-arrSmall[j]
                    if(dummy<0): dummy=dummy*(-1)
                    newarr.append(dummy)
                newarr=np.array(newarr)
                sum1 = np.median(newarr)
                if sum1<=0:
                    sum1 = sum1*(-1)
                if sum1<err:
                    subsong=True
                newarr=[]
                if(min_sum>sum1):
                    min_sum=sum1
            main_dir[fprint]=min_sum
# Score the query against every stored fingerprint (fills main_dir).
check_song1(fingerprint1)
# print(main_dir)
# Sort matches by score, ascending: lower median difference = better match.
# (main_dir becomes a list of (name, score) tuples here.)
main_dir = sorted(main_dir.items(),key = lambda x:x[1])
print(main_dir)
| 2.546875 | 3 |
Scenes/__init__.py | OrIOg/TronRacerTest | 0 | 12767054 | <gh_stars>0
from Scenes.Game import Scene as Game | 1.015625 | 1 |
nxt/plugins/example.py | dalteocraft/nxt | 53 | 12767055 | from nxt.tokens import register_token
# Marker that tags a value as belonging to this example token plugin.
PREFIX = 'ex::'


def detect_token_type(value):
    """Return True when *value* carries this plugin's ``ex::`` prefix."""
    return value.startswith(PREFIX)
def resolve_token(stage, node, value, layer, **kwargs):
    """Resolve *value* through the stage, then return it reversed.

    The reversal is this example plugin's "transformation"; nested
    tokens are expanded first via ``stage.resolve``.
    """
    resolved = stage.resolve(node, value, layer, **kwargs)
    return resolved[::-1]
register_token(PREFIX, detect_token_type, resolve_token)
| 2.453125 | 2 |
wings/trade.py | mitakash/hummingbot | 0 | 12767056 | <reponame>mitakash/hummingbot
#!/usr/bin/env python
from collections import namedtuple
from typing import (
    Any,
    Dict,
    List,
)

import pandas as pd

from wings.order_book_row import OrderBookRow
from wings.events import TradeType
class Trade(namedtuple("_Trade", "symbol, side, price, amount")):
    """A single executed trade on a market.

    Fields:
        symbol: trading pair symbol, e.g. "ETHUSDT".
        side:   TradeType.BUY or TradeType.SELL.
        price:  execution price.
        amount: executed quantity.
    """
    symbol: str
    side: TradeType
    price: float
    amount: float

    @classmethod
    def trades_from_order_book_rows(cls,
                                    symbol: str,
                                    side: TradeType,
                                    order_book_rows: List[OrderBookRow]) -> List["Trade"]:
        """Convert order-book rows into Trade records with the given symbol and side."""
        return [Trade(symbol, side, r.price, r.amount) for r in order_book_rows]

    @classmethod
    def trade_from_binance_execution_report_event(cls, execution_report: Dict[str, Any]) -> "Trade":
        """Build a Trade from a Binance user-data ``executionReport`` event.

        FIX: the annotation was ``Dict[str, any]`` — ``any`` is the builtin
        function, not a type; ``typing.Any`` is the intended annotation.

        Raises:
            ValueError: if the event's execution type ("x") is not "TRADE".
        """
        execution_type: str = execution_report.get("x")
        if execution_type != "TRADE":
            raise ValueError(f"Invalid execution type '{execution_type}'.")
        # Binance short keys: "s" symbol, "S" side, "L" last executed price,
        # "l" last executed quantity.
        return Trade(execution_report["s"],
                     TradeType.BUY if execution_report["S"] == "BUY" else TradeType.SELL,
                     float(execution_report["L"]),
                     float(execution_report["l"]))

    @classmethod
    def to_pandas(cls, trades: List) -> pd.DataFrame:
        """Tabulate trades into a DataFrame (symbol/trade_side/price/quantity)."""
        columns: List[str] = ["symbol", "trade_side", "price", "quantity"]
        data = [[
            trade.symbol,
            "BUY" if trade.side is TradeType.BUY else "SELL",
            trade.price,
            trade.amount,
        ] for trade in trades]
        return pd.DataFrame(data=data, columns=columns)
| 2.890625 | 3 |
pyefun/networkUtil.py | nobodxbodon/pyefun | 1 | 12767057 | <reponame>nobodxbodon/pyefun<filename>pyefun/networkUtil.py
# -*- coding:utf-8 -*-
import sys
import requests
from .public import *
from requests.adapters import HTTPAdapter
@异常处理返回类型逻辑型
def 网页_取外网IP(返回地区=False):
    """Return this machine's public IP address.

    When 返回地区 is True, returns two values: (ip, region string).
    Tries three public lookup services in order, falling back to the
    next on any failure; returns '' (or '', '') when all three fail.
    """
    header = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.152 Safari/537.36"}
    requests.packages.urllib3.disable_warnings()
    try:
        源码 = requests.get("http://pv.sohu.com/cityjson", verify=False, headers=header)
        # SECURITY NOTE(review): eval() on a network response executes whatever
        # the remote host returns — json.loads/ast.literal_eval would be safer.
        结果 = eval(源码.text[19:-1])
        if 返回地区 == False:
            return 结果['cip']
        return 结果['cip'], 结果['cname']
    except:
        pass
    try:
        # Fallback service #2 (NetEase); response is JSONP, strip the wrapper.
        源码 = requests.get("https://ipservice.ws.126.net/locate/api/getLocByIp?callback=bowlder.cb._2", verify=False, headers=header)
        结果 = eval(源码.text[14:-1])
        if 返回地区 == False:
            return 结果['result']['ip']
        return 结果['result']['ip'], "{} {} {}".format(结果['result']['country'] , 结果['result']['province'] , 结果['result']['city'])
    except:
        pass
    try:
        # Fallback service #3 (bilibili).
        源码 = requests.get("https://api.bilibili.com/x/web-interface/zone?jsonp=jsonp", verify=False, headers=header)
        结果 = eval(源码.text)
        if 返回地区 == False:
            return 结果['data']['addr']
        return 结果['data']['addr'], "{} {} {}".format(结果['data']['country'] , 结果['data']['province'] , 结果['data']['city'])
    except:
        if 返回地区 == False:
            return ''
        return '',''
@异常处理返回类型逻辑型
def 网页_COOKIE合并更新(原COOKIE, 新COOKIE):
    """Merge two cookie collections and return the merged dict.

    Each argument may be either a cookie header string ("k=v; k2=v2")
    or a dict.  Pairs from 新COOKIE override those in 原COOKIE.

    FIXES vs. the original:
    - The multi-cookie branches referenced an undefined name ``cookie``
      (NameError) instead of the actual parameter.
    - Values were extracted by ``rfind`` on the *whole* cookie string
      while slicing the individual pair, yielding garbage values.
    - Dict inputs were aliased and mutated in place; a new dict is now
      always returned and neither argument is modified.
    """
    def _解析(cookie值):
        # Normalise a cookie argument into a {name: value} dict.
        if not isinstance(cookie值, str):
            return dict(cookie值)
        结果 = {}
        for 分段 in cookie值.split(';'):
            # Split on the FIRST '=' only: cookie values may contain '='.
            名称, _, 值 = 分段.partition('=')
            名称 = 名称.strip(' ')
            值 = 值.strip(' ')
            if 名称 and 值:
                结果[名称] = 值
        return 结果

    最新Cookie = _解析(原COOKIE)
    最新Cookie.update(_解析(新COOKIE))
    return 最新Cookie
class 网页返回类型:
    """Value object holding the pieces of an HTTP response."""

    def __init__(self):
        # Underlying requests.Response object (None until a request completes).
        self.原对象 = None
        # HTTP status code; 0 means no request has completed.
        self.状态码 = 0
        # Decoded response body text.
        self.源码 = ''
        # Raw body bytes — needed for images, video and other binary files.
        self.字节集 = b''
        # Cookies set by the server.
        self.cookie = {}
        # Response headers.
        self.协议头 = {}
        # Parsed JSON body, when the response is valid JSON.
        self.json = {}
class 网页_访问_会话:
    """HTTP client that keeps a single requests.Session across calls.

    The session preserves cookies and connection pooling between
    requests and lets retry adapters be mounted once at construction.
    """

    def __init__(self, 重试次数=0):
        self._requests = requests.session()
        if 重试次数:
            # Mount retry adapters for both schemes on the shared session.
            self._requests.mount('http://', HTTPAdapter(max_retries=重试次数))
            self._requests.mount('https://', HTTPAdapter(max_retries=重试次数))

    @异常处理返回类型逻辑型
    def 网页_访问(self, url, 方式=0, 参数='', cookie='', 协议头={}, 允许重定向=True, 代理地址=None, 编码=None, 证书验证=False, 上传文件=None, 补全协议头=True, json={}, 连接超时=15, 读取超时=15):
        """Perform an HTTP request through the shared session.

        FIX: requests were previously dispatched through the module-level
        ``requests.get/post/...`` functions, which bypassed the session
        created in ``__init__`` — session cookies and the mounted retry
        adapters were never used.  All six verbs now go through
        ``self._requests``.

        :param url: target URL; "http://" is prepended when the scheme is missing.
        :param 方式: 0 GET, 1 POST, 2 PUT, 3 DELETE, 4 HEAD, 5 OPTIONS.
        :param 参数: query/body parameters (string or dict).
        :param cookie: cookies as a "k=v; k2=v2" string or a dict.
        :param 协议头: headers as "Name: value" lines or a dict.
        :param 允许重定向: follow redirects (default True).
        :param 代理地址: proxy as "user:pass@ip:port" or "ip:port".
        :param 编码: response encoding override, e.g. "utf8" / "gbk".
        :param 证书验证: False, or a path to a certificate bundle.
        :param 上传文件: e.g. {'upload': ('code.png', data, 'image/png')}.
        :param 补全协议头: fill in common default headers (default True).
        :param json: JSON body for POST-style requests.
        :param 连接超时: connect timeout in seconds (default 15).
        :param 读取超时: read timeout in seconds (default 15).
        :return: a 网页返回类型 instance (empty on failure).
        """
        网页 = 网页返回类型()
        try:
            url = url.strip(' ')
            url = url if url.startswith('http') else 'http://' + url
            _cookie = {}
            _协议头 = {}
            传入参数 = {}
            # Extract the host portion for the default Host header.
            if url.find('/', 8) != -1:
                host = url[url.find('://') + 3:url.find('/', 8)]
            else:
                host = url[url.find('://') + 3:]
            # Headers given as text: one "Name: value" per line.
            if type(协议头) == str:
                协议头数组 = 协议头.split('\n')
                for x in 协议头数组:
                    名称 = x[0:x.find(':')].strip(' ')
                    值 = x[x.rfind(名称) + len(名称) + 1:len(x)].strip(' ')
                    if 名称 and 值:
                        _协议头[名称] = 值
            else:
                _协议头 = 协议头
            if 补全协议头:
                if not 'Host' in _协议头:
                    _协议头['Host'] = host
                if not 'Accept' in _协议头:
                    _协议头['Accept'] = '*/*'
                if not 'Content-Type' in _协议头:
                    _协议头['Content-Type'] = 'application/x-www-form-urlencoded'
                if not 'User-Agent' in _协议头:
                    _协议头['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.152 Safari/537.36'
                if not 'Referer' in _协议头:
                    _协议头['Referer'] = url
            # Cookies given as text: "k=v" or "k=v; k2=v2".
            if type(cookie) == str:
                if cookie.find(";") == -1:
                    名称 = cookie[0:cookie.find("=")].strip(' ')
                    值 = cookie[cookie.rfind(名称) + len(名称) + 1:len(cookie)].strip(' ')
                    if 名称 and 值:
                        _cookie = {名称: 值}
                else:
                    cookie数组 = cookie.split(';')
                    for x in cookie数组:
                        名称 = x[0:x.find("=")].strip(' ')
                        值 = x[cookie.rfind(名称) + len(名称) + 1:len(x)].strip(' ')
                        if 名称 and 值:
                            _cookie[名称] = 值
            else:
                _cookie = cookie
            传入参数['url'] = url
            传入参数['verify'] = 证书验证
            传入参数['cookies'] = _cookie
            传入参数['headers'] = _协议头
            传入参数['allow_redirects'] = 允许重定向
            if 参数:
                if 方式 == 0:
                    传入参数['params'] = 参数
                else:
                    传入参数['data'] = 参数
            if json:
                传入参数['json'] = json
            if 上传文件:
                传入参数['files'] = 上传文件
            if 代理地址:
                传入参数['proxies'] = {"http": "http://" + 代理地址, "https": "https://" + 代理地址}
            if 连接超时 and 读取超时:
                传入参数['timeout'] = (连接超时, 读取超时)
            # Dispatch through the shared session (was: module-level requests.*).
            if 方式 == 0:
                网页对象 = self._requests.get(**传入参数)
            elif 方式 == 1:
                网页对象 = self._requests.post(**传入参数)
            elif 方式 == 2:
                网页对象 = self._requests.put(**传入参数)
            elif 方式 == 3:
                网页对象 = self._requests.delete(**传入参数)
            elif 方式 == 4:
                网页对象 = self._requests.head(**传入参数)
            elif 方式 == 5:
                网页对象 = self._requests.options(**传入参数)
            if 编码:
                网页对象.encoding = 编码
            网页.原对象 = 网页对象
            网页.源码 = 网页对象.text
            网页.cookie = dict(网页对象.cookies)
            网页.状态码 = 网页对象.status_code
            网页.协议头 = 网页对象.headers
            网页.字节集 = 网页对象.content
            try:
                网页.json = 网页对象.json()
            except:
                pass
        except:
            print(sys._getframe().f_code.co_name, "函数发生异常", url)
            # print("错误发生时间:", str(datetime.datetime.now()))
            # print("错误的详细情况:", traceback.format_exc())
        return 网页
# @异常处理返回类型逻辑型
def 网页_访问(url, 方式=0, 参数='', cookie='', 协议头={}, 允许重定向=True, 代理地址=None, 编码=None,证书验证=False, 上传文件=None,补全协议头=True,json={}, 连接超时=15, 读取超时=15):
    """Perform a one-off HTTP request and return a 网页返回类型.

    NOTE(review): this duplicates 网页_访问_会话.网页_访问 almost line for
    line, but without a session; the surrounding try/except is commented
    out (see the trailing comments), so network errors propagate here.

    :param url: target URL; "http://" is prepended when the scheme is missing.
    :param 方式: 0 GET, 1 POST, 2 PUT, 3 DELETE, 4 HEAD, 5 OPTIONS.
    :param 参数: query/body parameters (string or dict).
    :param cookie: cookies as a "k=v; k2=v2" string or a dict.
    :param 协议头: headers as "Name: value" lines or a dict.
    :param 允许重定向: follow redirects (default True).
    :param 代理地址: proxy as "user:pass@ip:port" or "ip:port".
    :param 编码: response encoding override, e.g. "utf8" / "gbk".
    :param 证书验证: False, or a path to a certificate bundle.
    :param 上传文件: e.g. {'upload': ('code.png', data, 'image/png')}.
    :param 补全协议头: fill in common default headers (default True).
    :param json: JSON body for POST-style requests.
    :param 连接超时: connect timeout in seconds (default 15).
    :param 读取超时: read timeout in seconds (default 15).
    :return: a 网页返回类型 instance.
    """
    网页 = 网页返回类型()
    url = url.strip(' ')
    url = url if url.startswith('http') else 'http://' + url
    _cookie = {}
    _协议头 = {}
    传入参数 = {}
    # Extract the host portion for the default Host header.
    if url.find('/',8) != -1:
        host = url[url.find('://')+3:url.find('/', 8)]
    else:
        host = url[url.find('://')+3:]
    # Headers given as text: one "Name: value" per line.
    if type(协议头) == str:
        协议头数组 = 协议头.split('\n')
        for x in 协议头数组:
            名称 = x[0:x.find(':')].strip(' ')
            值 = x[x.rfind(名称) + len(名称)+1:len(x)].strip(' ')
            if 名称 and 值:
                _协议头[名称] = 值
    else:
        _协议头 = 协议头
    if 补全协议头:
        if not 'Host' in _协议头:
            _协议头['Host'] = host
        if not 'Accept' in _协议头:
            _协议头['Accept'] = '*/*'
        if not 'Content-Type' in _协议头:
            _协议头['Content-Type'] = 'application/x-www-form-urlencoded'
        if not 'User-Agent' in _协议头:
            _协议头['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.152 Safari/537.36'
        if not 'Referer' in _协议头:
            _协议头['Referer'] = url
    # Cookies given as text: "k=v" or "k=v; k2=v2".
    # NOTE(review): the rfind-based value extraction below indexes the pair
    # string with offsets found in the whole cookie string — same pattern that
    # is buggy in 网页_COOKIE合并更新; confirm against real multi-cookie input.
    if type(cookie) == str:
        if cookie.find(";") == -1:
            名称 = cookie[0:cookie.find("=")].strip(' ')
            值 = cookie[cookie.rfind(名称) + len(名称) + 1:len(cookie)].strip(' ')
            if 名称 and 值:
                _cookie = {名称: 值}
        else:
            cookie数组 = cookie.split(';')
            for x in cookie数组:
                名称 = x[0:x.find("=")].strip(' ')
                值 = x[cookie.rfind(名称) + len(名称) + 1:len(x)].strip(' ')
                if 名称 and 值:
                    _cookie[名称] = 值
    else:
        _cookie = cookie
    传入参数['url'] = url
    传入参数['verify'] = 证书验证
    传入参数['cookies'] = _cookie
    传入参数['headers'] = _协议头
    传入参数['allow_redirects'] = 允许重定向
    if 参数:
        if 方式 == 0:
            传入参数['params'] = 参数
        else:
            传入参数['data'] = 参数
    if json:
        传入参数['json'] = json
    if 上传文件:
        传入参数['files'] = 上传文件
    if 代理地址:
        传入参数['proxies'] = {"http": "http://" + 代理地址, "https": "https://" + 代理地址}
    if 连接超时 and 读取超时:
        传入参数['timeout'] = (连接超时,读取超时)
    # Dispatch the request.
    if 方式 == 0:
        网页对象 = requests.get(**传入参数)
    elif 方式 == 1:
        网页对象 = requests.post(**传入参数)
    elif 方式 == 2:
        网页对象 = requests.put(**传入参数)
    elif 方式 == 3:
        网页对象 = requests.delete(**传入参数)
    elif 方式 == 4:
        网页对象 = requests.head(**传入参数)
    elif 方式 == 5:
        网页对象 = requests.options(**传入参数)
    if 编码:
        网页对象.encoding = 编码
    # Copy the interesting pieces of the response onto the return object.
    网页.原对象 = 网页对象
    网页.源码 = 网页对象.text
    try:
        网页.cookie = dict(网页对象.cookies)
    except:
        pass
    网页.状态码 = 网页对象.status_code
    网页.协议头 = 网页对象.headers
    网页.字节集 = 网页对象.content
    try:
        网页.json = 网页对象.json()
    except:
        pass
    # except:
    #     print(sys._getframe().f_code.co_name, "函数发生异常",url)
    #     print("错误发生时间:", str(datetime.datetime.now()))
    #     print("错误的详细情况:", traceback.format_exc())
    return 网页
| 2.578125 | 3 |
scripts/disaster_recovery/process_dlq.py | ministryofjustice/staff-device-logging-infrastructure | 1 | 12767058 | <reponame>ministryofjustice/staff-device-logging-infrastructure<filename>scripts/disaster_recovery/process_dlq.py
#!/usr/bin/env python
"""
Move all the messages from one SQS queue to another.
Usage: Run from Makefile. Run make process-dead-letter-queue and add required values
"""
import boto3
import itertools
import os
import sys
import uuid
def get_messages_from_queue(sqs_client, queue_url):
    """Yield every message on *queue_url*, deleting each received batch
    once its messages have been consumed.

    Stops when a receive call returns no "Messages" key.  Raises
    RuntimeError if a batch delete does not confirm every entry.
    """
    while True:
        response = sqs_client.receive_message(
            QueueUrl=queue_url, AttributeNames=["All"], MaxNumberOfMessages=10
        )
        if "Messages" not in response:
            # Queue drained.
            return
        yield from response["Messages"]
        # Delete only after the consumer has seen the whole batch.
        entries = [
            {"Id": message["MessageId"], "ReceiptHandle": message["ReceiptHandle"]}
            for message in response["Messages"]
        ]
        delete_response = sqs_client.delete_message_batch(
            QueueUrl=queue_url, Entries=entries
        )
        if len(delete_response["Successful"]) != len(entries):
            raise RuntimeError(
                f"Failed to delete messages: entries={entries!r} resp={delete_response!r}"
            )
def chunked_iterable(iterable, *, size):
    """Yield successive tuples of at most *size* items from *iterable*.

    The final chunk may be shorter; nothing is yielded for an empty
    iterable (or for size == 0).
    """
    iterator = iter(iterable)
    while chunk := tuple(itertools.islice(iterator, size)):
        yield chunk
if __name__ == "__main__":
src_queue_url = os.environ.get("DLQ_SQS_URL")
dst_queue_url = os.environ.get("SQS_DESTINATION_URL")
if src_queue_url == dst_queue_url:
sys.exit("Source and destination queues cannot be the same.")
sqs_client = boto3.client("sqs")
messages = get_messages_from_queue(sqs_client, queue_url=src_queue_url)
# The SendMessageBatch API supports sending up to ten messages at once.
for message_batch in chunked_iterable(messages, size=10):
print(f"Writing {len(message_batch):2d} messages to {dst_queue_url}")
sqs_client.send_message_batch(
QueueUrl=dst_queue_url,
Entries=[
{"Id": str(uuid.uuid4()), "MessageBody": message["Body"]}
for message in message_batch
],
)
| 2.125 | 2 |
external/loaders/tests/test_stacking.py | ai2cm/fv3net | 1 | 12767059 | import loaders
import xarray as xr
import numpy as np
from loaders._utils import SAMPLE_DIM_NAME
import pytest
def test_multiple_unstacked_dims():
    """loaders.stack should collapse all dims NOT listed in unstacked_dims
    into a single sample dimension, per variable (var2 lacks "d" and must
    still stack correctly).
    """
    na, nb, nc, nd = 2, 3, 4, 5
    ds = xr.Dataset(
        data_vars={
            "var1": xr.DataArray(
                np.zeros([na, nb, nc, nd]), dims=["a", "b", "c", "d"],
            ),
            "var2": xr.DataArray(np.zeros([na, nb, nc]), dims=["a", "b", "c"],),
        }
    )
    unstacked_dims = ["c", "d"]
    expected = xr.Dataset(
        data_vars={
            "var1": xr.DataArray(
                np.zeros([na * nb, nc, nd]), dims=[SAMPLE_DIM_NAME, "c", "d"],
            ),
            "var2": xr.DataArray(np.zeros([na * nb, nc]), dims=[SAMPLE_DIM_NAME, "c"],),
        }
    )
    result = loaders.stack(ds=ds, unstacked_dims=unstacked_dims)
    # Drop the MultiIndex coords created by stacking before comparing values.
    xr.testing.assert_identical(result.drop(result.coords.keys()), expected)
@pytest.fixture
def gridded_dataset(request):
    """Build a small (z, y, x) dataset of unique values for stacking tests.

    ``request.param`` is (num_nans, zdim, ydim, xdim); values below
    num_nans are replaced with NaN.

    FIX: the inner comprehensions hard-coded ``range(10)`` for both the
    y and x axes, silently ignoring the ydim/xdim parameters (it only
    worked because every current caller passes 10).
    """
    num_nans, zdim, ydim, xdim = request.param
    coords = {"z": range(zdim), "y": range(ydim), "x": range(xdim)}
    # unique values for ease of set comparison in test
    var = xr.DataArray(
        [
            [[(100 * k) + (10 * j) + i for i in range(xdim)] for j in range(ydim)]
            for k in range(zdim)
        ],
        dims=["z", "y", "x"],
        coords=coords,
    )
    var = var.where(var >= num_nans)  # assign nan values
    return xr.Dataset({"var": var})
@pytest.mark.parametrize(
    "gridded_dataset", [(0, 1, 10, 10), (0, 10, 10, 10)], indirect=True,
)
def test_stack_dims(gridded_dataset):
    """Stacking with unstacked_dims=["z"] must leave "z" intact and put the
    sample dimension first on every variable.
    """
    s_dim = SAMPLE_DIM_NAME
    ds_train = loaders.stack(["z"], gridded_dataset)
    assert set(ds_train.dims) == {s_dim, "z"}
    assert len(ds_train["z"]) == len(gridded_dataset.z)
    assert ds_train["var"].dims[0] == s_dim
| 2.21875 | 2 |
prisitri/urls.py | soslaio/agreg | 0 | 12767060 | <filename>prisitri/urls.py
from django.conf import settings
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from core.urls import corepatterns
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
TokenVerifyView
)
# URL routes: the core app at the site root, the Django admin, and the
# SimpleJWT token endpoints (obtain / refresh / verify).  Media files are
# additionally served from MEDIA_ROOT — suitable for development; in
# production the web server should serve media instead.
urlpatterns = [
    path('', include(corepatterns)),
    path('admin/', admin.site.urls),
    path('token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
    path('token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
    path('token/verify/', TokenVerifyView.as_view(), name='token_verify')
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 1.734375 | 2 |
Python/sWAP cASE.py | MonwarAdeeb/HackerRank-Solutions | 0 | 12767061 | def swap_case(s):
swapped = s.swapcase()
return swapped
| 2.671875 | 3 |
doorman/models.py | ESGuardian/doorman-docker | 614 | 12767062 | # -*- coding: utf-8 -*-
import datetime as dt
import string
import uuid
from flask_login import UserMixin
from doorman.database import (
Column,
Table,
ForeignKey,
Index,
Model,
SurrogatePK,
db,
reference_col,
relationship,
ARRAY,
JSONB,
INET,
declared_attr,
)
from doorman.extensions import bcrypt
# Many-to-many association tables.

# Which queries belong to which pack.
querypacks = Table(
    'query_packs',
    Column('pack.id', db.Integer, ForeignKey('pack.id')),
    Column('query.id', db.Integer, ForeignKey('query.id'))
)

# Tags applied to packs.
pack_tags = Table(
    'pack_tags',
    Column('tag.id', db.Integer, ForeignKey('tag.id')),
    Column('pack.id', db.Integer, ForeignKey('pack.id'), index=True)
)

# Tags applied to nodes.
node_tags = Table(
    'node_tags',
    Column('tag.id', db.Integer, ForeignKey('tag.id')),
    Column('node.id', db.Integer, ForeignKey('node.id'), index=True)
)

# Tags applied to queries.
query_tags = Table(
    'query_tags',
    Column('tag.id', db.Integer, ForeignKey('tag.id')),
    Column('query.id', db.Integer, ForeignKey('query.id'), index=True)
)

# Tags applied to file-integrity-monitoring path groups.
file_path_tags = Table(
    'file_path_tags',
    Column('tag.id', db.Integer, ForeignKey('tag.id')),
    Column('file_path.id', db.Integer, ForeignKey('file_path.id'), index=True)
)
class Tag(SurrogatePK, Model):
    """A label linking nodes to the packs, queries and file paths they
    should receive — tags are the routing mechanism of this schema.
    """
    value = Column(db.String, nullable=False, unique=True)

    nodes = relationship(
        'Node',
        secondary=node_tags,
        back_populates='tags',
    )

    packs = relationship(
        'Pack',
        secondary=pack_tags,
        back_populates='tags',
    )

    queries = relationship(
        'Query',
        secondary=query_tags,
        back_populates='tags',
    )

    file_paths = relationship(
        'FilePath',
        secondary=file_path_tags,
        back_populates='tags',
    )

    def __init__(self, value, **kwargs):
        # Extra kwargs are accepted (and ignored) for interface parity with
        # the other models.
        self.value = value

    def __repr__(self):
        return '<Tag: {0.value}>'.format(self)

    # The *_count properties issue COUNT queries instead of loading the
    # related collections — cheap for dashboard-style displays.
    @property
    def packs_count(self):
        """Number of packs carrying this tag (COUNT query, no row loading)."""
        return db.session.object_session(self) \
            .query(Pack.id).with_parent(self, 'packs').count()

    @property
    def nodes_count(self):
        """Number of nodes carrying this tag."""
        return db.session.object_session(self) \
            .query(Node.id).with_parent(self, 'nodes').count()

    @property
    def queries_count(self):
        """Number of queries carrying this tag."""
        return db.session.object_session(self) \
            .query(Query.id).with_parent(self, 'queries').count()

    @property
    def file_paths_count(self):
        """Number of file-path groups carrying this tag."""
        return db.session.object_session(self) \
            .query(FilePath.id).with_parent(self, 'file_paths').count()
class Query(SurrogatePK, Model):
    """An osquery scheduled query, optionally shared between packs."""
    name = Column(db.String, nullable=False)
    sql = Column(db.String, nullable=False)
    # Run frequency in seconds.
    interval = Column(db.Integer, default=3600)
    platform = Column(db.String)
    version = Column(db.String)
    description = Column(db.String)
    value = Column(db.String)
    # Whether "removed" result events are reported (osquery pack option).
    removed = Column(db.Boolean, nullable=False, default=True)
    shard = Column(db.Integer)

    packs = relationship(
        'Pack',
        secondary=querypacks,
        back_populates='queries',
    )

    tags = relationship(
        'Tag',
        secondary=query_tags,
        back_populates='queries',
        lazy='joined',
    )

    def __init__(self, name, query=None, sql=None, interval=3600, platform=None,
                 version=None, description=None, value=None, removed=True,
                 shard=None, **kwargs):
        self.name = name
        # Accept either keyword: `query` (osquery pack spelling) or `sql`.
        self.sql = query or sql
        self.interval = int(interval)
        self.platform = platform
        self.version = version
        self.description = description
        self.value = value
        self.removed = removed
        self.shard = shard

    def __repr__(self):
        return '<Query: {0.name}>'.format(self)

    def to_dict(self):
        """Serialize to the osquery pack 'queries' entry format."""
        return {
            'query': self.sql,
            'interval': self.interval,
            'platform': self.platform,
            'version': self.version,
            'description': self.description,
            'value': self.value,
            'removed': self.removed,
            'shard': self.shard,
        }
class Pack(SurrogatePK, Model):
    """A named collection of queries, distributed to nodes via shared tags."""
    name = Column(db.String, nullable=False, unique=True)
    platform = Column(db.String)
    version = Column(db.String)
    description = Column(db.String)
    shard = Column(db.Integer)

    queries = relationship(
        'Query',
        secondary=querypacks,
        back_populates='packs',
    )

    tags = relationship(
        'Tag',
        secondary=pack_tags,
        back_populates='packs',
    )

    def __init__(self, name, platform=None, version=None,
                 description=None, shard=None, **kwargs):
        self.name = name
        self.platform = platform
        self.version = version
        self.description = description
        self.shard = shard

    def __repr__(self):
        return '<Pack: {0.name}>'.format(self)

    def to_dict(self):
        """Serialize to the osquery pack JSON format.

        Queries tagged 'discovery' become discovery queries (raw SQL list);
        all others go into the 'queries' mapping keyed by name.
        """
        queries = {}
        discovery = []

        for query in self.queries:
            if 'discovery' in (t.value for t in query.tags):
                discovery.append(query.sql)
            else:
                queries[query.name] = query.to_dict()

        return {
            'platform': self.platform,
            'version': self.version,
            'shard': self.shard,
            'discovery': discovery,
            'queries': queries,
        }
class Node(SurrogatePK, Model):
    """An enrolled osquery endpoint."""
    # Secret identifier the node presents on every request.
    node_key = Column(db.String, nullable=False, unique=True)
    enroll_secret = Column(db.String)
    enrolled_on = Column(db.DateTime)
    host_identifier = Column(db.String)
    last_checkin = Column(db.DateTime)
    # Arbitrary host facts reported at enrollment (hostname, versions, ...).
    node_info = Column(JSONB, default={}, nullable=False)
    is_active = Column(db.Boolean, default=True, nullable=False)
    last_ip = Column(INET, nullable=True)

    tags = relationship(
        'Tag',
        secondary=node_tags,
        back_populates='nodes',
        lazy='joined',
    )

    def __init__(self, host_identifier, node_key=None,
                 enroll_secret=None, enrolled_on=None, last_checkin=None,
                 is_active=True, last_ip=None,
                 **kwargs):
        # A fresh random node_key is generated when none is supplied.
        self.node_key = node_key or str(uuid.uuid4())
        self.host_identifier = host_identifier
        self.enroll_secret = enroll_secret
        self.enrolled_on = enrolled_on
        self.last_checkin = last_checkin
        self.is_active = is_active
        self.last_ip = last_ip

    def __repr__(self):
        return '<Node-{0.id}: node_key={0.node_key}, host_identifier={0.host_identifier}>'.format(self)

    def get_config(self, **kwargs):
        """Assemble this node's full osquery configuration (delegates to utils)."""
        from doorman.utils import assemble_configuration
        return assemble_configuration(self)

    def get_new_queries(self, **kwargs):
        """Collect pending distributed queries for this node (delegates to utils)."""
        from doorman.utils import assemble_distributed_queries
        return assemble_distributed_queries(self)

    @property
    def display_name(self):
        """Best human-readable name: display_name > hostname > computer_name
        > host_identifier, taking the first non-empty value in that order.
        """
        if 'display_name' in self.node_info and self.node_info['display_name']:
            return self.node_info['display_name']
        elif 'hostname' in self.node_info and self.node_info['hostname']:
            return self.node_info['hostname']
        elif 'computer_name' in self.node_info and self.node_info['computer_name']:
            return self.node_info['computer_name']
        else:
            return self.host_identifier

    # The three properties below resolve what this node receives by joining
    # through the tags shared between the node and packs/queries/file paths.
    @property
    def packs(self):
        """Packs assigned to this node via shared tags (lazy query)."""
        return db.session.object_session(self) \
            .query(Pack) \
            .join(pack_tags, pack_tags.c['pack.id'] == Pack.id) \
            .join(node_tags, node_tags.c['tag.id'] == pack_tags.c['tag.id']) \
            .filter(node_tags.c['node.id'] == self.id) \
            .options(db.lazyload('*'))

    @property
    def queries(self):
        """Stand-alone queries assigned to this node via shared tags."""
        return db.session.object_session(self) \
            .query(Query) \
            .join(query_tags, query_tags.c['query.id'] == Query.id) \
            .join(node_tags, node_tags.c['tag.id'] == query_tags.c['tag.id']) \
            .filter(node_tags.c['node.id'] == self.id) \
            .options(db.lazyload('*'))

    @property
    def file_paths(self):
        """File-integrity path groups assigned to this node via shared tags."""
        return db.session.object_session(self) \
            .query(FilePath) \
            .join(file_path_tags, file_path_tags.c['file_path.id'] == FilePath.id) \
            .join(node_tags, node_tags.c['tag.id'] == file_path_tags.c['tag.id']) \
            .filter(node_tags.c['node.id'] == self.id) \
            .options(db.lazyload('*'))

    def to_dict(self):
        # NOTE: deliberately not including any secret values in here, for now.
        return {
            'id': self.id,
            'display_name': self.display_name,
            'enrolled_on': self.enrolled_on,
            'host_identifier': self.host_identifier,
            'last_checkin': self.last_checkin,
            'node_info': self.node_info.copy(),
            'last_ip': self.last_ip,
            'is_active': self.is_active
        }
class FilePath(SurrogatePK, Model):
    """A named group of file paths for osquery file-integrity monitoring.

    Multiple paths are stored in a single string column joined with the
    '!!' delimiter (assumed never to appear in a real path).
    """
    category = Column(db.String, nullable=False, unique=True)
    target_paths = Column(db.String)

    tags = relationship(
        'Tag',
        secondary=file_path_tags,
        back_populates='file_paths',
        lazy='joined',
    )

    def __init__(self, category=None, target_paths=None, *args, **kwargs):
        # Paths may arrive either as the `target_paths` iterable or as
        # extra positional arguments.
        self.category = category

        if target_paths is not None:
            self.set_paths(*target_paths)
        elif args:
            self.set_paths(*args)
        else:
            self.target_paths = ''

    def to_dict(self):
        """Serialize to the osquery file_paths config entry format."""
        return {
            self.category: self.get_paths()
        }

    def get_paths(self):
        """Return the stored paths as a list.

        NOTE(review): an empty target_paths yields [''] rather than [] —
        confirm downstream consumers tolerate that.
        """
        return self.target_paths.split('!!')

    def set_paths(self, *target_paths):
        """Store the given paths, joined with the '!!' delimiter."""
        self.target_paths = '!!'.join(target_paths)
class ResultLog(SurrogatePK, Model):
    """A single osquery scheduled-query result row reported by a node."""
    # Name of the scheduled query that produced this row.
    name = Column(db.String, nullable=False)
    timestamp = Column(db.DateTime, default=dt.datetime.utcnow)
    # osquery action, e.g. "added" / "removed" / "snapshot".
    action = Column(db.String)
    # The result row itself, as reported.
    columns = Column(JSONB)

    node_id = reference_col('node', nullable=False)
    node = relationship(
        'Node',
        backref=db.backref('result_logs', lazy='dynamic')
    )

    def __init__(self, name=None, action=None, columns=None, timestamp=None,
                 node=None, node_id=None, **kwargs):
        self.name = name
        self.action = action
        self.columns = columns or {}
        self.timestamp = timestamp
        # Either the loaded Node or its id may be supplied.
        if node:
            self.node = node
        elif node_id:
            self.node_id = node_id

    @declared_attr
    def __table_args__(cls):
        # Composite index for the common "latest results per node" query.
        return (
            Index('idx_%s_node_id_timestamp_desc' % cls.__tablename__,
                  'node_id', cls.timestamp.desc()),
        )
class StatusLog(SurrogatePK, Model):
    """A single osquery daemon status-log line reported by a node."""
    line = Column(db.Integer)
    message = Column(db.String)
    severity = Column(db.Integer)
    filename = Column(db.String)
    created = Column(db.DateTime, default=dt.datetime.utcnow)
    version = Column(db.String)

    node_id = reference_col('node', nullable=False)
    node = relationship(
        'Node',
        backref=db.backref('status_logs', lazy='dynamic')
    )

    def __init__(self, line=None, message=None, severity=None,
                 filename=None, created=None, node=None, node_id=None,
                 version=None, **kwargs):
        # FIX: line/severity default to None but were passed straight to
        # int(), which raises TypeError; coerce only when a value is present
        # (osquery reports them as strings).
        self.line = int(line) if line is not None else None
        self.message = message
        self.severity = int(severity) if severity is not None else None
        self.filename = filename
        self.created = created
        self.version = version
        # Either the loaded Node or its id may be supplied.
        if node:
            self.node = node
        elif node_id:
            self.node_id = node_id

    @declared_attr
    def __table_args__(cls):
        # Composite index for the common "latest status lines per node" query.
        return (
            Index('idx_%s_node_id_created_desc' % cls.__tablename__,
                  'node_id', cls.created.desc()),
        )
class DistributedQuery(SurrogatePK, Model):
    """An ad-hoc osquery SQL query to be distributed to nodes."""

    description = Column(db.String, nullable=True)
    sql = Column(db.String, nullable=False)
    timestamp = Column(db.DateTime, default=dt.datetime.utcnow)
    # Earliest time the query may be delivered to nodes.
    not_before = Column(db.DateTime, default=dt.datetime.utcnow)

    def __init__(self, sql, description=None, not_before=None):
        # Fixed: previously misspelled `__init___` (three trailing
        # underscores), so this constructor body never executed.
        self.sql = sql
        self.description = description
        # Only assign when given, so the column default (utcnow) still
        # applies when the caller did not specify a delivery time.
        if not_before is not None:
            self.not_before = not_before
class DistributedQueryTask(SurrogatePK, Model):
    """Per-node delivery/result state for one DistributedQuery."""

    # Task lifecycle states (stored in `status`).
    NEW = 0
    PENDING = 1
    COMPLETE = 2
    FAILED = 3

    # Random unique handle used to match results back to this task.
    guid = Column(db.String, nullable=False, unique=True)
    status = Column(db.Integer, default=0, nullable=False)
    timestamp = Column(db.DateTime)

    distributed_query_id = reference_col('distributed_query', nullable=False)
    distributed_query = relationship(
        'DistributedQuery',
        backref=db.backref('tasks',
                           cascade='all, delete-orphan',
                           lazy='dynamic'),
    )

    node_id = reference_col('node', nullable=False)
    node = relationship(
        'Node',
        backref=db.backref('distributed_queries', lazy='dynamic'),
    )

    def __init__(self, node=None, node_id=None,
                 distributed_query=None, distributed_query_id=None):
        # Fresh GUID identifies this (query, node) pairing.
        self.guid = str(uuid.uuid4())
        # Prefer concrete objects over bare ids when both are given.
        if node:
            self.node = node
        elif node_id:
            self.node_id = node_id
        if distributed_query:
            self.distributed_query = distributed_query
        elif distributed_query_id:
            self.distributed_query_id = distributed_query_id

    @declared_attr
    def __table_args__(cls):
        # Composite index for looking up a node's tasks by status.
        return (
            Index('idx_%s_node_id_status' % cls.__tablename__, 'node_id', 'status'),
        )
class DistributedQueryResult(SurrogatePK, Model):
    """A result row returned by a node for a distributed query task."""

    # Raw column data of the returned row.
    columns = Column(JSONB)
    timestamp = Column(db.DateTime, default=dt.datetime.utcnow)

    distributed_query_task_id = reference_col('distributed_query_task', nullable=False)
    distributed_query_task = relationship(
        'DistributedQueryTask',
        backref=db.backref('results',
                           cascade='all, delete-orphan',
                           lazy='joined'),
    )

    # Denormalized link straight to the query for convenient querying.
    distributed_query_id = reference_col('distributed_query', nullable=False)
    distributed_query = relationship(
        'DistributedQuery',
        backref=db.backref('results',
                           cascade='all, delete-orphan',
                           lazy='joined'),
    )

    def __init__(self, columns, distributed_query=None, distributed_query_task=None):
        self.columns = columns
        self.distributed_query = distributed_query
        self.distributed_query_task = distributed_query_task
class Rule(SurrogatePK, Model):
    """An alerting rule: a named condition tree routed to alerter backends."""

    name = Column(db.String, nullable=False)
    # Names of the alerter backends this rule should notify.
    alerters = Column(ARRAY(db.String), nullable=False)
    description = Column(db.String, nullable=True)
    # Rule condition tree, stored as JSON.
    conditions = Column(JSONB)
    updated_at = Column(db.DateTime, nullable=False, default=dt.datetime.utcnow)

    def __init__(self, name, alerters, description=None, conditions=None, updated_at=None):
        self.name = name
        self.description = description
        self.alerters = alerters
        self.conditions = conditions
        self.updated_at = updated_at

    @property
    def template(self):
        # NOTE(review): the name/description are .format()-interpolated first
        # and the result wrapped in string.Template, so any $placeholders in
        # the description survive for a later substitute() -- TODO confirm
        # that this two-stage interpolation is intentional.
        return string.Template("{name}\r\n\r\n{description}".format(
            name=self.name, description=self.description or '')
        )
class User(UserMixin, SurrogatePK, Model):
    """An application user, authenticated locally (bcrypt) or via OAuth."""

    username = Column(db.String(80), unique=True, nullable=False)
    email = Column(db.String)
    # bcrypt hash of the password; NULL for OAuth-only accounts.
    password = Column(db.String, nullable=True)
    created_at = Column(db.DateTime, nullable=False, default=dt.datetime.utcnow)

    # oauth related stuff
    social_id = Column(db.String)
    first_name = Column(db.String)
    last_name = Column(db.String)

    def __init__(self, username, password=None, email=None, social_id=None,
                 first_name=None, last_name=None):
        self.username = username
        self.email = email
        # Hash and store the password when one is supplied; OAuth-only
        # accounts keep a NULL password and can never pass check_password().
        if password:
            self.set_password(password)
        else:
            self.password = None
        self.social_id = social_id
        self.first_name = first_name
        self.last_name = last_name

    def set_password(self, password):
        """Persist a bcrypt hash of ``password`` for this user."""
        self.update(password=bcrypt.generate_password_hash(password))
        return

    def check_password(self, value):
        """Return True iff ``value`` matches the stored password hash."""
        if not self.password:
            # still do the computation so a missing password takes roughly
            # the same time as a wrong one (avoids a timing side-channel),
            # then always report failure.
            return bcrypt.generate_password_hash(value) and False
        return bcrypt.check_password_hash(self.password, value)
| 2.171875 | 2 |
tests/test_pandas.py | ehw-fit/py-paretoarchive | 1 | 12767063 | from paretoarchive.pandas import pareto
import pandas as pd
def test_df():
    """Smoke-test pareto() front selection on a tiny 3-row DataFrame."""
    df = pd.DataFrame(
        [[1, 3, 3], [1, 2, 3], [1, 1, 2]], columns=["a", "b", "c"]
    )
    # Minimising all objectives: only row 2 is non-dominated.
    assert (pareto(df, ["a", "b"]).index == [2]).all()
    assert (pareto(df, ["a", "b", "c"]).index == [2]).all()
    # Flipping objective 2 to maximisation keeps rows 0 and 2 on the front.
    assert (pareto(df, ["a", "b", "c"], minimizeObjective2=False).index == [0, 2]).all()


if __name__ == "__main__":
    test_df()
quadclass/seqlearn.py | vavrusa/seqalpha | 2 | 12767064 | <gh_stars>1-10
#!/usr/bin/env python
import re
import sys, getopt
import pylab as pl
from sklearn.metrics import roc_curve, auc
from matplotlib.lines import Line2D
def load_classlist(source = 'gqclass.tsv'):
    ''' Load sequence classlist.

    Parses the 9-column TSV and returns a mapping of
    qclass -> {'id': qclass, 'topology': set((topology, loops))}.
    Inosine ('I') in loop sequences is normalised to guanine ('G') and
    only the first three loops (L1-L3) are kept. '''
    classes = dict()
    with open(source) as handle:
        for raw in handle:
            # Rows starting with ';' are header/comment rows.
            if raw.startswith(';'):
                continue
            (name, qclass, planarity, planarity_std, twist, twist_std,
             chains, topology, loops) = raw.strip().split('\t')
            entry = classes.setdefault(qclass, {'id': qclass, 'topology': set([])})
            # Inosine -> Guanine ambiguity; keep only L1-L3.
            normalised = '|'.join(loops.replace('I', 'G').split('|')[0:3])
            entry['topology'].add((topology, normalised))
    return classes
def loop_len_config(loops):
    ''' Return length configuration for L{1,2,3} loops.

    Each digit is the loop length minus the shortest leading G-run among
    all loops. Returns '' when no loops are given. '''
    g_runs = [re.match(r'^G+', loop).end(0) for loop in loops]
    if not g_runs:
        # min() over no loops would raise ValueError in the original form.
        return ''
    shortest = min(g_runs)
    return ''.join(str(len(loop) - shortest) for loop in loops)
def loop_len_dt(loops):
    ''' Return length derivation for L{1,2,3} loops.

    Encodes each subsequent loop length relative to the first one:
    '+' longer, '=' equal, '-' shorter. Returns '?' when no
    configuration can be derived. '''
    config = loop_len_config(loops)
    if not config:
        return '?'
    head = config[0]
    marks = [head]
    for ch in config[1:]:
        if head < ch:
            marks.append('+')
        elif head == ch:
            marks.append('=')
        else:
            marks.append('-')
    return ''.join(marks)
def loop_composition(loops):
    ''' Calculate loop sequences nucleotide composition expressed as
    nucleotide relative frequency (rounded to 2 decimals). '''
    counts = {'A': 0, 'C': 0, 'G': 0, 'T': 0, 'U': 0}
    if not loops:
        # No loops: all-zero composition.
        return counts
    total = 0
    for loop in loops:
        for nucleotide in loop:
            counts[nucleotide] += 1
            total += 1
    # Normalize nucleotide frequency.
    scale = 1.0 / total
    return {key: round(counts[key] * scale, 2) for key in counts}
def find_fragments(seq):
    ''' Identify L{1,2,3} loops in sequence.

    Scans the sequence character by character: a loop opens at the first
    non-G after a G-tract and closes when two consecutive Gs appear again.
    If more than three loops are found, loops with the fewest Gs are merged
    into their predecessor until only L1-L3 remain. '''
    loops = []
    loop = ''
    in_loop = False
    # Drop any prefix before the first G so the scan starts on a G-tract.
    # NOTE(review): if seq has no 'G', find() returns -1 and only the last
    # character is scanned -- presumably inputs always contain G-tracts.
    seq = seq[seq.find('G'):]
    for n in seq:
        loop += n
        if not in_loop:
            # Find loop opening
            if n != 'G' and len(loop) > 0:
                in_loop = True
        else:
            # G2 is a loop closure
            if loop.endswith('GG'):
                # Keep the loop body; the trailing 'GG' seeds the next tract.
                loops.append(loop[:-2])
                loop = loop[-2:]
                in_loop = False
    # Join shortest G-tracts until L1-L3 are identified
    while len(loops) > 3:
        # Index of the loop containing the fewest Gs.
        shortest = min(enumerate(loops), key = lambda x: x[1].count('G'))[0]
        # Merge with previous loop (the very first loop is simply dropped).
        if shortest > 0:
            loops[shortest - 1] += loops[shortest]
        loops.pop(shortest)
    return loops
def fit_candidate(candidates, qclass, topology, reason, val, pval):
    ''' Record one match reason ("reason:value:pval") for the
    (qclass, topology) candidate in the candidates mapping. '''
    entry = '%s:%s:%.02f' % (reason, val, pval)
    candidates.setdefault((qclass, topology), set()).add(entry)
def get_pval(pval, ptype, qclass):
    ''' Return p-value for given class: 1 minus the fraction of all
    observations of `ptype` that belong to `qclass`. '''
    per_class = pval[ptype]
    total = sum(per_class.values())
    return 1 - per_class[qclass] / float(total)
def ins_pval(clslist, obs, qclass):
    ''' Insert observation of an occurence in the qclass:
    increments clslist[obs][qclass], creating levels as needed. '''
    per_class = clslist.setdefault(obs, dict())
    per_class[qclass] = per_class.get(qclass, 0) + 1
def calc_pval(clslist):
    ''' Calculate p-values for predictors: tallies, per predictor type
    ('dt' and 'config'), how often each observation occurs in each class. '''
    pval = {'dt': dict(), 'config': dict()}
    for qclass, info in clslist.items():
        for topology, loops in info['topology']:
            parts = loops.split('|')
            ins_pval(pval['dt'], loop_len_dt(parts), qclass)
            ins_pval(pval['config'], loop_len_config(parts), qclass)
    return pval
def fit(gq_classlist, loops, pval_table):
    ''' Decompose input sequence and attempt to fit it to the identified GQ
    classes.

    Three predictors are applied against every known (class, topology):
    exact loop-length configuration, loop-length derivation, and nearest
    nucleotide composition (least summed error). Returns a mapping of
    (qclass, topology) -> set of "reason:value:pval" strings. '''
    if len(loops) == 0:
        # NOTE(review): returns an empty set here but a dict otherwise --
        # callers iterate the result, so both behave as "no candidates".
        return set([])
    config = loop_len_config(loops)
    len_dt = loop_len_dt(loops)
    n_freq = loop_composition(loops)
    candidates = dict()
    # Calculate least error composition
    k3best = 1.0
    k3best_match = (None, None, None)
    for qclass, info in gq_classlist.items():
        for (gq_topology, gq_loops) in info['topology']:
            # Match based on L1-L3 length configuration
            candidate_config = loop_len_config(gq_loops.split('|'))
            if config == candidate_config:
                pval = get_pval(pval_table['config'], config, qclass)
                fit_candidate(candidates, qclass, gq_topology, 'length_match', config, pval)
            # Match based on length sequence derivation
            gq_len_dt = loop_len_dt(gq_loops.split('|'))
            if len_dt == gq_len_dt:
                pval = get_pval(pval_table['dt'], len_dt, qclass)
                fit_candidate(candidates, qclass, gq_topology, 'length_dt', len_dt, pval)
            # Match based on sequence composition: summed absolute frequency
            # error over the five nucleotides, averaged by 5.
            gq_n_freq = loop_composition(gq_loops.split('|'))
            k3err = sum([abs(n_freq[n] - gq_n_freq[n]) for n in gq_n_freq.keys()]) / 5.0
            if k3err < k3best:
                k3best = k3err
                k3best_match = (qclass, gq_topology, gq_loops)
    # Pick least sequence composition error match
    if k3best < 1.0:
        (qclass, gq_topology, gq_loops) = k3best_match
        fit_candidate(candidates, qclass, gq_topology, 'composition', 'match', k3best)
    return candidates
def evaluate_k(qclass, pred, y_pred, y_true, why = None):
    ''' Select the lowest-p-value prediction (optionally restricted to one
    predictor named `why`), append hit/miss to y_true and a score (1 - pval)
    to y_pred, and return the winning (class, pval) pair. '''
    best = [None, 1.0]
    for key in pred:
        for reason_str in pred[key]:
            # reason_str is "reason:value:pval" (see fit_candidate).
            reason = reason_str.split(':')
            pval = float(reason[2])
            if why is None:
                # Consider every predictor.
                if pval < best[1]:
                    best = (key[0], pval)
            else:
                # Only consider matches produced by the predictor `why`.
                if pval < best[1] and reason[0] == why:
                    best = (key[0], pval)
    # With no match, best stays [None, 1.0]: counts as a miss, score 0.
    y_true.append(best[0] == qclass)
    y_pred.append(1 - best[1])
    return best
def evaluate_show(name, y_pred, y_true, pl, style):
    ''' Print accuracy for one predictor and add its ROC curve to the plot. '''
    # Fixed: parenthesized print() call; a single formatted argument behaves
    # identically under Python 2 and 3 and matches the rest of this file.
    print('%s accuracy: %f' % (name, y_true.count(True) / float(len(y_true))))
    fpr, tpr, thresholds = roc_curve(y_true, y_pred)
    # Plot ROC curve
    pl.plot(fpr, tpr, marker=style, label=name)
def validate(gq_classlist, input_file, pval_table, graph = True):
    ''' Evaluate every predictor against a validation TSV and plot ROC
    curves (also saved to seqlearn-roc.pdf).

    NOTE(review): the `graph` parameter is accepted but never consulted --
    the plot is always drawn; confirm whether it should gate plotting. '''
    # One marker style and accumulator pair per predictor.
    k_style = [ '^', 'o', 's' ]
    k_name = [ 'length_match', 'length_dt', 'composition' ]
    y_pred = [ [], [], [] ]
    y_true = [ [], [], [] ]
    for line in input_file:
        line = line.strip().split('\t')
        # Column 1 is the true class, column 8 the loop sequences
        # (inosine normalised to guanine).
        qclass, loops = (line[1], line[8].replace('I', 'G').split('|'))
        pred = fit(gq_classlist, loops, pval_table)
        for k in range(len(k_name)):
            evaluate_k(qclass, pred, y_pred[k], y_true[k], k_name[k])
    # Plot ROC curve
    pl.clf()
    dpi = 96.0
    fig = pl.figure(1, figsize=(round(1000/dpi), round(600/dpi)))
    for k in range(0, len(k_name)):
        evaluate_show(k_name[k], y_pred[k], y_true[k], pl, k_style[k])
    pl.xlim([0.0, 1.0])
    pl.ylim([0.0, 1.0])
    pl.xlabel('False Positive Rate')
    pl.ylabel('True Positive Rate')
    pl.title('Receiver operating characteristic')
    # Diagonal reference line (random classifier).
    pl.plot([0, 1], [0, 1], 'k--')
    pl.legend(loc="lower right")
    pl.show()
    fig.savefig('seqlearn-roc.pdf')
def help():
    ''' Print help and exit. '''
    prog = sys.argv[0]
    for message in (
        'Usage: %s [-t <path>] [-v <path>] [-g] [sequences] ' % prog,
        'Parameters:',
        '\t-t <path>, --training=<path>\tTraining dataset (TSV file).',
        '\t-v <path>, --validate=<path>\tValidation dataset (TSV file).',
        '\t-g, --graph\tPrint ROC curve.',
        '\t[directory]\tDirectories with GQ class families.',
        'Notes:',
        '\tThe "-t" default is "gqclass.tsv".',
        'Example:',
        '"%s" ... print all predictors and p-values' % prog,
        '"%s -t classes.tsv GGGTGGGTTAGGGTGGG" ... predict the topology for given sequence' % prog,
    ):
        print(message)
    sys.exit(1)
if __name__ == '__main__':
# Process parameters
try:
opts, args = getopt.getopt(sys.argv[1:], "ht:v:g", ["help", "training=", "validate=", "graph"])
except getopt.GetoptError as err:
print str(err)
help()
class_file = 'gqclass.tsv'
validate_file = None
show_graph = False
for o, a in opts:
if o in ('-h', '--help'):
help()
elif o in ('-t', '--training'):
class_file = a
elif o in ('-v', '--validate'):
validate_file = a
elif o in ('-g', '--graph'):
show_graph = True
else:
help()
gq_classlist = load_classlist(class_file)
pval = calc_pval(gq_classlist)
# Accept sequences as parameters
if len(args) > 0:
for seq in args:
seq = seq.trim()
print('> %s ...' % seq)
loops = find_fragments(seq)
print('%s %s %s %s' % (loop_len_config(loops), loop_len_dt(loops), '|'.join(loops), loop_composition(loops).values()))
candidates = fit(gq_classlist, loops, pval)
for key in candidates:
print '%s (%s)' % (key[0], key[1])
for reason_str in candidates[key]:
reason = reason_str.split(':')
print(' * %s..\'%s\' p-value=%s' % (reason[0], reason[1], reason[2]))
# Validate file if presented
elif validate_file != None:
validate(gq_classlist, open(validate_file), pval)
# No parameters, just print out current fitting info
else:
print('; Loop lenghts, Loop lengths dt, p-val(lengths), p-val(dt), topology, loops, composition')
for qclass, info in gq_classlist.items():
print('; ---- %s ----' % qclass)
for (topology, loops) in info['topology']:
dt = loop_len_dt(loops.split('|'))
config = loop_len_config(loops.split('|'))
dt_pval = get_pval(pval['dt'], dt, qclass)
dt_config = get_pval(pval['config'], config, qclass)
print config, dt, "%.03f %.03f" % (dt_config, dt_pval), topology, loops, \
loop_composition(loops.split('|')).values()
| 2.421875 | 2 |
src/wagtail_localize_panel/views.py | Gandi/wagtail-localize-panel | 1 | 12767065 | <filename>src/wagtail_localize_panel/views.py<gh_stars>1-10
import logging
from django.template.loader import render_to_string
from .models import get_missing_translations_stat
log = logging.getLogger(__name__)
class WorkflowPagesToTranslatePanel:
    """Wagtail home-screen panel listing pages still missing a translation."""

    # Panel registration metadata used by the Wagtail dashboard.
    name = "workflow_pages_to_translate"
    order = 100

    def __init__(self, request, locale):
        self.request = request
        self.locale = locale
        self.pages = get_missing_translations_stat(locale)

    def render(self):
        """Render the panel template for the pending pages."""
        log.info("Rendering the translation workflow")
        context = {"pages": list(self.pages), "locale": self.locale}
        return render_to_string(
            "wagtail_localize_panel/home/workflow_pages_to_translate.html",
            context,
            request=self.request,
        )
| 2.140625 | 2 |
sged/data.py | pygongnlp/gramcorrector | 5 | 12767066 | from torch.utils.data import Dataset
from utils import load_data, get_labels
class SGEDDataset(Dataset):
    """Dataset yielding (source, target, labels) triples for SGED."""

    def __init__(self, file_path, mode):
        sources, targets = load_data(file_path, mode)
        self.src_lst = sources
        self.trg_lst = targets
        self.labels = get_labels(sources, targets)

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, index):
        return self.src_lst[index], self.trg_lst[index], self.labels[index]
| 2.59375 | 3 |
miniml/tensor.py | oniani/miniml | 3 | 12767067 | # type: ignore
"""
A Tensor module on top of Numpy arrays.
TODO: Implement the reverse mode autodiff to compute gradients. It will have
to go backward through the computation graph.
"""
from __future__ import annotations
from typing import Union
import os
import pkgutil
import numpy as np
import pyopencl as cl
import pyopencl.array as clarray
import pyopencl.clmath as clmath
import pyopencl.clrandom as clrandom
import pyopencl.bitonic_sort as clbitonicsort
# Initialize the context
# `answers=[0, 1]` pre-answers pyopencl's interactive platform/device
# prompts so context creation never blocks waiting for input.
CONTEXT: cl.Context = cl.create_some_context(answers=[0, 1])

# Instantiate a queue (all kernels/copies in this module go through it)
QUEUE: cl.CommandQueue = cl.CommandQueue(CONTEXT)

# OpenCL build options: trade strict IEEE float semantics for speed.
CLOPTS: str = "-cl-mad-enable -cl-fast-relaxed-math"

# Scalar type accepted by Tensor arithmetic.
Scalar = Union[float, int, np.float32]
def readcl(filename: str) -> str:
    """Read a bundled OpenCL kernel file and return its source text.

    `filename` is resolved relative to the package's `opencl/` resource
    directory (e.g. ``readcl("matmul.cl")``).
    """
    # Fixed: the f-string previously ignored `filename`, so every caller got
    # the wrong resource. pkgutil.get_data resolves relative to the installed
    # package, independent of the current working directory.
    return pkgutil.get_data("miniml", f"opencl/{filename}").decode()
class Tensor:
    """A tensor class. Computations can be delegated to the GPU.

    Data is always held as float32, either as a numpy ndarray (CPU) or a
    pyopencl array (GPU).
    """

    def __init__(
        self, data: Union[cl.array.Array, list, np.ndarray], gpu: bool = False
    ) -> None:
        """Initialize variables."""
        self._gpu: bool = gpu

        if isinstance(data, list):
            self._data: np.ndarray = np.array(data, dtype=np.float32)
            if self._gpu:
                self._data = clarray.to_device(QUEUE, self._data)
        elif isinstance(data, np.ndarray):
            if data.dtype != np.float32:
                # NOTE: The NumPy array has to be converted into a list first.
                #       Otherwise, the operations on cpu and gpu produce
                #       different results. This behavior can be caused by many
                #       reasons including OpenCL and even the operating system
                #       itself. Some research is needed to figure out cause and
                #       eliminate extra work for rebuilding the array.
                self._data: np.ndarray = np.array(data.tolist(), np.float32)
            else:
                self._data: np.ndarray = data
            if self._gpu:
                self._data = clarray.to_device(QUEUE, self._data)
        elif isinstance(data, cl.array.Array):
            # A pyopencl array already lives on the GPU.
            self._data: cl.array.Array = data
            self._gpu: bool = True
        else:
            raise TypeError(
                "Expected `list`, `np.ndarray`, or `pyopencl.array.Array` got "
                f"`{type(data)}`"
            )

    @property
    def data(self) -> Union[np.ndarray, cl.array.Array]:
        """The data inside of a tensor."""
        return self._data

    @data.setter
    def data(self, data: Union[cl.array.Array, list, np.ndarray]) -> None:
        """Set the data inside of a tensor."""
        if isinstance(data, list):
            self._data: np.ndarray = np.array(data, dtype=np.float32)
            if self._gpu:
                self._data = clarray.to_device(QUEUE, self._data)
        elif isinstance(data, np.ndarray):
            if data.dtype != np.dtype("float32"):
                self._data: np.ndarray = data.astype(np.float32)
            else:
                self._data: np.ndarray = data
            if self._gpu:
                self._data = clarray.to_device(QUEUE, self._data)
        elif isinstance(data, cl.array.Array):
            self._data: cl.array.Array = data
            self._gpu: bool = True
        else:
            raise TypeError(
                "Expected `list`, `np.ndarray`, or `pyopencl.array.Array` got "
                f"`{type(data)}`"
            )

    def to_cpu(self) -> Tensor:
        """Load the data into CPU."""
        if self._gpu:
            self._data = self._data.get()
            self._gpu = False
        return self

    def to_gpu(self) -> Tensor:
        """Load the data into GPU."""
        if not self._gpu:
            self._data = clarray.to_device(QUEUE, self._data)
            self._gpu = True
        return self

    def to_numpy(self) -> np.ndarray:
        """Return a numpy ndarray (copied off the GPU if necessary)."""
        if self._gpu:
            return self._data.get()
        return self._data

    @property
    def gpu(self) -> bool:
        """Return the state of the GPU."""
        return self._gpu

    def __repr__(self) -> str:
        """A representation of a tensor."""
        state: str = "GPU" if self._gpu else "CPU"
        return f"{self._data}\n\nTensor[{state}]"

    def __iter__(self) -> Union[np.ndarray, cl.array.Array]:
        """An iterator for tensors."""
        for i in self._data:
            yield i

    def __len__(self) -> int:
        """Return a length of tensors."""
        return len(self._data)

    def __getitem__(self, idx: int) -> Union[np.ndarray, cl.array.Array]:
        """Return the item at index `idx`."""
        return self._data[idx]

    def __setitem__(
        self, idx: int, item: Union[np.ndarray, cl.array.Array]
    ) -> None:
        """Set the item at index `idx`."""
        self._data[idx] = item

    def __add__(self, other: Union[Tensor, Scalar]) -> Tensor:
        """Add two tensors."""
        if not isinstance(other, Tensor):
            return Tensor(self._data + other, gpu=self._gpu)
        return Tensor(self._data + other._data, gpu=self._gpu or other._gpu)

    __radd__ = __add__

    def __iadd__(self, other: Union[Tensor, Scalar]) -> Tensor:
        """Add two tensors in-place."""
        if not isinstance(other, Tensor):
            self._data += other
        else:
            self._data += other._data
        return self

    def __sub__(self, other: Union[Tensor, Scalar]) -> Tensor:
        """Subtract two tensors."""
        if not isinstance(other, Tensor):
            return Tensor(self._data - other, gpu=self._gpu)
        return Tensor(self._data - other._data, gpu=self._gpu or other._gpu)

    # NOTE(review): __rsub__ aliased to __sub__ computes self - other, not
    # other - self; `scalar - tensor` therefore yields `tensor - scalar`.
    # Kept as-is to preserve the established behavior -- TODO confirm.
    __rsub__ = __sub__

    def __isub__(self, other: Union[Tensor, Scalar]) -> Tensor:
        """Subtract two tensors in-place."""
        if not isinstance(other, Tensor):
            self._data -= other
        else:
            self._data -= other._data
        return self

    def __mul__(self, other: Union[Tensor, Scalar]) -> Tensor:
        """Multiply two tensors."""
        if not isinstance(other, Tensor):
            return Tensor(self._data * other, gpu=self._gpu)
        return Tensor(self._data * other._data, gpu=self._gpu or other._gpu)

    __rmul__ = __mul__

    def __imul__(self, other: Union[Tensor, Scalar]) -> Tensor:
        """Multiply two tensors in-place."""
        if not isinstance(other, Tensor):
            self._data *= other
        else:
            self._data *= other._data
        return self

    def __truediv__(self, other: Union[Tensor, Scalar]) -> Tensor:
        """Divide two tensors."""
        if not isinstance(other, Tensor):
            return Tensor(self._data / other, gpu=self._gpu)
        return Tensor(self._data / other._data, gpu=self._gpu or other._gpu)

    # NOTE(review): like __rsub__, this computes self / other for the
    # reflected case as well -- TODO confirm.
    __rtruediv__ = __truediv__

    def __itruediv__(self, other: Union[Tensor, Scalar]) -> Tensor:
        """Divide two tensors in-place."""
        if not isinstance(other, Tensor):
            self._data /= other
        else:
            self._data /= other._data
        return self

    def __lt__(self, other: Union[Tensor, Scalar]) -> Tensor:
        """Less than operation for a tensor and a tensor/scalar."""
        if not isinstance(other, Tensor):
            return Tensor(self._data < other, gpu=self._gpu)
        return Tensor(self._data < other._data, gpu=self._gpu or other._gpu)

    def __le__(self, other: Union[Tensor, Scalar]) -> Tensor:
        """Less than or equal operation for a tensor and a tensor/scalar."""
        if not isinstance(other, Tensor):
            return Tensor(self._data <= other, gpu=self._gpu)
        return Tensor(self._data <= other._data, gpu=self._gpu or other._gpu)

    def __eq__(self, other: Union[Tensor, Scalar]) -> Tensor:
        """Element-wise equality; returns a Tensor, not a bool.

        NOTE: defining __eq__ makes instances unhashable by default.
        """
        if not isinstance(other, Tensor):
            return Tensor(self._data == other, gpu=self._gpu)
        return Tensor(self._data == other._data, gpu=self._gpu or other._gpu)

    def __ne__(self, other: Union[Tensor, Scalar]) -> Tensor:
        """Element-wise inequality; returns a Tensor, not a bool."""
        if not isinstance(other, Tensor):
            return Tensor(self._data != other, gpu=self._gpu)
        return Tensor(self._data != other._data, gpu=self._gpu or other._gpu)

    def __ge__(self, other: Union[Tensor, Scalar]) -> Tensor:
        """Greater than or equal operation for a tensor and a tensor/scalar."""
        if not isinstance(other, Tensor):
            return Tensor(self._data >= other, gpu=self._gpu)
        return Tensor(self._data >= other._data, gpu=self._gpu or other._gpu)

    def __gt__(self, other: Union[Tensor, Scalar]) -> Tensor:
        """Greater than operation for a tensor and a tensor/scalar."""
        if not isinstance(other, Tensor):
            return Tensor(self._data > other, gpu=self._gpu)
        return Tensor(self._data > other._data, gpu=self._gpu or other._gpu)

    def __neg__(self) -> Tensor:
        """Return a negated tensor."""
        return Tensor(-self._data, gpu=self._gpu)

    def all(self) -> bool:
        """Returns the true value if all values of a tensor are true."""
        return self._data.all()

    def any(self) -> bool:
        """Returns the true value if at least one value of a tensor is true."""
        return self._data.any()

    def view(self, dtype: np.dtype) -> Tensor:
        """Returns the view of a tensor with the same data. If dtype is
        different from current dtype, the actual bytes of memory will be
        reinterpreted.
        """
        # Fixed annotation: this returns a Tensor, not None.
        return Tensor(self._data.view(dtype), gpu=self._gpu)

    def astype(self, dtype: np.dtype) -> Tensor:
        """Return a copy of self, cast to dtype.

        Fixed annotation: was the typo `Tensoor`.
        """
        return Tensor(self._data.astype(dtype), gpu=self._gpu)

    def squeeze(self) -> Tensor:
        """Returns a view of the tensor with dimensions of length 1 removed."""
        # Fixed annotation: this returns a Tensor, not None.
        return Tensor(self._data.squeeze(), gpu=self._gpu)

    def sort(self) -> None:
        """Sorts a tensor in-place, using parallel bitonic sort when on GPU."""
        if self._gpu:
            sorter = clbitonicsort.BitonicSort(CONTEXT)
            sorter(self._data)
        else:
            self._data.sort()

    @property
    def T(self) -> Tensor:
        """Returns a transpose of a tensor."""
        return Tensor(self._data.T, gpu=self._gpu)

    @property
    def dtype(self) -> np.dtype:
        """The data type of a tensor."""
        return self._data.dtype

    @property
    def flags(self) -> Union[cl.compyte.array.ArrayFlags, np.flagsobj]:
        """Return an object with attributes `c_contiguous`, `f_contiguous` and
        `forc`, which may be used to query contiguity properties in analogy
        to `numpy.ndarray.flags`.
        """
        # Fixed: previously returned self._data.size (a copy/paste bug).
        return self._data.flags

    @property
    def ndim(self) -> int:
        """The dimensions of a tensor."""
        return self._data.ndim

    @property
    def nbytes(self) -> int:
        """Return the number of bytes."""
        return self._data.nbytes

    @property
    def shape(self) -> tuple[int, ...]:
        """The tuple of lengths of each dimension in the tensor."""
        return self._data.shape

    @property
    def strides(self) -> tuple[int, ...]:
        """Tuple of bytes to step in each dimension."""
        # Fixed: the expression was evaluated but never returned.
        return self._data.strides

    @property
    def size(self) -> int:
        """The number of meaningful entries in the tensor."""
        # Fixed: the expression was evaluated but never returned.
        return self._data.size
class Ops:
    """Tensor operations (element-wise and linear-algebra helpers)."""

    @staticmethod
    def dot(t1: Tensor, t2: Tensor, gpu=False) -> Tensor:
        """Returns a dot product (matrix multiplication) of two tensors.

        NOTE(review): the GPU path calls ``t1.data.get()`` and therefore
        assumes both operands already live on the GPU -- TODO confirm that
        callers guarantee this.
        """
        if gpu:
            # Convert back to numpy ndarrays
            t1 = t1.data.get().astype(np.float32)
            t2 = t2.data.get().astype(np.float32)

            # Matrix dimensions as int32 scalars for the OpenCL kernel.
            t1_w = np.int32(t1.shape[1])
            t1_h = np.int32(t1.shape[0])
            t2_w = np.int32(t2.shape[1])
            t2_h = np.int32(t2.shape[0])  # NOTE(review): unused by the kernel.

            # Result has t1's rows and t2's columns.
            rt_h = t1_h
            rt_w = t2_w
            rt = np.empty((rt_h, rt_w)).astype(np.float32)

            # Mem flags
            mf = cl.mem_flags

            # Buffer variables: inputs copied from host, output write-only.
            t1_buf = cl.Buffer(
                CONTEXT, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=t1
            )
            t2_buf = cl.Buffer(
                CONTEXT, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=t2
            )
            rt_buf = cl.Buffer(CONTEXT, mf.WRITE_ONLY, size=rt.nbytes)

            # OpenCL program for computing a matrix multiply
            prg = cl.Program(CONTEXT, readcl("matmul.cl")).build(
                options=CLOPTS
            )

            # Perform the matrix multiplication and return the resulting tensor
            prg.matmul(
                QUEUE, rt.shape, None, t1_buf, t2_buf, rt_buf, t1_h, t2_w, t1_w
            )
            cl.enqueue_copy(QUEUE, rt, rt_buf)
            return Tensor(rt, gpu=True)
        return Tensor(np.dot(t1.data, t2.data))

    @staticmethod
    def vdot(m1: Tensor, m2: Tensor) -> Tensor:
        """Returns a dot product (scalar) of two tensors."""
        if m1.gpu or m2.gpu:
            return Tensor(clarray.dot(m1.data, m2.data), gpu=True)
        return Tensor(np.vdot(m1.data, m2.data))

    @staticmethod
    def flatten(t: Tensor) -> Tensor:
        """Returns flattened tensor containing the same data."""
        return Tensor(t._data.ravel(), gpu=t.gpu)

    @staticmethod
    def fill(shape: tuple[int, ...], val: np.float32, gpu=False) -> Tensor:
        """Fill a new tensor of the given shape with a scalar."""
        if gpu:
            return Tensor(
                clarray.empty(QUEUE, shape, dtype=np.float32).fill(val),
                gpu=True,
            )
        return Tensor(np.full(shape, val))

    @staticmethod
    def where(
        cond: Tensor,
        fst: Union[Tensor, Scalar],
        snd: Union[Tensor, Scalar],
    ) -> Tensor:
        """Element-wise select: `fst` where cond holds, `snd` otherwise.

        On GPU, scalar operands are broadcast by materialising filled
        arrays before calling clarray.if_positive.
        """
        if cond.gpu:
            if isinstance(fst, Tensor) and isinstance(snd, Tensor):
                return Tensor(
                    clarray.if_positive(cond._data, fst._data, snd._data),
                    gpu=True,
                )

            shape: tuple[int, ...] = cond._data.shape
            if not isinstance(fst, Tensor) and isinstance(snd, Tensor):
                snd = snd._data
                fst = clarray.empty(QUEUE, shape, dtype=np.float32).fill(fst)
            elif isinstance(fst, Tensor) and not isinstance(snd, Tensor):
                fst = fst._data
                snd = clarray.empty(QUEUE, shape, dtype=np.float32).fill(snd)
            elif not isinstance(fst, Tensor) and not isinstance(snd, Tensor):
                fst = clarray.empty(QUEUE, shape, dtype=np.float32).fill(fst)
                snd = clarray.empty(QUEUE, shape, dtype=np.float32).fill(snd)
            return Tensor(clarray.if_positive(cond._data, fst, snd), gpu=True)

        # CPU: np.where handles scalar broadcasting directly.
        if not isinstance(fst, Tensor) and isinstance(snd, Tensor):
            return Tensor(np.where(cond._data, fst, snd._data))
        if isinstance(fst, Tensor) and not isinstance(snd, Tensor):
            return Tensor(np.where(cond._data, fst._data, snd))
        if not isinstance(fst, Tensor) and not isinstance(snd, Tensor):
            return Tensor(np.where(cond._data, fst, snd))
        return Tensor(np.where(cond._data, fst._data, snd._data))

    @staticmethod
    def reshape(t: Tensor, shape: tuple) -> Tensor:
        """Returns a tensor containing the same data with a new shape."""
        if t.gpu:
            return Tensor(clarray.reshape(t._data, shape), gpu=True)
        return Tensor(np.reshape(t._data, shape))

    @staticmethod
    def log(t: Tensor) -> Tensor:
        """Returns a natural logarithm of a tensor."""
        if t.gpu:
            return Tensor(clmath.log(t._data), gpu=True)
        return Tensor(np.log(t._data))

    @staticmethod
    def tanh(t: Tensor) -> Tensor:
        """Returns a tanh of a tensor."""
        if t.gpu:
            return Tensor(clmath.tanh(t._data), gpu=True)
        return Tensor(np.tanh(t._data))

    @staticmethod
    def exp(t: Tensor) -> Tensor:
        """Returns a natural exponent of a tensor."""
        if t.gpu:
            return Tensor(clmath.exp(t._data), gpu=True)
        return Tensor(np.exp(t._data))

    @staticmethod
    def maximum(t: Tensor, uts: Union[Tensor, Scalar]) -> Tensor:
        """Element-wise maximum of a tensor and a tensor/scalar."""
        if t.gpu:
            if not isinstance(uts, Tensor):
                # Broadcast the scalar by materialising a filled array.
                ot: cl.array.Array = clarray.empty(
                    QUEUE, t.shape, dtype=np.float32
                ).fill(uts)
                return Tensor(clarray.maximum(t._data, ot), gpu=True)
            return Tensor(clarray.maximum(t._data, uts._data), gpu=True)
        if not isinstance(uts, Tensor):
            return Tensor(np.maximum(t._data, uts))
        return Tensor(np.maximum(t._data, uts._data))

    @staticmethod
    def minimum(t: Tensor, uts: Union[Tensor, Scalar]) -> Tensor:
        """Element-wise minimum of a tensor and a tensor/scalar."""
        if t.gpu:
            if not isinstance(uts, Tensor):
                ot: cl.array.Array = clarray.empty(
                    QUEUE, t.shape, dtype=np.float32
                ).fill(uts)
                return Tensor(clarray.minimum(t._data, ot), gpu=True)
            return Tensor(clarray.minimum(t._data, uts._data), gpu=True)
        if not isinstance(uts, Tensor):
            return Tensor(np.minimum(t._data, uts))
        return Tensor(np.minimum(t._data, uts._data))

    @staticmethod
    def power(t: Tensor, exponent: Union[Tensor, Scalar]) -> Tensor:
        """Raise all elements of the tensor to the specified power."""
        if not isinstance(exponent, Tensor):
            return Tensor(t._data ** exponent, gpu=t.gpu)
        return Tensor(t._data ** exponent._data, gpu=t.gpu or exponent.gpu)

    @staticmethod
    def square(t: Tensor) -> Tensor:
        """Return a square-valued tensor."""
        return Tensor(t._data ** 2, gpu=t.gpu)

    @staticmethod
    def transpose(t: Tensor) -> Tensor:
        """Returns a transpose of a tensor."""
        if t.gpu:
            return Tensor(clarray.transpose(t._data), gpu=True)
        return Tensor(np.transpose(t._data), gpu=t.gpu)

    @staticmethod
    def zeros(shape: tuple = (1, 1), gpu=False) -> Tensor:
        """Return a new tensor of given shape and type, filled with zeros."""
        if gpu:
            return Tensor(clarray.zeros(QUEUE, shape, np.float32), gpu=True)
        return Tensor(np.zeros(shape, dtype=np.float32))

    @staticmethod
    def zeros_like(t: Tensor, gpu=False) -> Tensor:
        """Return a tensor of zeros with the same shape and type as a given
        tensor.

        NOTE(review): with gpu=False this calls np.zeros_like on t._data,
        which presumes t currently lives on the CPU -- TODO confirm.
        """
        if gpu:
            return Tensor(clarray.zeros_like(t._data), gpu=True)
        return Tensor(np.zeros_like(t._data, dtype=np.float32))
class Random:
    """Random number generation for tensors (float32 throughout)."""

    @staticmethod
    def normal(
        shape: Union[tuple[int, ...], int] = (1, 1), gpu=False
    ) -> Tensor:
        """Draw random samples from a normal (Gaussian) distribution."""
        if gpu:
            # Philox is a counter-based RNG well suited to GPU generation.
            return Tensor(
                clrandom.PhiloxGenerator(CONTEXT).normal(
                    cq=QUEUE, shape=shape, dtype=np.float32
                ),
                gpu=True,
            )
        return Tensor(np.random.normal(size=shape).astype(np.float32))

    @staticmethod
    def rand(shape: Union[tuple[int, ...], int] = (1, 1), gpu=False) -> Tensor:
        """Returns a tensor of random values in a given shape."""
        if gpu:
            return Tensor(clrandom.rand(QUEUE, shape, np.float32), gpu=True)
        # np.random.rand takes dimensions as separate args, so unpack tuples.
        if isinstance(shape, tuple):
            return Tensor(np.random.rand(*shape).astype(np.float32))
        return Tensor(np.random.rand(shape).astype(np.float32))

    @staticmethod
    def uniform(
        shape: Union[tuple[int, ...], int] = (1, 1),
        min: float = 0.0,
        max: float = 1.0,
        gpu=False,
    ) -> Tensor:
        """Draw samples from a uniform distribution over [min, max)."""
        if gpu:
            return Tensor(
                clrandom.PhiloxGenerator(CONTEXT).uniform(
                    cq=QUEUE, shape=shape, dtype=np.float32, a=min, b=max
                ),
                gpu=True,
            )
        return Tensor(
            np.random.uniform(min, max, size=shape).astype(np.float32)
        )
class Reduce:
    """Reduction operations on tensors (each returns a host-side scalar)."""

    @staticmethod
    def max(t: Tensor) -> np.float32:
        """The maximum of the values in a tensor."""
        if not t.gpu:
            return np.max(t._data)
        return clarray.max(t._data).get().flat[0]

    @staticmethod
    def min(t: Tensor) -> np.float32:
        """The minimum of the values in a tensor."""
        if not t.gpu:
            return np.min(t._data)
        return clarray.min(t._data).get().flat[0]

    @staticmethod
    def sum(t: Tensor) -> np.float32:
        """The sum of the values in a tensor."""
        if not t.gpu:
            return np.sum(t._data)
        return clarray.sum(t._data).get().flat[0]

    @staticmethod
    def mean(t: Tensor) -> np.float32:
        """The mean of the values in a tensor."""
        if not t.gpu:
            return np.mean(t._data)
        # GPU path: reduce to the sum on-device, then divide on the host.
        return clarray.sum(t._data).get().flat[0] / t._data.size
| 2.8125 | 3 |
fftbg/download.py | rainbowbismuth/birb-brains-bot | 1 | 12767068 | import json
import logging
from datetime import datetime
import requests
from fftbg.config import FFTBG_API_URL, TOURNAMENTS_ROOT
LOG = logging.getLogger(__name__)
def get_tournament_list():
    """Return (tournament id, last-modified datetime) pairs for all tournaments."""
    response = requests.get(f'{FFTBG_API_URL}/api/tournaments?limit=6000')
    pairs = []
    for entry in response.json():
        pairs.append((entry['ID'], datetime.fromisoformat(entry['LastMod'])))
    return pairs
def get_tournament(tid):
    """Fetch the raw JSON text for tournament `tid`."""
    url = f'{FFTBG_API_URL}/tournament/{tid}/json'
    return requests.get(url).text
def get_latest_tournament():
    """Fetch the raw JSON text for the most recent tournament."""
    LOG.info('Retrieving latest tournament json')
    url = f'{FFTBG_API_URL}/tournament/latest/json'
    return requests.get(url).text
def tournament_sync():
    """Download every tournament newer than its cached copy.

    Returns True if at least one file on disk was created or refreshed.
    """
    LOG.info('Beginning tournament sync')
    TOURNAMENTS_ROOT.mkdir(exist_ok=True)
    changed = False
    for tid, last_mod in get_tournament_list():
        path = TOURNAMENTS_ROOT / f'{tid}.json'
        if path.exists():
            cached = json.loads(path.read_text())
            # Skip files whose cached LastMod is at least as new as the server's.
            if last_mod <= datetime.fromisoformat(cached['LastMod']):
                continue
        LOG.info(f'Downloading tournament {tid} modified {last_mod.isoformat()}')
        path.write_text(get_tournament(tid))
        changed = True
    return changed
| 2.53125 | 3 |
Craps/testGame.py | Kevin7196/CrapsIsFun | 0 | 12767069 | __author__ = '<NAME>'
from craps import CrapsGame

# Manual smoke test: show the starting bank, place a 50-unit bet,
# then roll the dice twice so the bet can resolve.
aCrapsGame = CrapsGame()
print(aCrapsGame.getCurrentBank())
aCrapsGame.placeBet(50)
aCrapsGame.throwDice()
aCrapsGame.throwDice()
print(aCrapsGame.getCurrentBank()) | 2.296875 | 2 |
nevergrad/optimization/test_externalbo.py | juliendehos/nevergrad | 0 | 12767070 | <reponame>juliendehos/nevergrad<gh_stars>0
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import numpy as np
import nevergrad as ng
from .optimizerlib import registry
from .externalbo import _hp_parametrization_to_dict, _hp_dict_to_parametrization
# Each case pairs a parametrization with whether the HyperOpt wrapper is
# expected to build an internal transform for it (`has_transform`).
@pytest.mark.parametrize(  # type: ignore
    "parametrization,has_transform",
    [
        (ng.p.Choice(list(range(10))), True),
        (ng.p.Scalar(lower=0, upper=1), True),
        (ng.p.Scalar(lower=0, upper=10).set_integer_casting(), True),
        (ng.p.Log(lower=1e-3, upper=1e3), True),
        (ng.p.Array(init=np.zeros(10)), True),
        (ng.p.Instrumentation(ng.p.Scalar(lower=0, upper=1), a=ng.p.Choice(list(range(10)))), False),
        (
            ng.p.Instrumentation(
                a=ng.p.Choice([ng.p.Scalar(lower=0, upper=1), ng.p.Scalar(lower=100, upper=1000)])
            ),
            True,
        ),
        (
            ng.p.Instrumentation(
                a=ng.p.Choice(
                    [
                        ng.p.Choice(list(range(10))),
                        ng.p.Scalar(lower=0, upper=1),
                    ]
                )
            ),
            False,
        ),
        (
            ng.p.Instrumentation(
                a=ng.p.Choice(
                    [
                        ng.p.Instrumentation(
                            b=ng.p.Choice(list(range(10))), c=ng.p.Log(lower=1e-3, upper=1e3)
                        ),
                        ng.p.Instrumentation(
                            d=ng.p.Scalar(lower=0, upper=1), e=ng.p.Log(lower=1e-3, upper=1e3)
                        ),
                    ]
                )
            ),
            False,
        ),
    ],
)
def test_hyperopt(parametrization, has_transform) -> None:
    """Check that telling a *not-asked* candidate produces the same internal
    hyperopt trial state as telling an asked one, and that parallel asks work.
    """
    optim1 = registry["HyperOpt"](parametrization=parametrization, budget=5)
    optim2 = registry["HyperOpt"](parametrization=parametrization.copy(), budget=5)
    for it in range(4):
        cand = optim1.ask()
        optim1.tell(cand, 0)  # Tell asked
        # Strip the trial id so optim2 treats the candidate as external.
        del cand._meta["trial_id"]
        optim2.tell(cand, 0)  # Tell not asked
        # Both optimizers must have recorded identical trial values and ids.
        assert optim1.trials._dynamic_trials[it]["misc"]["vals"] == optim2.trials._dynamic_trials[it]["misc"]["vals"]  # type: ignore
        assert optim1.trials.new_trial_ids(1) == optim2.trials.new_trial_ids(1)  # type: ignore
        assert optim1.trials.new_trial_ids(1)[0] == (it + 2)  # type: ignore
    assert (optim1._transform is not None) == has_transform  # type: ignore
    # Test parallelization
    opt = registry["HyperOpt"](parametrization=parametrization, budget=30, num_workers=5)
    for k in range(40):
        cand = opt.ask()
        if not k:
            opt.tell(cand, 1)
# Each case: a parametrization plus (ng value, hyperopt dict encoding,
# hyperopt nested-dict decoding) triples to round-trip through the helpers.
@pytest.mark.parametrize(  # type: ignore
    "parametrization,values",
    [
        (
            ng.p.Instrumentation(
                a=ng.p.Choice([ng.p.Choice(list(range(10))), ng.p.Scalar(lower=0, upper=1)])
            ),
            [
                (((), {"a": 0.5}), {"a": [1], "a__1": [0.5]}, {"args": {}, "kwargs": {"a": 0.5}}),
                (((), {"a": 1}), {"a": [0], "a__0": [1]}, {"args": {}, "kwargs": {"a": 1}}),
            ],
        ),
        (
            ng.p.Instrumentation(ng.p.Scalar(lower=0, upper=1), a=ng.p.Choice(list(range(10)))),
            [
                (((0.5,), {"a": 3}), {"0": [0.5], "a": [3]}, {"args": {"0": 0.5}, "kwargs": {"a": 3}}),
                (((0.99,), {"a": 0}), {"0": [0.99], "a": [0]}, {"args": {"0": 0.99}, "kwargs": {"a": 0}}),
            ],
        ),
        (
            ng.p.Instrumentation(
                a=ng.p.Choice(
                    [
                        ng.p.Instrumentation(
                            b=ng.p.Choice(list(range(10))), c=ng.p.Log(lower=1e-3, upper=1e3)
                        ),
                        ng.p.Instrumentation(
                            d=ng.p.Scalar(lower=0, upper=1), e=ng.p.Log(lower=1e-3, upper=1e3)
                        ),
                    ]
                )
            ),
            [
                (
                    ((), {"a": ((), {"d": 0.5, "e": 1.0})}),
                    {"a": [1], "d": [0.5], "e": [1.0]},
                    {"args": {}, "kwargs": {"a": {"args": {}, "kwargs": {"d": 0.5, "e": 1.0}}}},
                ),
                (
                    ((), {"a": ((), {"b": 0, "c": 0.014})}),
                    {"a": [0], "b": [0], "c": [0.014]},
                    {"args": {}, "kwargs": {"a": {"args": {}, "kwargs": {"b": 0, "c": 0.014}}}},
                ),
            ],
        ),
    ],
)
def test_hyperopt_helpers(parametrization, values):
    """Round-trip: ng value -> hyperopt flat dict, and hyperopt nested dict -> ng value."""
    for val, dict_val, hyperopt_val in values:
        parametrization.value = val
        assert _hp_parametrization_to_dict(parametrization) == dict_val
        assert _hp_dict_to_parametrization(hyperopt_val) == parametrization.value
| 1.914063 | 2 |
code/GA.py | xijunlee/SPC-POSM | 0 | 12767071 | import math
import random
from random import randint
import copy
from sklearn.metrics import mean_squared_error
import numpy as np
from pyspark import SparkContext, SparkConf
import time
import pandas as pd
import sys
class Chromosome:
    """One GA individual: a capacity-level choice (gene) per provider,
    plus the fitness statistics computed for that choice."""

    def __init__(self):
        # geneSerial[i] is the chosen capacity/cost index for provider i.
        self.geneSerial, self.v = [], []
        self.fitness = 0
        # Aggregates filled in by the fitness evaluation.
        self.sigmaCost = self.sigmaDemand = self.sigmaCapacity = 0
        self.mmd = 0
        # PSO-style bookkeeping slots (not used by the GA itself).
        self.pbest = self.cluster = None
class Customer:
    """A demand point with a 2-D location."""

    def __init__(self):
        self.x = self.y = 0
        self.demand = 0
class Provider:
    """A service site with one concrete capacity level and its cost."""

    def __init__(self):
        self.x = self.y = 0
        self.capacity = 0
        self.cost = 0
class ProviderPlus:
    """A candidate site offering `cnt` alternative (capacity, cost) levels."""

    def __init__(self):
        self.x = self.y = 0
        self.cnt = 0
        self.capacity, self.cost = [], []
class PO:
    """A problem instance: the provider and customer sets."""

    def __init__(self):
        self.PROVIDERS, self.CUSTOMERS = [], []
class Match:
    """An assignment edge: customer `o` served by provider `p`
    with flow weight `w` over distance `dis`."""

    def __init__(self):
        self.o = self.p = self.w = self.dis = 0
class Queue:
    """A BFS ring-buffer slot: a node number plus its parent's slot index."""

    def __init__(self):
        self.num = self.parent = 0
class SwapChainSolver:
    """Minimise the maximum customer-provider distance of a flow assignment.

    Starts from a greedy nearest-first assignment, then repeatedly picks the
    current extreme (longest) match and tries to re-route its flow through a
    "swap chain" of existing matches into spare provider capacity, shrinking
    the maximum distance until no improving chain exists.

    Graph node numbering convention used by the BFS helpers: customers are
    0..len(O)-1, provider i is len(O)+i.
    """

    def __init__(self, providers, customers):
        self.P = providers
        self.O = customers
        self.Assignment = []

    def Solver(self):
        """Run the improvement loop; return the final maximum match distance."""
        self.initiallize_assignment()
        while True:
            extremeMatch = copy.deepcopy(self.find_d_satisfiable())
            if not extremeMatch:
                break
            else:
                self.swap(extremeMatch)
        self.Assignment = sorted(self.Assignment, key=self.returnDis)
        return self.Assignment[len(self.Assignment) - 1].dis

    def swap(self, m):
        """Remove extreme match `m` and re-route its flow along swap chains."""
        self.sub_match(m)
        chain = []
        while True:
            chain = self.find_chain(m)
            if not chain:
                break
            else:
                # chain breaking: ws is the bottleneck flow along the chain
                ws = float('inf')
                ws = min(ws, self.P[chain[0] - len(self.O)].capacity)
                ws = min(ws, self.O[chain[len(chain) - 1]].demand)
                for i in range(1, len(chain) - 1, 2):
                    tmpo = chain[i]
                    tmpp = chain[i + 1] - len(self.O)
                    for tmp in self.Assignment:
                        if tmp.o == tmpo and tmp.p == tmpp:
                            ws = min(ws, tmp.w)
                            break
                # Remove ws units from every intermediate match on the chain.
                for i in range(1, len(chain) - 1, 2):
                    tmpo = chain[i]
                    tmpp = chain[i + 1] - len(self.O)
                    for tmp in self.Assignment:
                        if tmp.o == tmpo and tmp.p == tmpp:
                            tmpm = copy.deepcopy(tmp)
                            self.sub_match(tmp)
                            if tmpm.w != ws:
                                tmpm.w = tmpm.w - ws
                                self.add_match(tmpm)
                            break
                # chain matching: push ws units along every shifted edge
                for i in range(0, len(chain), 2):
                    tmpo = chain[i + 1]
                    tmpp = chain[i] - len(self.O)
                    tmpm = Match()
                    tmpm.o = tmpo
                    tmpm.p = tmpp
                    tmpm.w = ws
                    tmpm.dis = math.sqrt(
                        (self.O[tmpo].x - self.P[tmpp].x) ** 2 + (self.O[tmpo].y - self.P[tmpp].y) ** 2)
                    self.add_match(tmpm)
                if self.O[m.o].demand == 0:
                    break
        # post matching: any leftover demand goes back to the original provider
        if self.O[m.o].demand > 0:
            tmpm = Match()
            tmpm.o = m.o
            tmpm.p = m.p
            tmpm.w = self.O[m.o].demand
            tmpm.dis = math.sqrt((self.O[m.o].x - self.P[m.p].x) ** 2 + (self.O[m.o].y - self.P[m.p].y) ** 2)
            self.add_match(tmpm)

    def find_chain(self, m):
        """BFS for an alternating customer/provider chain from m.o that ends
        at a provider with spare capacity, using only edges shorter than m.dis.

        Returns the chain (provider-first, reversed order) or False.
        """
        chain = []
        flag = False
        maxDis = m.dis
        Q = []
        hash = []
        for i in range(0, 2 * (len(self.O) + len(self.P))):
            Q.append(Queue())
            hash.append(0)
        head = 0
        tail = 0
        hash[m.o] = 1
        Q[head].num = m.o
        Q[head].parent = -1
        tail = tail + 1
        while not flag and head != tail:
            CurrentNode = Q[head].num
            if CurrentNode < len(self.O):
                # Customer node: expand to every provider strictly closer than maxDis.
                for i in range(0, len(self.P)):
                    tmpDis = math.sqrt(
                        (self.O[CurrentNode].x - self.P[i].x) ** 2 + (self.O[CurrentNode].y - self.P[i].y) ** 2)
                    if tmpDis < maxDis and hash[i + len(self.O)] == 0:
                        Q[tail].num = i + len(self.O)
                        Q[tail].parent = head
                        hash[i + len(self.O)] = 1
                        tail = (tail + 1) % len(Q)
            else:
                pNode = CurrentNode - len(self.O)
                if self.P[pNode].capacity == 0:
                    # Saturated provider: continue through its assigned customers.
                    for tmp in self.Assignment:
                        if tmp.p == pNode and hash[tmp.o] == 0:
                            hash[tmp.o] = 1
                            Q[tail].num = tmp.o
                            Q[tail].parent = head
                            tail = (tail + 1) % len(Q)
                else:
                    # Spare capacity found: reconstruct the chain via parents.
                    flag = True
                    tmp = head
                    while tmp >= 0:
                        chain.append(Q[tmp].num)
                        tmp = Q[tmp].parent
            head = (head + 1) % len(Q)
        if flag:
            return chain
        else:
            return flag

    def find_d_satisfiable(self):
        """Return an extreme (max-distance) match whose flow could be re-routed
        through shorter edges, or False if none exists."""
        hash = []
        myQueue = []
        haveFound = False
        for i in range(0, len(self.O) + len(self.P)):
            hash.append(0)
        for i in range(0, 2 * (len(self.O) + len(self.P))):
            myQueue.append(Queue())
        self.Assignment = sorted(self.Assignment, key=self.returnDis)
        maxDis = self.Assignment[len(self.Assignment) - 1].dis
        k = len(self.Assignment) - 1
        extremeMatch = False
        while not haveFound and self.Assignment[k].dis == maxDis and k >= 0:
            # BUG FIX: the original reset loop was `for tmp in hash: tmp = 0`,
            # which only rebinds the loop variable and leaves the visited flags
            # set from the previous candidate, corrupting every BFS after the
            # first. Reset the flags in place instead.
            for i in range(len(hash)):
                hash[i] = 0
            for tmp in myQueue:
                tmp.num = 0
                tmp.parent = 0
            head = 0
            tail = 0
            hash[self.Assignment[k].o] = 1
            myQueue[head].num = self.Assignment[k].o
            myQueue[head].parent = -1
            tail += 1
            # Temporarily lift the candidate match so its capacity/demand are
            # available during the reachability search; restored below.
            extremeMatch = copy.deepcopy(self.Assignment[k])
            self.sub_match(extremeMatch)
            while head != tail and not haveFound:
                CurrentNode = myQueue[head].num
                if CurrentNode < len(self.O):
                    for i in range(0, len(self.P)):
                        tmpDis = math.sqrt(
                            (self.O[CurrentNode].x - self.P[i].x) ** 2 + (self.O[CurrentNode].y - self.P[i].y) ** 2)
                        if tmpDis < maxDis and hash[i + len(self.O)] == 0:
                            myQueue[tail].num = i + len(self.O)
                            myQueue[tail].parent = head
                            hash[i + len(self.O)] = 1
                            tail = (tail + 1) % len(myQueue)
                else:
                    pNode = CurrentNode - len(self.O)
                    if self.P[pNode].capacity == 0:
                        for tmp in self.Assignment:
                            if tmp.p == pNode and hash[tmp.o] == 0:
                                hash[tmp.o] = 1
                                myQueue[tail].num = tmp.o
                                myQueue[tail].parent = head
                                tail = (tail + 1) % len(myQueue)
                    else:
                        haveFound = True
                head = (head + 1) % len(myQueue)
            self.add_match(extremeMatch)
            k = k - 1
        if haveFound:
            return extremeMatch
        else:
            return False

    def distance(self, s):
        """Sort key for the {'p', 'distance'} dicts built during initialization."""
        return s['distance']

    def returnDis(self, s):
        """Sort key: a match's distance."""
        return s.dis

    def add_match(self, m):
        """Merge match `m` into the assignment and debit capacity/demand."""
        flag = False
        for tmp in self.Assignment:
            if (m.o == tmp.o and m.p == tmp.p):
                tmp.w += m.w
                flag = True
                break
        if flag == False:
            self.Assignment.append(copy.deepcopy(m))
        self.P[m.p].capacity -= m.w
        self.O[m.o].demand -= m.w

    def sub_match(self, m):
        """Remove match `m`'s flow and credit capacity/demand back."""
        self.P[m.p].capacity += m.w
        self.O[m.o].demand += m.w
        for tmp in self.Assignment:
            if m.o == tmp.o and m.p == tmp.p:
                tmp.w -= m.w
                if tmp.w == 0:
                    self.Assignment.remove(tmp)
                break

    def initiallize_assignment(self):
        """Greedy initial assignment: each customer fills nearest providers first."""
        distanceList = []
        for i in range(0, len(self.O)):
            distanceList = []
            for j in range(0, len(self.P)):
                dis = math.sqrt((self.O[i].x - self.P[j].x) ** 2 + (self.O[i].y - self.P[j].y) ** 2)
                tmp = {'p': j, 'distance': dis}
                distanceList.append(tmp)
            distanceList = sorted(distanceList, key=self.distance)
            for j in range(0, len(self.P)):
                tmp = min(self.O[i].demand, self.P[distanceList[j]['p']].capacity)
                if (tmp > 0):
                    m = Match()
                    m.o = i
                    m.p = distanceList[j]['p']
                    m.w = tmp
                    m.dis = distanceList[j]['distance']
                    self.add_match(m)
                if self.O[i].demand == 0:
                    break
        self.Assignment = sorted(self.Assignment, key=self.returnDis)
class Surrogate:
    """Gradient-boosting regression surrogate for the expensive MMD evaluation.

    dataPool is a dict with keys 'X' (gene serials) and 'Y' (observed MMDs).
    """

    def __init__(self, dataPool):
        # BUG FIX: `ensemble` was referenced without ever being imported
        # (only sklearn.metrics is imported at module level), so constructing
        # a Surrogate raised NameError. Import it here, function-scoped, to
        # keep the fix self-contained.
        from sklearn import ensemble
        self.m_X = dataPool['X']
        self.m_Y = dataPool['Y']
        # NOTE(review): these hyper-parameters are defined but never passed to
        # the regressor below; confirm whether
        # GradientBoostingRegressor(**self.m_Params) was intended.
        self.m_Params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 2,
                         'learning_rate': 0.01, 'loss': 'ls'}
        self.m_Regressor = ensemble.GradientBoostingRegressor()

    def trainModel(self):
        """Fit the regressor on the first 90% of the pool and print held-out MSE."""
        X = np.array(self.m_X)
        Y = np.array(self.m_Y)
        offset = int(X.shape[0] * 0.9)
        X_train, y_train = X[:offset, ], Y[:offset]
        X_test, y_test = X[offset:, ], Y[offset:]
        self.m_Regressor.fit(X_train, y_train)
        mse = mean_squared_error(y_test, self.m_Regressor.predict(X_test))
        print("MSE: %.4f" % mse)

    def predict(self, x):
        """Predict MMD for the encoded gene serial(s) in `x` (2-D array-like)."""
        return self.m_Regressor.predict(x)
def LoadDataFromText(txtpath):
    """Parse a problem instance file and return (PROVIDERS, CUSTOMERS).

    File layout (space separated):
        <#providers>
        x y cnt cap_1..cap_cnt cost_1..cost_cnt      (one line per provider)
        <#customers>
        x y demand                                   (one line per customer)
    """
    # Idiom/robustness fix: use a context manager so the file handle is closed
    # even if parsing raises, instead of a bare open()/close() pair.
    with open(txtpath, "r") as fp:
        arr = [line.replace("\n", "").split(" ") for line in fp.readlines()]
    NumberOfProviders = int(arr[0][0])
    PROVIDERS = []
    for i in range(1, NumberOfProviders + 1):
        tmp = arr[i]
        tmpProvider = ProviderPlus()
        tmpProvider.x = float(tmp[0])
        tmpProvider.y = float(tmp[1])
        tmpProvider.cnt = int(tmp[2])
        # Capacities occupy fields 3..3+cnt-1, costs the following cnt fields.
        for j in range(0, tmpProvider.cnt):
            tmpProvider.capacity.append(float(tmp[j + 3]))
            tmpProvider.cost.append(float(tmp[j + 3 + tmpProvider.cnt]))
        PROVIDERS.append(tmpProvider)
    NumberOfCustomers = int(arr[NumberOfProviders + 1][0])
    CUSTOMERS = []
    for i in range(0, NumberOfCustomers):
        tmp = arr[i + NumberOfProviders + 2]
        tmpCustomer = Customer()
        tmpCustomer.x = float(tmp[0])
        tmpCustomer.y = float(tmp[1])
        tmpCustomer.demand = float(tmp[2])
        CUSTOMERS.append(tmpCustomer)
    return PROVIDERS, CUSTOMERS
class GA:
    """Genetic algorithm (with an optional tabu local search) for the
    set-covering provider-selection problem, evaluating fitness in parallel
    on a Spark cluster.

    NOTE(review): this file is Python 2 (print statements, integer `/`).
    """

    def __init__(self, maxIter, maxBlock, populationSize, probMutate, probCross, probSelect, D, po, alpha, beta, surrogateFlag):
        self.m_MaxIter = maxIter
        self.m_MaxBlock = maxBlock
        self.m_PopulationSize = populationSize
        self.m_Population = []
        self.m_SurrogateFlag = surrogateFlag
        self.m_Runtime = 0
        self.m_ProbMutate = probMutate
        self.m_ProbCross = probCross
        self.m_ProbSelect = probSelect
        self.m_PO = po
        self.m_D = D
        self.m_Alpha = alpha
        self.m_Beta = beta
        self.m_Block = 0
        self.m_BestSolution = None
        self.m_BestFitness = -1000
        self.m_Iter = 0
        self.m_TabuList = []
        self.m_CandidateList = []
        # NOTE(review): the next three read module-level globals that are only
        # defined inside the __main__ block (tabuMaxLength, tabuMaxIter,
        # maxNumCandidate); constructing a GA without them raises NameError.
        self.m_TabuMaxLength = tabuMaxLength
        self.m_TabuMaxIter = tabuMaxIter
        self.m_MaxNumCandidate = maxNumCandidate
        self.m_CurrentSolution = None
        self.m_BestCostPerGen = []
        self.m_ConverGen = 0 # mark the generation when algorithm converges

    def select(self):
        """Roulette-wheel selection with elitism: the current best individual
        is always copied into the next population."""
        nextPopulation = []
        pi = []
        fitnessSum = 0
        self.m_Population = sorted(self.m_Population, key=lambda x:x.fitness)
        nextPopulation.append(copy.deepcopy(self.m_Population[-1]))
        for ind in self.m_Population:
            fitnessSum = fitnessSum + ind.fitness
        # pi is the cumulative selection-probability distribution.
        pi.append(self.m_Population[0].fitness / fitnessSum)
        for ri in range(1, len(self.m_Population)):
            pi.append(self.m_Population[ri].fitness / fitnessSum + pi[ri - 1])
        copyNum = len(self.m_Population) - 1
        for ri in range(1, len(self.m_Population)):
            randnum = random.random()
            for j in range(len(pi)):
                if randnum <= pi[j]:
                    copyNum = j
                    break
            nextPopulation.append(copy.deepcopy(self.m_Population[copyNum]))
        self.m_Population = nextPopulation

    def crossover(self):
        """Two-point crossover: pair each of the first half of the population
        with a random unpaired member of the second half."""
        # chromosomes cross
        hash = []
        for ci in range(len(self.m_Population)):
            hash.append(0)
        hash[0] = 1
        for ci in range(1, len(self.m_Population) / 2):
            hash[ci] = 1
            j = 0
            while hash[j] == 1:
                j = len(self.m_Population) / 2 + randint(0, len(self.m_Population) / 2 - 1)
            hash[j] = 1
            if random.random() > self.m_ProbCross:
                # cross gene between pointA and pointB
                pointA = randint(0, len(self.m_Population[0].geneSerial) - 1)
                pointB = randint(0, len(self.m_Population[0].geneSerial) - 1)
                if pointA >= pointB:
                    tmp = pointA
                    pointA = pointB
                    pointB = tmp
                if ci != 0 and j != 0:
                    for k in range(pointA, pointB + 1):
                        tmp = self.m_Population[ci].geneSerial[k]
                        self.m_Population[ci].geneSerial[k] = self.m_Population[j].geneSerial[k]
                        self.m_Population[j].geneSerial[k] = tmp

    def mutate(self):
        """Point mutation: randomly reassign a gene to any valid capacity index,
        at an expected rate of m_ProbMutate per gene."""
        # chromosomes mutation
        for k in range(0, int(len(self.m_Population) * len(self.m_Population[0].geneSerial) * self.m_ProbMutate) + 1):
            mi = randint(0, len(self.m_Population) - 1)
            ik = randint(0, len(self.m_Population[0].geneSerial) - 1)
            vk = randint(0, self.m_PO.PROVIDERS[ik].cnt - 1)
            self.m_Population[mi].geneSerial[ik] = vk

    def calcPopulationFitness(self, sc):
        """Evaluate the whole population's fitness in parallel on Spark and
        rebuild m_Population from the collected results."""
        '''
        for chromosome in self.m_Population:
            if self.m_SurrogateFlag:
                chromosome.fitness, chromosome.mmd, chromosome.sigmaCapacity, chromosome.sigmaCost, chromosome.sigmaDemand = self.calcFitnessWithSurrogate(
                    chromosome.geneSerial, self.m_PO, self.m_D)
            else:
                chromosome.fitness, chromosome.mmd, chromosome.sigmaCapacity, chromosome.sigmaCost, chromosome.sigmaDemand = self.calcFitness(
                    chromosome.geneSerial, self.m_PO, self.m_D)
        '''
        raw_data = []
        for chromosome in self.m_Population:
            raw_data.append(chromosome.geneSerial)
        self.m_Population = []
        distPop = sc.parallelize(raw_data)
        # NOTE: the lambda closes over self, so the whole GA object (including
        # m_PO) is serialized to the workers.
        fitnessCalc = distPop.map(lambda geneSerial: self.calcFitnessParallel(geneSerial, copy.copy(self.m_PO), copy.copy(self.m_D)))
        chromosomeCollect = fitnessCalc.collect()
        for (geneSerial, fitness, mmd, sigmaCapacity, sigmaCost, sigmaDemand) in chromosomeCollect:
            chromosome = Chromosome()
            chromosome.geneSerial = geneSerial
            chromosome.fitness = fitness
            chromosome.mmd = mmd
            chromosome.sigmaCapacity = sigmaCapacity
            chromosome.sigmaCost = sigmaCost
            chromosome.sigmaDemand = sigmaDemand
            self.m_Population.append(chromosome)

    def LocalSearch(self, sc):
        """Tabu local search around the current best solution.

        NOTE(review): currently unused (Search comments it out). It also looks
        buggy: `geneSerial` aliases m_CurrentSolution.geneSerial, so every
        candidate mutates the same list in place, and the (flag, pointA,
        pointB) tuple appended below is taken from the *last* loop iteration
        for all candidates — confirm before re-enabling.
        """
        # local search using tabu search
        self.m_Iter, self.m_Block = 0, 0
        self.m_CurrentSolution = self.m_BestSolution
        while self.m_Iter < self.m_TabuMaxIter and self.m_Block < self.m_MaxBlock:
            self.m_CandidateList = []
            raw_data = []
            for _ in range(self.m_MaxNumCandidate):
                flag = randint(0, 1)
                geneSerial = self.m_CurrentSolution.geneSerial
                if flag == 0:
                    pointA = randint(0, len(self.m_CurrentSolution.geneSerial) - 1)
                    pointB = randint(0, len(self.m_CurrentSolution.geneSerial) - 1)
                    tmp = geneSerial[pointA]
                    geneSerial[pointA] = geneSerial[pointB]
                    geneSerial[pointB] = tmp
                else:
                    pointA = -1
                    pointB = randint(0, len(self.m_CurrentSolution.geneSerial) - 1)
                    geneSerial[pointB] = (geneSerial[pointB] + 1) % self.m_PO.PROVIDERS[
                        pointB].cnt
                if (flag, pointA, pointB) not in set(self.m_TabuList):
                    raw_data.append(geneSerial)
            # parallelly compute the fitness for each individual
            distPop = sc.parallelize(raw_data)
            fitnessCalc = distPop.map(lambda geneSerial: self.calcFitnessParallel(geneSerial, copy.copy(self.m_PO), copy.copy(self.m_D)))
            chromosomeCollect = fitnessCalc.collect()
            for (geneSerial, fitness, mmd, sigmaCapacity, sigmaCost, sigmaDemand) in chromosomeCollect:
                chromosome = Chromosome()
                chromosome.geneSerial = geneSerial
                chromosome.fitness = fitness
                chromosome.mmd = mmd
                chromosome.sigmaCapacity = sigmaCapacity
                chromosome.sigmaCost = sigmaCost
                chromosome.sigmaDemand = sigmaDemand
                self.m_CandidateList.append((chromosome, chromosome.fitness, (flag, pointA, pointB)))
            nextBestChromosome, nextBestFitness, tabu = sorted(self.m_CandidateList, key=lambda x: x[1], reverse=True)[
                0]
            if self.m_BestSolution.fitness <= nextBestFitness:
                self.m_BestSolution = copy.deepcopy(nextBestChromosome)
                self.m_Block = 0
            elif math.fabs(self.m_BestSolution.fitness - nextBestFitness) <= 0.001:
                self.m_Block += 1
            if len(self.m_TabuList) >= self.m_TabuMaxLength:
                self.m_TabuList.pop(0)
            self.m_TabuList.append(tabu)
            self.m_CurrentSolution = nextBestChromosome
            self.m_Iter += 1

    def GASearch(self, sc):
        """Main GA loop: init random population, then select/cross/mutate until
        m_MaxIter generations or m_MaxBlock stagnant generations."""
        self.m_Iter, self.m_Block = 0, 0
        for _ in range(self.m_PopulationSize):
            self.m_Population.append(self.generateRandomChromosome())
        self.calcPopulationFitness(sc)
        tmp = sorted(self.m_Population, key=lambda x:x.fitness, reverse=True)
        self.m_BestSolution = tmp[0]
        self.m_BestFitness = self.m_BestSolution.fitness
        # startTime = time.time()
        while self.m_Iter < self.m_MaxIter and self.m_Block < self.m_MaxBlock:
            #print "the " + str(iter) + " th iteration"
            self.select()
            self.crossover()
            self.mutate()
            self.calcPopulationFitness(sc)
            sortedPopulation = sorted(self.m_Population, key=lambda x: x.fitness, reverse=True)
            if sortedPopulation[0].fitness > self.m_BestFitness:
                self.m_BestFitness = sortedPopulation[0].fitness
                self.m_BestSolution = copy.deepcopy(sortedPopulation[0])
                self.m_Block = 0
            elif math.fabs(sortedPopulation[0].fitness - self.m_BestFitness) <= 0.001:
                self.m_Block += 1
            self.m_BestCostPerGen.append(self.m_BestSolution.sigmaCost)
            #print "the best individual serial, fitness, mmd, sigmaCost, sigmaCapacity, sigmaDemand ",\
            #    sortedPopulation[0].geneSerial, sortedPopulation[0].fitness,sortedPopulation[0].mmd, sortedPopulation[0].sigmaCost, sortedPopulation[0].sigmaCapacity, sortedPopulation[0].sigmaDemand
            #print sortedPopulation[0].sigmaCost
            self.m_Iter += 1
        #endTime = time.time()
        #self.m_Runtime = endTime - startTime
        self.m_ConverGen = self.m_Iter

    def Search(self, sc):
        """Run the GA (local search currently disabled) and record wall time."""
        startTime = time.time()
        self.GASearch(sc)
        #self.LocalSearch(sc)
        endTime = time.time()
        self.m_Runtime = endTime - startTime

    def generateRandomChromosome(self):
        """Return a chromosome with a uniformly random capacity index per provider
        (fitness left at its default; it is computed in bulk later)."""
        chromosome = Chromosome()
        for i in range(len(self.m_PO.PROVIDERS)):
            chromosome.geneSerial.append(randint(0, self.m_PO.PROVIDERS[i].cnt - 1))
        #if self.m_SurrogateFlag:
        #    chromosome.fitness, chromosome.mmd, chromosome.sigmaCapacity, chromosome.sigmaCost, chromosome.sigmaDemand = self.calcFitnessWithSurrogate(
        #        chromosome.geneSerial, self.m_PO, self.m_D)
        #else:
        #    chromosome.fitness, chromosome.mmd, chromosome.sigmaCapacity, chromosome.sigmaCost, chromosome.sigmaDemand = self.calcFitness(
        #        chromosome.geneSerial, self.m_PO, self.m_D)
        return chromosome

    def calcFitnessParallel(self, geneSerial, data, D):
        """Worker-side fitness: run the exact SwapChainSolver and map the
        resulting max distance (mmd) and total cost to an exp-scaled fitness.

        Returns (geneSerial, fitness, mmd, sigmaCapacity, sigmaCost, sigmaDemand).
        """
        # alpha and beta are weight factor
        alpha = self.m_Alpha
        beta = self.m_Beta
        customers = []
        fitness = 0
        for item in data.CUSTOMERS:
            tmp = Customer()
            tmp.x = copy.deepcopy(item.x)
            tmp.y = copy.deepcopy(item.y)
            tmp.demand = copy.deepcopy(item.demand)
            customers.append(tmp)
        providers = []
        sigmaCost = 0
        sigmaCapacity = 0
        sigmaDemand = 0
        mmd = self.m_D * 1000.0
        for i in range(0, len(geneSerial)):
            tmpProvider = Provider()
            tmpProvider.x = copy.deepcopy(data.PROVIDERS[i].x)
            tmpProvider.y = copy.deepcopy(data.PROVIDERS[i].y)
            tmpProvider.capacity = copy.deepcopy(data.PROVIDERS[i].capacity[geneSerial[i]])
            tmpProvider.cost = copy.deepcopy(data.PROVIDERS[i].cost[geneSerial[i]])
            sigmaCost = sigmaCost + tmpProvider.cost
            sigmaCapacity = sigmaCapacity + tmpProvider.capacity
            providers.append(tmpProvider)
        for item in customers:
            sigmaDemand = sigmaDemand + item.demand
        if sigmaCapacity >= sigmaDemand:
            swapchainsolver = SwapChainSolver(providers, customers)
            mmd = swapchainsolver.Solver()
            if mmd > D:
                # Distance constraint violated.
                fitness = -10.0
            else:
                if sigmaCost != 0:
                    fitness = float(20.0 / sigmaCost)
                else:
                    fitness = 20.0
        else:
            # Infeasible: not enough total capacity for total demand.
            fitness = -20.0
        # print("fitness,mmd,sigmaCapacity,sigmaCost,sigmaDemand:",fitness,mmd,sigmaCapacity,sigmaCost,sigmaDemand)
        # return math.exp(fitness), mmd, sigmaCapacity, sigmaCost, sigmaDemand
        return (geneSerial, math.exp(fitness), mmd, sigmaCapacity, sigmaCost, sigmaDemand)

    def calcFitness(self, geneSerial, data, D):
        """Sequential (driver-side) fitness for one gene serial.

        Same structure as calcFitnessParallel but with different penalty/reward
        constants (4/8 instead of 10/20); returns
        (fitness, mmd, sigmaCapacity, sigmaCost, sigmaDemand).
        """
        alpha = self.m_Alpha
        beta = self.m_Beta
        # alpha and beta are weight factor
        customers = []
        fitness = 0
        for item in data.CUSTOMERS:
            tmp = Customer()
            tmp.x = copy.deepcopy(item.x)
            tmp.y = copy.deepcopy(item.y)
            tmp.demand = copy.deepcopy(item.demand)
            customers.append(tmp)
        providers = []
        sigmaCost = 0
        sigmaCapacity = 0
        sigmaDemand = 0
        mmd = self.m_D * 1000.0
        for i in range(0, len(geneSerial)):
            tmpProvider = Provider()
            tmpProvider.x = copy.deepcopy(data.PROVIDERS[i].x)
            tmpProvider.y = copy.deepcopy(data.PROVIDERS[i].y)
            tmpProvider.capacity = copy.deepcopy(data.PROVIDERS[i].capacity[geneSerial[i]])
            tmpProvider.cost = copy.deepcopy(data.PROVIDERS[i].cost[geneSerial[i]])
            sigmaCost = sigmaCost + tmpProvider.cost
            sigmaCapacity = sigmaCapacity + tmpProvider.capacity
            providers.append(tmpProvider)
        for item in customers:
            sigmaDemand = sigmaDemand + item.demand
        if sigmaCapacity >= sigmaDemand:
            swapchainsolver = SwapChainSolver(providers, customers)
            mmd = swapchainsolver.Solver()
            if mmd > D:
                fitness = -4.0
            else:
                if sigmaCost != 0:
                    fitness = float(4.0 / sigmaCost)
                else:
                    fitness = 8.0
        else:
            fitness = -8.0
        # print("fitness,mmd,sigmaCapacity,sigmaCost,sigmaDemand:",fitness,mmd,sigmaCapacity,sigmaCost,sigmaDemand)
        return math.exp(fitness), mmd, sigmaCapacity, sigmaCost, sigmaDemand

    def calcFitnessWithSurrogate(self, geneSerial, data, D):
        """Fitness using the trained surrogate model instead of the exact solver.

        NOTE(review): reads self.m_Surrogate, which is never assigned anywhere
        in this class — confirm how the surrogate is meant to be attached.
        """
        alpha = self.m_Alpha
        beta = self.m_Beta
        # alpha and beta are weight factor
        customers = []
        fitness = 0
        for item in data.CUSTOMERS:
            tmp = Customer()
            tmp.x = copy.deepcopy(item.x)
            tmp.y = copy.deepcopy(item.y)
            tmp.demand = copy.deepcopy(item.demand)
            customers.append(tmp)
        providers = []
        sigmaCost = 0
        sigmaCapacity = 0
        sigmaDemand = 0
        mmd = self.m_D * 1000.0
        for i in range(0, len(geneSerial)):
            tmpProvider = Provider()
            tmpProvider.x = copy.deepcopy(data.PROVIDERS[i].x)
            tmpProvider.y = copy.deepcopy(data.PROVIDERS[i].y)
            tmpProvider.capacity = copy.deepcopy(data.PROVIDERS[i].capacity[geneSerial[i]])
            tmpProvider.cost = copy.deepcopy(data.PROVIDERS[i].cost[geneSerial[i]])
            sigmaCost = sigmaCost + tmpProvider.cost
            sigmaCapacity = sigmaCapacity + tmpProvider.capacity
            providers.append(tmpProvider)
        for item in customers:
            sigmaDemand = sigmaDemand + item.demand
        if sigmaCapacity >= sigmaDemand:
            x = np.array(geneSerial).reshape(1, -1)
            mmd = self.m_Surrogate.predict(x)[0]
            if mmd > D:
                fitness = -1000
            elif mmd > 0:
                if sigmaCost != 0:
                    fitness = float(4.0 / sigmaCost)
                else:
                    fitness = 8.0
            else:
                fitness = -6.0
        else:
            fitness = -8.0
        # print"fitness,mmd,sigmaCapacity,sigmaCost,sigmaDemand:",fitness,mmd,sigmaCapacity,sigmaCost,sigmaDemand
        return math.exp(fitness), mmd, sigmaCapacity, sigmaCost, sigmaDemand
if __name__ == "__main__":
    # GA hyper-parameters.
    popSize = 100
    iterMax = 100
    blockMax = 101
    probMutate = 0.0001
    probCross = 0.7
    probSelect = 0.1
    # Maximum allowed customer-provider distance (feasibility constraint).
    D = 40.0
    alpha = 10000000.00
    beta = 0.01
    surrogateFlag = False
    # Tabu-search settings — read as globals by GA.__init__, so they must be
    # defined before any GA is constructed.
    tabuMaxLength = 10
    tabuMaxIter = 100
    maxNumCandidate = 10
    # Number of Spark cores is the single command-line argument.
    core_num = int(sys.argv[1])
    conf = SparkConf().setMaster("spark://noah007:7077") \
        .setAppName("SPC-POSM-GA") \
        .set("spark.submit.deployMode", "client") \
        .set("spark.cores.max", core_num) \
        .set("spark.executor.cores", "10") \
        .set("spark.executor.memory", "20g") \
        .set("spark.driver.memory", "40g")
    sc = SparkContext(conf=conf)
    '''
    experiment on different datasets
    '''
    '''
    #instanceSet = ['nuoxi2G'] #, 'nuoxi3G', 'huawei2G', 'huawei3G']
    instanceSet = [i for i in range(60)]
    aveAns, aveRuntime, aveConverGen = [], [], []
    for i in instanceSet:
        print i, 'th instance ...'
        # po is data contains informantion about PROVIDERS and CUSTOMERS
        po = PO()
        # read providers and customers data from text
        po.PROVIDERS, po.CUSTOMERS = LoadDataFromText('../data/instance' + str(i) + '.txt')
        sumAns, sumRuntime, sumConverGen = 0, 0, 0
        times = 5
        for _ in range(times):
            ga = GA(iterMax, blockMax, popSize, probMutate, probCross, probSelect, D, po, alpha, beta,
                         surrogateFlag)
            ga.Search(sc)
            sumAns += ga.m_BestSolution.sigmaCost
            sumRuntime += ga.m_Runtime
            sumConverGen = ga.m_ConverGen
        aveAns.append(sumAns / (times*1.0))
        aveRuntime.append(sumRuntime / (times*1.0))
        aveConverGen.append(sumConverGen / (times*1.0))
    df = pd.DataFrame({'cost': aveAns, 'GA runtime': aveRuntime, 'ConverGen':aveConverGen})
    df.to_csv('../midResult/gaResult.csv')
    '''
    '''
    experiment of convergence
    '''
    '''
    instList = [4, 25, 47]
    costPerGenList = []
    for i in instList:
        # po is data contains informantion about PROVIDERS and CUSTOMERS
        po = PO()
        # read providers and customers data from text
        po.PROVIDERS, po.CUSTOMERS = LoadDataFromText('../data/instance' + str(i) + '.txt')
        ga = GA(iterMax, blockMax, popSize, probMutate, probCross, probSelect, D, po, alpha, beta,
                surrogateFlag)
        ga.Search(sc)
        costPerGenList.append(ga.m_BestCostPerGen)
    df = pd.DataFrame({'small': costPerGenList[0], 'medium': costPerGenList[1], 'large': costPerGenList[2]})
    df.to_csv('../midResult/gaResultBestCostPerGen.csv')
    '''
    '''
    experiment of convergence
    '''
    # NOTE(review): the next three sections are near-identical copies that
    # only differ in instance range and output file — candidates for a shared
    # helper once the tail of this script is editable.
    # Convergence experiment, batch 1: instances 0..19.
    instNum = 20
    instList = [i for i in range(instNum)]
    costPerGenList = []
    for i in instList:
        # po is the data containing information about PROVIDERS and CUSTOMERS
        po = PO()
        # read providers and customers data from text
        po.PROVIDERS, po.CUSTOMERS = LoadDataFromText('../data/instance' + str(i) + '.txt')
        ga = GA(iterMax, blockMax, popSize, probMutate, probCross, probSelect, D, po, alpha, beta,
                surrogateFlag)
        ga.Search(sc)
        costPerGenList.append(ga.m_BestCostPerGen)
    costPerGenNpArr = np.array(costPerGenList)
    # print costPerGenList
    # print costPerGenNpArr
    # print type(costPerGenNpArr)
    # Sum the best-cost-per-generation curves over all instances in the batch.
    costPerGenNpArr = np.sum(costPerGenNpArr, axis=0)
    print costPerGenNpArr
    # costPerGenNpArr = costPerGenNpArr / float(instNum)
    df = pd.DataFrame({'aveCost': costPerGenNpArr})
    df.to_csv('../midResult/gaResultBestCostPerGen1.csv')
    # Convergence experiment, batch 2: instances 20..39.
    instNum = 40
    instList = [i for i in range(20,instNum)]
    costPerGenList = []
    for i in instList:
        # po is the data containing information about PROVIDERS and CUSTOMERS
        po = PO()
        # read providers and customers data from text
        po.PROVIDERS, po.CUSTOMERS = LoadDataFromText('../data/instance' + str(i) + '.txt')
        ga = GA(iterMax, blockMax, popSize, probMutate, probCross, probSelect, D, po, alpha, beta,
                surrogateFlag)
        ga.Search(sc)
        costPerGenList.append(ga.m_BestCostPerGen)
    costPerGenNpArr = np.array(costPerGenList)
    # print costPerGenList
    # print costPerGenNpArr
    # print type(costPerGenNpArr)
    costPerGenNpArr = np.sum(costPerGenNpArr, axis=0)
    print costPerGenNpArr
    # costPerGenNpArr = costPerGenNpArr / float(instNum)
    df = pd.DataFrame({'aveCost': costPerGenNpArr})
    df.to_csv('../midResult/gaResultBestCostPerGen2.csv')
    # Convergence experiment, batch 3: instances 40..59.
    instNum = 60
    instList = [i for i in range(40,instNum)]
    costPerGenList = []
    for i in instList:
        # po is the data containing information about PROVIDERS and CUSTOMERS
        po = PO()
        # read providers and customers data from text
        po.PROVIDERS, po.CUSTOMERS = LoadDataFromText('../data/instance' + str(i) + '.txt')
        ga = GA(iterMax, blockMax, popSize, probMutate, probCross, probSelect, D, po, alpha, beta,
                surrogateFlag)
        ga.Search(sc)
        costPerGenList.append(ga.m_BestCostPerGen)
    costPerGenNpArr = np.array(costPerGenList)
    # print costPerGenList
    # print costPerGenNpArr
    # print type(costPerGenNpArr)
    costPerGenNpArr = np.sum(costPerGenNpArr, axis=0)
    print costPerGenNpArr
    # costPerGenNpArr = costPerGenNpArr / float(instNum)
    df = pd.DataFrame({'aveCost': costPerGenNpArr})
df.to_csv('../midResult/gaResultBestCostPerGen3.csv') | 2.46875 | 2 |
elements-of-programming-interviews/10.0-balanced-binary-tree.py | vtemian/interviews-prep | 8 | 12767072 | <filename>elements-of-programming-interviews/10.0-balanced-binary-tree.py<gh_stars>1-10
from typing import List, Tuple, Optional
class Node:
    """A binary-tree node holding an integer value and optional children."""

    def __init__(self, val: int, left: 'Node' = None, right: 'Node' = None):
        self.val, self.left, self.right = val, left, right
def is_balanced(tree: "Node") -> bool:
    """Return True iff every node's left/right subtree heights differ by <= 1.

    BUG FIX: the original helper threaded `current_depth` into the recursion
    *and* added 1 on the way back up, so returned values grew by 2 per level.
    A genuine height difference of 1 then showed up as a difference of 2 and
    failed `abs(left - right) < 2`, wrongly reporting e.g. Node(1, Node(2))
    as unbalanced. Computing plain subtree heights fixes this.
    """
    def check(node: Optional["Node"]) -> Tuple[int, bool]:
        """Return (height, is_balanced) for the subtree rooted at `node`."""
        if node is None:
            return 0, True
        left_height, left_ok = check(node.left)
        if not left_ok:
            return 0, False  # short-circuit: height no longer matters
        right_height, right_ok = check(node.right)
        if not right_ok:
            return 0, False
        return max(left_height, right_height) + 1, abs(left_height - right_height) <= 1

    return check(tree)[1]
# A complete tree of height 3: every sibling pair has equal height, so the
# balance check must accept it.
left_subtree = Node(2, Node(3), Node(4))
right_subtree = Node(5, Node(6), Node(7))
tree = Node(1, left_subtree, right_subtree)

result = is_balanced(tree)
assert result, result

# Deepen one branch: node 4 now carries a height-2 chain beneath it, so the
# subtree rooted at node 2 is lopsided and the tree must be rejected.
lopsided = Node(4,
                Node(9),
                Node(8, Node(10), Node(11)))
tree = Node(1,
            Node(2, Node(3), lopsided),
            Node(5, Node(6), Node(7)))

result = is_balanced(tree)
assert not result, result
| 3.75 | 4 |
code/consolefitness.py | ksu-is/workoutmanager | 1 | 12767073 | <filename>code/consolefitness.py
'''
Grade tracking program - think through the goal and design up front:
what is the task, and which functions are needed?
Needs to provide several basic functions for teachers.
Needs a login to protect the student data.
'''
#import libraries first
import statistics as s
#add constants next
# Staff login credentials: username -> password (values here are redacted
# placeholders from the original source).
admins = {'jasmine':'<PASSWORD>','david':'<PASSWORD>'}
# Student roster: name -> list of numeric grades.
students = {'Alex':[87,88,98],
            'Sally':[88,67,93],
            'Nboke':[90,88,78]}
# Nutrition table: food name -> list of numeric values. viewnutrition()
# reads the first entry as calories; the meaning of the remaining entries
# is not established anywhere in this file -- TODO confirm.
food = {'chicken':[90, 0, 60, 100, 0, 22]}
#now define functions
def viewnutrition(items=None):
    """Print the calorie count for each food in the nutrition table.

    Args:
        items: optional mapping of food name -> nutrition value list;
            defaults to the module-level ``food`` table, so the existing
            no-argument call site keeps working.

    Bug fixed: the original did ``foods[food,0]`` -- indexing the *key
    string* with a tuple -- which raised TypeError on every call. The
    stray debug ``print("hi")`` was removed as well.
    """
    if items is None:
        items = food
    for name in items:
        # By convention in this module, the first entry of the nutrition
        # list is the calorie count.
        calories = items[name][0]
        print(name + ': ' + str(calories))
def enterGrades():
    """Prompt for a student name and a grade; record it if the student exists."""
    name = input('Student name: ')
    grade = input('Grade: ')
    if name not in students:
        print('Student not found. Please check your spelling or go back and add if new.')
        return
    print('Adding grade for' + name)  # original omits the space before the name
    students[name].append(float(grade))  # stored as float, so it displays a .0
    print(str(name) + ' now has these grades:')
    print(students[name])
def removeStudent():
    """Prompt for a name and delete that student from the roster."""
    name = input('Who do you want to remove? ')
    if name not in students:
        print('Student not found.')
        return
    print('Removing ' + name)
    del students[name]
    print(students)  # show the remaining roster for confirmation
def averageStudents():
    """Print each student's name alongside the mean of their grades."""
    for name, grades in students.items():
        print(name, ' average ', s.mean(grades))
def main():
    """Display the menu once, read the user's choice, and run that action."""
    print("User: " + login)
    print("""
    Welcome to the Grade Tracker
    [1] - Enter Grades
    [2] - Remove Student
    [3] - Student Averages
    [4] - Exit
    [5] - View Nutrition Info
    """)
    # Map menu choices to their actions; exit() terminates the program,
    # matching the original option 4.
    actions = {
        '1': enterGrades,
        '2': removeStudent,
        '3': averageStudents,
        '4': exit,
        '5': viewnutrition,
    }
    choice = input('What would you like to do? (Enter a number) ')
    handler = actions.get(choice)
    if handler is not None:
        handler()
    else:
        # The caller's `while True: main()` loop reprompts automatically.
        print('Valid option not selected.')
# Single-shot login: on bad credentials the program prints a message and
# exits; on success the menu loop runs until the user picks Exit.
login = input('User: ')
password = input('Password: ')
if login not in admins:
    print('Invalid user.')
elif admins[login] != password:
    print('Invalid password.')
else:
    print('Welcome,', login)
    # now run the menu loop
    while True:
        main()
| 3.875 | 4 |
opacus/tests/dpdataloader_test.py | techthiyanes/opacus | 0 | 12767074 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from opacus.data_loader import DPDataLoader
from torch.utils.data import TensorDataset
class DPDataLoaderTest(unittest.TestCase):
    """Checks that DPDataLoader can collate empty (zero-size) batches."""

    def setUp(self):
        self.data_size = 10
        self.dimension = 7
        self.num_classes = 11

    def _first_batch(self, dataset):
        # Helper: wrap the dataset in a DPDataLoader with a tiny sample
        # rate and pull a single batch from it.
        loader = DPDataLoader(dataset, sample_rate=1e-5)
        return next(iter(loader))

    def test_collate_classes(self):
        features = torch.randn(self.data_size, self.dimension)
        labels = torch.randint(low=0, high=self.num_classes,
                               size=(self.data_size,))
        x_b, y_b = self._first_batch(TensorDataset(features, labels))
        # With sample_rate=1e-5 the batch is expected to be empty.
        self.assertEqual(x_b.size(0), 0)
        self.assertEqual(y_b.size(0), 0)

    def test_collate_tensor(self):
        features = torch.randn(self.data_size, self.dimension)
        (batch,) = self._first_batch(TensorDataset(features))
        self.assertEqual(batch.size(0), 0)
| 2.21875 | 2 |
fft_conv_pytorch/__init__.py | alexhagen/fft-conv-pytorch | 0 | 12767075 | from fft_conv_pytorch.fft_conv import FFTConv1d, FFTConv2d, FFTConv3d, fft_conv
| 1.117188 | 1 |
service/model.py | eifuentes/api-imagenet-1k | 0 | 12767076 | import json
import logging
import requests
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as vtransforms
from torchvision.models import squeezenet1_0, squeezenet1_1
# Preprocessing geometry: images are rescaled to RESCALE_SIZE on the short
# side, then center-cropped to CROP_SIZE x CROP_SIZE.
RESCALE_SIZE = 256
CROP_SIZE = 224
# Path to the ImageNet class-index JSON, resolved relative to the CWD.
IMAGENET_CLASS_MAP = 'imagenet_class_index.json'

logger = logging.getLogger('app')


def _fetch_imagenet_class_map():
    """Load the ImageNet class-index JSON and return {index: class_name}.

    Raises:
        RuntimeError: if the file cannot be read or parsed (with the
            original error attached as ``__cause__``).
    """
    try:
        with open(IMAGENET_CLASS_MAP, 'r') as f:
            class_map = json.load(f)
        logger.info('successfully loaded imagenet class map')
    except Exception as exc:
        # Bug fixed: the original did `raise(f'...')`, i.e. raised a bare
        # string, which itself fails with "exceptions must derive from
        # BaseException". Raise a real exception and chain the root cause.
        raise RuntimeError(
            f'unable to retrieve class map from {IMAGENET_CLASS_MAP}') from exc
    # JSON maps "idx" -> [wnid, human_label]; keep only the human label.
    return {int(i): str(j[1]) for i, j in class_map.items()}
def _maybe_optimize(model):
    """Best-effort JIT optimization of `model`.

    Returns the traced model on success, or the original model unchanged if
    tracing is unavailable or fails. The example input shape matches the
    pipeline's 3 x CROP_SIZE x CROP_SIZE images with a batch dimension.
    """
    try:
        from torch.jit import trace
        model = trace(model, example_inputs=torch.rand(1, 3, 224, 224))
        logger.info('successfully optimized PyTorch model using JIT tracing')
    except ImportError:
        logger.warning('unable to leverage torch.jit.trace optimizations')
    except Exception:
        # Bug fixed: only ImportError was caught before, so a tracing
        # failure (e.g. data-dependent control flow in the model) crashed
        # the service. "Maybe" optimize means failures must fall back.
        logger.warning('torch.jit tracing failed; using unoptimized model',
                       exc_info=True)
    return model
class ImageNetEvaluator(nn.Module):
    """Base class that classifies a single image into ImageNet labels.

    Subclasses provide the backbone network via ``_fetch_model``. The
    input to ``forward`` is expected to be a PIL image (the transform
    pipeline starts with ``Resize``/``CenterCrop`` before ``ToTensor``).
    """

    def __init__(self, device, optimize=False):
        super().__init__()
        self.device = device
        self.optimize = optimize
        # Standard ImageNet preprocessing: rescale, center-crop, tensorize,
        # then normalize with the canonical ImageNet channel statistics.
        normalize = vtransforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
        )
        self.transform = vtransforms.Compose([
            vtransforms.Resize(RESCALE_SIZE),
            vtransforms.CenterCrop((CROP_SIZE, CROP_SIZE)),
            vtransforms.ToTensor(),
            normalize,
        ])
        model = self._fetch_model()
        self.model = model.to(self.device).eval()
        if self.optimize:
            self.model = _maybe_optimize(model)
        self.class_map = _fetch_imagenet_class_map()

    def _fetch_model(self):
        # Subclass hook: return the (pretrained) backbone network.
        raise NotImplementedError

    def forward(self, x):
        """Classify one image; returns ``(class_name, probability)``."""
        x = self.transform(x).to(self.device)
        if len(x.size()) != 3:
            raise ValueError('number dimensions of x must be 3')
        with torch.no_grad():
            logits = self.model(x.unsqueeze(0))
        # log_softmax followed by exp yields the class probabilities.
        log_probs = F.log_softmax(logits, dim=1)
        top_prob, top_label = torch.max(log_probs.detach().exp(), dim=1)
        predicted_class = self.class_map[top_label.item()]
        return predicted_class, top_prob.item()
class SqueezeNetV1Evaluator(ImageNetEvaluator):
    """ImageNet evaluator backed by a pretrained SqueezeNet 1.0."""

    def _fetch_model(self):
        # Downloads pretrained ImageNet weights on first use.
        return squeezenet1_0(pretrained=True)
class SqueezeNetV2Evaluator(ImageNetEvaluator):
    """ImageNet evaluator backed by a pretrained SqueezeNet 1.1."""

    def _fetch_model(self):
        # Downloads pretrained ImageNet weights on first use.
        return squeezenet1_1(pretrained=True)
| 2.46875 | 2 |
pubsub/pubsub-pipe-image/pubsub-to-bigquery.py | eyenAFS/TwitterKubeBQ | 0 | 12767077 | <gh_stars>0
#!/usr/bin/env python
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script grabs tweets from a PubSub topic, and stores them in BiqQuery
using the BigQuery Streaming API.
"""
import base64
import datetime
import json
import os
import time
import utils
# Get the project ID and pubsub topic from the environment variables set in
# the 'bigquery-controller.yaml' manifest.
PROJECT_ID = os.environ['PROJECT_ID']
PUBSUB_TOPIC = os.environ['PUBSUB_TOPIC']
NUM_RETRIES = 3
def fqrn(resource_type, project, resource):
    """Return a fully qualified Cloud Pub/Sub resource name,
    e.g. projects/<project>/topics/<topic>."""
    parts = ("projects", project, resource_type, resource)
    return "/".join(parts)
def create_subscription(client, project_name, sub_name):
    """Creates a new subscription named `sub_name` on PUBSUB_TOPIC.

    `client` is an authorized Pub/Sub API client; the subscription is
    created under `project_name`. Raises (propagates) an apiclient error
    if the subscription already exists -- the caller handles that.
    """
    print "using pubsub topic: %s" % PUBSUB_TOPIC
    name = get_full_subscription_name(project_name, sub_name)
    # The request body only needs the topic this subscription attaches to.
    body = {'topic': PUBSUB_TOPIC}
    subscription = client.projects().subscriptions().create(
        name=name, body=body).execute(num_retries=NUM_RETRIES)
    print 'Subscription {} was created.'.format(subscription['name'])
def get_full_subscription_name(project, subscription):
    """Returns the fully qualified subscription name
    ('projects/<project>/subscriptions/<subscription>')."""
    return fqrn('subscriptions', project, subscription)
def pull_messages(client, project_name, sub_name):
    """Pulls up to BATCH_SIZE messages from a given subscription.

    Returns a list of base64-decoded message payloads, acknowledging every
    received message. Returns None (implicitly) if the pull request itself
    failed -- NOTE(review): callers that distinguish None from [] should be
    aware of this asymmetry.
    """
    BATCH_SIZE = 50
    tweets = []
    subscription = get_full_subscription_name(project_name, sub_name)
    # Block until messages are available (returnImmediately=False), but
    # never return more than BATCH_SIZE at once.
    body = {
        'returnImmediately': False,
        'maxMessages': BATCH_SIZE
    }
    try:
        resp = client.projects().subscriptions().pull(
            subscription=subscription, body=body).execute(
                num_retries=NUM_RETRIES)
    except Exception as e:
        # Best-effort: log, back off briefly, and let the caller retry.
        print "Exception: %s" % e
        time.sleep(0.5)
        return
    receivedMessages = resp.get('receivedMessages')
    if receivedMessages is not None:
        ack_ids = []
        for receivedMessage in receivedMessages:
            message = receivedMessage.get('message')
            if message:
                # Payloads arrive base64url-encoded; decode before use.
                tweets.append(
                    base64.urlsafe_b64decode(str(message.get('data'))))
            ack_ids.append(receivedMessage.get('ackId'))
        # Acknowledge everything we pulled so it is not redelivered.
        ack_body = {'ackIds': ack_ids}
        client.projects().subscriptions().acknowledge(
            subscription=subscription, body=ack_body).execute(
                num_retries=NUM_RETRIES)
    return tweets
def write_to_bq(pubsub, sub_name, bigquery):
    """Write the data to BigQuery in small chunks.

    Repeatedly pulls tweets from the Pub/Sub subscription until CHUNK
    tweets are buffered, then streams them into the BigQuery table named
    by the BQ_DATASET/BQ_TABLE environment variables. Stops after
    count_max insertion batches.
    """
    tweets = []
    CHUNK = 50  # The size of the BigQuery insertion batch.
    # If no data on the subscription, the time to sleep in seconds
    # before checking again.
    WAIT = 2
    tweet = None
    mtweet = None
    count = 0
    count_max = 50000
    while count < count_max:
        while len(tweets) < CHUNK:
            twmessages = pull_messages(pubsub, PROJECT_ID, sub_name)
            if twmessages:
                for res in twmessages:
                    try:
                        tweet = json.loads(res)
                    except Exception, bqe:
                        # NOTE(review): on a parse failure `tweet` keeps its
                        # previous value, so the stale tweet is massaged and
                        # appended again below -- confirm this is intended.
                        print bqe
                    # First do some massaging of the raw data
                    mtweet = utils.parse_zipcodes(utils.cleanup(tweet))
                    # We only want to write tweets to BigQuery; we'll skip
                    # 'delete' and 'limit' information.
                    if 'delete' in mtweet:
                        continue
                    if 'limit' in mtweet:
                        continue
                    tweets.append(mtweet)
            else:
                # pause before checking again
                print 'sleeping...'
                time.sleep(WAIT)
        # Stream the buffered chunk into BigQuery, then reset the buffer.
        response = utils.bq_data_insert(bigquery, PROJECT_ID, os.environ['BQ_DATASET'],
                                        os.environ['BQ_TABLE'], tweets)
        tweets = []
        count += 1
        if count % 25 == 0:
            # Periodic progress report with the last insertion response.
            print ("processing count: %s of %s at %s: %s" %
                   (count, count_max, datetime.datetime.now(), response))
if __name__ == '__main__':
    # Derive a per-topic subscription name ("tweets-<topic>") from the
    # fully qualified PUBSUB_TOPIC.
    topic_info = PUBSUB_TOPIC.split('/')
    topic_name = topic_info[-1]
    sub_name = "tweets-%s" % topic_name
    print "starting write to BigQuery...."
    credentials = utils.get_credentials()
    bigquery = utils.create_bigquery_client(credentials)
    pubsub = utils.create_pubsub_client(credentials)
    try:
        # TODO: check if subscription exists first
        # (the broad except below swallows the failure -- presumably an
        # "already exists" error -- and reuses the existing subscription;
        # confirm other errors should not abort startup)
        subscription = create_subscription(pubsub, PROJECT_ID, sub_name)
    except Exception, e:
        print e
    write_to_bq(pubsub, sub_name, bigquery)
    print 'exited write loop'
| 2.375 | 2 |
sis_provisioner/tests/dao/test_pws.py | uw-it-aca/bridge-sis-provisioner | 0 | 12767078 | <filename>sis_provisioner/tests/dao/test_pws.py
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.test import TestCase
from freezegun import freeze_time
from sis_provisioner.dao import DataFailureException
from sis_provisioner.dao.pws import (
get_person, is_prior_netid, get_updated_persons)
from sis_provisioner.tests import fdao_pws_override
@fdao_pws_override
class TestPwsDao(TestCase):
    """Exercises the PWS DAO against the fdao_pws_override test fixtures.

    The literal values asserted below (regids, names, employee ids) come
    from those fixture files; note some appear redacted here as
    <EMAIL>/<NAME> placeholders.
    """

    def test_get_person(self):
        # Basic identity attributes of the 'faculty' fixture person.
        person = get_person("faculty")
        self.assertIsNotNone(person)
        self.assertEqual(person.uwnetid, 'faculty')
        self.assertEqual(person.uwregid,
                         "10000000000000000000000000000005")
        self.assertEqual(person.email_addresses[0],
                         "<EMAIL>")
        # Exactly one prior netid/regid pair is recorded for this person.
        self.assertEqual(len(person.prior_uwnetids), 1)
        self.assertEqual(len(person.prior_uwregids), 1)
        self.assertEqual(person.prior_uwnetids[0], "tyler")
        self.assertEqual(person.prior_uwregids[0],
                         "10000000000000000000000000000000")
        self.assertEqual(person.display_name, "<NAME>")
        self.assertEqual(person.preferred_first_name, "<NAME>")
        self.assertEqual(person.preferred_surname, "Faculty")
        self.assertEqual(person.employee_id, "000000005")
        # Unknown or malformed netids yield None rather than raising.
        self.assertIsNone(get_person("not_in_pws"))
        self.assertIsNone(get_person("0 in valid uw netid"))
        # Employment-state checks across several fixture personas.
        person = get_person("faculty")
        self.assertTrue(person.is_emp_state_current())
        person = get_person("ellen")
        self.assertTrue(person.is_emp_state_current())
        person = get_person("retiree")
        self.assertTrue(person.is_emp_state_current())
        person = get_person("leftuw")
        self.assertFalse(person.is_emp_state_current())
        person = get_person("alumni")
        self.assertFalse(person.is_emp_state_current())

    def test_is_prior_netid(self):
        # "tyler" is listed as faculty's prior netid in the fixtures.
        person = get_person("faculty")
        self.assertTrue(is_prior_netid("tyler", person))

    @freeze_time("2019-09-01 20:30:00")
    def test_get_updated_persons(self):
        # At the frozen time, a 60-minute window covers two fixture updates.
        persons = get_updated_persons(60)
        self.assertEqual(len(persons), 2)
        self.assertEqual(persons[0].uwnetid, "javerage")
        self.assertEqual(persons[1].uwnetid, "faculty")
        # A 30-minute window triggers the fixture's error response.
        self.assertRaises(DataFailureException,
                          get_updated_persons, 30)
| 2.0625 | 2 |
doc_v3/views.py | julics129/clinic_v3 | 0 | 12767079 | <reponame>julics129/clinic_v3
from django.shortcuts import render
from django import forms
from .forms import appointment_form , contact_form
from .models import contact, appointment, department
import json
# Create your views here.
from django.http import HttpResponse
def home(request):
return render(request, 'new.html')
def index(request):
formsuccess=''
form1success=''
try:
if request.method == 'POST':
print('hi')
form=appointment_form(request.POST)
form1=contact_form(request.POST)
if form.is_valid():
formsuccess='form_ok'
print('form valid')
form.save()
else:
formsuccess='form_not_ok'
print("Form Error :\n")
print(form.errors)
if form1.is_valid():
name_post = request.POST['Name']
print(name_post)
allob = contact.objects.all().filter(Name=name_post).count()
print(allob)
if allob == 0:
form1success='form1_ok'
print('form1 valid')
form1.save()
else:
form1success='Repeat_user'
else:
form1success='form1_not_ok'
print("Form1 Error :\n")
print(form1.errors)
else:
print('h1')
form = appointment_form()
form1=contact_form()
return render(request, 'index.html',{'form':appointment_form,'form1':contact_form, 'formsuccess':formsuccess, 'form1success':form1success})
except Exception as e:
print(e)
formsuccess='Error'
return render(request, 'index.html',{'form':appointment_form,'form1':contact_form,'formsuccess':formsuccess, 'form1success':form1success})
def all_contact(request):
allob = contact.objects.all()
for i in allob:
print(i.Name)
return render(request, 'data_retrive_contact.html',{'allob':allob})
def all_appo(request):
AllAppo = appointment.objects.all()
for i in AllAppo:
print(i)
return render(request, 'data_retrive_appo.html', {'AllAppo':AllAppo})
def count_appo(request):
if request.method == 'GET':
date_post = request.GET['app_date']
print(date_post)
date_count=appointment.objects.all().filter(AppointmentDate1=date_post).count()
print('date')
j='test data'
print(date_count)
data = {'d1':date_count,'d2':j}
return HttpResponse(json.dumps(data))
def email_count(request):
print('hello')
if request.method == 'GET':
email_1 = request.GET['cont_email']
print('new')
print(email_1)
email_count1=contact.objects.all().filter(Email=email_1).count()
print(email_count1)
data = {'d1':email_count1, }
print('hehehe')
return HttpResponse(json.dumps(data))
def department_doc(request):
if request.method == 'GET':
dep_post = request.GET['dep_name']
print(dep_post)
print('h8')
dep_data = department.objects.all().filter(department_name=dep_post)
for i in dep_data:
print(i.doctor_name)
doc_name = i.doctor_name
data = {'d1':doc_name,}
print(data)
return HttpResponse(json.dumps(data)) | 2.296875 | 2 |
AIS/tests/test_S4_SR_class.py | juliotux/AIS | 0 | 12767080 | # -*- coding: utf-8 -*-
"""SPARC4 spectral response tests.
This script tests the operation of the SPARC4 spectral response classes.
"""
import os
import numpy as np
import pandas as pd
import pytest
from AIS.SPARC4_Spectral_Response import (
Abstract_SPARC4_Spectral_Response,
Concrete_SPARC4_Spectral_Response_1,
Concrete_SPARC4_Spectral_Response_2,
Concrete_SPARC4_Spectral_Response_3,
Concrete_SPARC4_Spectral_Response_4,
)
wavelength_interval = range(350, 1150, 50)
n = len(wavelength_interval)
specific_flux = np.ones((4, n))
ccd_transmitance_c1 = np.asarray(
pd.read_excel(os.path.join("SPARC4_Spectral_Response", "Channel 1", "ccd.xlsx"))
)[1:, 1]
ccd_transmitance_c1 = np.asarray([float(value) for value in ccd_transmitance_c1])
ccd_transmitance_c2 = np.asarray(
pd.read_excel(os.path.join("SPARC4_Spectral_Response", "Channel 2", "ccd.xlsx"))
)[1:, 1]
ccd_transmitance_c2 = np.asarray([float(value) for value in ccd_transmitance_c2])
ccd_transmitance_c3 = np.asarray(
pd.read_excel(os.path.join("SPARC4_Spectral_Response", "Channel 3", "ccd.xlsx"))
)[1:, 1]
ccd_transmitance_c3 = np.asarray([float(value) for value in ccd_transmitance_c3])
ccd_transmitance_c4 = np.asarray(
pd.read_excel(os.path.join("SPARC4_Spectral_Response", "Channel 4", "ccd.xlsx"))
)[1:, 1]
ccd_transmitance_c4 = np.asarray([float(value) for value in ccd_transmitance_c4])
# -------------------------------------------------------------------------------------------------------------
@pytest.fixture
def abs_s4_sr():
chc = Abstract_SPARC4_Spectral_Response()
chc.write_specific_flux(specific_flux, wavelength_interval)
return chc
@pytest.fixture
def c1_s4_sr():
chc = Concrete_SPARC4_Spectral_Response_1()
chc.write_specific_flux(specific_flux, wavelength_interval)
return chc
@pytest.fixture
def c2_s4_sr():
chc = Concrete_SPARC4_Spectral_Response_2()
chc.write_specific_flux(specific_flux, wavelength_interval)
return chc
@pytest.fixture
def c3_s4_sr():
chc = Concrete_SPARC4_Spectral_Response_3()
chc.write_specific_flux(specific_flux, wavelength_interval)
return chc
@pytest.fixture
def c4_s4_sr():
chc = Concrete_SPARC4_Spectral_Response_4()
chc.write_specific_flux(specific_flux, wavelength_interval)
return chc
# -------------------- Initialize the class -----------------------
def test_specific_flux_abs(abs_s4_sr):
vec = abs_s4_sr.get_specific_flux()
boolean_test = vec == specific_flux
assert boolean_test.all()
def test_specific_flux_c1(c1_s4_sr):
vec = c1_s4_sr.get_specific_flux()
boolean_test = vec == specific_flux
assert boolean_test.all()
# -------------------- Channel ID -----------------------
def test_channel_ID_abs(abs_s4_sr):
assert abs_s4_sr.get_channel_ID() == 0
def test_channel_ID_c1(c1_s4_sr):
assert c1_s4_sr.get_channel_ID() == 1
def test_channel_ID_c2(c2_s4_sr):
assert c2_s4_sr.get_channel_ID() == 2
def test_channel_ID_c3(c3_s4_sr):
assert c3_s4_sr.get_channel_ID() == 3
def test_channel_ID_c4(c4_s4_sr):
assert c4_s4_sr.get_channel_ID() == 4
# -------------------- Apply spectral response -----------------------
# def test_calibration_wheel(abs_s4_sr):
# abs_s4_sr.apply_calibration_wheel()
# vec = abs_s4_sr.get_specific_flux()
# boolean_test = vec == specific_flux
# assert boolean_test.all()
# def test_retarder(abs_s4_sr):
# abs_s4_sr.apply_retarder()
# vec = abs_s4_sr.get_specific_flux()
# boolean_test = vec == specific_flux
# assert boolean_test.all()
# def test_analyzer(abs_s4_sr):
# abs_s4_sr.apply_analyser()
# vec = abs_s4_sr.get_specific_flux()
# boolean_test = vec == specific_flux
# assert boolean_test.all()
# def test_collimator(abs_s4_sr):
# abs_s4_sr.apply_analyser()
# abs_s4_sr.apply_collimator()
# assert np.allclose(abs_s4_sr.specific_ordinary_ray, specific_flux[0, :])
# assert np.allclose(abs_s4_sr.specific_extra_ordinary_ray, specific_flux[0, :])
# def test_dichroic_abs(abs_s4_sr):
# abs_s4_sr.apply_analyser()
# abs_s4_sr.apply_dichroic()
# def test_dichroic_c1(c1_s4_sr):
# c1_s4_sr.apply_analyser()
# c1_s4_sr.apply_dichroic()
# def test_dichroic_c2(c2_s4_sr):
# c2_s4_sr.apply_analyser()
# c2_s4_sr.apply_dichroic()
# def test_dichroic_c3(c3_s4_sr):
# c3_s4_sr.apply_analyser()
# c3_s4_sr.apply_dichroic()
# def test_dichroic_c4(c4_s4_sr):
# c4_s4_sr.apply_analyser()
# c4_s4_sr.apply_dichroic()
# def test_camera_abs(abs_s4_sr):
# abs_s4_sr.apply_analyser()
# abs_s4_sr.apply_camera()
# assert np.allclose(abs_s4_sr.specific_ordinary_ray, specific_flux[0, :])
# assert np.allclose(abs_s4_sr.specific_extra_ordinary_ray, specific_flux[0, :])
# def test_camera_c1(c1_s4_sr):
# c1_s4_sr.apply_analyser()
# c1_s4_sr.apply_camera()
# assert np.allclose(c1_s4_sr.specific_ordinary_ray, specific_flux[0, :])
# assert np.allclose(c1_s4_sr.specific_extra_ordinary_ray, specific_flux[0, :])
# def test_camera_c2(c2_s4_sr):
# c2_s4_sr.apply_analyser()
# c2_s4_sr.apply_camera()
# assert np.allclose(c2_s4_sr.specific_ordinary_ray, specific_flux[0, :])
# assert np.allclose(c2_s4_sr.specific_extra_ordinary_ray, specific_flux[0, :])
# def test_camera_c3(c3_s4_sr):
# c3_s4_sr.apply_analyser()
# c3_s4_sr.apply_camera()
# assert np.allclose(c3_s4_sr.specific_ordinary_ray, specific_flux[0, :])
# assert np.allclose(c3_s4_sr.specific_extra_ordinary_ray, specific_flux[0, :])
# def test_camera_c4(c4_s4_sr):
# c4_s4_sr.apply_analyser()
# c4_s4_sr.apply_camera()
# assert np.allclose(c4_s4_sr.specific_ordinary_ray, specific_flux[0, :])
# assert np.allclose(c4_s4_sr.specific_extra_ordinary_ray, specific_flux[0, :])
# def test_ccd_abs(abs_s4_sr):
# abs_s4_sr.apply_analyser()
# abs_s4_sr.apply_ccd()
# assert np.allclose(abs_s4_sr.specific_ordinary_ray, specific_flux[0, :])
# assert np.allclose(abs_s4_sr.specific_extra_ordinary_ray, specific_flux[0, :])
# def test_ccd_c1(c1_s4_sr):
# new_specific_flux = specific_flux[0, :] * ccd_transmitance_c1 / 100
# c1_s4_sr.apply_analyser()
# c1_s4_sr.apply_ccd()
# assert np.allclose(c1_s4_sr.specific_ordinary_ray, new_specific_flux)
# assert np.allclose(c1_s4_sr.specific_extra_ordinary_ray, new_specific_flux)
# def test_ccd_c2(c2_s4_sr):
# new_specific_flux = specific_flux[0, :] * ccd_transmitance_c2 / 100
# c2_s4_sr.apply_analyser()
# c2_s4_sr.apply_ccd()
# assert np.allclose(c2_s4_sr.specific_ordinary_ray, new_specific_flux)
# assert np.allclose(c2_s4_sr.specific_extra_ordinary_ray, new_specific_flux)
# def test_ccd_c3(c3_s4_sr):
# new_specific_flux = specific_flux[0, :] * ccd_transmitance_c3 / 100
# c3_s4_sr.apply_analyser()
# c3_s4_sr.apply_ccd()
# assert np.allclose(c3_s4_sr.specific_ordinary_ray, new_specific_flux)
# assert np.allclose(c3_s4_sr.specific_extra_ordinary_ray, new_specific_flux)
# def test_ccd_c4(c4_s4_sr):
# new_specific_flux = specific_flux[0, :] * ccd_transmitance_c4 / 100
# c4_s4_sr.apply_analyser()
# c4_s4_sr.apply_ccd()
# assert np.allclose(c4_s4_sr.specific_ordinary_ray, new_specific_flux)
# assert np.allclose(c4_s4_sr.specific_extra_ordinary_ray, new_specific_flux)
# --------------------write specific_flux--------------------
def test_write_specific_flux():
specific_flux = np.asanyarray(
[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]
)
wavelength_interval = range(350, 1150, 50)
s4_sr = Abstract_SPARC4_Spectral_Response()
s4_sr.write_specific_flux(specific_flux, wavelength_interval)
boolean_test = s4_sr.specific_flux == specific_flux
assert boolean_test.all()
# ---------------------- get_specific_flux -----------------------------
def test_get_specific_flux(abs_s4_sr):
vec = abs_s4_sr.get_specific_flux()
boolean_test = vec.all() == specific_flux.all()
assert boolean_test.all()
# ----------------------- read_spreadsheet---------------------------
def test_read_spreadsheet_calibration_wheel(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "calibration_wheel.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_retarder(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "retarder.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_analyser_ordinary(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "analyser_ordinary.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_analyser_extra_ordinary(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "analyser_extra_ordinary.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_collimator(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "collimator.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_dichroic_1(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 0", "dichroic 1.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_dichroic_2(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 0", "dichroic 2.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_camera(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 0", "camera.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_ccd(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 0", "ccd.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_dichroic_1_1(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 1", "dichroic 1.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_dichroic_1_2(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 1", "dichroic 2.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_camera_1(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 1", "camera.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_ccd_1(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 1", "ccd.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_dichroic_2_1(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 2", "dichroic 1.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_dichroic_2_2(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 2", "dichroic 2.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_camera_2(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 2", "camera.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_ccd_2(abs_s4_sr):
file = "./SPARC4_Spectral_Response/Channel 2/ccd.xlsx"
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_dichroic_3_1(abs_s4_sr):
file = "./SPARC4_Spectral_Response/Channel 3/dichroic 2.xlsx"
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_dichroic_3_2(abs_s4_sr):
file = "./SPARC4_Spectral_Response/Channel 3/dichroic 2.xlsx"
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_camera_3(abs_s4_sr):
file = "./SPARC4_Spectral_Response/Channel 3/camera.xlsx"
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_ccd_3(abs_s4_sr):
file = "./SPARC4_Spectral_Response/Channel 3/ccd.xlsx"
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_dichroic_4_1(abs_s4_sr):
file = "./SPARC4_Spectral_Response/Channel 4/dichroic 1.xlsx"
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_dichroic_4_2(abs_s4_sr):
file = "./SPARC4_Spectral_Response/Channel 4/dichroic 2.xlsx"
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_camera_4(abs_s4_sr):
file = "./SPARC4_Spectral_Response/Channel 4/camera.xlsx"
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_ccd_4(abs_s4_sr):
file = "./SPARC4_Spectral_Response/Channel 4/ccd.xlsx"
abs_s4_sr._read_spreadsheet(file)
# ----------------------- miscelaneous ----------------------------
def test_multiply_matrices(abs_s4_sr):
a = np.ones((4, 4))
specific_flux = abs_s4_sr._multiply_matrices(a, a)
boolean_test = specific_flux == a
assert boolean_test.all()
def test_calculate_spline():
transmitance = np.ones((1, n))[0]
chc = Abstract_SPARC4_Spectral_Response()
chc.write_specific_flux(specific_flux, wavelength_interval)
new_transmitance = chc._calculate_spline(transmitance, wavelength_interval)
assert np.allclose(new_transmitance, transmitance)
# def test_get_specific_ordinary_ray(abs_s4_sr):
# abs_s4_sr.apply_analyser()
# ord_ray = abs_s4_sr.get_specific_ordinary_ray()
# assert np.allclose(ord_ray, specific_flux[0, :])
# def test_get_specific_extra_ordinary_ray(abs_s4_sr):
# abs_s4_sr.apply_analyser()
# eord_ray = abs_s4_sr.get_specific_extra_ordinary_ray()
# assert np.allclose(eord_ray, specific_flux[0, :])
| 2.40625 | 2 |
prettyqt/statemachine/mouseeventtransition.py | phil65/PrettyQt | 7 | 12767081 | <reponame>phil65/PrettyQt
from __future__ import annotations
from prettyqt import statemachine
from prettyqt.qt import QtStateMachine
QtStateMachine.QMouseEventTransition.__bases__ = (statemachine.EventTransition,)
class MouseEventTransition(QtStateMachine.QMouseEventTransition):
pass
| 1.515625 | 2 |
api/importer/importer/domain/product_line.py | manisharmagarg/qymatix | 0 | 12767082 | <gh_stars>0
from datetime import datetime
class ProductLine():
def __init__(self, name):
super().__init__()
self._name = name #Column(String(255), nullable=False, unique=True)
self._product_class_id = None #Column(ForeignKey('product_class.id'), nullable=False, index=True)
self._description = None #Column(LONGTEXT, nullable=False)
self._active = None #Column(TINYINT(1), nullable=False)
self._created = None #Column(DateTime, nullable=False)
self._number = None #Column(String(255), nullable=False)
self._serial = None #Column(String(255), nullable=False)
self._product_class = None #relationship('ProductLine')
@property
def name(self):
return self._name
@name.setter
def name(self, value: str):
self._name = value
@property
def product_class_id(self):
return self._product_class_id
@product_class_id.setter
def product_class_id(self, value: int):
self._product_class_id = value
@property
def description(self):
return self._description
@description.setter
def description(self, value: str):
self._description = value
@property
def active(self):
return self._active
@active.setter
def active(self, value: bool):
self._active = value
@property
def created(self):
return self._created
@created.setter
def created(self, value):
self._created = value
@property
def number(self):
return self._number
@number.setter
def number(self, value: str):
self._number = value
| 2.375 | 2 |
lst/12-functional.py | tilorenz/vorlesung-psu | 6 | 12767083 | from functools import partial,reduce
from math import sqrt
import inspect
def nargs(function):
print(inspect.getfullargspec(function))
def inc(x):
return x + 1
def compose(f, g):
return lambda x: f(g(x))
x = compose(inc, inc)
print(x(0))
def partial(f, arg0):
return lambda *args: f(arg0, *args)
def add(a,b):
return a+b
inc = partial(add, 1)
print(inc(0))
points = [(-0.3,0.4), (-0.3, -0.2),
(0.6,-0.4), (1, 1)]
def norm(N, point):
coords = map(lambda c: c ** N, point)
return sum(coords) ** (1/N)
max_distance = \
reduce(max,
filter(lambda d: d <= 1.0,
map(partial(norm, 2),
points)))
print(max_distance)
| 3.125 | 3 |
src/build/fuchsia/boot_data_test.py | uszhen/naiveproxy | 3 | 12767084 | #!/usr/bin/env python3
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import boot_data
import os
import unittest
from boot_data import _SSH_CONFIG_DIR, _SSH_DIR
class TestBootData(unittest.TestCase):
    """Tests for boot_data.ProvisionSSH."""

    def testProvisionSSHGeneratesFiles(self):
        """ProvisionSSH creates the key pair, authorized_keys and ssh_config."""
        fuchsia_authorized_keys_path = os.path.join(_SSH_DIR,
                                                    'fuchsia_authorized_keys')
        fuchsia_id_key_path = os.path.join(_SSH_DIR, 'fuchsia_ed25519')
        pub_keys_path = os.path.join(_SSH_DIR, 'fuchsia_ed25519.pub')
        ssh_config_path = os.path.join(_SSH_CONFIG_DIR, 'ssh_config')

        # Check if the keys exist before generating. If they do not, delete
        # them afterwards so the test leaves no state behind.
        authorized_key_before = os.path.exists(fuchsia_authorized_keys_path)
        id_keys_before = os.path.exists(fuchsia_id_key_path)
        pub_keys_before = os.path.exists(pub_keys_path)
        ssh_config_before = os.path.exists(ssh_config_path)
        ssh_dir_before = os.path.exists(_SSH_CONFIG_DIR)

        boot_data.ProvisionSSH()

        # Capture existence *after* provisioning but before cleanup below.
        authorized_key_after = os.path.exists(fuchsia_authorized_keys_path)
        id_keys_after = os.path.exists(fuchsia_id_key_path)
        pub_keys_after = os.path.exists(pub_keys_path)
        ssh_config_after = os.path.exists(ssh_config_path)

        if not authorized_key_before:
            os.remove(fuchsia_authorized_keys_path)
        if not id_keys_before:
            os.remove(fuchsia_id_key_path)
        if not pub_keys_before:
            os.remove(pub_keys_path)
        if not ssh_config_before:
            os.remove(ssh_config_path)
        if not ssh_dir_before:
            os.rmdir(_SSH_CONFIG_DIR)

        # Bug fix: the previous assertions re-wrapped the already-computed
        # booleans in os.path.exists(); exists(True)/exists(False) treats the
        # bool as file descriptor 1/0 and therefore (almost) always returned
        # True, so the test could never fail. Assert the captured booleans
        # directly, and also assert the previously-unchecked public key.
        self.assertTrue(authorized_key_after)
        self.assertTrue(id_keys_after)
        self.assertTrue(pub_keys_after)
        self.assertTrue(ssh_config_after)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 2.03125 | 2 |
A5T8SQLite.py | qasimy123/assignment5-CMPUT291 | 0 | 12767085 | <reponame>qasimy123/assignment5-CMPUT291<gh_stars>0
from util import connect
import time
# Select review comments for a single listing from the reviews table,
# ordered by review date (most recent first). No join is performed here.
FIND_RECENT_REVIEW = \
'''
select
r.comments
from
reviews r
where
r.listing_id = :listing_id
order by
date(r.date) desc;
'''
FIND_LISTING_HOST_AND_PRICE = \
'''
select
l.host_name,
l.price
from
listings l
where l.id = :listing_id;
'''
def main():
    """Prompt for a listing id and report its host, price and latest review."""
    listing_id = input("Enter the listing_id: ")

    review_row = find_recent_review(listing_id)
    if review_row is None:
        # No review found: print None in the Comment field below.
        review_row = [None]

    listing_row = find_listing(listing_id)
    if listing_row is None:
        print("Listing not found")
        return
    print("The host_name, rental_price and most recent review for given listing_id")
    print("\nHost Name: {}\nPrice: {}\nComment: {}".format(
        listing_row[0], listing_row[1], review_row[0]))
def find_recent_review(listing_id: str):
    """Return the most recent review-comment row for *listing_id*, or None."""
    cursor = connect().cursor()
    started = time.process_time()
    cursor.execute(FIND_RECENT_REVIEW, {"listing_id": listing_id})
    elapsed = time.process_time() - started
    print("Total time taken to find the review: {}s".format(elapsed))
    return cursor.fetchone()
def find_listing(listing_id: str):
    """Return (host_name, price) for *listing_id*, or None if not found."""
    cursor = connect().cursor()
    started = time.process_time()
    cursor.execute(FIND_LISTING_HOST_AND_PRICE, {"listing_id": listing_id})
    elapsed = time.process_time() - started
    print("Total time taken to find listing: {}s".format(elapsed))
    return cursor.fetchone()
if __name__ == "__main__":
    # Run the interactive lookup when executed as a script.
    main()
| 3.390625 | 3 |
imdb_scraper.py | hossainsadman/imdb-scraper | 0 | 12767086 | import numpy
import pandas
import requests
from bs4 import BeautifulSoup as bsoup
from time import sleep
from random import randint
# start and end of urls for the IMDb top-1000-movies search pages
URL_START = "https://www.imdb.com/search/title/?groups=top_1000&start="
URL_END = "&ref_=adv_nxt"

# data for each movie
titles = []
years = []
runtimes = []
ratings = []
metascores = []
votes = []
grosses = []

headers = {"Accept-Language": "en-US, en;q=0.5"}
# IMDb shows 50 results per page: start offsets 1, 51, 101, ..., 951
pages = numpy.arange(1, 1001, 50)

for page in pages:
    cur_page = requests.get(URL_START + str(page) + URL_END, headers=headers)
    soup = bsoup(cur_page.text, "html.parser")

    # find all divs containing data for each movie
    movie_divs = soup.find_all('div', class_='lister-item mode-advanced')

    for div in movie_divs:
        titles.append(div.h3.a.text)
        years.append(div.h3.find('span', class_='lister-item-year').text)
        runtimes.append(div.p.find('span', class_='runtime').text)
        ratings.append(float(div.strong.text))

        # metascore is missing for some movies; look the tag up only once
        # (the original code performed the same find() twice per movie)
        score_tag = div.find('span', class_='metascore')
        metascores.append(score_tag.text if score_tag else '-')

        # nv contains the class for both the votes and gross (if present) <span> tags
        nv = div.find_all('span', attrs={'name': 'nv'})
        votes.append(nv[0].text)
        grosses.append(nv[1].text if len(nv) > 1 else '-')

    # slow down crawling of the IMDb site to avoid disrupting website activity
    sleep(randint(2, 8))

movies = pandas.DataFrame({
    'movie': titles,
    'year': years,
    'runtime': runtimes,
    'imdb': ratings,
    'metascore': metascores,
    'votes': votes,
    'grossMillions': grosses,
})

# CLEANING DATA
# remove brackets from year and cast string to int
movies['year'] = movies['year'].str.extract('(\d+)').astype(int)
# remove ' min' from runtime and cast string to int
movies['runtime'] = movies['runtime'].str.extract('(\d+)').astype(int)
# convert metascore to numeric (int) and transform dashes into NaN values
# (the original comment here wrongly said "grossMillions")
movies['metascore'] = pandas.to_numeric(movies['metascore'], errors='coerce')
# remove commas from votes and cast string to int
movies['votes'] = movies['votes'].str.replace(',', '').astype(int)
# remove '$' and 'M' from grossMillions
movies['grossMillions'] = movies['grossMillions'].map(lambda x: x.lstrip('$').rstrip('M'))
# convert grossMillions to numeric (float) and transform dashes into NaN values
movies['grossMillions'] = pandas.to_numeric(movies['grossMillions'], errors='coerce')

movies.to_csv('movies.csv')
| 2.84375 | 3 |
yats/connector.py | GaryOma/yats | 0 | 12767087 | <filename>yats/connector.py<gh_stars>0
import re
import math
import logging
from datetime import timedelta, timezone
from multiprocessing import Manager
from multiprocessing.pool import ThreadPool
from functools import partial
from yats.custom_datetime import CustomDateTime as datetime
from yats.twitter_request import TwitterRequest
from yats.profile import Profile
from yats.tweet_set import TweetSet
from yats.requests_holder import RequestsHolder
from yats.iterable_queue import IterableQueue
# Earliest possible tweet date — used as the default lower search bound.
TWITTER_CREATION_DATE = datetime(2006, 3, 21, tzinfo=timezone.utc)

# Number of results requested per search query.
COUNT_QUERY = 20
# COUNT_QUERY = 1000
class Connector:
    """Scraping client: builds Twitter search queries and fetches tweets
    concurrently through TwitterRequest workers."""

    def __init__(self):
        pass

    def __repr__(self):
        return "<yats.Connector>"

    def profile(self, name, request=None):
        """Fetch and return the Profile for the account *name*."""
        request = TwitterRequest() if request is None else request
        request.get_profile_request(name)
        res = request.body
        profile_res = Profile(res, verbose=True)
        return profile_res

    def _create_query(self,
                      q: str = None,
                      words: list = None,
                      sentence: str = None,
                      words_or: list = None,
                      words_not: list = None,
                      hashtag: str = None,
                      from_account: str = None,
                      to_account: str = None,
                      mention: str = None,
                      min_replies: int = None,
                      min_likes: int = None,
                      min_retweets: int = None,
                      since: datetime = None,
                      until: datetime = None,
                      filter_links: bool = None,
                      filter_replies: bool = None):
        """Assemble a Twitter advanced-search query string from the given
        filters; returns the query without a trailing space."""
        if q is not None:
            query = f'{q} '
        else:
            query = ""
        if words is not None:
            # NOTE(review): this *overwrites* query (and any q above) instead
            # of appending — possibly intended to be `+=`; confirm.
            query = f'{" ".join(words)} '
        if sentence is not None:
            query += f'"{sentence}" '
        if words_or is not None:
            query += f'({" OR ".join(words_or)}) '
        if words_not is not None:
            query += f'{" ".join(["-"+x for x in words_not])} '
        if hashtag is not None:
            # Prepend '#' only when the caller omitted it.
            query += f'({"#"+hashtag if hashtag[0] != "#" else hashtag}) '
        if from_account is not None:
            query += f'(from:{from_account}) '
        if to_account is not None:
            query += f'(to:{to_account}) '
        if mention is not None:
            query += f'({mention}) '
        if min_replies is not None:
            query += f'min_replies:{min_replies} '
        if min_likes is not None:
            query += f'min_faves:{min_likes} '
        if min_retweets is not None:
            query += f'min_retweets:{min_retweets} '
        if filter_links is not None and filter_links:
            query += "-filter:links "
        if filter_replies is not None and filter_replies:
            query += "-filter:replies "
        if until is not None:
            query += f'until:{until.strftime("%Y-%m-%d")} '
        if since is not None:
            query += f'since:{since.strftime("%Y-%m-%d")} '
        # check if query finishes by a space
        if query[-1] == " ":
            query = query[:-1]
        return query

    def _create_payload(self, count, query=None, user_id=None):
        """Return the request parameter dict for the search/timeline API,
        optionally carrying a query string or a user id."""
        payload = {
            "include_profile_interstitial_type": "1",
            "include_blocking": "1",
            "include_blocked_by": "1",
            "include_followed_by": "1",
            "include_want_retweets": "1",
            "include_mute_edge": "1",
            "include_can_dm": "1",
            "include_can_media_tag": "1",
            "skip_status": "1",
            "cards_platform": "Web-12",
            "include_cards": "1",
            "include_ext_alt_text": "true",
            "include_quote_count": "true",
            "include_reply_count": "1",
            "tweet_mode": "extended",
            "include_entities": "true",
            "include_user_entities": "true",
            "include_ext_media_color": "true",
            "include_ext_media_availability": "true",
            "send_error_codes": "true",
            "simple_quoted_tweet": "true",
            "count": count,
            "query_source": "typed_query",
            "pc": "1",
            "spelling_corrections": "1",
            "ext": "mediaStats,highlightedLabel"
        }
        if query is not None:
            payload["q"] = query
        if user_id is not None:
            payload["userId"] = user_id
        return payload

    def _extract_since_until_from_q(self, q):
        """Pull `since:`/`until:` date operators out of *q*.

        Returns (since, until, stripped_query); each date is a UTC datetime
        or None when the operator is absent.
        """
        until = None
        regex = r"until:(\d{4}-\d{2}-\d{2})"
        se = re.search(regex, q)
        if se:
            until = (datetime.strptime(se.group(1), "%Y-%m-%d")
                     .replace(tzinfo=timezone.utc))
            q = re.sub(regex, "", q)
        since = None
        regex = r"since:(\d{4}-\d{2}-\d{2})"
        se = re.search(regex, q)
        if se:
            since = (datetime.strptime(se.group(1), "%Y-%m-%d")
                     .replace(tzinfo=timezone.utc))
            q = re.sub(regex, "", q)
        return since, until, q

    def _tweet_worker(self, requests, lock, task_queue, limit_cooldown,
                      max_round, payload):
        """Thread-pool worker: run one search request from *payload*.

        Re-enqueues a follow-up task (with the pagination cursor) while the
        request keeps yielding at least *limit_cooldown* tweets and the round
        limit is not reached; otherwise enqueues None to wind the queue down.
        """
        with lock:
            if len(requests) > 0:
                request = requests.pop()
            else:
                request = TwitterRequest()
        current_round = payload["round"] + 1
        del payload["round"]
        try:
            data, cursor = request.get_tweets_request(payload)
        except TypeError:
            # Dump the failing request for post-mortem and abort the process.
            request.to_file("error_request.json")
            exit(0)
        new_tweets = TweetSet(data)
        last_inserted = len(new_tweets)
        if last_inserted >= limit_cooldown and current_round < max_round:
            payload["cursor"] = cursor
            payload["round"] = current_round
            task_queue.put(payload)
        else:
            task_queue.put(None)
        with lock:
            requests.push(request)
        return new_tweets

    def _payload_generator(self,
                           verbosity,
                           count=COUNT_QUERY,
                           q=None,
                           since=None,
                           until=None,
                           **args):
        """Yield one search payload per day in the [since, until) range,
        defaulting to Twitter's creation date through tomorrow."""
        def_since = TWITTER_CREATION_DATE
        def_until = (datetime.now(timezone.utc)
                     + timedelta(days=1))
        if q is not None:
            # Dates embedded in q take precedence over keyword arguments.
            q_since, q_until, q = self._extract_since_until_from_q(q)
            since = q_since if q_since is not None else def_since
            until = q_until if q_until is not None else def_until
        else:
            since = def_since if since is None else since
            until = def_until if until is None else until
        beg_date = since
        end_date = beg_date + timedelta(days=1)
        print_str = (f"from {since.strftime('%Y-%m-%d')}"
                     f" to {until.strftime('%Y-%m-%d')}")
        if 0 <= verbosity <= 1:
            print(print_str)
        elif verbosity > 1:
            logging.info(print_str)
        while beg_date < until:
            query = self._create_query(q=q,
                                       since=beg_date,
                                       until=end_date,
                                       **args)
            payload = self._create_payload(query=query, count=count)
            payload["round"] = 0
            yield payload
            beg_date = end_date
            end_date += timedelta(days=1)

    def get_tweets_request(self,
                           verbosity,
                           max_round,
                           thread=20,
                           limit_cooldown=5,
                           **args):
        """Fetch tweets for the generated daily payloads with a thread pool
        and return them as a single TweetSet."""
        # initiating the initial task lisk for
        # the ThreadPool
        task_list = [task for task in self._payload_generator(verbosity,
                                                              **args)
                     ]
        # copying each tasks in the IterableQueue
        # thus "allowing" the threads to add
        # additional tasks (bit of a dirty hack)
        task_queue = IterableQueue(maxsize=len(task_list))
        for task in task_list:
            task_queue.put(task)
        # object that holds the open connections
        # couldn't do it with Queues because of the SSLContext
        # not pickable :'-(
        requests = RequestsHolder()
        for _ in range(thread):
            requests.push(TwitterRequest())
        # creation of the lock for the RequestHolder
        manager = Manager()
        lock = manager.Lock()
        # TweetSet to keep the fetched tweets
        tweets = TweetSet()
        # formatting variables
        task_format = int(math.log(len(task_list), 10)) + 1
        task_it = 0
        round_format = int(math.log(max_round, 10)) + 1
        round_size = len(task_list)
        next_round_size = round_size
        round_it = 0
        disp_str = ""
        try:
            with ThreadPool(thread) as p:
                for new_tweets in p.imap_unordered(
                        partial(self._tweet_worker, requests,
                                lock, task_queue, limit_cooldown,
                                max_round),
                        task_queue):
                    tweets.add(new_tweets)
                    disp_str = (
                        f"TWEETS={len(tweets):<6} | "
                        f"NEW={len(new_tweets):<2} | "
                        f"TASK={task_it:{task_format}}/"
                        f"{round_size:<{task_format}} | "
                        f"ROUND={round_it:{round_format}}/"
                        f"{max_round} | "
                        f"NEXT ROUND<={next_round_size:{task_format}} TASKS")
                    if 0 <= verbosity <= 1:
                        # verbosity 0: overwrite the same console line.
                        end_char = "\r" if verbosity == 0 else "\n"
                        print(disp_str, end=end_char)
                    elif verbosity > 1:
                        logging.info(disp_str)
                    if len(new_tweets) < limit_cooldown:
                        # This task's pagination is exhausted: next round shrinks.
                        next_round_size -= 1
                    task_it += 1
                    if task_it >= round_size:
                        task_it = 0
                        round_size = next_round_size
                        round_it += 1
            if 0 <= verbosity <= 1:
                print(disp_str)
        except KeyboardInterrupt:
            # Return whatever was collected when the user interrupts.
            if 0 <= verbosity <= 1:
                print(disp_str)
                print("Stopped by the user")
        return tweets

    def get_tweets_timeline(self, username, user_id=None):
        """Fetch timeline tweets for *username* (resolving user_id if needed)."""
        if user_id is None:
            request = TwitterRequest()
            profile = self.profile(username, request)
            user_id = profile.restid
        # NOTE(review): `profile` is only bound when user_id is None, so the
        # next line raises NameError when a user_id is supplied — confirm.
        logging.debug(f"Getting {profile.name}'s timeline tweets...")
        requests = RequestsHolder()
        requests.push(TwitterRequest())
        payload = self._create_payload(user_id=user_id)
        manager = Manager()
        lock = manager.Lock()
        # NOTE(review): _tweet_worker takes (requests, lock, task_queue,
        # limit_cooldown, max_round, payload) — this 3-argument call looks
        # stale and would raise TypeError; confirm against _tweet_worker.
        tweets = self._tweet_worker(requests, lock, payload)
        return tweets

    def get_tweets_user(self, username, verbosity, since=None, **args):
        """Fetch all (non-reply) tweets by *username* since their account
        creation date, unless an explicit *since* is given."""
        if since is not None:
            beg_date = since
        else:
            request = TwitterRequest()
            profile = self.profile(username, request)
            beg_date = profile.creation
        print_str = f"Getting {username}'s all tweets..."
        if 0 <= verbosity <= 1:
            print(print_str)
        elif verbosity > 1:
            logging.info(print_str)
        tweets = self.get_tweets_request(from_account=username,
                                         since=beg_date,
                                         filter_replies=True,
                                         verbosity=verbosity,
                                         **args)
        return tweets

    def request(self, query, **args):
        """Dispatch: '@name' fetches that user's tweets, anything else is a
        raw search query."""
        user_query = re.search(r"^@(\S+)$", query)
        if user_query:
            username = user_query.group(1)
            tweets = self.get_tweets_user(username=username, **args)
        else:
            tweets = self.get_tweets_request(q=query, **args)
        return tweets
| 2.3125 | 2 |
thesis/src/connect.py | srinath009/breach | 21 | 12767088 | import socket
import select
import logging
import binascii
from os import system, path
import sys
import signal
from iolibrary import kill_signal_handler, get_arguments_dict, setup_logger
import constants
# Install the shared Ctrl-C handler so the proxy shuts down cleanly.
signal.signal(signal.SIGINT, kill_signal_handler)
class Connector():
    '''
    Class that handles the network connection for breach.

    Python 2 code: data is handled as byte strings (ord()/chr()) and
    xrange is used.
    '''
    def __init__(self, args_dict):
        '''
        Initialize loggers and arguments dictionary.
        '''
        self.args_dict = args_dict
        if 'full_logger' not in args_dict:
            # verbosity < 4 silences the full log below ERROR level.
            if args_dict['verbose'] < 4:
                setup_logger('full_logger', 'full_breach.log', args_dict, logging.ERROR)
            else:
                setup_logger('full_logger', 'full_breach.log', args_dict)
            self.full_logger = logging.getLogger('full_logger')
            self.args_dict['full_logger'] = self.full_logger
        else:
            self.full_logger = args_dict['full_logger']
        if 'basic_logger' not in args_dict:
            if args_dict['verbose'] < 3:
                setup_logger('basic_logger', 'basic_breach.log', args_dict, logging.ERROR)
            else:
                setup_logger('basic_logger', 'basic_breach.log', args_dict)
            self.basic_logger = logging.getLogger('basic_logger')
            self.args_dict['basic_logger'] = self.basic_logger
        else:
            self.basic_logger = args_dict['basic_logger']
        if 'debug_logger' not in args_dict:
            if args_dict['verbose'] < 2:
                setup_logger('debug_logger', 'debug.log', args_dict, logging.ERROR)
            else:
                setup_logger('debug_logger', 'debug.log', args_dict)
            self.debug_logger = logging.getLogger('debug_logger')
            self.args_dict['debug_logger'] = self.debug_logger
        else:
            self.debug_logger = args_dict['debug_logger']
        return

    def log_data(self, data):
        '''
        Print hexadecimal and ASCII representation of data
        '''
        pad = 0
        output = []
        buff = '' # Buffer of 16 chars
        for i in xrange(0, len(data), constants.LOG_BUFFER):
            buff = data[i:i+constants.LOG_BUFFER]
            hex = binascii.hexlify(buff) # Hex representation of data
            pad = 32 - len(hex)
            txt = '' # ASCII representation of data
            for ch in buff:
                # Replace non-printable characters with a dot.
                if ord(ch)>126 or ord(ch)<33:
                    txt = txt + '.'
                else:
                    txt = txt + chr(ord(ch))
            output.append('%2d\t %s%s\t %s' % (i, hex, pad*' ', txt))
        return '\n'.join(output)

    def parse(self, data, past_bytes_endpoint, past_bytes_user, chunked_endpoint_header, chunked_user_header, is_response = False):
        '''
        Parse data and print header information and payload.

        Returns a 6-tuple: (log text, past_bytes_endpoint, past_bytes_user,
        chunked_endpoint_header, chunked_user_header, downgrade), where
        `downgrade` is True when a user ClientHello advertises a TLS version
        above constants.MAX_TLS_ALLOWED.
        '''
        lg = ['\n']
        downgrade = False

        # Check for defragmentation between packets
        if is_response:
            # Check if TLS record header was chunked between packets and append it to the beginning
            if chunked_endpoint_header:
                data = chunked_endpoint_header + data
                chunked_endpoint_header = None

            # Check if there are any remaining bytes from previous record
            if past_bytes_endpoint:
                lg.append('Data from previous TLS record: Endpoint\n')
                if past_bytes_endpoint >= len(data):
                    # Whole packet still belongs to the previous record.
                    lg.append(self.log_data(data))
                    lg.append('\n')
                    past_bytes_endpoint = past_bytes_endpoint - len(data)
                    return ('\n'.join(lg), past_bytes_endpoint, past_bytes_user, chunked_endpoint_header, chunked_user_header, downgrade)
                else:
                    lg.append(self.log_data(data[0:past_bytes_endpoint]))
                    lg.append('\n')
                    data = data[past_bytes_endpoint:]
                    past_bytes_endpoint = 0
        else:
            if chunked_user_header:
                data = chunked_user_header + data
                chunked_user_header = None

            if past_bytes_user:
                lg.append('Data from previous TLS record: User\n')
                if past_bytes_user >= len(data):
                    lg.append(self.log_data(data))
                    lg.append('\n')
                    past_bytes_user = past_bytes_user - len(data)
                    return ('\n'.join(lg), past_bytes_endpoint, past_bytes_user, chunked_endpoint_header, chunked_user_header, downgrade)
                else:
                    lg.append(self.log_data(data[0:past_bytes_user]))
                    lg.append('\n')
                    data = data[past_bytes_user:]
                    past_bytes_user = 0

        try:
            # TLS record header: content type, version major/minor, length.
            cont_type = ord(data[constants.TLS_CONTENT_TYPE])
            version = (ord(data[constants.TLS_VERSION_MAJOR]), ord(data[constants.TLS_VERSION_MINOR]))
            length = 256*ord(data[constants.TLS_LENGTH_MAJOR]) + ord(data[constants.TLS_LENGTH_MINOR])
        except Exception as exc:
            # Fewer than 5 bytes left: the TLS header itself is split across
            # TCP segments — stash it and prepend it to the next packet.
            self.full_logger.debug('Only %d remaining for next record, TLS header gets chunked' % len(data))
            self.full_logger.debug(exc)
            if is_response:
                chunked_endpoint_header = data
            else:
                chunked_user_header = data
            return ('', past_bytes_endpoint, past_bytes_user, chunked_endpoint_header, chunked_user_header, downgrade)

        if is_response:
            if cont_type in constants.TLS_CONTENT:
                self.basic_logger.debug('Endpoint %s Length: %d' % (constants.TLS_CONTENT[cont_type], length))
                if cont_type == 23:
                    # Content type 23 = application data; record its length.
                    with open('out.out', 'a') as f:
                        f.write('Endpoint application payload: %d\n' % length)
                        f.close()  # redundant: `with` already closes the file
            else:
                self.basic_logger.debug('Unassigned Content Type record (len = %d)' % len(data))
            lg.append('Source : Endpoint')
        else:
            if cont_type in constants.TLS_CONTENT:
                self.basic_logger.debug('User %s Length: %d' % (constants.TLS_CONTENT[cont_type], length))
                if cont_type == 22:
                    # Handshake record: flag a downgrade attempt when the
                    # advertised TLS version exceeds the allowed maximum.
                    if ord(data[constants.MAX_TLS_POSITION]) > constants.MAX_TLS_ALLOWED:
                        downgrade = True
                if cont_type == 23:
                    with open('out.out', 'a') as f:
                        f.write('User application payload: %d\n' % length)
                        f.close()  # redundant: `with` already closes the file
            else:
                self.basic_logger.debug('Unassigned Content Type record (len = %d)' % len(data))
            lg.append('Source : User')

        try:
            lg.append('Content Type : ' + constants.TLS_CONTENT[cont_type])
        except:
            lg.append('Content Type: Unassigned %d' % cont_type)
        try:
            lg.append('TLS Version : ' + constants.TLS_VERSION[(version[0], version[1])])
        except:
            lg.append('TLS Version: Uknown %d %d' % (version[0], version[1]))
        lg.append('TLS Payload Length: %d' % length)
        lg.append('(Remaining) Packet Data length: %d\n' % len(data))

        # Check if TLS record spans to next TCP segment
        if len(data) - constants.TLS_HEADER_LENGTH < length:
            if is_response:
                past_bytes_endpoint = length + constants.TLS_HEADER_LENGTH - len(data)
            else:
                past_bytes_user = length + constants.TLS_HEADER_LENGTH - len(data)

        lg.append(self.log_data(data[0:constants.TLS_HEADER_LENGTH]))
        lg.append(self.log_data(data[constants.TLS_HEADER_LENGTH:constants.TLS_HEADER_LENGTH+length]))
        lg.append('\n')

        # Check if packet has more than one TLS records
        if length < len(data) - constants.TLS_HEADER_LENGTH:
            # Recurse on the remainder of the packet for subsequent records.
            more_records, past_bytes_endpoint, past_bytes_user, chunked_endpoint_header, chunked_user_header, _ = self.parse(
                data[constants.TLS_HEADER_LENGTH+length:],
                past_bytes_endpoint,
                past_bytes_user,
                chunked_endpoint_header,
                chunked_user_header,
                is_response
            )
            lg.append(more_records)

        return ('\n'.join(lg), past_bytes_endpoint, past_bytes_user, chunked_endpoint_header, chunked_user_header, downgrade)

    def start(self):
        '''
        Start sockets on user side (proxy as server) and endpoint side (proxy as client).
        '''
        self.full_logger.info('Starting Proxy')
        try:
            self.user_setup()
            self.endpoint_setup()
        except:
            # setup methods terminate the process themselves on failure
            pass
        self.full_logger.info('Proxy is set up')
        return

    def restart(self, attempt_counter = 0):
        '''
        Restart sockets in case of error; gives up after 3 attempts.
        '''
        self.full_logger.info('Restarting Proxy')
        try:
            self.user_socket.close()
            self.endpoint_socket.close()
        except:
            pass
        try:
            self.user_setup()
            self.endpoint_setup()
        except:
            if attempt_counter < 3:
                self.full_logger.debug('Reattempting restart')
                self.restart(attempt_counter+1)
            else:
                self.full_logger.debug('Multiple failed attempts to restart')
                self.stop(-9)
                sys.exit(-1)
        self.full_logger.info('Proxy has restarted')
        return

    def stop(self, exit_code = 0):
        '''
        Shutdown sockets and terminate connection.
        '''
        try:
            self.user_connection.close()
            self.endpoint_socket.close()
        except:
            pass
        self.full_logger.info('Connection closed')
        self.debug_logger.debug('Stopping breach object with code: %d' % exit_code)
        return

    def user_setup(self):
        '''
        Create and configure user side socket.
        '''
        try:
            self.full_logger.info('Setting up user socket')
            user_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            user_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Set options to reuse socket
            user_socket.bind((constants.USER, constants.USER_PORT))
            self.full_logger.info('User socket bind complete')
            user_socket.listen(1)
            self.full_logger.info('User socket listen complete')
            self.user_connection, self.address = user_socket.accept()
            self.user_socket = user_socket
            self.full_logger.info('User socket is set up')
        except:
            self.stop(-8)
            sys.exit(-1)
        return

    def endpoint_setup(self):
        '''
        Create and configure endpoint side socket
        '''
        try:
            self.full_logger.info('Setting up endpoint socket')
            endpoint_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.full_logger.info('Connecting endpoint socket')
            endpoint_socket.connect((constants.ENDPOINT, constants.ENDPOINT_PORT))
            endpoint_socket.setblocking(0) # Set non-blocking, i.e. raise exception if send/recv is not completed
            self.endpoint_socket = endpoint_socket
            self.full_logger.info('Endpoint socket is set up')
        except:
            self.stop(-7)
            sys.exit(-1)
        return

    def execute_breach(self):
        '''
        Start proxy and execute main loop
        '''
        # Initialize parameters for execution.
        past_bytes_user = 0 # Number of bytes expanding to future user packets
        past_bytes_endpoint = 0 # Number of bytes expanding to future endpoint packets
        chunked_user_header = None # TLS user header portion that gets stuck between packets
        chunked_endpoint_header = None # TLS endpoint header portion that gets stuck between packets
        self.start()
        self.full_logger.info('Starting main proxy loop')
        try:
            while 1:
                # 5-second select timeout over both sides of the proxy.
                ready_to_read, ready_to_write, in_error = select.select(
                    [self.user_connection, self.endpoint_socket],
                    [],
                    [],
                    5
                )
                if self.user_connection in ready_to_read: # If user side socket is ready to read...
                    data = ''
                    try:
                        data = self.user_connection.recv(constants.SOCKET_BUFFER) # ...receive data from user...
                    except Exception as exc:
                        self.full_logger.debug('User connection error')
                        self.full_logger.debug(exc)
                        self.stop(-6)
                        break
                    if len(data) == 0:
                        # NOTE(review): unlike the endpoint branch below, there
                        # is no `break` after stop(-5) — confirm intended.
                        self.full_logger.info('User connection closed')
                        self.stop(-5)
                    else:
                        self.basic_logger.debug('User Packet Length: %d' % len(data))
                        output, past_bytes_endpoint, past_bytes_user, chunked_endpoint_header, chunked_user_header, downgrade = self.parse(
                            data,
                            past_bytes_endpoint,
                            past_bytes_user,
                            chunked_endpoint_header,
                            chunked_user_header
                        ) # ...parse it...
                        self.full_logger.debug(output)
                        try:
                            if downgrade and constants.ATTEMPT_DOWNGRADE:
                                alert = 'HANDSHAKE_FAILURE'
                                # NOTE(review): the positional `True` binds
                                # chunked_endpoint_header (4th param), not
                                # is_response — confirm against parse().
                                output, _, _, _, _, _ = self.parse(
                                    constants.ALERT_MESSAGES[alert],
                                    past_bytes_endpoint,
                                    past_bytes_user,
                                    True
                                )
                                self.full_logger.debug('\n\n' + 'Downgrade Attempt' + output)
                                self.user_connection.sendall(constants.ALERT_MESSAGES[alert]) # if we are trying to downgrade, send fatal alert to user
                                continue
                            self.endpoint_socket.sendall(data) # ...and send it to endpoint
                        except Exception as exc:
                            self.full_logger.debug('User data forwarding error')
                            self.full_logger.debug(exc)
                            self.stop(-4)
                            break
                if self.endpoint_socket in ready_to_read: # Same for the endpoint side
                    data = ''
                    try:
                        data = self.endpoint_socket.recv(constants.SOCKET_BUFFER)
                    except Exception as exc:
                        self.full_logger.debug('Endpoint connection error')
                        self.full_logger.debug(exc)
                        self.stop(-3)
                        break
                    if len(data) == 0:
                        self.full_logger.info('Endpoint connection closed')
                        self.stop(5)
                        break
                    else:
                        self.basic_logger.debug('Endpoint Packet Length: %d' % len(data))
                        output, past_bytes_endpoint, past_bytes_user, chunked_endpoint_header, chunked_user_header, _ = self.parse(
                            data,
                            past_bytes_endpoint,
                            past_bytes_user,
                            chunked_endpoint_header,
                            chunked_user_header,
                            True
                        )
                        self.full_logger.debug(output)
                        try:
                            self.user_connection.sendall(data)
                        except Exception as exc:
                            self.full_logger.debug('Endpoint data forwarding error')
                            self.full_logger.debug(exc)
                            self.stop(-2)
                            break
        except Exception as e:
            self.stop(-1)
        return
if __name__ == '__main__':
    # Build the proxy from CLI arguments and run the main forwarding loop.
    args_dict = get_arguments_dict(sys.argv)
    conn = Connector(args_dict)
    conn.full_logger.info('Hillclimbing parameters file created')
    conn.execute_breach()
| 2.34375 | 2 |
netmikoo4.py | lalitshankergarg/pylalit | 0 | 12767089 | #1/usr/bin/python3
import netmiko,time
#multi vendor library
# Connection parameters for the target Cisco IOS device.
device1={
    'username' : 'lalit',
    'password' : '<PASSWORD>',
    'device_type' : 'cisco_ios',
    'host' : '192.168.234.131'
}
#to connect to target device
#by checking couple of things connect handler will allow you to connect
device_connect=netmiko.ConnectHandler(**device1)
#print([i for i in dir(device_connect) if 'send' in i])
#now sending configuration for device
conf=["hostname pyrouter1","username hello privi 10 password <PASSWORD>","end"]
#output=device_connect.send_config_set(conf)
#print(output)
#sending configuration from file
output1=device_connect.send_config_from_file('myrouter.txt')
print(output1)
| 2.453125 | 2 |
data/utils.py | carmelrabinov/contrastive-domain-randomization | 5 | 12767090 | <reponame>carmelrabinov/contrastive-domain-randomization
import re
import shutil
import torch
from torchvision import transforms
import numpy as np
import os
from PIL import Image
import matplotlib.pyplot as plt
from scipy.ndimage.morphology import binary_dilation, generate_binary_structure
import cv2
# Filename suffixes shared by the dataset utilities below.
action_files_sfx = "_state_action_label"  # per-trajectory label file
second_video_sfx = "_2"  # suffix of a secondary video file (unused here)
segmentation_mask_sfx = "_seg_mask"  # segmentation-mask trajectory file
def resize(x, size: int = 128):
    """Resize *x* to *size* via torchvision transforms."""
    return transforms.Resize(size)(x)
def to_tensor(x):
    """Convert *x* to a torch tensor via torchvision ToTensor."""
    return transforms.ToTensor()(x)
def normalize(x):
    """Normalize a 3-channel tensor with the standard ImageNet mean/std."""
    return transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(x)
def image_transform(img, resize_to=128):
    """Scale an image to [0, 1] if needed, optionally resize, and return a
    normalized tensor (assumes 128x128 input when resize_to == 128)."""
    scaled = img / 255. if np.max(img) > 1 else img
    if resize_to != 128:
        scaled = cv2.resize(scaled, (resize_to, resize_to))
    return normalize(to_tensor(scaled))
def seg_mask_transform(mask):
    """Convert a segmentation mask to a torch tensor."""
    return transforms.ToTensor()(mask)
# TODO: update according to the new env
def im2pos_coordinates(pix_x, pix_z):
    """Map 128x128 image pixel coordinates to world (x, z) coordinates.

    Limits are hard-coded for the current environment (see module TODO).
    """
    x_min, x_max = -0.365, 0.365
    z_min, z_max = -0.95, -0.24
    x = x_max - (x_max - x_min) * pix_x / 127
    z = z_max - (z_max - z_min) * pix_z / 127
    return x, z
# TODO: update according to the new env
def pos2im_coordinates(x, z):
    """Map world (x, z) coordinates to 128x128 image pixel coordinates.

    Inverse of im2pos_coordinates, with int() truncation to pixel indices.
    """
    x_min, x_max = -0.365, 0.365
    z_min, z_max = -0.95, -0.24
    pix_x = int(127 * (x_max - x) / (x_max - x_min))
    pix_z = int(127 * (z_max - z) / (z_max - z_min))
    return pix_x, pix_z
def add_arrow_to_image(image, action):
    """
    Draw the action arrow onto an observation image.

    :param image: image of the observation
    :param action: action as (x_source, y_source, x_target, y_target)
    :return: the image with a green arrow drawn on it
    """
    tail = pos2im_coordinates(action[0], action[1])
    head = pos2im_coordinates(action[2], action[3])
    # visual params
    green = (0, 255, 0)
    thickness = 3
    return cv2.arrowedLine(image, tail, head, green, thickness)
def load_label(path: str) -> dict:
    """Load a pickled label dict from *path*; warn and return None if missing."""
    if os.path.exists(path):
        return np.load(path, allow_pickle=True).tolist()
    print(f"Warning, try to load non-exist label {path}")
    return None
def process_action(label_dict: dict) -> torch.Tensor:
    """Build an action tensor by concatenating end-effector positions with the
    first three columns of each action row."""
    targets = np.asarray(label_dict["action"], dtype=float)[:, :3]
    positions = np.asarray(label_dict["ee_positions"], dtype=float)
    return torch.from_numpy(np.concatenate((positions, targets), axis=-1))
def load_single_image(path: str) -> np.uint8:
    """load to single image file"""
    if not os.path.exists(path):
        print(f"Warning, try to load non-exist image {path}")
        return None
    if path.endswith(".npy"):
        img = np.load(path)
    elif path.endswith(".png") or path.endswith(".jpeg") or path.endswith(".jpg"):
        img = plt.imread(path)
        # plt.imread returns float images in [0, 1] for PNGs; convert to uint8.
        # NOTE(review): indentation reconstructed — this dtype conversion is
        # assumed to apply only to the imread branch; confirm upstream.
        if img.dtype != "uint8":
            img = (255 * img).astype(np.uint8)
    return img
def load_video(path: str) -> np.ndarray:
    """Load a trajectory video array from *path*; warn and return None if missing."""
    if os.path.exists(path):
        return np.load(path)
    print(f"Warning, try to load non-exist file {path}")
    return None
def load_seg_masks_from_video(path: str, frame_index: int = -1) -> np.ndarray:
    """Load a trajectory segmentation mask; return the whole mask, or a
    single frame when frame_index >= 0 (None if the file is missing)."""
    masks = load_video(path)
    if masks is not None and frame_index >= 0:
        return masks[frame_index]
    return masks
def load_frame_from_video(path: str, frame_index: int) -> np.ndarray:
    """Load a trajectory video and return the frame at *frame_index*.

    Fix: load_video returns None for a missing file; previously that None
    was indexed and raised a TypeError. Propagate None instead, matching
    the convention of the other loaders in this module.
    """
    vid = load_video(path)
    if vid is None:
        return None
    return vid[frame_index]
def visualize_trajectory(path, plot_segmentation=False, save_path=None) -> None:
    """Plot every frame of a trajectory video with its action arrow.

    Optionally shows the segmentation masks in a second row and saves the
    figure to ``save_path + ".jpg"``.
    """
    ref_video = load_video(path)
    # The label file shares the video's basename plus the label suffix.
    label_path = path[:-4] + action_files_sfx + ".npy"
    label = load_label(label_path)
    actions = label["action"]
    if plot_segmentation:
        ref_seg_mask = load_video(path[:-4] + segmentation_mask_sfx + ".npy")
    n_cols = len(ref_video)
    n_rows = 2 if plot_segmentation else 1
    fig = plt.figure(figsize=(3*n_cols, 3*n_rows))
    fig.suptitle(path, fontsize=12)
    for i in range(n_cols):
        # Copy the frame so the arrow is not drawn into the loaded array.
        ref_image = add_arrow_to_image(np.copy(ref_video[i]), actions[i])
        fig.add_subplot(n_rows, n_cols, i+1).set_title(f"{i}", fontsize=20)
        plt.imshow(ref_image)
        plt.axis('off')
        if plot_segmentation:
            ref_mask = ref_seg_mask[i]
            fig.add_subplot(n_rows, n_cols, n_cols + i+1).set_title(f"segmentation {i}", fontsize=20)
            plt.imshow(ref_mask[:, :, 0], cmap=plt.cm.gray)
            plt.axis('off')
    if save_path is not None:
        plt.savefig(save_path + ".jpg")
    plt.show()
def videos_to_images(dir_path: str, load_segmantation_masks: bool = False) -> None:
    """
    load a full trajectory video file (and optionally segmentation mask trajectory)
    and save each image from it as a separate file

    Output goes to a sibling directory named ``dir_path + "_processed"``.
    """
    os.makedirs(dir_path + "_processed", exist_ok=True)
    # Only files named like video_<N>.npy are treated as trajectory videos.
    videos = [v for v in os.listdir(dir_path) if re.match("video_[0-9]+.npy", v)]
    for video_path in videos:
        video = load_video(os.path.join(dir_path, video_path))
        for i in range(len(video)):
            im = Image.fromarray(video[i].astype(np.uint8))
            im.save(dir_path + f'_processed/{video_path[:-4]}_{i}.png')
        if load_segmantation_masks:
            seg_mask = load_seg_masks_from_video(os.path.join(dir_path, video_path[:-4] + segmentation_mask_sfx + ".npy"))
            for i in range(len(video)):
                # Masks are saved as .npy arrays, one per frame.
                np.save(dir_path + f'_processed/{video_path[:-4]}_{i}_seg_mask', seg_mask[i])
if __name__ == '__main__':
    # Ad-hoc driver; the commented lines are kept as example invocations.
    # videos_to_images("D:/Representation_Learning/datasets/textured_rope_new_ood", load_seg=True)
    # process_rope_dataset("/mnt/data/carmel_data/datasets/textured_rope_val_masks")
    visualize_trajectory(r"D:\Representation_Learning\datasets\cube_2d_states_textures\video_51.npy", plot_segmentation=False)
    # fix_action_bug_rope_dataset()
# videos_to_images("/mnt/data/carmel_data/datasets/textured_rope_val_masks_1", load_seg=True, ) | 2.234375 | 2 |
plantstuff/schema/ecology.py | christabor/plantstuff | 6 | 12767091 | <filename>plantstuff/schema/ecology.py
"""Locale specific data for the master Model.
TODO: add world countries, etc... granularity TBD.
"""
from neomodel import (
ArrayProperty as ListProp,
StructuredNode as Model,
StringProperty as StringProp,
)
US_COUNTIES = [
"alabama:autauga",
"alabama:baldwin",
"alabama:barbour",
"alabama:bibb",
"alabama:blount",
"alabama:bullock",
"alabama:butler",
"alabama:calhoun",
"alabama:chambers",
"alabama:cherokee",
"alabama:chilton",
"alabama:choctaw",
"alabama:clarke",
"alabama:clay",
"alabama:cleburne",
"alabama:coffee",
"alabama:colbert",
"alabama:conecuh",
"alabama:coosa",
"alabama:covington",
"alabama:crenshaw",
"alabama:cullman",
"alabama:dale",
"alabama:dallas",
"alabama:dekalb",
"alabama:elmore",
"alabama:escambia",
"alabama:etowah",
"alabama:fayette",
"alabama:franklin",
"alabama:geneva",
"alabama:greene",
"alabama:hale",
"alabama:henry",
"alabama:houston",
"alabama:jackson",
"alabama:jefferson",
"alabama:lamar",
"alabama:lauderdale",
"alabama:lawrence",
"alabama:lee",
"alabama:limestone",
"alabama:lowndes",
"alabama:macon",
"alabama:madison",
"alabama:marengo",
"alabama:marion",
"alabama:marshall",
"alabama:mobile",
"alabama:monroe",
"alabama:montgomery",
"alabama:morgan",
"alabama:perry",
"alabama:pickens",
"alabama:pike",
"alabama:randolph",
"alabama:russell",
"alabama:shelby",
"alabama:st. clair",
"alabama:sumter",
"alabama:talladega",
"alabama:tallapoosa",
"alabama:tuscaloosa",
"alabama:walker",
"alabama:washington",
"alabama:wilcox",
"alabama:winston",
"arizona:apache",
"arizona:cochise",
"arizona:coconino",
"arizona:gila",
"arizona:graham",
"arizona:greenlee",
"arizona:la paz",
"arizona:maricopa",
"arizona:mohave",
"arizona:navajo",
"arizona:pima",
"arizona:pinal",
"arizona:santa cruz",
"arizona:yavapai",
"arizona:yuma",
"arkansas:arkansas",
"arkansas:ashley",
"arkansas:baxter",
"arkansas:benton",
"arkansas:boone",
"arkansas:bradley",
"arkansas:calhoun",
"arkansas:carroll",
"arkansas:chicot",
"arkansas:clark",
"arkansas:clay",
"arkansas:cleburne",
"arkansas:cleveland",
"arkansas:columbia",
"arkansas:conway",
"arkansas:craighead",
"arkansas:crawford",
"arkansas:crittenden",
"arkansas:cross",
"arkansas:dallas",
"arkansas:desha",
"arkansas:drew",
"arkansas:faulkner",
"arkansas:franklin",
"arkansas:fulton",
"arkansas:garland",
"arkansas:grant",
"arkansas:greene",
"arkansas:hempstead",
"arkansas:hot spring",
"arkansas:howard",
"arkansas:independence",
"arkansas:izard",
"arkansas:jackson",
"arkansas:jefferson",
"arkansas:johnson",
"arkansas:lafayette",
"arkansas:lawrence",
"arkansas:lee",
"arkansas:lincoln",
"arkansas:little river",
"arkansas:logan",
"arkansas:lonoke",
"arkansas:madison",
"arkansas:marion",
"arkansas:miller",
"arkansas:mississippi",
"arkansas:monroe",
"arkansas:montgomery",
"arkansas:nevada",
"arkansas:newton",
"arkansas:ouachita",
"arkansas:perry",
"arkansas:phillips",
"arkansas:pike",
"arkansas:poinsett",
"arkansas:polk",
"arkansas:pope",
"arkansas:prairie",
"arkansas:pulaski",
"arkansas:randolph",
"arkansas:saline",
"arkansas:scott",
"arkansas:searcy",
"arkansas:sebastian",
"arkansas:sevier",
"arkansas:sharp",
"arkansas:st. francis",
"arkansas:stone",
"arkansas:union",
"arkansas:van buren",
"arkansas:washington",
"arkansas:white",
"arkansas:woodruff",
"arkansas:yell",
"california:alameda",
"california:alpine",
"california:amador",
"california:butte",
"california:calaveras",
"california:colusa",
"california:contra costa",
"california:del norte",
"california:el dorado",
"california:fresno",
"california:glenn",
"california:humboldt",
"california:imperial",
"california:inyo",
"california:kern",
"california:kings",
"california:lake",
"california:lassen",
"california:los angeles",
"california:madera",
"california:marin",
"california:mariposa",
"california:mendocino",
"california:merced",
"california:modoc",
"california:mono",
"california:monterey",
"california:napa",
"california:nevada",
"california:orange",
"california:placer",
"california:plumas",
"california:riverside",
"california:sacramento",
"california:san benito",
"california:san bernardino",
"california:san diego",
"california:san francisco",
"california:san joaquin",
"california:san luis obispo",
"california:san mateo",
"california:santa barbara",
"california:santa clara",
"california:santa cruz",
"california:shasta",
"california:sierra",
"california:siskiyou",
"california:solano",
"california:sonoma",
"california:stanislaus",
"california:sutter",
"california:tehama",
"california:trinity",
"california:tulare",
"california:tuolumne",
"california:ventura",
"california:yolo",
"california:yuba",
"colorado:adams",
"colorado:alamosa",
"colorado:arapahoe",
"colorado:archuleta",
"colorado:baca",
"colorado:bent",
"colorado:boulder",
"colorado:chaffee",
"colorado:cheyenne",
"colorado:clear creek",
"colorado:conejos",
"colorado:costilla",
"colorado:crowley",
"colorado:custer",
"colorado:delta",
"colorado:denver",
"colorado:dolores",
"colorado:douglas",
"colorado:eagle",
"colorado:el paso",
"colorado:elbert",
"colorado:fremont",
"colorado:garfield",
"colorado:gilpin",
"colorado:grand",
"colorado:gunnison",
"colorado:hinsdale",
"colorado:huerfano",
"colorado:jackson",
"colorado:jefferson",
"colorado:kiowa",
"colorado:kit carson",
"colorado:la plata",
"colorado:lake",
"colorado:larimer",
"colorado:las animas",
"colorado:lincoln",
"colorado:logan",
"colorado:mesa",
"colorado:mineral",
"colorado:moffat",
"colorado:montezuma",
"colorado:montrose",
"colorado:morgan",
"colorado:otero",
"colorado:ouray",
"colorado:park",
"colorado:phillips",
"colorado:pitkin",
"colorado:prowers",
"colorado:pueblo",
"colorado:rio blanco",
"colorado:rio grande",
"colorado:routt",
"colorado:saguache",
"colorado:san juan",
"colorado:san miguel",
"colorado:sedgwick",
"colorado:summit",
"colorado:teller",
"colorado:washington",
"colorado:weld",
"colorado:yuma",
"connecticut:fairfield",
"connecticut:hartford",
"connecticut:litchfield",
"connecticut:middlesex",
"connecticut:new haven",
"connecticut:new london",
"connecticut:tolland",
"connecticut:windham",
"delaware:kent",
"delaware:new castle",
"delaware:sussex",
"district of columbia:district of columbia",
"florida:alachua",
"florida:baker",
"florida:bay",
"florida:bradford",
"florida:brevard",
"florida:broward",
"florida:calhoun",
"florida:charlotte",
"florida:citrus",
"florida:clay",
"florida:collier",
"florida:columbia",
"florida:desoto",
"florida:dixie",
"florida:duval",
"florida:escambia",
"florida:flagler",
"florida:franklin",
"florida:gadsden",
"florida:gilchrist",
"florida:glades",
"florida:gulf",
"florida:hamilton",
"florida:hardee",
"florida:hendry",
"florida:hernando",
"florida:highlands",
"florida:hillsborough",
"florida:holmes",
"florida:indian river",
"florida:jackson",
"florida:jefferson",
"florida:lafayette",
"florida:lake",
"florida:lee",
"florida:leon",
"florida:levy",
"florida:liberty",
"florida:madison",
"florida:manatee",
"florida:marion",
"florida:martin",
"florida:miami-dade",
"florida:monroe",
"florida:nassau",
"florida:okaloosa",
"florida:okeechobee",
"florida:orange",
"florida:osceola",
"florida:palm beach",
"florida:pasco",
"florida:pinellas",
"florida:polk",
"florida:putnam",
"florida:santa rosa",
"florida:sarasota",
"florida:seminole",
"florida:st. johns",
"florida:st. lucie",
"florida:sumter",
"florida:suwannee",
"florida:taylor",
"florida:union",
"florida:volusia",
"florida:wakulla",
"florida:walton",
"florida:washington",
"georgia:appling",
"georgia:atkinson",
"georgia:bacon",
"georgia:baker",
"georgia:baldwin",
"georgia:banks",
"georgia:barrow",
"georgia:bartow",
"georgia:<NAME>",
"georgia:berrien",
"georgia:bibb",
"georgia:bleckley",
"georgia:brantley",
"georgia:brooks",
"georgia:bryan",
"georgia:bulloch",
"georgia:burke",
"georgia:butts",
"georgia:calhoun",
"georgia:camden",
"georgia:candler",
"georgia:carroll",
"georgia:catoosa",
"georgia:charlton",
"georgia:chatham",
"georgia:chattahoochee",
"georgia:chattooga",
"georgia:cherokee",
"georgia:clarke",
"georgia:clay",
"georgia:clayton",
"georgia:clinch",
"georgia:cobb",
"georgia:coffee",
"georgia:colquitt",
"georgia:columbia",
"georgia:cook",
"georgia:coweta",
"georgia:crawford",
"georgia:crisp",
"georgia:dade",
"georgia:dawson",
"georgia:dekalb",
"georgia:decatur",
"georgia:dodge",
"georgia:dooly",
"georgia:dougherty",
"georgia:douglas",
"georgia:early",
"georgia:echols",
"georgia:effingham",
"georgia:elbert",
"georgia:emanuel",
"georgia:evans",
"georgia:fannin",
"georgia:fayette",
"georgia:floyd",
"georgia:forsyth",
"georgia:franklin",
"georgia:fulton",
"georgia:gilmer",
"georgia:glascock",
"georgia:glynn",
"georgia:gordon",
"georgia:grady",
"georgia:greene",
"georgia:gwinnett",
"georgia:habersham",
"georgia:hall",
"georgia:hancock",
"georgia:haralson",
"georgia:harris",
"georgia:hart",
"georgia:heard",
"georgia:henry",
"georgia:houston",
"georgia:irwin",
"georgia:jackson",
"georgia:jasper",
"georgia:<NAME>",
"georgia:jefferson",
"georgia:jenkins",
"georgia:johnson",
"georgia:jones",
"georgia:lamar",
"georgia:lanier",
"georgia:laurens",
"georgia:lee",
"georgia:liberty",
"georgia:lincoln",
"georgia:long",
"georgia:lowndes",
"georgia:lumpkin",
"georgia:macon",
"georgia:madison",
"georgia:marion",
"georgia:mcduffie",
"georgia:mcintosh",
"georgia:meriwether",
"georgia:miller",
"georgia:mitchell",
"georgia:monroe",
"georgia:montgomery",
"georgia:morgan",
"georgia:murray",
"georgia:muscogee",
"georgia:newton",
"georgia:oconee",
"georgia:oglethorpe",
"georgia:paulding",
"georgia:peach",
"georgia:pickens",
"georgia:pierce",
"georgia:pike",
"georgia:polk",
"georgia:pulaski",
"georgia:putnam",
"georgia:quitman",
"georgia:rabun",
"georgia:randolph",
"georgia:richmond",
"georgia:rockdale",
"georgia:schley",
"georgia:screven",
"georgia:seminole",
"georgia:spalding",
"georgia:stephens",
"georgia:stewart",
"georgia:sumter",
"georgia:talbot",
"georgia:taliaferro",
"georgia:tattnall",
"georgia:taylor",
"georgia:telfair",
"georgia:terrell",
"georgia:thomas",
"georgia:tift",
"georgia:toombs",
"georgia:towns",
"georgia:treutlen",
"georgia:troup",
"georgia:turner",
"georgia:twiggs",
"georgia:union",
"georgia:upson",
"georgia:walker",
"georgia:walton",
"georgia:ware",
"georgia:warren",
"georgia:washington",
"georgia:wayne",
"georgia:webster",
"georgia:wheeler",
"georgia:white",
"georgia:whitfield",
"georgia:wilcox",
"georgia:wilkes",
"georgia:wilkinson",
"georgia:worth",
"hawaii:hawaii",
"hawaii:honolulu",
"hawaii:kalawao",
"hawaii:kauai",
"hawaii:maui",
"idaho:ada",
"idaho:adams",
"idaho:bannock",
"idaho:bear lake",
"idaho:benewah",
"idaho:bingham",
"idaho:blaine",
"idaho:boise",
"idaho:bonner",
"idaho:bonneville",
"idaho:boundary",
"idaho:butte",
"idaho:camas",
"idaho:canyon",
"idaho:caribou",
"idaho:cassia",
"idaho:clark",
"idaho:clearwater",
"idaho:custer",
"idaho:elmore",
"idaho:franklin",
"idaho:fremont",
"idaho:gem",
"idaho:gooding",
"idaho:idaho",
"idaho:jefferson",
"idaho:jerome",
"idaho:kootenai",
"idaho:latah",
"idaho:lemhi",
"idaho:lewis",
"idaho:lincoln",
"idaho:madison",
"idaho:minidoka",
"idaho:nez perce",
"idaho:oneida",
"idaho:owyhee",
"idaho:payette",
"idaho:power",
"idaho:shoshone",
"idaho:teton",
"idaho:twin falls",
"idaho:valley",
"idaho:washington",
"illinois:adams",
"illinois:alexander",
"illinois:bond",
"illinois:boone",
"illinois:brown",
"illinois:bureau",
"illinois:calhoun",
"illinois:carroll",
"illinois:cass",
"illinois:champaign",
"illinois:christian",
"illinois:clark",
"illinois:clay",
"illinois:clinton",
"illinois:coles",
"illinois:cook",
"illinois:crawford",
"illinois:cumberland",
"illinois:de witt",
"illinois:dekalb",
"illinois:douglas",
"illinois:dupage",
"illinois:edgar",
"illinois:edwards",
"illinois:effingham",
"illinois:fayette",
"illinois:ford",
"illinois:franklin",
"illinois:fulton",
"illinois:gallatin",
"illinois:greene",
"illinois:grundy",
"illinois:hamilton",
"illinois:hancock",
"illinois:hardin",
"illinois:henderson",
"illinois:henry",
"illinois:iroquois",
"illinois:jackson",
"illinois:jasper",
"illinois:jefferson",
"illinois:jersey",
"illinois:jo daviess",
"illinois:johnson",
"illinois:kane",
"illinois:kankakee",
"illinois:kendall",
"illinois:knox",
"illinois:la salle",
"illinois:lake",
"illinois:lawrence",
"illinois:lee",
"illinois:livingston",
"illinois:logan",
"illinois:macon",
"illinois:macoupin",
"illinois:madison",
"illinois:marion",
"illinois:marshall",
"illinois:mason",
"illinois:massac",
"illinois:mcdonough",
"illinois:mchenry",
"illinois:mclean",
"illinois:menard",
"illinois:mercer",
"illinois:monroe",
"illinois:montgomery",
"illinois:morgan",
"illinois:moultrie",
"illinois:ogle",
"illinois:peoria",
"illinois:perry",
"illinois:piatt",
"illinois:pike",
"illinois:pope",
"illinois:pulaski",
"illinois:putnam",
"illinois:randolph",
"illinois:richland",
"illinois:rock island",
"illinois:saline",
"illinois:sangamon",
"illinois:schuyler",
"illinois:scott",
"illinois:shelby",
"illinois:st. clair",
"illinois:stark",
"illinois:stephenson",
"illinois:tazewell",
"illinois:union",
"illinois:vermilion",
"illinois:wabash",
"illinois:warren",
"illinois:washington",
"illinois:wayne",
"illinois:white",
"illinois:whiteside",
"illinois:will",
"illinois:williamson",
"illinois:winnebago",
"illinois:woodford",
"indiana:adams",
"indiana:allen",
"indiana:bartholomew",
"indiana:benton",
"indiana:blackford",
"indiana:boone",
"indiana:brown",
"indiana:carroll",
"indiana:cass",
"indiana:clark",
"indiana:clay",
"indiana:clinton",
"indiana:crawford",
"indiana:daviess",
"indiana:<NAME>",
"indiana:dearborn",
"indiana:decatur",
"indiana:delaware",
"indiana:dubois",
"indiana:elkhart",
"indiana:fayette",
"indiana:floyd",
"indiana:fountain",
"indiana:franklin",
"indiana:fulton",
"indiana:gibson",
"indiana:grant",
"indiana:greene",
"indiana:hamilton",
"indiana:hancock",
"indiana:harrison",
"indiana:hendricks",
"indiana:henry",
"indiana:howard",
"indiana:huntington",
"indiana:jackson",
"indiana:jasper",
"indiana:jay",
"indiana:jefferson",
"indiana:jennings",
"indiana:johnson",
"indiana:knox",
"indiana:kosciusko",
"indiana:la porte",
"indiana:lagrange",
"indiana:lake",
"indiana:lawrence",
"indiana:madison",
"indiana:marion",
"indiana:marshall",
"indiana:martin",
"indiana:miami",
"indiana:monroe",
"indiana:montgomery",
"indiana:morgan",
"indiana:newton",
"indiana:noble",
"indiana:ohio",
"indiana:orange",
"indiana:owen",
"indiana:parke",
"indiana:perry",
"indiana:pike",
"indiana:porter",
"indiana:posey",
"indiana:pulaski",
"indiana:putnam",
"indiana:randolph",
"indiana:ripley",
"indiana:rush",
"indiana:scott",
"indiana:shelby",
"indiana:spencer",
"indiana:st. joseph",
"indiana:starke",
"indiana:steuben",
"indiana:sullivan",
"indiana:switzerland",
"indiana:tippecanoe",
"indiana:tipton",
"indiana:union",
"indiana:vanderburgh",
"indiana:vermillion",
"indiana:vigo",
"indiana:wabash",
"indiana:warren",
"indiana:warrick",
"indiana:washington",
"indiana:wayne",
"indiana:wells",
"indiana:white",
"indiana:whitley",
"iowa:adair",
"iowa:adams",
"iowa:allamakee",
"iowa:appanoose",
"iowa:audubon",
"iowa:benton",
"iowa:black hawk",
"iowa:boone",
"iowa:bremer",
"iowa:buchanan",
"iowa:buena vista",
"iowa:butler",
"iowa:calhoun",
"iowa:carroll",
"iowa:cass",
"iowa:cedar",
"iowa:cerro gordo",
"iowa:cherokee",
"iowa:chickasaw",
"iowa:clarke",
"iowa:clay",
"iowa:clayton",
"iowa:clinton",
"iowa:crawford",
"iowa:dallas",
"iowa:davis",
"iowa:decatur",
"iowa:delaware",
"iowa:des moines",
"iowa:dickinson",
"iowa:dubuque",
"iowa:emmet",
"iowa:fayette",
"iowa:floyd",
"iowa:franklin",
"iowa:fremont",
"iowa:greene",
"iowa:grundy",
"iowa:guthrie",
"iowa:hamilton",
"iowa:hancock",
"iowa:hardin",
"iowa:harrison",
"iowa:henry",
"iowa:howard",
"iowa:humboldt",
"iowa:ida",
"iowa:iowa",
"iowa:jackson",
"iowa:jasper",
"iowa:jefferson",
"iowa:johnson",
"iowa:jones",
"iowa:keokuk",
"iowa:kossuth",
"iowa:lee",
"iowa:linn",
"iowa:louisa",
"iowa:lucas",
"iowa:lyon",
"iowa:madison",
"iowa:mahaska",
"iowa:marion",
"iowa:marshall",
"iowa:mills",
"iowa:mitchell",
"iowa:monona",
"iowa:monroe",
"iowa:montgomery",
"iowa:muscatine",
"iowa:o'brien",
"iowa:osceola",
"iowa:page",
"iowa:palo alto",
"iowa:plymouth",
"iowa:pocahontas",
"iowa:polk",
"iowa:pottawattamie",
"iowa:poweshiek",
"iowa:ringgold",
"iowa:sac",
"iowa:scott",
"iowa:shelby",
"iowa:sioux",
"iowa:story",
"iowa:tama",
"iowa:taylor",
"iowa:union",
"iowa:<NAME>",
"iowa:wapello",
"iowa:warren",
"iowa:washington",
"iowa:wayne",
"iowa:webster",
"iowa:winnebago",
"iowa:winneshiek",
"iowa:woodbury",
"iowa:worth",
"iowa:wright",
"kansas:allen",
"kansas:anderson",
"kansas:atchison",
"kansas:barber",
"kansas:barton",
"kansas:bourbon",
"kansas:brown",
"kansas:butler",
"kansas:chase",
"kansas:chautauqua",
"kansas:cherokee",
"kansas:cheyenne",
"kansas:clark",
"kansas:clay",
"kansas:cloud",
"kansas:coffey",
"kansas:comanche",
"kansas:cowley",
"kansas:crawford",
"kansas:decatur",
"kansas:dickinson",
"kansas:doniphan",
"kansas:douglas",
"kansas:edwards",
"kansas:elk",
"kansas:ellis",
"kansas:ellsworth",
"kansas:finney",
"kansas:ford",
"kansas:franklin",
"kansas:geary",
"kansas:gove",
"kansas:graham",
"kansas:grant",
"kansas:gray",
"kansas:greeley",
"kansas:greenwood",
"kansas:hamilton",
"kansas:harper",
"kansas:harvey",
"kansas:haskell",
"kansas:hodgeman",
"kansas:jackson",
"kansas:jefferson",
"kansas:jewell",
"kansas:johnson",
"kansas:kearny",
"kansas:kingman",
"kansas:kiowa",
"kansas:labette",
"kansas:lane",
"kansas:leavenworth",
"kansas:lincoln",
"kansas:linn",
"kansas:logan",
"kansas:lyon",
"kansas:marion",
"kansas:marshall",
"kansas:mcpherson",
"kansas:meade",
"kansas:miami",
"kansas:mitchell",
"kansas:montgomery",
"kansas:morris",
"kansas:morton",
"kansas:nemaha",
"kansas:neosho",
"kansas:ness",
"kansas:norton",
"kansas:osage",
"kansas:osborne",
"kansas:ottawa",
"kansas:pawnee",
"kansas:phillips",
"kansas:pottawatomie",
"kansas:pratt",
"kansas:rawlins",
"kansas:reno",
"kansas:republic",
"kansas:rice",
"kansas:riley",
"kansas:rooks",
"kansas:rush",
"kansas:russell",
"kansas:saline",
"kansas:scott",
"kansas:sedgwick",
"kansas:seward",
"kansas:shawnee",
"kansas:sheridan",
"kansas:sherman",
"kansas:smith",
"kansas:stafford",
"kansas:stanton",
"kansas:stevens",
"kansas:sumner",
"kansas:thomas",
"kansas:trego",
"kansas:wabaunsee",
"kansas:wallace",
"kansas:washington",
"kansas:wichita",
"kansas:wilson",
"kansas:woodson",
"kansas:wyandotte",
"kentucky:adair",
"kentucky:allen",
"kentucky:anderson",
"kentucky:ballard",
"kentucky:barren",
"kentucky:bath",
"kentucky:bell",
"kentucky:boone",
"kentucky:bourbon",
"kentucky:boyd",
"kentucky:boyle",
"kentucky:bracken",
"kentucky:breathitt",
"kentucky:breckinridge",
"kentucky:bullitt",
"kentucky:butler",
"kentucky:caldwell",
"kentucky:calloway",
"kentucky:campbell",
"kentucky:carlisle",
"kentucky:carroll",
"kentucky:carter",
"kentucky:casey",
"kentucky:christian",
"kentucky:clark",
"kentucky:clay",
"kentucky:clinton",
"kentucky:crittenden",
"kentucky:cumberland",
"kentucky:daviess",
"kentucky:edmonson",
"kentucky:elliott",
"kentucky:estill",
"kentucky:fayette",
"kentucky:fleming",
"kentucky:floyd",
"kentucky:franklin",
"kentucky:fulton",
"kentucky:gallatin",
"kentucky:garrard",
"kentucky:grant",
"kentucky:graves",
"kentucky:grayson",
"kentucky:green",
"kentucky:greenup",
"kentucky:hancock",
"kentucky:hardin",
"kentucky:harlan",
"kentucky:harrison",
"kentucky:hart",
"kentucky:henderson",
"kentucky:henry",
"kentucky:hickman",
"kentucky:hopkins",
"kentucky:jackson",
"kentucky:jefferson",
"kentucky:jessamine",
"kentucky:johnson",
"kentucky:kenton",
"kentucky:knott",
"kentucky:knox",
"kentucky:larue",
"kentucky:laurel",
"kentucky:lawrence",
"kentucky:lee",
"kentucky:leslie",
"kentucky:letcher",
"kentucky:lewis",
"kentucky:lincoln",
"kentucky:livingston",
"kentucky:logan",
"kentucky:lyon",
"kentucky:madison",
"kentucky:magoffin",
"kentucky:marion",
"kentucky:marshall",
"kentucky:martin",
"kentucky:mason",
"kentucky:mccracken",
"kentucky:mccreary",
"kentucky:mclean",
"kentucky:meade",
"kentucky:menifee",
"kentucky:mercer",
"kentucky:metcalfe",
"kentucky:monroe",
"kentucky:montgomery",
"kentucky:morgan",
"kentucky:muhlenberg",
"kentucky:nelson",
"kentucky:nicholas",
"kentucky:ohio",
"kentucky:oldham",
"kentucky:owen",
"kentucky:owsley",
"kentucky:pendleton",
"kentucky:perry",
"kentucky:pike",
"kentucky:powell",
"kentucky:pulaski",
"kentucky:robertson",
"kentucky:rockcastle",
"kentucky:rowan",
"kentucky:russell",
"kentucky:scott",
"kentucky:shelby",
"kentucky:simpson",
"kentucky:spencer",
"kentucky:taylor",
"kentucky:todd",
"kentucky:trigg",
"kentucky:trimble",
"kentucky:union",
"kentucky:warren",
"kentucky:washington",
"kentucky:wayne",
"kentucky:webster",
"kentucky:whitley",
"kentucky:wolfe",
"kentucky:woodford",
"louisiana:acadia",
"louisiana:allen",
"louisiana:ascension",
"louisiana:assumption",
"louisiana:avoyelles",
"louisiana:beauregard",
"louisiana:bienville",
"louisiana:bossier",
"louisiana:caddo",
"louisiana:calcasieu",
"louisiana:caldwell",
"louisiana:cameron",
"louisiana:catahoula",
"louisiana:claiborne",
"louisiana:concordia",
"louisiana:de soto",
"louisiana:east baton rouge",
"louisiana:east carroll",
"louisiana:east feliciana",
"louisiana:evangeline",
"louisiana:franklin",
"louisiana:grant",
"louisiana:iberia",
"louisiana:iberville",
"louisiana:jackson",
"louisiana:jefferson",
"louisiana:<NAME>",
"louisiana:la salle",
"louisiana:lafayette",
"louisiana:lafourche",
"louisiana:lincoln",
"louisiana:livingston",
"louisiana:madison",
"louisiana:morehouse",
"louisiana:natchitoches",
"louisiana:orleans",
"louisiana:ouachita",
"louisiana:plaquemines",
"louisiana:pointe coupee",
"louisiana:rapides",
"louisiana:red river",
"louisiana:richland",
"louisiana:sabine",
"louisiana:st. bernard",
"louisiana:st. charles",
"louisiana:st. helena",
"louisiana:st. james",
"louisiana:st. john the baptist",
"louisiana:st. landry",
"louisiana:st. martin",
"louisiana:st. mary",
"louisiana:st. tammany",
"louisiana:tangipahoa",
"louisiana:tensas",
"louisiana:terrebonne",
"louisiana:union",
"louisiana:vermilion",
"louisiana:vernon",
"louisiana:washington",
"louisiana:webster",
"louisiana:west baton rouge",
"louisiana:west carroll",
"louisiana:west feliciana",
"louisiana:winn",
"maine:androscoggin",
"maine:aroostook",
"maine:cumberland",
"maine:franklin",
"maine:hancock",
"maine:kennebec",
"maine:knox",
"maine:lincoln",
"maine:oxford",
"maine:penobscot",
"maine:piscataquis",
"maine:sagadahoc",
"maine:somerset",
"maine:waldo",
"maine:washington",
"maine:york",
"massachusetts:barnstable",
"massachusetts:berkshire",
"massachusetts:bristol",
"massachusetts:dukes",
"massachusetts:essex",
"massachusetts:franklin",
"massachusetts:hampden",
"massachusetts:hampshire",
"massachusetts:middlesex",
"massachusetts:nantucket",
"massachusetts:norfolk",
"massachusetts:plymouth",
"massachusetts:suffolk",
"massachusetts:worcester",
"michigan:alcona",
"michigan:alger",
"michigan:allegan",
"michigan:alpena",
"michigan:antrim",
"michigan:arenac",
"michigan:baraga",
"michigan:barry",
"michigan:bay",
"michigan:benzie",
"michigan:berrien",
"michigan:branch",
"michigan:calhoun",
"michigan:cass",
"michigan:charlevoix",
"michigan:cheboygan",
"michigan:chippewa",
"michigan:clare",
"michigan:clinton",
"michigan:crawford",
"michigan:delta",
"michigan:dickinson",
"michigan:eaton",
"michigan:emmet",
"michigan:genesee",
"michigan:gladwin",
"michigan:gogebic",
"michigan:grand traverse",
"michigan:gratiot",
"michigan:hillsdale",
"michigan:houghton",
"michigan:huron",
"michigan:ingham",
"michigan:ionia",
"michigan:iosco",
"michigan:iron",
"michigan:isabella",
"michigan:jackson",
"michigan:kalamazoo",
"michigan:kalkaska",
"michigan:kent",
"michigan:keweenaw",
"michigan:lake",
"michigan:lapeer",
"michigan:leelanau",
"michigan:lenawee",
"michigan:livingston",
"michigan:luce",
"michigan:mackinac",
"michigan:macomb",
"michigan:manistee",
"michigan:marquette",
"michigan:mason",
"michigan:mecosta",
"michigan:menominee",
"michigan:midland",
"michigan:missaukee",
"michigan:monroe",
"michigan:montcalm",
"michigan:montmorency",
"michigan:muskegon",
"michigan:newaygo",
"michigan:oakland",
"michigan:oceana",
"michigan:ogemaw",
"michigan:ontonagon",
"michigan:osceola",
"michigan:oscoda",
"michigan:otsego",
"michigan:ottawa",
"michigan:presque isle",
"michigan:roscommon",
"michigan:saginaw",
"michigan:sanilac",
"michigan:schoolcraft",
"michigan:shiawassee",
"michigan:st. clair",
"michigan:st. joseph",
"michigan:tuscola",
"michigan:<NAME>",
"michigan:washtenaw",
"michigan:wayne",
"michigan:wexford",
"minnesota:aitkin",
"minnesota:anoka",
"minnesota:becker",
"minnesota:beltrami",
"minnesota:benton",
"minnesota:big stone",
"minnesota:blue earth",
"minnesota:brown",
"minnesota:carlton",
"minnesota:carver",
"minnesota:cass",
"minnesota:chippewa",
"minnesota:chisago",
"minnesota:clay",
"minnesota:clearwater",
"minnesota:cook",
"minnesota:cottonwood",
"minnesota:crow wing",
"minnesota:dakota",
"minnesota:dodge",
"minnesota:douglas",
"minnesota:faribault",
"minnesota:fillmore",
"minnesota:freeborn",
"minnesota:goodhue",
"minnesota:grant",
"minnesota:hennepin",
"minnesota:houston",
"minnesota:hubbard",
"minnesota:isanti",
"minnesota:itasca",
"minnesota:jackson",
"minnesota:kanabec",
"minnesota:kandiyohi",
"minnesota:kittson",
"minnesota:koochiching",
"minnesota:lac qui parle",
"minnesota:lake",
"minnesota:lake of the woods",
"minnesota:le sueur",
"minnesota:lincoln",
"minnesota:lyon",
"minnesota:mahnomen",
"minnesota:marshall",
"minnesota:martin",
"minnesota:mcleod",
"minnesota:meeker",
"minnesota:mille lacs",
"minnesota:morrison",
"minnesota:mower",
"minnesota:murray",
"minnesota:nicollet",
"minnesota:nobles",
"minnesota:norman",
"minnesota:olmsted",
"minnesota:otter tail",
"minnesota:pennington",
"minnesota:pine",
"minnesota:pipestone",
"minnesota:polk",
"minnesota:pope",
"minnesota:ramsey",
"minnesota:red lake",
"minnesota:redwood",
"minnesota:renville",
"minnesota:rice",
"minnesota:rock",
"minnesota:roseau",
"minnesota:scott",
"minnesota:sherburne",
"minnesota:sibley",
"minnesota:st. louis",
"minnesota:stearns",
"minnesota:steele",
"minnesota:stevens",
"minnesota:swift",
"minnesota:todd",
"minnesota:traverse",
"minnesota:wabasha",
"minnesota:wadena",
"minnesota:waseca",
"minnesota:washington",
"minnesota:watonwan",
"minnesota:wilkin",
"minnesota:winona",
"minnesota:wright",
"minnesota:yellow medicine",
"mississippi:adams",
"mississippi:alcorn",
"mississippi:amite",
"mississippi:attala",
"mississippi:benton",
"mississippi:bolivar",
"mississippi:calhoun",
"mississippi:carroll",
"mississippi:chickasaw",
"mississippi:choctaw",
"mississippi:claiborne",
"mississippi:clarke",
"mississippi:clay",
"mississippi:coahoma",
"mississippi:copiah",
"mississippi:covington",
"mississippi:desoto",
"mississippi:forrest",
"mississippi:franklin",
"mississippi:george",
"mississippi:greene",
"mississippi:grenada",
"mississippi:hancock",
"mississippi:harrison",
"mississippi:hinds",
"mississippi:holmes",
"mississippi:humphreys",
"mississippi:issaquena",
"mississippi:itawamba",
"mississippi:jackson",
"mississippi:jasper",
"mississippi:jefferson",
"mississippi:<NAME>",
"mississippi:jones",
"mississippi:kemper",
"mississippi:lafayette",
"mississippi:lamar",
"mississippi:lauderdale",
"mississippi:lawrence",
"mississippi:leake",
"mississippi:lee",
"mississippi:leflore",
"mississippi:lincoln",
"mississippi:lowndes",
"mississippi:madison",
"mississippi:marion",
"mississippi:marshall",
"mississippi:monroe",
"mississippi:montgomery",
"mississippi:neshoba",
"mississippi:newton",
"mississippi:noxubee",
"mississippi:oktibbeha",
"mississippi:panola",
"mississippi:<NAME>",
"mississippi:perry",
"mississippi:pike",
"mississippi:pontotoc",
"mississippi:prentiss",
"mississippi:quitman",
"mississippi:rankin",
"mississippi:scott",
"mississippi:sharkey",
"mississippi:simpson",
"mississippi:smith",
"mississippi:stone",
"mississippi:sunflower",
"mississippi:tallahatchie",
"mississippi:tate",
"mississippi:tippah",
"mississippi:tishomingo",
"mississippi:tunica",
"mississippi:union",
"mississippi:walthall",
"mississippi:warren",
"mississippi:washington",
"mississippi:wayne",
"mississippi:webster",
"mississippi:wilkinson",
"mississippi:winston",
"mississippi:yalobusha",
"mississippi:yazoo",
"missouri:adair",
"missouri:andrew",
"missouri:atchison",
"missouri:audrain",
"missouri:barry",
"missouri:barton",
"missouri:bates",
"missouri:benton",
"missouri:bollinger",
"missouri:boone",
"missouri:buchanan",
"missouri:butler",
"missouri:caldwell",
"missouri:callaway",
"missouri:camden",
"missouri:cape girardeau",
"missouri:carroll",
"missouri:carter",
"missouri:cass",
"missouri:cedar",
"missouri:chariton",
"missouri:christian",
"missouri:clark",
"missouri:clay",
"missouri:clinton",
"missouri:cole",
"missouri:cooper",
"missouri:crawford",
"missouri:dade",
"missouri:dallas",
"missouri:daviess",
"missouri:dekalb",
"missouri:dent",
"missouri:douglas",
"missouri:dunklin",
"missouri:franklin",
"missouri:gasconade",
"missouri:gentry",
"missouri:greene",
"missouri:grundy",
"missouri:harrison",
"missouri:henry",
"missouri:hickory",
"missouri:holt",
"missouri:howard",
"missouri:howell",
"missouri:iron",
"missouri:jackson",
"missouri:jasper",
"missouri:jefferson",
"missouri:johnson",
"missouri:knox",
"missouri:laclede",
"missouri:lafayette",
"missouri:lawrence",
"missouri:lewis",
"missouri:lincoln",
"missouri:linn",
"missouri:livingston",
"missouri:macon",
"missouri:madison",
"missouri:maries",
"missouri:marion",
"missouri:mcdonald",
"missouri:mercer",
"missouri:miller",
"missouri:mississippi",
"missouri:moniteau",
"missouri:monroe",
"missouri:montgomery",
"missouri:morgan",
"missouri:new madrid",
"missouri:newton",
"missouri:nodaway",
"missouri:oregon",
"missouri:osage",
"missouri:ozark",
"missouri:pemiscot",
"missouri:perry",
"missouri:pettis",
"missouri:phelps",
"missouri:pike",
"missouri:platte",
"missouri:polk",
"missouri:pulaski",
"missouri:putnam",
"missouri:ralls",
"missouri:randolph",
"missouri:ray",
"missouri:reynolds",
"missouri:ripley",
"missouri:saline",
"missouri:schuyler",
"missouri:scotland",
"missouri:scott",
"missouri:shannon",
"missouri:shelby",
"missouri:st. charles",
"missouri:st. clair",
"missouri:st. francois",
"missouri:st. louis",
"missouri:st. louis (city)",
"missouri:ste. genevieve",
"missouri:stoddard",
"missouri:stone",
"missouri:sullivan",
"missouri:taney",
"missouri:texas",
"missouri:vernon",
"missouri:warren",
"missouri:washington",
"missouri:wayne",
"missouri:webster",
"missouri:worth",
"missouri:wright",
"montana:beaverhead",
"montana:big horn",
"montana:blaine",
"montana:broadwater",
"montana:carbon",
"montana:carter",
"montana:cascade",
"montana:chouteau",
"montana:custer",
"montana:daniels",
"montana:dawson",
"montana:deer lodge",
"montana:fallon",
"montana:fergus",
"montana:flathead",
"montana:gallatin",
"montana:garfield",
"montana:glacier",
"montana:golden valley",
"montana:granite",
"montana:hill",
"montana:jefferson",
"montana:judith basin",
"montana:lake",
"montana:lewis and clark",
"montana:liberty",
"montana:lincoln",
"montana:madison",
"montana:mccone",
"montana:meagher",
"montana:mineral",
"montana:missoula",
"montana:musselshell",
"montana:park",
"montana:petroleum",
"montana:phillips",
"montana:pondera",
"montana:powder river",
"montana:powell",
"montana:prairie",
"montana:ravalli",
"montana:richland",
"montana:roosevelt",
"montana:rosebud",
"montana:sanders",
"montana:sheridan",
"montana:silver bow",
"montana:stillwater",
"montana:sweet grass",
"montana:teton",
"montana:toole",
"montana:treasure",
"montana:valley",
"montana:wheatland",
"montana:wibaux",
"montana:yellowstone",
"montana:yellowstone national park",
"nebraska:adams",
"nebraska:antelope",
"nebraska:arthur",
"nebraska:banner",
"nebraska:blaine",
"nebraska:boone",
"nebraska:box butte",
"nebraska:boyd",
"nebraska:brown",
"nebraska:buffalo",
"nebraska:burt",
"nebraska:butler",
"nebraska:cass",
"nebraska:cedar",
"nebraska:chase",
"nebraska:cherry",
"nebraska:cheyenne",
"nebraska:clay",
"nebraska:colfax",
"nebraska:cuming",
"nebraska:custer",
"nebraska:dakota",
"nebraska:dawes",
"nebraska:dawson",
"nebraska:deuel",
"nebraska:dixon",
"nebraska:dodge",
"nebraska:douglas",
"nebraska:dundy",
"nebraska:fillmore",
"nebraska:franklin",
"nebraska:frontier",
"nebraska:furnas",
"nebraska:gage",
"nebraska:garden",
"nebraska:garfield",
"nebraska:gosper",
"nebraska:grant",
"nebraska:greeley",
"nebraska:hall",
"nebraska:hamilton",
"nebraska:harlan",
"nebraska:hayes",
"nebraska:hitchcock",
"nebraska:holt",
"nebraska:hooker",
"nebraska:howard",
"nebraska:jefferson",
"nebraska:johnson",
"nebraska:kearney",
"nebraska:keith",
"nebraska:keya paha",
"nebraska:kimball",
"nebraska:knox",
"nebraska:lancaster",
"nebraska:lincoln",
"nebraska:logan",
"nebraska:loup",
"nebraska:madison",
"nebraska:mcpherson",
"nebraska:merrick",
"nebraska:morrill",
"nebraska:nance",
"nebraska:nemaha",
"nebraska:nuckolls",
"nebraska:otoe",
"nebraska:pawnee",
"nebraska:perkins",
"nebraska:phelps",
"nebraska:pierce",
"nebraska:platte",
"nebraska:polk",
"nebraska:<NAME>",
"nebraska:richardson",
"nebraska:rock",
"nebraska:saline",
"nebraska:sarpy",
"nebraska:saunders",
"nebraska:scotts bluff",
"nebraska:seward",
"nebraska:sheridan",
"nebraska:sherman",
"nebraska:sioux",
"nebraska:stanton",
"nebraska:thayer",
"nebraska:thomas",
"nebraska:thurston",
"nebraska:valley",
"nebraska:washington",
"nebraska:wayne",
"nebraska:webster",
"nebraska:wheeler",
"nebraska:york",
"nevada:carson city",
"nevada:churchill",
"nevada:clark",
"nevada:douglas",
"nevada:elko",
"nevada:esmeralda",
"nevada:eureka",
"nevada:humboldt",
"nevada:lander",
"nevada:lincoln",
"nevada:lyon",
"nevada:mineral",
"nevada:nye",
"nevada:pershing",
"nevada:storey",
"nevada:washoe",
"nevada:white pine",
"new hampshire:belknap",
"new hampshire:carroll",
"new hampshire:cheshire",
"new hampshire:coos",
"new hampshire:grafton",
"new hampshire:hillsborough",
"new hampshire:merrimack",
"new hampshire:rockingham",
"new hampshire:strafford",
"new hampshire:sullivan",
"new jersey:atlantic",
"new jersey:bergen",
"new jersey:burlington",
"new jersey:camden",
"new jersey:cape may",
"new jersey:cumberland",
"new jersey:essex",
"new jersey:gloucester",
"new jersey:hudson",
"new jersey:hunterdon",
"new jersey:mercer",
"new jersey:middlesex",
"new jersey:monmouth",
"new jersey:morris",
"new jersey:ocean",
"new jersey:passaic",
"new jersey:salem",
"new jersey:somerset",
"new jersey:sussex",
"new jersey:union",
"new jersey:warren",
"new mexico:bernalillo",
"new mexico:catron",
"new mexico:chaves",
"new mexico:cibola",
"new mexico:colfax",
"new mexico:curry",
"new mexico:debaca",
"new mexico:dona ana",
"new mexico:eddy",
"new mexico:grant",
"new mexico:guadalupe",
"new mexico:harding",
"new mexico:hidalgo",
"new mexico:lea",
"new mexico:lincoln",
"new mexico:los alamos",
"new mexico:luna",
"new mexico:mckinley",
"new mexico:mora",
"new mexico:otero",
"new mexico:quay",
"new mexico:rio arriba",
"new mexico:roosevelt",
"new mexico:san juan",
"new mexico:san miguel",
"new mexico:sandoval",
"new mexico:santa fe",
"new mexico:sierra",
"new mexico:socorro",
"new mexico:taos",
"new mexico:torrance",
"new mexico:union",
"new mexico:valencia",
"new york:albany",
"new york:allegany",
"new york:bronx",
"new york:broome",
"new york:cattaraugus",
"new york:cayuga",
"new york:chautauqua",
"new york:chemung",
"new york:chenango",
"new york:clinton",
"new york:columbia",
"new york:cortland",
"new york:delaware",
"new york:dutchess",
"new york:erie",
"new york:essex",
"new york:franklin",
"new york:fulton",
"new york:genesee",
"new york:greene",
"new york:hamilton",
"new york:herkimer",
"new york:jefferson",
"new york:kings",
"new york:lewis",
"new york:livingston",
"new york:madison",
"new york:monroe",
"new york:montgomery",
"new york:nassau",
"new york:new york",
"new york:niagara",
"new york:oneida",
"new york:onondaga",
"new york:ontario",
"new york:orange",
"new york:orleans",
"new york:oswego",
"new york:otsego",
"new york:putnam",
"new york:queens",
"new york:rensselaer",
"new york:richmond",
"new york:rockland",
"new york:saratoga",
"new york:schenectady",
"new york:schoharie",
"new york:schuyler",
"new york:seneca",
"new york:st. lawrence",
"new york:steuben",
"new york:suffolk",
"new york:sullivan",
"new york:tioga",
"new york:tompkins",
"new york:ulster",
"new york:warren",
"new york:washington",
"new york:wayne",
"new york:westchester",
"new york:wyoming",
"new york:yates",
"north carolina:alamance",
"north carolina:alexander",
"north carolina:alleghany",
"north carolina:anson",
"north carolina:ashe",
"north carolina:avery",
"north carolina:beaufort",
"north carolina:bertie",
"north carolina:bladen",
"north carolina:brunswick",
"north carolina:buncombe",
"north carolina:burke",
"north carolina:cabarrus",
"north carolina:caldwell",
"north carolina:camden",
"north carolina:carteret",
"north carolina:caswell",
"north carolina:catawba",
"north carolina:chatham",
"north carolina:cherokee",
"north carolina:chowan",
"north carolina:clay",
"north carolina:cleveland",
"north carolina:columbus",
"north carolina:craven",
"north carolina:cumberland",
"north carolina:currituck",
"north carolina:dare",
"north carolina:davidson",
"north carolina:davie",
"north carolina:duplin",
"north carolina:durham",
"north carolina:edgecombe",
"north carolina:forsyth",
"north carolina:franklin",
"north carolina:gaston",
"north carolina:gates",
"north carolina:graham",
"north carolina:granville",
"north carolina:greene",
"north carolina:guilford",
"north carolina:halifax",
"north carolina:harnett",
"north carolina:haywood",
"north carolina:henderson",
"north carolina:hertford",
"north carolina:hoke",
"north carolina:hyde",
"north carolina:iredell",
"north carolina:jackson",
"north carolina:johnston",
"north carolina:jones",
"north carolina:lee",
"north carolina:lenoir",
"north carolina:lincoln",
"north carolina:macon",
"north carolina:madison",
"north carolina:martin",
"north carolina:mcdowell",
"north carolina:mecklenburg",
"north carolina:mitchell",
"north carolina:montgomery",
"north carolina:moore",
"north carolina:nash",
"north carolina:new hanover",
"north carolina:northampton",
"north carolina:onslow",
"north carolina:orange",
"north carolina:pamlico",
"north carolina:pasquotank",
"north carolina:pender",
"north carolina:perquimans",
"north carolina:person",
"north carolina:pitt",
"north carolina:polk",
"north carolina:randolph",
"north carolina:richmond",
"north carolina:robeson",
"north carolina:rockingham",
"north carolina:rowan",
"north carolina:rutherford",
"north carolina:sampson",
"north carolina:scotland",
"north carolina:stanly",
"north carolina:stokes",
"north carolina:surry",
"north carolina:swain",
"north carolina:transylvania",
"north carolina:tyrrell",
"north carolina:union",
"north carolina:vance",
"north carolina:wake",
"north carolina:warren",
"north carolina:washington",
"north carolina:watauga",
"north carolina:wayne",
"north carolina:wilkes",
"north carolina:wilson",
"north carolina:yadkin",
"north carolina:yancey",
"north dakota:adams",
"north dakota:barnes",
"north dakota:benson",
"north dakota:billings",
"north dakota:bottineau",
"north dakota:bowman",
"north dakota:burke",
"north dakota:burleigh",
"north dakota:cass",
"north dakota:cavalier",
"north dakota:dickey",
"north dakota:divide",
"north dakota:dunn",
"north dakota:eddy",
"north dakota:emmons",
"north dakota:foster",
"north dakota:golden valley",
"north dakota:grand forks",
"north dakota:grant",
"north dakota:griggs",
"north dakota:hettinger",
"north dakota:kidder",
"north dakota:lamoure",
"north dakota:logan",
"north dakota:mchenry",
"north dakota:mcintosh",
"north dakota:mckenzie",
"north dakota:mclean",
"north dakota:mercer",
"north dakota:morton",
"north dakota:mountrail",
"north dakota:nelson",
"north dakota:oliver",
"north dakota:pembina",
"north dakota:pierce",
"north dakota:ramsey",
"north dakota:ransom",
"north dakota:renville",
"north dakota:richland",
"north dakota:rolette",
"north dakota:sargent",
"north dakota:sheridan",
"north dakota:sioux",
"north dakota:slope",
"north dakota:stark",
"north dakota:steele",
"north dakota:stutsman",
"north dakota:towner",
"north dakota:traill",
"north dakota:walsh",
"north dakota:ward",
"north dakota:wells",
"north dakota:williams",
"ohio:adams",
"ohio:allen",
"ohio:ashland",
"ohio:ashtabula",
"ohio:athens",
"ohio:auglaize",
"ohio:belmont",
"ohio:brown",
"ohio:butler",
"ohio:carroll",
"ohio:champaign",
"ohio:clark",
"ohio:clermont",
"ohio:clinton",
"ohio:columbiana",
"ohio:coshocton",
"ohio:crawford",
"ohio:cuyahoga",
"ohio:darke",
"ohio:defiance",
"ohio:delaware",
"ohio:erie",
"ohio:fairfield",
"ohio:fayette",
"ohio:franklin",
"ohio:fulton",
"ohio:gallia",
"ohio:geauga",
"ohio:greene",
"ohio:guernsey",
"ohio:hamilton",
"ohio:hancock",
"ohio:hardin",
"ohio:harrison",
"ohio:henry",
"ohio:highland",
"ohio:hocking",
"ohio:holmes",
"ohio:huron",
"ohio:jackson",
"ohio:jefferson",
"ohio:knox",
"ohio:lake",
"ohio:lawrence",
"ohio:licking",
"ohio:logan",
"ohio:lorain",
"ohio:lucas",
"ohio:madison",
"ohio:mahoning",
"ohio:marion",
"ohio:medina",
"ohio:meigs",
"ohio:mercer",
"ohio:miami",
"ohio:monroe",
"ohio:montgomery",
"ohio:morgan",
"ohio:morrow",
"ohio:muskingum",
"ohio:noble",
"ohio:ottawa",
"ohio:paulding",
"ohio:perry",
"ohio:pickaway",
"ohio:pike",
"ohio:portage",
"ohio:preble",
"ohio:putnam",
"ohio:richland",
"ohio:ross",
"ohio:sandusky",
"ohio:scioto",
"ohio:seneca",
"ohio:shelby",
"ohio:stark",
"ohio:summit",
"ohio:trumbull",
"ohio:tuscarawas",
"ohio:union",
"ohio:<NAME>",
"ohio:vinton",
"ohio:warren",
"ohio:washington",
"ohio:wayne",
"ohio:williams",
"ohio:wood",
"ohio:wyandot",
"oklahoma:adair",
"oklahoma:alfalfa",
"oklahoma:atoka",
"oklahoma:beaver",
"oklahoma:beckham",
"oklahoma:blaine",
"oklahoma:bryan",
"oklahoma:caddo",
"oklahoma:canadian",
"oklahoma:carter",
"oklahoma:cherokee",
"oklahoma:choctaw",
"oklahoma:cimarron",
"oklahoma:cleveland",
"oklahoma:coal",
"oklahoma:comanche",
"oklahoma:cotton",
"oklahoma:craig",
"oklahoma:creek",
"oklahoma:custer",
"oklahoma:delaware",
"oklahoma:dewey",
"oklahoma:ellis",
"oklahoma:garfield",
"oklahoma:garvin",
"oklahoma:grady",
"oklahoma:grant",
"oklahoma:greer",
"oklahoma:harmon",
"oklahoma:harper",
"oklahoma:haskell",
"oklahoma:hughes",
"oklahoma:jackson",
"oklahoma:jefferson",
"oklahoma:johnston",
"oklahoma:kay",
"oklahoma:kingfisher",
"oklahoma:kiowa",
"oklahoma:latimer",
"oklahoma:le flore",
"oklahoma:lincoln",
"oklahoma:logan",
"oklahoma:love",
"oklahoma:major",
"oklahoma:marshall",
"oklahoma:mayes",
"oklahoma:mcclain",
"oklahoma:mccurtain",
"oklahoma:mcintosh",
"oklahoma:murray",
"oklahoma:muskogee",
"oklahoma:noble",
"oklahoma:nowata",
"oklahoma:okfuskee",
"oklahoma:oklahoma",
"oklahoma:okmulgee",
"oklahoma:osage",
"oklahoma:ottawa",
"oklahoma:pawnee",
"oklahoma:payne",
"oklahoma:pittsburg",
"oklahoma:pontotoc",
"oklahoma:pottawatomie",
"oklahoma:pushmataha",
"oklahoma:roger mills",
"oklahoma:rogers",
"oklahoma:seminole",
"oklahoma:sequoyah",
"oklahoma:stephens",
"oklahoma:texas",
"oklahoma:tillman",
"oklahoma:tulsa",
"oklahoma:wagoner",
"oklahoma:washington",
"oklahoma:washita",
"oklahoma:woods",
"oklahoma:woodward",
"oregon:baker",
"oregon:benton",
"oregon:clackamas",
"oregon:clatsop",
"oregon:columbia",
"oregon:coos",
"oregon:crook",
"oregon:curry",
"oregon:deschutes",
"oregon:douglas",
"oregon:gilliam",
"oregon:grant",
"oregon:harney",
"oregon:hood river",
"oregon:jackson",
"oregon:jefferson",
"oregon:josephine",
"oregon:klamath",
"oregon:lake",
"oregon:lane",
"oregon:lincoln",
"oregon:linn",
"oregon:malheur",
"oregon:marion",
"oregon:morrow",
"oregon:multnomah",
"oregon:polk",
"oregon:sherman",
"oregon:tillamook",
"oregon:umatilla",
"oregon:union",
"oregon:wallowa",
"oregon:wasco",
"oregon:washington",
"oregon:wheeler",
"oregon:yamhill",
"pennsylvania:adams",
"pennsylvania:allegheny",
"pennsylvania:armstrong",
"pennsylvania:beaver",
"pennsylvania:bedford",
"pennsylvania:berks",
"pennsylvania:blair",
"pennsylvania:bradford",
"pennsylvania:bucks",
"pennsylvania:butler",
"pennsylvania:cambria",
"pennsylvania:cameron",
"pennsylvania:carbon",
"pennsylvania:centre",
"pennsylvania:chester",
"pennsylvania:clarion",
"pennsylvania:clearfield",
"pennsylvania:clinton",
"pennsylvania:columbia",
"pennsylvania:crawford",
"pennsylvania:cumberland",
"pennsylvania:dauphin",
"pennsylvania:delaware",
"pennsylvania:elk",
"pennsylvania:erie",
"pennsylvania:fayette",
"pennsylvania:forest",
"pennsylvania:franklin",
"pennsylvania:fulton",
"pennsylvania:greene",
"pennsylvania:huntingdon",
"pennsylvania:indiana",
"pennsylvania:jefferson",
"pennsylvania:juniata",
"pennsylvania:lackawanna",
"pennsylvania:lancaster",
"pennsylvania:lawrence",
"pennsylvania:lebanon",
"pennsylvania:lehigh",
"pennsylvania:luzerne",
"pennsylvania:lycoming",
"pennsylvania:mckean",
"pennsylvania:mercer",
"pennsylvania:mifflin",
"pennsylvania:monroe",
"pennsylvania:montgomery",
"pennsylvania:montour",
"pennsylvania:northampton",
"pennsylvania:northumberland",
"pennsylvania:perry",
"pennsylvania:philadelphia",
"pennsylvania:pike",
"pennsylvania:potter",
"pennsylvania:schuylkill",
"pennsylvania:snyder",
"pennsylvania:somerset",
"pennsylvania:sullivan",
"pennsylvania:susquehanna",
"pennsylvania:tioga",
"pennsylvania:union",
"pennsylvania:venango",
"pennsylvania:warren",
"pennsylvania:washington",
"pennsylvania:wayne",
"pennsylvania:westmoreland",
"pennsylvania:wyoming",
"pennsylvania:york",
"rhode island:bristol",
"rhode island:kent",
"rhode island:newport",
"rhode island:providence",
"rhode island:washington",
"south carolina:abbeville",
"south carolina:aiken",
"south carolina:allendale",
"south carolina:anderson",
"south carolina:bamberg",
"south carolina:barnwell",
"south carolina:beaufort",
"south carolina:berkeley",
"south carolina:calhoun",
"south carolina:charleston",
"south carolina:cherokee",
"south carolina:chester",
"south carolina:chesterfield",
"south carolina:clarendon",
"south carolina:colleton",
"south carolina:darlington",
"south carolina:dillon",
"south carolina:dorchester",
"south carolina:edgefield",
"south carolina:fairfield",
"south carolina:florence",
"south carolina:georgetown",
"south carolina:greenville",
"south carolina:greenwood",
"south carolina:hampton",
"south carolina:horry",
"south carolina:jasper",
"south carolina:kershaw",
"south carolina:lancaster",
"south carolina:laurens",
"south carolina:lee",
"south carolina:lexington",
"south carolina:marion",
"south carolina:marlboro",
"south carolina:mccormick",
"south carolina:newberry",
"south carolina:oconee",
"south carolina:orangeburg",
"south carolina:pickens",
"south carolina:richland",
"south carolina:saluda",
"south carolina:spartanburg",
"south carolina:sumter",
"south carolina:union",
"south carolina:williamsburg",
"south carolina:york",
"south-dakota:aurora",
"south-dakota:beadle",
"south-dakota:bennett",
"south-dakota:bon homme",
"south-dakota:brookings",
"south-dakota:brown",
"south-dakota:brule",
"south-dakota:buffalo",
"south-dakota:butte",
"south-dakota:campbell",
"south-dakota:charles mix",
"south-dakota:clark",
"south-dakota:clay",
"south-dakota:codington",
"south-dakota:corson",
"south-dakota:custer",
"south-dakota:davison",
"south-dakota:day",
"south-dakota:deuel",
"south-dakota:dewey",
"south-dakota:douglas",
"south-dakota:edmunds",
"south-dakota:fall river",
"south-dakota:faulk",
"south-dakota:grant",
"south-dakota:gregory",
"south-dakota:haakon",
"south-dakota:hamlin",
"south-dakota:hand",
"south-dakota:hanson",
"south-dakota:harding",
"south-dakota:hughes",
"south-dakota:hutchinson",
"south-dakota:hyde",
"south-dakota:jackson",
"south-dakota:jerauld",
"south-dakota:jones",
"south-dakota:kingsbury",
"south-dakota:lake",
"south-dakota:lawrence",
"south-dakota:lincoln",
"south-dakota:lyman",
"south-dakota:marshall",
"south-dakota:mccook",
"south-dakota:mcpherson",
"south-dakota:meade",
"south-dakota:mellette",
"south-dakota:miner",
"south-dakota:minnehaha",
"south-dakota:moody",
"south-dakota:pennington",
"south-dakota:perkins",
"south-dakota:potter",
"south-dakota:roberts",
"south-dakota:sanborn",
"south-dakota:shannon",
"south-dakota:spink",
"south-dakota:stanley",
"south-dakota:sully",
"south-dakota:todd",
"south-dakota:tripp",
"south-dakota:turner",
"south-dakota:union",
"south-dakota:walworth",
"south-dakota:yankton",
"south-dakota:ziebach",
"tennessee:anderson",
"tennessee:bedford",
"tennessee:benton",
"tennessee:bledsoe",
"tennessee:blount",
"tennessee:bradley",
"tennessee:campbell",
"tennessee:cannon",
"tennessee:carroll",
"tennessee:carter",
"tennessee:cheatham",
"tennessee:chester",
"tennessee:claiborne",
"tennessee:clay",
"tennessee:cocke",
"tennessee:coffee",
"tennessee:crockett",
"tennessee:cumberland",
"tennessee:davidson",
"tennessee:dekalb",
"tennessee:decatur",
"tennessee:dickson",
"tennessee:dyer",
"tennessee:fayette",
"tennessee:fentress",
"tennessee:franklin",
"tennessee:gibson",
"tennessee:giles",
"tennessee:grainger",
"tennessee:greene",
"tennessee:grundy",
"tennessee:hamblen",
"tennessee:hamilton",
"tennessee:hancock",
"tennessee:hardeman",
"tennessee:hardin",
"tennessee:hawkins",
"tennessee:haywood",
"tennessee:henderson",
"tennessee:henry",
"tennessee:hickman",
"tennessee:houston",
"tennessee:humphreys",
"tennessee:jackson",
"tennessee:jefferson",
"tennessee:johnson",
"tennessee:knox",
"tennessee:lake",
"tennessee:lauderdale",
"tennessee:lawrence",
"tennessee:lewis",
"tennessee:lincoln",
"tennessee:loudon",
"tennessee:macon",
"tennessee:madison",
"tennessee:marion",
"tennessee:marshall",
"tennessee:maury",
"tennessee:mcminn",
"tennessee:mcnairy",
"tennessee:meigs",
"tennessee:monroe",
"tennessee:montgomery",
"tennessee:moore",
"tennessee:morgan",
"tennessee:obion",
"tennessee:overton",
"tennessee:perry",
"tennessee:pickett",
"tennessee:polk",
"tennessee:putnam",
"tennessee:rhea",
"tennessee:roane",
"tennessee:robertson",
"tennessee:rutherford",
"tennessee:scott",
"tennessee:sequatchie",
"tennessee:sevier",
"tennessee:shelby",
"tennessee:smith",
"tennessee:stewart",
"tennessee:sullivan",
"tennessee:sumner",
"tennessee:tipton",
"tennessee:trousdale",
"tennessee:unicoi",
"tennessee:union",
"tennessee:<NAME>",
"tennessee:warren",
"tennessee:washington",
"tennessee:wayne",
"tennessee:weakley",
"tennessee:white",
"tennessee:williamson",
"tennessee:wilson",
"texas:anderson",
"texas:andrews",
"texas:angelina",
"texas:aransas",
"texas:archer",
"texas:armstrong",
"texas:atascosa",
"texas:austin",
"texas:bailey",
"texas:bandera",
"texas:bastrop",
"texas:baylor",
"texas:bee",
"texas:bell",
"texas:bexar",
"texas:blanco",
"texas:borden",
"texas:bosque",
"texas:bowie",
"texas:brazoria",
"texas:brazos",
"texas:brewster",
"texas:briscoe",
"texas:brooks",
"texas:brown",
"texas:burleson",
"texas:burnet",
"texas:caldwell",
"texas:calhoun",
"texas:callahan",
"texas:cameron",
"texas:camp",
"texas:carson",
"texas:cass",
"texas:castro",
"texas:chambers",
"texas:cherokee",
"texas:childress",
"texas:clay",
"texas:cochran",
"texas:coke",
"texas:coleman",
"texas:collin",
"texas:collingsworth",
"texas:colorado",
"texas:comal",
"texas:comanche",
"texas:concho",
"texas:cooke",
"texas:coryell",
"texas:cottle",
"texas:crane",
"texas:crockett",
"texas:crosby",
"texas:culberson",
"texas:dallam",
"texas:dallas",
"texas:dawson",
"texas:dewitt",
"texas:deaf smith",
"texas:delta",
"texas:denton",
"texas:dickens",
"texas:dimmit",
"texas:donley",
"texas:duval",
"texas:eastland",
"texas:ector",
"texas:edwards",
"texas:el paso",
"texas:ellis",
"texas:erath",
"texas:falls",
"texas:fannin",
"texas:fayette",
"texas:fisher",
"texas:floyd",
"texas:foard",
"texas:fort bend",
"texas:franklin",
"texas:freestone",
"texas:frio",
"texas:gaines",
"texas:galveston",
"texas:garza",
"texas:gillespie",
"texas:glasscock",
"texas:goliad",
"texas:gonzales",
"texas:gray",
"texas:grayson",
"texas:gregg",
"texas:grimes",
"texas:guadalupe",
"texas:hale",
"texas:hall",
"texas:hamilton",
"texas:hansford",
"texas:hardeman",
"texas:hardin",
"texas:harris",
"texas:harrison",
"texas:hartley",
"texas:haskell",
"texas:hays",
"texas:hemphill",
"texas:henderson",
"texas:hidalgo",
"texas:hill",
"texas:hockley",
"texas:hood",
"texas:hopkins",
"texas:houston",
"texas:howard",
"texas:hudspeth",
"texas:hunt",
"texas:hutchinson",
"texas:irion",
"texas:jack",
"texas:jackson",
"texas:jasper",
"texas:<NAME>",
"texas:jefferson",
"texas:jim hogg",
"texas:<NAME>",
"texas:johnson",
"texas:jones",
"texas:karnes",
"texas:kaufman",
"texas:kendall",
"texas:kenedy",
"texas:kent",
"texas:kerr",
"texas:kimble",
"texas:king",
"texas:kinney",
"texas:kleberg",
"texas:knox",
"texas:<NAME>",
"texas:lamar",
"texas:lamb",
"texas:lampasas",
"texas:lavaca",
"texas:lee",
"texas:leon",
"texas:liberty",
"texas:limestone",
"texas:lipscomb",
"texas:live oak",
"texas:llano",
"texas:loving",
"texas:lubbock",
"texas:lynn",
"texas:madison",
"texas:marion",
"texas:martin",
"texas:mason",
"texas:matagorda",
"texas:maverick",
"texas:mcculloch",
"texas:mclennan",
"texas:mcmullen",
"texas:medina",
"texas:menard",
"texas:midland",
"texas:milam",
"texas:mills",
"texas:mitchell",
"texas:montague",
"texas:montgomery",
"texas:moore",
"texas:morris",
"texas:motley",
"texas:nacogdoches",
"texas:navarro",
"texas:newton",
"texas:nolan",
"texas:nueces",
"texas:ochiltree",
"texas:oldham",
"texas:orange",
"texas:p<NAME>",
"texas:panola",
"texas:parker",
"texas:parmer",
"texas:pecos",
"texas:polk",
"texas:potter",
"texas:presidio",
"texas:rains",
"texas:randall",
"texas:reagan",
"texas:real",
"texas:<NAME>",
"texas:reeves",
"texas:refugio",
"texas:roberts",
"texas:robertson",
"texas:rockwall",
"texas:runnels",
"texas:rusk",
"texas:sabine",
"texas:san augustine",
"texas:<NAME>",
"texas:san patricio",
"texas:san saba",
"texas:schleicher",
"texas:scurry",
"texas:shackelford",
"texas:shelby",
"texas:sherman",
"texas:smith",
"texas:somervell",
"texas:starr",
"texas:stephens",
"texas:sterling",
"texas:stonewall",
"texas:sutton",
"texas:swisher",
"texas:tarrant",
"texas:taylor",
"texas:terrell",
"texas:terry",
"texas:throckmorton",
"texas:titus",
"texas:tom green",
"texas:travis",
"texas:trinity",
"texas:tyler",
"texas:upshur",
"texas:upton",
"texas:uvalde",
"texas:val verde",
"texas:van zandt",
"texas:victoria",
"texas:walker",
"texas:waller",
"texas:ward",
"texas:washington",
"texas:webb",
"texas:wharton",
"texas:wheeler",
"texas:wichita",
"texas:wilbarger",
"texas:willacy",
"texas:williamson",
"texas:wilson",
"texas:winkler",
"texas:wise",
"texas:wood",
"texas:yoakum",
"texas:young",
"texas:zapata",
"texas:zavala",
"utah:beaver",
"utah:box elder",
"utah:cache",
"utah:carbon",
"utah:daggett",
"utah:davis",
"utah:duchesne",
"utah:emery",
"utah:garfield",
"utah:grand",
"utah:iron",
"utah:juab",
"utah:kane",
"utah:millard",
"utah:morgan",
"utah:piute",
"utah:rich",
"utah:salt lake",
"utah:san juan",
"utah:sanpete",
"utah:sevier",
"utah:summit",
"utah:tooele",
"utah:uintah",
"utah:utah",
"utah:wasatch",
"utah:washington",
"utah:wayne",
"utah:weber",
"vermont:addison",
"vermont:bennington",
"vermont:caledonia",
"vermont:chittenden",
"vermont:essex",
"vermont:franklin",
"vermont:grand isle",
"vermont:lamoille",
"vermont:orange",
"vermont:orleans",
"vermont:rutland",
"vermont:washington",
"vermont:windham",
"vermont:windsor",
"virginia:accomack",
"virginia:albemarle",
"virginia:alexandria (city)",
"virginia:alleghany",
"virginia:amelia",
"virginia:amherst",
"virginia:appomattox",
"virginia:arlington",
"virginia:augusta",
"virginia:bath",
"virginia:bedford",
"virginia:bedford (city)",
"virginia:bland",
"virginia:botetourt",
"virginia:bristol (city)",
"virginia:brunswick",
"virginia:buchanan",
"virginia:buckingham",
"virginia:buena vista (city)",
"virginia:campbell",
"virginia:caroline",
"virginia:carroll",
"virginia:charles city",
"virginia:charlotte",
"virginia:charlottesville (city)",
"virginia:chesapeake (city)",
"virginia:chesterfield",
"virginia:clarke",
"virginia:clifton forge (city)",
"virginia:colonial heights (city)",
"virginia:covington (city)",
"virginia:craig",
"virginia:culpeper",
"virginia:cumberland",
"virginia:danville (city)",
"virginia:dickenson",
"virginia:dinwiddie",
"virginia:emporia (city)",
"virginia:essex",
"virginia:fairfax",
"virginia:fairfax (city)",
"virginia:falls church (city)",
"virginia:fauquier",
"virginia:floyd",
"virginia:fluvanna",
"virginia:franklin",
"virginia:franklin (city)",
"virginia:frederick",
"virginia:fredericksburg (city)",
"virginia:galax (city)",
"virginia:giles",
"virginia:gloucester",
"virginia:goochland",
"virginia:grayson",
"virginia:greene",
"virginia:greensville",
"virginia:halifax",
"virginia:hampton (city)",
"virginia:hanover",
"virginia:harrisonburg (city)",
"virginia:henrico",
"virginia:henry",
"virginia:highland",
"virginia:hopewell (city)",
"virginia:isle of wight",
"virginia:james city",
"virginia:king george",
"virginia:king william",
"virginia:king and queen",
"virginia:lancaster",
"virginia:lee",
"virginia:lexington (city)",
"virginia:loudoun",
"virginia:louisa",
"virginia:lunenburg",
"virginia:lynchburg (city)",
"virginia:madison",
"virginia:manassas (city)",
"virginia:manassas park (city)",
"virginia:martinsville (city)",
"virginia:mathews",
"virginia:mecklenburg",
"virginia:middlesex",
"virginia:montgomery",
"virginia:nelson",
"virginia:new kent",
"virginia:newport news (city)",
"virginia:norfolk (city)",
"virginia:northampton",
"virginia:northumberland",
"virginia:norton (city)",
"virginia:nottoway",
"virginia:orange",
"virginia:page",
"virginia:patrick",
"virginia:petersburg (city)",
"virginia:pittsylvania",
"virginia:poquoson (city)",
"virginia:portsmouth (city)",
"virginia:powhatan",
"virginia:<NAME>",
"virginia:pr<NAME>",
"virginia:<NAME>",
"virginia:pulaski",
"virginia:radford (city)",
"virginia:rappahannock",
"virginia:richmond",
"virginia:richmond (city)",
"virginia:roanoke",
"virginia:roanoke (city)",
"virginia:rockbridge",
"virginia:rockingham",
"virginia:russell",
"virginia:salem (city)",
"virginia:scott",
"virginia:shenandoah",
"virginia:smyth",
"virginia:south boston (city)",
"virginia:southampton",
"virginia:spotsylvania",
"virginia:stafford",
"virginia:staunton (city)",
"virginia:suffolk (city)",
"virginia:surry",
"virginia:sussex",
"virginia:tazewell",
"virginia:virginia beach (city)",
"virginia:warren",
"virginia:washington",
"virginia:waynesboro (city)",
"virginia:westmoreland",
"virginia:williamsburg (city)",
"virginia:winchester (city)",
"virginia:wise",
"virginia:wythe",
"virginia:york",
"washington:adams",
"washington:asotin",
"washington:benton",
"washington:chelan",
"washington:clallam",
"washington:clark",
"washington:columbia",
"washington:cowlitz",
"washington:douglas",
"washington:ferry",
"washington:franklin",
"washington:garfield",
"washington:grant",
"washington:grays harbor",
"washington:island",
"washington:jefferson",
"washington:king",
"washington:kitsap",
"washington:kittitas",
"washington:klickitat",
"washington:lewis",
"washington:lincoln",
"washington:mason",
"washington:okanogan",
"washington:pacific",
"washington:pend oreille",
"washington:pierce",
"washington:san juan",
"washington:skagit",
"washington:skamania",
"washington:snohomish",
"washington:spokane",
"washington:stevens",
"washington:thurston",
"washington:wahkiakum",
"washington:walla walla",
"washington:whatcom",
"washington:whitman",
"washington:yakima",
"west-virginia:barbour",
"west-virginia:berkeley",
"west-virginia:boone",
"west-virginia:braxton",
"west-virginia:brooke",
"west-virginia:cabell",
"west-virginia:calhoun",
"west-virginia:clay",
"west-virginia:doddridge",
"west-virginia:fayette",
"west-virginia:gilmer",
"west-virginia:grant",
"west-virginia:greenbrier",
"west-virginia:hampshire",
"west-virginia:hancock",
"west-virginia:hardy",
"west-virginia:harrison",
"west-virginia:jackson",
"west-virginia:jefferson",
"west-virginia:kanawha",
"west-virginia:lewis",
"west-virginia:lincoln",
"west-virginia:logan",
"west-virginia:marion",
"west-virginia:marshall",
"west-virginia:mason",
"west-virginia:mcdowell",
"west-virginia:mercer",
"west-virginia:mineral",
"west-virginia:mingo",
"west-virginia:monongalia",
"west-virginia:monroe",
"west-virginia:morgan",
"west-virginia:nicholas",
"west-virginia:ohio",
"west-virginia:pendleton",
"west-virginia:pleasants",
"west-virginia:pocahontas",
"west-virginia:preston",
"west-virginia:putnam",
"west-virginia:raleigh",
"west-virginia:randolph",
"west-virginia:ritchie",
"west-virginia:roane",
"west-virginia:summers",
"west-virginia:taylor",
"west-virginia:tucker",
"west-virginia:tyler",
"west-virginia:upshur",
"west-virginia:wayne",
"west-virginia:webster",
"west-virginia:wetzel",
"west-virginia:wirt",
"west-virginia:wood",
"west-virginia:wyoming",
"wisconsin:adams",
"wisconsin:ashland",
"wisconsin:barron",
"wisconsin:bayfield",
"wisconsin:brown",
"wisconsin:buffalo",
"wisconsin:burnett",
"wisconsin:calumet",
"wisconsin:chippewa",
"wisconsin:clark",
"wisconsin:columbia",
"wisconsin:crawford",
"wisconsin:dane",
"wisconsin:dodge",
"wisconsin:door",
"wisconsin:douglas",
"wisconsin:dunn",
"wisconsin:eau claire",
"wisconsin:florence",
"wisconsin:fond du lac",
"wisconsin:forest",
"wisconsin:grant",
"wisconsin:green",
"wisconsin:green lake",
"wisconsin:iowa",
"wisconsin:iron",
"wisconsin:jackson",
"wisconsin:jefferson",
"wisconsin:juneau",
"wisconsin:kenosha",
"wisconsin:kewaunee",
"wisconsin:la crosse",
"wisconsin:lafayette",
"wisconsin:langlade",
"wisconsin:lincoln",
"wisconsin:manitowoc",
"wisconsin:marathon",
"wisconsin:marinette",
"wisconsin:marquette",
"wisconsin:menominee",
"wisconsin:milwaukee",
"wisconsin:monroe",
"wisconsin:oconto",
"wisconsin:oneida",
"wisconsin:outagamie",
"wisconsin:ozaukee",
"wisconsin:pepin",
"wisconsin:pierce",
"wisconsin:polk",
"wisconsin:portage",
"wisconsin:price",
"wisconsin:racine",
"wisconsin:richland",
"wisconsin:rock",
"wisconsin:rusk",
"wisconsin:sauk",
"wisconsin:sawyer",
"wisconsin:shawano",
"wisconsin:sheboygan",
"wisconsin:st. croix",
"wisconsin:taylor",
"wisconsin:trempealeau",
"wisconsin:vernon",
"wisconsin:vilas",
"wisconsin:walworth",
"wisconsin:washburn",
"wisconsin:washington",
"wisconsin:waukesha",
"wisconsin:waupaca",
"wisconsin:waushara",
"wisconsin:winnebago",
"wisconsin:wood",
"wyoming:albany",
"wyoming:big horn",
"wyoming:campbell",
"wyoming:carbon",
"wyoming:converse",
"wyoming:crook",
"wyoming:fremont",
"wyoming:goshen",
"wyoming:hot springs",
"wyoming:johnson",
"wyoming:laramie",
"wyoming:lincoln",
"wyoming:natrona",
"wyoming:niobrara",
"wyoming:park",
"wyoming:platte",
"wyoming:sheridan",
"wyoming:sublette",
"wyoming:sweetwater",
"wyoming:teton",
"wyoming:uinta",
"wyoming:washakie",
"wyoming:weston",
]
# Placeholder for country-level locale choices; currently unpopulated.
COUNTRIES = []
# Wetland indicator statuses used as choices for WetlandEcology below.
# Entries prefixed with "--" are individual indicator codes; the first and
# last entries are umbrella "with/without wetland status" options.
WETLAND_STATUSES = [
    "with wetland status",
    "--obl (obligate wetland)",
    "--obl? (possibly obligate wetland)",
    "--facw+ (facultative wetland+)",
    "--facw+? (possibly facultative wetland+)",
    "--facw (facultative wetland)",
    "--facw? (possibly facultative wetland)",
    "--facw- (facultative wetland-)",
    "--facw-? (possibly facultative wetland-)",
    "--fac+ (facultative+)",
    "--fac+? (possibly facultative+)",
    "--fac (facultative)",
    "--fac? (possibly facultative)",
    "--fac- (facultative-)",
    "--fac-? (possibly facultative-)",
    "--facu+ (facultative upland+)",
    "--facu (facultative upland)",
    "--facu? (possibly facultative upland)",
    "--facu- (facultative upland-)",
    "--upl (obligate upland)",
    "without wetland status (upland plants)",
]
class WetlandEcology(Model):
    """Ecological information specific to wetlands."""
    # Identity mapping: every wetland status string is both the stored
    # value and its display label.
    native_wetland_indicator = ListProp(StringProp(
        choices=dict(zip(WETLAND_STATUSES, WETLAND_STATUSES))))
    # U.S. wetland regions; again each choice maps to itself.
    us_wetland_region = ListProp(StringProp(choices={
        "region 1 (northeast)": "region 1 (northeast)",
        "region 2 (southeast)": "region 2 (southeast)",
        "region 3 (north central)": "region 3 (north central)",
        "region 4 (north plains)": "region 4 (north plains)",
        "region 5 (central plains)": "region 5 (central plains)",
        "region 6 (south plains)": "region 6 (south plains)",
        "region 7 (southwest)": "region 7 (southwest)",
        "region 8 (intermountain)": "region 8 (intermountain)",
        "region 9 (northwest)": "region 9 (northwest)",
        "region 0 (california)": "region 0 (california)",
        "region a (alaska)": "region a (alaska)",
        "region c (caribbean)": "region c (caribbean)",
        "region h (hawaii)": "region h (hawaii)",
    }))
class Ecology(Model):
    """The locations and environmental relationships."""

    # Native/introduced status (labels reference the "plants floristic area").
    # The \u00a0 (no-break space) prefixes presumably encode visual nesting of
    # sub-regions within "north america" in UI labels -- TODO confirm.
    native_status_code = StringProp(choices={c: c for c in [
        "native to plants floristic area",
        "--north america native",
        "\u00a0\u00a0--l48 native",
        "\u00a0\u00a0--ak native",
        "\u00a0\u00a0--can native",
        "\u00a0\u00a0--gl native",
        "\u00a0\u00a0--spm native",
        "--hi native",
        "--pr native",
        "--vi native",
        "introduced to plants floristic area",
        "--north america introduced",
        "\u00a0\u00a0--l48 introduced",
        "\u00a0\u00a0--ak introduced",
        "\u00a0\u00a0--can introduced",
        "\u00a0\u00a0--gl introduced",
        "\u00a0\u00a0--spm introduced",
        "--hi introduced",
        "--pr introduced",
        "--vi introduced"
    ]}, required=True)
    # "state:county" locale identifiers; US_COUNTIES is defined earlier in
    # this module.
    locales = ListProp(StringProp(choices={c: c for c in US_COUNTIES}))
    # TODO: a "nreg_wet_status" property appears to have been started and
    # abandoned here (the original trailing comment was truncated).
| 2.046875 | 2 |
tests/update_test.py | JULIELab/taxonupdate | 0 | 12767092 | <filename>tests/update_test.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the taxonomy updater."""
from typing import cast
from taxonomy_update import make_variants, taxonomy2dict
from DictWriter import DictWriter
def test_allalpha():
    """Only the scientific name should get variants."""
    entry = {
        "ID": "57918",
        "RANK": "species",
        "SCIENTIFIC NAME": ["Fragaria vesca"],
        "GENBANK COMMON NAME": ["wild strawberry"],
        "COMMON NAME": ["European strawberry", "alpine strawberry", "wood strawberry"],
    }
    want = (
        "Alpine strawberry|European strawberry|F. vesca|F.vesca|"
        "Fragaria vesca|Wild strawberry|Wood strawberry|"
        "alpine strawberry|f. vesca|f.vesca|fragaria vesca|"
        "wild strawberry|wood strawberry"
    )
    got = "|".join(sorted(make_variants(entry)))
    assert got == want
def test_nonalpha():
    """A scientific name containing a dot must not be abbreviated."""
    entry = {"ID": "352854", "RANK": "species", "SCIENTIFIC NAME": ["Fragaria sp. 301"]}
    got = "|".join(sorted(make_variants(entry)))
    assert got == "Fragaria sp. 301"
def test_create_dict():
    """The dictionary built from the test taxonomy has 45 entries."""
    expected = 45
    # sum() over a generator counts entries without a manual counter loop.
    assert sum(1 for _ in taxonomy2dict("tests/tax-test.dat")) == expected
def test_species_entries():
    """The number of species entries in the dictionary is 18."""
    expected = 18
    # Count matching entries with a filtered generator instead of a counter.
    species = sum(
        1 for entry in taxonomy2dict("tests/tax-test.dat")
        if entry["RANK"] == "species"
    )
    assert species == expected
def test_subtree_filter_all():
    """Filtering by the root node "1" keeps all 18 species entries."""
    expected = 18
    writer = DictWriter()
    # Build the ID -> entry mapping with a dict comprehension.
    taxa = {
        cast(str, entry["ID"]): entry
        for entry in taxonomy2dict("tests/tax-test.dat")
    }
    count = sum(1 for _ in writer.filter_by_root(taxa, "1", "species"))
    assert count == expected
def test_subtree_filter_phylum():
    """There are 2 phyla in the dictionary; only one is in subtree "2"."""
    writer = DictWriter()
    taxa = {
        cast(str, entry["ID"]): entry
        for entry in taxonomy2dict("tests/tax-test.dat")
    }
    # Collect phylum names in the subtree rooted at node "2".
    phyla = [
        entry["SCIENTIFIC NAME"]
        for entry in writer.filter_by_root(taxa, "2", "phylum")
    ]
    assert len(phyla) == 1
    assert phyla[0] == ["Proteobacteria"]
| 2.734375 | 3 |
direction_net/pano_utils/transformation.py | DionysisChristopoulos/google-research | 23,901 | 12767093 | <filename>direction_net/pano_utils/transformation.py
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformations for equirectangular and perspective images.
The coordinate system is the same as OpenGL's, where -Z is the camera looking
direction, +Y points up and +X points right.
Rotations are applied as pre-multiplication in all cases.
"""
import math
from pano_utils import geometry
from pano_utils import math_utils
import tensorflow.compat.v1 as tf
import tensorflow_addons as tfa
def equirectangular_sampler(images, spherical_coordinates):
    """Sample panorama images using a grid of spherical coordinates.

    Args:
      images: a 4-D tensor of shape `[BATCH, HEIGHT, WIDTH, CHANNELS]`.
      spherical_coordinates: a float32 tensor with shape
        [BATCH, sampling_height, sampling_width, 2] representing spherical
        coordinates (colatitude, azimuth) of the sampling grids.

    Returns:
      a 4-D tensor of shape `[BATCH, sampling_height, sampling_width, CHANNELS]`
      representing resampled images.

    Raises:
      ValueError: 'images' or 'spherical_coordinates' has the wrong dimensions.
    """
    with tf.name_scope(
        None, 'equirectangular_sampler', [images, spherical_coordinates]):
        if len(images.shape) != 4:
            raise ValueError("'images' has the wrong dimensions.")
        if spherical_coordinates.shape[-1] != 2:
            raise ValueError("'spherical_coordinates' has the wrong dimensions.")

        shape = images.shape.as_list()
        height, width = shape[1], shape[2]

        # Pad one pixel on each side so that sampling near the pano borders can
        # read neighbors beyond the image edges instead of falling off.
        padded_images = geometry.equirectangular_padding(images, [[1, 1], [1, 1]])
        colatitude, azimuth = tf.split(spherical_coordinates, [1, 1], -1)
        # The colatitude of the equirectangular image goes from 0 (the top row)
        # to pi (the bottom), not inclusively. The azimuth goes from 0
        # (the leftmost column) to 2*pi (the rightmost column).
        # For example, azimuth-colatitude (0, pi/2) is the mid pixel in the first
        # column of the equirect image.
        # Convert spherical coordinates to equirectangular coordinates on images.
        # +1 in the end because of the padding.
        x_pano = (tf.mod(azimuth / math.pi, 2) * width / 2.0 - 0.5) + 1
        y_pano = ((colatitude / math.pi) * height - 0.5) + 1
        pano_coordinates = tf.concat([x_pano, y_pano], -1)
        # tfa.image.resampler samples the padded pano at the (continuous)
        # per-pixel coordinates computed above.
        remapped = tfa.image.resampler(padded_images, pano_coordinates)
        return remapped
def rectilinear_projection(images,
                           resolution,
                           fov,
                           rotations):
    """Convert equirectangular panoramic images to perspective images.

    First, the panorama images are rotated by the input parameter "rotations".
    Then, the region with the field of view "fov" centered at camera's look-at -Z
    axis is projected into perspective images. The -Z axis corresponds to the
    spherical coordinates (pi/2, pi/2) which is (HEIGHT/2, WIDTH/4) on the pano.

    Args:
      images: a 4-D tensor of shape `[BATCH, HEIGHT, WIDTH, CHANNELS]`.
      resolution: a 2-D tuple or list containing the resolution of desired output.
      fov: (float) camera's horizontal field of view in degrees.
      rotations: [BATCH, 3, 3] rotation matrices.

    Returns:
      4-D tensor of shape `[BATCH, HEIGHT, WIDTH, CHANNELS]`

    Raises:
      ValueError: 'images' has the wrong dimensions.
      ValueError: 'images' is not a float tensor.
      ValueError: 'rotations' has the wrong dimensions.
    """
    with tf.name_scope(None, 'rectilinear_projection',
                       [images, resolution, fov, rotations]):
        if len(images.shape) != 4:
            raise ValueError("'images' has the wrong dimensions.")
        if images.dtype != tf.float32 and images.dtype != tf.float64:
            raise ValueError("'images' must be a float tensor.")
        if rotations.shape[-2:] != [3, 3]:
            raise ValueError("'rotations' has the wrong dimensions.")

        shape = images.shape.as_list()
        batch = shape[0]

        # Per-output-pixel ray directions in the camera frame.
        cartesian_coordinates = geometry.generate_cartesian_grid(resolution, fov)
        # create batch -> [batch, height, width, 3]
        cartesian_coordinates = tf.tile(
            tf.expand_dims(cartesian_coordinates, axis=0), [batch, 1, 1, 1])
        # The rotation matrices have to be [batch, height, width, 3, 3].
        # Conjugating the transposed rotation by flip_x mirrors across the
        # YZ-plane (x -> -x); presumably this bridges the OpenGL camera
        # convention and the pano convention -- confirm against
        # geometry.generate_cartesian_grid.
        flip_x = tf.constant([[-1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
        rotations = tf.matmul(flip_x,
                              tf.matmul(rotations, flip_x, transpose_a=True))
        rotated_coordinates = tf.matmul(
            rotations[:, tf.newaxis, tf.newaxis],
            tf.expand_dims(cartesian_coordinates, -1), transpose_a=True)
        # axis_convert maps (x, y, z) -> (z, x, y) before the spherical
        # conversion.
        axis_convert = tf.constant([[0., 0., 1.], [1., 0., 0.], [0., 1., 0.]])
        rotated_coordinates = tf.matmul(axis_convert, rotated_coordinates)
        rotated_coordinates = tf.squeeze(rotated_coordinates, -1)
        spherical_coordinates = geometry.cartesian_to_spherical(rotated_coordinates)
        # The azimuth of 'spherical_coordinates' decreases from left to right but
        # the x should increase from left to right.
        spherical_coordinates = tf.reverse(spherical_coordinates, [2])
        return equirectangular_sampler(images, spherical_coordinates)
def rotate_pano(images, rotations):
    """Rotate Panoramic images.

    Convert the spherical coordinates (colatitude, azimuth) to Cartesian (x, y, z)
    then apply SO(3) rotation matrices. Finally, convert them back to spherical
    coordinates and remap the equirectangular images.
    Note1: The rotations are applied to the sampling sphere instead of the camera.
    The camera actually rotates R^T. I_out(x) = I_in(R * x), x are points in the
    camera frame.
    Note2: It uses a simple linear interpolation for now instead of slerp, so the
    pixel values are not accurate but visually plausible.

    Args:
      images: a 4-D tensor of shape `[BATCH, HEIGHT, WIDTH, CHANNELS]`.
      rotations: [BATCH, 3, 3] rotation matrices.

    Returns:
      4-D tensor of shape `[BATCH, HEIGHT, WIDTH, CHANNELS]`.

    Raises:
      ValueError: if the `images` or 'rotations' has the wrong dimensions.
    """
    with tf.name_scope(None, 'rotate_pano', [images, rotations]):
        if len(images.shape) != 4:
            raise ValueError("'images' has the wrong dimensions.")
        if rotations.shape[-2:] != [3, 3]:
            raise ValueError("'rotations' must have 3x3 dimensions.")

        shape = images.shape.as_list()
        batch, height, width = shape[0], shape[1], shape[2]
        # One (colatitude, azimuth) sample per output pixel, tiled per batch.
        spherical = tf.expand_dims(
            geometry.generate_equirectangular_grid([height, width]), 0)
        spherical = tf.tile(spherical, [batch, 1, 1, 1])
        cartesian = geometry.spherical_to_cartesian(spherical)
        # Change of basis into the rotation frame; the inverse (transpose) is
        # applied again after rotating below.
        axis_convert = tf.constant([[0., 1., 0.], [0., 0., -1.], [-1., 0., 0.]])
        cartesian = tf.matmul(axis_convert, tf.expand_dims(cartesian, -1))
        rotated_cartesian = tf.matmul(
            rotations[:, tf.newaxis, tf.newaxis], cartesian)
        rotated_cartesian = tf.squeeze(
            tf.matmul(axis_convert, rotated_cartesian, transpose_a=True), -1)
        rotated_spherical = geometry.cartesian_to_spherical(rotated_cartesian)
        # Resample the input pano at the rotated coordinates (linear
        # interpolation; see Note2 in the docstring).
        return equirectangular_sampler(images, rotated_spherical)
def rotate_image_in_3d(images,
                       input_rotations,
                       input_fov,
                       output_fov,
                       output_shape):
    """Return reprojected perspective view images given a rotated camera.

    This function applies a homography H = K_output * R^T * K_input' where
    K_output and K_input are the output and input camera intrinsics, R is the
    rotation from the input images' frame to the target frame.

    Args:
      images: [BATCH, HEIGHT, WIDTH, CHANNEL] perspective view images.
      input_rotations: [BATCH, 3, 3] rotations matrices from current camera frame
        to target camera frame.
      input_fov: [BATCH] a 1-D tensor (float32) of input field of view in degrees.
      output_fov: (float) output field of view in degrees.
      output_shape: a 2-D list of output dimension [height, width].

    Returns:
      reprojected images [BATCH, height, width, CHANNELS].
    """
    with tf.name_scope(
        None, 'rotate_image_in_3d',
        [images, input_rotations, input_fov, output_fov, output_shape]):
        if len(images.shape) != 4:
            raise ValueError("'images' has the wrong dimensions.")
        if input_rotations.shape[-2:] != [3, 3]:
            raise ValueError("'input_rotations' must have 3x3 dimensions.")

        shape = images.shape.as_list()
        batch, height, width = shape[0], shape[1], shape[2]

        # Output-pixel ray directions, tiled per batch as column vectors.
        cartesian = geometry.generate_cartesian_grid(output_shape, output_fov)
        cartesian = tf.tile(
            cartesian[tf.newaxis, :, :, :, tf.newaxis], [batch, 1, 1, 1, 1])
        input_rotations = tf.tile(input_rotations[:, tf.newaxis, tf.newaxis, :],
                                  [1]+output_shape+[1, 1])
        # Apply R^T per pixel, then perspective-divide onto the input plane.
        cartesian = tf.squeeze(
            tf.matmul(input_rotations, cartesian, transpose_a=True), -1)
        image_coordinates = -cartesian[:, :, :, :2] / cartesian[:, :, :, -1:]
        x, y = tf.split(image_coordinates, [1, 1], -1)
        # Sensor-plane extents derived from the input field of view.
        # NOTE(review): w and h use the same expression of input_fov, so a
        # square field of view appears to be assumed -- confirm.
        w = 2 * tf.tan(math_utils.degrees_to_radians(input_fov / 2))
        h = 2 * tf.tan(math_utils.degrees_to_radians(input_fov / 2))
        w = w[:, tf.newaxis, tf.newaxis, tf.newaxis]
        h = h[:, tf.newaxis, tf.newaxis, tf.newaxis]
        # Normalized plane coordinates -> input pixel coordinates (y flipped).
        nx = x*width / w + width / 2 - 0.5
        ny = -y * height / h + height / 2 - 0.5
        return tfa.image.resampler(images, tf.concat([nx, ny], -1))
def rotate_image_on_pano(images, rotations, fov, output_shape):
    """Transform perspective images to equirectangular images after rotations.

    Return equirectangular panoramic images in which the input perspective images
    embedded in after the rotation R from the input images' frame to the target
    frame. The image with the field of view "fov" centered at camera's look-at -Z
    axis is projected onto the pano. The -Z axis corresponds to the spherical
    coordinates (pi/2, pi/2) which is (HEIGHT/2, WIDTH/4) on the pano.

    Args:
      images: [BATCH, HEIGHT, WIDTH, CHANNEL] perspective view images.
      rotations: [BATCH, 3, 3] rotations matrices.
      fov: (float) images' field of view in degrees.
      output_shape: a 2-D list of output dimension [height, width].

    Returns:
      equirectangular images [BATCH, height, width, CHANNELS].
    """
    with tf.name_scope(None, 'rotate_image_on_pano',
                       [images, rotations, fov, output_shape]):
        if len(images.shape) != 4:
            raise ValueError("'images' has the wrong dimensions.")
        if rotations.shape[-2:] != [3, 3]:
            raise ValueError("'rotations' must have 3x3 dimensions.")

        shape = images.shape.as_list()
        batch, height, width = shape[0], shape[1], shape[2]
        # Generate a mesh grid on a sphere.
        spherical = geometry.generate_equirectangular_grid(output_shape)
        cartesian = geometry.spherical_to_cartesian(spherical)
        cartesian = tf.tile(
            cartesian[tf.newaxis, :, :, :, tf.newaxis], [batch, 1, 1, 1, 1])
        # Change of basis into the rotation frame before applying 'rotations'.
        axis_convert = tf.constant([[0., -1., 0.], [0., 0., 1.], [1., 0., 0.]])
        cartesian = tf.matmul(axis_convert, cartesian)
        cartesian = tf.squeeze(
            tf.matmul(rotations[:, tf.newaxis, tf.newaxis], cartesian), -1)
        # Only take one hemisphere. (camera lookat direction)
        hemisphere_mask = tf.cast(cartesian[:, :, :, -1:] < 0, tf.float32)
        image_coordinates = cartesian[:, :, :, :2] / cartesian[:, :, :, -1:]
        x, y = tf.split(image_coordinates, [1, 1], -1)
        # Map pixels on equirectangular pano to perspective image.
        nx = -x * width / (2 * tf.tan(
            math_utils.degrees_to_radians(fov / 2))) + width / 2 - 0.5
        ny = y * height / (2 * tf.tan(
            math_utils.degrees_to_radians(fov / 2))) + height / 2 - 0.5
        # Pixels outside the forward hemisphere are zeroed by the mask.
        transformed = hemisphere_mask * tfa.image.resampler(
            images, tf.concat([nx, ny], -1))
        return transformed
| 2.21875 | 2 |
layer.py | matiasinsaurralde/yowsup-json-rpc | 4 | 12767094 | <gh_stars>1-10
from yowsup.layers.interface import YowInterfaceLayer, ProtocolEntityCallback
from yowsup.layers.protocol_messages.protocolentities import TextMessageProtocolEntity
class EchoLayer(YowInterfaceLayer):
    """Yowsup interface layer that records incoming text messages and can
    send outgoing text messages on request (via the 'sendMessage' event)."""

    @ProtocolEntityCallback("message")
    def onMessage(self, messageProtocolEntity):
        """Handle an incoming message entity.

        Text messages are recorded via onTextMessage; media messages return
        early and are therefore not acknowledged.
        """
        if messageProtocolEntity.getType() == 'text':
            self.onTextMessage(messageProtocolEntity)
        elif messageProtocolEntity.getType() == 'media':
            return
        # self.toLower(messageProtocolEntity.forward(messageProtocolEntity.getFrom()))
        self.toLower(messageProtocolEntity.ack())      # delivery receipt
        self.toLower(messageProtocolEntity.ack(True))  # read receipt

    @ProtocolEntityCallback("receipt")
    def onReceipt(self, entity):
        """Acknowledge incoming receipts so the server stops resending them."""
        self.toLower(entity.ack())

    def onTextMessage(self, messageProtocolEntity):
        """Store an incoming text message in the shared 'messages' prop,
        keyed by the message id."""
        origin = messageProtocolEntity.getFrom(False)
        body = messageProtocolEntity.getBody()
        id = messageProtocolEntity.getId()
        # print("** Message", origin, body, id)
        self.getProp("messages")[id] = {'origin': origin, 'body': body}

    def onMediaMessage(self, messageProtocolEntity):
        """Log incoming media messages (image / location / vcard) to stdout."""
        if messageProtocolEntity.getMediaType() == "image":
            print("Echoing image %s to %s" % (messageProtocolEntity.url, messageProtocolEntity.getFrom(False)))
        elif messageProtocolEntity.getMediaType() == "location":
            print("Echoing location (%s, %s) to %s" % (messageProtocolEntity.getLatitude(), messageProtocolEntity.getLongitude(), messageProtocolEntity.getFrom(False)))
        elif messageProtocolEntity.getMediaType() == "vcard":
            print("Echoing vcard (%s, %s) to %s" % (messageProtocolEntity.getName(), messageProtocolEntity.getCardData(), messageProtocolEntity.getFrom(False)))

    def sendMessage(self, dest, msg):
        """Send a text message to *dest* (a bare phone number).

        FIX: the recipient string literal was corrupted in the source
        ("to = <EMAIL>" did not parse); WhatsApp JIDs have the form
        "<number>@s.whatsapp.net".
        """
        print("sendMessage", dest, msg)
        messageEntity = TextMessageProtocolEntity(msg, to="%s@s.whatsapp.net" % dest)
        self.toLower(messageEntity)

    def onEvent(self, e):
        """Dispatch layer events; currently only 'sendMessage' is handled."""
        if e.name == 'sendMessage':
            self.sendMessage(e.args['dest'], e.args['msg'])
| 2.21875 | 2 |
settings_azure.py | Reinaesaya/munchee | 3 | 12767095 | <filename>settings_azure.py
from munchee.settings import *

# Azure prod-specific variables config
# NOTE(review): DEBUG should be False in production; left True here per the
# original "temp DANGEROUS" marker -- confirm before deploying.
DEBUG = True # temp DANGEROUS
ALLOWED_HOSTS = ["mchee.co"]

# Secrets are injected through environment variables (Azure App Settings).
# `os` is assumed to be brought into scope by the star import above -- confirm.
SOCIAL_AUTH_LINKEDIN_OAUTH2_KEY = os.environ["LINKEDIN_KEY"]
SOCIAL_AUTH_LINKEDIN_OAUTH2_SECRET = os.environ["LINKEDIN_SECRET"]
SECRET_KEY = os.environ["SECRET_KEY"]

# Collected static files live on the VM; OAuth completion URL for LinkedIn.
STATIC_ROOT = '/var/www/munchee/static'
RETURN_URL = "http://mchee.co/complete/linkedin-oauth2/"

### log Django errors to the root of your Azure Website
#LOGGING = {
#    'version': 1,
#    'disable_existing_loggers': False,
#    'filters': {
#        'require_debug_false': {
#            '()': 'django.utils.log.RequireDebugFalse'
#        }
#    },
#    'handlers': {
#        'logfile': {
#            'class': 'logging.handlers.WatchedFileHandler',
#            'filename': 'D:/home/site/wwwroot/error.log'
#        },
#    },
#    'loggers': {
#        'django': {
#            'handlers': ['logfile'],
#            'level': 'ERROR',
#            'propagate': False,
#        },
#    }
#}
#
mnist/src/autoencoder.py | srungta/mnist-and-others | 0 | 12767096 | <reponame>srungta/mnist-and-others
# Train a single-hidden-layer autoencoder on flattened MNIST digits and
# visualise originals vs. reconstructions.
from keras.layers import Input, Dense
from keras.models import Model
import numpy as np
from commonconstants import MNIST_FLATTENED_NORMALISED_PICKLE
from file_helper import read_from_pickle
from mnist_helper import get_mnist_data
# NOTE(review): np, MNIST_FLATTENED_NORMALISED_PICKLE and read_from_pickle
# are imported but never used in this script.

# HYPERPARAMETERS
epochs = 10
encoding_dim = 32   # size of the learned latent representation
batch_size = 256
train_size = 6000   # subset sizes, to keep training fast
test_size = 1000

# SET UP MODELS
# 784 -> 32 -> 784 autoencoder. The standalone decoder reuses the
# autoencoder's final Dense layer, so their weights are shared.
input_img = Input(shape=(784,))
encoded = Dense(encoding_dim, activation='relu')(input_img)
decoded = Dense(784, activation='sigmoid')(encoded)
autoencoder = Model(input_img, decoded)
encoder = Model(input_img, encoded)
encoded_input = Input(shape=(encoding_dim,))
decoder_layer = autoencoder.layers[-1]
decoder = Model(encoded_input, decoder_layer(encoded_input))
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')

# SET UP DATA
# Labels are discarded: an autoencoder only needs the images themselves.
x_train ,_ , x_test ,_ = get_mnist_data(True)
print(x_train.shape)
print(x_test.shape)
x_train = x_train[:train_size]
x_test = x_test[:test_size]
print(x_train.shape)
print(x_test.shape)

# TRAINING
# The autoencoder reconstructs its own input, so x_train doubles as target.
autoencoder.fit(x_train, x_train, epochs=epochs, batch_size=batch_size,
                shuffle=True, validation_data=(x_test, x_test))
encoded_imgs = encoder.predict(x_test)
decoded_imgs = decoder.predict(encoded_imgs)

# VISUALIZATION
# Top row: originals; bottom row: reconstructions, for the first n digits.
import matplotlib.pyplot as plt
n = 10
plt.figure(figsize=(20, 4))
for i in range(n):
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(x_test[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(decoded_imgs[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()
| 2.734375 | 3 |
LeetCode/_0001_0050/_043_MultiplyStrings.py | BigEggStudy/LeetCode-Py | 1 | 12767097 | <filename>LeetCode/_0001_0050/_043_MultiplyStrings.py<gh_stars>1-10
#-----------------------------------------------------------------------------
# Runtime: 84ms
# Memory Usage:
# Link:
#-----------------------------------------------------------------------------
class Solution:
    def multiply(self, num1: str, num2: str) -> str:
        """Multiply two non-negative integers given as decimal strings.

        Schoolbook multiplication on per-digit products, without converting
        the whole numbers to int.

        Fixes over the original:
          * the result buffer was a fixed `[0] * 220`, which raises IndexError
            once len(num1) + len(num2) > 220; it is now sized exactly.
          * digits are converted with int() instead of a lookup dict.
          * the result string is built once with join() instead of repeated
            prepending (which was quadratic).
        """
        if num1 == '0' or num2 == '0':
            return '0'

        len_num1, len_num2 = len(num1), len(num2)
        # Reverse so that index i corresponds to the 10**i place.
        num1, num2 = num1[::-1], num2[::-1]

        # A product of an m-digit and an n-digit number has at most m+n digits.
        result_list = [0] * (len_num1 + len_num2)
        for i in range(len_num1):
            for j in range(len_num2):
                result_list[i + j] += int(num1[i]) * int(num2[j])

        # Propagate carries, collecting digits least-significant first.
        carry = 0
        digits = []
        for value in result_list:
            carry, rest = divmod(value + carry, 10)
            digits.append(str(rest))

        # Reverse to most-significant-first and drop leading zeros; the
        # product is non-zero here, so lstrip never empties the string.
        return ''.join(reversed(digits)).lstrip('0')
| 3.40625 | 3 |
data_structures/linked_list/doubly_linked/__init__.py | kwahome/data-structures-and-algos | 0 | 12767098 | from .linked_list import DoublyLinkedList, Node
| 1.226563 | 1 |
test/test_trivial.py | chrisfoulon/C-PAC | 1 | 12767099 | <reponame>chrisfoulon/C-PAC
from nose.tools import ok_, eq_
def test_b():
    """Raw, unparented test."""
    letter = 'b'
    assert letter == 'b'
def test_1_and_1():
    """One plus one equals two."""
    total = 1 + 1
    assert total == 2
def test_sum():
    """Check addition via nose's eq_ helper."""
    total = 1 + 1
    eq_(total, 2)
def test_failing_compare():
    """Deliberately failing comparison (2 is not greater than 3)."""
    message = 'Expected failure'
    ok_(2 > 3, message)
| 2.296875 | 2 |
Analytics/Tweets/cleaning.py | nicklausong/BT4222-Text-Analysis-For-Stock-Returns-Prediction | 0 | 12767100 | #########################################################################
### Program clean tweets ###
### 1. spaCy POS tagging for relevant tweets (apple fruit vs iphone) ###
### 2. Sentiment analysis of tweets ###
### 3. Group tweets by date ###
### 4. Process tweets by removing URLs, hashtags, emoticons ###
### 5. Feature engineering ###
### 6. Tokenise, remove stopwords, lemmatise tweets ###
### 7. Join with prices, derive price features and target label ###
### Output 1 pickle per ticker ###
#########################################################################
""" Copyright 2017, <NAME>, All rights reserved. """
## Credit for NLP cleaning portion
import pandas as pd
import numpy as np
import json
import string
import ast
from datetime import timedelta
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk import word_tokenize
# nltk.download('stopwords')
# nltk.download('punkt')
# nltk.download('wordnet')
# nltk.download('averaged_perceptron_tagger')
# Stopword list: NLTK's English stopwords plus extra tokens -- presumably the
# placeholder tokens produced by the techniques.py replacements (e.g. "url",
# "atUser", "multiExclamation") and common ordinal/time suffixes.
stoplist = stopwords.words('english')
my_stopwords = "multiExclamation multiQuestion multiStop url atUser st rd nd th am pm" # my extra stopwords
stoplist = stoplist + my_stopwords.split()
lemmatizer = WordNetLemmatizer() # set lemmatizer

from techniques import *
import spacy
from spacy import displacy
import en_core_web_sm
# Small English spaCy pipeline; used for named-entity recognition in spacy_pos().
nlp = en_core_web_sm.load()

from nltk.sentiment.vader import SentimentIntensityAnalyzer
# VADER sentiment analyser; used to score each tweet in group_tweets_by_date().
analyser = SentimentIntensityAnalyzer()
# Remove 5 companies: CAT, DIS, DOW, TRV, WBA
# Twitter search queries per company ("SYMBOL OR Company Name").
# FIX: renamed from `ticker` to `ticker_name` -- the processing loop at the
# bottom of this script indexes `ticker_name[i]`, which was previously
# undefined (NameError).
ticker_name = [
    "MMM OR 3M", "AXP OR American Express", "AAPL OR Apple", "BA OR Boeing",
    "CVX OR Chevron", "CSCO OR Cisco", "KO OR Coca-Cola", "XOM OR Exxon Mobil",
    "GS OR Goldman Sachs", "HD OR Home Depot", "IBM", "INTC OR Intel",
    "JNJ OR Johnson & Johnson", "JPM OR JPMorgan Chase", "MCD OR McDonald's",
    "MRK OR Merck", "MSFT OR Microsoft", "NKE OR Nike", "PFE OR Pfizer",
    "PG OR Procter & Gamble", "UTX OR United Technologies", "UNH OR UnitedHealth",
    "VZ OR Verizon", "V OR Visa", "WMT OR Wal-Mart"]
# Plain ticker symbols, index-aligned with `ticker_name`.
ticker_symbol = [
    "MMM", "AXP", "AAPL", "BA",
    "CVX", "CSCO", "KO", "XOM",
    "GS", "HD", "IBM", "INTC",
    "JNJ", "JPM", "MCD",
    "MRK", "MSFT", "NKE", "PFE",
    "PG", "UTX", "UNH",
    "VZ", "V", "WMT"]
########################################################################
### 1. spaCy POS tagging for relevant tweets (apple fruit vs iphone) ###
########################################################################
def spacy_pos(df, name):
    '''
    POS-tag each token and filter for texts with "ORG" label

    Parameters
    ----------
    df (pandas DataFrame)
    name (string) ticker name

    Returns
    -------
    the processed pandas DataFrame
    '''
    target = name.lower()

    def _mentions_company_as_org(text):
        # True when spaCy tags the company name as an ORG entity in this text.
        for ent in nlp(text).ents:
            # print(ent.text, ent.label_)
            if ent.label_ == 'ORG' and ent.text.lower() == target:
                return True
        return False

    df['relevant'] = df['text'].apply(_mentions_company_as_org)
    print("Before:", df.shape)
    df = df[df['relevant'] == True]
    print("After:", df.shape)
    return df
########################################################################
### 2. Sentiment analysis of tweets ###
### 3. Group tweets by date ###
########################################################################
def group_tweets_by_date(df, symbol, name):
    '''
    Aggregate all columns after grouping rows by dates.
    Shift weekend tweets to following Monday.

    Parameters
    ----------
    df (pandas DataFrame)
    symbol (string) ticker symbol eg. AAPL
    name (string) ticker name eg. Apple

    Returns
    -------
    the processed pandas DataFrame
    '''
    df_filter = df[["text", "hashtags", "likes", "replies", "parent_tweet_id", "timestamp"]]
    # NOTE(review): these assignments target a slice of df and may trigger a
    # pandas SettingWithCopyWarning; consider .copy() on the line above.
    df_filter.likes = df.likes.astype('int64')
    df_filter.replies = df.replies.astype('int64')
    # remove retweets
    df_filter = df_filter[df_filter.parent_tweet_id.isnull()]
    # Hashtags arrive as stringified Python lists; parse them, then flatten to
    # a comma-separated string so they can be concatenated during the groupby.
    df_filter['hashtags'] = df_filter['hashtags'].apply(ast.literal_eval)
    df_filter['hashtags'] = df_filter['hashtags'].apply(lambda x : ','.join(x))
    df_filter['timestamp'] = pd.to_datetime(df_filter['timestamp'])
    df_filter['day'] = df_filter['timestamp'].dt.dayofweek
    # VADER compound sentiment score per tweet.
    df_filter['vader'] = [analyser.polarity_scores(tweet)['compound'] for tweet in df_filter['text']]
    # carry forward weekend tweets to following Monday (1 or 2 days)
    df_filter['stock_date'] = np.where(df_filter['day']>4,
                                       df_filter['timestamp'] + pd.to_timedelta(7-df_filter['day'], unit='d'),
                                       df_filter['timestamp']
                                       )
    # group tweets by dates
    df_filter['stock_date'] = df_filter['stock_date'].dt.date
    df_filter = df_filter.groupby(df_filter['stock_date']).agg({'text': lambda x: ','.join(x),
                                                                'hashtags': lambda x: ','.join(x),
                                                                'likes':'sum',
                                                                'replies': 'sum',
                                                                'vader': 'mean'
                                                                })
    # Split hashtags back into lists, dropping empty strings.
    df_filter['hashtags'] = df_filter['hashtags'].apply(lambda hashtags: list(filter(None, hashtags.split(','))))
    # Strip the ticker symbol and (lower-cased) company name from the
    # aggregated text so they do not dominate the token features downstream.
    df_filter['text_removeCompany'] = df_filter.text.str.replace(symbol+' ','')
    name = name.lower()
    df_filter['text_removeCompany'] = df_filter.text_removeCompany.str.lower().str.replace(name+" ",'')
    df_filter = df_filter.reset_index(drop=False)
    return df_filter
########################################################################
### 6. Tokenise, remove stopwords, lemmatise tweets ###
########################################################################
def tokenize(text):
    '''
    Tokenise texts, remove stopwords, lemmatise word.

    Parameters
    ----------
    text (string)

    Returns
    -------
    list of tokens (string)
    '''
    onlyOneSentenceTokens = [] # tokens of one sentence each time

    tokens = word_tokenize(text)
    # NOTE(review): the result of replaceNegations() is discarded -- `tokens`
    # is re-assigned from a fresh tokenisation a few lines below, so this call
    # has no effect unless it mutates shared state. Confirm intended behaviour.
    tokens = replaceNegations(tokens)

    # Strip punctuation, then tokenise the cleaned text again.
    translator = str.maketrans('', '', string.punctuation)
    text = text.translate(translator) # Remove punctuation
    tokens = nltk.word_tokenize(text)

    for w in tokens:
        if (w not in stoplist):
            final_word = w.lower()
            # Collapse character elongations (e.g. "sooo") before lemmatising.
            final_word = replaceElongated(final_word)
            final_word = lemmatizer.lemmatize(final_word)
            onlyOneSentenceTokens.append(final_word)

    # NOTE(review): onlyOneSentence is built but never used or returned.
    onlyOneSentence = " ".join(onlyOneSentenceTokens) # form again the sentence from the list of tokens
    return onlyOneSentenceTokens
########################################################################
### 4. Process tweets by removing URLs, hashtags, emoticons ###
### 5. Feature engineering of numerical features ###
########################################################################
# A clean tweet should not contain URLs, hashtags (i.e. #happy) or mentions (i.e. @BarackObama)
def clean_dirty_tweets(text_series):
'''
Clean tweets before tokenisation.
Parameters
----------
text_series (pandas Series)
Returns
-------
the pandas DataFrame containing processed text
and other engineered features
'''
clean_tweets = []
for text in text_series:
totalEmoticons = 0
totalSlangs = 0
totalSlangsFound = []
totalElongated = 0
totalMultiExclamationMarks = 0
totalMultiQuestionMarks = 0
totalMultiStopMarks = 0
totalAllCaps = 0
text = removeUnicode(text)
text = replaceURL(text)
text = replaceAtUser(text)
text = removeWholeHashtag(text)
temp_slangs, temp_slangsFound = countSlang(text)
totalSlangs += temp_slangs
for word in temp_slangsFound:
totalSlangsFound.append(word) # all the slangs found in all sentences
text = replaceSlang(text)
text = replaceContraction(text)
text = removeNumbers(text)
emoticons = countEmoticons(text)
totalEmoticons += emoticons
text = removeEmoticons(text)
totalAllCaps += countAllCaps(text)
totalMultiExclamationMarks += countMultiExclamationMarks(text)
totalMultiQuestionMarks += countMultiQuestionMarks(text)
totalMultiStopMarks += countMultiStopMarks(text)
text = replaceMultiExclamationMark(text)
text = replaceMultiQuestionMark(text)
text = replaceMultiStopMark(text)
totalElongated += countElongated(text)
tokenized_tweet = tokenize(text)
clean_tweets.append([tokenized_tweet, totalEmoticons, totalSlangs,
totalSlangsFound, totalElongated, totalMultiExclamationMarks,
totalMultiQuestionMarks, totalMultiStopMarks, totalAllCaps])
# form new dataframe
df_clean_tweets = pd.DataFrame(clean_tweets,columns=['tokenized_tweet', 'totalEmoticons', 'totalSlangs',
'totalSlangsFound', 'totalElongated', 'totalMultiExclamationMarks',
'totalMultiQuestionMarks', 'totalMultiStopMarks', 'totalAllCaps'])
return df_clean_tweets
# def spellcheck(tweet):
# tweet_spellchecked = []
# print(len(tweet))
# for word in tweet:
# if len(word)>1:
# word = spellCorrection(word) # Technique 12: correction of spelling errors
# tweet_spellchecked.append(word)
# return tweet_spellchecked
# Price labels for all tickers; joined with per-ticker tweet features below.
price_labels = pd.read_csv("../../Raw Data/Price/price_labels.csv")

for i in range(len(ticker_symbol)):
    df = pd.read_csv('../Raw Data/Tweets/'+ticker_symbol[i]+'_tweets.csv')
    print("Now cleaning:", ticker_symbol[i])

    # Step 1: keep only tweets where spaCy tags the company name as an ORG
    # entity. Tickers in the list below skip this filter.
    print("Check pos tag...")
    if ticker_symbol[i] in ['JPM', "MMM", "KO", "JNJ", "PFE", "TRV", "V", "UNH"]:
        df_filter = df
    else:
        df_filter = spacy_pos(df, ticker_name[i])

    # Steps 2-3: sentiment scoring and grouping by trading date.
    # NOTE(review): `df` (not `df_filter`) is passed here, so the POS
    # filtering above is discarded -- likely should be df_filter. Confirm.
    print("Group tweets by date...")
    df_filter = group_tweets_by_date(df, ticker_symbol[i], ticker_name[i])
    print("Number of records (weekdays):", df_filter.shape)

    # Steps 4-6: text cleaning, feature engineering and tokenisation.
    print("Process raw tweets...")
    df_clean_tweets = clean_dirty_tweets(df_filter.text_removeCompany)
    # # spell_check_col = [spellcheck(tweet) for tweet in df_clean_tweets['tokenized_tweet']]
    # # print("spell check")
    # # df_clean_tweets['tokenized_tweet_spellcheck'] = spell_check_col
    # Join original df with df from tokenising + results
    df_tweets_final = pd.concat([df_filter, df_clean_tweets], axis = 1)

    ####################################################################
    ### 7. Join with prices, derive price features and target label  ###
    ####################################################################
    # NOTE(review): the price CSV path uses "../../" while tweet CSVs use
    # "../" -- confirm the working-directory layout.
    price_labels_xticker = price_labels[price_labels['Ticker']==ticker_symbol[i]][['Date', "Adj Close"]]
    print("Number of business days:", price_labels_xticker.shape)
    price_labels_xticker.loc[:,'Date'] = pd.to_datetime(price_labels_xticker['Date']).dt.date
    # Daily log10 return (historical) and 5-day forward log10 return.
    price_labels_xticker.loc[:,'hist_returns'] = np.log10(price_labels_xticker['Adj Close']/price_labels_xticker['Adj Close'].shift())
    price_labels_xticker.loc[:,'returns5'] = np.log10(price_labels_xticker['Adj Close'].shift(-5)/price_labels_xticker['Adj Close'])
    # Binary target: +1 if the 5-day forward return is non-negative, else -1.
    price_labels_xticker.loc[:,'label5'] = np.where(price_labels_xticker['returns5']>=0,1,-1)

    joined_df = price_labels_xticker.join(df_tweets_final.set_index("stock_date"), on='Date', how='left')
    print("Longest NaN period:", joined_df.text.isnull().astype(int).groupby(joined_df.text.notnull().astype(int).cumsum()).sum().max())
    # joined_df = joined_df.drop(['Unnamed: 0', 'Unnamed: 0.1'], axis=1)
    joined_df['Date'] = pd.to_datetime(joined_df['Date'])
    joined_df['Year'] = joined_df.Date.dt.year
    joined_df['Month'] = joined_df.Date.dt.month
    # Expanding z-score of daily sentiment, plus a 3-day rolling sum of it.
    joined_df['vader_standardise'] = (joined_df['vader']-joined_df['vader'].expanding().mean())/joined_df['vader'].expanding().std()
    joined_df['vader3'] = joined_df['vader_standardise'].rolling(window=3, min_periods=2).sum()

    joined_df.to_pickle("../../Processed Data/Tweets/"+ticker_symbol[i]+"_df.pkl")
| 2.421875 | 2 |
tutorials/generate_egoview_overlaid_vector_map.py | jhonykaesemodel/av2-api | 26 | 12767101 | # <Copyright 2022, Argo AI, LLC. Released under the MIT license.>
"""Generate MP4 videos with map entities rendered on top of sensor imagery, for all cameras, for a single log.
We use a inferred depth map from LiDAR to render only visible map entities (lanes and pedestrian crossings).
"""
import logging
import os
import sys
import time
from pathlib import Path
from typing import Final, List, Tuple
import click
import numpy as np
import av2.geometry.interpolate as interp_utils
import av2.rendering.video as video_utils
import av2.utils.io as io_utils
import av2.utils.raster as raster_utils
from av2.datasets.sensor.av2_sensor_dataloader import AV2SensorDataLoader
from av2.datasets.sensor.constants import RingCameras
from av2.map.map_api import ArgoverseStaticMap
from av2.rendering.color import BLUE_BGR
from av2.rendering.map import EgoViewMapRenderer
from av2.utils.typing import NDArrayByte
RING_CAMERA_FPS: Final[int] = 20
logger = logging.getLogger(__name__)
def generate_egoview_overlaid_map(
    data_root: Path,
    output_dir: Path,
    log_id: str,
    max_range_m: float,
    use_depth_map_for_occlusion: bool,
    dump_single_frames: bool,
    cam_names: List[RingCameras],
) -> None:
    """Render the map from a particular camera's viewpoint for each camera frame.

    For every requested camera, each image of the log is overlaid with the vector
    map and the frames are assembled into one MP4 per camera.

    Args:
        data_root: path to where the AV2 logs live.
        output_dir: path to directory where renderings will be saved.
        log_id: unique ID for AV2 scenario/log.
        max_range_m: maximum range of map entities from egovehicle to consider for rendering (by l-infinity norm).
        use_depth_map_for_occlusion: whether to use an inferred depth map for rendering occluded elements.
        dump_single_frames: Whether to save to disk individual RGB frames of the rendering, in addition to generating
            the mp4 file.
        cam_names: list of camera names. For each camera, its viewport will be used to render the map.
    """
    loader = AV2SensorDataLoader(data_dir=data_root, labels_dir=data_root)
    log_map_dirpath = data_root / log_id / "map"
    avm = ArgoverseStaticMap.from_map_dir(log_map_dirpath, build_raster=True)
    for _, cam_enum in enumerate(cam_names):
        cam_name = cam_enum.value
        pinhole_cam = loader.get_log_pinhole_camera(log_id, cam_name)
        cam_im_fpaths = loader.get_ordered_log_cam_fpaths(log_id, cam_name)
        num_cam_imgs = len(cam_im_fpaths)
        video_list = []
        for i, img_fpath in enumerate(cam_im_fpaths):
            # Log progress every 50 frames.
            if i % 50 == 0:
                logging.info(f"\tOn file {i}/{num_cam_imgs} of camera {cam_name} of {log_id}")
            # Image filenames encode the capture timestamp in nanoseconds.
            cam_timestamp_ns = int(img_fpath.stem)
            city_SE3_ego = loader.get_city_SE3_ego(log_id, cam_timestamp_ns)
            if city_SE3_ego is None:
                logger.info("missing LiDAR pose")
                continue
            # load feather file path, e.g. '315978406032859416.feather"
            lidar_fpath = loader.get_closest_lidar_fpath(log_id, cam_timestamp_ns)
            if lidar_fpath is None:
                # without depth map, can't do this accurately
                continue
            lidar_points = io_utils.read_lidar_sweep(lidar_fpath, attrib_spec="xyz")
            lidar_timestamp_ns = int(lidar_fpath.stem)
            if use_depth_map_for_occlusion:
                depth_map = loader.get_depth_map_from_lidar(
                    lidar_points=lidar_points,
                    cam_name=cam_name,
                    log_id=log_id,
                    cam_timestamp_ns=cam_timestamp_ns,
                    lidar_timestamp_ns=lidar_timestamp_ns,
                )
            else:
                # No occlusion reasoning: occluded map entities are rendered too.
                depth_map = None
            egoview_renderer = EgoViewMapRenderer(
                depth_map=depth_map, city_SE3_ego=city_SE3_ego, pinhole_cam=pinhole_cam, avm=avm
            )
            frame_rgb = render_egoview(
                output_dir=output_dir,
                img_fpath=img_fpath,
                egoview_renderer=egoview_renderer,
                cam_timestamp_ns=cam_timestamp_ns,
                log_id=log_id,
                max_range_m=max_range_m,
                dump_single_frames=dump_single_frames,
            )
            video_list.append(frame_rgb)
        # NOTE(review): if every frame was skipped above (no pose or no LiDAR),
        # video_list is empty and np.stack() will raise — confirm whether that
        # can happen for real logs.
        video: NDArrayByte = np.stack(video_list).astype(np.uint8)
        video_output_dir = output_dir / "videos"
        video_utils.write_video(
            video=video,
            dst=video_output_dir / f"{log_id}_{cam_name}.mp4",
            fps=RING_CAMERA_FPS,
            preset="medium",
        )
def render_egoview(
    output_dir: Path,
    img_fpath: Path,
    egoview_renderer: EgoViewMapRenderer,
    cam_timestamp_ns: int,
    log_id: str,
    max_range_m: float,
    dump_single_frames: bool,
) -> NDArrayByte:
    """Synthetically manipulate a vector map, render the map in the ego-view, and save rendering to disk.

    Args:
        output_dir: path to directory where renderings will be saved.
        img_fpath: path to RGB image, from one of the ring or stereo cameras.
        egoview_renderer: rendering engine for map elements in the ego-view.
        cam_timestamp_ns: nanosecond camera timestamp when image was captured.
        log_id: unique ID for AV2 scenario/log.
        max_range_m: maximum range of map entities from egovehicle to consider for rendering (by l-infinity norm).
        dump_single_frames: Whether to save to disk individual RGB frames of the rendering, in addition to generating
            the mp4 file.

    Returns:
        array of shape (H,W,3) and type uint8 representing a RGB image.
    """
    save_dir = output_dir / log_id
    if dump_single_frames:
        # we only create log-specific directories, if dumping individual frames.
        save_dir.mkdir(exist_ok=True, parents=True)
    img_fname = f"{egoview_renderer.pinhole_cam.cam_name}_{cam_timestamp_ns}_vectormap.jpg"
    save_fpath = save_dir / img_fname
    if save_fpath.exists():
        # Reuse the cached rendering from a previous run instead of re-rendering.
        logger.info("Rendered image already exists, skipping")
        img: NDArrayByte = io_utils.read_img(save_fpath)
        return img
    start = time.time()
    img_rgb: NDArrayByte = io_utils.read_img(img_fpath)
    # to prevent washing out, can pass in black image, and get just mask back, or can overlay directly.
    img_h, img_w, _ = img_rgb.shape
    # Mid-gray canvas onto which map polylines are drawn before blending.
    img_empty: NDArrayByte = np.full(
        (img_h, img_w, 3), fill_value=128, dtype=np.uint8
    )  # pure white polylines will disappear @ 255
    img_empty = render_egoview_with_occlusion_checks(
        img_canvas=img_empty,
        egoview_renderer=egoview_renderer,
        max_range_m=max_range_m,
    )
    end = time.time()
    duration = end - start
    logger.info(f"Rendering single image took {duration:.2f} sec.")
    # Blend the rendered map layer (alpha=0.45) over the camera image.
    frame_rgb = raster_utils.blend_images(img_rgb, img_empty, alpha=0.45)
    if dump_single_frames:
        io_utils.write_img(save_fpath, frame_rgb, channel_order="RGB")
    return frame_rgb
def render_egoview_with_occlusion_checks(
    img_canvas: NDArrayByte, egoview_renderer: EgoViewMapRenderer, max_range_m: float, line_width_px: int = 10
) -> NDArrayByte:
    """Render pedestrian crossings and lane segments in the ego-view.

    Pedestrian crossings (crosswalks) will be rendered in blue, and lane markings will be colored according to their
    marking color, or otherwise red, if markings are implicit.

    Args:
        img_canvas: array of shape (H,W,3) representing BGR canvas to rasterize map elements onto.
        egoview_renderer: rendering engine for map elements in the ego-view.
        max_range_m: maximum range of map entities from egovehicle to consider for rendering (by l-infinity norm).
        line_width_px: thickness (in pixels) to use for rendering each polyline.

    Returns:
        array of shape (H,W,3) and type uint8 representing a RGB image.
    """
    # NOTE(review): max_range_m is accepted but never used in this function —
    # confirm whether range filtering was intended here.
    for ls in egoview_renderer.avm.get_scenario_lane_segments():
        # Both lane boundaries are drawn (color logic lives in the renderer).
        img_canvas = egoview_renderer.render_lane_boundary_egoview(img_canvas, ls, "right", line_width_px)
        img_canvas = egoview_renderer.render_lane_boundary_egoview(img_canvas, ls, "left", line_width_px)
    for pc in egoview_renderer.avm.get_scenario_ped_crossings():
        EPS = 1e-5
        crosswalk_color = BLUE_BGR
        # render ped crossings (pc's)
        xwalk_polygon = pc.polygon
        # prevent duplicate first and last coords
        # NOTE(review): this perturbs all but the last vertex *in place*; if
        # pc.polygon returns the stored array rather than a copy, the map data
        # itself is mutated — confirm against PedCrossing's implementation.
        xwalk_polygon[:-1] += EPS
        N_INTERP_PTS = 100
        # For pixel-perfect rendering, querying crosswalk boundary ground height at waypoints throughout
        # the street is much more accurate than 3d linear interpolation using only the 4 annotated corners.
        polygon_city_frame = interp_utils.interp_arc(t=N_INTERP_PTS, points=xwalk_polygon[:, :2])
        polygon_city_frame = egoview_renderer.avm.append_height_to_2d_city_pt_cloud(points_xy=polygon_city_frame)
        egoview_renderer.render_polyline_egoview(
            polygon_city_frame,
            img_canvas,
            crosswalk_color,
            thickness_px=line_width_px,
        )
    # convert BGR to RGB
    img_rgb: NDArrayByte = img_canvas[:, :, ::-1]
    return img_rgb
def parse_camera_enum_types(cam_names: Tuple[str, ...]) -> List[RingCameras]:
    """Convert a list of CLI string types, to enums of type RingCameras, and validate each input.

    Args:
        cam_names: Tuple of camera names to use for rendering the map.

    Returns:
        List of camera enums to use for rendering the map.

    Raises:
        ValueError: If an invalid camera name is provided.
    """
    # Set of all valid ring-camera identifiers, for O(1) membership tests.
    valid_ring_cams = {cam.value for cam in RingCameras}
    cam_enums: List[RingCameras] = []
    for cam_name in cam_names:
        if cam_name not in valid_ring_cams:
            # Name the offending input so the CLI user can fix their invocation.
            raise ValueError(
                f"Invalid camera name {cam_name!r}; must be one of {sorted(valid_ring_cams)}."
            )
        cam_enums.append(RingCameras(cam_name))
    return cam_enums
@click.command(help="Generate map visualizations on ego-view imagery from the Argoverse 2 Sensor or TbV Datasets.")
@click.option(
    "-d",
    "--data-root",
    required=True,
    help="Path to local directory where the Argoverse 2 Sensor Dataset or TbV logs are stored.",
    type=click.Path(exists=True),
)
@click.option(
    "-o",
    "--output-dir",
    required=True,
    help="Path to local directory where renderings will be saved.",
    type=str,
)
@click.option(
    "-l",
    "--log-id",
    default="00a6ffc1-6ce9-3bc3-a060-6006e9893a1a",
    help="unique log identifier.",
    type=str,
)
@click.option(
    "-r",
    "--max-range-m",
    type=float,
    default=100,
    help="Maximum range of map entities from egovehicle to consider for rendering (by l-infinity norm).",
)
# BUG FIX: this option previously declared the short flag "-d", which collides
# with the "-d" of --data-root above and makes the CLI ambiguous. The short flag
# is now "-u". The historical (typo'd) long spelling --use-depth-map-for_occlusion
# is kept as an alias for backwards compatibility, with the corrected dashed
# spelling added as the primary flag; both map to the same parameter name.
@click.option(
    "-u",
    "--use-depth-map-for-occlusion",
    "--use-depth-map-for_occlusion",
    "use_depth_map_for_occlusion",
    default=True,
    help="Whether to use an inferred depth map for rendering occluded elements (defaults to True).",
    type=bool,
)
@click.option(
    "-s",
    "--dump-single-frames",
    default=False,
    help="Whether to save to disk individual RGB frames of the rendering, in addition to generating the mp4 file"
    "(defaults to False). Note: can quickly generate 100s of MBs, for 200 KB frames.",
    type=bool,
)
@click.option(
    "-c",
    "--cam-names",
    default=tuple(x.value for x in list(RingCameras)),
    help="List of camera viewpoints to render the map from.",
    multiple=True,
    type=str,
)
def run_generate_egoview_overlaid_map(
    data_root: "os.PathLike[str]",
    output_dir: "os.PathLike[str]",
    log_id: str,
    max_range_m: float,
    use_depth_map_for_occlusion: bool,
    dump_single_frames: bool,
    cam_names: Tuple[str, ...],
) -> None:
    """Click entry point for visualizing map entities rendered on top of sensor imagery."""
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    data_root = Path(data_root)
    output_dir = Path(output_dir)
    # Echo the effective configuration before doing any heavy work.
    logger.info(
        "data_root: %s, output_dir: %s, log_id: %s, max_range_m: %f, "
        "use_depth_map_for_occlusion: %s, dump_single_frames %s",
        data_root,
        output_dir,
        log_id,
        max_range_m,
        use_depth_map_for_occlusion,
        dump_single_frames,
    )
    generate_egoview_overlaid_map(
        data_root=data_root,
        output_dir=output_dir,
        log_id=log_id,
        max_range_m=max_range_m,
        use_depth_map_for_occlusion=use_depth_map_for_occlusion,
        dump_single_frames=dump_single_frames,
        cam_names=parse_camera_enum_types(cam_names),
    )


if __name__ == "__main__":
    run_generate_egoview_overlaid_map()
| 2.390625 | 2 |
var/spack/repos/builtin/packages/findutils/package.py | kkauder/spack | 2 | 12767102 | <reponame>kkauder/spack
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import re
class Findutils(AutotoolsPackage, GNUMirrorPackage):
    """The GNU Find Utilities are the basic directory searching
    utilities of the GNU operating system."""

    homepage = "https://www.gnu.org/software/findutils/"
    gnu_mirror_path = "findutils/findutils-4.6.0.tar.gz"

    # Regex used by Spack's external-package detection to find the binary.
    executables = ['^find$']

    version('4.6.0', sha256='ded4c9f73731cd48fec3b6bdaccce896473b6d8e337e9612e16cf1431bb1169d')
    version('4.4.2', sha256='434f32d171cbc0a5e72cfc5372c6fc4cb0e681f8dce566a0de5b6fccd702b62a')
    version('4.4.1', sha256='77a5b85d7fe0dd9c1093e010b61f765707364ec2c89c4f432c1c616215bcc138')
    version('4.4.0', sha256='fb108c2959f17baf3559da9b3854495b9bb69fb13309fdd05576c66feb661ea9')
    version('4.2.33', sha256='813cd9405aceec5cfecbe96400d01e90ddad7b512d3034487176ce5258ab0f78')
    version('4.2.32', sha256='87bd8804f3c2fa2fe866907377afd8d26a13948a4bb1761e5e95d0494a005217')
    version('4.2.31', sha256='e0d34b8faca0b3cca0703f6c6b498afbe72f0ba16c35980c10ec9ef7724d6204')
    version('4.2.30', sha256='344b9cbb4034907f80398c6a6d3724507ff4b519036f13bb811d12f702043af4')
    version('4.2.29', sha256='1a9ed8db0711f8419156e786b6aecd42dd05df29e53e380d8924e696f7071ae0')
    version('4.2.28', sha256='aa27de514b44eb763d276ad8f19fef31a07bd63ac7ca6870d2be5cd58de862c8')
    version('4.2.27', sha256='546bc7932e716beaa960116766ea4d890f292c6fbde221ec10cdd8ec37329654')
    version('4.2.26', sha256='74fa9030b97e074cbeb4f6c8ec964c5e8292cf5a62b195086113417f75ab836a')
    version('4.2.25', sha256='a2bc59e80ee599368584f4ac4a6e647011700e1b5230e65eb3170c603047bb51')
    version('4.2.23', sha256='d3ca95bf003685c3c34eb59e41c5c4b366fb582a53c4cfa9da0424d98ff23be3')
    version('4.2.20', sha256='4e4d72a4387fcc942565c45460e632001db6bde0a46338a6a1b59b956fd3e031')
    version('4.2.18', sha256='05c33f3e46fa11275f89ae968af70c83b01a2c578ec4fa5abf5c33c7e4afe44d')
    version('4.2.15', sha256='5ede832e70c1691a59e6d5e5ebc2b843120d631b93cd60b905b2edeb078d3719')
    version('4.1.20', sha256='8c5dd50a5ca54367fa186f6294b81ec7a365e36d670d9feac62227cb513e63ab')
    version('4.1', sha256='487ecc0a6c8c90634a11158f360977e5ce0a9a6701502da6cb96a5a7ec143fac')

    # Autotools toolchain is only required for 4.6.0, which re-runs autoreconf
    # (see force_autoreconf below) because of the gnulib patches.
    depends_on('autoconf', type='build', when='@4.6.0')
    depends_on('automake', type='build', when='@4.6.0')
    depends_on('libtool', type='build', when='@4.6.0')
    depends_on('m4', type='build', when='@4.6.0')
    depends_on('texinfo', type='build', when='@4.6.0')

    # findutils does not build with newer versions of glibc
    patch('https://src.fedoraproject.org/rpms/findutils/raw/97ba2d7a18d1f9ae761b6ff0b4f1c4d33d7a8efc/f/findutils-4.6.0-gnulib-fflush.patch', sha256='84b916c0bf8c51b7e7b28417692f0ad3e7030d1f3c248ba77c42ede5c1c5d11e', when='@4.6.0')
    patch('https://src.fedoraproject.org/rpms/findutils/raw/97ba2d7a18d1f9ae761b6ff0b4f1c4d33d7a8efc/f/findutils-4.6.0-gnulib-makedev.patch', sha256='bd9e4e5cc280f9753ae14956c4e4aa17fe7a210f55dd6c84aa60b12d106d47a2', when='@4.6.0')
    # Workaround applied only when building with the NVIDIA HPC compilers.
    patch('nvhpc.patch', when='%nvhpc')

    # Build out-of-source in this subdirectory.
    build_directory = 'spack-build'

    @classmethod
    def determine_version(cls, exe):
        """Return the version parsed from `find --version` output, or None."""
        output = Executable(exe)('--version', output=str, error=str)
        match = re.search(r'find \(GNU findutils\)\s+(\S+)', output)
        return match.group(1) if match else None

    @property
    def force_autoreconf(self):
        # Run autoreconf due to build system patch (gnulib-makedev)
        return self.spec.satisfies('@4.6.0')

    @when('@4.6.0')
    def patch(self):
        # We have to patch out gettext support, otherwise autoreconf tries to
        # call autopoint, which depends on find, which is part of findutils.
        filter_file('^AM_GNU_GETTEXT.*',
                    '',
                    'configure.ac')
        filter_file(r'^SUBDIRS = (.*) po (.*)',
                    r'SUBDIRS = \1 \2',
                    'Makefile.am')
| 1.53125 | 2 |
pitch/concurrency.py | georgepsarakis/pitch | 6 | 12767103 | from abc import abstractmethod
from concurrent import futures
class Pool(object):
    """Base class for running a callable several times on a concurrent executor.

    Subclasses must provide the ``executor_class`` property, naming the
    ``concurrent.futures`` executor class to use.

    NOTE: this class does not use ``abc.ABCMeta``, so the abstract property is
    documentation-only and is not enforced at instantiation time (kept this way
    so existing placeholder subclasses remain instantiable).
    """

    def __init__(self, loops=1, concurrency=1):
        # Number of times `fn` is submitted per `run` call.
        self._concurrency = concurrency
        # Maximum number of workers for the underlying executor.
        self._loops = loops

    # BUG FIX: the decorators were previously stacked as @abstractmethod over
    # @property, which applies abstractmethod() to the property *object* and
    # fails at class-creation time (property.__isabstractmethod__ is read-only).
    # @property must be the outermost decorator.
    @property
    @abstractmethod
    def executor_class(self) -> futures.Executor:
        """The concrete ``futures.Executor`` subclass to instantiate in run()."""
        pass

    def run(self, fn, *args, **kwargs):
        """Submit ``fn(*args, **kwargs)`` ``loops`` times.

        Returns:
            A tuple ``(promises, exceptions)``: the submitted futures, and the
            exception (or None) raised by each one. Note that ``exception()``
            blocks until the corresponding future completes.
        """
        promises = []
        with self.executor_class(max_workers=self._concurrency) as pool:
            for loop in range(self._loops):
                promises.append(
                    pool.submit(fn, *args, **kwargs)
                )
        return promises, [p.exception() for p in promises]
class ThreadPool(Pool):
    """Pool variant backed by a thread pool (suited to I/O-bound callables)."""

    @property
    def executor_class(self):
        """Return the thread-based executor class."""
        return futures.ThreadPoolExecutor
class ProcessPool(Pool):
    """Pool variant backed by worker processes (suited to CPU-bound callables)."""

    @property
    def executor_class(self):
        """Return the process-based executor class."""
        return futures.ProcessPoolExecutor
class AsyncIOPool(Pool):
    # TODO: asyncio-based execution is not implemented; this placeholder does not
    # override the abstract `executor_class` property, so `run()` will fail if used.
    pass
| 3.484375 | 3 |
serif/model/impl/mention/noun_phrase_mention_model_ner_deduplication.py | BBN-E/ZS4IE | 7 | 12767104 | <filename>serif/model/impl/mention/noun_phrase_mention_model_ner_deduplication.py<gh_stars>1-10
import logging
from serif.model.mention_model import MentionModel
logger = logging.getLogger(__name__)
class NounPhraseMentionModelNERDeduplication(MentionModel):
    """Adds NP-chunk mentions that neither overlap existing mentions nor nest
    inside a smaller candidate NP (only the minimal span of each overlapping
    candidate group is kept)."""

    def __init__(self, **kwargs):
        super(NounPhraseMentionModelNERDeduplication, self).__init__(**kwargs)

    def add_mentions_to_sentence(self, sentence):
        # This model operates document-wide; the per-sentence hook is unsupported.
        raise NotImplementedError("You shouldn't call this endpoint.")

    def process_document(self, serif_doc):
        # Assuming pron mention detector(parse tree based) and NER(model based) has run
        # We'd create NP chunk that 1) not overlap with them 2) if "<NAME>" and "<NAME>, a great business man" both
        # Are NP, only keep "<NAME>"
        for serif_sentence in serif_doc.sentences:
            if serif_sentence.mention_set is None:
                serif_sentence.add_new_mention_set()
            if serif_sentence.parse is None:
                logger.warning("No parse for sentence {}, skipping NounPhraseMentionModel".
                               format(serif_sentence.id))
                continue
            # Mark every token already covered by an existing mention (from the
            # upstream pronoun detector / NER), so new NPs cannot overlap them.
            token_is_existing_mention = [False for _ in range(len(serif_sentence.token_sequence or ()))]
            for mention in serif_sentence.mention_set:
                start_token = mention.start_token
                end_token = mention.end_token
                start_token_idx = start_token.index()
                end_token_idx = end_token.index()
                for idx in range(start_token_idx, end_token_idx+1):
                    token_is_existing_mention[idx] = True
            # Collect NP parse nodes whose span touches no existing mention token.
            nodes = serif_sentence.parse.get_nodes_matching_tags(["NP"])
            candidate_synnodes = set()
            for node in nodes:
                start_token = node.start_token
                end_token = node.end_token
                start_token_idx = start_token.index()
                end_token_idx = end_token.index()
                is_good_candidate = True
                for idx in range(start_token_idx,end_token_idx+1):
                    if token_is_existing_mention[idx] is True:
                        is_good_candidate = False
                        break
                if is_good_candidate:
                    candidate_synnodes.add(node)
            # Find minimal spans
            # Index candidates by the tokens they cover, so nested candidates
            # sharing tokens can be found quickly below.
            token_to_candate_synnodes = dict()
            for node in candidate_synnodes:
                start_token = node.start_token
                end_token = node.end_token
                start_token_idx = start_token.index()
                end_token_idx = end_token.index()
                for idx in range(start_token_idx,end_token_idx+1):
                    token_to_candate_synnodes.setdefault(serif_sentence.token_sequence[idx],set()).add(node)
            # For each candidate, resolve to the shortest candidate NP that
            # contains it (a node always contains itself, so candidates is
            # never empty and the [0] index is safe).
            node_to_resolved_node = dict()
            for node in candidate_synnodes:
                tokens = node.tokens
                start_token_idx = tokens[0].index()
                end_token_idx = tokens[-1].index()
                candidates = set()
                for token in node.tokens:
                    for another_node in token_to_candate_synnodes.get(token,()):
                        another_node_tokens = another_node.tokens
                        another_node_start_token_idx = another_node_tokens[0].index()
                        another_node_end_token_idx = another_node_tokens[-1].index()
                        if start_token_idx >= another_node_start_token_idx and end_token_idx <= another_node_end_token_idx:
                            candidates.add(another_node)
                # NOTE(review): ties on token length are broken by set ordering,
                # which is not deterministic across runs — confirm acceptable.
                selected_candadate = sorted(list(candidates), key=lambda x:len(x.tokens))[0]
                node_to_resolved_node[node] = selected_candadate
            # Emit one UNDET/DESC mention per surviving (minimal) NP span.
            pending_added = set(node_to_resolved_node.values())
            for node in pending_added:
                MentionModel.add_new_mention(serif_sentence.mention_set, 'UNDET', 'DESC', node.start_token, node.end_token, model=type(self).__name__)
| 2.53125 | 3 |
DailyProgrammer/DP20140723B.py | DayGitH/Python-Challenges | 2 | 12767105 | <gh_stars>1-10
"""
[7/23/2014] Challenge#172 [Intermediate] Image Rendering 101...010101000101
https://www.reddit.com/r/dailyprogrammer/comments/2ba3nf/7232014_challenge172_intermediate_image_rendering/
#Description
You may have noticed from our
[easy](http://www.reddit.com/r/dailyprogrammer/comments/2ba3g3/7212014_challenge_172_easy/) challenge that finding a
program to render the PBM format is either very difficult or usually just a spammy program that no one would dare
download.
Your mission today, given the knowledge you have gained from last weeks challenge is to create a Renderer for the PBM
format.
For those who didn't do mondays challenge, here's a recap
* a PBM usually starts with 'P1' denoting that it is a .PBM file
* The next line consists of 2 integers representing the width and height of our image
* Finally, the pixel data. 0 is white and 1 is black.
This Wikipedia article will tell you more
http://en.wikipedia.org/wiki/Netpbm_format
#Formal Inputs & Outputs
##Input description
On standard console input you should be prompted to pass the .PBM file you have created from the easy challenge.
##Output description
The output will be a .PBM file rendered to the screen following the conventions where 0 is a white pixel, 1 is a black
pixel
#Notes
This task is considerably harder in some languages. Some languages have large support for image handling (.NET and
others) whilst some will require a bit more grunt work (C and even Python) .
It's up to you to decide the language, but easier alternatives probably do exist.
#Bonus
Create a renderer for the other versions of .PBM (P2 and P3) and output these to the screen.
#Finally
Have a good challenge idea?
Consider submitting it to /r/dailyprogrammer_ideas
"""
def main():
    # TODO: the PBM renderer described in the module docstring is not implemented.
    pass


if __name__ == "__main__":
    main()
| 2.859375 | 3 |
docs/examples/led_board_2.py | NotBobTheBuilder/gpiozero | 743 | 12767106 | from gpiozero import LEDBoard
from signal import pause
leds = LEDBoard(5, 6, 13, 19, 26, pwm=True)
leds.value = (0.2, 0.4, 0.6, 0.8, 1.0)
pause()
| 2.40625 | 2 |
src/gms/rem.py | hohe12ly/inundation-mapping | 2 | 12767107 | #!/usr/bin/env python3
from numba import njit, typeof, typed, types
import rasterio
import numpy as np
import argparse
import os
from osgeo import ogr, gdal
def rel_dem(dem_fileName, pixel_watersheds_fileName, rem_fileName, thalweg_raster):
    """
    Calculates REM/HAND/Detrended DEM

    Parameters
    ----------
    dem_fileName : str
        File name of pit filled DEM raster.
    pixel_watersheds_fileName : str
        File name of stream pixel watersheds raster.
    rem_fileName : str
        File name of output relative elevation raster.
    thalweg_raster : str
        File name of a binary raster (1 = thalweg, 0 = non-thalweg) restricting
        where reference elevations may be sampled.
    """
    # ------------------------------------------- Get catchment_min_dict --------------------------------------------------- #
    # The following creates a dictionary of the catchment ids (key) and their elevation along the thalweg (value).
    @njit
    def make_catchment_min_dict(flat_dem, catchment_min_dict, flat_catchments, thalweg_window):
        for i,cm in enumerate(flat_catchments):
            if thalweg_window[i] == 1:  # Only allow reference elevation to be within thalweg.
                # If the catchment really exists in the dictionary, compare elevation values.
                if (cm in catchment_min_dict):
                    if (flat_dem[i] < catchment_min_dict[cm]):
                        # If the flat_dem's elevation value is less than the catchment_min_dict min, update the catchment_min_dict min.
                        catchment_min_dict[cm] = flat_dem[i]
                else:
                    catchment_min_dict[cm] = flat_dem[i]
        return(catchment_min_dict)

    # Open the masked gw_catchments_pixels_masked and dem_thalwegCond_masked.
    gw_catchments_pixels_masked_object = rasterio.open(pixel_watersheds_fileName)
    dem_thalwegCond_masked_object = rasterio.open(dem_fileName)
    thalweg_raster_object = rasterio.open(thalweg_raster)

    # Specify raster object metadata.
    meta = dem_thalwegCond_masked_object.meta.copy()
    meta['tiled'], meta['compress'] = True, 'lzw'

    # -- Create catchment_min_dict -- #
    # Numba typed dict keyed by int32 catchment id -> float32 minimum elevation.
    catchment_min_dict = typed.Dict.empty(types.int32,types.float32)  # Initialize an empty dictionary to store the catchment minimums.
    # Update catchment_min_dict with pixel sheds minimum.
    # NOTE(review): windows are processed independently, which is correct here
    # because the dict accumulates minima across all windows.
    for ji, window in dem_thalwegCond_masked_object.block_windows(1):  # Iterate over windows, using dem_rasterio_object as template.
        dem_window = dem_thalwegCond_masked_object.read(1,window=window).ravel()  # Define dem_window.
        catchments_window = gw_catchments_pixels_masked_object.read(1,window=window).ravel()  # Define catchments_window.
        thalweg_window = thalweg_raster_object.read(1, window=window).ravel()  # Define cost_window.

        # Call numba-optimized function to update catchment_min_dict with pixel sheds minimum.
        catchment_min_dict = make_catchment_min_dict(dem_window, catchment_min_dict, catchments_window, thalweg_window)

    dem_thalwegCond_masked_object.close()
    gw_catchments_pixels_masked_object.close()
    thalweg_raster_object.close()
    # ------------------------------------------------------------------------------------------------------------------------ #

    # ------------------------------------------- Produce relative elevation model ------------------------------------------- #
    @njit
    def calculate_rem(flat_dem,catchmentMinDict,flat_catchments,ndv):
        # REM = DEM elevation minus the catchment's thalweg minimum; pixels whose
        # catchment minimum is nodata (or, implicitly, absent) stay at their
        # initialized value of 0 / nodata.
        rem_window = np.zeros(len(flat_dem),dtype=np.float32)
        for i,cm in enumerate(flat_catchments):
            if cm in catchmentMinDict:
                if catchmentMinDict[cm] == ndv:
                    rem_window[i] = ndv
                else:
                    rem_window[i] = flat_dem[i] - catchmentMinDict[cm]

        return(rem_window)

    rem_rasterio_object = rasterio.open(rem_fileName,'w',**meta)  # Open rem_rasterio_object for writing to rem_fileName.
    pixel_catchments_rasterio_object = rasterio.open(pixel_watersheds_fileName)  # Open pixel_catchments_rasterio_object
    dem_rasterio_object = rasterio.open(dem_fileName)

    # Second pass: compute and write the REM window by window.
    for ji, window in dem_rasterio_object.block_windows(1):
        dem_window = dem_rasterio_object.read(1,window=window)
        window_shape = dem_window.shape
        dem_window = dem_window.ravel()
        catchments_window = pixel_catchments_rasterio_object.read(1,window=window).ravel()

        rem_window = calculate_rem(dem_window, catchment_min_dict, catchments_window, meta['nodata'])
        rem_window = rem_window.reshape(window_shape).astype(np.float32)

        rem_rasterio_object.write(rem_window, window=window, indexes=1)

    dem_rasterio_object.close()
    pixel_catchments_rasterio_object.close()
    rem_rasterio_object.close()
    # ------------------------------------------------------------------------------------------------------------------------ #
if __name__ == '__main__':
    # Build the command-line interface.
    parser = argparse.ArgumentParser(description='Relative elevation from pixel based watersheds')
    parser.add_argument('-d', '--dem', help='DEM to use within project path', required=True)
    parser.add_argument('-w', '--watersheds', help='Pixel based watersheds raster to use within project path', required=True)
    parser.add_argument('-t', '--thalweg-raster', help='A binary raster representing the thalweg. 1 for thalweg, 0 for non-thalweg.', required=True)
    parser.add_argument('-o', '--rem', help='Output REM raster', required=True)

    # Parse and unpack the CLI arguments into a plain dictionary.
    cli_args = vars(parser.parse_args())

    # Compute the relative elevation model from the supplied rasters.
    rel_dem(cli_args['dem'], cli_args['watersheds'], cli_args['rem'], cli_args['thalweg_raster'])
src/reloadex/linux/reloader_linux.py | iljau/reloadex | 1 | 12767108 | <gh_stars>1-10
import ctypes
import os
import select
import shlex
import signal
import threading
import sys
import logging
from ctypes import c_int, byref, create_string_buffer
from timeit import default_timer
import reloadex.linux.shared
from reloadex.common.utils_app_starter import is_target_str_file
from reloadex.common.utils_reloader import LaunchParams
from reloadex.linux.ctypes_wrappers._eventfd import eventfd, EFD_CLOEXEC, EFD_NONBLOCK, eventfd_write, eventfd_read
from reloadex.linux.ctypes_wrappers._inotify import inotify_init1, IN_CLOEXEC, IN_NONBLOCK, inotify_add_watch, IN_ALL_EVENTS, \
IN_ACCESS, IN_CLOSE, IN_OPEN, inotify_read, IN_CREATE, IN_ISDIR, IN_IGNORED, IN_UNMOUNT, IN_Q_OVERFLOW
from reloadex.linux.ctypes_wrappers._posix_spawn import (
posix_spawnattr_t, posix_spawnattr_init, posix_spawnattr_setflags,
POSIX_SPAWN_USEVFORK,
create_char_array,
posix_spawn,
posix_spawnattr_destroy, posix_spawnattr_setsigmask, POSIX_SPAWN_SETSIGMASK, posix_spawnp)
from reloadex.linux.ctypes_wrappers._signalfd import sigset_t, sigemptyset
from reloadex.linux.ctypes_wrappers._timerfd import CLOCK_MONOTONIC, TFD_CLOEXEC, TFD_NONBLOCK, timerfd_create, itimerspec, \
timerfd_settime, timerfd_read
import reloadex.linux._app_starter
from reloadex.linux.shared import efd_stop_reloader
# Module-level logger; uncomment the DEBUG line for verbose reloader tracing.
logger = logging.getLogger(__name__)
# logger.setLevel(logging.DEBUG)
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())

##
def set_do_start_timer(timerfd_fd, after_ms=None):
    """Arm the one-shot start timer on ``timerfd_fd``.

    The timer fires once after ``after_ms`` milliseconds, or as soon as
    possible when ``after_ms`` is None. The interval fields stay zero so the
    timer never repeats.
    """
    # A 1-nanosecond expiry means "fire immediately" (a value of 0 would
    # disarm the timer instead).
    expiry_ns = 1 if after_ms is None else int(after_ms * 1000 * 1000)

    spec = itimerspec()
    spec.it_interval.tv_sec = 0
    spec.it_interval.tv_nsec = 0
    spec.it_value.tv_sec = 0
    spec.it_value.tv_nsec = expiry_ns

    timerfd_settime(timerfd_fd, 0, ctypes.pointer(spec), None)
def disarm_do_start_timer(timerfd_fd):
    """Disarm (cancel) the start timer on ``timerfd_fd``."""
    # An all-zero it_value disarms a timerfd (see timerfd_settime(2)).
    spec = itimerspec()
    spec.it_interval.tv_sec = 0
    spec.it_interval.tv_nsec = 0
    spec.it_value.tv_sec = 0
    spec.it_value.tv_nsec = 0

    timerfd_settime(timerfd_fd, 0, ctypes.pointer(spec), None)
class _SpawnedProcess:
    """Wraps a child process launched via posix_spawn/posix_spawnp.

    Tracks the child's pid and the spawn attributes so they can be cleaned up,
    and knows which signal to send on termination.
    """

    def __init__(self, process_args, use_spawnp=False, termination_signal=signal.SIGINT):
        # Full argv of the child (process_args[0] is also used as the path
        # when use_spawnp is False).
        self.process_args = process_args
        # True -> posix_spawnp (PATH search), False -> posix_spawn (exact path).
        self.use_spawnp = use_spawnp
        # Signal delivered to the child in stop().
        self.termination_signal = termination_signal
        self.pid = None
        # posix_spawnattr_t kept until _cleanup() destroys it.
        self.attr = None
        # Guards _cleanup() against concurrent invocation.
        self.cleanup_lock = threading.Lock()

    def start(self):
        """Spawn the child process and return its pid."""
        attr = self.attr = posix_spawnattr_t()
        psret = posix_spawnattr_init(attr)
        assert psret == 0, "psret = %s" % psret

        psret = posix_spawnattr_setflags(
            attr, POSIX_SPAWN_USEVFORK
            | POSIX_SPAWN_SETSIGMASK
        )
        assert psret == 0, "psret = %s" % psret

        ##
        # http://lists.llvm.org/pipermail/lldb-dev/2014-January/003104.html
        # sigset_t no_signals;
        # sigset_t all_signals;
        # sigemptyset (&no_signals);
        # sigfillset (&all_signals);
        # ::posix_spawnattr_setsigmask(&attr, &no_signals);
        # ::posix_spawnattr_setsigdefault(&attr, &all_signals);

        # Start the child with an empty signal mask (nothing blocked).
        no_signals = sigset_t()
        sigemptyset(no_signals)
        posix_spawnattr_setsigmask(attr, no_signals)

        argv = create_char_array(self.process_args)

        # Pass through the parent's environment unchanged.
        _env = []
        for key, value in os.environ.items():
            _env.append("%s=%s" % (key, value))
        envp = create_char_array(_env)

        path = create_string_buffer(self.process_args[0].encode("utf-8"))
        c_pid = c_int()

        if self.use_spawnp:
            posix_spawn_fn = posix_spawnp
        else:
            posix_spawn_fn = posix_spawn

        psret = posix_spawn_fn(
            byref(c_pid),
            path,
            None, # __file_actions
            attr,
            argv,
            envp
        )
        assert psret == 0, "psret = %s" % psret

        ##
        # TODO: posix_spawnattr_destroy? after process exit?
        pid = c_pid.value
        self.pid = pid
        return pid

    def _cleanup(self):
        """Destroy the spawn attributes exactly once (thread-safe, idempotent)."""
        with self.cleanup_lock:
            # FIXME: weakref callback to auto-invoke
            if self.attr is not None:
                logger.debug("_SpawnedProcess:_cleanup:destroying spawnattr")
                psret = posix_spawnattr_destroy(self.attr)
                assert psret == 0, "psret = %s" % psret
                self.attr = None

    def stop(self):
        """Send the termination signal to the child and reap it."""
        self._cleanup()

        if self.pid is not None:
            try:
                # os.kill(self.pid, signal.SIGINT)
                logger.debug("killing: %s" % self.pid)
                # os.kill(self.pid, signal.SIGUSR1)
                os.kill(self.pid, self.termination_signal)
                # '''
                # Reap the child to avoid leaving a zombie; tolerate the case
                # where another waiter already reaped it.
                try:
                    os.waitpid(self.pid, 0)
                except ChildProcessError as e:
                    if e.errno == 10:
                        #ChildProcessError: [Errno 10] No child processes
                        pass
                    else:
                        raise
                logger.debug("PROCESS killed")
                # '''
                self.pid = None
            except ProcessLookupError as e:
                if e.errno == 3:
                    # ProcessLookupError: [Errno 3] No such process
                    self.pid = None
                    logger.debug("terminate_process: process already terminated")
                else:
                    raise e
        else:
            logger.debug("terminate_process: pid is None")
# FIXME: efd_process_started should be visible per launched app
# FIXME: rename: not strictly process handles
class ProcessHandles:
    """Bundles the eventfd/timerfd descriptors used to coordinate one managed
    application process between the runner and relauncher threads."""

    # Most recently spawned child process (assigned by the runner thread).
    spawned_process: _SpawnedProcess

    def __init__(self, launch_params: LaunchParams):
        self.launch_params = launch_params
        self.spawned_process = None
        # handles
        # presumably signalled once the launched app is up; not read in this
        # module's visible code — TODO confirm against the app-starter side.
        self.efd_process_started = eventfd(0, flags=EFD_NONBLOCK)
        # by default terminated -> we use it do continue with loop
        self.efd_process_terminated = eventfd(1, flags=EFD_CLOEXEC|EFD_NONBLOCK)
        # presumably set when the running app should be torn down — confirm.
        self.efd_do_terminate_app = eventfd(0, flags=EFD_CLOEXEC | EFD_NONBLOCK)
        # One-shot timer used to (re)start the app, see set_do_start_timer().
        self.tfd_do_start_app = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC | TFD_NONBLOCK)
class AppRunnerThread(threading.Thread):
    """Spawns the target application once, waits for it to exit, and signals
    efd_process_terminated so the relaunching loop can proceed."""

    def set_process_handles(self, process_handles: ProcessHandles):
        # Must be called before start(); run() reads launch params and fds here.
        self.process_handles = process_handles

    def run(self):
        app_starter_path = reloadex.linux._app_starter.__file__

        argparse_args = self.process_handles.launch_params.argparse_args
        if argparse_args.cmd == False:
            # Python-target mode: run a file directly, or a "module:callable"
            # style target via the _app_starter shim.
            # FIXME: "app.py" should be launched directly using python
            target_fn_str = argparse_args.cmd_params[0]

            # -u: Force the stdout and stderr streams to be unbuffered. See also PYTHONUNBUFFERED.
            # -B: don't try to write .pyc files on the import of source modules. See also PYTHONDONTWRITEBYTECODE.
            if is_target_str_file(target_fn_str):
                _args = [sys.executable, "-u", "-B", target_fn_str]
                use_spawnp = False
                termination_signal = signal.SIGINT
            else:
                _args = [sys.executable, "-u", "-B", app_starter_path, target_fn_str]
                use_spawnp = False
                termination_signal = signal.SIGUSR1
        else:
            # Arbitrary-command mode: split a single shell-style string, or use
            # the parameter list verbatim; resolve the binary via PATH.
            cmd_params = argparse_args.cmd_params
            if len(cmd_params) == 1:
                # 'gunicorn app:app' -> as single string
                _args = shlex.split(cmd_params[0])
            else:
                _args = cmd_params
            use_spawnp = True
            termination_signal = signal.SIGINT

        spawned_process = self.process_handles.spawned_process = _SpawnedProcess(_args, use_spawnp=use_spawnp, termination_signal=termination_signal)
        pid = spawned_process.start()

        # http://code.activestate.com/recipes/578022-wait-for-pid-and-check-for-pid-existance-posix/
        # FIXME: process may already be killed
        status = 0
        logger.debug("WAIT: for process to terminate")
        try:
            pid, status = os.waitpid(pid, 0)
        except ChildProcessError as e:
            if e.errno == 10:
                logger.debug("already terminated")
            else:
                # NOTE(review): other errnos are silently swallowed here —
                # confirm whether they should be re-raised instead.
                pass
        logger.debug("WAIT OVER: process terminated")

        # FIXME: cleanup may be already be happened
        spawned_process._cleanup()

        # Tell the relaunching loop that this process is gone.
        eventfd_write(self.process_handles.efd_process_terminated, 1)

        if os.WIFSIGNALED(status):
            # process exited due to a signal; return the integer of that signal
            signalcode = os.WTERMSIG(status)
            logger.debug("pid=%s: Terminated with signal %s:%s " % (pid, signalcode, signal.Signals(signalcode).name))
        elif os.WIFEXITED(status):
            # process exited using exit(2) system call; return the
            # integer exit(2) system call has been called with
            exitcode = os.WEXITSTATUS(status)
            if exitcode != 0:
                logger.debug("pid=%s: Exit code: %s" % (pid, exitcode))
            else:
                logger.debug("EXITED NORMALLY: _app_starter.py")
        else:
            # should never happen
            raise RuntimeError("unknown process exit status")
class AppRelaunchingThread(threading.Thread):
    """Drives the app restart loop.

    Each cycle: (1) wait until the spawned process reports termination,
    (2) wait until the start-timer fires, (3) run an AppRunnerThread to
    completion.  The thread exits when the module-level ``efd_stop_reloader``
    eventfd becomes readable.
    """

    def set_process_handles(self, process_handles: ProcessHandles):
        # Shared fd bundle (eventfds/timerfd); must be set before start().
        self.process_handles = process_handles

    def run(self):
        # Poll set 1: the child terminated OR the reloader is shutting down.
        epoll_events_wait_termination = select.epoll()
        epoll_events_wait_termination.register(self.process_handles.efd_process_terminated, select.EPOLLIN) # read
        epoll_events_wait_termination.register(efd_stop_reloader, select.EPOLLIN) # read
        # Poll set 2: the start-timer fired OR the reloader is shutting down.
        epoll_events_start = select.epoll()
        epoll_events_start.register(efd_stop_reloader, select.EPOLLIN) # read
        epoll_events_start.register(self.process_handles.tfd_do_start_app, select.EPOLLIN)
        while True:
            logger.debug("polling for termination")
            events = epoll_events_wait_termination.poll()
            for fileno, event in events:
                if fileno == self.process_handles.efd_process_terminated and event == select.EPOLLIN:
                    logger.debug("AppRelaunchingThread:epoll_events_wait_termination:efd_process_terminated")
                    # Drain the eventfd so it is reset for the next cycle.
                    eventfd_read(fileno)
                elif fileno == efd_stop_reloader and event == select.EPOLLIN:
                    logger.debug("AppRelaunchingThread:epoll_events_wait_termination:efd_stop_reloader")
                    # The stop eventfd is not drained here, so other threads
                    # polling it also wake up.
                    return
                else:
                    raise Exception("should not happen")
            logger.debug("polling for startup")
            logger.debug("AppRelaunchingThread:waiting for epoll_events_start")
            events = epoll_events_start.poll()
            for fileno, event in events:
                logger.debug("some start event")
                if fileno == efd_stop_reloader and event == select.EPOLLIN:
                    logger.debug("AppRelaunchingThread:epoll_events_start:efd_stop_reloader")
                    return
                elif fileno == self.process_handles.tfd_do_start_app and event == select.EPOLLIN:
                    logger.debug("AppRelaunchingThread:epoll_events_start:tfd_do_start_app")
                    # reset terminate flag, if still set (so we won't terminate immediately without reason)
                    '''
                    try:
                        eventfd_res = eventfd_read(self.process_handles.efd_do_terminate_app)
                    except BlockingIOError as e:
                        # BlockingIOError: [Errno 11] Resource temporarily unavailable
                        if e.errno == 11:
                            pass
                        else:
                            raise
                    '''
                    # reset timer (if set); the timerfd is non-blocking, so an
                    # unarmed timer raises EAGAIN (errno 11), which is ignored.
                    try:
                        timerfd_read_res = timerfd_read(fileno)
                    except BlockingIOError as e:
                        # BlockingIOError: [Errno 11] Resource temporarily unavailable
                        if e.errno == 11:
                            pass
                        else:
                            raise
                    # Run the app synchronously; join() ensures only one app
                    # instance exists at a time.
                    app_runner_thread = AppRunnerThread()
                    app_runner_thread.set_process_handles(self.process_handles)
                    app_runner_thread.start()
                    app_runner_thread.join()
                else:
                    raise Exception("should not happen: (fileno, event) (%s,%s)" % (fileno, event) )
        # logging.debug("AppRelaunchingThread:END")
class AppTerminationThread(threading.Thread):
    """Waits for events and sends kill signal to app.

    Wakes on either the reloader-wide stop eventfd (stop the app and exit)
    or the per-cycle ``efd_do_terminate_app`` eventfd (stop the app and keep
    waiting for the next cycle).
    """

    def set_process_handles(self, process_handles: ProcessHandles):
        # Shared fd bundle; must be set before start().
        self.process_handles = process_handles

    def run(self):
        def terminate_app():
            # Delegate the actual signalling to the spawned-process wrapper.
            # print("TODO: should terminate app")
            self.process_handles.spawned_process.stop()
        epoll_events_stop = select.epoll()
        epoll_events_stop.register(efd_stop_reloader, select.EPOLLIN) # read
        epoll_events_stop.register(self.process_handles.efd_do_terminate_app, select.EPOLLIN)
        while True:
            logger.debug("AppRelaunchingThread:waiting for epoll_events_stop")
            events = epoll_events_stop.poll()
            for fileno, event in events:
                if fileno == efd_stop_reloader and event == select.EPOLLIN:
                    logger.debug("AppTerminationThread:epoll_events_stop:efd_stop_reloader")
                    # Reloader shutdown: stop the app, then end this thread.
                    terminate_app()
                    return
                elif fileno == self.process_handles.efd_do_terminate_app and event == select.EPOLLIN:
                    logger.debug("AppTerminationThread:epoll_events_stop:efd_do_terminate_app")
                    # Drain the eventfd, stop the app, keep looping for the
                    # next restart cycle.
                    eventfd_read(fileno)
                    terminate_app()
                else:
                    raise Exception("should not happen: (fileno, event) (%s,%s)" % (fileno, event))
class FileChangesMonitoringThread(threading.Thread):
    """Watches the working-directory tree via inotify; on a change that the
    launch params classify as reload-relevant, signals termination of the
    running app and arms the restart timer."""

    def set_process_handles(self, process_handles: ProcessHandles):
        # Shared fd bundle; must be set before start().
        self.process_handles = process_handles

    def run(self):
        inotify_fd = inotify_init1(IN_CLOEXEC | IN_NONBLOCK)
        # Maps inotify watch descriptor -> watched directory path (bytes).
        watched_fds = {}
        def add_watch(full_path: bytes):
            logger.debug(f"add_watch: {full_path}")
            c_path = create_string_buffer(full_path)
            # Watch all events except the noisy read-only ones (access/open/close).
            watch_descriptor = inotify_add_watch(inotify_fd, c_path, IN_ALL_EVENTS & ~IN_ACCESS & ~IN_CLOSE & ~IN_OPEN)
            assert watch_descriptor != -1, "inotify_add_watch error"
            watched_fds[watch_descriptor] = full_path
        filesystemencoding = sys.getfilesystemencoding()
        # FIXME: use provided path
        # for root, dirs, files in os.walk('/home/ilja/Code/py_reload_inotify'):
        # Recursively watch every directory under the working directory.
        for root, dirs, files in os.walk(self.process_handles.launch_params.working_directory):
            add_watch(root.encode(filesystemencoding))
        def event_callback(full_path, event):
            logger.debug(f"event_callback: {full_path} {event}")
            if self.process_handles.launch_params.file_triggers_reload_fn(full_path):
                # start termination on first reload event
                logger.debug("event_callback:efd_do_terminate_app")
                eventfd_write(self.process_handles.efd_do_terminate_app, 1)
                # Arm the restart timer almost immediately (1 ms).
                set_do_start_timer(self.process_handles.tfd_do_start_app, after_ms=1)
                logger.debug("event_callback:END")
            else:
                pass
        ##
        epoll_events = select.epoll()
        epoll_events.register(inotify_fd, select.EPOLLIN) # read
        epoll_events.register(efd_stop_reloader, select.EPOLLIN) # read
        while True:
            events = epoll_events.poll()
            for fileno, event in events:
                if fileno == efd_stop_reloader and event == select.EPOLLIN:
                    logger.debug("FileChangesMonitoringThread:stop_reloader")
                    return
                elif fileno == inotify_fd and event == select.EPOLLIN:
                    start = default_timer()
                    for event in inotify_read(inotify_fd):
                        full_path = os.path.join(watched_fds[event.wd], event.name)
                        if event.mask & IN_CREATE and event.mask & IN_ISDIR:
                            # New sub-directory: watch it too so the tree stays covered.
                            add_watch(full_path)
                        elif event.mask & IN_IGNORED:
                            # Watch was removed (e.g. directory deleted); forget it.
                            del watched_fds[event.wd]
                            continue # to next event
                        elif event.mask & IN_UNMOUNT:
                            raise NotImplementedError("handling of IN_UNMOUNT")
                        elif event.mask & IN_Q_OVERFLOW:
                            raise NotImplementedError("handling of IN_Q_OVERFLOW")
                        event_callback(full_path, event)
                    diff = default_timer() - start
                    # print("Batch took: %.4f ms" % (diff * 1000))
                else:
                    raise Exception("should not happen")
# FIXME: naming
def main2_threaded(launch_params: LaunchParams):
    """Run the reloader: start the termination, file-monitoring and
    relaunching threads, trigger the first app start, then block until
    Ctrl-C, signal ``efd_stop_reloader`` and join all threads."""
    os.chdir(launch_params.working_directory)
    threads = []
    process_handles = ProcessHandles(launch_params)
    # Thread that delivers the kill signal to the spawned app on request.
    app_termination_thread = AppTerminationThread()
    app_termination_thread.set_process_handles(process_handles)
    threads.append(app_termination_thread)
    app_termination_thread.start()
    # Thread that watches the working directory for file changes.
    file_changes_monitoring_thread = FileChangesMonitoringThread()
    file_changes_monitoring_thread.set_process_handles(process_handles)
    threads.append(file_changes_monitoring_thread)
    file_changes_monitoring_thread.start()
    # Thread that (re)starts the app after each termination.
    app_relaunching_thread = AppRelaunchingThread()
    app_relaunching_thread.set_process_handles(process_handles)
    threads.append(app_relaunching_thread)
    app_relaunching_thread.start()
    # start app
    set_do_start_timer(process_handles.tfd_do_start_app)
    try:
        # select() with empty fd sets blocks indefinitely; only a signal
        # (KeyboardInterrupt) wakes the main thread.
        select.select([], [], [])
    except KeyboardInterrupt as e:
        eventfd_write(efd_stop_reloader, 1)
    for thread in threads:
        logger.debug("joining: %s" % thread)
        thread.join()
        logger.debug("joined: %s" % thread)
    logger.debug("OVER")
def main(launch_params: LaunchParams):
    """Entry point: run the threaded reloader until interrupted."""
    try:
        main2_threaded(launch_params)
    except KeyboardInterrupt:
        # Wake every worker thread so they can exit cleanly.
        eventfd_write(efd_stop_reloader, 1)
| 1.445313 | 1 |
config.py | reillykeele/CMPUT404-assignment-webserver | 0 | 12767109 | class Config:
BUFFER_SIZE = 1024
ROOT = './www' | 1.242188 | 1 |
flaskr/db.py | elijah415hz/finances-flask | 0 | 12767110 | from sqlalchemy import create_engine
import os
# Connection string taken from the environment; None when unset —
# create_engine would then fail, so deployment must always set it (TODO confirm).
FLASK_DB_URI = os.environ.get("FLASK_DB_URI")

# Create database connection
# Module-level SQLAlchemy engine shared by the application.
engine = create_engine(FLASK_DB_URI)
kitpy/common/__init__.py | YorkSu/kitpy | 0 | 12767111 | <reponame>YorkSu/kitpy<gh_stars>0
# -*- coding: utf-8 -*-
"""Common package."""
from kitpy.common import cache
from kitpy.common import interface
from kitpy.common import pattern
| 1.09375 | 1 |
scripts/mass_2_weight.py | architectureofthings/openmeta-vahana | 11 | 12767112 | <gh_stars>10-100
'''
# Name: mass_2_weight
# Company: MetaMorph, Inc.
# Author(s): <NAME>
# Email: <EMAIL>
# Create Date: 6/9/2017
# Edit Date: 6/9/2017
# Conversion of Airbus A^3's vahanaTradeStudy>reserveMission.mat code
# (located here: https://github.com/VahanaOpenSource/vahanaTradeStudy )
# to Python 2.7 for use in the MetaMorph, Inc. OpenMETA environment
# http://www.metamorphsoftware.com/openmeta/
# Convert mass [kg] to weight [N]
# Inputs:
# mass - [kg]
# Outputs:
# weight - [N}
'''
from __future__ import print_function
from openmdao.api import Component
class mass_2_weight(Component):
    """OpenMDAO component converting a mass [kg] into a weight [N]."""

    def __init__(self):
        super(mass_2_weight, self).__init__()
        # Input: mass in kilograms.  Output: weight in newtons.
        self.add_param('mass', val=0.0)
        self.add_output('weight', val=0.0)

    def solve_nonlinear(self, params, unknowns, resids):
        # W = m * g with g = 9.8 m/s^2.
        unknowns['weight'] = 9.8 * params['mass']
src/snippets/06_linear_algebra.py | yvesbeutler/tyrannosaurus | 0 | 12767113 | from typing import List, Tuple, Callable
import math
# Type aliases: a vector is a flat list of floats, a matrix a list of rows.
Vector = List[float]
Matrix = List[List[float]]
###################################
# Vector calculations
###################################
def add(v: Vector, w: Vector) -> Vector:
    """Component-wise sum of two equal-length vectors."""
    assert len(v) == len(w), 'Vectors need to have the same length'
    total = []
    for left, right in zip(v, w):
        total.append(left + right)
    return total
def subtract(v: Vector, w: Vector) -> Vector:
    """Component-wise difference v - w of two equal-length vectors."""
    assert len(v) == len(w), 'Vectors need to have the same length'
    return [first - second for first, second in zip(v, w)]
def scalar_multiply(c: float, v: Vector) -> Vector:
    """Scale every component of v by the scalar c."""
    scaled = []
    for component in v:
        scaled.append(c * component)
    return scaled
def vector_sum(vectors: List[Vector]) -> Vector:
    """Element-wise sum across a non-empty list of equal-length vectors."""
    assert vectors, 'Vectors must not be empty'
    num_elements = len(vectors[0])
    assert all(len(v) == num_elements for v in vectors), 'Vectors need to have the same length'
    totals = [0] * num_elements
    for vector in vectors:
        for index, component in enumerate(vector):
            totals[index] += component
    return totals
def vector_mean(vectors: List[Vector]) -> Vector:
    """Element-wise average of a non-empty list of vectors."""
    count = len(vectors)
    return scalar_multiply(1 / count, vector_sum(vectors))
def dot(v: Vector, w: Vector) -> float:
    """
    Dot product of v and w (a scalar, not a vector):\n
    (v1 * w1) + (v2 * w2) + ... + (vn * wn)
    """
    assert len(v) == len(w), 'Vectors need to have the same length'
    total = 0
    for a, b in zip(v, w):
        total += a * b
    return total
def sum_of_squares(v: Vector) -> float:
    """Dot product of v with itself: v1^2 + v2^2 + ... + vn^2."""
    return dot(v, v)
def magnitude(v: Vector) -> float:
    """Euclidean length of v."""
    return math.sqrt(sum_of_squares(v))
def squared_distance(v: Vector, w: Vector) -> float:
    """
    Squared Euclidean distance between v and w:\n
    (v1 - w1)^2 + (v2 - w2)^2 + ... + (vn - wn)^2
    """
    difference = subtract(v, w)
    return sum_of_squares(difference)
def distance(v: Vector, w: Vector) -> float:
    """Euclidean distance between v and w."""
    return math.sqrt(squared_distance(v, w))
# Import-time sanity checks for the vector helpers.
assert add([1,2,3],[4,5,6]) == [5,7,9]
assert subtract([5,7,9],[4,5,6]) == [1,2,3]
assert scalar_multiply(3, [2,4,8]) == [6,12,24]
assert vector_sum([[2,5,1], [2,6,3], [4,1,7]]) == [8,12,11]
assert vector_mean([[1,2], [3,4], [5,6]]) == [3,4]
assert dot([2,4,8], [3,1,4]) == 42
assert sum_of_squares([2,3,4]) == 29
assert magnitude([3,4]) == 5
assert squared_distance([1,4], [3,9]) == 29
assert distance([1,5], [5,8]) == 5
###################################
# Matrix calculations
###################################
def shape(A: Matrix) -> Tuple[int, int]:
    """Return (number of rows, number of columns) of A."""
    if not A:
        return 0, 0
    return len(A), len(A[0])
def get_row(A: Matrix, i: int) -> Vector:
    """Return row i of A (the i-th inner list itself, not a copy)."""
    return A[i]
def get_column(A: Matrix, j: int) -> Vector:
    """Return column j of A as a new list."""
    column = []
    for row in A:
        column.append(row[j])
    return column
def gen_matrix(num_rows: int,
               num_cols: int,
               entry_fn: Callable[[int, int], float]) -> Matrix:
    """Build a num_rows x num_cols matrix whose cell (i, j) is entry_fn(i, j)."""
    matrix = []
    for i in range(num_rows):
        row = [entry_fn(i, j) for j in range(num_cols)]
        matrix.append(row)
    return matrix
def identity_matrix(n: int) -> Matrix:
    """Return the n x n identity matrix."""
    return gen_matrix(n, n, lambda row, col: 1 if row == col else 0)
# Import-time sanity checks for the matrix helpers.
A: Matrix = [[1,2,3],[4,5,6]]
assert shape(A) == (2, 3)
assert get_row(A,1) == [4,5,6]
assert get_column(A,2) == [3,6]
assert identity_matrix(5) == [[1, 0, 0, 0, 0],
                              [0, 1, 0, 0, 0],
                              [0, 0, 1, 0, 0],
                              [0, 0, 0, 1, 0],
                              [0, 0, 0, 0, 1]]
| 3.921875 | 4 |
help/urls.py | Shreyanshsachan/College-Predictor | 0 | 12767114 | <reponame>Shreyanshsachan/College-Predictor<filename>help/urls.py<gh_stars>0
from django.conf.urls import url
from . import views
# Application namespace for URL reversing (e.g. reverse('HELP:helpapp')).
app_name='HELP'

urlpatterns=[
    # Root of the help app.
    url(r'^$',views.help_view,name='helpapp'),
]
src/project_name/urls.py | konoufo/perfectstart | 0 | 12767115 | from django.conf.urls import include, url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from django.views.generic import RedirectView
from profiles.views import SignupView
from . import views
# Project URL table.  The custom SignupView is registered before the
# include of "account.urls" so it takes precedence over the stock view.
urlpatterns = [
    url(r'^$', views.HomePage.as_view(), name='home'),
    url(r'^about/$', views.AboutPage.as_view(), name='about'),
    url(r'^users/', include('profiles.urls', namespace='profiles')),
    url(r'^admin/', include(admin.site.urls)),
    url(r"^account/signup/$", SignupView.as_view(), name="account_signup"),
    # redirect unneeded/unused social accounts page to settings page
    url(r"account/social/accounts/", RedirectView.as_view(url='/account/settings/')),
    url(r"^account/", include("account.urls")),
]

# User-uploaded files like profile pics need to be served in development
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

# Include django debug toolbar if DEBUG is on
if settings.DEBUG:
    import debug_toolbar
    urlpatterns += [
        url(r'^__debug__/', include(debug_toolbar.urls)),
    ]
| 2.03125 | 2 |
joshgordon/04/bingo.py | VisionistInc/-advent-of-code-2021 | 1 | 12767116 | <filename>joshgordon/04/bingo.py
from collections import defaultdict
import re
# shamelessly borrowed from https://stackoverflow.com/questions/2912231/is-there-a-clever-way-to-pass-the-key-to-defaultdicts-default-factory
# I've used this before and it's super awesome. It's a defaultdict except your lambda function gets your key
# as a parameter.
class keydefaultdict(defaultdict):
    """defaultdict variant whose default factory receives the missing key."""

    def __missing__(self, key):
        if self.default_factory is None:
            raise KeyError(key)
        # Build, cache and return the value for this key.
        value = self.default_factory(key)
        self[key] = value
        return value
class BoardNumber:
    """A single bingo cell: its value plus whether it has been called."""

    def __init__(self, num):
        self.number = int(num)
        self.called = False

    def __repr__(self):
        # Yellow when called, red background otherwise (ANSI colors).
        color = "\033[33m" if self.called else "\033[41m"
        return f"{color}{self.number}\033[0m"

    def call(self):
        # Mark this number as drawn.
        self.called = True
class Board:
    """A 2-D bingo board of BoardNumber cells.

    Cells parsed from text resolve through the module-level ``numbers``
    cache, so equal values share one BoardNumber object across boards.
    """

    def __init__(self, grid):
        # Accept either an already-built grid (list of rows of cells) or the
        # raw whitespace-separated text block from the puzzle input.
        if isinstance(grid, list):
            # BUG FIX: was ``self.grid == grid`` — a no-op comparison that
            # left self.grid unset whenever a list was passed in.
            self.grid = grid
        else:
            self.grid = self._parse_grid(grid)

    def _parse_grid(self, grid):
        # Split the text into rows, then cells, resolving each value through
        # the shared ``numbers`` cache.
        return [
            [numbers[int(y.strip())] for y in re.split(r"\s+", x.strip())] for x in grid.strip().split("\n")
        ]

    def __repr__(self):
        # Render each cell left-justified in a 13-character column.
        res = ""
        for row in self.grid:
            for col in row:
                res += f"{str(col):13s}"
            res += "\n"
        return res

    def check_board(self):
        """Return True if any full row or full column has been called."""
        # iterate through the rows:
        checks = [all([col.called for col in row]) for row in self.grid]
        # zip(*grid) transposes the grid, yielding its columns.
        checks += [all([row.called for row in col]) for col in list(zip(*self.grid))]
        return any(checks)

    def get_unmarked_sum(self):
        """Sum of the values of all not-yet-called cells."""
        return sum([sum([num.number for num in row if not num.called]) for row in self.grid])
# Shared cache: value -> BoardNumber, so every board references the same cells.
numbers = keydefaultdict(BoardNumber)
| 3.03125 | 3 |
app/views.py | mugisha-thierry/online-shop | 1 | 12767117 | from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Group
from django.shortcuts import render,redirect,get_object_or_404
from django.http import HttpResponse, Http404,HttpResponseRedirect
from django.contrib.auth.forms import UserCreationForm
from .models import Profile,OrderItem, Order, Transaction,Product, Category, Comment, Rate,Delivery
from django.contrib.auth import login, authenticate
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from django.contrib.auth.models import User
from django.urls import reverse
from .forms import SignUpForm, UpdateUserProfileForm,CommentForm,RateForm,DeliveryForm
from .decorators import admin_only,allowed_users
from django.contrib import messages
import datetime
# import stripe
# Create your views here.
# @login_required(login_url='login')
def home(request):
    """Landing page: all products plus the category list for the sidebar."""
    object_list = Product.objects.all()
    categorys = Category.get_category()
    return render(request, 'home.html',{'object_list':object_list,'categorys':categorys})


def search_product(request):
    """Anonymous product search driven by the ``searchproject`` GET parameter."""
    categorys = Category.get_category()
    if 'searchproject' in request.GET and request.GET["searchproject"]:
        search_term = request.GET.get("searchproject")
        searched_project = Product.search_by_name(search_term)
        message = f"{search_term}"
        context = {'object_list':searched_project,'message': message,'categorys':categorys}
        return render(request, "search.html",context)
    else:
        message = "You haven't searched for any term"
        return render(request, 'search.html',{"message":message})


def search_products(request):
    """Search for logged-in users; also collects the products already in the
    user's open (not yet ordered) cart so the template can mark them."""
    categorys = Category.get_category()
    filtered_orders = Order.objects.filter(owner=request.user.profile, is_ordered=False)
    current_order_products = []
    if filtered_orders.exists():
        user_order = filtered_orders[0]
        user_order_items = user_order.items.all()
        current_order_products = [product.product for product in user_order_items]
    if 'searchproduct' in request.GET and request.GET["searchproduct"]:
        search_term = request.GET.get("searchproduct")
        searched_project = Product.search_by_name(search_term)
        message = f"{search_term}"
        context = {'object_list':searched_project,'message': message,'categorys':categorys,'current_order_products': current_order_products,}
        return render(request, "searching.html",context)
    else:
        message = "You haven't searched for any term"
        return render(request, 'searching.html',{"message":message})


def product_category(request, category):
    """Product listing filtered to one category (anonymous template)."""
    object_list = Product.filter_by_category(category)
    categorys = Category.get_category()
    context = {'object_list':object_list,'categorys': categorys}
    return render(request,'category/notlogged.html',context)
# @login_required(login_url='login')
def comment(request, pk):
    """Product detail page: handles both the comment form and the 3-part
    rating form (test/price/durability, averaged), then re-renders the page."""
    image = get_object_or_404(Product, pk=pk)
    product = Product.objects.get(id = pk)
    rates = Rate.objects.order_by('-date')
    current_user = request.user
    # Comment form: attach the new comment to this product and the profile.
    if request.method == 'POST':
        form = CommentForm(request.POST)
        if form.is_valid():
            comment = form.save(commit=False)
            comment.product = image
            comment.user = request.user.profile
            comment.save()
            return HttpResponseRedirect(request.path_info)
    else:
        form = CommentForm()
    # Rating form: store the three scores plus their arithmetic mean.
    if request.method == 'POST':
        form_rate = RateForm(request.POST)
        if form_rate.is_valid():
            test = form_rate.cleaned_data['test']
            price = form_rate.cleaned_data['price']
            durability = form_rate.cleaned_data['durability']
            rate = Rate()
            rate.product = image
            rate.user = current_user
            rate.test = test
            rate.price = price
            rate.durability = durability
            rate.average = (rate.test + rate.price + rate.durability)/3
            rate.save()
            return HttpResponseRedirect(request.path_info)
    else:
        form_rate = RateForm()
    context = {
        'image': image,
        'form': form,
        'form_rate':form_rate,
        'rates':rates,
        'product':product,
    }
    return render(request, 'product.html', context)
def signup(request):
    """Register a new user and add them to the 'customer' group."""
    if request.method == 'POST':
        form = SignUpForm(request.POST)
        if form.is_valid():
            user = form.save()
            # Every self-registered account becomes a plain customer.
            group = Group.objects.get(name = 'customer')
            user.groups.add(group)
            messages.info(request, "Your account has been Created successfully.")
            return redirect("/login")
    else:
        form = SignUpForm()
    return render(request, 'register/register.html', {'form': form})


def profile(request, username):
    """Profile page: edit the profile and list the user's completed orders."""
    my_user_profile = Profile.objects.filter(user=request.user).first()
    my_orders = Order.objects.filter(is_ordered=True, owner=my_user_profile)
    if request.method == 'POST':
        prof_form = UpdateUserProfileForm(request.POST, request.FILES, instance=request.user.profile)
        if prof_form.is_valid():
            prof_form.save()
            return redirect(request.path_info)
    else:
        prof_form = UpdateUserProfileForm(instance=request.user.profile)
    context = {
        'prof_form': prof_form,
        'my_orders':my_orders,
    }
    return render(request, 'profile.html', context)
@login_required(login_url='login')
def product_list(request):
    """Authenticated product listing; marks products already in the cart."""
    object_list = Product.objects.all()
    categorys = Category.get_category()
    # Products already in the user's open (not yet ordered) cart.
    filtered_orders = Order.objects.filter(owner=request.user.profile, is_ordered=False)
    current_order_products = []
    if filtered_orders.exists():
        user_order = filtered_orders[0]
        user_order_items = user_order.items.all()
        current_order_products = [product.product for product in user_order_items]
    context = {
        'object_list': object_list,
        'current_order_products': current_order_products,
        'categorys':categorys
    }
    return render(request, "products/product_list.html", context)


def products_category(request, category):
    """Authenticated category listing; marks products already in the cart."""
    object_list = Product.filter_by_category(category)
    categorys = Category.get_category()
    filtered_orders = Order.objects.filter(owner=request.user.profile, is_ordered=False)
    current_order_products = []
    if filtered_orders.exists():
        user_order = filtered_orders[0]
        user_order_items = user_order.items.all()
        current_order_products = [product.product for product in user_order_items]
    context = {'object_list':object_list,'categorys': categorys,'current_order_products':current_order_products}
    return render(request,'category/logedin.html',context)
def get_user_pending_order(request):
    """Return the user's open (not yet ordered) Order, or 0 when none exists.

    NOTE(review): the no-order sentinel is the int 0, not None — callers
    rely on its falsiness in templates; verify before changing it.
    """
    # get order for the correct user
    user_profile = get_object_or_404(Profile, user=request.user)
    order = Order.objects.filter(owner=user_profile, is_ordered=False)
    if order.exists():
        # get the only order in the list of filtered orders
        return order[0]
    return 0
@login_required()
def add_to_cart(request, **kwargs):
    """Add the product given by ``item_id`` to the user's open order.

    NOTE(review): ``OrderItem.objects.get_or_create(product=product)`` keys
    only on the product, so one OrderItem is shared by every cart containing
    that product — confirm this is intended.
    """
    # get the user profile
    user_profile = get_object_or_404(Profile, user=request.user)
    # filter products by id
    product = Product.objects.filter(id=kwargs.get('item_id', "")).first()
    # check if the user already owns this product
    # if product in request.user.profile.ebooks.all():
    #     messages.info(request, 'You already own this ebook')
    #     return redirect(reverse('product_list'))
    # create orderItem of the selected product
    order_item, status = OrderItem.objects.get_or_create(product=product)
    # create order associated with the user
    user_order, status = Order.objects.get_or_create(owner=user_profile, is_ordered=False)
    user_order.items.add(order_item)
    if status:
        # generate a reference code
        user_order.ref_code = 221
        user_order.save()
    # show confirmation message and redirect back to the same page
    messages.info(request, "item added to cart")
    return redirect(reverse('product_list'))


@login_required(login_url='login')
def delete_from_cart(request, item_id):
    """Remove one OrderItem by primary key, then return to the cart summary."""
    item_to_delete = OrderItem.objects.filter(pk=item_id)
    if item_to_delete.exists():
        item_to_delete[0].delete()
        messages.info(request, "Item has been deleted")
    return redirect(reverse('order_summary'))
@login_required(login_url='login')
def order_details(request, **kwargs):
    """Cart summary page for the user's open order."""
    existing_order = get_user_pending_order(request)
    context = {
        'order': existing_order
    }
    return render(request, 'shopping_cart/order_summary.html', context)


@login_required(login_url='login')
def checkout(request, **kwargs):
    """Checkout page: on a valid delivery form, record the delivery, empty
    the cart and go back to the product list."""
    # Placeholder payment-gateway values (real tokens not wired up yet).
    client_token = 222
    current_user = request.user
    existing_order = get_user_pending_order(request)
    publishKey = 111
    if request.method == 'POST':
        form = DeliveryForm(request.POST)
        if form.is_valid():
            comment = form.save(commit=False)
            comment.user = current_user
            comment.save()
            # Empties the cart (clear_from_cart's own redirect is discarded).
            clear_from_cart(request)
            return redirect('product_list')
    else:
        form = DeliveryForm()
    context = {
        'order': existing_order,
        'client_token': client_token,
        'form':form,
    }
    return render(request, 'shopping_cart/checkout.html', context)
@login_required(login_url='login')
def clear_from_cart(request):
    """Delete the user's order and return to the product list.

    NOTE(review): get_object_or_404 is given ``owner=current_user.id`` (a
    User id) although Order.owner elsewhere is a Profile — verify the lookup.
    """
    current_user = request.user
    cat = get_object_or_404(Order, owner=current_user.id)
    cat.delete()
    messages.info(request, "Thanks for shopping with us")
    return redirect('product_list')


def admin_page(request):
    """Static admin landing page."""
    return render(request,'admin_page.html')


def about(request):
    """Static about page."""
    return render(request,'about.html')
lib/dmcomm/protocol/barcode.py | dmcomm/dmcomm-python | 1 | 12767118 | # This file is part of the DMComm project by BladeSabre. License: MIT.
"""
`dmcomm.protocol.barcode`
=========================
Functions for generating EAN-13 patterns.
"""
# https://en.wikipedia.org/wiki/International_Article_Number
# EAN-13 building blocks: guard patterns plus the three 7-bit digit
# encodings (L/G for the left half, R for the right half).
_START_END = "101"
_CENTRE = "01010"
_CODES = {
    "L": ["0001101", "0011001", "0010011", "0111101", "0100011", "0110001", "0101111", "0111011", "0110111", "0001011"],
    "G": ["0100111", "0110011", "0011011", "0100001", "0011101", "0111001", "0000101", "0010001", "0001001", "0010111"],
    "R": ["1110010", "1100110", "1101100", "1000010", "1011100", "1001110", "1010000", "1000100", "1001000", "1110100"],
}
# First digit -> parity pattern (L/G) used for the six left-half digits.
_SELECT = ["LLLLLL", "LLGLGG", "LLGGLG", "LLGGGL", "LGLLGG", "LGGLLG", "LGGGLL", "LGLGLG", "LGLGGL", "LGGLGL"]


def ean13_bits(barcode_number: list) -> str:
    """Return the 95-module bit pattern for a 13-digit EAN-13 number."""
    left_parities = _SELECT[barcode_number[0]]
    pieces = [_START_END]
    # Digits 1..6 use L or G parity chosen by the (implicit) first digit.
    pieces.extend(_CODES[left_parities[pos]][barcode_number[pos + 1]] for pos in range(6))
    pieces.append(_CENTRE)
    # Digits 7..12 always use the R code set.
    pieces.extend(_CODES["R"][barcode_number[pos + 7]] for pos in range(6))
    pieces.append(_START_END)
    return "".join(pieces)
def run_lengths(seq) -> list:
    """Collapse consecutive equal items into their run lengths.

    e.g. "aabccc" -> [2, 1, 3]; the empty sequence gives [].
    """
    lengths = []
    for index, item in enumerate(seq):
        if index > 0 and item == seq[index - 1]:
            # Same run as the previous item: extend it.
            lengths[-1] += 1
        else:
            # A new run starts here.
            lengths.append(1)
    return lengths
def ean13_lengths(barcode_number: list) -> list:
    """Bar/space widths of the EAN-13 pattern (run-length encoded bits)."""
    pattern = ean13_bits(barcode_number)
    return run_lengths(pattern)
| 2.765625 | 3 |
lib/security.py | qvant/stackexchange_bot | 0 | 12767119 | <filename>lib/security.py
import base64
from cryptography.fernet import Fernet
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
# Static in-source pepper mixed into the key-derivation passphrase
# (see set_up_encryption).
CONFIG_PARAM_SECRET_CONST = "2fggbre34AAftr54"
def is_password_encrypted(password: str) -> bool:
    """True when ``password`` carries the '????' marker appended by
    encrypt_password; None counts as not encrypted."""
    if password is None:
        return False
    return password.endswith('????')
def set_up_encryption(server_name: str, port: int) -> Fernet:
    """Derive a deterministic Fernet cipher from the server name and port.

    NOTE(review): ``bytes(port)`` yields ``port`` zero bytes, so the salt is
    all zeros and only its *length* varies with the port — confirm this is
    intended before relying on it for security.
    """
    salt = bytes(port)
    # TODO: rewrite to AES
    kdf = PBKDF2HMAC(algorithm=hashes.SHA256(),
                     length=32,
                     salt=salt,
                     iterations=100000)
    # Passphrase = server name plus the static in-source pepper.
    key = base64.urlsafe_b64encode(kdf.derive((server_name + CONFIG_PARAM_SECRET_CONST).encode('UTF-8')))
    f = Fernet(key)
    return f
def decrypt_password(password: str, server_name: str, port: int) -> str:
    """Decrypt a Fernet token produced for this server/port pair.

    NOTE(review): expects the raw token, i.e. without the trailing '????'
    marker that encrypt_password appends — callers must strip it first.
    """
    cipher = set_up_encryption(server_name, port)
    plain = cipher.decrypt(password.encode('UTF-8'))
    return plain.decode('UTF-8')
def encrypt_password(password: str, server_name: str, port: int) -> str:
    """Encrypt ``password`` for this server/port pair and tag the result
    with the '????' marker recognised by is_password_encrypted."""
    cipher = set_up_encryption(server_name, port)
    token = cipher.encrypt(password.encode('UTF-8'))
    # TODO fix this hack for secret strings
    return token.decode('UTF-8') + '????'
| 2.78125 | 3 |
apollon/chain.py | ApollonChain/ApollonCore | 2 | 12767120 | <filename>apollon/chain.py<gh_stars>1-10
## Blockchain Object ##
class Blockchain:
    # Constructs a new Blockchain object.
    def __init__(self, RootConfig, ChainDB):
        """Wire up storage, miner, web controller, dashboard, coins and the
        chain-owned burn address from the given root config and chain DB."""
        # Blockchain data
        from threading import Lock
        self.mempool = list()
        self.root_conf = RootConfig
        self.thread_lock = Lock()
        ## Chain Storage
        from apollon.chain_storage import ChainStorage
        self.ChainStorageObj = ChainDB
        self.last_block = None
        # Miner; second argument is 2 — TODO confirm its meaning (thread count?).
        from apollon.miner import CryptonightMiner
        self.miner = CryptonightMiner(self, 2)
        # WebController
        from apollon.web_controller import WebController
        self.wc = WebController(self)
        # Dashboard
        from apollon.apollon_dashboard import ApollonDashboard
        self.dashboard = ApollonDashboard(self)
        # The chain's coins are kept in RAM.
        self.current_coins = list()
        for i in self.root_conf.getCoins():
            i._atempChain(self)
            self.current_coins.append(i)
        # The chain's own addresses are created (currently just the burn address).
        from apollon.apollon_address import BlockchainAddress, AddressTypes
        self.burn_address = BlockchainAddress(ChainSeed=self.root_conf.getChainSeed(True), AddressType=AddressTypes.ChainOwnAddress, ChainAddressType=BlockchainAddress.ChainAddressTypes.BURN_ADDRESS, NetworkCMHex=self.root_conf.getNetChecksum(True))
# Started das Mining
def startMining(self, RewardAddress):
# Es wird geprüft ob eine gültige Adresse übergeben wurde
from apollon.apollon_address import LagacyAddress, PhantomAddress
assert isinstance(RewardAddress, LagacyAddress) or isinstance(RewardAddress, PhantomAddress)
# Es wird geprüft ob der miner bereits läuft
if self.miner.Status() != 2: raise Exception('Alrady running') # Es wird bereits ein Miner ausgeführt
# Der Miner wird gestartet
if self.miner.Start(RewardAddress) != 0: raise Exception('Miner cant start') # Der Miner konnte nicht gestartet werden
    # Starts the web controller.
    def startWebController(self):
        self.wc.Start()

    # Starts the dashboard.
    def startDashboard(self):
        self.dashboard.start()
    # Adds one or more transactions to the mempool.  TODO
    def addTransaction(self, *TransactionObj):
        from apollon.transaction import SignedTransaction
        for i in TransactionObj:
            # Only validly signed SignedTransaction objects are accepted.
            assert isinstance(i, SignedTransaction)
            assert i.signaturesAreValidate() == True
            # TODO (original note): check whether the used UTXOs were already spent.
            self.mempool.append(i)
    # Returns the current block height.
    def getBlockHeight(self): return self.ChainStorageObj.getBlockHeight()

    # Appends a new block to the blockchain.  TODO
    def addBlock(self, BlockObj):
        # The new block must sit exactly one above the current height.
        if BlockObj.getHeight() != self.getBlockHeight() + 1: raise Exception('INVALID BLOCK HEIGHT')
        self.ChainStorageObj.addBlock(BlockObj)
        self.last_block = BlockObj

    # Returns a block by its identifier.
    def getBlock(self, BlockID):
        return self.ChainStorageObj.getBlock(BlockID)

    # Returns the hash of the block to be mined next.  TODO: not implemented.
    def getBlockTemplate(self): pass

    # Creates a new block from the block blob and nonce.  TODO: not implemented.
    def addBlockByMinedTemplate(self, BlobHash, Nonce, MinerAddress): pass
    # Returns all coins of the blockchain (empty list if none are loaded).
    def getChainCoins(self):
        if self.current_coins is not None: return self.current_coins
        else: return []

    # Returns the miner's hash rate.  TODO
    def getHashrate(self): return self.miner.getHashRate()

    # Returns the chain's burn address.  TODO
    def getChainBurningAddress(self): return self.burn_address
    # Returns the rewards for the given block height: one CoinValueUnit per
    # mineable coin that still pays a reward at that height.
    def GetRewardForBlock(self, BlockHeight):
        from apollon.coin import CoinValueUnit
        lis = list()
        for i in self.getChainCoins():
            if i.isMiningLabel() == True and i.hasRewardForBlock(BlockHeight) == True: cnv = CoinValueUnit(i); cnv.add(i.miningReward(BlockHeight)); lis.append(cnv)
        return lis
    # Verifies the nonce of the mined hash.  TODO: stub — currently always accepts.
    def validateMinedHash(self, BlockHeight, BlockHash, Nonce): return True

    # Returns a chain coin by its ID (bytes), or None if unknown.
    def getChainCoinByID(self, CoinID):
        if isinstance(CoinID, bytes) == True:
            for i in self.root_conf.getCoins():
                if i.getCoinID(True) == CoinID: return i
        return None
    # Returns the metadata of the most recent block.
    def getLastBlockMetaData(self, AsBytes=False):
        """Return ``{'block_height', 'block_hash'}`` for the chain tip.

        An empty chain reports height 0 with the configured root hash.
        When ``AsBytes`` is False the hash is Base58-encoded.
        """
        # Check whether the chain already contains a block.
        if self.ChainStorageObj.getBlockHeight() == 0:
            lbmdc = dict()
            lbmdc['block_height'] = 0
            lbmdc['block_hash'] = self.root_conf.getChainRootHash(AsBytes)
            return lbmdc
        # At least one block exists.
        else:
            lbmd = self.ChainStorageObj.getLastBlockMetaData()
            lbmdc = dict()
            lbmdc['block_height'] = lbmd['block_height']
            if AsBytes == True: lbmdc['block_hash'] = lbmd['block_hash']
            else: from apollon.utils import encodeBase58; lbmdc['block_hash'] = encodeBase58(lbmd['block_hash'])
            return lbmdc
    # Returns metadata for the most recent blocks, paginated.
    def getLastBlocksMetaData(self, Blocks=50, Page=1):
        return self.ChainStorageObj.getLastBlocksMetaData(Blocks, Page)

    # Returns all information about an address (or addresses).
    def getAddressDetails(self, Addresses):
        # Fetch the address metadata from storage.
        try: storage_data = self.ChainStorageObj.getAddressDetails(Addresses)
        except Exception as E: print(E); raise Exception('Storage data')
        # The storage must hand back an AddressChainDetails instance.
        from apollon.address_utils import AddressChainDetails
        if isinstance(storage_data, AddressChainDetails) == False: raise Exception('Invalid chain storage data')
        # Return the fetched object.
        return storage_data

    # Returns the difficulty of the current block.
    # NOTE(review): hard-coded to 240 regardless of height — presumably a placeholder.
    def getBlockDiff(self, BlockHeight=None):
        return 240
# Erstellt eine Vollständig neue Blockchain
@staticmethod
def createNewChain(ChainPath, ChainName, ChainMasterSeed, *ChainCoins):
# Es wird geprüft ob der Path exestiert
import os
if os.path.isdir(ChainPath) == False: os.mkdir(ChainPath)
else:
if os.path.isfile('{}/chain.cdb'.format(ChainPath)) == True: raise Exception('Alrady exists')
if os.path.isfile('{}/chain.rc'.format(ChainPath)) == True: raise Exception('Alrady exists')
# Die Chain Config wird erstellt
from apollon.chain_configs import ChainRootConfig
ChainRootConfig.newChainRootConfig('{}/chain.rc'.format(ChainPath), ChainName, ChainMasterSeed, 0, 645120, int(3*60), *ChainCoins)
# Die Datenbank wird erstellt
from apollon.chain_storage import ChainStorage
ChainStorage.newFile('{}/chain.cdb'.format(ChainPath))
# Die Chain wurde erfolgreich erstellt
return 0
# Gibt alle Transaktionen einer Adresse aus
def getLagacyTransactionsByAddress(self, *Addresses, MaxEntries=25, CurrentPage=1, OutAsJSON=False):
# Es wird geprüft ob die Adressen korrekt sind
from apollon.apollon_address import LagacyAddress, BlockchainAddress
for adr_i in Addresses:
if isinstance(adr_i, LagacyAddress) == False and isinstance(adr_i, BlockchainAddress) == False: raise Exception('Invalid Address')
# Es wird geprüft ob die MaxEntries korrekt ist
if isinstance(MaxEntries, int) == False: raise Exception('Invalid MaxEntries')
# Es wird geprüft ob es sich um eine Zuläassige Seitenangabe handelt
if isinstance(CurrentPage, int) == False or CurrentPage < 1: raise Exception('Invalid CurrentPage, only Numbers')
# Es wird geprüft ob die JSON Ausgabe korrekt ist (JSON)
if isinstance(OutAsJSON, bool) == False: raise Exception('Invalid OutAsJSON, onyl True/False allowed')
# Die Transaktionen werden aus dem Memorypool abgerufen
mempool_res = list()
# Alle Adressen werden in der Datenbank abgerufen
storage_data = list()
for adri in Addresses:
# Es werden alle Transaktionen aus dem Storage abgerufen
try: rcs = self.ChainStorageObj.getAddressTransactions(*mempool_res ,Addresses=adri, MaxEntries=MaxEntries, CurrentPage=CurrentPage)
except: raise Exception('Internal error')
# Es wird geprüft in welcher Form die Transaktionen ausgegeben werden sollen
for xci in rcs:
if OutAsJSON == False: storage_data.append(xci)
else: storage_data.append(xci.toJSON())
# Die Daten werden zurückgegeben
return storage_data | 2.46875 | 2 |
setup.py | wizardsoftheweb/wotw-cookiecutter-base | 0 | 12767121 | """This file sets up the package"""
from setuptools import setup
# Minimal packaging metadata for the cookiecutter template project.
setup(
    name='wotw-cookiecutter-base',
    version='0.2.0',
    packages=[],  # template repository: no importable Python packages shipped
)
| 1.203125 | 1 |
boxes/sensorgw/web/sensor_gateway.py | yagamy4680/myboxes | 0 | 12767122 | <gh_stars>0
#!/usr/bin/env python
from bottle import route, run, post, get, request, static_file
from optparse import OptionParser
myData = {}
@route('/hello')
def hello():
    """Simple liveness endpoint."""
    greeting = "Hello World!\n"
    return greeting
@post('/api/data/<name>')
def updateData(name='hello'):
    """Store the posted JSON payload in the in-memory store under *name*."""
    payload = request.json
    print(payload)
    myData[name] = payload
    return "Got it!!\n"
@get('/api/data/<name>')
def getData(name='hello'):
    """Return the JSON payload previously stored under *name*.

    Renamed from ``updateData``: the original file defined two functions
    with that name, so the GET handler shadowed the POST handler's name
    (both routes still worked, but the duplicate name was a latent bug).

    NOTE(review): raises KeyError (HTTP 500) if *name* was never stored —
    consider returning 404; left unchanged to preserve behavior.
    """
    jsonData = myData[name]
    print(jsonData)
    return jsonData
@route('/')
def serverRoot():
    """Serve the single-page UI entry point."""
    www_root = './www'
    return static_file('index.html', root=www_root)
@route('/scripts/<filepath:path>')
def serverStaticScript(filepath):
    """Serve static script assets below www/scripts."""
    scripts_root = './www/scripts'
    return static_file(filepath, root=scripts_root)
# Command line option: -p/--port selects the listen port (default 4000).
parser = OptionParser()
parser.add_option("-p", "--port", type="int", dest="port", default=4000)
(options, args) = parser.parse_args()
# Listen on all interfaces; debug mode is intended for development only.
run(host='0.0.0.0', port=options.port, debug=True)
| 2.625 | 3 |
rebuild_tool/rebuild_metadata.py | mcyprian/deps_visualization | 2 | 12767123 | import yaml
from collections import UserDict
from rebuild_tool.exceptions import IncompleteMetadataException, UnknownPluginException
from rebuild_tool.builder_plugins.builder_loader import available_builder_plugins
from rebuild_tool.pkg_source_plugins.pkg_source_loader import available_pkg_source_plugins
def get_file_data(input_file, split=False):
    '''
    Read *input_file* and return its content as one string, or as a list
    of lines when split=True.  May raise IOError/OSError when the file
    cannot be opened.
    '''
    with open(input_file, 'r') as source:
        content = source.read()
    if split:
        return content.splitlines()
    return content
class RebuildMetadata(UserDict):
    '''
    Class to load, check and store all rebuild metadata
    '''

    def __init__(self, yaml_data):
        # super(self.__class__, ...) recurses infinitely under subclassing;
        # the zero-argument form is the correct spelling.
        super().__init__()
        # safe_load: metadata files must not be able to instantiate
        # arbitrary Python objects (yaml.load without an explicit Loader
        # is unsafe and rejected by newer PyYAML releases).
        self.data = yaml.safe_load(yaml_data)
        # Mandatory top-level attributes.
        for attr in ['build_system', 'packages_source', 'repo', 'packages']:
            if attr not in self:
                raise IncompleteMetadataException("Missing Rebuild file attribute: {}.".format(attr))
        # The selected plugins must actually be available.
        if self['build_system'] not in available_builder_plugins:
            raise UnknownPluginException("Builder plugin: {} specified in Rebuild file not available.".format(
                self['build_system']))
        if self['packages_source'] not in available_pkg_source_plugins:
            raise UnknownPluginException("Packages source plugin: {} specified in Rebuild file not available.".format(
                self['packages_source']))
        # A metapackage is simply appended to the package list.
        if 'metapackage' in self:
            self['packages'].append(self['metapackage'])
        if 'prefix' not in self:
            self['prefix'] = ""
        # Normalize single values to one-element lists.
        for attr in ["chroots", "recipes", "chroot_pkgs", "packages"]:
            if attr in self:
                if not isinstance(self[attr], list):
                    self[attr] = [self[attr]]
        # koji as package source additionally needs a tag to pull srpms from.
        if self['packages_source'] == 'koji':
            if 'koji_tag' not in self:
                raise IncompleteMetadataException("Missing Rebuild file attribute: koji_tag necessary to get srpms from koji.")
        else:
            self['koji_tag'] = None
class Recipe(yaml.YAMLObject):
    '''
    Class to store the build order of a recipe, reads data from a
    yml file in the format:

    - ['package1', 'bootstrap 0']
    - ['package2']
    - ['package1', 'bootstrap 1']
    ...
    '''

    def __init__(self, recipe_file):
        self.packages = set()
        self.order = get_file_data(recipe_file)
        self.get_packages()

    @property
    def order(self):
        return self.__order

    @order.setter
    def order(self, recipe_data):
        # safe_load: recipe files are plain data and must not be able to
        # construct arbitrary Python objects (yaml.load without a Loader
        # is unsafe and rejected by newer PyYAML releases).
        self.__order = yaml.safe_load(recipe_data)

    def get_packages(self):
        '''
        Fills the packages set with all package names present in the recipe
        '''
        # Guard against being called before the order property was assigned.
        if not hasattr(self, 'order'):
            return
        for item in self.order:
            self.packages.add(item[0])
| 2.21875 | 2 |
preprocess/thyroid_tissue_loc.py | PingjunChen/ThyroidGeneralWSI | 2 | 12767124 | # -*- coding: utf-8 -*-
import os, sys
import shutil
import tissueloc as tl
from tissueloc.load_slide import load_slide_img, select_slide_level
import numpy as np
from skimage import io, color
import cv2
if __name__ == "__main__":
    slide_dir = "../data/TestSlides/Malignant"
    save_dir = "../data/TestSlides/MalignantTissue"
    # Start from an empty output directory.
    if os.path.exists(save_dir):
        shutil.rmtree(save_dir)
    os.makedirs(save_dir)

    # Process every tiff slide in the input directory.
    slide_names = [name for name in os.listdir(slide_dir) if "tiff" in name]
    for slide_name in slide_names:
        slide_path = os.path.join(slide_dir, slide_name)
        # Locate tissue contours on a downsampled version of the slide.
        cnts, d_factor = tl.locate_tissue_cnts(slide_path, max_img_size=2048, smooth_sigma=13,
                                               thresh_val=0.88, min_tissue_size=10000)
        s_level, d_factor = select_slide_level(slide_path, max_size=2048)
        slide_img = load_slide_img(slide_path, s_level)
        slide_img = np.ascontiguousarray(slide_img, dtype=np.uint8)
        # Draw all located contours in green and save the overlay image.
        cv2.drawContours(slide_img, cnts, -1, (0, 255, 0), 9)
        out_name = os.path.splitext(slide_name)[0] + '_cnt.png'
        io.imsave(os.path.join(save_dir, out_name), slide_img)
| 2.453125 | 2 |
worker.py | tcbegley/dash-rq-demo | 42 | 12767125 | <filename>worker.py<gh_stars>10-100
from rq import Connection, Worker
from dash_rq_demo import conn, queue
if __name__ == "__main__":
    # Start an RQ worker bound to the shared Redis connection and process
    # jobs from the demo queue until interrupted.
    with Connection(conn):
        Worker([queue]).work()
| 1.71875 | 2 |
Darlington/phase-2/FILE 1/O/day 83 solution/qtn3.py | darlcruz/python-challenge-solutions | 0 | 12767126 | # program that takes a text file as input and returns the number of words of a given text file.
def count_words(filepath):
    """Return the number of words in the text file at *filepath*.

    Commas are treated as word separators.  Raises OSError if the file
    cannot be opened.
    """
    with open(filepath) as f:
        data = f.read()
    # BUG FIX: str.replace returns a new string -- the original code
    # discarded the result, so commas were never converted to spaces.
    # split() with no argument also avoids counting the empty strings
    # that split(" ") produces for consecutive spaces or newlines.
    return len(data.replace(",", " ").split())
# Demo: count the words in the sample file shipped next to this script.
print(count_words("words.txt"))
Mask_RCNN/forecut_pipeline/save_image.py | tobias-fyi/rmbg | 3 | 12767127 | <filename>Mask_RCNN/forecut_pipeline/save_image.py
"""ForeCut \\ Pipeline :: Save image(s)"""
import os
import skimage.io
from forecut_pipeline.pipeline import Pipeline
class SaveImage(Pipeline):
    """Pipeline task to save images."""

    def __init__(self, src, path, image_ext="png"):
        """
        :param src: key in the data dict holding the image array to save
        :param path: root output directory
        :param image_ext: file extension (and thus format) of saved images
        """
        self.src = src
        self.path = path
        self.image_ext = image_ext
        super().__init__()

    def map(self, data):
        """Write the image to disk, mirroring the directory structure of
        ``data["image_id"]`` below ``self.path``, and pass *data* through."""
        image = data[self.src]
        image_id = data["image_id"]

        # Recreate any sub-directories encoded in the image id.
        parts = image_id.split(os.path.sep)
        subdirs = parts[:-1]
        if subdirs:
            dirname = os.path.join(self.path, *subdirs)
            os.makedirs(dirname, exist_ok=True)
        else:
            dirname = self.path

        # Replace the original extension with the configured one.
        filename = f"{parts[-1].rsplit('.', 1)[0]}.{self.image_ext}"
        path = os.path.join(dirname, filename)

        skimage.io.imsave(path, image)
        return data
| 2.71875 | 3 |
pypaste/pypaste.py | pypaste/pypaste | 0 | 12767128 | # -*- coding: utf-8 -*-
"""Main module."""
# from abc import ABC as _ABC
from contextlib import AbstractContextManager as _AbstractContextManager
from abc import abstractmethod as _abstractmethod
# from abc import abstractclassmethod as _abstractclassmethod
class PyPasteBase(_AbstractContextManager):
    """Abstract base class for clipboard backends.

    Inherits ``AbstractContextManager`` so concrete backends can be used
    with ``with``; subclasses must implement the abstract ``clipboard``
    property (both getter and setter) as well as ``__exit__``.
    """

    @property
    @_abstractmethod
    def clipboard(self):
        # Default accessor returns the cached value; abstract so concrete
        # backends must provide their own read implementation.
        return self._clipboard

    @clipboard.setter
    @_abstractmethod
    def clipboard(self, value):
        # Default mutator caches the value; concrete backends override to
        # write to a real clipboard.
        self._clipboard = value
| 2.828125 | 3 |
erica/erica_legacy/elster_xml/grundsteuer/elster_data_representation.py | punknoir101/erica-1 | 0 | 12767129 | from dataclasses import dataclass
from typing import List, Optional
from erica.erica_legacy.elster_xml.common.basic_xml_data_representation import ENutzdaten, construct_basic_xml_data_representation
from erica.erica_legacy.elster_xml.grundsteuer.elster_eigentuemer import EAngFeststellung, EPersonData, EEigentumsverh, \
EEmpfangsbevollmaechtigter
from erica.erica_legacy.elster_xml.grundsteuer.elster_gebaeude import EAngWohn
from erica.erica_legacy.request_processing.erica_input.v2.grundsteuer_input import GrundsteuerData
from erica.erica_legacy.request_processing.erica_input.v2.grundsteuer_input_eigentuemer import \
Eigentuemer as EigentuemerInput
"""
The content of the Grundsteuer Nutzdaten XML as its data prepresentation.
The classes are prefixed with "E" for "Elster".
"""
@dataclass
class EGW1:
    """GW1 section of the declaration: owners and ownership relations."""
    Ang_Feststellung: EAngFeststellung
    Eigentuemer: List[EPersonData]
    Eigentumsverh: EEigentumsverh
    Empfangsv: Optional[EEmpfangsbevollmaechtigter]

    def __init__(self, input_data: EigentuemerInput):
        self.Ang_Feststellung = EAngFeststellung()
        # One entry per owner, numbered by input position.
        self.Eigentuemer = [EPersonData(person, index)
                            for index, person in enumerate(input_data.person)]
        self.Eigentumsverh = EEigentumsverh(input_data)
        # The authorized recipient is optional; getattr replaces the
        # hasattr-then-access pattern of the original.
        empfangsbevollmaechtigter = getattr(input_data, "empfangsbevollmaechtigter", None)
        if empfangsbevollmaechtigter:
            self.Empfangsv = EEmpfangsbevollmaechtigter(empfangsbevollmaechtigter)
        else:
            self.Empfangsv = None
@dataclass
class EGW2:
    """GW2 section of the declaration: building ("Wohnen") information."""
    Ang_Wohn: EAngWohn

    def __init__(self, input_data: GrundsteuerData):
        self.Ang_Wohn = EAngWohn(input_data.gebaeude)
@dataclass
class ERueckuebermittlung:
    """Return-channel settings of the declaration."""
    Bescheid: str

    def __init__(self):
        # '2' == no "Bescheiddatenabholung" (no assessment data retrieval)
        self.Bescheid = '2'
@dataclass
class EVorsatz:
    """Header ("Vorsatz") block of the Grundsteuer declaration."""
    Unterfallart: str
    Vorgang: str
    StNr: str
    Zeitraum: str
    AbsName: str
    AbsStr: str
    AbsPlz: str
    AbsOrt: str
    Copyright: str
    OrdNrArt: str
    Rueckuebermittlung: ERueckuebermittlung

    def __init__(self, input_data: GrundsteuerData):
        self.Unterfallart = "88"  # Grundsteuer
        self.Vorgang = "01"  # Veranlagung
        # TODO
        self.StNr = "1121081508150"
        self.Zeitraum = "2022"  # TODO require on input?
        # Sender fields are taken from the first owner of the property.
        first_person = input_data.eigentuemer.person[0]
        angaben = first_person.persoenlicheAngaben
        self.AbsName = angaben.vorname + " " + angaben.name
        self.AbsStr = first_person.adresse.strasse
        self.AbsPlz = first_person.adresse.plz
        self.AbsOrt = first_person.adresse.ort
        self.Copyright = "(C) 2022 DigitalService4Germany"
        # TODO Steuernummer or Aktenzeichen?
        self.OrdNrArt = "S"
        self.Rueckuebermittlung = ERueckuebermittlung()
@dataclass
class EGrundsteuerSpecifics:
    """The E88 element: all Grundsteuer-specific parts plus XML attributes."""
    GW1: EGW1
    GW2: EGW2
    Vorsatz: EVorsatz
    xml_attr_version: str
    xml_attr_xmlns: str

    def __init__(self, input_data: GrundsteuerData):
        # Fixed schema attributes of the <E88> element.
        self.xml_attr_version = "2"
        self.xml_attr_xmlns = "http://finkonsens.de/elster/elstererklaerung/grundsteuerwert/e88/v2"
        # Payload sections built from the user input.
        self.GW1 = EGW1(input_data.eigentuemer)
        self.GW2 = EGW2(input_data)
        self.Vorsatz = EVorsatz(input_data)
@dataclass
class EGrundsteuerData(ENutzdaten):
    """The ``Nutzdaten`` payload wrapping the E88 Grundsteuer element."""
    E88: EGrundsteuerSpecifics

    def __init__(self, input_data: GrundsteuerData):
        self.E88 = EGrundsteuerSpecifics(input_data)
def get_full_grundsteuer_data_representation(input_data: GrundsteuerData):
    """ Returns the full data representation of an elster XML for the Grundsteuer use case. """
    payload = EGrundsteuerData(input_data)
    # TODO set BuFa correctly
    return construct_basic_xml_data_representation(
        empfaenger_id='F',
        empfaenger_text="1121",
        nutzdaten_object=payload,
        nutzdaten_header_version="11",
    )
| 2.609375 | 3 |
plugins/osulib/utils/misc_utils.py | Jeglerjeg/pcbot | 0 | 12767130 | <reponame>Jeglerjeg/pcbot<filename>plugins/osulib/utils/misc_utils.py
import discord
from plugins.osulib import enums
from plugins.osulib.constants import timestamp_pattern
from plugins.osulib.config import osu_config
def get_diff(old: dict, new: dict, value: str, statistics=False):
    """ Get the difference between old and new osu! user data. """
    if not new or not old:
        return 0

    def _as_float(data: dict) -> float:
        # With statistics=True the value lives in the "statistics" sub-dict;
        # falsy entries (None, 0, "") are treated as 0.0.
        container = data["statistics"] if statistics else data
        raw = container[value]
        return float(raw) if raw else 0.0

    return _as_float(new) - _as_float(old)
def get_notify_channels(guild: discord.Guild, data_type: str):
    """ Find the notifying channel or return the guild. """
    guild_conf = osu_config.data["guild"].get(str(guild.id))
    if guild_conf is None:
        return None
    key = "".join([data_type, "-channels"])
    if key not in guild_conf:
        return None
    result = []
    for channel_id in guild_conf[key]:
        channel = guild.get_channel(int(channel_id))
        if channel:
            result.append(channel)
    return result
def get_timestamps_with_url(content: str):
    """ Yield every map timestamp found in a string, and an edditor url.

    :param content: The string to search
    :returns: a tuple of the timestamp as a raw string and an editor url
    """
    for match in timestamp_pattern.finditer(content):
        # NOTE(review): replace(")", r")") is a no-op (r")" == ")");
        # possibly "%29" was intended — verify against the osu editor URL scheme.
        raw = match.group(1).strip(" ").replace(" ", "%20").replace(")", r")")
        yield match.group(0), f"<osu://edit/{raw}>"
def calculate_acc(mode: enums.GameMode, osu_score: dict, exclude_misses: bool = False):
    """ Calculate the accuracy using formulas from https://osu.ppy.sh/wiki/Accuracy """
    # Pull the hit counts out of the score statistics.
    stats = osu_score["statistics"]
    c300 = int(stats["count_300"])
    c100 = int(stats["count_100"])
    c50 = int(stats["count_50"])
    miss = int(stats["count_miss"])
    katu = int(stats["count_katu"])
    geki = int(stats["count_geki"])

    # Catch accuracy works differently: every caught fruit counts equally.
    if mode is enums.GameMode.fruits:
        caught = c50 + c100 + c300
        total = miss + c50 + c100 + c300 + katu
        return caught / total

    points, hits = 0, 0
    if mode is enums.GameMode.osu:
        points = c50 * 50 + c100 * 100 + c300 * 300
        hits = (0 if exclude_misses else miss) + c50 + c100 + c300
    elif mode is enums.GameMode.taiko:
        points = (miss * 0 + c100 * 0.5 + c300 * 1) * 300
        hits = miss + c100 + c300
    elif mode is enums.GameMode.mania:
        # In mania, katu is 200s and geki is MAX
        points = c50 * 50 + c100 * 100 + katu * 200 + (c300 + geki) * 300
        hits = miss + c50 + c100 + katu + c300 + geki
    return points / (hits * 300)
async def init_guild_config(guild: discord.Guild):
    """ Initializes the config when it's not already set. """
    guild_id = str(guild.id)
    if guild_id in osu_config.data["guild"]:
        return
    osu_config.data["guild"][guild_id] = {}
    await osu_config.asyncsave()
| 2.390625 | 2 |
backend/api/migrations/0006_auto_20181113_0325.py | sperrys/YEF_DEBUG | 2 | 12767131 | <gh_stars>1-10
# Generated by Django 2.1.1 on 2018-11-13 03:25
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: renames the point models to singular
    # form and moves the per-round result fields (chair/decision/win) off
    # Round, adding decision/win to Matchup instead.
    # NOTE(review): 'Unaminous' is a typo for 'Unanimous' in the choices;
    # fixing it would require a data migration, so it is left as-is here.

    dependencies = [
        ('api', '0005_auto_20181113_0259'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='JudgePoints',
            new_name='JudgePoint',
        ),
        migrations.RenameModel(
            old_name='MemberPoints',
            new_name='MemberPoint',
        ),
        migrations.RemoveField(
            model_name='round',
            name='chair',
        ),
        migrations.RemoveField(
            model_name='round',
            name='decision',
        ),
        migrations.RemoveField(
            model_name='round',
            name='win',
        ),
        migrations.AddField(
            model_name='matchup',
            name='decision',
            field=models.CharField(choices=[('Split', 'Split'), ('Unaminous', 'Unaminous')], default='Split', max_length=20),
        ),
        migrations.AddField(
            model_name='matchup',
            name='win',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='api.Team'),
        ),
    ]
| 1.757813 | 2 |
Question Set 1 - (Selection Statement)/Version 5/main.py | Randula98/Python-For-Beginners | 6 | 12767132 | <reponame>Randula98/Python-For-Beginners<filename>Question Set 1 - (Selection Statement)/Version 5/main.py
# Get the user input for position; only M(anager) or S(ales rep) is accepted.
position = input("Position : ")
while position != "M" and position != "m" and position != "S" and position != "s":
    print("Invalid Input")
    position = input("Position : ")
# Get the user input for the sales amount.
sales = input("Sales amount : ")
sales = float(sales)
# Pick the basic salary according to the chosen position.
if position == "M" or position == "m":
    basic = 50000
else:
    basic = 75000
# A 10% commission is paid once the sales amount reaches 30000.
if sales >= 30000:
    commission = sales * 10 / 100
else:
    commission = 0
# Calculate the salary.
salary = basic + commission
# Display the commission and the salary.
print("Commission : " + str(commission))
print("Salary : " + str(salary))
| 4.09375 | 4 |
MachineLearning(Advanced)/p6_graduation_project/process_dog.py | StudyExchange/Udacity | 0 | 12767133 | #! /usr/bin/env python
"""Run a YOLO_v2 style detection model on test images."""
import argparse
import colorsys
import imghdr
import os
import random
import numpy as np
from keras import backend as K
from keras.models import load_model
from PIL import Image, ImageDraw, ImageFont
from yad2k.models.keras_yolo import yolo_eval, yolo_head
import shutil
def _main(session, args_model_path, args_anchors_path, args_classes_path, args_test_path, args_output_path):
    """Run YOLOv2 detection over every file in *args_test_path*.

    For each image the highest-scoring 'dog' box is cropped (with a 10 px
    border) and written to *args_output_path*; detections of other classes
    cause the image to be copied over unchanged.

    :param session: existing TensorFlow/Keras session used for inference
    :param args_model_path: path to the Keras ``.h5`` YOLO model
    :param args_anchors_path: text file with comma separated anchor values
    :param args_classes_path: text file with one class name per line
    :param args_test_path: directory containing the input images
    :param args_output_path: directory crops/copies are written to (created
        if missing)
    """
    model_path = args_model_path
    assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'
    anchors_path = args_anchors_path
    classes_path = args_classes_path
    test_path = args_test_path
    output_path = args_output_path
    # Detection thresholds: minimum box score and NMS IoU.
    args_score_threshold = .3
    args_iou_threshold = .5
    if not os.path.exists(output_path):
        print('Creating output path {}'.format(output_path))
        os.mkdir(output_path)
    # sess = K.get_session() # TODO: Remove dependence on Tensorflow session.
    sess = session
    with open(classes_path) as f:
        class_names = f.readlines()
    class_names = [c.strip() for c in class_names]
    with open(anchors_path) as f:
        anchors = f.readline()
        anchors = [float(x) for x in anchors.split(',')]
        anchors = np.array(anchors).reshape(-1, 2)
    yolo_model = load_model(model_path)
    # Verify model, anchors, and classes are compatible
    num_classes = len(class_names)
    num_anchors = len(anchors)
    # TODO: Assumes dim ordering is channel last
    model_output_channels = yolo_model.layers[-1].output_shape[-1]
    assert model_output_channels == num_anchors * (num_classes + 5), \
        'Mismatch between model and given anchor and class sizes. ' \
        'Specify matching anchors and classes with --anchors_path and ' \
        '--classes_path flags.'
    print('{} model, anchors, and classes loaded.'.format(model_path))
    # Check if model is fully convolutional, assuming channel last order.
    model_image_size = yolo_model.layers[0].input_shape[1:3]
    is_fixed_size = model_image_size != (None, None)
    # Generate colors for drawing bounding boxes.
    hsv_tuples = [(x / len(class_names), 1., 1.)
                  for x in range(len(class_names))]
    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            colors))
    random.seed(10101)  # Fixed seed for consistent colors across runs.
    random.shuffle(colors)  # Shuffle colors to decorrelate adjacent classes.
    random.seed(None)  # Reset seed to default.
    # Generate output tensor targets for filtered bounding boxes.
    # TODO: Wrap these backend operations with Keras layers.
    yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
    input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = yolo_eval(
        yolo_outputs,
        input_image_shape,
        score_threshold=args_score_threshold,
        iou_threshold=args_iou_threshold)
    # Per-image inference loop.
    for image_file in os.listdir(test_path):
        # try:
        #     image_type = imghdr.what(os.path.join(test_path, image_file))
        #     if not image_type:
        #         continue
        # except IsADirectoryError:
        #     continue
        image = Image.open(os.path.join(test_path, image_file))
        if is_fixed_size:  # TODO: When resizing we can use minibatch input.
            resized_image = image.resize(
                tuple(reversed(model_image_size)), Image.BICUBIC)
            image_data = np.array(resized_image, dtype='float32')
        else:
            # Due to skip connection + max pooling in YOLO_v2, inputs must have
            # width and height as multiples of 32.
            new_image_size = (image.width - (image.width % 32),
                              image.height - (image.height % 32))
            resized_image = image.resize(new_image_size, Image.BICUBIC)
            image_data = np.array(resized_image, dtype='float32')
            print(image_data.shape)
        image_data /= 255.
        image_data = np.expand_dims(image_data, 0)  # Add batch dimension.
        out_boxes, out_scores, out_classes = sess.run(
            [boxes, scores, classes],
            feed_dict={
                yolo_model.input: image_data,
                input_image_shape: [image.size[1], image.size[0]],
                K.learning_phase(): 0
            })
        print('Found {} boxes for {}'.format(len(out_boxes), image_file))
        font = ImageFont.truetype(
            font='font/FiraMono-Medium.otf',
            size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
        thickness = (image.size[0] + image.size[1]) // 300
        max_score = 0
        # Walk the detections; keep only the best-scoring dog crop.
        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = class_names[c]
            box = out_boxes[i]
            score = out_scores[i]
            label = '{} {:.2f}'.format(predicted_class, score)
            draw = ImageDraw.Draw(image)
            label_size = draw.textsize(label, font)
            top, left, bottom, right = box
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
            right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
            print(label, (left, top), (right, bottom))
            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])
            # # My kingdom for a good redistributable image drawing library.
            # for i in range(thickness):
            #     draw.rectangle(
            #         [left + i, top + i, right - i, bottom - i],
            #         outline=colors[c])
            # draw.rectangle(
            #     [tuple(text_origin), tuple(text_origin + label_size)],
            #     fill=colors[c])
            # draw.text(text_origin, label, fill=(0, 0, 0), font=font)
            # del draw
            if predicted_class == 'dog':
                if score > max_score:
                    if max_score > 0:
                        print('-' * 10)
                    border = 10
                    max_score = score
                    crop_box = left - border, top - border, right + border, bottom + border
                    cropped_img = image.crop(crop_box)
                    cropped_img.save(os.path.join(output_path, image_file), quality=90)
            else:
                # Non-dog detection: pass the original image through untouched.
                shutil.copyfile(os.path.join(test_path, image_file), os.path.join(output_path, image_file))
        # image.save(os.path.join(output_path, image_file), quality=90)
def _main_input():
    """Run YOLOv2 dog detection over the project's input data set.

    Walks data_train/data_val/data_test under the hard-coded input path;
    for every image the highest-scoring 'dog' box is cropped into a
    mirrored ``yolo_<folder>`` directory tree, images without a dog (or
    without any detection) are copied unchanged.  Running counters for
    dog crops / no-dog / no-object images are printed after each image.
    """
    model_path = 'model_data/yolo.h5'
    anchors_path = 'model_data/yolo_anchors.txt'
    classes_path = 'model_data/pascal_classes.txt'
    # model_path = args_model_path
    assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'
    # anchors_path = args_anchors_path
    # classes_path = args_classes_path
    # test_path = args_test_path
    # output_path = args_output_path
    intput_path = 'D:/Udacity/MachineLearning(Advanced)/p6_graduation_project/input'
    data_folders = ['data_train', 'data_val', 'data_test']
    # Detection thresholds: minimum box score and NMS IoU.
    args_score_threshold = .3
    args_iou_threshold = .5
    count_max_dog = 0
    count_no_dog = 0
    count_no_object = 0
    # if not os.path.exists(output_path):
    #     print('Creating output path {}'.format(output_path))
    #     os.mkdir(output_path)
    sess = K.get_session()  # TODO: Remove dependence on Tensorflow session.
    # sess = session
    with open(classes_path) as f:
        class_names = f.readlines()
    class_names = [c.strip() for c in class_names]
    with open(anchors_path) as f:
        anchors = f.readline()
        anchors = [float(x) for x in anchors.split(',')]
        anchors = np.array(anchors).reshape(-1, 2)
    yolo_model = load_model(model_path)
    # Verify model, anchors, and classes are compatible
    num_classes = len(class_names)
    num_anchors = len(anchors)
    # TODO: Assumes dim ordering is channel last
    model_output_channels = yolo_model.layers[-1].output_shape[-1]
    assert model_output_channels == num_anchors * (num_classes + 5), \
        'Mismatch between model and given anchor and class sizes. ' \
        'Specify matching anchors and classes with --anchors_path and ' \
        '--classes_path flags.'
    print('{} model, anchors, and classes loaded.'.format(model_path))
    # Check if model is fully convolutional, assuming channel last order.
    model_image_size = yolo_model.layers[0].input_shape[1:3]
    is_fixed_size = model_image_size != (None, None)
    # Generate colors for drawing bounding boxes.
    hsv_tuples = [(x / len(class_names), 1., 1.)
                  for x in range(len(class_names))]
    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            colors))
    random.seed(10101)  # Fixed seed for consistent colors across runs.
    random.shuffle(colors)  # Shuffle colors to decorrelate adjacent classes.
    random.seed(None)  # Reset seed to default.
    # Generate output tensor targets for filtered bounding boxes.
    # TODO: Wrap these backend operations with Keras layers.
    yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
    input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = yolo_eval(
        yolo_outputs,
        input_image_shape,
        score_threshold=args_score_threshold,
        iou_threshold=args_iou_threshold)
    # Walk the train/val/test splits, mirroring each one into yolo_<split>.
    for data_folder_name in data_folders:
        data_folder = os.path.join(intput_path, data_folder_name)
        output_folder = os.path.join(intput_path, 'yolo_' + data_folder_name)
        if not os.path.exists(output_folder):
            print('Create folders: %s' % output_folder)
            os.makedirs(output_folder)
        else:
            print('Folder exists: %s' % output_folder)
        # One sub-folder per class label.
        for class_folder_name in os.listdir(data_folder):
            test_path = os.path.join(data_folder, class_folder_name)
            output_path = os.path.join(output_folder, class_folder_name)
            if not os.path.exists(output_path):
                print('Create folders: %s' % output_path)
                os.makedirs(output_path)
            else:
                print('Folder exists: %s' % output_path)
            for image_file in os.listdir(test_path):
                # try:
                #     image_type = imghdr.what(os.path.join(test_path, image_file))
                #     if not image_type:
                #         continue
                # except IsADirectoryError:
                #     continue
                image = Image.open(os.path.join(test_path, image_file))
                if is_fixed_size:  # TODO: When resizing we can use minibatch input.
                    resized_image = image.resize(
                        tuple(reversed(model_image_size)), Image.BICUBIC)
                    image_data = np.array(resized_image, dtype='float32')
                else:
                    # Due to skip connection + max pooling in YOLO_v2, inputs must have
                    # width and height as multiples of 32.
                    new_image_size = (image.width - (image.width % 32),
                                      image.height - (image.height % 32))
                    resized_image = image.resize(new_image_size, Image.BICUBIC)
                    image_data = np.array(resized_image, dtype='float32')
                    print(image_data.shape)
                image_data /= 255.
                image_data = np.expand_dims(image_data, 0)  # Add batch dimension.
                # Unreadable/broken images are copied through unchanged.
                try:
                    out_boxes, out_scores, out_classes = sess.run(
                        [boxes, scores, classes],
                        feed_dict={
                            yolo_model.input: image_data,
                            input_image_shape: [image.size[1], image.size[0]],
                            K.learning_phase(): 0
                        })
                except Exception as ex:
                    print('Err: %s' % image_file)
                    print(ex)
                    shutil.copyfile(os.path.join(test_path, image_file), os.path.join(output_path, image_file))
                    continue
                # print('Found {} boxes for {}'.format(len(out_boxes), image_file))
                font = ImageFont.truetype(
                    font='font/FiraMono-Medium.otf',
                    size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
                thickness = (image.size[0] + image.size[1]) // 300
                max_score = 0
                if len(out_classes) > 0:
                    # Walk the detections; keep only the best-scoring dog crop.
                    for i, c in reversed(list(enumerate(out_classes))):
                        predicted_class = class_names[c]
                        box = out_boxes[i]
                        score = out_scores[i]
                        label = '{} {:.2f}'.format(predicted_class, score)
                        draw = ImageDraw.Draw(image)
                        label_size = draw.textsize(label, font)
                        top, left, bottom, right = box
                        top = max(0, np.floor(top + 0.5).astype('int32'))
                        left = max(0, np.floor(left + 0.5).astype('int32'))
                        bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
                        right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
                        # print(label, (left, top), (right, bottom))
                        if top - label_size[1] >= 0:
                            text_origin = np.array([left, top - label_size[1]])
                        else:
                            text_origin = np.array([left, top + 1])
                        # # My kingdom for a good redistributable image drawing library.
                        # for i in range(thickness):
                        #     draw.rectangle(
                        #         [left + i, top + i, right - i, bottom - i],
                        #         outline=colors[c])
                        # draw.rectangle(
                        #     [tuple(text_origin), tuple(text_origin + label_size)],
                        #     fill=colors[c])
                        # draw.text(text_origin, label, fill=(0, 0, 0), font=font)
                        # del draw
                        if predicted_class == 'dog':
                            if score > max_score:
                                if max_score > 0:
                                    print('+' * 10)
                                count_max_dog += 1
                                border = 10
                                max_score = score
                                crop_box = left - border, top - border, right + border, bottom + border
                                cropped_img = image.crop(crop_box)
                                cropped_img.save(os.path.join(output_path, image_file), quality=90)
                        else:
                            count_no_dog += 1
                            print('-' * 10)
                            shutil.copyfile(os.path.join(test_path, image_file), os.path.join(output_path, image_file))
                else:
                    # No detections at all: copy the image through unchanged.
                    count_no_object += 1
                    print('*' * 10)
                    shutil.copyfile(os.path.join(test_path, image_file), os.path.join(output_path, image_file))
                print('%s %s %s' %(count_max_dog, count_no_dog, count_no_object))
                # image.save(os.path.join(output_path, image_file), quality=90)
if __name__ == '__main__':
    # sess = K.get_session() # TODO: Remove dependence on Tensorflow session.
    # Test with the images bundled with YOLO.
    model_path = 'model_data/yolo.h5'
    anchors_path = 'model_data/yolo_anchors.txt'
    classes_path = 'model_data/pascal_classes.txt'
    # test_path = 'images'
    # output_path = 'images/out'
    # _main(model_path, anchors_path, classes_path, test_path, output_path)
    # Process the input data directories.
    _main_input()
    # # Process data_train
    # test_path = 'D:/Udacity/MachineLearning(Advanced)/p6_graduation_project/input/data_train'
    # output_path = 'D:/Udacity/MachineLearning(Advanced)/p6_graduation_project/input/yolo_data_train'
    # for folder_name in os.listdir(test_path):
    #     in_path = os.path.join(test_path, folder_name)
    #     out_path = os.path.join(output_path, folder_name)
    #     if not os.path.exists(out_path):
    #         print('Create folder: %s' % out_path)
    #         os.makedirs(out_path)
    #     else:
    #         print('Folder exists: %s' % out_path)
    #     # _main(sess, model_path, anchors_path, classes_path, in_path, out_path)
    # # Process data_val
    # test_path = 'D:/Udacity/MachineLearning(Advanced)/p6_graduation_project/input/data_val'
    # output_path = 'D:/Udacity/MachineLearning(Advanced)/p6_graduation_project/input/yolo_data_val'
    # for folder_name in os.listdir(test_path):
    #     in_path = os.path.join(test_path, folder_name)
    #     out_path = os.path.join(output_path, folder_name)
    #     if not os.path.exists(out_path):
    #         print('Create folder: %s' % out_path)
    #         os.makedirs(out_path)
    #     else:
    #         print('Folder exists: %s' % out_path)
    #     # _main(sess, model_path, anchors_path, classes_path, in_path, out_path)
    # # Process data_test
    # test_path = 'D:/Udacity/MachineLearning(Advanced)/p6_graduation_project/input/data_test'
    # output_path = 'D:/Udacity/MachineLearning(Advanced)/p6_graduation_project/input/yolo_data_test'
    # for folder_name in os.listdir(test_path):
    #     in_path = os.path.join(test_path, folder_name)
    #     out_path = os.path.join(output_path, folder_name)
    #     if not os.path.exists(out_path):
    #         print('Create folder: %s' % out_path)
    #         os.makedirs(out_path)
    #     else:
    #         print('Folder exists: %s' % out_path)
    #     # _main(sess, model_path, anchors_path, classes_path, in_path, out_path)
    # sess.close()
| 2.5 | 2 |
venv/Lib/site-packages/lunr/exceptions.py | star10919/drf | 2 | 12767134 | <gh_stars>1-10
from __future__ import unicode_literals
class BaseLunrException(Exception):
    """Common base class for all lunr exceptions."""
class QueryParseError(BaseLunrException):
    """Raised for errors encountered while parsing a query."""
| 1.570313 | 2 |
scripts/calcular_pagamentos_descontos_com variacao_das_taxas.py | GeisonIsrael/scripts_basicos_python | 0 | 12767135 | # Calcular o preço de um produto com % de desconto para pagamento a vista e % de juros para pagamento parcelado.
print('x=' * 80)
# Read the base product price.
preco = float(input('Qual o valor do produto ?R$ '))
# Same percentage is applied as a discount (cash) or a surcharge (instalments).
porcentagem = float(input('Desconto ou acréscimo para este produto em (%) ? '))
# Number of instalments for the parcelled payment plan.
prestacoes = float(input('Número de parcelas: '))
# Cash price: base price minus the percentage.
avista = preco - (preco * porcentagem / 100)
# Instalment price: base price plus the percentage.
parcelado = preco + (preco * porcentagem / 100)
# Value of each individual instalment.
parcelas = parcelado / prestacoes
# Report the discounted cash price.
print('O valor do produto é R$ {:.2f}, se o pagamento for a vista tera um desconto de {}% ficando o valor em R$ {:.2f}.'.format(preco, porcentagem, avista))
# Report the surcharged instalment price.
print('Se o pagamento for parcelado tera um acréscimo {}% ficando o valor em R$ {:.2f}.'.format(porcentagem, parcelado))
# Report the per-instalment value.
print('Com {:.0f} parcelas de {:.2f}.'.format(prestacoes, parcelas))
print('x=' * 80)
| 3.890625 | 4 |
test/unit/common/test_db.py | tsg-/swift-ec | 2 | 12767136 | <gh_stars>1-10
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for swift.common.db"""
import os
import unittest
from tempfile import mkdtemp
from shutil import rmtree, copy
from uuid import uuid4
import simplejson
import sqlite3
from mock import patch, MagicMock
from eventlet.timeout import Timeout
import swift.common.db
from swift.common.db import chexor, dict_factory, get_db_connection, \
DatabaseBroker, DatabaseConnectionError, DatabaseAlreadyExists, \
GreenDBConnection
from swift.common.utils import normalize_timestamp, mkdirs
from swift.common.exceptions import LockTimeout
class TestDatabaseConnectionError(unittest.TestCase):
    """The error's string form must surface the db path, the message and,
    when supplied, the timeout value."""

    def test_str(self):
        err = DatabaseConnectionError(':memory:',
                                      'No valid database connection')
        for fragment in (':memory:', 'No valid database connection'):
            self.assert_(fragment in str(err))
        err = DatabaseConnectionError(':memory:',
                                      'No valid database connection',
                                      timeout=1357)
        for fragment in (':memory:', 'No valid database connection', '1357'):
            self.assert_(fragment in str(err))
class TestDictFactory(unittest.TestCase):
    """dict_factory should map each fetched row to a dict keyed by the
    cursor's column names."""

    def test_normal_case(self):
        conn = sqlite3.connect(':memory:')
        conn.execute('CREATE TABLE test (one TEXT, two INTEGER)')
        conn.execute('INSERT INTO test (one, two) VALUES ("abc", 123)')
        conn.execute('INSERT INTO test (one, two) VALUES ("def", 456)')
        conn.commit()
        curs = conn.execute('SELECT one, two FROM test')
        for expected in ({'one': 'abc', 'two': 123},
                         {'one': 'def', 'two': 456}):
            self.assertEquals(dict_factory(curs, curs.next()), expected)
class TestChexor(unittest.TestCase):
    """Sanity checks for the chexor() rolling-hash helper."""

    # md5 of the empty string: the canonical "fresh" starting hash.
    OLD_HASH = 'd41d8cd98f00b204e9800998ecf8427e'

    def test_normal_case(self):
        """A known (hash, name, timestamp) triple yields a known digest."""
        new_hash = chexor(self.OLD_HASH, 'new name', normalize_timestamp(1))
        self.assertEquals(new_hash, '4f2ea31ac14d4273fe32ba08062b21de')

    def test_invalid_old_hash(self):
        """A non-hex old hash must be rejected with ValueError."""
        self.assertRaises(ValueError, chexor, 'oldhash', 'name',
                          normalize_timestamp(1))

    def test_no_name(self):
        """A None name must raise."""
        self.assertRaises(Exception, chexor, self.OLD_HASH, None,
                          normalize_timestamp(1))
class TestGreenDBConnection(unittest.TestCase):
    """GreenDBConnection should convert a persistent 'database is locked'
    error into an eventlet Timeout after retrying the operation."""

    def test_execute_when_locked(self):
        # This test is dependent on the code under test calling execute
        # as sqlite3.Cursor.execute in a subclass.
        class InterceptCursor(sqlite3.Cursor):
            pass
        db_error = sqlite3.OperationalError('database is locked')
        InterceptCursor.execute = MagicMock(side_effect=db_error)
        with patch('sqlite3.Cursor', new=InterceptCursor):
            conn = sqlite3.connect(':memory:', check_same_thread=False,
                                   factory=GreenDBConnection, timeout=0.1)
            self.assertRaises(Timeout, conn.execute, 'select 1')
            self.assertTrue(InterceptCursor.execute.called)
            # Every retry must have been made with identical arguments.
            self.assertEqual(InterceptCursor.execute.call_args_list,
                             list((InterceptCursor.execute.call_args,) *
                                  InterceptCursor.execute.call_count))

    def test_commit_when_locked(self):
        # BUGFIX: renamed from 'text_commit_when_locked' -- the typo meant
        # unittest never discovered or ran this test.
        # This test is dependent on the code under test calling commit
        # as sqlite3.Connection.commit in a subclass.
        class InterceptConnection(sqlite3.Connection):
            pass
        db_error = sqlite3.OperationalError('database is locked')
        InterceptConnection.commit = MagicMock(side_effect=db_error)
        with patch('sqlite3.Connection', new=InterceptConnection):
            conn = sqlite3.connect(':memory:', check_same_thread=False,
                                   factory=GreenDBConnection, timeout=0.1)
            self.assertRaises(Timeout, conn.commit)
            self.assertTrue(InterceptConnection.commit.called)
            # Every retry must have been made with identical arguments.
            self.assertEqual(InterceptConnection.commit.call_args_list,
                             list((InterceptConnection.commit.call_args,) *
                                  InterceptConnection.commit.call_count))
class TestGetDBConnection(unittest.TestCase):
    """get_db_connection() should hand back a usable connection, reject
    bad paths, and time out (via eventlet) on a perpetually locked db."""

    def test_normal_case(self):
        self.assert_(hasattr(get_db_connection(':memory:'), 'execute'))

    def test_invalid_path(self):
        self.assertRaises(DatabaseConnectionError, get_db_connection,
                          'invalid database path / name')

    def test_locked_db(self):
        # get_db_connection is expected to call execute/commit through a
        # sqlite3.Cursor subclass, so patching Cursor intercepts it.
        class PatchedCursor(sqlite3.Cursor):
            pass
        locked_error = sqlite3.OperationalError('database is locked')
        mock_db_cmd = MagicMock(side_effect=locked_error)
        PatchedCursor.execute = mock_db_cmd
        with patch('sqlite3.Cursor', new=PatchedCursor):
            self.assertRaises(Timeout, get_db_connection, ':memory:',
                              timeout=0.1)
        self.assertTrue(mock_db_cmd.called)
        # Every retry must have used identical arguments.
        self.assertEqual(mock_db_cmd.call_args_list,
                         [mock_db_cmd.call_args] * mock_db_cmd.call_count)
class TestDatabaseBroker(unittest.TestCase):
    """Exercises swift.common.db.DatabaseBroker against in-memory and
    on-disk SQLite databases: initialization, deletion, locking, sync
    point bookkeeping, replication info, metadata and quarantining."""

    def setUp(self):
        # Fresh scratch directory for on-disk database files.
        self.testdir = mkdtemp()

    def tearDown(self):
        rmtree(self.testdir, ignore_errors=1)

    def test_DB_PREALLOCATION_setting(self):
        """_preallocate is a no-op unless DB_PREALLOCATION is on; with it
        on, preallocating a nonexistent file raises OSError."""
        u = uuid4().hex
        b = DatabaseBroker(u)
        swift.common.db.DB_PREALLOCATION = False
        b._preallocate()
        swift.common.db.DB_PREALLOCATION = True
        self.assertRaises(OSError, b._preallocate)

    def test_memory_db_init(self):
        """An in-memory broker cannot be initialize()d (no _initialize)."""
        broker = DatabaseBroker(':memory:')
        self.assertEqual(broker.db_file, ':memory:')
        self.assertRaises(AttributeError, broker.initialize,
                          normalize_timestamp('0'))

    def test_disk_db_init(self):
        """Constructing an on-disk broker records the path but does not
        open a connection yet."""
        db_file = os.path.join(self.testdir, '1.db')
        broker = DatabaseBroker(db_file)
        self.assertEqual(broker.db_file, db_file)
        self.assert_(broker.conn is None)

    def test_disk_preallocate(self):
        """A nearly-empty db file should be preallocated to the first
        step boundary (1 MB)."""
        test_size = [-1]

        def fallocate_stub(fd, size):
            # Capture the requested allocation size instead of touching
            # the filesystem.
            test_size[0] = size

        with patch('swift.common.db.fallocate', fallocate_stub):
            db_file = os.path.join(self.testdir, 'pre.db')
            # Write 1 byte and hope that the fs will allocate less than 1 MB.
            f = open(db_file, "w")
            f.write('@')
            f.close()
            b = DatabaseBroker(db_file)
            b._preallocate()
            # We only wrote 1 byte, so we should end with the 1st step or 1 MB.
            self.assertEquals(test_size[0], 1024 * 1024)

    def test_initialize(self):
        """initialize() must call _initialize with a connection and the
        normalized put timestamp, create the sync tables, and refuse to
        re-create an existing on-disk database."""
        self.assertRaises(AttributeError,
                          DatabaseBroker(':memory:').initialize,
                          normalize_timestamp('1'))
        stub_dict = {}

        def stub(*args, **kwargs):
            # Record the most recent call's args/kwargs for inspection.
            for key in stub_dict.keys():
                del stub_dict[key]
            stub_dict['args'] = args
            for key, value in kwargs.items():
                stub_dict[key] = value

        broker = DatabaseBroker(':memory:')
        broker._initialize = stub
        broker.initialize(normalize_timestamp('1'))
        self.assert_(hasattr(stub_dict['args'][0], 'execute'))
        self.assertEquals(stub_dict['args'][1], '0000000001.00000')
        with broker.get() as conn:
            conn.execute('SELECT * FROM outgoing_sync')
            conn.execute('SELECT * FROM incoming_sync')
        broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
        broker._initialize = stub
        broker.initialize(normalize_timestamp('1'))
        self.assert_(hasattr(stub_dict['args'][0], 'execute'))
        self.assertEquals(stub_dict['args'][1], '0000000001.00000')
        with broker.get() as conn:
            conn.execute('SELECT * FROM outgoing_sync')
            conn.execute('SELECT * FROM incoming_sync')
        # A second broker over the same file must refuse initialization.
        broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
        broker._initialize = stub
        self.assertRaises(DatabaseAlreadyExists,
                          broker.initialize, normalize_timestamp('1'))

    def test_delete_db(self):
        """delete_db() must invoke the subclass _delete_db hook and clear
        metadata values (keeping tombstone timestamps)."""
        def init_stub(conn, put_timestamp):
            conn.execute('CREATE TABLE test (one TEXT)')
            conn.execute('CREATE TABLE test_stat (id TEXT)')
            # NOTE(review): str(uuid4) stringifies the *function*, not a
            # fresh uuid -- presumably meant str(uuid4()); harmless here.
            conn.execute('INSERT INTO test_stat (id) VALUES (?)',
                         (str(uuid4),))
            conn.execute('INSERT INTO test (one) VALUES ("1")')
            conn.commit()
        stub_called = [False]

        def delete_stub(*a, **kw):
            stub_called[0] = True

        broker = DatabaseBroker(':memory:')
        broker.db_type = 'test'
        broker._initialize = init_stub
        # Initializes a good broker for us
        broker.initialize(normalize_timestamp('1'))
        self.assert_(broker.conn is not None)
        broker._delete_db = delete_stub
        stub_called[0] = False
        broker.delete_db('2')
        self.assert_(stub_called[0])
        broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
        broker.db_type = 'test'
        broker._initialize = init_stub
        broker.initialize(normalize_timestamp('1'))
        broker._delete_db = delete_stub
        stub_called[0] = False
        broker.delete_db('2')
        self.assert_(stub_called[0])
        # ensure that metadata was cleared
        m2 = broker.metadata
        self.assert_(not any(v[0] for v in m2.itervalues()))
        self.assert_(all(v[1] == normalize_timestamp('2')
                         for v in m2.itervalues()))

    def test_get(self):
        """get() must fail on uninitialized brokers, roll back on error,
        commit on success, and quarantine malformed/corrupted files."""
        broker = DatabaseBroker(':memory:')
        got_exc = False
        try:
            with broker.get() as conn:
                conn.execute('SELECT 1')
        except Exception:
            got_exc = True
        # NOTE(review): this first got_exc is never asserted before being
        # reset below -- the in-memory failure case is effectively
        # unchecked.
        broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
        got_exc = False
        try:
            with broker.get() as conn:
                conn.execute('SELECT 1')
        except Exception:
            got_exc = True
        self.assert_(got_exc)

        def stub(*args, **kwargs):
            pass

        broker._initialize = stub
        broker.initialize(normalize_timestamp('1'))
        with broker.get() as conn:
            conn.execute('CREATE TABLE test (one TEXT)')
        try:
            with broker.get() as conn:
                conn.execute('INSERT INTO test (one) VALUES ("1")')
                raise Exception('test')
                # Unreachable commit: the raise above forces the context
                # manager to roll back, which the next block verifies.
                conn.commit()
        except Exception:
            pass
        broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
        with broker.get() as conn:
            self.assertEquals(
                [r[0] for r in conn.execute('SELECT * FROM test')], [])
        with broker.get() as conn:
            conn.execute('INSERT INTO test (one) VALUES ("1")')
            conn.commit()
        broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
        with broker.get() as conn:
            self.assertEquals(
                [r[0] for r in conn.execute('SELECT * FROM test')], ['1'])
        # Quarantine checks: broken db files must be moved aside with an
        # explanatory exception.
        dbpath = os.path.join(self.testdir, 'dev', 'dbs', 'par', 'pre', 'db')
        mkdirs(dbpath)
        qpath = os.path.join(self.testdir, 'dev', 'quarantined', 'tests', 'db')
        with patch('swift.common.db.renamer', lambda a, b: b):
            # Test malformed database
            copy(os.path.join(os.path.dirname(__file__),
                              'malformed_example.db'),
                 os.path.join(dbpath, '1.db'))
            broker = DatabaseBroker(os.path.join(dbpath, '1.db'))
            broker.db_type = 'test'
            exc = None
            try:
                with broker.get() as conn:
                    conn.execute('SELECT * FROM test')
            except Exception as err:
                exc = err
            self.assertEquals(
                str(exc),
                'Quarantined %s to %s due to malformed database' %
                (dbpath, qpath))
            # Test corrupted database
            copy(os.path.join(os.path.dirname(__file__),
                              'corrupted_example.db'),
                 os.path.join(dbpath, '1.db'))
            broker = DatabaseBroker(os.path.join(dbpath, '1.db'))
            broker.db_type = 'test'
            exc = None
            try:
                with broker.get() as conn:
                    conn.execute('SELECT * FROM test')
            except Exception as err:
                exc = err
            self.assertEquals(
                str(exc),
                'Quarantined %s to %s due to corrupted database' %
                (dbpath, qpath))

    def test_lock(self):
        """lock() must fail before init, be re-enterable sequentially,
        time out against a concurrent holder, and release on exception."""
        broker = DatabaseBroker(os.path.join(self.testdir, '1.db'), timeout=.1)
        got_exc = False
        try:
            with broker.lock():
                pass
        except Exception:
            got_exc = True
        self.assert_(got_exc)

        def stub(*args, **kwargs):
            pass

        broker._initialize = stub
        broker.initialize(normalize_timestamp('1'))
        with broker.lock():
            pass
        with broker.lock():
            pass
        broker2 = DatabaseBroker(os.path.join(self.testdir, '1.db'),
                                 timeout=.1)
        broker2._initialize = stub
        with broker.lock():
            # A second broker must time out while the first holds the lock.
            got_exc = False
            try:
                with broker2.lock():
                    pass
            except LockTimeout:
                got_exc = True
            self.assert_(got_exc)
        try:
            with broker.lock():
                raise Exception('test')
        except Exception:
            pass
        # The lock must have been released despite the exception.
        with broker.lock():
            pass

    def test_newid(self):
        """newid() must assign a fresh broker id each call and record a
        sync point for the remote id (starting at -1 on an empty db)."""
        broker = DatabaseBroker(':memory:')
        broker.db_type = 'test'
        broker.db_contains_type = 'test'
        uuid1 = str(uuid4())

        def _initialize(conn, timestamp):
            conn.execute('CREATE TABLE test (one TEXT)')
            conn.execute('CREATE TABLE test_stat (id TEXT)')
            conn.execute('INSERT INTO test_stat (id) VALUES (?)', (uuid1,))
            conn.commit()

        broker._initialize = _initialize
        broker.initialize(normalize_timestamp('1'))
        uuid2 = str(uuid4())
        broker.newid(uuid2)
        with broker.get() as conn:
            uuids = [r[0] for r in conn.execute('SELECT * FROM test_stat')]
            self.assertEquals(len(uuids), 1)
            self.assertNotEquals(uuids[0], uuid1)
            uuid1 = uuids[0]
            points = [(r[0], r[1]) for r in conn.execute(
                'SELECT sync_point, '
                'remote_id FROM incoming_sync WHERE remote_id = ?', (uuid2,))]
            self.assertEquals(len(points), 1)
            # No rows yet, so the recorded sync point is -1.
            self.assertEquals(points[0][0], -1)
            self.assertEquals(points[0][1], uuid2)
            conn.execute('INSERT INTO test (one) VALUES ("1")')
            conn.commit()
        uuid3 = str(uuid4())
        broker.newid(uuid3)
        with broker.get() as conn:
            uuids = [r[0] for r in conn.execute('SELECT * FROM test_stat')]
            self.assertEquals(len(uuids), 1)
            self.assertNotEquals(uuids[0], uuid1)
            uuid1 = uuids[0]
            points = [(r[0], r[1]) for r in conn.execute(
                'SELECT sync_point, '
                'remote_id FROM incoming_sync WHERE remote_id = ?', (uuid3,))]
            self.assertEquals(len(points), 1)
            self.assertEquals(points[0][1], uuid3)
        broker.newid(uuid2)
        with broker.get() as conn:
            uuids = [r[0] for r in conn.execute('SELECT * FROM test_stat')]
            self.assertEquals(len(uuids), 1)
            self.assertNotEquals(uuids[0], uuid1)
            points = [(r[0], r[1]) for r in conn.execute(
                'SELECT sync_point, '
                'remote_id FROM incoming_sync WHERE remote_id = ?', (uuid2,))]
            self.assertEquals(len(points), 1)
            self.assertEquals(points[0][1], uuid2)

    def test_get_items_since(self):
        """get_items_since() must page rows strictly after the given
        ROWID, honoring the limit."""
        broker = DatabaseBroker(':memory:')
        broker.db_type = 'test'
        broker.db_contains_type = 'test'

        def _initialize(conn, timestamp):
            conn.execute('CREATE TABLE test (one TEXT)')
            conn.execute('INSERT INTO test (one) VALUES ("1")')
            conn.execute('INSERT INTO test (one) VALUES ("2")')
            conn.execute('INSERT INTO test (one) VALUES ("3")')
            conn.commit()

        broker._initialize = _initialize
        broker.initialize(normalize_timestamp('1'))
        self.assertEquals(broker.get_items_since(-1, 10),
                          [{'one': '1'}, {'one': '2'}, {'one': '3'}])
        self.assertEquals(broker.get_items_since(-1, 2),
                          [{'one': '1'}, {'one': '2'}])
        self.assertEquals(broker.get_items_since(1, 2),
                          [{'one': '2'}, {'one': '3'}])
        self.assertEquals(broker.get_items_since(3, 2), [])
        self.assertEquals(broker.get_items_since(999, 2), [])

    def test_get_sync(self):
        """get_sync() must return -1 for unknown remotes, track incoming
        points via newid(), and track outgoing points via merge_syncs."""
        broker = DatabaseBroker(':memory:')
        broker.db_type = 'test'
        broker.db_contains_type = 'test'
        uuid1 = str(uuid4())

        def _initialize(conn, timestamp):
            conn.execute('CREATE TABLE test (one TEXT)')
            conn.execute('CREATE TABLE test_stat (id TEXT)')
            conn.execute('INSERT INTO test_stat (id) VALUES (?)', (uuid1,))
            conn.execute('INSERT INTO test (one) VALUES ("1")')
            conn.commit()
            pass

        broker._initialize = _initialize
        broker.initialize(normalize_timestamp('1'))
        uuid2 = str(uuid4())
        self.assertEquals(broker.get_sync(uuid2), -1)
        broker.newid(uuid2)
        self.assertEquals(broker.get_sync(uuid2), 1)
        uuid3 = str(uuid4())
        self.assertEquals(broker.get_sync(uuid3), -1)
        with broker.get() as conn:
            conn.execute('INSERT INTO test (one) VALUES ("2")')
            conn.commit()
        broker.newid(uuid3)
        self.assertEquals(broker.get_sync(uuid2), 1)
        self.assertEquals(broker.get_sync(uuid3), 2)
        # Outgoing points are independent of incoming ones.
        self.assertEquals(broker.get_sync(uuid2, incoming=False), -1)
        self.assertEquals(broker.get_sync(uuid3, incoming=False), -1)
        broker.merge_syncs([{'sync_point': 1, 'remote_id': uuid2}],
                           incoming=False)
        self.assertEquals(broker.get_sync(uuid2), 1)
        self.assertEquals(broker.get_sync(uuid3), 2)
        self.assertEquals(broker.get_sync(uuid2, incoming=False), 1)
        self.assertEquals(broker.get_sync(uuid3, incoming=False), -1)
        broker.merge_syncs([{'sync_point': 2, 'remote_id': uuid3}],
                           incoming=False)
        self.assertEquals(broker.get_sync(uuid2, incoming=False), 1)
        self.assertEquals(broker.get_sync(uuid3, incoming=False), 2)

    def test_merge_syncs(self):
        """merge_syncs() must upsert sync points per remote id, keeping
        incoming and outgoing tables independent."""
        broker = DatabaseBroker(':memory:')

        def stub(*args, **kwargs):
            pass

        broker._initialize = stub
        broker.initialize(normalize_timestamp('1'))
        uuid2 = str(uuid4())
        broker.merge_syncs([{'sync_point': 1, 'remote_id': uuid2}])
        self.assertEquals(broker.get_sync(uuid2), 1)
        uuid3 = str(uuid4())
        broker.merge_syncs([{'sync_point': 2, 'remote_id': uuid3}])
        self.assertEquals(broker.get_sync(uuid2), 1)
        self.assertEquals(broker.get_sync(uuid3), 2)
        self.assertEquals(broker.get_sync(uuid2, incoming=False), -1)
        self.assertEquals(broker.get_sync(uuid3, incoming=False), -1)
        broker.merge_syncs([{'sync_point': 3, 'remote_id': uuid2},
                            {'sync_point': 4, 'remote_id': uuid3}],
                           incoming=False)
        self.assertEquals(broker.get_sync(uuid2, incoming=False), 3)
        self.assertEquals(broker.get_sync(uuid3, incoming=False), 4)
        self.assertEquals(broker.get_sync(uuid2), 1)
        self.assertEquals(broker.get_sync(uuid3), 2)
        broker.merge_syncs([{'sync_point': 5, 'remote_id': uuid2}])
        self.assertEquals(broker.get_sync(uuid2), 5)

    def test_get_replication_info(self):
        self.get_replication_info_tester(metadata=False)

    def test_get_replication_info_with_metadata(self):
        self.get_replication_info_tester(metadata=True)

    def get_replication_info_tester(self, metadata=False):
        """Shared body for the two replication-info tests; builds a full
        test schema (with triggers maintaining count and hash) and checks
        get_replication_info() after insert and delete. Returns the
        broker so test_metadata can reuse it."""
        broker = DatabaseBroker(':memory:', account='a')
        broker.db_type = 'test'
        broker.db_contains_type = 'test'
        broker_creation = normalize_timestamp(1)
        broker_uuid = str(uuid4())
        broker_metadata = metadata and simplejson.dumps(
            {'Test': ('Value', normalize_timestamp(1))}) or ''

        def _initialize(conn, put_timestamp):
            if put_timestamp is None:
                put_timestamp = normalize_timestamp(0)
            # Mimic the real account/container schema: triggers keep
            # test_count and the rolling hash up to date.
            conn.executescript('''
                CREATE TABLE test (
                    ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
                    name TEXT UNIQUE,
                    created_at TEXT
                );
                CREATE TRIGGER test_insert AFTER INSERT ON test
                BEGIN
                    UPDATE test_stat
                    SET test_count = test_count + 1,
                        hash = chexor(hash, new.name, new.created_at);
                END;
                CREATE TRIGGER test_update BEFORE UPDATE ON test
                BEGIN
                    SELECT RAISE(FAIL,
                                 'UPDATE not allowed; DELETE and INSERT');
                END;
                CREATE TRIGGER test_delete AFTER DELETE ON test
                BEGIN
                    UPDATE test_stat
                    SET test_count = test_count - 1,
                        hash = chexor(hash, old.name, old.created_at);
                END;
                CREATE TABLE test_stat (
                    account TEXT,
                    created_at TEXT,
                    put_timestamp TEXT DEFAULT '0',
                    delete_timestamp TEXT DEFAULT '0',
                    test_count INTEGER,
                    hash TEXT default '00000000000000000000000000000000',
                    id TEXT
                    %s
                );
                INSERT INTO test_stat (test_count) VALUES (0);
            ''' % (metadata and ", metadata TEXT DEFAULT ''" or ""))
            conn.execute('''
                UPDATE test_stat
                SET account = ?, created_at = ?, id = ?, put_timestamp = ?
            ''', (broker.account, broker_creation, broker_uuid, put_timestamp))
            if metadata:
                conn.execute('UPDATE test_stat SET metadata = ?',
                             (broker_metadata,))
            conn.commit()

        broker._initialize = _initialize
        put_timestamp = normalize_timestamp(2)
        broker.initialize(put_timestamp)
        info = broker.get_replication_info()
        self.assertEquals(info, {
            'count': 0,
            'hash': '00000000000000000000000000000000',
            'created_at': broker_creation, 'put_timestamp': put_timestamp,
            'delete_timestamp': '0', 'max_row': -1, 'id': broker_uuid,
            'metadata': broker_metadata})
        insert_timestamp = normalize_timestamp(3)
        with broker.get() as conn:
            conn.execute('''
                INSERT INTO test (name, created_at) VALUES ('test', ?)
            ''', (insert_timestamp,))
            conn.commit()
        info = broker.get_replication_info()
        self.assertEquals(info, {
            'count': 1,
            'hash': 'bdc4c93f574b0d8c2911a27ce9dd38ba',
            'created_at': broker_creation, 'put_timestamp': put_timestamp,
            'delete_timestamp': '0', 'max_row': 1, 'id': broker_uuid,
            'metadata': broker_metadata})
        with broker.get() as conn:
            conn.execute('DELETE FROM test')
            conn.commit()
        info = broker.get_replication_info()
        # max_row stays at 1: AUTOINCREMENT never reuses ROWIDs.
        self.assertEquals(info, {
            'count': 0,
            'hash': '00000000000000000000000000000000',
            'created_at': broker_creation, 'put_timestamp': put_timestamp,
            'delete_timestamp': '0', 'max_row': 1, 'id': broker_uuid,
            'metadata': broker_metadata})
        return broker

    def test_metadata(self):
        """Metadata updates keep (value, timestamp) pairs; deletions are
        empty-value tombstones that _reclaim only removes once the reclaim
        point passes the deletion timestamp."""
        def reclaim(broker, timestamp):
            with broker.get() as conn:
                broker._reclaim(conn, timestamp)
                conn.commit()
        # Initializes a good broker for us
        broker = self.get_replication_info_tester(metadata=True)
        # Add our first item
        first_timestamp = normalize_timestamp(1)
        first_value = '1'
        broker.update_metadata({'First': [first_value, first_timestamp]})
        self.assert_('First' in broker.metadata)
        self.assertEquals(broker.metadata['First'],
                          [first_value, first_timestamp])
        # Add our second item
        second_timestamp = normalize_timestamp(2)
        second_value = '2'
        broker.update_metadata({'Second': [second_value, second_timestamp]})
        self.assert_('First' in broker.metadata)
        self.assertEquals(broker.metadata['First'],
                          [first_value, first_timestamp])
        self.assert_('Second' in broker.metadata)
        self.assertEquals(broker.metadata['Second'],
                          [second_value, second_timestamp])
        # Update our first item
        first_timestamp = normalize_timestamp(3)
        first_value = '1b'
        broker.update_metadata({'First': [first_value, first_timestamp]})
        self.assert_('First' in broker.metadata)
        self.assertEquals(broker.metadata['First'],
                          [first_value, first_timestamp])
        self.assert_('Second' in broker.metadata)
        self.assertEquals(broker.metadata['Second'],
                          [second_value, second_timestamp])
        # Delete our second item (by setting to empty string)
        second_timestamp = normalize_timestamp(4)
        second_value = ''
        broker.update_metadata({'Second': [second_value, second_timestamp]})
        self.assert_('First' in broker.metadata)
        self.assertEquals(broker.metadata['First'],
                          [first_value, first_timestamp])
        self.assert_('Second' in broker.metadata)
        self.assertEquals(broker.metadata['Second'],
                          [second_value, second_timestamp])
        # Reclaim at point before second item was deleted
        reclaim(broker, normalize_timestamp(3))
        self.assert_('First' in broker.metadata)
        self.assertEquals(broker.metadata['First'],
                          [first_value, first_timestamp])
        self.assert_('Second' in broker.metadata)
        self.assertEquals(broker.metadata['Second'],
                          [second_value, second_timestamp])
        # Reclaim at point second item was deleted
        reclaim(broker, normalize_timestamp(4))
        self.assert_('First' in broker.metadata)
        self.assertEquals(broker.metadata['First'],
                          [first_value, first_timestamp])
        self.assert_('Second' in broker.metadata)
        self.assertEquals(broker.metadata['Second'],
                          [second_value, second_timestamp])
        # Reclaim after point second item was deleted
        reclaim(broker, normalize_timestamp(5))
        self.assert_('First' in broker.metadata)
        self.assertEquals(broker.metadata['First'],
                          [first_value, first_timestamp])
        self.assert_('Second' not in broker.metadata)
# Allow running this module directly as a test script.
if __name__ == '__main__':
    unittest.main()
| 1.960938 | 2 |
web-api/python/ws.py | OpenBEL/openbel-framework-examples | 1 | 12767137 | try:
from suds.client import Client
from suds.wsse import *
except ImportError as ie:
print
print "You're missing suds, the lightweight SOAP python client."
print "(https://fedorahosted.org/suds/)"
print
raise ie
import logging
class WS:
    """Thin wrapper around a suds SOAP client.

    When both a username and a password are supplied, WS-Security
    UsernameToken authentication is attached to every request.
    """

    def __init__(self, wsdl_url, username=None, password=None):
        self.client = Client(wsdl_url)
        # Keep the WSDL cache effectively disabled (one-second lifetime).
        self.client.options.cache.setduration(seconds=1)
        if username and password:
            self._enable_wsse(username, password)
        self.service = self.client.service

    def _enable_wsse(self, username, password):
        # Attach a nonce'd, timestamped UsernameToken security header.
        token = UsernameToken(username, password)
        token.setnonce()
        token.setcreated()
        security = Security()
        security.tokens.append(token)
        self.client.set_options(wsse=security)

    def __str__(self):
        return str(self.client)

    def create(self, obj):
        # Factory for WSDL-declared complex types.
        return self.client.factory.create(obj)
def usage():
    # Print a one-line usage hint (Python 2 print statements; `me` is the
    # script name set as a global by start()).
    print 'Usage:', me, '<wsdl_url> <username> <password>'
    print "Try '" + me, " --help' for more information."
def help():
    # Full --help output: usage, description and options. Note: shadows
    # the help() builtin within this module.
    print 'Usage:', me, '<wsdl_url> <username> <password>'
    print 'Estalishes a connection to web services.'
    print 'Example:', me, 'http://host:8080/GTPWebServices/webapi/webapi.wsdl [myUserName] [myPassword]'
    print
    print 'Miscellaneous:'
    print ' -h, --help\t\tdisplay this help and exit'
    print
def exit_success():
    """Terminate the process signalling success (exit status 0)."""
    raise SystemExit(0)
def exit_failure():
    """Terminate the process signalling failure (exit status 1)."""
    raise SystemExit(1)
def ws_print(items):
    # Pretty-print a list of suds result objects: a numbered header per
    # item, then each (name, value) entry with newlines stripped.
    for i in range(0, len(items)):
        print '#%d' % (i + 1)
        for entry in items[i]:
            name = str(entry[0]).strip('\n')
            value = str(entry[1]).strip('\n')
            print '\t%s: %s' % (name, value)
        print
def start():
    # Parse sys.argv and return a connected WS instance, or print
    # usage/help and exit. Accepts either <wsdl_url> alone or
    # <wsdl_url> <username> <password>. Sets the `me` global (script
    # name) used by usage()/help(); `ws` is declared global here but
    # never assigned -- presumably callers bind the return value.
    global me, ws
    me = sys.argv[0]
    for arg in sys.argv:
        if arg == '--help' or arg == '-h':
            help()
            exit_failure()
    if len(sys.argv) == 4:
        # Authenticated connection: url, username, password.
        url, un, pw = sys.argv[1:4]
        ret = WS(url, un, pw)
        # Silence suds' chatty client logging.
        logger = logging.getLogger('suds.client')
        logger.setLevel(logging.CRITICAL)
        return ret
    if len(sys.argv) == 2:
        # Anonymous connection: url only.
        url = sys.argv[1]
        ret = WS(url)
        logger = logging.getLogger('suds.client')
        logger.setLevel(logging.CRITICAL)
        return ret
    me = sys.argv[0]
    usage()
    exit_failure()
| 2.75 | 3 |
scripts/quest/q23600e.py | G00dBye/YYMS | 54 | 12767138 | <reponame>G00dBye/YYMS
# Created by MechAviv
# Quest ID :: 23600
# Not coded yet
# Cutscene script driven through the opaque `sm` script-manager global
# supplied by the game server at execution time. Call order is
# significant -- the sm calls interleave dialogue, delays and camera
# moves -- so statements must not be reordered.
# Live object id of NPC 2159377, used for effects/movement below.
OBJECT_6 = sm.getIntroNpcObjectID(2159377)
sm.curNodeEventEnd(True)
sm.setTemporarySkillSet(0)
# Lock the player into in-game direction (cutscene) mode.
sm.setInGameDirectionMode(True, True, False, False)
sm.sendDelay(900)
sm.moveCamera(False, 100, -307, -41)
sm.sendDelay(2604)
sm.setSpeakerID(2159377)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendNext("Good, very good! I am very satisfied with these results. Just a few more fine adjustments and...")
sm.startQuest(23724)
sm.completeQuest(23600)
sm.changeBGM("Bgm30.img/fromUnderToUpper", 0, 0)
# Balloon message effect anchored on the NPC object.
sm.showEffect("Effect/Direction12.img/effect/tuto/BalloonMsg1/0", 1200, 0, -120, 0, OBJECT_6, False, 0)
sm.moveNpcByObjectId(OBJECT_6, True, 1, 100)
sm.sendDelay(90)
sm.setSpeakerID(2159377)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendNext("An intruder?! It could be Orchid. Turn on the monitor!")
# Quest 23725 is started and completed within the cutscene itself.
sm.startQuest(23725)
sm.sendDelay(2100)
sm.completeQuest(23725)
sm.sendDelay(1200)
sm.setSpeakerID(2159377)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendNext("Is it the Resistance? I suppose that would be better than Orchid, but... this is the worst possible time!")
sm.setSpeakerID(2159377)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendSay("Wait, wait, wait. Maybe this will work. One more test, yes... they will be perfect... Hahaha... MWAHAHAHA!")
# Warp the player to map 931050940, portal 0, ending the scene.
sm.warp(931050940, 0)
src/api/utils/initial_data.py | andela/andela-societies-backend | 1 | 12767139 | <filename>src/api/utils/initial_data.py
"""Sample Data for Initial Run.
This contains the sample initial data required for the test run of the system.
"""
import datetime
import os
import base64
import requests
from jose import ExpiredSignatureError, JWTError
from api.models import (
Center, Cohort, Society, LoggedActivity, Activity, Role, ActivityType, User
)
from api.services.auth import verify_token
def centre_societies_roles_data_dev(production=False):
    """Generate center, society and role seed data.

    Returns (roles, phoenix, istelle, sparks, invictus) when
    ``production`` is true, otherwise the same tuple with the three test
    centers (nairobi, kampala, lagos) inserted after ``roles``.
    """
    # test centers
    nairobi, kampala, lagos = [
        Center(name=city) for city in ('nairobi', 'kampala', 'lagos')]
    # societies
    phoenix, istelle, sparks, invictus = [
        Society(name=society_name) for society_name in
        ('phoenix', 'istelle', 'sparks', 'invictus')]
    # roles available; the last three have no pre-assigned uuid
    roles = (
        Role(uuid="-KXGy1EB1oimjQgFim6F", name="success"),
        Role(uuid="-KXGy1EB1oimjQgFim6L", name="finance"),
        Role(uuid="-KXGy1EB1oimjQgFim6C", name="fellow"),
        Role(uuid="-KkLwgbeJUO0dQKsEk1i", name="success ops"),
        Role(uuid="-KiihfZoseQeqC6bWTau", name="andelan"),
        Role(name="society president"),
        Role(name="society vice president"),
        Role(name="society secretary")
    )
    if production:
        return roles, phoenix, istelle, sparks, invictus
    return roles, nairobi, kampala, lagos, phoenix, istelle, sparks, invictus
# setup dev user info to access Andela API
def get_andela_api_cohort_location_data():
    """Fetch cohort and center seed data from the Andela API.

    Reads DEV_TOKEN, ANDELA_API_URL and PUBLIC_KEY from the environment,
    verifies the token, then pulls the cohort and location listings.

    Returns:
        (tuple of Cohort, tuple of Center): empty tuples when any
        environment variable is missing; whatever was built so far when
        fetching/verification fails part-way through (errors are printed,
        matching the original best-effort behaviour).
    """
    authorization_token = os.environ.get('DEV_TOKEN')
    url = os.environ.get('ANDELA_API_URL')
    public_key_token = os.environ.get('PUBLIC_KEY')
    cohorts = []
    centers = []
    if not (public_key_token and authorization_token and url):
        return tuple(), tuple()
    try:
        public_key = base64.b64decode(public_key_token).decode("utf-8")
        # decode token
        payload = verify_token(authorization_token,
                               public_key,
                               "andela.com",
                               "accounts.andela.com")
        print('\n\n Getting Data from API : ',
              payload.get('UserInfo').get('first_name'))
        headers = {'Authorization': 'Bearer ' + authorization_token}
        cohort_data_response = requests.get(url + 'cohorts',
                                            headers=headers).json()
        location_data_response = requests.get(url + 'locations',
                                              headers=headers).json()
        # test centers, keyed by API name so cohorts can find their center
        locations = {}
        for location in location_data_response.get('values'):
            name = location.get("name")
            locations[name] = Center(name=name.lower(),
                                     uuid=location.get('id'))
        centers = list(locations.values())
        # cohorts
        for cohort_information in cohort_data_response.get('values'):
            name = cohort_information.get('name')
            center = locations.get(
                cohort_information.get('location').get('name'))
            cohorts.append(Cohort(name=name.lower(),
                                  uuid=cohort_information.get('id'),
                                  center_id=center.uuid))
    except ExpiredSignatureError:
        print("The authorization token supplied is expired.")
    except JWTError:
        print("Something went wrong while validating your token.")
    except Exception:
        print("Your initial dev-data, won't work...: I DON'T KNOW WHY.")
    # BUGFIX: this return previously lived in a ``finally`` block, which
    # silently swallowed *every* exception (including KeyboardInterrupt /
    # SystemExit) and overrode the try-body's own return. Returning here
    # preserves the partial-results behaviour without that hazard.
    return tuple(cohorts), tuple(centers)
# activity types
def activity_types_data():
    """Build the default catalogue of point-earning activity types.

    Returns the ActivityType instances as a tuple, in a fixed order that
    callers may rely on.
    """
    # (name, description, point value, supports multiple participants)
    specs = (
        ("Bootcamp Interviews",
         "Interviewing candidate for a fellow recruiting event", 20, True),
        ("Open Saturdays Guides",
         "Guide applicants with the recruitment team during open Saturdays",
         50, False),
        ("Tech Event", "Organize a tech event", 2500, False),
        ("Open Source Project",
         "Starting an open source project which has at least 40 stars from"
         " non-Andelans", 2500, False),
        ("Hackathon", "Participating in a Hackathon", 100, False),
        ("Blog", "Write a blog that is published on Andela's website",
         1000, False),
        ("App", "Build an app that is marketed on Andela's website",
         10000, False),
        ("Mentoring", "Mentor a prospect for Andela 21", 250, False),
        ("Marketing",
         "Participating in an Andela marketing event with partners",
         2000, False),
        ("Press Interview",
         "Participating in a press interview for Andela marketing",
         3000, False),
        ("External Mentoring",
         "Mentoring students outside of Andela e.g. via SheLovesCode",
         250, False),
    )
    activity_types = []
    for name, description, value, multi in specs:
        kwargs = dict(name=name, description=description, value=value)
        if multi:
            # Only passed where the original data set it explicitly, so
            # the model's own default applies everywhere else.
            kwargs['supports_multiple_participants'] = True
        activity_types.append(ActivityType(**kwargs))
    return tuple(activity_types)
def test_dev_user_seed_data(args):
    """Build dev/test users (member, president, success ops).

    Args:
        args: tuple of (nairobi Center, phoenix Society, roles tuple) as
            produced by centre_societies_roles_data_dev(); the role
            indices below depend on that tuple's ordering:
            roles[2]=fellow, roles[3]=success ops, roles[4]=andelan,
            roles[5]=society president.

    Returns:
        (member, president, success_ops) User instances.
    """
    (nairobi,
     phoenix,
     roles) = args
    # cohorts
    cohort_14_ke = Cohort(name='Cohort 14 Test', center=nairobi,
                          society=phoenix)
    # users
    # member: plain fellow + andelan
    member = User(
        uuid="-KdQsMtixI2U0y_-yJEH",
        name="Test User",
        photo="https://lh6.googleusercontent.com/-1DhBLOJentg/AAAAAAAAA"
              "AI/AAAAAAAAABc/ImeP_cAI/photo.jpg?sz=50",
        email="<EMAIL>",
        center=nairobi,
        cohort=cohort_14_ke,
        society=phoenix
    )
    member.roles.extend([roles[2], roles[4]])
    # president: fellow + andelan + society president
    president = User(
        uuid="-KdQsMtixG4U0y_-yJEH",
        name="<NAME>",
        photo="https://lh6.googleusercontent.com/-1DhBLOJentg/AAAAAAAAA"
              "AI/AAAAAAnAABc/ImeP_cAI/photo.jpg?sz=50",
        email="<EMAIL>",
        center=nairobi,
        cohort=cohort_14_ke,
        society=phoenix
    )
    president.roles.extend([roles[2], roles[4], roles[5]])
    # success ops: success ops + andelan; no cohort/society membership
    success_ops = User(
        uuid="-KdQsMtixG4U0y_-yJEF",
        name="<NAME> ops",
        photo="https://lh6.googleusercontent.com/-1DhBLOJentg/AAAAAAAAA"
              "AI/AAAAAAnAABc/ImeP_cAI/photo.jpg?sz=50",
        email="<EMAIL>",
        center=nairobi
    )
    success_ops.roles.extend([roles[3], roles[4]])
    return (member, president, success_ops)
def test_dev_activities_seed_data(args):
    """Build seed Activity and LoggedActivity rows for the dev/test database.

    :param args: tuple of (president, member, success_ops, hackathon,
        interview, open_saturdays, phoenix, sparks, invictus)
    :return tuple: (hackathon_points, interview_points, open_saturday_points)
        LoggedActivity instances in approved / rejected / pending-like states
    """
    (president, member, success_ops,
     hackathon, interview, open_saturdays,
     phoenix, sparks, invictus
     ) = args
    # test activities (all dated in the near future relative to "today")
    python_hackathon = Activity(
        name="Hacktober Fest", activity_type=hackathon,
        activity_date=datetime.date.today() + datetime.timedelta(days=7),
        added_by=president
    )
    interview_2017 = Activity(
        name="2017-feb-bootcamp-17", activity_type=interview,
        activity_date=datetime.date.today() + datetime.timedelta(days=14),
        added_by=president)
    open_saturdays_2018 = Activity(
        name="2018-feb-meetup", activity_type=open_saturdays,
        activity_date=datetime.date.today() + datetime.timedelta(days=21),
        added_by=president
    )
    member.activities.extend([python_hackathon, interview_2017,
                              open_saturdays_2018])
    # Logged Activities
    # approved: counts toward the society total below
    hackathon_points = LoggedActivity(
        value=hackathon.value,
        activity=python_hackathon,
        user=member, society=phoenix,
        activity_type=hackathon,
        status='approved', approver_id=success_ops.uuid,
        reviewer_id=president.uuid,
        activity_date=python_hackathon.activity_date
    )
    phoenix._total_points = hackathon_points.value
    # rejected: value recorded (5 interview slots) but not awarded
    interview_points = LoggedActivity(
        value=interview.value * 5,
        activity=interview_2017,
        user=member, society=sparks,
        activity_type=interview,
        status='rejected', approver_id=success_ops.uuid,
        reviewer_id=president.uuid,
        activity_date=interview_2017.activity_date
    )
    # no status/approver: left in the model's default (pending) state
    open_saturday_points = LoggedActivity(
        value=open_saturdays.value,
        activity=open_saturdays_2018,
        user=member, society=invictus,
        activity_type=open_saturdays,
        activity_date=open_saturdays_2018.activity_date
    )
    return (hackathon_points, interview_points, open_saturday_points)
def generete_initial_data_run_time_env():
    """Sequential generate data when called.

    Builds the seed-data tuples appropriate for the current APP_SETTINGS
    environment (testing / production / dev) and returns them keyed by use.

    Closure: provides the required objects for other functions.

    :return dict: keys production_data, dev_data, activity_types, societies
    """
    # NOTE(review): the function name is misspelled ("generete"); renaming
    # would break external callers, so it is kept as-is.
    api_cohorts = api_centers = ()
    environment = os.getenv("APP_SETTINGS")
    if environment and not environment.lower() == 'testing':
        # generate andela api data: cohorts, centers (skipped under testing
        # to avoid the network call)
        api_cohorts, api_centers = get_andela_api_cohort_location_data()
    # generate activity types
    (interview, open_saturdays, tech_event, open_source, hackathon,
     blog, app, mentor, marketing, press,
     outside_mentoring) = activity_types_data()
    activity_types = (interview, open_saturdays, tech_event, open_source,
                      hackathon, blog, app, mentor, marketing, press,
                      outside_mentoring)
    # Defaults: empty tuples so the concatenations below work in every branch.
    roles = centers = users = logged_activities = ()
    if environment and environment.lower() == 'production':
        # production only needs roles + societies (True flag selects that)
        (roles, phoenix, istelle,
         sparks, invictus) = centre_societies_roles_data_dev(True)
    else:
        # generate dev data: cohort, societies, roles
        (roles, nairobi, kampala, lagos, phoenix,
         istelle, sparks, invictus) = centre_societies_roles_data_dev()
        centers = (nairobi, kampala, lagos)
        # generate user data
        args = (
            nairobi,
            phoenix,
            roles
        )
        (member, president, success_ops) = test_dev_user_seed_data(args)
        users = (member, president, success_ops)
        # dev logged activities
        args = (
            president, member, success_ops,
            hackathon, interview, open_saturdays,
            phoenix, sparks, invictus
        )
        logged_activities = test_dev_activities_seed_data(args)
    societies = (phoenix, istelle, sparks, invictus)
    production_data = api_centers + api_cohorts + roles + societies + \
        activity_types
    # dev data is a strict superset of production data
    dev_data = production_data + centers + users + logged_activities
    return dict(
        production_data=production_data,
        dev_data=dev_data,
        activity_types=activity_types,
        societies=societies
    )
| 2.4375 | 2 |
app/api/ambassador_routes.py | RyanGC93/Worldly | 6 | 12767140 | <filename>app/api/ambassador_routes.py
from flask import Blueprint
from flask_login import current_user, login_required
import json
from app.models import db, Event, Location, Ambassador, User, Review, PhotoGallery, EventCalendar, BookingCalendar
from sqlalchemy import exc
ambassador_routes = Blueprint('ambassadors', __name__)
# Gets All Events Owned by Ambassador
@ambassador_routes.route('/')
@login_required
def ambassadors():
    """Return every event owned by the current ambassador as a JSON string,
    bundled with the events' photo galleries, calendar entries and reviews.

    Responds 404 when the logged-in user has no Ambassador record and 500 on
    a database error.  Previously both the "not an ambassador" and the
    "unauthenticated" paths fell off the end of the function and returned
    None, which Flask turns into an opaque 500.
    """
    if current_user.is_authenticated:
        try:
            ambassador = db.session.query(Ambassador).filter(
                Ambassador.user_id == current_user.id).first()
            if ambassador:
                # IDs of every event owned by the current ambassador.
                # NOTE(review): this filters Event.ambassador_id against the
                # *user* id, while the join below compares it to Ambassador.id
                # — confirm which one the schema intends.
                events = db.session.query(Event.id).filter(
                    Event.ambassador_id == current_user.id).all()
                event_ids = [event[0] for event in events]
                event_keys = ['event_id', 'title', 'description',
                              'region', 'country', 'firstname', 'date', 'time',
                              'location_longitude', 'location_latitude',
                              'booking_id']
                # NOTE(review): BookingCalendar is selected but never joined —
                # presumably a cross join; verify against the intended output.
                event_values = db.session.query(
                    Event.id, Event.title, Event.description, Location.region,
                    Location.country, User.first_name, EventCalendar.date,
                    EventCalendar.time, Location.longitude, Location.latitude,
                    BookingCalendar.id).filter(
                        Event.id.in_(event_ids),
                        Location.event_id == Event.id,
                        Ambassador.id == Event.ambassador_id,
                        Ambassador.user_id == User.id).all()
                ambassador_events_info = {"ambassador_events_info": [
                    dict(zip(event_keys, event)) for event in event_values]}
                photo_gallery_keys = ['photo_id', 'event_id',
                                      'photo_description', 'url']
                photo_gallery_values = db.session.query(
                    PhotoGallery.id, PhotoGallery.event_id,
                    PhotoGallery.description, PhotoGallery.url).filter(
                        PhotoGallery.event_id.in_(event_ids)).all()
                photo_gallery = {"photo_gallery": [
                    dict(zip(photo_gallery_keys, photo))
                    for photo in photo_gallery_values]}
                event_calendar_keys = ['event_calendar_id', 'event_id',
                                       'date', 'time']
                event_calendar_values = db.session.query(
                    EventCalendar.id, EventCalendar.event_id,
                    EventCalendar.date, EventCalendar.time).filter(
                        EventCalendar.event_id.in_(event_ids)).all()
                events_calendar = {"event_calendar": [dict(
                    zip(event_calendar_keys, event_time))
                    for event_time in event_calendar_values]}
                review_keys = ['review_id', 'event_id',
                               'user_name', 'rating', 'comment', 'created_at']
                review_values = db.session.query(
                    Review.id, Review.event_id, User.user_name, Review.rating,
                    Review.comment, Review.date_created).filter(
                        Review.event_id.in_(event_ids),
                        Review.user_id == User.id).all()
                reviews = {"reviews": [dict(zip(review_keys, review))
                                       for review in review_values]}
                events = {'events': [ambassador_events_info,
                                     photo_gallery, events_calendar, reviews]}
                # default=str serializes dates/times the models return
                return json.dumps(events, sort_keys=True, default=str)
            # Bug fix: previously fell through and returned None here.
            return {'errors': ['Current user is not an ambassador']}, 404
        except exc.SQLAlchemyError as e:
            print(type(e))
            return {'errors': ['Cannot fetch ambassador events.'
                               ' Please try again']}, 500
    # login_required should prevent this, but be explicit instead of None.
    return {'errors': ['Unauthorized']}, 401
# Get Ambassador Additional Info
@ambassador_routes.route('/<string:ambassador_name>')
@login_required
def ambassador(ambassador_name):
    """Return the given ambassador's sensitive contact info (phone, email).

    Responds 404 when no user has that ambassador name and 500 on a database
    error.  Previously an unknown name produced ``zip(keys, None)`` — an
    uncaught TypeError (the except clause only catches SQLAlchemyError), i.e.
    an opaque 500.
    """
    try:
        sensitive_info = db.session.query(User.phone_number, User.email).filter(
            User.ambassador_name == ambassador_name).first()
        if sensitive_info is None:
            # Bug fix: unknown name used to crash with TypeError on zip(None).
            return {'errors': ['Ambassador not found']}, 404
        keys = ['phone_number', 'email']
        ambassador_dict = dict(zip(keys, sensitive_info))
        return(ambassador_dict)
    except exc.SQLAlchemyError as e:
        print(type(e))
        return {'errors': ['Cannot Get Ambassador Additional Info, Please Try again']}, 500
| 2.421875 | 2 |
dqn/dqn_agent.py | ViacheslavBobrov/ReinforcementLearning | 0 | 12767141 | import random
from collections import deque
import numpy as np
from keras.layers import Dense
from keras.models import Sequential
from keras.optimizers import Adam
class DQNAgent:
    """Double-DQN agent: an online network picks actions, a periodically
    synced target network evaluates them (van Hasselt et al., 2015)."""

    def __init__(self, state_size,
                 action_size,
                 memory_size,
                 hidden_layers_number,
                 hidden_layers_size,
                 learning_rate=0.001,
                 gamma=0.95,
                 sample_batch_size=32,
                 exploration_rate=1.0,
                 exploration_min=0.01,
                 exploration_decay=0.995):
        """Build the online and target networks and the replay buffer.

        :param state_size: dimensionality of the (flat) observation vector
        :param action_size: number of discrete actions
        :param memory_size: replay-buffer capacity (oldest entries dropped)
        :param hidden_layers_number: number of hidden Dense layers (>= 1)
        :param hidden_layers_size: units per hidden layer
        :param gamma: discount factor for future rewards
        :param exploration_*: epsilon-greedy schedule (decayed per train step)
        """
        assert hidden_layers_number > 0
        self.state_size = state_size
        self.action_size = action_size
        self.memory = deque(maxlen=memory_size)
        self.learning_rate = learning_rate
        self.gamma = gamma
        self.sample_batch_size = sample_batch_size
        self.exploration_rate = exploration_rate
        self.exploration_min = exploration_min
        self.exploration_decay = exploration_decay
        self.model = self._build_model(hidden_layers_number, hidden_layers_size)
        self.target_model = self._build_model(hidden_layers_number, hidden_layers_size)

    def _build_model(self, hidden_layers_number, hidden_layers_size):
        """Create an MLP mapping state -> per-action Q-values (linear head)."""
        model = Sequential()
        model.add(Dense(hidden_layers_size, activation='relu', input_dim=self.state_size))
        for i in range(hidden_layers_number - 1):
            model.add(Dense(hidden_layers_size, activation='relu'))
        model.add(Dense(self.action_size, activation='linear'))
        model.compile(optimizer=Adam(lr=self.learning_rate), loss='mse')
        return model

    def remember(self, state, action, reward, done, next_state):
        """Store one transition in the replay buffer."""
        self.memory.append((state, action, reward, done, next_state))

    def sync_weights(self):
        """Copy online-network weights into the target network."""
        self.target_model.set_weights(self.model.get_weights())

    def train(self):
        """Run one Double-DQN update on a random replay mini-batch."""
        if len(self.memory) < self.sample_batch_size:
            return

        batch = random.sample(self.memory, self.sample_batch_size)
        states, actions, rewards, dones, next_states = unpack_batch(batch)

        # Double DQN: the *online* network selects the best next action...
        # (bug fix: previously the target network was used for the argmax too,
        # which degenerates into plain DQN and reintroduces the maximization
        # bias Double DQN is meant to remove)
        next_action_indexes = np.argmax(self.model.predict(next_states), axis=1)
        # ...and the *target* network evaluates it.
        next_state_values_target_model = self.target_model.predict(next_states)
        batch_range = np.arange(len(states))
        next_state_values = next_state_values_target_model[batch_range,
                                                           next_action_indexes]

        # setting values to 0 for episodes that are done. Only rewards should
        # be taken into calculation in this case
        next_state_values *= 1 - dones
        targets = next_state_values * self.gamma + rewards

        # To calculate MSE based only on target (maximum) action values for
        # each state, let's make MSE for the rest action values be equal 0.
        # Predict all action values for states, then overwrite the taken
        # action's value with the Bellman target.
        expected_state_action_values = self.model.predict(states)
        expected_state_action_values[batch_range, actions] = targets

        self.model.fit(states, expected_state_action_values, epochs=1, verbose=0, batch_size=1)

        if self.exploration_rate > self.exploration_min:
            self.exploration_rate *= self.exploration_decay

    def act(self, state, test_mode=False):
        """Epsilon-greedy action; pure greedy when test_mode is True."""
        if (np.random.rand() <= self.exploration_rate) & (not test_mode):
            return random.randrange(self.action_size)
        act_values = self.model.predict(np.array(state).reshape((1, self.state_size)))
        return np.argmax(act_values[0])
def unpack_batch(batch):
    """Split a list of (state, action, reward, done, next_state) transitions
    into five batched numpy arrays.

    A ``None`` next_state (terminal transition) is replaced by the state
    itself; the zeroed ``done`` mask neutralizes it downstream anyway.
    """
    raw_states, raw_actions, raw_rewards, raw_dones, raw_next = zip(*batch)
    obs = [np.array(s, copy=False) for s in raw_states]
    next_obs = [obs[idx] if nxt is None else np.array(nxt, copy=False)
                for idx, nxt in enumerate(raw_next)]
    return np.array(obs, copy=False), np.array(raw_actions), \
        np.array(raw_rewards, dtype=np.float32), \
        np.array(raw_dones, dtype=np.uint8), np.array(next_obs, copy=False)
| 2.640625 | 3 |
api/migrations/0008_merge_20200313_1603.py | CMPUT404W20-Wed/CMPUT404-project-socialdistribution-tmp | 1 | 12767142 | <filename>api/migrations/0008_merge_20200313_1603.py
# Generated by Django 2.2.10 on 2020-03-13 16:03
from django.db import migrations
class Migration(migrations.Migration):
    # Merge migration: reconciles the two divergent 0007_* branches so the
    # migration graph has a single leaf again. Intentionally contains no
    # schema operations.

    dependencies = [
        ('api', '0007_auto_20200312_2152'),
        ('api', '0007_comment_contenttype'),
    ]

    operations = [
    ]
| 1.085938 | 1 |
greentest/test__systemerror.py | Eugeny/gevent | 2 | 12767143 | import sys
import greentest
import gevent
from gevent.hub import get_hub
def raise_(ex):
    """Helper: immediately raise the given exception (instance or class)."""
    raise ex
MSG = 'should be re-raised and caught'
class Test(greentest.TestCase):
    # Abstract base (deleted at module bottom): subclasses implement start()
    # to schedule the failing callable on the hub; each test then sleeps so
    # the hub runs it and, for system errors, re-raises into the main greenlet.

    # Let the hub propagate "fatal" errors (SystemExit etc.) instead of
    # aborting the whole test run.
    error_fatal = False

    def test_sys_exit(self):
        # SystemExit raised inside the hub must surface in the sleeping caller.
        self.start(sys.exit, MSG)

        try:
            gevent.sleep(0.001)
        except SystemExit as ex:
            assert str(ex) == MSG, repr(str(ex))
        else:
            raise AssertionError('must raise SystemExit')

    def test_keyboard_interrupt(self):
        # KeyboardInterrupt likewise propagates to the main greenlet.
        self.start(raise_, KeyboardInterrupt)

        try:
            gevent.sleep(0.001)
        except KeyboardInterrupt:
            pass
        else:
            raise AssertionError('must raise KeyboardInterrupt')

    def test_system_error(self):
        self.start(raise_, SystemError(MSG))

        try:
            gevent.sleep(0.001)
        except SystemError as ex:
            assert str(ex) == MSG, repr(str(ex))
        else:
            raise AssertionError('must raise SystemError')

    def test_exception(self):
        # Ordinary exceptions are reported by the hub but must NOT propagate.
        self.start(raise_, Exception('regular exception must not kill the program'))
        gevent.sleep(0.001)
class TestCallback(Test):
    # Variant: run the failing callable as a hub loop callback.

    def tearDown(self):
        # The callback must have fired during the test's sleep.
        assert not self.x.pending, self.x

    def start(self, *args):
        self.x = get_hub().loop.run_callback(*args)
class TestSpawn(Test):
    # Variant: run the failing callable in a spawned greenlet.

    def tearDown(self):
        # Give the greenlet a final tick, then require it to have finished.
        gevent.sleep(0.0001)
        assert self.x.dead, self.x

    def start(self, *args):
        self.x = gevent.spawn(*args)
# Remove the abstract base so the test runner does not collect and run it
# directly (only the concrete TestCallback/TestSpawn subclasses should run).
del Test

if __name__ == '__main__':
    greentest.main()
| 2.53125 | 3 |
microbepy/common/isolate.py | ScienceStacks/MicrobEPy | 1 | 12767144 | <gh_stars>1-10
"""
Utilities for manipulating isolates and "generalized" isolates (e.g., lines).
Key concepts:
line (or ancestral line) - the experimental conditions under which an incubation is done (resistance + stirring)
line replica - instances of the same experimental conditions
transfer - the transfer number from which the isolate was obtained. There is a linear relationship
between the transfer number and the generation with transfer 152 being 1,000 generations.
endpoint dilution (EPD) - a sample taken from a line replica after 1K generations
endpoint dilution ID - a numeric identifier for an EPD
isolate - a single genotype
cell - a single organism
species - each isolate is either a DVH or an MMP
clone - an integer identifier of an isolate for an endpoint dilution
Isolates are encoded as follows: LLR.TTT.PP.CC.S.EE
LLR - line replica (so R is a single digit).
TTT - transfer number, an integer up to 3 digits
PP - two digit endpoint dilution
CC - 2 digit clone number
S - 1 character species identifier ('D', 'M')
EE - 2 character experiment. CI - clonal isolate; SC - single cell
An EPD community has the format: LLR.TTT.PP
An clone pairing ID has the format: LLR.TTT.PP.CC
Wild type isolates begin with the string 'WT' and ancestral types with 'AN' followed by 'S' with '*' in the other positions.
"""
from microbepy.common import util
from microbepy.common import constants as cn
import copy
import os
import numpy as np
import pandas as pd
NONWT_ISOLATE_LENGTH = 14
WT_ISOLATE_LENGTH = 6
##################### HELPER FUNCTIONS ####################
def checkDefaults(default_values, non_default_values):
    """
    Checks that conditions hold for a set of values.
    :param list-of-str default_values:
        values that should be cn.ISOLATE_DEFAULT (or NaN)
    :param list-of-str non_default_values:
        values that should not be cn.ISOLATE_DEFAULT (nor NaN)
    :return bool: True iff both conditions hold
    """
    defaults = set([cn.ISOLATE_DEFAULT, str(np.nan)])
    all_are_default = all(str(value) in defaults
                          for value in default_values)
    none_are_default = all(str(value) not in defaults
                           for value in non_default_values)
    return all_are_default and none_are_default
##############################################
# Isolate class
##############################################
class Isolate(object):
    """Parses, validates and classifies isolate identifier strings of the
    form LLR.TTT.PP.CC.S.EE described in the module docstring.

    Bug fix: ``isSpecies`` was defined twice with equivalent bodies; the
    first definition was dead code silently shadowed by the second, so the
    duplicate has been removed.
    """

    # Description of the different types of isolates. Isolates
    # can be classified by which components are set to the
    # default values. This classification is
    # used to validate isolates when they are created and to
    # classify them.
    TYPE_DICT = {
        # Ex: HA2.152.01.01.D.CI or
        # Ex: HA2.152.01.01.D.SC
        cn.ISOLATE_CLONE: lambda s: checkDefaults(
            [],
            [s.line, s.transfer, s.epd_id, s.clone, s.species,
             s.experiment]),
        # Ex: HA2.152.01.*.*.*
        # Ex: HA2.152.01.02.*.*
        cn.ISOLATE_EPD: lambda s: checkDefaults(
            [s.species],
            [s.line, s.transfer, s.epd_id]),
        # Ex: HA2.12.*.*.*.*
        # Ex: WT.*.*.01.D.*
        # Ex: AN.*.*.01.D.*
        cn.ISOLATE_LINE: lambda s: checkDefaults(
            [s.epd_id, s.clone, s.species, s.experiment],
            [s.line, s.transfer]) \
            or (checkDefaults(
                [s.epd_id, s.transfer, s.experiment],
                [s.line, s.clone, s.species])
                and (s.line == cn.LINE_WT))
            or checkDefaults(
                [s.epd_id, s.transfer, s.experiment],
                [s.line, s.clone, s.species])
            and (s.line == cn.LINE_AN),
        # Ancestral co-culture: ANC.*.*.*.*.*
        cn.ISOLATE_ANC: lambda s: checkDefaults(
            [s.epd_id, s.transfer, s.clone, s.species, s.experiment],
            [s.line]) \
            and (s.line == cn.LINE_ANC),
        # Ancestral co-culture: AN1.*.*.*.D.*
        cn.ISOLATE_ANX: lambda s: checkDefaults(
            [s.epd_id, s.transfer, s.clone, s.experiment],
            [s.line, s.species]) \
            and (s.line in [cn.LINE_AN1, cn.LINE_AN2]),
        # Unknown isolate: *.*.*.*.*.*
        # The species and experiment may or may not be known.
        cn.ISOLATE_UNKNOWN: lambda s: checkDefaults(
            [s.line, s.epd_id, s.transfer, s.clone],
            []),
    }

    def __init__(self,
                 line=cn.ISOLATE_DEFAULT,
                 transfer=cn.ISOLATE_DEFAULT,
                 epd_id=cn.ISOLATE_DEFAULT,
                 clone=cn.ISOLATE_DEFAULT,
                 species=cn.ISOLATE_DEFAULT,
                 experiment=cn.ISOLATE_DEFAULT):
        """Validate and store each identifier component.

        :raises ValueError: on an invalid component or an inconsistent
            combination of default/non-default components
        """
        def validate(value, func, msg):
            # A default component is always acceptable; otherwise func must
            # hold for the supplied value.
            if value == cn.ISOLATE_DEFAULT:
                return
            elif func(value):
                return
            else:
                raise ValueError(msg)
        # line
        validate(line,
                 lambda x: (len(x) in [2, 3, 4]),
                 "%s is an invalid line" % line)
        self.line = line
        # transfer (int(x) raises for non-numeric strings)
        validate(transfer,
                 lambda x: isinstance(int(x), int),
                 "%s is an invalid transfer" % transfer)
        self.transfer = transfer
        # epd_id
        validate(epd_id,
                 lambda x: isinstance(int(x), int),
                 "%s is an invalid epd_id" % epd_id)
        self.epd_id = epd_id
        # clone
        validate(clone,
                 lambda x: isinstance(int(x), int),
                 "%s is an invalid clone" % clone)
        self.clone = clone
        # species
        validate(species,
                 lambda x: x in [cn.SPECIES_MIX_DVH, cn.SPECIES_MIX_MMP],
                 "%s is an invalid species" % species)
        self.species = species
        # experiment (not validated: any string accepted)
        self.experiment = experiment
        # Validate have consistent settings
        self._validateDefaultValues()

    def _validateDefaultValues(self):
        """
        Verifies the consistency of the assignment of default values to
        instance variables: exactly one TYPE_DICT predicate must match.
        :raises ValueError:
        """
        cls = self.__class__
        count = 0
        for _, f in cls.TYPE_DICT.items():
            if f(self):
                count += 1
        if count == 0:
            raise ValueError("%s does not match any isolate type" % str(self))
        elif count == 1:
            return
        else:
            raise ValueError("%s matches multiple isolate types" % str(self))

    @classmethod
    def create(cls, isolate_string):
        """
        Constructs an isolate object from an isolate string.
        :param str isolate_string:
        :return Isolate:
        :raises ValueError: string has an unsupported number of components
        """
        if util.isNull(isolate_string):
            # Null input yields an all-default (unknown) isolate
            return Isolate(
                line=cn.ISOLATE_DEFAULT,
                transfer=cn.ISOLATE_DEFAULT,
                epd_id=cn.ISOLATE_DEFAULT,
                clone=cn.ISOLATE_DEFAULT,
                species=cn.ISOLATE_DEFAULT,
                experiment=cn.ISOLATE_DEFAULT)
        elements = isolate_string.split(cn.ISOLATE_SEPARATOR)
        line = elements[0]
        transfer = cn.ISOLATE_DEFAULT
        epd_id = cn.ISOLATE_DEFAULT
        clone = cn.ISOLATE_DEFAULT
        species = cn.ISOLATE_DEFAULT
        experiment = cn.ISOLATE_DEFAULT
        # E.G. WT.D01 (species letter fused with clone number)
        if len(elements) == 2:
            species_clone = elements[1]
            species = species_clone[0]
            clone = species_clone[1:]
        # E.G., HA1.152.02.D01
        elif len(elements) == 4:
            transfer = int(elements[1])
            epd_id = elements[2]
            species = elements[3][0]
            if elements[3] == cn.ISOLATE_DEFAULT:
                clone = cn.ISOLATE_DEFAULT
                experiment = cn.ISOLATE_DEFAULT
            else:
                clone = elements[3][1:]
                experiment = cn.EXPERIMENT_CI
        # E.g., HA1.152.02.01.D.CI
        #    or AN.*.*.*.D.*
        #    or WT.*.*.01.D.*
        elif len(elements) == 6:
            transfer = elements[1]
            epd_id = elements[2]
            clone = elements[3]
            species = elements[4]
            experiment = elements[5]
        else:
            raise ValueError("%s is an invalid isolate string" % isolate_string)
        return Isolate(line=line, transfer=transfer, epd_id=epd_id,
                       clone=clone, species=species, experiment=experiment)

    def getCommunity(self):
        """
        Determines the community specified by the isolate
        :return str: key of TYPE_DICT
        """
        cls = self.__class__
        for key in list(cls.TYPE_DICT.keys()):
            if cls.TYPE_DICT[key](self):
                return key
        raise RuntimeError("%s doesn't match any of the TYPE_DICT keys"
                           % str(self))

    def getEPDCommunity(self):
        """
        Extracts the EPD Community (LLR.TTT.PP) from the isolate.
        :return str:
        """
        return "%s.%s.%s" % (self.line, self.transfer, self.epd_id)

    def getClonePairingID(self):
        """
        Extracts the clone pairing ID (LLR.TTT.PP.CC) from the isolate.
        :return str:
        """
        return "%s.%s.%s.%s" % (self.line, self.transfer, self.epd_id, self.clone)

    def __str__(self):
        """
        String representation of an isolate (LLR.TTT.PP.CC.S.EE).
        """
        return "%s.%s.%s.%s.%s.%s" % (
            self.line, str(self.transfer), self.epd_id, self.clone,
            self.species, self.experiment)

    @classmethod
    def isEPD(cls, isolate_stg):
        """
        :param str isolate_stg:
        :return bool: True if the string denotes an EPD community,
            False otherwise (including null input)
        """
        if util.isNull(isolate_stg):
            return False
        isolate = cls.create(isolate_stg)
        return isolate.getCommunity() == cn.ISOLATE_EPD

    @classmethod
    def isAN(cls, isolate_stg):
        """
        :param str isolate_stg:
        :return bool: True if the isolate belongs to an ancestral line
            (ANC/AN/AN1/AN2), False otherwise (including null input)
        :raises ValueError: not a valid isolate
        """
        if util.isNull(isolate_stg):
            return False
        isolate = cls.create(isolate_stg)
        return (isolate.line in
                [cn.LINE_ANC, cn.LINE_AN, cn.LINE_AN1, cn.LINE_AN2]
                )

    @classmethod
    def isSpecies(cls, isolate_stg, species):
        """
        Determines if the isolate is a particular species
        :param str isolate_stg:
        :param str species:
        :return bool:
        """
        if util.isNull(isolate_stg):
            return False
        isolate = cls.create(isolate_stg)
        return isolate.species == species

    @classmethod
    def isLine(cls, isolate_stg, line):
        """
        Determines if the isolate is a particular ancestral line.
        Handles case of line and combined line and line-replica.
        :param str isolate_stg:
        :param str line:
        :return bool:
        """
        if util.isNull(isolate_stg):
            return False
        isolate = cls.create(isolate_stg)
        offset = len(line)
        return isolate.line[0:offset] == line
| 2.234375 | 2 |
summary.py | bubbliiiing/facenet-keras | 39 | 12767145 | <filename>summary.py
#--------------------------------------------#
#   This script is only for inspecting the network
#   structure; it is not test code.
#--------------------------------------------#
import os
from nets.facenet import facenet
if __name__ == "__main__":
    input_shape = [160, 160, 3]
    # Number of classes = number of identity folders under ./datasets.
    model = facenet(input_shape, len(os.listdir("./datasets")), backbone="mobilenet", mode="train")
    model.summary()

    # Print layer indices and names — useful when choosing layers to freeze.
    for i, layer in enumerate(model.layers):
        print(i, layer.name)
| 2.703125 | 3 |
Leander_Stephen_D'Souza/ROS/catkin_ws/src/android/src/joystick_publisher.py | leander-dsouza/MRM-Tenure | 2 | 12767146 | <gh_stars>1-10
#!/usr/bin/env python
import rospy
import socket
import serial
from std_msgs.msg import String
ob1 = String()  # reusable ROS message published on joystick_topic

# Serial link to the microcontroller; every received byte is forwarded here.
ser = serial.Serial('/dev/ttyUSB2', 115200)

HOST = '192.168.43.21' # HOST
PORT1 = 1234
BUFFER_SIZE = 1  # Normally 1024, but I want fast response (one byte at a time)

# TCP server: the Android joystick app connects as the single client.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT1))
s.listen(1)
conn, address = s.accept()  # blocks until the app connects

# Decoder state shared with joystick_decoder(): the three 5-bit parts of
# each axis, filled one received byte at a time.
x1 = 0
x2 = 0
x3 = 0

y1 = 0
y2 = 0
y3 = 0

count = 0  # bytes consumed since the last complete (x, y) frame
def joystick_decoder(val):
    """Accumulate one encoded joystick byte; every 7th byte, rebuild the full
    (x, y) axis values from the three packed 4/5-bit parts.

    The top 3 bits of each byte select which field the low bits carry:
    000 -> gear/ha flags, 001/010/011 -> x parts, 100/101/110 -> y parts.

    :param int val: one received byte
    :return tuple-or-None: (x, y) on every 7th call, otherwise None (implicit)
    """
    global x1, x2, x3, y1, y2, y3, count
    count += 1
    if (val & 0b11100000) == 0b00000000:
        # NOTE(review): gear/ha are decoded but never stored or returned —
        # confirm whether they should be surfaced to the publisher.
        gear = val & 0b00001111
        ha = val & 0b00010000
    elif (val & 0b11100000) == 0b00100000:
        x1 = val & 0b00001111
    elif (val & 0b11100000) == 0b01000000:
        x2 = val & 0b00011111
    elif (val & 0b11100000) == 0b01100000:
        x3 = val & 0b00011111
    elif (val & 0b11100000) == 0b10000000:
        y1 = val & 0b00001111
    elif (val & 0b11100000) == 0b10100000:
        y2 = val & 0b00011111
    elif (val & 0b11100000) == 0b11000000:
        y3 = val & 0b00011111

    if count % 7 == 0:
        # Reassemble each axis as part1 | part2 | part3 (4 + 5 + 5 bits).
        x = x1
        x = (x << 5) | x2
        x = (x << 5) | x3
        y = y1
        y = (y << 5) | y2
        y = (y << 5) | y3
        count = 0
        return x, y
counter =0
def callback_joy():
global counter
while True:
data_stream = conn.recv(BUFFER_SIZE)
if ord(data_stream) == 109:
ser.write(data_stream)
continue
counter+=1
ser.write(data_stream)
if counter % 7==0:
x,y =joystick_decoder(ord(data_stream))
counter = 1
joy_values = "{} {}".format(x, y)
print(joy_values)
ob1.data = joy_values
pub.publish(ob1)
joystick_decoder(ord(data_stream))
if __name__ == '__main__':
    try:
        # disable_signals: the blocking socket loop owns Ctrl-C handling.
        rospy.init_node('Communicator', anonymous=True, disable_signals=True)
        pub = rospy.Publisher('joystick_topic', String, queue_size=10)
        rate = rospy.Rate(50)  # 50 Hz (NOTE: rate is created but never used)
        callback_joy()
    except rospy.ROSInterruptException:
        pass
| 2.578125 | 3 |
LittleBigCode/code/ppc.py | ElodieQ/EPIDEMIUM-Season-3 | 0 | 12767147 | <reponame>ElodieQ/EPIDEMIUM-Season-3
"""
Preprocessing related functions
"""
import os
import pandas as pd
from PIL import Image
from pathlib import Path
import datetime
import numpy as np
import cv2
import matplotlib.pyplot as plt
import itertools
from sklearn.model_selection import train_test_split
from functools import partial
import warnings
warnings.filterwarnings('ignore')
def _as_date(x):
""" Helper to cast DataFrame date column """
return datetime.datetime.strptime(x, "%Y-%m-%d")
def read_korl_csv(path):
    """Read the KORL csv and derive overall survival in (30-day) months.

    Adds a ``computed_os`` column: months between the biopsy date and the
    last-news date. Uses vectorized ``pd.to_datetime`` instead of the former
    row-wise ``apply`` + ``strptime`` (same values, one C-level pass).

    :param path: csv file path or file-like object accepted by pd.read_csv
    :return pandas.DataFrame:
    """
    df = pd.read_csv(path)
    last_news = pd.to_datetime(df['Date_derniere_nouvelles'], format="%Y-%m-%d")
    biopsy = pd.to_datetime(df['Date_biopsie'], format="%Y-%m-%d")
    # 30-day months, matching the project's original convention
    df['computed_os'] = (last_news - biopsy).dt.days / 30.
    return df
def _get_id(x):
""" Get patient ID from image file path """
return str(x).split(os.sep)[-1].split('_')[0]
def get_id2f(markers_dpath):
    """Find all images' paths for each patient.

    Idiom fix: the manual if/else membership insert is replaced with
    ``dict.setdefault`` and the unused ``enumerate`` index is dropped.

    :param markers_dpath: iterable of pathlib.Path marker directories
    :return dict: patient ID -> list of image path strings
    """
    id2f = {}
    for dpath in markers_dpath:
        for path in dpath.iterdir():
            id2f.setdefault(_get_id(path), []).append(str(path))
    return id2f
def get_all_combinations(fpaths):
    """
    Produce all possible combinations of images for each patient, following
    the rule of 1 image per marker for each patient.

    Each returned combination is a sorted 6-tuple containing exactly one
    image from each of markers 1..6. (Idiom fix: the skip-flag loop is
    replaced by a direct set-equality check, and results are added to a set
    directly instead of a list that is deduplicated afterwards.)

    :param fpaths: iterable of image path strings ('.../markerN/<id>_x.png')
    :return set-of-tuple:
    """
    required_markers = set(range(1, 7))
    subsets = set()
    for subset in itertools.combinations(fpaths, 6):
        # marker number is the digits between 'marker' and the next separator
        markers = set(int(p.split('marker')[1].split(os.sep)[0])
                      for p in subset)
        if markers == required_markers:
            subsets.add(tuple(sorted(subset)))
    return subsets
def prepare_target(x):
    """Encode the OS (overall survival, months) into 3 categories:
    0 for <= 24, 1 for <= 72, 2 otherwise."""
    bounds = (24, 72)
    for label, bound in enumerate(bounds):
        if x <= bound:
            return label
    return 2
def prepare_dataset(db_path, id2f, is_train=True):
    """
    Read a KORL csv file and produce the dataset: one sample contains 1 image
    of each marker for each patient. The dataset contains all combinations
    for each patient.

    Parameters
    --------
    db_path: str
        Path of the KORL csv file
    id2f: dict
        Patient ID to list of images' paths dictionary
    is_train: bool
        Whether we expect a target column or not

    Returns
    --------
    df_full: pandas DataFrame
        Dataset
    """
    # Read csv
    df = read_korl_csv(db_path)
    ids = set(df['Patient_ID'].values.tolist())
    if is_train:
        # Patient ID -> overall survival (months), attached per sample below.
        id2os = {k: v for k, v in df[['Patient_ID', 'OS']].values.tolist()}
    else:
        # NOTE(review): hard-coded correction of a known bad Patient_ID in the
        # first row of the test csv — confirm it is still needed if the source
        # file is ever fixed upstream.
        df.iloc[0,0] = "905e61" # Error in data
    # Get usable dataframe
    df_full = pd.DataFrame()
    for patient, fpaths in id2f.items():
        if patient not in ids:
            # Skip patients that have images but no clinical record.
            continue
        combinations = get_all_combinations(fpaths)
        cur_df = pd.DataFrame([[patient] + list(tup) for tup in combinations],
                              columns=['patient']+[f'marker{i}' for i in range(1,7)])
        df_full = pd.concat([df_full, cur_df], axis=0).reset_index(drop=True)
    if is_train:
        df_full['OS'] = df_full['patient'].apply(lambda x: id2os[x])
        df_full['target'] = df_full['OS'].apply(prepare_target)
    return df_full
def _split_train_val(df, test_size=.3):
    """
    Split the training dataframe into actual training and validation.
    Splitting based on patient ID so all of a patient's samples land in
    exactly one of the two sets.

    Bug fix: the ``test_size`` parameter was previously ignored — the call
    hard-coded ``test_size=.3`` — so callers could not control the split.

    Parameters
    --------
    df: pandas DataFrame
        Training data (must have a 'patient' column)
    test_size: float [0., 1.]
        Part of training patients (not samples !) to use as validation

    Returns
    --------
    df_train: pandas DataFrame
        Training data
    df_val: pandas DataFrame
        Validation data
    """
    id_train, id_val = train_test_split(df['patient'].unique(),
                                        test_size=test_size,
                                        random_state=42)
    df_train = df[df['patient'].isin(id_train)].reset_index(drop=True)
    df_val = df[df['patient'].isin(id_val)].reset_index(drop=True)
    return df_train, df_val
def get_train_val_test_dfs(val_size=.3):
    """
    Gather the training and test data without loading images + create a
    validation set based on the training data.

    Parameters
    --------
    val_size: float [0., 1.]
        Part of training patients (not samples !) to use as validation

    Returns
    --------
    df_train: pandas DataFrame
        Training data
    df_val: pandas DataFrame
        Validation data
    df_test: pandas DataFrame
        Test data
    """
    # Constants: everything lives under ../data relative to this file's cwd
    data_path = Path('.').resolve().parents[0].joinpath('data')
    train_db_path = str(data_path.joinpath('KORL_avatar_train.csv'))
    test_db_path = str(data_path.joinpath('KORL_avatar_test_X.csv'))
    markers_dpath = [data_path.joinpath(f'marker{i}') for i in range(1, 7)]
    #
    id2f = get_id2f(markers_dpath)
    df_train = prepare_dataset(train_db_path, id2f, is_train=True)
    df_train, df_val = _split_train_val(df_train, test_size=val_size)
    df_test = prepare_dataset(test_db_path, id2f, is_train=False)
    return df_train, df_val, df_test
def red_count_preprocess(df, red_thresh=50):
    """
    Produce a dataframe of size N x 6, where N is the number samples and 6 is
    the 6 different markers. Each value is the percentage of red pixels in
    each image.

    Parameters
    --------
    df: pandas DataFrame
        Dataset with unloaded images, contains the images' paths for each sample
    red_thresh: int [0,255]
        Value above which the pixel is considered red

    Returns
    --------
    df : pandas Dataframe
        Datframe with 6 columns ( 'marker_1', ..., 'marker_6)
    """
    # Cache per-image results: images shared by several samples are read once.
    img2red = {}
    # Function for each row
    def _df_to_img(row):
        img = []
        for i in range(1, 7):
            fpath = row[f"marker{i}"]
            if fpath in img2red:
                img.append(img2red[fpath])
            else:
                # NOTE(review): cv2.imread returns BGR, so channel 0 is BLUE,
                # not red — confirm whether channel 2 was intended before
                # changing it (it would alter downstream model features).
                tmp = cv2.imread(row[f"marker{i}"])[:,:,0]
                # Binarize: 1 where the channel exceeds the threshold.
                tmp[tmp[:,:]<red_thresh] = 0
                tmp[tmp[:,:]>0] = 1
                # NOTE(review): assumes every image is 1872x1404 pixels —
                # TODO confirm, or divide by tmp.size instead.
                res = np.sum(tmp) / (1404*1872)
                img.append(res)
                img2red[fpath] = res
        return img
    X = np.array(df.apply(_df_to_img, axis=1).values.tolist())
    df = pd.DataFrame( X, columns = ['marker_{}'.format(i) for i in range(1, 7)], index = df['patient'])
    return df
def preprocess_KORL (features, db_path ) :
    """
    Produce a dataframe of size N_patient x features, where N is the number
    of patients in the clinical data.

    Parameters
    --------
    features: list
        List of columns of the clinical data to keep
    db_path : string
        Path of the clinical data csv

    Returns
    --------
    df : pandas dataframe
        Dataframe with len(features) columns, indexed by Patient_ID
    """
    #Read and preprocess data
    df = pd.read_csv(db_path)
    df = df.set_index("Patient_ID")
    # Collapse nodal sub-stages '2a'/'2b'/'2c' into plain 2 so N is numeric.
    # NOTE(review): the character class [a,b,c] also matches a literal comma
    # — harmless here, but [abc] is what is presumably meant; confirm.
    df['N'] = df['N'].replace(to_replace=r'^2[a,b,c]', value='2', regex=True).astype(int)
    # Bucket age into decades (e.g. 63 -> 6).
    df['Age_diag'] = round(df['Age_diag']/10).astype(int)
    return df[features]
def full_preprocess(features, db_path, df, red_thresh= 50 ) :
    """
    Produce a dataframe of size N x (6 + len(features)), where N is the
    number of samples: the 6 per-marker red-pixel percentages joined (inner,
    on patient ID) with the selected clinical-data columns.

    Parameters
    --------
    features: list
        List of columns of the clinical data to keep
    db_path : string
        Path of the clinical csv data
    df: pandas DataFrame
        Dataset with unloaded images, contains the images' paths for each sample
    red_thresh: int [0,255]
        Value above which the pixel is considered red

    Returns
    --------
    df_final : pandas dataframe
        Dataframe with the 6 columns ('marker_1', ..., 'marker_6') and the
        features columns from the clinical data
    """
    df_images = red_count_preprocess(df, red_thresh)
    df_clinical = preprocess_KORL(features, db_path)
    # Inner join: keeps only patients present in both image and clinical data.
    df_final = pd.merge(df_images, df_clinical, left_index= True, right_index= True, how = 'inner')
    return df_final
test/registry/protocol_v2.py | kwestpharedhat/quay | 0 | 12767148 | <gh_stars>0
import hashlib
import json
from typing import Dict
from enum import Enum, unique
from image.docker.schema1 import (
DockerSchema1ManifestBuilder,
DockerSchema1Manifest,
DOCKER_SCHEMA1_CONTENT_TYPES,
)
from image.docker.schema2 import DOCKER_SCHEMA2_CONTENT_TYPES
from image.docker.schema2.manifest import DockerSchema2ManifestBuilder
from image.docker.schema2.config import DockerSchema2Config
from image.oci import OCI_CONTENT_TYPES
from image.oci.manifest import OCIManifestBuilder
from image.oci.config import OCIConfig
from image.shared.schemas import (
parse_manifest_from_bytes,
is_manifest_list_type,
MANIFEST_LIST_TYPES,
)
from test.registry.protocols import (
RegistryProtocol,
Failures,
ProtocolOptions,
PushResult,
PullResult,
)
from util.bytes import Bytes
@unique
class V2ProtocolSteps(Enum):
    """
    Defines the various steps of the protocol, for matching failures.
    """

    # Values are stable identifiers used as keys in V2Protocol.FAILURE_CODES.
    AUTH = "auth"
    BLOB_HEAD_CHECK = "blob-head-check"
    GET_MANIFEST = "get-manifest"
    GET_MANIFEST_LIST = "get-manifest-list"
    PUT_MANIFEST = "put-manifest"
    PUT_MANIFEST_LIST = "put-manifest-list"
    MOUNT_BLOB = "mount-blob"
    CATALOG = "catalog"
    LIST_TAGS = "list-tags"
    START_UPLOAD = "start-upload"
    GET_BLOB = "get-blob"
class V2Protocol(RegistryProtocol):
    """
    Implementation of the Docker Registry HTTP API V2 conversation used by
    the registry integration tests.

    ``FAILURE_CODES`` maps each protocol step to the HTTP status code the
    registry is expected to return when a given failure mode occurs at that
    step; steps/failures absent from the mapping are expected to succeed.
    """

    FAILURE_CODES: Dict[Enum, Dict[Failures, int]] = {
        V2ProtocolSteps.AUTH: {
            Failures.UNAUTHENTICATED: 401,
            Failures.INVALID_AUTHENTICATION: 401,
            Failures.INVALID_REGISTRY: 400,
            Failures.APP_REPOSITORY: 405,
            Failures.ANONYMOUS_NOT_ALLOWED: 401,
            Failures.INVALID_REPOSITORY: 400,
            Failures.SLASH_REPOSITORY: 400,
            Failures.NAMESPACE_DISABLED: 405,
        },
        V2ProtocolSteps.MOUNT_BLOB: {
            Failures.UNAUTHORIZED_FOR_MOUNT: 202,
            Failures.READONLY_REGISTRY: 405,
        },
        V2ProtocolSteps.GET_MANIFEST: {
            Failures.UNKNOWN_TAG: 404,
            Failures.UNAUTHORIZED: 401,
            Failures.DISALLOWED_LIBRARY_NAMESPACE: 400,
            Failures.ANONYMOUS_NOT_ALLOWED: 401,
        },
        V2ProtocolSteps.GET_BLOB: {
            Failures.GEO_BLOCKED: 403,
        },
        V2ProtocolSteps.BLOB_HEAD_CHECK: {
            Failures.DISALLOWED_LIBRARY_NAMESPACE: 400,
        },
        V2ProtocolSteps.START_UPLOAD: {
            Failures.DISALLOWED_LIBRARY_NAMESPACE: 400,
            # NOTE: a duplicated `Failures.READ_ONLY: 401` entry was removed
            # here; the key appeared twice with the same value.
            Failures.READ_ONLY: 401,
            Failures.MIRROR_ONLY: 401,
            Failures.MIRROR_MISCONFIGURED: 401,
            Failures.MIRROR_ROBOT_MISSING: 401,
            Failures.READONLY_REGISTRY: 405,
        },
        V2ProtocolSteps.PUT_MANIFEST: {
            Failures.DISALLOWED_LIBRARY_NAMESPACE: 400,
            Failures.MISSING_TAG: 404,
            Failures.INVALID_TAG: 404,
            Failures.INVALID_IMAGES: 400,
            Failures.INVALID_BLOB: 400,
            Failures.UNSUPPORTED_CONTENT_TYPE: 415,
            Failures.READ_ONLY: 401,
            Failures.MIRROR_ONLY: 401,
            Failures.MIRROR_MISCONFIGURED: 401,
            Failures.MIRROR_ROBOT_MISSING: 401,
            Failures.READONLY_REGISTRY: 405,
            Failures.INVALID_MANIFEST: 400,
        },
        V2ProtocolSteps.PUT_MANIFEST_LIST: {
            Failures.INVALID_MANIFEST_IN_LIST: 400,
            Failures.READ_ONLY: 401,
            Failures.MIRROR_ONLY: 401,
            Failures.MIRROR_MISCONFIGURED: 401,
            Failures.MIRROR_ROBOT_MISSING: 401,
            Failures.READONLY_REGISTRY: 405,
        },
    }

    def __init__(self, jwk, schema="schema1"):
        # `jwk`: signing key used when building schema 1 manifests.
        # `schema`: which manifest format to build ("schema1", "schema2" or "oci").
        self.jwk = jwk
        self.schema = schema
def ping(self, session):
result = session.get("/v2/")
assert result.status_code == 401
assert result.headers["Docker-Distribution-API-Version"] == "registry/2.0"
def login(self, session, username, password, scopes, expect_success):
scopes = scopes if isinstance(scopes, list) else [scopes]
params = {
"account": username,
"service": "localhost:5000",
"scope": scopes,
}
auth = (username, password)
if not username or not password:
auth = None
response = session.get("/v2/auth", params=params, auth=auth)
if expect_success:
assert response.status_code // 100 == 2
else:
assert response.status_code // 100 == 4
return response
def auth(self, session, credentials, namespace, repo_name, scopes=None, expected_failure=None):
"""
Performs the V2 Auth flow, returning the token (if any) and the response.
Spec: https://docs.docker.com/registry/spec/auth/token/
"""
scopes = scopes or []
auth = None
username = None
if credentials is not None:
username, _ = credentials
auth = credentials
params = {
"account": username,
"service": "localhost:5000",
}
if scopes:
params["scope"] = scopes
response = self.conduct(
session,
"GET",
"/v2/auth",
params=params,
auth=auth,
expected_status=(200, expected_failure, V2ProtocolSteps.AUTH),
)
expect_token = expected_failure is None or not V2Protocol.FAILURE_CODES[
V2ProtocolSteps.AUTH
].get(expected_failure)
if expect_token:
assert response.json().get("token") is not None
return response.json().get("token"), response
return None, response
    def pull_list(
        self,
        session,
        namespace,
        repo_name,
        tag_names,
        manifestlist,
        credentials=None,
        expected_failure=None,
        options=None,
    ):
        """
        Pull each tag as a manifest list and verify it matches `manifestlist`,
        then pull and verify every child manifest it references.

        Returns None on an expected failure; otherwise the assertions below
        are the test outcome.
        """
        options = options or ProtocolOptions()
        scopes = options.scopes or [
            "repository:%s:push,pull" % self.repo_name(namespace, repo_name)
        ]
        tag_names = [tag_names] if isinstance(tag_names, str) else tag_names
        # Ping!
        self.ping(session)
        # Perform auth and retrieve a token.
        token, _ = self.auth(
            session,
            credentials,
            namespace,
            repo_name,
            scopes=scopes,
            expected_failure=expected_failure,
        )
        if token is None:
            # Auth may only fail if the expected failure maps to the AUTH step.
            assert V2Protocol.FAILURE_CODES[V2ProtocolSteps.AUTH].get(expected_failure)
            return
        headers = {
            "Authorization": "Bearer " + token,
            "Accept": ",".join(MANIFEST_LIST_TYPES),
        }
        for tag_name in tag_names:
            # Retrieve the manifest for the tag or digest.
            response = self.conduct(
                session,
                "GET",
                "/v2/%s/manifests/%s" % (self.repo_name(namespace, repo_name), tag_name),
                expected_status=(200, expected_failure, V2ProtocolSteps.GET_MANIFEST_LIST),
                headers=headers,
            )
            if expected_failure is not None:
                return None
            # Parse the returned manifest list and ensure it matches.
            ct = response.headers["Content-Type"]
            assert is_manifest_list_type(ct), "Expected list type, found: %s" % ct
            retrieved = parse_manifest_from_bytes(Bytes.for_string_or_unicode(response.text), ct)
            assert retrieved.schema_version == 2
            assert retrieved.is_manifest_list
            assert retrieved.digest == manifestlist.digest
            # Pull each of the manifests inside and ensure they can be retrieved.
            for manifest_digest in retrieved.child_manifest_digests():
                response = self.conduct(
                    session,
                    "GET",
                    "/v2/%s/manifests/%s" % (self.repo_name(namespace, repo_name), manifest_digest),
                    expected_status=(200, expected_failure, V2ProtocolSteps.GET_MANIFEST),
                    headers=headers,
                )
                if expected_failure is not None:
                    return None
                ct = response.headers["Content-Type"]
                manifest = parse_manifest_from_bytes(Bytes.for_string_or_unicode(response.text), ct)
                assert not manifest.is_manifest_list
                assert manifest.digest == manifest_digest
    def push_list(
        self,
        session,
        namespace,
        repo_name,
        tag_names,
        manifestlist,
        manifests,
        blobs,
        credentials=None,
        expected_failure=None,
        options=None,
    ):
        """
        Push `blobs`, then each child manifest in `manifests` (by digest), and
        finally `manifestlist` under each tag in `tag_names`.

        Returns a PushResult (manifests is None for list pushes) on success,
        or None when an expected failure aborts the flow early.
        """
        options = options or ProtocolOptions()
        scopes = options.scopes or [
            "repository:%s:push,pull" % self.repo_name(namespace, repo_name)
        ]
        tag_names = [tag_names] if isinstance(tag_names, str) else tag_names
        # Ping!
        self.ping(session)
        # Perform auth and retrieve a token.
        token, _ = self.auth(
            session,
            credentials,
            namespace,
            repo_name,
            scopes=scopes,
            expected_failure=expected_failure,
        )
        if token is None:
            # Auth may only fail if the expected failure maps to the AUTH step.
            assert V2Protocol.FAILURE_CODES[V2ProtocolSteps.AUTH].get(expected_failure)
            return
        headers = {
            "Authorization": "Bearer " + token,
            "Accept": ",".join(options.accept_mimetypes)
            if options.accept_mimetypes is not None
            else "*/*",
        }
        # Push all blobs.
        if not self._push_blobs(
            blobs, session, namespace, repo_name, headers, options, expected_failure
        ):
            return
        # Push the individual manifests.
        for manifest in manifests:
            manifest_headers = {"Content-Type": manifest.media_type}
            manifest_headers.update(headers)
            self.conduct(
                session,
                "PUT",
                "/v2/%s/manifests/%s" % (self.repo_name(namespace, repo_name), manifest.digest),
                data=manifest.bytes.as_encoded_str(),
                expected_status=(201, expected_failure, V2ProtocolSteps.PUT_MANIFEST),
                headers=manifest_headers,
            )
        # Push the manifest list.
        for tag_name in tag_names:
            manifest_headers = {"Content-Type": manifestlist.media_type}
            manifest_headers.update(headers)
            if options.manifest_content_type is not None:
                manifest_headers["Content-Type"] = options.manifest_content_type
            self.conduct(
                session,
                "PUT",
                "/v2/%s/manifests/%s" % (self.repo_name(namespace, repo_name), tag_name),
                data=manifestlist.bytes.as_encoded_str(),
                expected_status=(201, expected_failure, V2ProtocolSteps.PUT_MANIFEST_LIST),
                headers=manifest_headers,
            )
        return PushResult(manifests=None, headers=headers)
def build_oci(self, images, blobs, options):
builder = OCIManifestBuilder()
for image in images:
checksum = "sha256:" + hashlib.sha256(image.bytes).hexdigest()
if image.urls is None:
blobs[checksum] = image.bytes
# If invalid blob references were requested, just make it up.
if options.manifest_invalid_blob_references:
checksum = "sha256:" + hashlib.sha256(b"notarealthing").hexdigest()
if not image.is_empty:
builder.add_layer(checksum, len(image.bytes), urls=image.urls)
def history_for_image(image):
history = {
"created": "2018-04-03T18:37:09.284840891Z",
"created_by": (
("/bin/sh -c #(nop) ENTRYPOINT %s" % image.config["Entrypoint"])
if image.config and image.config.get("Entrypoint")
else "/bin/sh -c #(nop) %s" % image.id
),
}
if image.is_empty:
history["empty_layer"] = True
return history
config = {
"os": "linux",
"architecture": "amd64",
"rootfs": {"type": "layers", "diff_ids": []},
"history": [history_for_image(image) for image in images],
}
if images[-1].config:
config["config"] = images[-1].config
config_json = json.dumps(config, ensure_ascii=options.ensure_ascii)
oci_config = OCIConfig(Bytes.for_string_or_unicode(config_json))
builder.set_config(oci_config)
blobs[oci_config.digest] = oci_config.bytes.as_encoded_str()
return builder.build(ensure_ascii=options.ensure_ascii)
    def build_schema2(self, images, blobs, options):
        """
        Build a Docker schema 2 manifest from `images`, registering the layer
        and config blobs into `blobs` (digest -> bytes) as a side effect.

        Returns the built manifest object.
        """
        builder = DockerSchema2ManifestBuilder()
        for image in images:
            checksum = "sha256:" + hashlib.sha256(image.bytes).hexdigest()
            if image.urls is None:
                blobs[checksum] = image.bytes
            # If invalid blob references were requested, just make it up.
            if options.manifest_invalid_blob_references:
                checksum = "sha256:" + hashlib.sha256(b"notarealthing").hexdigest()
            if not image.is_empty:
                builder.add_layer(checksum, len(image.bytes), urls=image.urls)
        def history_for_image(image):
            # Synthesize a plausible history entry for the image config.
            history = {
                "created": "2018-04-03T18:37:09.284840891Z",
                "created_by": (
                    ("/bin/sh -c #(nop) ENTRYPOINT %s" % image.config["Entrypoint"])
                    if image.config and image.config.get("Entrypoint")
                    else "/bin/sh -c #(nop) %s" % image.id
                ),
            }
            if image.is_empty:
                history["empty_layer"] = True
            return history
        config = {
            "os": "linux",
            "rootfs": {"type": "layers", "diff_ids": []},
            "history": [history_for_image(image) for image in images],
        }
        if options.with_broken_manifest_config:
            # NOTE: We are missing the history entry on purpose.
            config = {
                "os": "linux",
                "rootfs": {"type": "layers", "diff_ids": []},
            }
        if images and images[-1].config:
            config["config"] = images[-1].config
        config_json = json.dumps(config, ensure_ascii=options.ensure_ascii)
        schema2_config = DockerSchema2Config(
            Bytes.for_string_or_unicode(config_json),
            skip_validation_for_testing=options.with_broken_manifest_config,
        )
        builder.set_config(schema2_config)
        blobs[schema2_config.digest] = schema2_config.bytes.as_encoded_str()
        return builder.build(ensure_ascii=options.ensure_ascii)
def build_schema1(self, namespace, repo_name, tag_name, images, blobs, options, arch="amd64"):
builder = DockerSchema1ManifestBuilder(namespace, repo_name, tag_name, arch)
for image in reversed(images):
assert image.urls is None
checksum = "sha256:" + hashlib.sha256(image.bytes).hexdigest()
blobs[checksum] = image.bytes
# If invalid blob references were requested, just make it up.
if options.manifest_invalid_blob_references:
checksum = "sha256:" + hashlib.sha256(b"notarealthing").hexdigest()
layer_dict = {"id": image.id, "parent": image.parent_id}
if image.config is not None:
layer_dict["config"] = image.config
if image.size is not None:
layer_dict["Size"] = image.size
if image.created is not None:
layer_dict["created"] = image.created
builder.add_layer(checksum, json.dumps(layer_dict, ensure_ascii=options.ensure_ascii))
# Build the manifest.
built = builder.build(self.jwk, ensure_ascii=options.ensure_ascii)
# Validate it before we send it.
DockerSchema1Manifest(built.bytes)
return built
    def push(
        self,
        session,
        namespace,
        repo_name,
        tag_names,
        images,
        credentials=None,
        expected_failure=None,
        options=None,
    ):
        """
        Build a manifest for `images` (per the configured schema), push its
        blobs, and write the manifest under each tag in `tag_names`.

        Returns a PushResult on success, or None when an expected failure
        aborts the flow early.
        """
        options = options or ProtocolOptions()
        scopes = options.scopes or [
            "repository:%s:push,pull" % self.repo_name(namespace, repo_name)
        ]
        tag_names = [tag_names] if isinstance(tag_names, str) else tag_names
        # Ping!
        self.ping(session)
        # Perform auth and retrieve a token.
        token, _ = self.auth(
            session,
            credentials,
            namespace,
            repo_name,
            scopes=scopes,
            expected_failure=expected_failure,
        )
        if token is None:
            # Auth may only fail if the expected failure maps to the AUTH step.
            assert V2Protocol.FAILURE_CODES[V2ProtocolSteps.AUTH].get(expected_failure)
            return
        headers = {
            "Authorization": "Bearer " + token,
            "Accept": ",".join(options.accept_mimetypes)
            if options.accept_mimetypes is not None
            else "*/*",
        }
        # Build fake manifests.
        manifests = {}
        blobs = {}
        for tag_name in tag_names:
            if self.schema == "oci":
                manifests[tag_name] = self.build_oci(images, blobs, options)
            elif self.schema == "schema2":
                manifests[tag_name] = self.build_schema2(images, blobs, options)
            elif self.schema == "schema1":
                manifests[tag_name] = self.build_schema1(
                    namespace, repo_name, tag_name, images, blobs, options
                )
            else:
                raise NotImplementedError(self.schema)
        # Push the blob data.
        if not self._push_blobs(
            blobs, session, namespace, repo_name, headers, options, expected_failure
        ):
            return
        # Write a manifest for each tag.
        for tag_name in tag_names:
            manifest = manifests[tag_name]
            # Write the manifest. If we expect it to be invalid, we expect a 404 code. Otherwise, we
            # expect a 201 response for success.
            put_code = 404 if options.manifest_invalid_blob_references else 201
            manifest_headers = {"Content-Type": manifest.media_type}
            manifest_headers.update(headers)
            if options.manifest_content_type is not None:
                manifest_headers["Content-Type"] = options.manifest_content_type
            tag_or_digest = tag_name if not options.push_by_manifest_digest else manifest.digest
            self.conduct(
                session,
                "PUT",
                "/v2/%s/manifests/%s" % (self.repo_name(namespace, repo_name), tag_or_digest),
                data=manifest.bytes.as_encoded_str(),
                expected_status=(put_code, expected_failure, V2ProtocolSteps.PUT_MANIFEST),
                headers=manifest_headers,
            )
        return PushResult(manifests=manifests, headers=headers)
    def _push_blobs(self, blobs, session, namespace, repo_name, headers, options, expected_failure):
        """
        Upload every blob in `blobs` (digest -> bytes) to the repository.

        Depending on `options`, exercises HEAD existence checks,
        cross-repository mounts, monolithic and chunked uploads, and upload
        cancellation. Returns True when all blobs were pushed and verified;
        False (or None on an expected mount failure) otherwise.
        """
        for blob_digest, blob_bytes in blobs.items():
            if not options.skip_head_checks:
                # Blob data should not yet exist.
                self.conduct(
                    session,
                    "HEAD",
                    "/v2/%s/blobs/%s" % (self.repo_name(namespace, repo_name), blob_digest),
                    expected_status=(404, expected_failure, V2ProtocolSteps.BLOB_HEAD_CHECK),
                    headers=headers,
                )
            # Check for mounting of blobs.
            if options.mount_blobs and blob_digest in options.mount_blobs:
                self.conduct(
                    session,
                    "POST",
                    "/v2/%s/blobs/uploads/" % self.repo_name(namespace, repo_name),
                    params={
                        "mount": blob_digest,
                        "from": options.mount_blobs[blob_digest],
                    },
                    expected_status=(201, expected_failure, V2ProtocolSteps.MOUNT_BLOB),
                    headers=headers,
                )
                if expected_failure is not None:
                    return
            else:
                # Start a new upload of the blob data.
                response = self.conduct(
                    session,
                    "POST",
                    "/v2/%s/blobs/uploads/" % self.repo_name(namespace, repo_name),
                    expected_status=(202, expected_failure, V2ProtocolSteps.START_UPLOAD),
                    headers=headers,
                )
                if response.status_code != 202:
                    continue
                upload_uuid = response.headers["Docker-Upload-UUID"]
                new_upload_location = response.headers["Location"]
                assert new_upload_location.startswith("http://localhost:5000")
                # We need to make this relative just for the tests because the live server test
                # case modifies the port.
                location = response.headers["Location"][len("http://localhost:5000") :]
                # PATCH the data into the blob.
                if options.chunks_for_upload is None:
                    # Monolithic upload: a single PATCH with the full payload.
                    self.conduct(
                        session,
                        "PATCH",
                        location,
                        data=blob_bytes,
                        expected_status=202,
                        headers=headers,
                    )
                else:
                    # If chunked upload is requested, upload the data as a series of chunks, checking
                    # status at every point.
                    for chunk_data in options.chunks_for_upload:
                        # Chunks are (start, end) or (start, end, expected_code).
                        if len(chunk_data) == 3:
                            (start_byte, end_byte, expected_code) = chunk_data
                        else:
                            (start_byte, end_byte) = chunk_data
                            expected_code = 202
                        patch_headers = {"Content-Range": "%s-%s" % (start_byte, end_byte)}
                        patch_headers.update(headers)
                        contents_chunk = blob_bytes[start_byte:end_byte]
                        assert len(contents_chunk) == (end_byte - start_byte), "%s vs %s" % (
                            len(contents_chunk),
                            end_byte - start_byte,
                        )
                        self.conduct(
                            session,
                            "PATCH",
                            location,
                            data=contents_chunk,
                            expected_status=expected_code,
                            headers=patch_headers,
                        )
                        if expected_code != 202:
                            return False
                        # Retrieve the upload status at each point, and ensure it is valid.
                        status_url = "/v2/%s/blobs/uploads/%s" % (
                            self.repo_name(namespace, repo_name),
                            upload_uuid,
                        )
                        response = self.conduct(
                            session, "GET", status_url, expected_status=204, headers=headers
                        )
                        assert response.headers["Docker-Upload-UUID"] == upload_uuid
                        assert response.headers["Range"] == "bytes=0-%s" % end_byte, "%s vs %s" % (
                            response.headers["Range"],
                            "bytes=0-%s" % end_byte,
                        )
                if options.cancel_blob_upload:
                    self.conduct(
                        session,
                        "DELETE",
                        location,
                        params=dict(digest=blob_digest),
                        expected_status=204,
                        headers=headers,
                    )
                    # Ensure the upload was canceled.
                    status_url = "/v2/%s/blobs/uploads/%s" % (
                        self.repo_name(namespace, repo_name),
                        upload_uuid,
                    )
                    self.conduct(session, "GET", status_url, expected_status=404, headers=headers)
                    return False
                # Finish the blob upload with a PUT.
                response = self.conduct(
                    session,
                    "PUT",
                    location,
                    params=dict(digest=blob_digest),
                    expected_status=201,
                    headers=headers,
                )
                assert response.headers["Docker-Content-Digest"] == blob_digest
                # Ensure the blob exists now.
                response = self.conduct(
                    session,
                    "HEAD",
                    "/v2/%s/blobs/%s" % (self.repo_name(namespace, repo_name), blob_digest),
                    expected_status=200,
                    headers=headers,
                )
                assert response.headers["Docker-Content-Digest"] == blob_digest
                assert response.headers["Content-Length"] == str(len(blob_bytes))
                # And retrieve the blob data.
                if not options.skip_blob_push_checks:
                    result = self.conduct(
                        session,
                        "GET",
                        "/v2/%s/blobs/%s" % (self.repo_name(namespace, repo_name), blob_digest),
                        headers=headers,
                        expected_status=200,
                    )
                    assert result.content == blob_bytes
        return True
def delete(
self,
session,
namespace,
repo_name,
tag_names,
credentials=None,
expected_failure=None,
options=None,
):
options = options or ProtocolOptions()
scopes = options.scopes or ["repository:%s:*" % self.repo_name(namespace, repo_name)]
tag_names = [tag_names] if isinstance(tag_names, str) else tag_names
# Ping!
self.ping(session)
# Perform auth and retrieve a token.
token, _ = self.auth(
session,
credentials,
namespace,
repo_name,
scopes=scopes,
expected_failure=expected_failure,
)
if token is None:
return None
headers = {
"Authorization": "Bearer " + token,
}
for tag_name in tag_names:
self.conduct(
session,
"DELETE",
"/v2/%s/manifests/%s" % (self.repo_name(namespace, repo_name), tag_name),
headers=headers,
expected_status=202,
)
    def pull(
        self,
        session,
        namespace,
        repo_name,
        tag_names,
        images,
        credentials=None,
        expected_failure=None,
        options=None,
    ):
        """
        Pull each tag, validating the returned manifest against the configured
        schema and verifying every layer blob against `images`.

        Returns a PullResult with the parsed manifests (and, for schema 1
        manifests, the leaf V1 image ids), or None on an expected failure.
        """
        options = options or ProtocolOptions()
        scopes = options.scopes or ["repository:%s:pull" % self.repo_name(namespace, repo_name)]
        tag_names = [tag_names] if isinstance(tag_names, str) else tag_names
        # Ping!
        self.ping(session)
        # Perform auth and retrieve a token.
        token, _ = self.auth(
            session,
            credentials,
            namespace,
            repo_name,
            scopes=scopes,
            expected_failure=expected_failure,
        )
        if token is None and not options.attempt_pull_without_token:
            return None
        headers = {}
        if token:
            headers = {
                "Authorization": "Bearer " + token,
            }
        # Advertise the mimetypes matching the configured schema (schema1 has
        # no Accept header set here).
        if self.schema == "oci":
            headers["Accept"] = ",".join(
                options.accept_mimetypes
                if options.accept_mimetypes is not None
                else OCI_CONTENT_TYPES
            )
        elif self.schema == "schema2":
            headers["Accept"] = ",".join(
                options.accept_mimetypes
                if options.accept_mimetypes is not None
                else DOCKER_SCHEMA2_CONTENT_TYPES
            )
        manifests = {}
        image_ids = {}
        for tag_name in tag_names:
            # Retrieve the manifest for the tag or digest.
            response = self.conduct(
                session,
                "GET",
                "/v2/%s/manifests/%s" % (self.repo_name(namespace, repo_name), tag_name),
                expected_status=(200, expected_failure, V2ProtocolSteps.GET_MANIFEST),
                headers=headers,
            )
            if response.status_code == 401:
                assert "WWW-Authenticate" in response.headers
            response.encoding = "utf-8"
            if expected_failure is not None:
                return None
            # Ensure the manifest returned by us is valid.
            ct = response.headers["Content-Type"]
            if self.schema == "schema1":
                assert ct in DOCKER_SCHEMA1_CONTENT_TYPES
            if options.require_matching_manifest_type:
                if self.schema == "schema1":
                    assert ct in DOCKER_SCHEMA1_CONTENT_TYPES
                if self.schema == "schema2":
                    assert ct in DOCKER_SCHEMA2_CONTENT_TYPES
                if self.schema == "oci":
                    assert ct in OCI_CONTENT_TYPES
            manifest = parse_manifest_from_bytes(Bytes.for_string_or_unicode(response.text), ct)
            manifests[tag_name] = manifest
            if manifest.schema_version == 1:
                image_ids[tag_name] = manifest.leaf_layer_v1_image_id
            # Verify the blobs.
            layer_index = 0
            empty_count = 0
            blob_digests = list(manifest.blob_digests)
            for image in images:
                if manifest.schema_version == 2 and image.is_empty:
                    empty_count += 1
                    continue
                # If the layer is remote, then we expect the blob to *not* exist in the system.
                blob_digest = blob_digests[layer_index]
                expected_status = 404 if image.urls else 200
                result = self.conduct(
                    session,
                    "GET",
                    "/v2/%s/blobs/%s" % (self.repo_name(namespace, repo_name), blob_digest),
                    expected_status=(expected_status, expected_failure, V2ProtocolSteps.GET_BLOB),
                    headers=headers,
                    options=options,
                )
                if expected_status == 200:
                    assert result.content == image.bytes
                layer_index += 1
            assert (len(blob_digests) + empty_count) >= len(
                images
            )  # OCI/Schema 2 has 1 extra for config
        return PullResult(manifests=manifests, image_ids=image_ids)
def tags(
self,
session,
namespace,
repo_name,
page_size=2,
credentials=None,
options=None,
expected_failure=None,
):
options = options or ProtocolOptions()
scopes = options.scopes or ["repository:%s:pull" % self.repo_name(namespace, repo_name)]
# Ping!
self.ping(session)
# Perform auth and retrieve a token.
headers = {}
if credentials is not None:
token, _ = self.auth(
session,
credentials,
namespace,
repo_name,
scopes=scopes,
expected_failure=expected_failure,
)
if token is None:
return None
headers = {
"Authorization": "Bearer " + token,
}
results = []
url = "/v2/%s/tags/list" % (self.repo_name(namespace, repo_name))
params = {}
if page_size is not None:
params["n"] = page_size
while True:
response = self.conduct(
session,
"GET",
url,
headers=headers,
params=params,
expected_status=(200, expected_failure, V2ProtocolSteps.LIST_TAGS),
)
data = response.json()
assert len(data["tags"]) <= page_size
results.extend(data["tags"])
if not response.headers.get("Link"):
return results
link_url = response.headers["Link"]
v2_index = link_url.find("/v2/")
url = link_url[v2_index:]
return results
def catalog(
self,
session,
page_size=2,
credentials=None,
options=None,
expected_failure=None,
namespace=None,
repo_name=None,
bearer_token=None,
):
options = options or ProtocolOptions()
scopes = options.scopes or []
# Ping!
self.ping(session)
# Perform auth and retrieve a token.
headers = {}
if credentials is not None:
token, _ = self.auth(
session,
credentials,
namespace,
repo_name,
scopes=scopes,
expected_failure=expected_failure,
)
if token is None:
return None
headers = {
"Authorization": "Bearer " + token,
}
if bearer_token is not None:
headers = {
"Authorization": "Bearer " + bearer_token,
}
results = []
url = "/v2/_catalog"
params = {}
if page_size is not None:
params["n"] = page_size
while True:
response = self.conduct(
session,
"GET",
url,
headers=headers,
params=params,
expected_status=(200, expected_failure, V2ProtocolSteps.CATALOG),
)
data = response.json()
assert len(data["repositories"]) <= page_size
results.extend(data["repositories"])
if not response.headers.get("Link"):
return results
link_url = response.headers["Link"]
v2_index = link_url.find("/v2/")
url = link_url[v2_index:]
return results
| 2.125 | 2 |
stino/pyarduino/arduino_params_file.py | huangxuantao/MyStino | 2 | 12767149 | <reponame>huangxuantao/MyStino<gh_stars>1-10
#!/usr/bin/env python
#-*- coding: utf-8 -*-
# 1. Copyright
# 2. Lisence
# 3. Author
"""
Documents
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from . import base
def get_key_value(line):
    """Split a ``key=value`` line into a stripped ``(key, value)`` pair.

    Only the first ``=`` acts as the separator; lines without an ``=``
    yield a pair of empty strings.
    """
    key, separator, value = line.partition('=')
    if not separator:
        return ('', '')
    return (key.strip(), value.strip())
class ParamsFile(base.abs_file.File):
    """A ``key=value`` parameters file, parsed eagerly on construction."""

    def __init__(self, path):
        super(ParamsFile, self).__init__(path)
        self.param_pairs = []
        self.load_param_pairs()

    def get_params(self):
        """Return the parameters as a dict (later duplicate keys win)."""
        return dict(self.param_pairs)

    def load_param_pairs(self):
        """Read the file and collect (key, value) pairs, skipping blanks and '#' comments."""
        for raw_line in self.read().split('\n'):
            stripped = raw_line.strip()
            if stripped and not stripped.startswith('#'):
                self.param_pairs.append(get_key_value(stripped))
| 2.609375 | 3 |
tests/test_utils.py | jwsiegel2510/ESPEI | 0 | 12767150 | """
Test espei.utils classes and functions.
"""
import pickle
from tinydb import where
from espei.utils import ImmediateClient, PickleableTinyDB, MemoryStorage, \
flexible_open_string, add_bibtex_to_bib_database, bib_marker_map
from .fixtures import datasets_db, tmp_file
from .testing_data import CU_MG_TDB
# Fixture text shared by the flexible_open_string tests below; the exact
# content is irrelevant, only that it spans multiple lines.
MULTILINE_HIPSTER_IPSUM = """Lorem ipsum dolor amet wayfarers kale chips chillwave
adaptogen schlitz lo-fi jianbing ennui occupy pabst health goth chicharrones.
Glossier enamel pin pitchfork PBR&B ennui. Actually small batch marfa edison
bulb poutine, chicharrones neutra swag farm-to-table lyft meggings mixtape
pork belly. DIY iceland schlitz YOLO, four loko pok pok single-origin coffee
normcore. Shabby chic helvetica mustache taxidermy tattooed kombucha cliche
gastropub gentrify ramps hexagon waistcoat authentic snackwave."""
def test_immediate_client_returns_map_results_directly():
    """Calls ImmediateClient.map should return the results, instead of Futures."""
    from distributed import LocalCluster
    cli = ImmediateClient(LocalCluster(n_workers=1))
    try:
        num_list = range(0, 11)

        def square(x):
            return x**2

        map_result = cli.map(square, num_list)
        assert map_result == [square(x) for x in num_list]
    finally:
        # Shut down the client so its local cluster does not leak workers
        # into other tests.
        cli.close()
def test_pickelable_tinydb_can_be_pickled_and_unpickled():
    """PickleableTinyDB should be able to be pickled and unpickled."""
    record = {'test_key': ['test', 'values']}
    db = PickleableTinyDB(storage=MemoryStorage)
    db.insert(record)
    # Round-trip the database through pickle and verify the record survives.
    db = pickle.loads(pickle.dumps(db))
    assert db.search(where('test_key').exists())[0] == record
def test_flexible_open_string_raw_string():
    """Raw multiline strings should be directly returned by flexible_open_string."""
    # A plain multi-line string is passed through unchanged.
    assert flexible_open_string(MULTILINE_HIPSTER_IPSUM) == MULTILINE_HIPSTER_IPSUM
def test_flexible_open_string_file_like(tmp_file):
    """File-like objects support read methods should have their content returned by flexible_open_string."""
    fname = tmp_file(MULTILINE_HIPSTER_IPSUM)
    with open(fname) as file_obj:
        content = flexible_open_string(file_obj)
    assert content == MULTILINE_HIPSTER_IPSUM
def test_flexible_open_string_path_like(tmp_file):
    """Path-like strings should be opened, read and returned"""
    path = tmp_file(MULTILINE_HIPSTER_IPSUM)
    # A single-line string is treated as a filename and read from disk.
    assert flexible_open_string(path) == MULTILINE_HIPSTER_IPSUM
def test_adding_bibtex_entries_to_bibliography_db(datasets_db):
    """Adding a BibTeX entries to a database works and the database can be searched."""
    # Raw BibTeX source containing two entries (an article and a PhD thesis);
    # author names are anonymized placeholders.
    TEST_BIBTEX = """@article{Roe1952gamma,
author = {<NAME>. and <NAME>.},
journal = {Trans. Am. Soc. Met.},
keywords = {Fe-Cr,Fe-Ti,Fe-Ti-Cr},
pages = {1030--1041},
title = {{Gamma Loop Studies in the Fe-Ti, Fe-Cr, and Fe-Ti-Cr Systems}},
volume = {44},
year = {1952}
}
@phdthesis{shin2007thesis,
author = {<NAME>},
keywords = {Al-Cu,Al-Cu-Mg,Al-Cu-Si,Al-Mg,Al-Mg-Si,Al-Si,Cu-Mg,Mg-Si,SQS},
number = {May},
school = {The Pennsylvania State University},
title = {{Thermodynamic properties of solid solutions from special quasirandom structures and CALPHAD modeling: Application to aluminum-copper-magnesium-silicon and hafnium-silicon-oxygen}},
year = {2007}
}"""
    db = add_bibtex_to_bib_database(TEST_BIBTEX, datasets_db)
    # The article entry is retrievable by its citation key...
    search_res = db.search(where('ID') == 'Roe1952gamma')
    assert len(search_res) == 1
    # ...and both entries made it into the database.
    assert len(db.all()) == 2
def test_bib_marker_map():
    """bib_marker_map should return a proper dict"""
    expected = {
        'bocklund2018': {
            'formatted': 'bocklund2018',
            'markers': {'fillstyle': 'none', 'marker': 'o'}
        },
        'otis2016': {
            'formatted': 'otis2016',
            'markers': {'fillstyle': 'none', 'marker': 'v'}
        }
    }
    # Compare the full mapping produced for the two reference keys.
    assert bib_marker_map(['otis2016', 'bocklund2018']) == expected
| 2.296875 | 2 |