text stringlengths 8 6.05M |
|---|
# Smoke-test the consus client: a key starts absent, is written, then read back.
import consus

client = consus.Client()

# The key does not exist yet.
txn = client.begin_transaction()
assert txn.get('the table', 'the key') is None
txn.commit()

# Write the value.
txn = client.begin_transaction()
assert txn.put('the table', 'the key', 'the value')
txn.commit()

# Read it back in a fresh transaction.
txn = client.begin_transaction()
assert txn.get('the table', 'the key') == 'the value'
txn.commit()
|
class RecentState:
    '''Maintains an on-disk state file used for marking news posts as old.

    Each line of the file has the form ``<id>_<entry>`` where <id> is a
    provider id (with '_' replaced by '-') and <entry> is a post id.
    '''
    def __init__(self, path):
        '''\
        Returns a new RecentState instance that uses the given <path>
        path -> the path of the state file
        It will be read on demand.'''
        # Path of the backing state file; it may not exist yet.
        self.path = path
        # Mapping of provider id -> list of post ids already marked old.
        self.values = {}
    def read(self):
        '''Read the state file and store all ids in self.values.

        ROBUSTNESS FIX: a missing state file is now treated as an empty
        state instead of raising, so the first add()/check() on a fresh
        path works.'''
        self.values = {}
        try:
            f = open(self.path, 'rb')
        except FileNotFoundError:
            return
        with f:
            for line in f:
                # Skip lines too short to contain "<id>_<entry>\n".
                if len(line) < 3:
                    continue
                parts = line[:-1].split(b'_')
                if len(parts) < 2:
                    continue
                key = parts[0].decode('utf-8')
                # The entry itself may contain '_', so re-join the remainder.
                entry = b'_'.join(parts[1:]).decode('utf-8')
                self.values.setdefault(key, [])
                if entry not in self.values[key]:
                    self.values[key].append(entry)
    def write(self):
        '''Write all ids in self.values to the file.'''
        # 'with' guarantees the file is closed even if a write fails.
        with open(self.path, 'wb') as f:
            for key, entries in self.values.items():
                for entry in entries:
                    f.write(('%s_%s\n' % (key, entry)).encode('utf-8'))
    def add(self, id, entry):
        '''\
        Mark a id as old
        id -> the provider's id
        entry -> the post's id
        The file will be parsed before to ensure consistency'''
        # '_' is the field separator in the file format, so it must not
        # appear inside the provider id.
        id = id.replace('_', '-')
        self.read()
        self.values.setdefault(id, [])
        if entry not in self.values[id]:
            self.values[id].append(entry)
        self.write()
    def check(self, id, entry):
        '''\
        Indicates whether the id is marked as old
        id -> the provider's id
        entry -> the post's id
        The file is parsed each time check() is called.'''
        id = id.replace('_', '-')
        self.read()
        return id in self.values and entry in self.values[id]
|
import pandas as pd

# Count the rows per outcome class in the Pima Indians diabetes dataset.
CSV_PATH = 'pima-indians-diabetes.data.csv'
frame = pd.read_csv(CSV_PATH)
class_counts = frame.groupby("class").size()
print(class_counts)
# coding: utf-8
__author__ = 'flyingpang'
"""Create at 2017.02.27"""
import time
import uuid
import datetime
import requests
from polyv.conf import APP_ID, APP_SECRET, USER_ID, MAX_VIEWER
from polyv.exceptions import RequestException, MissingParameterException
from polyv.utils import make_sign
# Create a live-streaming channel.
def create_channel(password=None, player_color='#666666', auto_play=1):
    """Create a live channel via the Polyv web API.

    :param password: channel password; a random 6-char one is generated if omitted.
    :param player_color: player control-bar color, defaults to #666666.
    :param auto_play: autoplay flag, 0/1, defaults to 1.
    :return: the 'result' payload of the API response (JSON).
    :raises RequestException: if the API reports a non-success status.
    """
    url = 'http://api.live.polyv.net/web/v1/channels/'
    app_id = APP_ID
    app_secret = APP_SECRET
    timestamp = int(time.time())
    # Random suffix keeps auto-generated channel names unique.
    name = 'channel_name' + str(uuid.uuid4())[:8]
    if not password:
        password = str(uuid.uuid4())[:6]
    user_id = USER_ID
    # Sign string: all request parameters concatenated in alphabetical key
    # order, wrapped by the secret; the order must match the server's check.
    str1 = "{app_secret}appId{app_id}autoPlay{autoplay}channelPasswd{password}name{name}playerColor{player_color}timestamp{timestamp}userId{user_id}{app_secret}".format(app_secret=app_secret, app_id=app_id, autoplay=auto_play, password=password, name=name, player_color=player_color, timestamp=timestamp, user_id=user_id)
    params = dict()
    params.update({
        'appId': app_id,
        'autoPlay': auto_play,
        'name': name,
        'playerColor': player_color,
        'timestamp': timestamp,
        'userId': user_id,
        'channelPasswd': password,
        'sign': make_sign(str1)
    })
    response = requests.post(url, params=params)
    if response.json()['status'] != 'success':
        raise RequestException(response.text)
    return response.json()['result']
# Delete a live channel.
def delete_channel(channel_id=None):
    """
    :param channel_id: channel ID used to sign in to the live system.
    :return: True if the deletion succeeded (HTTP 200), otherwise False.
    :raises MissingParameterException: if channel_id is not given.
    :raises RequestException: if the API reports a non-success status.
    """
    if not channel_id:
        raise MissingParameterException("missing channel_id parameter.")
    url = 'http://api.live.polyv.net/v1/channels/{channel_id}'.format(channel_id=channel_id)
    app_id = APP_ID
    app_secret = APP_SECRET
    timestamp = int(time.time())
    user_id = USER_ID
    # Sign string: parameters in alphabetical key order, wrapped by the secret.
    str1 = '{app_secret}appId{app_id}timestamp{timestamp}userId{user_id}{app_secret}'.format(app_secret=app_secret, app_id=app_id, timestamp=timestamp, user_id=user_id)
    params = dict()
    params.update({
        'appId': app_id,
        'timestamp': timestamp,
        'userId': user_id,
        'sign': make_sign(str1)
    })
    response = requests.delete(url, params=params)
    if response.json()['status'] != 'success':
        raise RequestException(response.text)
    return response.status_code == 200
# Query live-channel information.
def get_channel(channel_id=None):
    """Fetch the channel info for *channel_id* from the Polyv API.

    :param channel_id: channel ID used to sign in to the live system.
    :return: the 'result' payload with the channel's information.
    :raises MissingParameterException: if channel_id is not given.
    :raises RequestException: if the API reports a non-success status.
    """
    if not channel_id:
        raise MissingParameterException("missing channel_id parameter.")
    app_id = APP_ID
    app_secret = APP_SECRET
    timestamp = int(time.time())
    user_id = USER_ID
    # Sign string: parameters in alphabetical key order, wrapped by the secret.
    str1 = "{app_secret}appId{app_id}timestamp{timestamp}userId{user_id}{app_secret}".format(
        app_secret=app_secret, app_id=app_id, timestamp=timestamp, user_id=user_id)
    # BUG FIX: removed a dead 'url = ....format' line (the bound method was
    # assigned, never called) and repaired the mojibake '×tamp=' back to
    # '&timestamp=' so the query-string parameter name is valid.
    url = ('http://api.live.polyv.net/v1/channels/{channel_id}'
           '?appId={app_id}&timestamp={timestamp}&userId={user_id}&sign={sign}').format(
        channel_id=channel_id, app_id=app_id, timestamp=timestamp,
        user_id=user_id, sign=make_sign(str1))
    response = requests.get(url)
    if response.json()['status'] != 'success':
        raise RequestException(response.text)
    return response.json()['result']
# Query whether a channel is currently broadcasting.
def get_channel_live(channel_id=None):
    """Return True if the channel's stream is currently live, else False.

    :param channel_id: channel ID used to sign in to the live system.
    :raises MissingParameterException: if channel_id is not given.
    :raises RequestException: on a non-200 HTTP response.
    """
    if not channel_id:
        raise MissingParameterException("missing channel_id parameter.")
    url = "http://api.live.polyv.net/live_status/query"
    stream = get_channel(channel_id).get('stream')
    # BUG FIX: send the stream name as query parameters — a body on a GET
    # request ('data=') is ignored by most servers.
    response = requests.get(url, params={'stream': stream})
    if response.status_code != 200:
        raise RequestException(response.text)
    # BUG FIX: the original compared the Response *object* to 'live', which
    # is always False; compare the response text instead.
    # NOTE(review): confirm the API returns the bare string 'live' with no
    # surrounding whitespace.
    return response.text == 'live'
# Real-time viewer counts.
def get_channel_realtime_watch_num(channel_id=None):
    """Fetch real-time viewer statistics (15 samples, one per 8 seconds).

    :param channel_id: channel ID used to sign in to the live system.
    :return: the 'result' payload of the API response.
    :raises MissingParameterException: if channel_id is not given.
    :raises RequestException: if the API reports a non-success status.
    """
    if not channel_id:
        raise MissingParameterException("missing channel_id parameter.")
    app_id = APP_ID
    app_secret = APP_SECRET
    timestamp = int(time.time())
    user_id = USER_ID
    # Sign string: parameters in alphabetical key order, wrapped by the secret.
    str1 = "{app_secret}appId{app_id}timestamp{timestamp}userId{user_id}{app_secret}".format(
        app_secret=app_secret, app_id=app_id, timestamp=timestamp, user_id=user_id)
    # BUG FIX: mojibake '×tamp=' restored to '&timestamp='.
    url = ("http://api.live.polyv.net/v1/statistics/{channel_id}/realtime"
           "?appId={app_id}&timestamp={timestamp}&userId={user_id}&sign={sign}").format(
        channel_id=channel_id, app_id=app_id, timestamp=timestamp,
        user_id=user_id, sign=make_sign(str1))
    response = requests.get(url)
    if response.json()['status'] != 'success':
        raise RequestException(response.text)
    return response.json()['result']
# Set the maximum number of concurrent live viewers.
def set_max_viewer(channel_id=None, max_viewer=MAX_VIEWER):
    """Set the channel's maximum concurrent-viewer limit.

    :param channel_id: channel ID used to sign in to the live system.
    :param max_viewer: maximum number of concurrent viewers.
    :return: True if the API reported success.
    :raises MissingParameterException: if channel_id or max_viewer is missing.
    :raises RequestException: if the API reports a non-success status.
    """
    if not channel_id:
        raise MissingParameterException("missing channel_id parameter.")
    if not max_viewer:
        raise MissingParameterException("missing max_viewer parameter.")
    app_id = APP_ID
    app_secret = APP_SECRET
    timestamp = int(time.time())
    user_id = USER_ID
    # Sign string: parameters in alphabetical key order, wrapped by the secret.
    str1 = ("{app_secret}appId{app_id}maxViewer{max_viewer}timestamp{timestamp}"
            "userId{user_id}{app_secret}").format(
        app_secret=app_secret, app_id=app_id, max_viewer=max_viewer,
        timestamp=timestamp, user_id=user_id)
    # BUG FIX: mojibake '×tamp=' restored to '&timestamp='.
    url = ('http://api.live.polyv.net/v1/restrict/{channel_id}/update'
           '?appId={app_id}&timestamp={timestamp}&userId={user_id}'
           '&maxViewer={max_viewer}&sign={sign}').format(
        channel_id=channel_id, app_id=app_id, timestamp=timestamp,
        user_id=user_id, max_viewer=max_viewer, sign=make_sign(str1))
    response = requests.post(url)
    if response.json()['status'] != 'success':
        raise RequestException(response.text)
    return response.json()['status'] == 'success'
# Cut off (ban) a live channel's stream.
def cutoff_channel(channel_id=None):
    """
    :param channel_id: channel ID used to sign in to the live system.
    :return: True if the cutoff succeeded, otherwise False.
    :raises MissingParameterException: if channel_id is not given.
    """
    if not channel_id:
        raise MissingParameterException("missing channel_id parameter.")
    url = "http://api.live.polyv.net/v1/stream/{channel_id}/cutoff".format(channel_id=channel_id)
    app_id = APP_ID
    app_secret = APP_SECRET
    timestamp = int(time.time())
    user_id = USER_ID
    # Sign string: parameters in alphabetical key order, wrapped by the secret.
    str1 = "{app_secret}appId{app_id}timestamp{timestamp}userId{user_id}{app_secret}".format(app_secret=app_secret, app_id=app_id, timestamp=timestamp, user_id=user_id)
    data = dict()
    data.update({
        'appId': app_id,
        'timestamp': timestamp,
        'userId': user_id,
        'sign': make_sign(str1)
    })
    response = requests.post(url, data=data)
    # Unlike the other calls, failure returns False instead of raising.
    return response.json()['status'] == 'success'
# List the recorded files of a live channel.
def get_record_files(channel_id=None, start_date=None, end_date=None):
    """Fetch the channel's recorded files for a date range.

    :param channel_id: channel ID used to sign in to the live system.
    :param start_date: range start ('YYYY-MM-DD'); defaults to today.
    :param end_date: range end ('YYYY-MM-DD'); defaults to today.
    :return: the 'result' payload of the API response.
    :raises MissingParameterException: if channel_id is not given.
    :raises RequestException: if the API reports a non-success status.
    """
    if not channel_id:
        raise MissingParameterException("missing channel_id parameter.")
    app_id = APP_ID
    app_secret = APP_SECRET
    timestamp = int(time.time())
    user_id = USER_ID
    if not start_date:
        start_date = datetime.datetime.today().strftime('%Y-%m-%d')
    if not end_date:
        end_date = datetime.datetime.today().strftime('%Y-%m-%d')
    # Sign string: parameters in alphabetical key order (endDate < startDate),
    # wrapped by the secret.
    str1 = ("{app_secret}appId{app_id}endDate{end_date}startDate{start_date}"
            "timestamp{timestamp}userId{user_id}{app_secret}").format(
        app_secret=app_secret, app_id=app_id, start_date=start_date,
        end_date=end_date, timestamp=timestamp, user_id=user_id)
    # BUG FIX: mojibake '×tamp=' restored to '&timestamp='.
    url = ("http://api.live.polyv.net/v1/channels/{channel_id}/recordFiles"
           "?appId={app_id}&endDate={end_date}&startDate={start_date}"
           "&timestamp={timestamp}&userId={user_id}&sign={sign}").format(
        channel_id=channel_id, app_id=app_id, start_date=start_date,
        end_date=end_date, timestamp=timestamp, user_id=user_id,
        sign=make_sign(str1))
    response = requests.get(url)
    if response.json()['status'] != 'success':
        raise RequestException(response.text)
    return response.json()['result']
# List the account's channel ids.
def get_channels_list():
    """Fetch the list of channel ids for the configured user.

    :return: the 'result' payload (list of channel ids).
    :raises RequestException: if the API reports a non-success status.
    """
    app_id = APP_ID
    app_secret = APP_SECRET
    timestamp = int(time.time())
    user_id = USER_ID
    # Sign string covers appId and timestamp only; userId travels in the path.
    str1 = "{app_secret}appId{app_id}timestamp{timestamp}{app_secret}".format(
        app_secret=app_secret, app_id=app_id, timestamp=timestamp)
    # BUG FIX: mojibake '×tamp=' restored to '&timestamp='.
    url = ("http://api.live.polyv.net/v1/users/{user_id}/channels"
           "?appId={app_id}&timestamp={timestamp}&sign={sign}").format(
        user_id=user_id, app_id=app_id, timestamp=timestamp, sign=make_sign(str1))
    response = requests.get(url)
    if response.json()['status'] != 'success':
        raise RequestException(response.text)
    return response.json()['result']
# Rename a channel.
def update_channel_name(channel_id=None, name=None):
    """Change the channel's display name.

    :param channel_id: channel ID used to sign in to the live system.
    :param name: the new channel name.
    :return: True if the API reported success, otherwise False.
    :raises MissingParameterException: if channel_id or name is missing.
    """
    if not channel_id:
        raise MissingParameterException("missing channel_id parameter.")
    if not name:
        raise MissingParameterException("missing name parameter.")
    app_id = APP_ID
    app_secret = APP_SECRET
    # The v2 API expects a millisecond timestamp.
    timestamp = int(round(time.time() * 1000))
    # Sign string: parameters in alphabetical key order, wrapped by the secret.
    str1 = "{app_secret}appId{app_id}name{name}timestamp{timestamp}{app_secret}".format(
        app_secret=app_secret, app_id=app_id, name=name, timestamp=timestamp)
    # BUG FIX: mojibake '×tamp=' restored to '&timestamp='.
    url = ("http://api.live.polyv.net/v2/channels/{channel_id}/update"
           "?appId={app_id}&timestamp={timestamp}&name={name}&sign={sign}").format(
        channel_id=channel_id, app_id=app_id, timestamp=timestamp,
        name=name, sign=make_sign(str1))
    response = requests.post(url)
    return response.json()['status'] == 'success'
# Change a channel's password.
def update_channel_password(channel_id=None, password=None):
    """Change the channel's password.

    :param channel_id: channel ID used to sign in to the live system.
    :param password: the new password.
    :return: True if the API reported success, otherwise False.
    :raises MissingParameterException: if channel_id or password is missing.
    """
    if not channel_id:
        raise MissingParameterException("missing channel_id parameter.")
    if not password:
        raise MissingParameterException("missing password parameter.")
    app_id = APP_ID
    app_secret = APP_SECRET
    # The v2 API expects a millisecond timestamp.
    timestamp = int(round(time.time() * 1000))
    user_id = USER_ID
    # Sign string: parameters in alphabetical key order, wrapped by the secret.
    str1 = ("{app_secret}appId{app_id}channelId{channel_id}passwd{password}"
            "timestamp{timestamp}{app_secret}").format(
        app_secret=app_secret, app_id=app_id, password=password,
        channel_id=channel_id, timestamp=timestamp)
    # BUG FIX: mojibake '×tamp=' restored to '&timestamp='.
    url = ("http://api.live.polyv.net/v2/channels/{user_id}/passwdSetting"
           "?appId={app_id}&timestamp={timestamp}&channelId={channel_id}"
           "&passwd={password}&sign={sign}").format(
        user_id=user_id, app_id=app_id, timestamp=timestamp,
        channel_id=channel_id, password=password, sign=make_sign(str1))
    response = requests.post(url)
    return response.json()['status'] == "success"
# Chat-room watch token.
def watch_token():
    """Fetch a chat-room watch token.

    See http://dev.polyv.net/2016/09/gettoken/
    :return: the token string, stripped of surrounding whitespace.
    """
    timestamp = int(time.time())
    str1 = '{timestamp}polyvsign'.format(timestamp=timestamp)
    url = "http://api.live.polyv.net/watchtoken/gettoken"
    params = {
        'ts': timestamp,
        'sign': make_sign(str1).lower()
    }
    return requests.get(url, params=params).text.strip()

if __name__ == "__main__":
    # BUG FIX: 'print watch_token()' was Python 2 syntax — a SyntaxError
    # under Python 3, which the rest of this module targets.
    print(watch_token())
|
import sys
def usage():
    """Print usage information and terminate the script (raises SystemExit)."""
    print("Usage: python operations.py <number1> <number2>")
    print("Example:")
    print("\tpython operations.py 10 3")
    # sys.exit raises SystemExit just like quit(), but does not depend on
    # the optional 'site' module that defines quit().
    sys.exit()
if len(sys.argv) < 3:
    usage()
elif len(sys.argv) > 3:
    print("InputError: too many arguments\n")
    usage()
else:
    try:
        nb1 = int(sys.argv[1])
        nb2 = int(sys.argv[2])
    except ValueError:
        # BUG FIX: the bare 'except:' also swallowed SystemExit and
        # KeyboardInterrupt; int() raises ValueError on bad input.
        print("InputError: only numbers\n")
        usage()
    print("Sum:\t\t{}".format(nb1 + nb2))
    print("Difference:\t{}".format(nb1 - nb2))
    print("Product:\t{}".format(nb1 * nb2))
    try:
        print("Quotient:\t{}".format(float(nb1) / float(nb2)))
        print("Remainder:\t{}".format(nb1 % nb2))
    except ZeroDivisionError:
        # Division and modulo by zero are reported, not fatal.
        print("Quotient:\tERROR (div by zero)")
        print("Remainder:\tERROR (modulo by zero)")
|
import random
import string
from faker import Faker
def get_random_low_string(ln: int = 16, with_digits: bool = False):
    """Return a random lowercase string of length *ln*, optionally including digits."""
    alphabet = string.ascii_lowercase
    if with_digits:
        alphabet = string.ascii_lowercase + string.digits
    return ''.join(random.choice(alphabet) for _ in range(ln))
def get_random_int(a: int = 1, b: int = 2):
    """Return a random integer N with a <= N <= b."""
    return random.randrange(a, b + 1)
def get_random_float(a: int = 1, b: int = 2):
    """Return a random float in [a, b], rounded to two decimal places."""
    value = random.uniform(a, b)
    return round(value, 2)
def get_random_phone_number():
    """Return a random 10-digit phone number as an int."""
    return random.randint(10 ** 9, 10 ** 10 - 1)
def get_random_stock_name():
    """Return a random stock name, e.g. 'Stock-a1b2c'."""
    return 'Stock-' + get_random_low_string(5, with_digits=True)

def get_random_login():
    """Return a random login prefixed with 'Test-'."""
    return 'Test-' + Faker().user_name()

def get_random_branch_name():
    """Return a random branch (location) name."""
    return 'Location-' + get_random_low_string(7, with_digits=True)

def get_random_item_name():
    """Return a random item name."""
    return 'Item-' + get_random_low_string(8)

def get_random_spare_part():
    """Return a random spare-part name."""
    return 'Spare-' + get_random_low_string(8)

def get_random_service_name():
    """Return a random service name."""
    return 'Service-' + get_random_low_string(8)

def get_random_cashbox_name():
    """Return a random cashbox name."""
    return 'box_' + Faker().user_name()

def get_random_address():
    """Return a random Russian street address."""
    return Faker('ru_RU').street_address()

def get_random_client_name():
    """Return a random Russian full name, male or female at random."""
    sex = get_random_int(0, 1)
    fake = Faker("ru_RU")
    if sex:
        return f'{fake.first_name_male()} {fake.last_name_male()}'
    return f'{fake.first_name_female()} {fake.last_name_female()}'

def get_random_orders_type_name():
    """Return a random Russian word used as an order-type name."""
    return Faker('ru_RU').word()

def get_random_goods_type():
    """Return a random goods-type name, prefixed with the Russian word for 'new'."""
    return f'Новый {Faker("ru_RU").word()}'

def get_random_first_name():
    """Return a random first name."""
    return Faker().first_name()

def get_random_last_name():
    """Return a random last name."""
    return Faker().last_name()

def get_random_email():
    """Return a random lowercase e-mail address built from fake names."""
    fake = Faker()
    first = fake.random.choice([fake.first_name_male(), fake.first_name_female()])
    last = fake.random.choice([fake.last_name_male(), fake.last_name_female()])
    middle = random.choice(string.ascii_letters).lower()
    return "{0}{1}{2}@mail.com".format(first, middle, last).lower()
def get_random_serial_numbers(sn_cnt=5):
    """Return a dict whose 'serials' key holds *sn_cnt* random 4-char serial numbers."""
    serials = [get_random_low_string(4, True) for _ in range(sn_cnt)]
    return {'serials': serials}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket
def echo(sock):
    """Echo every chunk received on *sock* back to the peer until EOF, then close it."""
    try:
        while True:
            chunk = sock.recv(1024)  # blocks until data or EOF
            if not chunk:
                break  # peer closed its sending side
            sock.sendall(chunk)  # blocks until the whole chunk is sent
    finally:
        # Always release the socket, even if recv/sendall raised.
        sock.close()
def serve(addr):
    """Accept connections on *addr* forever, echoing each client in turn.

    NOTE: clients are handled sequentially — a second client blocks until
    the current one disconnects.
    """
    sock = socket.socket()
    # IMPROVEMENT: without SO_REUSEADDR a quick restart fails with
    # 'Address already in use' while the old socket sits in TIME_WAIT.
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind(addr)
    sock.listen(50)
    while True:
        conn, _ = sock.accept()  # blocks until a client connects
        echo(conn)               # echo() closes the connection when done

if __name__ == '__main__':
    serve(('0.0.0.0', 4000))
|
import pydicom
import cv2
import os
from tqdm import tqdm

# Convert the RSNA pneumonia-challenge DICOM images to JPEG files.
path = '/mnt/data/rsna-pneumonia-detection-challenge/stage_2_train_images/'
out_path = '/mnt/data/rsna-pneumonia-detection-challenge/stage_2_train_images_jpg/'

# IMPROVEMENT: create the output directory once, before the loop — the
# original re-checked os.path.exists on every iteration.
os.makedirs(out_path, exist_ok=True)

path_list = os.listdir(path)
for pa in tqdm(path_list):
    ds = pydicom.read_file(os.path.join(path, pa))  # read the .dcm file
    img = ds.pixel_array  # extract the pixel data
    # splitext keeps everything before the final extension (robust to
    # filenames containing extra dots, unlike split('.')[0]).
    cv2.imwrite(os.path.join(out_path, os.path.splitext(pa)[0] + '.jpg'), img)
|
"""
This will be the server code.
"""
|
import util
def ac_dist(instance,kNN,k):
    # Average chaining distance used for the connectivity-based outlier
    # factor: each of the k nearest neighbours contributes its euclidean
    # distance weighted by (k+1-i), where i is its rank in kNN.
    # NOTE(review): the last line computes (k*k+k)/acdist*2, i.e.
    # 2*(k^2+k)/sum — the *reciprocal* of the usual normalisation
    # 2*sum/(k^2+k); confirm this inversion is intentional. It also
    # divides by zero when all neighbour distances are 0.
    acdist=0
    for i in range(len(kNN)):
        acdist+=util.distance_euclidean(instance,kNN[i])*(k+1-i)
    acdist=(k*k+k)/acdist*2
    return acdist
def outlier_factors(instances,k):
    """Compute the outlier factor for every instance in *instances*.

    Return: list of factors, parallel to *instances*.
    """
    factors = []
    for instance in instances:
        # k_distance is unused here; only the neighbour list matters.
        k_distance, kNN = util.k_nearest_neighbors(instances, instance, k)
        factors.append(ac_dist(instance, kNN, k))
    return factors
def outlier_factors_withkNNdic(instances,kNNdict,k):
    """Compute the outlier factor for every instance, using precomputed neighbours.

    kNNdict maps an instance's index to the indices of its k nearest
    neighbours. Return: list of factors, parallel to *instances*.
    """
    factors = []
    for idx, instance in enumerate(instances):
        # Resolve neighbour indices back to the actual instances.
        kNN = [instances[j] for j in kNNdict[idx]]
        factors.append(ac_dist(instance, kNN, k))
    return factors
# -*- coding: future_fstrings -*-
# Copyright 2018 Brandon Shelley. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Language detection for subtitle files.
This module is used to determine the language of subtitle files based on
name or content, by loading a ISO-639-1 language.json map file.
Subtitle: the main class exported by this module.
Sample subtitle filenames:
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.bulgarian.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.croatian.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.czech.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.danish.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.dutch.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.english-forced.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.english-sdh.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.english.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.estonian.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.finnish.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.french.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.german.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.hungarian.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.icelandic.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.italian.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.latvian.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.lithuanian.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.norwegian.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.portuguese-br.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.portuguese-pt.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.russian.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.serbian.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.slovenian.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.spanish-cas.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.spanish-lat.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.swedish.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.ukrainian.srt
"""
from __future__ import unicode_literals, print_function
from builtins import *
import re
import os
from fylmlib.languages import languages
class Subtitle:
    """A subtitle object that contains information about its language.
    Attributes:
        path: Subtitle path.
        code: 2-character language code, or None if no language detected.
        language: full language name, or None if no language detected.
        captured: language string captured from the filename (first letter
            upper-cased), e.g. 'English' or 'En', or None.
    """
    def __init__(self, path):
        # Path to original subtitle file.
        self.path = path
        # The 2-character language identifier code of the subtitle.
        self.code = None
        # The full-length language of the subtitle.
        self.language = None
        # The language string captured from the original filename, e.g. 'english' or 'en'.
        self.captured = None
        # First we loop through languages to determine if the path contains
        # a descriptive language string, e.g. 'english', 'dutch', or 'fr'
        for lang in languages:
            patterns = []
            # Compile patterns that match language names and codes, case
            # insensitive, allowing a suffix like '-forced' or '-sdh'.
            for n in list(filter(None, lang.names)):
                patterns.append(re.compile(r'\.(?P<lang>' + re.escape(n).lower() + r'(?:-\w+)?\b)', re.I))
            patterns.append(re.compile(r'\.(?P<lang>' + re.escape(lang.code) + r'(?:-\w+)?\b)', re.I))
            # Iterate the array of patterns that we want to check for.
            for p in patterns:
                match = re.search(p, path)
                if match is not None and match.group('lang') is not None:
                    # Save the captured string with its first letter upper-cased.
                    self.captured = match.group('lang')[:1].upper() + match.group('lang')[1:]
                    # If we find a match, set the values of the subtitle, and break.
                    self.code = lang.code
                    self.language = lang.primary_name
                    break
            # Break from parent if captured is set.
            if self.captured is not None:
                break
    def insert_lang(self, path):
        """Returns a new path that includes the captured language string.
        Args:
            path: (str, utf-8) Path to file to append language.
        Returns:
            A new path with the subtitle language inserted before the
            extension, or None if no language was captured.
        """
        filename, ext = os.path.splitext(path)
        # BUG FIX: the previous version returned '(unknown).<lang><ext>',
        # discarding the filename entirely, which contradicts the
        # docstring. Insert the language between name and extension.
        return f'{filename}.{self.captured}{ext}' if self.captured else None
from multiprocessing import Process,Queue
import os,time,random
def write(q):
    """Producer: put three strings on queue *q*, sleeping randomly between puts."""
    print('写进程的PID: {0}'.format(os.getpid()))
    for item in ['两点水', '三点水', '四点水']:
        print('写进Queue 的值为: {0}'.format(item))
        q.put(item)
        # Random pause (< 1 s) so reads and writes interleave visibly.
        time.sleep(random.random())
def read(q):
    """Consumer: block on queue *q* forever, printing each value received."""
    print('读进程的PID: {0}'.format(os.getpid()))
    while True:
        # get(True) blocks until a value is available.
        item = q.get(True)
        print('从Queue读取的值为: {0}'.format(item))
if __name__ == '__main__':
    # The parent process creates the Queue and hands it to both children.
    q = Queue()
    pw = Process(target=write,args=(q,))
    pr = Process(target=read,args=(q,))
    # Start the writer child process.
    pw.start()
    # Start the reader child process.
    pr.start()
    # Wait for the writer to finish producing.
    pw.join()
    # The reader runs an infinite loop and can never be joined, so it has
    # to be terminated forcibly.
    pr.terminate()
|
# Find runs of zeros that come directly after a dot in a dotted-number
# string (e.g. for stripping the leading zeros out of "123.08.004.69").
import re

# BUG FIX: the original pattern '(?=[.])0+' can never match — the lookahead
# requires the next character to be '.', yet '0+' must then match at that
# very position. A *lookbehind* matches zeros that follow a dot, which is
# what the commented-out sub() call below expects.
p = re.compile('(?<=[.])0+')
m = p.search("123.08.004.69")
#m = p.sub("", "123.08.004.69")  # would yield "123.8.4.69"
print(m)
|
from kaa.reach import ReachSet
from kaa.flowpipe import FlowPipePlotter
from models.basic.basic import Basic
def test_phase_basic():
    """Plot the 2-D phase diagram (variables 0 and 1) of the basic model's reach set."""
    model = Basic()
    reach = ReachSet(model)
    pipe = reach.computeReachSet(100)
    FlowPipePlotter(pipe).plot2DPhase(0, 1)
|
from org.apache.commons.io import IOUtils
from java.nio.charset import StandardCharsets
from org.apache.nifi.processor.io import OutputStreamCallback
# Define a subclass of OutputStreamCallback for use in session.write()
class PyOutputStreamCallback(OutputStreamCallback):
    """NiFi (Jython) callback that writes 'Hello World!' to a flow file's output stream."""
    def __init__(self):
        pass
    def process(self, outputStream):
        # Called by NiFi with the flow file's content output stream.
        outputStream.write(bytearray('Hello World!'.encode('utf-8')))
# end class
# Fetch the next flow file from the NiFi session, if any.
flowFile = session.get()
# IDIOM FIX: compare against None with 'is not', not '!='.
if flowFile is not None:
    flowFile = session.write(flowFile, PyOutputStreamCallback())
# implicit return at the end |
from django.db import models
# Create your models here.
class Upload(models.Model):
    """Model holding one uploaded audio file and one uploaded video file."""
    # Files are stored under MEDIA_ROOT/audio and MEDIA_ROOT/video.
    audio = models.FileField(upload_to='audio')
    video = models.FileField(upload_to='video')
    def __str__(self):
        # Display the primary key as the object's name in the admin etc.
        return str(self.pk)
|
#!/usr/bin/env python
# Contributed by Bryan Halfpap <Bryanhalf@gmail.com>, Copyright 2015
#TODO: Reduce the usage of globals
import sys
import argparse
import threading
import logging
from time import sleep
from killerbee import *
def create_beacon(panid, coordinator, epanid):
    '''Build a raw IEEE 802.15.4 beacon frame advertising the given PAN.

    panid       -> 16-bit PAN ID (packed little-endian)
    coordinator -> coordinator short address (currently unused; the source
                   address is hard-coded to 0x0000)
    epanid      -> 64-bit Zigbee extended PAN ID
    Returns the frame as a Python 2 byte string.
    '''
    #print hex(panid),
    beacon = [
        # FRAME CONTROL
        "\x00",  # FCF for beacon 0x00
        "\x80",  # src addressing mode: short/16-bit 0x0002 (this is the only FCF flag marked)
        "\xd8",  # Sequence number for the 802.15.4 layer
        str(struct.pack("H", panid)),  # 2-byte shortcode of panid
        # only some implementations use the real coordinator address; 0x0000 is common
        #str(struct.pack("H", coordinator)[0]),
        "\x00\x00",  # Source address 0
        # SUPERFRAME
        "\xff\xcf",  # beacon interval, superframe interval, final cap slot, battery extension, pan coordinator (true!), association permit (true)
        # GTS
        "\x00",
        # Pending addresses
        "\x00",
        # Minimal Zigbee-layer beacon payload header
        "\x00\x22\x8c",
        # Extended PAN ID (Zigbee NWK layer)
        str(struct.pack("L", epanid)),
        "\xff\xff\xff\x00"
    ]
    return ''.join(beacon)
# Thread 1: Sniffs and extracts Source PAN ID
def get_spanids():
    '''Sniffer loop: watch beacons and publish newly-seen source PAN IDs.

    Shared state is stored on the threading module (threading.spanid) so
    the inject() thread picks up each new PAN ID — see the note in
    __main__ about this workaround.
    '''
    spanid = threading.spanid
    canidate = 0
    restart_threshold = 0
    while True:
        try:
            recvpkt = listen.pnext()
        except:
            print "Warning: Issue recieving packet."
            pass #TODO: Should this be continue instead?
        # Check for empty packet (timeout) and valid FCS
        if recvpkt != None and recvpkt['validcrc']:
            restart_threshold = 0
            try:
                # Bytes 3-4 of the frame hold the 16-bit source PAN ID.
                canidate = struct.unpack('H', recvpkt['bytes'][3:5])[0]
            except:
                print recvpkt['bytes'][3:5]
                pass
            if canidate != None and canidate != spanid:
                # OK we got a packet, lets go back up and send out a beacon$
                print "Got beacon from {0}".format(hex(spanid))
                spanid = canidate
                # Now start sending beacons on the gathered Source PAN ID by informing the other thread:
                threading.spanid = spanid
        # BUG: There's an instance where the Killerbee Framework reads a packet generated by
        # the smartthings motion sensor and *crashes* - we handle that here by assuming
        # that if we didn't get a packet that the interface is busted and we should restart it.
        # TODO: Isolate the issue and file a bug.
        restart_threshold += 1
        if restart_threshold >= 15:
            print "Crash is assumed - restarting sniffer interface.",
            listen.sniffer_off()
            listen.sniffer_on()
            restart_threshold = 0
# Thread 2: Injects beacons
def inject():
    '''Transmit loop: forever build and inject a beacon for the current
    threading.spanid (updated by the sniffer thread).'''
    while True:
        sp = create_beacon(threading.spanid, args.coordinator, args.epanid)
        #TODO: Is there any need for: args.devleave, args.coordinator, args.device)
        kb.inject(sp) #create_beacon returns a string
        pass
if __name__ == '__main__':
    # Parse colon-separated hex strings (e.g. 'de:ad') into an int.
    tohex = lambda s: int(s.replace(':', ''), 16)
    # Command-line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--channel', '-c', action='store', dest='channel', required=True, type=int, default=11)
    parser.add_argument('-i', '--interface', action='store', dest='devstring')
    parser.add_argument('-l', '--listen', action='store', dest='listeninterface')
    parser.add_argument('-p', '--panid', action='store', required=True, type=tohex)
    parser.add_argument('-e', '--epanid', help="Extended PAN ID", action='store', required=True, type=tohex)
    parser.add_argument('-s', '--coordinator', action='store', required=True, type=tohex)
    #parser.add_argument('--numloops', action='store', default=1, type=int)
    args = parser.parse_args()
    # Can't get the device to like context switching between listen and inject, so we have to have a workaround.......
    # Two radios: one dedicated to injecting, one dedicated to sniffing.
    kb = KillerBee(device=args.devstring)
    listen = KillerBee(device=args.listeninterface)
    kb.set_channel(args.channel)
    listen.set_channel(args.channel)
    listen.sniffer_on()
    coordinator = struct.pack('>H', args.coordinator)
    # I'm committing a sin by using the threading module to hold a shared state variable
    threading.spanid = args.panid
    getem1 = threading.Thread(target=get_spanids)
    getem2 = threading.Thread(target=inject)
    getem2.start()
    getem1.start()
    getem1.join()
|
# Generated by Django 3.1 on 2020-11-21 00:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: creates the SplendorGame, SplendorGameState
    and SplendorPlayerState tables (game -> state -> player FK chain)."""
    dependencies = [
        ('hello_world', '0002_auto_20201115_2120'),
    ]
    operations = [
        migrations.CreateModel(
            name='SplendorGame',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='SplendorGameState',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('game_turn', models.CharField(max_length=30)),
                ('t1_offer_string', models.CharField(max_length=100)),
                ('t2_offer_string', models.CharField(max_length=100)),
                ('t3_offer_string', models.CharField(max_length=100)),
                ('nobles_offer', models.CharField(max_length=100)),
                ('game', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hello_world.splendorgame')),
            ],
        ),
        migrations.CreateModel(
            name='SplendorPlayerState',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('color', models.CharField(max_length=30)),
                ('chip_count_string', models.CharField(max_length=6)),
                ('nobles_string', models.CharField(max_length=50)),
                ('reserve_cards_string', models.CharField(max_length=100)),
                ('played_cards_string', models.CharField(max_length=100)),
                ('state', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hello_world.splendorgamestate')),
            ],
        ),
    ]
|
import tensorflow as tf
import os
# Silence TensorFlow's C++ INFO/WARNING log output.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# 1x2 and 2x1 constants for a matrix product (TensorFlow 1.x graph mode).
const1 = tf.constant([[2, 2]])
const2 = tf.constant([[4], [4]])
# Matrix multiplication (cf. tf.add for elementwise addition).
multiple = tf.matmul(const1, const2)
# Printing the op shows the symbolic tensor, not its value.
print(multiple)
sess = tf.Session()
result = sess.run(multiple)
print(result)
# Ops created without an explicit graph land in the default graph.
if const1.graph is tf.get_default_graph():
    print("const1所在的图(Graph)是当前上下文默认的图")
sess.close()
# A session as a context manager is closed automatically.
with tf.Session() as sess:
    result2 = sess.run(multiple)
    print("Multiple的结果是 %s " % result2)
# Find the latest inspection date for the most sanitary restaurants. Assume the highest number of points is the most sanitary.
# Only businesses with 'restaurants' in the name should be considered in your analysis.
# Output the corresponding facility name, inspection score, latest inspection date, previous inspection date,
# and the difference between the latest and previous inspection dates. And order the records based on the latest inspection date in ascending order.
# Import your libraries
import pandas as pd

# Start writing code
# NOTE(review): 'los_angeles_restaurant_health_inspections' is supplied by
# the hosting platform; the final expression is the query's output.
df = los_angeles_restaurant_health_inspections
# Keep only facilities with 'RESTAURANT' in the name.
# NOTE(review): this match is case-sensitive — confirm facility_name is
# stored upper-case, otherwise case=False would be needed.
df = df[df.facility_name.str.contains('RESTAURANT')]
# "Most sanitary" = rows holding the maximum score.
df = df[df['score']==df['score'].max()]
df = df[['facility_name', 'score', 'activity_date']]
# Rank inspection dates per facility, newest = rank 1.
df['rank'] = df.groupby('facility_name')['activity_date'].rank(ascending=False)
df1 = df[df['rank']==1]
# Left-join the previous (rank 2) inspection, if any.
df1 = df1.merge(df[df['rank']==2], on='facility_name', how='left')
df1 = df1[['facility_name','score_x','activity_date_x','activity_date_y']]
# Days between latest (_x) and previous (_y) inspections.
df1['day_difference'] = (df1['activity_date_x']-df1['activity_date_y']).dt.days
# Order by latest inspection date ascending; this expression is the result.
df1.sort_values('activity_date_x')
|
import os
import sys
sys.path.append(os.getenv('cf'))
from cartoforum_api.orm_classes import sess
from flask import session, render_template, request, jsonify
from flask_mail import Message
from cartoforum_api.orm_classes import Users, PasswordReset
import hashlib
import datetime
# @cfapp.route('/groupselect', methods=['POST'])
def group_select():
    """Render the group-selection page for the user in the current session."""
    current_user = sess.query(Users).filter_by(userid=session['userid']).one()
    return render_template('groupselect.html', username=current_user.username)
def create_account(**kwargs):
    """Create a user account from 'email', 'password' and 'username' kwargs.

    Returns the new row's userid on success, or False when a non-None
    email already exists.
    """
    # SECURITY NOTE(review): unsalted SHA-256 is weak for password storage;
    # migrating to a salted KDF (bcrypt/scrypt/argon2) would invalidate
    # existing hashes, so it is only flagged here, not changed.
    m = hashlib.sha256()
    m.update(kwargs['password'].encode("utf-8"))
    hashpass = m.hexdigest()
    emailexists = sess.query(Users).filter_by(email=kwargs['email']).count()
    if emailexists > 0 and kwargs['email'] is not None:
        return False
    newuser = Users(email=kwargs['email'], password=hashpass, username=kwargs['username'])
    sess.add(newuser)
    sess.flush()
    sess.commit()
    # refresh() reloads the database-generated primary key onto newuser.
    sess.refresh(newuser)
    return newuser.userid
# @cfapp.route('/select_username', methods=['POST'])
def select_username():
    """Store the submitted username on the current session's user."""
    chosen = request.form['username']
    user = sess.query(Users).filter_by(userid=session['userid']).first()
    user.username = chosen
    sess.commit()
    # return app.index()
# @cfapp.route('/logout', methods=['POST'])
def do_logout():
    """Clear the user id and login flag from the Flask session."""
    session['userid'] = None
    session['logged_in'] = False
    # return app.index()
# @cfapp.route('/_recover_password', methods=['POST'])
def recover_password():
    """Create a password-reset token for the posted email address.

    Returns a JSON error message when the email is unknown or duplicated;
    otherwise records a PasswordReset row and renders index.html with a
    'resetlinksent' status.
    """
    email = request.form['email']
    exists = sess.query(Users).filter_by(email=email).count()
    if exists == 0:
        return jsonify("Can't find that email address")
    elif exists > 1:
        return jsonify("Something terrible has happened")
    else:
        userid = sess.query(Users).filter_by(email=email).one().userid
        now = datetime.datetime.utcnow()
        # Token = sha256(userid + timestamp + email).
        # SECURITY NOTE(review): consider secrets.token_urlsafe() for an
        # unpredictable token instead of a hash of guessable inputs.
        m = hashlib.sha256()
        for i in [str(userid), str(now), email]:
            m.update(i.encode("utf-8"))
        token = m.hexdigest()
        newrequest = PasswordReset(userid=userid, token=token, date=now, used='f')
        sess.add(newrequest)
        sess.commit()
        resetlink = "https://cartoforum.com/resetpassword?token={}".format(token)
        msg = Message(
            'Hello',
            sender='Cartoforum',
            recipients=[email])
        msg.body = resetlink
        # mail.send(msg)
        return render_template('index.html', status='resetlinksent')
# @cfapp.route('/resetpassword', methods=['GET'])
def reset_password():
    """Render the password-reset page for a valid, unused reset token.

    NOTE(review): .one() raises NoResultFound for an unknown or used token,
    and the function implicitly returns None when userid <= 0; token expiry
    is never checked — confirm whether that is intended.
    """
    token = request.args.get('token')
    userid = sess.query(PasswordReset).filter_by(token=token).filter_by(used='f').one().userid
    if userid > 0:
        return render_template('passwordreset.html', userid=userid)
|
'''
Recursividade
Quando uma função chama a si própria
'''
def pot(base, exp):
    """Return base**exp computed recursively.

    exp must be a non-negative integer; the original recursed forever
    (until RecursionError) for negative exponents.
    """
    if exp < 0:
        raise ValueError("exp must be a non-negative integer")
    # caso base
    if exp == 0:
        return 1
    return base * pot(base, exp - 1)
print(pot(2, 10)) |
from django.contrib import admin
from django.contrib.auth.views import LoginView, LogoutView, PasswordChangeView
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from blog_django.views import HomeView
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', HomeView.as_view(), name="home"),
    path('accounts/', include('account.urls')),  # include the app's URLs
    path('categoria/', include('category.urls')),
    path('blog/', include('blog.urls')),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)  # serve static files
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)  # serve media files
|
# This file is only intended for development purposes
from kubeflow.kubeflow.ci import base_runner

# Kick off the CI workflow that tests the jupyter-scipy notebook-server image.
base_runner.main(
    component_name="notebook_servers.notebook_server_jupyter_scipy_tests",
    workflow_name="nb-j-sp-tests")
|
#!/usr/bin/python
import yaml
#Print a dictionary with a nice format
# (Python 2 — uses print statements.)
# Recursively pretty-prints d; nested dicts are announced with "[key]=..."
# and their contents indented one extra level.
def PrintDict(d,indent=0):
  for k,v in d.items():
    if type(v)==dict:
      # nested dict: print the key, then recurse one level deeper
      print ' '*indent,'[',k,']=...'
      PrintDict(v,indent+1)
    else:
      print ' '*indent,'[',k,']=',v
  # blank line after the outermost call only
  if indent==0: print ''
#Insert a new dictionary to the base dictionary
def InsertDict(d_base, d_new):
  """Recursively merge d_new into d_base in place.

  Values from d_new overwrite those in d_base, except when both sides
  hold a dict for the same key, in which case the merge descends.
  """
  for key, value in d_new.items():
    mergeable = key in d_base and type(value)==dict and type(d_base[key])==dict
    if mergeable:
      InsertDict(d_base[key], value)
    else:
      d_base[key] = value
# Build a nested attribute dictionary to demonstrate YAML round-tripping
# and the recursive merge above. (Python 2 — print statements.)
attrib={}
attrib['b1']={}
attrib['b1']['g_width']= 0.1
attrib['b1']['p_set']= [[1,2,3], [2,3,4], [1,2,3], [2,3,4]]
attrib['b1']['r_dict']= {}
attrib['b1']['r_dict']['aaa']= 1.25
attrib['b1']['r_dict']['bbb']= 3.14
attrib['b1']['r_dict']['ddd']= [0,0,0]
attrib['b1']['models']=[{'kind':'Sphere','radius':3,'p':[0,0,0]},
                        {'kind':'Cylinder','radius':2,'p1':[0,0,0],'p2':[1,1,0]}]
attrib['b2']={}
attrib['b2']['g_width']= 0.5
attrib['b2']['p_set']= [[1,2,3]]
attrib['b2']['q_set']= [1,2,3]
attrib['b2']['r_dict']= {}
attrib['b2']['r_dict']['aaa']= 0.00
attrib['b2']['r_dict']['ccc']= 9.99
print 'attrib='
PrintDict(attrib)
# Serialize b1 to YAML and parse it back.
yaml_b1_str= yaml.dump(attrib['b1'])
print 'yaml_b1_str='
print yaml_b1_str
# NOTE(review): yaml.load without an explicit Loader is unsafe on
# untrusted input and deprecated in newer PyYAML — confirm input is trusted.
yaml_b1_dat= yaml.load(yaml_b1_str)
print 'yaml_b1_dat='
print yaml_b1_dat,'\n'
# Deep-merge the parsed b1 into b2 (unlike dict(**...), which is shallow).
#attrib['b2']= dict(attrib['b2'], **yaml_b1_dat)
InsertDict(attrib['b2'], yaml_b1_dat)
print 'attrib='
PrintDict(attrib)
yaml_str= yaml.dump(attrib)
print 'yaml_str='
print yaml_str
|
#!/usr/bin/env python2.7
import os
from sqlalchemy import *
from sqlalchemy.pool import NullPool
from flask import Flask, request, render_template, g, redirect, Response
# Templates live next to this file in ./templates
tmpl_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
app = Flask(__name__, template_folder=tmpl_dir)
# NOTE(review): database credentials are committed in source; move them to
# environment variables or an untracked config file and rotate the password.
DATABASEURI = "postgresql://sv2525:6185@35.231.44.137/proj1part2"
#
# This line creates a database engine that knows how to connect to the URI above.
#
engine = create_engine(DATABASEURI)
@app.before_request
def before_request():
  """
  This function is run at the beginning of every web request
  (every time you enter an address in the web browser).
  We use it to setup a database connection that can be used throughout the request.
  The variable g is globally accessible.
  """
  try:
    g.conn = engine.connect()
  except:
    # Best-effort: handlers must cope with g.conn being None.
    # (Python 2 print statement below.)
    print "uh oh, problem connecting to database"
    import traceback; traceback.print_exc()
    g.conn = None
@app.teardown_request
def teardown_request(exception):
  """
  At the end of the web request, this makes sure to close the database connection.
  If you don't, the database could run out of memory!

  Any failure (including g.conn being None after a failed connect) is
  deliberately ignored — teardown must never raise.
  """
  try:
    g.conn.close()
  except Exception:
    pass
@app.route('/')
def landing():
  """Public landing page."""
  page = render_template("landing.html")
  return page
#Page for Hospital Administrator
@app.route('/searchHA')
def hospiAdmin():
  """List hospital-department rows for the employee id passed as ?eid=."""
  id = request.args['eid']
  cursor = g.conn.execute("SELECT * FROM hospitaldept where employeeid = %s;", id)
  # First entry is a header row for the template.
  results = ["Hospital ID, Department ID, Admin ID, Admin since:"]
  for result in cursor:
    # (removed unused hID/dID locals from the original)
    results.append(result)
  cursor.close()
  context = dict(data=results)
  return render_template("searchHA.html", **context)
@app.route('/searchHA2')
def findDetails():
  """Hospital-admin detail lookup.

  Dispatches on ?avail=: 'check' (blood stock), 'history' (transfusions),
  'available' (stock matching the recipient's blood type), 'internal'
  (internal requests), anything else (transfers from/to the hospital).
  The first element of each result list is a header row for the template.
  """
  hid = request.args['hid']
  rid = request.args['rid']
  option = request.args['avail']
  results = []
  if option == 'check':
    cursor = g.conn.execute("SELECT * FROM bloodcapacity WHERE hospitalID = %s;", hid)
    results.append("Hospital ID, Blood type, Units of Blood:")
  elif option == 'history':
    cursor = g.conn.execute("SELECT * FROM transfusions WHERE hospitalID = %s AND recipientID = %s;", (hid, rid))
    results.append("Transfusion ID, Request ID, Recipient ID, Hospital ID, Units of Blood, Date:")
  elif option == 'available':
    cursor = g.conn.execute("SELECT * FROM bloodcapacity WHERE hospitalID = %s AND bloodtype = (SELECT bloodType FROM recipients WHERE recipientID = %s)", (hid, rid))
    results.append("Units of Blood:")
  elif option == 'internal':
    cursor = g.conn.execute("SELECT * FROM internalrequest WHERE hospitalID = %s", hid)
    results.append("Request ID, Blood type, Units of Blood, Department ID, Hospital ID")
  else:
    # Transfers branch needs two queries: outgoing then incoming.
    cursor1 = g.conn.execute("SELECT * FROM transfers WHERE fromID = %s", hid)
    results.append("Transfer ID, From ID, To ID, Blood Type, Units")
    results.append("Transferred To: ")
    for result in cursor1:
      results.append(result)
    cursor1.close()
    cursor = g.conn.execute("SELECT * FROM transfers WHERE toID = %s", hid)
    results.append("Received From: ")
  # `cursor` is set on every path above; drain it here.
  for result in cursor:
    results.append(result)
  cursor.close()
  context = dict(data=results)
  return render_template("searchHA2.html", **context)
@app.route('/index')
def index():
  """Demo page: list every name in the `test` table. (Python 2 print.)"""
  print request.args
  cursor = g.conn.execute("SELECT name FROM test")
  names = []
  for result in cursor:
    names.append(result['name'])  # can also be accessed using result[0]
  cursor.close()
  context = dict(data = names)
  return render_template("index.html", **context)
@app.route('/another')
def another():
  """Static demo page."""
  return render_template("another.html")
# Example of adding new data to the database
@app.route('/addDonor', methods=['POST'])
def add():
  """Insert a new donor or recipient row from the posted form."""
  form = request.form
  name = form['name']
  person_id = form['did']
  bloodtype = form['bloodtype']
  address = form['address']
  phone = form['phone']
  row = (name, person_id, bloodtype, address, phone)
  if form['usertype'] == 'donor':
    g.conn.execute('INSERT INTO donor(name, donorID, bloodType, address, phone) VALUES (%s, %s, %s, %s, %s)', row)
  else:
    g.conn.execute('INSERT INTO recipients(name, recipientID, bloodType, address, phone) VALUES (%s, %s, %s, %s, %s)', row)
  return render_template("searchAdmin.html")
@app.route('/Users')
def user():
  """User search landing page."""
  return render_template("searchUser.html")
@app.route('/searchUser')
def searchUser():
  """Show profile and transaction history for a donor or recipient.

  ?usertype= selects the table, ?name= carries the numeric id.
  """
  userType = request.args['usertype']
  id = request.args['name']
  if userType == 'donor':
    cursor1 = g.conn.execute("SELECT * FROM donor WHERE donorID = %s;", id)
    cursor2 = g.conn.execute("SELECT donationID, unitsDonated, institutionID, date FROM donor d1, donations d2 WHERE d1.donorID = d2.donorID AND d1.donorID = %s;", id)
  else:
    cursor1 = g.conn.execute("SELECT * FROM recipients WHERE recipientID = %s;", id)
    cursor2 = g.conn.execute("SELECT transfusionID, unitsTransfused, hospitalID, date FROM recipients R, transfusions T WHERE R.recipientID = T.recipientID AND R.recipientID = %s;", id)
  results = []
  results.append("ID, Blood Type, Name, Address, Phone: ")
  for result in cursor1:
    # list(row) replaces the original element-by-element copy loop
    results.append(list(result))
  cursor1.close()
  results.append("Transaction ID, Units of Blood, Institution ID, Date: ")
  for result in cursor2:
    results.append(list(result))
  cursor2.close()
  context = dict(data=results)
  return render_template("searchUser.html", **context)
#Landing page for all administrators
@app.route('/searchAdmin')
def searchAdmin():
  """Administrator landing page."""
  return render_template("searchAdmin.html")
if __name__ == "__main__":
import click
@click.command()
@click.option('--debug', is_flag=True)
@click.option('--threaded', is_flag=True)
@click.argument('HOST', default='0.0.0.0')
@click.argument('PORT', default=8111, type=int)
def run(debug, threaded, host, port):
HOST, PORT = host, port
print "running on %s:%d" % (HOST, PORT)
app.run(host=HOST, port=PORT, debug=debug, threaded=threaded)
run()
|
class paper:
    """Record describing one surveyed paper.

    Field encodings (kept from the original):
      typeOfPaper: 0 paper, 1 app, 2 paper + app, 3 review
      computing:   0 local, 1 cloud, 2 edge, 3 mixed
    Values are stored as read from the input row (strings), except
    `country`, which is split on commas into a list.
    """
    # Class-level defaults kept for backward compatibility with code that
    # reads them off the class; instances always set their own attributes.
    name = ""
    issn = ""
    searchWord = ""
    dataSet = "GoolgeScholar"
    searchConf = ""
    year = ""
    publication = ""
    country = []
    typeOfPaper = 0
    technology = ""
    reviewTech = ""
    comProtocol = ""
    cooperative = ""
    computing = 0
    compatibility = ""
    aplicationField = ""
    specialization = ""
    availableSofw = ""
    resume = ""
    utility = ""

    def __init__(self, data=None):
        """Build a paper, optionally from a row of at least 18 fields.

        The original class defined two __init__ methods; the first
        (no-argument) one was dead code because the second silently
        replaced it. They are merged here via the `data=None` default.
        """
        # Default-initialize every field first.
        self.name = ""
        self.issn = ""
        self.searchWord = ""
        self.dataSet = "GoolgeScholar"
        self.searchConf = ""
        self.year = ""
        self.publication = ""
        self.country = []
        self.typeOfPaper = 0  # 0 paper, 1 app, 2 paper + app, 3 review
        self.technology = ""
        self.reviewTech = ""
        self.comProtocol = ""
        self.cooperative = ""
        self.computing = 0  # 0 local, 1 cloud, 2 edge, 3 mixed
        self.compatibility = ""
        self.aplicationField = ""
        self.specialization = ""
        self.availableSofw = ""
        self.resume = ""
        if data is None:
            self.utility = "Default constructor"
            return
        self.utility = ""
        if len(data) < 18:
            # The original referenced an undefined name `error` here,
            # raising NameError on short rows.
            first = data[0] if data else "<empty>"
            print("Error: incomplete paper record: " + str(first))
            return
        self.name = data[0]
        self.issn = data[1]
        self.searchWord = data[2]
        self.dataSet = data[3]
        self.searchConf = data[4]
        self.year = data[5]
        self.publication = data[6]
        self.country = data[7].replace(" ", "").split(',')
        self.typeOfPaper = data[8]  # 0 paper, 1 app, 2 paper + app, 3 review
        self.technology = data[9]
        self.reviewTech = data[10]
        self.comProtocol = data[11]
        self.cooperative = data[12]
        self.computing = data[13]  # 0 local, 1 cloud, 2 edge, 3 mixed
        self.compatibility = data[14]
        self.aplicationField = data[15]  # .replace(" ","").split(',')
        self.specialization = data[16]
        self.availableSofw = data[17]
        self.resume = data[18]
        self.utility = data[19]

    def print(self):
        """Dump the record as a single '/'-separated line (same fields as before)."""
        print(self.name, '/', self.searchWord, '/', self.dataSet, '/', self.searchConf, '/',
              self.year, '/', self.publication, '/', self.country, '/', self.typeOfPaper, '/',
              self.technology, '/', self.reviewTech, '/', self.comProtocol, '/',
              self.cooperative, '/', self.computing, '/', self.compatibility, '/',
              self.aplicationField, '/', self.availableSofw, '/', self.resume, '/', self.utility)
|
__author__ = 'Yauhen_Mirotsin'
|
# coding: utf-8
from setuptools import setup, find_packages
setup(
    name='expr-eval-2',
    version='0.3a',
    description='Python safe expression eval (experimental)',
    long_description='Python safe expression eval (experimental)',
    # Classifiers must be valid PyPI trove classifiers; the previous
    # 'License :: MIT License' and 'Topic :: Data Processing :: ...'
    # entries are not in the official list and are rejected on upload.
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.7',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    keywords='Python safe expression eval (experimental)',
    url='',
    author='dgr113',
    author_email='dmitry-gr87@yandex.ru',
    license='MIT',
    packages=find_packages(),
    install_requires=[
        'more-itertools',
        'jsonschema'
    ],
    include_package_data=True,
    zip_safe=False
)
|
"""
distutilazy.test
-----------------
command classes to help run tests
:license: MIT. For more details see LICENSE file or
https://opensource.org/licenses/MIT
"""
from __future__ import absolute_import
import os
from os.path import abspath, basename, dirname
import sys
import fnmatch
from importlib import import_module
import unittest
from distutils.core import Command
from types import ModuleType
__version__ = "0.4.0"
def test_suite_for_modules(modules):
suite = unittest.TestSuite()
test_loader = unittest.defaultTestLoader
for module in modules:
module_tests = test_loader.loadTestsFromModule(module)
suite.addTests(module_tests)
return suite
def find_source_filename(source_name, dir_path):
    """Find the filename matching the source/module name
    in the specified path. For example searching for "queue"
    might return "queue.py" or "queue.pyc".

    Returns the first existing match (".py" preferred) or None.
    """
    # BUG FIX: the compiled extension was written as "pyc" (missing dot),
    # so the function looked for "queuepyc" and never found ".pyc" files.
    source_filenames = [
        os.path.join(dir_path, source_name + ext) for
        ext in (".py", ".pyc")]
    for source_filename in source_filenames:
        if os.path.exists(source_filename):
            return source_filename
    return None
class RunTests(Command):
    """distutils command that discovers and runs the project's test suite.

    Tests are found either from explicit --files, from the test package's
    __all__, or by scanning --root for files matching --pattern.

    FIX: four self.announce() calls used the pattern
    `"...{}." + "...".format(x)`, which applies .format() to only the
    second string literal, so the placeholders were never filled. The
    .format() now applies to the whole message.
    """

    description = """Run test suite"""
    user_options = [("root=", "r", "path to tests suite dir"),
                    ("pattern=", "p", "test file name pattern"),
                    ("verbosity=", "v", "verbosity level [1,2,3]"),
                    ("files=", None,
                     "run specified test files (comma separated)"),
                    ("except-import-errors", None,
                     "except import errors when trying to import test "
                     "modules. Note: might shadow import errors raised "
                     "by the actual modules being tested")]

    def initialize_options(self):
        # Defaults: ./tests, files named test*.py, quietest runner.
        self.root = os.path.join(os.getcwd(), 'tests')
        self.pattern = "test*.py"
        self.verbosity = 1
        self.files = None
        self.except_import_errors = False

    def finalize_options(self):
        """Validate options: root must exist; verbosity is clamped to [1, 3]."""
        if not os.path.exists(self.root):
            raise IOError("Failed to access root path '{}'".format(self.root))
        verbosity = min(int(self.verbosity), 3)
        if verbosity < 1:
            self.verbosity = 1
        else:
            self.verbosity = verbosity
        if self.files:
            self.files = map(lambda name: name.strip(), self.files.split(','))
        self.except_import_errors = bool(self.except_import_errors)

    def get_modules_from_files(self, files):
        """Import each test file as a module, returning the module list."""
        modules = []
        for file_name in files:
            directory = dirname(file_name)
            module_name, _, extension = basename(file_name).rpartition('.')
            if not module_name:
                self.announce(
                    "failed to find module name from filename '{}'. "
                    "skipping this file".format(file_name))
                continue
            package_name = self._import_dir_as_package(directory)
            if package_name:
                # relative import inside the package
                module_name = '.' + module_name
            elif directory not in sys.path:
                sys.path.insert(0, directory)
            self.announce(
                "importing module '{}' from file '{}' ...".format(module_name,
                                                                  file_name))
            module = import_module(module_name, package=package_name)
            modules.append(module)
        return modules

    def _import_dir_as_package(self, directory):
        """Tries to import the specified directory path as a package, if it
        seems to be a package. Returns the package name (if import was
        successful) or None if directory is not a valid package."""
        directory_name = basename(directory)
        abs_dir = abspath(directory)
        package_name = None
        if directory_name and find_source_filename('__init__', abs_dir) is not None:
            parent_dir = dirname(abs_dir)
            if parent_dir not in sys.path:
                sys.path.insert(0, parent_dir)
            try:
                self.announce(
                    "importing '{}' as package ...".format(directory_name))
                import_module(directory_name)
                package_name = directory_name
            except ImportError as err:
                self.announce(
                    "failed to import '{}'. {}".format(directory_name, err))
                # Only swallow the error when it concerns this package itself.
                if self.except_import_errors and directory_name in str(err):
                    package_name = None
                else:
                    raise err
        return package_name

    def find_test_modules_from_package_path(self, package_path):
        """Import and return modules from package __all__,
        if path is found to be a package.
        """
        package_dir = dirname(package_path)
        package_name = basename(package_path)
        if package_dir:
            sys.path.insert(0, package_dir)
        self.announce(
            "importing package '{}' ...".format(package_name)
        )
        package = import_module(package_name)
        if package and hasattr(package, '__all__'):
            modules = []
            for module_name in package.__all__:
                module = import_module('{}.{}'.format(
                    package_name, module_name))
                if type(module) == ModuleType \
                        and module not in modules:
                    modules.append(module)
            return modules

    def find_test_modules_from_test_files(self, root, pattern):
        """Return list of test modules from the files in the path
        whose name match the pattern
        """
        modules = []
        abs_root = abspath(root)
        for (dir_path, directories, file_names) in os.walk(abs_root):
            package_name = self._import_dir_as_package(dir_path)
            if not package_name and dir_path not in sys.path:
                sys.path.insert(0, dir_path)
            for filename in fnmatch.filter(file_names, pattern):
                module_name, _, extension = basename(filename).rpartition('.')
                if not module_name:
                    self.announce(
                        "failed to find module name from filename '{}'. "
                        "skipping this test".format(filename))
                    continue
                module_name_to_import = '.' + module_name if package_name else module_name
                self.announce(
                    "importing module '{}' from '{}' ...".format(
                        module_name_to_import, filename
                    )
                )
                try:
                    module = import_module(module_name_to_import, package_name)
                    if type(module) == ModuleType and module not in modules:
                        modules.append(module)
                except ImportError as err:
                    self.announce(
                        "failed to import '{}' from '{}'. {}. "
                        "skipping this file!".format(
                            module_name_to_import, filename, err)
                    )
                    if not self.except_import_errors or module_name not in str(err):
                        raise err
                except (ValueError, SystemError) as err:
                    self.announce(
                        "failed to import '{}' from '{}'. {}. "
                        "skipping this file!".format(module_name, filename, err)
                    )
        return modules

    def get_test_runner(self):
        """Return the runner used by run(); override point for subclasses."""
        return unittest.TextTestRunner(verbosity=self.verbosity)

    def run(self):
        """Discover test modules (files > package __all__ > file scan) and run them."""
        if self.files:
            modules = self.get_modules_from_files(self.files)
        else:
            self.announce("searching for test package modules ...")
            modules = self.find_test_modules_from_package_path(self.root)
            if not modules:
                self.announce("searching for test files ...")
                modules = self.find_test_modules_from_test_files(self.root,
                                                                 self.pattern)
        if not modules:
            self.announce("found no test files")
            return False
        suite = test_suite_for_modules(modules)
        runner = self.get_test_runner()
        self.announce("running tests ...")
        runner.run(suite)


run_tests = RunTests
|
from random import randint

# Number-guessing game: the player has 5 attempts to guess a digit 0-9.
n=randint(0,9)
i=0  # attempt counter
while(i<5):
    num=int(input("guess number"))
    if num == n:
        print(" correct",n)
        break
    else:
        print("Wrong ")
        # 4-i counts down to 0 on the final attempt
        print("Attempt remaining : ",4-i)
        i=i+1
|
"""
Dynamixel Instructions
http://support.robotis.com/en/product/dynamixel/communication/dxl_instruction.htm
"""
import packet
def instructionPing( ser, id ):
    """Ping instruction (0x01): check that servo `id` is reachable.

    Returns the received status packet (previously discarded).
    """
    # FIX: helpers are defined in the `packet` module (imported as
    # `import packet`), so bare calls raised NameError; qualify them.
    p = packet.makePacket(id, 0x01, [])
    packet.sendPacket(ser, p)
    p = packet.receivePacket(ser, id)
    return p
def instructionWriteData( ser, id, params ):
    """Write data instruction (0x03) with `params` bytes.

    Returns the received status packet (previously discarded).
    """
    # FIX: qualify helpers with the `packet` module (bare names raised NameError).
    p = packet.makePacket(id, 0x03, params)
    packet.sendPacket(ser, p)
    p = packet.receivePacket(ser, id)
    return p
def instructionRegWrite( ser, id, params ):
    """Registered-write instruction (0x04): queue a write until ACTION.

    Returns the received status packet (previously discarded).
    """
    # FIX: qualify helpers with the `packet` module (bare names raised NameError).
    p = packet.makePacket(id, 0x04, params)
    packet.sendPacket(ser, p)
    p = packet.receivePacket(ser, id)
    return p
def instructionAction( ser, id ):
    """Action instruction (0x05): trigger previously registered writes.

    Returns the received status packet (previously discarded).
    """
    # FIX: qualify helpers with the `packet` module (bare names raised NameError).
    p = packet.makePacket(id, 0x05, [])
    packet.sendPacket(ser, p)
    p = packet.receivePacket(ser, id)
    return p
## TLS Motion Determination (TLSMD)
## Copyright 2002-2006 by TLSMD Development Group (see AUTHORS file)
## This code is part of the TLSMD distribution and governed by
## its license. Please see the LICENSE file that should have been
## included as part of this package.
import sys
# Global switch: when False, stdout()/stderr() become no-ops.
console_output_enabled = True

def stdout(text):
    """Write `text` to standard output (if console output is enabled)."""
    if console_output_enabled:
        sys.stdout.write(text)
        sys.stdout.flush()

def stderr(text):
    """Write `text` to standard error (if console output is enabled)."""
    if console_output_enabled:
        # BUG FIX: this previously wrote to sys.stdout, so error text
        # never reached the stderr stream.
        sys.stderr.write(text)
        sys.stderr.flush()

def enable():
    """Turn console output on."""
    global console_output_enabled
    console_output_enabled = True

def disable():
    """Turn console output off (stdout/stderr become no-ops)."""
    global console_output_enabled
    console_output_enabled = False

def kvformat(key, value):
    """Print `key` dot-padded to 40 columns, then ': value'."""
    stdoutln(key.ljust(40, ".") + ": " + str(value))

def endln():
    """Print a bare newline."""
    stdout("\n")

def stdoutln(line):
    """Print `line` to stdout with a trailing newline."""
    stdout(line + "\n")

def stderrln(line):
    """Print `line` to stderr with a trailing newline."""
    stderr(line + "\n")
|
from Npc import Npc
class Guarda(Npc):
    """Guard NPC: an Npc with combat stats.

    The parentheses after the class name declare inheritance; the
    constructor must forward the superclass attributes.
    """
    def __init__(self, nome, time, forca=100, municao=20):
        """Create a guard named `nome` on team `time`.

        `forca` and `municao` were hard-coded constants; they are now
        overridable keyword defaults, so existing two-argument callers
        keep working unchanged.
        """
        # Delegate attribute setup to the parent-class constructor.
        super().__init__(nome, time, forca, municao)
|
from flask import render_template, redirect, url_for
from flask_login import login_required, current_user
import threading
from app import app, forms, user
from app import db_lock, db, login_manager
import os
from flask import request
import json
@app.route('/')
def index():
    """Landing page carrying both the login and registration forms."""
    login_form = forms.LoginForm()
    reg_form = forms.RegistrationForm()
    reg_form.country.default = 'GB'
    reg_form.process()
    return render_template("index.html", loginform=login_form, regform=reg_form)
@app.route('/home')
@app.route('/home/<imgid>')
@login_required
def home(imgid = None):
    """Home page; when `imgid` is given, focus the view on that slide."""
    if(imgid == None):
        print(current_user.user_id)
        return render_template('home.html')
    else:
        if(current_user.is_authenticated):
            user_id = current_user.user_id
        else:
            return redirect('index')
        # FIX: release the lock even if the DB call raises; previously an
        # exception here left db_lock held forever.
        db_lock.acquire()
        try:
            data = db.get_current_slide(imgid, user_id)
        finally:
            db_lock.release()
        if(data['success'] == False):
            return render_template('home.html')
        print("Got slide id, focusing on it")
        return render_template('home.html', data=data)
@app.route('/viewimg/<imgid>')
@app.route('/viewimg/<imgid>/<annotation>')
@login_required
def viewimg(imgid=None, annotation = None):
    """Slide viewer: loads slide data, annotations and tile dimensions.

    `annotation`, when present, is the id of an annotation to focus (-1
    means none). Annotation points are stored as a '|'-separated list of
    polygons, each a comma-separated flat list of coordinates.
    """
    annotation_form = forms.AnnotationForm()
    if(imgid == None):
        return redirect('home')
    if(annotation == None):
        show_annotation = -1
    else:
        show_annotation = annotation
    if(current_user.is_authenticated):
        user_id = current_user.user_id
        # NOTE(review): the lock is not released if a DB call raises —
        # consider try/finally here; confirm before changing.
        db_lock.acquire()
        temp_1 = db.get_slide_data_by_id(imgid, user_id) # Get location of file
        data = temp_1[1]
        slide_id = temp_1[-1]
        anno = db.get_annotations(slide_id)
        db_lock.release()
        # Copy each annotation row into a mutable list.
        annotations = []
        for v in anno:
            temp = []
            for a in v:
                temp.append(a)
            annotations.append(temp)
        # Decode the packed coordinate string in column 3 into point pairs.
        for a in annotations:
            points = a[3]
            p_array = []
            for p in points.split("|"):
                sub = []
                temp = []
                for c in p.split(","):
                    if(len(temp) < 2):
                        temp.append(float(c))
                    else:
                        sub.append(temp)
                        temp = [float(c)]
                # NOTE(review): the final pair accumulated in `temp` is
                # never appended to `sub`, so the last point of each
                # polygon appears to be dropped — confirm if intended.
                p_array.append(sub)
            print(len(p_array))
            a[3] = p_array
        print(annotations)
        # Only the uploader (column 9) may edit.
        is_uploader = False
        if(temp_1[9] == user_id):
            print("Is uploader")
            is_uploader = True
        dimensions = ""
        with open(os.path.abspath('app/static/uploads/' + data + '/dimensions.json'), 'r') as f:
            dimensions = f.readline()
        #dimensions = json.dumps(dimensions)
        print(dimensions)
        db_lock.acquire()
        db.record_slide_access(slide_id, current_user.user_id)
        db_lock.release()
        return render_template('viewimg.html', user_id=user_id, imgid=imgid, slide_id = slide_id, loc=str(url_for('static', filename="uploads") + "/" + data), dimensions=dimensions, form = annotation_form, anno=json.dumps(annotations), sa = show_annotation, is_uploader=is_uploader)
@app.route('/accountsettings')
@login_required
def account_settings():
    """Account settings page."""
    page = render_template('accountsettings.html')
    return page
@app.route('/uploadimage')
@login_required
def uploadimage():
    """Image-upload page with its upload form."""
    form = forms.UploadForm()
    form.u_file.default = 0
    return render_template('uploadimage.html', form=form)
@app.route('/reviewimg/<imgid>')
@login_required
def reviewimg(imgid=None):
    """Review page for slide `imgid`, including its annotations."""
    if (current_user.is_authenticated):
        user_id = current_user.user_id
        # FIX: release the lock even if a DB call raises; previously an
        # exception here left db_lock held forever.
        db_lock.acquire()
        try:
            data = db.get_slide_data_by_id(imgid, user_id)
            anno = db.get_annotations(data[-1])
        finally:
            db_lock.release()
        print(anno)
        return render_template('reviewimg.html', data=data, data_j=json.dumps(data), anno=anno, anno_j=json.dumps(anno))
@app.route('/testcanvas')
def testcanvas():
    """Canvas test page."""
    return render_template('testcanvas.html')
@app.route('/manage_permissions/<slideid>')
@login_required
def manage_permissions(slideid):
    """Permission management for a slide; restricted to its uploader."""
    if(current_user.is_authenticated):
        user_id = current_user.user_id
        # FIX: release the lock even if a DB call raises; previously an
        # exception here left db_lock held forever.
        db_lock.acquire()
        try:
            slide = db.get_slide_data_by_id(slideid, user_id)
            permitted_users = db.get_permitted_users(slideid, user_id)
        finally:
            db_lock.release()
        if(user_id != slide[-2]): # means only the uploader can access the permissions page
            return redirect('home')
        return render_template('manage_permissions.html', slide=slide, pu=permitted_users, pujs=json.dumps(permitted_users))
@app.route('/feedback')
@login_required
def feedback():
    """Feedback page with its form."""
    fb_form = forms.FeedbackForm()
    return render_template('feedback.html', feedback=fb_form)
|
#!/usr/bin/env python3
# Copyright (c) 2016, Robert Escriva, Cornell University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Consus nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import os.path
import stat
def lines():
    """Yield ('file', line) for every line of Makefile.am, and for each
    `X += foo.gremlin` line additionally generate foo.valgrind.gremlin on
    disk and yield an ('auto', ...) Makefile line referencing it.
    """
    for line in open('Makefile.am'):
        yield 'file', line
        x = line.split('+=')
        if len(x) != 2: continue
        g = x[1].strip()
        if not g.endswith('.gremlin'): continue
        # skip already-generated valgrind wrappers
        if g.endswith('.valgrind.gremlin'): continue
        if not os.path.exists(g): continue
        # Write the valgrind wrapper gremlin next to the original.
        v = g[:-len('.gremlin')] + '.valgrind.gremlin'
        f = open(v, 'w')
        f.write('#!/usr/bin/env gremlin\n')
        f.write("env GREMLIN_PREFIX 'libtool --mode=execute valgrind --tool=memcheck --trace-children=yes --error-exitcode=127 --leak-check=full --gen-suppressions=all --suppressions=\"${CONSUS_SRCDIR}/consus.supp\"'\n")
        f.write('include ' + os.path.basename(g) + '\n')
        f.flush()
        f.close()
        # owner read/write/execute — gremlin files are run directly
        os.chmod(v, stat.S_IRWXU)
        yield 'auto', (x[0] + '+= ' + v + '\n')
# Rewrite Makefile.am through a temp file, skipping consecutive duplicate
# auto-generated lines, then atomically swap it into place.
prev = None
with open('Makefile.am.tmp', 'w') as fout:
    for line in lines():
        # keep a line unless it repeats the previous auto-generated one
        if prev is None or prev[0] != 'auto' or prev[1] != line[1]:
            fout.write(line[1])
        prev = line
# BUG FIX: the original ended with `fout.close` (no parentheses), so the
# file was never explicitly closed/flushed before os.rename; the `with`
# block above guarantees it.
os.rename('Makefile.am.tmp', 'Makefile.am')
|
# ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import sys
from twitter.common import app
from twitter.common.exceptions import BasicExceptionHandler
class AppExceptionHandler(app.Module):
  """
  An application module that logs or scribes uncaught exceptions.
  """

  def __init__(self):
    app.Module.__init__(self, __name__, description="twitter.common.log handler.")

  def setup_function(self):
    """Chain a BasicExceptionHandler in front of the current excepthook."""
    self._builtin_hook = sys.excepthook

    def chained_hook(*args, **kw):
      # Log/scribe first, then delegate to whatever hook was installed.
      BasicExceptionHandler.handle_error(*args, **kw)
      self._builtin_hook(*args, **kw)

    sys.excepthook = chained_hook

  def teardown_function(self):
    """Restore the saved excepthook (or the interpreter default)."""
    sys.excepthook = getattr(self, '_builtin_hook', sys.__excepthook__)
|
import os
import re
import tempfile
from django import forms
from django.conf import settings
from django.forms import ModelForm
import happyforms
import Image
from easy_thumbnails import processors
from statsd import statsd
from tower import ugettext as _, ugettext_lazy as _lazy
from phonebook.models import Invite
from groups.models import Group
from users.models import User, UserProfile
# Default page size for search results.
PAGINATION_LIMIT = 20

# FIX: raw string avoids the invalid-escape-sequence warning for '\d'.
REGEX_NUMERIC = re.compile(r'\d+', re.IGNORECASE)
class SearchForm(happyforms.Form):
    """Phonebook search form: query string, page size, vouched filter."""
    q = forms.CharField(widget=forms.HiddenInput, required=True)
    limit = forms.CharField(widget=forms.HiddenInput, required=False)
    nonvouched_only = forms.BooleanField(required=False)

    def clean_limit(self):
        """Validate that this limit is numeric and greater than 1"""
        # Falls back to PAGINATION_LIMIT for empty, non-numeric or < 1 input.
        # NOTE(review): returns an int on fallback but the raw string when
        # the user-supplied limit validates — callers presumably int() it.
        limit = self.cleaned_data['limit']
        if not limit:
            limit = PAGINATION_LIMIT
        elif not REGEX_NUMERIC.match(str(limit)) or int(limit) < 1:
            limit = PAGINATION_LIMIT
        return limit
class ProfileForm(forms.ModelForm):
    """User-profile edit form: name, photo, groups and address fields."""
    first_name = forms.CharField(label=_lazy(u'First Name'), max_length=30,
                                 required=False)
    last_name = forms.CharField(label=_lazy(u'Last Name'), max_length=30,
                                required=True)
    photo = forms.ImageField(label=_lazy(u'Profile Photo'), required=False)
    photo_delete = forms.BooleanField(label=_lazy(u'Remove Profile Photo'),
                                      required=False)
    # Remote System Ids
    # Tightly coupled with larper.UserSession.form_to_service_ids_attrs
    groups = forms.CharField(label=_lazy(u'Groups'), required=False)
    #: L10n: Street address; not entire address
    street = forms.CharField(label=_lazy(u'Address'), required=False)
    city = forms.CharField(label=_lazy(u'City'), required=False)
    # TODO: Add validation of states/provinces/etc. for known/large countries.
    province = forms.CharField(label=_lazy(u'Province/State'), required=False)
    # TODO: Add list of countries.
    country = forms.CharField(label=_lazy(u'Country'), required=False)
    postal_code = forms.CharField(label=_lazy(u'Postal/Zip Code'),
                                  required=False)

    class Meta:
        model = UserProfile
        fields = ('ircname', 'website', 'bio')
        widgets = {
            'bio': forms.Textarea(),
        }

    def clean_photo(self):
        """Validate/resize the uploaded photo.

        Cribbed from zamboni. Thanks Dave Dash!

        TODO: this needs to go into celery
        - File IT bug for celery
        - Ensure files aren't InMemory files
        - See zamboni.apps.users.forms
        """
        photo = self.cleaned_data['photo']
        if not photo:
            return
        if photo.content_type not in ('image/png', 'image/jpeg'):
            raise forms.ValidationError(
                _('Images must be either PNG or JPG.'))
        if photo.size > settings.MAX_PHOTO_UPLOAD_SIZE:
            raise forms.ValidationError(
                _('Please use images smaller than %dMB.' %
                  (settings.MAX_PHOTO_UPLOAD_SIZE / 1024 / 1024 - 1)))
        im = Image.open(photo)
        # Resize large images
        if any(d > 300 for d in im.size):
            im = processors.scale_and_crop(im, (300, 300), crop=True)
        # BUG FIX: tempfile.mktemp() is race-prone, and the JPEG was
        # written through (and reopened via) text-mode handles, which
        # corrupts binary data on some platforms. Use a real temp file
        # and binary modes throughout.
        with tempfile.NamedTemporaryFile(suffix='.jpg', delete=False) as f:
            im.save(f, 'JPEG')
            fn = f.name
        photo.file = open(fn, 'rb')
        return photo

    def clean_groups(self):
        """Groups are saved in lowercase because it's easy and consistent."""
        if not re.match(r'^[a-zA-Z0-9 .:,-]*$', self.cleaned_data['groups']):
            raise forms.ValidationError(_(u'Groups can only contain '
                                          'alphanumeric characters, dashes, '
                                          'spaces.'))
        return [g.strip() for g in (self.cleaned_data['groups']
                                    .lower().split(','))
                if g and ',' not in g]

    def save(self, request):
        """Save the data to profile."""
        self._save_groups(request)
        self._save_photos(request)
        d = self.cleaned_data
        user = request.user
        user.first_name = d['first_name']
        user.last_name = d['last_name']
        # NOTE(review): super(ModelForm, ...) skips ModelForm.save() in the
        # MRO — this looks deliberate (avoids the default model save), but
        # confirm before "fixing" it to super(ProfileForm, ...).
        super(ModelForm, self).save()
        user.save()

    def _save_groups(self, request):
        """Parse a string of (usually comma-demilited) groups and save them."""
        profile = request.user.get_profile()
        # Remove any non-system groups that weren't supplied in this list.
        profile.groups.remove(*[g for g in profile.groups.all()
                                if g.name not in self.cleaned_data['groups']
                                and not g.system])
        # Add/create the rest of the groups
        groups_to_add = []
        for g in self.cleaned_data['groups']:
            (group, created) = Group.objects.get_or_create(name=g)
            if not group.system:
                groups_to_add.append(group)
        profile.groups.add(*groups_to_add)

    def _save_photos(self, request):
        """Store or delete the profile photo on disk and flag it on the profile."""
        d = self.cleaned_data
        profile = request.user.get_profile()
        if d['photo_delete']:
            profile.photo = False
            try:
                os.remove(profile.get_photo_file())
            except OSError:
                statsd.incr('errors.photo.deletion')
        elif d['photo']:
            profile.photo = True
            # BUG FIX: image bytes must be written in binary mode ('wb'),
            # not text mode ('w').
            with open(profile.get_photo_file(), 'wb') as f:
                f.write(d['photo'].file.read())
        profile.save()
class VouchForm(happyforms.Form):
    """Vouching is captured via a user's id."""

    # Hidden field carrying the id of the user being vouched for.
    vouchee = forms.IntegerField(widget=forms.HiddenInput)
class InviteForm(happyforms.ModelForm):
    """Invite a new person by email, refusing already-registered addresses."""

    def clean_recipient(self):
        """Reject recipients that already have a user account."""
        recipient = self.cleaned_data['recipient']
        # .exists() issues a cheap EXISTS query instead of a full COUNT.
        if User.objects.filter(email=recipient).exists():
            raise forms.ValidationError(_(u'You cannot invite someone who has '
                                          'already been vouched.'))
        return recipient

    def save(self, inviter):
        """Create the invite, stamping the inviting user before saving."""
        invite = super(InviteForm, self).save(commit=False)
        invite.inviter = inviter
        invite.save()
        return invite

    class Meta:
        model = Invite
        exclude = ('redeemer', 'inviter')
|
from django.urls import path
from . import views

# Student API routes: list all, create, and detail lookup by integer id.
urlpatterns = [
    path('all/', views.AllStudent.as_view()),
    path('create/', views.AddStudent.as_view()),
    path('<int:id>/', views.SpecificStudent.as_view()),
]
import cv2
import numpy as np

# Rotate image.jpg by 90 degrees about its centre and display the result.
pic = cv2.imread('image.jpg')
if pic is None:
    # cv2.imread returns None (no exception) when the file is missing or
    # unreadable; fail loudly instead of crashing on pic.shape below.
    raise FileNotFoundError('image.jpg could not be read')
rows, cols = pic.shape[:2]
center = (cols / 2, rows / 2)
angle = 90
M = cv2.getRotationMatrix2D(center, angle, 1)
rotate = cv2.warpAffine(pic, M, (cols, rows))
cv2.imshow('rotated', rotate)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
import os

# SECURITY(review): these credentials are committed to source control and
# should be rotated.  Environment variables now take precedence; the
# literals remain as fallbacks so existing callers keep working.
# OpenWeatherMap API Key
weather_api_key = os.environ.get("OPENWEATHERMAP_API_KEY",
                                 "6c7fb7b754ae7b8788818eddeb5836a4")
# Google Maps API Key
g_key = os.environ.get("GOOGLE_API_KEY",
                       "AIzaSyC9KIH5yzDkZGPgqsqiCKuhl8nIkADdpNM")
|
__version__ = "0.2."
import shutil
import os
import time

# Copies files from input_directory into per-quad subfolders of
# output_directory, driven by a text file: lines containing "*" select the
# destination subfolder for subsequent quad names.
# NOTE(review): Python 2 syntax (print statements).
startTime = time.time()
# parameters
input_directory = '<input_directory>'
# NOTE(review): placeholder reads '<input_directory>' here too — presumably
# should be '<output_directory>'; confirm before use.
output_directory = '<input_directory>'
text_file = '<text_file>'
f = open(text_file,'r')
dst_folder = ""
instring = f.read()
quad_list =str(instring).split("\n")
# NOTE(review): the inner loop variable shadows the open file handle `f`,
# which is also never closed.
for q in quad_list:
    for f in os.listdir(input_directory):
        if "*" in q:
            dst_folder = os.path.join(output_directory,q.replace("*",""))
        else:
            src_file = os.path.join(input_directory,f)
            dst_file = os.path.join(dst_folder,f)
            quad_name = f.split(".",1)[0]
            file_extension = f.split(".",1)[1]
            if quad_name == q:
                if not os.path.exists(dst_folder):
                    os.makedirs(dst_folder)
                try:
                    print "Copying:", src_file
                    shutil.copy(src_file,dst_file)
                    print "Successfully copied:", src_file
                # NOTE(review): bare except hides real failures (e.g. ENOSPC).
                except:
                    print "Error in copying:", src_file
endTime = time.time() # End timing
print '\nElapsed Time:', str("{0:.2f}".format(round(endTime - startTime,2))), 'seconds'
|
from Paragraphs.GridParagraph import GridParagraph
from Paragraphs.TextParagraph import TextParagraph
import pytest
@pytest.allure.feature('Paragraphs')
@pytest.allure.story('Grid paragraph')
@pytest.mark.usefixtures('init_page')
class TestGridParagraph:
    """Selenium UI tests for the grid paragraph type.

    The ``init_page`` fixture is expected to provide ``self.node`` (page
    object for node editing) and ``self.driver`` (the webdriver).
    """

    @pytest.allure.title('VDM-936 Grid paragraph - creation')
    def test_grid_creating(self):
        """Create a grid with a nested text paragraph and verify it renders."""
        self.node.fill_page_ct_mandatory()
        self.node.add_paragraph('grid')
        grid_paragraph = GridParagraph(self.driver)
        grid_paragraph.fill_grid()
        grid_paragraph.add_paragraph('text')
        text_paragraph = TextParagraph(self.driver)
        text_paragraph.fill_text_paragraph()
        url = self.driver.current_url
        self.node.save_node()
        # A changed URL indicates a successful save/redirect.
        assert self.driver.current_url != url
        assert grid_paragraph.grid_data['grid_title'] in self.driver.page_source
        assert grid_paragraph.get_grid_columns_class().is_displayed()
        assert text_paragraph.text_test_data['description'] in self.driver.page_source
        self.node.delete_node()

    @pytest.allure.title('VDM-931 Grid paragraph - empty data')
    def test_grid_empty_fields(self):
        """Saving with an untouched grid must fail validation and stay put."""
        self.node.fill_page_ct_mandatory()
        self.node.add_paragraph('grid')
        grid_paragraph = GridParagraph(self.driver)
        grid_paragraph.get_grid_title()
        url = self.driver.current_url
        self.node.save_node()
        # Unchanged URL means the save was rejected.
        assert self.driver.current_url == url
        assert self.node.get_error_msg()

    @pytest.allure.title('VDM-??? Grid paragraph - check fields existing')
    def test_grid_fields_existing(self):
        """All expected grid form fields are present on the edit form."""
        self.node.fill_page_ct_mandatory()
        self.node.add_paragraph('grid')
        grid_paragraph = GridParagraph(self.driver)
        grid_paragraph.get_admin_title().is_displayed()
        grid_paragraph.get_grid_title().is_displayed()
        grid_paragraph.get_background().first_selected_option
        grid_paragraph.get_grid_columns().first_selected_option
        grid_paragraph.get_grid_dd().is_displayed()

    @pytest.allure.title('VDM-938 Grid paragraph - check background changing')
    def test_grid_background_changing(self):
        """A saved grid shows the selected gray background theme."""
        self.node.fill_page_ct_mandatory()
        self.node.add_paragraph('grid')
        grid_paragraph = GridParagraph(self.driver)
        grid_paragraph.fill_grid()
        grid_paragraph.add_paragraph('text')
        text_paragraph = TextParagraph(self.driver)
        text_paragraph.fill_text_paragraph_description()
        url = self.driver.current_url
        self.node.save_node()
        assert self.driver.current_url != url
        assert grid_paragraph.get_gray_background_theme().is_displayed()
        self.node.delete_node()
|
#!/usr/bin/python
"""Compare failed tests in CTS/VTS test_result.xml.
Given two test_result.xml's (A and B), this script lists all failed tests in A,
and shows result of the same test in B.
"""
import argparse
import collections
import csv
import xml.etree.ElementTree as ET
PASS = 'pass'
FAIL = 'fail'
NO_DATA = 'no_data'
ATTRS_TO_SHOW = ['Result::Build.build_model',
'Result::Build.build_id',
'Result.suite_name',
'Result.suite_plan',
'Result.suite_build_number',
'Result.start_display',
'Result::Build.build_abis_32',
'Result::Build.build_abis_64',]
def parse_attrib_path(attrib_path):
    """Split 'Tag::SubTag.attr_name' into (tag list, attribute name).

    Raises ValueError when *attrib_path* contains no '.'.
    """
    dot = attrib_path.index('.')
    return attrib_path[:dot].split('::'), attrib_path[dot + 1:]
def get_test_info(root):
    """Get test info from test_result.xml.

    For each attribute path in ATTRS_TO_SHOW, walks the tag chain under
    *root* and collects the named XML attribute into an ordered dict.
    """
    test_info = collections.OrderedDict()
    for attrib_path in ATTRS_TO_SHOW:
        tags, attr_name = parse_attrib_path(attrib_path)
        node = root
        # The first tag names the root element itself, so it is dropped
        # before each find(); remaining tags descend one level each.
        while True:
            tags = tags[1:]
            if tags:
                node = node.find(tags[0])
            else:
                break
        test_info[attr_name] = node.attrib[attr_name]
    return test_info
def print_test_infos(test_result_a, test_result_b):
    """Print test infomation of both results in table format.

    NOTE(review): Python 2 print statements.
    """
    info_a = test_result_a['info']
    info_b = test_result_b['info']
    # Column widths sized to the longest key/value in each column.
    max_key_len = max([len(k) for k in info_a])
    max_value_a_len = max([len(info_a[k]) for k in info_a])
    max_value_b_len = max([len(info_b[k]) for k in info_b])
    table_len = (max_key_len + 2 + max_value_a_len + 2 + max_value_b_len)
    line_format = '{:{}} {:{}} {}'
    print '=' * table_len
    for key in info_a:
        print line_format.format(key, max_key_len,
                                 info_a[key], max_value_a_len,
                                 info_b[key])
    print '=' * table_len
    print
def get_result(test_result, module_name, testcase_name, test_name):
    """Get result of specifc module, testcase and test name.

    Returns NO_DATA when any level is missing, otherwise a comma-joined
    'abi: result' listing.
    """
    tests = (test_result['modules']
             .get(module_name, {})
             .get(testcase_name, {}))
    if test_name not in tests:
        return NO_DATA
    abi_results = tests[test_name]
    return ', '.join('%s: %s' % (abi, res) for abi, res in abi_results.items())
def read_test_result_xml(test_result_path):
    """Given the path to a test_result.xml, read that into a ordered dict.

    Resulting layout: result['modules'][module][testcase][test][abi] -> str.
    Both ABIs of a module are merged under one module name.
    """
    tree = ET.parse(test_result_path)
    root = tree.getroot()
    test_result = collections.OrderedDict()
    test_result['info'] = get_test_info(root)
    modules = collections.OrderedDict()
    test_result['modules'] = modules
    for module in root.iter('Module'):
        abi = module.attrib['abi']
        module_name = module.attrib['name']
        if not module_name in modules:
            modules[module_name] = collections.OrderedDict()
        testcases = modules[module_name]
        for testcase in module.iter('TestCase'):
            testcase_name = testcase.attrib['name']
            if not testcase_name in testcases:
                testcases[testcase_name] = collections.OrderedDict()
            tests = testcases[testcase_name]
            for test in testcase.iter('Test'):
                test_name = test.attrib['name']
                if not test_name in tests:
                    tests[test_name] = collections.OrderedDict()
                # Same test seen twice for one ABI: keep the last result.
                if abi in tests[test_name]:
                    print '[WARNING] duplicated test:', test_name
                tests[test_name][abi] = test.attrib['result']
    return test_result
def compare_failed_tests(test_result_a, test_result_b, csvfile):
    """Do the comparison.
    Given two test result dicts (A and B), list all failed test in A and display
    result of the same test in B.
    Args:
      test_result_a: the dict returned from read_test_result(test_result_a.xml)
      test_result_b: the dict returned from read_test_result(test_result_b.xml)
      csvfile: a opened file
    Returns:
      string: diff report, summary
    """
    writer = csv.writer(csvfile)
    writer.writerow(['module', 'testcase', 'test', 'result in B'])
    summary = ''
    modules = test_result_a['modules']
    # NOTE(review): dict.iteritems() is Python 2 only.
    for module_name, testcases in modules.iteritems():
        module_sub_summary = ''
        for testcase_name, tests in testcases.iteritems():
            testcase_sub_summary = ''
            for test_name, result in tests.iteritems():
                # A test counts as failed if any ABI reported FAIL.
                if FAIL in result.values():
                    result_b = get_result(
                        test_result_b, module_name, testcase_name, test_name)
                    testcase_sub_summary += ' ' + test_name + ': ' + result_b + '\n'
                    writer.writerow([module_name, testcase_name, test_name, result_b])
            if testcase_sub_summary:
                # NOTE(review): '=' (not '+=') keeps only the LAST failing
                # testcase of each module in the summary — looks like a bug.
                module_sub_summary = ' ' + testcase_name + '\n' + testcase_sub_summary
        if module_sub_summary:
            summary += module_name + '\n' + module_sub_summary + '\n'
    return summary
def main():
    """Parse CLI args, diff the two result files, write CSV, print summary."""
    parser = argparse.ArgumentParser()
    parser.add_argument('test_result_a', help='path to first test_result.xml')
    parser.add_argument('test_result_b', help='path to second test_result.xml')
    parser.add_argument('--csv', default='diff.csv', help='path to csv output')
    args = parser.parse_args()
    test_result_a = read_test_result_xml(args.test_result_a)
    test_result_b = read_test_result_xml(args.test_result_b)
    print_test_infos(test_result_a, test_result_b)
    with open(args.csv, 'w') as csvfile:
        summary = compare_failed_tests(test_result_a, test_result_b, csvfile)
    print summary
if __name__ == '__main__':
    main()
|
from django.db import models
# Create your models here.
class Lesson(models.Model):
    """A lesson with a display name, optional start time and a cover image."""

    name = models.CharField(max_length=100)
    # Optional: lessons may be created before they are scheduled.
    time = models.DateTimeField(null=True, blank=True)
    image = models.ImageField(upload_to='images/')
# @Title: 平衡二叉树 (Balanced Binary Tree)
# @Author: 2464512446@qq.com
# @Date: 2020-10-11 18:37:56
# @Runtime: 44 ms
# @Memory: 18.1 MB
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def isBalanced(self, root: TreeNode) -> bool:
        """Return True when every node's subtrees differ in height by <= 1."""

        def height_or_flag(node):
            # Height of the subtree, or -1 as soon as an imbalance is found,
            # which short-circuits the rest of the traversal.
            if node is None:
                return 0
            lh = height_or_flag(node.left)
            if lh < 0:
                return -1
            rh = height_or_flag(node.right)
            if rh < 0:
                return -1
            if abs(lh - rh) > 1:
                return -1
            return max(lh, rh) + 1

        return height_or_flag(root) >= 0
|
def main():
    """Find the first triangular number with more than 500 divisors.

    NOTE(review): Python 2 (print statement, iterator .next()); the
    `encontrado` flag is never used.
    """
    encontrado = False
    cant_divisores = 0
    contador = count(1)
    while cant_divisores <= 500:
        triangular = mf.triangular_nro(contador.next())
        cant_divisores = len(mf.factores_de(triangular))
    print triangular, cant_divisores
if __name__ == '__main__':
    import mis_funciones as mf
    from itertools import count
    main()
|
#!/usr/bin/env python
# coding: utf-8
from bs4 import BeautifulSoup as bs
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
import json
from time import sleep
options = Options()
options.headless = True
myname = "*"
mypass = "*"
driver_path = "~/Downloads/geckodriver"
login_page = "https://turkcealtyazi.org/login.php"
home_page = "https://turkcealtyazi.org/index.php"
voted_page = "https://turkcealtyazi.org/myvotes.php?p=1"
def login(driver):
    """Log in to turkcealtyazi.org with the module-level credentials.

    Prints a confirmation when the browser lands on the home page afterwards.
    NOTE(review): nothing is raised on failed login — callers continue blindly.
    """
    username = myname
    password = mypass
    driver = driver
    driver.get(login_page)
    username_textbox = driver.find_element_by_css_selector(
        "input.regclass:nth-child(3)"
    )
    username_textbox.send_keys(username)
    password_textbox = driver.find_element_by_css_selector(
        "input.regclass:nth-child(7)"
    )
    password_textbox.send_keys(password)
    login_button = driver.find_element_by_css_selector(
        ".nblock > div:nth-child(2) > div:nth-child(8) > div:nth-child(2) > input:nth-child(2)"
    )
    login_button.click()
    if driver.current_url == home_page:
        print("Login Successful!")
def getContent(link, driver):
    """Navigate *driver* to *link* and return the resulting page source."""
    driver.get(link)
    return driver.page_source
def getPoint(link, driver):
    """Load a subtitle page and return the user's vote as a float (0 if none).

    Bug fix: the original called .find_next() on the result of soup.find()
    before checking for None, so a page without the 'nPuanDel' span raised
    AttributeError instead of yielding 0.
    """
    driver.get(link)
    soup = bs(driver.page_source, "lxml")
    marker = soup.find("span", attrs={"class": "nPuanDel"})
    point = marker.find_next("span") if marker else None
    if point:
        return float(point.text)
    return 0
def saveToJson(movieDict, name):
    """Serialize *movieDict* to votedjsons/<name>.json, overwriting it.

    The original also built an indented JSON string via json.dumps and then
    discarded it; that dead code is removed.  Output format is unchanged.
    """
    # if we want to add an existing file we must write 'a' otherwise 'w'
    with open(f"votedjsons/{name}.json", "w") as outfile:
        json.dump(movieDict, outfile)
def extract_films(content, movieList, driver):
    """Parse one "my votes" page and append film dicts onto *movieList*.

    Fetches each film's detail page for the user's vote, sleeping 5 seconds
    per film to avoid hammering the site.
    """
    soup = bs(content, "lxml")
    films = soup.find_all("td", attrs={"width": "25%"})
    for film in films:
        # The last anchor in the cell holds the film title and link.
        title = film.find_all("a")[-1].get("title")
        link = film.find_all("a")[-1].get("href")
        film_id = link.split("/")[-2]
        print(title)
        exact_link = f"https://turkcealtyazi.org{link}"
        my_puan = getPoint(exact_link, driver)
        sleep(5)
        movieList.append(
            {"Title": title, "Link": exact_link, "Id": film_id, "Puanım": my_puan}
        )
def main():
    """Scrape all voted-film pages and dump them to per-page JSON snapshots
    plus a final combined file."""
    driver = webdriver.Firefox(options=options, executable_path=driver_path)
    login(driver)
    # NOTE(review): page count is hard-coded; update if the vote list grows.
    total_pages = 16
    movieDict = dict()
    movieDict["Movies"] = list()
    for page in range(1, total_pages + 1):
        print(f"Now page: {page}")
        link = f"https://turkcealtyazi.org/myvotes.php?p={page}"
        content = getContent(link, driver)
        extract_films(content, movieDict["Movies"], driver)
        saveToJson(movieDict, f'Page{page}')
        sleep(10)
    saveToJson(movieDict, "MyVotesFinal")
    # NOTE(review): relies on `content` surviving from the last loop
    # iteration; NameError if total_pages were ever 0.
    s = bs(content, "lxml")
    film_adet = s.find("td", attrs={"height": "30"}).text
    print(film_adet)
    driver.quit()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from rest_framework.serializers import ModelSerializer
from .models import *
from datetime import datetime,date,time
class billgroupserializer(ModelSerializer):
    """Serializes every field of the BillGroup model."""

    class Meta:
        model = BillGroup
        fields = '__all__'
class billgenerateserializer(ModelSerializer):
    """Serializes every field of the Bill model."""

    class Meta:
        model = Bill
        fields = '__all__'
'''
Eric Eckert
eric95
'''
import sys
from queue import PriorityQueue
# DO NOT CHANGE THIS SECTION
if sys.argv==[''] or len(sys.argv)<2:
import EightPuzzleWithHeuristics as Problem
heuristics = lambda s: Problem.HEURISTICS['h_manhattan'](s)
else:
import importlib
Problem = importlib.import_module(sys.argv[1])
heuristics = lambda s: Problem.HEURISTICS[sys.argv[2]](s)
print("\nWelcome to AStar")
COUNT = None
BACKLINKS = {}
# DO NOT CHANGE THIS SECTION
def runAStar():
    """Build the problem's initial state, run AStar, report states examined.

    Returns (path, problem_name) from AStar.
    """
    #initial_state = Problem.CREATE_INITIAL_STATE(keyVal)
    initial_state = Problem.CREATE_INITIAL_STATE()
    print("Initial State:")
    print(initial_state)
    # Reset the search bookkeeping before each run.
    global COUNT, BACKLINKS
    COUNT = 0
    BACKLINKS = {}
    path, name = AStar(initial_state)
    print(str(COUNT)+" states examined.")
    return path, name
def AStar(initial_state):
    """Best-first search driven by the module-level `heuristics` function.

    Returns (path, problem_name) when a goal is reached; falls off the end
    (returns None) if OPEN is exhausted.
    NOTE(review): the priority is h(s) alone — path cost g is never added,
    so despite the name this is greedy best-first, not A*.
    """
    global COUNT, BACKLINKS
    # priority queue with respective priority
    # add any auxiliary data structures as needed
    OPEN = PriorityQueue()
    OPEN.put((heuristics(initial_state), initial_state))
    OPENlist = [initial_state]
    #OPEN.put(initial_state)
    CLOSED = []
    BACKLINKS[initial_state] = -1
    prioritycount = 0
    while not OPEN.empty():
        S = OPEN.get()[1]
        OPENlist.remove(S)
        # NOTE(review): Queue.get() blocks forever if OPEN empties while
        # skipping CLOSED states; ties in h also require states to be
        # comparable or put() raises TypeError.
        while S in CLOSED:
            S = OPEN.get()[1]
        CLOSED.append(S)
        # DO NOT CHANGE THIS SECTION: begining
        if Problem.GOAL_TEST(S):
            print(Problem.GOAL_MESSAGE_FUNCTION(S))
            path = backtrace(S)
            return path, Problem.PROBLEM_NAME
        # DO NOT CHANGE THIS SECTION: end
        COUNT += 1
        for op in Problem.OPERATORS:
            # NOTE(review): prioritycount is accumulated but never used.
            prioritycount += 2
            #print(prioritycount)
            #print("Trying operator: "+op.name)
            if op.precond(S):
                new_state = op.state_transf(S)
                if not (new_state in CLOSED) and not (new_state in OPENlist):
                    #print(heuristics(new_state) +prioritycount)
                    #print(new_state)
                    #print(OPEN.qsize())
                    OPEN.put((heuristics(new_state), new_state))
                    OPENlist.append(new_state)
                    BACKLINKS[new_state] = S
                    #OPEN.put(new_state)
# DO NOT CHANGE
def backtrace(S):
    """Follow BACKLINKS from goal *S* back to the start (-1 sentinel),
    print the solution path, and return it start-to-goal."""
    global BACKLINKS
    chain = []
    node = S
    while node != -1:
        chain.append(node)
        node = BACKLINKS[node]
    chain.reverse()
    print("Solution path: ")
    for state in chain:
        print(state)
    print("\nPath length = "+str(len(chain)-1))
    return chain
if __name__=='__main__':
path, name = runAStar()
|
'''
Check if Linked List is Palindrome or not
Given the head of a Singly LinkedList, write a method to check if the LinkedList is a palindrome or not.
Your algorithm should use constant space and the input LinkedList should be in the original form once the algorithm is finished. The algorithm should have O(N)O(N) time complexity where ‘N’ is the number of nodes in the LinkedList.
Example 1:
Input: 2 -> 4 -> 6 -> 4 -> 2 -> null
Output: true
Example 2:
Input: 2 -> 4 -> 6 -> 4 -> 2 -> 2 -> null
Output: false
'''
class Node:
    """Singly linked list node holding a value and a next pointer."""

    def __init__(self, value, next=None):
        self.value = value
        self.next = next
def find_middle_node(head):
    """Return the middle node (the second of the two middles when the list
    has an even number of nodes) using the slow/fast pointer technique."""
    slow, fast = head, head
    while fast is not None and fast.next is not None:
        slow = slow.next
        fast = fast.next.next
    return slow
def reverse(node):
    """Reverse the list starting at *node* in place; return the new head."""
    prev, curr = None, node
    while curr is not None:
        following = curr.next
        curr.next = prev
        prev, curr = curr, following
    return prev
def is_palindrome(head):
    """Return True if the linked list reads the same forwards and backwards.

    O(1) extra space: the second half is reversed in place, compared against
    the first half, then reversed back so the list is left in its original
    form.
    """
    if head is None:
        return True
    # Reverse from the middle so both halves can be walked front-to-back.
    second = reverse(find_middle_node(head))
    second_start = second
    left = head
    while left is not None and second is not None:
        if left.value != second.value:
            break
        left = left.next
        second = second.next
    # Restore the second half to its original order.
    reverse(second_start)
    # Both cursors exhausted means every compared pair matched.
    return left is None and second is None
def main():
    """Smoke test: an odd-length palindrome, then append a 2 to break it."""
    head = Node(2)
    head.next = Node(4)
    head.next.next = Node(6)
    head.next.next.next = Node(4)
    head.next.next.next.next = Node(2)
    print("Is palindrome: " + str(is_palindrome(head)))
    head.next.next.next.next.next = Node(2)
    print("Is palindrome: " + str(is_palindrome(head)))
if __name__ == "__main__":
    main()
|
__author__ = "Dohoon Lee"
__copyright__ = "Copyright 2018, Dohoon Lee"
__email__ = "dohlee.bioinfo@gmail.com"
__license__ = "MIT"
from snakemake.shell import shell
# Extract log.
log = snakemake.log_fmt_shell(stdout=False, stderr=True)
def optionify_input(parameter, option):
    """Return '<option> <value>' for a named snakemake input, or '' if unset.

    An AttributeError from the snakemake input lookup (name not declared)
    also yields ''.
    """
    try:
        param = str(snakemake.input[parameter])
    except AttributeError:
        return ''
    # Reuse the already-computed string instead of indexing the input again.
    return option + ' ' + param if param else ''
def optionify_params(parameter, option):
    """Return '<option> <value>' for a named snakemake param, or '' if unset.

    Mirrors optionify_input but reads snakemake.params; an AttributeError
    from the lookup also yields ''.
    """
    try:
        param = str(snakemake.params[parameter])
    except AttributeError:
        return ''
    # Reuse the already-computed string instead of indexing params again.
    return option + ' ' + param if param else ''
# Extract required inputs.
reads = snakemake.input.reads
if isinstance(reads, str):
    reads = [reads]
assert len(reads) in [1, 2], "Currently star/2-pass wrapper only supports single-sample analysis."
star_index = snakemake.input.star_index
# Extract required outputs.
output_sorted_bam = snakemake.output[0]
output_prefix = output_sorted_bam[:-10] # Strip tailing 'sorted.bam'.
# Extract parameters.
# Extract optional parameters.
extra = snakemake.params.get('extra', '')
# If gzipped reads are given, but user did not specify readFilesCommand option,
# kindly add the option.
read_files_command = '--readFilesCommand cat'
if reads[0].endswith('.gz') and '--readFilesCommand' not in extra:
    read_files_command = '--readFilesCommand zcat'
# Optional splice-junction database flags are emitted only when the
# corresponding snakemake.params entries exist and are non-empty.
sjdb_gtf_file = optionify_params('sjdb_gtf_file', '--sjdbGTFfile')
sjdb_overhang = optionify_params('sjdb_overhang', '--sjdbOverhang')
sjdb_gtf_chr_prefix = optionify_params('sjdb_gtf_chr_prefix', '--sjdbGTFchrPrefix')
sjdb_gtf_tag_exon_parent_transcript = optionify_params('sjdb_gtf_tag_exon_parent_transcript', '--sjdbGTFtagExonParentTranscript')
# rename {output_prefix}Aligned.sortedByCoord.out.bam into {output_prefix}.sorted.bam
rename_command = '&& mv {prefix}Aligned.sortedByCoord.out.bam {prefix}sorted.bam'.format(prefix=output_prefix)
# Execute shell command.
shell(
    "("
    "STAR "
    "--runMode alignReads "
    "--twopassMode Basic "
    "--runThreadN {snakemake.threads} "
    "--readFilesIn {reads} "
    "--genomeDir {star_index} "
    "--outFileNamePrefix {output_prefix} "
    "--outSAMtype BAM SortedByCoordinate "
    "{read_files_command} "
    "{extra} "
    "{sjdb_gtf_file} "
    "{sjdb_overhang} "
    "{sjdb_gtf_chr_prefix} "
    "{sjdb_gtf_tag_exon_parent_transcript} "
    "{rename_command} "
    ") "
    "{log}"
)
|
class BadExecutableError(Exception):
    """Raised for an invalid or unusable executable."""
    def __init__(self, message = ''):
        # Fix: forward to Exception so str(exc) / exc.args carry the message
        # instead of being empty; the diagnostic print is kept for back-compat.
        Exception.__init__(self, message)
        print 'Error: ' + message
class BadExtensionError(Exception):
    """Raised for a file with an unexpected extension."""
    def __init__(self, message = ''):
        Exception.__init__(self, message)
        print 'Error: ' + message
class LFSError(Exception):
    """Raised for LFS-related failures."""
    def __init__(self, message = ''):
        Exception.__init__(self, message)
        print 'Error: ' + message
class ReleaseError(Exception):
    """Raised for release-process failures."""
    def __init__(self, message = ''):
        Exception.__init__(self, message)
        print 'Error: ' + message
|
import pytest
import numpy as np
# test the encode and decode funcs
black = np.zeros((100, 100, 3))
sum_black = np.sum(black)
white = np.ones((100, 100, 3))*255
sum_white = np.sum(white)
@pytest.mark.parametrize("img, expected", [
    (black, sum_black),
    (white, sum_white), ])
def test_encode_decode(img, expected):
    """Round-trip an image through string encoding and check pixels survive.

    Bug fix: the original asserted on the sum of the *input* image, which is
    trivially equal to *expected*; the assertion now covers the decoded image.
    """
    from encode_decode import imgArray2str
    from encode_decode import str2imgArray
    # turn to string and back to img array
    img_str = imgArray2str(img)
    imgback = str2imgArray(img_str)
    # Swap channels 0 and 2 (presumably undoing a BGR<->RGB flip — confirm).
    temp = imgback.copy()
    temp[:, :, 0] = imgback[:, :, 2]
    temp[:, :, 2] = imgback[:, :, 0]
    imgback = temp
    summa = np.sum(imgback)
    assert summa == expected
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the queensAttack function below.
def queensAttack(n, k, rq, cq, obs):
ans=0
for i in range(rq,n+1):
if [i,cq] in obs:
break
ans+=1
for i in range(min(rq-n,cq-n)):
if [rq+i+1,cq+i+1] in obs:
break
ans+=1
for i in range(cq,n+1):
if [rq,i] in obs:
break
ans+=1
for i in range(min(abs(rq-1),abs(cq-n))):
if [rq-i-1,cq+i+1] in obs:
break
ans+=1
for i in range(rq,0,-1):
if [i,cq] in obs:
break
ans+=1
for i in range(min(abs(rq-1),abs(cq-1))):
if [rq-i-1,cq-i-1] in obs:
break
ans+=1
for i in range(cq,0,-1):
if [rq,i] in obs:
break
ans+=1
for i in range(min(abs(rq-1),abs(cq-1))):
if [rq+i+1,cq-i-1] in obs:
break
ans+=1
return ans
if __name__ == '__main__':
    # HackerRank harness: read board size, queen position and obstacles from
    # stdin; write the answer to the file named by OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    nk = input().split()
    n = int(nk[0])
    k = int(nk[1])
    r_qC_q = input().split()
    r_q = int(r_qC_q[0])
    c_q = int(r_qC_q[1])
    obstacles = []
    for _ in range(k):
        obstacles.append(list(map(int, input().rstrip().split())))
    result = queensAttack(n, k, r_q, c_q, obstacles)
    fptr.write(str(result) + '\n')
    fptr.close()
|
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
def plot_graph(dataset_name='DEMOAPP/Data/CleanedData.csv'):
    """Render the dashboard graphs from the cleaned dataset, saving each as a
    transparent PNG under DEMOAPP/static/images/graphs/.

    Graphs produced: category histogram, android-version vs rating scatter,
    ratings density, content-rating donut, price-by-category bar,
    genre-installs line, and android-version density.
    """
    df = pd.read_csv(dataset_name)
    #Category front graph
    plt.figure(figsize=(30, 10))
    fig=sns.set_style({'axes.spines.top':False,'axes.spines.right': False,'axes.spines.left': False,'axes.spines.bottom': False,'font.family': 'frutiger'})
    fig = sns.countplot(x=df['Category'], palette="hls")
    plt.tick_params(labelcolor='white')
    plt.yticks(fontsize=15)
    fig.set_xticklabels(fig.get_xticklabels(), rotation=90, fontsize=18)
    fig.set_ylabel('Frequency', color='white', fontsize=35)
    fig.grid(False)
    plt.savefig('DEMOAPP/static/images/graphs/categoryHistogram.png', transparent=True, pad_inches=0, bbox_inches='tight')
    #Android Version vs Rating Scatter
    plt.figure(figsize=(12, 10))
    scatterfig=sns.set()
    scatterfig = sns.set_style(
        {'axes.spines.top': False, 'axes.spines.right': False, 'axes.spines.left': False, 'axes.spines.bottom': False})
    scatterfig = sns.scatterplot(x='Android Version', y='Rating',color = 'purple',data=df)
    plt.tick_params(labelcolor='white',labelsize=15)
    scatterfig.set_ylabel('Rating', color='white', fontsize=30)
    scatterfig.set_xlabel('Android Version', color='white',fontsize=30)
    scatterfig.grid(False)
    plt.title('Android Version vs Rating',color='white',fontsize=35,pad=20)
    plt.savefig('DEMOAPP/static/images/graphs/androidRatingScatter.png', transparent=True, pad_inches=0, bbox_inches= 'tight' )
    #Ratings Histogram Graph
    plt.figure(figsize=(12, 11))
    g = sns.set_style(
        {'axes.spines.left': False, 'axes.spines.right': False, 'axes.spines.top': False, 'axes.spines.bottom': False})
    g = sns.kdeplot(df.Rating, color='#CCFF99', shade=True)
    #g.set_xlabel('Ratings', color='white', fontsize=25, labelpad=20)
    plt.title('Ratings Histogram', color='white', fontsize=45, pad=20)
    plt.legend(fontsize=20)
    # NOTE(review): hard-coded tick labels will silently mislabel the axes if
    # the data range changes.
    g.set_xticklabels(labels=['0', '1', '2', '3', '4', '5'], color='white', fontsize=20)
    g.set_yticklabels(labels=['0.0', '0.2', '0.4', '0.6', '0.8', '1.0', '1.2'], color='white', fontsize=15)
    g.grid(False)
    plt.savefig('DEMOAPP/static/images/graphs/ratingHistogram.png', transparent=True, pad_inches=0, bbox_inches='tight')
    #PIECHART
    plt.figure(figsize=(10,11))
    # The inner circle turns the pie into a donut chart.
    my_circle=plt.Circle( (0,0), 0.7, color = '#051116')
    ax=df['Content Rating'].value_counts().plot(kind='pie',fontsize=12,labels=None,wedgeprops={"edgecolor":(0.10,0.10,0.15),'linewidth': 1, 'linestyle': '-', 'antialiased': True},colors = ['yellowgreen',(0.68,0.31,1),'gold', 'lightskyblue', 'lightcoral'])
    labels=['Everyone', 'Teen', 'Everyone 10', 'Mature', 'Adults only 18','Unrated']
    plt.legend(labels, loc='best',fontsize = 'medium')
    plt.axis('off')
    plt.title('Content Rating',fontsize=40,color = 'white',pad=20)
    p=plt.gcf()
    p.gca().add_artist(my_circle)
    plt.savefig('DEMOAPP/static/images/graphs/contentRatingPie.png', transparent=True,pad_inches=0,bbox_inches='tight')
    #Category vs Price
    plt.figure(figsize=(13, 11))
    ax = df.groupby('Category')['Price'].mean().nlargest(10).sort_values().plot('bar', width=0.6, color='#E179CC',
                                                                                fontsize=17)
    ax.tick_params(axis='x', colors='white')
    ax.tick_params(axis='y', colors='white')
    ax1 = plt.axes()
    x_axis = ax1.xaxis
    x_axis.set_label_text('foo')
    x_axis.label.set_visible(False)
    plt.title('Price vs Category', color='white', fontsize=50,pad=0)
    ax.grid(False)
    plt.savefig('DEMOAPP/static/images/graphs/categoryPriceBar.png', transparent=True,pad_inches=0,bbox_inches='tight')
    #Genre vs Installs Line
    plt.figure(figsize=(25, 7))
    ax = sns.set_style(
        {'axes.spines.left': False, 'axes.spines.right': False, 'axes.spines.top': False, 'axes.spines.bottom': False})
    ax = df.groupby('Genres')['Installs'].sum().nlargest(10).sort_values().plot('line', color='gold', fontsize=20,
                                                                               linewidth=5)
    ax.set_xticklabels(ax.get_xticklabels(), fontsize=20, color='white')
    plt.tick_params(labelcolor='white')
    plt.title('Genres vs Installs', color='white', fontsize=35)
    plt.xlabel('Genres', color='#051116')
    plt.ylabel('Installs', color='white', fontsize=20)
    ax.grid(False)
    plt.savefig('DEMOAPP/static/images/graphs/genreInstallsLine.png', transparent=True, bbox_inches='tight')
    #Android Version Histogram Density
    plt.figure(figsize=(12, 10))
    # value = df['Android Version'].value_counts()
    line2 = sns.kdeplot(df['Android Version'],
                        color='#39EFFF',shade=True)
    plt.legend(fontsize=15)
    plt.tick_params(labelcolor='white', labelsize=15)
    plt.xlabel('Android Version', color='white', fontsize=25)
    plt.ylabel('Frequency', color='white', fontsize=20)
    plt.title('Android Version Frequency', color='white', fontsize=35,pad=10)
    plt.grid(False)
    plt.savefig('DEMOAPP/static/images/graphs/androidHistogramLine.png', transparent=True, pad_inches=0, bbox_inches='tight')
|
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import rc, rcParams
import matplotlib.units as units
import matplotlib.ticker as ticker
#rc('text',usetex=True)
#rc('font',**{'family':'serif','serif':['Woods-Saxon potential']})
#font = {'family' : 'serif',
# 'color' : 'darkred',
# 'weight' : 'normal',
# 'size' : 16,
# }
# Woods-Saxon nuclear potential parameters (MeV / fm).
v0 = 50
A = 100
a = 0.5
r0 = 1.25
# Nuclear radius R = r0 * A^(1/3); 0.3333 approximates 1/3.
R = r0*A**(0.3333)
x = np.linspace(0.0, 10.0)
y = -v0/(1+np.exp((x-R)/a))
plt.plot(x, y, 'b-')
plt.title(r'Woods-Saxon potential', fontsize=20)
# NOTE(review): the annotation claims A=20 but the code sets A=100 —
# confirm which is intended and make them agree.
plt.text(3, -40, r'Parameters: $A=20$, $V_0=50$ [MeV]')
plt.text(3, -44, r'$a=0.5$ [fm], $r_0=1.25$ [fm]')
plt.xlabel(r'$r$ [fm]',fontsize=20)
plt.ylabel(r'$V(r)$ [MeV]',fontsize=20)
# Tweak spacing to prevent clipping of ylabel
plt.subplots_adjust(left=0.15)
plt.savefig('woodsaxon.pdf', format='pdf')
plt.show()
import pytest
from src.app.domain import commands
from src.app.service import messagebus
from src.app.service.messagebus import NotEventOrCommandException
from src.app.service.unit_of_work import FakeWarehouseUnitOfWork
def test_create_new_warehouse():
    """Creating a warehouse stores it, logs one event, and commits the UoW."""
    uow = FakeWarehouseUnitOfWork()
    messagebus.handle(commands.CreateWarehouse("Bodega-1"), uow)
    assert len(uow.warehouses.get_all()) == 1
    assert len(uow.logger.get_all()) == 1
    assert uow.committed
def test_allocate_space_in_a_warehouse():
    """Allocating a space in an existing warehouse registers exactly one."""
    uow = FakeWarehouseUnitOfWork()
    messagebus.handle(commands.CreateWarehouse("Bodega-1"), uow)
    messagebus.handle(commands.AllocateSpace("Bodega-1", "Espacio-1", 80, 1), uow)
    assert len(uow.warehouses.get("Bodega-1").list_allocated_spaces()) == 1
def test_allocate_product_in_a_warehouse_space():
    """A product allocated to a valid space appears in that space's list."""
    uow = FakeWarehouseUnitOfWork()
    messagebus.handle(commands.CreateWarehouse("Bodega-1"), uow)
    messagebus.handle(commands.AllocateSpace("Bodega-1", "Espacio-1", 80, 1), uow)
    messagebus.handle(commands.AllocateProduct("Espacio-1", "prod-001", "FASHION GLASSES", 0.5, 500, 3, "ol-001",
                                               "Bodega-1"), uow)
    assert len(uow.warehouses.get("Bodega-1").list_allocated_spaces()[0].list_prod()) == 1
def test_exception_in_allocate_product_in_a_wrong_warehouse_space():
    """NOTE(review): despite its name, this is an exact copy of the happy-path
    test above — it never uses a wrong space id nor asserts an exception.
    It needs an invalid space plus a pytest.raises block."""
    uow = FakeWarehouseUnitOfWork()
    messagebus.handle(commands.CreateWarehouse("Bodega-1"), uow)
    messagebus.handle(commands.AllocateSpace("Bodega-1", "Espacio-1", 80, 1), uow)
    messagebus.handle(commands.AllocateProduct("Espacio-1", "prod-001", "FASHION GLASSES", 0.5, 500, 3, "ol-001",
                                               "Bodega-1"), uow)
    assert len(uow.warehouses.get("Bodega-1").list_allocated_spaces()[0].list_prod()) == 1
def test_no_command_exception():
    """Handling something that is neither event nor command must raise."""
    with pytest.raises(NotEventOrCommandException, match="not_command"):
        messagebus.handle("not_command", FakeWarehouseUnitOfWork())
|
from .list import ListCreateSchema
from .task import TaskCreateSchema
|
#!/usr/bin/python
import sys
import builtins
import myModule1.mySubModule1
class myModule1Meta( type ):
    # Delegates missing class-attribute lookups on the shadow class to the
    # real 'myModule1' module object.
    def __getattr__( self, name ):
        return getattr( sys.modules[ 'myModule1' ], name )
class mySubModule1Meta( type ):
    # Delegates missing class-attribute lookups to the real submodule.
    def __getattr__( self, name ):
        return getattr( sys.modules[ 'myModule1' ].mySubModule1, name )
# Shadow class hierarchy mirroring myModule1.mySubModule1.myClass.
# NOTE(review): 'myModule1Meta.mySubModule1Meta' looks like it should be the
# top-level 'mySubModule1Meta' — the nested attribute access presumably
# raises AttributeError at class-creation time; confirm.
class myModule1(metaclass=myModule1Meta):
    class mySubModule1(metaclass=myModule1Meta.mySubModule1Meta):
        class myClass( sys.modules[ 'myModule1' ].mySubModule1.myClass ):
            def run( self ):
                print( "shadowModule1::myModule1::mySubModule1::myClass run" )
def _shadowSetAttrMyModule1( shadowModule, originalModule, fromlist ):
if fromlist:
for fromItem in fromlist:
if fromItem not in dir( shadowModule ):
val = getattr( originalModule, fromItem )
setattr( shadowModule, fromItem, val )
# Keep the real __import__ so non-shadowed modules import normally.
_savedImport = builtins.__import__
# NOTE(review): mutable default arguments ({} / []) are shared across calls.
def _shadowImport1( moduleName, globals={}, locals={}, fromlist=[], level=-1 ):
    """Import hook: redirect imports of myModule1(.mySubModule1) to
    shadowModule1, copying any 'from ... import name' names across."""
    if moduleName == "myModule1" or moduleName == "myModule1.mySubModule1":
        mod = _savedImport( "shadowModule1", globals, locals, [], level )
        if moduleName == "myModule1":
            _shadowSetAttrMyModule1( mod.myModule1, sys.modules[ "myModule1" ], fromlist )
            return mod.myModule1
        else:
            _shadowSetAttrMyModule1( mod.myModule1.mySubModule1,
                                     sys.modules[ "myModule1" ].mySubModule1,
                                     fromlist )
            # Mimic __import__ semantics: 'from' imports get the submodule,
            # plain imports get the top-level package.
            if fromlist:
                return mod.myModule1.mySubModule1
            return mod.myModule1
    return _savedImport( moduleName, globals, locals, fromlist, level )
# Install the hook globally.
builtins.__import__ = _shadowImport1
|
import morepath
import re
from onegov.core import utils
from onegov.core.crypto import random_password
from onegov.core.templates import render_template
from onegov.onboarding import _
from onegov.onboarding.errors import AlreadyExistsError
from onegov.onboarding.forms import FinishForm, TownForm
from onegov.onboarding.layout import MailLayout
from onegov.onboarding.models.assistant import Assistant
from onegov.town6.initial_content import create_new_organisation
from onegov.org.models import Organisation
from onegov.user import UserCollection
_valid_subdomain = re.compile(r'^[a-z0-9]+[a-z0-9-]+[a-z0-9]+$')
class TownAssistant(Assistant):
    """ An assistant guiding a user through onegov.town6 onboarding. """

    @Assistant.step(form=TownForm)
    def first_step(self, request, form):
        """Collect the town name, admin e-mail, contact and colour.

        Submitted values are stashed in the browser session so that the
        following step (and a round-trip back here) can reuse them.
        """
        if form.submitted(request):
            request.browser_session['name'] = form.data['name']
            request.browser_session['user'] = form.data['user']
            request.browser_session['user_name'] = form.data['user_name']
            request.browser_session['phone_number'] = form.data['phone_number']
            request.browser_session['color'] = form.data['color']
            return morepath.redirect(request.link(self.for_next_step()))

        # Pre-fill the fields from the session when navigating back.
        form.name.data = request.browser_session.get('name', form.name.data)
        form.user.data = request.browser_session.get('user', form.user.data)
        form.color.data = request.browser_session.get('color', form.color.data)
        form.user_name.data = request.browser_session.get(
            "user_name", form.user_name.data
        )
        # Fix: this used to assign to ``form.color.phone_number``, so
        # the phone number was never restored into its own field.
        form.phone_number.data = request.browser_session.get(
            "phone_number", form.phone_number.data
        )
        return {
            'name': 'town-start',
            'title': _("Online Counter for Towns Demo"),
            'bullets': (
                _("Start using the online counter for your town immediately."),
                _("Setup takes less than one minute."),
                _("Free with no commitment.")
            ),
        }

    @Assistant.step(form=FinishForm)
    def last_step(self, request, form):
        """Show a launch preview; create the town once confirmed."""
        # Fix: 'user_name' and 'phone_number' are read below and deleted
        # in the ``finally`` block, so they must be present as well --
        # the original only required three of the five keys.
        for key in ('name', 'user', 'user_name', 'phone_number', 'color'):
            if not request.browser_session.has(key):
                return morepath.redirect(request.link(self.for_prev_step()))

        name = request.browser_session['name']
        user_name = request.browser_session['user_name']
        phone_number = request.browser_session['phone_number']
        user = request.browser_session['user']
        color = request.browser_session['color']

        if form.submitted(request):
            try:
                product = self.add_town(name, user, color, request)
                error = None
            except AlreadyExistsError:
                product = None
                error = _(
                    "This town exists already and can't be created. Is it "
                    "your town but you did not create it? Please contact us."
                )
            else:
                # Creation succeeded: announce the new instance.
                self.app.send_zulip(
                    subject='OneGov Onboarding',
                    content='\n'.join((
                        f"A new OneGov Cloud instance was started by "
                        f"{user_name}:",
                        f"[{name}]({product['url']})",
                        f"Email: {user}",
                        f"Phone: {phone_number}"
                    ))
                )
            finally:
                # One-shot session data: drop it whether or not the
                # creation succeeded.
                del request.browser_session['name']
                del request.browser_session['user']
                del request.browser_session['color']
                del request.browser_session['phone_number']
                del request.browser_session['user_name']

            if error:
                return {
                    'name': 'town-error',
                    'title': _("Online Counter for Towns Demo"),
                    'error': error,
                    'form': None
                }
            else:
                return {
                    'name': 'town-success',
                    'title': _("Online Counter for Towns Demo"),
                    'product': product,
                    'message': _("Success! Have a look at your new website!"),
                    'warning': _(
                        "Please write down your username and password "
                        "before you continue. "
                    ),
                    'form': None
                }

        return {
            'name': 'town-ready',
            'title': _("Online Counter for Towns Demo"),
            'message': _(
                "We are ready to launch! Click continue once you're ready."
            ),
            'preview': {
                'name': name,
                'user': user,
                'domain': self.get_domain(name),
                'color': color
            },
            'cancel': request.link(self.for_prev_step())
        }

    @property
    def config(self):
        """Onboarding configuration for onegov.town6."""
        return self.app.onboarding['onegov.town6']

    def get_subdomain(self, name):
        """Return a URL-safe subdomain derived from the town name."""
        return utils.normalize_for_url(name)

    def get_domain(self, name):
        """Return subdomain plus the configured base domain."""
        return '{}.{}'.format(self.get_subdomain(name), self.config['domain'])

    def get_schema(self, name):
        """Return the database schema name for the new town."""
        return '{}-{}'.format(
            self.config['namespace'],
            self.get_subdomain(name).replace('-', '_')
        )

    def add_town(self, name, user, color, request):
        """Create the organisation, admin user and welcome mail.

        Raises AlreadyExistsError if the target schema already holds an
        organisation.  Returns a dict with the credentials and URL.
        """
        current_schema = self.app.session_manager.current_schema
        password = random_password(16)

        try:
            schema = self.get_schema(name)
            custom_config = self.config['configuration']
            self.app.session_manager.set_current_schema(schema)
            session = self.app.session_manager.session()

            if session.query(Organisation).first():
                raise AlreadyExistsError

            with self.app.temporary_depot(schema, **custom_config):
                create_new_organisation(self.app, name=name, reply_to=user)

            org = session.query(Organisation).first()
            org.theme_options['primary-color-ui'] = color

            users = UserCollection(self.app.session_manager.session())
            # A freshly created schema must not contain users yet.
            assert not users.query().first()
            users.add(user, password, 'admin')

            title = request.translate(_("Welcome to OneGov Cloud"))
            welcome_mail = render_template('mail_welcome.pt', request, {
                'url': 'https://{}'.format(self.get_domain(name)),
                'mail': user,
                'layout': MailLayout(self, request),
                'title': title,
                'org': name
            })

            self.app.es_perform_reindex()
            self.app.send_transactional_email(
                subject=title,
                receivers=(user, ),
                content=welcome_mail,
                reply_to='onegov@seantis.ch'
            )
        finally:
            # Always restore the schema that was active before.
            self.app.session_manager.set_current_schema(current_schema)

        return {
            'info': [
                (_("Username"), user),
                (_("Password"), password),
            ],
            'url': 'https://{}'.format(self.get_domain(name))
        }
|
# Definition for singly-linked list.
class ListNode:
    """One node of a singly-linked list: a value plus a ``next`` link."""

    def __init__(self, x):
        # ``next`` starts unlinked; the list builder wires it up later.
        self.val = x
        self.next = None
"""
给出两个 非空 的链表用来表示两个非负的整数。其中,它们各自的位数是按照 逆序 的方式存储的,并且它们的每个节点只能存储 一位 数字。
如果,我们将这两个数相加起来,则会返回一个新的链表来表示它们的和。
您可以假设除了数字 0 之外,这两个数都不会以 0 开头。
"""
class Solution:
    def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
        """Add two non-negative integers stored as reversed-digit lists.

        Each node holds one digit, least-significant first; the sum is
        returned in the same representation.

        Fix: the original called ``ListNode()`` without the required
        ``x`` argument (TypeError).  The three near-identical branches
        are merged into one carry loop, which also covers the final
        carry without a special case.
        """
        dummy = ListNode(0)   # placeholder head; real digits hang off .next
        tail = dummy
        carry = 0
        while l1 or l2 or carry:
            total = carry
            if l1:
                total += l1.val
                l1 = l1.next
            if l2:
                total += l2.val
                l2 = l2.next
            tail.next = ListNode(total % 10)
            tail = tail.next
            carry = total // 10
        return dummy.next
|
import json
import email
from django import forms
from django.forms import widgets
from django.contrib.postgres import forms as pg_forms
from wapipelines.models import Pipeline, Step
class HTTPHeaderField(forms.Field):
    """Form field that parses ``Key: Value`` lines into a dict.

    Parsing is delegated to :mod:`email`, whose RFC 2822 header syntax
    matches HTTP header lines.
    """

    def clean(self, data):
        """Validate and convert the raw text; empty input yields ``{}``.

        Fix: the original passed ``None`` straight into
        ``email.message_from_string`` (TypeError) when the field was
        left blank; treat empty/None as "no headers" instead.
        """
        if not data:
            return {}
        if ':' not in data:
            raise forms.ValidationError(
                'Headers must be in Key: Value format.')
        message = email.message_from_string(data)
        return {key: message.get(key) for key in message.keys()}
class HTTPHeaderWidget(forms.Textarea):
    """Textarea widget rendering a header dict as ``Key: Value`` lines."""

    def format_value(self, raw_value):
        if not raw_value:
            return ''
        # Fix: ``basestring`` only exists on Python 2 and raises
        # NameError on Python 3; ``str`` covers the text case there.
        if isinstance(raw_value, str):
            return raw_value
        return '\n'.join([
            '%s: %s' % (key, value)
            for key, value in raw_value.items()
        ])
class CreatePipelineForm(forms.ModelForm):
    """ModelForm for creating a Pipeline (name, active flag, endpoint)."""
    # Bootstrap styling on the text inputs.
    name = forms.CharField(
        widget=widgets.TextInput(attrs={'class': 'form-control'}))
    endpoint = forms.URLField(
        widget=widgets.TextInput(attrs={'class': 'form-control'}))
    class Meta:
        model = Pipeline
        fields = (
            'name',
            'active',
            'endpoint',
        )
class CreateStepForm(forms.ModelForm):
    """ModelForm for adding a Step to a pipeline."""
    name = forms.CharField(
        widget=widgets.TextInput(attrs={'class': 'form-control'}))
    url = forms.URLField(
        widget=widgets.TextInput(attrs={'class': 'form-control'}),
        help_text=('Specify a URL to request outputs from, leave blank if '
                   'this step is only going to be used for waiting for '
                   'outputs from preceding steps'),
        required=False)
    # NOTE(review): CharField with a Select widget; a ChoiceField with
    # choices=Step.TIMEOUT_CHOICES would also validate the value --
    # confirm before changing, as it alters validation behaviour.
    timeout = forms.CharField(
        widget=widgets.Select(attrs={'class': 'form-control'},
                              choices=Step.TIMEOUT_CHOICES))
    # Comma-separated names rendered as plain text inputs.
    inputs = pg_forms.SimpleArrayField(
        forms.CharField(),
        widget=widgets.TextInput(attrs={'class': 'form-control'}),
        required=False)
    outputs = pg_forms.SimpleArrayField(
        forms.CharField(),
        widget=widgets.TextInput(attrs={'class': 'form-control'}),
        required=False)
    # Free-form "Key: Value" lines; parsed to a dict by HTTPHeaderField.
    headers = HTTPHeaderField(
        widget=HTTPHeaderWidget(attrs={'class': 'form-control'}),
        required=False)
    class Meta:
        model = Step
        fields = (
            'name',
            'url',
            'timeout',
            'inputs',
            'outputs',
            'headers',
        )
class PipelineRunForm(forms.Form):
    """Form for manually triggering a pipeline run with a JSON payload."""
    data = forms.CharField(
        widget=forms.Textarea(attrs={'class': 'form-control'}),
        required=True)

    def clean_data(self):
        """Parse the submitted payload, rejecting empty or invalid JSON."""
        raw_data = self.cleaned_data['data']
        if not raw_data:
            raise forms.ValidationError('A JSON value is required')
        try:
            parsed = json.loads(raw_data)
        except ValueError:
            raise forms.ValidationError('Invalid JSON')
        return parsed
|
"""
@Author : Laura
@File : __init__.py.py
@Time : 2020/3/18 14:24
""" |
class Solution(object):
    def isAnagram(self, s, t):
        """
        :type s: str
        :type t: str
        :rtype: bool

        Build a character-frequency table for each string and compare
        the two tables: equal tables means the strings are anagrams.
        """
        def frequencies(text):
            counts = {}
            for ch in text:
                counts[ch] = counts.get(ch, 0) + 1
            return counts

        return frequencies(s) == frequencies(t)
# test
# Smoke test: the two strings are permutations of each other, so this
# should print True.
s = "qazwsxed1122crfvtgbyhnujm"
t = "qaz1ujmn1hyws2xtg2bedcfrv"
a = Solution()
# Fix: ``print a.isAnagram(s,t)`` is a Python-2-only statement; the
# parenthesized form is valid on both Python 2 and 3.
print(a.isAnagram(s, t))
'''
Return the number (count) of vowels in the given string.
We will consider a, e, i, o, and u as vowels for this Kata.
The input string will only consist of lower case letters and/or spaces.
'''
def getCount(inputStr):
    """Return the number of vowels (a, e, i, o, u) in *inputStr*."""
    vowels = 0
    for ch in inputStr:
        if ch in 'aeiou':
            vowels += 1
    return vowels
import click
from mazel.workspace import Workspace
def current_workspace() -> Workspace:
    """Return the enclosing Workspace, or abort with a CLI error.

    Raises click.ClickException when the current directory is not
    inside a workspace.
    """
    found = Workspace.find()
    if found is not None:
        return found
    raise click.ClickException("Not in a workspace")
|
# Registered users mapped to their passwords.
Users = {"Tony": "luke", "Abrar": "Dheeru", "SreeKanth": "Sirisha"}
val1 = input("Enter User Name: ")
val2 = input("Enter Password: ")

class InvalidUser(Exception):
    """Raised when the supplied credentials do not match a known user."""
    def __init__(self, msg="Invalid User"):
        Exception.__init__(self, msg)

try:
    # Fix: the original indexed the username string with the password
    # (``val1[val2]`` -> TypeError) and raised InvalidUser when the
    # username *matched*; look the user up and compare passwords.
    # ``.get`` returns None for unknown users, which never equals the
    # entered password.
    if Users.get(val1) == val2:
        print("Welcome")
    else:
        raise InvalidUser
except InvalidUser as e:
    print(e)
|
"""
distutilazy.clean
-----------------
command classes to help clean temporary files
:license: MIT. For more details see LICENSE file or
https://opensource.org/licenses/MIT
"""
from __future__ import absolute_import
import os
from shutil import rmtree
from distutils import log
from distutils.core import Command
from distutils.command import clean
from . import util
__version__ = "0.4.0"
class BaseFileSystemCleanerCommand(Command):
    """Base distutils command that removes files (by extension) and
    directories (by name) recursively below a root path."""

    @staticmethod
    def default_extensions():
        # Fix: these two were declared @staticmethod but took a ``cls``
        # parameter, so ``self.default_extensions()`` in
        # initialize_options raised TypeError (missing argument).
        return []

    @staticmethod
    def default_directories():
        return []

    def initialize_options(self):
        self.root = os.getcwd()
        # Options are comma-separated strings so they can be set from
        # the command line; finalize_options splits them into lists.
        self.extensions = ','.join(self.default_extensions())
        self.directories = ','.join(self.default_directories())

    def finalize_options(self):
        if not os.path.exists(self.root):
            raise IOError("Failed to access root path '{}'".format(self.root))
        self.extensions = [ext.strip() for ext in self.extensions.split(',')]
        self.directories = [
            dir_.strip() for dir_ in self.directories.split(',')]

    def _find_files(self):
        """Find files recursively in the root path
        using provided extensions.

        :return: list of absolute file paths
        """
        files = []
        for ext in self.extensions:
            ext_files = util.find_files(self.root, "*" + ext)
            log.debug("found {} '*{}' files in '{}'".format(
                len(ext_files), ext, self.root)
            )
            files.extend(ext_files)
        return files

    def _find_directories(self):
        """Find configured directory names recursively under root.

        :return: list of absolute directory paths
        """
        directories = []
        for dir_name in self.directories:
            dirs = util.find_directories(self.root, dir_name)
            log.debug("found {} directories in '{}'".format(
                len(dirs), self.root))
            directories.extend(dirs)
        return directories

    def _clean_file(self, filename):
        """Clean a file if exists and not in dry run"""
        if not os.path.exists(filename):
            return
        self.announce("removing '{}'".format(filename))
        if not self.dry_run:
            os.remove(filename)

    def _clean_directory(self, name):
        """Clean a directory if exists and not in dry run"""
        if not os.path.exists(name):
            return
        self.announce(
            "removing directory '{}' and all its contents".format(name)
        )
        if not self.dry_run:
            # ignore_errors=True: best-effort removal.
            rmtree(name, True)
class CleanPyc(BaseFileSystemCleanerCommand):
    # Fix: typo "complied" -> "compiled" in the user-facing description.
    description = """Clean root dir from compiled python files"""
    user_options = [("root=", "r", "path to root dir")]

    @staticmethod
    def default_extensions():
        return [".pyc", ".pyo", ".pyd"]

    @staticmethod
    def default_directories():
        return ["__pycache__"]

    def find_compiled_files(self):
        """Find compiled Python files recursively in the root path

        :return: list of absolute file paths
        """
        files = self._find_files()
        self.announce(
            "found '{}' compiled python files in '{}'".format(
                len(files), self.root
            )
        )
        return files

    def find_cache_directories(self):
        """Find __pycache__ directories recursively under root."""
        directories = self._find_directories()
        self.announce(
            "found {} python cache directories in '{}'".format(
                len(directories), self.root
            )
        )
        return directories

    def run(self):
        """Remove cache directories first, then stray compiled files."""
        directories = self.find_cache_directories()
        if directories:
            self.announce(
                "cleaning python cache directories in '{}' ...".format(
                    self.root))
            if not self.dry_run:
                for dir_name in directories:
                    self._clean_directory(dir_name)
        files = self.find_compiled_files()
        if files:
            self.announce(
                "cleaning compiled python files in '{}' ...".format(self.root))
            if not self.dry_run:
                for filename in files:
                    self._clean_file(filename)
class CleanJythonClass(BaseFileSystemCleanerCommand):
    # Fix: typo "complied" -> "compiled" in the user-facing description.
    description = """Clean root dir from compiled files created by Jython"""
    user_options = [("root=", "r", "path to root dir")]

    @staticmethod
    def default_extensions():
        # Jython emits ``<name>$py.class`` files.
        return ["$py.class"]

    @staticmethod
    def default_directories():
        return []

    def find_class_files(self):
        """Find compiled class files recursively in the root path

        :return: list of absolute file paths
        """
        files = self._find_files()
        self.announce(
            "found '{}' compiled class files in '{}'".format(
                len(files), self.root
            )
        )
        return files

    def run(self):
        files = self.find_class_files()
        if files:
            self.announce(
                "cleaning compiled class files in '{}' ...".format(self.root))
            if not self.dry_run:
                for filename in files:
                    self._clean_file(filename)
class CleanAll(clean.clean, BaseFileSystemCleanerCommand):
    # Fix: typo "complied" -> "compiled" in the user-facing description.
    description = "Clean root dir from temporary files (compiled files, etc)"
    user_options = [
        ("keep-build", None, "do not clean build directory"),
        ("keep-dist", None, "do not clean dist directory"),
        ("keep-egginfo", None, "do not clean egg info directory"),
        ("keep-extra", None, "do not clean extra files"),
    ]
    boolean_options = ["keep-build", "keep-dist", "keep-egginfo", "keep-extra"]

    @staticmethod
    def default_extensions():
        # Union of everything the specialised cleaners remove.
        return CleanPyc.default_extensions() + \
            CleanJythonClass.default_extensions()

    @staticmethod
    def default_directories():
        return CleanPyc.default_directories() + \
            CleanJythonClass.default_directories()

    def initialize_options(self):
        clean.clean.initialize_options(self)
        BaseFileSystemCleanerCommand.initialize_options(self)
        self.keep_build = None
        self.keep_dist = None
        self.keep_egginfo = None
        self.keep_extra = None

    def finalize_options(self):
        clean.clean.finalize_options(self)
        BaseFileSystemCleanerCommand.finalize_options(self)
        # Force distutils' clean --all behaviour.
        self.all = True

    def get_egginfo_dir(self):
        return self.distribution.metadata.get_name() + ".egg-info"

    def get_extra_paths(self):
        """Return list of extra files/directories to be removed"""
        return []

    def clean_egginfo(self):
        """Clean .egginfo directory"""
        dir_name = os.path.join(self.root, self.get_egginfo_dir())
        self._clean_directory(dir_name)

    def clean_dist(self):
        self._clean_directory(os.path.join(self.root, "dist"))

    def clean_build(self):
        self._clean_directory(os.path.join(self.root, "build"))

    def clean_extra(self):
        """Clean extra files/directories specified by get_extra_paths()"""
        extra_paths = self.get_extra_paths()
        for path in extra_paths:
            if not os.path.exists(path):
                continue
            if os.path.isdir(path):
                self._clean_directory(path)
            else:
                self._clean_file(path)

    def run(self):
        clean.clean.run(self)
        if not self.keep_build:
            self.clean_build()
        if not self.keep_egginfo:
            self.clean_egginfo()
        if not self.keep_dist:
            self.clean_dist()
        if not self.keep_extra:
            self.clean_extra()
        directories = self._find_directories()
        if directories and not self.dry_run:
            for dir_name in directories:
                self._clean_directory(dir_name)
        files = self._find_files()
        if files and not self.dry_run:
            for filename in files:
                self._clean_file(filename)
# Lowercase aliases matching distutils' command naming convention,
# e.g. cmdclass={"clean_pyc": clean_pyc, "clean_all": clean_all}.
clean_pyc = CleanPyc
clean_all = CleanAll
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 9 02:36:52 2019
@author: vishay
"""
# USAGE
# python test_network.py --model santa_not_santa.model --image images/examples/santa_01.png
# import the necessary packages
from keras.preprocessing.image import img_to_array
from keras.models import load_model
import numpy as np
import argparse
#import imutils
import cv2
from scipy import ndimage
import scipy.io
import os
#os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # The GPU id to use, usually either "0" or "1"
#os.environ["CUDA_VISIBLE_DEVICES"]="0" ## here you can give 0 or 1 based on whatever gpu is unused
# construct the argument parse and parse the arguments
#ap = argparse.ArgumentParser()
#ap.add_argument("-m", "--model", required=True,
# help="path to trained model model")
#ap.add_argument("-i", "--image", required=True,
# help="path to input image")
#args = vars(ap.parse_args())
# Subjects and camera distances to process.  All paths are hard-coded
# to this experiment's external-drive layout.
subject_names=['jyotirmoy_das','nikhil_bhardwaj','manoj','renuka']
distance = ['1ft','1.5ft']
print("[INFO] loading network...")
model = load_model('/home/vishay/Music/CNN/cnn.h5')
# 16 videos per subject; only distance index 0 is processed (p = [0]).
q = list(range(0,16))
p = list(range(1))
for subject_name in subject_names:
    angle = []
    topleft = []
    bottomright = []
    # HRloc.txt: one space-separated line per video -- presumably
    # "tlx tly brx bry angle" (crop box + rotation) -- TODO confirm.
    HRloc = open('/media/vishay/ExternalHDD/DATA/Complete/'+subject_name+'/HRloc.txt', 'r').read().splitlines()
    for i in HRloc:
        topleft.append(list(map(int,i.split(' ')[0:2])))
        bottomright.append(list(map(int,i.split(' ')[2:4])))
        angle.append(int(i.split(' ')[4]))
    tl = np.reshape(topleft,(16,2))
    br = np.reshape(bottomright,(16,2))
    a = np.reshape(angle,(16))
    for j in p:
        for i in q:
            index = str(i+1)
            #print(index)
            jay = str(j)
            # Videos 1-9 are zero-padded on disk ("01.mp4" ... "09.mp4").
            if i<9:
                vidcap = cv2.VideoCapture('/media/vishay/ExternalHDD/DATA/Complete/'+subject_name+'/samsung/'+distance[j]+'/'+'0%s.mp4' %index)
            else:
                vidcap = cv2.VideoCapture('/media/vishay/ExternalHDD/DATA/Complete/'+subject_name+'/samsung/'+distance[j]+'/'+'%s.mp4' %index)
            count = 1
            success = True
#            angle = 90
            left = []
            right = []
            while success:
                success,image = vidcap.read()
                if success == False:
                    break
                # Rotate to upright, then crop the two-digit readout.
                gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                gray = ndimage.rotate(gray,a[i])
                hr = gray[tl[i,1]:br[i,1],tl[i,0]:br[i,0]]
                width, height = hr.shape[::-1]
                # Split into left/right digit halves (1px overlap).
                b1 = hr[:,0:round(width/2) + 1];
                b2 = hr[:,round(width/2) - 1 : width+1];
                # Resize to the CNN's 28x28 input, binarise (Otsu),
                # scale to [0,1] and add the batch dimension.
                b1 = cv2.resize(b1, (28, 28))
                _,b1 = cv2.threshold(b1,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
                b1 = b1.astype("float") / 255.0
                b1 = img_to_array(b1)
                b1 = np.expand_dims(b1, axis=0)
                b2 = cv2.resize(b2, (28, 28))
                _,b2 = cv2.threshold(b2,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
                b2 = b2.astype("float") / 255.0
                b2 = img_to_array(b2)
                b2 = np.expand_dims(b2, axis=0)
                count += 1
                # Predicted digit = argmax over the CNN class scores.
                left.append(np.argmax(model.predict(b1)[0]))
                right.append(np.argmax(model.predict(b2)[0]))
                if count%100==0:
                    print('frame #'+str(count)+' of video #'+index+' distance '+jay+' '+subject_name)
            left = np.array(left)
            right = np.array(right)
            # Per-frame two-digit value: tens digit * 10 + units digit.
            num = 10*left+right
            if i<9:
                scipy.io.savemat('/media/vishay/ExternalHDD/DATA/Complete/'+subject_name+'/samsung/'+distance[j]+'/'+'GT/'+'0'+index+'.mat', {'gt':num})
            else:
                scipy.io.savemat('/media/vishay/ExternalHDD/DATA/Complete/'+subject_name+'/samsung/'+distance[j]+'/'+'GT/'+index+'.mat', {'gt':num})
from .redis_cache import cache, cache_time
# from .db import user_db
|
import requests
import pandas as pd
from datetime import datetime,timezone,timedelta
import time
import os
def _get_js_datetime_now(milliseconds=1,microseconds=0,adjust=True):
    """
    Return the current time as a JavaScript-style epoch timestamp
    (milliseconds since the Unix epoch by default), used below as the
    ``_`` cache-buster query parameter.
    (Original docstring: JSベースの現在時刻を取得 -- "get the current
    time, JS-based".)

    NOTE(review): the epoch is declared in JST and a 9-hour correction
    (jst_delta) is then subtracted again, and the microseconds term
    mixes units -- the arithmetic is unusual; confirm against the
    value the API actually expects before refactoring.
    """
    JST = timezone(timedelta(hours=+9),'JST')
    base = datetime(1970,1,1,0,0,0,0,JST)
    now = datetime.now(JST)
    jst_delta = 60 * 60 * 9 *1000* (milliseconds) + 60 * 60*(microseconds)
    if adjust:
        jst_delta += 1000000 # adjustment for the lag (original: ラグの部分の調整)
    delta = int(((now - base)/timedelta(milliseconds=milliseconds,microseconds=microseconds)))
    delta = delta - jst_delta
    return delta
def scraping_chart_data():
    """Fetch the latest 500 one-minute candles for each currency pair
    from the Gaitame chart API and save one CSV snapshot per pair under
    chart_data_gaitame/<pair>/.

    Fix: the original incremented an undefined ``token`` variable
    (``token += token_increment`` -> NameError on the first iteration)
    and returned it; that dead token logic -- and the unused
    ``df_base`` -- have been removed, so the function returns None.
    Also skips a pair on a non-200 response instead of parsing the
    error body, matching scraping_buysell.
    """
    nations = [
        "eurjpy","usdjpy","eurusd","gdpjpy","cadjpy",
        "chfjpy","gdpusd","usdchf","sekjpy","nokjpy",
        "tryjpy","zarjpy","mxnjpy","audjpy","nzdjpy",
        "audusd","nzdusd","euraud","cnhjpy","hkdjpy"]
    DATA_DIR = "chart_data_gaitame"
    url = "https://navi.gaitame.com/v2/info/prices/chart"
    now = datetime.now().strftime("%Y%m%d_%H%M")
    for nation in nations:
        payloads = {
            "pair": nation,
            "type": "1",
            "count": "500",
            "bid": "true",
            "ask": "true",
            "_": _get_js_datetime_now()   # cache buster
        }
        response = requests.get(
            url,
            params = payloads
        )
        if response.status_code != 200:
            print(response)
            continue
        if not os.path.isdir(os.path.join(DATA_DIR,nation)):
            os.makedirs(os.path.join(DATA_DIR,nation))
        df = pd.DataFrame(response.json()["data"])
        df.to_csv(os.path.join(DATA_DIR,nation,"{}.csv".format(now)))
        time.sleep(3)   # be polite to the API
    print("ended chart data")
def scraping_buysell():
    """
    Fetch hourly entry/close buy-sell ratios for each currency pair and
    save one CSV snapshot per pair under buysell_gaitame/<pair>/.
    (Original docstring: 売買比率を確認 -- "check the buy/sell ratios".)
    """
    url = "https://navi.gaitame.com/v2/info/tools/buysell"
    DATA_DIR = "buysell_gaitame"
    # Timestamp shared by all files written in this run.
    now = datetime.now().strftime("%Y%m%d_%H%M")
    nations = [
        "eurjpy","usdjpy","eurusd","cadjpy",
        "chfjpy","usdchf","sekjpy","nokjpy",
        "tryjpy","zarjpy","mxnjpy","audjpy","nzdjpy",
        "audusd","nzdusd","euraud","cnhjpy","hkdjpy"]
    for nation in nations:
        payloads = {
            "order": "entryclose",
            "interval": "hour",
            "pairs": nation
        }
        response = requests.get(
            url,
            params = payloads
        )
        if response.status_code != 200:
            # Skip this pair on an HTTP error.
            print(response,nation)
            continue
        if not os.path.isdir(os.path.join(DATA_DIR,nation)):
            os.makedirs(os.path.join(DATA_DIR,nation))
        # Flatten the nested entry/close ratio dicts into flat rows.
        response_data = response.json()["data"][0]["ratios"]
        response_data_normalized = []
        for d in response_data:
            response_data_normalized.append({
                "entry_buy":d["entry"]["buy"],
                "entry_sell":d["entry"]["sell"],
                "close_buy":d["close"]["buy"],
                "close_sell":d["close"]["sell"],
            })
        df = pd.DataFrame(response_data_normalized)
        df.to_csv(os.path.join(DATA_DIR,nation,"{}.csv".format(now)))
        time.sleep(3)  # be polite to the API
    print("ended buysell")
# Main loop: one buy/sell snapshot plus two chart snapshots per
# ~12-hour cycle (chart data roughly every 6 hours).
while True:
    scraping_buysell()
    token = scraping_chart_data()  # NOTE(review): return value unused
    time.sleep(60*60*6)
    token = scraping_chart_data()
    time.sleep(60*60*6)
|
import csv
def read_data(path):
    """Read a CSV file and concatenate columns 1 and 2 of every
    non-header row as "col1:col2", print the result and return it.

    Fix: the original opened the file without ever closing it; a
    ``with`` block releases the handle.  The accumulated text is now
    also returned (the original returned None), which is
    backward-compatible for callers that ignored the result.
    """
    text = ''
    with open(path) as csv_file:
        reader = csv.reader(csv_file)
        for i, row in enumerate(reader):
            if i > 0:   # skip the header row
                text += str(row[1])
                text += ':'
                text += str(row[2])
    print(text)
    return text
if __name__ == '__main__':
    # NOTE(review): hard-coded Windows path to the experiment data set.
    path='F:\\2018年暑假科研\\CNN\\my_clone\\mid_cnn_recommend_sim.csv'
    read_data(path)
from flask import Flask, render_template
import random
app = Flask(__name__)
@app.route("/")
def home():
    """Render the landing page."""
    return render_template("home.html")
@app.route("/decisions")
def about():
    """Render the decisions form (no-trailing-slash variant)."""
    return render_template("decisions.html")
@app.route("/decisions/")
@app.route("/decisions/<l>")
@app.route("/decisions/<l>/")
@app.route("/decisions/<l>/<f>")
def result(l="",f=""):
    """Flip a coin per Ivy school and render the admissions results.

    Without both a last and first name, fall back to the form page.
    """
    if l == "" or f == "":
        return render_template("decisions.html")
    ivy = ['Brown', 'Columbia', 'Cornell', 'Dartmouth', 'Harvard', 'Princeton', 'UPenn', 'Yale']
    d = {'last':l,
         'first':f}
    accepted = 0
    for school in ivy:
        # 50/50 chance per school; the template expects 'True'/'False'
        # as strings.
        if random.randint(0,1) > 0:
            d[school] = 'True'
            accepted += 1
        else:
            d[school] = 'False'
    d['num'] = accepted
    return render_template("result.html",d=d)
if __name__ == "__main__":
    # NOTE(review): debug mode and a hard-coded secret key are only
    # acceptable for local development -- do not deploy like this.
    app.debug = True
    app.secret_key = "1"
    app.run(host='0.0.0.0', port=8000)
|
#Climbing the Leaderboard
# Read the leaderboard: count, then scores sorted highest first.
num_players = int(input())
player_scores = [int(x) for x in input().split(' ')]
player_scores.sort(reverse = True)
# Read Alice's per-game scores in ascending order.
alice_level = int(input())
alice_scores = [int(x) for x in input().split(' ')]
alice_scores.sort()
# NOTE(review): this module-level list is shadowed inside
# assign_player_rank and never used afterwards.
leaderBoard = []
def assign_player_rank(playerScores):
    """Return dense ranks for a list of scores sorted highest first.

    Equal scores share a rank and the next distinct score gets the
    next rank (1, 2, 2, 3 ...).  The result is returned ascending.
    """
    ranks = []
    rank = 1
    previous = playerScores[0]
    for score in playerScores:
        if score != previous:
            rank += 1
            previous = score
        ranks.append(rank)
    ranks.sort()
    return ranks
def get_alice_ranking(level , aliceScores , cur_rank , player_scores):
    """Compute Alice's leaderboard rank after each of her games.

    ``aliceScores`` is ascending; ``player_scores`` descending with
    dense ranks in ``cur_rank``.  Returns ranks sorted descending.

    NOTE(review): this function mutates ``aliceScores`` in place
    (``remove`` below) -- callers must not reuse the list afterwards.
    """
    max_rank = max(cur_rank)
    alice_rank = []
    #print(cur_rank)
    idx = []
    rank = 0
    # First handle scores outside the board's range: below the minimum
    # (one past the worst rank) or above the maximum (best rank).
    for i in range(level):
        if aliceScores[i] < min(player_scores):
            alice_rank.append(max_rank + 1)
            idx.append(aliceScores[i])
            #aliceScores.pop(i)
        elif aliceScores[i] > max(player_scores):
            alice_rank.append(min(cur_rank))
            idx.append(aliceScores[i])
            #aliceScores.pop(i)
    #print(aliceScores)
    #print(idx)
    # Drop the already-ranked scores so only in-range ones remain.
    for i in idx:
        aliceScores.remove(i)
    # For the rest, scan the board for the first score Alice beats or
    # ties; her rank derives from that entry's rank.
    for i in range(len(aliceScores)):
        for j in range(len(player_scores)):
            if aliceScores[i] > player_scores[j]:
                #print(rank ,
                rank = cur_rank[j]
                #cur_rank[j] = cur_rank[j] + 1
                #print(rank , aliceScores[i] , cur_rank[j] , player_scores[j] , j)
                break
            elif aliceScores[i] < player_scores[j]:
                rank = cur_rank[j] + 1
            elif aliceScores[i] == player_scores[j]:
                rank = cur_rank[j]
                #print(rank , aliceScores[i] , cur_rank[j] , player_scores[j] , j)
                break
        # NOTE(review): appended once per outer iteration, inside the
        # outer loop -- one rank per remaining Alice score.
        alice_rank.append(rank)
    alice_rank.sort(reverse = True)
    return alice_rank
#print(assign_player_rank(player_scores))
# Rank the existing board, then print Alice's rank after each game,
# one per line (worst rank first, per get_alice_ranking's ordering).
cur_rank = assign_player_rank(player_scores)
ar = get_alice_ranking(alice_level , alice_scores , cur_rank , player_scores)
for a in ar:
    print(a)
|
from funcs_for_handlers import logging_decorator, cancel
from setup_database import open_close_database
from telegram.ext import (
ConversationHandler, MessageHandler, CommandHandler, Filters)
# consts for conversation handlers
# States of the add-sticker conversation, in order.
STICKER, STICKER_SHORTCUT, PACK_ID = range(3)
# Single state of the create-pack conversation.
SET_PACK_NAME = 0
def get_add_to_db_handlers():
    """Return the conversation handlers that write to the database."""
    return [
        get_conv_sticker_handler(),
        get_create_pack_handler()
    ]
def get_conv_sticker_handler():
    """Build the /add_sticker conversation: sticker -> shortcut -> pack id."""
    return ConversationHandler(
        entry_points=[CommandHandler('add_sticker', add_sticker)],
        states={
            STICKER: [MessageHandler(Filters.sticker, sticker)],
            STICKER_SHORTCUT: [MessageHandler(Filters.text, sticker_shortcut)],
            PACK_ID: [MessageHandler(Filters.text, pack_id)]
        },
        fallbacks=[CommandHandler('cancel', cancel)]
    )
@logging_decorator
def add_sticker(update, context):
    """Entry point of /add_sticker: prompt for the sticker itself."""
    update.message.reply_text('Send a sticker you wish to save')
    # Remember who initiated the add, for the pack_stickers row.
    context.user_data['user_added_id'] = update.message.from_user.id
    return STICKER
@logging_decorator
def sticker(update, context):
    """Store the received sticker's file id, then ask for a shortcut."""
    context.user_data['sticker_id'] = update.message.sticker.file_id
    update.message.reply_text('Got it! Now send a shortcut for it')
    return STICKER_SHORTCUT
@logging_decorator
def sticker_shortcut(update, context):
    """Store the shortcut, then list the user's packs and ask for an id."""
    context.user_data['sticker_shortcut'] = update.message.text
    context.user_data['added_dttm'] = update.message.date.strftime(
        '%Y-%m-%d %H:%M:%S')
    user_id = update.message.from_user.id
    pack_id_name = get_admin_packs(user_id=user_id)
    # One "<pack name>: <pack id>" line per pack with admin rights.
    answer = '\n'.join([
        f'{pack_name}: {pack_id}' for pack_id, pack_name in pack_id_name
    ])
    update.message.reply_text(f'''Now select an id of the pack to add to.
    Your packs with admin rights:\n\n{answer}''')
    return PACK_ID
@logging_decorator
def pack_id(update, context):
    """Validate the chosen pack id and persist the sticker.

    Ends the conversation on a non-numeric id or on a pack the user
    does not administer.
    """
    user_id = update.message.from_user.id
    pack_id_name = get_admin_packs(user_id=user_id)
    pack_ids = [x[0] for x in pack_id_name]
    text_pack_id = update.message.text
    try:
        pack_id = int(text_pack_id)
    except ValueError:
        update.message.reply_text(
            'Pack id should be a number')
        return ConversationHandler.END
    if pack_id not in pack_ids:
        update.message.reply_text(
            'You don\'t have admin rights for this pack')
        return ConversationHandler.END
    context.user_data['pack_id'] = pack_id
    add_sticker_preference(user_data=context.user_data)
    update.message.reply_text(
        'Sticker added successfully!')
    return ConversationHandler.END
@open_close_database
def get_admin_packs(mydb, mycursor, user_id):
    """Return (pack_id, pack_name) tuples for packs the user administers.

    ``mydb``/``mycursor`` are injected by the open_close_database
    decorator; call with ``user_id=`` only.
    """
    sql = '''
    select
        t1.pack_id,
        t2.pack_name
    from user_pack_roles t1
    inner join pack_info t2
        on true
        and t1.user_id = %s
        and t1.role = 'admin'
        and t1.pack_id = t2.pack_id;'''
    val = (user_id,)
    mycursor.execute(sql, val)
    pack_id_name = mycursor.fetchall()
    return pack_id_name
@open_close_database
def add_sticker_preference(mydb, mycursor, user_data):
    """Insert one pack_stickers row from the collected conversation data.

    Expects user_data keys: pack_id, sticker_id, sticker_shortcut,
    user_added_id, added_dttm (set during the /add_sticker dialogue).
    """
    sql = '''insert into pack_stickers
    values (%s, %s, %s, %s, %s);'''
    val = [
        user_data['pack_id'], user_data['sticker_id'],
        user_data['sticker_shortcut'], user_data['user_added_id'],
        user_data['added_dttm']
    ]
    mycursor.execute(sql, val)
def get_create_pack_handler():
    """Build the /create_pack conversation (single naming step)."""
    return ConversationHandler(
        entry_points=[CommandHandler('create_pack', create_pack)],
        states={
            SET_PACK_NAME: [MessageHandler(Filters.text, set_pack_name)]
        },
        fallbacks=[CommandHandler('cancel', cancel)]
    )
@logging_decorator
def create_pack(update, context):
    """Entry point of /create_pack: prompt for the new pack's name."""
    update.message.reply_text('Send a name for your pack. \
Keep in mind that for now all non-personal packs are public.')
    return SET_PACK_NAME
@open_close_database
def set_pack_name(update, context, mydb, mycursor):
    """Create the pack, grant its author admin rights and subscribe them.

    NOTE(review): the decorator-injected ``mydb``/``mycursor`` appear
    *after* the handler args here, while get_admin_packs and
    add_sticker_preference take them first -- confirm the decorator
    supports both orders, otherwise this handler receives shifted
    arguments at runtime.
    """
    user_data = {}
    user_data['pack_name'] = update.message.text
    user_data['pack_author_id'] = update.message.from_user.id
    user_data['create_dttm'] = update.message.date.strftime(
        '%Y-%m-%d %H:%M:%S')
    user_data['type'] = 'public'
    sql = '''insert into pack_info (
        pack_name, pack_author_id, create_dttm, type)
    values (%s, %s, %s, %s)'''
    val = (
        user_data['pack_name'], user_data['pack_author_id'],
        user_data['create_dttm'], user_data['type']
    )
    mycursor.execute(sql, val)
    # Re-select the row just inserted to learn its generated pack_id.
    sql = '''
    select
        pack_id
    from pack_info
    where true
        and pack_name = %s
        and pack_author_id = %s
        and create_dttm = %s'''
    val = (
        user_data['pack_name'], user_data['pack_author_id'],
        user_data['create_dttm']
    )
    mycursor.execute(sql, val)
    result = mycursor.fetchall()
    created_pack_id = result[0][0]
    # add admin rights for new pack
    sql = '''insert into user_pack_roles
    values (%s, %s, %s, %s, %s)'''
    val = (
        user_data['pack_author_id'], created_pack_id, 'admin', -1,
        user_data['create_dttm'])
    mycursor.execute(sql, val)
    # add this pack to user packs
    sql = '''insert into user_packs
    values (%s, %s, %s)'''
    val = (
        user_data['pack_author_id'], created_pack_id,
        user_data['create_dttm']
    )
    mycursor.execute(sql, val)
    update.message.reply_text('Pack created!')
    return ConversationHandler.END
|
from django.contrib import admin
from .models import Job
from daterange_filter.filter import DateRangeFilter
from simple_history.admin import SimpleHistoryAdmin
from core.actions.export_to_csv import export_to_csv
class JobAdmin(SimpleHistoryAdmin):
    """Admin configuration for Job, including expired entries."""

    def get_queryset(self, request):
        """To return all jobs in admin including those that are expired"""
        qs = self.model.with_expired_objects.get_queryset()
        ordering = self.get_ordering(request)
        if ordering:
            qs = qs.order_by(*ordering)
        return qs
    # Template providing the date-range filter UI in the change list.
    change_list_filter_template = "admin/filter_listing.html"
    fieldsets = (
        ('Author', {'fields': ('author',)}),
        ('Practice', {'classes': ('grp-collapse grp-open',),
                      'fields': ('practice', 'contact',)}),
        ('Job Details', {'classes': ('grp-collapse grp-closed',),
                         'fields': ('title', 'position', 'category',
                                    'profession', 'description')}),
        ('Location', {'fields': ('county', 'post_code')}),
        ('Dates', {'fields': ('start_date', 'end_date', 'expiry_date')}),
        ('Extras', {'fields': ('sole_charge', 'shifts', 'exotics')}),
        ('Publish', {'fields': ('publish',)}),
        ('Note', {'fields': ('job_note',)})
    )
    list_select_related = True
    list_display = ["title",
                    "practice",
                    'id',
                    "contact",
                    "county",
                    "category",
                    "position",
                    "start_date",
                    "expiry_date",
                    "job_note", ]
    # job_note is editable directly from the change list.
    list_editable = ('job_note',)
    list_per_page = 15
    list_filter = ["practice__practice",
                   "category",
                   "position",
                   "profession",
                   "title",
                   "county__county",
                   "shifts",
                   ("expiry_date", DateRangeFilter),
                   ("start_date", DateRangeFilter),
                   ("end_date", DateRangeFilter), ]
    date_hierarchy = 'publish'
    # '^' prefixes: startswith matching on the search fields.
    search_fields = ['^practice__practice', '^title', '^county__county',
                     '^position__position', ]
    actions = [export_to_csv]
admin.site.register(Job, JobAdmin)
|
class Solution:
    # @return a string
    def countAndSay(self, n):
        """Return the n-th term of the count-and-say sequence.

        Starting from "1", each term describes the previous one as a
        series of "<run length><digit>" pairs ("1211" -> "111221").
        """
        term = '1'
        for _ in range(n - 1):
            pieces = []
            pos = 0
            while pos < len(term):
                # Find the end of the run of equal digits at ``pos``.
                run_end = pos
                while run_end < len(term) and term[run_end] == term[pos]:
                    run_end += 1
                pieces.append(str(run_end - pos) + term[pos])
                pos = run_end
            term = ''.join(pieces)
        return term
if __name__ == '__main__':
    A = Solution()
    # Fix: ``print len(...)`` is a Python-2-only statement (SyntaxError
    # on Python 3); the parenthesized form is valid on both.
    print(len(A.countAndSay(31)))
import csv
import json
# Build a {protocol number: keyword} table from the IANA
# protocol-numbers CSV and dump it to JSON.
networkProtocols = {}
with open("protocol-numbers-1.csv", "r") as f:
    reader = csv.reader(f, delimiter=";", )
    for row in reader:
        # Skip the header, the unassigned range and unnamed entries.
        if row[0] == "Decimal" or row[0] == "143-252" or row[1] == "":
            continue
        # Fix: bare ``print x`` statements are Python-2-only; the
        # parenthesized form works on both Python 2 and 3.
        print(row[0])
        n = int(row[0])
        print(n)
        networkProtocols[n] = row[1]
with open("networkProtocols.json", "w") as f:
    out = json.dumps(networkProtocols, sort_keys=True)
    print(out)
    f.write(out)
|
#!/usr/bin/env python3
"""
test for the Log module.
"""
import os
import signal
import time
import unittest
from base_test import PschedTestBase
from pscheduler.log import Log
class TestLog(PschedTestBase):
    """
    Log tests.
    """

    def test_log(self):
        """Logging tests"""
        # Not much to test here but exercise the code nonetheless
        # for regression/coverage.
        log = Log(verbose=False, prefix='test', syslog=False)
        log.debug("Invisible debug.")
        try:
            raise ValueError("Test exception")
        except ValueError:
            log.exception("Test exception with message")
        for num in range(1, 5):
            log.debug_always("Debug Always")
            log.debug("Debug")
            log.info("Info")
            log.warning("Warning")
            log.error("Error")
            # Fix: message used to read "Crtitical".
            log.critical("Critical")
            # Alternate SIGUSR1/SIGUSR2 to exercise the module's signal
            # handlers -- semantics live in pscheduler.log; TODO confirm.
            os.kill(os.getpid(),
                    signal.SIGUSR1 if (num % 2) != 0 else signal.SIGUSR2)
        # TODO: This needs a test of the pickler used to pass data to
        # child processes.
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
#! /usr/local/bin/python2
# FIXME: This currently only works in python2 due to weird library issues with google.
# For now, to hack around this, we're pushing those google imports into the method. If
# the results are already pre-cached, which you force by
import os.path
import cloudpickle
import pandas as pd
from util.caching import cache_today
from util.shared import parse_args
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/gmail.readonly']
@cache_today
def _query_gmail(query, pages_max=100, force=False):
    """Search Gmail for *query* and return matching messages as a DataFrame.

    query: Gmail search string (same syntax as the web UI search box).
    pages_max: maximum number of result pages to fetch.
    force: bypass the daily caches (here and in the nested helpers).

    Returns a DataFrame with columns id, sender, subject, sent_at, snippet
    (sent_at converted to US/Pacific).
    """
    # Imported lazily: see the FIXME at the top of the file about the
    # python2-only google library issues.
    from google_auth_oauthlib.flow import InstalledAppFlow
    from googleapiclient.discovery import build

    def __build_service():
        # Build an authenticated Gmail service, reusing pickled credentials
        # when present and running the OAuth flow otherwise.
        creds = None
        if os.path.exists('token.pickle'):
            with open('token.pickle', 'rb') as token:
                creds = cloudpickle.load(token)
        # If there are no (valid) credentials available, let the user log in.
        if not creds or not creds.valid:
            flow = InstalledAppFlow.from_client_secrets_file('credentials.json', SCOPES)
            creds = flow.run_local_server(port=55542)
            with open('token.pickle', 'wb') as token:
                cloudpickle.dump(creds, token)
        service = build('gmail', 'v1', credentials=creds)
        return service
    service = __build_service()

    @cache_today
    def __search_gmail(force=False, **kwargs):
        return service.users().messages().list(**kwargs).execute()

    results = []
    params = {
        "q": query,
        "userId": "me",
    }
    for ix in range(pages_max):
        result = __search_gmail(force=force, **params)
        # BUG FIX: a page with no hits contains no "messages" key at all,
        # which used to raise KeyError here.
        results += result.get("messages", [])
        if "nextPageToken" in result:
            params["pageToken"] = result["nextPageToken"]
        else:
            break

    @cache_today
    def __fetch_details(row, force=False, format="metadata"):
        return service.users().messages().get(userId="me", id=row["id"], format=format).execute()

    search_df = pd.DataFrame(results)
    messages_df = search_df.apply(__fetch_details, axis=1, result_type="expand", force=force)
    messages_df["internalDate"] = pd.to_datetime(messages_df["internalDate"], unit="ms")

    def __extract_metadata(row):
        # Pull From/Subject/Date out of the raw header list of each message.
        df = pd.DataFrame(row["payload"]["headers"]).set_index("name").sort_index()
        return {
            "sender": df.loc["From", "value"],
            "subject": df.loc["Subject", "value"],
            "sent_at": df.loc["Date", "value"],
        }
    metadata_df = messages_df.apply(__extract_metadata, axis=1, result_type="expand")
    metadata_df["sent_at"] = pd.to_datetime(metadata_df["sent_at"], utc=True).dt.tz_convert("US/Pacific")
    full_df = pd.concat([messages_df, metadata_df], axis=1)[["id", "sender", "subject", "sent_at", "snippet"]]
    return full_df
if __name__ == '__main__':
    args = parse_args("Download emails from Gmail", [
        dict(name_or_flags="--force", action="store_true", help="whether to forcibly avoid cache"),
        dict(name_or_flags="query", help="email query to use"),
    ])
    results_df = _query_gmail(args.query, force=args.force)
    # NOTE(review): deliberate breakpoint for interactive inspection of
    # results_df; remove before automating this script.
    import pdb; pdb.set_trace()
|
from django.apps import AppConfig
class FlowchartConfig(AppConfig):
    """Django app configuration for the 'flowchart' application."""
    name = 'flowchart'
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
"""
This script converts the KSCGR name files and structure folder to VOC structure folder,
i.e., convert the structure of folders to a single folder with increasingly sorted names
for VOC annotation.
KSCGR folders are in the format:
KSCGR/
- data1/
- boild-egg/
- 0.jpg
- 1.jpg
- ...
- ham-egg/
- kinshi-egg/
- omelette/
- scramble-egg/
- data2/
- boild-egg/
- ham-egg/
- ...
-...
VOC folder has the format:
VOC/
- JPEGImages/
- 000001.jpg
- 000002.jpg
- ...
We convert to VOC format due to faster-rcnn.pytorch format to train and test data.
"""
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
import os
import sys
import shutil
import argparse
from os.path import splitext, basename, isdir, join, dirname
def main(kscgrfile, vocfolder):
    """ Convert KSCGR folder format to VOC format.

    kscgrfile: text file with one image path per line (first whitespace field).
    vocfolder: existing output root; images are copied into
               <vocfolder>/JPEGImages with zero-padded sequential names.

    Writes <vocfolder>/paths.txt (new paths) and map_paths.txt (old -> new)
    next to the input file.
    """
    if isdir(vocfolder):
        folderout = join(vocfolder, 'JPEGImages')
        if not isdir(folderout):
            os.mkdir(folderout)
    else:
        logger.error("'%s' is not a valid folder" % vocfolder)
        # BUG FIX: exit with a non-zero status on error (was 0).
        sys.exit(1)
    fout_paths = join(vocfolder, 'paths.txt')
    fout_map = join(dirname(kscgrfile), 'map_paths.txt')
    # BUG FIX: the file handles no longer shadow the fout_map / fout_paths
    # path variables, which are logged again after the with-block; previously
    # the final log lines printed a closed file object instead of the path.
    with open(kscgrfile) as fin, \
         open(fout_paths, 'w') as fpaths, \
         open(fout_map, 'w') as fmap:
        for i, line in enumerate(fin, start=1):
            path = line.strip().split()[0]
            namefile, ext = splitext(basename(path))
            fname = str(i).zfill(6)
            pathout = join(folderout, fname+'.jpg')
            fpaths.write('%s\n' % pathout)
            shutil.copy2(path, pathout)
            fmap.write('%s : %s\n' % (path, pathout))
            # Log once per sequence (frame '0') to show progress.
            if namefile == '0':
                logger.info('Renaming: %s -> %s' % (path, pathout))
    logger.info('All files copied into %s' % folderout)
    logger.info('Saved map_paths.txt at: %s' % fout_map)
    logger.info('Saved paths.txt at: %s' % fout_paths)
if __name__ == "__main__":
    # CLI: inputfile lists the KSCGR image paths; vocfolder is the VOC root.
    argparser = argparse.ArgumentParser()
    argparser.add_argument('inputfile', metavar='input', help='Path to the file containing paths of the KSCGR dataset.')
    argparser.add_argument('vocfolder', metavar='voc', help='Path to the folder where images are recorded.')
    args = argparser.parse_args()
    main(args.inputfile, args.vocfolder)
|
#!/usr/bin/env python3
import os
import sys
from distutils.core import setup
import setuptools
from projector.version import __version__
# Refuse to install on interpreters older than 3.3.
if sys.version_info < (3, 3):
    # The old line-continuation inside the string baked the indentation into
    # the printed message; adjacent literals keep it on one clean line.
    print("THIS MODULE REQUIRES PYTHON 3.3+. YOU ARE CURRENTLY "
          "USING PYTHON {0}".format(sys.version))
    sys.exit(1)
def package_files(directory):
    """Collect every file beneath *directory*, each prefixed with '..'.

    The '..' prefix makes the paths usable as package_data entries that are
    resolved relative to the package directory.
    """
    collected = []
    for root, _, names in os.walk(directory):
        collected.extend(os.path.join('..', root, name) for name in names)
    return collected
# Bundle every file under projector/data/ as package data.
EXTRA_FILES = package_files('./projector/data/')
setup(
    name="Projector",
    version=__version__,
    # package_data={'projector': data_files},
    package_data={'': EXTRA_FILES},
    include_package_data=True,
    author="Stanislav Arnaudov",
    author_email="stanislav_ts@abv.bg",
    description="Tool for easy project creation from templates.",
    long_description=open("README.md").read(),
    long_description_content_type="text/markdown",
    packages=setuptools.find_packages(),
    license="GNU General Public License v3.0",
    keywords="projects management generator",
    url="https://github.com/palikar/projector",
    setup_requires=["pytest-runner", "pystache"],
    tests_require=["pytest"],
    # Installs the `projector` console command.
    entry_points={
        'console_scripts': [
            'projector = projector.create_project:main'
        ]
    },
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Natural Language :: English",
        "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: System :: Installation/Setup"
    ],
)
|
# -*- coding: utf-8 -*-
class HtmlGenerator(object):
    """Render recommendation/ranking movie lists into a Bootstrap HTML page."""
    def __init__(self, localhtmlname):
        # Output path of the generated HTML file.
        self._localhtmlname = localhtmlname
    def createHtmlFromList(self, movieListGenerator, description):
        """Build a collapsible Bootstrap panel group for up to 50 titles.

        movieListGenerator: callable yielding movie objects; its __name__
        is used to build unique DOM element ids.
        description: heading text shown above the list.
        """
        html = '<div class="panel-group" id="accordion">'
        html += '<h5>' + description + "</h5>"
        for i, m in enumerate(movieListGenerator(50)):
            # Unique id suffix: generator name + position in the list.
            once = movieListGenerator.__name__ + str(i)
            html += ' <div class="panel panel-default" id="panel' + once + '">'
            html += ' <div class="panel-heading">'
            html += ' <h6 class="panel-title">'
            html += ' <a data-toggle="collapse" data-target="#collapse' + once + '"'
            html += ' >'
            html += m.getTitle() + " (" + str(m.getYear()[0] if len(
                m.getYear()) > 0 else "Unknown") + ')' + '<span class="label label-warning pull-right"> ' + str(
                int(m.getRecommenderScore() * 10) / 10.) + ' </span>'
            html += ' </a>'
            html += ' </h6>'
            html += ' </div>'
            html += ' <div id="collapse' + once + '" class="panel-collapse collapse">'
            html += ' <div class="panel-body">'
            html += "<p><b>Predicted rating: </b>" + str(int(m.getRecommenderScore() * 100) / 100) + "</p>\n"
            html += "<p><b>Others users rating: </b>" + str(int(m.getBayesianScore() * 100) / 100) + "</p>\n"
            html += "<p><b>Vote: </b>" + str(m.getVote()) + "</p>\n"
            html += "<p><b>Genres: </b>" + str(", ".join(m.getGenres())) + "</p>\n"
            html += "<p><b>Year: </b>" + str(", ".join(str(x) for x in m.getYear())) + "</p>\n"
            html += "<p><b>Kind: </b>" + str(m.getKind()) + "</p>\n"
            html += "<p><b>Link: </b>" + '<a href="' + str(m.getLink()) + '">ImDB search</a>' + "</p>\n"
            html += ' </div>'
            html += ' </div>'
            html += '</div>'
        html += '</div>'
        return html
    def createHtmlRankingMovie(self, movieList):
        """Panel group for top-rated unseen movies."""
        return self.createHtmlFromList(movieList.getTopRatedUnseenMovie, "Top rated un-watched movie")
    def createHtmlRecommendationMovie(self, movieList):
        """Panel group for recommended unseen movies."""
        return self.createHtmlFromList(movieList.getTopRecommendationUnseenMovie, "Recommendation of un-watched movie")
    def createHtmlRankingTV(self, movieList):
        # NOTE(review): heading says "movie" although this renders TV titles.
        return self.createHtmlFromList(movieList.getTopRatedUnseenTV, "Top rated un-watched movie")
    def createHtmlRecommendationTV(self, movieList):
        # NOTE(review): heading says "movie" although this renders TV titles.
        return self.createHtmlFromList(movieList.getTopRecommendationUnseenTV, "Recommendation of un-watched movie")
    def createHtmlStatistics(self):
        """Statistics tab content; currently an empty placeholder."""
        html = ""
        return html
    def createHtml(self, movieList):
        """Assemble the full page: header, tab bar, and one pane per list."""
        html = """
<!DOCTYPE html>
<html lang="en-US">
<head>
<meta charset="UTF-8">
<!-- Latest compiled and minified CSS -->
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap.min.css" integrity="sha384-1q8mTJOASx8j1Au+a5WDVnPi2lkFfwwEAa8hDDdjZlpLegxhjVME1fgjWPGmkzs7" crossorigin="anonymous">
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js"></script>
<style>
.panel-heading a:after {
    font-family:'Glyphicons Halflings';
    content:"\e114";
    float: right;
    color: grey;
}
.panel-heading a.collapsed:after {
    content:"\e080";
}
</style>
</head>
<body>
<div class="container">
<div id="content">
<div class="row">
<div class="col-md-2"></div>
<div class="col-md-8">
"""
        html += """
<ul id="tabs" class="nav nav-tabs" data-tabs="tabs">
<li class="active"><a href="#recommendationmovie" data-toggle="tab"><h5>Recommended Movie</h5></a></li>
<li><a href="#rankingmovie" data-toggle="tab"><h5>Top rated movie</h5></a></li>
<li><a href="#recommendationtv" data-toggle="tab"><h5>Recommended TV</h5></a></li>
<li><a href="#rankingtv" data-toggle="tab"><h5>Top rated TV</h5></a></li>
<li><a href="#stats" data-toggle="tab"><h5>Statistics</h5></a></li>
</ul>
</div>
<div class="col-md-2"></div>
</div>
<div class="row">
<div class="col-md-3"></div>
<div class="col-md-6">
"""
        html += """<div id="my-tab-content" class="tab-content"> """
        html += """<div class="tab-pane active" id="recommendationmovie">"""
        html += self.createHtmlRecommendationMovie(movieList)
        html += """</div>"""
        html += """ <div class="tab-pane" id="rankingmovie"> """
        html += self.createHtmlRankingMovie(movieList)
        html += """</div>"""
        html += """<div class="tab-pane" id="recommendationtv">"""
        html += self.createHtmlRecommendationTV(movieList)
        html += """</div>"""
        html += """ <div class="tab-pane" id="rankingtv"> """
        html += self.createHtmlRankingTV(movieList)
        html += """</div>"""
        html += """ <div class="tab-pane" id="stats"> """
        html += self.createHtmlStatistics()
        html += """</div>"""
        html += """ </div> """
        html += """
</div>
<div class="col-md-3"></div>
</div>
</div>
<script type="text/javascript">
jQuery(document).ready(function ($) {
    $('#tabs').tab();
});
</script>
</div> <!-- container -->
<!-- Latest compiled and minified JavaScript -->
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/js/bootstrap.min.js" integrity="sha384-0mSbJDEHialfmuBBQP6A4Qrprq5OVfW37PRR3j5ELqxss1yVqOtnepnHVP9aJ7xS" crossorigin="anonymous"></script>
</body>
</html>
"""
        return html
    def generate(self, movieList):
        """Render the page for *movieList* and write it to the output file."""
        html = self.createHtml(movieList)
        with open(self._localhtmlname, 'w', encoding='utf8') as file_:
            file_.write(html)
|
mylist = [6,1,2,4,8]
# Index of the first occurrence of the smallest element.
index_min = mylist.index(min(mylist))
print(index_min)
|
#!/usr/bin/env python
# coding=UTF-8
import socket
import json
class Client(object):
    """
    A JSON socket client used to communicate with a JSON socket server. All the
    data is serialized in JSON. How to use it:
    connect(host, port), then send(data) for each JSON-serializable object,
    and close() when done.
    """
    # Class-level defaults so __del__/close work before connect() is called.
    host = None
    port = None
    socket = None

    def __del__(self):
        # Release the connection when the client is garbage-collected.
        self.close()

    def connect(self, host, port):
        """Open a TCP connection to (host, port); returns self for chaining."""
        self.socket = socket.socket()
        self.host = host
        self.port = port
        self.socket.connect((host, int(port)))
        return self

    def send(self, data):
        """Send one JSON-serializable object; returns self for chaining."""
        if not self.socket:
            raise Exception('You have to connect first before sending data')
        self._send(self.socket, data)
        return self

    def close(self):
        """Close the socket (safe to call more than once)."""
        if self.socket:
            self.socket.close()
            self.socket = None

    def _send(self, sock, data):
        # BUG FIX: the parameter used to be named `socket`, shadowing the
        # module; `socket.error` below then resolved against the socket
        # object (AttributeError) instead of the exception class.
        import errno  # BUG FIX: errno was referenced but never imported

        try:
            serialized = json.dumps(data)
        except (TypeError, ValueError):
            raise Exception('You can only send JSON-serializable data')
        try:
            # send the length of the serialized data first
            sock.send(('%d\n' % len(serialized)).encode())
            # send the serialized data
            sock.sendall(serialized.encode())
        # catch the exception of the disconnection
        except socket.error as e:
            # [Errno 104] connection reset by peer (ECONNRESET) on send()
            if e.errno == errno.ECONNRESET:
                # BUG FIX: reconnect through connect(), which rebuilds the
                # socket and passes the endpoint as the required tuple; the
                # old code called sock.connect(self.host, self.port) with the
                # wrong arity on a dead socket.
                self.connect(self.host, self.port)
if __name__ == '__main__':
    # NOTE(review): `time` and `random` are imported here but never used.
    import time
    import random
    import sys
    host = 'localhost'
    port = '8087'
    client = Client()
    client.connect(host, port)
    #line : str
    # Replay a JSON-lines file (last CLI argument), one object per line.
    for line in open(sys.argv[-1]):
        # transfer the string to dictionary type, because our monitoring agent can only deal with the dictionary
        line_dict = json.loads(line)
        client.send(line_dict)
    client.close()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 28 12:20:45 2018
@author: opensource
"""
import glob
import cv2
import time
import os
import paracept as pc
# Poll the drop folder forever, handing each JPEG to paracept; on transient
# filesystem errors, back off for a second and retry.
while True:
    try:
        for (i,image_file) in enumerate(glob.iglob('/home/opensource/1970_01_06-1970_01_10/*.jpg')):
#            time.sleep(1)
#            print(os.path.isfile(image_file))
            img = cv2.imread(image_file)
#            cv2.imwrite('folder_processings/images/yo{}.JPG'.format(i), img)
#            os.remove(image_file)
#            process(img, i)
            im_name = os.path.basename(image_file)
            # Characters 17..30 of the filename carry the date/time stamp.
            # NOTE(review): assumes a fixed filename layout - confirm.
            dandt = im_name[17:31]
            pc.accept_and_die(image_file, dandt)
    except OSError:
        time.sleep(1)
#print(os.path.isfile('images/*.JPG')) |
import fitsio
import numpy as np
import numpy.random as npr
from scipy import interpolate
from scipy.optimize import minimize
from funkyyak import grad, numpy_wrapper as np
from redshift_utils import load_data_clean_split, project_to_bands
from slicesample import slicesample
import matplotlib.pyplot as plt
import seaborn as sns
import sys, os
sns.set_style("white")
current_palette = sns.color_palette()
npr.seed(42)  # fixed seed for reproducible figures
## save figure output files
out_dir = "/Users/acm/Dropbox/Proj/astro/DESIMCMC/tex/quasar_z/figs/"
## load a handful of quasar spectra
lam_obs, qtrain, qtest = \
    load_data_clean_split(spec_fits_file = 'quasar_data.fits', Ntrain = 400)
## load in basis
th = np.load("cache/basis_th.npy")
lls = np.load("cache/lls.npy")
lam0 = np.load("cache/lam0.npy")
# th packs the log-weights (first N columns) and log-basis (rest) together.
N = th.shape[1] - lam0.shape[0]
omegas = th[:,:N]
betas = th[:, N:]
W = np.exp(omegas)
B = np.exp(betas)
# Normalize each basis row to sum to one.
B = B / B.sum(axis=1, keepdims=True)
## compute all marginal expected z's and compare
# For each test quasar, read its cached posterior samples (if present) and
# summarize redshift by the sample mean and a central 99% interval.
z_pred = np.zeros(qtest['Z'].shape)
z_lo = np.zeros(qtest['Z'].shape)
z_hi = np.zeros(qtest['Z'].shape)
for n in range(qtest['Z'].shape[0]):
    z_n = qtest['Z'][n]
    spec_n = qtest['spectra'][n, :]
    if os.path.exists("cache_remote/cache/ll_samps_train_idx_%d.npy"%n):
        ll_samps = np.load("cache_remote/cache/ll_samps_train_idx_%d.npy"%n)
        th_samps = np.load("cache_remote/cache/th_samps_train_idx_%d.npy"%n)
        Nsamps = th_samps.shape[0]
        ## reconstruct the basis from samples
        # Discard the first half of the chain as burn-in.
        # (// keeps the index integral; / 2 is identical on py2 ints but
        # produces a float under py3.)
        samp_idxs = np.arange(Nsamps // 2, Nsamps)
        z_pred[n] = th_samps[samp_idxs, -1].mean()
        z_lo[n], z_hi[n] = np.percentile(th_samps[samp_idxs, -1], [.5, 99.5])
    else:
        # print() form runs under both Python 2 and 3.
        print("missing %d" % n)
        z_pred[n] = np.nan
        continue
z_test = qtest['Z']
## figure out the Max Likelihood weight value with respect to each test example
#def loss_omegas(omegas, B, X, inv_var):
#    ll_omega = 1 / (100.) * np.sum(np.square(omegas))
#    Xtilde = np.dot(np.exp(omegas), B)
#    return np.sum( inv_var * np.square(X - Xtilde) ) + ll_omega
#loss_omegas_grad = grad(loss_omegas)
# NOTE(review): fit_weights_given_basis is neither defined nor imported in
# this file; this loop raises NameError unless it is injected elsewhere.
What = np.zeros((len(z_test), B.shape[0]))
for n in range(len(z_test)):
    spec_n = qtest['spectra'][n, :]
    ivar_n = qtest['spectra_ivar'][n, :]
    z_n = qtest['Z'][n]
    What[n, :] = fit_weights_given_basis(B, lam0, spec_n, ivar_n, z_n, lam_obs, sgd_iter=100)
    #convert spec_n to lam0
    #spec_n_resampled = np.interp(lam0, lam_obs/(1+z_n), spec_n, left=np.nan, right=np.nan)
    #ivar_n_resampled = np.interp(lam0, lam_obs/(1+z_n), ivar_n, left=np.nan, right=np.nan)
    #spec_n_resampled[np.isnan(spec_n_resampled)] = 0.0
    #ivar_n_resampled[np.isnan(ivar_n_resampled)] = 0.0
    #omegas = .01*npr.randn(B.shape[0])
    #res = minimize(x0 = omegas,
    #               fun = lambda(th): loss_omegas(th, B, spec_n_resampled, ivar_n_resampled),
    #               jac = lambda(th): loss_omegas_grad(th, B, spec_n_resampled, ivar_n_resampled),
    #               method = 'L-BFGS-B',
    #               options = { 'disp': False, 'maxiter': 1000 })
    #What[n, :] = np.exp(res.x)
# visualize What in 2-d
from sklearn.decomposition import PCA
pca = PCA(n_components=5)
Wproj = pca.fit_transform(np.log(What))
plt.scatter(Wproj[:,0], Wproj[:, 1])
## figure out the big misses - remove the NAN
dists = np.abs(z_test - z_pred)
miss_idx = dists.argsort()[::-1][1:]
# Annotate the 30 worst predictions in PCA space.
for idx in miss_idx[1:30]:
    plt.text(Wproj[idx,0], Wproj[idx,1], s=idx)
plt.show()
## look at the total magnitude of the latent function (sum of W's)
plt.scatter(z_test, z_pred, s = .05*What.sum(axis=1))
for n in range(len(z_test)):
    plt.text(z_test[n], z_pred[n], "test: %d"%n)
plt.show()
## juxtapose the best fits against the worst fits
miss_idx = dists.argsort()[::-1][1:]
fig, axarr = plt.subplots(2, 1)
for idx in miss_idx[0:10]:
    axarr[0].plot(lam_obs/(1+qtest['Z'][idx]), qtest['spectra'][idx, :],
                  label="z = %2.2f"%qtest['Z'][idx])
axarr[0].legend()
for idx in miss_idx[-1:-10:-1]:
    axarr[1].plot(lam_obs/(1+qtest['Z'][idx]), qtest['spectra'][idx, :],
                  label="z = %2.2f"%qtest['Z'][idx])
axarr[1].legend()
plt.show()
## plot some reconstructions for the baddies, and the good ones
Nshow = 3
fig, axarr = plt.subplots(Nshow, 1)
for n, idx in enumerate(miss_idx[0:Nshow]):
    axarr[n].plot(lam_obs, qtest['spectra'][idx,:], label="$z = %2.2f"%qtest['Z'][idx])
    axarr[n].plot(lam0 * (1 + qtest['Z'][idx]), What[idx,:].dot(B))
    axarr[n].plot(lam_obs, qtest['spectra_ivar'][idx,:], alpha = .5, color = 'grey')
    axarr[n].set_xlim(lam_obs[0], lam_obs[-1])
    axarr[n].set_ylim(qtest['spectra'][idx,:].min(), qtest['spectra'][idx,:].max())
    axarr[n].set_title("$|z_{spec} - z_{photo}| = %2.2f"%dists[idx])
    axarr[n].legend()
## plot some reconstructions for the baddies, and the good ones
Nshow = 3
fig, axarr = plt.subplots(Nshow, 1)
for n, idx in enumerate(miss_idx[-1:(-Nshow-1):-1]):
    axarr[n].plot(lam_obs, qtest['spectra'][idx,:], label="$z = %2.2f"%qtest['Z'][idx])
    axarr[n].plot(lam0 * (1 + qtest['Z'][idx]), What[idx,:].dot(B))
    axarr[n].plot(lam_obs, qtest['spectra_ivar'][idx,:], alpha = .5, color = 'grey')
    axarr[n].set_xlim(lam_obs[0], lam_obs[-1])
    axarr[n].set_ylim(qtest['spectra'][idx,:].min(), qtest['spectra'][idx,:].max())
    axarr[n].set_title("$|z_{spec} - z_{photo}| = %2.2f"%dists[idx])
    axarr[n].legend()
plt.show()
|
import json
import pymongo
import tweepy
import time
import string
from collections import Counter
from nltk.tokenize import TweetTokenizer
from nltk.corpus import stopwords
import matplotlib.pyplot as plt
from datetime import datetime
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
class CustomStreamListener(tweepy.StreamListener):
    """Tweepy stream listener that persists every tweet into MongoDB."""
    def __init__(self, api):
        self.api = api
        # NOTE(review): super(tweepy.StreamListener, ...) skips
        # StreamListener.__init__ itself - confirm this is intentional.
        super(tweepy.StreamListener, self).__init__()
        self.db = pymongo.MongoClient('localhost', 27017).TwitterStreamDB
    def on_data(self, tweet):
        # Store the raw tweet JSON document in the tweets collection.
        full_data = json.loads(tweet)
        print(full_data)
        self.db.tweets.insert_one(full_data)
    def on_error(self, status_code):
        print(status_code)
        # Returning True keeps the stream alive after an error.
        return True
    def on_timeout(self):
        # Keep the stream alive after a timeout as well.
        return True
# SECURITY(review): live Twitter API credentials are hard-coded and checked
# into source control; they should be rotated and loaded from environment
# variables or a secrets store instead.
CONSUMER_KEY = '02yhVAcQIyL8HqA043VmCytxy'
CONSUMER_SECRET = 'Oo9bPETrpHBZrsIxxmvWl2GIv7n7FoyNQLAG3m3FKK5CQ5QOgU'
ACCESS_TOKEN = '1079695581348413441-Ndj5edc4tio2VgiTZSLnEKNUoWV14m'
ACCESS_SECRET = 'AhfqaWJSSXC6qGdn3ygmavAia9WzIuthgwo50kGipmxuU'
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
api = tweepy.API(auth)
sapi = tweepy.streaming.Stream(auth, CustomStreamListener(api))
# the list of keywords for filtering tweets
keyword_list = ['Pemilu','Prabowo','Jokowi','presiden']
# filter() blocks until the stream ends; the message below prints only then.
sapi.filter(track = keyword_list)
print ('Tweets have been successfully stored into mongoDB.')
# -*- coding: utf-8 -*-
import pandas as pd
# Prompt for the CSV path interactively; the first column becomes the index.
data_file = input("Enter file path for csv data: ")
print(data_file)
test_data = pd.read_csv(data_file, index_col=0)
def explore_algorithms(data: pd.DataFrame, supervised: bool = True,
                       y: str = None, pred_type: str = 'class',
                       text_data: bool = False) -> pd.DataFrame:
    '''
    Based on the pred_type, runs prediction on a set of algorithms and returns
    loss for each
    Parameters
    -------
    data = pandas Dataframe of shape [n_observations, n_features+1]
    supervised: bool, default = True
        If true, y value is the labeled data
    y : str, default = None
        The column to be predicted
    pred_type: str, default = 'class'
        Type of prediction either class for classification or reg for
        regression
    text_data: bool, default = 'False'
        If True, then the dataset is comprised of text for analysis
    Returns
    -------
    summary: pandas Dataframe of shape [n_algorithms, 2]
        training loss function and dev loss function for each algorithm
    '''
    # Supervised Learning
    assert(pred_type in ['class','reg']), ('use "class" to predict a category'
                                           ' or "reg" to predict a numeric value')
    from sklearn.model_selection import train_test_split
    from sklearn.metrics import accuracy_score
    # 70/30 train/dev split, fixed seed for reproducibility.
    Xtrain,Xdev,ytrain,ydev = train_test_split(data.drop(y,axis=1),data[y],
                                               test_size = .3,
                                               random_state = 117)
    def fit_model(model):
        # NOTE(review): accuracy is only meaningful for classification; a
        # different scorer will be needed once regression is implemented.
        model.fit(Xtrain,ytrain)
        return(accuracy_score(ytrain,model.predict(Xtrain)),
               accuracy_score(ydev,model.predict(Xdev)))
    if supervised:
        assert y is not None, 'need to specify which column contains labels'
        assert(y in data.columns), 'y column not in the dataset'
        models = []
        if pred_type == 'class': # Classification Problems
            summary = pd.DataFrame(columns = ['TrainingAccuracy',
                                              'DevAccuracy'])
            if data.shape[0] < 100000: # Less than 100,000 samples
                from sklearn.svm import LinearSVC
                models.append(('LinearSVC',LinearSVC()))
                from sklearn.tree import DecisionTreeClassifier
                models.append(('DecisionTreeClassifier', DecisionTreeClassifier()))
                if text_data:
                    from sklearn.naive_bayes import GaussianNB
                    models.append(('GaussianNB', GaussianNB()))
                else:
                    from sklearn.neighbors import KNeighborsClassifier
                    models.append(('KNeighborsClassifier', KNeighborsClassifier()))
                    from sklearn.svm import SVC
                    models.append(('SVC',SVC()))
            else: # More than 100,000 samples
                from sklearn.linear_model import SGDClassifier
                # BUG FIX: append takes one argument; the name/estimator pair
                # must be a tuple like every other entry (the old call
                # raised TypeError).
                models.append(('SGDClassifier', SGDClassifier()))
            for name, model in models:
                summary.loc[name, ['TrainingAccuracy',
                                   'DevAccuracy']] = fit_model(model)
            return summary
        else: # Regression Problems
            # Algorithms are ['Lasso','ElasticNet','SVR(kernal="rbf"'
            # 'Ridge Regression','SVR(kernal="linear"']
            print('Coming Soon')
    # Unsupervised Learning
    else:
        print('Coming Soon')
# Smoke-test on the loaded dataset; 'Survived' suggests Titanic-style data.
print(explore_algorithms(test_data, y = 'Survived'))
|
# normalizes data
# Scale columns '0' and '1' of disk.csv to unit L2 norm and write the result
# next to the input as disk_normal.csv.
import pandas as pd
import numpy as np
file = "data/disk.csv"
df = pd.read_csv(file)
print(df.columns.values)
df['0'] = df['0']/np.linalg.norm(df['0'])
df['1'] = df['1']/np.linalg.norm(df['1'])
# Stripping "_normal" first keeps the output name stable if the script is
# ever pointed at its own output.
df.to_csv(file.replace("_normal","").replace(".csv", "_normal.csv"), index=False)
|
"""Plot helper stuff."""
# pylint: disable=invalid-name
import matplotlib.colors
import matplotlib.pyplot as plt
import numpy as np
from numpy.random import RandomState
from copy import deepcopy
# Reuse the shared colormap from lmb.plot when available; otherwise build a
# deterministic random colormap (seeded, so colors are stable across runs).
try:
    import lmb.plot
    CMAP = lmb.plot.CMAP
except (ImportError, AttributeError):
    # Narrowed from a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; we only expect a missing module/attribute here.
    CMAP = matplotlib.colors.ListedColormap(RandomState(0).rand(256 * 256, 3))
def phd3d(phd_, *args, **kwargs):
    """Plot 3d PHD.

    phd_: 2d array; plotted as a surface over its index grid.
    Extra args/kwargs are forwarded to Axes3D.plot_surface.
    """
    fig = plt.figure(figsize=(30, 30))
    # fig.gca(projection='3d') was deprecated in Matplotlib 3.4 and removed
    # in 3.6; add_subplot is the supported way to get a 3d axes.
    ax = fig.add_subplot(projection='3d')
    xi = np.arange(phd_.shape[1])
    yi = np.arange(phd_.shape[0])
    X, Y = np.meshgrid(xi, yi)
    ax.plot_surface(X, Y, phd_, *args, **kwargs)
def path3d(path_, phd_, c=0, *args, **kwargs):
    """Plot 3d path."""
    # Lift each (row, col) waypoint to the PHD surface height.
    heights = [phd_[row, col] for row, col in path_.T]
    plt.gca().plot(path_[0, :], path_[1, :], heights, color=CMAP(c), *args, **kwargs)
def phd(phd_, *args, **kwargs):
    """Plot."""
    axes = plt.gca()
    # Anchor the color scale at zero so empty cells render consistently.
    image = axes.imshow(phd_, *args, origin='lower', vmin=0, vmax=phd_.max(), **kwargs)
    plt.colorbar(image)
def path(path_, c=0, *args, **kwargs):
    """Plot a 2d path (given in row/column order) on the current axes."""
    # Honour an explicit color, falling back to the shared colormap;
    # pop() removes it so it is not passed twice to plot().
    color = kwargs.pop("color", CMAP(c))
    plt.gca().plot(path_[1, :], path_[0, :], color=color, *args, **kwargs)
def cumulative_score(phd, paths, pD=1, c=0, label=None, *args, **kwargs):
    """Plot the cumulative detection score accumulated along each agent path.

    phd: 2d array of detection mass; a local copy is mutated as mass is
         "consumed" by earlier agents.
    paths: list of (2, T) index arrays, one per agent, all of length T.
    pD: detection probability; visited cells are scaled by (1 - pD).
    c: colormap offset for agent colors.
    label: callable mapping an agent index to its legend label.
    """
    if label is None:
        # BUG FIX: use the lambda's own argument instead of closing over the
        # later loop variable `agent` (late binding made the default label
        # fragile and broken for any caller invoking it outside the loop).
        label = lambda aid: f"Agent {aid+c}"
    lenp = paths[0].shape[1]
    score = np.zeros((lenp + 1,))
    xs = np.arange(lenp)
    phd = deepcopy(phd)  # never mutate the caller's array
    for agent, p in enumerate(paths):
        ascore = np.zeros((lenp + 1,))
        for ii in range(lenp):
            ascore[ii + 1] = ascore[ii] + phd[p[0, ii], p[1, ii]]
            # Mass already covered is less likely to be detected again.
            phd[p[0, ii], p[1, ii]] *= 1 - pD
        pre_score = score.copy()
        score += ascore
        # pop() honours an explicit color once, then falls back to CMAP.
        color = kwargs.pop("color", CMAP(c + agent))
        plt.plot(xs, score[1:], color=color, *args, **kwargs)
        plt.fill_between(xs, pre_score[1:], score[1:], color=color, label=label(agent))
|
# Smoke-test message ("Testing for the last time" in Portuguese).
print('Testando pela ultima vez')
|
# isnumeric: True only when every character in the string is numeric
# (letters, decimal points and signs all make it False).
a = u'this56'
b = u'65156'
c = u'51wef53'
d = u'656.653'
# print() with parentheses runs under both Python 2 and Python 3;
# the bare print statements were Python-2-only.
print(a.isnumeric())  # False: contains letters
print(b.isnumeric())  # True: digits only
print(c.isnumeric())  # False: letters mixed in
print(d.isnumeric())  # False: '.' is not a numeric character
|
import webbrowser #importing the webbrowser module to work with browser
class Movies():  # container for movie metadata plus its trailer link
    """Store a movie's details and open its trailer in the default browser."""

    def __init__(self, movie_title, movie_storyline, movie_poster_url, movie_trailer_url):
        # Keep the constructor arguments on straightforwardly named attributes.
        self.title = movie_title
        self.storyline = movie_storyline
        self.poster_image_url = movie_poster_url
        self.trailer_youtube_url = movie_trailer_url

    def show_trailer(self):
        """Open this movie's trailer URL in a web browser."""
        # BUG FIX: the stored attribute is trailer_youtube_url; self.trailer
        # was never assigned, so this always raised AttributeError.
        webbrowser.open(self.trailer_youtube_url)
|
import torch
import numpy as np
def SGHMC(data, y, bayes_nn, eta=0.00002, L=5, alpha=0.01, V=1):
    """Run L steps of Stochastic Gradient Hamiltonian Monte Carlo.

    data, y: minibatch inputs/targets for bayes_nn.cal_nlpos.
    bayes_nn: model exposing parameters(), zero_grad() and cal_nlpos().
    eta: step size; L: number of leapfrog steps; alpha: friction term;
    V: estimated gradient-noise variance.

    Returns (list of parameter arrays on CPU, final negative log posterior).
    Requires CUDA: momenta/noise are drawn as torch.cuda.FloatTensor.
    """
    import sys  # BUG FIX: sys was used below but never imported

    current_ps = []
    beta = 0.5 * V * eta
    parameters = [parameter for parameter in bayes_nn.parameters()]
    for parameter in parameters:
        # Initialise momenta ~ N(0, eta) on the GPU.
        p = torch.cuda.FloatTensor(parameter.data.size()).normal_() * np.sqrt(eta)
        current_ps.append(p)
    momentum = 1.0 - alpha
    if beta > alpha:
        sys.exit('Eta is too large')
    # Noise scale for the friction-compensating injected noise.
    sigma = np.sqrt(2.0 * eta * (alpha - beta))
    for l in range(L):
        temp_U = bayes_nn.cal_nlpos(data, y)
        bayes_nn.zero_grad()
        temp_U.backward()
        for i in range(len(current_ps)):
            # SGHMC update: friction + gradient step + injected noise.
            reinitilize = torch.cuda.FloatTensor(parameters[i].data.size()).normal_() * sigma
            current_ps[i] = momentum * current_ps[i] - eta * parameters[i].grad.data + reinitilize
            parameters[i].data = parameters[i].data + current_ps[i]
    current_paras = [para.data.cpu().numpy() for para in parameters]
    return current_paras, temp_U.view(-1).data.tolist()[0]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-12-23 16:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the asset-tracking Item model.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Item',
            fields=[
                ('item_id', models.CharField(max_length=10, primary_key=True, serialize=False)),
                ('employee_name', models.CharField(max_length=25)),
                ('employee_id', models.CharField(max_length=10)),
                ('asset_type', models.CharField(choices=[('L', 'Laptop'), ('P', 'Printer'), ('S', 'Scanner'), ('D', 'Desktop'), ('K', 'Keyboard'), ('M', 'Mouse'), ('O', 'Others')], max_length=1)),
                ('date_of_issue', models.DateTimeField()),
                # Submission date stays empty until the asset is returned.
                ('date_of_submission', models.DateTimeField(blank=True, null=True)),
            ],
        ),
    ]
|
# Read, clean, and validate!!
# The first step of almost any data project is to read the data, check for errors and special cases, and prepare data for analysis. This is exactly what you'll do in this chapter, while working with a dataset obtained from the National Survey of Family Growth.
# Exploring the NSFG data
# To get the number of rows and columns in a DataFrame, you can read its shape attribute.
# To get the column names, you can read the columns attribute. The result is an Index, which is a Pandas data structure that is similar to a list. Let's begin exploring the NSFG data! It has been pre-loaded for you into a DataFrame called nsfg.
# Display the number of rows and columns
# Clean a variable
# In the NSFG dataset, the variable 'nbrnaliv' records the number of babies born alive at the end of a pregnancy.
# If you use .value_counts() to view the responses, you'll see that the value 8 appears once, and if you consult the codebook, you'll see that this value indicates that the respondent refused to answer the question.
# Your job in this exercise is to replace this value with np.nan. Recall from the video how Allen replaced the values 98 and 99 in the ounces column using the .replace() method:
# ounces.replace([98, 99], np.nan, inplace=True)
# Replace the value 8 with NaN
# Replace the value 8 with NaN
# (filled in: per the codebook, 8 means the respondent refused to answer)
nsfg['nbrnaliv'].replace([8], np.nan, inplace=True)
# Print the values and their frequencies
print(nsfg['nbrnaliv'].value_counts())
# Compute a variable
# For each pregnancy in the NSFG dataset, the variable 'agecon' encodes the respondent's age at conception, and 'agepreg' the respondent's age at the end of the pregnancy.
# Both variables are recorded as integers with two implicit decimal places, so the value 2575 means that the respondent's age was 25.75.
# Select the columns and divide by 100
agecon = nsfg['agecon'] / 100
agepreg = nsfg['agepreg'] / 100
# Make a histogram
# Histograms are one of the most useful tools in exploratory data analysis. They quickly give you an overview of the distribution of a variable, that is, what values the variable can have, and how many times each value appears.
# As we saw in a previous exercise, the NSFG dataset includes a variable 'agecon' that records age at conception for each pregnancy. Here, you're going to plot a histogram of this variable. You'll use the bins parameter that you saw in the video, and also a new parameter - histtype - which you can read more about here in the matplotlib documentation. Learning how to read documentation is an essential skill. If you want to learn more about matplotlib, you can check out DataCamp's Introduction to Matplotlib course.
# Plot the histogram
plt.hist(agecon, bins=20, histtype='step')
# Label the axes
plt.xlabel('Age at conception')
plt.ylabel('Number of pregnancies')
# Show the figure
plt.show()
# Compute birth weight
# Now let's pull together the steps in this chapter to compute the average birth weight for full-term babies.
# I've provided a function, resample_rows_weighted, that takes the NSFG data and resamples it using the sampling weights in wgt2013_2015. The result is a sample that is representative of the U.S. population.
# Then I extract birthwgt_lb1 and birthwgt_oz1, replace special codes with NaN, and compute total birth weight in pounds, birth_weight.
# # Resample the data
# nsfg = resample_rows_weighted(nsfg, 'wgt2013_2015')
# # Clean the weight variables
# pounds = nsfg['birthwgt_lb1'].replace([98, 99], np.nan)
# ounces = nsfg['birthwgt_oz1'].replace([98, 99], np.nan)
# # Compute total birth weight
# birth_weight = pounds + ounces/16
# Create a Boolean Series for full-term babies
full_term = nsfg['prglngth'] >= 37
# Select the weights of full-term babies
full_term_weight = birth_weight[full_term]
# Compute the mean weight of full-term babies
print(full_term_weight.mean())
# Filter
# In the previous exercise, you computed the mean birth weight for full-term babies; you filtered out preterm babies because their distribution of weight is different.
# The distribution of weight is also different for multiple births, like twins and triplets. In this exercise, you'll filter them out, too, and see what effect it has on the mean.
# Filter full-term babies
full_term = nsfg['prglngth'] >= 37
# Filter single births
single = nsfg['nbrnaliv'] == 1
# Compute birth weight for single full-term babies
single_full_term_weight = birth_weight[single & full_term]
print('Single full-term mean:', single_full_term_weight.mean())
# Compute birth weight for multiple full-term babies
mult_full_term_weight = birth_weight[~single & full_term]
print('Multiple full-term mean:', mult_full_term_weight.mean())
|
#!/usr/bin/python3
import os
import os.path as path
import subprocess
from pyhocon import ConfigFactory
import argparse
# The eight TPC-H tables; dbgen emits one <table>.tbl file per entry.
RAW_DATA = [
    "customer",
    "lineitem",
    "nation",
    "orders",
    "partsupp",
    "part",
    "region",
    "supplier"]
# Earlier 4-node placement, kept for reference:
#DST = {'hao-ml-1': ['lineitem', 'supplier'],
# 'hoa-ml-7': ['orders', 'part'],
# 'hoa-ml-6': ['partsupp', 'region'],
# 'hao-ml-8': ['customer', 'nation']}
# Maps datanode hostname -> tables whose files are pinned to that node
# when uploading with `putx` in multi-node (-m) mode.
DST = {'hao-ml-2': ['lineitem', 'supplier', 'region', 'part'],
       'hao-ml-5': ['customer', 'orders', 'nation', 'partsupp']}
# Inclusive range of TPC-H scale factors to generate and upload.
INIT_SF = 6
SF = 12
def run(cmd):
    """Launch *cmd* through the shell with $HADOOP_HOME as the working
    directory and return the Popen handle (stderr merged into stdout)."""
    return subprocess.Popen(
        cmd,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        cwd=os.environ["HADOOP_HOME"],
    )
def put(f, dst):
    """Upload local file *f* to HDFS path *dst* via `hdfs dfs -put -f`."""
    print("Upload %s to %s" % (f, dst))
    cmd = "./bin/hdfs dfs -put -f %s %s" % (f, dst)
    return run(cmd)
def putx(f, dst, fav_node):
    """Upload *f* to HDFS path *dst*, asking HDFS to place the blocks on
    *fav_node*; retries as long as HDFS reports the favored node was not
    chosen.  Returns the last (successful) Popen handle, already reaped."""
    print("Upload %s to %s at %s" % (f, dst, fav_node))
    cmd = "./bin/hdfs dfs -putx -f %s %s %s" % (f, dst, fav_node)
    proc = run(cmd)
    out, _ = proc.communicate()
    # HDFS prints this message when the favored-node hint was ignored;
    # keep re-running the upload until placement succeeds.
    while b"were specified but not chosen" in out:
        print("retry allocating table")
        print(cmd)
        proc = run(cmd)
        out, _ = proc.communicate()
    return proc
def mkdir(p):
    """Create HDFS directory *p*, including any missing parents."""
    print("Creating HDFS directory: " + p)
    return run("./bin/hdfs dfs -mkdir -p " + p)
def parse_config():
    """Load and return the HOCON config at ../conf/application.conf."""
    print("Parsing config file")
    config = ConfigFactory.parse_file('../conf/application.conf')
    return config
def dbgen(scale):
    """Run the TPC-H dbgen tool at the given scale factor and block until
    it finishes (-qf: quiet, overwrite existing .tbl files)."""
    print("Generating DB with scale " + str(scale))
    proc = subprocess.Popen(
        "./dbgen -qf -s " + str(scale), shell=True, cwd="../dbgen/")
    proc.wait()
def key_by_val(table_dict, value):
    """Return the first key in *table_dict* whose value collection contains
    *value*, or None if no entry matches."""
    return next(
        (key for key, members in table_dict.items() if value in members),
        None)
def main():
    """Generate TPC-H data for every scale factor in INIT_SF..SF and upload
    each table to HDFS.

    Flags:
      -m  multi-node mode: pin each table to the datanode listed in DST
          using `hdfs dfs -putx`; otherwise a plain `-put` is used.
      -u  reuse existing dbgen output instead of regenerating it.
    """
    # Use .get() so an unset HADOOP_HOME yields the friendly message below
    # instead of a KeyError traceback; exit nonzero because this is an error
    # (the original code exited 0 on all failure paths).
    if not path.exists(os.environ.get("HADOOP_HOME", "")):
        print("Please Specify HADOOP_HOME")
        exit(1)

    parser = argparse.ArgumentParser()
    parser.add_argument('-m', action='store_true', default=False,
                        help="Multi-node mode")
    parser.add_argument('-u', action='store_true', default=False,
                        help="Reuse existing dbgen data")
    opts = parser.parse_args()

    conf = parse_config()
    input_dir = conf.get_string("all.input-dir")
    dbgen_path = path.abspath("../dbgen/")

    try:
        for data_scale in range(INIT_SF, SF + 1):
            if not opts.u:
                dbgen(data_scale)

            # Create one HDFS directory per table, all in parallel.
            procs = [mkdir("%s/%s-%d" % (input_dir, d, data_scale))
                     for d in RAW_DATA]
            [p.wait() for p in procs]

            # Upload every table for this scale factor in parallel.
            procs = []
            for d in RAW_DATA:
                src = path.join(dbgen_path, d + ".tbl")
                dst = "%s/%s-%d/%s-%d.txt" % (
                    input_dir, d, data_scale, d, data_scale)
                if opts.m:
                    node = key_by_val(DST, d)
                    if node is None:
                        # Table missing from the DST placement map.
                        print("Error: " + d + " does not exist!")
                        exit(1)
                    procs.append(putx(src, dst, node))
                else:
                    procs.append(put(src, dst))
            [p.wait() for p in procs]
    except KeyboardInterrupt:
        # User-requested stop is not an error.
        print("Keyboard Interrupt")
        exit(0)
# Script entry point: only run the upload pipeline when executed directly.
if __name__ == '__main__':
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.