text stringlengths 8 6.05M |
|---|
from ethereum import utils as u
from plasma_core.constants import NULL_HASH
from plasma_core.utils.merkle.fixed_merkle import FixedMerkle
def get_empty_merkle_tree_hash(depth):
    """Return the root hash of a depth-`depth` Merkle tree whose leaves
    are all NULL_HASH."""
    node = NULL_HASH
    # Hash identical sibling pairs upward, one tree level per iteration.
    for _level in range(depth):
        node = u.sha3(node + node)
    return node
def get_merkle_of_leaves(depth, leaves):
    """Build and return a FixedMerkle tree of the given depth over *leaves*."""
    return FixedMerkle(depth, leaves)
def bytes_fill_left(inp, length):
    """Left-pad *inp* with zero bytes so the result is *length* bytes.

    If *inp* is already at least *length* bytes it is returned unchanged.
    """
    return inp.rjust(length, b'\x00')
|
import unittest
from katas.kyu_7.lorraine_wants_to_win_tv_contest import unscramble
class UnscrambleTestCase(unittest.TestCase):
    """Checks unscramble() against a few known scrambled words."""

    def test_equals(self):
        self.assertListEqual(unscramble('shi'), ['his'])

    def test_equals_2(self):
        self.assertListEqual(unscramble('nowk'), ['know'])

    def test_equals_3(self):
        # Multiple valid unscramblings are returned together.
        self.assertListEqual(unscramble('amle'), ['male', 'meal'])
|
from django.http import JsonResponse
from .models import UserProfile,WeiboUser
from django.core import mail
import json,jwt,random
import datetime,time
import hashlib
from django.db import transaction
from .weiboapi import OAuthWeibo
# Create your views here.
def make_token(username, login_time, exp):
    """Build a signed JWT carrying the username, login time and expiry.

    *exp* is a lifetime in seconds from now; the absolute expiry
    timestamp is stored in the standard 'exp' claim.
    """
    secret = 'xiaoqi'
    claims = {
        'username': username,
        'login_time': str(login_time),
        'exp': int(time.time() + exp),
    }
    return jwt.encode(claims, secret, algorithm='HS256')
def make_validation():
    """Return a random six-character digit string.

    Note: random.sample draws without replacement, so the six digits are
    always distinct (matching the original behaviour).
    """
    digits = random.sample('0123456789', 6)
    return ''.join(digits)
def register(request):
    """User registration view.

    GET  ?email=...    -> e-mails a 6-digit verification code
    GET  ?username=... -> checks username availability
    POST (JSON body)   -> creates the account and returns a JWT
    """
    if request.method == 'GET':
        email = request.GET.get('email')
        username = request.GET.get('username')
        if email:
            # NOTE(review): a single module-level global holds the last code
            # sent; this breaks with concurrent registrations or multiple
            # worker processes — confirm acceptable for this deployment.
            global num_code
            num_code = make_validation()
            mail.send_mail(subject='小柒优品验证码', message=num_code, from_email='caoxudong910@qq.com',recipient_list=[email])
            result = {'code': 200, 'data': '邮箱验证码已发送至您的邮箱,请查收 !'}
            return JsonResponse(result)
        if username:
            # Username availability check.
            user = UserProfile.objects.filter(username=username)
            if user:
                result = {'code': 10101, 'error': '此用户名已注册,请重新输入 !'}
                return JsonResponse(result)
            else:
                result = {'code': 200, 'data': '此用户名可以使用 !'}
                return JsonResponse(result)
        return JsonResponse({'code':200})
    elif request.method == 'POST':
        # Create the user account.
        json_str = request.body
        if not json_str:
            result = {'code': 10101, 'error': '请给我数据 !'}
            return JsonResponse(result)
        json_obj = json.loads(json_str)
        emailpass = json_obj.get('emailpass')
        # Compare the submitted code with the one generated during GET.
        if emailpass == num_code:
            username = json_obj.get('username')
            if not username:
                result = {'code': 10102, 'error': '请输入用户名 !'}
                return JsonResponse(result)
            email = json_obj.get('email')
            if not email:
                result = {'code': 10103, 'error': '请输入邮箱 !'}
                return JsonResponse(result)
            userpass_1 = json_obj.get('userpass_1')
            userpass_2 = json_obj.get('userpass_2')
            if userpass_1 != userpass_2:
                result = {'code': 10104, 'error': '两次密码输入不一致,请重新输入 !'}
                return JsonResponse(result)
            old_user = UserProfile.objects.filter(username=username)
            if old_user:
                result = {'code': 10105, 'error': '用户名已存在,请重新输入 !'}
                return JsonResponse(result)
            # Hash the password.  NOTE(review): unsalted MD5 is weak;
            # Django's make_password would be preferable.
            m5 = hashlib.md5()
            m5.update(userpass_1.encode())
            password = m5.hexdigest()
            wuid = json_obj.get('wuid')
            # Create the user (and bind a Weibo account, if a wuid was
            # supplied) inside one transaction.
            try:
                with transaction.atomic():
                    user = UserProfile.objects.create(username=username, email=email,password=password)
                    if wuid:
                        # Bind the existing Weibo record to the new account.
                        w_obj = WeiboUser.objects.get(wuid=wuid)
                        w_obj.buser = user
                        w_obj.save()
            except Exception as e:
                print('-----create error-----')
                print(e)
                result = {'code': 10106, 'error': '用户名已存在,请重新输入 !!'}
                return JsonResponse(result)
            # Record the login time and issue a 24-hour (86400 s) token.
            now_datetime = datetime.datetime.now()
            user.login_time = now_datetime
            user.save()
            token = make_token(username, now_datetime, 86400)
            # NOTE(review): token.decode() assumes PyJWT 1.x (bytes token);
            # PyJWT >= 2 returns str and this would raise AttributeError.
            result = {'code': 200, 'data': {'token': token.decode(),'username': username}}
            return JsonResponse(result)
        else:
            result = {'code': 10107, 'error': '验证码输入错误,请重新输入 !'}
            return JsonResponse(result)
def login(request):
    """User login view: GET is a liveness check; POST validates the
    credentials and returns a JWT valid for 24 hours."""
    if request.method == 'GET':
        return JsonResponse({'code': 200})
    elif request.method == 'POST':
        json_str = request.body
        if not json_str:
            result = {'code': 10101, 'error': '请给我数据 !'}
            return JsonResponse(result)
        json_obj = json.loads(json_str)
        username = json_obj.get('username')
        if not username:
            result = {'code': 10102, 'error': '请输入用户名 !'}
            return JsonResponse(result)
        userpass = json_obj.get('userpass')
        if not userpass:
            result = {'code': 10108, 'error': '请输入密码 !'}
            return JsonResponse(result)
        try:
            user = UserProfile.objects.get(username=username)
        except Exception as e:
            # Same message as a wrong password, so usernames can't be probed.
            result = {'code': 10109, 'error': '用户名或密码输入错误 !'}
            return JsonResponse(result)
        # NOTE(review): unsalted MD5 — consistent with register(), but a
        # proper password hasher would be preferable.
        m5 = hashlib.md5()
        m5.update(userpass.encode())
        password = m5.hexdigest()
        if user.password != password:
            result = {'code': 10110, 'error': '用户名或密码输入错误 !'}
            return JsonResponse(result)
        else:
            # Record the login time and issue a 24-hour token.
            now_datetime = datetime.datetime.now()
            user.login_time = now_datetime
            user.save()
            token = make_token(username,now_datetime,86400)
            # NOTE(review): token.decode() assumes PyJWT 1.x bytes tokens.
            result = {'code':200,'data':{'token':token.decode(),'username':username}}
            return JsonResponse(result)
def users_weibo_url(request):
    """Return the Weibo OAuth login URL for the front end to redirect to."""
    weibo_login_url = OAuthWeibo('123').get_weibo_login()
    return JsonResponse({'code': 200, 'oauth_url': weibo_login_url})
def users_weibo_token(request):
    """Weibo OAuth callback handler.

    Receives the `code` returned by the front end, validates it with
    Weibo, and either logs the bound user in or answers code 10999 to
    tell the front end that account binding is still required.
    """
    # Receive the code from the front end and verify it with Weibo.
    code = request.GET.get('code')
    oauth = OAuthWeibo()
    # Submit the code to the Weibo server; on success this returns the
    # user's access token and uid.
    res = oauth.get_access_token_uid(code)
    res_obj = json.loads(res)
    access_token = res_obj['access_token']
    uid = res_obj['uid']
    # Check whether this Weibo user has registered before.
    try:
        bu = WeiboUser.objects.get(wuid=uid)
    except Exception as e:
        # First Weibo login: remember the account and ask the front end to
        # run the binding/registration flow.
        # TODO?
        WeiboUser.objects.create(wuid=uid,access_token=access_token)
        return JsonResponse({'code':10999,'wuid':uid})
    else:
        # Seen before: check whether it was actually bound (buser set).
        buser = bu.buser
        if not buser:
            return JsonResponse({'code': 10999, 'wuid': uid})
        login_time = datetime.datetime.now()
        token = make_token(buser.username,login_time,86400)
        # NOTE(review): token.decode() assumes PyJWT 1.x bytes tokens.
        return JsonResponse({'code':200,'username':buser.username,'token':token.decode()})
#!/usr/bin/python3
# Demonstrates integer truthiness in `if` conditions.
var1 = 100
# Any non-zero integer is truthy, so this branch executes.
if var1:
    print("1 - if 表达式条件为true")
    print(var1)
var2 = 0
# Zero is falsy, so this branch is skipped.
if var2:
    print("2 - if 表达式条件为true")
    print(var2)
# end="!" replaces the trailing newline with '!'.
print("Good Bye",end="!")
import smartcar
import sqlite3
import random
from flask import Flask, request, jsonify, redirect, g
from flask_cors import CORS
import sys, os
import time
app = Flask(__name__, instance_relative_config=True)
app.config.from_pyfile('config.py')
CORS(app)
# Absolute path to the SQLite database; machine-specific for now
# (TODO: move into config.py).  BUG FIX: this must be a raw string —
# in a normal literal '\U' starts a unicode escape and the original
# line was a SyntaxError.
DATABASE = r'C:\Users\Robert Yang\Documents\YHacks2018-master\backend\users.db'
#code to retrieve database, only executed during active app context
def get_db():
    """Return the SQLite connection for this app context, opening it on
    first use and caching it on flask.g."""
    conn = getattr(g, '_database', None)
    if conn is None:
        conn = g._database = sqlite3.connect(DATABASE)
    # Rows come back as sqlite3.Row (dict-style access) instead of tuples.
    conn.row_factory = sqlite3.Row
    return conn
#function to do queries, "query" arg is a string in SQL to execute, and
#args is going to be whatever args you want to store results
def query_db(query, args=(), one=False):
    """Execute *query* with *args*; return all rows, or with one=True just
    the first row (None when the result set is empty)."""
    cursor = get_db().execute(query, args)
    rows = cursor.fetchall()
    cursor.close()
    if one:
        return rows[0] if rows else None
    return rows
# Smartcar OAuth client, configured from the instance config file.
client = smartcar.AuthClient(
    client_id=app.config["CLIENT_ID"],
    client_secret=app.config["CLIENT_SECRET"],
    redirect_uri=app.config["REDIRECT_URI"],
    test_mode=True
)
# NOTE(review): per-request state kept in module-level globals — not safe
# for concurrent users; acceptable only for a single-user hackathon demo.
access = None
code = None
user_id = None
user_email = None
#code based off of smartcar api docs to automatically get a refresh Token
#obviously, you must have gone through the /exchange URI before you can run this method
def get_fresh_access():
    """Return a non-expired access record for the current user, refreshing
    it through the Smartcar API when it has expired.

    Relies on the module-level user_id global set by /exchange.
    """
    # this query should fetch ONLY ONE user from the database based on their user_id
    access = query_db('SELECT * FROM users_info WHERE userID = ?', [user_id], one=True)
    if smartcar.is_expired(access['expiration']):
        new_access = client.exchange_refresh_token(access['refresh_token'])
        # Persist the refreshed token.  NOTE(review): new_access is
        # presumably a dict while access_tokens looks like a single column —
        # confirm the stored value matches the schema.
        query_db('UPDATE users_info SET access_tokens = ? WHERE userID = ?', [new_access, user_id], one=True)
        return new_access
    else:
        return access
#this will close connection with database
@app.teardown_appcontext
def close_connection(exception):
    """Close the per-context SQLite connection, if one was opened."""
    conn = getattr(g, '_database', None)
    if conn is not None:
        conn.close()
#we need something for the index/default page
@app.route('/')
def index():
    """Default page: verifies the app (and its DB connection) is alive."""
    get_db().cursor()
    # BUG FIX: a Flask view must return a response; the original returned
    # None, which is a 500 error on every request to '/'.
    return 'YHacks smartcar backend is running', 200
#going to add a separate login route
@app.route('/login', methods=['GET'])
def login():
    """Authorization step 1b: send the user to Smartcar's auth dialog."""
    return redirect(client.get_auth_url())
# htpp://localhost:3000/exchange?code=<authorization_code>
@app.route('/exchange', methods=['GET'])
def exchange():
    """OAuth callback: trade the authorization code for tokens and record
    the new user in the database.

    http://localhost:3000/exchange?code=<authorization_code>
    """
    # Module-level globals are used only for demo simplicity; a real app
    # would use sessions.  BUG FIX: `global name = value` is a SyntaxError —
    # the global declaration and the assignment must be separate statements.
    global code, access, user_email, user_id
    code = request.args.get('code')
    # TODO(review): handle the user denying permission (Smartcar returns an
    # error instead of a code) with exception handling.
    access = client.exchange_code(code)
    # Not sure this works: the email is entered in Smartcar Connect rather
    # than posted to this endpoint — confirm.
    user_email = request.form.get('email')
    # Generate a random userID; if necessary we could verify uniqueness
    # against the database with a SQL query.
    user_id = random.randint(1, 100)
    # BUG FIX: 'VALUES ?' is not valid SQLite — each of the four bound
    # parameters needs its own placeholder.
    query_db('INSERT INTO users_info VALUES (?, ?, ?, ?)', [user_email, user_id, code, access], one=True)
    return '', 200
# htpp://localhost:8000/vehicles
@app.route('/vehicles', methods=['GET'])
def vehicles():
    """List info for every vehicle the user authorized.

    http://localhost:8000/vehicles
    """
    global access
    access = get_fresh_access()
    token = access['access_token']
    vehicle_ids = smartcar.get_vehicle_ids(token)['vehicles']
    cars = [smartcar.Vehicle(vid, token).info() for vid in vehicle_ids]
    return jsonify(cars)
# htpp://localhost:8000/vehicle/<id>
@app.route('/vehicle/<vehicle_id>', methods=['GET'])
def vehicle(vehicle_id):
    """Return info for a single vehicle by id."""
    global access
    access = get_fresh_access()
    info = smartcar.Vehicle(vehicle_id, access['access_token']).info()
    print(info)
    return jsonify(info)
# htpp://localhost:8000/vehicle/<id>/unlock
@app.route('/vehicle/<vehicle_id>/unlock', methods=['GET'])
def unlock(vehicle_id):
    """Unlock the given vehicle; the Smartcar response is 200 on success."""
    global access
    access = get_fresh_access()
    result = smartcar.Vehicle(vehicle_id, access['access_token']).unlock()
    return jsonify(result)
# htpp://localhost:8000/vehicle/<id>/lock
@app.route('/vehicle/<vehicle_id>/lock', methods=['GET'])
def lock(vehicle_id):
    """Lock the given vehicle; the Smartcar response is 200 on success."""
    global access
    access = get_fresh_access()
    result = smartcar.Vehicle(vehicle_id, access['access_token']).lock()
    return jsonify(result)
# htpp://localhost:8000/vehicle/<id>/location
@app.route('/vehicle/<vehicle_id>/location', methods=['GET'])
def location(vehicle_id):
    """Return the given vehicle's current location."""
    global access
    access = get_fresh_access()
    current_location = smartcar.Vehicle(vehicle_id, access['access_token']).location()
    return jsonify(current_location)
#logout endpoint to disconnet application for a particular vehicle
# Logout endpoint: disconnect this application from a particular vehicle.
# BUG FIX: the route must declare <vehicle_id> — the original view took a
# vehicle_id parameter that Flask never supplied, so every request raised
# a TypeError.
@app.route('/logout/<vehicle_id>', methods=['GET'])
def logout(vehicle_id):
    """Disconnect this application from the given vehicle."""
    global access
    access = get_fresh_access()
    # BUG FIX: smartcar.Vehicle (capital V) and Vehicle.disconnect() — the
    # original `smartcar.vehicle(...).disconnet()` raised AttributeError.
    vehicle = smartcar.Vehicle(vehicle_id, access['access_token'])
    vehicle.disconnect()
    # The DB connection is closed by the teardown_appcontext handler;
    # calling close_connection() by hand (without its `exception` argument)
    # was a TypeError.
    return '', 200
"""
#get's car info for a user based on the facebookID
@app.route('/getUserCar', methods=['GET'])
def getUserCar(facebookID):
if(not SQlitetest.check_new_user(facebookID)):
return SQlitetest.get_user_car_info(facebookID) #returns the JSON object
else:
return False;
###
#need to choose a car...
@app.route('/addUserToDatabase', methods=['GET'])
def addUserToDatabase(firstname, lastname, facebookID, carId):
#for now defining carID to just be the first car:
global access
carId = smartcar.get_vehicle_ids(access['access_token'])['vehicles'][0];
# first table:
SQlitetest.car_data_entry(facebookID, smartcar.Vehicle(carId, access['access_token']))
#second table:
SQlitetest.data_entry_user_info(firstname, lastname, facebookID, access['access_token'],
access['refresh_token_expiration'])
###
@app.route('/getUserInfo', methods=['GET'])
def getUserInfo(facebookID):
return SQlitetest.user_info(facebookID) #JSON including user firstname, lastname, etc...
###
def get_fresh_access():
access = load_access_from_database()
if smartcar.expired(access['expiration']):
new_access = client.exchange_refresh_token(access['refresh_token'])
put_access_into_database(new_access)
return new_access
else:
return access
###
fresh_access_token = get_fresh_access()['access_token']
"""
# Start the Flask development server when run as a script.
if __name__ == '__main__':
    app.run(port=3000)
|
import unittest
import testutil
import shutil
import os
import time
import datetime
import hdbfs
import hdbfs.ark
import hdbfs.model
# Lower the minimum thumbnail exponent so the tests below can request tiny
# thumbs (default presumably larger — see hdbfs.imgdb).
hdbfs.imgdb.MIN_THUMB_EXP = 4
class ThumbCases( testutil.TestCase ):
    """Exercises thumbnail-stream creation, reuse, and merge behaviour.

    Thumbnail sizes are given as exponents (a request of 4 presumably
    means a 2**4-pixel bound — confirm against hdbfs.imgdb).
    """

    def setUp( self ):
        self.init_env()

    def tearDown( self ):
        self.uninit_env()

    def test_create_thumb( self ):
        # A thumb smaller than the original must be a new, distinct stream
        # with expendable priority.
        blue = self._load_data( self.blue )
        h = hdbfs.Database()
        h.enable_write_access()
        obj = h.register_file( blue, False )
        root_stream = obj.get_root_stream()
        thumb_stream = obj.get_thumb_stream( 4 )
        self.assertFalse( thumb_stream.get_stream_id()
                          == root_stream.get_stream_id(),
                          'Root returned for small thumb' )
        self.assertFalse( self._diff( root_stream.read(),
                                      thumb_stream.read() ),
                          'Smaller thumb stream identical' )
        self.assertTrue( thumb_stream.get_priority()
                         == hdbfs.model.SP_EXPENDABLE,
                         'Thumb priority not set correctly' )

    def test_return_orig( self ):
        # Requesting a thumb at least as large as the original should just
        # return the root stream itself.
        blue = self._load_data( self.blue )
        h = hdbfs.Database()
        h.enable_write_access()
        obj = h.register_file( blue, False )
        root_stream = obj.get_root_stream()
        thumb_stream = obj.get_thumb_stream( 10 )
        self.assertTrue( thumb_stream.get_stream_id()
                         == root_stream.get_stream_id(),
                         'Root not returned large small thumb' )
        self.assertTrue( thumb_stream.get_priority()
                         == root_stream.get_priority(),
                         'Oddity in return root for large priority' )

    def test_rot_does_not_return_orig( self ):
        # A rotated image can no longer reuse the root stream as its thumb.
        blue = self._load_data( self.blue )
        h = hdbfs.Database()
        h.enable_write_access()
        obj = h.register_file( blue, False )
        obj.rotate_cw()
        root_stream = obj.get_root_stream()
        thumb_stream = obj.get_thumb_stream( 10 )
        self.assertFalse( thumb_stream.get_stream_id()
                          == root_stream.get_stream_id(),
                          'Root returned on rotated image' )

    def test_thumb_points_to_root( self ):
        # Every generated thumb must record the root stream as its origin.
        blue = self._load_data( self.blue )
        h = hdbfs.Database()
        h.enable_write_access()
        obj = h.register_file( blue, False )
        root_stream = obj.get_root_stream()
        thumb_stream = obj.get_thumb_stream( 4 )
        origin_stream = thumb_stream.get_origin_stream()
        self.assertTrue( origin_stream is not None,
                         'Thumb has not origin' )
        self.assertTrue( origin_stream.get_stream_id()
                         == root_stream.get_stream_id(),
                         'Origin stream is not root stream' )

    def test_create_very_small( self ):
        # Requests below the minimum exponent reuse the smallest real thumb.
        blue = self._load_data( self.blue )
        h = hdbfs.Database()
        h.enable_write_access()
        obj = h.register_file( blue, False )
        thumb_stream = obj.get_thumb_stream( 4 )
        small_stream = obj.get_thumb_stream( 3 )
        self.assertTrue( thumb_stream.get_stream_id()
                         == small_stream.get_stream_id(),
                         'Very small does not match small' )
        self.assertTrue( small_stream.get_priority()
                         == hdbfs.model.SP_EXPENDABLE,
                         'Very small priority not set correctly' )

    def test_thumbs_not_moved( self ):
        # After merging o2 into o1, o1 regenerates its own thumbs rather
        # than adopting o2's.
        red = self._load_data( self.red )
        blue = self._load_data( self.blue )
        h = hdbfs.Database()
        h.enable_write_access()
        o1 = h.register_file( blue, False )
        o2 = h.register_file( red, False )
        t2_4_hash = o2.get_thumb_stream( 4 ).get_hash()
        t2_5_hash = o2.get_thumb_stream( 5 ).get_hash()
        h.merge_objects( o1, o2 )
        t1_4_hash = o1.get_thumb_stream( 4 ).get_hash()
        t1_5_hash = o1.get_thumb_stream( 5 ).get_hash()
        self.assertFalse( t1_4_hash == t2_4_hash,
                          'New thumb matches moved from o2' )
        self.assertFalse( t1_5_hash == t2_5_hash,
                          'New thumb matches moved from o2' )

    def test_thumbs_not_moved_with_existing( self ):
        # Same as above, but o1 already had thumbs: they must be preserved
        # untouched by the merge.
        red = self._load_data( self.red )
        blue = self._load_data( self.blue )
        h = hdbfs.Database()
        h.enable_write_access()
        o1 = h.register_file( blue, False )
        o2 = h.register_file( red, False )
        t1_4_hash = o1.get_thumb_stream( 4 ).get_hash()
        t1_5_hash = o1.get_thumb_stream( 5 ).get_hash()
        t2_4_hash = o2.get_thumb_stream( 4 ).get_hash()
        t2_5_hash = o2.get_thumb_stream( 5 ).get_hash()
        h.merge_objects( o1, o2 )
        tx_4_hash = o1.get_thumb_stream( 4 ).get_hash()
        tx_5_hash = o1.get_thumb_stream( 5 ).get_hash()
        self.assertTrue( tx_4_hash == t1_4_hash,
                         'New thumb not matching from o1' )
        self.assertTrue( tx_5_hash == t1_5_hash,
                         'New thumb not matching from o1' )
        self.assertFalse( tx_4_hash == t2_4_hash,
                          'New thumb matches moved from o2' )
        self.assertFalse( tx_5_hash == t2_5_hash,
                          'New thumb matches moved from o2' )
# Allow running this test module directly.
if( __name__ == '__main__' ):
    unittest.main()
|
import json
from simstream import PikaAsyncConsumer
def recv_log(body):
try:
logs = json.loads(body.decode())
for log in logs:
print(log) |
from collections import MutableMapping
from dingus import Dingus, DingusTestCase, exception_raiser, DontCare
__all__ = [
'DeterministicDingus',
'Dingus',
'DingusTestCase',
'DingusWhitelistTestCase',
'DontCare',
'exception_raiser',
]
class NonHashingMap(MutableMapping):
    """A :class:`dict`-like object that supports unhashable keys.

    Lookups scan a list of (key, value) pairs, so they are O(n); since
    :class:`DeterministicDingus` shouldn't be storing very large mappings
    the fact that lookups are O(n) is irrelevant.
    """

    def __init__(self):
        self.__mapping = []

    def __setitem__(self, key, value):
        # Append-only: __getitem__ returns the first match, so a repeated
        # key keeps its original value (preserving existing behaviour).
        self.__mapping.append((key, value,))

    def __getitem__(self, key):
        for my_key, value in self.__mapping:
            if my_key == key:
                return value
        raise KeyError(key)

    def __contains__(self, key):
        for my_key, _value in self.__mapping:
            if my_key == key:
                return True
        return False

    def __iter__(self):
        # BUG FIX: __iter__ must return an iterator; the original returned
        # a list, which made any iteration over the map (for k in m,
        # m.keys(), dict(m), ...) raise TypeError.
        return iter([key for key, _value in self.__mapping])

    def __delitem__(self, key):
        # BUG FIX: MutableMapping's __delitem__ takes the key to delete;
        # the original signature omitted it, so `del m[k]` raised a
        # TypeError instead of the intended NotImplementedError.
        raise NotImplementedError

    def __len__(self):
        return len(self.__mapping)
class DeterministicDingus(Dingus):
    """This dingus returns a different Dingus depending on the arguments it's called with.
    It has the property of being purely deterministic (i.e. the same
    arguments always return the same object). Unfortunately this
    means that the behaviour of returning an identical `Dingus` when
    called without arguments is lost.
    >>> d = DeterministicDingus()
    >>> d('an arg') == d('other arg')
    False
    >>> d('an arg') == d('an arg')
    True
    >>> d.func('an arg') == d.func('other arg')
    False
    >>> d.func('an arg') == d.func('an arg')
    True
    >>> d.func('an arg') == d.func()
    False
    """

    def __init__(self, *args, **kwargs):
        # NonHashingMap lets call arguments contain unhashable values
        # (lists, dicts).  Created before Dingus.__init__ — presumably so
        # any calls made during initialization are recorded; confirm.
        self.__argument_map = NonHashingMap()
        Dingus.__init__(self, *args, **kwargs)

    def __call__(self, *args, **kwargs):
        # Cache one child Dingus per distinct (args, kwargs) pair, so equal
        # calls deterministically return the same object.
        key = (args, kwargs,)
        if key in self.__argument_map:
            rv = self.__argument_map[key]
        else:
            self.__argument_map[key] = rv = self._create_child('()')
        # Record the call the same way Dingus does for plain calls.
        self._children['()'] = rv
        self._log_call('()', args, kwargs, rv)
        return rv
class _DingusWhitelistTestCaseMetaclass(type):
"""Aggregate relevant attributes.
This metaclass aggregates attributes of base classes so that subclasses can
specify just the values that are specific to their tests.
"""
__aggretated_attrs = ('additional_mocks', 'module_mocks', 'mock_list')
def __new__(cls, name, bases, attrs):
for attr_name in cls.__aggretated_attrs:
if attr_name in attrs:
attrs[attr_name] = set(attrs[attr_name])
else:
attrs[attr_name] = set()
for base in bases:
if hasattr(base, attr_name):
attrs[attr_name].update(getattr(base, attr_name))
attrs['module_mocks'].update(attrs['mock_list'])
return type.__new__(cls, name, bases, attrs)
class DingusWhitelistTestCase(object):
    """A helpful base test case for unit testing.
    This class is similar in function to the original :class:`DingusTestCase`
    except that it operates on white list of what to mock rather than a black
    list. What to actually mock should be set as a class attribute on
    inheriting classes as :data:`module_mocks`.
    """
    # NOTE(review): `__metaclass__` is the Python 2 spelling; under Python 3
    # this line has no effect and the class statement would need
    # `metaclass=_DingusWhitelistTestCaseMetaclass` — confirm the targeted
    # Python version.
    __metaclass__ = _DingusWhitelistTestCaseMetaclass
    module = None
    """The module to mock.
    This should be set in the concrete subclass.
    """
    mock_list = set()
    """Old name for `module_mocks`.
    This attribute works identically to `module_mocks` but is honored for
    backwords compatability. Newly written tests should use `module_mocks`
    instead.
    """
    module_mocks = set()
    """A collection of names that should be mocked out in `module`.
    `module` will be reset to its original contents during :meth:`teardown`.
    """
    additional_mocks = set()
    """A collection of additional attributes to be set on the class during :meth:`setup`.
    """

    def setup(self):
        # Snapshot the module namespace so teardown() can restore it, then
        # replace each whitelisted name with a Dingus.
        self.__old_module_dict = self.module.__dict__.copy()
        for key in self.module_mocks:
            self.module.__dict__[key] = Dingus(key)
        for key in self.additional_mocks:
            setattr(self, key, Dingus(key))
        # Subclasses may define run() as the "act" phase executed per-test.
        if hasattr(self, 'run') and callable(self.run):
            self.run()

    def teardown(self):
        # Restore the module exactly as it was before setup().
        self.module.__dict__.clear()
        self.module.__dict__.update(self.__old_module_dict)
|
# start a thread for each digit to search for the next occurrance of that digit
# record all digits between the first occurance and the next occurance and check of the same digits
# occur after the next occurance of the original digit
from decimal import *
import asyncio
# find possible recurring patterns(is)
async def findposspatterns(s):
    """Collect candidate recurring patterns in a digit string.

    For every later position i whose digit matches the first digit,
    record the final i+1 characters of the string.  (The async qualifier
    mirrors the driver below, which awaits this via an event loop; the
    body itself never awaits.)
    """
    print(s)
    first = s[0]
    return [s[-(i + 1):] for i in range(1, len(s)) if s[i] == first]
# Drive the pattern search over the decimal expansions of 1/2 .. 1/99 at
# 100-digit precision.
loop = asyncio.get_event_loop()
getcontext().prec = 100
for i in range(2, 100):
    # Digits after the decimal point, leading zeros stripped.  The display
    # uses [2:-1] (drops the final digit) while the search uses [2:] —
    # presumably to hide the last, rounded digit; confirm intent.
    print(str(1/Decimal(i))[2:-1].lstrip("0"))
    print(loop.run_until_complete(findposspatterns(str(1/Decimal(i))[2:].lstrip("0"))))
    print()
loop.close()
|
import MapReduce
import sys
# Shared MapReduce driver instance used by mapper/reducer below.
mr = MapReduce.MapReduce()
# =============================
# Do not modify above this line
# Input records are ["a", row, col, value] for matrix A and
# ["b", row, col, value] for matrix B, e.g.:
# ["a", 0, 1, 45]
# ["b", 0, 1, 18]
def mapper(record):
    """Emit each matrix entry keyed by every output cell it contributes to.

    An A-entry at (row, col) contributes to every cell of output row
    `row`; a B-entry contributes to every cell of its output column.
    The 5 is the hard-coded dimension of the 5x5 result matrix.
    """
    kind = record[0]
    for k in range(5):
        if kind == "a":
            mr.emit_intermediate("{0},{1}".format(record[1], k), record)
        elif kind == "b":
            mr.emit_intermediate("{0},{1}".format(k, record[2]), record)
def reducer(key, list_of_values):
    """Emit (row, col, dot-product) for one output cell of A x B.

    `key` is "row,col"; `list_of_values` holds every A-entry of that row
    and every B-entry of that column, e.g.:
      '0,0' -> [['a',0,0,63], ..., ['b',0,0,63], ['b',1,0,59], ...]
    """
    list_a = []
    list_b = []
    for itr in list_of_values:
        if itr[0] == "a":
            list_a.append(itr)
        elif itr[0] == "b":
            list_b.append(itr)
    if list_a and list_b:
        # Multiply each A row entry with the matching B column entry (A's
        # column index must equal B's row index) and accumulate the sum.
        # TODO: find a more efficient way than the nested scan.
        ab_total = 0
        for a in list_a:
            ab = 0
            for b in list_b:
                if a[2] == b[1]:
                    ab = a[3] * b[3]
                    break
            ab_total += ab
        # Sparse output: cells summing to zero are not emitted.
        if ab_total > 0:
            # BUG FIX: the original parsed the key with list(key)[0] and
            # list(key)[2] — single characters — which silently breaks for
            # any row/col index >= 10.  Split on the comma instead (same
            # result for single-digit indices).
            row, col = key.split(',')
            mr.emit((int(row), int(col), ab_total))
# Do not modify below this line
# =============================
if __name__ == '__main__':
    # First command-line argument: path to the JSON records file.
    inputdata = open(sys.argv[1])
    mr.execute(inputdata, mapper, reducer)
    # inputdata = open(
    #'./data/matrix.json')
    # mr.execute(inputdata, mapper, reducer)
    # raw_input()
|
"""Top-level package for VirtualCrypto.py."""
from .structs import User, Currency, Claim, ClaimStatus, Scope, Balance
from .errors import VirtualCryptoException, MissingScope, BadRequest
from .client import VirtualCryptoClient
from .async_client import AsyncVirtualCryptoClient
# Package metadata.
__author__ = """sizumita"""
__email__ = 'contact@sumidora.com'
__version__ = '0.1.3'
|
import transaction
import lock
from collections import namedtuple
def test1():
    """Exercise FIFOLock: shared read locks, exclusive write locks, and
    FIFO granting of queued waiters."""
    TM = namedtuple('TM', ['timestamp'])
    tm = TM(1)
    t1 = transaction.ReadWriteTransaction(tm, 't1', transaction.Status.running)
    t2 = transaction.ReadWriteTransaction(tm, 't2', transaction.Status.running)
    t3 = transaction.ReadWriteTransaction(tm, 't3', transaction.Status.running)
    t4 = transaction.ReadWriteTransaction(tm, 't4', transaction.Status.running)
    t5 = transaction.ReadWriteTransaction(tm, 't5', transaction.Status.running)
    lk = lock.FIFOLock()
    # Multiple readers may hold the lock simultaneously.
    assert lk.acquire(t1, lock.Mode.read)
    print map(lambda x: x.name, list(lk.holders))
    assert lk.acquire(t2, lock.Mode.read)
    print map(lambda x: x.name, list(lk.holders))
    assert lk.acquire(t3, lock.Mode.read)
    print map(lambda x: x.name, list(lk.holders))
    # A writer must wait behind the current readers...
    assert lk.acquire(t4, lock.Mode.write) is not True
    print map(lambda x: x.name, list(lk.holders))
    # ...and a later reader queues behind the waiting writer (FIFO).
    assert lk.acquire(t5, lock.Mode.read) is not True
    print map(lambda x: x.name, list(lk.holders))
    # A read->write upgrade blocks while other readers hold the lock.
    assert lk.acquire(t1, lock.Mode.write) is not True
    print map(lambda x: x.name, list(lk.holders))
    # Re-acquiring an already-held read lock still succeeds.
    assert lk.acquire(t1, lock.Mode.read)
    print map(lambda x: x.name, list(lk.holders))
    assert lk.acquire(t2, lock.Mode.read)
    print map(lambda x: x.name, list(lk.holders))
    assert lk.acquire(t3, lock.Mode.read)
    print map(lambda x: x.name, list(lk.holders))
    assert lk.acquire(t1, lock.Mode.write) is not True
    # As the other readers release, t1's pending upgrade can proceed.
    lk.release(t2)
    assert lk.acquire(t1, lock.Mode.read)
    assert lk.acquire(t3, lock.Mode.read)
    assert lk.acquire(t1, lock.Mode.write) is not True
    lk.release(t3)
    assert lk.acquire(t1, lock.Mode.read)
    assert lk.acquire(t1, lock.Mode.write)
    lk.release(t1)
    # Queued waiters are granted in arrival order: writer t4, then reader t5.
    assert lk.acquire(t4, lock.Mode.write)
    lk.release(t4)
    assert lk.acquire(t5, lock.Mode.read)
def test2():
    """Aborted waiters are skipped: t2's aborted write stops blocking t3."""
    TM = namedtuple('TM', ['timestamp'])
    tm = TM(1)
    t1 = transaction.ReadWriteTransaction(tm, 't1', transaction.Status.running)
    t2 = transaction.ReadWriteTransaction(tm, 't2', transaction.Status.running)
    t3 = transaction.ReadWriteTransaction(tm, 't3', transaction.Status.running)
    lk = lock.FIFOLock()
    assert lk.acquire(t1, lock.Mode.read)
    # t2's write waits behind the reader; t3's read queues behind t2 (FIFO).
    assert lk.acquire(t2, lock.Mode.write) is not True
    assert lk.acquire(t3, lock.Mode.read) is not True
    # Once t2 aborts, t3 no longer has to wait behind it.
    t2.status = transaction.Status.aborted
    assert lk.acquire(t3, lock.Mode.read)
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import logging
from dataclasses import dataclass
from typing import Collection
from pants.backend.go.subsystems.golang import GolangSubsystem
from pants.core.util_rules import asdf, search_paths
from pants.core.util_rules.asdf import AsdfPathString, AsdfToolPathsResult
from pants.core.util_rules.environments import EnvironmentTarget
from pants.core.util_rules.search_paths import ValidatedSearchPaths, ValidateSearchPathsRequest
from pants.engine.env_vars import PathEnvironmentVariable
from pants.engine.internals.selectors import Get
from pants.engine.rules import collect_rules, rule
from pants.util.ordered_set import FrozenOrderedSet
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class GoBootstrap:
    """The expanded search paths used to locate a Go distribution."""

    go_search_paths: tuple[str, ...]
async def _go_search_paths(
    env_tgt: EnvironmentTarget, golang_subsystem: GolangSubsystem, paths: Collection[str]
) -> tuple[str, ...]:
    """Expand configured Go search-path entries into concrete directories.

    Special entries: `<PATH>` expands to the environment's PATH entries and
    the asdf placeholder strings expand to asdf-managed tool locations;
    every other entry passes through unchanged, preserving order.
    """
    # Resolve asdf-managed locations for the Go tool (uncacheable: depends
    # on the local machine's asdf state).
    asdf_result = await AsdfToolPathsResult.get_un_cachable_search_paths(
        paths,
        env_tgt=env_tgt,
        tool_name=golang_subsystem.asdf_tool_name,
        tool_description="Go distribution",
        paths_option_name="[golang].go_search_paths",
        bin_relpath=golang_subsystem.asdf_bin_relpath,
    )
    special_strings = {
        AsdfPathString.STANDARD.value: asdf_result.standard_tool_paths,
        AsdfPathString.LOCAL.value: asdf_result.local_tool_paths,
    }
    path_variables = await Get(PathEnvironmentVariable)
    expanded: list[str] = []
    for s in paths:
        if s == "<PATH>":
            expanded.extend(path_variables)
        elif s in special_strings:
            special_paths = special_strings[s]
            expanded.extend(special_paths)
        else:
            expanded.append(s)
    return tuple(expanded)
@rule
async def resolve_go_bootstrap(
    golang_subsystem: GolangSubsystem, golang_env_aware: GolangSubsystem.EnvironmentAware
) -> GoBootstrap:
    """Validate the configured search paths, then expand them into the
    concrete paths used to find a Go distribution."""
    search_paths = await Get(
        ValidatedSearchPaths,
        ValidateSearchPathsRequest(
            env_tgt=golang_env_aware.env_tgt,
            search_paths=tuple(golang_env_aware.raw_go_search_paths),
            option_origin=f"[{GolangSubsystem.options_scope}].go_search_paths",
            environment_key="golang_go_search_paths",
            is_default=golang_env_aware._is_default("_go_search_paths"),
            # The asdf placeholders only make sense for local environments.
            local_only=FrozenOrderedSet((AsdfPathString.STANDARD, AsdfPathString.LOCAL)),
        ),
    )
    paths = await _go_search_paths(golang_env_aware.env_tgt, golang_subsystem, search_paths)
    return GoBootstrap(go_search_paths=paths)
def compatible_go_version(*, compiler_version: str, target_version: str) -> bool:
    """Can the Go compiler handle the target version?

    Inspired by
    https://github.com/golang/go/blob/30501bbef9fcfc9d53e611aaec4d20bb3cdb8ada/src/cmd/go/internal/work/exec.go#L429-L445.
    Input expected in the form `1.17`.
    """
    # Go 1.0 targets are accepted by every compiler version.
    if target_version == "1.0":
        return True

    def as_pair(version: str) -> tuple[int, int]:
        major_part, minor_part = version.split(".", maxsplit=1)
        return int(major_part), int(minor_part)

    # Tuple comparison: the target must not be newer than the compiler.
    return as_pair(target_version) <= as_pair(compiler_version)
def rules():
    """Register this module's rules plus the asdf and search-path helpers."""
    return (
        *collect_rules(),
        *asdf.rules(),
        *search_paths.rules(),
    )
|
import tensorflow as tf
import numpy as np
class DistributionSimulator:
    """Standardizes an (x, y) sample and serves shuffled mini-batches.

    After reshuffle(), each pool row is [x..., y, y_marginal] where
    y_marginal comes from an independently shuffled copy of y (used for
    the marginal term of the MINE estimator).
    """

    def __init__(self, x, y):
        self.x = np.array(x)
        self.y = np.array(y)
        self.standardize()

    def standardize(self):
        """Zero-mean / unit-variance: x per feature column, y overall."""
        x_mean = self.x.mean(axis=0, keepdims=True)
        x_std = self.x.std(axis=0, keepdims=True)
        self.x = (self.x - x_mean) / x_std
        self.y = (self.y - self.y.mean()) / self.y.std()

    def reshuffle(self):
        """Rebuild the pool: shuffled joint rows with an independently
        shuffled y column appended on the right."""
        y_marginal = self.y.copy()
        joint = np.concatenate([self.x, self.y.reshape(-1, 1)], axis=1)
        np.random.shuffle(joint)
        np.random.shuffle(y_marginal)
        self.pool = np.concatenate([joint, y_marginal.reshape(-1, 1)], axis=1)

    def init_batches(self, batch_size):
        """Reset batching state for a new epoch."""
        self.batch_size = batch_size
        # ceil(n_samples / batch_size) without importing math.
        full, remainder = divmod(self.x.shape[0], batch_size)
        self.n_batch = full + (1 if remainder else 0)
        self.batch_num = 0

    def next_batch(self):
        """Return (batch, is_last_batch) and advance the batch cursor."""
        start = self.batch_size * self.batch_num
        stop = min(start + self.batch_size, self.pool.shape[0])
        self.batch_num += 1
        return self.pool[start:stop], stop == self.pool.shape[0]
def build_net(n_hidden, lr, global_step, decay_steps, xy_shape=2):
    """Build the TF1 statistics network for MINE.

    Two weight-tied forward passes are built: one for joint samples
    (xy_in) and one for marginal samples (xy_bar_in).  Returns the two
    input placeholders, the Donsker-Varadhan lower-bound tensor, and the
    SGD op that maximizes it.
    """
    initializer = tf.variance_scaling_initializer(distribution='uniform')
    xy_in = tf.placeholder(tf.float32, shape=[None, xy_shape])
    xy_bar_in = tf.placeholder(tf.float32, shape=[None, xy_shape])
    W_1 = tf.Variable(initializer([xy_shape, n_hidden]), dtype=tf.float32)
    b_1 = tf.Variable(tf.zeros(n_hidden), dtype=tf.float32)
    z_1 = tf.matmul(xy_in, W_1) + b_1
    z_1_bar = tf.matmul(xy_bar_in, W_1) + b_1
    # SiLU / swish activation: x * sigmoid(x).
    a_1 = z_1 * tf.nn.sigmoid(z_1)
    a_1_bar = z_1_bar * tf.nn.sigmoid(z_1_bar)
    W_2 = tf.Variable(initializer([n_hidden, 1]), dtype=tf.float32)
    b_2 = tf.Variable(tf.zeros(1), dtype=tf.float32)
    z_2 = tf.matmul(a_1, W_2) + b_2
    z_2_bar = tf.matmul(a_1_bar, W_2) + b_2
    a_2 = tf.nn.leaky_relu(z_2)
    a_2_bar = tf.nn.leaky_relu(z_2_bar)
    # Donsker-Varadhan bound: E_joint[T] - log E_marginal[exp(T)].
    neural_info_measure = tf.reduce_mean(a_2, axis=0) - tf.math.log(tf.reduce_mean( \
        tf.math.exp(a_2_bar), axis=0))
    # NOTE(review): global_step arrives as a plain int, so the decayed rate
    # is constant — a tf.Variable step would be needed for decay to advance.
    learning_rate = tf.train.exponential_decay(lr, global_step, decay_steps, 0.99, staircase=True)
    # Minimizing the negated bound maximizes the MI estimate.
    optimize = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(-neural_info_measure)
    return xy_in, xy_bar_in, neural_info_measure, optimize
def mine(x, y, n_hidden=50, lr=0.05, batch_size=128, early_stopping=40, stop_wait=100):
    """Estimate the mutual information I(x; y) with MINE.

    Trains the statistics network for up to 1000 epochs and returns
    (final MI estimate averaged over the last epochs, per-epoch history).
    """
    ds = DistributionSimulator(x, y)
    ds.init_batches(batch_size)
    # Joint samples carry the x feature columns plus the paired y column.
    xy_shape = ds.x.shape[1] + 1
    global_step = ds.n_batch * 100
    decay_steps = int(global_step / 100)
    xy_in, xy_bar_in, neural_info_measure, optimize = build_net(n_hidden, lr, global_step, decay_steps, xy_shape)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    neural_info_estimates = []
    for epoch in range(1000):
        ds.init_batches(batch_size)
        ds.reshuffle()
        done = False
        batch_mi = []
        while not done:
            batch, done = ds.next_batch()
            # Joint sample: every column except the shuffled-y one.
            batch_xy = batch[:, :-1]
            # Marginal sample: x columns paired with the shuffled y column.
            batch_x_y = np.concatenate([batch[:, :-2], batch[:, -1].reshape(-1,1)], axis=1)
            _, mi = sess.run([optimize, neural_info_measure], feed_dict={xy_in: batch_xy, \
                xy_bar_in: batch_x_y})
            batch_mi.append(mi)
        # Early stopping after a warm-up of stop_wait epochs.  NOTE(review):
        # this breaks when the latest batch estimate *beats* the recent
        # maximum — confirm intent; conventional early stopping breaks when
        # it fails to improve.
        if epoch > stop_wait:
            if mi >= np.max(neural_info_estimates[-early_stopping:]):
                break
        print(f'epoch: {epoch}, MI estimation: {np.mean(batch_mi)}')
        neural_info_estimates.append(np.mean(batch_mi))
    sess.close()
    # Average the tail of the history for a smoother final estimate.
    eval_idx = max(int(early_stopping/4), 5)
    return np.mean(neural_info_estimates[-eval_idx:]), neural_info_estimates
"""This module runs the main loop for the robotics simulation."""
import time
import timeit
import robotics
import environment
from graphical_output import Field
# [AI]-Bot parameters (passed straight through to robotics.Robot in main)
MAXIMUM_SPEED = 6.0  # Squares per second
NUMBER_OF_SENSORS = 5  # At least 2
SENSOR_CONE_WIDTH = 42  # presumably degrees -- confirm against robotics.Robot
SENSOR_MOUNT_ANGLE = 160  # presumably degrees -- confirm against robotics.Robot
REPULSION_FORCE = 35.0
DISTANCE_DECAY = 1.5
# Environment and simulation parameters
ENVIRONMENT_WIDTH = 30
ENVIRONMENT_HEIGHT = 20
NUMBER_OF_OBSTACLES = 20
TIME_SCALE = 1 / 120.0  # Seconds of wall time per simulation step
def main():
    """Set up world, robot and GUI, then run the simulation until halt."""
    # World with random obstacles.
    world = environment.Environment(ENVIRONMENT_WIDTH, ENVIRONMENT_HEIGHT)
    world.add_random_obstacles(NUMBER_OF_OBSTACLES)
    # Robot placed on a collision-free square.
    start_x, start_y = world.get_free_position()
    bot = robotics.Robot(
        start_x, start_y, MAXIMUM_SPEED, NUMBER_OF_SENSORS, SENSOR_CONE_WIDTH, SENSOR_MOUNT_ANGLE,
        REPULSION_FORCE, DISTANCE_DECAY, TIME_SCALE
    )
    gui = Field(world)
    # We advance 1/TIME_SCALE simulation steps per second -- far more than we
    # want to paint -- so only every redraw_every-th step is drawn.
    target_fps = 50.0  # 25 is NOT fluent motion
    redraw_every = max(int(round((1.0 / TIME_SCALE) / target_fps)), 1)
    step_count = 0
    # Main loop, runs until the GUI requests a halt.
    while not gui.halt:
        tic = timeit.default_timer()
        x_pos, y_pos, theta, _ = bot.step(world)
        if step_count % redraw_every == 0:
            gui.moveBot(x_pos, y_pos, theta)
            gui.paint()
        step_count += 1
        toc = timeit.default_timer()
        # Sleep off the remainder of the step budget; skip sub-millisecond
        # waits where time.sleep overhead would dominate.
        spare = TIME_SCALE - (toc - tic)
        if spare > 0.0011:
            time.sleep(spare)
if __name__ == "__main__":
    main()
|
import logging
from openpyxl.cell.read_only import ReadOnlyCell as _ReadOnlyCell, EmptyCell as _EmptyCell
from .Cell import Cell
logger = logging.getLogger(__name__)
class ReadOnlyCell(_ReadOnlyCell):
    """openpyxl ReadOnlyCell extended with the navigation and cache API of the
    project's Cell class, reusing Cell's property descriptors directly."""
    # Only the two cache fields are added on top of the openpyxl slots.
    __slots__ = ('_cache', '_cache_type')
    def __init__(self, sheet, row, column, value, data_type='n', style_id=0, cache=None, cache_type=None):
        super().__init__(sheet, row, column, value, data_type=data_type, style_id=style_id)
        # Optional externally supplied cached value and its type.
        self._cache = cache
        self._cache_type = cache_type
    @property
    def is_formula(self):
        # Delegate to Cell's descriptor so both cell flavours share one implementation.
        return Cell.is_formula.__get__(self)
    @property
    def cache(self):
        return self._cache
    @property
    def cache_type(self):
        return self._cache_type
    @property
    def data(self):
        return Cell.data.__get__(self)
    @data.setter
    def data(self, value):
        # Writable only while the underlying value is still unset.
        if self._value is not None:
            raise AttributeError("Cell is read only")
        self._value = value
    @property
    def horizontal(self):
        return Cell.horizontal.__get__(self)
    @property
    def vertical(self):
        return Cell.vertical.__get__(self)
    @property
    def top(self):
        return Cell.top.__get__(self)
    @property
    def bottom(self):
        return Cell.bottom.__get__(self)
    @property
    def left(self):
        return Cell.left.__get__(self)
    @property
    def right(self):
        return Cell.right.__get__(self)
|
#this makes my plots pretty! but it is totally not mandatory to do it
import json
# NOTE(review): this fragment relies on `os` and `plt` (matplotlib.pyplot)
# being imported by an earlier notebook cell -- confirm before running standalone.
os.system("curl -O https://raw.githubusercontent.com/fedhere/UInotebooks/master/fbb_matplotlibrc.json")
# Move the downloaded style file into the PUIDATA directory.
os.system("mv " + "fbb_matplotlibrc.json " + os.getenv("PUIDATA"))
# Load the matplotlib rc settings from the JSON file and apply them globally.
s = json.load( open(os.getenv ('PUIDATA')+"/fbb_matplotlibrc.json") )
plt.rcParams.update(s)
|
# !constructor! this is a tag for inserting code snippets into the documentation
from typing import cast
import numpy as np
import pandas as pd # type: ignore
from mpi4py import MPI
comm = MPI.COMM_WORLD
import neworder
class Parallel(neworder.Model):
    """Toy parallel microsimulation: each MPI rank owns a population whose
    individuals migrate between ranks according to a transition matrix.

    :param timeline: timeline driving the model steps
    :param p: per-step probability of moving to any one *other* rank
    :param n: number of individuals each rank starts with
    """
    def __init__(self, timeline: neworder.Timeline, p: float, n: int):
        # initialise base model (essential!)
        super().__init__(timeline, neworder.MonteCarlo.nondeterministic_stream)
        # enumerate possible states: one state per MPI rank
        self.s = np.arange(neworder.mpi.size())
        # create transition matrix with all off-diagonal probabilities equal to p
        # (the diagonal works out to 1 - (size-1)*p, so each row sums to 1)
        self.p = np.identity(neworder.mpi.size()) * (1 - neworder.mpi.size() * p) + p
        # record initial population size
        self.n = n
        # individuals get a unique id and their initial state is the MPI rank
        self.pop = pd.DataFrame({"id": neworder.df.unique_index(n),
                                 "state": np.full(n, neworder.mpi.rank()) }).set_index("id")
    #!constructor!
    # !step!
    def step(self) -> None:
        # generate some movement: sample each individual's next state
        neworder.df.transition(self, self.s, self.p, self.pop, "state")
        # send emigrants to other processes
        # NOTE(review): blocking send before the matching recv relies on MPI
        # eagerly buffering small messages -- could deadlock for very large
        # populations; confirm that is acceptable for this example.
        for s in range(neworder.mpi.size()):
            if s != neworder.mpi.rank():
                emigrants = self.pop[self.pop.state == s]
                neworder.log("sending %d emigrants to %d" % (len(emigrants), s))
                comm.send(emigrants, dest=s)
        # remove the emigrants
        self.pop = self.pop[self.pop.state == neworder.mpi.rank()]
        # receive immigrants
        for s in range(neworder.mpi.size()):
            if s != neworder.mpi.rank():
                immigrants = comm.recv(source=s)
                if len(immigrants):
                    neworder.log("received %d immigrants from %d" % (len(immigrants), s))
                    self.pop = pd.concat((self.pop, immigrants))
    # !step!
    # !check!
    def check(self) -> bool:
        # Ensure we haven't lost (or gained) anybody
        # gather returns None on non-root ranks, so the sum runs on rank 0 only
        totals = comm.gather(len(self.pop), root=0)
        if totals:
            if sum(totals) != self.n * neworder.mpi.size():
                return False
        # And check each process only has individuals that it should have
        out_of_place = comm.gather(len(self.pop[self.pop.state != neworder.mpi.rank()]))
        if out_of_place and any(out_of_place):
            return False
        return True
    # !check!
    # !finalise!
    def finalise(self) -> None:
        # process 0 assembles all the data and prints a summary
        pops = comm.gather(self.pop, root=0)
        if pops:
            pop = pd.concat(pops)
            neworder.log("State counts (total %d):\n%s" % (len(pop), pop["state"].value_counts().to_string()))
    # !finalise!
|
from os import getcwd, path
import pytest
from json_ref_dict import RefDict
from json_ref_dict.exceptions import DocumentParseError, ReferenceParseError
# Commit-pinned schema URL so the https test case is immune to upstream edits.
PINNED_FILE_URL = (
    "https://raw.githubusercontent.com/jacksmith15/json-ref-dict/091af2"
    "c19989a95449df587b62abea89aeb83676/tests/schemas/master.yaml"
)
class TestRefDictIORefs:
    """RefDict integration tests parametrised over every supported URI style:
    relative path, absolute path, file:// URI and https URL."""
    @staticmethod
    @pytest.fixture(
        scope="class",
        params=[
            # relative filepath
            "tests/schemas/master.yaml#/definitions",
            # absolute filepath
            path.join(getcwd(), "tests/schemas/master.yaml#/definitions"),
            # explicit file scheme
            (
                "file://"
                + path.join(getcwd(), "tests/schemas/master.yaml#/definitions")
            ),
            # https URI
            PINNED_FILE_URL + "#/definitions",
        ],
    )
    def schema(request):
        # Class scope: each document is parsed once per URI flavour.
        return RefDict(request.param)
    @staticmethod
    def test_schema_loads(schema):
        # str() renders refs unresolved, exactly as written in the document.
        assert str(schema) == str(
            {
                "foo": {"type": "string"},
                "local_ref": {"$ref": "#/definitions/foo"},
                "remote_ref": {"$ref": "other.yaml#/definitions/bar"},
                "backref": {"$ref": "other.yaml#/definitions/baz"},
            }
        )
    @staticmethod
    def test_local_ref(schema):
        assert schema["local_ref"] == {"type": "string"}
    @staticmethod
    def test_remote_ref(schema):
        assert schema["remote_ref"] == {"type": "integer"}
    @staticmethod
    def test_backref(schema):
        assert schema["backref"] == {"type": "string"}
    @staticmethod
    def test_casting_to_dict_dereferences_all(schema):
        # dict() resolves every $ref eagerly, unlike str() above.
        assert dict(schema) == {
            "foo": {"type": "string"},
            "local_ref": {"type": "string"},
            "remote_ref": {"type": "integer"},
            "backref": {"type": "string"},
        }
    @staticmethod
    def test_loading_unknown_file_raises_document_parse_error():
        with pytest.raises(DocumentParseError):
            _ = RefDict("tests/schemas/nonexistent.yaml#/definitions")
    @staticmethod
    def test_loading_a_json_file_with_tabs_falls_back_to_json_loader():
        """YAML is _mostly_ compatible with JSON.
        However, JSON allows tabs between tokens, whilst YAML does not.
        """
        value = RefDict("tests/schemas/with-tabs.json")
        assert dict(value) == {"some": {"json": ["with", "tabs"]}}
def test_immediately_circular_reference_fails():
    # A document whose root $ref chain refers back to itself cannot resolve.
    with pytest.raises(ReferenceParseError):
        _ = RefDict("tests/schemas/bad-circular.yaml#/definitions/foo")
def test_immediate_references_is_detected():
    # A document that IS a $ref resolves transparently to its target.
    value = RefDict.from_uri("tests/schemas/immediate-ref.json")
    assert value == {"type": "integer"}
def test_immediate_references_can_be_bypassed():
    # Pointing inside the referenced document skips the wrapping $ref.
    value = RefDict.from_uri("tests/schemas/immediate-ref.json#/type")
    assert value == "integer"
|
from BJ import *
|
from functools import partial
from keras.losses import binary_crossentropy
from keras import backend as K
import tensorflow as tf
import numpy as np
import SimpleITK as sitk
from enum import Enum
def contour_losses_and_dice_loss_func(y_true, y_pred, input_shape, beta, gamma, smooth=1.):
    """Combined loss: Dice term + weighted binary cross-entropy + weighted
    Dice over the 2D contours extracted from each volume.

    NOTE(review): dice_coefficient is a similarity (higher is better), yet it
    is ADDED here rather than negated -- confirm the intended sign convention.

    :param input_shape: spatial shape used by the contour extraction
    :param beta: weight of the cross-entropy term
    :param gamma: weight of the contour-Dice term
    """
    dice = dice_coefficient(y_true, y_pred, smooth)
    # Channel 0 of each 5D tensor is the mask volume -- assumes channels-first
    # (batch, channel, z, y, x) layout; TODO confirm against the data pipeline.
    true_contours = extract_volume_2D_contours_tf(y_true[:,0,:,:,:], input_shape)
    pred_contours = extract_volume_2D_contours_tf(y_pred[:,0,:,:,:], input_shape)
    contour_dice = dice_coefficient(true_contours, pred_contours, smooth)
    return dice + (beta*binary_crossentropy(y_true, y_pred)) + (gamma*contour_dice)
def dice_coefficient(y_true, y_pred, smooth=1.):
    """Smoothed soft Dice similarity between two (flattened) Keras tensors."""
    truth, pred = K.flatten(y_true), K.flatten(y_pred)
    overlap = K.sum(truth * pred)
    return (2. * overlap + smooth) / (K.sum(truth) + K.sum(pred) + smooth)
def extract_volume_2D_contours_tf(mask, input_shape):
    """Extract per-slice 2D contours of a binary mask: contour = mask XOR erode(mask).

    Uses the TF1 ``tf.nn.erosion2d`` signature (kernel/strides/rates).
    NOTE(review): the structuring element is all zeros and the input is cast to
    int32 before erosion -- verify this runs on the target TF version, which
    typically expects float input here.
    """
    kernel = tf.zeros((3,3,input_shape[2]), dtype=tf.int32)
    int_mask = tf.cast(mask, dtype=tf.int32)
    eroded_mask = tf.nn.erosion2d(int_mask, kernel=kernel, strides=(1,1,1,1), rates=(1,1,1,1), padding="SAME")
    # XOR of the mask with its erosion leaves exactly the boundary pixels.
    contour = tf.cast(tf.bitwise.bitwise_xor(int_mask,eroded_mask), dtype=tf.float32)
    return contour
class SurfaceDistanceMeasures(Enum):
    """Indices naming the surface-distance statistics reported by evaluation code."""
    hausdorff_distance, mean_surface_distance, median_surface_distance, std_surface_distance, max_surface_distance = \
        range(5)
def recall_rate(y_true, y_pred, smooth=1.):
    """Smoothed recall: true positives over all actual positives (numpy arrays)."""
    truth = y_true.flatten()
    pred = y_pred.flatten()
    true_positives = np.sum(truth * pred)
    return true_positives / (np.sum(truth) + smooth)
def false_positive_rate(y_true, y_pred, smooth=1.):
    """Smoothed fraction of predicted positives that are actually negative."""
    actual_negatives = (1 - y_true).flatten()
    pred = y_pred.flatten()
    false_positives = np.sum(actual_negatives * pred)
    return false_positives / (np.sum(pred) + smooth)
def false_negative_rate(y_true, y_pred, smooth=1.):
    """Smoothed false negatives over predicted negatives.

    NOTE(review): the denominator counts PREDICTED negatives (this is the
    false-omission rate); a textbook FNR divides by actual positives.
    Behavior preserved as-is -- confirm intent.
    """
    truth = y_true.flatten()
    predicted_negatives = (1 - y_pred).flatten()
    false_negatives = np.sum(truth * predicted_negatives)
    return false_negatives / (np.sum(predicted_negatives) + smooth)
def dice_coefficient(y_true, y_pred, smooth=1.):
    """Smoothed soft Dice similarity on Keras tensors.

    NOTE(review): identical duplicate of the dice_coefficient defined earlier
    in this module (it silently shadows it); one copy should likely be removed.
    """
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
def dice_coefficient_np(y_true, y_pred, smooth=1.):
    """NumPy twin of dice_coefficient, for evaluating arrays outside the graph."""
    truth, pred = y_true.flatten(), y_pred.flatten()
    overlap = np.sum(truth * pred)
    return (2. * overlap + smooth) / (np.sum(truth) + np.sum(pred) + smooth)
def vod_coefficient(y_true, y_pred, smooth=1.):
    """Smoothed intersection-over-union (Jaccard) on Keras tensors."""
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    # Inclusion-exclusion: |A| + |B| - |A ∩ B|
    union = K.sum(y_true_f) + K.sum(y_pred_f) - intersection
    return (intersection + smooth) / (union + smooth)
def vod_coefficient_np(y_true, y_pred, smooth=1.):
    """NumPy twin of vod_coefficient (smoothed intersection-over-union)."""
    truth, pred = y_true.flatten(), y_pred.flatten()
    overlap = np.sum(truth * pred)
    # Inclusion-exclusion for the union size.
    union = np.sum(truth) + np.sum(pred) - overlap
    return (overlap + smooth) / (union + smooth)
def get_surface_distances(truth, pred_binary):
    """Compute the Hausdorff distance and mean symmetric surface distance
    between two binary volumes (numpy arrays) via SimpleITK distance maps.

    :return: (hausdorff distance, mean surface distance), or the bare scalar
        100 when the prediction contains no surface at all.
        NOTE(review): the empty-prediction branch returns a scalar while the
        normal branch returns a 2-tuple -- callers must handle both shapes.
    """
    hausdorff_distance_filter = sitk.HausdorffDistanceImageFilter()
    statistics_image_filter = sitk.StatisticsImageFilter()
    pred_bin_0 = sitk.GetImageFromArray(pred_binary)
    truth = sitk.GetImageFromArray(truth)
    # Unsigned distance-to-surface maps and label contours for both volumes.
    reference_distance_map = sitk.Abs(sitk.SignedMaurerDistanceMap(truth, squaredDistance=False))
    reference_surface = sitk.LabelContour(truth)
    segmented_distance_map = sitk.Abs(sitk.SignedMaurerDistanceMap(pred_bin_0, squaredDistance=False))
    segmented_surface = sitk.LabelContour(pred_bin_0)
    # Surface voxel counts (sum over the binary contour images).
    statistics_image_filter.Execute(reference_surface)
    num_reference_surface_pixels = int(statistics_image_filter.GetSum())
    statistics_image_filter.Execute(segmented_surface)
    num_segmented_surface_pixels = int(statistics_image_filter.GetSum())
    if num_segmented_surface_pixels == 0:
        # Empty prediction: distances are undefined; return a sentinel.
        return 100
    hausdorff_distance_filter.Execute(truth, pred_bin_0)
    # Multiplying a contour by the other volume's distance map samples
    # distances exactly at the surface voxels.
    seg2ref_distance_map = reference_distance_map * sitk.Cast(segmented_surface, sitk.sitkFloat32)
    ref2seg_distance_map = segmented_distance_map * sitk.Cast(reference_surface, sitk.sitkFloat32)
    seg2ref_distance_map_arr = sitk.GetArrayViewFromImage(seg2ref_distance_map)
    seg2ref_distances = list(seg2ref_distance_map_arr[seg2ref_distance_map_arr != 0])
    # Surface voxels at distance 0 were dropped by the != 0 mask; pad with
    # zeros so the mean averages over every surface voxel.
    seg2ref_distances = seg2ref_distances + list(np.zeros(num_segmented_surface_pixels - len(seg2ref_distances)))
    ref2seg_distance_map_arr = sitk.GetArrayViewFromImage(ref2seg_distance_map)
    ref2seg_distances = list(ref2seg_distance_map_arr[ref2seg_distance_map_arr != 0])
    ref2seg_distances = ref2seg_distances + list(np.zeros(num_reference_surface_pixels - len(ref2seg_distances)))
    all_surface_distances = seg2ref_distances + ref2seg_distances
    return hausdorff_distance_filter.GetHausdorffDistance(), np.mean(all_surface_distances)
def dice_coefficient_loss(y_true, y_pred):
    """Negated Dice similarity, suitable for minimisation."""
    return -dice_coefficient(y_true, y_pred)
def vod_coefficient_loss(y_true, y_pred):
    """Negated intersection-over-union, suitable for minimisation."""
    return -vod_coefficient(y_true, y_pred)
def weighted_dice_coefficient(y_true, y_pred, axis=(-3, -2, -1), smooth=0.00001):
    """Per-channel Dice averaged over channels, weighting every label equally.

    The default axis (-3, -2, -1) reduces the spatial dimensions of a
    "channels first" tensor, leaving one Dice score per channel before K.mean.
    :param smooth: small constant guarding against division by zero on empty labels
    :param y_true: ground-truth tensor
    :param y_pred: prediction tensor
    :param axis: axes reduced when computing each per-channel Dice score
    :return: mean per-channel Dice score
    """
    return K.mean(2. * (K.sum(y_true * y_pred,
                              axis=axis) + smooth / 2) / (K.sum(y_true,
                                                                axis=axis) + K.sum(y_pred,
                                                                                   axis=axis) + smooth))
def weighted_dice_coefficient_loss(y_true, y_pred):
    """Negated per-label Dice, suitable for minimisation."""
    return -weighted_dice_coefficient(y_true, y_pred)
def label_wise_dice_coefficient(y_true, y_pred, label_index):
    """Dice for a single label channel (channels-last indexing)."""
    return dice_coefficient(y_true[..., label_index], y_pred[..., label_index])
def get_label_dice_coefficient_function(label_index):
    """Build a named per-label Dice metric for use in model.compile(metrics=...)."""
    f = partial(label_wise_dice_coefficient, label_index=label_index)
    # Keras reads __name__ to label the metric in logs and history.
    f.__setattr__('__name__', 'label_{0}_dice_coef'.format(label_index))
    return f
def dice_and_xent(y_true, y_pred, xent_weight=1.0, weight_mask=None):
    """Dice loss plus (optionally masked) binary cross-entropy, weighted by xent_weight."""
    return dice_coef_loss(y_true, y_pred) + \
        xent_weight * weighted_cross_entropy_loss(y_true, y_pred, weight_mask)
def weighted_cross_entropy_loss(y_true, y_pred, weight_mask=None):
    """Mean binary cross-entropy, optionally weighted element-wise by weight_mask.

    Bug fix: the original called ``K.prod(weight_mask, xent)``, but the Keras
    backend ``prod(x, axis)`` REDUCES a single tensor along an axis -- it is
    not an element-wise product, so the mask was applied incorrectly (the loss
    tensor was interpreted as an axis argument). A per-element weight mask
    requires plain element-wise multiplication.
    """
    xent = K.binary_crossentropy(y_true, y_pred)
    if weight_mask is not None:
        xent = xent * weight_mask
    return K.mean(xent)
def _focal_loss(gamma=2., alpha=.5):
    """Build a binary focal-loss function with fixed focusing/balance parameters.

    NOTE(review): K.log is applied to raw predictions without clipping, so a
    prediction of exactly 0 or 1 yields log(0) -- consider K.epsilon() clipping.

    :param gamma: focusing exponent down-weighting easy examples
    :param alpha: balance factor between positive and negative terms
    """
    def focal_loss_fixed(y_true, y_pred):
        # Predictions at positive positions (filled with 1 elsewhere so the log term is 0).
        pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
        # Predictions at negative positions (filled with 0 elsewhere, likewise neutral).
        pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
        return -K.sum(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1)) - K.sum(
            (1 - alpha) * K.pow(pt_0, gamma) * K.log(1. - pt_0))
    return focal_loss_fixed
# Short aliases; dice_and_xent and combined_dice_focal_loss rely on these names.
dice_coef = dice_coefficient
dice_coef_loss = dice_coefficient_loss
binary_crossentropy_loss = binary_crossentropy
# Default focal-loss instance (gamma=2, alpha=0.5).
focal_loss = _focal_loss()
def combined_dice_focal_loss(y_true, y_pred, focal_weight=0.0005):
    """Dice loss plus a focal-loss term scaled down by focal_weight."""
    return dice_coef_loss(y_true, y_pred) + \
        focal_weight * focal_loss(y_true, y_pred)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os,sys
import subprocess
import time
def color_red(string):  # wrap text in ANSI escape codes so it prints in red
    return"%s[31;10m%s%s[0m"%(chr(27),string,chr(27))
def color_yellow(string):  # wrap text in ANSI escape codes so it prints in yellow
    return"%s[33;10m%s%s[0m"%(chr(27),string,chr(27))
def log(logcontent):
    """Append one line to installlog.txt in the current directory (install audit log)."""
    logfile = open('installlog.txt','a')
    logfile.write('%s' % logcontent)
    logfile.write('\n')
    logfile.close()
def check_user():
    """Abort unless the 'anyuan' account exists and the script runs as root."""
    command = 'grep -iw anyuan /etc/passwd'
    execute = subprocess.Popen(command,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
    result = execute.communicate()
    # grep exits 0 only when the account was found in /etc/passwd.
    if execute.poll() == 0:
        pass
    else:
        print "you should create user anyuan, command: adduser anyuan"
        sys.exit(555)
    # Installation writes system files, so require effective uid 0 (root).
    if os.geteuid() != 0:
        print "This program must be run as root. Aborting."
        sys.exit(404)
def input_passwd(passwdname):
    """Prompt interactively for the password identified by *passwdname*.

    Known names: 'mysqlroot', 'mysqlnormal', 'zsh', 'mongo'; any other name
    falls through every branch and implicitly returns None.
    """
    if passwdname == 'mysqlroot':
        mysqlrootpasswd = raw_input('please input mysql root\'s password: ')
        return mysqlrootpasswd
    elif passwdname == 'mysqlnormal':
        mysqlnormalpasswd = raw_input('please input mysql normaluser(anyuan)\'s password: ')
        return mysqlnormalpasswd
    elif passwdname == 'zsh':
        zshpasswd = raw_input('please input systemuser(anyuan)\'s password: ')
        return zshpasswd
    elif passwdname == 'mongo':
        mongopasswd = raw_input('please input mongodb user(anyuan)\'s password: ')
        return mongopasswd
def backup(source, dest):
    """Tar-gzip *source* into *dest*/<basename>_<timestamp>.tar.gz.

    Creates *dest* if missing; exits the process if *source* does not exist.
    """
    if not os.path.exists(dest):
        print dest+" does not exist,create it"
        os.mkdir(dest)
    if not os.path.exists(source):
        print source+" does not exist."
        sys.exit()
    # Timestamp keeps successive backups of the same path distinct.
    filedate = time.strftime('%Y%m%d')+"_"+time.strftime('%H%M%S')
    filename = os.path.split(source)[1]+"_"+filedate+".tar.gz"
    backup_command = "tar zcf %s %s" % (dest+'/'+filename,source)
    if os.system(backup_command) == 0:
        print "backup "+source+" successful!"
    else:
        print "backup "+source+" failed.........."
def replace(filepath, oldstr, newstr):
    """Replace every occurrence of *oldstr* with *newstr* in *filepath*, in place."""
    try:
        print newstr+' replace '+oldstr+' in '+filepath
        f = open(filepath,'r+')
        all_lines = f.readlines()
        # Rewind and truncate, then rewrite every (possibly substituted) line.
        f.seek(0)
        f.truncate()
        for line in all_lines:
            line = line.replace(oldstr, newstr)
            f.write(line)
        f.close()
        print color_red(newstr+' replace '+oldstr+' in '+filepath+' '+'ok')
        log(newstr+' replace '+oldstr+' in '+filepath+' '+'ok')
        time.sleep(3)
    except Exception,e:
        # NOTE(review): '... error '+e concatenates a str with an exception
        # object, which itself raises TypeError on Python 2 -- should be str(e).
        log(newstr+' replace '+oldstr+' in '+filepath+' '+'error '+e)
        print e
def add_filecontent(basefile,content,pos):
    """Insert the contents of file *content* into *basefile* at *pos*.

    *pos* may be a digit string (the insert goes after that many lines) or a
    search string (the insert goes just before its first occurrence).
    """
    try:
        print content+' addto '+basefile
        base_filename = open(basefile, "r")
        file_content = open(content,"r")
        if pos.isdigit():
            # Numeric position: splice the new text in after line number pos.
            add_content = file_content.read()
            file_content.close()
            lines=[]
            for line in base_filename:
                lines.append(line)
            base_filename.close()
            lines.insert(int(pos),add_content)
            result=''.join(lines)
            f = open(basefile, "w")
            f.write(result)
            f.close()
            print color_red(content+' addto '+basefile+' '+'ok')
            log(content+' addto '+basefile+' '+'ok')
            time.sleep(3)
        else:
            # String position: insert just before the first occurrence of pos.
            # NOTE(review): `content` (the source file *path*) is clobbered
            # here by the base file's text, so the success log below prints
            # file contents rather than the filename -- likely unintended.
            content = base_filename.read()
            add_content = file_content.read()
            base_filename.close()
            file_content.close()
            pos = content.find(pos)
            print pos
            if pos != -1:
                content = content[:pos] + add_content + content[pos:]
                f = open(basefile, "w")
                f.write(content)
                f.close()
                print color_red(content+' addto '+basefile+' '+'ok')
                log(content+' addto '+basefile+' '+'ok')
                time.sleep(3)
    except Exception,e:
        log(content+' addto '+basefile+' '+'error '+e)
        print e
#def env(filename):
# f = open(filename)
# for command in f.readlines():
# print color('****************')
# print command
# execute = subprocess.Popen(command,shell=True,stderr=subprocess.PIPE)
# result = execute.communicate()
# output = result[0]
# error = result[1]
# if output is None and error == '':
# print color(command.strip('\n')+' '+'ok')
# continue
# else:
# print error
# break
#def main():
# file_path = os.path.abspath('.')
# env_file = file_path+'/'+'envcreate.txt'
# env(env_file)
#def base_nojudge(command):
# subprocess.call(command,shell=True)
# print color_red('starting'+command)
def base(command):
    """Run a shell command; log and sleep on success, abort the script on failure.

    Commands containing 'oh-my-zsh' are treated as successful even on a
    nonzero exit -- presumably its installer exits nonzero despite working;
    confirm.
    """
    print color_red('****************')
    print command
    execute = subprocess.Popen(command,shell=True,stderr=subprocess.PIPE)
    result = execute.communicate()
    output = result[0]
    error = result[1]
    if execute.poll() == 0:
        print color_red(command.strip('\n')+' '+'ok')
        log(command.strip('\n')+' '+'ok')
        time.sleep(3)
    else:
        if 'oh-my-zsh' in command:
            print color_red(command.strip('\n')+' '+'ok')
            log(command.strip('\n')+' '+'ok')
        else:
            print error
            print color_yellow('error: please execute again after checking.')
            log(command+' '+'error'+'\n'+error)
            sys.exit(250)
def base_passwd(passwd,command):
    """Run a shell command that reads a password from stdin (piped via echo).

    Aborts the whole script on a nonzero exit status, like base().
    """
    print color_red('****************')
    print command
    # Feed the password to the command's stdin through an echo pipe.
    outputpasswd = subprocess.Popen(['echo',passwd],stdout=subprocess.PIPE,stderr=subprocess.PIPE)
    execute = subprocess.Popen(command,shell=True,stdin=outputpasswd.stdout,stderr=subprocess.PIPE)
    result = execute.communicate()
    output = result[0]
    error = result[1]
    if execute.poll() == 0:
        print color_red(command.strip('\n')+' '+'ok')
        log(command.strip('\n')+' '+'ok')
        time.sleep(3)
    else:
        print error
        print color_yellow('error: please execute again after checking.')
        log(command+' '+'error'+'\n'+error)
        sys.exit(250)
|
#!/usr/bin/python
import os, glob
import logging
import time
from queue import Queue;
logger = logging.getLogger("Notification")
class QueueManager:
    """Manages the on-disk notification queue: one ``<case_id>.q`` file per
    pending case, stored under *dir_path* (default ``data/queue``)."""

    def __init__(self, dir_path = None):
        # Fall back to the conventional data/queue directory.
        if not dir_path:
            self.dir_path = os.path.join("data","queue")
        else:
            self.dir_path = dir_path

    def prepare_filepath(self, case_id):
        """Return the queue-file path for *case_id*."""
        return os.path.join(self.dir_path, str(case_id) + ".q")

    def age(self, past_time):
        """Return a human-readable age ("N days, N hours, ...") for the epoch
        timestamp *past_time*; empty string when it is not in the past.

        Bug fix: the original used true division (/), which yields floats on
        Python 3, so a sub-day delta still compared > 0 and rendered spurious
        components like "0 days, ". Floor division (//) restores the intended
        integer day/hour/minute components.
        """
        current_time = time.time()
        time_delta = current_time - past_time
        age_str = ""
        if int(time_delta) > 0:
            days = int(time_delta) // 86400
            hours = int(time_delta) // 3600 % 24
            minutes = int(time_delta) // 60 % 60
            seconds = int(time_delta) % 60
            if days > 0:
                age_str = "%d days, " % days
            if hours > 0:
                age_str = "%s%d hours, " % (age_str, hours)
            if minutes > 0:
                age_str = "%s%d minutes, " % (age_str, minutes)
            if seconds > 0:
                age_str = "%s%d seconds " % (age_str, seconds)
        return age_str

    def get_current_queue(self):
        """Load every ``*.q`` file under dir_path into a {case_id: Queue} map.

        NOTE(review): ``from queue import Queue`` at the top of this file pulls
        in the *stdlib* Queue, which takes no case_id and has no load_data();
        a project-local Queue class was presumably intended -- confirm.
        """
        queue = {}
        for filename in glob.glob(os.path.join(self.dir_path, "*.q")):
            case_id, ext = os.path.splitext(os.path.basename(filename))
            queue_item = Queue(case_id)
            queue_item.load_data()
            queue[case_id] = queue_item
        return queue

    def export(self):
        """Write the current queue to a timestamped CSV under data/exports and
        return its path.

        Bug fix: ``dict.iteritems()`` is Python 2 only and raises
        AttributeError on Python 3; replaced with ``items()``.
        """
        logger.info("exporting current queue stats.")
        if not os.path.exists(os.path.join("data","exports")):
            os.makedirs(os.path.join("data","exports"))
        filename = "queue_%s.csv" % time.strftime("%Y%m%d%H%M%S")
        filepath = os.path.join("data","exports",filename)
        with open(filepath, "w+") as casefile:
            casefile.write("Case ID,Customer ID,Customer Name,Attempts,Created,Age\n")
            for case_id, queue in self.get_current_queue().items():
                # Commas in names would break the CSV columns, so strip them.
                casefile.write("%s,%s,%s,%s,%d,%s\n" %
                               (queue.data.case_id, queue.data.customer_id, queue.data.customer_name.replace(","," "),
                                queue.data.attempts, queue.data.filecreated, self.age(queue.data.filecreated)))
        logger.info("Queue Stats exported to %s location" % filepath)
        return filepath

    def print_stat(self):
        """Log a one-line summary of every queued case (Python 3 ``items()``)."""
        logger.info("************** Printing Queue Stats **************")
        for case_id, queue in self.get_current_queue().items():
            logger.info(r"Case ID: %s, Cust ID: %d, Cust Name: %s, Attempts: %s, Pending since: %s" %
                        (queue.data.case_id, queue.data.customer_id, queue.data.customer_name,
                         queue.data.attempts, self.age(queue.data.filecreated)))
        logger.info("**************************************************")
|
class ListNode:
    """A single node of a singly linked list holding one value."""
    def __init__(self, x):
        self.val = x
        self.next = None  # detached until linked by the caller
class Solution:
    """Adds two numbers stored as least-significant-digit-first linked lists
    (LeetCode #2), padding the shorter list with trailing zeros first."""
    def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
        """Return a new linked list holding the digit-wise sum of l1 and l2."""
        if l1 is None:
            return l2
        if l2 is None:
            return l1
        _addition = 0
        _div_10 = 0
        _remainder = 0
        _carry = 0
        # p = l1.val
        # q = l2.val
        l3 = []
        # Pad the shorter operand with trailing (high-order) zeros so both
        # lists can be walked in lockstep.
        len1 = self.checkLength(l1)
        len2 = self.checkLength(l2)
        if len1 > len2:
            l2 = self.append(l2, len1 - len2)
        else:
            l1 = self.append(l1, len2 - len1)
        while l1 or l2:
            _addition = l1.val + l2.val + _carry
            _div_10 = _addition//10
            l1 = l1.next
            l2 = l2.next
            if _div_10 == 0:
                # No overflow: the digit is the raw sum.
                _carry = _div_10
                l3.append(_addition)
                continue
            # Overflow: keep the low digit, carry the rest.
            _remainder = _addition % 10
            _carry = _div_10
            l3.append(_remainder)
        # A final carry becomes one extra high-order digit.
        if _carry != 0:
            l3.append(_carry)
        l4 = self.list2link(l3)
        # l5 = self.reverseLL(l4)
        return l4
    def append(self, lin, diff):
        """Return a copy of *lin* extended with *diff* trailing zero digits."""
        new_list = []
        while lin:
            new_list.append(lin.val)
            lin = lin.next
        new_list.extend([0]*diff)
        new_Linklist = self.list2link(new_list)
        return new_Linklist
    def checkLength(self, nodelen):
        """Return the number of nodes in the list."""
        count = 0
        while nodelen:
            count += 1
            nodelen = nodelen.next
        return count
    def list2link(self, l):
        """Build a linked list from a Python list of digits (None when empty)."""
        if len(l) == 0:
            return None
        ret_tail = ret_head = ListNode(l[0])
        for val in l[1:]:
            tmp = ListNode(val)
            ret_tail.next = tmp
            ret_tail = ret_tail.next
        return ret_head
    def printList(self,l):
        """Print the list's values on one line, space-separated."""
        temp = l
        while temp:
            print(temp.val, end=" ")
            temp = temp.next
    def reverseLL(self, l):
        """Return the list reversed in place (currently unused above)."""
        _prev = None
        _current = l
        _next = None
        while _current:
            _next = _current.next
            _current.next = _prev
            _prev = _current
            _current = _next
        lh = _prev
        return lh
if __name__ == '__main__':
    # Manual smoke test with digits stored least-significant first.
    solution = Solution()
    ll1 = solution.list2link([2, 7, 8, 9, 9, 9])
    solution.printList(ll1)
    print('\n')
    ll2 = solution.list2link([3, 7, 5])
    solution.printList(ll2)
    print('\n')
    ll3 = solution.addTwoNumbers(ll1, ll2)
    solution.printList(ll3)
from importlib import import_module
from datalake_library.commons import init_logger
from datalake_library.configuration.resource_configs import DynamoConfiguration
from datalake_library.interfaces.dynamo_interface import DynamoInterface
logger = init_logger(__name__)
class TransformHandler:
    """Dispatches Stage A / Stage B dataset transformations.

    The transform module name for a ``team-dataset`` pair is looked up in
    DynamoDB, imported dynamically, and its ``CustomTransform`` class applied.

    Improvements over the original: the no-op ``try: ... except Exception as
    e: raise e`` wrappers were removed (exceptions propagate unchanged), and
    the duplicated module-loading / response-validation code was factored into
    private helpers.
    """

    def __init__(self):
        logger.info("Transformation Handler initiated")

    @staticmethod
    def _load_transform_class(stage_package, transform_name):
        """Import datalake_library.transforms.<stage_package>.<transform_name>
        and return its CustomTransform class."""
        module = import_module(
            'datalake_library.transforms.{}.{}'.format(stage_package, transform_name))
        return getattr(module, 'CustomTransform')

    @staticmethod
    def _validate_stage_b_response(response):
        """Raise ValueError unless *response* is a non-empty dict carrying the
        keys the Stage B pipeline requires (processedKeysPath and
        jobDetails.jobStatus). Check order matches the original code."""
        if ((len(response) == 0) or (not isinstance(response, dict)) or ('processedKeysPath' not in response)
                or ('jobDetails' not in response) or ('jobStatus' not in response['jobDetails'])):
            raise ValueError("Invalid dictionary - Aborting")

    def stage_a_transform(self, bucket, key, team, dataset):
        """Applies StageA Transformation to Object

        Arguments:
            bucket {string} -- Origin S3 Bucket
            key {string} -- Key to transform
            team {string} -- Team owning the transformation
            dataset {string} -- Dataset targeted by transformation
        Returns:
            {list} -- Keys transformed
        Raises:
            ValueError -- when the transform returns no keys
        """
        transform_info = self.get_transform_info('{}-{}'.format(team, dataset))
        Transform = self._load_transform_class('stage_a_transforms', transform_info['stage_a_transform'])
        response = Transform().transform_object(bucket, key, team, dataset)
        if not isinstance(response, list) or len(response) == 0:
            raise ValueError("Invalid list of processed keys - Aborting")
        logger.info("Object successfully transformed")
        return response

    def stage_b_transform(self, bucket, keys, team, dataset):
        """Applies StageB Transformation to Objects

        Arguments:
            bucket {string} -- Origin S3 Bucket
            keys {string} -- Keys to transform
            team {string} -- Team owning the transformation
            dataset {string} -- Dataset targeted by transformation
        Returns:
            {dict} -- Dictionary of Bucket, Keys transformed, Path to Keys Processed and Job Details
        """
        transform_info = self.get_transform_info('{}-{}'.format(team, dataset))
        Transform = self._load_transform_class('stage_b_transforms', transform_info['stage_b_transform'])
        response = Transform().transform_object(bucket, keys, team, dataset)
        self._validate_stage_b_response(response)
        return response

    def stage_b_job_status(self, bucket, keys, team, dataset, processed_keys_path, job_details):
        """Checks completion of Stage B Job

        Arguments:
            bucket {string} -- Origin S3 bucket
            keys {string} -- Keys to transform
            team {string} -- Team owning the transformation
            dataset {string} -- Dataset targeted by transformation
            processed_keys_path {string} -- Job output S3 path
            job_details {string} -- Details about job to monitor
        Returns:
            {dict} -- Dictionary of Bucket and Keys transformed
        Raises:
            ValueError -- on a malformed response or a FAILED job status
        """
        transform_info = self.get_transform_info('{}-{}'.format(team, dataset))
        Transform = self._load_transform_class('stage_b_transforms', transform_info['stage_b_transform'])
        response = Transform().check_job_status(bucket, keys, processed_keys_path, job_details)
        self._validate_stage_b_response(response)
        if response['jobDetails']['jobStatus'] == 'FAILED':
            raise ValueError('Job Failed')
        elif response['jobDetails']['jobStatus'] == 'SUCCEEDED':
            logger.info("Objects successfully transformed")
        return response

    def get_transform_info(self, dataset):
        """Fetch the transform configuration item for *dataset* from DynamoDB."""
        dynamo_config = DynamoConfiguration()
        dynamo_interface = DynamoInterface(dynamo_config)
        return dynamo_interface.get_transform_table_item(dataset)['transforms']
|
from django.shortcuts import render
# Create your views here.
from django.shortcuts import render, HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
import os
from .models import User,Questions
import json as simplejson
from Gamer.check.checkscore import check
import random
#题目
@csrf_exempt
def Choise(request):
    """POST endpoint: record the player's chosen question group and return
    five random questions from that group.

    Expects a JSON body {"username": ..., "groups": ...}; responds with five
    entries, each carrying the question id, text and options A-C.
    NOTE(review): returns nothing (implicit None) for non-POST requests or
    when the group has no questions -- Django will error on that path.
    """
    if request.method == "POST":
        req = simplejson.loads(request.body.decode('utf-8'))
        username=req['username']
        groups = req['groups']
        # Remember which group this user is playing.
        User.objects.filter(username=username).update(groups=groups)
        question = Questions.objects.filter(groups=groups).values('id', 'question', 'option1', 'option2', 'option3')
        if question:
            # Draw 5 distinct questions at random.
            question=random.sample(list(question),5)
            lists = [0, 1, 2, 3,4]
            d = dict()
            for i in lists:
                oneQ = question[i]
                d[i] = {
                    'status': 1,
                    'question': i+1,
                    'data':{
                        'id': oneQ['id'],
                        'data': {
                            'question': oneQ['question'],
                            'A': oneQ['option1'],
                            'B': oneQ['option2'],
                            'C': oneQ['option3'],
                        }
                    }
                }
            return JsonResponse(d)
import base64
# 返回图片
@csrf_exempt
def ReturnImage(request):
    """POST endpoint: grade the user's answers, then return a score-band image
    (50-65, 65-70, 70-85 or 85-100) as a base64 data URI.

    NOTE(review): file handles opened here are never closed, and non-POST
    requests return None -- consider `with open(...)` and an explicit 405.
    """
    if request.method == 'POST':
        obj = User.objects
        d = os.path.dirname(__file__)
        req = simplejson.loads(request.body.decode('utf-8'))
        username = req['username']
        # Score the submitted answers before reading the stored score.
        check(request,username)
        X = "data:image/png;base64,"
        # Probe the score bands from highest to lowest; every empty queryset
        # falls through to the next lower band.
        score = obj.filter(username=username,score__gte=85,score__lte=100).values('score')
        if not score:
            score = obj.filter(username=username,score__gte=70,score__lt=85).values('score')
            if not score:
                score = obj.filter(username=username, score__gte=65, score__lt=70).values('score')
                if not score:
                    '''只剩50~65了'''
                    img =os.path.join(d,"photo/50-65.jpg")
                    dt = open(img, 'rb').read()
                    data1 = base64.b64encode(dt).decode('utf-8')
                    return HttpResponse(X+'%s'%(data1))
                    # return HttpResponse(data,content_type='image/png')
                else:
                    img = os.path.join(d,"photo/65-70.jpg")
                    dt = open(img, 'rb').read()
                    data1 = base64.b64encode(dt).decode('utf-8')
                    return HttpResponse(X+'%s'%(data1))
                    # return HttpResponse(data, content_type='image/png')
            else:
                img = os.path.join(d,"photo/70-85.jpg")
                dt = open(img, 'rb').read()
                data1 = base64.b64encode(dt).decode('utf-8')
                return HttpResponse(X+'%s'%(data1))
                # return HttpResponse(data,content_type='image/png')
        else:
            img = os.path.join(d,"photo/85-100.jpg")
            dt = open(img, 'rb').read()
            data1 = base64.b64encode(dt).decode('utf-8')
            return HttpResponse(X+'%s'%(data1))
            # return HttpResponse(data, content_type='image/png')
'''
d = os.path.dirname(__file__)
image = os.path.join(d,"photo/image/Paper_Architecture_by_Dmitri_Popov.jpg")
data = open(image,'rb').read() # 读取图片
return HttpResponse(data,content_type='image/png')
'''
@csrf_exempt
def ReturnImage2(request):
    """POST endpoint: return the raw PNG for the user's score band.

    Bands are [50,65), [65,70), [70,85) and [85,100).

    Bug fix: the original iterated i over [0, 1, 2, 3, 4] with a dead
    ``if i == 100`` guard, so the i == 4 iteration always evaluated
    ``bounds[5]`` and raised IndexError whenever no lower band matched.
    Iterating over the four band pairs removes the crash. The unused
    M/lst locals and the builtin-shadowing ``list`` names were dropped,
    and the image file handle is closed promptly.
    """
    if request.method == 'POST':
        obj = User.objects
        d = os.path.dirname(__file__)
        req = simplejson.loads(request.body)
        username = req['username']
        bounds = [50, 65, 70, 85, 100]
        for i in range(len(bounds) - 1):
            low, high = bounds[i], bounds[i + 1]
            score = obj.filter(username=username, score__gte=low, score__lt=high).values('score')
            if score:
                image = os.path.join(d, "photo/%s-%s.png" % (low, high))
                with open(image, 'rb') as imgfile:
                    data = imgfile.read()
                return HttpResponse(data, content_type='image/png')
# score = obj.filter(username=username,score__gte=85,score__lte=100).values('score')
#九宫格
@csrf_exempt
def nine(request):
    """POST endpoint: validate a 3x3 sliding-puzzle solution.

    Expects JSON {"puzzle": [...]}; the puzzle is solved when the tiles are in
    order 0..8. Responds {"status": 1} on success, or status 0 with a message.
    """
    if request.method == 'POST':
        # d = os.path.dirname(__file__)
        nineAnswer = [0,1,2,3,4,5,6,7,8]
        # list = request.POST.getlist('list[]')  # array parameter name
        # NOTE(review): the local name `list` shadows the builtin.
        list = simplejson.loads(request.body.decode('utf-8'))
        list = list['puzzle']
        if list == nineAnswer:
            return JsonResponse(
                {
                    'status':1,
                },
                json_dumps_params={'ensure_ascii': False}
            )
            # image = os.path.join(d, "photo/myheart.jpg")
            # data = open(image, 'rb').read()  # read the image
            # return HttpResponse(data, content_type='image/png')
        else:
            return JsonResponse(
                {
                    'status':0,
                    'success':False,
                    'message':'你的拼图有错误哦'
                },
                json_dumps_params={'ensure_ascii': False}
            )
|
#Servers enumeration
import socket
# Enumerate subdomains by resolving <word>.<domain> for every word in brute.txt.
domain = input('Enter the domain:\n')
# Candidate subdomain names live one per line in brute.txt.
with open("brute.txt", "r") as file:
    names = file.readlines()
new_line="\n"
for name in names:
    DNS = f"{name.strip(new_line)}.{domain}"
    try:
        # Print only the names that resolve, with their address.
        print(f"{DNS} : {socket.gethostbyname(DNS)}")
    except socket.gaierror as e:
        # Name did not resolve -- skip it silently.
        pass
        #print(f"{DNS} {e}")
|
from celery import shared_task
from django.core.mail import send_mail
from .models import Order
@shared_task
def order_created(order_id):
    """Celery task: email the customer a confirmation when the order is
    successfully placed.

    :param order_id: primary key of the Order to notify about
    :return: number of messages sent (1 on success), as reported by send_mail
    """
    order = Order.objects.get(id=order_id)
    subject = f'Order nr. {order.id}'
    message = f'Dear {order.first_name},\n\nYou have successfully placed an order. Your order id is {order.id}'
    mail_sent = send_mail(subject, message, 'admin@myshop.com', [order.email])
    return mail_sent
|
# -*- coding: utf-8 -*-
class ListNode:
    """Singly linked list node holding one integer digit."""

    def __init__(self, x):
        self.val = x
        self.next = None

    def __eq__(self, other):
        # Two lists are equal when every value matches pairwise; comparing
        # against None is always False.
        if other is None:
            return False
        return self.val == other.val and self.next == other.next
class Solution:
    def addTwoNumbers(self, l1, l2):
        """Add two non-negative integers stored as reversed-digit lists.

        Each input node carries one decimal digit, least significant first;
        the sum is returned in the same representation.
        """
        carry, digit = divmod(l1.val + l2.val, 10)
        head = ListNode(digit)
        tail = head
        l1, l2 = l1.next, l2.next
        # Walk both lists in lockstep until each is exhausted, propagating
        # the carry between positions.
        while not (l1 is None and l2 is None):
            total = carry
            if l1 is not None:
                total += l1.val
                l1 = l1.next
            if l2 is not None:
                total += l2.val
                l2 = l2.next
            carry, digit = divmod(total, 10)
            tail.next = ListNode(digit)
            tail = tail.next
        # A leftover carry adds one final high-order digit.
        if carry:
            tail.next = ListNode(1)
        return head
if __name__ == "__main__":
    # Smoke test: (2->4->3) + (5->6->4), i.e. 342 + 465 = 807 = (7->0->8).
    solution = Solution()
    # First addend: 342 stored least-significant digit first.
    l0_0 = ListNode(2)
    l0_1 = ListNode(4)
    l0_2 = ListNode(3)
    l0_1.next = l0_2
    l0_0.next = l0_1
    # Second addend: 465.
    l1_0 = ListNode(5)
    l1_1 = ListNode(6)
    l1_2 = ListNode(4)
    l1_1.next = l1_2
    l1_0.next = l1_1
    # Expected sum: 807.
    l2_0 = ListNode(7)
    l2_1 = ListNode(0)
    l2_2 = ListNode(8)
    l2_1.next = l2_2
    l2_0.next = l2_1
    assert l2_0 == solution.addTwoNumbers(l0_0, l1_0)
|
import pandas as pd
from math import factorial
from math import sqrt
import csv
import numpy as np
from itertools import product
# Portfolio allocations: one row per strategy; first five columns are the
# percentage split across [stocks, corp bonds, sov bonds, gold, cash].
trading_methodologies = pd.read_csv('trading_methodologies.csv')
# Asset information: daily price history (with a 'Date' and 'Price' column)
# for each asset class; cash is implicitly priced at 1 everywhere below.
ST = pd.read_csv('amundi-msci-wrld-ae-c.csv')
CB = pd.read_csv('ishares-global-corporate-bond-$.csv')
PB = pd.read_csv('db-x-trackers-ii-global-sovereign-5.csv')
GO = pd.read_csv('spdr-gold-trust.csv')
# Asset prices
def get_asset_prices_endYear(ST, CB, PB, GO, day=365):
    """Return [stock, corp-bond, sov-bond, gold, cash] prices at row ``day``.

    ``day`` defaults to 365 (the last row of a one-year daily history),
    preserving the original hard-coded behaviour; cash is always 1.
    """
    return [ST.Price[day], CB.Price[day], PB.Price[day], GO.Price[day], 1]
def get_asset_prices_total(ST, CB, PB, GO):
    """Return the full daily Price series of each asset plus unit cash."""
    series = [frame.Price for frame in (ST, CB, PB, GO)]
    series.append(1)
    return series
def get_asset_prices_rebal(ST, CB, PB, GO, array_month):
    # For every month in array_month, collect the asset prices at the two
    # rebalancing points: mid-month (the 15th, falling back to 14th/16th)
    # and month start (the 1st, falling back to the previous month's
    # 31st/30th, then the 2nd).  Returns (mid DataFrame, start DataFrame),
    # one row per month with columns ST/CB/PB/GO/CA.
    # NOTE(review): indentation reconstructed from control flow; assumes
    # 'Date' is a 'Mon-DD'-style string — confirm against the CSVs.
    search_values = ['01','02','13','14','15','16','17','30','31']
    # Pre-filter each frame to rows whose date contains a candidate day.
    ST_rebal = ST[ST['Date'].str.contains('|'.join(search_values))]
    CB_rebal = CB[CB['Date'].str.contains('|'.join(search_values))]
    PB_rebal = PB[PB['Date'].str.contains('|'.join(search_values))]
    GO_rebal = GO[GO['Date'].str.contains('|'.join(search_values))]
    tmp = [ST_rebal,CB_rebal,PB_rebal,GO_rebal]
    a = 0
    asset_prices_mid = pd.DataFrame(columns=['ST','CB','PB','GO','CA'])
    asset_prices_ini = pd.DataFrame(columns=['ST','CB','PB','GO','CA'])
    for i in array_month:
        tmp_mid = []
        tmp_ini = []
        for j in range(len(tmp)):
            mid_day = str(i) + "-15"
            first_day = str(i) + "-01"
            # Mid-month price: prefer the 15th, fall back to 14th then 16th.
            if len(tmp[j][tmp[j]['Date'].str.contains(mid_day)]) != 0:
                tmp_mid.append(tmp[j][tmp[j]['Date'].str.contains(mid_day)].iloc[0].Price)
            else:
                mid_day2 = str(i) + "-14"
                mid_day3 = str(i) + "-16"
                if len(tmp[j][tmp[j]['Date'].str.contains(mid_day2)]) != 0:
                    tmp_mid.append(tmp[j][tmp[j]['Date'].str.contains(mid_day2)].iloc[0].Price)
                elif len(tmp[j][tmp[j]['Date'].str.contains(mid_day3)]) != 0:
                    tmp_mid.append(tmp[j][tmp[j]['Date'].str.contains(mid_day3)].iloc[0].Price)
            # Month-start price: the 1st, else the previous month's last
            # trading day (31st/30th), else the 2nd.
            if len(tmp[j][tmp[j]['Date'].str.contains(first_day)]) != 0:
                tmp_ini.append(tmp[j][tmp[j]['Date'].str.contains(first_day)].iloc[0].Price)
            else:
                if i == 'Jan':
                    prev_month = 'Dec'
                else:
                    prev_month = array_month[array_month.index(i) - 1]
                last_day = str(prev_month) + "-31"
                last_day2 = str(prev_month) + "-30"
                next_day = str(i) + "-02"
                if len(tmp[j][tmp[j]['Date'].str.contains(last_day)]) != 0:
                    tmp_ini.append(tmp[j][tmp[j]['Date'].str.contains(last_day)].iloc[0].Price)
                elif len(tmp[j][tmp[j]['Date'].str.contains(last_day2)]) != 0:
                    tmp_ini.append(tmp[j][tmp[j]['Date'].str.contains(last_day2)].iloc[0].Price)
                elif len(tmp[j][tmp[j]['Date'].str.contains(next_day)]) != 0:
                    tmp_ini.append(tmp[j][tmp[j]['Date'].str.contains(next_day)].iloc[0].Price)
        # Cash ('CA') is always priced at 1 (stored as a string here).
        tmp_mid.append('1')
        tmp_ini.append('1')
        asset_prices_mid.loc[a] = tmp_mid
        asset_prices_ini.loc[a] = tmp_ini
        a = a + 1
    return asset_prices_mid, asset_prices_ini
# Investing strategies
def one_off(money, portfolio_allocation, asset_prices):
shares =[]
for i in range(len(portfolio_allocation)):
shares.append(((portfolio_allocation[i])/100 * money)/asset_prices[i])
return shares
def rebalance_function(prev_asset_prices, current_asset_prices):
    """Return each asset's share of the current total as whole percentages.

    ``prev_asset_prices`` is accepted for interface compatibility but is
    not used in the computation.
    """
    total = sum(current_asset_prices)
    return [round(value / total, 2) * 100 for value in current_asset_prices]
def dca(money, portfolio_allocation, period, asset_prices):
monthly_portfolio = list(map(lambda x: x/period, portfolio_allocation))
shares =[]
for i in range(len(monthly_portfolio)):
shares.append(((monthly_portfolio[i])/100 * money)/asset_prices[i])
return shares
def cost(trading_methodology):
    """Add a 'Cost' column: allocation-weighted year-end asset price.

    The first five columns of ``trading_methodology`` are the percentage
    allocations to [stocks, corp bonds, sov bonds, gold, cash]; the cost of
    one portfolio 'unit' is the weighted year-end price (cash priced at 1).
    Mutates ``trading_methodology`` in place; reads the module-level
    ST/CB/PB/GO price tables.
    """
    portfolio_allocations = trading_methodology.iloc[:, :5]
    asset_prices_fd = get_asset_prices_endYear(ST, CB, PB, GO)
    trading_methodology['Cost'] = 0
    for index, portfolio_allocation in portfolio_allocations.iterrows():
        # Renamed from `cost`, which shadowed this function's own name.
        total = 0
        for index2, price in enumerate(asset_prices_fd):
            total = total + price * (portfolio_allocation[index2] / 100)
        # .loc avoids pandas chained assignment (df['Cost'][index] = ...),
        # which can silently write to a temporary copy of the column.
        trading_methodology.loc[index, 'Cost'] = total
def volatility(money, trading_methodologies):
    # Adds a 'Volatility' column: coefficient of variation (std/mean, %) of
    # the daily portfolio value over the 366-day price history, per strategy
    # row.  row[5] selects the buy style ('1-OFF' lump sum vs. monthly DCA)
    # and row[6] whether mid-month rebalancing is applied ('NO' = never).
    # NOTE(review): indentation reconstructed from control flow; reads the
    # module-level ST/CB/PB/GO frames and writes via chained assignment.
    shares = []
    # `a` is a small state machine for the rebalancing walk:
    # 0 = before first buy, 1 = day after a buy/rebalance, 2 = steady state.
    a = 0
    trading_methodologies['Volatility'] = ""
    for index, row in trading_methodologies.iterrows():
        print("Iteration volatility: " + str(index) + "...")
        temp = pd.DataFrame(np.zeros((366,6)), columns = ['ST_Values','CB_Values','PB_Values','GO_Values','CA_Values','Values'])
        shares_values = [((money * (row[0] / 100))),((money * (row[1] / 100))),((money * (row[2] / 100))),((money * (row[3] / 100))),(money * (row[4] / 100))]
        if row[5] == '1-OFF':
            # Lump sum: buy the whole target allocation at day-0 prices.
            shares = [((money * (row[0] / 100)) / ST.Price[0]),((money * (row[1] / 100)) / CB.Price[0]),((money * (row[2] / 100)) / PB.Price[0]),((money * (row[3] / 100)) / GO.Price[0]),(money * (row[4] / 100))]
            if row[6] == 'NO':
                # No rebalancing: daily value is fixed shares times prices.
                temp['ST_Values'] = (shares[0]) * ST.Price
                temp['CB_Values'] = (shares[1]) * CB.Price
                temp['PB_Values'] = (shares[2]) * PB.Price
                temp['GO_Values'] = (shares[3]) * GO.Price
                temp['CA_Values'] = shares[4]
                temp['Values'] = temp.sum(axis=1)
                trading_methodologies['Volatility'][index] = (temp['Values'].std() / temp['Values'].mean()) * 100
            else:
                # Rebalance back to the target split every mid-month ("15").
                for indexST, rowST in ST.iterrows():
                    if ("Jan-01" in rowST['Date']):
                        # Year start: record the initial buy.
                        temp['ST_Values'][indexST] = (shares[0]) * rowST.Price
                        temp['CB_Values'][indexST] = (shares[1]) * CB.iloc[indexST].Price
                        temp['PB_Values'][indexST] = (shares[2]) * PB.iloc[indexST].Price
                        temp['GO_Values'][indexST] = (shares[3]) * GO.iloc[indexST].Price
                        temp['CA_Values'][indexST] = (shares[4])
                        a = 1
                    elif ("15" in rowST['Date']):
                        # Mid-month: mark to market, then re-split the total
                        # back into the target percentages.
                        shares_current_value = [(shares_prev_value[0]/ST.iloc[indexST-1].Price)*rowST.Price,(shares_prev_value[1]/CB.iloc[indexST-1].Price)*CB.iloc[indexST].Price,(shares_prev_value[2]/PB.iloc[indexST-1].Price)*PB.iloc[indexST].Price,(shares_prev_value[3]/GO.iloc[indexST-1].Price)*GO.iloc[indexST].Price,shares_prev_value[4]]
                        current_money = sum(shares_current_value)
                        shares_prev_value = shares_current_value
                        temp['ST_Values'][indexST] = shares[0] * rowST.Price
                        temp['CB_Values'][indexST] = shares[1] * CB.iloc[indexST].Price
                        temp['PB_Values'][indexST] = shares[2] * PB.iloc[indexST].Price
                        temp['GO_Values'][indexST] = shares[3] * GO.iloc[indexST].Price
                        temp['CA_Values'][indexST] = shares[4]
                        shares = [((current_money * (row[0] / 100))) / rowST.Price, ((current_money * (row[1] / 100))) / CB.iloc[indexST].Price, ((current_money * (row[2] / 100))) / PB.iloc[indexST].Price, ((current_money * (row[3] / 100))) / GO.iloc[indexST].Price, (current_money * (row[4] / 100))]
                        a = 1
                    else:
                        if a == 1:
                            # First day after a buy: seed shares_prev_value.
                            shares_current_value = [shares[0]*rowST.Price,shares[1]*CB.iloc[indexST].Price,shares[2]*PB.iloc[indexST].Price,shares[3]*GO.iloc[indexST].Price,shares[4]]
                            rebalance = rebalance_function(shares_values, shares_current_value)
                            current_money = sum(shares_current_value)
                            shares_prev_value = shares_current_value
                            temp['ST_Values'][indexST] = shares[0] * rowST.Price
                            temp['CB_Values'][indexST] = shares[1] * CB.iloc[indexST].Price
                            temp['PB_Values'][indexST] = shares[2] * PB.iloc[indexST].Price
                            temp['GO_Values'][indexST] = shares[3] * GO.iloc[indexST].Price
                            temp['CA_Values'][indexST] = shares[4]
                            a = 2
                        else:
                            # Ordinary day: roll yesterday's value forward by
                            # the day-over-day price ratios.
                            shares_current_value = [(shares_prev_value[0]/ST.iloc[indexST-1].Price)*rowST.Price,(shares_prev_value[1]/CB.iloc[indexST-1].Price)*CB.iloc[indexST].Price,(shares_prev_value[2]/PB.iloc[indexST-1].Price)*PB.iloc[indexST].Price,(shares_prev_value[3]/GO.iloc[indexST-1].Price)*GO.iloc[indexST].Price,shares_prev_value[4]]
                            rebalance = rebalance_function(shares_prev_value, shares_current_value)
                            current_money = sum(shares_current_value)
                            shares_prev_value = shares_current_value
                            temp['ST_Values'][indexST] = shares[0] * rowST.Price
                            temp['CB_Values'][indexST] = shares[1] * CB.iloc[indexST].Price
                            temp['PB_Values'][indexST] = shares[2] * PB.iloc[indexST].Price
                            temp['GO_Values'][indexST] = shares[3] * GO.iloc[indexST].Price
                            temp['CA_Values'][indexST] = shares[4]
                temp['Values'] = temp.sum(axis=1)
                trading_methodologies['Volatility'][index] = (temp['Values'].std() / temp['Values'].mean()) * 100
        else:
            # Dollar-cost averaging: buy one installment on each month's 1st.
            if row[6] == 'NO':
                shares2 = [0,0,0,0,0]
                for indexST, rowST in ST.iterrows():
                    if ("01" in rowST['Date']):
                        shares = dca(money, row[:5], 12, [rowST.Price, CB.iloc[indexST].Price, PB.iloc[indexST].Price, GO.iloc[indexST].Price, 1])
                        shares2 = list(i+j for (i,j) in zip(shares2,shares))
                    # Accumulated holdings valued at today's prices.
                    temp['ST_Values'][indexST] = (shares2[0]) * rowST.Price
                    temp['CB_Values'][indexST] = (shares2[1]) * CB.iloc[indexST].Price
                    temp['PB_Values'][indexST] = (shares2[2]) * PB.iloc[indexST].Price
                    temp['GO_Values'][indexST] = (shares2[3]) * GO.iloc[indexST].Price
                    temp['CA_Values'][indexST] = (shares2[4])
                temp['Values'] = temp.sum(axis=1)
                trading_methodologies['Volatility'][index] = (temp['Values'].std() / temp['Values'].mean()) * 100
            else:
                # DCA plus mid-month rebalancing.
                shares_prev_value = [0,0,0,0,0]
                for indexST, rowST in ST.iterrows():
                    if ("01" in rowST['Date']):
                        # Month start: roll holdings forward, then add the
                        # new installment.
                        shares = dca(money, row[:5], 12, [rowST.Price, CB.iloc[indexST].Price, PB.iloc[indexST].Price, GO.iloc[indexST].Price, 1])
                        shares_current_value = [(shares_prev_value[0]/ST.iloc[indexST-1].Price)*rowST.Price,(shares_prev_value[1]/CB.iloc[indexST-1].Price)*CB.iloc[indexST].Price,(shares_prev_value[2]/PB.iloc[indexST-1].Price)*PB.iloc[indexST].Price,(shares_prev_value[3]/GO.iloc[indexST-1].Price)*GO.iloc[indexST].Price,shares_prev_value[4]]
                        shares_current = [shares_current_value[0] / rowST.Price, shares_current_value[1] / CB.iloc[indexST].Price, shares_current_value[2] / PB.iloc[indexST].Price, shares_current_value[3] / GO.iloc[indexST].Price, shares_current_value[4]]
                        shares = list(i+j for (i,j) in zip(shares_current, shares))
                        temp['ST_Values'][indexST] = (shares[0]) * rowST.Price
                        temp['CB_Values'][indexST] = (shares[1]) * CB.iloc[indexST].Price
                        temp['PB_Values'][indexST] = (shares[2]) * PB.iloc[indexST].Price
                        temp['GO_Values'][indexST] = (shares[3]) * GO.iloc[indexST].Price
                        temp['CA_Values'][indexST] = (shares[4])
                        a = 1
                    elif ("15" in rowST['Date']):
                        # Mid-month rebalance back to the target split.
                        shares_current_value = [(shares_prev_value[0]/ST.iloc[indexST-1].Price)*rowST.Price,(shares_prev_value[1]/CB.iloc[indexST-1].Price)*CB.iloc[indexST].Price,(shares_prev_value[2]/PB.iloc[indexST-1].Price)*PB.iloc[indexST].Price,(shares_prev_value[3]/GO.iloc[indexST-1].Price)*GO.iloc[indexST].Price,shares_prev_value[4]]
                        current_money = sum(shares_current_value)
                        shares_prev_value = shares_current_value
                        temp['ST_Values'][indexST] = (shares[0]) * rowST.Price
                        temp['CB_Values'][indexST] = (shares[1]) * CB.iloc[indexST].Price
                        temp['PB_Values'][indexST] = (shares[2]) * PB.iloc[indexST].Price
                        temp['GO_Values'][indexST] = (shares[3]) * GO.iloc[indexST].Price
                        temp['CA_Values'][indexST] = (shares[4])
                        shares = [((current_money * (row[0] / 100))) / rowST.Price, ((current_money * (row[1] / 100))) / CB.iloc[indexST].Price, ((current_money * (row[2] / 100))) / PB.iloc[indexST].Price, ((current_money * (row[3] / 100))) / GO.iloc[indexST].Price, (current_money * (row[4] / 100))]
                        a = 1
                    else:
                        if a == 1:
                            # First day after a buy/rebalance: seed
                            # shares_prev_value from current share counts.
                            shares_current_value = [shares[0]*rowST.Price,shares[1]*CB.iloc[indexST].Price,shares[2]*PB.iloc[indexST].Price,shares[3]*GO.iloc[indexST].Price,shares[4]]
                            rebalance = rebalance_function(shares_values, shares_current_value)
                            current_money = sum(shares_current_value)
                            shares_prev_value = shares_current_value
                            temp['ST_Values'][indexST] = (shares[0]) * rowST.Price
                            temp['CB_Values'][indexST] = (shares[1]) * CB.iloc[indexST].Price
                            temp['PB_Values'][indexST] = (shares[2]) * PB.iloc[indexST].Price
                            temp['GO_Values'][indexST] = (shares[3]) * GO.iloc[indexST].Price
                            temp['CA_Values'][indexST] = (shares[4])
                            a = 2
                        else:
                            # Ordinary day: roll yesterday's value forward.
                            shares_current_value = [(shares_prev_value[0]/ST.iloc[indexST-1].Price)*rowST.Price,(shares_prev_value[1]/CB.iloc[indexST-1].Price)*CB.iloc[indexST].Price,(shares_prev_value[2]/PB.iloc[indexST-1].Price)*PB.iloc[indexST].Price,(shares_prev_value[3]/GO.iloc[indexST-1].Price)*GO.iloc[indexST].Price,shares_prev_value[4]]
                            rebalance = rebalance_function(shares_prev_value, shares_current_value)
                            current_money = sum(shares_current_value)
                            shares_prev_value = shares_current_value
                            temp['ST_Values'][indexST] = (shares[0]) * rowST.Price
                            temp['CB_Values'][indexST] = (shares[1]) * CB.iloc[indexST].Price
                            temp['PB_Values'][indexST] = (shares[2]) * PB.iloc[indexST].Price
                            temp['GO_Values'][indexST] = (shares[3]) * GO.iloc[indexST].Price
                            temp['CA_Values'][indexST] = (shares[4])
                temp['Values'] = temp.sum(axis=1)
                trading_methodologies['Volatility'][index] = (temp['Values'].std() / temp['Values'].mean()) * 100
def return_function(money, trading_methodologies):
    # Adds Return_1M/3M/6M/9M/12M columns: percentage return of each strategy
    # at five 2020 checkpoint dates, for lump-sum/DCA with and without
    # mid-month rebalancing.
    # NOTE(review): indentation reconstructed from control flow; reads the
    # module-level ST/CB/PB/GO frames and writes via chained assignment.
    return_options = [1,3,6,9,12]
    trading_methodologies['Return_1M'] = 0.0
    trading_methodologies['Return_3M'] = 0.0
    trading_methodologies['Return_6M'] = 0.0
    trading_methodologies['Return_9M'] = 0.0
    trading_methodologies['Return_12M'] = 0.0
    return_months = ["Jan-31-2020","Mar-31-2020","Jun-30-2020","Sep-30-2020","Dec-31-2020"]
    # Lookup table: one row per checkpoint with each asset's price that day.
    temp = pd.DataFrame(np.zeros((5,6)), columns = ['Period','ST_price','CB_price','PB_price','GO_price','CA_price'])
    for index, i in enumerate(return_options):
        temp.Period[index] = i
        maskST = ST.Date == return_months[index]
        st_price = ST.Price[maskST]
        temp.ST_price[index] = st_price
        maskCB = CB.Date == return_months[index]
        cb_price = CB.Price[maskCB]
        temp.CB_price[index] = cb_price
        maskPB = PB.Date == return_months[index]
        pb_price = PB.Price[maskPB]
        temp.PB_price[index] = pb_price
        maskGO = GO.Date == return_months[index]
        go_price = GO.Price[maskGO]
        temp.GO_price[index] = go_price
        temp.CA_price[index] = 1
    for index, row in trading_methodologies.iterrows():
        print("Iteration return: " + str(index) + "...")
        return_bymonths = []
        shares_values = [((money * (row[0] / 100))),((money * (row[1] / 100))),((money * (row[2] / 100))),((money * (row[3] / 100))),(money * (row[4] / 100))]
        if row[5] == '1-OFF':
            # Lump sum: one buy at day-0 prices.
            shares = [((money * (row[0] / 100)) / ST.Price[0]),((money * (row[1] / 100)) / CB.Price[0]),((money * (row[2] / 100)) / PB.Price[0]),((money * (row[3] / 100)) / GO.Price[0]),(money * (row[4] / 100))]
            buy_amount = shares[0] * ST.Price[0] + shares[1] * CB.Price[0] + shares[2] * PB.Price[0] + shares[3] * GO.Price[0] + shares[4] * 1
            if row[6] == 'NO':
                # Fixed shares: value each checkpoint from the lookup table.
                for i, j in enumerate(return_options):
                    return_column = f"Return_{j}M"
                    current_value = temp.ST_price[i]*shares[0]+temp.CB_price[i]*shares[1]+temp.PB_price[i]*shares[2]+temp.GO_price[i]*shares[3]+temp.CA_price[i]*shares[4]
                    portfolio_return = ((current_value - buy_amount)/buy_amount) * 100
                    trading_methodologies[return_column][index] = round(portfolio_return,2)
            else:
                # Walk the year, rebalancing mid-month and recording the
                # return at each checkpoint date.
                for indexST, rowST in ST.iterrows():
                    if ("15" in rowST['Date']):
                        shares_current_value = [(shares_prev_value[0]/ST.iloc[indexST-1].Price)*rowST.Price,(shares_prev_value[1]/CB.iloc[indexST-1].Price)*CB.iloc[indexST].Price,(shares_prev_value[2]/PB.iloc[indexST-1].Price)*PB.iloc[indexST].Price,(shares_prev_value[3]/GO.iloc[indexST-1].Price)*GO.iloc[indexST].Price,shares_prev_value[4]]
                        current_money = sum(shares_current_value)
                        shares_prev_value = shares_current_value
                        shares = [((current_money * (row[0] / 100))) / rowST.Price, ((current_money * (row[1] / 100))) / CB.iloc[indexST].Price, ((current_money * (row[2] / 100))) / PB.iloc[indexST].Price, ((current_money * (row[3] / 100))) / GO.iloc[indexST].Price, (current_money * (row[4] / 100))]
                    elif (rowST['Date'] in return_months):
                        index_month = return_months.index(rowST['Date'])
                        index_options = return_options[index_month]
                        current_value = temp.ST_price[temp['Period'] == index_options]*shares[0]+temp.CB_price[temp['Period'] == index_options]*shares[1]+temp.PB_price[temp['Period'] == index_options]*shares[2]+temp.GO_price[temp['Period'] == index_options]*shares[3]+temp.CA_price[temp['Period'] == index_options]*shares[4]
                        portfolio_return = ((current_value - buy_amount)/buy_amount) * 100
                        mask = f"Return_{index_options}M"
                        trading_methodologies[mask][index] = round(portfolio_return,2)
                    elif ("14" in rowST['Date']):
                        # Day before rebalance: snapshot current value so the
                        # "15" branch can mark to market.
                        shares_current_value = [shares[0]*rowST.Price,shares[1]*CB.iloc[indexST].Price,shares[2]*PB.iloc[indexST].Price,shares[3]*GO.iloc[indexST].Price,shares[4]]
                        rebalance = rebalance_function(shares_values, shares_current_value)
                        current_money = sum(shares_current_value)
                        shares_prev_value = shares_current_value
        else:
            # DCA: invested capital accumulates with each installment, so the
            # return base (buy_amount_total) grows through the year.
            buy_amount_total = 0
            if row[6] == 'NO':
                shares2 = [0,0,0,0,0]
                for indexST, rowST in ST.iterrows():
                    if ("01" in rowST['Date']):
                        shares = dca(money, row[:5], 12, [rowST.Price, CB.iloc[indexST].Price, PB.iloc[indexST].Price, GO.iloc[indexST].Price, 1])
                        shares2 = list(i+j for (i,j) in zip(shares2,shares))
                        buy_amount = shares[0] * ST.Price[indexST] + shares[1] * CB.Price[indexST] + shares[2] * PB.Price[indexST] + shares[3] * GO.Price[indexST] + shares[4] * 1
                        buy_amount_total = buy_amount_total + buy_amount
                    elif (rowST['Date'] in return_months):
                        index_month = return_months.index(rowST['Date'])
                        index_options = return_options[index_month]
                        current_value = temp.ST_price[temp['Period'] == index_options]*shares2[0]+temp.CB_price[temp['Period'] == index_options]*shares2[1]+temp.PB_price[temp['Period'] == index_options]*shares2[2]+temp.GO_price[temp['Period'] == index_options]*shares2[3]+temp.CA_price[temp['Period'] == index_options]*shares2[4]
                        portfolio_return = ((current_value - buy_amount_total)/buy_amount_total) * 100
                        mask = f"Return_{index_options}M"
                        trading_methodologies[mask][index] = round(portfolio_return,2)
            else:
                # DCA plus mid-month rebalancing.
                shares_prev_value = [0,0,0,0,0]
                shares2 = [0,0,0,0,0]
                for indexST, rowST in ST.iterrows():
                    if ("01" in rowST['Date']):
                        shares = dca(money, row[:5], 12, [rowST.Price, CB.iloc[indexST].Price, PB.iloc[indexST].Price, GO.iloc[indexST].Price, 1])
                        shares_current_value = [shares2[0]*rowST.Price,shares2[1]*CB.iloc[indexST].Price,shares2[2]*PB.iloc[indexST].Price,shares2[3]*GO.iloc[indexST].Price,shares2[4]]
                        shares2 = list(i+j for (i,j) in zip(shares2, shares))
                        buy_amount = shares[0] * ST.Price[indexST] + shares[1] * CB.Price[indexST] + shares[2] * PB.Price[indexST] + shares[3] * GO.Price[indexST] + shares[4] * 1
                        buy_amount_total = buy_amount_total + buy_amount
                    elif ("15" in rowST['Date']):
                        # Mid-month: re-split the current total back into the
                        # target percentages.
                        shares2 = [((current_money * (row[0] / 100))) / rowST.Price, ((current_money * (row[1] / 100))) / CB.iloc[indexST].Price, ((current_money * (row[2] / 100))) / PB.iloc[indexST].Price, ((current_money * (row[3] / 100))) / GO.iloc[indexST].Price, (current_money * (row[4] / 100))]
                        shares_current_value = [shares2[0]*rowST.Price,shares2[1]*CB.iloc[indexST].Price,shares2[2]*PB.iloc[indexST].Price,shares2[3]*GO.iloc[indexST].Price,shares2[4]]
                        shares_prev_value = shares_current_value
                    elif (rowST['Date'] in return_months):
                        index_month = return_months.index(rowST['Date'])
                        index_options = return_options[index_month]
                        current_value = temp.ST_price[temp['Period'] == index_options]*shares2[0]+temp.CB_price[temp['Period'] == index_options]*shares2[1]+temp.PB_price[temp['Period'] == index_options]*shares2[2]+temp.GO_price[temp['Period'] == index_options]*shares2[3]+temp.CA_price[temp['Period'] == index_options]*shares2[4]
                        portfolio_return = ((current_value - buy_amount_total)/buy_amount_total) * 100
                        mask = f"Return_{index_options}M"
                        trading_methodologies[mask][index] = round(portfolio_return,2)
                    elif ("14" in rowST['Date']):
                        # Day before rebalance: snapshot value for the "15"
                        # branch's re-split.
                        shares_current_value = [shares2[0]*rowST.Price,shares2[1]*CB.iloc[indexST].Price,shares2[2]*PB.iloc[indexST].Price,shares2[3]*GO.iloc[indexST].Price,shares2[4]]
                        current_money = sum(shares_current_value)
                        shares_prev_value = shares_current_value
# Driver: compute cost, volatility and checkpoint returns for every strategy
# (1,000,000 invested), then persist the enriched table.
cost(trading_methodologies)
volatility(1000000,trading_methodologies)
return_function(1000000,trading_methodologies)
trading_methodologies.to_csv('portfolio_metrics.csv', header=True, index=False)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Dusan Klinec, ph4r05
import os
import ctypes as ct
from trezor_crypto import trezor_ctypes as tt
from trezor_crypto import mod_base
# Loaded library instance
CLIB = None
def open_lib(lib_path=None, try_env=False, no_init=False):
    """
    Opens the Trezor-Crypto shared library and stores it in module-level CLIB.

    :param lib_path: explicit path to the shared object; when None, the
        build output directory is searched for a ``tcry_ctype`` extension.
    :param try_env: also consult the LIBTREZOR_CRYPTO_PATH env variable.
    :param no_init: skip prototype setup and RNG initialization.
    :return: the loaded ctypes library handle.
    :raises FileNotFoundError: when no usable library file can be located.
    """
    global CLIB
    ext_fpath = lib_path
    if ext_fpath is None:
        ext_base = 'tcry_ctype'
        mods, basedir = mod_base.get_ext_outputs()
        ext_name = '%s%s' % (ext_base, mod_base.get_mod_suffix())
        extensions = ['.so', '.dylib', '.dll', '.pyd']
        ext_guesses = ['%s%s' % (ext_base, x) for x in extensions]
        if ext_name in mods:
            ext_fpath = os.path.join(basedir, ext_name)
        else:
            for g in ext_guesses:
                if g in mods:
                    ext_fpath = os.path.join(basedir, g)
                    # Take the first matching candidate; the original kept
                    # scanning and silently used the LAST match.
                    break
    if ext_fpath is None and try_env:
        ext_fpath = os.getenv('LIBTREZOR_CRYPTO_PATH', None)
    if ext_fpath is None or not os.path.exists(ext_fpath):
        # Include the attempted path to make the failure diagnosable.
        raise FileNotFoundError('Trezor-Crypto lib not found: %s' % ext_fpath)
    CLIB = ct.cdll.LoadLibrary(ext_fpath)
    if not no_init:
        setup_lib(CLIB)
        init_lib()
    return CLIB
def cl():
    """Accessor for the module-level CLIB handle (None until open_lib runs)."""
    return CLIB
def init_lib():
    """
    Initializes the Trezor crypto library's RNG via the loaded CLIB.

    :return: the (non-negative) status code from random_init.
    :raises ValueError: when the native initializer reports failure.
    """
    status = cl().random_init()
    if status < 0:
        raise ValueError('Library initialization error: %s' % status)
    return status
def setup_lib(CLIB):
    """
    Setup the CLIB - define fncs

    NOTE(review): this module appears to be a code-generation template; the
    ``{{ ... }}`` markers below are substituted by the generator. Do not
    edit or remove them.
    :param CLIB:
    :return:
    """
    # {{ SETUP_LIB }}
#
# Wrappers
#
# {{ WRAPPERS }}
|
from objects.trips import Trip
def reading(path):
    """Parse a whitespace-separated integer matrix from the file at ``path``.

    Splitting on '\\n' leaves one empty trailing entry for a file ending in
    a newline; that final (empty) row is discarded.
    """
    with open(path, 'r') as handle:
        raw = handle.read()
    rows = [list(map(int, chunk.split())) for chunk in raw.split('\n')]
    rows.pop(-1)
    return rows
def reading_2(data):
    """Build Trip objects from parsed input rows.

    ``data[0]`` is the problem header [rows, cols, n_cars, n_rides, bonus, t];
    each following row describes one ride as
    [x_start, y_start, x_finish, y_finish, t_start, t_finish].
    Returns (rides, rows, cols, n_cars, bonus, t).
    """
    rows, cols, n_cars, n_rides, bonus, t = data[0]
    # enumerate assigns each ride its positional id.  The original
    # `data.index(trip) - 1` lookup returned the FIRST matching row, so
    # duplicate rides all received the same (wrong) id — and the repeated
    # linear scan made parsing O(n^2).
    rides = [
        Trip(ride_id, trip[0], trip[1], trip[2], trip[3], trip[4], trip[5])
        for ride_id, trip in enumerate(data[1:])
    ]
    return rides, int(rows), int(cols), int(n_cars), int(bonus), int(t)
def get_coordinates(data):
    """Split ride rows into coordinate and time lists (header row excluded).

    Returns (x, y, time_s, time_f, xy_s, xy_f) where x/y interleave start
    and finish coordinates, xy_s/xy_f pair up start and finish points, and
    time_s/time_f hold the earliest-start and latest-finish times.
    """
    x, y = [], []
    xy_s, xy_f = [], []
    time_s, time_f = [], []
    for ride in data[1:]:
        sx, sy, fx, fy, ts, tf = ride[0], ride[1], ride[2], ride[3], ride[4], ride[5]
        x += [sx, fx]
        y += [sy, fy]
        xy_s += [sx, sy]
        xy_f += [fx, fy]
        time_s.append(ts)
        time_f.append(tf)
    return x, y, time_s, time_f, xy_s, xy_f
|
# Generated by Django 2.2.6 on 2019-10-18 20:50
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds the boolean ``save_address``
    # flag (default False) to checkout.ShippingAddress.

    dependencies = [
        ('checkout', '0003_auto_20191018_2241'),
    ]

    operations = [
        migrations.AddField(
            model_name='shippingaddress',
            name='save_address',
            field=models.BooleanField(default=False),
        ),
    ]
|
import numpy as np
import os
from hylite.project import PMap
def savePMap(path, pmap):
    """
    Save a PMap instance to a compressed .npz archive.

    *Arguments*:
     - path = the path to save to.
     - pmap = the PMap instance to save.
    """
    points, pixels, depths = pmap.get_flat()
    # Pack the map geometry alongside the flattened correspondence arrays.
    shape = np.array([pmap.xdim, pmap.ydim, pmap.npoints])
    np.savez_compressed(path, dims=shape, points=points, pixels=pixels, depths=depths)
def loadPMap(path):
    """
    Load a PMap instance from a numpy .npz archive.

    *Arguments*:
     - path = the file path to load from (a missing '.npz' suffix is added
       automatically).
    *Returns*:
     - a PMap instance loaded from the file.
    *Raises*:
     - AssertionError if the file is missing or lacks a required attribute.
    """
    # check extension
    if not os.path.exists(path):
        path += ".npz"  # try adding npz extension to see if that helps
    assert os.path.exists(path), "Error - file not found: %s" % path

    # Load and validate.  The original repeated a close-then-`assert False`
    # pair per key; `assert False` is stripped under `python -O`, so raise
    # AssertionError explicitly (same exception type for callers), and use
    # try/finally so the archive is always closed.
    data = np.load(path)
    try:
        for key in ("dims", "points", "pixels", "depths"):
            if key not in data:
                raise AssertionError(
                    "Error - npz does not contain a '%s' attribute." % key)
        # extract attributes (materialized before the archive is closed)
        xdim, ydim, npoints = data["dims"]
        points = data["points"]
        pixels = data["pixels"]
        depths = data["depths"]
    finally:
        data.close()

    # create new PMap and populate with data
    pm = PMap(xdim, ydim, npoints)
    pm.set_flat(points, pixels, depths)
    return pm
from django.conf.urls import url
from app import views
# Legacy url() route table mapping REST-ish endpoints to app.views handlers.
# NOTE(review): several list endpoints (e.g. 'flags/all/') lack a trailing
# '$', so they match as prefixes — confirm this is intentional.
urlpatterns = [
    # Flags
    url(r'^flags/new/$', views.flags_new),
    url(r'^flags/all/', views.flags_all),
    url(r'^flags/open/', views.open_flags),
    url(r'^flag/([0-9]+)/$', views.flag),
    # Sh0ts
    url(r'^sh0ts/new/$', views.sh0ts_new),
    url(r'^sh0ts/all/', views.sh0ts_all),
    url(r'^sh0t/([0-9]+)/$', views.sh0t),
    # Assessments
    url(r'^assessments/new/$', views.assessments_new),
    url(r'^assessments/all/', views.assessments_all),
    url(r'^assessment/([0-9]+)/$', views.assessment),
    # Projects
    url(r'^projects/new/$', views.projects_new),
    url(r'^projects/all/', views.projects_all),
    url(r'^project/([0-9]+)/$', views.project),
    # Templates and master data
    url(r'^templates/$', views.templates),
    url(r'^template/([0-9]+)/$', views.template),
    url(r'^case-masters/$', views.case_masters),
    url(r'^case-master/([0-9]+)/$', views.case_master),
    url(r'^module-masters/$', views.module_masters),
    url(r'^module-master/([0-9]+)/$', views.module_master),
    url(r'^methodology-masters/$', views.methodology_masters),
    url(r'^methodology-master/([0-9]+)/$', views.methodology_master),
]
from django.contrib import admin
from catalog.models import Country, MovieRole, MovieFigure, Genre, AuthorReview, Review, Film
# Expose the catalog models in the Django admin with default ModelAdmins.
admin.site.register(Country)
admin.site.register(MovieRole)
admin.site.register(MovieFigure)
admin.site.register(Genre)
admin.site.register(AuthorReview)
admin.site.register(Review)
admin.site.register(Film)
|
# -*- coding:utf8 -*-
#encoding = utf-8
txt = """
双肩包 情侣包 旅行包 登山包 运动服 休闲服 春秋装 情侣装 猫眼石 水晶兔 珍珠串 首饰盒 电冰箱 洗衣机 电风扇 淋浴器 国宴酒 婚庆酒 礼品酒 高度酒 学步车 羊奶粉 孕妇装 婴儿床 布沙发 席梦思 竹凉席 餐饮具 盆栽花 水族箱 宠物犬 波斯猫
女装男装
潮流女装 羽绒服 毛呢大衣 毛衣 冬季外套 新品 裤子 连衣裙 腔调 时尚男装 秋冬新品 淘特莱斯 淘先生 拾货 秋冬外套 时尚套装 潮牌 爸爸装 性感内衣 春新品 性感诱惑 甜美清新 简约优雅 奢华高贵 运动风 塑身 基础内衣 羽绒服 轻薄款 长款 短款 毛领 加厚 被子 鹅绒 新品 秋外套 秋款 夹克 卫衣 西装 风衣 皮衣 毛呢外套 薄羽绒 文胸 无钢圈 无痕文胸 蕾丝内衣 运动文胸 聚拢文胸 大码文胸 抹胸式 隐形 呢外套 廓形 双面呢 羊绒 中长款 短款 毛领 设计师款 系带 衬衫/T恤 T恤 长袖T 打底衫 纯色 衬衫 长袖款 商务款 时尚款 家居服 睡衣套装 睡裙 睡袍浴袍 外穿家居 女士睡衣 男士睡衣 情侣睡衣 亲子睡衣 毛衣 马海毛 貂绒 羊绒 羊毛 开衫 中长款 短款 卡通 男士裤子 休闲裤 工装裤 运动裤 长裤 牛仔裤 小脚裤 哈伦裤 直筒裤 内裤 女士内裤 男士内裤 三角裤 平角裤 丁字裤 阿罗裤 星期裤 低腰 外套上衣 外套 套装 风衣 卫衣 真皮皮衣 马甲 小西装 唐装 中老年 针织毛衫 薄毛衣 针织开衫 圆领毛衣 V领毛衣 纯色毛衣 民族风 羊毛衫 羊绒衫 丝袜 船袜 男人袜 连裤袜 隐形袜 收腹裤 塑身衣 美体裤 收腹带
鞋类箱包
女鞋 帆布鞋 高帮 低帮 内增高 懒人鞋 厚底 韩版 系带 情侣款 运动风鞋 厚底 内增高 星星鞋 系带 潮流女包 上新 人气款 单肩包 斜挎包 手提包 迷你包 手拿包 小方包 帽子 棒球帽 鸭舌帽 遮阳帽 渔夫帽 草帽 平顶帽 嘻哈帽 贝雷帽 牛仔帽 爵士帽 单鞋 高跟 平底 厚底 中跟 粗跟 坡跟 浅口 尖头 圆头 运动款 头层牛皮 内增高 松糕鞋 豆豆鞋 精品男包 商务 休闲 潮范 胸包 腰包 单肩 斜跨 手提 手拿 帆布 牛皮 腰带 女士腰带 男士皮带 帆布腰带 腰封 腰链 针扣头 平滑扣 自动扣 真皮 正品 运动风鞋 厚底 内增高 星星鞋 系带 一脚蹬 魔术贴 气垫 网状 双肩包 印花 铆钉 水洗皮 卡通 原宿 糖果色 商务 运动 帆布 牛皮 围巾 女士围巾 男士围巾 披肩 丝巾 假领 小方巾 三角巾 大方巾 真丝 雪纺 棉质 亚麻 蕾丝 男鞋 青春潮流 商务皮鞋 休闲皮鞋 正装皮鞋 商务休闲 布洛克 内增高 反绒皮 真皮 潮流低帮 韩版 英伦 复古 铆钉 编织 豹纹 大头 旅行箱 拉杆箱 密码箱 学生箱 子母箱 拉杆包 万向轮 飞机轮 航空箱 铝框 手套 女士手套 男士手套 真皮手套 蕾丝手套 防晒手套 半指手套 分指手套 连指手套 短款手套 长款手套 休闲男鞋 皮鞋 低帮 反绒皮 大头鞋 豆豆鞋 帆船鞋 懒人鞋 帆布/板鞋 高帮 凉鞋/拖鞋 沙滩鞋 人字拖 皮凉鞋 洞洞鞋 热门 钱包 潮包馆 真皮包 手机包 大牌 coach MK MCM 其他配件 毛线 鞋垫 鞋带 领带 领结 袖扣 手帕 布面料 耳套 领带夹 婚纱配件 皮带扣
母婴用品
宝宝奶粉 英国牛栏 英国爱他美 美赞臣 雅培 澳洲爱他美 可瑞康 惠氏 贝因美 婴童用品 推车 驱蚊器 婴儿床 理发器 奶瓶 餐椅 背带腰凳 安全座椅 孕产必备 内衣 内裤 喂奶枕 收腹带 妈咪包 待产包 防辐射服 储奶袋 辅食营养 米粉 肉松 磨牙棒 果泥 益生菌 清火开胃 钙铁锌 维生素 纸尿裤 花王 moony 大王 帮宝适 雀氏 好奇 妈咪宝贝 安儿乐 海外直邮 海淘奶粉 海淘辅食 海淘营养品 直邮花王 海淘洗护 海淘奶瓶 海淘餐具 海淘孕产 童装 T恤 连衣裙 泳装 套装 衬衫 防晒服 半身裙 短裤 童鞋 凉鞋 沙滩鞋 洞洞鞋 网鞋 学步鞋 拖鞋 帆布鞋 宝宝鞋 亲子鞋服 母女裙 父子装 亲子T恤 亲子衬衫 亲子套装 母女鞋 父子鞋 家庭鞋 玩具 沙滩戏水 早教启蒙 拼插益智 遥控模型 运动户外 学习爱好 卡通公仔 亲子互动 童车 电动车 自行车 学步车 手推车 三轮车 滑板车 扭扭车 儿童轮滑 早教启蒙 早教机 点读机 健身架 布书 串/绕珠 床/摇铃 爬行垫 木质拼图
护肤彩妆
美容护肤 卸妆 面膜 洁面 防晒 面霜 爽肤水 眼霜 乳液 换季保养 补水 美白 收缩毛孔 控油 祛痘 祛斑 去黑眼圈 去黑头 超值彩妆 BB霜 粉底液 唇膏 隔离 遮瑕 指甲油 粉饼 彩妆套装 香氛精油 女士香水 男士香水 中性香水 淡香水 古龙水 香精 复方精油 香体乳 美发造型 洗发水 护发素 染发 烫发 造型 假发 洗护套装 假发配件 纤体塑身 美胸 纤体 胸部护理 身体护理 塑身 脱毛 手部保养 足部护理 眼部彩妆 眼线 睫毛膏 眼影 眉笔 假睫毛 眼霜 双眼皮贴 眼部护理 男士护理 劲能醒肤 清洁面膜 男性主义 剃须膏 男士套装 男士防晒 火山岩 爽身走珠 海外直邮 抗皱 抗敏感 保湿 去眼袋 滋润 抗氧化 深层清洁 热门品牌 雅诗兰黛 兰蔻 资生堂 自然乐园 SK-II 悦诗风吟 水宝宝 契尔氏 新品推荐 芦荟胶 彩妆盘 腮红 香氛 高光棒 修容 V脸 去角质 口碑大赏 洁面 爽肤水 精华 乳液 鼻贴 马油
汇吃美食
休闲零食 牛肉干 鲜花饼 红枣 糖果 巧克力 山核桃 松子 卤味 饼干 话梅 蔓越莓 薯片 生鲜果蔬 奇异果 芒果 樱桃 橙子 秋葵 苹果 番茄 柠檬 椰子 榴莲 粮油调味 大米 橄榄油 小米 黄豆 赤豆 火腿 香肠 木耳 香菇 豆瓣酱 水产鲜肉 海参 龙虾 瑶柱 土鸡 牛排 三文鱼 咸鸭蛋 皮蛋 五花肉 北极贝 美酒佳酿 鸡尾酒 红酒 啤酒 白酒 梅酒 洋酒 清酒 滋补酒 茅台 五粮液 牛奶饮料 麦片 咖啡 牛奶 柚子茶 酸梅汤 矿泉水 酵素 藕粉 姜茶 酸奶粉 四季茗茶 铁观音 红茶 花草茶 龙井 普洱 黑茶 碧螺春 毛峰 袋泡茶 白茶 滋补养生 枸杞 人参 石斛 燕窝 雪蛤 蜂蜜 天麻 花粉 党参 红花 全球美食 芒果干 鱼子酱 咖啡 橄榄油 薯片 巧克力 咖喱 方便面 红酒 麦片
珠宝配饰
时尚饰品 项链 手链 戒指 发饰 银饰 水晶 耳饰 手镯 珠宝首饰 翡翠 彩宝 蜜蜡 裸钻 珍珠 黄金 钻石 金条 最热单品 和田玉 翡翠 水晶/佛珠 黄金 手表 眼镜 品质手表 瑞士表 机械表 时装表 儿童表 电子表 情侣表 石英表 手表配件 潮流眼镜 太阳镜 偏光镜 近视镜 司机镜 护目镜 眼镜配件 运动镜 老花镜 绅士配件 zippo 电子烟 烟斗 瑞士军刀 绝美酒具 风格男表 手链 佛珠 水晶 碧玺 925银 施华洛 翡翠 珍珠 黄金 项链吊坠 银项链 流行风格 天然水晶 锆石水晶 佛珠项链 人造水晶 手镯 925银 翡翠 和田玉 复古泰银 粉晶手镯 黄金手镯 发饰 日韩 甜美 复古/宫廷 欧美 瑞丽 波西米亚 民族风 新娘配饰 发饰 项链 套装 耳饰 韩式 头饰 三件套 DIY饰品 合金配件 银饰 水晶配珠 琉璃 珍珠母贝 有机玻璃 人造水晶
家装建材
装修设计 设计师 半包装修 全包装修 全案装修 装修监理 清包施工 局部装修 验房量房 装修空气质量检测 装修污染治理 全屋定制 整体橱柜 定制衣柜 定制吊顶 定制淋浴房 门 窗 定制柜 楼梯 榻榻米定制 地暖 灯具灯饰 吸顶灯 吊灯 吸吊两用灯 筒灯 射灯 台灯 落地灯 室外灯 壁灯 小夜灯 卫浴用品 浴室柜 普通马桶 花洒套装 一体智能马桶 智能马桶盖板 淋浴房 面盆龙头 地漏 五金挂件 浴霸 墙纸 PVC墙纸 无纺布墙纸 纯纸墙纸 墙布 沙粒墙纸 绒面墙纸 定制壁画 3D墙纸 地板 实木地板 实木复合地板 强化复合地板 竹地板 户外地板 PVC地板 防静电地板 防潮膜 踢脚线 地板龙骨 瓷砖 仿古砖 釉面砖 玻化砖 微晶石 马赛克 抛晶砖 通体砖 花片 腰线 瓷砖背景墙 电子电工 插座 开关 电线 监控器材 智能家居 防盗报警器材 消防报警设备 接线板插头 布线箱 断路器 基础建材 涂料乳胶漆 油漆 水管 板材 木方 阳光房 线条 天然大理石 人造大理石 防水涂料
家居家纺
卧室家具 实木床 布艺床 皮艺床 床垫 衣柜 斗柜 梳妆台 子母床 床头柜 儿童床 客厅家具 皮艺沙发 布艺沙发 沙发床 实木沙发 懒人沙发 电视柜 茶几 鞋柜 玄关厅 衣帽架 餐厅家具 餐桌 折叠餐桌 欧式餐桌 实木餐桌 大理石餐桌 餐椅 餐边柜 换鞋凳 角柜 屏风 书房家具 餐桌 折叠餐桌 欧式餐桌 实木餐桌 大理石餐桌 餐椅 餐边柜 换鞋凳 角柜 屏风 夏凉床品 蚊帐 三开蚊帐 凉席 凉席套件 冰丝席 藤席 牛皮席 夏凉被 空调被 天丝套件 床单 床笠 全季床品 四件套 全棉套件 被套 蚕丝被 羽绒被 枕头 乳胶枕 记忆枕 床褥 毛毯 居家布艺 定制窗帘 地毯 沙发垫 靠垫 桌布桌旗 飘窗垫 地垫 餐垫 防尘罩 椅垫 成品窗帘 沙发罩 家居摆件 摆件 花瓶 仿真花 台钟闹钟 香薰炉 储物罐 装饰碗盘 木雕 烟灰缸 纸巾盒 蜡烛烛台 仿真饰品 墙饰壁饰 现代装饰画 无框画 后现代画 油画 挂钟 照片墙 新中式 北欧家饰 美式乡村 挂钩搁板 装饰挂钩 壁饰
百货市场
居家日用 扇子 毛巾 浴巾 口罩 隔音耳塞 竹炭包 眼罩 夏季凉拖 居家鞋 夏季清凉 应季百货 湿巾 晴雨伞 驱蚊灯 驱蚊液 冰格 保鲜产品 密封罐 防潮制品 电扇/冰垫 5元小物 收纳整理 被子防尘袋 收纳盒 收纳袋 大衣/西服罩 护洗袋 收纳凳 鞋柜 置物架 桌用收纳 内衣收纳 个人清洁 洗发护发 沐浴露 漱口水 卫生巾 洗手液 牙膏 纸巾 香皂 沐浴球/浴擦/浴刷 指甲刀 清洁工具 剃须刮毛刀 沐浴球 浴室角架 浴帘杆 拖把 垃圾桶 梳子镜子 围裙 百洁布 海绵擦 厨房工具 餐具 锅具 刀具 炖锅 蒸锅 汤锅 煎锅 压力锅 炒锅 菜板砧板 盆碗碟筷 一次性餐桌用品 酒杯酒具 咖啡器具 碗盘碟 刀叉勺 餐具瓷器套装 餐桌小物 饭盒 厨房储物 一次性餐桌用品 茶具杯具 茶具 茶壶 飘逸杯 功夫茶杯 玻璃杯 杯垫 保温杯 马克杯 保温壶 情侣杯 家用杂物 晒衣篮 晾衣杆 脏衣篮 衣架 家庭清洁剂 蓝泡泡 管道疏通器 塑胶手套 医药箱 垃圾袋
汽车·用品
热门新车 汽车首页 新车先购 车海淘 二手车 爱车估价 suv 别克 大众 宝马 品质内饰 座垫 座套 脚垫 香水 旅行床 遮阳挡 挂件摆件 安全座椅 专车专用座垫 脚垫 安全座椅 香水 钥匙包 挂件 座套 后备箱垫 置物箱 汽车导航 智能车机 后视镜 安卓导航 便携GPS DVD导航 电子狗 流动测速 导航软件 记录仪 预警仪 GPS 车机 倒车雷达 智能后视镜 蓝牙 防盗器 MP3 汽车服务 4S保养 电瓶安装 配件安装 隔热膜 洗车卡 镀晶镀膜 连锁保养 上门服务 影音电子 行车记录仪 逆变器 跟踪器 充电器 充气泵 胎压监测 车载冰箱 空气净化 车衣 SUV踏板 晴雨挡 改色膜 汽车车标 车牌架 汽车配件 轮胎 雨刮器 机油滤芯 空气滤芯 空调滤芯 减震 刹车片 火花塞 轮胎 雨刮 机油 高亮大灯 挡泥板 保险杠 车顶架 轮眉 改装达人 轮毂 排气 保险杠 汽车包围 氙气灯 车顶架 脚踏板 大灯总成 尾翼 轮毂 汽车装饰灯 排气筒 尾喉 车身饰条 美容清洗 添加剂 防冻液 玻璃水 车蜡 补漆笔 洗车机 洗车水枪 车掸蜡拖 车蜡 洗车机 补漆笔 抛光机 打蜡海绵 车用水桶 擦车巾 车刷 外饰装潢 装饰条 车贴 尾喉 改色膜 防爆膜 晴雨挡 日行灯 车衣 夏季座垫 遮阳挡 防眩蓝镜 防晒手套
手机数码
手机 iPhone 小米 华为 三星 魅族 纽扣 酷派 VIVO 平板 iPad 小米 三星 10寸 台电 win8 蓝魔 华为 电脑 DIY电脑 一体机 路由器 显示器 学生 CPU 移动硬盘 无线鼠标 笔记本 苹果 联想 Thinkpad 戴尔 华硕 Acer 神州 三星 相机 单反 自拍神器 拍立得 佳能 微单 镜头 卡西欧 尼康 3C配件 充电宝 智能穿戴 蓝牙耳机 iPhone6壳 电脑包 手机贴膜 手机壳套 三脚架 数码配件 保护壳套 炫彩贴膜 移动电源 相机配件 手机零件 自拍神器 移动POS支付 电池 智能设备 儿童手表 Apple Watch 智能手表 智能手环 智能配饰 智能健康 智能排插 智能眼镜 电玩 游戏掌机 家用游戏机 游戏手柄 PS主机 XBOX 任天堂配件 PS主机配件 XBOX配件 网络设备 路由器 网关 交换机 光纤设备 网络存储设备 无线上网卡 TP-LINK 小米路由器 MP3/MP4 MP3 MP4 录音笔 索尼 飞利浦 ipod 爱国者 耳机 存储 U盘 闪存卡 记忆棒 移动硬盘 希捷 三星 Sandisk 金士顿
家电办公
厨房电器 电磁炉 电水壶 料理机 电饭煲 榨汁机 净水器 豆浆机 烤箱 生活电器 电风扇 空调扇 挂烫机 扫地机 吸尘器 加湿器 除湿机 对讲机 空气净化 个护电器 理发器 电子称 美容仪 按摩椅 按摩披肩 血压计 足浴器 电动牙刷 剃须刀 影音电器 耳机 音响 网络机顶盒 麦克风 扩音器 HiFi套装 蓝光DVD 低音炮 办公耗材 打印机 投影仪 硒鼓墨盒 A4纸 一体机 学生文具 保险柜 电纸书 学习机 大家电 冰箱 空调 平板电视 油烟机 燃气灶 消毒柜 厨电套装 热水器 洗衣机 包装用品 包装设备 包装纸箱 塑料袋 包装胶带 铭牌 快递袋 气泡膜 真空机 文化用品 笔记本 文件袋 钢笔 胶粘用品 铅笔 计算器 白板 台历 个性定制 设计定制 企业用品定制 T恤印制 杯子定制 ppt模板 班服定制 洗照片 人偶定制 五金工具 电子电工 气动元件 水泵 阀门 电钻 焊接设备 万用表 雕刻机 商用家具 办公家具 商业设施 办公桌 陈列柜 货架 广告牌 文件柜 沙发 电子元器件 网络设备 电子元器件 路由器 交换机 光纤设备 视频会议 无线安全保密 机柜
更多服务
生活团购 餐饮美食 冰淇淋 火锅 购物卡券 体检配镜 美容美甲 保险理财 婚纱摄影 旅行团购 买房租房 住在帝都 住在魔都 住在杭州 住在南京 住在广州 住在青岛 住在宁波 住在成都 儿童培养 少儿英语 小学教育 潜能开发 家长训练 孕产育儿 少儿绘画 婴幼早教 音乐 淘宝游戏 Q币充值 点卡充值 充游戏币 游戏代练 超值账号 手游充值 电竞比赛 游戏帮派 挑个好房 潇洒一室 靠谱二室 舒适三房 大四室 私藏别墅 景观居所 轨道沿线 学区房 成人教育 实用英语 网站制作 IT技能 会计职称 一对一 办公软件 日语 编程 游戏中心 英雄联盟 剑侠情缘3 征途2 魔域 我叫MT 刀塔传奇 DOTA2 DNF 魔兽世界 吃喝玩乐 自助餐 个性写真 儿童写真 电影票团购 上门服务 周边旅游 境外旅游 基金理财 生活兴趣 魅力健身 时尚美妆 手工DIY 舞蹈 减肥瑜伽 个人形象 美剧英语 摄影 美女陪练 轻松甩肉 基金理财 淘宝美工 办公技能
生活服务
婚庆服务 婚纱摄影 婚礼策划 三亚婚拍 厦门婚拍 青岛婚拍 北京婚拍 杭州婚拍 上海婚拍 新娘跟妆 婚礼跟拍 婚礼司仪 婚车租赁 在线清洗 任意洗 洗外套 洗西装 洗鞋 洗四件套 洗烫衬衫 皮包护理 洗窗帘 洗地毯 在线洗衣 洗礼服 洗玩具 家庭保洁 开荒保洁 厨房保洁 公司保洁 家电清洗 空调清洗 洗油烟机 冰箱清洗 擦玻璃 家政服务 家庭保洁 保洁服务 钟点工 洗衣机清洗 卫生间保洁 汽车服务 上门养车 洗车 封釉镀膜 内饰清洗 空调清洗 汽车维修 充加油卡 年检代办 玻璃贴膜 汽车装饰 底盘装甲 四轮定位 汽车改装 违章代办 汽车隔音 健康服务 上门按摩 常规体检 入职体检 老人体检 四维彩超 孕前检查 体检报告 专业洗牙 烤瓷牙 胃部检测 母婴服务 月嫂 催乳师 育儿嫂 营养师 普通保姆 涉外保姆 产后陪护 临时看护 管家 烧饭阿姨 宠物服务 宠物寄养 宠物美容 宠物配种 宠物洗澡 宠物摄影 宠物托运 宠物训练 宠物医疗 水族服务 宠物绝育 宠物洗牙 宠物造型 宠物体检 家政服务 居家搬家 公司搬运 空调拆装 家电搬运 家具搬运 打孔 电路维修 甲醛测试 开锁换锁 杀虫消毒 高空清洁 除尘除螨 便民服务 跑腿服务 代缴费 叫醒服务 宝宝起名 学车报名 代邮代取 代送鲜花 同城速递 代办档案 机场停车 商务服务 专利申请 法律咨询 专业翻译 开发建站 图片处理 视频制作 名片制作 商标转让 打印 复印 商标注册 私人律师 合同文书 出国翻译 数码维修 手机维修 pad维修 修台式机 相机维修 修笔记本 修复印机 修游戏机 修导航仪 软件服务 延保服务 硬件维修 苹果维修 小米维修 三星维修 安卓刷机 数据恢复 电脑维修 ipad维修 华为维修 重装系统 家电维修 相机维修 硬盘维修 苹果换屏 换主板 招聘服务 名企招聘 高薪岗位 文案编辑 网店推广 开发技术 活动策划 美工设计 金牌客服 大促客服 网页设计 人才认证 图片设计 摄影师 店长 运营主管 客服主管 美工主管
运动户外
运动潮鞋 跑步鞋 篮球鞋 休闲鞋 足球鞋 帆布鞋 训练鞋 徒步鞋 登山鞋 限量版 板鞋 Rosherun 运动服 运动套装 运动卫衣 长裤 皮肤风衣 健身服 球服 耐克 阿迪达斯 三叶草 美津浓 彪马 狼爪 骑行装备 山地车 公路车 骑行服 头盔 装备 零件 工具 护具 折叠车 死飞 水壶架 行李架 球类运动 羽毛球拍 羽毛球服 羽毛球 网球拍 篮球 篮球服 足球 足球服 乒乓球拍 橄榄球 台球 高尔夫 户外野营 吊床 头灯 遮阳棚 望远镜 照明 野营帐篷 野外照明 烧烤炉 望远镜 潜水镜 防潮垫 皮划艇 户外穿戴 皮肤衣 防晒衣 冲锋衣 探路者 速干裤 迷彩服 战术靴 登山鞋 crocs 溯溪鞋 户外鞋 民间运动 麻将机 轮滑 麻将 象棋 雀友 飞镖 桌上足球 风筝 陀螺 空竹 沙袋 太极服 健身运动 甩脂机 轮滑装备 跑步机 舞蹈 瑜伽 哑铃 仰卧板 踏步机 划船机 卧推器 健身车 呼啦圈 瑜伽舞蹈 舞蹈 瑜伽 广场舞 舞蹈鞋 拉丁鞋 广场舞套装 肚皮舞服装 瑜伽垫 瑜伽球 瑜伽服 垂钓用品 鱼饵 套装 路亚 附件 鱼钩 钓鱼工具 船/艇 台钓竿 海钓竿 溪流竿 路亚竿 矶钓杆 运动包 单肩背包 旅行包 双肩背包 挎包 户外摄影包 头巾 运动水壶 防水包 电动车 电池 电自行车 平衡车 滑板车 头盔 摩托车 老年代步 独轮车 遮阳伞 扭扭车 折叠车
花鸟文娱
鲜花速递 仿真植物 干花 DIY花 手捧花 鲜果蓝 仿真蔬果 开业花篮 花瓶 花卉绿植 绿植同城 园艺方案 多肉植物 桌面盆栽 蔬菜种子 水培花卉 苔藓景观 空气凤梨 园艺用品 肥料 花盆花器 花卉药剂 营养土 园艺工具 洒水壶 花架 铺面石 观赏鱼 热带鱼 孔雀鱼 底栖鱼 虾螺 龙鱼 罗汉鱼 锦鲤 金鱼 水母 灯科鱼 乌龟 造景设备 水草 底砂 水草泥 沉木 仿真水草 假山 氧气泵 过滤器 水草灯 加热棒 鱼粮 水质维护 硝化细菌 除藻剂 龟粮 奇趣小宠 兔兔 仓鼠 龙猫 雪貂 粮食零食 医疗保健 笼子 鹦鹉 鸟笼 观赏鸟 蚂蚁工坊 蜘蛛 蚕 萌狗世界 大牌狗粮 宠物服饰 狗厕所 宠物窝 航空箱 海藻粉 羊奶粉 宠物笼 储粮桶 剃毛器 营养膏 上门服务 乐器音乐 吉他 钢琴 数码钢琴 古筝 电子琴 萨克斯风 古琴 二胡 小提琴 音箱 模玩手办 高达 手办 盒蛋 兵人 变形金刚 圣衣神话 钢铁侠 BJD 拼装 人偶 猫咪世界 猫砂 猫粮 猫爬架 猫窝 猫砂盆 化毛膏 猫罐头 喂食器 折耳猫 猫抓板 猫玩具 猫笼 乐器配件 拾音器 乐器培训 合成器 乐器包 MIDI键盘 乐器定制 扬琴 贝司 葫芦丝 尤克里里 调音台 监听耳机 动漫周边 动漫T恤 动漫抱枕 COS 背包 项链 颜文字 哆啦A梦 大白 手表 盗墓笔记 海贼 火影 LOL
农资采购
农药 杀菌剂 杀虫剂 除草剂 调节剂 杀螨剂 杀鼠剂 敌敌畏 草甘膦 种子种苗 园林种苗 动物种苗 蔬菜种苗 水果种苗 粮油种子 药材种苗 食用菌种 辣木籽 肥料 氮肥 磷肥 钾肥 叶面肥 新型肥料 复合肥 生物肥料 有机肥 农业机械 耕种机械 收割机械 农机配件 植保机械 拖拉机 施肥机械 粮油设备 微耕机 农膜 塑料薄膜 大棚膜 防渗膜 鱼塘专用 薄膜 遮阳网 篷布 防虫网 农业工具 镰刀 锹 高压水枪 锨 镐 耙子 锄头 叉 饲料 猪饲料 羊饲料 牛饲料 预混料 饲料原料 全价料 饲料添加剂 浓缩料 畜牧养殖 加工设备 养殖器械 渔业用具 养殖服务 配种服务 养鸡设备 挤奶机 母猪产床 兽药 化学药 中兽药 抗生素 驱虫 消毒剂 疫苗 阿莫西林 氟苯尼考
"""
'''
task_keys = []
for i in txt.split(" "):
if i != "":
#print(i)
task_keys.append(i)
print(task_keys)
'''
'''
pingpaifenleis = pingpaifenleis.replace("\n","")
ppfl_keys2 = []
for i in pingpaifenleis.split("\t"):
if i != "" and i != "\t"and i != "\t\t":
#print(i)
aaa = i.split("(")
if len(aaa)>1:
aaa1 = aaa[0]
#aaa1 = aaa1.replace("\'","\\'")
#aaa1 = aaa1.replace("°","0")
#print(aaa1)
ppfl_keys2.append(aaa1)
aaa2 = aaa[1][:-1]
#aaa2 = aaa2.replace("\'","\\'")
#print(aaa2)
ppfl_keys2.append(aaa2)
else:
aaaa1 = aaa[0]
#aaaa1 = aaaa1.replace("\'","\\'")
#print(aaaa1)
ppfl_keys2.append(aaaa1)
#print("_______________________")
print(ppfl_keys2)
'''
'''
for infos in ppfl_keys2:
print(infos)
'''
def ppfldatas():
    """Parse the embedded brand table into a flat list of brand-name strings.

    Each non-empty cell of the tab-separated table holds either
    "中文名(EnglishName)" or a bare name.  Cells of the first form contribute
    two entries (the Chinese part and the English part inside the full-width
    parentheses); bare cells contribute one.  Prints and returns the list.
    """
    # Raw brand table; cells are separated by tabs, rows by newlines.
    pingpaifenleis = """
沃特(VOIT)	韩都衣舍(HSTYLE)	欧舒丹(L'OCCITANE)
双星名人	音儿(YINER)	海蓝之谜(LaMer)
金莱克(JMK)	艾米(AMII)	雅诗兰黛(Estee Lauder)
鸿星尔克(ERKE)	波司登(BOSIDENG)	凯朵(kate)
乔丹(AIR JORDAN)	摩安珂(Mo&Co.)	美宝莲纽约(Maybelline)
乔丹(QIAODAN)	梦舒雅(M•SUYA)	芙丽芳丝(Freeplus)
匹克(PEAK)	艾格(Etam)	日月光彩(Lunasol)
特步(Xtep)	妖精的口袋	迪奥(Dior)
361°	依恋(E·LAND)	印象之美(Impress)
安踏(ANTA)	Five Plus	佳丽宝(Kanebo)
李宁(LI-NING)	丽丽(Lily)	细胞博士(Dr.cell)
乐卡克(Le Coq Sportif)	伊芙丽(EIFINI)	纪梵希(Givenchy)
hummel	逸阳(ESEY)	JUJU
霍马(joma)	美特斯邦威(Meters/bonwe)	珊娜(SANA)
范斯(Vans)	飒拉(Zara)	兰皙欧(RECIPEO)
KELME	森马(Semir)	高丝润肌精(Kosé Junkisei)
匡威(Converse)	裂帛(LIEBO)	贝缔雅(Predia)
亚瑟士(ASICS)	茵曼(INMAN)	雪肌精(SEKKISEI)
安德玛(Under Armour)	杰尼亚(Zegna)	艾文莉(AVENIR)
新百伦(New Balance)	盖尔斯(GUESS)	黛珂(DECORTE)
乐途(lotto)	阿玛尼(Giorgio Armani)	高丝(Kose)
迪亚多纳(DIADORA)	范思哲(Versace)	植村秀(SHU UEMURA)
卡帕(Kappa)	华伦天奴(Valentino)	DHC
茵宝(UMBRO)	菲拉格慕(Ferragamo)	SK-ii
美津浓(Mizuno)	拉夫劳伦(Ralph Lauren)	肌肤之钥(CPB)
斐乐(FILA)	爱马仕(Hermes)	茵芙莎(IPSA)
彪马(PUMA)	博柏利(Burberry)	泊美(Pure&Mild)
锐步(REEBOK)	克洛伊(Chloe)	心机彩妆(Maquillage)
阿迪达斯(adidas)	普拉达(Prada)	俊士(JS)
耐克(NIKE)	俪丝娅(RELLECIGA)	欧珀莱(AUPRES)
	古驰(Gucci)	姬芮(ZA)
	LV/路易威登(Louis Vuitton)	三宅一生(Issey Miyake)
	雨果博斯(Hugo Boss)	资生堂(Shiseido)
	红豆(HODO)	谜尚(MISSHA)
	报喜鸟(SAINT ANGELO)	菲诗小铺(The Face Shop)
	KITON	思亲肤(SKIN FOOD)
	杉杉(FIRS)	悦诗风吟(Innisfree)
	新郎希努尔(SINOER)	爱丽小屋(ETUDE HOUSE)
	法派(FAPAI)	芙莉美娜(Primera)
	金利来(Goldlion)	梦妆(Mamonde)
	九牧王(JOEONE)	秀雅韩(Sooryehan)
	才子(TRIES)	赫拉(HERA)
	罗蒙(ROMON)	伊思(IT'S SKIN)
	柒牌(SEVEN)	亦博(IOPE)
	七匹狼(SEPTWOLVES)	SU:M37°呼吸
	利郎(LILANZ)	欧蕙(O HUI)
	雅戈尔(Youngor)	雪花秀(Sulwhasoo)
	Baby Milo	后(WHOO)
	NEXT	兰芝(laneige)
	优衣库(UNIQLO)
	盖璞(GAP)
"""
    # Dropping newlines leaves one long tab-separated sequence of cells.
    pingpaifenleis = pingpaifenleis.replace("\n","")
    ppfl_keys2 = []
    for i in pingpaifenleis.split("\t"):
        # skip empty cells produced by consecutive tab separators
        if i != "" and i != "\t"and i != "\t\t":
            #print(i)
            # Split on the full-width parenthesis used by "中文名(English)" cells.
            aaa = i.split("(")
            if len(aaa)>1:
                # cell has both a Chinese and an English name: keep both
                aaa1 = aaa[0]
                #aaa1 = aaa1.replace("\'","\\'")
                #aaa1 = aaa1.replace("°","0")
                #print(aaa1)
                ppfl_keys2.append(aaa1)
                # strip the trailing full-width ")" from the English half
                aaa2 = aaa[1][:-1]
                #aaa2 = aaa2.replace("\'","\\'")
                #print(aaa2)
                ppfl_keys2.append(aaa2)
            else:
                # bare cell: a single brand name
                aaaa1 = aaa[0]
                #aaaa1 = aaaa1.replace("\'","\\'")
                #print(aaaa1)
                ppfl_keys2.append(aaaa1)
            #print("_______________________")
    print(ppfl_keys2)
    return ppfl_keys2
"""
格林汉姆(Graham) 赛乐普(CYCLOP) 爱慕(Aimer) 谊嘉宝(yijiabao) 寇驰(COACH) 晶石灵(CHENIM) 玛丽黛佳(Marie Dalgar) mikihouse 福米托(Formitalia)
美图meitu 阳光发品(U.shine) 微星(MSI) 歌尔(GoerTek) 乐德创(Wizcom) 华美(HUAMEI) 中天微(C-SKY) 好太太(Haotaitai) 威力电器(WEILI) 三角牌
韩国福库(CUCKOO) 小鸭(LittleDuck) 德尔(Deer) 桑夏(SUNSHORE) 百乐满(Paloma) 哈佛(Harvard) 海氏(Hauswirt) 海氏(Hauswirt) 富信(fuxin) 意利(illy)
皇冠(Royal Sovereign)
伊莎贝恩腕表(IsaBain) 傲品(WINOMIN) 天姿(TNZI) 牧羊人生 葆蝶家(Bottega Veneta) 星碧水晶(Singbee) 芭妮兰(Banilaco) mothercare Milano&Design
一加Oneplus Fortune Fashion 神舟(Hasee) 天龙(DENON) 快译通 白雪(Baixue) 大唐电信(DTT) 德国西门子(SIEMENS) 美国惠而浦(Whirlpool) 康佳(KONKA)
威王(WEKING) 润唐(Rota) 摩飞(MorphyRichards) 天普(TIANPU) 神州厨电(SHENZHOU) 康泉(KANCH) 忠臣(LOYOLA) 柏翠(petrus) 易极优(Easiyo)
喜客(Saeco) 文权(WenChyuan)
博柏利手表(Burberry) 健英台球 曼妮芬(ManiForm) ShearersUGG 芬迪(Fendi) 亚华天玑宝石(TAKEN STONE) 珂莱欧(Clio)
小木马(TWINKIDS) 芝华仕(CHEERS) 联想(Lenovo)
Sleek 宏碁(acer) 爱国者(aigo) 文曲星 星星(XINGX) 龙芯(loongson) 德意电器(DE&E) 日本三洋(SANYO) 雅乐思(Rnice) 伊莱特(enaiter)
美斯特(MESTER) 金正(NONTAUS) 清华阳光 帅康厨卫(sacon) 阿诗丹顿(USATON) 长帝电器(changdi) 润唐(Rota) 日创(Rikon) 奈斯派索(Nespresso) 金图(JINTU)
Invicta手表 乔氏(JOY) 歌瑞尔(Gainreel) 萨顿(SuttonsUgg) 迪奥箱包(Dior) 莱俪(LALIQUE) 蒂佳婷(Dr.Jart+) 丽婴房(Les enphants) 米洛(MENOIR)
金立GIONEE
金瑞祥(JRX) 惠普(HP) 铂傲(B&O) 先科(SAST) 酷开(Coocaa) 瑞萨(Renesas) 帅康厨卫(sacon) 德国西门子(SIEMENS) 尚朋堂(SANPNT) 三角牌 欧科(OUKE)
贝尔斯顿(Bestday) 沐阳(my) 火王(HIONE) 威博(weber) 客浦(caple) 美国惠而浦(Whirlpool) 天际(TONZE) 惠家(WELHOME) 光明(GM)
尼克松(Nixon) 绅迪(Shender) 维多利亚的秘密(VICTORIA' SSECRET) MOU 路易威登(LV) 宝仕奥莎(PRECIOSA) 高姿(COGI) 玛米玛卡(MOMOCO)
左右家私(ZUOYOU) 中兴ZTE 海森林(seaforest) 戴尔(DELL) 宾果(Bingle) 易读宝 风行电视 意法半导体(ST) 火王(HIONE) 老板(ROBAM) 格兰仕集团(Galanz)
象印(ZOJIRUSHI) OneCup 恩优希(NUC) 光芒(GOMON) 方太厨具(Fotile) 光芒(GOMON) 柏翠(petrus) 瑞典伊莱克斯(Electrolux) 苏泊尔电器(SUPOR) 美乐家(Melitta)
天岑(Tiancen)
鲁米诺斯(Luminox) 莱利(RILEY) 黛安芬(Triumph) EMU 香奈儿箱包(CHANEL) 珂丝塔(KostaBoda) 植美村(ZMC) ABC童装(ABC KIDS) 顾家家居(KUKA) 宏达HTC
璐酷尔(LUKUL) 联想(ThinkPad) 先锋(Pioneer) 天朗科技(MPR) 乐华(ROWA) 英飞凌(Infineon) 老板(ROBAM) 格兰仕集团(Galanz) 苏泊尔电器(SUPOR)
天际(TONZE) 苏泊尔电器(SUPOR) 博朗(BRAUN) 同济阳光 华帝厨电(Vatti) 美国惠而浦(Whirlpool) 德国西门子(SIEMENS) 苏泊尔电器(SUPOR) 小熊电器(Bear)
北欧欧慕(nathome) 德拉根(DLG)
励柏艾顿(LIBER AEDON) 威利(FURY) 鳄鱼(LACOSTE) Cozy Steps 爱马仕箱包(Hermes) 巴卡拉(Baccarat) 蜜丝佛陀(Max Factor) 阿杰邦尼(AJIEBANGNI)
哥伦比尼(colombini) 摩托罗拉(Motorola) 爱德兰丝(Aderans) 华硕(ASUS) 罗技(logitech) 优彼(ubbie) PPTV 展讯(Spreadtrum) 方太厨具(Fotile)
美的集团(Midea) 荣事达小家电(Royalstar) 格兰仕集团(Galanz) 荣事达小家电(Royalstar) 欧科(OUKE) 五星太阳能 康宝(Canbo) 德国西门子(SIEMENS) 老板(ROBAM)
小熊电器(Bear) 灿坤(EUPA) 鼎瑞(JAVA) 意高(ICO)
汤米·希尔费格(TOMMY HILFIGER) 星牌(STAR) 宝姿(PORTS) JumboUGG 明辉七色花 芭比波朗(BobbiBrown) 好孩子 芙莱莎(FLEXA) 索尼XPERIA 即发(JF)
苹果(Mac) 赛睿(SteelSeries) 快易典 暴风TV 台积电(tsmc) 华帝厨电(Vatti) LG电子 大松电器(TOSOT) 爱仕达(ASD) 九阳股份(Joyoung)
瑞典伊莱克斯(Electrolux) 元升 樱雪集团(INSE) 帅康厨卫(sacon) 方太厨具(Fotile) 澳柯玛股份(AUCMA) 荣事达小家电(Royalstar) 柏翠(petrus) 富雷(Fulei)
CK手表(Calvin Klein) 美洲豹(Predator) Barbour Yellow Earth 头彩(I feel) 伊蒂之屋(Etude House) 水孩儿(SOUHAIT) 松堡王国(sampo)
酷派Coolpad 艾瑞美(IREMY) 机械革命(MECHREVO) 雷蛇(Razer) 洪恩(Hongen) KKTV 博通(Broadcom) 康宝(Canbo) 松下(Panasonic) 艾美特(airmate)
苏泊尔电器(SUPOR) 美的集团(Midea) 艾诗凯奇(SKG) 桑普(Sunpo) 樱花卫厨(SAKURA) 华帝厨电(Vatti) 瑞典伊莱克斯(Electrolux) 九阳股份(Joyoung) 奔腾厨电(POVOS)
摩飞(MorphyRichards) 仙视(Goodview)
盖尔斯手表(Guess) XXIO 雅格狮丹(AQUASCUTUM) 星期六(ST&SAT) 茜子(She's) NARS 迪士尼童装(Disney) LHABC 锤子Smartisan 瑞贝卡(Rebecca)
海尔电脑(Haier) 创新(Creative) 外研通(VIATON) 模卡(MOOKA) 海思(Hisilicon) 百得(BEST) 海尔(Haier) 广东志高(CHIGO) 小熊电器(Bear) 飞利浦(PHILIPS)
苏泊尔电器(SUPOR) 辉煌太阳能 能率(NORITZ) 统帅电器(Leader) 格兰仕集团(Galanz) 美的集团(Midea) 九阳股份(Joyoung) 博朗(BRAUN) 中电数码(SCT)
化石(Fossil) 史力胜(SRIXON) 思琳(CELINE) 玉兰 威妮华(Viennois) 丝芙兰(SEPHORA) 铅笔俱乐部 多喜爱(AOK) 努比亚nubia 斯必申 外星人(AlienWare) 哈曼(JBL)
万利达教育电子 微鲸(WHALEY) 联发科技(Mediatek) 樱雪集团(INSE) 奔腾厨电(POVOS) 荣事达小家电(Royalstar) 小熊电器(Bear) 亿家能 创尔特(Chant)
樱花卫厨(SAKURA) 艾诗凯奇(SKG) 飞利浦(PHILIPS) OneCup 深圳巨龙
天美时(TIMEX) 马基高(Mac Gregor) 麦丝玛拉(MaxMara) 步步升布鞋 流行美 卡姿兰(CARSLAN) 嗒嘀嗒(Dadida) 喜梦宝(XMB) 360手机 红花牌
清华同方(Tsinghua Tongfang) 缤特力(Plantronics) 万虹(VANHON) 乐视(LETV) 美光(Micron) 统帅电器(Leader) 九阳股份(Joyoung) 大松电器(TOSOT)
灿坤(EUPA)
美大(MEIDA) 前锋(CHIFFO) 万家乐 意大利德龙(Delonghi) 凯伍德(KENWOOD) 德国西门子(SIEMENS) 鸿合(HiteVision)
博塔设计(BOTTAdesign) PING 伦敦雾(LONDONFOG) 足下有福 美爆(MIXBOX) 日本植村秀(Shu Uemura) 乐高(LEGO) 酷漫居(comagic) SAMSUNG三星 立效牌
微软(Microsoft) 舒尔(Shure) 好易通(Besta) 麦克维尔(McQuay) 德州仪器(TI) 容声厨卫 美的集团(Midea) 奔腾厨电(POVOS) 荣事达小家电(Royalstar)
四季沐歌(Micoe) 万家乐 法罗力(Ferroli) 小熊电器(Bear) 北美电器(ACA) 瑞典伊莱克斯(Electrolux) 希沃(seewo)
爱丽舍(ELYSEE) 本间(HONMA) 白领(WhiteCollar) 宝石布鞋 新光饰品(NEOGLORY) 凯卓(KENZO) 麦格弗(MAGFORMERS) 我爱我家(MOKKI) 魅族MEIZU 保法止(Propecia)
东芝Toshiba 魔声(MONSTER) 小霸王 特灵(TRANE) AMD 樱花卫厨(SAKURA) 飞利浦(PHILIPS) 九阳股份(Joyoung) 九阳股份(Joyoung) 皇明(Himin)
日本林内(Rinnai) 万和(Vanward) 灿坤(EUPA) 松下(Panasonic) 小熊电器(Bear) 艾博德(iBoard)
齐博林(ZEPPELIN) Titleist Calvin Klein 三五三九(3539) 哎呀呀生活馆(Aiyaya House) 浪凡(LANVIN) 建乐思(K’NEX) 奇思妙想(IDEAS) 小米MI 西帕维药
联想(Lenovo) 捷波朗(Jabra) 好记星(OZing) 开利(Carrier) 英伟达(NVIDIA) 万事兴(VEZSIN) 海尔(Haier) 美的集团(Midea) 美的集团(Midea)
华扬(HUAYANG) 万和(Vanward) 意大利阿里斯顿(ARISTON) 九阳股份(Joyoung) 灿坤(EUPA) 创凯(CKDZ)
朗坤(LACO) 泰勒梅(TaylorMade) 千仞岗(CHERICOM) 同升和 千千氏(Cheerts) 大卫杜夫(Davidoff) 孩之宝(Hasbro) 七彩人生(color life) VIVO 达霏欣
SAMSUNG三星 博士(Bose) 优学派 盾安中央空调(DunAn) 高通(Qualcomm) 亿田(entive) 米家(MIJIA) 飞利浦(PHILIPS) 太阳雨(SUNRAIN) 意大利阿里斯顿(ARISTON)
瑞典伊莱克斯(Electrolux) 北美电器(ACA) 飞利浦(PHILIPS) 创显(createview)
尼芙尔(NIVREL) 卡拉威(Callaway) 鸭鸭(YAYA) 步瀛斋 水晶坊(Crystalane) 祖玛珑(Jo Malone) 绘儿乐(Crayola) 舒达(Serta) OPPO 敬修堂 小米MI
硕美科(SOMIC) 步步高教育电子 天加(TICA) 迎广(IN WIN) 能率(NORITZ) 飞利浦(PHILIPS) 北美电器(ACA) 桑乐(sangle) 博世热力(BOSCH)
艾欧史密斯(A.O.SMITH) 松下(Panasonic) 北美电器(ACA) 东方中原(Donview)
拉芙兰瑞(Lavaro) 普瑞吉(PRGR) 雅鹿 老美华 海盗船(Pirateship) 伊夫圣罗兰(YSL) 费雪(Fisher-Price) 丝涟(Sealy) 荣耀honor 落健(Rogaine)
华为HUAWEI
漫步者(EDIFIER) 欧科(EUROKLIMAT) 银欣(SilverStone) 前锋(CHIFFO) 虎牌(TIGER) 惠人(Hurom) 力诺瑞特(linuo-paradigma) 美的集团(Midea)
奥特朗(Otlan) 海尔(Haier) 德龙(Delonghi) 熊猫(Panda)
显赫(HANHART) 克利夫兰(Cleveland) 艾莱依(ERAL) 荣顺(ROSUN) 卡利雅(CARLIER) 乔治·阿玛尼(Giorgio Armani) 小裂帛(liebo) 福莱哥勒(Breckle)
华为HUAWEI
章光101 飞利浦(PHILIPS) 东芝空调(Toshiba) 游戏帝国(GAMEMAX) 万家乐 东芝(Toshiba) 松下(Panasonic) 海尔(Haier) 艾欧史密斯(A.O.SMITH) 海尔(Haier) 美的(Midea) 松下(Panasonic) 康佳商显(KONKA)
帝玛(TUTIMA) Odyssey 杰奥(JIEAO) 内联升 天使之泪(ANGERERLE) 安娜苏(AnnaSui) 安踏(ANTA kids) 席梦思(SIMMONS) 苹果手机(iPhone) 光明 拜亚动力(Beyerdynamic) 科龙(KELON) 九州风神(DEEPCOOL) 日本林内(Rinnai) 松下(Panasonic) 海尔(Haier) 美的(Midea) 领视(Linxee)
瑞宝(Chronoswiss) Maruman 康博(combo) 馨华德 欧诗漫珠宝(OSM) 丽娜蕙姿(Nina Ricci) 太平鸟(MINI PEACE) 金可儿(KINGKOIL) 歌薇(Goldwell)
爱科技(AKG) 春兰(Chunlan) 游戏悍将 万和(Vanward) 海尔(Haier) 视臻科技(MAXHUB)
联合(UNION) 本哈根(Ben Hogan) 金羽杰(LINC) 京花(JINGHUA) 塔思琦(TASAKI) 意大利菲拉格慕(Ferragamo) 笛莎(DEESHA) 雅兰(AIRLAND) 美奇丝(MATRIX)
Beats 扬子空调(YAIR) 鑫谷(Segotep) 格兰仕集团(Galanz) 全时
诺莫斯(Nomos) 泰尼飞(Tecnifibre) 坦博尔(TANBOER) 蜘蛛王(SPIDERKING) 国玉 HABA护肤品 巴布豆(BOBDOG) 梦百合(MLILY) 美源(Bigen)
铁三角(audio-technica) 美国约克(YORK) 曜越(Thermaltake) 苏泊尔电器(SUPOR) 好视通
万宝龙(Montblanc) 沃克(VOLKL) 盟可睐(Moncler) 奥康(AOKANG) 和合玉器 雅顿(Elizabeth Arden) 安奈儿(Annil) 穗宝(somnopro) 莉婕(Liese)
声海(Sennheiser) 日立集团(Hitachi) 航嘉(Huntkey) 美的集团(Midea) 亿联(Yealink)
汉米尔顿(HAMILTON) 史莱辛格(Slazenger) 冰洁(Blinger) 其乐(Clarks) 和玉缘 法国欧舒丹(L'OCCITANE) 智高(CHICCO) 慕思(DE RUCCI) 俪诗朵(LUCIDO-L)
JVC杰伟世 金羚(JINLING) 先马(Sama) 海尔(Haier) 科达(KEDACOM)
帝舵(Tudor) 天龙(Teloon) 凯撒(KAISER) 健乐士(GEOX) 北玉青龙 法国娇韵诗(Clarins) 拉玛泽(LAMAZE) 喜临门(Sleemon) 美吾发 小米MI
卡萨帝(Casarte)
大水牛(BUBALUS) 思科(CISCO)
优立时(unizeit) 王子(Prince) 依奴珈(ENAGA) 康龙(KANGlong) 昆玉 欧莱雅赫莲娜(HR) 美家宝(Mega Bloks) 吉斯(JISI) 韩伊(CO.E) 荣耀honor
海信集团(Hisense) 金河田(GOLDENFIELD) 宝利通(POLYCOM)
绮年华(ETERNA) 百保力(Babolat) 东明(DongMing) 八哥(BAGE) 张铁军 可贝尔(COBOR) HABA 史丹利(STANLEY) TIGI SONY索尼 容声(Ronshen)
建兴(LITEON) 小鱼易连
迪沃斯(DAVOSA) 强力(QiangLi) 圣尼(SHENGNI) 暇步士(HushPuppies) 玉翠山庄 香蒲丽(Shangpree) People 惠尔邦(WELBOM) 瑞虎(Ruihu)
帝度(DIQUA) 饥饿鲨 维海德(VHD)
梅花(Titoni) 肯尼士(PROKENNEX) 兽王(SHOUWANG) 圣伽步(SKAP) 健兴利 瞳话 Melissa&Doug 维意定制(WAYES) 塞巴斯汀(SEBASTIAN)
创维(Skyworth) 浦科特(Plextor) 威速科技(V2)
赫柏林(Michel Herbelin) 奥联(OLIPA) 束兰(Shulan) TOD'S 传世翡翠 瑞拉迪雅芙丝(DIAFORCE) B.toys 索菲亚(SOGAL) 杰士派(GATSBY)
晶弘(kinghome) HGST 亚美亚(AVAYA)
雍加毕索(yonger & Bresson) 祖迪斯(Joerex) 庄子(ZORANZI) 康奈(KANGNAI) 东方金钰 美即面膜(MG) 启蒙 欧派(OPPEIN) 美涛(Maestro)
奥马(Homa) 希捷(SEAGATE) 网动(Iactive)
依波路(Ernest Borel) 世达(STAR) 慕容(Morriszou) 金猴(Jinho) 七彩云南 森田药妆(Dr.Morita) 木玩世家 雅迪斯(ATIS) 安安金纯(A'Gensn)
爱客(IQAir) 西部数据(WD) 会畅通讯(bizconf)
康斯登(Frederique Constant) 火车头(Train) 雪豹(XueBao) 木林森(Mulinsen) 翡翠物语 膜法世家 邦宝 百得胜 追风(Royal Wind)
布鲁雅尔(Blueair) 富士康(Foxconn) 华望技术(hwactive)
罗马(ROAMER) 利生(LeeSheng) 应大(YINGDAK) 红蜻蜓(Red Dragonfly) 玉缘(YUYUAN) 一叶子(Oneleaf) 美泰(mattel) 伊百丽(EBERY) 章华(SAVOL) 巴慕达(BALMUDA) 精英(ECS) Vidyo
荣汉斯(JUNGHANS) 兰华(Lanhua) 玛丽亚·古琦(MARJAKURKI) UGG 城隍珠宝 御泥坊 仙霸(SIMBA-DICKIE) 飞美(FAMILY) 温雅(YOUNGRACE)
戴森(Dyson) 梅捷(SOYO) ZOOM
劳特莱(ROTARY) 萨洛蒙(Salomon) 巴贝(BABEI) 思加图(STACCATO) 荟华楼 日本高丝(KOSE) 万代(BANDAI) 尚品宅配 迪彩(Dcolor) 胡佛(Hoover)
华擎(ASRock) 网迅(WebEx)
豪度(CODEX) 迈乐(Merrell) 万事利(Wensli) 他她(TATA) 伊泰莲娜(ITALIAN) 贝佳斯(BORGHESE) 迪士尼(Disney) 晚安(Goodnight) 威娜(WELLA)
福维克(VORWERK) 映泰(BIOSTAR) 世纪风
瑞士军工(SWISS MILITARY) LOWA 红都(hongdu) 卓诗尼(Josiny) 施华洛世奇(SWAROVSKI) 爱神菲(SNP) 澳贝 劳卡衣柜(Roco) 水之密语(AQUAIR)
沃尔斯顿(WALSTON) 得胜(TAKSTAR) 天彩(Skycolor)
沛纳海(Panerai) 奥索卡(OZARK) 培罗成(PROGEN) 达芙妮(Daphne) 蒂芙尼(Tiffany) 丽得姿(Leaders) 骅威 易高定制 力士(LUX)
彩虹(rainbow) 乐之邦(MUSILAND) 乐彩(Locor)
真力时(Zenith) 嘎蒙特(GARMONT) 圣得西(SUNDANCE) Charles&Keith 海润珍珠(Heren) JAYJUN Hape 太和木作 丝蕴(Syoss) 爱贝斯
美奥多(MIDIMAN) 金恒丰(JHF)
泰格豪雅(TAG Heuer) 添柏岚(Timberland) 威可多(VICUTU) 百丽(BeLLE) 千足珍珠 我的美丽日志 银辉 红古轩 飘柔(Rejoice) 南极人(nanjiren)
福克斯特(Focusrite) 宏华(Atexco)
三宅一生手表(ISSEY MIYAKE) 动感(ACTION) 蓝豹(LAMPO) 哈森(HARSON) 南珠宫(PEARLROYAL) 美迪惠尔(MEDIHEAL) 奥迪双钻 鸿发 施华蔻(Schwarzkopf)
超人(SID) 雅马哈(Yamaha) 泰威(teckwin)
名古城(NAKZEN) 金峰(JINFENG) 培罗蒙(BAROMON) 爱步(ecco) 阮仕(RUANS) 资生堂肌肤之钥(CPB) 多美(TOMY) 元亨利 清扬(CLEAR)
桂冠之星(LAURASTAR) 罗兰(Roland) 威特(EFI)
东方双狮(ORIENT) 天鹅(TIAN-E) 红领(REDCOLLAR) Fed 石头记(ISTONE) 美宝莲(Maybelline) 伟易达 年年红 潘婷(PANTENE) 凯驰(Kärcher)
艾肯(ICON) 彩神(FLORA)
西铁城(CITIZEN) 麦斯卡(MESUCA) 美尔雅(MAILYARD) 意尔康(YEARCON) 御木本(MIKIMOTO) Make Up For Ever 小泰克(littletikes) 美联红木 沙宣(VS) 伊莱克斯(Electrolux) 坦克(Terratec) 工正集团
卡西欧(CASIO) 若喜士(ROCES) 创世(TRANDS) 接吻猫(Kiss Cat) 宝格丽珠宝(BVLGARI) 资生堂泊美(PURE&MILD) 小猪班纳(PEPCO) 老周红木 海飞丝 红心(hongxin-sh) 希仕(His) MIMAKI
精工(SEIKO) 罗勒布雷德(Rollerblade) 爱帝(I'd) 千百度(C.banner) 谢瑞麟(TSL) 罗拉玛斯亚(Laura Mercier) 可么多么(COMOTOMO) 明堂 吕(RYOE)
威马(goodway) 翔升(ASL) 武藤(MUTOH)
百年灵(Breitling) 美洲狮(COUGAR) 莫代尔(madallo) 鞋柜(ShoeBox) 明牌珠宝(MINGR) 韩国梦妆(Mamonde) NUK 友联为家 施巴(Sebamed)
东菱(donlim) 耕升(GAINWARD)
梵德宝(Van Der Bauwede) 迈古(M-CRO) 婷美(GRACEWELL) 莱尔斯丹(Le Saunda) 老庙 确美同(Coppertone) 贝塔(Betta) 皖宝(Vanbo) 惠润(SUPERMiLD)
北美电器(ACA) 盈通(yeston)
宝格丽(Bvlgari) 塔巴塔(Tabata) Dickies 柯玛妮克(KOMANIC) 菜百首饰(BAI) 美肤宝(MeiFuBao) 布朗博士(Dr.Brown's) 桑马红木 蒂花之秀(Difaso) 阿夸莎娜(aquasana) 映众(INNO3D)
艾美(Maurice Lacroix) 捷佳(JIEJIA) 米皇(Mihuang) 百思图(BASTO) 潮宏基(CHJ) 苏菲娜(SOFINA) 新安怡(AVENT) Baker 蜂花(Beeflower)
怡口(Ecowater) 讯景(XFX)
萧邦(Chopard) 黑貂(SABLE) 圣雪绒(st·edenw) 天美意(Teenmix) 周大生(Chow Tai Seng) 安热沙(ANESSA) 倍得适/倍儿乐(Playtex) Minotti 丝蓓绮(TSUBAKI) 霍尼韦尔(Honeywell) 铭瑄(MAXSUN)
格雅(Geya) 沙鸽(Zoggs) 天山(TIANSHAN) 大东(DUSTO) 老凤祥 丹姿(DANZ) 贝亲(pigeon) Christopher Guy 卡诗(KERASTASE) 斯帝沃(STEVOOR)
丽台(Leadtek)
北京表(BEIJING) 日高(NIKKO) 珍贝(ZHENBEI) 六福珠宝(LUKFOOK) 法兰琳卡(FRANIC) 乐儿宝(bobo) Kartell 夏士莲(Hazeline) 爱惠浦(Everpure)
迪兰(Dataland)
星皇表(STARKING) 雅丽嘉(ARYCA) 鹿王(King Deer) 周生生(Chow Sang Sang) 温碧泉(Wetherm) 爱得利(IVORY) 爱室丽(Ashley) 滋源(Seeyoung) 东阳美吉(Magic) 索泰(ZOTAC)
天霸表(TIANBA) 号手(HaoShou) 春竹(SPRING) 周大福(Chow Tai Fook) 丸美(MARUBI) 日康(RIKANG) 红苹果(RED APPLE) 拉芳(Lovefun) 3M
技嘉(GIGABYTE)
雷诺表(RARONE) 浪都(Rundo) 雪莲(Snowlotus) 佐卡伊(ZOCAI) 韩后(HANHOO) 好奇(HUGGIES) 联邦(LANDBOND) 霸王(BAWAMG) 道尔顿(Doulton)
影驰(GALAXY)
宝时捷表(POSCER) 三奇(SANQI) 兆君(Zhaojun) 钻石小鸟(ZBird) 娜丽丝(Naris) 花王(KAO) 曲美家具 舒蕾(SLEK) 四季沐歌(Micoe)
蓝宝石(SAPPHIRE)
北极星(POLARIS) 洲克(ZOKE) 兔皇(TUHUANG) 珂兰(Kela) 怡思丁(ISDIN) 巴拉巴拉(Balabala) 宜家(IKEA) 多芬(Dove) 皇明(Himin)
金泰克(tigo)
爱彼(Audemars Piguet) 英发(YINGFA) 皮皮狗(PiPiGOU) 周六福珠宝 珂润(Curél) 尤妮佳(moony) 皇朝家俬 华扬(HUAYANG) 骇客(HyperX)
芝柏(Girard Perregaux) 浩沙(Hosa) 三利(SANLI) 越王(Jovan) 海蓝之谜(La Mer) 大王 全友家居 太阳雨(SUNRAIN) 英睿达(Crucial)
万国表(IWC) 阿瑞娜(Arena) 红莲(RedLotus) 千叶(KEER) 法国希思黎(sisley) 帮宝适(pampers) 双叶家具 桑乐(sangle)
海力士(Hynix)
雅典(Ulysse Nardin) 速比涛(Speedo) 浅秋(QIANQIU) 吉盟(GMOND) 珀莱雅(PROYA) 妈咪宝贝 力诺瑞特(linuo-paradigma)
十铨(TEAM)
法穆兰(FRANCK MULLER) 凯迪龙(Kdilon) 海尔曼斯 京润(gNPearl) 韩国雪花秀(Sulwhasoo) 小鹿叮叮 格兰仕(Galanz)
芝奇(G.SKILL)
艾美达(Armand Nicolet) 飘(Few) 鄂尔多斯(ERDOS) 金叶珠宝(GOLDLEAF) 欧珀莱AUPRES 雀氏 林内(Rinnai) 富勒(fühlen)
库尔沃(Cuervo y Sobrinos) 人本帆布鞋 帕兰朵(Plandoo) 戴梦得(Diamend) 理肤泉(La Roche Posay) 班博(BAMBO)
阿里斯顿(Ariston) 美商海盗船(Corsair)
豪朗时(hautlence) 波尼(PONY) 猫人(MiiOW) 爱迪尔(IDEAL) 玫琳凯(MARYKAY) 安儿乐 奥特朗(Otlan) 斐尔可(FILCO)
欧米茄(Omega) 飞跃(Feiyue) 三枪(THREEGUN) 亚一珠宝 日本雪肌精(SEKKISEI) 爹地宝贝 A.O.史密斯 樱桃(CHERRY)
卡地亚(Cartier) 凡客(VANCL) 花花公子(PLAYBOY) 千禧之星(Millennium Star) 娥佩兰(OPERA) 菲比(Fitti) 澳柯玛(AUCMA)
雷柏(RAPOO)
昆仑(Corum) 回力(Warrior) 雪中飞(SnowFlying) 金一(KingOne) 李士(DOCTOR LI) 丽贝乐(Libero) 帅康(SACON)
双飞燕艾思釱克(A4TECH)
上海表(SHANGHAI) 迪卡侬(Decathlon) 红妮(Hongni) 金伯利(Kimberlite) 薇姿(vichy) 固力果(ICREO) 象印(ZO JIRUSHI)
达尔优(Dareu)
海鸥(SEA-GULL) 亿健(YIJIAN) 舒雅(SCHIESSER) ENZO SK-II/SK2 明治(Meiji) 虎牌(TIGER) 精灵(Genius)
依波表(EBOHR) 澳瑞特(ORIENT) 铜牛(Topnew) 萃华金店 Kiehl's科颜氏 和光堂(Wakodo) 福库(Cuckoo)
酷冷至尊(CoolerMaster)
罗西尼(ROSSINI) BH A&B 百泰首饰(Batar) 欧珀莱俊士(JS) 多美滋(dumex) 奔腾(povos) 多彩(DeLUX)
格拉苏蒂(Glashütte) 搏飞(Bowflex) 朵彩(DOCARE) 莱绅通灵(Leysen) 欧莱雅男士(L'Oreal) 合生元(BIOSTIME) 海盗(VIKING)
新贵(NEWMEN)
朗格(A.Lange & Söhne) 爱康(ICON) 宜而爽(YOURSUN) 金至尊(3D-GOLD) 屈臣氏(Watsons) 佳贝艾特(kabrita) 华林(waring)
惠科(HKC)
诺美纳(Normana) 乔山(JOHNSON) 纤丝鸟(TINSINO) 中国黄金 韩国兰芝(LANEIGE) 雀巢(nestle) 法格(FAGOR) 优派(ViewSonic)
亨利慕时(H.Moser&Cie) 赛佰斯(CYBEX) 俞兆林 蝶翠诗(DHC) 伊利(YILI) 美诺(Miele) LG电子
天王表(TIAN WANG) 诺德士(Nautilus) 顶瓜瓜 阿迪达斯护肤(Adidas) 惠氏(Wyeth) 九阳(Joyoung) 冠捷(AOC)
飞亚达(FIYTA) 星驰(Star Trac) MJstyle 日本姬芮(Za) 美诺赞臣(Meadjohnson) 苏泊尔(SUPOR) 易美逊(ENVISION)
积家(Jaeger-LeCoultre) 泰诺健(Technogym) 热风(hotwind) 他能量(Tenor) 可瑞康(Karicare) 通用电器(GE Appliances)
方正科技(Founder)
伯爵(PIAGET) 力健(lifefitness) Forever21 高夫(GF) 喜宝(HIPP) 小天鹅(Little Swan) 京东方(BOE)
宝玑(Breguet) 必确(PRECOR) New Look 中国百雀羚(Pechoin) 英氏(YEEHOO) 三洋(SANYO) 长城(GreatWall)
宝珀(blancpain) 阿索罗(Asolo) 无印良品(MUJI) 相宜本草(INOHERB) 牛栏/诺优能(Nutrilon) 索尼(SONY) NEC
劳力士(Rolex) 赞贝拉(ZAMBERLAN) UR 露得清(Neutrogena) 爱他美(Aptamil) 东芝(Toshiba) 昂达(ONDA)
江诗丹顿(Patrimony) 拉思珀蒂瓦(la sportiva) 霍利斯特(Hollister) 丝塔芙(Cetaphil) 贝因美(Beingmate) TCL 酷比魔方
百达翡丽(Patek Philippe) 斯卡帕(SCARPA) MIXXO 美国雅诗兰黛(Estee Lauder) 贝拉米(Bellamys) 大金(DAIKIN)
台电(TECLAST)
多威(Do-win) ASOBIO 日本资生堂(SHISEIDO) A2 日立(HITACHI) 苹果(iPad)
FERRINO MONKI 曼秀雷敦(Mentholatum) 姆明一族(Muumi BABY) 格力(GREE) 原道(VIDO)
Sierra Designs Bershka 妮维雅(NIVEA) 宝松怡(BOSOMI) 三菱电机 Kindle
洛尔斯(LUXE) 极优(GU) 巴黎欧莱雅(L'OREAL) 牛栏(Cow&gate) 三菱重工(Mitsubishi) E人E本
沃德(VAUDE) TOPSHOP 水密码(Wetcode) 泓乐(Holle) 现代电器(HYUNDAI) 微星(MSI)
比格尼斯(Big Agnes) KM 欧诗漫(OSM) HeroBaby 约克(YORK) 神舟(Hasee)
Eureka! MANGO 韩国悦诗风吟(Innisfree) 美素佳儿(Friso) 长虹(CHANGHONG) 宏碁(acer)
牧高笛(MOBIGARDEN) 大嘴猴(Paul Frank) 自然堂(CHANDO) 雅培(Abbott) 志高(CHIGO) 惠普(HP)
KELTY C&A 法国雅漾(Avene) LG 戴尔(DELL)
HILLEBERG 潮流前线 韩国菲诗小铺(TheFaceShop) 三星(SAMSUNG) 联想(ThinkPad)
攀山鼠(KlatterMusen) H&M 北京大宝(Dabao) 松下(Panasonic) 华硕(ASUS)
贝豪斯(Berghaus) 班尼路(Baleno) 花印(HANAJIRUSHI) 夏普(SHARP) 苹果(Mac)
北极狐(Fjallraven) 娅丽达(YERAD) 韩束(KANS) 西门子(SIEMENS) 机械革命(MECHREVO)
Westcomb 百圆裤业 碧柔(Biore) 海尔(Haier) 海尔电脑(Haier)
Outdoor Research 浪莎(LANSWE) 法国兰蔻(LANCOME) 海信(Hisense) 外星人(AlienWare)
土拨鼠(Marmot) 初语(Toyouth) 上海佰草集(Herborist) 扎努西.伊莱克斯(Electrolux Zanussi)
清华同方(Tsinghua Tongfang)
猛犸象(MAMMUT) 北极绒(beijirog) 美国倩碧(Clinique) 美的(Midea)
巴塔哥尼亚(Patagonia) 南极人 日本芙丽芳丝(Freeplus) 美菱(Meling)
山浩(Mountain Hardwear) 百斯盾(Bestn) 法国碧欧泉(Biotherm) 博世(BOSCH)
里昂比恩(L.L.BEAN) 佐丹奴(Giordano) 旁氏(POND'S) 惠而浦(Whirlpool)
布来亚克(BLACK YAK) 虎都(FORDOO) 珊珂(SENKA)
觅乐(MILLET) Texwood 玉兰油(OLAY)
科勒曼(Coleman) Lee 莱珀妮(La Prairie)
乐飞叶(Lafuma) 威鹏(WEIPENG) 馥蕾诗(fresh)
探路者(TOREAD) JASONWOOD 茱莉蔻(Jurlique)
哥伦比亚(Columbia) ABLE JEANS NYR
凯乐石(KAILAS) EVISU boots
哥仑步(Kolumb) 牛仔时光(ERQ) 露诗(lush)
狼爪(JACK WOLFSK) 步森(Busen) AA网
骆驼(CAMEL) 彩羊(Fazeya) EVELOM
始祖鸟(ARCTERYX) 占姆士(James Kingdom) 美体小铺(The Body Shop)
摩腾(Molten) 鲁泰.格蕾芬(LT.GRFF) 郁美净
斯伯丁(SPALDING) 绅士(gentleman) 美加净
航宇(HANGYU) 虎豹(HUBAO) 春娟
尤迪曼(YODIMAN) 海螺(CONCH) 片仔癀
伟士(WISH) 洛兹(Rouse) 迷奇(miracle)
燕龙(YANLONG) 开开 大宝
红双喜(DHS) 富绅(VIRTUE) 相宜本草
索牌(SOTX) GXG 百雀羚(SPDC)
凯胜(KASON) 唐狮(Tonlion) 谢馥春
薰风(KUMPOO) 劲霸(K-BOXING) 佰草集
邦喜尔(Benhill) 海澜之家(HLA) 薇诺娜
海德(HEAD) 太子龙(TEDELON) 雅漾(Avene)
威雷斯(Varesi) 爱登堡(edenbo) 圣罗兰(YSL)
泰迪(TACTIC) 与狼共舞(D-WOLVES) 香奈儿(Chanel)
波若亚士(PROACE) G2000 羽西(yue sai)
德尔惠(DEERWAY) 恒源祥 卡尼尔(Garnier)
邓禄普(DUNLOP) 千纸鹤男装(QZHIHE) 碧欧泉(Biotherm)
北面(The North Face) 真维斯(JEANSWEST) 兰蔻(LANCOME)
波力(Bonny) 波司登男装(BOSIDENG) 赫莲娜(HR)
威尔胜(Wilson) 李维斯(Levi's) 伊丽莎白雅顿(Elizabeth Arden)
亚狮龙(RSL) 思莱德(SELECTED) 科颜氏/契尔氏(Kiehl's)
高神(GOSEN) 卡帝乐鳄鱼(CARTELO) 贝玲妃(Benefit)
奥立弗(OLIVER) 马克华菲(MARK FAIRWHALE) 娇韵诗(Clarins)
川崎(KAWASAKI) 卡宾(Cabbeen) 魅可(M.A.C)
胜利/威克多(Victor) 杰克琼斯(Jack&Jones) 芭比波朗(BOBBI BROWN)
尤尼克斯(YONEX) 秋水伊人(CHIUSHUI) TomFord
斯凯奇(SKECHERS) 哥弟(GIRDEAR) 娇兰(Guerlain)
爱威亚(AVIA) 拉夏贝尔(La Chapelle) 祖·玛珑(JO MALONE)
索康尼(SAUCONY) 太平鸟(PEACEBIRD) 艾凡达(Aveda)
Brooks 欧时力(Ochirly) PRESCRIPTIVES
盖世威(K-Swiss) 乐町(LEDIN) 希思黎(sisley)
赛琪(SAIQI) VERO MODA 悦木之源(Origins)
喜得龙(XDLONG) 日本优衣库(UNIQLO) 诗狄娜(Stila)
贵人鸟 ONLY 倩碧(Clinique)
沃特(VOIT) 韩都衣舍(HSTYLE) 欧舒丹(L'OCCITANE)
双星名人 音儿(YINER) 海蓝之谜(LaMer)
金莱克(JMK) 艾米(AMII) 雅诗兰黛(Estee Lauder)
鸿星尔克(ERKE) 波司登(BOSIDENG) 凯朵(kate)
乔丹(AIR JORDAN) 摩安珂(Mo&Co.) 美宝莲纽约(Maybelline)
乔丹(QIAODAN) 梦舒雅(M•SUYA) 芙丽芳丝(Freeplus)
匹克(PEAK) 艾格(Etam) 日月光彩(Lunasol)
特步(Xtep) 妖精的口袋 迪奥(Dior)
361° 依恋(E·LAND) 印象之美(Impress)
安踏(ANTA) Five Plus 佳丽宝(Kanebo)
李宁(LI-NING) 丽丽(Lily) 细胞博士(Dr.cell)
乐卡克(Le Coq Sportif) 伊芙丽(EIFINI) 纪梵希(Givenchy)
hummel 逸阳(ESEY) JUJU
霍马(joma) 美特斯邦威(Meters/bonwe) 珊娜(SANA)
范斯(Vans) 飒拉(Zara) 兰皙欧(RECIPEO)
KELME 森马(Semir) 高丝润肌精(Kosé Junkisei)
匡威(Converse) 裂帛(LIEBO) 贝缔雅(Predia)
亚瑟士(ASICS) 茵曼(INMAN) 雪肌精(SEKKISEI)
安德玛(Under Armour) 杰尼亚(Zegna) 艾文莉(AVENIR)
新百伦(New Balance) 盖尔斯(GUESS) 黛珂(DECORTE)
乐途(lotto) 阿玛尼(Giorgio Armani) 高丝(Kose)
迪亚多纳(DIADORA) 范思哲(Versace) 植村秀(SHU UEMURA)
卡帕(Kappa) 华伦天奴(Valentino) DHC
茵宝(UMBRO) 菲拉格慕(Ferragamo) SK-ii
美津浓(Mizuno) 拉夫劳伦(Ralph Lauren) 肌肤之钥(CPB)
斐乐(FILA) 爱马仕(Hermes) 茵芙莎(IPSA)
彪马(PUMA) 博柏利(Burberry) 泊美(Pure&Mild)
锐步(REEBOK) 克洛伊(Chloe) 心机彩妆(Maquillage)
阿迪达斯(adidas) 普拉达(Prada) 俊士(JS)
耐克(NIKE) 俪丝娅(RELLECIGA) 欧珀莱(AUPRES)
古驰(Gucci) 姬芮(ZA)
LV/路易威登(Louis Vuitton) 三宅一生(Issey Miyake)
雨果博斯(Hugo Boss) 资生堂(Shiseido)
红豆(HODO) 谜尚(MISSHA)
报喜鸟(SAINT ANGELO) 菲诗小铺(The Face Shop)
KITON 思亲肤(SKIN FOOD)
杉杉(FIRS) 悦诗风吟(Innisfree)
新郎希努尔(SINOER) 爱丽小屋(ETUDE HOUSE)
法派(FAPAI) 芙莉美娜(Primera)
金利来(Goldlion) 梦妆(Mamonde)
九牧王(JOEONE) 秀雅韩(Sooryehan)
才子(TRIES) 赫拉(HERA)
罗蒙(ROMON) 伊思(IT'S SKIN)
柒牌(SEVEN) 亦博(IOPE)
七匹狼(SEPTWOLVES) SU:M37°呼吸
利郎(LILANZ) 欧蕙(O HUI)
雅戈尔(Youngor) 雪花秀(Sulwhasoo)
Baby Milo 后(WHOO)
NEXT 兰芝(laneige)
优衣库(UNIQLO)
盖璞(GAP)
""" |
## Document Reader
# Reads "readfile.txt" aloud, one line at a time, via the pyttsx3 TTS engine.
import pyttsx3
engine = pyttsx3.init()
##
# Optional tuning knobs, left disabled:
##engine.setProperty('rate',120) #120 words per minute
##engine.setProperty('volume',0.9)
with open("readfile.txt") as file:
    for line in file:
        # queue the line for speech ...
        engine.say(line)
        # ... and block until it has actually been spoken
        engine.runAndWait()
|
from spotipy.oauth2 import SpotifyClientCredentials
import spotipy
import json
import csv
import pylyrics3
from nltk.sentiment.vader import SentimentIntensityAnalyzer as SIA
def get_features(track_id):
    """Return the Spotify audio features of ``track_id`` as a plain list.

    Uses the module-level ``sp`` Spotify client.  The values come back in the
    order the audio-features dict reports them (see ``feature_names`` below).
    """
    # sp.audio_features already returns a list of plain dicts; the previous
    # json.dumps/json.loads round-trip was a redundant copy and was removed.
    features_results = sp.audio_features([track_id])
    # Convert the single result dict to a list of its values
    return list(features_results[0].values())
# Spotify client; credentials are read from the SPOTIPY_CLIENT_ID /
# SPOTIPY_CLIENT_SECRET environment variables by SpotifyClientCredentials.
client_credentials_manager = SpotifyClientCredentials()
sp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)
# VADER analyzer used to score scraped lyrics further below.
sentiment_analyzer = SIA()
# IDs of monthly playlists from November 2016 to November 2017
playlist_ids = [
    "07zqCIPCroFMKSajvERGvE",
    "30PgYnoeT2PAgFNuYLR5qd",
    "1vS1nakUrLYkTd3W0yRMYe",
    "3scPGVlAn7d74uXRtFnmUC",
    "5LzBRPVAPYUssZ4ZESnRmH",
    "6hDHXewz8qBTezvONSqzyl",
    "00riJCYiVJ1yptAXtv2h6k",
    "0HxFI5dOlKztf38T9sa0cF",
    "7EFWm7Mjy6GLJHOEgKEblM",
    "6YAG0Li1BoUkmhc8iycY6l",
    "7Iw0yI71QX59zyFq0kAZTS",
    "69XTCqVzbSWPMLucSvzlLl",
    "7pRnKuQMkmntEj7Nnj94r0"
]
# Audio features, in the order the Spotify audio-features dict reports them
# (must match the value order produced by get_features).
feature_names = [
    "danceability",
    "energy",
    "key",
    "loudness",
    "mode",
    "speechiness",
    "acousticness",
    "instrumentalness",
    "liveness",
    "valence",
    "tempo",
    "type",
    "id",
    "uri",
    "track_href",
    "analysis_url",
    "duration_ms",
    "time_signature"
]
username = '1240951381'
### Write data to CSV file
# Fix: the csv module requires files opened with newline=''; without it the
# writer emits an extra blank line between rows on Windows.
data_file = open('data.csv', 'w', newline='')
writer = csv.writer(data_file)
# Write header
writer.writerow(['track_id', 'playlist_id', 'date_added', 'track_name', 'first_artist'] + feature_names + ['lyrics', 'neg', 'neu', 'pos', 'compound'])
# Collect every track of every playlist, one CSV row per track.
for playlist_id in playlist_ids:
    print('Querying playlist: ' + str(playlist_id))
    offset_n = 0
    # The playlist endpoint returns at most 100 tracks per call, so issue up
    # to two requests, advancing the offset for the second page.
    # (The unused `repeat_query` flag from the original draft was removed.)
    for i in range(2):
        # Query Spotify API
        if i > 0:
            print('Repeating query')
            offset_n += 100
        # user_playlist_tracks already returns a plain dict; the previous
        # json.dumps/json.loads round-trip was redundant and was removed.
        data = sp.user_playlist_tracks(username, playlist_id, offset=offset_n)
        # Write rows
        for track in data['items']:
            track_id = track['track']['id']
            date_added = track['added_at']
            track_name = track['track']['name']
            first_artist = track['track']['artists'][0]['name']
            # Track features
            features = get_features(track_id)
            # Try to get lyrics, if available.  Deliberately best-effort:
            # scraping fails for many tracks, so any failure leaves lyrics
            # empty (the bare `except:` was narrowed to `except Exception`).
            lyrics = ''
            try:
                lyrics = pylyrics3.get_song_lyrics(first_artist, track_name)
            except Exception:
                pass
            # Sentiment Analysis (only when lyrics were found)
            neg = None
            neu = None
            pos = None
            compound = None
            if lyrics:
                snt = sentiment_analyzer.polarity_scores(lyrics)
                neg = snt['neg']
                neu = snt['neu']
                pos = snt['pos']
                compound = snt['compound']
            writer.writerow([track_id, playlist_id, date_added, track_name, first_artist] + features + [lyrics] + [neg, neu, pos, compound])
        # Special case: API limit is 100 tracks, so we need a second request
        # for playlists that have over 100 tracks
        if data['total'] < 100:
            break
print('Done querying')
data_file.close()
|
from django.db import models
from django.contrib.auth.models import User
from django.db.models.enums import Choices
# Create your models here.
class Item(models.Model):
    """A single to-do item belonging to one user."""

    # 'C' -> Completed, 'P' -> Pending
    status_choices = [('C', "Completed"), ('P', "Pending")]
    # Priorities 1-5, rendered as keycap emoji in forms/admin.
    priority_choices = [
        (1, '1️⃣'), (2, '2️⃣'), (3, '3️⃣'), (4, '4️⃣'), (5, '5️⃣'),
    ]

    user = models.ForeignKey(to=User, on_delete=models.CASCADE)
    title = models.CharField(max_length=50)
    status = models.CharField(max_length=2, choices=status_choices, default='P')
    priority = models.IntegerField(choices=priority_choices, default=1)

    def __str__(self):
        return self.title
|
# Finding the k minimum-sum pairs from two sorted arrays
def k_min_pairs(array1, array2, k):
    """Return up to ``k`` pairs ``(a, b)``, a from array1 and b from array2,
    with the smallest sums, in ascending-sum order.

    Both inputs are assumed sorted ascending.  ``index[j]`` tracks, for each
    element of array1, the next unused position in array2; every step picks
    the minimum-sum candidate across all pointers and advances that pointer.

    Fixes over the original draft: it never advanced ``index`` and never
    decremented ``k`` (an infinite loop), and it discarded its results.
    Returns fewer than k pairs when fewer combinations exist.
    """
    n1 = len(array1)
    n2 = len(array2)
    index = [0] * n1
    pairs = []
    while k > 0:
        minsum = None
        idx = -1
        for j in range(n1):
            if index[j] < n2:
                s = array1[j] + array2[index[j]]
                if minsum is None or s < minsum:
                    idx = j
                    minsum = s
        if idx == -1:
            # every candidate pair has been consumed
            break
        pairs.append((array1[idx], array2[index[idx]]))
        index[idx] += 1
        k -= 1
    return pairs


array1 = [1, 2, 3, 4]
array2 = [4, 5, 6, 7]
k = 2
print(k_min_pairs(array1, array2, k))
|
import os
import sys
import discord
from discord.ext import commands
import DiscordUtils
import sqlite3
import json
import colorama
from colorama import init, Fore
if os.name == "nt": # Windows users needs this option
    # colorama must translate ANSI escape sequences on Windows consoles
    init(convert=True)
###################################
# Bot configuration: fill these in before running.
TOKEN = "Your bot token goes here"
PREFIX = "The prefix you want"
###################################
# The members intent is required so join/leave events carry full member data.
intents = discord.Intents.default()
intents.members = True
bot = commands.AutoShardedBot(command_prefix=PREFIX, intents=intents)
# DiscordUtils tracker that attributes each join to the invite that was used.
tracker = DiscordUtils.InviteTracker(bot)
###################################
@bot.event
async def on_ready():
    # Prime the invite cache so later joins can be attributed to an invite.
    await tracker.cache_invites()
    print('Bot Ready')
@bot.event
async def on_invite_create(invite):
    # Keep the tracker's cache in sync when a new invite is created.
    await tracker.update_invite_cache(invite)
@bot.event
async def on_guild_join(guild):
    # Cache the invites of a guild the bot has just joined.
    await tracker.update_guild_cache(guild)
@bot.event
async def on_invite_delete(invite):
    # Drop a deleted invite from the tracker's cache.
    await tracker.remove_invite_cache(invite)
@bot.event
async def on_guild_remove(guild):
    # Drop a guild's cached invites when the bot leaves it.
    await tracker.remove_guild_cache(guild)
@bot.command()
async def invites(ctx, member: discord.Member = None):
    """Report how many real / leaved / fake invites a member has."""
    if member is None:
        member = ctx.author
    with open("main.json", "r") as enter:
        c = json.load(enter)
    try:
        invite = dict(c[str(ctx.guild.id)][str(member.id)])
    except KeyError:
        await ctx.send("This user doesn't have invites")
        return
    # Status codes per invited member: "1" = real, "2" = left again, "3" = fake.
    statuses = list(invite.values())
    real = statuses.count("1")
    leaved = statuses.count("2")
    fake = statuses.count("3")
    embed = discord.Embed(
        title = f"{member.name}'s Invites",
        description = f"```\nReal: {real}\nLeaved: {leaved}\nFake: {fake}\n```"
    )
    await ctx.send(embed=embed)
@bot.event
async def on_member_remove(member):
    # Mark the leaving member as "2" (left) in the persisted invite records.
    with open("main.json", "r") as enter:
        c = json.load(enter)
    for i in list(c.values()):
        # NOTE(review): each value of c appears to be a per-guild dict keyed by
        # inviter id; list(i)[0] takes only the FIRST inviter key, so members
        # recruited by any other inviter are never marked — looks like a bug,
        # left untouched here (confirm against on_member_join's layout).
        i = list(i)[0]
        try:
            c[str(member.guild.id)][i][str(member.id)] = "2"
        except:
            # guild/inviter entry missing for this member: nothing to update
            pass
    with open("main.json", "w") as out:
        json.dump(c, out, indent=4)
@bot.event
async def on_member_join(member):
    """
    CODE
    1: REAL
    2: LEAVED
    3: FAKE
    """
    # Resolve which invite (and therefore which inviter) brought this member.
    inviter = await tracker.fetch_inviter(member)
    with open("main.json", "r") as enter:
        c = json.load(enter)
    try:
        # Member already recorded under this inviter: reset them to "real".
        user_invite = c[str(member.guild.id)][str(inviter.id)][str(member.id)]
        c[str(member.guild.id)][str(inviter.id)][str(member.id)] = "1"
        with open("main.json", "w") as out:
            json.dump(c, out, indent=4)
        return
    except Exception as e:
        # First sighting: create the guild and inviter levels on demand.
        try:
            a = c[str(member.guild.id)]
        except Exception as e:
            c[str(member.guild.id)] = {}
        try:
            a = c[str(member.guild.id)][str(inviter.id)]
        except Exception as e:
            c[str(member.guild.id)][str(inviter.id)] = {}
        c[str(member.guild.id)][str(inviter.id)][str(member.id)] = "1"
        with open("main.json", "w") as out:
            json.dump(c, out, indent=4)
###################################
if __name__ == '__main__':
    # Start the bot; blocks until the process is stopped.
    bot.run(TOKEN)
# Author: Sam Crigler
from __future__ import print_function
import sys
import time
def clear():
    """Clear the terminal via the ANSI 'erase display' and 'cursor home' codes."""
    # written to stderr so it is not captured with the game's stdout output
    sys.stderr.write("\x1b[2J" "\x1b[H")
def greeting():
    """Show the title screen and return the command the user typed.

    Expected commands are 'p' (play) and 'e' (exit); anything else simply
    redisplays the menu from the main loop.
    """
    clear()
    print("TIC TAC TOE\n")
    print("1. X goes first")
    print("2. Board positions start at 0\n")
    print("Example: row 0 and col 0 is top-left, row 2 col 2 is bottom-right\n")
    print("'p' to play")
    print("'e' to exit\n")
    # Python 2 input: returns the raw line the user typed
    cmd = raw_input()
    return cmd
def printBoard(board):
    """Render the board: -1 -> 'x', 1 -> 'o', empty (0) -> '#'."""
    marks = {-1: "x", 1: "o"}
    for row in board:
        for cell in row:
            print(marks.get(cell, "#"), sep=' ', end=' ')
        print("")
    print("")
def validateMove(pos, player, board):
    """Return True when ``pos`` = (row, col) lands on an empty in-bounds cell.

    Fix: the Python-2-only tuple-parameter signature
    ``def validateMove((row, col), ...)`` was removed in Python 3 (PEP 3113);
    it is replaced with an explicit unpack.  Call sites already pass a tuple,
    so they are unchanged.  ``player`` is unused but kept for compatibility.
    """
    row, col = pos
    if row < 0 or row > 2:
        return False
    if col < 0 or col > 2:
        return False
    # cell already occupied
    if board[row][col] != 0:
        return False
    return True
def move(pos, player, board):
    """Write the current player's mark at ``pos`` = (row, col).

    X (player=True) writes -1, O (player=False) writes 1; the board is
    mutated in place and also returned, as before.

    Fix: the Python-2-only tuple-parameter signature ``def move((row, col),
    ...)`` was removed in Python 3 (PEP 3113); unpack explicitly instead.
    """
    row, col = pos
    # The == comparisons are deliberate: a non-bool player leaves the cell
    # untouched, exactly as the original behaved.
    if player == True:
        board[row][col] = -1
    if player == False:
        board[row][col] = 1
    return board
def play(board, turn):
    """Run the interactive game loop; return the winner code (-1, 1, or 0).

    ``turn`` is True while it is X's move.  Loops until someone wins or the
    board fills up, prompting for a move each pass and rejecting invalid ones.
    """
    clear()
    win = winner(board)
    while win == 0 and full(board) == False:
        printBoard(board)
        (row, col) = getMove()
        valid = validateMove((row,col), turn, board)
        if not valid:
            print("\nINVALID MOVE\n")
            # pause so the player can read the message before the redraw
            time.sleep(2)
            clear()
            continue
        board = move((row, col), turn, board)
        turn = not turn
        win = winner(board)
        clear()
    return winner(board)
def full(board):
    """Return True when no empty (0) cell remains on the board."""
    return all(cell != 0 for row in board for cell in row)
def winner(board):
    """Return 1 if O has three in a row, -1 if X does, else 0.

    Cells hold -1 (x), 1 (o), or 0 (empty); a completed line sums to +/-3.
    Lines are examined in the same order as before: rows, columns, diagonals.
    """
    lines = [list(row) for row in board]
    for c in range(3):
        lines.append([board[0][c], board[1][c], board[2][c]])
    lines.append([board[0][0], board[1][1], board[2][2]])
    lines.append([board[0][2], board[1][1], board[2][0]])
    for line in lines:
        total = sum(line)
        if total == 3:
            return 1
        if total == -3:
            return -1
    return 0
def valid(x):
    """Return True when x is strictly between 0 and 3 (i.e. 1 or 2)."""
    # NOTE(review): rejects 0 even though board indices start at 0, and this
    # helper appears unused in the file; behavior kept exactly as before.
    return 0 < x < 3
def getMove():
    """Prompt for a row and a column; return (-1, -1) on non-numeric input.

    (-1, -1) is deliberately out of range so validateMove rejects it.
    """
    row = -1
    col = -1
    try:
        # Python 2 input; non-integer text makes int() raise ValueError
        row = int(raw_input("row: "))
        col = int(raw_input("col: "))
    except ValueError:
        return(-1, -1)
    return(row,col)
def printWinner(winner, board):
    """Show the final board and announce the result ('X', 'O', or a draw)."""
    printBoard(board)
    names = {-1: "X", 1: "O"}
    # 0 (no winner on a full board) falls through to the draw message
    symbol = names.get(winner, "The cat")
    print(symbol, "wins!", sep=' ')
# main function
if __name__ == "__main__":
    # Menu loop: each pass shows the greeting, then plays a game or exits.
    while True:
        cmd = greeting()
        # x = True, o = False
        turn = True
        # tic tac toe board (0 = empty cell)
        board = [[0,0,0],[0,0,0],[0,0,0]]
        if cmd == 'p':
            result = play(board, turn)
            printWinner(result, board)
            # leave the result on screen briefly before re-showing the menu
            time.sleep(4)
        elif cmd == 'e':
            sys.exit()
|
from __future__ import annotations
from enum import Enum
from typing import Any, Mapping, Optional, Sequence, Tuple, Union
import torch
from torch.utils._pytree import tree_flatten
from ._tv_tensor import TVTensor
class BoundingBoxFormat(Enum):
    """[BETA] Coordinate format of a bounding box.

    Available formats are

    * ``XYXY``: two corner points, (x1, y1, x2, y2)
    * ``XYWH``: top-left corner plus width and height
    * ``CXCYWH``: box center plus width and height
    """

    XYXY = "XYXY"
    XYWH = "XYWH"
    CXCYWH = "CXCYWH"
class BoundingBoxes(TVTensor):
    """[BETA] :class:`torch.Tensor` subclass for bounding boxes.
    .. note::
        There should be only one :class:`~torchvision.tv_tensors.BoundingBoxes`
        instance per sample e.g. ``{"img": img, "bbox": BoundingBoxes(...)}``,
        although one :class:`~torchvision.tv_tensors.BoundingBoxes` object can
        contain multiple bounding boxes.
    Args:
        data: Any data that can be turned into a tensor with :func:`torch.as_tensor`.
        format (BoundingBoxFormat, str): Format of the bounding box.
        canvas_size (two-tuple of ints): Height and width of the corresponding image or video.
        dtype (torch.dtype, optional): Desired data type of the bounding box. If omitted, will be inferred from
            ``data``.
        device (torch.device, optional): Desired device of the bounding box. If omitted and ``data`` is a
            :class:`torch.Tensor`, the device is taken from it. Otherwise, the bounding box is constructed on the CPU.
        requires_grad (bool, optional): Whether autograd should record operations on the bounding box. If omitted and
            ``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
    """
    # Metadata carried alongside the raw tensor data.
    format: BoundingBoxFormat
    canvas_size: Tuple[int, int]
    @classmethod
    def _wrap(cls, tensor: torch.Tensor, *, format: Union[BoundingBoxFormat, str], canvas_size: Tuple[int, int], check_dims: bool = True) -> BoundingBoxes:  # type: ignore[override]
        # Attach bbox metadata to a plain tensor. With check_dims, a single
        # 1D box is promoted to a (1, N) batch; any other rank is rejected.
        if check_dims:
            if tensor.ndim == 1:
                tensor = tensor.unsqueeze(0)
            elif tensor.ndim != 2:
                raise ValueError(f"Expected a 1D or 2D tensor, got {tensor.ndim}D")
        # Accept the format as a string and normalize it to the enum.
        if isinstance(format, str):
            format = BoundingBoxFormat[format.upper()]
        bounding_boxes = tensor.as_subclass(cls)
        bounding_boxes.format = format
        bounding_boxes.canvas_size = canvas_size
        return bounding_boxes
    def __new__(
        cls,
        data: Any,
        *,
        format: Union[BoundingBoxFormat, str],
        canvas_size: Tuple[int, int],
        dtype: Optional[torch.dtype] = None,
        device: Optional[Union[torch.device, str, int]] = None,
        requires_grad: Optional[bool] = None,
    ) -> BoundingBoxes:
        # Convert ``data`` to a tensor (inherited helper), then attach the
        # format/canvas_size metadata via _wrap.
        tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
        return cls._wrap(tensor, format=format, canvas_size=canvas_size)
    @classmethod
    def _wrap_output(
        cls,
        output: torch.Tensor,
        args: Sequence[Any] = (),
        kwargs: Optional[Mapping[str, Any]] = None,
    ) -> BoundingBoxes:
        # If there are BoundingBoxes instances in the output, their metadata got lost when we called
        # super().__torch_function__. We need to restore the metadata somehow, so we choose to take
        # the metadata from the first bbox in the parameters.
        # This should be what we want in most cases. When it's not, it's probably a mis-use anyway, e.g.
        # something like some_xyxy_bbox + some_xywh_bbox; we don't guard against those cases.
        flat_params, _ = tree_flatten(args + (tuple(kwargs.values()) if kwargs else ()))  # type: ignore[operator]
        first_bbox_from_args = next(x for x in flat_params if isinstance(x, BoundingBoxes))
        format, canvas_size = first_bbox_from_args.format, first_bbox_from_args.canvas_size
        if isinstance(output, torch.Tensor) and not isinstance(output, BoundingBoxes):
            output = BoundingBoxes._wrap(output, format=format, canvas_size=canvas_size, check_dims=False)
        elif isinstance(output, (tuple, list)):
            # Re-wrap every element; check_dims=False because ops may return
            # shapes that are not (N, 4)-like (e.g. unbind yields 1D rows).
            output = type(output)(
                BoundingBoxes._wrap(part, format=format, canvas_size=canvas_size, check_dims=False) for part in output
            )
        return output
    def __repr__(self, *, tensor_contents: Any = None) -> str:  # type: ignore[override]
        # Delegate to the TVTensor repr helper so metadata shows alongside data.
        return self._make_repr(format=self.format, canvas_size=self.canvas_size)
|
from django.http import JsonResponse
from django.urls import reverse
from comment.forms import CommentForm
from comment.models import Comment
def update_comment(request):
    """Validate a posted comment form, persist the comment, and reply as JSON."""
    form = CommentForm(request.POST, user=request.user)
    if not form.is_valid():
        # Surface only the first validation message to the client.
        first_error = list(form.errors.values())[0][0]
        return JsonResponse({'status': 'ERROR', 'message': first_error})
    cleaned = form.cleaned_data
    comment = Comment()
    comment.user = cleaned['user']
    comment.text = cleaned['text']
    comment.content_object = cleaned['content_object']
    comment.save()
    return JsonResponse({
        'status': 'SUCCESS',
        'username': comment.user.username,
        'comment_time': comment.comment_time.strftime('%Y-%m-%d %H:%M:%S'),
        'text': comment.text,
    })
def main():
    """Prompt for K or E (yes/no) until a valid answer is given, then echo it.

    Accepts both upper- and lower-case answers, exactly like the original
    four-way chain of ``!=`` comparisons it replaces.
    """
    vastaus = input("Vastaa K tai E: ")
    # Idiomatic membership test instead of `!= ... and != ...` chain.
    while vastaus not in ("K", "E", "k", "e"):
        print("Virheellinen syöte.")
        vastaus = input("Yritä uudelleen: ")
    print("Vastasit", vastaus)
main() |
'''
event.wait()
event.set()
event.clear()
if the flag is set the wait method doesn't do anything
标志位设定了 代表放行
if the flag is cleared, wait will block until it becomes set again
标志位被清空 代表阻塞 wait 等待放行
Any number of threads may wait for the same event
'''
import threading
import time
event = threading.Event()
def light():
    """Traffic-light driver for the module-level ``event``.

    Cycle (one tick = 0.1s): green for counts 0-10 (event set), red for
    counts 11-29 (event cleared), then reset back to green.

    Bug fixed: the reset branch used ``count > 30``, so at ``count == 30``
    the else-branch printed "green light" while the event was still
    cleared and cars remained blocked. ``count >= 30`` closes that gap.
    """
    count = 0
    event.set()  # start on green
    while True:
        if 10 < count < 30:  # red window
            event.clear()
            print("\033[41;1m red light is on ...\033[0m")
        elif count >= 30:  # red window over: green again, restart the cycle
            event.set()
            count = 0
        else:
            print("\033[1;30;42m green light is on ...\033[0m")
        time.sleep(0.1)
        count += 1
def car(name):
    """Simulate one driver reacting to the shared traffic-light ``event``."""
    while True:
        if not event.is_set():
            # Red light: announce, then block until the light turns green.
            print("[%s] see the red light"%name)
            event.wait()
            print("\033[34;1mThe light turns to the green\033[0m")
        else:
            # Green light: keep driving.
            print("[%s] running---> "%name)
            time.sleep(0.2)
# NOTE(review): this rebinds `event` to a second Event (one was already
# created above). light()/car() resolve `event` at call time, so they all
# share this new instance; the earlier one is simply discarded.
event = threading.Event()
# Start the traffic light and one car as daemonless worker threads.
l = threading.Thread(target=light,)
l.start()
car1 = threading.Thread(target=car,args=('tesla',))
car1.start()
# Generated by Django 2.1.2 on 2018-10-06 13:03
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add an optional free-text ``comments``
    column (max 500 chars) to the ``ProjectRequest`` model."""
    dependencies = [
        ('app', '0007_auto_20181006_1611'),
    ]
    operations = [
        migrations.AddField(
            model_name='projectrequest',
            name='comments',
            # blank=True: optional in forms; null=True: nullable in the DB.
            field=models.CharField(blank=True, max_length=500, null=True),
        ),
    ]
|
# 题目:输入一行字符,分别统计出其中英文字母、空格、数字和其它字符的个数。
from pip._vendor.distlib.compat import raw_input
# Read one line and tally letters, spaces, digits and other characters.
# Fixed: the original bound the input to the name `str`, shadowing the
# builtin, and used the misleading loop variable `i` for characters.
text = raw_input("请输入待统计字符串:")
letters = 0
space = 0
digit = 0
others = 0
for ch in text:
    # Each character lands in exactly one bucket (checked in this order).
    if ch.isdigit():
        digit += 1
    elif ch.isspace():
        space += 1
    elif ch.isalpha():
        letters += 1
    else:
        others += 1
print ('char = %d,space = %d,digit = %d,others = %d' % (letters,space,digit,others))
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for issues."""
# pylint: disable=missing-docstring,protected-access
import collections
import copy
import unittest
import issues
DEFAULT_USERNAME = "default_username"
REPO = "repo"
# Maps Google Code author e-mails to usernames; unknown authors fall back to
# DEFAULT_USERNAME through the defaultdict factory.
USER_MAP = collections.defaultdict(lambda: DEFAULT_USERNAME)
USER_MAP.update({
    "user@email.com": "a_uthor",
    "user2@gmail.com": "w_riter",
})
# Empty issue data map
NO_ISSUE_DATA = {}
# Comment fixtures in the Google Code export JSON shape.
COMMENT_ONE = {
    "content": "one",
    "id": 1,
    "published": "last year",
    "author": {"name": "user@email.com"},
    "updates": {
        "labels": ["added-label", "-removed-label"],
    },
}
COMMENT_TWO = {
    "content": "two",
    "id": 2,
    "published": "last week",
    "author": {"name": "user2@gmail.com"},
}
COMMENT_THREE = {
    "content": "three",
    "id": 3,
    "published": "yesterday",
    "author": {"name": "unknown@example.com"},
}
# Comment whose content needs HTML-escaping-safe handling.
HTML_COMMENT = {
    "content": "1 < 2",
    "id": 1,
    "published": "yesterday",
    "author": {"name": "unknown@example.com"},
}
COMMENTS_DATA = [
    COMMENT_ONE,
    {"content": "two", "id": 2, "published": "last week"},
    {"content": "three", "id": 3, "published": "yesterday"},
    {"content": "four", "id": 4, "published": "today"},
]
# Full issue json
ISSUE_JSON = {
    "id": 1,
    "state": "closed",
    "title": "issue_title",
    "comments": {"items": [COMMENT_ONE]},
    "labels": ["awesome", "great"],
    "published": "last year",
    "updated": "last month",
    "status": "Fixed",
    "owner": {
        "kind": "projecthosting#issuePerson",
        "name": "user@email.com",
    },
}
# Shared wrapper fixtures used by the tests below.
SINGLE_ISSUE = issues.GoogleCodeIssue(ISSUE_JSON, REPO, USER_MAP)
SINGLE_COMMENT = issues.GoogleCodeComment(SINGLE_ISSUE, COMMENT_ONE)
# NOTE(review): HTML_COMMENT is rebound here from the raw dict above to its
# GoogleCodeComment wrapper; the original dict is no longer reachable by name.
HTML_COMMENT = issues.GoogleCodeComment(SINGLE_ISSUE, HTML_COMMENT)
class GoogleCodeIssueTest(unittest.TestCase):
    """Tests for GoogleCodeIssue."""
    def testGetIssueOwner(self):
        # Report all issues coming from the person who initiated the
        # export.
        self.assertEqual(DEFAULT_USERNAME, SINGLE_ISSUE.GetOwner())
    def testGetIssueOwnerNoOwner(self):
        # An issue with no "owner" field falls back to the default username.
        issue_json = ISSUE_JSON.copy()
        del issue_json["owner"]
        issue = issues.GoogleCodeIssue(issue_json, REPO, USER_MAP)
        self.assertEqual(DEFAULT_USERNAME, issue.GetOwner())
    def testGetIssueUserOwner(self):
        # An owner e-mail missing from USER_MAP also maps to the default.
        issue_json = copy.deepcopy(ISSUE_JSON)
        issue_json["owner"]["name"] = "notauser@email.com"
        issue = issues.GoogleCodeIssue(
            issue_json, REPO, USER_MAP)
        self.assertEqual(DEFAULT_USERNAME, issue.GetOwner())
    def testGetCommentAuthor(self):
        self.assertEqual("a_uthor", SINGLE_COMMENT.GetAuthor())
    def testGetCommentDescription(self):
        # The rendered comment includes content, author/date attribution,
        # and label add/remove summaries.
        self.assertEqual(
            "```\none\n```\n\nReported by "
            "`a_uthor` on last year\n"
            "- **Labels added**: added-label\n"
            "- **Labels removed**: removed-label\n",
            SINGLE_COMMENT.GetDescription())
    # TODO(chris): Test GetCommentDescription for something with attachments.
    def testGetCommentDescription_BlockingBlockedOn(self):
        # blocking/blockedOn entries prefixed with "-" are rendered as
        # "No longer ..." lines; project prefixes are stripped to #N refs.
        blocking_data = {
            "content": "???",
            "id": 1,
            "published": "last year",
            "author": {"name": "user@email.com"},
            "updates": {
                "blocking": ["projA:1", "projB:2", "-projB:3"],
                "blockedOn": ["projA:1", "-projA:1"],
            },
        }
        blocking_comment = issues.GoogleCodeComment(SINGLE_ISSUE, blocking_data)
        self.assertEqual(
            "```\n???\n```\n\nReported by `a_uthor` on last year\n"
            "- **Blocking**: #1, #2\n"
            "- **No longer blocking**: #3\n"
            "- **Blocked on**: #1\n"
            "- **No longer blocked on**: #1\n",
            blocking_comment.GetDescription())
    def testGetCommentDescription_BlockingBlockedOn_Issue(self):
        # _FixBlockingBlockedOn should synthesize missing "updates" entries
        # on comment #0 from the issue-level blocking/blockedOn fields.
        issue_json = {
            "id": 42,
            "blockedOn" : [ {
                "projectId" : "issue-export-test",
                "issueId" : 3
            } ],
            "blocking" : [ {
                "projectId" : "issue-export-test",
                "issueId" : 2
            } ],
            "comments" : {
                "items" : [ {
                    "id" : 0,
                    "content" : "Comment #0",
                    "published": "last year",
                    # No updates. This test verifies they get added.
                }, {
                    "id" : 1,
                    "content" : "Comment #1",
                    "published": "last year",
                    "updates" : {
                        "blocking" : [ "issue-export-test:2" ]
                    },
                }, {
                    "id" : 2,
                    "content" : "Comment #2",
                    "published": "last year",
                    "updates" : {
                        "blockedOn" : [ "-issue-export-test:1", "issue-export-test:3" ],
                    },
                } ]
            }
        }
        # Definitely not initialized.
        issue_exporter = issues.IssueExporter(None, None, None, None, None)
        issue_json = issue_exporter._FixBlockingBlockedOn(issue_json)
        blocking_issue = issues.GoogleCodeIssue(issue_json, REPO, USER_MAP)
        self.assertEqual(
            "Originally reported on Google Code with ID 42\n"
            "```\nComment #0\n```\n\nReported by `None` on last year\n"
            "- **Blocked on**: #1\n",  # Inferred via magic.
            blocking_issue.GetDescription())
        json_comments = blocking_issue.GetComments()
        comment_1 = issues.GoogleCodeComment(blocking_issue, json_comments[0])
        self.assertEqual(
            "```\nComment #1\n```\n\nReported by `None` on last year\n"
            "- **Blocking**: #2\n",
            comment_1.GetDescription())
        comment_2 = issues.GoogleCodeComment(blocking_issue, json_comments[1])
        self.assertEqual(
            "```\nComment #2\n```\n\nReported by `None` on last year\n"
            "- **Blocked on**: #3\n"
            "- **No longer blocked on**: #1\n",
            comment_2.GetDescription())
    def testMergedInto(self):
        comment_data = {
            "content": "???",
            "id": 1,
            "published": "last year",
            "updates": {
                "mergedInto": "10",
            },
        }
        comment = issues.GoogleCodeComment(SINGLE_ISSUE, comment_data)
        self.assertEqual(
            "```\n???\n```\n\nReported by `None` on last year\n"
            "- **Merged into**: #10\n",
            comment.GetDescription())
    def testStatus(self):
        comment_data = {
            "content": "???",
            "id": 1,
            "published": "last year",
            "updates": {
                "status": "Fixed",
            },
        }
        comment = issues.GoogleCodeComment(SINGLE_ISSUE, comment_data)
        self.assertEqual(
            "```\n???\n```\n\nReported by `None` on last year\n"
            "- **Status changed**: `Fixed`\n",
            comment.GetDescription())
    def testIssueIdRewriting(self):
        # id_mapping rewrites "issue N"/"bug #N"/"#N" references to their
        # migrated GitHub numbers; project-qualified refs and the origin
        # header line are left untouched.
        comment_body = (
            "Originally reported on Google Code with ID 42\n"
            "issue 1, issue #2, and issue3\n"
            "bug 4, bug #5, and bug6\n"
            "other-project:7, issue other-project#8\n"
            "#914\n"
            "- **Blocked**: #111\n"
            "- **Blocking**: #222, #333\n")
        expected_comment_body = (
            "Originally reported on Google Code with ID 42\n"  # Not changed.
            "issue 111, issue #222, and issue3\n"
            "bug 4, bug #555, and bug6\n"
            "other-project:7, issue other-project#8\n"
            "#914\n"
            "- **Blocked**: #1000\n"
            "- **Blocking**: #1001, #1002\n")
        id_mapping = {
            "1": "111",
            "2": "222",
            "5": "555",
            "8": "888",  # NOTE: Not replaced bc proj-ref.
            "42": "123",  # Origin header shouldn't be replaced.
            "111": "1000",
            "222": "1001",
            "333": "1002",
        }
        comment_data = {
            "content": comment_body,
            "id": 1,
            "published": "last year",
            "updates": {
                "status": "Fixed",
            },
        }
        comment = issues.GoogleCodeComment(SINGLE_ISSUE, comment_data, id_mapping)
        if expected_comment_body not in comment.GetDescription():
            self.fail("Expected comment body not as expected:\n%s\n\nvs.\n\n%s\n" % (
                expected_comment_body, comment.GetDescription()))
    def testGetHtmlCommentDescription(self):
        self.assertIn("```\n1 < 2\n```", HTML_COMMENT.GetDescription())
    def testTryFormatDate(self):
        # Unparseable strings pass through; ISO timestamps are normalized.
        self.assertEqual("last year", issues.TryFormatDate("last year"))
        self.assertEqual("2007-02-03 05:58:17",
                         issues.TryFormatDate("2007-02-03T05:58:17.000Z:"))
        self.assertEqual("2014-01-05 04:43:15",
                         issues.TryFormatDate("2014-01-05T04:43:15.000Z"))
    def testWrapText(self):
        # Wrapping never splits inside a word, only at whitespace.
        self.assertEqual(issues.WrapText("0123456789", 3),
                         "0123456789")
        self.assertEqual(issues.WrapText("01234 56789", 3),
                         "01234\n56789")
        self.assertEqual(issues.WrapText("a b c d e f g h", 4),
                         "a b c\nd e f\ng h")
    def testLoadUserData(self):
        # Verify the "identity dictionary" behavior.
        user_data_dict = issues.LoadUserData(None, None)
        self.assertEqual(user_data_dict["chrs...@goog.com"], "chrs...@goog.com")
if __name__ == "__main__":
    # buffer=True captures stdout/stderr during tests and only shows it on failure.
    unittest.main(buffer=True)
|
import os
import datetime
import time
import requests
import os.path
import sys
def KML_ScriptGeneration(Iterations):
    """Write ``generated_script.kml`` with one GroundOverlay triple
    (temperatures, temperature isovalues, wind streams) for every forecast
    step 0..Iterations inclusive.

    Args:
        Iterations: index of the last forecast step to include.

    Bugs fixed:
    - The loop previously read the *global* ``NbrPrevisions`` and ignored
      the ``Iterations`` parameter (NameError when called standalone).
    - The output file is now managed with ``with`` instead of a bare
      open/close pair.
    The three near-identical overlay sections are factored into a helper.
    """

    def _write_overlay(out, step, name, image_suffix):
        # One <GroundOverlay>: a one-day TimeSpan anchored at 2017-11-(16+step)
        # plus a fixed LatLonBox covering France.
        out.write("<GroundOverlay> \n")
        out.write("<TimeSpan>\n")
        out.write("<begin>2017-11-" + str(16 + step) + "</begin>\n")
        out.write("<end>2017-11-" + str(16 + step + 1) + "</end>\n")
        out.write("</TimeSpan> \n")
        out.write("<name>" + name + "</name> \n")
        out.write("<Icon> \n")
        out.write("<href><![CDATA[../Images/MesDonnees_" + str(step) + ".nc_" + image_suffix + ".png]]></href> \n")
        out.write("</Icon> \n")
        out.write("<LatLonBox> \n")
        out.write("<north>55.4</north> \n")
        out.write("<south>37.5</south> \n")
        out.write("<east>16</east> \n")
        out.write("<west>-12</west> \n")
        out.write("<rotation>0</rotation> \n")
        out.write("</LatLonBox> \n")
        out.write("</GroundOverlay> \n")

    with open("generated_script.kml", "w") as script_kml:
        script_kml.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?> \n")
        script_kml.write("<kml xmlns=\"http://www.opengis.net/kml/2.2\"> \n")
        script_kml.write("<Folder> \n")
        script_kml.write("<name>Temperature and wind informations at time 0 </name> \n")
        script_kml.write("<description>Ground overlay on France of temps and wind streams at time : 0 </description> \n")
        for step in range(Iterations + 1):
            _write_overlay(script_kml, step, "Temperatures at time " + str(step), "temp")
            _write_overlay(script_kml, step, "Isovalues of temperatures at time " + str(step) + " ", "isovaleurs")
            _write_overlay(script_kml, step, "Wind streams at time " + str(step), "courants")
        script_kml.write("</Folder> \n")
        script_kml.write("</kml> \n")
#=================================================================================#
if __name__ == "__main__":
    os.system('echo ')
    os.system('echo SCRIPT DE GENERATION AUTOMATIQUE DE DONNES METEOS ')
    os.system('echo ')
    # Lancement du script avec arguments
    # Mode 1 (one arg): fetch and process forecast steps 0..N, then build the
    # KML index. Note: all shelling-out is via os.system; failures are ignored.
    if (len(sys.argv) == 2 ):
        os.system('echo . === Recuperation et traitement des donnees de meteo France ===')
        os.system('echo ')
        NbrPrevisions = int(sys.argv[1])
        i=0
        #Importation des donnees
        while i<NbrPrevisions+1 :
            os.system('echo . == Traitement de la donnee ' + str(i) + ' ==')
            os.system('python RequeteAromeHD.py ' + str(i) + ' SP1')
            os.system('echo ')
            os.system('mv *.grib2 ../Donnees')
            # Exportation au format nc
            os.system('../wgrib2 ../Donnees/*.grib2 -netcdf ../Donnees/MesDonnees_' + str(i) + '.nc')
            #Suppression de la donnee grib2 parasite :
            os.system('rm ../Donnees/*.grib2')
            os.system('echo ')
            #Traitement sur Paraview
            # Each pvpython call renders one PNG layer from the netCDF step.
            os.system('echo 1. Generation des temperatures')
            os.system('pvpython temperatures.py ../Donnees/MesDonnees_'+str(i)+'.nc')
            os.system('echo 2. Generation des isovaleurs')
            os.system('pvpython isovaleurs.py ../Donnees/MesDonnees_'+str(i)+'.nc')
            os.system('echo 3. Generation des lignes de courant')
            os.system('pvpython lignes_courant.py ../Donnees/MesDonnees_'+str(i)+'.nc')
            os.system('mv ../Donnees/*.png ../Images')
            os.system('echo ')
            i+=1
        os.system('')
        os.system('echo . === FIN DU SCRIPT ===')
        KML_ScriptGeneration(NbrPrevisions)
        os.system('mv generated_script.kml ../KML')
        # NOTE(review): exits with status 1 even on success -- confirm intended.
        sys.exit(1)
    #Exportation au format nc
    # Mode 2 (two args): fetch and process a single forecast step N.
    if(len(sys.argv)==3):
        NbrPrevisions = int(sys.argv[1])
        os.system('echo === Recuperation de la donnee '+str(NbrPrevisions)+' de meteo France ===')
        os.system('python RequeteAromeHD.py '+str(NbrPrevisions)+' SP1')
        os.system('mv *.grib2 ../Donnees')
        os.system('echo ')
        # Exportation au format nc
        os.system('echo === Exportation de la donnee au format nc ===')
        os.system('echo ')
        os.system('../wgrib2 ../Donnees/*.grib2 -netcdf ../Donnees/MesDonnees_'+str(NbrPrevisions)+'.nc')
        os.system('echo ')
        #Suppression des donnees grib2 parasites
        os.system('rm ../Donnees/*.grib2')
        # Traitement sur Paraview
        # permet d'avoir des .png
        os.system('echo ')
        os.system('echo === Generation de l image dans le dossier Images ===')
        os.system('echo ')
        os.system('echo 1. Generation des temperatures')
        os.system('pvpython temperatures.py ../Donnees/MesDonnees_'+str(NbrPrevisions)+'.nc')
        os.system('echo 2. Generation des isovaleurs')
        os.system('pvpython isovaleurs.py ../Donnees/MesDonnees_'+str(NbrPrevisions)+'.nc')
        os.system('echo 3. Generation des lignes de courant')
        os.system('pvpython lignes_courant.py ../Donnees/MesDonnees_'+str(NbrPrevisions)+'.nc')
        os.system('mv ../Donnees/*.png ../Images')
        os.system('echo ')
        os.system('echo === Fin du script ===')
        sys.exit(1)
    #Lancement du script sans arguments : affichage de base
    # Mode 3 (no args): process just the default step 0 into MesDonnees.nc.
    if (len(sys.argv) == 1):
        #Importation des donnees
        os.system('echo === Recuperation des donnees de meteo France ===')
        os.system('python RequeteAromeHD.py 0 SP1')
        os.system('mv *.grib2 ../Donnees')
        os.system('echo ')
        # Exportation au format nc
        os.system('echo === Exportation des donnees au format nc ===')
        os.system('echo ')
        os.system('../wgrib2 ../Donnees/*.grib2 -netcdf ../Donnees/MesDonnees.nc')
        os.system('echo ')
        #Suppression des donnees grib2 parasites
        os.system('rm ../Donnees/*.grib2')
        # Traitement sur Paraview
        # permet d'avoir des .png
        os.system('echo ')
        os.system('echo === Generation des images dans le dossier Images ===')
        os.system('echo ')
        os.system('echo 1. Generation des temperatures')
        os.system('pvpython temperatures.py ../Donnees/MesDonnees.nc')
        os.system('echo 2. Generation des isovaleurs')
        os.system('pvpython isovaleurs.py ../Donnees/MesDonnees.nc')
        os.system('echo 3. Generation des lignes de courant')
        os.system('pvpython lignes_courant.py ../Donnees/MesDonnees.nc')
        os.system('mv ../Donnees/*.png ../Images')
        os.system('echo ')
        os.system('echo === Fin du script ===')
        sys.exit(1)
|
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# There are two sorted arrays nums1 and nums2 of size m and n respectively.
# Find the median of the two sorted arrays. The overall run time complexity should be O(log (m+n)).
#
# Example 1:
# nums1 = [1, 3]
# nums2 = [2]
#
# The median is 2.0
# Example 2:
# nums1 = [1, 2]
# nums2 = [3, 4]
# The median is (2 + 3)/2 = 2.5
class Solution:
    def findMedianSortedArrays(self, nums1, nums2):
        """
        Merge the two sorted lists and return the median of the result.

        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: float

        Runs in O(m+n) time and space (the classic merge approach; a true
        O(log(m+n)) solution would binary-search a partition instead).

        Cleanup: the original's separate ``total_length == 1`` branch was
        redundant -- the odd-length branch already returns merged[0] when
        the total length is 1.
        """
        merged = []
        i, j = 0, 0
        # Standard two-pointer merge of the sorted inputs.
        while i < len(nums1) and j < len(nums2):
            if nums1[i] < nums2[j]:
                merged.append(nums1[i])
                i += 1
            else:
                merged.append(nums2[j])
                j += 1
        # Append whichever tail remains (at most one of these is non-empty).
        merged.extend(nums1[i:])
        merged.extend(nums2[j:])
        total = len(merged)
        mid = (total - 1) // 2
        if total % 2:
            return merged[mid]
        return (merged[mid] + merged[mid + 1]) / 2
if __name__ == '__main__':
    # Smoke tests: both lines should print 2.5.
    print(Solution().findMedianSortedArrays([1,3], [2,5]))
    print(Solution().findMedianSortedArrays([], [2,3]))
# 2080 / 2080 test cases passed.
# Status: Accepted
# Runtime: 252 ms
# Accepted Solutions Runtime Distribution
#
# Sorry. We do not have enough accepted submissions to show runtime distribution chart.
# Invite friends to challenge Median of Two Sorted Arrays !
# hmmmm, it's my first time to see this!!!!! |
# DEVELOPER: https://github.com/undefinedvalue0103/nullcore-1.0/
import requests, time
from collections import namedtuple
# Pluggable logger used by call() and the longpoll helpers; defaults to print.
log_func=print
# OAuth token used by call(); must be set by the client before any API call.
access_token = None
# Running total of HTTP response bytes received, reported by bytes_str().
bytes_received = 0
# True: API results are plain dicts; False: they are wrapped in namedtuples.
dict_return = True
__version__ = 'alpha 0.0.2'
def call(method, diargs=None, **kwargs):
    """Invoke VK API *method* and return the parsed response payload.

    Args:
        method: VK API method name, e.g. 'users.get'.
        diargs: optional dict of request parameters; applied after **kwargs,
            so it wins on key clashes (same precedence as the original).
        **kwargs: request parameters passed as keywords.

    Returns:
        The 'response' payload (dict, or namedtuple when ``dict_return`` is
        False), the 'error' payload on an API error, a synthetic error
        payload on a parse failure, or None when the HTTP request fails.

    Raises:
        TypeError: if the module-level ``access_token`` has not been set.
    """
    # Fixed: identity comparison with None (was `== None`).
    if access_token is None:
        raise TypeError('Token not specified')
    args = {}
    args.update(kwargs)
    # Fixed: mutable default argument `diargs={}` replaced by a None sentinel.
    if diargs:
        args.update(diargs)
    args['access_token'] = access_token
    args['v'] = '5.65'
    try:
        r = requests.get('https://api.vk.com/method/'+method, params=args)
    except Exception as e:
        # Best-effort: report the transport failure and signal it with None.
        print(e)
        return None
    global bytes_received
    bytes_received += len(r.content)
    try:
        d = r.json()
        if 'error' in d:
            res = d['error']
            log_func('VKERROR: \x23%s: %s'%(res['error_code'], res['error_msg']))
        else:
            res = d['response']
        if dict_return: return res
        else: return namedtuple('VKData', res.keys())(**res)
    except Exception as e:
        # Parse failure: return a synthetic error payload instead of raising.
        if dict_return: return {'error_msg': type(e).__name__, 'error_code': -1}
        else: return namedtuple('VKData', ['error_msg', 'error_code'])(error_msg=type(e).__name__, error_code=-1)
def get_bytes():
    """Return the total number of HTTP response bytes received so far."""
    return bytes_received
def login(username, password):
    """Obtain a VK OAuth access token via the password grant.

    Returns the token string on success, the raw error response dict on an
    OAuth error, or None if the HTTP request itself failed.
    """
    args = {}
    args['username'] = username
    args['password'] = password
    args['grant_type'] = 'password'
    # NOTE(review): hardcoded client_id/client_secret appear to be the widely
    # published official VK client credentials -- confirm before shipping.
    args['client_id'] = '3697615'
    args['client_secret'] = 'AlVXZFMUqyrnABp8ncuU'
    try:
        r = requests.get('https://oauth.vk.com/token', params=args).json()
    except Exception as e:
        print(e)
        return None
    if 'access_token' in r: return r['access_token']
    else: return r
def login_from_file(path):
    """Log in from a credentials file and return an access token.

    File layout: line 1 login, line 2 password, optional line 3 a cached
    40-char access token. A cached token is validated with a test API call;
    otherwise a password login is performed and the fresh token is written
    back to the file. Returns the token string on success, or the raw
    error payload from login() on failure.

    Bugs fixed versus the original:
    - ``vk.call`` was a NameError (this *is* the vk module); now calls
      ``call`` directly.
    - ``'\\n'.join(a, b, c)`` was a TypeError: str.join takes one iterable.
    - The credentials file handle was never closed.
    - ``access_token`` was only restored when validation succeeded, leaving
      a stale token installed on the failure path.
    """
    global access_token
    with open(path, 'r') as fh:
        lines = fh.read().split('\n')
    _login = lines[0]
    _password = lines[1]
    if len(lines) > 2 and len(lines[2]) == 40:
        _token = lines[2]
        saved_token = access_token
        # Temporarily install the cached token and probe the API with it.
        access_token = _token
        d = call('users.get')
        access_token = saved_token
        if type(d) == list and len(d) == 1 and 'first_name' in d[0]:
            return _token
    ad = login(_login, _password)
    if type(ad) == str and len(ad) == 40:
        # Cache the fresh token on the file's third line for next time.
        with open(path, 'w') as f:
            f.write('\n'.join([_login, _password, ad]))
        return ad
    return ad
def bytes_str():
    """Format the running byte counter as '  Xmb   Ykb   Zb'."""
    remaining = get_bytes()
    unit_bytes = remaining % 1024
    remaining = int(remaining / 1024)
    unit_kb = remaining % 1024
    unit_mb = int(remaining / 1024)
    return '%4smb %4skb %4sb'%(unit_mb, unit_kb, unit_bytes)
'''
import vk
vk.access_token = vk.login_from_file('../vk/auth/justm0n1ka')
d = vk.longpoll_get()
for e in d:
print(vk.longpoll_codes.get_name(e[0]))
'''
##########################
class longpoll_codes:
    """Named constants for VK long-poll event codes.

    The class is replaced below by a singleton instance, so callers use
    ``longpoll_codes.new_message`` etc. directly.
    """
    def __init__(self):
        self.flag_modify = 1
        self.flag_set = 2
        self.flag_reset = 3
        self.new_message = 4
        self.edit_message = 5
        self.read_all_input = 6
        self.read_all_output = 7
        self.friend_now_online = 8
        self.friend_now_offline = 9
        self.flag_dialog_reset = 10
        self.flag_dialog_edit = 11
        self.flag_dialog_set = 12
        self.messages_delete_all = 13
        self.messages_revieve = 14
        self.chat_edited = 51
        self.user_typing = 61
        self.chat_typing = 62
        self.counter_changed = 80
        self.notify_changed = 114
    def get_name(self, code):
        """Return the attribute name for *code*, or *code* itself if unknown."""
        for attr in dir(self):
            value = self.__getattribute__(attr)
            if type(value) == int and value == code:
                return attr
        return code
longpoll_codes = longpoll_codes()
class longpoll_messages_flags:
    # Attribute-style access to VK message-flag bitmask values, e.g.
    # ``longpoll_messages_flags.unread`` -> 1. Implemented by overriding
    # __getattribute__, so EVERY lookup on the singleton goes through the
    # flags table -- unknown names raise KeyError, not AttributeError.
    def __getattribute__(self, k):
        # Asking for 'flags' returns the whole name -> bit table.
        # (Note: 'importnant' is a typo, but it is a live dict key -- do not
        # "fix" it without updating callers.)
        if k == 'flags': return {
            'unread': 1,
            'outbox': 2,
            'replied': 4,
            'importnant': 8,
            'chat': 16,
            'friends': 32,
            'spam': 64,
            'deleted': 128,
            'fixed': 256,
            'media': 512,
            'hidden': 65536,
            'delete_for_all': 131072}
        # Deprecated flags are still served, but logged first via the
        # module-level log_func.
        if k in ['chat', 'fixed', 'media']:
            log_func('[longpoll][flags] deprecated message flag "%s"'%k)
        return self.flags[k]
longpoll_messages_flags = longpoll_messages_flags()
# Long-poll connection state shared by the longpoll_* helpers below.
__longpoll__ = {
    'enabled': False,  # True once a server/key/ts triple has been obtained
    'connection': {
        'key': None,
        'server': None,
        'ts': None
    }
}
def get_longpoll_server():
    """Ask VK for long-poll connection info.

    Returns (True, payload) when the payload carries a 'key', otherwise
    (False, payload).
    """
    data = call('messages.getLongPollServer', lp_version=2)
    ok = 'key' in data
    return (ok, data)
def update_longpoll_server():
    """Refresh the module-level ``__longpoll__`` state from the API.

    Returns (True, connection_dict) on success and (False, error_payload)
    on failure, flipping ``__longpoll__['enabled']`` accordingly.

    Consistency fix: the leftover debug ``print()`` of the connection data
    now goes through the module's configurable ``log_func`` like every
    other diagnostic in this file.
    """
    global __longpoll__
    ok, payload = get_longpoll_server()
    if not ok:
        __longpoll__['enabled'] = False
        return (False, payload)
    __longpoll__['enabled'] = True
    __longpoll__['connection'] = payload
    log_func(__longpoll__['connection'])
    return (True, __longpoll__['connection'])
def longpoll_get():
    """Block on the VK long-poll server (wait=25s) and return a list of updates.

    Recovers transparently from 'failed' codes 1 (stale ts) and 2 (expired
    key) by retrying; codes 3 and 4 are logged and yield an empty list.

    NOTE(review): on the success path the stored 'ts' is never advanced from
    the response, so repeated calls may re-deliver the same events -- confirm
    against the VK long-poll protocol.
    """
    global __longpoll__
    if not __longpoll__['enabled']:
        update_longpoll_server()
    data = requests.get('https://{server}?act=a_check&key={key}&ts={ts}&wait=25&mode=106&version=3'.format(**__longpoll__['connection'])).json()
    if 'failed' in data:
        fcode = data['failed']
        if fcode == 1:
            # Event history out of date: resync the timestamp and retry.
            __longpoll__['connection']['ts'] = data['ts']
            log_func('[longpoll] event history went out of date, retrying with new timestamp')
            return longpoll_get()
        elif fcode == 2:
            # Key expired: fetch a fresh server/key/ts triple and retry.
            update_longpoll_server()
            log_func('[longpoll] key active period expired, updating key and retrying')
            return longpoll_get()
        elif fcode == 3:
            log_func('[longpoll] user authorization failed')
            return []
        elif fcode == 4:
            log_func('[longpoll] invalid version: must be {min_version} > x > {max_version}'.format(**data))
            return []
    return data['updates']
|
import pyproj
import random
import math
import ogr
from plotter import Plotter
from geometry import Polygon
from geometry import Polyline
from geometry import Polypoint
from geometry import Point
from da_manager import DaData
from my_utils import DaHeatmap
from my_utils import Filter
# from transit_stops import TransitStops
from data_manager import DataManager
from intersect import Intersect
from score import Score
from constants import BASE
# NOTE: this file is Python 2 (print statements below).
# UTM zone 13N projection used to convert lon/lat to metres.
PROJ = pyproj.Proj("+init=EPSG:32613")
DECAY = Filter()
print "import finished"
class Runner(object):
def __init__(self):
pass
def make_test_polygon(self):
"""
This test method makes a randomly placed diamond shaped polygon
"""
center_lat = 52.125
center_lng = -106.650
center_x, center_y = PROJ(center_lng, center_lat)
random_offset_x = random.randint(-2000, 2000)
random_offset_y = random.randint(-2000, 2000)
center_x += random_offset_x
center_y += random_offset_y
size = 100
poly_points = [
(-size, 0),
(0, size),
(size, 0),
(0, -size),
(-size, 0),
]
p = Polygon()
for item in poly_points:
x = center_x + item[0]
y = center_y + item[1]
print "test polygon point", x, y
p.add_point(Point(x, y))
return p
def make_test_point(self):
center_lat = 52.125
center_lng = -106.650
center_x, center_y = PROJ(center_lng, center_lat)
point = Point(center_x, center_y)
return point
def make_test_poly_star(self):
center_lat = 52.125
center_lng = -106.650
p = Polygon()
center_x, center_y = PROJ(center_lng, center_lat)
center_x += random.randint(-1000, 1000)
center_y += random.randint(-1000, 1000)
start_angle = random.randint(0, 20)
count = 0
for a in xrange(360/20):
r = math.radians(start_angle + a*20)
if count % 2:
z = 1000
else:
z = 400
x = z * math.cos(r)
y = z * math.sin(r)
x = x + center_x
y = y + center_y
count += 1
p.add_point(Point(x, y))
p.set_attribute('fill_opacity', 0.1)
return p
def test_plot_random(self):
"""
Test intersection by plotting random stars
"""
plotter = Plotter()
point = self.make_test_point()
poly = []
for i in xrange(2):
# p = self.make_test_polygon()
p = self.make_test_poly_star()
plotter.add_polygon(p)
poly.append(p)
intersection = poly[0].intersect(poly[1])
for p in intersection:
p.set_attribute("fill_opacity", 1.0)
plotter.add_polygon(p)
polypoint = Polypoint()
polypoint.add_point(point)
polypoint.set_attribute("radius", 50)
polypoint.set_attribute("fillOpacity", 1)
plotter.add_polypoint(polypoint)
print "intersects", poly[0].intersects(poly[1])
print "point intersect", poly[0].intersects(point)
print "point intersect", poly[1].intersects(point)
plotter.plot("temp/maps/test_random_intersect.html")
def test_plot_random2(self):
"""
Test intersection by plotting random stars
"""
plotter = Plotter()
poly = []
for i in xrange(3):
# p = self.make_test_polygon()
p = self.make_test_poly_star()
plotter.add_polygon(p)
poly.append(p)
# intersection = poly[0].intersect(poly[1])
# for p in intersection:
# p.add_attribute("fill_opacity", 1.0)
# plotter.add_polygon(p)
plotter.plot("temp/maps/test_random_2.html")
def test_plot_das(self):
das = DaData()
plotter = Plotter()
da_id_list = das.get_da_id_list()
for da_id in da_id_list:
polygon = das.get_polygon(da_id)
polygon.set_attribute("fill_opacity", 0.1)
plotter.add_polygon(polygon)
pop = das.get_population(da_id)
centroid = das.get_centroid(da_id)
plotter.add_marker(centroid, "%d" % da_id, "%d" % pop)
clipping = das.get_clipping_polygons()
for p in clipping:
p.set_attribute("fillColor", "#0000ff")
p.set_attribute("fillOpacity", 0.1)
plotter.add_polygon(p)
clipped = das.get_clipped_polygons()
for p in clipped:
p.set_attribute("fillColor", "#00ff00")
p.set_attribute("fillOpacity", 0.5)
plotter.add_polygon(p)
print "clipped area", p.get_area()
plotter.plot("temp/maps/da_polygons_with_markers.html")
def test_plot_da_pop_dens(self):
das = DaData()
plotter = Plotter()
da_id_list = das.get_da_id_list()
max_pop_density = 0.0
total_area = 0
total_pop = 0
for da_id in da_id_list:
polygon = das.get_polygon(da_id)
area = das.get_area(da_id)
pop = das.get_population(da_id)
total_pop += pop
total_area += area
pop_density = 1000 * 1000 * pop / area
if pop_density > max_pop_density:
max_pop_density = pop_density
for da_id in da_id_list:
polygon = das.get_polygon(da_id)
area = das.get_area(da_id)
pop = das.get_population(da_id)
pop_density = 1000 * 1000 * pop / area
opacity = pop_density / max_pop_density
print da_id, pop, area, "density", pop_density
polygon.set_attribute("fill_opacity", opacity)
plotter.add_polygon(polygon)
# pop = das.get_population(da_id)
# centroid = das.get_centroid(da_id)
# print pop, centroid
#
# plotter.add_marker(centroid, "%d" % da_id, "%d" % pop)
plotter.plot("temp/maps/da_pop_density.html")
total_area = total_area / (1000.0 * 1000.0)
print "total_pop", total_pop
print "total_area", total_area
print "total_density", total_pop/total_area
def test_plot_heatmap(self, file_name_in, file_name_out):
das = DaData()
plotter = Plotter()
heatmap = DaHeatmap()
heatmap.load_file("temp/%s" % file_name_in)
da_id_list = heatmap.get_da_id_list()
for da_id in da_id_list:
score = heatmap.get_score_normalized(da_id)
polygon = das.get_polygon(da_id)
polygon.set_attribute("fill_opacity", score)
polygon.set_attribute("fill_color", "#00ff00")
plotter.add_polygon(polygon)
plotter.plot("temp/maps/%s" % file_name_out)
    def plot_heatmap_change(self):
        """Map the June -> July percentage change in per-DA scores.

        Blue = score increased, red = decreased; opacity scales with the
        absolute percent change.  DAs with a zero June score are treated
        as unchanged (avoids divide-by-zero) and are not drawn.
        """
        das = DaData()
        plotter = Plotter()
        heatmap_june = DaHeatmap()
        heatmap_july = DaHeatmap()
        heatmap_june.load_file("temp/da_score_june.csv")
        heatmap_july.load_file("temp/da_score_july.csv")
        da_id_list = heatmap_june.get_da_id_list()
        for da_id in da_id_list:
            # score_june = heatmap_june.get_score_normalized(da_id)
            # score_july = heatmap_july.get_score_normalized(da_id)
            score_june = heatmap_june.get_score(da_id)
            score_july = heatmap_july.get_score(da_id)
            if score_june == 0:
                change = 0
            else:
                change = 100.0 * (score_july - score_june) / score_june
            print da_id, score_june, score_july, change
            color = None
            if change > 0:
                color = '#0000ff'
            elif change < 0:
                color = '#ff0000'
            if color is not None:
                # NOTE(review): opacity exceeds 1.0 when |change| > 100% --
                # confirm the plotter clamps it.
                opacity = abs(change)/100.0
                polygon = das.get_polygon(da_id)
                polygon.set_attribute("fill_opacity", opacity)
                polygon.set_attribute("fill_color", color)
                plotter.add_polygon(polygon)
        plotter.plot("temp/maps/heatmap_change_june_july.html")
    def plot_stop_buffers(self):
        """Draw a 400-unit round buffer around every transit stop
        (units presumably metres -- TODO confirm)."""
        stop = TransitStops( "../data/sts/csv/2018_05_04/")
        # stop.make_square_buffers(800)
        stop.make_round_buffer(400)
        plotter = Plotter()
        stop_ids = stop.get_ids()
        for stop_id in stop_ids:
            p = stop.get_buffer(stop_id)
            p.set_attribute("fill_opacity", 0.05)
            p.set_attribute("fill_color", "#ff0000")
            plotter.add_polygon(p)
        plotter.plot("temp/maps/stop_buffers.html")
    def plot_test_raster(self):
        """Experiment: rasterize DA polygons and score raster cells by
        transit-stop coverage, writing several diagnostic maps.

        NOTE(review): the bare ``raise ValueError("TEMP STOP")`` below makes
        everything after the first map unreachable -- the remainder is
        retained work-in-progress code.
        """
        das = DaData()
        if True:
            group1 = das.get_polygon_dict()
            da_id_list = das.get_da_id_list()
        else:
            # Small hand-picked DA subset used while debugging.
            da_id_list = [
                47110049,
                47110045,
                47110046,
            ]
            group1 = {}
            for da_id in da_id_list:
                group1[da_id] = das.get_polygon(da_id)
        # base_path = "../data/sts/csv/2018_05_04/"
        base_path = BASE.JULY
        # base_path = BASE.BRT
        stop_mgr = TransitStops(base_path)
        #------------------------------------------------------------------------------------
        # Map 1: active stop locations plus their round buffers.
        plotter = Plotter()
        polypoint = Polypoint()
        stop_mgr.make_round_buffers(400)
        stops = stop_mgr.get_active_stops()
        for stop in stops:
            polypoint.add_point(stop.get_point())
            # Add buffer
            buffer = stop.get_buffer()
            buffer.set_attribute("fillColor", "#0000ff")
            buffer.set_attribute("fillOpacity", 0.1)
            plotter.add_polygon(buffer)
        polypoint.set_attribute("fillOpacity", 0.8)
        polypoint.set_attribute("radius", 50)
        plotter.add_polypoint(polypoint)
        plotter.plot("temp/maps/stop_locations.html")
        #------------------------------------------------------------------------------------
        # Everything below this raise is currently disabled.
        raise ValueError("TEMP STOP")
        xx = stop_mgr.get_name(3432)
        print xx
        # raise ValueError("temp stop")
        # stop.make_square_buffers(600)
        stop_mgr.make_round_buffer(400)
        group2 = stop_mgr.get_buffer_polygons()
        intersect = Intersect(group1, group2, limit=2000)
        stop_mgr.compute_demand(intersect, das)
        stop_polygons = intersect.get_intersections(group=1, id=da_id_list[0])
        # -------------------------------------------------------------------------------------
        # Map 2: stop/DA intersection pieces for the first DA plus its raster grid.
        plotter = Plotter()
        for item in stop_polygons:
            p = item[0]
            p.set_attribute("fillOpacity", 0.1)
            p.set_attribute("fillColor", "#ff0000")
            plotter.add_polygon(p)
        for item in stop_polygons:
            p = item[0]
            centroid = p.get_centroid()
            da_id = item[1]
            msg = "stop_%d" % da_id
            plotter.add_marker(centroid, msg, "")
        da_p = das.get_polygon(da_id_list[0])
        raster_size = 100
        raster_points = da_p.get_raster(raster_size)
        raster_polygons = []
        polypoint = Polyline()
        polypoint.set_attribute("radius", 10)
        for point in raster_points:
            # print "adding raster point", repr(point)
            polypoint.add_point(point)
            p = point.get_square_buffer(raster_size)
            p.set_attribute("fillOpacity", 0.1)
            p.set_attribute("fillColor", "#0000ff")
            p.set_attribute("strokeWeight", 1)
            p.set_attribute("strokeColor", "#202020")
            p.set_attribute("strokeOpacity", 0.1)
            plotter.add_polygon(p)
            raster_polygons.append(p)
        plotter.add_polypoint(polypoint)
        da_p.set_attribute("strokeWeight", 2)
        da_p.set_attribute("strokeColor", "#202020")
        da_p.set_attribute("strokeOpacity", 1)
        plotter.add_polygon(da_p)
        plotter.plot("temp/maps/test_raster_%d.html" % da_id_list[0])
        # ===================================================================
        # Loop through all the raster polygons and compute score
        # This is the number of stop polygons it touches
        judge = Score(base_path, stops=stop_mgr)
        score_list = []
        log_score_list = []
        da_ids = intersect.get_group1_ids()
        # Make a data dict for all DAs in the list
        data = {}
        for da_id in da_ids:
            da_p = das.get_polygon(da_id)
            raster_points = da_p.get_raster(raster_size)
            raster_polygons = []
            for point in raster_points:
                p = point.get_square_buffer(raster_size)
                raster_polygons.append(p)
            stop_polygons = intersect.get_intersections(group=1, id=da_id)
            print "Got %d stop polygons %d raster polygons for DA %d" % \
                (len(stop_polygons), len(raster_polygons), da_id)
            keep_rasters = []
            for p in raster_polygons:
                # Figure out which stop polygons intersect this raster polygon
                # score = judge.get_score(p, stop_polygons)
                score = judge.get_score_simple(p, stop_polygons)
                if score == 0: continue
                score_list.append(score)
                # if score > 200:
                #     print "CAPPING SCORE"
                #     score = 200
                # score += 1
                try:
                    # NOTE(review): log10 is immediately overwritten by sqrt,
                    # so the "log" scores are actually square roots.
                    score_log = math.log10(score)
                    score_log = math.sqrt(score)
                except:
                    score_log = 0
                log_score_list.append(score_log)
                keep_rasters.append((p, score, score_log))
            data[da_id] = {
                'rasters' : keep_rasters,
                'da_p' : da_p
            }
        # Now loop through DAs
        plotter = Plotter()
        score_list = sorted(score_list)
        score_list.reverse()
        log_score_list = sorted(log_score_list)
        log_score_list.reverse()
        # This caps the top score (51st / 11th highest value respectively;
        # raises IndexError for fewer scores).
        max_score = score_list[50]
        max_score_log = log_score_list[10]
        # These are the DAs
        for k, v in data.iteritems():
            da_p = v.get('da_p')
            da_p.set_attribute("fillOpacity", 0)
            da_p.set_attribute("fillColor", "#ffffff")
            da_p.set_attribute("strokeWeight", 2)
            plotter.add_polygon(da_p)
        for k, v in data.iteritems():
            rasters = v.get('rasters')
            da_p = v.get('da_p')
            for item in rasters:
                p = item[0]
                score = item[1]
                if score == 0: continue
                if score > max_score:
                    color = "#ff0000"
                    score = max_score
                else:
                    color = "#0000ff"
                opacity = 0.9 * score / (max_score)
                intersection = p.intersect(da_p)
                for i_p in intersection:
                    i_p.set_attribute("fillOpacity", opacity)
                    i_p.set_attribute("fillColor", color)
                    i_p.set_attribute("strokeWeight", 1)
                    i_p.set_attribute("strokeColor", "#202020")
                    i_p.set_attribute("strokeOpacity", 0.1)
                    plotter.add_polygon(i_p)
        plotter.plot("temp/maps/test_raster_score.html")
        # Now loop through DAs (same map, but using the sqrt score at item[2])
        plotter = Plotter()
        for k, v in data.iteritems():
            da_p = v.get('da_p')
            da_p.set_attribute("fillOpacity", 0)
            da_p.set_attribute("fillColor", "#ffffff")
            da_p.set_attribute("strokeWeight", 2)
            plotter.add_polygon(da_p)
        for k, v in data.iteritems():
            rasters = v.get('rasters')
            da_p = v.get('da_p')
            for item in rasters:
                p = item[0]
                score = item[2]
                if score == 0: continue
                if score > max_score_log:
                    color = "#ff0000"
                    score = max_score_log
                else:
                    color = "#0000ff"
                opacity = 0.9 * score / (max_score_log)
                intersection = p.intersect(da_p)
                for i_p in intersection:
                    i_p.set_attribute("fillOpacity", opacity)
                    i_p.set_attribute("fillColor", color)
                    i_p.set_attribute("strokeWeight", 1)
                    i_p.set_attribute("strokeColor", "#202020")
                    i_p.set_attribute("strokeOpacity", 0.1)
                    plotter.add_polygon(i_p)
        plotter.plot("temp/maps/test_raster_score_log.html")
        # Dump the sorted raw scores for offline inspection.
        s = sorted(score_list)
        s.reverse()
        f = open("scores.txt", "w")
        for i, score in enumerate(s):
            f.write("%d - %f\n" % (i, score))
        f.close()
    def plot_stop_da_intersections(self):
        """Plot stop-buffer / DA intersections from both directions:
        one map for a single stop id (3312), one for a single DA id
        (47110114)."""
        dataman = DataManager(BASE.JULY, link_stops=False, link_shapes=False)
        dataman.make_round_buffers(400)
        group1 = dataman.get_stops()
        das = DaData()
        group2 = das.get_das()
        intersect = Intersect(group1, group2)
        s_id = 3312
        polygons = intersect.get_intersections_for_group1_id(s_id)
        plotter = Plotter()
        for item in polygons:
            p = item[0]
            p.set_attribute("fillOpacity", 0.1)
            p.set_attribute("fillColor", "#ff0000")
            plotter.add_polygon(p)
        for item in polygons:
            p = item[0]
            centroid = p.get_centroid()
            da_id = item[1]
            msg = "%d" % da_id
            plotter.add_marker(centroid, msg, msg)
        plotter.plot("temp/maps/stop_da_intersect_%d.html" % s_id )
        # Reverse direction: all stop buffers intersecting one DA.
        polygons = intersect.get_intersections_for_group2_id(47110114)
        plotter = Plotter()
        da = das.get_da(47110114)
        p = da.get_polygon()
        p.set_attribute("fillColor", "#0000ff")
        p.set_attribute("strokeWeight", 2)
        plotter.add_polygon(p)
        for item in polygons:
            p = item[0]
            p.set_attribute("fillOpacity", 0.1)
            p.set_attribute("fillColor", "#ff0000")
            plotter.add_polygon(p)
        for item in polygons:
            p = item[0]
            p.set_attribute("fillOpacity", 0.1)
            p.set_attribute("fillColor", "#ff0000")
            p.set_attribute("strokeWeight", 1)
            stop_id = item[1]
            msg = "stop_%d" % stop_id
            centroid = p.get_centroid()
            plotter.add_marker(centroid, msg, msg)
        plotter.plot("temp/maps/da_stop_intersect_47110114.html")
    def test_make_shapefile(self):
        """Write five star-shaped test polygons to an Esri shapefile
        ('my.shp') using OGR."""
        test_polygons = []
        for i in xrange(5):
            test_polygons.append(self.make_test_poly_star())
        print len(test_polygons)
        driver = ogr.GetDriverByName('Esri Shapefile')
        ds = driver.CreateDataSource('my.shp')
        layer = ds.CreateLayer('', None, ogr.wkbPolygon)
        # Add one attribute
        layer.CreateField(ogr.FieldDefn('id', ogr.OFTInteger))
        defn = layer.GetLayerDefn()
        ## If there are multiple geometries, put the "for" loop here
        for i, p in enumerate(test_polygons):
            # Create a new feature (attribute and geometry)
            feat = ogr.Feature(defn)
            feat.SetField('id', i)
            # Make a geometry, from Shapely object
            # geom = ogr.CreateGeometryFromWkb(poly.wkb)
            geom = p.get_ogr_poly()
            feat.SetGeometry(geom)
            layer.CreateFeature(feat)
            feat = geom = None  # destroy these
        # Save and close everything (OGR flushes on dereference)
        ds = layer = feat = geom = None
        print "Done!!"
if __name__ == "__main__":
    # Entry point: one experiment is enabled at a time -- uncomment the
    # run you want.
    runner = Runner()
    runner.test_plot_random()
    # runner.test_plot_random2()
    # runner.test_plot_das()
    # runner.test_plot_heatmap('da_score_june.csv', 'heatmap_june.html')
    # runner.test_plot_heatmap('da_score_july.csv', 'heatmap_july.html')
    # runner.plot_heatmap_change()
    # runner.plot_stop_buffers()
    # runner.plot_stop_da_intersections()
    # runner.plot_test_raster()
    # runner.test_plot_da_pop_dens()
    # runner.test_point()
    # runner.test_make_shapefile()
|
import numpy as np
import matplotlib as mpl
import pandas as pd
import random
import cv2
from tensorflow.keras import regularizers
from tensorflow.keras.layers import Activation, Convolution2D, MaxPooling2D, BatchNormalization, Flatten, Dense, Dropout, GlobalAveragePooling2D, Conv2D,MaxPool2D, ZeroPadding2D, LeakyReLU
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
from tensorflow.keras import backend as kr
from sklearn.model_selection import train_test_split
def plot_results(images, coordinates, image_size=96):
    """Display a 4x4 grid of randomly chosen images with their landmarks
    overlaid as red dots.

    ``coordinates`` holds flat [x0, y0, x1, y1, ...] vectors normalized to
    [0, 1]; they are rescaled by ``image_size`` for display.
    """
    # Split each flat vector into pixel-space x (even indices) and
    # y (odd indices) lists.
    coordinate_sets = []
    for point_set in coordinates:
        xs = [value * image_size for value in point_set[0::2]]
        ys = [value * image_size for value in point_set[1::2]]
        coordinate_sets.append((xs, ys))
    fig, ax = mpl.pyplot.subplots(nrows = 4, ncols = 4, sharex=True, sharey=True, figsize = (16,16))
    for row in range(4):
        for col in range(4):
            index = random.randint(0, len(images) - 1)
            image = np.reshape(images[index], (96,96))
            landmark_x, landmark_y = coordinate_sets[index]
            panel = ax[row, col]
            panel.imshow(image, cmap="gray")
            panel.scatter(landmark_x, landmark_y, c = 'r')
            panel.set_xticks(())
            panel.set_yticks(())
            panel.set_title('Index Number: %d' %index)
    mpl.pyplot.show()
def load_dataset(set_size=16, folder_path='test_images/', extension_name='.png'):
    """Load images named 0..set_size-1 from ``folder_path``, convert them
    to grayscale, resize to 96x96, and return a (N, 96, 96, 1) array."""
    image_set = []
    for index in range(set_size):
        file_path = folder_path + str(index) + extension_name
        print(file_path)
        raw = np.float32(cv2.imread(file_path))
        gray = cv2.cvtColor(raw, cv2.COLOR_BGR2GRAY)
        resized = cv2.resize(gray, (96,96), interpolation=cv2.INTER_CUBIC)
        image_set.append(np.expand_dims(resized, axis=-1))
    return np.array(image_set)
def process_frame(image):
    """Convert one BGR frame into the (1, 96, 96, 1) float32 grayscale
    batch format the model expects."""
    frame = np.float32(image)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    resized = cv2.resize(gray, (96,96), interpolation=cv2.INTER_CUBIC)
    # Add the channel axis, then wrap in a length-1 batch.
    return np.array([np.expand_dims(resized, axis=-1)])
def sourced_cnn():
    """Build the landmark-regression CNN.

    Inspired from the paper by Shutong Zhang, Chenyue Meng of Stanford
    University: http://cs231n.stanford.edu/reports/2016/pdfs/007_Report.pdf

    Three conv/LeakyReLU/BatchNorm/MaxPool stages followed by two L2-
    regularized dense layers and a sigmoid output of 8 normalized
    landmark coordinates.  Compiled with SGD on MSE.
    """
    model = Sequential()
    conv_specs = [(64, (3, 3)), (128, (2, 2)), (256, (2, 2))]
    for stage, (n_filters, kernel) in enumerate(conv_specs):
        conv_kwargs = {'padding': 'valid', 'use_bias': False}
        if stage == 0:
            # Only the first layer declares the input shape.
            conv_kwargs['input_shape'] = (96, 96, 1)
        model.add(Convolution2D(n_filters, kernel, **conv_kwargs))
        model.add(LeakyReLU(alpha = 0.2))
        model.add(BatchNormalization())
        model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(500,activation='relu', kernel_regularizer=regularizers.l2(0.002)))
    model.add(Dense(500,activation='relu', kernel_regularizer=regularizers.l2(0.002)))
    model.add(Dense(8, activation='sigmoid'))
    model.compile(optimizer='SGD',
                  loss='mean_squared_error',
                  metrics=['mae', 'accuracy'])
    return model
def homographical_augment(image_set, truth_set):
    """Augment the dataset with perspective-warped and flipped copies.

    For each input image this produces at most one homography-warped copy
    and one flipped copy, recomputing the landmark truths by warping a
    sparse "truth matrix" with the same transform.  Samples where any of
    the four landmarks is lost by the warp (fewer than 4 recovered x
    coordinates) are silently dropped, so the output can be smaller than
    2 * len(image_set).

    Returns (augmented_images, augmented_truths) as concatenated arrays.
    """
    augmented_images = []
    augmented_truths = []
    flipped_images = []
    flipped_truths = []
    # Unpack the normalized [0,1] truth vectors into integer pixel
    # coordinates (x at even indices, y at odd).
    coordinate_sets = []
    for point_set in truth_set:
        x = []
        y = []
        for i in range(len(point_set)):
            if i % 2 == 0 :
                x.append(np.uint32(point_set[i] * 96))
            else:
                y.append(np.uint32(point_set[i] * 96))
        coordinate_sets.append((x, y))
    # Create a fixed pool of 8 homography matrices (left/right
    # perspective squeezes of varying strength) to sample from.
    h_matrices = []
    pts_src_h1 = np.array([[0,0], [0, 96], [96, 0], [96,96]])
    pts_dst_h1 = np.array([[0,0], [0, 96], [80, 16], [80, 80]])
    h1, status = cv2.findHomography(pts_src_h1, pts_dst_h1)
    h_matrices.append(h1)
    pts_src_h2 = np.array([[0,0], [0, 96], [96, 0], [96,96]])
    pts_dst_h2 = np.array([[0,0], [0, 96], [72, 24], [72, 72]])
    h2, status = cv2.findHomography(pts_src_h2, pts_dst_h2)
    h_matrices.append(h2)
    pts_src_h3 = np.array([[0,0], [0, 96], [96, 0], [96,96]])
    pts_dst_h3 = np.array([[0,0], [0, 96], [66, 30], [66, 66]])
    h3, status = cv2.findHomography(pts_src_h3, pts_dst_h3)
    h_matrices.append(h3)
    pts_src_h4 = np.array([[0,0], [0, 96], [96, 0], [96,96]])
    pts_dst_h4 = np.array([[0,0], [0, 96], [60, 16], [60, 80]])
    h4, status = cv2.findHomography(pts_src_h4, pts_dst_h4)
    h_matrices.append(h4)
    pts_src_h5 = np.array([[0,0], [0, 96], [96, 0], [96,96]])
    pts_dst_h5 = np.array([[16,16], [16, 80], [96, 0], [96, 96]])
    h5, status = cv2.findHomography(pts_src_h5, pts_dst_h5)
    h_matrices.append(h5)
    pts_src_h6 = np.array([[0,0], [0, 96], [96, 0], [96,96]])
    pts_dst_h6 = np.array([[24,24], [24, 72], [96, 0], [96, 96]])
    h6, status = cv2.findHomography(pts_src_h6, pts_dst_h6)
    h_matrices.append(h6)
    pts_src_h7 = np.array([[0,0], [0, 96], [96, 0], [96,96]])
    pts_dst_h7 = np.array([[30,30], [30, 66], [96, 0], [96, 96]])
    h7, status = cv2.findHomography(pts_src_h7, pts_dst_h7)
    h_matrices.append(h7)
    pts_src_h8 = np.array([[0,0], [0, 96], [96, 0], [96,96]])
    pts_dst_h8 = np.array([[36,16], [36, 80], [96, 0], [96, 96]])
    h8, status = cv2.findHomography(pts_src_h8, pts_dst_h8)
    h_matrices.append(h8)
    augmented_coordinates = []
    flipped_coordinates = []
    for i in range(len(image_set)):
        # Pick a random homography from the pool
        h = random.choice(h_matrices)
        # Warp the image
        reshaped_img = np.reshape(image_set[i], (96,96))
        augmented_image = cv2.warpPerspective(reshaped_img, h, (96,96))
        # Create a sparse truth matrix: 1 at each landmark pixel
        t_matrix = np.zeros(np.shape(image_set[i]))
        coordinate_set = coordinate_sets[i]
        for j in range(len(coordinate_set[0])):
            x_pos = coordinate_set[0][j]
            y_pos = coordinate_set[1][j]
            t_matrix[y_pos][x_pos] = 1
        augmented_t_matrix = cv2.warpPerspective(t_matrix, h, (96,96))
        # Local non-maxima suppression: warping smears each landmark over
        # several pixels, so keep only one per 4x4 neighborhood.
        for p in range(96):
            for k in range(96):
                value = augmented_t_matrix[p][k]
                if(value != 0 and p > 2 and k > 2):
                    matrix_slice = augmented_t_matrix[p-2:p+2, k-2:k+2]
                    lnms_matrix = local_non_max_suppression(matrix_slice)
                    augmented_t_matrix[p-2:p+2, k-2:k+2] = lnms_matrix
        # Recover normalized landmark coordinates from the warped matrix
        augmented_x = []
        augmented_y = []
        for p in range(96):
            for k in range(96):
                value = augmented_t_matrix[p][k]
                if(value != 0):
                    augmented_x.append(k / 96)
                    augmented_y.append(p / 96)
        # Keep the sample only if all four landmarks survived the warp
        if(len(augmented_x) == 4):
            augmented_coordinates.append((augmented_x, augmented_y))
            augmented_images.append(np.expand_dims(augmented_image, axis=-1))
        # Flip augmentation: random horizontal/vertical/both flip of image
        # and truth matrix together
        flip = random.choice([0,1,-1])
        flipped_image = cv2.flip(reshaped_img, flip)
        flipped_t_matrix = cv2.flip(t_matrix, flip)
        flipped_x = []
        flipped_y = []
        for u in range(96):
            for v in range(96):
                value = flipped_t_matrix[u][v]
                if(value != 0):
                    flipped_x.append(v / 96)
                    flipped_y.append(u / 96)
        if(len(flipped_x) == 4):
            flipped_coordinates.append((flipped_x, flipped_y))
            flipped_images.append(np.expand_dims(flipped_image, axis=-1))
    # Repack coordinates into flat 8-element truth vectors
    augmented_truths = repack_truths(augmented_coordinates)
    flipped_truths = repack_truths(flipped_coordinates)
    augmented_img_set = np.concatenate((augmented_images, flipped_images), axis=0)
    augmented_truth_set = np.concatenate((augmented_truths, flipped_truths), axis=0)
    return augmented_img_set, augmented_truth_set
def local_non_max_suppression(matrix):
    """Zero every cell of ``matrix`` except its maximum, which is set to 1.

    Used on small patches of a warped landmark "truth matrix" so that only
    one landmark indicator survives per patch.  Returns a new array of the
    same shape/dtype; the input is not modified.

    Equivalent to the original O(rows*cols) Python scan: np.argmax returns
    the first maximum in row-major order, matching the original strict
    ``>`` comparison's tie-breaking (earliest cell wins).
    """
    lnms_matrix = np.zeros_like(matrix)
    lnms_matrix.flat[np.argmax(matrix)] = 1
    return lnms_matrix
def repack_truths(coordinates_set):
    """Interleave ([x0..x3], [y0..y3]) coordinate pairs back into flat
    8-element truth vectors: [x0, y0, x1, y1, x2, y2, x3, y3]."""
    truth_set = []
    for xs, ys in coordinates_set:
        truth = np.zeros((8,))
        for j in range(4):
            truth[2 * j] = xs[j]
            truth[2 * j + 1] = ys[j]
        truth_set.append(truth)
    return truth_set
def process_dataset(face_images, d_pts):
    """Build the training arrays from the Kaggle face-landmark data.

    Keeps only samples where all four needed landmarks are present and
    returns (data_set, truth_set): a (N, 96, 96, 1) image array and a
    (N, 8) array of landmark coordinates normalized to [0, 1].

    face_images: array with images along axis 0 (square, size taken from
        shape[1]).
    d_pts: DataFrame of landmark columns from facial_keypoints.csv.
    """
    # Indices of rows where all four landmarks are non-NaN.
    non_zero_selection = np.nonzero(d_pts.left_eye_center_x.notna() &
                                    d_pts.right_eye_center_x.notna() &
                                    d_pts.nose_tip_x.notna() &
                                    d_pts.mouth_center_bottom_lip_x.notna())[0]
    #image_size 96x96
    image_size = face_images.shape[1]
    set_size = non_zero_selection.shape[0]
    #Creating dataset and truthsets
    data_set = np.zeros((set_size, image_size, image_size, 1))
    truth_set = np.zeros((set_size, 8))
    true_index = 0
    for index in non_zero_selection:
        # NOTE(review): resizing to (image_size, image_size) is a no-op for
        # same-size inputs -- the old "upscale to 192x192" comment was stale.
        resized_img = cv2.resize(face_images[index], (image_size, image_size), interpolation=cv2.INTER_LINEAR)
        data_set[true_index] = np.expand_dims(resized_img, axis=-1)
        # Landmark coordinates normalized to [0, 1] by dividing by image_size.
        truth_set[true_index][0] = (d_pts.left_eye_center_x[index]) / image_size
        truth_set[true_index][1] = (d_pts.left_eye_center_y[index]) / image_size
        truth_set[true_index][2] = (d_pts.right_eye_center_x[index]) / image_size
        truth_set[true_index][3] = (d_pts.right_eye_center_y[index]) / image_size
        truth_set[true_index][4] = (d_pts.nose_tip_x[index]) / image_size
        truth_set[true_index][5] = (d_pts.nose_tip_y[index]) / image_size
        truth_set[true_index][6] = (d_pts.mouth_center_bottom_lip_x[index]) / image_size
        truth_set[true_index][7] = (d_pts.mouth_center_bottom_lip_y[index]) / image_size
        true_index += 1
    return data_set, truth_set
def scale_coordinates(coordinate_set, starting_size, scaled_size):
    """Rescale a flat [x, y, x, y, ...] coordinate list from
    ``starting_size`` to ``scaled_size`` (both (width, height) pairs).

    Even indices are treated as x and scaled by the width ratio; odd
    indices as y, scaled by the height ratio.
    """
    x_factor = scaled_size[0] / starting_size[0]
    y_factor = scaled_size[1] / starting_size[1]
    return [
        value * (x_factor if i % 2 == 0 else y_factor)
        for i, value in enumerate(coordinate_set)
    ]
def draw_nose(image, coordinate_set, nose_path="SquidNose.png", rgb=False):
    """Overlay a nose sprite onto a face image at the detected landmarks.

    image: face image (grayscale when rgb=False, BGR when rgb=True).
    coordinate_set: flat [x, y, ...] landmark vector in the order
        left eye, right eye, nose, mouth.  The values are multiplied by 96
        below, which undoes the /96 normalization of the model output;
        combined with scale_coordinates() upstream this yields pixel
        coordinates in the full-size image -- verify for other callers.
    rgb: if False, warps a grayscale nose; if True, warps each BGR
        channel separately and merges them.
    Returns a copy of the image with the nose composited in.
    """
    raw_nose = cv2.imread(nose_path)
    gray_nose = cv2.cvtColor(raw_nose, cv2.COLOR_BGR2GRAY)
    # Corner points of the nose sprite (TL, TR, BL, BR).
    #nose_pts = np.array(np.float32([[0,0], [568,0], [287,313], [275,681]]))
    nose_pts = np.array(np.float32([[0,0], [568,0], [0,681], [568,681]]))
    #Coordinates[0] = Left Eye
    #Coordinates[1] = Right Eye
    #Coordinates[2] = Nose
    #Coordinates[3] = Mouth
    coordinates = []
    coordinate = []
    # Group the flat vector into [x, y] pairs (scaled by 96, see docstring).
    for i in range(len(coordinate_set)):
        if i % 2 == 0:
            coordinate.append(coordinate_set[i] * 96)
        else:
            coordinate.append(coordinate_set[i] * 96)
            coordinates.append(coordinate.copy())
            coordinate = []
    coordinates = np.array(np.float32(coordinates))
    # TL, TR, BL, BR bounding box spanning eyes (top) to mouth (bottom).
    nose_box_lx = coordinates[0][0]
    nose_box_rx = coordinates[1][0]
    nose_box_ty = coordinates[0][1]
    nose_box_by = coordinates[3][1]
    #Rectangle BB around Nose
    bb_coordinates = np.array([
        [nose_box_lx, nose_box_ty],
        [nose_box_rx, nose_box_ty],
        [nose_box_lx, nose_box_by],
        [nose_box_rx, nose_box_by]
    ])
    # Homography mapping the sprite corners into the face bounding box.
    h, res = cv2.findHomography(nose_pts, bb_coordinates)
    if rgb == False:
        warped_nose = cv2.warpPerspective(gray_nose, h, (np.size(image,0), np.size(image,1)))
        augmented_image = image.copy()
        # Copy non-background nose pixels (0 and 255 treated as background).
        for y in range(np.size(image, 0) - 15):
            for x in range(np.size(image,1) - 15):
                nose_pixel = warped_nose[y][x]
                if nose_pixel != 0 and nose_pixel != 255:
                    augmented_image[y][x] = nose_pixel
    else:
        # Warp each channel independently, keyed on the blue channel's
        # 20..245 range to skip background pixels.
        nose_b, nose_g, nose_r = cv2.split(raw_nose)
        img_b, img_g, img_r = cv2.split(image)
        warped_nose_b = cv2.warpPerspective(nose_b, h, (np.size(image,0), np.size(image,1)))
        warped_nose_g = cv2.warpPerspective(nose_g, h, (np.size(image,0), np.size(image,1)))
        warped_nose_r = cv2.warpPerspective(nose_r, h, (np.size(image,0), np.size(image,1)))
        for y in range(np.size(image, 0)):
            for x in range(np.size(image,1)):
                nose_pixel_b = warped_nose_b[y][x]
                nose_pixel_g = warped_nose_g[y][x]
                nose_pixel_r = warped_nose_r[y][x]
                if nose_pixel_b > 20 and nose_pixel_b < 245:
                    img_b[y][x] = nose_pixel_b
                    img_g[y][x] = nose_pixel_g
                    img_r[y][x] = nose_pixel_r
        augmented_image = cv2.merge((img_b, img_g, img_r))
    return augmented_image
if __name__ == "__main__":
    # Built with the Kaggle facial landmark detection competition dataset:
    # https://www.kaggle.com/drgilermo/face-images-with-marked-landmark-points
    # Load Dataset and CSV
    face_images = np.moveaxis(np.load('face_images.npz')['face_images'],-1,0)
    d_pts = pd.read_csv('facial_keypoints.csv')
    # Filter to complete samples and normalize landmark truths
    data_set, truth_set = process_dataset(face_images, d_pts)
    # Get homography-warped / flipped augmented data
    a_t, a_tr = homographical_augment(data_set, truth_set)
    # Append augmented data to the originals
    x_set = np.concatenate((data_set, a_t), axis=0)
    y_set = np.concatenate((truth_set, a_tr), axis=0)
    # Split into training and test sets and visually spot-check
    x_train, x_test, y_train, y_test = train_test_split(x_set, y_set, test_size=0.2, random_state=10)
    plot_results(x_train, y_train)
    # Checkpoints: keep the best weights (by val_accuracy) + TensorBoard logs
    log_dir = "logs/"
    tensorboard = TensorBoard(log_dir=log_dir, histogram_freq=1, profile_batch=100000)
    checkpoint = ModelCheckpoint('ghetto_net_weightsv3.h5', monitor='val_accuracy', save_best_only=True, save_weights_only=True, verbose=1)
    model = sourced_cnn()
    #model.load_weights('ghetto_net_weights_use.h5')
    model.fit(x_train, y_train, batch_size=16, epochs=100, callbacks=[checkpoint, tensorboard], validation_split=0.14, verbose=1, shuffle=True)
    # Get test-set predictions and plot them
    predictions = model.predict(x_test)
    plot_results(x_test, predictions)
    # Demo: run the model on one raw image and composite a nose onto it
    raw_image = cv2.imread('test_images/15.png')
    testing_frame = process_frame(raw_image)
    preds = model.predict(testing_frame)
    img_upscaled_dpts = scale_coordinates(preds[0], (96,96), (np.size(raw_image,0),np.size(raw_image,1)))
    nose = draw_nose(raw_image, img_upscaled_dpts, rgb=True)
    nose_rgb = cv2.cvtColor(nose, cv2.COLOR_BGR2RGB)
    mpl.pyplot.imshow(nose_rgb)
#
# |
# This script generates the topology file
# for LAMMPS simulations of a MesM-P system with explicit I-BARs and actin filaments.
# Either flat membrane or vesicular systems can be created.
# Author: Aram Davtyan
import sys
import random as rn
import numpy as np
from data_stuctures import *
from add_straight_filaments import *
from add_complex_proteins import *
from add_solvent_tools import *
SMALL = 0.000001  # numerical tolerance for float comparisons
# Membrane topology identifiers
MT_VESICLE = 0
MT_FLAT_SHEET = 1
MT_CYLINDER = 2
# Main switch keys
mem_topology = MT_VESICLE # Topology of the system
#mem_topology = MT_CYLINDER # Topology of the system
#mem_topology = MT_FLAT_SHEET # Topology of the system
b_solvent = False # Include solvent or not
two_ty_sol = False # Use two types of solvent, where one cannot carry protein concentration
b_ibar = True # Include I-BAR or not
b_filaments = False # Include actin filaments or not
b_proteins = False # Include complex proteins
data_file = "data.em2" # data file name
sqr3half = 0.866025 # sqrt(3)/2
# Box dimensions
Lx = Ly = 3600.0
Lz = 3600.0
L = [Lx, Ly, Lz]
box = [[0, Lx], [0, Ly], [0, Lz]]
# Masses
mem_mass = 1 # mass of the membrane particle
sol_mass = 1 # mass of the solvent particle
ibar_mass = 1 # mass of the ibar particle
actin_mass = 1 # mass of the actin filament particle
prot_mass = 1 # mass of the protein particle
# Solvent parameters
srad = 39.4464 # The size of the solvent particles
srad_min = 35.0 # Minimal distance between two solvent particles
# Standard value for mrad: 76.78
# Standard value for srad_min: 70.0
# Defining parameters for flat and vesicular systems
if mem_topology==MT_VESICLE:
    n_mem = 5882 # Number of membrane particles
    mrad = 73.9542 # The size of the membrane particles along the surface
    xM0 = Lx/2.0 # Center of the vesicle
    yM0 = Ly/2.0
    zM0 = Lz/2.0
    # The number of solvent particles will be calculated automatically
    # Standard value for mrad: 73.9542
elif mem_topology==MT_FLAT_SHEET:
    mrad = 35.0 # The size of the membrane particles
    mradsq = mrad*mrad;
    zM0 = Lz/2.0 # z0 coordinate for membrane plane
    # The number of membrane and solvent particles will be calculated automatically
    # Standard value for mrad: 76.78
elif mem_topology==MT_CYLINDER:
    mrad = 35.0 # The size of the membrane particles
    mradsq = mrad*mrad;
    xM0 = Lx/2.0 # x0 coordinate for cylinder axis
    yM0 = Ly/2.0 # y0 coordinate for cylinder axis
    zM0 = Lz/2.0 # Not needed in this case, defined here for generality
    D0 = 1000.0 # Diameter of the cylinder
    # The number of membrane and solvent particles will be calculated automatically
    # Standard value for mrad: 76.78
else:
    print "Error. Unknown membrane topology\n"
    sys.exit()
# I-BAR parameters
Ni = 500 # Number of I-BARs to add
Nib = 5 # Beads per I-BAR
Rib = 30 # Separation between I-BAR beads
di_min = 1.5*Rib # Minimum separation between I-BAR beads
zi0 = zM0 - 2.0*Rib # The plane where I-BARs will be initiated
# Actin filament parameters
Nf = 10 # Number of actin filaments to add
Nfb = 30 # Beads per actin filament
Rfb = 100 # Separation between actin beads
df_min = 1.5*Rfb # Minimum separation between actin beads
zfh = zM0 - Rfb/2.0 # Maximum Z coordinate for actin filaments
zfl = 0 # Minimum Z coordinate for actin filaments
# Complex protein parameters
Np = 5 # Number of proteins
dp_min = mrad # Minimum separation between proteins
zp0 = zM0 - 2.0*Rib # The plane where proteins will be initiated
xyz_file = "prot.xyz"
par_file = "prot.par"
# Creating the data structure that accumulates atoms/types/masses
data = Data(DATA_TY_EM2, box)
# Generating membrane coordinates
data.n_atom_types += 1
data.n_mol += 1
data.masses.append(mem_mass)
mem_type = data.n_atom_types
if mem_topology==MT_VESICLE:
    print "Generating a membrane vesicle ..."
    # Finding vesicle radius based on n_mem and the diameter of particles
    R = np.sqrt(n_mem * mrad*mrad / (4.0*np.pi))
    print "R: ", R
    if R>0.5*Lx or R>0.5*Ly or R>0.5*Lz:
        print "Warrning: R is smaller than one of the dimentions!\n"
    # Generate equally spaced points on the sphere via the Fibonacci
    # lattice method
    golden_angle = np.pi * (3.0 - np.sqrt(5.0))
    theta = golden_angle * np.arange(n_mem)
    zu = np.linspace(1.0 - 1.0 / n_mem, 1.0 / n_mem - 1.0, n_mem)
    ru = np.sqrt(1.0 - zu * zu)
    xu = ru * np.cos(theta)
    yu = ru * np.sin(theta)
    for i in range(n_mem):
        # Finding coordinates
        x = R * xu[i] + xM0
        y = R * yu[i] + yM0
        z = R * zu[i] + zM0
        # Add a new atom
        data.n_atoms += 1
        ind = data.n_atoms
        mol = data.n_mol
        ty = data.n_atom_types
        data.atoms.append(Atom(ind, mol, ty, x, y, z))
        # Finding the quaternion (orients the particle on the sphere)
        z1 = np.sqrt(0.5*(1.0 + zu[i]))
        xy2 = xu[i]**2 + yu[i]**2
        qw = z1
        qx = yu[i]*z1*(zu[i] - 1.0)/xy2
        qy = -xu[i]*z1*(zu[i] - 1.0)/xy2
        qz = 0.0
        data.atoms[-1].quat = [qw, qx, qy, qz]
        # Set membrane and protein composition (random lipid composition)
        phi_m = rn.uniform(-1.0, 1.0)
        phi_b = 0.0
        data.atoms[-1].phi = [phi_m, phi_b]
elif mem_topology==MT_FLAT_SHEET:
    # First generate a flat sheet membrane
    print "Generating a flat membrane ..."
    # Find the lattice dimensions (triangular lattice, hence sqr3half)
    nx = int(round(Lx/mrad))
    ny = int(round(Ly/(sqr3half*mrad)))
    n_mem = nx*ny
    print "nx: %d ny: %d" % (nx, ny)
    print "n_mem:", n_mem
    print
    # Place the membrane particles onto the lattice
    for i in range(ny):
        for j in range(nx):
            x0 = 0.0
            y0 = 0.0
            # Offset every other row by half a spacing (triangular packing)
            if i%2==1: x0 += 0.5*mrad
            x = x0 + j*mrad
            y = y0 + i*sqr3half*mrad
            z = zM0
            # Add a new atom
            data.n_atoms += 1
            ind = data.n_atoms
            mol = data.n_mol
            ty = data.n_atom_types
            data.atoms.append(Atom(ind, mol, ty, x, y, z))
            # Set quaternion (identity: normal along Z)
            data.atoms[-1].quat = [1.0, 0.0, 0.0, 0.0]
            # Set membrane and protein composition
            phi_m = rn.uniform(-1.0, 1.0)
            phi_b = 0.0
            data.atoms[-1].phi = [phi_m, phi_b]
elif mem_topology==MT_CYLINDER:
    # First generate a cylindrical membrane
    print "Generating a cylindrical membrane ..."
    # Find the lattice dimensions around and along the cylinder
    nx = int(round(np.pi*D0/mrad))
    ny = int(round(Lz/(sqr3half*mrad)))
    alpha0 = 2.0*np.pi/nx
    R = 0.5*D0
    n_mem = nx*ny
    print "nxy: %d nz: %d" % (nx, ny)
    print "n_mem:", n_mem
    print
    # Place the membrane particles onto the lattice
    for i in range(ny):
        for j in range(nx):
            alpha = j*alpha0
            if i%2==1: alpha += 0.5*alpha0
            x = R*np.cos(alpha) + xM0
            y = R*np.sin(alpha) + yM0
            z = i*sqr3half*mrad
            # Add a new atom
            data.n_atoms += 1
            ind = data.n_atoms
            mol = data.n_mol
            ty = data.n_atom_types
            data.atoms.append(Atom(ind, mol, ty, x, y, z))
            # Set quaternion; the alpha == pi case is singular and handled
            # separately.
            # NOTE(review): fabs is not imported explicitly in this file --
            # presumably exported by one of the star imports; verify
            # (otherwise use np.fabs).
            if fabs(alpha-np.pi)>SMALL:
                qw = 0.5*np.sqrt(1.0 + np.cos(alpha))
                qx = -qw*np.tan(0.5*alpha)
                qy = qw
                qz = -qx
            else:
                qw = qy = 0.0
                qx = 1.0/np.sqrt(2.0)
                qz = -1.0/np.sqrt(2.0)
            data.atoms[-1].quat = [qw, qx, qy, qz]
            # Set membrane and protein composition
            phi_m = rn.uniform(-1.0, 1.0)
            phi_b = 0.0
            data.atoms[-1].phi = [phi_m, phi_b]
else:
    print "Error. Unknown membrane topology\n"
    sys.exit()
# Zero the average lipid composition and protein concentration on the
# membrane (so the random phi values sum to exactly zero).
# First calculate the average values
sum_phi_m = 0.0
sum_phi_b = 0.0
n_phi = 0.0
for i in range(data.n_atoms):
    if data.atoms[i].ty==mem_type:
        sum_phi_m += data.atoms[i].phi[0]
        sum_phi_b += data.atoms[i].phi[1]
        n_phi += 1.0
if n_phi!=0:
    sum_phi_m /= n_phi
    sum_phi_b /= n_phi
# Subtract the average value from every membrane particle
for i in range(data.n_atoms):
    if data.atoms[i].ty==mem_type:
        data.atoms[i].phi[0] -= sum_phi_m
        data.atoms[i].phi[1] -= sum_phi_b
# Calculate the sums again and print them (sanity check: should be ~0)
sum_phi_m = 0.0
sum_phi_b = 0.0
for i in range(data.n_atoms):
    if data.atoms[i].ty==mem_type:
        sum_phi_m += data.atoms[i].phi[0]
        sum_phi_b += data.atoms[i].phi[1]
print "Total membrane phi_m and phi_b:", sum_phi_m, sum_phi_b
print
# Generate I-BAR coordinates (placement depends on the membrane topology)
if b_ibar:
    print "Adding I-BAR proteins ..."
    ibar_fil = Filament(Nib, Rib, di_min, True, True)
    ibar_fil.set_type_map_sym()
    if mem_topology==MT_VESICLE:
        # Example of placing proteins outside a vesicle
        # The proteins' alignment parallel to the XY plane is used here
        # The following alignments may be used:
        # ALIGN_NONE, ALIGN_X, ALIGN_Y, ALIGN_Z, ALIGN_XY, ALIGN_XZ, ALIGN_YZ
        xyz_max = [box[0], box[1], box[2]]
        gen_straight_filaments(ibar_fil, Ni, data, xyz_max, twoD=TWOD_FLAG_SP, top_data=[xM0, yM0, zM0, R+1.5*Rib], align=ALIGN_XY)
    elif mem_topology==MT_CYLINDER:
        # Example of placing proteins inside a cylinder
        # The cylinder is assumed to be oriented along the Z axis
        # The proteins' alignment parallel to the Z axis is used here
        # The following alignments may be used:
        # ALIGN_NONE, ALIGN_Z, ALIGN_XY
        xyz_max = [box[0], box[1], box[2]]
        gen_straight_filaments(ibar_fil, Ni, data, xyz_max, twoD=TWOD_FLAG_CL, top_data=[xM0, yM0, R-1.5*Rib], align=ALIGN_Z)
    elif mem_topology==MT_FLAT_SHEET:
        # Example of placing proteins on a flat membrane
        # The membrane is assumed to be oriented in the XY plane
        # The proteins are placed "below" the membrane on the Z=zi0 plane
        # and are randomly oriented in the XY plane
        # The following alignments may be used for twoD=TWOD_FLAG_XY:
        # ALIGN_NONE, ALIGN_X, ALIGN_Y, ALIGN_XY - equivalent to ALIGN_NONE
        xyz_max = [box[0], box[1], [zi0, zi0]]
        gen_straight_filaments(ibar_fil, Ni, data, xyz_max, twoD=TWOD_FLAG_XY)
    else:
        # Example of placing proteins "below" the membrane
        # The proteins are aligned parallel to the XY plane
        # The following alignments may be used:
        # ALIGN_NONE, ALIGN_X, ALIGN_Y, ALIGN_Z, ALIGN_XY, ALIGN_XZ, ALIGN_YZ
        xyz_max = [box[0], box[1], [box[2][0], zi0]]
        gen_straight_filaments(ibar_fil, Ni, data, xyz_max, periodic=[True, True, False], align=ALIGN_XY)
    nty = ibar_fil.get_ntypes()
    for i in range(nty):
        data.masses.append(ibar_mass)
# Generate actin filaments
if b_filaments:
    print "Adding actin filaments ..."
    actin_fil = Filament(Nfb, Rfb, df_min, True, True)
    xyz_max = [box[0], box[1], [zfl, zfh]]
    prd = [True, True, False]
    # Reset index maps because filaments are deposited in a different
    # domain from the membrane and I-BARs
    data.reset_smaps()
    th_max = np.pi/6.0
    gen_straight_filaments(actin_fil, Nf, data, xyz_max, prd, theta_max=th_max)
    data.masses.append(actin_mass)
# Place complex proteins
if b_proteins:
    print "Adding complex proteins ..."
    vec1 = [-10.583, 26.972, 109.369] # Main axis of the protein
    vec2 = [-1.431, 13.432, -8.767] # Secondary axis of the protein
    # Create a protein object where vec1 is aligned with X, and the plane
    # defined by vec1 and vec2 is parallel to XY
    prot = Protein(dp_min, xyz_file, par_file, vec1, vec2, align_direction=PROT_AL_X)
    xyz_max = [box[0], box[1], [zp0, zp0]]
    add_proteins(prot, Np, data, xyz_max)
    for i in range(prot.N_atom):
        data.masses.append(prot_mass)
# Generating solvent coordinates
if b_solvent:
    print "Adding solvent ..."
    # adding type(s) and mass(es) for solvent
    first_sol_type = data.n_atom_types + 1
    if not two_ty_sol:
        data.n_atom_types += 1
        data.masses.append(sol_mass)
    else:
        # Second solvent type is the one that cannot carry protein
        # concentration (placed "inside" the membrane below).
        data.n_atom_types += 2
        data.masses.append(sol_mass)
        data.masses.append(sol_mass)
    # Fill the box volume minus the space taken by membrane particles
    n_sol = int(round((Lx/srad)*(Ly/srad)*(Lz/srad) - float(n_mem)))
    print "n_sol:", n_sol
    # Randomly distribute solvent particles in the box
    # For flat sheet reduce the size of the box in Z direction by the radius of one particle
    if mem_topology==MT_FLAT_SHEET:
        box_sol = [[0, Lx], [0, Ly], [0, Lz-mrad]]
    else:
        box_sol = [[0, Lx], [0, Ly], [0, Lz]]
    sol_xyz = add_solvent(n_sol, box_sol, srad_min, 3)
    # Generate solvent particle type and phi_b
    types = []
    phi_b = []
    sum_phi_b = 0.0
    n_phi_b = 0.0
    for ix in sol_xyz:
        # Shifting the Z coordinate in case of a flat sheet (wrap around
        # the periodic boundary)
        if mem_topology==MT_FLAT_SHEET:
            ix[2] += zM0 + 0.5*mrad
            if ix[2]>Lz: ix[2] -= Lz
        # Assign the correct type to the solvent bead: the second type
        # goes to particles on the "inner" side of the membrane
        ty = first_sol_type
        if two_ty_sol:
            if mem_topology==MT_FLAT_SHEET and ix[2]<zM0:
                ty = first_sol_type + 1
            elif mem_topology==MT_VESICLE and (ix[0]-xM0)**2 + (ix[1]-yM0)**2 + (ix[2]-zM0)**2<R**2:
                ty = first_sol_type + 1
            elif mem_topology==MT_CYLINDER and (ix[0]-xM0)**2 + (ix[1]-yM0)**2<R**2:
                ty = first_sol_type + 1
        # Assigning phi_b and summing over it (only the first solvent type
        # carries protein concentration)
        if ty==first_sol_type:
            phi_b_one = rn.uniform(-1.0, 1.0)
            sum_phi_b += phi_b_one
            n_phi_b += 1.0
        types.append(ty)
        phi_b.append(phi_b_one)
    if n_phi_b!=0.0: sum_phi_b /= n_phi_b
    # Add solvent to the membrane system, zero-centering phi_b
    for i in range(len(sol_xyz)):
        ix = sol_xyz[i]
        data.n_atoms += 1
        ind = data.n_atoms
        ty = types[i]
        mol = 0
        phi_m_i = 0.0
        phi_b_i = phi_b[i]
        if ty==first_sol_type: phi_b_i -= sum_phi_b
        # Adding new atom
        data.atoms.append(Atom(ind, mol, ty, ix[0], ix[1], ix[2]))
        data.atoms[-1].quat = [1.0, 0.0, 0.0, 0.0]
        data.atoms[-1].phi = [phi_m_i, phi_b_i]
# write the LAMMPS data file
out = open(data_file, 'w')
data.write_data_file(out)
out.close()
|
import argparse
from functools import partial
import torch
from presets import StereoMatchingEvalPreset, StereoMatchingTrainPreset
from torchvision.datasets import (
CarlaStereo,
CREStereo,
ETH3DStereo,
FallingThingsStereo,
InStereo2k,
Kitti2012Stereo,
Kitti2015Stereo,
Middlebury2014Stereo,
SceneFlowStereo,
SintelStereo,
)
# Registry mapping CLI dataset names to dataset constructors with their fixed
# configuration pre-bound; entries are instantiated later via
# VALID_DATASETS[name](root=..., transforms=...) in make_dataset().
VALID_DATASETS = {
    "crestereo": partial(CREStereo),
    "carla-highres": partial(CarlaStereo),
    "instereo2k": partial(InStereo2k),
    "sintel": partial(SintelStereo),
    "sceneflow-monkaa": partial(SceneFlowStereo, variant="Monkaa", pass_name="both"),
    "sceneflow-flyingthings": partial(SceneFlowStereo, variant="FlyingThings3D", pass_name="both"),
    "sceneflow-driving": partial(SceneFlowStereo, variant="Driving", pass_name="both"),
    "fallingthings": partial(FallingThingsStereo, variant="both"),
    "eth3d-train": partial(ETH3DStereo, split="train"),
    "eth3d-test": partial(ETH3DStereo, split="test"),
    "kitti2015-train": partial(Kitti2015Stereo, split="train"),
    "kitti2015-test": partial(Kitti2015Stereo, split="test"),
    "kitti2012-train": partial(Kitti2012Stereo, split="train"),
    # Bug fix: this entry previously reused split="train" for the test set.
    "kitti2012-test": partial(Kitti2012Stereo, split="test"),
    # Bug fix: keyword was misspelled `use_ambient_view`; the sibling entry
    # below shows the constructor expects `use_ambient_views`.
    "middlebury2014-other": partial(
        Middlebury2014Stereo, split="additional", use_ambient_views=True, calibration="both"
    ),
    "middlebury2014-train": partial(Middlebury2014Stereo, split="train", calibration="perfect"),
    "middlebury2014-test": partial(Middlebury2014Stereo, split="test", calibration=None),
    # Bug fix: `calibrartion` typo would raise TypeError when the partial is called.
    "middlebury2014-train-ambient": partial(
        Middlebury2014Stereo, split="train", use_ambient_views=True, calibration="perfect"
    ),
}
def make_train_transform(args: argparse.Namespace) -> torch.nn.Module:
    """Build the training-time transform pipeline from parsed CLI arguments."""
    preset_kwargs = {
        "resize_size": args.resize_size,
        "crop_size": args.crop_size,
        "rescale_prob": args.rescale_prob,
        "scaling_type": args.scaling_type,
        "scale_range": args.scale_range,
        "scale_interpolation_type": args.interpolation_strategy,
        "use_grayscale": args.use_grayscale,
        "mean": args.norm_mean,
        "std": args.norm_std,
        "horizontal_flip_prob": args.flip_prob,
        "gpu_transforms": args.gpu_transforms,
        "max_disparity": args.max_disparity,
        "spatial_shift_prob": args.spatial_shift_prob,
        "spatial_shift_max_angle": args.spatial_shift_max_angle,
        "spatial_shift_max_displacement": args.spatial_shift_max_displacement,
        "spatial_shift_interpolation_type": args.interpolation_strategy,
        "gamma_range": args.gamma_range,
        "brightness": args.brightness_range,
        "contrast": args.contrast_range,
        "saturation": args.saturation_range,
        "hue": args.hue_range,
        "asymmetric_jitter_prob": args.asymmetric_jitter_prob,
    }
    return StereoMatchingTrainPreset(**preset_kwargs)
def make_eval_transform(args: argparse.Namespace) -> torch.nn.Module:
    """Build the evaluation transform; fall back to the crop size when no
    explicit eval size was given on the command line."""
    resize_size = args.crop_size if args.eval_size is None else args.eval_size
    return StereoMatchingEvalPreset(
        mean=args.norm_mean,
        std=args.norm_std,
        use_grayscale=args.use_grayscale,
        resize_size=resize_size,
        interpolation_type=args.interpolation_strategy,
    )
def make_dataset(dataset_name: str, dataset_root: str, transforms: torch.nn.Module) -> torch.utils.data.Dataset:
    """Instantiate the dataset registered under `dataset_name`, rooted at
    `dataset_root`, with the given transform pipeline."""
    dataset_factory = VALID_DATASETS[dataset_name]
    return dataset_factory(root=dataset_root, transforms=transforms)
|
from django.urls import path
from .views import (
index,
new_search,
)
# URL namespace for this app, e.g. reverse('home:new_search').
app_name = 'home'

urlpatterns = [
    # Landing page; the route name is intentionally the empty string here.
    path('', index, name=''),
    # NOTE(review): no trailing slash on 'new_search' — confirm this matches
    # the project's URL convention.
    path('new_search', new_search, name='new_search')
]
|
# Python program to illustrate the concept
# of threading
# importing the threading module
import threading
def print_cube(num):
    """
    function to print cube of given num
    """
    cube = num * num * num
    print("Cube: {}".format(cube))
def print_square(num):
    """
    function to print square of given num
    """
    square = num * num
    print("Square: {}".format(square))
if __name__ == "__main__":
    # Spawn one worker per task, start them all, then wait for each to finish
    # (same order as before: square first, cube second).
    workers = [
        threading.Thread(target=print_square, args=(10,)),
        threading.Thread(target=print_cube, args=(10,)),
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    # Both threads have completed at this point.
    print("Done!")
|
from math import pi
def volume(r, h):
    """Return the volume of a cone with base radius r and height h,
    truncated to an int (matching the original behavior)."""
    base_area = pi * r ** 2
    return int(base_area * h / 3)
|
# -*- coding: utf-8 -*-
from socket import *
import struct
import threading
from QueueTeam import ProcessPacket
class ServerReceveData:
    """Threaded TCP server (Python 2): one thread accepts clients, and one
    thread per client reassembles length-prefixed packets and pushes each
    complete packet onto a ProcessPacket queue."""

    def __init__(self,hostIp,hostPort):
        self.hostIp=hostIp
        self.hostPort=hostPort
        self.tcpSerSock=""
        self.acceptThread=None  # thread that accepts incoming client connections
        self.dataThread=None    # thread that receives data from a client
        self.packetStr=ProcessPacket()

    # Start a background thread that receives data.
    def recvData(self):
        try:
            self.tcpSerSock = socket(AF_INET, SOCK_STREAM)
            self.tcpSerSock.bind((self.hostIp,self.hostPort))
            self.tcpSerSock.listen(5)
            # Spawn the thread that handles client connections.
            self.acceptThread=threading.Thread(target=self.acceptClient,args=())
            self.acceptThread.start()
        except Exception,e:
            print 'Error: ',e

    # Thread function: accept client connections in a loop.
    def acceptClient(self):
        try:
            while True:
                tcpCliSock, addr = self.tcpSerSock.accept()
                # Spawn a new thread to service this TCP connection.
                self.dataThread=threading.Thread(target=self.tcpLink,args=(tcpCliSock,addr))
                self.dataThread.start()
                # NOTE(review): closing the *listening* socket inside the
                # accept loop kills the server after the first client — the
                # next accept() will raise and we fall into the except below.
                # This close looks like it belongs in a shutdown path; confirm.
                self.tcpSerSock.close()
        except:
            print("accept client fail")

    # Thread function: receive and reassemble data from one client.
    def tcpLink(self,tcpCliSock,addr):
        try:
            bufSize=1024
            print u'Connected client from : ', addr  # log which client connected
            # Buffer accumulating raw bytes until a full packet is available.
            tempStr="";
            while True:
                # Receive the next chunk from the client.
                dataStr = tcpCliSock.recv(bufSize)
                tempStr+=dataStr
                # Handle coalesced and partial packets: each packet begins with
                # a 4-byte (native-endian 'i') length that, judging by the
                # slicing below, includes the header itself.
                while True:
                    if len(tempStr)<4:
                        break
                    dataLenStr=tempStr[0:4]  # 4-byte length prefix
                    tupleData=struct.unpack('i',dataLenStr)
                    if len(tempStr) < tupleData[0]:
                        break  # packet not complete yet; wait for more data
                    dataPacketStr = tempStr[0:tupleData[0]]
                    # Push the complete packet onto the queue.
                    self.packetStr.setUnPacketData(dataPacketStr)
                    tempStr = tempStr[tupleData[0]:]
            # NOTE(review): unreachable — the recv loop above never breaks, so
            # the client socket is only released via the except path.
            tcpCliSock.close()
        except:
            print("receve data faild")

    # Join and clear the worker threads.
    def clearThread(self):
        self.acceptThread.join()
        self.acceptThread=None
        self.dataThread.join()
        self.dataThread=None
import webapp2
from webapp2_extras import jinja2
from google.appengine.api import users
from google.appengine.ext import ndb
from model.libroCientifico import libroCientifico
from model.comentario import Comentario
class verLibroCientificoHandler(webapp2.RequestHandler):
    """Shows a science book's detail page (GET) and stores a new comment
    on it (POST)."""

    def get(self):
        """Render the detail page for the book identified by the
        id_libroCientifico query parameter; anonymous users are redirected
        to the home page."""
        user = users.get_current_user()
        if user:
            # Bug fix: the bare `except:` hid every possible error; only a
            # missing query parameter is expected here.
            try:
                id_libroCientifico = self.request.GET["id_libroCientifico"]
            except KeyError:
                id_libroCientifico = "ERROR"
            # Renamed local: the previous name `libroCientifico` shadowed the
            # imported model class of the same name.
            libro = ndb.Key(urlsafe=id_libroCientifico).get()
            lista_comentarios = Comentario.query(Comentario.libro == libro.titulo)
            user_name = user.nickname()
            sust = {
                "users": users,
                "libroCientifico": libro,
                "lista_comentarios": lista_comentarios,
                "user_name": user_name
            }
            jinja = jinja2.get_jinja2(app=self.app)
            self.response.write(
                jinja.render_template("librosCientificos/detalleLibroCientifico.html", **sust)
            )
        else:
            self.redirect("/")
            return

    def post(self):
        """Persist a comment for the book and render a confirmation page."""
        user = users.get_current_user()
        id_libroCientifico = self.request.get("edIdLibroCientifico", "ERROR")
        textoComentario = self.request.get("edComentario", "ERROR")
        libro = ndb.Key(urlsafe=id_libroCientifico).get()
        comentario = Comentario(user_name=user.nickname(), texto=textoComentario, libro=libro.titulo)
        comentario.put()
        url = "/verLibroCientifico?id_libroCientifico=" + libro.key.urlsafe()
        mensaje = "Su comentario para el libro '" + libro.titulo + "' ha sido guardado con exito"
        template_values = {
            "mensaje": mensaje,
            "url": url
        }
        jinja = jinja2.get_jinja2(app=self.app)
        self.response.write(jinja.render_template("mensajeConfirmacion.html", **template_values))
# WSGI routing: single route for the book detail/comment handler.
app = webapp2.WSGIApplication([
    ('/verLibroCientifico', verLibroCientificoHandler),
], debug=True)
'''with open("try.txt","w") as file:
file.write("hello world qwerty qwerty")
with open("try.txt","r") as file:
print(file.read())'''

# Demo of exception handling around a guaranteed failure.
try:
    num = input("enter the number")
    # NOTE(review): on Python 3 input() returns a str, so `num / 0` raises
    # TypeError (unsupported operand types) rather than ZeroDivisionError;
    # the broad `except Exception` below catches either one.
    print(num /0)
except Exception as exp:
    print("error is jhgjgjhgjhg {0}".format(exp))
|
import sys
import os
from ete3 import Tree
from read_tree import read_tree
def analyze_dimensions(tree):
    """Print the number of taxa (leaves) and internal nodes of `tree`."""
    print("Taxa number: " + str(len(tree.get_leaves())))
    internal = sum(
        1 for item in tree.iter_prepostorder() if item[0] and not item[1].is_leaf()
    )
    print("Internal nodes number: " + str(internal))
def get_nodes_number(tree_file):
    """Return the number of leaves of the tree stored in `tree_file`."""
    tree = read_tree(tree_file)
    return len(tree.get_leaves())
def analyze_polytomies(tree):
    """Print a report of every polytomy (node with more than two children)."""
    print("Polytomies:")
    for node in tree.traverse("postorder"):
        children = node.get_children()
        if len(children) <= 2:
            continue
        only_leaves = all(child.is_leaf() for child in children)
        print(" Polytomy of size " + str(len(children)))
        if only_leaves:
            print(" all the children in this polytomy are leaves")
        else:
            print(" some children in this polytomy are internal nodes")
        print(" number of taxa under the polytomy: " + str(len(node.get_leaves())))
def count_leaves_under_polytomy(tree):
    """Print the leaf count, the number of leaves whose parent is a polytomy,
    and how many leaf children would need pruning to leave at most two per node."""
    leaves = 0
    leaves_under_polytomy = 0
    leaves_to_prune = 0
    for node in tree.traverse("postorder"):
        if node.is_leaf():
            leaves += 1
            if len(node.up.get_children()) > 2:
                leaves_under_polytomy += 1
        leaf_children = sum(1 for child in node.get_children() if child.is_leaf())
        if leaf_children > 2:
            leaves_to_prune += leaf_children - 2
    print("leaves: " + str(leaves))
    print("Number of leaves under a polytomy: " + str(leaves_under_polytomy))
    print("Leaves to prune: " + str(leaves_to_prune))
def analyze_tree(tree_file):
    """Run every tree report on the tree stored in `tree_file`."""
    tree = read_tree(tree_file)
    for report in (analyze_dimensions, analyze_polytomies, count_leaves_under_polytomy):
        report(tree)
def get_tree_taxa_number(tree_file):
    """Return the taxa (leaf) count of the tree in `tree_file`."""
    leaf_nodes = read_tree(tree_file).get_leaves()
    return len(leaf_nodes)
def is_ultrametric(tree_file, epsilon = 0.001):
    """Return True when all root-to-leaf distances agree within `epsilon`."""
    tree = read_tree(tree_file)
    ref = -1.0
    for node in tree.traverse("postorder"):
        if not node.is_leaf():
            continue
        distance = node.get_distance(tree)
        if ref == -1.0:
            ref = distance  # first leaf fixes the reference depth
        elif abs(ref - distance) > epsilon:
            return False
    return True
def check_ultrametric_and_get_length(tree_file, epsilon = 0.001):
    """Assert that every root-to-leaf distance agrees within `epsilon`
    and return that common depth."""
    tree = read_tree(tree_file)
    ref = -1.0
    for node in tree.traverse("postorder"):
        if not node.is_leaf():
            continue
        distance = node.get_distance(tree)
        if ref == -1.0:
            ref = distance
        else:
            assert(abs(ref - distance) < epsilon)
    return ref
def check_duplicates(tree_file):
    """Print a line for every repeated leaf label occurrence in the tree."""
    seen = set()
    for leaf in read_tree(tree_file).get_leaves():
        if leaf.name in seen:
            print("duplicate label " + leaf.name)
        seen.add(leaf.name)
def count_monophylies(tree_file):
    """Report, per label, how many monophyletic groups it forms, plus a
    mismatch score counting extra groups beyond one per label."""
    tree = read_tree(tree_file)
    leaf_number = len(tree.get_leaf_names())
    leaves = sorted(set(tree.get_leaf_names()))
    missmatches = 0
    print("Number of leaves: " + str(len(leaves)))
    print("Number of different leaves: " + str(leaf_number))
    print("Monophilies: ")
    for leaf in leaves:
        res = sum(1 for _ in tree.get_monophyletic([leaf], "name"))
        if res > 1:
            missmatches += res - 1
        print("Number of monophilies for " + leaf +": " + str(res))
    print("Missmatch score: " + str(missmatches))
if __name__ == "__main__":
    # Expect exactly one argument: the tree file to analyze.
    if len(sys.argv) != 2:
        print("Syntax: python " + os.path.basename(__file__) + " tree_file")
        exit(1)
    tree_file = sys.argv[1]
    check_duplicates(tree_file)
    analyze_tree(tree_file)
    print("Ultrametric" if is_ultrametric(tree_file) else "Not ultrametric")
|
class Node:
    """One cell of a singly linked list: a payload plus a successor pointer."""

    def __init__(self, data, next):
        # `next` is None at the tail of the list.
        self.next = next
        self.data = data
class LinkedList:
    """Singly linked list supporting prepend insertion and a dump method."""

    def __init__(self, head = None):
        self.head = head

    def input(self, data):
        """Prepend `data` to the front of the list."""
        self.head = Node(data, self.head)

    def PrintAll(self):
        """Print every element in order, then the list size."""
        count = 0
        cursor = self.head
        while cursor is not None:
            print(cursor.data)
            count += 1
            cursor = cursor.next
        print("Size of the list :", count)
# Build a small list by pushing values onto the front, then dump it.
ll = LinkedList()
for value in (55, 521, 5121, 51, 100):
    ll.input(value)
ll.PrintAll()
|
import tensorflow as tf
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from pandas import read_csv
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_absolute_error as MAE
from sklearn.metrics import mean_squared_error as MSE
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
from tensorflow.contrib import rnn
# from utils.preprocessing_data import Timeseries
from model.utils.preprocessing_data_forBNN import LSTM
import time
"""This class build the model BNN with initial function and train function"""
class Model:
    """LSTM time-series predictor built on the TF1 graph API.

    Uses stacked LSTM cells with variational (recurrent) dropout, trains on
    sliding-window data prepared by the project's LSTM preprocessing helper,
    and writes the loss curve (PNG), test predictions (CSV) and a checkpoint
    to a hard-coded results directory.
    """

    def __init__(self, original_data = None, prediction_data = None,
                 train_size = None, valid_size = None, sliding = None,
                 batch_size = None, num_units_LSTM = None,
                 activation = None, optimizer = None,
                 learning_rate = None, epochs = None,
                 input_dim = None, patience = None, dropout_rate = 0.8):
        # Raw multivariate input series and the series to predict.
        self.original_data = original_data
        self.prediction_data = prediction_data
        # Train/validation split fractions (interpretation is delegated to the
        # preprocessing helper).
        self.train_size = train_size
        self.valid_size = valid_size
        # Sliding-window length used to build samples.
        self.sliding = sliding
        self.batch_size = batch_size
        # List of hidden sizes, one entry per stacked LSTM layer.
        self.num_units_LSTM = num_units_LSTM
        # Integer codes mapped onto TF callables in fit(): activation 1..4,
        # optimizer 1..3.
        self.activation = activation
        self.optimizer = optimizer
        self.learning_rate = learning_rate
        self.epochs = epochs
        self.input_dim = input_dim
        # Early-stopping patience (epochs).
        self.patience = patience
        # keep-probability used by the dropout wrappers (0.8 keeps 80%).
        self.dropout_rate = dropout_rate

    def preprocessing_data(self):
        """Build train/valid/test windows via the LSTM preprocessing helper;
        also stores min_y/max_y used later to de-normalise predictions."""
        timeseries = LSTM(self.original_data, self.prediction_data, self.train_size, self.valid_size, self.sliding, self.input_dim)
        self.train_x, self.valid_x, self.test_x, self.train_y, self.valid_y, self.test_y, self.min_y, self.max_y = timeseries.prepare_data()

    def init_RNN(self, num_units, activation):
        """Build a MultiRNNCell of LSTM layers, each wrapped with variational
        dropout; `num_units` is the per-layer hidden-size list."""
        num_layers = len(num_units)
        hidden_layers = []
        for i in range(num_layers):
            if(i==0):
                # First layer sees the raw features, so the variational
                # dropout wrapper needs input_size = input_dim and the input
                # itself is not dropped (input_keep_prob = 1.0).
                cell = tf.contrib.rnn.LSTMCell(num_units[i],activation = activation)
                cell = tf.nn.rnn_cell.DropoutWrapper(cell,
                                                     input_keep_prob = 1.0,
                                                     output_keep_prob = self.dropout_rate,
                                                     state_keep_prob = self.dropout_rate,
                                                     variational_recurrent = True,
                                                     input_size = self.input_dim,
                                                     dtype=tf.float32)
                hidden_layers.append(cell)
            else:
                # Deeper layers receive the previous layer's output.
                cell = tf.contrib.rnn.LSTMCell(num_units[i],activation = activation)
                cell = tf.nn.rnn_cell.DropoutWrapper(cell,
                                                     input_keep_prob = self.dropout_rate,
                                                     output_keep_prob = self.dropout_rate,
                                                     state_keep_prob = self.dropout_rate,
                                                     variational_recurrent = True,
                                                     input_size = self.num_units_LSTM[i-1],
                                                     dtype=tf.float32)
                hidden_layers.append(cell)
        rnn_cells = tf.contrib.rnn.MultiRNNCell(hidden_layers, state_is_tuple = True)
        return rnn_cells

    def mlp(self, input, num_units, activation):
        """Dense stack with dropout after every hidden layer and a 1-unit
        output layer. NOTE(review): not called from fit(); kept as-is."""
        num_layers = len(num_units)
        prev_layer = input
        for i in range(num_layers):
            prev_layer = tf.layers.dense(prev_layer,
                                         num_units[i],
                                         activation = activation,
                                         name = 'layer'+str(i))
            # tf.layers.dropout takes a *drop* rate, hence 1 - keep_prob.
            drop_rate = 1-self.dropout_rate
            prev_layer = tf.layers.dropout(prev_layer , rate = drop_rate)
        prediction = tf.layers.dense(inputs=prev_layer,
                                     units=1,
                                     activation = activation,
                                     name = 'output_layer')
        return prediction

    def early_stopping(self, array, patience):
        """Return False (meaning: stop training) when each of the last
        `patience` losses in `array` exceeds the loss just before that window;
        True otherwise."""
        value = array[len(array) - patience - 1]
        arr = array[len(array)-patience:]
        check = 0
        for val in arr:
            if(val > value):
                check += 1
        if(check == patience):
            return False
        else:
            return True

    def fit(self):
        """Train the network, evaluate on the test split and persist artifacts.

        Returns a numpy array [MAE, RMSE] of the test errors.
        """
        self.preprocessing_data()
        print (self.max_y)
        print (self.min_y)
        # Map the integer hyper-parameter codes onto concrete TF callables.
        if(self.activation == 1):
            activation = tf.nn.sigmoid
        elif(self.activation == 2):
            activation= tf.nn.relu
        elif(self.activation== 3):
            activation = tf.nn.tanh
        elif(self.activation == 4):
            activation = tf.nn.elu
        if(self.optimizer == 1):
            optimizer = tf.train.MomentumOptimizer(learning_rate = self.learning_rate, momentum = 0.9)
        elif(self.optimizer == 2):
            optimizer = tf.train.AdamOptimizer(learning_rate = self.learning_rate)
        else:
            optimizer = tf.train.RMSPropOptimizer(learning_rate = self.learning_rate)
        tf.reset_default_graph()
        # Input is (batch, timesteps, features).
        # NOTE(review): `/` yields a float under Python 3 — confirm this
        # script targets Python 2 or that the quotient is integral.
        x = tf.placeholder("float",[None, self.sliding*len(self.original_data)/self.input_dim, self.input_dim])
        y = tf.placeholder("float", [None, self.train_y.shape[1]])
        with tf.variable_scope('LSTM'):
            lstm_layer = self.init_RNN(self.num_units_LSTM,activation)
            outputs, new_state = tf.nn.dynamic_rnn(lstm_layer, x, dtype="float32")
            outputs = tf.identity(outputs, name='outputs')
        # Dense readout over the last feature slice of the RNN outputs.
        prediction = tf.layers.dense(outputs[:,:,-1],self.train_y.shape[1],activation = activation,use_bias = True)
        # Mean squared error loss.
        loss = tf.reduce_mean(tf.square(y-prediction))
        optimize = optimizer.minimize(loss)
        cost_train_set = []
        cost_valid_set = []
        epoch_set=[]
        init=tf.global_variables_initializer()
        with tf.Session() as sess:
            saver = tf.train.Saver()
            sess.run(init)
            print ("start training ")
            for epoch in range(self.epochs):
                start_time = time.time()
                print ('epoch : ', epoch+1)
                # Number of full mini-batches; any remainder is dropped.
                total_batch = int(len(self.train_x)/self.batch_size)
                avg_cost = 0
                for i in range(total_batch):
                    batch_xs = self.train_x[i*self.batch_size:(i+1)*self.batch_size]
                    batch_ys = self.train_y[i*self.batch_size:(i+1)*self.batch_size]
                    sess.run(optimize,feed_dict={x: batch_xs, y:batch_ys})
                    avg_cost += sess.run(loss,feed_dict={x: batch_xs, y:batch_ys})/total_batch
                # Display logs per epoch step.
                print ("Epoch:", '%04d' % (epoch+1),"cost=", "{:.9f}".format(avg_cost))
                cost_train_set.append(avg_cost)
                val_cost = sess.run(loss, feed_dict={x:self.valid_x, y: self.valid_y})
                cost_valid_set.append(val_cost)
                # Early stopping is checked on the *training* loss history.
                if (epoch > self.patience):
                    if (self.early_stopping(cost_train_set, self.patience) == False):
                        print ("early stopping training")
                        break
                print ("Epoch finished")
                print ('time for epoch: ', epoch + 1 , time.time()-start_time)
            print ('training ok!!!')
            # De-normalise predictions back to the original scale.
            prediction = sess.run(prediction, feed_dict={x:self.test_x, y: self.test_y})
            prediction = prediction * (self.max_y[0] - self.min_y[0]) + self.min_y[0]
            prediction = np.asarray(prediction)
            # NOTE(review): predictions are de-normalised but self.test_y is
            # used as-is — confirm test_y is already on the original scale.
            MAE_err = MAE(prediction,self.test_y)
            RMSE_err = np.sqrt(MSE(prediction,self.test_y))
            error_model = np.asarray([MAE_err,RMSE_err])
            print (error_model)
            print (self.test_y.shape)
            print (prediction.shape)
            # Encode the hyper-parameters into the artifact file names.
            name_LSTM = ""
            for i in range(len(self.num_units_LSTM)):
                if (i == len(self.num_units_LSTM) - 1):
                    name_LSTM += str(self.num_units_LSTM[i])
                else:
                    name_LSTM += str(self.num_units_LSTM[i]) +'_'
            folder_to_save_result = 'results/LSTM/multivariate/cpu/5minutes/ver1/'
            file_name = str(self.sliding) + '-' + str(self.batch_size) + '-' + name_LSTM + '-' + str(self.activation)+ '-' + str(self.optimizer) + '-' + str(self.input_dim) +'-'+str(self.dropout_rate)
            history_file = folder_to_save_result + 'history/' + file_name + '.png'
            prediction_file = folder_to_save_result + 'prediction/' + file_name + '.csv'
            save_path = saver.save(sess, folder_to_save_result + 'model_saved/' + file_name)
            # Save the train/validation loss curves.
            plt.plot(cost_train_set)
            plt.plot(cost_valid_set)
            plt.title('model loss')
            plt.ylabel('loss')
            plt.xlabel('epoch')
            plt.legend(['train', 'validation'], loc='upper left')
            plt.savefig(history_file)
            plt.close()
            # Save the raw test predictions.
            predictionDf = pd.DataFrame(np.array(prediction))
            predictionDf.to_csv(prediction_file, index=False, header=None)
            sess.close()
            return error_model
|
#coding=utf-8
import requests
from base64 import b64encode
from secure import VRC_USERNAME, VRC_PASSWORD
from utils.utils import *
from settings import *
class VRChatAPI:
    """Thin wrapper over the VRChat web API using a persisted requests session."""

    # API key sent by the VRChat web client.
    apiKey = 'JlE5Jldo5Jibnk5O5hTx6XVqsJu4WJ26'
    # Browser-like default headers applied to the session on login.
    headers = {
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'zh-CN,zh;q=0.9,ja;q=0.8,en;q=0.7,zh-TW;q=0.6',
        'user-agent': 'Mo.zilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
        'accept': 'application/json, text/plain, */*',
        'referer': 'https://www.vrchat.net',
    }

    def __init__(self,session):
        # Authenticated requests session used for all API calls.
        self.s=session

    @staticmethod
    def init():
        """Return an API instance: reuse a persisted session when one exists,
        otherwise log in with the configured credentials; exits the process
        when login fails."""
        session=load_session()
        if session:
            api=VRChatAPI(session)
        else:
            try:
                session=VRChatAPI.create_session(VRC_USERNAME,VRC_PASSWORD)
            except Exception as e:
                if str(e)=='Unauthorized':
                    logging.info("登录失败,请检查帐号密码")
                sys.exit("初始化失败")
            api=VRChatAPI(session)
        return api

    @staticmethod
    def create_session(uname,pwd):
        """Log in, persist the session (cookies) to disk, and return it."""
        session=requests.session()
        resp=VRChatAPI.login(session,uname,pwd)
        dump_session(session)
        return session

    @classmethod
    @resp2json
    def login(cls,session,uname,pwd):
        """Authenticate with HTTP Basic auth against /auth/user; the
        resp2json decorator converts the response for the caller."""
        auth = {'authorization': 'Basic ' + b64encode(":".join([uname, pwd]).encode('utf8')).decode('utf8')}
        params={'apiKey':cls.apiKey}
        session.headers.update(cls.headers)
        resp = session.get('https://www.vrchat.net/api/1/auth/user',params=params,headers=auth)
        return resp

    @resp2json
    def _get_friends(self,offline,n,offset):
        """Fetch one page (`n` entries from `offset`) of the online or
        offline friends roster."""
        params = {
            'offline':{True:"true",False:"false"}[offline],
            'n': n,
            'offset':offset,
        }
        resp = self.s.get('https://www.vrchat.net/api/1/auth/user/friends' ,params=params)
        return resp

    def _get_all_friends(self,offline):
        """Page through the friends endpoint until a short page is returned."""
        offset=0
        n=25
        tdata=[]
        while True:
            data=self._get_friends(offline=offline,n=n,offset=offset)
            tdata.extend([i for i in data])
            if len(data)==n:
                # NOTE(review): advances by a hard-coded 25 rather than `n`;
                # harmless while n == 25, but the two should stay in sync.
                offset+=25
            else:
                break
        return tdata

    def get_online_friends(self):
        """Return the full list of currently online friends."""
        return self._get_all_friends(offline=False)

    def get_offline_friends(self):
        """Return the full list of currently offline friends."""
        return self._get_all_friends(offline=True)

    @resp2json
    def get_user_info(self,user_id):
        """Fetch the public profile of the given user id."""
        resp=self.s.get("https://www.vrchat.net/api/1/users/{}".format(user_id))
        return resp

    @resp2json
    def get_world_info(self,wrld_id):
        """Fetch the metadata of the given world id."""
        resp=self.s.get("https://www.vrchat.net/api/1/worlds/{}".format(wrld_id))
        return resp

    def get_world_name(self,wid):
        """Return the display name of a world."""
        data= self.get_world_info(wid)
        return data['name']

    def get_user_name(self,user_id):
        """Return a user's display name (None when absent)."""
        data=self.get_user_info(user_id=user_id)
        return data.get("displayName")
|
import sys
def parse(args):
    """Read the input file named by args[1] and return its lines, stripped."""
    with open(args[1], mode='r') as f:
        return [line.strip() for line in f]
def split(s, sep):
    """Split `s` at the first occurrence of `sep`; return (before, after).

    When `sep` is absent, `after` is the empty string.
    """
    before, _, after = s.partition(sep)
    return before, after
def parse_line(line):
    """Parse 'N1-N2 C: PASSWORD' into (n1, n2, check_char, password)."""
    first, remainder = split(line, '-')
    second, remainder = split(remainder, ' ')
    check_char, password = split(remainder, ': ')
    return int(first), int(second), check_char, password
def count_valid_passwords(lines):
    """Count lines whose check character occurs between the two bounds times."""
    num_valid = 0
    for line in lines:
        lower_bd, upper_bd, check_char, password = parse_line(line)
        occurrences = sum(1 for char in password if char == check_char)
        num_valid += (lower_bd <= occurrences <= upper_bd)
    return num_valid
def position_valid_passwords(lines):
    """Count lines where exactly one of the two 1-based positions holds
    the check character."""
    num_valid = 0
    for line in lines:
        idx1, idx2, check_char, password = parse_line(line)
        num_matches = (password[idx1 - 1] == check_char) + (password[idx2 - 1] == check_char)
        num_valid += (num_matches == 1)
    return num_valid
def main(args):
    """Solve both puzzle parts for the input file named in args[1]."""
    lines = parse(args)
    part_one = count_valid_passwords(lines)
    print(f'part one: {part_one}')
    part_two = position_valid_passwords(lines)
    print(f'part two: {part_two}')

# part one: 546
# part two: 275
if __name__ == '__main__':
    main(sys.argv)
from django.contrib import admin
# Register your models here.
from bank.models import Account, Transaction, TransactionType, TransactionStatus, MetaTransaction
from django.contrib.auth.models import Permission
# Expose the auth Permission model plus every bank model in the Django admin.
admin.site.register(Permission)
admin.site.register(Account)
admin.site.register(Transaction)
admin.site.register(TransactionType)
admin.site.register(TransactionStatus)
admin.site.register(MetaTransaction)
|
import random

# Print one pseudo-random integer from the inclusive range [1, 9].
print(random.randint(1,9))
|
# For numbers `+` adds; for strings it concatenates.
a,b=1,2
print(a+b)  # 3

x1,x2="abc","def"
print(x1+x2)  # "abcdef"
|
#!/usr/bin/python
# Copyright (c) Members of the EGEE Collaboration. 2004.
# See http://www.eu-egee.org/partners/ for details on the copyright
# holders.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import re
import socket
import time
def main():
    """Emit GLUE2 ComputingShare and MappingPolicy LDIF records for every
    share listed in the configuration file given as the only CLI argument.

    Python 2 script: note the `<>` operator and `except Exception, ex` syntax.
    """
    # Matches `key = value` configuration lines.
    pRegex = re.compile('^\s*([^=\s]+)\s*=([^$]+)$')
    config = {}
    infoprovVersion = "1.1"
    hostname = socket.getfqdn()
    now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
    conffile = None
    foundErr = False
    try:
        if len(sys.argv) <> 2:
            raise Exception("Usage: glite-ce-glue2-share-static <config-file>")
        conffile = open(sys.argv[1])
        for line in conffile:
            parsed = pRegex.match(line)
            if parsed:
                # Values may be quoted; strip surrounding whitespace/quotes.
                config[parsed.group(1)] = parsed.group(2).strip(' \n\t"')
            else:
                # Anything that is not blank or a comment is a syntax error.
                tmps = line.strip()
                if len(tmps) > 0 and not tmps.startswith('#'):
                    raise Exception("Error parsing configuration file " + sys.argv[1])
        # Mandatory configuration attributes.
        for mItem in ['ComputingServiceId',
                      'Shares']:
            if not mItem in config:
                raise Exception("Missing attribute %s" % mItem)
        if not "EMIES" in config:
            config["EMIES"] = "no"
        if not "ESComputingServiceId" in config and config["EMIES"] == "yes":
            config["ESComputingServiceId"] = hostname + "_ESComputingElement"
    except Exception, ex:
        sys.stderr.write(str(ex) + '\n')
        foundErr = True
    if conffile:
        conffile.close()
    if foundErr:
        sys.exit(1)
    out = sys.stdout
    shareList = config['Shares'].strip('()').split(',')
    #
    # We used regular expression to discriminate the kind of service
    # the endpoint must be in the form <host>_<interfacename>
    #
    srvTable = {}
    # Maps service id -> (endpoint-matching regex, isLegacy flag).
    srvTable[config['ComputingServiceId']] = (re.compile('[^_]+_org\.glite\.ce\.CREAM'), True)
    if config['EMIES'] == 'yes':
        #
        # TODO select only meaningful port types
        #
        tmps = '(creation|activitymanagement|delegation|activityinfo|resourceinfo)'
        srvTable[config['ESComputingServiceId']] = (re.compile('[^_]+_org\.ogf\.emies\.%s' % tmps), False)
    for srvId in srvTable:
        srvRe, isLegacy = srvTable[srvId]
        for shareItem in shareList:
            shareName = shareItem.strip()
            shareId = "%s_%s" % (shareName, srvId)
            # Per-share configuration keys, in the fixed order used below
            # (queue, owner, CE ids, execution environments, endpoints, ACBRs).
            shKeys = ["SHARE_%s_QUEUENAME" % shareName,
                      "SHARE_%s_OWNER" % shareName,
                      "SHARE_%s_CEIDS" % shareName,
                      "SHARE_%s_EXECUTIONENVIRONMENTS" % shareName,
                      "SHARE_%s_ENDPOINTS" % shareName,
                      "SHARE_%s_ACBRS" % shareName]
            for keyX in shKeys:
                if not keyX in config:
                    sys.stderr.write("Missing definition for " + keyX)
                    sys.exit(1)
            qname = config[shKeys[0]]
            owner = config[shKeys[1]]
            ceIdList = config[shKeys[2]].strip('()').split(',')
            envList = config[shKeys[3]].strip('()').split(',')
            # Keep only the endpoints that belong to this service flavour.
            epList = []
            for tmpId in config[shKeys[4]].strip('()').split(','):
                if srvRe.match(tmpId):
                    epList.append(tmpId)
            acbrList = config[shKeys[5]].strip('()').split(',')
            # --- ComputingShare LDIF entry ---
            out.write("dn: GLUE2ShareID=%s,GLUE2ServiceID=%s,GLUE2GroupID=resource,o=glue\n"
                      % (shareId, srvId))
            out.write("objectClass: GLUE2Entity\n")
            out.write("objectClass: GLUE2Share\n")
            out.write("objectClass: GLUE2ComputingShare\n")
            out.write("GLUE2EntityCreationTime: %s\n" % now)
            if isLegacy:
                for ceId in ceIdList:
                    out.write("GLUE2EntityOtherInfo: CREAMCEId=%s\n" % ceId.strip())
                out.write("GLUE2EntityOtherInfo: ServiceType=org.glite.ce.CREAM\n")
            else:
                out.write("GLUE2EntityOtherInfo: ServiceType=org.ogf.emies\n")
            out.write("GLUE2EntityOtherInfo: InfoProviderName=glite-ce-glue2-share-static\n")
            out.write("GLUE2EntityOtherInfo: InfoProviderVersion=%s\n" % infoprovVersion)
            out.write("GLUE2EntityOtherInfo: InfoProviderHost=%s\n" % hostname)
            out.write("GLUE2ShareID: %s\n" % (shareId))
            out.write("GLUE2ShareDescription: Share of %s for %s\n" % (qname, owner))
            if len(envList) > 0:
                out.write("GLUE2ShareResourceForeignKey: %s\n" % envList[0].strip())
            for epItem in epList:
                out.write("GLUE2ShareEndpointForeignKey: %s\n" % epItem.strip())
            out.write("GLUE2ShareServiceForeignKey: %s\n" % srvId)
            out.write("GLUE2ComputingShareMappingQueue: %s\n" % qname)
            # Default value for Serving state is production
            # Real value supposed to be provided by the dynamic plugin
            out.write("GLUE2ComputingShareServingState: production\n")
            # Real values supposed to be provided by the dynamic plugin
            # (the 999999999 / 444444 numbers below are placeholders).
            out.write("GLUE2ComputingShareDefaultCPUTime: 999999999\n")
            out.write("GLUE2ComputingShareMaxCPUTime: 999999999\n")
            out.write("GLUE2ComputingShareDefaultWallTime: 999999999\n")
            out.write("GLUE2ComputingShareMaxWallTime: 999999999\n")
            out.write("GLUE2ComputingShareMaxRunningJobs: 999999999\n")
            out.write("GLUE2ComputingShareMaxTotalJobs: 999999999\n")
            out.write("GLUE2ComputingShareMaxWaitingJobs: 999999999\n")
            out.write("GLUE2ComputingShareMaxSlotsPerJob: 444444\n")
            out.write("GLUE2ComputingShareRunningJobs: 0\n")
            out.write("GLUE2ComputingShareTotalJobs: 0\n")
            out.write("GLUE2ComputingShareFreeSlots: 0\n")
            out.write("GLUE2ComputingShareUsedSlots: 0\n")
            out.write("GLUE2ComputingShareWaitingJobs: 444444\n")
            out.write("GLUE2ComputingShareEstimatedAverageWaitingTime: 2146660842\n")
            out.write("GLUE2ComputingShareEstimatedWorstWaitingTime: 2146660842\n")
            out.write("GLUE2ComputingShareMaxMainMemory: 444444\n")
            out.write("GLUE2ComputingShareMaxVirtualMemory: 444444\n")
            out.write("GLUE2ComputingShareGuaranteedMainMemory: 0\n")
            out.write("GLUE2ComputingShareGuaranteedVirtualMemory: 0\n")
            if len(envList) > 0:
                out.write("GLUE2ComputingShareExecutionEnvironmentForeignKey: %s\n" % envList[0].strip())
            for epItem in epList:
                out.write("GLUE2ComputingShareComputingEndpointForeignKey: %s\n" % epItem.strip())
            out.write("GLUE2ComputingShareComputingServiceForeignKey: %s\n" % srvId)
            out.write("\n")
            # --- MappingPolicy LDIF entry for this share ---
            out.write("dn: GLUE2PolicyID=%s_policy,GLUE2ShareId=%s,GLUE2ServiceID=%s,GLUE2GroupID=resource,o=glue\n"
                      % (shareId, shareId, srvId))
            out.write("objectClass: GLUE2Entity\n")
            out.write("objectClass: GLUE2Policy\n")
            out.write("objectClass: GLUE2MappingPolicy\n")
            out.write("GLUE2EntityCreationTime: %s\n" % now)
            out.write("GLUE2EntityOtherInfo: InfoProviderName=glite-ce-glue2-share-static\n")
            out.write("GLUE2EntityOtherInfo: InfoProviderVersion=%s\n" % infoprovVersion)
            out.write("GLUE2EntityOtherInfo: InfoProviderHost=%s\n" % hostname)
            out.write("GLUE2PolicyID: %s_policy\n" % shareId)
            out.write("GLUE2PolicyScheme: org.glite.standard\n")
            for acbr in acbrList:
                out.write("GLUE2PolicyRule: %s\n" % acbr.strip())
            out.write("GLUE2PolicyUserDomainForeignKey: %s\n" % owner)
            out.write("GLUE2MappingPolicyShareForeignKey: %s\n" % shareId)
            out.write("\n")

if __name__ == "__main__":
    main()
|
import pymongo
import hashlib
import random
import string
# MongoDB database `webmub` on the default local port.
db = pymongo.MongoClient("localhost", 27017).webmub
# Upload types accepted by the site.
ALLOWED_EXTENSIONS = set(['gif','webm'])
def generate():
    """Return a fresh 5-character alphanumeric short link that is not
    already present in the db.webm collection."""
    # NOTE: string.uppercase/lowercase are Python 2 attributes.
    alphabet = string.uppercase + string.lowercase + string.digits
    link = ''.join(random.choice(alphabet) for _ in range(5))
    while db.webm.find_one({"short": link}):
        link = ''.join(random.choice(alphabet) for _ in range(5))
    return link
#db.links.insert({"short":link, "path":"/test/path/to/file", "user":user, "points":0, "comments":[{}]})
# this is what the comments section should look like:
'''
[
{
"user":user,
"points":0
"quotes":
[
{
"user":user,
"points":0
}
]
},
{
"user":user,
"points":0
"quotes":
[
{
"user":user,
"points":0
}
]
},
]
'''
def protect(string):
    # Stretch a password: 1000 rounds of salted SHA-512 (Python 2 `str`
    # concatenation; on Python 3 hashlib would require bytes input).
    # NOTE(review): the parameter name shadows the imported `string` module
    # (unused inside this function, but worth renaming when convenient).
    for _ in range(1000):
        string = hashlib.sha512(string+"asdkkl21j312j3lkjasdi9930132009)(Sd9asd--as0d-012-3-023-0_)_)-0asd-0asdasdasd]]{AS{D[asd[[123]12]3asd[[ASD]]]123;12312l3laskdlASDKKAJSDKjasd").hexdigest()
    return string
|
#---------------------- BOARD STARTER ----------------------#
import turtle
# Disable automatic screen refreshes for fast drawing; turtle.update() must
# be called after drawing to display the result.
turtle.tracer(0, 0)
turtle.speed(0)

# number of rows and columns.
ROWS = 25
COLS = 8

# the dimensions of the board being drawn.
BOARD_WIDTH = 600
BOARD_HEIGHT = 600

# the dimensions of the window being displayed.
WINDOW_WIDTH = 800
WINDOW_HEIGHT = 800
def make_board_list(rows, cols):
    '''
    Purpose:
        creates a list of lists where each inner list is one row of the
        board, initialised with zeros.
    Inputs: Number of rows, and number of columns.
    Outputs: A list of lists (each row is a distinct list object).
    '''
    return [[0] * cols for _ in range(rows)]
# Make a ROWSxCOLS list of lists.
BOARD_DATA = make_board_list(ROWS, COLS)

# Draw the interface.
"""BoardGUI.draw_board(BOARD_WIDTH, BOARD_HEIGHT, ROWS, COLS,
WINDOW_WIDTH, WINDOW_HEIGHT)"""

# Header row: column 0 is the time label, columns 1-7 are the weekdays.
BOARD_DATA[0][1] = "Sunday"
BOARD_DATA[0][2] = "Monday"
BOARD_DATA[0][3] = "Tuesday"
BOARD_DATA[0][4] = "Wednesday"
BOARD_DATA[0][5] = "Thursday"
BOARD_DATA[0][6] = "Friday"
BOARD_DATA[0][7] = "Saturday"
BOARD_DATA[0][0] = "Time"

# Fill column 0 of rows 1-24 with the hours 1..24.
time = 0
for i in range(1,25):
    time = time + 1
    BOARD_DATA[i][0] = time

########BOARD GUI###########
# has a lot of inputs, just use a new line!
def setup():
    '''
    Purpose:
    Configure turtle before drawing.
    Tracer affects the rate that turtle refreshes the screen.
    Setting it to 0 turns it off so we don't compute any
    unnecessary animations.
    ####### VERY IMPORTANT NOTE #########
    YOU WILL NEED TO CALL turtle.update() whenever you finish drawing something.
    We also set our turtle's speed to 0 which is the fastest animation.
    NOTE(review): this function body is documentation only -- the actual
    tracer/speed calls are made at module import time above.'''
def draw_board(board_width, board_height, rows, cols,
               window_width, window_height):
    '''
    Purpose: Draws the board.  (Placeholder: the real drawing is done by
    draw_grid; this stub intentionally does nothing.)
    Inputs: Board dimensions, row and columns, and window size.
    Outputs: Nothing.
    '''
    return None
def draw_board_border(board_width, board_height):
    '''
    Purpose: Draw the border around the board.
    Inputs: the board's width and height.
    Outputs: Nothing.
    '''
    # Start pen-up at the lower-left corner, centred on the origin.
    turtle.penup()
    turtle.goto(-(board_width/2), -(board_height/2))
    turtle.pendown()
    # NOTE(review): all four sides are drawn with board_width, so the
    # border is a square -- only correct while board_width == board_height.
    for i in range(0,4):
        turtle.forward(board_width)
        turtle.left(90)
def draw_vertical_lines(board_height, board_width, cols):
    '''
    Purpose: Helper for drawing the vertical lines of the board.
    Inputs: the board's height, width, and the amount of columns it has.
    Outputs: Nothing.
    '''
    turtle.penup()
    # Start at the top-left corner of the board.
    turtle.goto(-(board_width/2), (board_height/2))
    for i in range(1,cols+1):
        # Remember the top of the current line before drawing down it.
        x=turtle.xcor()
        y=turtle.ycor()
        turtle.setheading(270)
        turtle.pendown()
        turtle.forward(board_height)
        turtle.penup()
        # Step one column width to the right, back at the top edge.
        turtle.setx(x+board_width/cols)
        turtle.sety(y)
def draw_horizontal_lines(board_width, board_height, rows):
    '''
    Purpose: Helper for drawing the horizontal lines of the board.
    Inputs: the board's width and the number of rows we have.
    Outputs: Nothing.
    '''
    # NOTE(review): no penup() before this goto -- if the pen is still
    # down from a previous call this draws a stray line to the corner.
    turtle.goto(-(board_width/2), (board_height/2))
    for i in range(1,rows+1):
        turtle.penup()
        # Remember the left end of the current line before drawing it.
        x=turtle.xcor()
        y=turtle.ycor()
        turtle.setheading(0)
        turtle.pendown()
        turtle.forward(board_width)
        turtle.penup()
        # Step one row height down, back at the left edge.
        turtle.setx(x)
        turtle.sety(y-board_height/rows)
def draw_grid(board_width, board_height, rows, cols):
    '''
    Purpose: Draws the grid of the board
    Inputs: the board's width, height, and the number of rows and columns we have.
    Outputs: Nothing.
    '''
    # Border first, then the interior vertical and horizontal rules.
    draw_board_border(board_width,board_height)
    draw_vertical_lines(board_height,board_width,cols)
    draw_horizontal_lines(board_width,board_height,rows)
# Draw the initial 600x600, 25-row x 8-column calendar grid at start-up.
draw_grid(600,600,25,8)
def set_row_col_to_val(row,col,val):
    # Write *val* into the data model cell (no redraw).
    BOARD_DATA[row][col] = val
def go_to_row_col_in_display(row,col):
    # Move (pen up) to the text anchor point inside cell (row, col).
    # NOTE(review): 75 (=600/8) and 24 (=600/25) are hard-coded for the
    # 600x600, 25x8 board; keep in sync with BOARD_WIDTH/COLS if changed.
    turtle.penup()
    turtle.setx((-300 + 75*col)+5)
    turtle.sety(300-(24*(row))-20)
def create_text_at_row_col(text, row, col):
    # Store the text in the data model, then draw it in the matching cell.
    set_row_col_to_val(row, col, text)
    go_to_row_col_in_display(row, col)
    turtle.write(text, font = ("Arial",12,"normal"))
########program###########
# Interactive start-up: list the available commands, then ask for the
# user's daily scheduled-hours limit (used by sched_limit below).
print("Welcome to Your Self Care Calendar! Here are some commands:")
print("To display calendar, type 'display_calendar()'")
print("To make an event, type 'make_event()'")
print("To delete an event, type 'delete_event()'")
print("There are three self-care events you are required to have in your calendar: exercise (at least 5 days a week), meditate (at least 7 days a week), and treat yo self (at least 1 day a week).")
print("To make a self care event, type 'make_selfcare_event('name of the self care event, SPELLED RIGHT!')'.")
print("How many hours of scheduled events can you handle in one day?")
hour_limit = int(input())
def make_event():
    """Interactively add an event to the calendar and redraw it.

    Reads the event name, day column (1=Sun .. 7=Sat), and start/end hours
    (24h military time; end hour is exclusive), then writes the name into
    each covered hour row.  No input validation is performed.
    """
    print("State the name of event.")
    name = input()
    print("What day is your event on? Sun = 1, Mon = 2...")
    date = int(input())
    print("When does the event start (In military time. Do not append :00. Example: 1:00 pm = 13)?")
    start_time = int(input())
    print("When does the event end (In military time. Do not append :00. Example: 1:00 pm = 13)?")
    end_time = int(input())
    #board[(start_time,date)]=name
    for hour in range(start_time, end_time):
        BOARD_DATA[hour][date] = name
    display_calendar()
def column(day):
    """Return the entries of rows 0-11 of the given day column.

    NOTE(review): the board has 25 rows (hours up to 24) but only rows
    0-11 are scanned here -- confirm whether range(0,12) should be
    range(0,25); the busy/self-care checks below inherit this limit.
    """
    lst=[]
    for i in range(0,12):
        lst.append(BOARD_DATA[i][day])
    return lst
def daily_schedule_too_busy(limit, day):
    # A day is "too busy" when more than *limit* hour slots in its column
    # are filled (non-zero).
    booked = sum(1 for slot in column(day) if slot != 0)
    return booked > limit
def delete_event():
    """Interactively clear an event's hour cells (reset to 0) and redraw.

    Reads the day column and start/end hours (24h, end exclusive); does
    not check that an event actually occupies those cells.
    """
    print("What day is the event you would like to delete on?")
    date =int(input())
    print("When does the event start (In military time. Do not append :00. Example: 1:00 pm = 13)?")
    start_time = int(input())
    print("When does the event end (In military time. Do not append :00. Example: 1:00 pm = 13)?")
    end_time = int(input())
    for hour in range(start_time, end_time):
        BOARD_DATA[hour][date] = 0
    display_calendar()
def sched_limit():
    """Print a warning for every day whose booked hours exceed hour_limit.

    Day columns run 1 (Sunday) through 7 (Saturday); the warning message
    tells the user to trim that day's schedule.
    """
    # Fix: range(1, 7) stopped at Friday and never checked Saturday
    # (column 7); range(1, 8) covers all seven day columns.
    for i in range(1, 8):
        if daily_schedule_too_busy(hour_limit, i):
            print("Your schedule is too busy on day " + str(i)+ ". Please delete so you only have " +str(hour_limit)+ " hours scheduled.")
def make_selfcare_event(activity):
    """Interactively schedule the named self-care activity and redraw.

    Like make_event, but the cell text is the activity name itself so
    selfcare_check/activity_in_day can find it later by exact match.
    """
    print("What day do you want to " + activity + " ? Sun = 1, Mon = 2...")
    date = int(input())
    print("When does the event start (In military time. Do not append :00. Example: 1:00 pm = 13)?")
    start_time = int(input())
    print("When does the event end (In military time. Do not append :00. Example: 1:00 pm = 13)?")
    end_time = int(input())
    #board[(start_time,date)]=name
    for hour in range(start_time, end_time):
        BOARD_DATA[hour][date] = activity
    display_calendar()
def activity_in_day(activity, day):
    # True when the named activity appears anywhere in that day's column.
    return any(slot == activity for slot in column(day))
def selfcare_check(reqfreq, activity):
    """Warn when *activity* is scheduled on fewer than *reqfreq* days.

    reqfreq -- required number of days per week.
    activity -- the exact cell text to search for.
    """
    # Fix: the original scanned columns 0-6, which included the "Time"
    # column (0, never holds an activity) and skipped Saturday (column 7).
    # Day columns are 1..7.
    days_planned = [day for day in range(1, 8) if activity_in_day(activity, day)]
    if len(days_planned) < reqfreq:
        print("You still need to plan " +str(activity) + " on " + str(reqfreq-len(days_planned))+ " days.")
    #selfcare_check(reqfreq,activity)
def BOARD_DATA_to_display():
    # Render every non-empty cell of the 25x8 data model onto the board.
    for r, row in enumerate(BOARD_DATA):
        for c, cell in enumerate(row):
            if cell != 0:
                create_text_at_row_col(cell, r, c)
def display_calendar():
    """Clear the turtle screen, redraw the grid, dump the data model to
    stdout, and render every non-empty cell."""
    turtle.reset()
    draw_grid(600,600,25,8)
    print(BOARD_DATA)
    BOARD_DATA_to_display()
# Start-up report: flag over-booked days and missing required self-care
# events, then show the available commands again.
sched_limit()
selfcare_check(5, "exercise")
selfcare_check(7, "meditate")
selfcare_check(1, "treat yo self")
# Fix: the prompt contained a stray ')' ("What do you want to do?)Choose...").
print("What do you want to do? Choose to type:")
print("make_event()")
print("delete_event()")
print("make_selfcare_event(name of the self care event SPELLED RIGHT in quotation marks!)")
|
import pytest
import time
from base.base_driver import init_driver
from page.page import Page
from base.base_analyze import analyze_with_file_name
class TestAddress:
    """Appium UI test: create a new shipping address (logging in first if
    necessary) and verify the "added successfully" toast appears."""
    def setup(self):
        # Fresh driver and page-object container before each test.
        self.driver = init_driver()
        self.page = Page(self.driver)
    # def teardown(self):
    #     self.driver.quit()
    def test_address(self):
        self.page.home.click_mine()
        self.page.mine.click_setting()
        # If the account is not logged in yet, sign in first.
        if not self.page.mine.is_login():
            self.page.mine.click_sign_up_and_login()
            self.page.sign_up_and_login.input_phone("18503080305")
            # Enter the password.
            self.page.sign_up_and_login.input_password("123456")
            # Tap the login button.
            self.page.sign_up_and_login.click_login()
        # Open the shipping-address list.
        self.page.mine.click_address()
        # Tap "new address".
        self.page.address_list.click_new_address()
        # Enter the recipient name.
        self.page.address_info.input_name("hh1123")
        # Enter the phone number.
        self.page.address_info.input_mobile("18503080303")
        # Enter the detailed street address.
        self.page.address_info.input_address("某区某单元")
        # Tap the region field.
        self.page.address_info.click_region()
        # Pick a region.
        self.page.region.click_city_titles()
        # Tap confirm.
        self.page.region.click_commit()
        # Save the address.
        self.page.address_info.click_save_address()
        # Assert the success toast ("added successfully") is shown.
        assert self.page.address_info.is_toast_exist("添加成功")
#!/usr/bin/env python
# NOTE: Python 2 script (print statements are used below).
import yaml
from subprocess import call
import argparse
# Global registry of Instance objects, populated by initialize().
instance_arr = []
def call_and_log(cmd):
    """Echo *cmd* to stdout, then run it through the shell."""
    print "Running cmd: " + cmd
    # shell=True: cmd is a single shell string, so it must come from
    # trusted configuration only.
    call([cmd], shell=True)
class Instance:
    """
    A GCE container-VM definition loaded from server_config.yaml.

    Attributes: name, zone, i_type (machine type), containers (list of
    Container objects with their docker option strings pre-rendered).
    """
    def __init__(self, name, zone, i_type, containers):
        self.name = name
        self.zone = zone
        self.i_type = i_type
        self.containers = []
        # Expand each YAML container dict into a Container object.
        for container in containers:
            one_container = Container(container.get("name"),
                                      container.get("tag"),
                                      container.get("directory"),
                                      container.get("file"),
                                      self.name,
                                      generate_port_string(container.get("ports", None)),
                                      generate_volumes_str(container.get("volumes", None)),
                                      generate_env_variables(container.get("environment", None)))
            self.containers.append(one_container)
    def deploy(self):
        # Create the GCE instance, then copy, build and run each container
        # remotely in prod mode.
        cmd = self.generate_ce_create_cmd()
        call_and_log(cmd)
        for container in self.containers:
            cmd = self.generate_ce_copy_cmd(container.directory)
            call_and_log(cmd)
            cmd = container.generate_build_cmd()
            self.run_gce_cmd(cmd)
            cmd = container.generate_run_cmd(["-d", "-i"], "prod")
            self.run_gce_cmd(cmd)
    @staticmethod
    def list_instances():
        # NOTE(review): Instance defines no get_info(); calling this
        # raises AttributeError -- confirm the intended method name.
        for instance in instance_arr:
            instance.get_info()
    @staticmethod
    def get_by_name(name):
        # Linear search; returns None implicitly when no instance matches.
        for instance in instance_arr:
            if instance.name == name:
                return instance
    def generate_ce_create_cmd(self):
        # gcloud command that creates this container-VM instance.
        cmd = "gcloud compute instances create {0} --image container-vm --zone {1} --machine-type {2}".format(self.name, self.zone, self.i_type)
        return cmd
    def run_gce_cmd(self, cmd):
        # Wrap a remote command in `gcloud compute ssh` and run it.
        cmd = "gcloud compute ssh root@{0} --zone={1} --command='{2}'".format(self.name, self.zone, cmd)
        call_and_log(cmd)
    def generate_ce_copy_cmd(self, directory):
        # Copy the build-context directory to /root/ on the instance.
        cmd = "gcloud compute copy-files {0} root@{1}:/root/{0} --zone {2}".format(directory, self.name, self.zone)
        return cmd
class Container:
    """
    A docker container definition tied to a parent Instance.

    Attributes: name, tag (image tag), directory (build context), file
    (Dockerfile name -- NOTE(review): unused, generate_build_cmd
    hard-codes "Dockerfile"), instance (parent instance name), plus the
    pre-rendered ports/volumes/environments option strings.
    """
    def __init__(self, name, tag, directory, file, instance, ports=None, volumes=None, environments=None):
        self.name = name
        self.tag = tag
        self.directory = directory
        self.file = file
        self.ports = ports
        self.instance = instance
        self.volumes = volumes
        self.environments = environments
    def build(self):
        # Build the image locally.
        cmd = self.generate_build_cmd()
        call_and_log(cmd)
    def start(self):
        # Run the container locally, detached, in dev mode.
        cmd = self.generate_run_cmd(["-d", "-i"], "dev")
        call_and_log(cmd)
    def stop(self):
        # Force-remove the running container.
        cmd = "docker rm -f {0}".format(self.name)
        call_and_log(cmd)
    def update(self):
        # Re-copy the build context to the parent instance and restart the
        # container there in prod mode.
        instance = Instance.get_by_name(self.instance)
        cmd = instance.generate_ce_copy_cmd(self.directory)
        call_and_log(cmd)
        instance.run_gce_cmd("docker rm -f {0}".format(self.name))
        instance.run_gce_cmd(self.generate_run_cmd(["-d", "-i"], "prod"))
    def generate_run_cmd(self, parameters, level="dev"):
        """Render the full `docker run` command line.

        parameters -- list of raw docker flags (e.g. ["-d", "-i"]).
        level -- "dev" or "prod"; selects WORK_ENV and host networking.
        """
        if level == "dev":
            env_string = "-e 'WORK_ENV=DEV'"
        elif level == "prod":
            env_string = "-e 'WORK_ENV=PROD'"
        else:
            # Fix: the original left env_string unbound for any other
            # level value, raising NameError in the format() call below.
            env_string = ""
        docker_cmd = "docker run {0} {4} {1} {5} {6} {7} --name {2} -t {3}".format(" ".join(parameters), self.ports, self.name, self.tag, env_string, self.volumes, self.environments, generate_host_string() if level == "dev" else " ")
        return docker_cmd
    def generate_build_cmd(self):
        # docker build -f <dir>/Dockerfile -t <tag> <dir>
        docker_cmd = "docker build -f {0}/Dockerfile -t {1} {0}".format(self.directory, self.tag)
        return docker_cmd
    @staticmethod
    def get_by_name(name):
        # Search every instance's containers; returns None when no
        # container matches.
        for instance in instance_arr:
            for container in instance.containers:
                if container.name == name:
                    return container
def generate_volumes_str(volume_arr=None):
    """Render docker -v options, one "-v <spec> " per volume; "" for None
    or an empty list."""
    if volume_arr is None:
        return ""
    return "".join("-v {0} ".format(volume) for volume in volume_arr)
def generate_env_variables(env_arr=None):
    """Render docker -e options, one "-e key=value " per entry; "" for
    None or an empty list."""
    if env_arr is None:
        return ""
    return "".join("-e {0}={1} ".format(env["key"], env["value"]) for env in env_arr)
def generate_port_string(port_arr=None):
    """Render docker -p options, one " -p host:container " per entry; ""
    for None or an empty list."""
    if port_arr is None:
        return ""
    return "".join(" -p {0}:{1} ".format(port["host"], port["container"]) for port in port_arr)
def generate_host_string():
    """Return the docker networking option used in dev mode.

    Host networking replaced the earlier per-instance --add-host scheme
    (see version history), so this is a constant.
    """
    return "--net=host "
def initialize():
    """Load server_config.yaml and populate the global instance_arr.

    Each entry under the "instance" key becomes an Instance object.
    """
    with open("server_config.yaml", "r") as f:
        # safe_load avoids arbitrary Python object construction from the
        # config file (yaml.load without an explicit Loader is unsafe and
        # deprecated since PyYAML 5.1).
        settings = yaml.safe_load(f)
    for instance in settings["instance"]:
        one_instance = Instance(instance["name"],
                                instance["zone"],
                                instance["type"],
                                instance["containers"])
        instance_arr.append(one_instance)
def main():
    """Parse the CLI and dispatch build/start/stop/deploy/update actions.

    Each flag takes either the literal "all" or a container/instance name.
    NOTE(review): get_by_name returns None for unknown names, so a typo
    raises AttributeError on the subsequent method call.
    """
    initialize()
    parser = argparse.ArgumentParser(
        description="Script for preparing and deploying the Akfiha local development server")
    # Help strings fixed: every flag previously reused --build's text
    # ("... a dev enviroment using docker file.") verbatim.
    parser.add_argument("-b", "--build", help="build a container image from its docker file ('all' or a container name).", type=str)
    parser.add_argument("-s", "--start", help="start a container locally in dev mode ('all' or a container name).", type=str)
    parser.add_argument("--stop", help="stop and remove a running container ('all' or a container name).", type=str)
    parser.add_argument("--deploy", help="create the GCE instance(s) and deploy their containers ('all' or an instance name).", type=str)
    parser.add_argument("--update", help="re-copy and restart a deployed container by name.", type=str)
    args = parser.parse_args()
    if args.build:
        if args.build == "all":
            for instance in instance_arr:
                for container in instance.containers:
                    container.build()
        else:
            Container.get_by_name(args.build).build()
    if args.start:
        if args.start == "all":
            for instance in instance_arr:
                for container in instance.containers:
                    container.start()
        else:
            Container.get_by_name(args.start).start()
    if args.stop:
        if args.stop == "all":
            for instance in instance_arr:
                for container in instance.containers:
                    container.stop()
        else:
            Container.get_by_name(args.stop).stop()
    if args.deploy:
        if args.deploy == "all":
            for instance in instance_arr:
                instance.deploy()
        else:
            instance = Instance.get_by_name(args.deploy)
            instance.deploy()
    if args.update:
        container = Container.get_by_name(args.update)
        container.update()
if __name__ == '__main__':
    main()
|
def _get_20newsgroup_classes():
'''
@return list of classes associated with each split
'''
label_dict = {
'talk.politics.mideast': 0,
'sci.space': 1,
'misc.forsale': 2,
'talk.politics.misc': 3,
'comp.graphics': 4,
'sci.crypt': 5,
'comp.windows.x': 6,
'comp.os.ms-windows.misc': 7,
'talk.politics.guns': 8,
'talk.religion.misc': 9,
'rec.autos': 10,
'sci.med': 11,
'comp.sys.mac.hardware': 12,
'sci.electronics': 13,
'rec.sport.hockey': 14,
'alt.atheism': 15,
'rec.motorcycles': 16,
'comp.sys.ibm.pc.hardware': 17,
'rec.sport.baseball': 18,
'soc.religion.christian': 19,
}
train_classes = []
for key in label_dict.keys():
if key[:key.find('.')] in ['sci', 'rec']:
train_classes.append(key)
val_classes = []
for key in label_dict.keys():
if key[:key.find('.')] in ['comp']:
val_classes.append(key)
test_classes = []
for key in label_dict.keys():
if key[:key.find('.')] not in ['comp', 'sci', 'rec']:
test_classes.append(key)
return train_classes, val_classes, test_classes
def _get_reuters_classes():
'''
@return list of classes associated with each split
'''
# 31 classes inferred using /preprocessing/reuters.py
classes = {'trade': 28, 'grain': 12, 'ship': 25, 'gold': 11, 'acq': 0, 'tin': 27, 'ipi': 14, 'earn': 9, 'jobs': 16, 'sugar': 26, 'cpi': 7, 'money-fx': 18, 'interest': 13, 'cocoa': 3, 'coffee': 4, 'crude': 8,
'cotton': 6, 'livestock': 17, 'money-supply': 19, 'copper': 5, 'alum': 1, 'rubber': 24, 'nat-gas': 20, 'reserves': 22, 'bop': 2, 'gnp': 10, 'iron-steel': 15, 'orange': 21, 'retail': 23, 'wpi': 30, 'veg-oil': 29}
sorted_classes = sorted(classes, key=lambda k: classes[k])
train_classes = sorted_classes[:15]
val_classes = sorted_classes[15:20]
test_classes = sorted_classes[20:31]
return train_classes, val_classes, test_classes
def _get_huffpost_classes():
'''
@return list of classes associated with each split
'''
# randomly sorted classes
classes = [
'SPORTS', 'MEDIA', 'PARENTING', 'CULTURE & ARTS', 'MONEY',
'FOOD & DRINK', 'BLACK VOICES', 'LATINO VOICES', 'TRAVEL',
'RELIGION', 'THE WORLDPOST', 'ARTS & CULTURE', 'IMPACT', 'ARTS',
'STYLE', 'COMEDY', 'GOOD NEWS', 'GREEN', 'WOMEN', 'FIFTY',
'SCIENCE', 'WORLDPOST', 'WEIRD NEWS', 'CRIME', 'QUEER VOICES',
'HEALTHY LIVING', 'TECH', 'WEDDINGS', 'EDUCATION', 'BUSINESS',
'ENTERTAINMENT', 'TASTE', 'POLITICS', 'WORLD NEWS', 'ENVIRONMENT',
'DIVORCE', 'PARENTS', 'COLLEGE', 'STYLE & BEAUTY', 'WELLNESS',
'HOME & LIVING',
]
train_classes = classes[:20]
val_classes = classes[20:25]
test_classes = classes[25:41]
return train_classes, val_classes, test_classes
def _get_fewrel_classes():
'''
@return list of classes associated with each split
'''
# Computed using /preprocessing/fewrel.py
bao_labels_to_wikidata_properties = {
0: 'P931',
1: 'P4552',
2: 'P140',
3: 'P1923',
4: 'P150',
5: 'P6',
6: 'P27',
7: 'P449',
8: 'P1435',
9: 'P175',
10: 'P1344',
11: 'P39',
12: 'P527',
13: 'P740',
14: 'P706',
15: 'P84',
16: 'P495',
17: 'P123',
18: 'P57',
19: 'P22',
20: 'P178',
21: 'P241',
22: 'P403',
23: 'P1411',
24: 'P135',
25: 'P991',
26: 'P156',
27: 'P176',
28: 'P31',
29: 'P1877',
30: 'P102',
31: 'P1408',
32: 'P159',
33: 'P3373',
34: 'P1303',
35: 'P17',
36: 'P106',
37: 'P551',
38: 'P937',
39: 'P355',
40: 'P710',
41: 'P137',
42: 'P674',
43: 'P466',
44: 'P136',
45: 'P306',
46: 'P127',
47: 'P400',
48: 'P974',
49: 'P1346',
50: 'P460',
51: 'P86',
52: 'P118',
53: 'P264',
54: 'P750',
55: 'P58',
56: 'P3450',
57: 'P105',
58: 'P276',
59: 'P101',
60: 'P407',
61: 'P1001',
62: 'P800',
63: 'P131',
64: 'P177',
65: 'P364',
66: 'P2094',
67: 'P361',
68: 'P641',
69: 'P59',
70: 'P413',
71: 'P206',
72: 'P412',
73: 'P155',
74: 'P26',
75: 'P410',
76: 'P25',
77: 'P463',
78: 'P40',
79: 'P921',
}
# head=WORK_OF_ART validation/test split
train_classes = [0, 1, 2, 3, 4, 5, 6, 8, 10, 11, 12, 13, 14, 15, 16, 19, 21,
22, 24, 25, 26, 27, 28, 30, 31, 32, 33, 34, 35, 36, 37, 38,
39, 40, 41, 43, 44, 45, 46, 48, 49, 50, 52, 53, 56, 57, 58,
59, 61, 62, 63, 64, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75,
76, 77, 78]
val_classes = [7, 9, 17, 18, 20]
test_classes = [23, 29, 42, 47, 51, 54, 55, 60, 65, 79]
def _f(lst):
return [bao_labels_to_wikidata_properties[i] for i in lst]
return _f(train_classes), _f(val_classes), _f(test_classes)
|
import cProfile, pstats, io
import requests
from sys import argv
import re
def get_description(gene_id):
    """Receives the gene symbol and retrieves the OMIM description of its
    function via the marrvel.org API.

    Bracketed/parenthesised spans (citations) are stripped.  If the
    cleaned text has at most 280 characters it is returned in full;
    otherwise whole sentences are dropped from the end until the text
    fits in 280 characters (falling back to the first sentence alone).
    Returns "" when no description is available.
    """
    url = "http://v1.marrvel.org/data/OMIM/"
    req = requests.get(url, params = {"geneSymbol": gene_id})
    data=req.json()
    gg=""
    if data!=None and "description" in data:
        # Remove any (...) or [...] spans.
        temp=re.sub("[\(\[].*?[\)\]]", "",data["description"])
        if len(temp)<=280:
            gg=temp
        else:
            aa=temp.split(".")
            b=len(aa)-2
            res=""
            # Keep the longest sentence prefix that fits in 280 chars.
            while b>0:
                if len(".".join(aa[:b]))<=280:
                    res=".".join(aa[:b])
                    break
                b-=1
            if res=="":
                res=aa[0]
            gg=res+"."
    return gg
def get_omim(gene_id):
    """Query marrvel.org OMIM data for *gene_id*.

    Returns (mimNumber, phenotypes): the gene's MIM number as a string
    (or "NOOMIM"), and a list of [phenotype MIM number, phenotype name,
    inheritance abbreviation] triples, or ["NOOMIM"] when none exist.
    """
    # Map OMIM inheritance descriptions to short codes; unknown modes
    # become "ND" below.
    inh={"Autosomal dominant":"AD", "Autosomal recessive":"AR", "Isolated cases":"IC","Somatic mutation":"SMu", "Multifactorial":"Mu", "Mitochondrial": "Mi", "Somatic mosaicism":"SMo", "X-linked":"XL","X-linked dominant":"XLD", "X-linked recessive":"XLR", "Digenic recessive":"DR", "Inherited chromosomal imbalance":"ICB", "Digenic dominant":"DD"}
    url = "http://v1.marrvel.org/data/OMIM/"
    req = requests.get(url, params = {"geneSymbol": gene_id})
    data=req.json()
    mimNumber="NOOMIM"
    pheno=[]
    if data!=None:
        if "mimNumber" in data:
            mimNumber=str(data["mimNumber"])
        if "phenotypes" in data:
            for el in data["phenotypes"]:
                if el["phenotypeInheritance"] in inh:
                    gg=inh[el["phenotypeInheritance"]]
                else:
                    gg="ND"
                # Only keep phenotypes that carry both a MIM number and a name.
                if "phenotypeMimNumber" in el and "phenotype" in el:
                    pheno.append([str(el["phenotypeMimNumber"]), el["phenotype"], gg])
    if len(pheno)==0:
        pheno=["NOOMIM"]
    return mimNumber, pheno
def read_bd(bd, patern):
    """Return the first line of file *bd* that contains *patern*, or "ND".

    The returned line keeps its trailing newline, matching direct file
    iteration.  (Parameter spelling "patern" kept for caller
    compatibility.)
    """
    with open(bd) as f:
        # next() with a default replaces the original dead "len(line)==0"
        # branch: a line yielded by file iteration is never empty, so the
        # "ND" sentinel alone covers the not-found case.
        return next((l for l in f if patern in l), "ND")
def make_it_shorter(l):
    """Truncate a ';'-separated string to at most 200 characters.

    Returns the longest leading run of ';'-separated segments whose
    joined length is <= 200, with "..." appended.  Falls back to the
    first segment alone (even if it exceeds 200 characters by itself)
    when no multi-segment prefix fits.
    """
    parts = l.split(";")
    # Scan from the longest prefix down and stop at the first that fits.
    # Fix: the original loop had no break, so it kept overwriting the
    # result and always ended up with the shortest (one-segment) prefix.
    for count in range(len(parts) - 1, 0, -1):
        candidate = ";".join(parts[:count])
        if len(candidate) <= 200:
            return candidate + "..."
    return parts[0] + "..."
def get_data_animal(gene_id, bd):
    """Connect to the marrvel API and get the animal-model ortholog IDs
    corresponding to the human gene, then look each up in the local
    disease databases and return spreadsheet-ready hyperlinks.

    gene_id -- despite the name, a list of gene symbols (see
        get_all_animal_models, which passes syns) handed to get_entrezid.
    bd -- dict keyed by database name ("flyBaseId", "mgiId", ...) whose
        values are [local db file path, hyperlink URL prefix] pairs --
        TODO confirm against caller.

    Returns a dict mapping each database key to a list of Excel
    =HYPERLINK(...) strings, or ["ND"] when nothing was found.
    """
    entrez=get_entrezid(gene_id)
    if entrez!="":
        bdd="http://api.marrvel.org/data/diopt/ortholog/gene/entrezId/"+str(entrez)
        req = requests.get(bdd)
        data=req.json()
        l=["flyBaseId","mgiId","wormBaseId", "zfinId", "rgdId"]
        final={"flyBaseId":"ND","mgiId":"ND","wormBaseId":"ND", "zfinId":"ND", "rgdId":"ND"}
        if len(data)>0:
            # Keep the first best-score ortholog id seen per database.
            for el in data:
                if "gene2" in el and el['bestScore']==True:
                    gg=el["gene2"]
                    for database in l:
                        if database in gg and final[database]=="ND":
                            final[database]=str(gg[database])
        # NOTE(review): leftover debug print.
        print("final_passo1", final)
        for key,value in final.items():
            if value=="ND":
                final[key]=["ND"]
            else:
                # Look the model id up in the local disease-association file.
                line=read_bd(bd[key][0],value)
                if line!="ND":
                    rr=[]
                    ll=line.split("\t")
                    # One hyperlink per ';'-separated disease entry,
                    # shortened to fit a spreadsheet cell when needed.
                    for el in ll[1].split(";"):
                        if len(el)<=200:
                            rr.append('=HYPERLINK("'+bd[key][1]+ll[0]+'","'+el+'")')
                        else:
                            tt=make_it_shorter(el)
                            rr.append('=HYPERLINK("'+bd[key][1]+ll[0]+'","'+tt+'")')
                    final[key]=rr
                else:
                    final[key]=['=HYPERLINK("'+bd[key][1]+value+'","No disease association")']
        else:
            final={"flyBaseId":["ND"],"mgiId":["ND"],"wormBaseId":["ND"], "zfinId":["ND"], "rgdId":["ND"]}
    else:
        final={"flyBaseId":["ND"],"mgiId":["ND"],"wormBaseId":["ND"], "zfinId":["ND"], "rgdId":["ND"]}
    return final
def get_entrezid(syn):
    """Return the Entrez ID of the first symbol in *syn* that marrvel.org
    resolves, or "" when none resolve.

    syn -- list of gene symbols; composite "A/B" entries are queried by
    the part before "/" only.
    """
    for gene_id in syn:
        ii=gene_id.split("/")[0]
        bd="http://api.marrvel.org/data/gene/taxonId/9606/symbol/"+ii.strip()
        # NOTE(review): leftover debug traces (URL and failed symbol).
        print(bd)
        req= requests.get(bd)
        data=req.json()
        if 'entrezId' in data:
            return data['entrezId']
        else:
            print(gene_id)
    return ""
#final={"flyBaseId":["ND"],"mgiId":["ND"],"wormBaseId":["ND"], "zfinId":["ND"], "rgdId":["ND"]}
def get_all_animal_models(gene_id, params, syns):
    """Runs get_data_animal for all the animal models used and appends a
    Uniprot search hyperlink for each organism.

    NOTE: mutates *syns* in place (appends gene_id) before the lookup.
    Returns [worm, fly, mouse, rat, zebrafish] hyperlink lists.
    """
    syns.append(gene_id)
    final=get_data_animal(syns, params["animals"])
    if final["wormBaseId"]==["ND"]:
        final["wormBaseId"]=['=HYPERLINK("https://www.uniprot.org/uniprot/?query='+gene_id+'+organism%3Aelegans&sort=score","'+gene_id+' C. elegans Uniprot entry")']
    else:
        final["wormBaseId"].append('=HYPERLINK("https://www.uniprot.org/uniprot/?query='+gene_id+'+organism%3Aelegans&sort=score","'+gene_id+' C. elegans Uniprot entry")')
    if final["flyBaseId"]==["ND"]:
        final["flyBaseId"]=['=HYPERLINK("https://www.uniprot.org/uniprot/?query='+gene_id+'+organism%3A%22Drosophila+melanogaster+%28Fruit+fly%29+%5B7227%5D%22&sort=score","'+gene_id+' Drosophila Uniprot entry")']
    else:
        # NOTE(review): this branch queries organism "elegans" but labels
        # the link "Drosophila" -- likely a copy-paste slip in the URL;
        # confirm before changing the query string.
        final["flyBaseId"].append('=HYPERLINK("https://www.uniprot.org/uniprot/?query='+gene_id+'+organism%3Aelegans&sort=score","'+gene_id+' Drosophila Uniprot entry")')
    if final["mgiId"]==["ND"]:
        final["mgiId"]=['=HYPERLINK("https://www.uniprot.org/uniprot/?query='+gene_id+'&fil=organism%3A%22Mus+musculus+%28Mouse%29+%5B10090%5D%22&sort=score","'+gene_id+' Mouse Uniprot entry")']
    else:
        # Fix: the original read `final["mgiId"]==["ND"].append(...)` -- a
        # comparison whose right side appended to a throwaway list, so the
        # Mouse Uniprot link was silently dropped in this branch.
        final["mgiId"].append('=HYPERLINK("https://www.uniprot.org/uniprot/?query='+gene_id+'&fil=organism%3A%22Mus+musculus+%28Mouse%29+%5B10090%5D%22&sort=score","'+gene_id+' Mouse Uniprot entry")')
    if final["rgdId"]==["ND"]:
        final["rgdId"]=['=HYPERLINK("https://www.uniprot.org/uniprot/?query='+gene_id+'&fil=organism%3A%22Rattus+norvegicus+%28Rat%29+%5B10116%5D%22&sort=score","'+gene_id+' Rat Uniprot entry")']
    else:
        final["rgdId"].append('=HYPERLINK("https://www.uniprot.org/uniprot/?query='+gene_id+'&fil=organism%3A%22Rattus+norvegicus+%28Rat%29+%5B10116%5D%22&sort=score","'+gene_id+' Rat Uniprot entry")')
    if final["zfinId"]==["ND"]:
        final["zfinId"]=['=HYPERLINK("https://www.uniprot.org/uniprot/?query='+gene_id+'&fil=organism%3A%22Danio+rerio+%28Zebrafish%29+%28Brachydanio+rerio%29+%5B7955%5D%22&sort=score","'+gene_id+' Zebrafish Uniprot entry")']
    else:
        final["zfinId"].append('=HYPERLINK("https://www.uniprot.org/uniprot/?query='+gene_id+'&fil=organism%3A%22Danio+rerio+%28Zebrafish%29+%28Brachydanio+rerio%29+%5B7955%5D%22&sort=score","'+gene_id+' Zebrafish Uniprot entry")')
    return [final["wormBaseId"], final["flyBaseId"], final["mgiId"], final["rgdId"],final["zfinId"]]
def read_gtex_loops(gene_id, bd, t, chrr):
    """Collect the last tab-separated field of every matching line in *bd*.

    t == "gtex": match lines containing *gene_id* (ensembl ID expected).
    otherwise (loops): match lines containing both the tab-delimited gene
    name token and the "chr<chrr>" tab token.
    Returns an empty list when nothing matches.
    """
    results = ["ND"]
    with open(bd) as handle:
        if t == "gtex":
            def keep(row):
                return gene_id in row
        else:
            gene_token = "\t" + gene_id + " "
            chr_token = "chr" + chrr + "\t"
            def keep(row):
                return gene_token in row and chr_token in row
        results = [row.split("\t")[-1].strip() for row in handle if keep(row)]
    return results
def make_biblio_link_DD(ttype, band):
    """Build an Excel HYPERLINK formula for a PubMed search of
    (*ttype*) AND (*band*), restricted to human studies."""
    query = "(" + ttype + ")+AND+(" + band + ")"
    label = band + " " + ttype
    return ('=HYPERLINK("https://pubmed.ncbi.nlm.nih.gov/?term=' + query +
            '&sort=&filter=hum_ani.humans","' + label + '")')
def make_biblio_link(geneid, li):
    """Build an Excel HYPERLINK formula for a PubMed search of the gene
    OR any of its synonyms (entries equal to "nan" are skipped)."""
    terms = "(" + geneid + ")"
    terms += "".join("+OR+(" + el + ")" for el in li if el != "nan")
    return ('=HYPERLINK("https://pubmed.ncbi.nlm.nih.gov/?term=' + terms +
            '&sort=&filter=hum_ani.humans","' + geneid + '")')
|
import numpy as np
import hpgeom as hpg
import warnings
from .utils import _compute_bitshift
from .io_coverage import _read_coverage
class HealSparseCoverage(object):
    """
    Class to define a HealSparseCoverage map.

    The index map uses an offset encoding: for coverage pixel ``p``,
    ``cov_index_map[p] + p*nfine_per_cov`` is the position in the sparse
    map of that pixel's first fine pixel.  Covered pixels decode to a
    value >= nfine_per_cov (see make_empty/initialize_pixels), which is
    what coverage_mask tests.

    Parameters
    ----------
    cov_index_map : `np.ndarray`
        Coverage map with pixel indices.
    nside_sparse : `int`
        Healpix nside of the sparse map.

    Returns
    -------
    cov_map : `HealSparseCoverage`
        HealSparseCoverage map.
    """
    def __init__(self, cov_index_map, nside_sparse):
        # nside_coverage is implied by the length of the index map.
        self._nside_coverage = hpg.npixel_to_nside(cov_index_map.size)
        self._nside_sparse = nside_sparse
        self._cov_index_map = cov_index_map
        self._bit_shift = _compute_bitshift(self._nside_coverage, self._nside_sparse)
        # Number of fine (sparse) pixels per coverage pixel.
        self._nfine_per_cov = 2**self._bit_shift
        self._compute_block_to_cov_index()
    @classmethod
    def read(cls, filename_or_fits, use_threads=False):
        """
        Read in a HealSparseCoverage map from a file.

        Parameters
        ----------
        filename_or_fits : `str` or `HealSparseFits`
            Name of filename or already open `HealSparseFits` object.
        use_threads : `bool`, optional
            Use multithreaded reading for parquet files.  Should not
            be necessary for coverage maps.

        Returns
        -------
        cov_map : `HealSparseCoverage`
            HealSparseCoverage map from file.
        """
        return _read_coverage(cls, filename_or_fits, use_threads=use_threads)
    @classmethod
    def make_empty(cls, nside_coverage, nside_sparse):
        """
        Make an empty coverage map (no pixels covered).

        Parameters
        ----------
        nside_coverage : `int`
            Healpix nside for the coverage map
        nside_sparse : `int`
            Healpix nside for the sparse map

        Returns
        -------
        healSparseCoverage : `HealSparseCoverage`
            Empty HealSparseCoverage map.
        """
        if nside_coverage > 128:
            warnings.warn('Using `nside_coverage` > 128 may result in poor performance', ResourceWarning)
        bit_shift = _compute_bitshift(nside_coverage, nside_sparse)
        nfine_per_cov = 2**bit_shift
        # Offset encoding: every pixel decodes to 0 (uncovered) because
        # cov_index_map[p] + p*nfine_per_cov == 0 for all p.
        cov_index_map = -1*np.arange(hpg.nside_to_npixel(nside_coverage), dtype=np.int64)*nfine_per_cov
        return cls(cov_index_map, nside_sparse)
    @classmethod
    def make_from_pixels(cls, nside_coverage, nside_sparse, cov_pixels):
        """
        Make a coverage map with the given coverage pixels initialized.

        Parameters
        ----------
        nside_coverage : `int`
            Healpix nside for the coverage map
        nside_sparse : `int`
            Healpix nside for the sparse map
        cov_pixels : `np.ndarray`
            Array of coverage pixels

        Returns
        -------
        healSparseCoverage : `HealSparseCoverage`
            HealSparseCoverage map with cov_pixels initialized.
        """
        if nside_coverage > 128:
            warnings.warn('Using `nside_coverage` > 128 may result in poor performance', ResourceWarning)
        cov_map = cls.make_empty(nside_coverage, nside_sparse)
        cov_map.initialize_pixels(cov_pixels)
        return cov_map
    def initialize_pixels(self, cov_pix):
        """
        Initialize pixels in the index map

        Parameters
        ----------
        cov_pix : `np.ndarray`
            Array of coverage pixels
        """
        # Assign each pixel a distinct sparse-map block, starting at block
        # 1 (block 0 is reserved so uncovered pixels decode below
        # nfine_per_cov).
        self._cov_index_map[cov_pix] += np.arange(1, len(cov_pix) + 1)*self.nfine_per_cov
        self._compute_block_to_cov_index()
    def append_pixels(self, sparse_map_size, new_cov_pix, check=True, copy=True):
        """
        Append new pixels to the coverage map

        Parameters
        ----------
        sparse_map_size : `int`
            Size of current sparse map
        new_cov_pix : `np.ndarray`
            Array of new coverage pixels
        check : `bool`, optional
            Raise if any new pixel is already covered.
        copy : `bool`, optional
            Operate on (and return) a copy instead of modifying in place.

        Returns
        -------
        new_cov_map : `HealSparseCoverage`
            Coverage map with the new pixels appended.
        """
        if check:
            if np.max(self.cov_mask[new_cov_pix]) > 0:
                raise RuntimeError("New coverage pixels are already in the map.")
        if copy:
            new_cov_map = self.copy()
        else:
            new_cov_map = self
        # Reset to "defaults": decode the offset form into raw sparse-map
        # start positions.
        cov_index_map_temp = new_cov_map._cov_index_map + np.arange(hpg.nside_to_npixel(self.nside_coverage),
                                                                    dtype=np.int64)*self.nfine_per_cov
        # set the new pixels: their blocks start at the current end of the
        # sparse map.
        cov_index_map_temp[new_cov_pix] = (np.arange(new_cov_pix.size)*self.nfine_per_cov +
                                           sparse_map_size)
        # Restore the offset encoding.
        cov_index_map_temp -= np.arange(hpg.nside_to_npixel(self.nside_coverage),
                                        dtype=np.int64)*self.nfine_per_cov
        new_cov_map._cov_index_map[:] = cov_index_map_temp
        new_cov_map._compute_block_to_cov_index()
        return new_cov_map
    def cov_pixels(self, sparse_pixels):
        """
        Get coverage pixel numbers (nest) from a set of sparse pixels.

        Parameters
        ----------
        sparse_pixels : `np.ndarray`
            Array of sparse pixels

        Returns
        -------
        cov_pixels : `np.ndarray`
            Coverage pixel numbers (nest format)
        """
        # Dropping bit_shift bits converts fine pixel ids to coarse ids.
        return np.right_shift(sparse_pixels, self._bit_shift)
    def cov_pixels_from_index(self, index):
        """
        Get the coverage pixels from the sparse map index.

        Parameters
        ----------
        index : `np.ndarray`
            Array of indices in sparse map

        Returns
        -------
        cov_pixels : `np.ndarray`
            Coverage pixel numbers (nest format)
        """
        # Block number (minus the reserved block 0) indexes the sorted
        # block->coverage-pixel table.
        return self._block_to_cov_index[(index // self.nfine_per_cov) - 1]
    @property
    def coverage_mask(self):
        """
        Get the boolean mask of the coverage map.

        Returns
        -------
        cov_mask : `np.ndarray`
            Boolean array of coverage mask.
        """
        # A pixel is covered iff its decoded start position lands past the
        # reserved first block.
        cov_mask = (self._cov_index_map[:] +
                    np.arange(hpg.nside_to_npixel(self._nside_coverage)) *
                    self._nfine_per_cov) >= self.nfine_per_cov
        return cov_mask
    @property
    def nside_coverage(self):
        """
        Get the nside of the coverage map

        Returns
        -------
        nside_coverage : `int`
        """
        return self._nside_coverage
    @property
    def nside_sparse(self):
        """
        Get the nside of the associated sparse map

        Returns
        -------
        nside_sparse : `int`
        """
        return self._nside_sparse
    @property
    def bit_shift(self):
        """
        Get the bit_shift for the coverage map

        Returns
        -------
        bit_shift : `int`
            Number of bits to shift from coarse to fine maps
        """
        return self._bit_shift
    @property
    def nfine_per_cov(self):
        """
        Get the number of fine (sparse) pixels per coarse (coverage) pixel

        Returns
        -------
        nfine_per_cov : `int`
            Number of fine (sparse) pixels per coverage pixel
        """
        return self._nfine_per_cov
    def _compute_block_to_cov_index(self):
        """
        Compute the mapping from block number to cov_index
        """
        # Decode offsets into raw sparse-map start positions.
        offset_map = (self._cov_index_map[:] +
                      np.arange(hpg.nside_to_npixel(self._nside_coverage)) *
                      self._nfine_per_cov)
        cov_mask = (offset_map >= self.nfine_per_cov)
        cov_pixels, = np.where(cov_mask)
        # Sort covered pixels by their block number so block id - 1 can be
        # used as a direct index (see cov_pixels_from_index).
        block_number = (offset_map[cov_pixels] // self.nfine_per_cov) - 1
        st = np.argsort(block_number)
        self._block_to_cov_index = cov_pixels[st]
    def copy(self):
        return self.__copy__()
    def __copy__(self):
        # Deep-copies the index map; nside_sparse is immutable.
        return HealSparseCoverage(self._cov_index_map.copy(), self._nside_sparse)
    # Pass through to the underlying map
    def __getitem__(self, key):
        return self._cov_index_map[key]
    def __setitem__(self, key, value):
        self._cov_index_map[key] = value
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        descr = 'HealSparseCoverage: nside_coverage = %d, nside_sparse = %d' % (self._nside_coverage,
                                                                                self._nside_sparse)
        return descr
|
from .namesss import getNamesForYear
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 9 10:23:54 2020

Parabolic (Lagrange-interpolation) minimisation of the neutrino
oscillation NLL in theta, plus an NLL scan to estimate the error.

@author: Alex
"""
import numpy as np
import matplotlib.pyplot as plt
#Import the NLL_1 function which calculates the NLL without the cross section term
from Analysis_Methods import NLL_1
# Initial sample guesses for the minimiser (overwritten below before use).
x0 = 0.5
x1 = 0.65
x2 = 0.73
#Define a routine which calculated the curvature at the minimum of a lagrange polynomial interpolated
#through 3 sample points
def d2dx(x0, x1, x2, x3, f):
    """Second derivative of the Lagrange parabola interpolated through
    (x0, f(x0)), (x1, f(x1)), (x2, f(x2)).

    x3 is accepted but unused; it is kept so existing callers that pass
    four sample points keep working.
    """
    y0, y1, y2 = f(x0), f(x1), f(x2)
    term_a = 2 * y0 / ((x0 - x1) * (x0 - x2))
    term_b = 2 * y1 / ((x1 - x0) * (x1 - x2))
    term_c = 2 * y2 / ((x2 - x0) * (x2 - x1))
    return term_a + term_b + term_c
#Defin a routine which in 1D interpolates a lagrange polynomial through 3 samples, and/
#then calculated the minima of the interpolated parabola
def par_min(x0, x1, x2, f):
    """Parabolic minimisation of *f* starting from samples x0 < x1 < x2.

    Repeatedly interpolates a Lagrange parabola through three samples,
    takes its analytic minimum x3, drops the highest of the four points,
    and iterates until successive minima agree to 1e-8 (or the sample
    y-values collide).  Returns (x_min, std_dev), where std_dev comes
    from the curvature at the minimum (1/sqrt(f'')).

    NOTE(review): if the loop breaks on the very first iteration (equal
    y-values), x3/df/xm are unbound and the return raises NameError.
    """
    y0, y1, y2 = f(x0), f(x1), f(x2)
    #convergence depends on a residue calculation, so a control bit is used
    #so that the residue isn't referenced before the variable is declared
    control = 0
    #initialise residue as something higher than the threshold convergence value
    res = 10
    while res > 0.00000001:
        #Algorithm will produce zero-division if the sample y values are identical, so make sure
        #loop ends before this is computed
        y0, y1, y2 = f(x0), f(x1), f(x2)
        if y0==y2 or y0==y1 or y1==y2:
            break
        else:
            #x3 is the minima for the interpolated lagrange poly
            x3 = 0.5*((x2**2-x1**2)*y0+(x0**2-x2**2)*y1+(x1**2-x0**2)*y2)/\
                ((x2-x1)*y0+(x0-x2)*y1+(x1-x0)*y2)
            a = np.array([y0,y1,y2,f(x3)])
            b = np.argmax(a)
            #figure out which sample point is the highest, and remove this from the sample set!
            if control == 1:
                #calculate residue to end loop when convergence complete
                res = np.sqrt((x3-xm)**2)
            c = [x0, x1, x2, x3]
            df = d2dx(c[0], c[1], c[2], c[3], f)
            del c[b]
            c = np.array(c)
            c.sort()
            x0, x1, x2 = c
            #print(res)
            control = 1
            xm = x3
    std_dev = 1/np.sqrt(df)
    return x3, std_dev
#Definte a function that turns the NLL into a one dimensional function, for ease of plotting
#Parabolic minimiser here is coded as a function of one variable
def f2(x):
    """Reduce NLL_1 to a 1-D function of the mixing angle, with the
    mass-squared difference fixed at 2.4e-3."""
    return NLL_1([x, 2.4e-3])
# Sample points near the minimum, guessed by eye from a plot of the function.
x0, x1, x2 = 0.6, 0.7, 0.75
print("the function minimum and standard deviation are", par_min(x0, x1, x2, f2))
# Sample theta in [0, pi/4] for plotting.
pi = np.linspace(0, np.pi/4, 100)
u_v = []
for i in pi:
    u_v.append([i, 2.4*1e-3])
# Compute the NLL for the sampled theta values.
NLL_v = []
for i in u_v:
    NLL_v.append(NLL_1(i))
# Pyplot parameters so plots hopefully render well on any python build.
plt.figure(figsize=(10,6))
plt.rc('xtick', labelsize=15)
plt.rc('ytick', labelsize=15)
plt.rc('figure', titlesize=45)
plt.rc('axes', titlesize=20)  # fontsize of the axes title
plt.rc('axes', labelsize=20)
plt.rc('legend', fontsize=22)
# Plot NLL against theta.
plt.plot(pi, NLL_v)
plt.grid()
plt.xlabel('θ (radians)')
plt.ylabel('NLL')
plt.legend(['NLL(θ)'])
plt.show()
# Using the computed minimum, scan until the NLL rises by 0.5 in each
# direction, corresponding to one standard deviation.
# NOTE(review): theta_min is hard-coded from a previous run of par_min;
# re-derive it if the data or NLL_1 changes.
theta_min = 0.6763585471865108
NLL_min = f2(theta_min)
def scan_up():
    """Scan theta upwards from theta_min in 1e-4 steps until the NLL has
    risen by at least 0.5, returning the positive offset dx reached.

    NOTE(review): dx is incremented after the NLL test, so the returned
    offset is one step beyond the crossing point.
    """
    diff = 0
    dx = 0
    while diff < 0.5:
        new_NLL = f2(theta_min+dx)
        diff = new_NLL-NLL_min
        dx += 0.0001
    return dx
def scan_down():
    """Scan theta downwards from theta_min in 1e-4 steps until the NLL has
    risen by at least 0.5, returning the (negative) offset dx reached.

    NOTE(review): dx is decremented after the NLL test, so the returned
    offset is one step beyond the crossing point.
    """
    diff = 0
    dx = 0
    while diff < 0.5:
        new_NLL = f2(theta_min+dx)
        diff = new_NLL-NLL_min
        dx -= 0.0001
    return dx
# Report the asymmetric one-sigma offsets found by the scans above.
print("Standard deviation in positive direction is", scan_up())
print("Standard deviation in negative direction is", scan_down())
|
class BinaryTree:
    """Binary tree node holding a key plus optional left/right subtrees."""

    def __init__(self, rootObj):
        self.key = rootObj
        self.leftChild = None
        self.rightChild = None

    def insertLeft(self, newNode):
        """Insert ``newNode`` as the left child; an existing left subtree is
        pushed down as the left child of the inserted node."""
        # Idiom fix: compare to None with ``is``, not ``==``.
        if self.leftChild is None:
            self.leftChild = BinaryTree(newNode)
        else:
            t = BinaryTree(newNode)
            t.leftChild = self.leftChild
            self.leftChild = t

    def insertRight(self, newNode):
        """Insert ``newNode`` as the right child; an existing right subtree
        is pushed down as the right child of the inserted node."""
        if self.rightChild is None:
            self.rightChild = BinaryTree(newNode)
        else:
            t = BinaryTree(newNode)
            t.rightChild = self.rightChild
            self.rightChild = t

    def getRightChild(self):
        """Return the right subtree (or None)."""
        return self.rightChild

    def getLeftChild(self):
        """Return the left subtree (or None)."""
        return self.leftChild

    def setRootVal(self, obj):
        """Replace this node's key."""
        self.key = obj

    def getRootVal(self):
        """Return this node's key."""
        return self.key

    def preorder(self):
        """Print keys in root-left-right order; returns None."""
        print(self.key)
        if self.leftChild:
            self.leftChild.preorder()
        if self.rightChild:
            self.rightChild.preorder()

    def postorder(self):
        """Print keys in left-right-root order; returns None."""
        if self.leftChild:
            self.leftChild.postorder()
        if self.rightChild:
            self.rightChild.postorder()
        print(self.key)

    def inorder(self):
        """Print keys in left-root-right order; returns None."""
        if self.leftChild:
            self.leftChild.inorder()
        print(self.key)
        if self.rightChild:
            self.rightChild.inorder()

    def __repr__(self):
        return '{} [{}, {}]'.format(self.getRootVal(), self.getLeftChild(), self.getRightChild())
# Build the example tree and demonstrate the three traversal orders.
r = BinaryTree('a')
r.insertLeft('b')
r.getLeftChild().insertLeft('d')
r.getLeftChild().insertRight('e')
r.getLeftChild().getRightChild().insertLeft('g')
r.insertRight('c')
r.getRightChild().insertRight('f')
r.getRightChild().getRightChild().insertLeft('h')
r.getRightChild().getRightChild().insertRight('i')
print(r)
# NOTE(review): the traversal methods print and return None, so a, b and c
# each end up holding [None]; the appends have no effect beyond that.
a, b, c = [], [], []
print('Обход в прямом порядке')
a.append(r.preorder())
print('Обход в обратном порядке')
b.append(r.postorder())
print('Симметричный обход')
c.append(r.inorder())
|
# importing necessary modules
import numpy as np
from PIL import Image

# total number of times the process will be repeated (recursion depth)
total = 2
# size of the image
# NOTE(review): a base-3 Sierpinski carpet of depth ``total`` spans only
# 3**total pixels, yet the canvas is 8**total -- only the top-left
# 3**total x 3**total corner is ever painted. Confirm whether 8 was meant
# to be 3.
size = 8**total
# creating an image
square = np.empty([size, size, 3], dtype = np.uint8)
color = np.array([255, 255, 255], dtype = np.uint8)
# filling it black
square.fill(0)
# At each level, paint the centre cell of every 3x3 group white.
for i in range(0, total + 1):
    stepdown = 3**(total - i)
    for x in range(0, 3**i):
        # checking for the centremost square
        if x % 3 == 1:
            for y in range(0, 3**i):
                if y % 3 == 1:
                    # changing its color
                    square[y * stepdown:(y + 1)*stepdown, x * stepdown:(x + 1)*stepdown] = color
# saving the image produced
save_file = "sierpinski.jpg"
Image.fromarray(square).save(save_file)
# displaying it in console
#i = Image.open("sierpinski.jpg")
#i.show()
import argparse
import os
import glob
import json
import pandas as pd
from subprocess import Popen, PIPE

# Map 2D detections (bounding boxes) to 3D points by projecting each box
# centre through the camera pose into an area point cloud, using an external
# converter binary; results are streamed to a CSV.
parser = argparse.ArgumentParser()
parser.add_argument('--pointcloud', type=str, help='Path of the area point cloud.')
parser.add_argument('--detection_dir', type=str, help='Path of the directory that contains 2D detection and ground truth files.')
parser.add_argument('--area_dir', type=str, help='2D-3D-S area directory path.')
parser.add_argument('--output_csv', type=str, help='Output CSV file to be created that includes 3D mappings.')
args = parser.parse_args()

detection_files = glob.glob(os.path.join(args.detection_dir, 'detections', '*txt'))
results = pd.DataFrame(columns=['2d_Detection_File', 'Class', 'Confidence', 'x_min', 'y_min', 'x_max', 'y_max', 'x', 'y', 'z'])
header = True  # write the CSV header only on the first flush
print('Number of Detections: ', len(detection_files))
for i, d in enumerate(detection_files):
    print('{}/{} {}'.format(i, len(detection_files), d))
    with open(d) as d_file:
        dets = d_file.read().split('\n')[:-1]
    # Each line: <class> <confidence> <x_min> <y_min> <x_max> <y_max>.
    # NOTE(review): this ``i`` shadows the outer file index (the outer value
    # is only used in the print above, so no harm, but it is fragile).
    for i in range(len(dets)):
        dets[i] = dets[i].split(' ')
        for j in range(1, 6):
            dets[i][j] = float(dets[i][j])
    #print(dets)
    # The camera pose file shares the image base name, suffixed _pose.json.
    pose_file_name = ('_').join(os.path.basename(d).split('_')[:-1]) + '_pose.json'
    pose_file_path = os.path.join(args.area_dir, 'data/pose', pose_file_name)
    with open(pose_file_path) as p_file:
        pose = json.load(p_file)
    x = pose['camera_location'][0]
    y = pose['camera_location'][1]
    z = pose['camera_location'][2]
    # NOTE(review): the 1.57 offsets look like pi/2 corrections between the
    # dataset's rotation convention and the converter's -- confirm.
    roll = pose['final_camera_rotation'][1]
    pitch = 1.57 - pose['final_camera_rotation'][0]
    yaw = 1.57 + pose['final_camera_rotation'][2]
    focal_length = pose['camera_k_matrix'][0][0]
    #print(x, y, z, roll, pitch, yaw, focal_length)
    center_points = []
    '''for det in dets:
    center_point_x = ((det[2] + det[4]) / 2) / 4
    center_point_y = ((det[3] + det[5]) / 2) / 4
    print('\n\n\n')
    print(det)
    args_2d_to_3d = ['/home/ubuntu/stanford-detectron/pcl_mapping/center-point/convert_coordinates', args.pointcloud, str(x), str(y), str(z), str(roll), str(pitch), str(yaw), str(center_point_x), str(center_point_y), '1080', '1080', '0', '0.2', '0', str(focal_length), str(focal_length), 'range.png']
    print(center_point_x, center_point_y)
    p = Popen(args_2d_to_3d, stdout=PIPE, stderr=PIPE)
    output, err = p.communicate()
    point = [float(j) for j in output.split()]
    print(output)
    print(point)'''
    # Batch all box centres of this file into one converter invocation.
    for det in dets:
        center_points.append(str((det[2] + det[4]) / 2))
        center_points.append(str((det[3] + det[5]) / 2))
    args_2d_to_3d = ['center-point-multi/convert_coordinates', args.pointcloud, str(x), str(y), str(z), str(roll), str(pitch), str(yaw), '1080', '1080', '0', '0.2', '0', str(focal_length), str(focal_length), 'range.png'] + center_points
    #print(args_2d_to_3d)
    p = Popen(args_2d_to_3d, stdout=PIPE, stderr=PIPE)
    # NOTE(review): reading stdout directly without communicate() can
    # deadlock if the child fills its stderr pipe.
    output = p.stdout.read()
    # One "x y z" line per requested centre point.
    center_points = [cp.split() for cp in output.split(b'\n')[:-1]]
    #print(center_points)
    for i in range(len(center_points)):
        # NOTE(review): DataFrame.append is deprecated (removed in pandas 2.0).
        results = results.append({'2d_Detection_File': os.path.basename(d), 'Class': dets[i][0], 'Confidence': float(dets[i][1]), 'x_min': float(dets[i][2]), 'y_min': float(dets[i][3]), 'x_max': float(dets[i][4]), 'y_max': float(dets[i][5]), 'x': float(center_points[i][0]), 'y': float(center_points[i][1]), 'z': float(center_points[i][2])}, ignore_index=True)
        # Flush to CSV in batches of 50 rows to bound memory use.
        if len(results.index) == 50:
            results = results.astype({'Confidence': float, 'x_min': float, 'y_min': float, 'x_max': float, 'y_max': float, 'x': float, 'y': float, 'z': float})
            results.to_csv(args.output_csv, index=False, header=header, mode = 'a')
            header = False
            results = results.iloc[0:0]
# Flush any remaining rows.
results = results.astype({'Confidence': float, 'x_min': float, 'y_min': float, 'x_max': float, 'y_max': float, 'x': float, 'y': float, 'z': float})
results.to_csv(args.output_csv, index=False, header=header, mode = 'a')
|
import numpy as np
from sigpy import backend, util, thresh, linop
from sigpy import prox
def GLRA(shape, lamda, A=None, sind_1=1):
    """Build a globally-low-rank proximal operator for arrays of ``shape``.

    The first ``sind_1`` axes are folded into matrix rows and the remaining
    axes into columns; the input is reshaped to that matrix (optionally after
    the linear operator ``A``) and singular-value soft-thresholding (GLR)
    is applied.
    """
    # Row count: product of the first sind_1 dimensions.
    u_len = 1
    for i in range(sind_1):
        u_len = u_len * shape[i]
    # Column count: product of the remaining dimensions.
    v_len = 1
    for i in range(len(list(shape)) - sind_1):
        v_len = v_len * shape[-i-1]
    ishape = (u_len, v_len)
    GPR_prox = GLR(ishape, lamda)
    R = linop.Reshape(oshape=ishape, ishape=shape)
    if A is None:
        RA = R
    else:
        RA = R * A
    GLRA_prox = prox.UnitaryTransform(GPR_prox, RA)
    return GLRA_prox
class GLR(prox.Prox):
    """Proximal operator applying soft-thresholding to the singular values
    of a matrix input (low-rank / nuclear-norm style penalty); the
    threshold is scaled by the largest singular value."""

    def __init__(self, shape, lamda):
        self.lamda = lamda  # regularisation strength
        super().__init__(shape)

    def _prox(self, alpha, input):
        u, s, vh = np.linalg.svd(input, full_matrices=False)
        s_max = np.max(s)
        # Threshold relative to the dominant singular value.
        s_t = thresh.soft_thresh(self.lamda * alpha * s_max, s)
        return np.matmul(u, s_t[..., None] * vh)
|
from flask import Flask, render_template, request
from flask_socketio import SocketIO, send, emit, join_room, \
leave_room, close_room, rooms
# Serve the pre-built client bundle directly from the sibling project dir.
app = Flask(__name__,
            static_folder="../client-chat/build/static",
            template_folder="../client-chat/build")
socketio = SocketIO(app)


@app.route("/")
def index():
    """Serve the built client application's entry page."""
    return render_template('index.html')


@socketio.on('check-in')
def notify_user_checked_in():
    """Answer an 'all-users-check-in' request by broadcasting this
    client's online status (its username room) to everyone."""
    try:
        username = rooms()[0]  # online users should only ever be in one room
        emit('is-online', {"username": username}, broadcast=True)
    except Exception as e:
        print('Caught error', e)
@socketio.on('login-user')
def notify_user_login(username):
    """Move the client from its sid room into a room named after the user,
    and announce the sign-on to everyone."""
    leave_room(request.sid)  # leave the sid room and join the username room
    join_room(username)
    emit('is-online', {"username": username}, broadcast=True)
    # Ask all users to declare if they are online for our new user
    emit('all-users-check-in', broadcast=True)
    print('User', username, 'has signed on')


@socketio.on('logoff-user')
def notify_user_logoff(username):
    # Close the username room so its members stop receiving messages,
    # then announce the sign-off.
    close_room(username)
    emit('is-offline', {"username": username}, broadcast=True)
    print('User', username, 'has signed off')


@socketio.on('send-chat')
def send_chat(json):
    """Relay a chat message to both the recipient's and the sender's
    username rooms (so the sender's other sessions see it too)."""
    print('received message object', json)
    recipient = json['recipient']
    message = json['message']
    sender = json['sender']
    msg_obj = {
        "recipient": recipient,
        "message": message,
        "sender": sender
    }
    emit('incoming-chat', msg_obj, room=recipient)
    emit('incoming-chat', msg_obj, room=sender)
@socketio.on('connect')
def handle_connect():
    """Log raw socket connections (before any login)."""
    print('Client connected')


@socketio.on('disconnect')
def handle_disconnect():
    """On disconnect, broadcast the user's offline status if a username
    room can still be determined."""
    try:
        print('Client', rooms(), 'disconnected')
        username = rooms()[0]
        emit('is-offline', {"username": username}, broadcast=True)
    except Exception as e:
        print('Caught', e)


if __name__ == '__main__':
    socketio.run(app, host='0.0.0.0')
|
# !/usr/bin/python3
# -*- coding: utf-8 -*-
'''
@Desc: 简易本地缓存
@Author: wangzs@ct108.com
@Create: 2016/06/17
@Update: 2017/04/11
'''
import time
class MemoryCache:
    """Simple in-memory cache with per-entry millisecond TTLs.

    Entries live in the class-level ``CACHE_DICT``; each entry is a dict
    holding the cached object and its absolute expiry timestamp.
    """

    CACHE_DICT = {}
    KEY_EXPIRE_TS = "expireTs"
    KEY_CACHE_OBJ = "obj"

    @staticmethod
    def build_cache_obj(value, timeout):
        """Wrap ``value`` together with its absolute expiry timestamp.

        Args:
            value: object to cache.
            timeout: time-to-live in milliseconds.
        """
        expiry = time.time() * 1000 + timeout
        return {MemoryCache.KEY_EXPIRE_TS: expiry,
                MemoryCache.KEY_CACHE_OBJ: value}

    @staticmethod
    def has_expired(cacheobj):
        """Return True when ``cacheobj`` is missing, malformed or past its
        expiry timestamp."""
        if not cacheobj:
            return True
        if MemoryCache.KEY_EXPIRE_TS not in cacheobj:
            return True
        return time.time() * 1000 > cacheobj[MemoryCache.KEY_EXPIRE_TS]

    @staticmethod
    def set(key, value, timeout):
        """Store ``value`` under ``key`` (overwriting any existing entry).

        Args:
            key: cache key.
            value: object to cache.
            timeout: time-to-live in milliseconds.
        """
        MemoryCache.CACHE_DICT[key] = MemoryCache.build_cache_obj(value, timeout)

    @staticmethod
    def clear(key):
        """Drop the entry for ``key``, if present."""
        MemoryCache.CACHE_DICT.pop(key, None)

    @staticmethod
    def clearall():
        """Drop every cached entry."""
        MemoryCache.CACHE_DICT = {}

    @staticmethod
    def get(key):
        """Return the cached object for ``key``, or None when absent or
        expired. Expired entries are evicted on access."""
        entry = MemoryCache.CACHE_DICT.get(key)
        if entry is None:
            return None
        if MemoryCache.has_expired(entry):
            del MemoryCache.CACHE_DICT[key]
            return None
        return entry[MemoryCache.KEY_CACHE_OBJ]
def autocache(key, timeout):
    """Decorator caching the wrapped function's result in MemoryCache.

    Args:
        key: base cache key; when the first positional argument exposes a
            ``getcachekey()`` method its result is appended so different
            objects get distinct entries.
        timeout: cache time-to-live in milliseconds.

    Returns:
        The decorating function.

    Note:
        Only non-None results are cached: MemoryCache.get() returns None
        for a miss, so a cached None would be indistinguishable from one.
    """
    from functools import wraps  # local import: keeps module deps unchanged

    def __autocache(func):
        @wraps(func)  # preserve the wrapped function's name/docstring
        def __autocache_handle(*args, **kwargs):
            key_ = key
            if len(args) > 0 and hasattr(args[0], "getcachekey"):
                key_ = "%s#%s" % (key, args[0].getcachekey())
            value = MemoryCache.get(key_)
            # Fix: compare against None instead of truthiness so falsy
            # results (0, "", [], False) are served from the cache too.
            if value is not None:
                return value
            value = func(*args, **kwargs)
            if value is not None:
                MemoryCache.set(key_, value, timeout)
            return value
        return __autocache_handle
    return __autocache
|
def fun(arr, num, num1):
    """Recursively print every element of the 2D list ``arr`` row by row,
    starting at row ``num``, column ``num1``. Returns None."""
    if num >= len(arr):
        return
    if num1 < len(arr[num]):
        print(arr[num][num1])
        fun(arr, num, num1 + 1)
    else:
        # End of this row: move to the start of the next one.
        fun(arr, num + 1, 0)
# Demo: print the 2x2 matrix element by element via the recursive walker.
arr = [[1, 2],
       [4, 5]]
fun(arr, 0, 0)
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (C) 2017-2020, SCANOSS Ltd. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import yaml
import logging
import logging.handlers as handlers
import os
import sys
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter, FileType
from http.server import HTTPServer
from scanoss_hook.bitbucket import BitbucketRequestHandler
from scanoss_hook.gitlab import GitLabRequestHandler
from scanoss_hook.github import GitHubRequestHandler
from functools import partial
# Force unbuffered output so logs appear promptly when run in a container.
os.environ["PYTHONUNBUFFERED"] = "1"
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                    level=logging.DEBUG,
                    stream=sys.stdout)
logger = logging.getLogger('scanoss-hook')
logger.setLevel(logging.DEBUG)
# Formatter reused by the rotating file handler configured in main().
log_format = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
def get_parser():
    """Build and return the command line parser for the SCANOSS webhook."""
    arg_specs = [
        (["--addr"], dict(dest="addr", default="0.0.0.0",
                          help="address where it listens")),
        (["--port"], dict(dest="port", type=int, default=8888,
                          metavar="PORT", help="port where it listens")),
        (["--handler"], dict(dest="handler",
                             choices=['gitlab', 'github', 'bitbucket'],
                             default="gitlab", metavar="HANDLER",
                             help="webhook handler")),
        (["--cfg"], dict(dest="cfg", type=FileType('r'), required=True,
                         help="path to the config file")),
    ]
    parser = ArgumentParser(description=__doc__,
                            formatter_class=ArgumentDefaultsHelpFormatter)
    for flags, options in arg_specs:
        parser.add_argument(*flags, **options)
    return parser
def main():
    """Start an HTTPServer waiting for webhook requests, configured for
    GitHub, GitLab or BitBucket mode."""
    p = get_parser()
    args = p.parse_args()
    if not args.cfg:
        p.print_help()
        sys.exit(1)
    config = yaml.safe_load(args.cfg)
    # Rotate the log file daily at midnight.
    # NOTE(review): the name ``handler`` is reused below for the HTTP
    # request-handler factory -- two unrelated things share one variable.
    handler = handlers.TimedRotatingFileHandler("/var/log/scanoss-hook.log", when='midnight', interval=1)
    handler.setFormatter(log_format)
    logger.addHandler(handler)
    # argparse ``choices`` guarantees exactly one of these branches is taken.
    if args.handler == 'gitlab':
        handler = partial(GitLabRequestHandler, config)
    elif args.handler == 'github':
        handler = partial(GitHubRequestHandler, config, logger)
    elif args.handler == 'bitbucket':
        handler = partial(BitbucketRequestHandler, config)
    httpd = HTTPServer((args.addr, args.port),
                       handler)
    httpd.serve_forever()
if __name__ == '__main__':
main()
|
def make_car(manufactor, size, color='blue', tow_package=True, **info):
    """Build a dict describing a car.

    Args:
        manufactor: maker name, stored under the 'maker' key.
        size: car size, stored under the 'size' key.
        color: car colour (default 'blue').
        tow_package: whether a tow package is fitted (default True).
        **info: any extra attributes, copied into the result.

    Returns:
        dict with 'maker', 'size', 'color', 'tow_package' plus extras.
    """
    infos = {
        'maker': manufactor,
        'size': size,
        # Bug fix: the colour was hard-coded to 'blue', ignoring the param.
        'color': color,
        # Bug fix: tow_package was forced to True (and written into the
        # caller's **info dict instead of the result).
        'tow_package': tow_package,
    }
    infos.update(info)
    return infos
# Demo calls: extra keyword arguments are folded into the returned dict.
car = make_car('Yiqi', 'Large', output='China', price='1400$')
print(car)
car = make_car('宝马', 'Large')
print(car)
|
# Python program to read in json files for
# language conversion application
import json
# open the english, french and pirate json files
# Open the English, French and Pirate translation tables together so a
# failure opening any of them aborts before partial loading.
with open("languages-english.json") as f1, open("languages-french.json") as f2, open("languages-pirate.json") as f3:
    # create dicts from json objs
    data1 = json.load(f1)
    data2 = json.load(f2)
    data3 = json.load(f3)
# function takes a language key string as an arg
# function takes a language key string as an arg
def string_translator(keyString):
    """Prompt the user for a target language and print the translation.

    Looks up ``keyString`` (upper-cased) in the chosen language table and
    prints the stored string character by character. Pirate falls back to
    English when the key is missing; other selections print an apology.

    NOTE(review): a missing key in the English or French table raises
    KeyError -- only the Pirate branch guards against it.
    """
    # find out which file we are going to use
    print('English or French or Pirate?')
    selection = input().upper()
    # access the english file
    if selection == 'ENGLISH':
        for i in data1[keyString.upper()]:
            print(i, end='')
    # access the french file
    elif selection == 'FRENCH':
        for i in data2[keyString.upper()]:
            print(i, end='')
    # access the pirate file
    elif selection == 'PIRATE':
        try:
            for i in data3[keyString.upper()]:
                print(i, end='')
        # if the keyString isn't found, use english string
        except KeyError:
            for i in data1[keyString.upper()]:
                print(i, end='')
    # handle invalid inputs
    else:
        print('Sorry, translation not available')
# NOTE(review): redundant -- the ``with`` statement above already closed
# these files; calling close() again is a harmless no-op.
f1.close()
f2.close()
f3.close()
|
import cv2
from PIL import Image
import numpy as np
import math
class HSI:
    """Shadow-mask extraction via an HSI (hue/saturation/intensity)
    heuristic: pixels with a high hue-to-intensity ratio are treated as
    shadow candidates and Otsu-thresholded into a binary mask."""

    def __init__(self):
        pass

    def getShadow(self, image):
        """Return a binary shadow mask with the same width/height as
        ``image``.

        The image is shrunk to 100x100 for the per-pixel HSI loop, the
        hue/intensity ratio map is Otsu-thresholded and median-filtered,
        then the mask is scaled back to the original size.

        Args:
            image: BGR uint8 image (as loaded by cv2).

        Returns:
            uint8 mask of shape (h, w).
        """
        h, w, c = image.shape
        image = cv2.resize(image, (100, 100))
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # NOTE(review): unused
        # Edge-preserving smoothing before the colour-space conversion.
        blur = cv2.bilateralFilter(image, 9, 75, 75)
        blur = np.divide(blur.astype(float), 255.0)
        cv2.imwrite("image.jpg", blur)  # NOTE(review): debug artefact on disk
        # Fix: np.float was deprecated in NumPy 1.20 and later removed;
        # the builtin float is the documented replacement.
        hsi = np.zeros((blur.shape[0], blur.shape[1], blur.shape[2]), dtype=float)
        # NOTE(review): uint8 storage truncates the float ratio written
        # below -- confirm whether this quantisation is intended.
        ratio_map = np.zeros((blur.shape[0], blur.shape[1]), dtype=np.uint8)
        for i in range(blur.shape[0]):
            for j in range(blur.shape[1]):
                # Intensity: mean of the three channels.
                hsi[i][j][2] = (blur[i][j][0] + blur[i][j][1] + blur[i][j][2]) / 3
                # Hue: arccos formula (epsilon avoids division by zero).
                hsi[i][j][0] = math.acos(((blur[i][j][2]-blur[i][j][1])*(blur[i][j][2]-blur[i][j][0])) /
                                         (0.000001 + 2*math.sqrt((blur[i][j][2]-blur[i][j][1])*(blur[i][j][2]-blur[i][j][1])+(blur[i][j][2]-blur[i][j][0])*(blur[i][j][1]-blur[i][j][0]))))
                # Saturation.
                hsi[i][j][1] = 1 - 3*min(blur[i][j][0], blur[i][j][1], blur[i][j][2])/hsi[i][j][2]
                ratio_map[i][j] = hsi[i][j][0]/(hsi[i][j][2]+0.01)
        hist = np.histogram(ratio_map.ravel(), 256, [0, 256])  # NOTE(review): unused
        ret, th = cv2.threshold(ratio_map, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
        median = cv2.medianBlur(th, 15)
        median = cv2.resize(median, (w, h))
        image = cv2.resize(image, (w, h))
        return median
|
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torch.utils.data as data_utils
def get_data_loader(args):
    """Build train/test DataLoaders for the dataset named in ``args``.

    Args:
        args: namespace with ``dataset`` ('cifar'), ``dataroot``,
            ``download`` and ``batch_size`` attributes.

    Returns:
        (train_dataloader, test_dataloader) tuple.

    NOTE(review): for any ``args.dataset`` other than 'cifar' the loaders
    are never assigned and this raises UnboundLocalError (pre-existing
    behaviour, kept).
    """
    if args.dataset == 'cifar':
        trans = transforms.Compose([
            # Fix: transforms.Scale was deprecated and later removed from
            # torchvision; Resize is the documented replacement.
            transforms.Resize(32),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])
        train_dataset = dset.CIFAR10(root=args.dataroot, train=True, download=args.download, transform=trans)
        test_dataset = dset.CIFAR10(root=args.dataroot, train=False, download=args.download, transform=trans)
        # Check if everything is ok with loading datasets
        assert train_dataset
        assert test_dataset
    train_dataloader = data_utils.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
    test_dataloader = data_utils.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=True)
    return train_dataloader, test_dataloader
|
# Python 2 script: connect to <HOST>:<PORT> over HTTPS and report whether
# the server certificate is ACCEPTed or REJECTed (optionally against CA_FILE).
import sys
import ssl
import urllib2

if len(sys.argv) < 3 or len(sys.argv) > 4:
    exit("Usage: %s <HOST> <PORT> [CA_FILE]" % sys.argv[0])

host = sys.argv[1]
port = sys.argv[2]
cafile = sys.argv[3] if len(sys.argv) > 3 else None

# Python 2.7.9 added the cafile argument support to urllib2.urlopen. Some
# distributions have also backported the support to nominally earlier versions
# so a basic version number check won't be sufficient.
# As a workaround pass in the cafile argument only if needed, and prepare to
# catch the TypeError if the used urllib2.urlopen doesn't support cafile yet.
kwargs = {} if cafile is None else {"cafile": cafile}
try:
    urllib2.urlopen("https://" + host + ":" + port, **kwargs)
except TypeError:
    # Only report UNSUPPORTED when the failure came from the cafile kwarg.
    if not kwargs:
        raise
    print "UNSUPPORTED"
# ssl.CertificateError may not exist on very old Pythons; getattr falls back
# to an empty tuple so the except clause matches nothing in that case.
except getattr(ssl, "CertificateError", ()):
    print("REJECT")
except urllib2.URLError as exc:
    # Re-raise non-TLS failures (DNS, refused connection, ...).
    if not isinstance(exc.reason, ssl.SSLError):
        raise
    print("REJECT")
else:
    print("ACCEPT")
|
# A configuration file consists of one or more named sections, each of which
# can contain individual options with names and values.
# Config file sections are identified by looking for lines starting with
# [ and ending with ]. The value between the square brackets is the section
# name, and can contain any characters except square brackets.
# Options are listed one per line within a section. The line starts with
# the name of the option, which is separated from the value by a colon (:)
# or equal sign (=). Whitespace around the separator is ignored when parsing.
# NOTE(review): Python 2 only -- the module is ``configparser`` in Python 3
# and SafeConfigParser was removed.
from ConfigParser import SafeConfigParser

parser = SafeConfigParser()
parser.read('simple.ini')
# Print the 'url' option from the [bug_tracker] section of simple.ini.
print parser.get('bug_tracker', 'url')
|
from datetime import datetime
from django.test import TestCase
from .ical import encode_event, encode_events
from .models import Event
class IcalEncodeTest(TestCase):
    """Checks that the iCal encoders return serialised (str) output."""

    def test_single_event(self):
        # NOTE(review): no ``name`` is supplied here, unlike the multi-event
        # test -- presumably Event.name is optional; confirm on the model.
        evt = Event.objects.create(
            id=12,
            start=datetime(year=2032, month=1, day=3, hour=12, minute=00),
            end=datetime(year=2032, month=1, day=3, hour=14, minute=00),
            timezone="UTC",
        )
        self.assertIsInstance(encode_event(evt), str)

    def test_multiple_events(self):
        # Two events in the same timezone; encode_events should serialise
        # the whole queryset into one iCalendar string.
        Event.objects.create(
            name="First EVT",
            start=datetime(year=2021, month=1, day=3, hour=12, minute=00),
            end=datetime(year=2021, month=1, day=3, hour=14, minute=00),
            timezone="Europe/Berlin",
        )
        Event.objects.create(
            name="Some Event",
            start=datetime(year=2021, month=1, day=4, hour=12, minute=00),
            end=datetime(year=2021, month=1, day=4, hour=14, minute=00),
            timezone="Europe/Berlin",
        )
        ics = encode_events(Event.objects.all())
        self.assertIsInstance(ics, str)
|
import cupy as cp
import chainer
import chainer.links as L
import chainer.functions as F
from chainer import optimizers, iterators
from chainer import training
from chainer.training import extensions
class RNN(chainer.Chain):
    """Two fully-connected layers with additive recurrent connections
    (w1, w2) and hidden states z1/z2 carried across calls."""

    def __init__(self, batch_size, n_units=1000, n_out=10):
        super().__init__()
        with self.init_scope():
            self.fc1 = L.Linear(None, n_units)
            self.fc2 = L.Linear(None, n_units)
            self.fc3 = L.Linear(None, n_out)
            # Recurrent (hidden-to-hidden) connections.
            self.w1 = L.Linear(None, n_units)
            self.w2 = L.Linear(None, n_units)
            # Hidden states, zero-initialised on the GPU (cupy arrays).
            self.z1 = cp.zeros((batch_size, n_units), dtype=cp.float32)
            self.z2 = cp.zeros((batch_size, n_units), dtype=cp.float32)

    def __call__(self, x):
        """One forward step; updates z1/z2 in place and returns the output
        of the final linear layer."""
        # Fix: removed leftover debug print, which also evaluated fc1(x) a
        # second time on every call.
        h = F.relu(self.fc1(x) + self.w1(self.z1))
        self.z1 = h
        h = F.relu(self.fc2(h) + self.w2(self.z2))
        self.z2 = h
        return self.fc3(h)
|
#!/usr/bin/python3
"""Fabric script (based on the file 1-pack_web_static.py) that
distributes an archive to your web servers, using the function do_deploy:"""
from fabric.api import *
import time
from os import path, stat
# Module-level cache of the last packed archive path (reused by deploy()).
file_path = None
# Web servers targeted by the fabric remote commands.
env.hosts = ['35.237.41.190', '3.90.183.111']
def do_pack():
    """Archive the web_static folder into versions/web_static_<timestamp>.tgz.

    Returns:
        The path of the created archive, or None on failure.
    """
    try:
        stat("versions")
    # Fix: narrowed from a bare except -- only a missing directory is
    # expected here (bare except also swallowed KeyboardInterrupt etc.).
    except OSError:
        local("mkdir versions")
    try:
        time_tuple = time.localtime()
        file_path = "versions/web_static_" + \
            time.strftime("%Y%m%d%H%M%S", time_tuple) + ".tgz"
        print("Packing web_static to " + file_path)
        local("tar -cvzf " + file_path + " web_static")
        size = path.getsize(file_path)
        print("web_static packed: " + file_path + " -> " + str(size) + "Bytes")
        return file_path
    # Keep the best-effort contract: any packing failure yields None.
    except Exception:
        return None
def do_deploy(archive_path):
    """Upload ``archive_path`` to the web servers and activate the release.

    The archive is extracted under /data/web_static/releases/<name>/ and
    the /data/web_static/current symlink is re-pointed at it.

    Args:
        archive_path: local path of a .tgz produced by do_pack().

    Returns:
        True when the new version is deployed, False otherwise.
    """
    if path.isfile(archive_path) is False:
        return False
    try:
        name = archive_path[9:-4]  # strip "versions/" prefix and ".tgz"
        print(name)
        put(archive_path, "/tmp/" + name + ".tgz")
        host_path = "/data/web_static/releases/" + name + "/"
        run("mkdir -p " + host_path)
        run("tar -xzf /tmp/" + name + ".tgz -C " + host_path)
        run("rm /tmp/" + name + ".tgz")
        run("mv /data/web_static/releases/" + name +
            "/web_static/* " + host_path)
        run("rm -rf " + host_path + "web_static")
        run("rm -rf /data/web_static/current")
        run("ln -s " + host_path + " /data/web_static/current")
        print("New version deployed!")
        return True
    # Fix: narrowed from a bare except; any remote failure still -> False.
    except Exception:
        return False
def deploy():
    """Full deployment: pack web_static (at most once per process) and
    deploy the archive to all hosts.

    The archive path is cached in the module-level ``file_path`` so repeated
    calls reuse the same archive instead of re-packing.

    Returns:
        The result of do_deploy, or False when packing failed.
    """
    global file_path
    if file_path is None:
        file_path = do_pack()
    if file_path is None:
        return False
    return do_deploy(file_path)
|
"""
vietoris_rips.py
"""
import numpy as np
def _lower_neighbours(G, u):
""" Given a graph `G` and a vertex `u` in `G`, we return a list with the
vertices in `G` that are lower than `u` and are connected to `u` by an
edge in `G`.
Parameters
----------
G : :obj:`Numpy Array(no. of edges, 2)`
Matrix storing the edges of the graph.
u : int
Vertex of the graph.
Returns
-------
lower_neighbours : :obj:`list`
List of lower neighbours of `u` in `G`.
"""
lower_neighbours = [] # list of lowest neighbours to be computed.
for e in G:
if e[1] == u:
lower_neighbours.append(e[0])
lower_neighbours.sort()
return lower_neighbours
def vietoris_rips(Dist, max_r, max_dim):
    """This computes the Vietoris-Rips complex with simplexes of dimension
    less or equal to max_dim, and with maximum radius specified by max_r.

    Parameters
    ----------
    Dist : :obj:`Numpy Array(no. of points, no. of points)`
        Distance matrix of points.
    max_r : float
        Maximum radius of filtration.
    max_dim : int
        Maximum dimension of computed Rips complex.

    Returns
    -------
    C : :obj:`list(Numpy Array)`
        Vietoris Rips complex generated for the given parameters. List where
        the first entry stores the number of vertices, and all other entries
        contain a :obj:`Numpy Array` with the list of simplices in `C`.
    R : :obj:`list(Numpy Array)`
        List with radius of birth for the simplices in `C`. The `i` entry
        contains a 1D :obj:`Numpy Array` containing each of the birth radii
        for each `i` simplex in `C`.
    """
    if max_dim < 1:  # at least returns always the neighbourhood graph
        max_dim = 1
    C = []
    R = []
    for i in range(max_dim + 1):
        C.append([])
        R.append([])
    # The zero component contains the number of vertices of C
    C[0] = len(Dist)
    R[0] = np.zeros(len(Dist))
    # Start with C[1]: one edge per pair closer than max_r, stored (j, i)
    # with j < i.
    for i in range(C[0]):
        for j in range(i):
            if Dist[i][j] < max_r:
                C[1].append([j, i])
                R[1].append(Dist[i][j])
    # Sort edges according to their birth radius
    sort_R = np.argsort(R[1])
    R[1], C[1] = np.array(R[1]), np.array(C[1])
    R[1] = R[1][sort_R]
    C[1] = C[1][sort_R]
    # Build VR-complex inductively: C[d+1] from C[d]
    for d in range(1, max_dim):
        for k, s in enumerate(C[d]):
            # Find vertices adjacent to every vertex of s and smaller than
            # all of them; each extends s to a (d+1)-simplex.
            low_n = _lower_neighbours(C[1], s[0])
            for v in s[1:]:
                low_n = [n for n in low_n if
                         n in _lower_neighbours(C[1], v)]
            for n in low_n:
                # Birth radius: largest pairwise distance in the new simplex.
                simplex_rad = R[d][k]
                for v in s:
                    if Dist[v][n] > simplex_rad:
                        simplex_rad = Dist[v][n]
                C[d + 1].append(np.insert(s, 0, n))
                R[d + 1].append(simplex_rad)
        # Sort simplices according to their birth radius
        sort_R = np.argsort(R[d + 1])
        R[d + 1], C[d + 1] = np.array(R[d + 1]), np.array(C[d + 1])
        R[d + 1] = R[d + 1][sort_R]
        C[d + 1] = C[d + 1][sort_R]
    # Store simplices as integer arrays.
    # Fix: the original loop ("for c in C[1:]: c = c.astype(int)") only
    # rebound the loop variable and left C unchanged; assign back instead.
    for d in range(1, len(C)):
        C[d] = C[d].astype(int)
    return C, R
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Parameterised SQL statements for updating rows in the
public.user_hw_command table (asyncpg-style $N placeholders)."""

__version__ = '1.0.1'

# Full update of a user hardware command row by id; returns the updated row.
# NOTE(review): "alaram_time" looks like a typo for "alarm_time", but it
# presumably matches a misspelled DB column -- do not rename here without a
# matching schema migration.
update_rent_element_query = """
UPDATE public.user_hw_command AS rnt SET
    user_id = $2::BIGINT,
    hw_action_id = $3::BIGINT,
    proto_field = $4::VARCHAR,
    field_type = $5::VARCHAR,
    "value" = $6::VARCHAR,
    state = $7::VARCHAR,
    traceable_object_id = $8::BIGINT,
    hw_module_id = $9::BIGINT,
    ack_message = $10::BOOLEAN,
    date_from = $11::TIMESTAMPTZ,
    date_to = $12::TIMESTAMPTZ,
    alaram_time = $13::VARCHAR,
    active = $14::BOOLEAN,
    updated_on = now()
WHERE rnt.id = $1::BIGINT RETURNING *;
"""

# Update only the alarm status flag of a row by id; returns the updated row.
update_rent_element_alarm_query = """
UPDATE public.user_hw_command AS rnt SET
    alarm_status = $2::BOOLEAN,
    updated_on = now()
WHERE rnt.id = $1::BIGINT RETURNING *;
"""
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.