text stringlengths 8 6.05M |
|---|
import json
import numpy as np
from constants import *
# Edge-type weights for the adjacency graph; the index constants
# (RETWEET, FOLLOW, POST, ...) come from `constants` via the star import.
weights = [0]*6
weights[RETWEET] = 0.4
weights[RETWEETED] = 0.0   # reverse edges carry no weight, except POSTED below
weights[FOLLOW] = 0.4
weights[FOLLOWED] = 0.0
weights[POST] = 0.6
weights[POSTED] = 0.6
def newTweet():
    """Create a fresh tweet record: graph position plus a retweet counter."""
    record = {"pos": 0}
    record[RETWEETED] = 0
    return record
def newUser(pos):
    """Create a fresh user record at graph position *pos* with zeroed edge counters."""
    record = {"pos": pos}
    for key in (FOLLOW, FOLLOWED, POST):
        record[key] = 0
    return record
def addRandomWeight(A, users, tweets):
    """Blend per-node popularity scores into the transition matrix A.

    Builds one score per user (followers^2 / (following + 1)) and per tweet
    (retweet count), min-max normalises each group, then mixes the resulting
    distribution into every column of A, PageRank-style:
    A[:, i] = d * A[:, i] + (1 - d) * score[i].

    NOTE(review): `d` is not defined in this file — presumably a damping
    constant exported by `constants`; confirm.

    :param A: (n+m)x(n+m) numpy adjacency matrix (modified in place)
    :param users: dict of user records holding "pos" graph positions
    :param tweets: dict of tweet records holding "pos" graph positions
    :return: A
    """
    user_scores = [0]*len(users)
    tweet_scores = [0]*len(tweets)
    with open(FOLLOW_FILE) as f:
        followData = json.load(f)
    with open(RETWEET_FILE) as f:
        retweetData = json.load(f)
    for u in users:
        user_scores[users[int(u)].get("pos")] = float(followData[str(u)]["followers"]**2) / (followData[str(u)]["following"]+1)
    normaliseScores(user_scores)
    for t in tweets:
        tweet_scores[tweets[t].get("pos") - len(users)] = float(retweetData[str(t)])
    normaliseScores(tweet_scores)
    all_scores = user_scores + tweet_scores
    total = sum(all_scores)
    if total != 0:
        # BUG FIX: the original did `for score in all_scores: score /= total`,
        # which only rebinds a loop local and leaves the list unchanged.
        # Rebuild the list so the scores actually sum to 1.
        all_scores = [score / total for score in all_scores]
    for i in range(len(A)):
        A[:, i] *= d
        A[:, i] += (1-d)*all_scores[i]
    return A
def normaliseScores(a):
    """Min-max normalise the list *a* in place to the range [0, 1].

    If all values are equal, the list becomes all zeros.
    """
    hi, lo = max(a), min(a)
    span = hi - lo
    for i, value in enumerate(a):
        a[i] = value - lo
    if span == 0:
        return
    for i in range(len(a)):
        a[i] /= span
def normaliseWeights(graph, users, tweets):
    # Row-normalise user rows of the adjacency matrix so each user's outgoing
    # FOLLOW / POST weight is spread across their edge count.
    # NOTE: np.split returns views into `graph`, so the in-place divisions
    # below modify `graph` itself; nothing needs to be returned.
    n = len(users)
    m = len(tweets)   # currently unused (tweet normalisation is commented out)
    hsplit = np.split(graph, [n])
    # The four quadrants: user->user, user->tweet, tweet->user, tweet->tweet.
    (user_user, user_tweet, tweet_user, tweet_tweet) = (np.split(hsplit[0],[n], axis=1)[0], np.split(hsplit[0],[n], axis=1)[1], np.split(hsplit[1],[n], axis=1)[0], np.split(hsplit[1],[n], axis=1)[1])
    for u in users:
        i = users[u].get("pos")
        follow = users[u].get(FOLLOW)
        followed = users[u].get(FOLLOWED)
        post = users[u].get(POST)
        user_user[i] /= max(follow, 1)   # max(...,1) avoids dividing by zero
        # user_user[:,i] /= max(followed, 1)
        user_tweet[i] /= max(post, 1)
        # for t in tweets:
        #     i = tweets[t].get("pos") - n
        #     retweeted = tweets[t].get(RETWEETED)
        #     tweet_tweet[:,i] /= max(retweeted, 1)
def buildGraph(tweetData, userData, users, tweets):
    """Build the (n+m)x(n+m) weighted adjacency matrix over users and tweets.

    Rows/cols 0..n-1 are users, n..n+m-1 are tweets; edge weights come from
    the module-level `weights` table.  Mutates the `users`/`tweets` records'
    edge counters, then normalises rows and blends in the popularity prior.

    :return: the finished matrix from addRandomWeight
    """
    n = len(users)
    m = len(tweets)
    A = np.zeros((n+m, n+m))
    for tw in tweetData:
        tweet = tw.get("id")
        user = tw.get("user_id")
        retweeted = tw.get("retweeted_status_id")
        u = users[user].get("pos")
        t = tweets[tweet].get("pos")
        A[u][t] = weights[POST]
        users[user][POST] += 1
        A[t][u] = weights[POSTED]
        if retweeted and tweets.get(retweeted):
            r = tweets[retweeted].get("pos")
            A[t][r] = weights[RETWEET]
            A[r][t] = weights[RETWEETED]
            tweets[retweeted][RETWEETED] += 1
    for user in userData:
        u = users[int(user)].get("pos")
        for following in userData[user]:
            f = users[following].get("pos")
            # BUG FIX: u and f are already integer positions; the original
            # indexed with u.get("pos") / f.get("pos"), which raises
            # AttributeError because ints have no .get().
            A[u][f] = weights[FOLLOW]
            users[user][FOLLOW] += 1
            A[f][u] = weights[FOLLOWED]
            users[following][FOLLOWED] += 1
    normaliseWeights(A, users, tweets)
    return addRandomWeight(A, users, tweets)
if __name__ == '__main__':
    # Load raw tweet and follower data (file paths come from constants.py).
    with open(TWEET_DATAFILE) as f:
        tweetData = json.load(f)
    with open(USER_DATAFILE) as f:
        userData = json.load(f)
    users = {}
    tweets = {}
    i = 0   # number of distinct users seen so far (== next user position)
    j = 0   # total number of tweets
    for tweet in tweetData:
        tweets[tweet["id"]] = newTweet()
        if not users.get(tweet["user_id"]):
            users[tweet["user_id"]] = newUser(i)
            i += 1
        j += 1
    # Tweets occupy graph positions i .. i+m-1, after all users.
    k = 0
    for tweet in tweets:
        tweets[tweet]["pos"] = i+k
        k += 1
    print("No. of tweets:", j)
    print("No. of users:", i)
    graph = buildGraph(tweetData, userData, users, tweets)
    np.save(MODEL_GRAPH_FILE, graph)
    # NOTE(review): USER_DATA differs from USER_DATAFILE used above —
    # confirm it is the intended output-path constant.
    with open(USER_DATA, "w") as f:
        json.dump(users, f)
|
from django.http import HttpResponse
from django.shortcuts import render
def hello(request):
    """Render the site index page."""
    return render(request, 'index.html')
def runoob(request):
    """Render runoob.html with a greeting in the template context."""
    return render(request, 'runoob.html', {'hello': 'Hello MB!'})
def runooblist(request):
    """Render runoob.html with a fixed item list and a name in the context."""
    items = ['one','cao','bi']
    return render(request, 'runoob.html', {'fuck': items, 'name': 'picture'})
# Print the first N Fibonacci numbers (starting from 1), space-separated,
# where N is read from standard input.
count = int(input())
prev, cur = 0, 1
for _ in range(count):
    print(cur, end=' ')
    prev, cur = cur, prev + cur
|
import numpy as np
#store all data in memory:
#use 3 numpy arr: q, ans, topic
#retrieved these data from .txt --- written in preprocess.py
class UbuntuDataset(data.Dataset):
    """Ubuntu dialogue Q/A dataset, loaded fully into memory.

    Reads fixed-width integer token ids for questions and answers plus a
    binary label, in the layout written by preprocess.py.
    """
    def __init__(self, filepath, length_path, dataset_path):
        # length_path = "data/data_length.txt"
        # dataset_path = "data/train_dataset.txt"  # or valid, test
        # --- read the maximum sequence lengths ---
        length_file = open(length_path, 'r')
        _ = length_file.readline()
        self.max_q_length = int(length_file.readline())
        self.max_ans_length = int(length_file.readline())
        length_file.close()
        dataset_file = open(dataset_path, 'r')
        # --- read dataset size ---
        self.qa_size = int(dataset_file.readline())
        print("loading data from ", dataset_path)
        print("qa_size = ", self.qa_size)
        # BUG FIX: the original referenced bare `qa_size` / `max_q_length` /
        # `max_ans_length`, which raised NameError; they are instance attributes.
        q = np.zeros((self.qa_size, self.max_q_length), dtype='int32')
        ans = np.zeros((self.qa_size, self.max_ans_length), dtype='int32')
        label = np.zeros((self.qa_size,), dtype='int16')
        count = 0
        # Each record is 5 lines: question ids, answer ids, two skipped lines,
        # then the label.  numpy converts the split strings to int implicitly.
        while count < self.qa_size:
            q[count] = dataset_file.readline().split()
            ans[count] = dataset_file.readline().split()
            _ = dataset_file.readline()
            _ = dataset_file.readline()
            label[count] = dataset_file.readline()
            count += 1
        dataset_file.close()   # BUG FIX: the file handle was never closed
        # BUG FIX: the parsed arrays were locals and got discarded; keep them.
        self.q, self.ans, self.label = q, ans, label
        print("{} entries".format(self.qa_size))

    def __getitem__(self, offset):
        """Return (question, answer, q_len, a_len) for one record.

        NOTE(review): relies on self.index, self.data and self.max_seq_len,
        which are not set in __init__ here — presumably populated by a
        subclass or elsewhere; confirm before use.
        """
        pos, q_len, a_len = self.index[offset]['pos'], self.index[offset]['q_len'], self.index[offset]['a_len']
        question = self.data[pos:pos + q_len].astype('int64')
        answer = self.data[pos+q_len:pos+q_len+a_len].astype('int64')
        ## Padding: pad with zeros, then truncate to max_seq_len; the last
        ## position is forced to 0.
        if len(question) < self.max_seq_len:
            question = np.append(question, [0]*self.max_seq_len)
        question = question[:self.max_seq_len]
        question[-1] = 0
        if len(answer) < self.max_seq_len:
            answer = np.append(answer, [0]*self.max_seq_len)
        answer = answer[:self.max_seq_len]
        answer[-1] = 0
        ## Real (clamped) sequence lengths used for training.
        q_len = min(int(q_len), self.max_seq_len)
        a_len = min(int(a_len), self.max_seq_len)
        return question, answer, q_len, a_len

    def __len__(self):
        # NOTE(review): self.data_len is never assigned in this class — confirm.
        return self.data_len
#!/usr/bin/env
# encoding: utf-8
"""
Created by John DiBaggio on 2018-07-28
Find All Occurrences of a Pattern in a String
In this problem, we ask a simple question: how many times can one string occur as a substring of another? Recall from “Find the Most Frequent Words in a String” that different occurrences of a substring can overlap with each other. For example, ATA occurs three times in CGATATATCCATAG.
Pattern Matching Problem
Find all occurrences of a pattern in a string.
Given: Strings Pattern and Genome.
Return: All starting positions in Genome where Pattern appears as a substring. Use 0-based indexing.
Sample Dataset
ATAT
GATATATGCATATACTT
Sample Output
1 3 9
Execute like:
python src/ba1d.py data/ba1d.txt output/ba1d.txt
"""
__author__ = 'johndibaggio'
import sys
import fileinput
argv = list(sys.argv)
input_pattern = ""
input_genome = ""
# First non-empty line of the input file is the pattern; all remaining
# lines are concatenated into the genome.
for line in fileinput.input(argv[1]):
    if len(line) > 0:
        if len(input_pattern) == 0:
            input_pattern += line.replace('\n', '')
        else:
            input_genome += line.replace('\n', '')
def find_occurrences(genome, pattern):
    """Return all 0-based start positions where pattern occurs in genome.

    Occurrences may overlap.  A window of len(pattern) characters is rolled
    across genome one base at a time.

    :param genome: DNA string
    :type genome: str
    :param pattern: DNA substring
    :type pattern: str
    :return: list of indices of occurrences of pattern in genome
    :rtype: list[int]
    """
    k = len(pattern)
    window = genome[:k]
    positions = []
    if window == pattern:
        positions.append(0)
    for position, base in enumerate(genome[k:], start=1):
        window = window[1:] + base
        if window == pattern:
            positions.append(position)
    return positions
occurrences = find_occurrences(input_genome, input_pattern)
# Space-separated indices, printed and written to the output file (argv[2]).
output_string = str.join(" ", [str(i) for i in occurrences])
print("The following are the occurrences of pattern \"{}\" in genome \"{}\":\n{}".format(input_pattern, input_genome,
                                                                                         output_string))
output_file = open(argv[2], "w+")
output_file.write(output_string)
output_file.close()
|
from django.urls import path
from . import views
# URL routes for this app: the home page and the "cpage" view.
urlpatterns = [
    path('', views.hp, name="hp"),
    path('cpage',views.cpage, name="cpage")
]
|
# Using tuples.
# A tuple cannot be changed once defined; "cannot change" means the
# references it holds cannot be rebound.
classmates = ("zhangsan", "lisi", "wangwu", "xiaoming")
print(len(classmates))
print(classmates)
for i in range(len(classmates)):
    print(classmates[i])
# All elements must be fixed at the moment the tuple is defined.
aa = (1, "ok")
# aa[0] = 2  -- tuple elements cannot be modified
print(aa)
# For a one-element tuple add a trailing comma, aa=(2,);
# otherwise aa=(2) is just the number 2 in parentheses.
aa = (2, ) # tuple
print(type(aa))
aa = (2) # int: parsed as a plain number
print(type(aa))
# A tuple's references are fixed, but a referenced mutable object can change.
classes = [1, 2]
tuple1 = ("a", "b", classes) # tuple1 still points at classes, but the list's content may change
print(tuple1)
classes[0] = 11
classes[1] = 22
print(tuple1)
'''
总结
1.tuple与list类似,但是是不可变的,一旦定义,就不能修改
2.获取元素的方式和元素个数与list相同
3.当定义只有一个元素的tuple时,注意要在元素后加上","
4.tuple一旦初始化后就不能改变指的是指向不能变,但被指的元素内容可变
'''
# Exercises
print("===============================")
tuple1 = (1, 2, 3, 2, 4, 5)
# Element count (length) of the tuple.
print(len(tuple1))
print(tuple1.__len__())
# for i in tuple1:
#     print(i)
# print(tuple1[::])   # all elements
# print(tuple1[::-1]) # reversed copy; tuple1 itself unchanged
# print(tuple1[1:3])  # elements at index 1 and 2 -> (2, 3)
# print(tuple1[:4])   # from the start: indices 0..3 -> (1, 2, 3, 4)
# print(tuple1[3:])   # to the end: indices 3..4 -> (4, 5)
print(tuple1.count(2)) # .count(e): number of occurrences of e in the tuple
print(tuple1.index(2)) # .index(e): index of the first occurrence of e
|
#Process data from https://www.naturalearthdata.com/downloads/10m-cultural-vectors/10m-admin-1-states-provinces/
#convert to WKT using ogr2ogr -f CSV ne_10m_admin_1_states_provinces.wkt ne_10m_admin_1_states_provinces.shp -lco GEOMETRY=AS_WKT
import os
import csv
import sys
from shapely.wkt import dumps, loads
if __name__=="__main__":
    # Raw CSV rows hold very large WKT geometry strings.
    csv.field_size_limit(sys.maxsize)
    csv = csv.DictReader(open("/home/tim/Downloads/ne_10m_admin_1_states_provinces/ne_10m_admin_1_states_provinces.wkt/ne_10m_admin_1_states_provinces.csv", "rt"))
    # Index rows two ways: by geounit->region and by geounit->geonames name.
    geounits, byGn = {}, {}
    if not os.path.exists("regions"):
        os.mkdir("regions")
    for li in csv:
        gu = li['geonunit']
        #if gu == "Australia":
        #    print (li['gn_name'], li['region'], li['type'])
        region = li['region']
        if len(region) > 0:
            if gu not in geounits:
                geounits[gu] = {}
            if region not in geounits[gu]:
                geounits[gu][region] = {}
            nameId = li['name_id']
            wkt = loads(li['WKT'])
            geounits[gu][region][nameId] = (li, wkt)
        gnName = li['gn_name']
        if len(gnName) > 0:
            if gu not in byGn:
                byGn[gu] = {}
            if gnName not in byGn[gu]:
                byGn[gu][gnName] = {}
            nameId = li['name_id']
            wkt = loads(li['WKT'])
            byGn[gu][gnName][nameId] = (li, wkt)
    #for gu in geounits:
    #    print (gu)
    # UK + Ireland: merge each region's sub-polygons into one geometry and
    # write it out as "<geounit>,_<region>.wkt" under regions/.
    for geounit in ['England', 'Scotland', 'Wales', 'Northern Ireland', 'Ireland']:
        for region in geounits[geounit]:
            regionList = geounits[geounit][region]
            combined = None
            for nameId in regionList:
                li, wkt = regionList[nameId]
                if combined is None:
                    combined = wkt
                else:
                    combined = combined.union(wkt)
            # Small buffer heals slivers/self-intersections before simplifying.
            combined = combined.buffer(0.01)
            combined = combined.simplify(0.0001)
            print (geounit, region, combined.area)
            fina = "{}, {}.wkt".format(geounit, region).replace(" ", "_")
            fi=open(os.path.join("regions", fina), "wt")
            fi.write(dumps(combined))
            fi.close()
    # Australia: same merge, but grouped by geonames name instead of region.
    for geounit in ['Australia']:
        for gnName in byGn[geounit]:
            gnList = byGn[geounit][gnName]
            combined = None
            for nameId in gnList:
                li, wkt = gnList[nameId]
                if combined is None:
                    combined = wkt
                else:
                    combined = combined.union(wkt)
            combined = combined.buffer(0.01)
            combined = combined.simplify(0.0001)
            print (geounit, gnName, combined.area)
            fina = "{}, {}.wkt".format(geounit, gnName).replace(" ", "_")
            fi=open(os.path.join("regions", fina), "wt")
            fi.write(dumps(combined))
            fi.close()
|
# Write a small sample file, then re-read it and report per-line word count
# and average word length.
with open("data.txt", mode="w", encoding="utf-8") as myFile:
    myFile.write("some random text\nMore random filestext\n")

with open("data.txt", encoding="utf-8") as myFile:
    # Iterating the file object yields one line at a time (same as the
    # original readline loop, which stopped on the first empty read).
    for lineNum, line in enumerate(myFile, start=1):
        print("line", lineNum)
        words = line.split()
        print("no. of words:", len(words))
        charCount = sum(len(word) for word in words)
        # BUG FIX: a line with no words (e.g. a blank line) made the original
        # divide by zero; report 0.0 in that case.
        avgNumCharS = charCount / len(words) if words else 0.0
        print("avg word length : {:.2f}".format(avgNumCharS))
|
# Print the greeting five times.
for _ in range(5):
    print("Hello World")
|
#!/usr/bin/env python
"""
@author: Jean-Lou Dupont
"""
__author__ = "Jean-Lou Dupont"
__email = "python (at) jldupont.com"
__fileid = "$Id: layout.py 16 2009-04-01 00:09:14Z jeanlou.dupont $"
import os
import sys
from string import Template
from pyjld.os import safe_mkdir
# Format template for all user-facing messages from this module.
_base = "pyjld.builder.layout: %s"

# Directory skeleton to create; $ns and $pkg are substituted per project.
dir_list = [ 'tags',
             'trunk',
             'trunk/tests',
             'trunk/docs',
             'trunk/docs/html',
             'trunk/docs/source',
             'trunk/docs/source/_static',
             'trunk/docs/source/_templates',
             'trunk/docs/source/modules',
             'trunk/src',
             'trunk/src/$ns',
             'trunk/src/$ns/$pkg',
           ]
def build_layout(top, ns, pkg):
    """
    Builds the directory layout

    Substitutes $ns/$pkg in each dir_list entry and creates the directory
    under *top*; safe_mkdir reports whether the path already existed.
    """
    for path in dir_list:
        ppath = subvars(path, ns=ns, pkg=pkg)
        existed, path=safe_mkdir([top, ppath])
        if not existed:
            msg("Created path: [%s]" % ppath)
def subvars(path, **params):
    """$-substitute *params* into *path*; unknown placeholders are left as-is."""
    return Template(path).safe_substitute(params)
def emsg(msg, code=0):
    """
    Exit with message

    Prints the message through the _base template, then terminates the
    process with *code*.  (Python 2 print-statement syntax.)
    """
    print _base % msg
    sys.exit(code)
def msg(msg, **params):
    """
    Normal message

    Formats through _base, then $-substitutes *params* into the result
    before printing.  (Python 2 print-statement syntax.)
    """
    message = _base % msg
    rendered = Template(message).safe_substitute(params)
    print rendered
def main(*argv):
    """Build the package layout under the current working directory.

    The directory must be named '<ns>.<package>'; exits (via emsg) with an
    error otherwise.  Returns 0 on success.
    """
    current_dir = os.getcwd()
    current_dir_name = os.path.basename( current_dir )
    try:
        ns, package = current_dir_name.split('.')
    except ValueError:
        # BUG FIX: narrowed from a bare `except:` — only the unpack of a name
        # without exactly one '.' should trigger the usage error, not
        # KeyboardInterrupt/SystemExit.
        emsg('top directory must be named following pattern: ns.package',1)
    msg("Building layout for: ns[$ns] package[$pkg]", ns=ns, pkg=package)
    msg("Top directory [$dir]", dir=current_dir)
    build_layout(current_dir, ns, package)
    return 0
# ==============================================
# ==============================================
if __name__ == "__main__":
"""
"""
sys.exit(main())
|
import unittest
from katas.kyu_7.make_a_function_that_does_arithmetic import arithmetic
class ArithmeticTestCase(unittest.TestCase):
    """Checks that arithmetic() dispatches add/subtract/multiply/divide."""

    def test_equal_1(self):
        self.assertEqual(arithmetic(1, 2, 'add'), 3)

    def test_equal_2(self):
        self.assertEqual(arithmetic(8, 2, 'subtract'), 6)

    def test_equal_3(self):
        self.assertEqual(arithmetic(5, 2, 'multiply'), 10)

    def test_equal_4(self):
        self.assertEqual(arithmetic(8, 2, 'divide'), 4)
|
#!/usr/bin/python2
import cgitb,cgi,commands,random
print "Contant-type:text/html"
print ""
cgitb.enable()
x=cgi.FieldStorage()
n=x.getvalue("num")
u=x.getvalue('uname')
p=x.getvalue('pas')
port=random.randint(6000,7000)
commands.getoutput("sudo systemctl restart docker")
print "<html>"
print "access your container using links:"
print "<br>"
print "</html>"
for i in range(int(n)) :
commands.getoutput("sudo docker run -it -p "+ str(port) +":4200 -d --name "+u+" rahul1")
print "<html>"
print (" <a href='http://172.17.0.3:4200' target='_blank'> Container " + str(i) +" </a>")
print "access containers using <login - ritesh ; password - redhat >"
print "</html>"
|
import math

# Convergence threshold and the two resistance parameters.
delta = 0.000001
R_a = 2.46267
R_b = 2.29567

# Newton iteration solving 1/e^(pi z R_a) + 1/e^(pi z R_b) = 1 for z.
# Initial guess (math.log(2)/math.log(math.e) is simply ln 2).
z_minus1 = 2*(math.log(2)/math.log(math.e))/(math.pi*(R_a+R_b))
z_i = 0
converged = False
while not converged:
    y_i = 1/math.exp(math.pi*z_minus1*R_a)+1/math.exp(math.pi*z_minus1*R_b)
    slope = R_a/math.exp(math.pi*z_minus1*R_a)+R_b/math.exp(math.pi*z_minus1*R_b)
    z_i = z_minus1-((1-y_i)/math.pi)/slope
    converged = abs(z_i-z_minus1)/z_i < delta
    if not converged:
        z_minus1 = z_i
print(z_i)
print("resistance: " + str(1/z_i))
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from resource_management.core.resources.system import Execute
from resource_management.libraries.functions import format
from resource_management.libraries.functions import Direction
from resource_management.core.exceptions import Fail
from resource_management.core.logger import Logger
def run_migration(env, upgrade_type):
    """
    If the acl migration script is present, then run it for either upgrade or downgrade.
    That script was introduced in JDP 2.3.4.0 and requires stopping all clickhouse server first.
    Requires configs to be present.
    :param env: Environment.
    :param upgrade_type: "rolling" or "nonrolling".
    """
    # Intentionally a stub: the migration logic is not implemented yet.
    pass
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import datetime
from BeautifulSoup import BeautifulStoneSoup # Only required for the test that doesn't use Selenium
import pytest
import requests
from unittestzero import Assert
from pages.home import HomePage
from base_test import BaseTest
class TestHomePage(BaseTest):
    """Selenium/webqa checks for the QMO home page: tagline, news items,
    and link visibility/destination/validity."""

    @pytest.mark.nondestructive
    def test_that_page_has_correct_tagline(self, mozwebqa):
        home_page = HomePage(mozwebqa)
        home_page.go_to_page()
        Assert.equal('The home of Mozilla QA', home_page.tagline)

    @pytest.mark.nondestructive
    def test_that_page_has_news_items(self, mozwebqa):
        home_page = HomePage(mozwebqa)
        home_page.go_to_page()
        Assert.greater(home_page.news_items_count, 0)

    @pytest.mark.nondestructive
    def test_that_news_items_are_sorted_in_reverse_chronological_order(self, mozwebqa):
        home_page = HomePage(mozwebqa)
        home_page.go_to_page()
        news_items = home_page.news_items
        # Walk the list asserting each post date is <= the previous one.
        most_recent_date = datetime.date.today()
        for news_item in news_items:
            if news_item.is_post:
                news_item_date = news_item.date_posted
                Assert.greater_equal(most_recent_date, news_item_date, 'News items are out of sequence. %s is not after %s.' % (most_recent_date, news_item_date))
                most_recent_date = news_item_date

    # The following 3 tests check for visibilty, accuracy and validity of the team links on the home page
    @pytest.mark.nondestructive
    def test_that_getting_started_links_are_visible(self, mozwebqa):
        home_page = HomePage(mozwebqa)
        home_page.go_to_page()
        bad_links = []
        for link in home_page.getting_started_links_list:
            if not home_page.is_element_visible(*link.get('locator')):
                bad_links.append('The link at %s is not visible' % link.get('locator')[1:])
        Assert.equal(0, len(bad_links), '%s bad links found: ' % len(bad_links) + ', '.join(bad_links))

    @pytest.mark.nondestructive
    def test_that_getting_started_link_destinations_are_correct(self, mozwebqa):
        home_page = HomePage(mozwebqa)
        home_page.go_to_page()
        bad_links = []
        for link in home_page.getting_started_links_list:
            url = home_page.link_destination(link.get('locator'))
            if not url.endswith(link.get('url_suffix')):
                bad_links.append('%s does not end with %s' % (url, link.get('url_suffix')))
        Assert.equal(0, len(bad_links), '%s bad links found: ' % len(bad_links) + ', '.join(bad_links))

    @pytest.mark.nondestructive
    def test_that_getting_started_link_urls_are_valid(self, mozwebqa):
        home_page = HomePage(mozwebqa)
        home_page.go_to_page()
        bad_links = []
        for link in home_page.getting_started_links_list:
            url = home_page.link_destination(link.get('locator'))
            response_code = self.get_response_code(url, mozwebqa.timeout)
            if response_code != requests.codes.ok:
                bad_links.append('%s is not a valid url - status code: %s.' % (url, response_code))
        Assert.equal(0, len(bad_links), '%s bad urls found: ' % len(bad_links) + ', '.join(bad_links))

    # This test checks the validity of all links on the page, and doesn't use Selenium
    # Note: for the purposes of example, this only checks the first 10 links
    @pytest.mark.skip_selenium
    @pytest.mark.nondestructive
    def test_that_checks_the_validity_of_all_links_on_the_page(self, mozwebqa):
        url = mozwebqa.base_url
        page_response = requests.get(url, verify=False)
        html = BeautifulStoneSoup(page_response.content)
        bad_links = []
        links = html.findAll('a')
        for index, link in enumerate(links[:10]):
            url = self.make_absolute(link['href'], mozwebqa.base_url)
            response_code = self.get_response_code(url, mozwebqa.timeout)
            if response_code != requests.codes.ok:
                bad_links.append('%s is not a valid url - status code: %s.' % (url, response_code))
        Assert.equal(0, len(bad_links), '%s bad urls found: ' % len(bad_links) + ', '.join(bad_links))
|
### config.py ###
# Scheme: "postgres+psycopg2://<USERNAME>:<PASSWORD>@<IP_ADDRESS>:<PORT>/<DATABASE_NAME>"
#DATABASE_URI = 'sqlite:///books.sqlite3'
# SECURITY NOTE(review): database credentials are hard-coded in source;
# move them to environment variables or a secrets store.
DATABASE_URI = 'postgres+psycopg2://postgres:ome0895114530@127.0.0.1:5432/books'
#DATABASE_URI = 'postgresql://webadmin:SPYatb32373@node1234-rachpython.th.app.ruk-com.cloud:11001/books'
# Path to the YAML fixture with book data.
f_yam = '/Users/INE-01/Desktop/advanceINE-8/aftermidterm/ORM-Database/Ex-03/books.yaml'
|
from flask import g, render_template, request, redirect, url_for, flash
import requests
from app import app
from .forms import SearchForm, LoginForm, RegisterForm
from .models import User, Pokemon, db
from flask_login import login_user, logout_user, current_user, login_required
#Routes
@app.route('/', methods=['GET'])
def index():
    """Home page; exposes the shared search form to the layout via flask.g."""
    g.form = SearchForm()
    return render_template('index.html.j2')
@app.route('/register', methods=['GET','POST'])
def register():
    """User registration: validate the form and create a User record."""
    g.form = SearchForm()   # navbar search form, consumed by the layout
    form = RegisterForm()
    if request.method == 'POST' and form.validate_on_submit():
        try:
            new_user_data={
                "first_name": form.first_name.data.title(),
                "last_name": form.last_name.data.title(),
                "email": form.email.data.lower(),
                "password": form.password.data
            }
            new_user_object = User()
            # NOTE(review): from_dict presumably persists the user; otherwise
            # a db.session.add/commit is missing here — confirm against models.
            new_user_object.from_dict(new_user_data)
        except Exception:
            # BUG FIX: narrowed from a bare `except:` so KeyboardInterrupt /
            # SystemExit are not swallowed; user-facing message unchanged.
            error_string="There was a problem creating your account. Please try again"
            return render_template('register.html.j2',form=form, error=error_string)
        # Give the user some feedback that says registered successfully
        return redirect(url_for('login'))
    return render_template('register.html.j2',form=form)
@app.route('/login', methods=['GET','POST'])
def login():
    """Log a user in by e-mail and password."""
    g.form = SearchForm()
    form = LoginForm()
    if request.method == 'POST' and form.validate_on_submit():
        # Do login Stuff
        u = User.query.filter_by(email=form.email.data.lower()).first()
        print(u)
        if u is not None and u.check_hashed_password(form.password.data):
            login_user(u)
            # Give User feeedback of success
            return redirect(url_for('index'))
        # Give user Invalid Password Combo error
        return redirect(url_for('login'))
    return render_template("login.html.j2", form=form)
@app.route('/logout', methods=['GET'])
@login_required
def logout():
    # @login_required already guarantees an authenticated user; the extra
    # check is belt-and-braces.
    if current_user is not None:
        logout_user()
    return redirect(url_for('index'))
@app.route('/pokemon', methods=['GET', 'POST'])
@login_required
def pokemon():
    """Search a pokemon via the PokeAPI, cache it in the DB on first sight,
    and attach it to the current user's collection."""
    form = SearchForm()
    g.form=form
    if request.method == 'POST' and g.form.validate_on_submit():
        pokemon = form.search.data.lower()
        print("Pokemon is", pokemon)
        url = f'https://pokeapi.co/api/v2/pokemon/{pokemon}'
        response = requests.get(url)
        if response.ok:
            try:
                data = response.json().get("stats")
                spritedata = response.json()["sprites"]['other']['dream_world'].get("front_default")
                print("Sprit data is", spritedata)
            except:
                # NOTE(review): bare except — narrows poorly; anything wrong in
                # the JSON shape is reported as "no info".
                error_string=f'There is no info for {pokemon}'
                return render_template("pokemon.html.j2", form=form, error=error_string)
            # Check if pokemon exists
            check_pokemon = bool(Pokemon.query.filter_by(name = pokemon).first())
            if check_pokemon == True:
                user = current_user
                # Check is user has the pokemon
                user_pokemon_object = db.session.query(User).join(User.pokemons).filter(Pokemon.name==pokemon).all()
                # Send message to frontend if user has pokemon
                if user in user_pokemon_object:
                    flash(f'{pokemon.capitalize()} already exists in your database', 'success')
                else:
                    # Add pokemon to user's pokemon list
                    p = Pokemon.query.filter_by(name = pokemon).first()
                    user.pokemons.append(p)
                    db.session.add(user)
                    db.session.commit()
                    flash(f'{pokemon.capitalize()} has been added to your pokemon collections', 'success')
            # If pokemon has never been searched before then add to database
            else:
                new_pokemon_data={
                    "name": pokemon,
                    "spritedata": spritedata
                }
                # Create a dictionary with each attribute as a key
                for attribute in data:
                    attribute_name = attribute['stat']['name']
                    if attribute_name == 'special-attack':
                        new_pokemon_data['special_attack'] = {'base_stat': attribute['base_stat'], 'effort' : attribute['effort']}
                    elif attribute_name == 'special-defense':
                        new_pokemon_data['special_defense'] = {'base_stat': attribute['base_stat'], 'effort' : attribute['effort']}
                    else:
                        new_pokemon_data[attribute_name] = {'base_stat': attribute['base_stat'], 'effort' : attribute['effort']}
                new_pokemon_object = Pokemon()
                new_pokemon_object.from_dict(new_pokemon_data)
                u = current_user
                p = Pokemon.query.filter_by(name = pokemon).first()
                u.pokemons.append(p)
                db.session.add(u)
                db.session.commit()
                flash(f'{pokemon.capitalize()} has been added to your pokemon collections', 'success')
            # Stats shown on the result page, regardless of cache hit/miss.
            all_stats = []
            for stat in data:
                stat_dict={
                    'poke_statbase':stat['base_stat'],
                    'poke_stateffort':stat['effort'],
                    'poke_statname':stat['stat']['name'],
                }
                all_stats.append(stat_dict)
            return render_template("pokemon.html.j2", form=form, stats=all_stats, sprite=spritedata, pokemon=pokemon.title())
        else:
            error_string="Invalid Pokemon name!"
            return render_template("pokemon.html.j2", form=form, error=error_string)
    return render_template("pokemon.html.j2", form=form)
#export/set FLASK_APP=app.py
#export/set FLASK_ENV=development
|
# -*- coding: utf-8 -*-
from django.contrib.auth import login
from django.shortcuts import redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from annoying.decorators import render_to
from django.contrib.auth.models import User
from Professor.models import Professor, Monitor
from Professor.views.utils import prof_monit_exist
from Materia.Turma.models import Turma
from Avaliacao.models import TemplateAvaliacao, Avaliacao
from Avaliacao.Questao.models import QuestaoDeAvaliacao, Questao ,FiltroQuestao
@prof_monit_exist
@login_required
@render_to('professor/criar/criar_conteudos.html')
def criar_conteudos(request):
    # Resolve which kind of account the logged-in user has; `usu` ends up as
    # the Monitor if one exists, otherwise the Professor (or None).
    # NOTE: the template receives locals(), so the local names professor /
    # monitor / usu are part of this view's contract — do not rename them.
    usu = None
    try:
        professor = request.user.professor_set.get()
        usu = professor
    except Professor.DoesNotExist:
        pass
    try:
        monitor = request.user.monitor_set.get()
        usu = monitor
    except Monitor.DoesNotExist:
        pass
    return locals()
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from pyscrap3 import Item
from pyscrap3 import ItemList
class DemoItem(Item):
    """Items are ideal for storing unique content such as a page
    title or the body of a news article."""
    def __init__(self):
        super().__init__()
        self.newfield("title")
        self.newfield("body")
class DemoListItems(ItemList):
    """ItemLists are ideal for storing multiple grouped pieces of
    content, such as all the comments by a single author."""
    def __init__(self):
        super().__init__()
        self.newfield("author")
|
import logging
from .hit_counts import test__hitcounts
from .url_category import test__url_category
from .ports import test__ports
from .upgrades import test__upgrades
from .manualnat import test__manualnat
from .autonat import test__autonat
from .port_object_group import test__port_object_group
from .acprule import test__acp_rule
from .acp import test__access_control_policy
from .intrusion_policy import test__intrusion_policy
from .interfaces_subinterfaces import test__subinterfaces
from .interfaces_etherchannel import test__etherchannel_interfaces
from .interfaces_redundant import test__redundant_interfaces
from .interfaces_bridge_group import test__bridge_group_interfaces
from .interfaces_physical import test__phys_interfaces
from .wait_for_task import wait_for_task
from .device_with_task import test__device_with_task
from .sla_monitor import test__slamonitor
from .interface_group import test__interface_group
from .security_zone import test__security_zone
from .protocol_port import test__protocol_port
from .vlan_tag import test__vlan_tag
from .url import test__url
from .ikev2 import test__ikev2
from .ikev1 import test__ikev1
from .icmpv6 import test__icmpv6
from .icmpv4 import test__icmpv4
from .geolocations import test__geolocations
from .acls_extended import test__extended_acls
from .ip_range import test__ip_range
from .ip_network import test__ip_network
from .ip_host import test__ip_host
from .variable_set import test__variable_set
from .server_version import test__fmc_version
from .ip_addresses import test__ip_addresses
from .network_group import test__network_group
from .url_group import test__url_group
from .vlan_group_tag import test__vlan_group_tag
from .dns_servers_group import test__dns_servers_group
from .continent import test__continent
from .file_policies import test__filepolicies
from .country import test__country
from .certificate_enrollment import test__cert_enrollment
from .application_category import test__application_category
from .application_productivity import test__application_productivity
from .application_filter import test__application_filter
from .application_risk import test__application_risk
from .application import test__application
from .application_tag import test__application_tag
from .application_type import test__application_type
from .audit_records import test__audit_records
from .deployable_devices import test__deployable_devices
from .deployment_requests import test__deployment_requests
from .devicegrouprecords import test__devicegrouprecords
from .ftddevicehapairs import test__ftddevicehapairs
from .failoverinterfacemacaddressconfigs import test__failoverinterfacemacaddressconfigs
from .monitored_interface import test__monitoredinterfaces
from .devicerecords import test__devicerecords
from .staticroutes import test__staticroutes
from .ipv4staticroutes import test__ipv4staticroutes
from .ipv6staticroutes import test__ipv6staticroutes
from .prefilter import test__prefilter_policy
from .prefilter_rule import test__prefiler_rule
from .s2s_vpn import test__ftds2svpns
logging.debug("In the unit-tests __init__.py file.")
# Public API of the unit-test package: one entry per test__ function
# imported above.
__all__ = [
    "test__audit_records",
    "test__deployment_requests",
    "test__deployable_devices",
    "test__devicegrouprecords",
    "test__ftddevicehapairs",
    "test__failoverinterfacemacaddressconfigs",
    "test__monitoredinterfaces",
    "test__devicerecords",
    "test__staticroutes",
    "test__ipv4staticroutes",
    "test__ipv6staticroutes",
    "test__bridge_group_interfaces",
    "test__url_category",
    "test__application_type",
    "test__application_tag",
    "test__application",
    "test__application_risk",
    "test__application_filter",
    "test__application_productivity",
    "test__application_category",
    "test__cert_enrollment",
    "test__country",
    "test__filepolicies",
    "test__continent",
    "test__dns_servers_group",
    "test__vlan_group_tag",
    "test__url_group",
    "test__network_group",
    "test__ip_addresses",
    "test__fmc_version",
    "test__variable_set",
    "test__ip_host",
    "test__ip_network",
    "test__ip_range",
    "test__extended_acls",
    "test__geolocations",
    "test__icmpv6",
    "test__icmpv4",
    "test__ikev2",
    "test__ikev1",
    "test__vlan_tag",
    "test__url",
    "test__protocol_port",
    "test__security_zone",
    "test__interface_group",
    "test__slamonitor",
    "test__device_with_task",
    "wait_for_task",
    "test__phys_interfaces",
    "test__redundant_interfaces",
    "test__etherchannel_interfaces",
    "test__subinterfaces",
    "test__ports",
    "test__upgrades",
    "test__manualnat",
    "test__autonat",
    "test__port_object_group",
    "test__acp_rule",
    "test__access_control_policy",
    "test__intrusion_policy",
    "test__prefilter_policy",
    # BUG FIX: imported above but previously missing from __all__:
    "test__hitcounts",
    "test__prefiler_rule",
    "test__ftds2svpns",
]
|
#!/usr/bin/env python
# -*- coding: cp1252 -*-
# <PythonProxy.py>
#
#Copyright (c) <2009> <Fábio Domingues - fnds3000 in gmail.com>
#
#Permission is hereby granted, free of charge, to any person
#obtaining a copy of this software and associated documentation
#files (the "Software"), to deal in the Software without
#restriction, including without limitation the rights to use,
#copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the
#Software is furnished to do so, subject to the following
#conditions:
#
#The above copyright notice and this permission notice shall be
#included in all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
#OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
#NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
#WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
#FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
#OTHER DEALINGS IN THE SOFTWARE.
"""\
Copyright (c) <2009> <Fábio Domingues - fnds3000 in gmail.com> <MIT Licence>
"""
import sys
sys.path.append('../dns')
import socket, thread, select, time, random, re
from dns_common import sendDNSQuery
__version__ = '0.1.0 Draft 1'
BUFLEN = 8192                 # socket recv buffer size in bytes
VERSION = 'Python Proxy/'+__version__
HTTPVER = 'HTTP/1.1'
BR = []                       # available bitrates parsed from the f4m manifest (see start_server)
AVG = 0                       # EWMA of per-segment throughput estimates (updated in ConnectionHandler)
ALPHA = .5                    # EWMA smoothing factor; overridden from argv in start_server
USAGE = '%s <log> <alpha> <listen-port> <fake-ip> <dns-ip> <dns-port> [<www-ip>]' % (sys.argv[0])
NAME = 'video.cs.cmu.edu'     # name resolved via the custom DNS (sendDNSQuery)
PORT = 8080                   # backend video-server port used by _connect_target
INNER_IP = '0.0.0.0'          # local address outgoing sockets bind to (argv <fake-ip>)
DNS_IP = '0.0.0.0'            # custom DNS server address (argv)
DNS_PORT = -1                 # custom DNS server port (argv)
RR_ADDR = ''                  # resolved video-server address; optionally fixed via argv <www-ip>
LOG_FILE = None               # unbuffered log file handle opened in start_server
def getBR():
    """Pick a bitrate from the global BR list based on the throughput EWMA.

    Returns the last bitrate whose value times 1.5 stays under AVG
    (BR is sorted ascending, so that is the highest safe bitrate);
    falls back to the lowest bitrate when none qualifies.
    """
    fitting = [rate for rate in BR if rate * 1.5 < AVG]
    return fitting[-1] if fitting else BR[0]
class ConnectionHandler:
    """Handles one proxied HTTP request: rewrites the requested bitrate,
    forwards the request to the video server, relays the response back to
    the client, and updates the global throughput EWMA for 'Seg' requests.
    """
    def __init__(self, connection, address, timeout):
        global AVG
        self.client = connection
        self.client_buffer = ''
        self.timeout = timeout
        self.method, self.path, self.protocol = self.get_base_header()
        self.method_others()
        # Only segment requests contribute to the throughput estimate.
        if 'Seg' in self.path:
            # t_new: observed throughput = 8 * bytes / seconds / 1000
            # (Content-Length from the response, request wall time from method_others).
            t_new = int(8*float(self.cl)/float(self.req_time)/1000)
            b = getBR()
            # Exponentially weighted moving average of throughput.
            AVG = (1-ALPHA)*AVG + ALPHA*t_new
            t = int(time.time())
            s = ' '.join([str(t),str(self.req_time),str(t_new),str(round(AVG)),str(b),RR_ADDR,self.path])
            print s
            LOG_FILE.write(s+'\n')
            #print self.path + ' --> ' + str(t_new)
            #print AVG
        # One request per connection: close both sides when done.
        self.client.close()
        self.target.close()
    def get_base_header(self):
        """Read from the client until the request line is complete and
        return its [method, path, protocol] parts; the remainder of what
        was read stays in self.client_buffer.
        """
        while 1:
            self.client_buffer += self.client.recv(BUFLEN)
            end = self.client_buffer.find('\n')
            if end!=-1:
                break
        sys.stdout.flush()
        data = (self.client_buffer[:end+1]).split()
        self.client_buffer = self.client_buffer[end+1:]
        return data
    def method_others(self):
        """Rewrite the request path (no-playlist manifest, selected bitrate),
        forward it to the video server, and time the full exchange.
        """
        self._connect_target()
        path = self.path
        sys.stdout.flush()
        # Serve the manifest without the bitrate list so the player cannot
        # adapt on its own; the proxy chooses the bitrate instead.
        path = path.replace('big_buck_bunny.f4m','big_buck_bunny_nolist.f4m')
        b = getBR()
        # Replace the default 1000kbps segment path with the chosen bitrate.
        path = path.replace('1000',str(b))
        self.path = path
        self.req_start = time.time()
        self.target.send('%s %s %s\n'%(self.method, path, self.protocol)+
                         self.client_buffer)
        self.client_buffer = ''
        self._read_write()
        # Wall-clock duration of the request/response, used for t_new above.
        self.req_time = time.time() - self.req_start
    def _connect_target(self):
        """Resolve the video server (once, via the custom DNS) and open a
        socket to it bound to INNER_IP.
        """
        global RR_ADDR
        # name = self.path.split('http://')[1].split('/')[0].split(':')[0]
        # port = 80
        # try:
        #     port = self.path.split('http://')[1].split('/')[0].split(':')[1]
        # except:
        #     pass
        if not RR_ADDR:
            RR_ADDR = sendDNSQuery(NAME, INNER_IP, DNS_IP, DNS_PORT)[1]
        (soc_family, _, _, _, address) = socket.getaddrinfo(RR_ADDR, PORT)[0]
        self.target = socket.socket(soc_family)
        self.target.bind((INNER_IP,0))#random.randrange(3000,10000)))
        self.target.connect(address)
    def _read_write(self):
        """Relay the server response to the client until Content-Length
        bytes of body have been forwarded, an error occurs, or the
        connection idles past the timeout.
        """
        time_out_max = self.timeout/3
        count = 0
        self.left = 0
        while 1:
            count += 1
            (recv, _, error) = select.select([self.target], [], [self.target], 3)
            if error:
                break
            if recv:
                data = self.target.recv(BUFLEN)
                if data:
                    data = data.replace('Connection: Keep-Alive','Connection: Close')
                    try:
                        # Remember the Content-Length for body accounting and
                        # for the throughput estimate in __init__.
                        d = data.split('Content-Length: ')[1]
                        self.cl = d.split('\r\n')[0]
                    except:
                        pass
                    self.client.send(data)
                    count = 0
                    if self.left:
                        # Body already started: count down remaining bytes.
                        self.left = self.left - len(data)
                        if self.left <= 0:
                            return
                    if '\r\n\r\n' in data:
                        # Header/body boundary seen: remaining = length minus
                        # whatever body bytes arrived in this chunk.
                        self.left = int(float(self.cl)) - len(data.split('\r\n\r\n')[1])
            if count == time_out_max:
                break
def start_server(timeout=5, handler=ConnectionHandler):
    """Parse argv, read available bitrates from the local f4m manifest,
    then accept client connections forever, one handler thread each.

    argv: <log> <alpha> <listen-port> <fake-ip> <dns-ip> <dns-port> [<www-ip>]
    """
    global BR, AVG, INNER_IP, DNS_IP, DNS_PORT, LOG_FILE, RR_ADDR, ALPHA
    if len(sys.argv) < 7:
        print USAGE
        exit(-1)
    # Unbuffered log so entries survive an abrupt kill.
    LOG_FILE = open(sys.argv[1], 'w', 0)
    ALPHA = float(sys.argv[2])
    INNER_IP = sys.argv[4]
    DNS_IP = sys.argv[5]
    DNS_PORT = int(float(sys.argv[6]))
    # Optional fixed server address; otherwise resolved lazily via DNS.
    if len(sys.argv) == 8:
        RR_ADDR = sys.argv[7]
    # Extract every bitrate="N" value from the manifest.
    v = open('/var/www/vod/big_buck_bunny.f4m').read()
    vi = [m.start() for m in re.finditer('bitrate=',v)]
    for i in vi:
        BR.append(int(float(v[i+9:].split('"')[0])))
    BR = sorted(BR)
    print BR
    # Start the throughput EWMA at the lowest available bitrate.
    AVG = BR[0]
    port = int(float(sys.argv[3]))
    soc = socket.socket(socket.AF_INET)
    soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    soc.bind(('', port))
    print "Serving on %d."% port#debug
    soc.listen(0)
    while 1:
        # Each accepted connection is handled in its own thread.
        thread.start_new_thread(handler, soc.accept()+(timeout,))
    soc.close()
if __name__ == '__main__':
    start_server()
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
def load(folder):
    """
    Load the five sensor CSV files (Accelerometer, EMG, Gyro, Orientation,
    Orientation Euler) for one activity folder.

    Input: folder -- name of the activity folder under ``data/``.
    Output: a list of pandas DataFrames, one per file, in sorted filename
    order (Accelerometer first, etc.).
    """
    datafile = []
    # path_to_data is the path to the folder containing the EatFood data.
    # Change the path if the data lives elsewhere.
    path_to_data = "data"
    path = os.path.join(path_to_data, folder)
    print(path)
    # Fetch all files in the folder as DataFrames and append them to a list.
    # BUGFIX: os.listdir() returns entries in arbitrary, platform-dependent
    # order; sort so each list position always maps to the same sensor.
    for files in sorted(os.listdir(path)):
        file = os.path.join(path, files)
        datafile.append(pd.read_csv(file))
    return datafile
def loadData(folders):
    """
    Load the data files for every eat-food activity folder.

    input: iterable of eat-food activity folder names
    output: 2d list -- for each activity, the 5 sensor DataFrames
    (Accelerometer, EMG, Gyro, Orientation, Orientation Euler)
    """
    return [load(name) for name in folders]
if __name__ == "__main__":
    #
    # Folders of eat food activity
    folders = ['EatFood1', 'EatFood2', 'EatFood3', 'EatFood4']
    data = loadData(folders)
    # To get the minimum length of IMU data among all the 4 activities.
    # Initialising a high value because the objective is to get the minimum.
    sample = 1e10
    for i in range(4):
        #
        # Eat Food Activity
        activity = data[i]
        #
        # We just check Accelerometer since all the IMU files have the same
        # number of records.
        accelerometer = activity[0]
        sample = min(sample, len(accelerometer))
    #
    # Create this path manually before running.
    # The loop will go through each column of all five files.
    path = "plot/eating/variables/"
    sensorName = ['Accelerometer', 'Emg', 'Gyro', 'Orienation', 'Eulers']
    # One sub-directory per sensor for the saved plots.
    for name in sensorName:
        sensorpath = os.path.join(path,name)
        if not (os.path.isdir(sensorpath)):
            os.mkdir(sensorpath)
    # For each activity: plot every column of every sensor file.
    for i in range(4):
        # An Eatfood activity
        activity = data[i]
        for k in range(len(activity)):
            sensor = activity[k]
            # Truncate to the first 500 samples so plots are comparable.
            sensor = sensor.iloc[:500]
            column = sensor.columns.tolist()
            # Column 0 is skipped (timestamp axis); plot the rest.
            for j in range(1, len(column)):
                imagepath = os.path.join(path,sensorName[k],column[j])
                if not (os.path.isdir(imagepath)):
                    os.mkdir(imagepath)
                x = np.arange(len(sensor))
                value = sensor.iloc[:,j].values
                plt.cla()
                plt.plot(x,value, color='red')
                plt.xlabel('Timestamp')
                plt.ylabel(column[j])
                plt.title(sensorName[k]+' '+column[j]+' '+'Eatfood'+str(i+1))
                plt.savefig(imagepath+'/Eatfood'+str(i+1)+'.jpg')
|
__author__ = 'Justin'
import networkx as nx
import os
from DisplayNetwork import networkdisplay
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from numpy.random import rand
# Plot Simple Network Graph for Example
G1 = nx.Graph()
nodes = ['A','B','C','D','E','F']
for node in nodes:
    G1.add_node(node)
# Edge attribute names carried by this example graph.
keys = ['zenness','time','distance']
G1.add_edge('A','B',weight = 1,zenness= 5, time = 8, distance = 2)
G1.add_edge('B','C',weight = 2,zenness= 2, time = 6, distance = 2)
G1.add_edge('D','C',weight = 3,zenness= 4, time = 6, distance = 6)
G1.add_edge('D','B',weight = 4,zenness= 1, time = 5, distance = 2)
G1.add_edge('A','D',weight = 5,zenness= 2, time = 4, distance = 2)
G1.add_edge('D','E',weight = 5,zenness= 2, time = 4, distance = 2)
G1.add_edge('E','F',weight = 5,zenness= 2, time = 4, distance = 2)
G1.add_edge('F','C',weight = 5,zenness= 2, time = 4, distance = 2)
G1.add_edge('E','A',weight = 5,zenness= 2, time = 4, distance = 2)
# Force-directed layout; positions are reused for all draw calls below.
positions = nx.spring_layout(G1)
print(positions)
nx.draw_networkx(G1,pos = positions,node_color = '#a6a6a6',edge_color = '#333333',node_size = 500,font_size = 14)
edge_labels = nx.get_edge_attributes(G1,'weight')
nx.draw_networkx_edge_labels(G1,pos = positions,edge_labels = edge_labels,font_size = 14)
# Draw Shortest Path (by 'weight') from A to F on top of the base graph.
shortestpath = nx.shortest_path(G1,'A','F','weight')
edgelist =[]
# Consecutive node pairs along the path become the highlighted edges.
for nodeA, nodeB in zip(shortestpath,shortestpath[1:]):
    edgelist.append((nodeA,nodeB))
redcolor = '#ff4d4d'
nx.draw_networkx_edges(G1,pos = positions,edge_color = redcolor,edgelist=edgelist)
# NOTE(review): draw_networkx_nodes does not document a font_size kwarg;
# newer networkx versions may reject it -- confirm against the installed version.
nx.draw_networkx_nodes(G1,pos = positions,node_color = redcolor,node_size = 500,font_size = 14,nodelist=shortestpath)
red_patch = mpatches.Patch(color=redcolor, label='Shortest Path')
plt.legend(handles = [red_patch])
plt.show()
|
import os
import re
import platform
import shutil
import argparse
from xmlrpc.client import Fault
from datetime import (datetime, timedelta)
from zrong.base import DictBase, list_dir, slog
from wordpress_xmlrpc import (Client,
WordPressPost, WordPressPage, WordPressTerm, WordPressMedia)
from wordpress_xmlrpc.exceptions import InvalidCredentialsError
from wordpress_xmlrpc.methods.taxonomies import (GetTerms)
class BlogError(Exception):
    """Base exception raised by the wpcmd blog tooling."""
    pass
class Action(object):
    """Base class for a wpcmd sub-command.

    Holds the loaded configuration (Conf), the parsed command-line args and
    the sub-command's argparse parser, plus a lazily created XML-RPC client.
    Subclasses implement go().
    """

    def __init__(self, gconf, gargs, gparser):
        self.conf = gconf
        self.args = gargs
        self.parser = gparser
        self._wp = None
        self._update_site_config()

    def _update_site_config(self):
        """Let --user/--password/--site command-line values override the
        saved site configuration."""
        if self.args.user:
            self.conf.site.user = self.args.user
        if self.args.password:
            self.conf.site.password = self.args.password
        if self.args.site:
            if self.args.site.rfind('xmlrpc.php')>0:
                # Full endpoint URL supplied: use it as-is.
                self.conf.site.url = self.args.site
            else:
                # Strip one trailing slash, then append the XML-RPC endpoint.
                site = self.args.site
                if site.endswith('/'):
                    site = site[:-1]
                self.conf.site.url = '%s/xmlrpc.php'%site

    def get_postid(self, as_list=False):
        """Return the queried post id, or with as_list=True a list of ids,
        expanding "A-B" items into the full numeric range (as strings)."""
        if not self.args.query:
            return None
        if as_list:
            postids = []
            for postid in self.args.query:
                match = re.match(r'^(\d+)-(\d+)$', postid)
                if match:
                    a = int(match.group(1))
                    b = int(match.group(2))
                    for i in range(a,b+1):
                        postids.append(str(i))
                else:
                    postids.append(postid)
            return postids
        return self.args.query[0]

    def get_dict_from_query(self, query):
        """Turn ["key=value", ...] items into a dict; None when query is empty."""
        if query:
            d = {}
            for v in query:
                value = v.split('=')
                d[value[0]] = value[1]
            return d
        return None

    def get_term_query(self):
        """Build the [taxonomy, slug?] query list from --type and --query
        ('term' passes --query through unchanged; 'tag' maps to 'post_tag')."""
        typ = self.args.type
        q = self.args.query
        query = []
        if typ == 'term':
            query = q
        else:
            if typ == 'tag':
                typ = 'post_tag'
            query.append(typ)
            if q and len(q)>0:
                query.append(q[0])
        return query

    def get_terms_from_wp(self, query, force=False):
        """Return the cached terms of a taxonomy (or one term when a slug is
        given), fetching from wordpress when the cache is empty or force is
        True."""
        if not query or len(query)== 0:
            slog.error('Please provide a taxonomy name! You can use '
                    '"show -t tax" to get one.')
            return None
        taxname = query[0]
        slug = query[1] if len(query)>1 else None
        terms = self.conf[taxname]
        if not terms or force:
            results = self.wpcall(GetTerms(taxname))
            if results:
                self.conf.save_terms(results, taxname)
                # BUGFIX: re-read the cache so freshly fetched terms are
                # actually returned; previously the stale/None value leaked
                # out even after a successful fetch-and-save.
                terms = self.conf[taxname]
        if terms and slug:
            return terms[slug]
        return terms

    def print_result(self, result):
        """Log one wordpress object (term/post/media) in a readable form."""
        if isinstance(result, WordPressTerm):
            slog.info('id=%s, group=%s, '
                    'taxnomy_id=%s, name=%s, slug=%s, '
                    'parent=%s, count=%d',
                    result.id, result.group,
                    result.taxonomy_id, result.name, result.slug,
                    result.parent, result.count)
        elif isinstance(result, WordPressPost):
            slog.info('id=%s, date=%s, date_modified=%s, '
                    'slug=%s, title=%s, post_status=%s, post_type=%s',
                    result.id, str(result.date), str(result.date_modified),
                    result.slug, result.title,
                    result.post_status, result.post_type)
        elif isinstance(result, WordPressMedia):
            slog.info('id=%s, parent=%s, title=%s, '
                    'description=%s, caption=%s, date_created=%s, link=%s, '
                    'thumbnail=%s, metadata=%s',
                    result.id, result.parent, result.title,
                    result.description, result.caption, str(result.date_created),
                    result.link,
                    result.thumbnail, result.metadata)
        else:
            slog.info(result)

    def print_results(self, results):
        """Log a result, a list of results, or a dict of key/value pairs."""
        if isinstance(results, list):
            for result in results:
                self.print_result(result)
        elif isinstance(results, dict):
            for k,v in results.items():
                slog.info('%s %s'%(k, str(v)))
        else:
            self.print_result(results)

    def get_datetime(self, datestring):
        """Parse 'YYYY-MM-DD HH:MM:SS' and shift it back by 8 hours.

        NOTE(review): the -8h shift presumably converts a UTC+8 local time
        to UTC for wordpress -- confirm against the site's timezone.
        """
        dt = datetime.strptime(datestring, '%Y-%m-%d %H:%M:%S')
        return dt - timedelta(hours=8)

    def get_terms_from_meta(self, categories, tags):
        """Map category and tag names from article metadata to cached
        WordPressTerm objects; None (with an error logged) when any name is
        unknown in wordpress."""
        terms = []
        if categories:
            for cat in categories:
                term = self.conf.get_term('category', cat)
                if not term:
                    slog.error('The category "%s" is not in wordpress.'
                            ' Please create it first.'%cat)
                    return None
                terms.append(term)
        if tags:
            for tag in tags:
                term = self.conf.get_term('post_tag', tag)
                if not term:
                    slog.error('The tag "%s" is not in wordpress.'
                            'Please create it first'%tag)
                    return None
                terms.append(term)
        return terms

    def wpcall(self, method):
        """Invoke an XML-RPC method, creating the client on first use.

        Returns the call's result, or None (with the error logged) on
        credential or XML-RPC faults.
        """
        if not self._wp:
            self._wp = Client(self.conf.site.url,
                    self.conf.site.user,
                    self.conf.site.password)
        try:
            results = self._wp.call(method)
        except InvalidCredentialsError as e:
            slog.error(e)
            return None
        except Fault as e:
            slog.error(e)
            return None
        return results

    def go(self):
        """Sub-command entry point; overridden by concrete actions."""
        pass

    def build(self):
        """Run go() when a type was chosen, otherwise print the sub-parser help."""
        if self.args.type:
            self.go()
        elif self.parser:
            self.parser.print_help()
class Conf(DictBase):
    """Persistent wpcmd configuration: site credentials, working directories,
    file-name conventions, and a cache of wordpress taxonomy terms."""

    # Post types stored as markdown articles on disk.
    ARTICLE_TYPES = ('post', 'page', 'draft')

    def save_to_file(self):
        """Persist this configuration to the file recorded by init()."""
        super().save_to_file(self.conffile)

    def init(self, workDir, confFile):
        """Populate a fresh configuration with defaults and save it."""
        # BUGFIX: was 'self.confile' (typo) while save_to_file() reads
        # 'self.conffile', so every save raised AttributeError.
        self.conffile = confFile
        self.site = DictBase(
                {
                    'user': 'user',
                    'password': 'password',
                    'url': 'http://you-wordpress-blog/xmlrpc.php',
                })
        self.directory = DictBase(
                {
                    'work': workDir,
                    'draft': 'draft',
                    'post': 'post',
                    'page': 'page',
                    'media': 'media',
                })
        self.files = DictBase(
                {
                    'ext': '.md',
                    'draftfmt': 'draft_%s',
                })
        self.save_to_file()

    def save_terms(self, terms, taxname):
        """Replace the cached terms of a taxonomy and persist the config."""
        termdict = DictBase()
        for term in terms:
            self.save_term(term, taxname, termdict)
        self[taxname] = termdict
        self.save_to_file()

    def save_term(self, term, taxname, termdict=None):
        """Store one WordPressTerm under its slug (into termdict, or into the
        taxonomy's existing cache when termdict is omitted)."""
        if termdict is None:
            termdict = self[taxname]
        termdict[term.slug] = DictBase({
            'id':term.id,
            'group':term.group,
            'taxonomy':term.taxonomy,
            'taxonomy_id':term.taxonomy_id,
            'name':term.name,
            'slug':term.slug,
            'description':term.description,
            'parent':term.parent,
            'count':term.count,
            })

    def get_term(self, taxname, slug):
        """Rebuild a WordPressTerm from the cache; None when unknown."""
        if not self[taxname]:
            return None
        if not self[taxname][slug]:
            return None
        termdict = self[taxname][slug]
        term = WordPressTerm()
        term.id = termdict['id']
        term.group = termdict['group']
        term.taxonomy = termdict['taxonomy']
        term.taxonomy_id = termdict['taxonomy_id']
        term.name = termdict['name']
        term.slug = termdict['slug']
        term.description = termdict['description']
        term.parent = termdict['parent']
        term.count = termdict['count']
        return term

    def is_article(self, posttype):
        """True when posttype is stored as a markdown article on disk."""
        return posttype in Conf.ARTICLE_TYPES

    def get_draft(self, name):
        """
        There are two kinds of draft file in the draft directory:
        one has been published to wordpress and is in draft status;
        one has not been published to wordpress yet.

        Returns (absolute path, file name) for the draft called name.
        """
        draftname = (self.files.draftfmt % str(name))+self.files.ext
        return self.get_path(self.directory.draft, draftname), draftname

    def get_new_draft(self, name=None):
        """Pick a path for a new draft: the given name (error when taken),
        or the first free numbered draft file."""
        draftnames = list(list_dir(self.get_path(self.directory.draft)))
        draftfile, draftname = None, None
        if name:
            draftfile, draftname = self.get_draft(name)
            if draftname in draftnames:
                raise BlogError('The draft file "%s" already exists!'%
                        draftname)
        else:
            name = 1
            draftfile, draftname = self.get_draft(name)
            while os.path.exists(draftfile):
                name += 1
                draftfile, draftname = self.get_draft(name)
        return draftfile, draftname

    def get_article(self, name, posttype):
        """Return (path, filename) of the article, or (None, None) when
        posttype is not an article type."""
        postname = name+self.files.ext
        if self.is_article(posttype):
            return self.get_path(self.directory[posttype], postname), postname
        return None, None

    def get_path(self, name, *path):
        """Absolute path under the work directory for name (+ optional parts)."""
        workdir = os.path.join(self.directory.work, name)
        if path:
            return os.path.abspath(os.path.join(workdir, *path))
        return workdir

    def get_media(self, *path):
        """Path of the media directory, or of a file inside it."""
        mediadir = self.get_path(self.directory.media)
        if path:
            return os.path.join(mediadir, *path)
        return mediadir

    def get_mdfiles(self, posttype):
        """Yield (posttype, name, relative path) for every .md file of a type."""
        for afile in os.listdir(self.get_path(posttype)):
            if afile.endswith('.md'):
                name = afile.split('.')[0]
                yield (posttype, name, os.path.join(posttype, afile))
def checkFTPConf(ftpConf):
    """Validate that ftpConf carries server, user and password.

    Raises BlogError when ftpConf is falsy or any of the three values is
    missing/empty; returns None when the configuration is usable.
    """
    valid = ftpConf \
            and ftpConf.server \
            and ftpConf.user \
            and ftpConf.password
    if not valid:
        raise BlogError('ftpConf MUST contains following values:'
                'server,user,password !')
def check_args(argv=None):
    """Build the wpcmd argument parser (write/new/show/update sub-commands)
    and parse argv.

    Returns (parsed args, chosen sub-parser), or (None, None) -- after
    printing help -- when no sub-command was given.
    """
    parser = argparse.ArgumentParser(prog='wpcmd')
    subParsers = parser.add_subparsers(dest='sub_name', help='sub-commands')
    # 'write': local markdown maintenance (no wordpress connection needed).
    pw = subParsers.add_parser('write',
            help='Write *.md files.')
    pw.add_argument('-r', '--readme', action='store_true',
            help='Build README.md.')
    pw.add_argument('-u', '--url', action='store_true',
            help='Rewrite url.')
    pw.add_argument('-c', '--category', action='store_true',
            help='Rewrite category.')
    pw.add_argument('-d', '--dirname', type=str, default='post',
            choices = ['post', 'page', 'draft', 'all'],
            help='Rewrite articles by type. The value is [post|page|draft|all].')
    pw.add_argument('-a', '--analytic', action='store_true',
            help='Analytic the articles.')
    pw.add_argument('--name', type=str,
            help='Provide a article name.')
    # 'new': create content on the wordpress site.
    pn = subParsers.add_parser('new',
            help='Create some new content.')
    pn.add_argument('-u', '--user', type=str,
            help='Login username.')
    pn.add_argument('-p', '--password', type=str,
            help='Login password.')
    pn.add_argument('-s', '--site', type=str,
            help='Site url.')
    pn.add_argument('-t', '--type', type=str,
            choices=['post', 'page', 'tag', 'category'],
            default='post',
            help='Create a new content in wordpress.')
    pn.add_argument('-q', '--query', nargs='*',
            help='The options for query.')
    # 'show': read-only queries against the wordpress site.
    ps = subParsers.add_parser('show',
            help='Show wordpress contents.')
    ps.add_argument('-u', '--user', type=str,
            help='Login username.')
    ps.add_argument('-p', '--password', type=str,
            help='Login password.')
    ps.add_argument('-s', '--site', type=str,
            help='Site url.')
    ps.add_argument('-t', '--type', type=str,
            choices=['post', 'page', 'draft',
                'option','tax','term',
                'category','tag',
                'medialib', 'mediaitem'],
            default='option',
            help='Content type of wordpress.')
    ps.add_argument('-n', '--number', type=int,
            default=10,
            help='The amount for GetPosts.')
    ps.add_argument('-o', '--orderby',
            choices=['post_modified', 'post_id'],
            default='post_id',
            help='To sort the result-set by one column.')
    ps.add_argument('-d', '--order',
            choices=['ASC', 'DESC'],
            default='DESC',
            help='To sort the records in a descending or a ascending order.')
    ps.add_argument('-q', '--query', nargs='*',
            help='The options for query.')
    # 'update': push local changes back to the wordpress site.
    pu = subParsers.add_parser('update',
            help='Update wordpress contents.')
    pu.add_argument('-u', '--user', type=str,
            help='Login username.')
    pu.add_argument('-p', '--password', type=str,
            help='Login password.')
    pu.add_argument('-s', '--site', type=str,
            help='Site url.')
    pu.add_argument('-t', '--type', type=str,
            choices=['post', 'page', 'draft', 'option', 'tag', 'category'],
            default='post',
            help='Content type of wordpress.')
    pu.add_argument('-q', '--query', nargs='*',
            help='The options for query.')
    args = parser.parse_args(args=argv)
    if args.sub_name:
        return args, subParsers.choices[args.sub_name]
    parser.print_help()
    return None, None
|
# utils.py This file may be used for all utility functions
import random
from nltk import sent_tokenize, word_tokenize, pos_tag, RegexpTokenizer
import numpy as np
import datetime
import string
import pickle
'''
Create a bijection betweeen int and object. May be used for reverse indexing
'''
class Indexer(object):
    """Bijection between objects and dense integer ids, with reverse lookup."""

    def __init__(self):
        self.objs_to_ints = {}
        self.ints_to_objs = {}

    def __str__(self):
        return self.__repr__()

    def __len__(self):
        return len(self.ints_to_objs)

    def get_object(self, i):
        """Return the object assigned to index i (KeyError when unknown)."""
        return self.ints_to_objs[i]

    def contains(self, obj):
        """True when obj has already been assigned an index."""
        return self.index_of(obj) != -1

    def index_of(self, obj):
        """Return obj's index, or -1 when it has never been indexed."""
        return self.objs_to_ints.get(obj, -1)

    def get_index(self, obj, add_object=True):
        """Return obj's index, assigning the next free id first when
        add_object is True and obj is new; -1 for an unknown object when
        add_object is False."""
        if add_object and obj not in self.objs_to_ints:
            fresh = len(self.ints_to_objs)
            self.objs_to_ints[obj] = fresh
            self.ints_to_objs[fresh] = obj
        return self.objs_to_ints.get(obj, -1)
# Add every token appearing in the passages of feats to the feature indexer.
# New tokens always get fresh indices (Indexer.get_index defaults to adding).
def add_dataset_features(feats, feature_indexer):
    """Word-tokenize each example's passage and index every token.

    New tokens are assigned fresh indices in feature_indexer.
    """
    for example in feats:
        for word in word_tokenize(example.passage):
            feature_indexer.get_index(word)
# Below code taken from CS 388 provided code (written by Greg Durrett <email>)
# Wraps an Indexer and a list of 1-D numpy arrays where each position in the list is the vector for the corresponding
# word in the indexer. The 0 vector is returned if an unknown word is queried.
class WordEmbeddings:
    """Wraps an Indexer plus a parallel array of word vectors.

    Position i of self.vectors is the vector for word i in the indexer;
    unknown words fall back to the UNK_SYMBOL vector.
    """

    def __init__(self, word_indexer, vectors):
        self.word_indexer = word_indexer
        self.vectors = vectors
        # assert self.vectors.shape[0] == len(self.word_indexer)

    @property
    def embedding_dim(self):
        """Dimensionality of each word vector."""
        return len(self.vectors[0])

    def get_embedding(self, word):
        """Vector for word; the UNK vector when the word is unknown."""
        idx = self.word_indexer.index_of(word)
        if idx == len(self.word_indexer):
            # Defensive guard against an indexer/vectors size mismatch.
            print("HALT")
            exit()
        if idx == -1:
            return self.vectors[self.word_indexer.index_of(UNK_SYMBOL)]
        return self.vectors[idx]

    def word2embedding_idx(self, word):
        """Index usable to address self.vectors; UNK's index for unknown words."""
        idx = self.word_indexer.index_of(word)
        return idx if idx != -1 else self.word_indexer.index_of(UNK_SYMBOL)

    def get_embedding_idx(self, word_idx):
        """Vector at word_idx; the UNK vector when word_idx is -1."""
        if word_idx == -1:
            return self.vectors[self.word_indexer.index_of(UNK_SYMBOL)]
        return self.vectors[word_idx]

    def get_average_score(self, word_idx):
        """Mean over the components of the vector for word_idx."""
        return np.average(self.get_embedding_idx(word_idx))
# Special vocabulary symbols: PAD fills short sequences, UNK stands in for
# out-of-vocabulary words; both map to zero vectors in read_word_embeddings.
PAD_SYMBOL = "<PAD>"
UNK_SYMBOL = "<UNK>"
# Loads the given embeddings (ASCII-formatted) into a WordEmbeddings object. Augments this with an UNK embedding
# that is the 0 vector. Reads in all embeddings with no filtering -- you should only use this for relativized
# word embedding files.
def read_word_embeddings(embeddings_file):
    """Load ASCII-formatted embeddings into a WordEmbeddings object.

    Reads every embedding with no filtering -- only use this on relativized
    embedding files.  PAD and UNK symbols are appended after the real
    vocabulary, both mapped to zero vectors.

    :param embeddings_file: path to "<word> <float> <float> ..." lines
    :return: WordEmbeddings wrapping an Indexer and a 2-D numpy array
    """
    word_indexer = Indexer()
    vectors = []
    # BUGFIX: 'with' guarantees the handle is closed even when a malformed
    # line raises; the original leaked the handle on error paths.
    with open(embeddings_file) as f:
        for line in f:
            if line.strip() != "":
                space_idx = line.find(' ')
                word = line[:space_idx]
                numbers = line[space_idx + 1:]
                float_numbers = [float(number_str) for number_str in numbers.split()]
                vector = np.array(float_numbers)
                word_indexer.get_index(word)
                vectors.append(vector)
    # Add PAD token, mapped to the zero vector.
    word_indexer.get_index(PAD_SYMBOL)
    vectors.append(np.zeros(vectors[0].shape[0]))
    # Add an UNK token, also mapped to the zero vector.
    word_indexer.get_index(UNK_SYMBOL)
    vectors.append(np.zeros(vectors[0].shape[0]))
    print("Read in " + repr(len(word_indexer)) + " vectors of size " + repr(vectors[0].shape[0]))
    # Turn vectors into a 2-D numpy array
    return WordEmbeddings(word_indexer, np.array(vectors))
# Relativization = restrict the embeddings to only have words we actually need in order to save memory
# (but this requires looking at the data in advance).
# Relativize the word vectors to the training set
def relativize(file, outfile, indexer):
    """Copy to outfile only the embedding lines whose word the indexer knows.

    Relativization restricts the embeddings to words actually needed,
    saving memory (requires looking at the data in advance).

    :param file: source embeddings file ("<word> <floats...>" per line)
    :param outfile: destination path for the filtered embeddings
    :param indexer: Indexer-like object with contains(word)
    """
    # BUGFIX: 'with' closes both handles even on error; the originals leaked
    # on any raise.  The unused 'voc' accumulator and dead commented-out
    # diagnostics were removed.
    with open(file, encoding='utf-8') as f, open(outfile, 'w', encoding='utf-8') as o:
        for line in f:
            word = line[:line.find(' ')]
            if indexer.contains(word):
                o.write(line)
# Takes the given Examples and their input indexer and turns them into a numpy array by padding them out to max_len.
# Optionally reverses them.
def make_padded_input_tensor(exs, input_indexer, max_len):
    """Turn Examples into a 2-D numpy array of token indices.

    Each passage is word-tokenized, truncated/padded to max_len with the
    PAD index, and unknown tokens are mapped to the UNK index.
    """
    rows = []
    for ex in exs:
        tokens = word_tokenize(ex.passage)
        row = []
        for i in range(0, max_len):
            if i >= len(tokens):
                row.append(input_indexer.index_of(PAD_SYMBOL))
            else:
                tok_id = input_indexer.index_of(tokens[i])
                row.append(input_indexer.index_of(UNK_SYMBOL) if tok_id == -1 else tok_id)
        rows.append(row)
    return np.array(rows)
def make_output_one_hot_tensor(exs, output_indexer):
    """One-hot encode each example's author label against output_indexer."""
    size = len(output_indexer)
    rows = []
    for ex in exs:
        author_idx = output_indexer.index_of(ex.author)
        rows.append([int(i == author_idx) for i in range(size)])
    return np.array(rows)
# POS tags treated as function-word categories: in pos(), tokens carrying one
# of these tags keep their literal lowercased word form, while all other
# tokens are reduced to their POS tag.
pos_fancy = """CC
DT
EX
FW
IN
MD
PDT
RB
WDT
WP
WP$
WRB""".split("\n")
##RBR
#RBS
##
def pos(passage, n=2, fancy=True):
    """Build a space-joined string of POS-based n-grams for the passage.

    Each token maps to its POS tag, except tokens whose tag is in pos_fancy
    (function words), which keep their lowercased word form; consecutive
    runs of n mapped tokens are concatenated into one n-gram.

    NOTE(review): the 'fancy' flag is accepted but never consulted here.
    """
    # tokenize = RegexpTokenizer(r'\w+')
    # words = tokenize.tokenize(passage)
    words = word_tokenize(passage)
    tagged = pos_tag(words)
    # Renamed the loop variable: the original shadowed the imported pos_tag.
    mapped = [("", word.lower()) if tag in pos_fancy else (word, tag)
              for word, (_, tag) in zip(words, tagged)]
    final = []
    # BUGFIX: len - n + 1 includes the final n-gram (the original's
    # range(len - n) silently dropped it).
    for i in range(len(tagged) - n + 1):
        n_gram = "".join([mapped[i + _i][1] for _i in range(n)])
        final.append(n_gram)
    return " ".join(final)
class Example:
    """One authorship example: a text passage, its author label, and an
    optional example id."""

    def __init__(self, passage, author, id=None):
        # NOTE: 'id' shadows the builtin but is part of the public signature.
        self.passage = passage
        self.author = author
        self.id = id
class AuthorshipModel:
    """Base class for authorship-attribution models.

    Subclasses implement _predictions(); evaluate() scores the model and
    optionally pickles (history, correct, total) for later plotting.
    """
    def __init__(self):
        # Training history (consumed by the plot dump in evaluate); set by subclasses.
        self.history = None
    def _predictions(self, test_data, args):
        """Return a list of predicted author labels, one per example (subclass hook)."""
        pass
    def _sentencewise_prediction(self, test_data, args):
        """Predict each sentence in the group, then return the majority-vote label."""
        predictions = self._predictions(test_data, args)
        prediction = max(set(predictions), key=predictions.count)
        return prediction
    def evaluate(self, test_data, args):
        """Score the model on test_data and print per-example results.

        With args.sentencewise, test_data is a list of sentence groups and
        each group is scored by majority vote; otherwise it is a flat list
        of examples.  Returns (number correct, total).
        """
        if args.sentencewise:
            predictions = [self._sentencewise_prediction(sentences, args) for sentences in test_data]
            # A group's label is taken from its first sentence.
            labels = [sentences[0].author for sentences in test_data]
        else:
            predictions = self._predictions(test_data, args)
            labels = [example.author for example in test_data]
        correct = sum([pred == true for pred, true in zip(predictions, labels)])
        if not args.sentencewise:
            # Per-example diagnostics (passage echoed for error analysis).
            for i in range(len(test_data)):
                if labels[i]==predictions[i]:
                    print("CORRECT", labels[i])
                    print(test_data[i].passage)
                else:
                    print("INCORRECT", predictions[i], labels[i])
                    print(test_data[i].passage)
        print("Correctness: " + str(correct) + "/" + str(len(test_data)), "->", correct / len(test_data))
        if args.plot:
            # Despite the .pdf suffix, this writes a pickle of
            # (history, correct, total) for a separate plotting step.
            filename = args.model + "_" + args.train_type + "_" + args.train_options + "_" + str(datetime.datetime.now()) + ".pdf"
            with open(filename, "wb") as f:
                pickle.dump((self.history, correct, len(test_data)), f)
        return correct, len(test_data)
|
# Classify every integer from 2 up to the entered limit as perfect,
# deficient or abundant by comparing it with the sum of its proper divisors.
limite = int(input("Ingrese un numero: "))
for i in range (2 , limite+1):
    sumaDivisores = 0
    # Sum the proper divisors of i (every j that divides i, excluding i itself).
    for j in range (1, i+1):
        if i%j==0 and i!=j:
            sumaDivisores = sumaDivisores + j
    if i == sumaDivisores:
        print(i, "es un numero perfecto")
    elif i > sumaDivisores:
        print(i, "es un numero deficiente")
    else:
        print(i, "es un numero abundante")
|
from django.db import models
from django.db.models import Q
from django.core.validators import FileExtensionValidator
from PIL import Image
from django.contrib.auth.models import User
from django.utils import timezone
import uuid
from django.urls import reverse
from django.shortcuts import redirect
# from uuid import UUID
# Create your models here.
class ProfileManager(models.Manager):
    """Manager adding a case-insensitive substring search over profiles."""

    def search(self, query=None):
        """Return profiles whose title, content or username contains query;
        the full queryset when query is None."""
        qs = self.get_queryset()
        if query is None:
            return qs
        match = (Q(title__icontains=query)|
                 Q(content__icontains=query)|
                 Q(user__username__icontains=query)
                 )
        # distinct() is often necessary with Q lookups
        return qs.filter(match).distinct()
class AdminUpload(models.Model):
    """Owner record grouping site images uploaded by an admin user."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)

    def __str__(self):
        return f'{self.user.username}'
class AdminImages(models.Model):
    """A website image file attached to an AdminUpload owner record."""
    admin = models.ForeignKey(AdminUpload, on_delete=models.CASCADE)
    image = models.FileField(upload_to='website_img/', blank=True, null=True)

    def __str__(self):
        return f'{self.admin}'
class Profile(models.Model):
    """Per-user profile: bio, avatar, social graph, and monetization state."""
    GENDER_CHOICES = [
        ('male', 'Male'),
        ('female', 'Female')
    ]
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    title = models.CharField(max_length=140, blank=True)
    content = models.TextField(max_length=120, blank=True)
    image = models.ImageField(default='default.jpg', upload_to='profile_pics/')
    # NOTE(review): both self-relations are asymmetric; confirm that separate
    # 'following' and 'follower' M2M fields are intentional (one usually
    # implies the reverse of the other).
    following = models.ManyToManyField('self', related_name='followin', symmetrical=False)
    follower = models.ManyToManyField('self', related_name='followr', symmetrical=False)
    gender = models.CharField(max_length=20, choices=GENDER_CHOICES, blank=True, null=True)
    birth_date = models.DateField(blank=True, null=True)
    objects = ProfileManager()
    deleted_post_views = models.PositiveIntegerField(blank=False, null=False, default=0)
    monetized_views = models.IntegerField(blank=False, null=False, default=0)
    account_monetized = models.BooleanField(default=False)
    verified = models.BooleanField(default=False)
    cv = models.FileField(upload_to='cvs/', blank=True, null=True, validators=[FileExtensionValidator(["pdf","docx"])])

    def __str__(self):
        return f'{self.user.username} Profile'

    def get_absolute_url(self):
        """URL of this user's follower page."""
        return reverse('follower', kwargs={'user_id': self.user.id})

    def get_follow_url(self):
        """URL that toggles follow state for this user."""
        return reverse('follow_toggle', kwargs={'user_id': self.user.id})

    def get_follow_api_url(self):
        """API endpoint for follow actions on this user."""
        return reverse('follow_api', kwargs={'user_id': self.user.id})

    def save(self, *args, **kwargs):
        """Save the profile, then downscale the avatar to at most 300x300."""
        # Modernized: zero-argument super() (the file already requires 3.6+
        # via f-strings), replacing legacy super(Profile, self).
        super().save(*args, **kwargs)
        img = Image.open(self.image.path)
        if img.height > 300 or img.width > 300:
            output_size = (300, 300)
            img.thumbnail(output_size)
            img.save(self.image.path)
class Monetization(models.Model):
    """A payout record tying a user's views to an amount, bank details and
    clearing status."""
    STATUS_CHOICES = [
        ('uncleared', 'Uncleared'),
        ('Cleared', 'Cleared')
    ]
    BANK_CHOICES = [
        ('gt', 'GT Bank Plc'),
        ('first', 'First Bank Plc'),
        ('polaris', 'Polaris Bank Plc'),
        ('access', 'Access Bank Plc'),
        ('zenith', 'Zenith Bank Plc'),
    ]
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    views = models.PositiveIntegerField(blank=True, null=True)
    amount = models.PositiveIntegerField(blank=True, null=True)
    status = models.CharField(max_length=10, choices=STATUS_CHOICES, default='uncleared')
    bank = models.CharField(max_length=20, choices=BANK_CHOICES, default='gt')
    account_name = models.CharField(max_length=140)
    account_num = models.CharField(max_length=10)
    time_stamp = models.DateTimeField(default=timezone.now)
    # Unique reference generated from uuid4 and stored as text.
    transaction_ref = models.CharField(max_length=39, blank=False, unique=True, default=uuid.uuid4)

    def __str__(self):
        return '{} - {} - {} - {}'.format(str(self.user.username), self.views, self.amount, self.bank)
class UserEmailRequest(models.Model):
    """An email request sent from one user (sender) to another (receiver),
    identified by a unique UUID reference code."""
    sender = models.ForeignKey(User, on_delete=models.CASCADE, related_name='sender')
    receiver = models.ForeignKey(User, on_delete=models.CASCADE)
    # ref_code = models.CharField(max_length=39, unique=True)  # superseded by the UUIDField below
    ref_code = models.UUIDField(unique=True, default=uuid.uuid4,blank=False, editable=False)
    date_added = models.DateTimeField(default=timezone.now)

    def __str__(self):
        return '{} - {}'.format(self.sender,self.receiver)
class Suggestion_Report(models.Model):
    """Free-text suggestion or report submitted on a given date."""
    date = models.DateField(default = timezone.now)
    content = models.TextField()

    def __str__(self):
        return '{} - {}'.format(self.date, self.content)
|
# -*- coding: utf-8 -*-
import scrapy
from RongCloudChannel.items import ContentItem
from RongCloudChannel.items import AccountItem
from scrapy.http import FormRequest
from RongCloudChannel.utils import dateUtil
from RongCloudChannel.utils.accountUtil import *
class YidianzixunSpider(scrapy.Spider):
name = 'YiDianZiXun'
channel_id = '一点资讯'
loginUrl = "https://mp.yidianzixun.com/sign_in"
articleListUrl = "https://mp.yidianzixun.com/model/Article?page={}&page_size=10&status={}&has_data=1&type=original"
fanUrl = "https://mp.yidianzixun.com/api/get-fans-rate"
statusMap = {
'2,6,7': 3, #已发布
'1,4,5,14': 1, #待审核
'3': 2, #未通过
'0': 0, #草稿
'9': 9, #已删除
}
    def __init__(self):
        # Load every configured account for this channel: user -> (password, id).
        self.accountDict = getAllAccountByChannel(self.channel_id)
        #self.accountDict = {"15802103561": "P@ssword521"}
    def start_requests(self):
        """Log every configured account in through the sign-in endpoint."""
        for user, passwordAndId in self.accountDict.items():
            password, curId = passwordAndId
            formdata = {"username": user, "password": password}
            # Throttle logins so the endpoint is not hammered.
            time.sleep(3)
            yield FormRequest(self.loginUrl, method='POST',
                              formdata=formdata, callback=self.parseLoginPage,
                              meta={'formdata': formdata,
                                    'account': user,
                                    'curId': curId})
def parseLoginPage(self, response):
if response.status != 200:
print('get url error: ' + response.url)
return
account = response.meta['account']
curId = response.meta['curId']
rltJson = json.loads(response.text)
try:
cookieStr = rltJson['cookie']
tempIdx = cookieStr.find("=")
if tempIdx < 0:
print('get cookie error: ' + cookieStr)
return
cookieKey = cookieStr[0:tempIdx]
cookieVal = cookieStr[tempIdx+1:]
curCookie = {cookieKey: cookieVal}
except:
print("登录失败:" + response.text)
print(response.meta['formdata'])
####test
if isErrorAccount(self.channel_id, response.text):
#postLoginErrorAccount(self.channel_id, account)
postLoginErrorAccount(curId)
return
time.sleep(2)
yield scrapy.Request(self.fanUrl, method='GET', callback=self.parseFansPage,
cookies=curCookie, meta={'account': account})
for statusKey, statusVal in self.statusMap.items():
time.sleep(2)
yield scrapy.Request(self.articleListUrl.format(1, statusKey), method='GET',
callback=self.parseArticlePage,
cookies=curCookie,
meta={'account': account, 'cookies': curCookie,
'currentPage': 1, 'totalPage': 1, 'beginFlag': True,
'statusKey': statusKey, 'statusVal': statusVal})
def parseFansPage(self, response):
#print(response.text)
if response.status != 200:
print('get url error: ' + response.url)
return
account = response.meta['account']
rltJson = json.loads(response.text)
if 'result' in rltJson:
accountItem = AccountItem()
accountItem['channel_id'] = self.channel_id
accountItem['account_id'] = account
accountItem['record_class'] = "channel_info"
accountItem['crawl_time'] = dateUtil.getCurDate()
if 'fans_add' in rltJson['result']:
if 'fans_add' in rltJson['result']['fans_add']:
accountItem['new_subscribe_count'] = rltJson['result']['fans_add']['fans_add']
if 'fans_reduce' in rltJson['result']:
if 'fans_reduce' in rltJson['result']['fans_reduce']:
accountItem['cancel_fans_count'] = rltJson['result']['fans_reduce']['fans_reduce']
if 'fans_total' in rltJson['result']:
if 'fans_total' in rltJson['result']['fans_total']:
accountItem['total_subscribe_count'] = rltJson['result']['fans_total']['fans_total']
#print(accountItem)
yield accountItem
def parseArticlePage(self, response):
#print(response.text)
if response.status != 200:
print('get url error: ' + response.url)
return
account = response.meta['account']
cookies = response.meta['cookies']
currentPage = response.meta['currentPage']
totalPage = response.meta['totalPage']
beginFlag = response.meta['beginFlag']
statusKey = response.meta['statusKey']
statusVal = response.meta['statusVal']
rltJson = json.loads(response.text)
if beginFlag:
totalPage = rltJson['page_total']
beginFlag = False
contentList = rltJson['posts']
curTime = dateUtil.getCurDate()
for contentInfo in contentList:
contentItem = ContentItem()
contentItem['channel_id'] = self.channel_id
contentItem['account_id'] = account
contentItem['record_class'] = "content_info"
contentItem['crawl_time'] = curTime
id = ""
if 'newsId' in contentInfo:
id = contentInfo['newsId']
contentItem['id'] = id
if statusVal == 3 and len(id) > 0:
contentItem['content_link'] = "https://www.yidianzixun.com/article/" + str(id)
if 'title' in contentInfo:
contentItem['title'] = contentInfo['title']
if 'date' in contentInfo:
timeStamp = contentInfo['date']
if len(str(timeStamp)) == 13:
timeStamp = int(timeStamp/1000)
if len(str(timeStamp)) == 10:
contentItem['publish_time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timeStamp))
contentItem['publish_status'] = statusVal
if 'all_data' in contentInfo:
allData = contentInfo['all_data']
if 'clickDoc' in allData:
contentItem['read_count'] = allData['clickDoc']
if 'addCommentDoc' in allData:
contentItem['comment_count'] = allData['addCommentDoc']
if 'shareDoc' in allData:
contentItem['share_count'] = allData['shareDoc']
if 'likeDoc' in allData:
contentItem['collect_count'] = allData['likeDoc'] #收藏?
if 'viewDoc' in allData:
contentItem['recommend_count'] = allData['viewDoc']
#print(contentItem)
yield contentItem
currentPage += 1
if currentPage <= totalPage:
time.sleep(5)
yield scrapy.Request(self.articleListUrl.format(currentPage, statusKey), method='GET',
callback=self.parseArticlePage,
cookies=cookies,
meta={'account': account, 'cookies': cookies,
'currentPage': currentPage, 'totalPage': totalPage, 'beginFlag': beginFlag,
'statusKey': statusKey, 'statusVal': statusVal})
|
import itertools
from itertools import repeat
import random
import json
import numbers
import datetime
dataFramesPerSecond = 5
def smoothStep(minT, maxT, t):
    """Hermite smoothstep: maps t in [minT, maxT] onto [0, 1] with zero
    slope at both endpoints."""
    frac = (t - minT) / (maxT - minT)
    return (3.0 - 2.0 * frac) * frac * frac
def smoothInterpolate(initVal, endVal, initTime, endTime, step):
    """Yield smoothstep-eased samples from initVal to endVal, one per
    `step`, sampled at the midpoint of each frame interval."""
    span = endVal - initVal
    frames = int((endTime - initTime) / step + 0.5)
    return (span * smoothStep(initTime, endTime, initTime + (0.5 + k) * step) + initVal
            for k in range(frames))
def linearInterpolate(initVal, endVal, initTime, endTime, step):
    """Yield linearly interpolated samples from initVal to endVal, one per
    `step`, sampled at the midpoint of each frame interval."""
    span = endVal - initVal
    window = endTime - initTime
    frames = int(window / step + 0.5)
    sampleTimes = (initTime + (0.5 + k) * step for k in range(frames))
    return (span * (t - initTime) / window + initVal for t in sampleTimes)
def stepInterpolate(initVal, endVal, initTime, endTime, step):
    """Step interpolation over the frame samples.

    NOTE(review): samples are taken at initTime + (0.5 + k)*step, which is
    never exactly initTime, so the `initVal` branch is unreachable and the
    generator is effectively constant at endVal — confirm this is intended.
    """
    frames = int((endTime - initTime) / step + 0.5)
    return ((initVal if initTime + (0.5 + k) * step == initTime else endVal)
            for k in range(frames))
interpolateFuncs = [smoothInterpolate, linearInterpolate, stepInterpolate]
def genSingleLine(size, defaultValue, lineDescription):
    """Build one input track of `size` frames initialised to defaultValue,
    painting each event over its [start, end) frame range — either as a
    constant value or with a randomly chosen easing function."""
    frames = [defaultValue] * size
    for event in lineDescription:
        begin, end = event['start'], event['end']
        if event.get('interpolate', False):
            easing = random.choice(interpolateFuncs)
            frames[begin:end] = easing(event['startValue'], event['endValue'], begin, end, 1)
        else:
            frames[begin:end] = [event['value']] * (end - begin)
    return frames
def readJsonFromFile(filename):
    """Parse `filename` as JSON and return the resulting object."""
    with open(filename) as handle:
        return json.load(handle)
def writeJsonToFile(filename, jsonToWrite, indent = 4):
    """Serialize `jsonToWrite` to `filename` as indented JSON, followed by
    a trailing newline."""
    with open(filename, 'w') as sink:
        json.dump(jsonToWrite, sink, indent = indent)
        sink.write("\n")
def determineFloatNumber(durationJson):
    """Resolve a scalar spec: a plain number is returned as-is; a
    {'min': .., 'max': ..} mapping yields a uniform random draw."""
    if not isinstance(durationJson, numbers.Number):
        return random.uniform(durationJson['min'], durationJson['max'])
    return durationJson
def determineRef(refJson, previousEvents, duration):
    """Resolve a symbolic timing reference to an absolute time.

    'start'/'end' map to the situation bounds; 'prev_start'/'prev_end' map
    to the most recently scheduled event in `previousEvents`. Any other
    value yields NaN.
    """
    if refJson == 'start':
        return 0.0
    if refJson == 'end':
        return duration
    if refJson == 'prev_start':
        return previousEvents[-1]['start']
    if refJson == 'prev_end':
        prevEventInst = previousEvents[-1]
        return prevEventInst['start'] + prevEventInst['duration']
    # BUG FIX: `nan` was an undefined name here (NameError at runtime);
    # return a real NaN instead.
    return float('nan')
def determineEventTiming(startJson, previousEvents, duration):
    """Resolve an event 'start'/'end' spec to an absolute time.

    Accepts a plain number, the strings 'start'/'end' (case-insensitive),
    or a mapping with an optional 'ref' anchor plus 'min'/'max' jitter
    bounds. Unrecognised strings yield NaN.
    """
    if isinstance(startJson, numbers.Number):
        return startJson
    if isinstance(startJson, str):
        key = startJson.lower()
        # BUG FIX: `nan` was an undefined name (NameError); use float('nan').
        return 0.0 if key == 'start' else duration if key == 'end' else float('nan')
    refTiming = 0.0
    if 'ref' in startJson:
        refTiming = determineRef(startJson['ref'], previousEvents, duration)
    return refTiming + determineFloatNumber(startJson)
def genSituation(inputsJson, situationJson):
    """Instantiate one situation: resolve its event timings, then render one
    list of per-frame samples for every input, plus a constant
    'situation_id' track.

    Returns a dict mapping input id (and 'situation_id') to a list of
    int(dataFramesPerSecond * duration) samples.
    """
    duration = determineFloatNumber(situationJson['duration'])
    durationInFrames = int(dataFramesPerSecond * duration)
    # Resolve each event's absolute start and duration; later events may
    # reference earlier ones ('prev_start'/'prev_end') via determineRef.
    eventInstances = []
    for event in situationJson['events']:
        eventInst = {'start': determineEventTiming(event['start'], eventInstances, duration)}
        eventDuration = float('NaN')
        if 'end' in event:
            eventDuration = determineEventTiming(event['end'], eventInstances, duration) - eventInst['start']
        if 'duration' in event:
            # An explicit 'duration' overrides one derived from 'end'.
            eventDuration = determineFloatNumber(event['duration'])
        eventInst['duration'] = min(duration, eventDuration)
        for key, data in event.items():
            if key not in ('start', 'end', 'duration'):
                eventInst[key] = data
        eventInstances.append(eventInst)
    # Translate event instances into per-input frame-range descriptions.
    lineDescriptions = {}
    for inputSpec in inputsJson:
        inputId = inputSpec['id']
        lineDescriptions[inputId] = []
        for eventInst in eventInstances:
            if eventInst['input_id'] != inputId:
                continue
            startFrame = int(eventInst['start'] * dataFramesPerSecond)
            if inputSpec['type'] == 'real':
                # Real inputs ramp in over the first 10% of the event, drift
                # through a random midpoint, then ramp back to default over
                # the last 10%.
                startDur = eventInst['duration'] * 0.1
                endStart = eventInst['duration'] * 0.9
                midPoint = random.uniform(startDur, endStart)
                rampInEnd = int((eventInst['start'] + startDur) * dataFramesPerSecond)
                midFrame = int((eventInst['start'] + midPoint) * dataFramesPerSecond)
                rampOutStart = int((eventInst['start'] + endStart) * dataFramesPerSecond)
                endFrame = int((eventInst['start'] + eventInst['duration']) * dataFramesPerSecond)
                lastValue = inputSpec['default']
                for segStart, segEnd in ((startFrame, rampInEnd),
                                         (rampInEnd, midFrame),
                                         (midFrame, rampOutStart)):
                    nextValue = determineFloatNumber(eventInst['value'])
                    lineDescriptions[inputId].append(
                        {'start': segStart, 'end': segEnd, 'interpolate': True,
                         'startValue': lastValue, 'endValue': nextValue})
                    lastValue = nextValue
                lineDescriptions[inputId].append(
                    {'start': rampOutStart, 'end': endFrame, 'interpolate': True,
                     'startValue': lastValue, 'endValue': inputSpec['default']})
            else:
                # Discrete inputs hold their value for the whole event.
                # BUG FIX: int() previously wrapped only the time sum, not
                # the product with dataFramesPerSecond, truncating event
                # ends to whole-second boundaries (inconsistent with every
                # other frame conversion in this function).
                endFrame = int((eventInst['start'] + eventInst['duration']) * dataFramesPerSecond)
                lineDescriptions[inputId].append(
                    {'start': startFrame, 'end': endFrame, 'value': eventInst['value']})
    # Render every input line plus the constant situation_id track.
    resultingData = {}
    for inputSpec in inputsJson:
        resultingData[inputSpec['id']] = genSingleLine(
            size=durationInFrames, defaultValue=inputSpec['default'],
            lineDescription=lineDescriptions[inputSpec['id']])
    resultingData['situation_id'] = genSingleLine(
        size=durationInFrames, defaultValue=situationJson['id'], lineDescription=[])
    return resultingData
def determineIntNumber(numberJson):
    """Resolve an integer spec: plain numbers pass through; a
    {'min','max'} mapping yields a uniform random integer (inclusive)."""
    if not isinstance(numberJson, numbers.Number):
        return random.randint(numberJson['min'], numberJson['max'])
    return numberJson
def genSituationSequence(situationsJson, aversiveID, numAversive, aversiveSeparation, length):
    """Build a sequence of `length` situation ids: random non-aversive ids
    with the aversive id inserted at randomly separated positions.

    Uses rejection sampling: the whole sequence is regenerated until exactly
    `numAversive` aversive slots were placed.
    """
    nonAversiveIDs = [sitJson['id'] for sitJson in situationsJson if sitJson['id'] != aversiveID]
    numAversive = determineIntNumber(numAversive)
    genAversive = 0
    # Initial draw; only kept if numAversive resolved to 0.
    resultSeq = [random.choice(nonAversiveIDs) for x in range(length)]
    while genAversive != numAversive:
        # numAversive is already an int here, so this re-resolution is a
        # no-op; a fresh sequence is drawn for every attempt.
        numAversive = determineIntNumber(numAversive)
        genAversive = 0
        currentAversive = determineIntNumber(aversiveSeparation)
        resultSeq = [random.choice(nonAversiveIDs) for x in range(length)]
        # Walk forward in random strides, overwriting slots with the
        # aversive id.
        while currentAversive < length:
            resultSeq[currentAversive] = aversiveID
            currentAversive += determineIntNumber(aversiveSeparation)
            genAversive += 1
    # NOTE(review): if numAversive is unreachable for the given
    # aversiveSeparation and length, this loop never terminates — confirm
    # the configured bounds.
    return resultSeq
def genAndExportSituationSequence(filename, situationFileJson):
    """Generate one aversive-spaced situation sequence with fixed parameters
    and write it, together with its generation metadata, to `filename`."""
    aversiveID=0
    numAversive=28
    aversiveSeparation={'min' : 4, 'max' : 50}
    length=553
    situationSequence = genSituationSequence(situationFileJson['situations'], aversiveID=aversiveID, numAversive=numAversive, aversiveSeparation=aversiveSeparation, length=length)
    # NOTE: 'aversiveSeperation' (sic) is the historical key name in the
    # exported genParams; kept misspelled for output compatibility.
    outJson = {
        'situations': situationFileJson['situations'],
        'genParams': {'aversiveID' : aversiveID, 'numAversive' : numAversive,
                      'aversiveSeperation' : aversiveSeparation, 'length' : length},
        'sequence': situationSequence,
        'genDateTime': datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
    }
    writeJsonToFile(filename, outJson)
def appendSituationInstance(situationInstanceSequence, newSituationInstance):
    """Concatenate the frames of `newSituationInstance` onto each track
    already present in `situationInstanceSequence` (in-place)."""
    for trackID, frames in situationInstanceSequence.items():
        frames.extend(newSituationInstance[trackID])
def genSituationSequenceInstance(situationSequence, situationFileJson):
    """Render every situation id in `situationSequence` and concatenate the
    resulting frame tracks into one long instance."""
    inputsJson = situationFileJson['inputs']
    # Seed with a zero-duration situation so every track key exists.
    combined = genSituation(inputsJson, {'duration': 0.0, 'events': [], 'id': -1})
    for situationID in situationSequence:
        rendered = genSituation(inputsJson, situationFileJson['situations'][situationID])
        appendSituationInstance(combined, rendered)
    return combined
def genAndExportSituationSequenceInstance(outputFilename, situationFileJson, situationSequenceJson):
    """Render the given situation sequence and write the instance, along
    with both source documents and a timestamp, to `outputFilename`."""
    payload = {
        'situationFileJson': situationFileJson,
        'situationSequenceJson': situationSequenceJson,
        'sequenceInstance': genSituationSequenceInstance(situationSequenceJson['sequence'], situationFileJson),
        'genDateTime': datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
    }
    writeJsonToFile(outputFilename, payload)
if __name__ == '__main__':
    # Load the situation definitions and a pre-generated sequence, then
    # render ten independent instances (sitSeq_0.json .. sitSeq_9.json).
    situationFileJson = readJsonFromFile("situations.json")
    situationSequenceJson = readJsonFromFile('situationSequence.json')
    for i in range(10):
        genAndExportSituationSequenceInstance('sitSeq_'+str(i)+'.json', situationFileJson, situationSequenceJson)
|
__version__ = '0.0.3'
from famplex.api import *
from famplex.load import *
|
# coding=utf-8
from ..base.privileges import PrivilegeBase, PrivilegeItem, PrivilegeGroup
class CDNPrivilege(PrivilegeBase):
    """Declares the CDN privilege group: admin of the group itself plus
    view/manage rights over CDN resources."""
    __type__ = PrivilegeGroup.normal
    # Item descriptions are user-facing strings (Chinese) and must not be
    # altered.
    __privileges__ = (
        PrivilegeItem('admin', u'管理 CDN 权限的分配'),
        PrivilegeItem('view_cdn', u'查看 CDN 信息'),
        PrivilegeItem('manage_cdn', u'进行 CDN 管理操作'),
    )
    __privilege_name__ = 'cdn'
    __privilege_alias__ = 'CDN'
|
#!/usr/bin/env python3
''' DESCRIPTION
This is a simple script designed to make VPN management easier.
'''
from subprocess import run
from getpass import getpass
from os import geteuid
from tempfile import NamedTemporaryFile as ntf
from vpman_common import fatal as fatal
from vpman_config import main as vpconf
from vpman_select import main as vppick
def ovpn(conf, auth):
    '''Takes the location of a config file and a password
    and runs openvpn. Automatically reconnects if the connection
    is dropped.'''
    # NOTE(review): the docstring says openvpn but the binary invoked is
    # "ovpn" — confirm that wrapper exists on target systems.
    while True:
        # run() blocks until the process exits; the loop restarts it,
        # which implements the auto-reconnect behaviour.
        run(["ovpn", "--config", conf, "--auth-user-pass", auth])
def ask_echo():
    '''Ask if the user wants their password echoed or not.'''
    # Re-prompt until an explicit yes/no answer is given.
    while True:
        answer = input("Echo Password? [y/n]: ")
        if answer in ("y", "Y"):
            return True
        if answer in ("N", "n"):
            return False
def get_input(echo=True, msg=""):
    '''Print `msg`, then read one line of user input; when `echo` is false
    the input is read without echoing (via getpass).'''
    print(msg)
    # BUG FIX: the old `if echo == True` / `if echo == False` pair silently
    # returned None for any non-bool value of `echo`; branch on truthiness.
    if echo:
        return input()
    return getpass(prompt="")
def check_root():
    '''Abort via fatal() unless running with root privileges.'''
    # BUG FIX: geteuid() returns an int, so comparing against the string
    # "0" was always unequal and fatal() fired even when running as root.
    if geteuid() != 0:
        fatal("This application needs to be run as root.")
def get_auth():
    '''Return a temporary file containing the user's
    name and password.'''
    username = get_input(msg="VPN username?")
    password = get_input(echo=ask_echo(), msg="VPN password?")
    # Return the open handle so the file outlives this function; flush so
    # the credentials are readable immediately.
    credentials = ntf(mode="w")
    credentials.writelines([username, "\n", password, "\n"])
    credentials.flush()
    return credentials
''' # Uncomment when finished.
auth = get_auth() # Make a temp file with username and password
ovpn(conf=conf, auth=auth.name) # Run openvpn
# '''
# Entry point: load the VPN configuration, then let the user pick one.
data = vpconf()
vppick(data)
"""
Este modulo contiene todo lo relevante a HttpResponse. El objeto HttpResponse que se usa para simbolizar
y el metodo render_http_response que sirve para transformar un objeto HttpResponse en un string que luego
se puede enviar al socket
"""
import logging
from codigos_de_estado import CODIGOS_ESTADO_HTTP
Log = logging.getLogger('StarLord.response')
class HttpResponse(object):
    """Represents an HTTP response: status line, headers, optional inline
    content and/or a file to stream to the client."""

    def __init__(self, protocolo, codigo_de_estado):
        # The status code must be one we know a reason phrase for.
        assert codigo_de_estado in CODIGOS_ESTADO_HTTP, '!!----Status Code Desconocido----!!'
        self.protocolo = protocolo                 # protocol version, e.g. 'HTTP/1.1'
        self.codigo_de_estado = codigo_de_estado   # numeric status code
        self.headers = {}                          # response headers
        self.content = ''                          # inline body content
        self.file = None                           # optional file to stream

    def __str__(self):
        # BUG FIX: this previously said 'HttpRequest' even though the class
        # is a response object.
        return 'HttpResponse (protocolo=%s, codigo_de_estado=%s)' % \
            (self.protocolo, self.codigo_de_estado)

    def write_to(self, output):
        """Send this response over `output` (a socket): the rendered header
        message first, then the file body, if one was set."""
        if self.file:
            # Describe the payload before sending it.
            self.headers['Content-type'] = self.file.mime_type
            self.headers['Content-Length'] = self.file.tamanno
            self.headers['Accept-Ranges'] = 'bytes'
        # Render the full response message and push it to the socket.
        mensaje_response = renderizar_http_response(self)
        output.sendall(mensaje_response)
        # The header block has told the client what to expect; now stream
        # the file itself (see file_system.file.File.stream_to).
        if self.file:
            self.file.stream_to(output)
#metodo que recibe un HttpResponse como parametro y devuelve un String
#con el response
def renderizar_http_response(response):
    """Render an HttpResponse into the raw message string: status line,
    header lines, a blank separator line, then the body (if any)."""
    ret_val = []
    # Status line: "<protocol> <code> <reason>"; the reason phrase comes
    # from the CODIGOS_ESTADO_HTTP table.
    response_line = '%s %s %s' % (response.protocolo, response.codigo_de_estado,
                                  CODIGOS_ESTADO_HTTP[response.codigo_de_estado][0])
    ret_val.append(response_line)
    # One "Key: value" line per header. FIX: dict.iteritems() exists only
    # on Python 2; .items() yields the same pairs and works on both 2 and 3.
    for key, value in response.headers.items():
        header_line = '%s: %s' % (key, value)
        ret_val.append(header_line)
    # Blank line terminates the header block.
    ret_val.append('')
    if response.content:
        ret_val.append(response.content)
    else:
        ret_val.append('')
    # NOTE(review): HTTP/1.1 specifies CRLF line endings; '\n' is kept to
    # preserve existing behaviour — confirm clients accept it.
    return '\n'.join(ret_val)
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2022, Network to Code (@networktocode) <info@networktocode.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""Jdiff Action Plugin for jdiff library."""
from __future__ import absolute_import, division, print_function
from ansible.errors import AnsibleError
from ansible.module_utils.six import raise_from
from ansible.plugins.action import ActionBase
try:
from jdiff import CheckType, extract_data_from_json
except ImportError as imp_exc:
JDIFF_IMPORT_ERROR = imp_exc
else:
JDIFF_IMPORT_ERROR = None
__metaclass__ = type
def main(args):
    """Validate the task arguments, run the requested jdiff check, and
    return a dict with `success` and `fail_details`.

    Raises AnsibleError for invalid arguments, unsupported check types, or
    any failure inside the jdiff backend.
    """
    if "evaluate_args" not in args:
        raise AnsibleError("Invalid arguments, 'evaluate_args' not found.")
    check_type = args.get('check_type')
    evaluate_args = args.get('evaluate_args')
    if not isinstance(evaluate_args, dict):
        raise AnsibleError(f"'evaluate_args' invalid type, expected <class 'dict'>, got {type(evaluate_args)}")
    if "value_to_compare" not in evaluate_args:
        raise AnsibleError("Key 'value_to_compare' missing in 'evaluate_arguments'.")
    reference_data = evaluate_args.get("reference_data")
    value = evaluate_args['value_to_compare']
    jpath = args.get('jmespath', '*')
    exclude = args.get('exclude')
    try:
        check = CheckType.create(check_type)
        # Narrow both sides of the comparison to the requested JMESPath.
        evaluate_args['value_to_compare'] = extract_data_from_json(value, jpath, exclude)
        if reference_data:
            evaluate_args['reference_data'] = extract_data_from_json(reference_data, jpath, exclude)
        eval_results, passed = check.evaluate(**evaluate_args)
    except NotImplementedError as exc:
        # FIX: chain the original exception so tracebacks keep the cause.
        raise AnsibleError(f"CheckType '{check_type}' not supported by jdiff") from exc
    except Exception as e:
        raise AnsibleError(f"Exception in backend jdiff library: {e}") from e
    return dict(
        success=passed,
        fail_details=eval_results,
    )
class ActionModule(ActionBase):
    """Ansible Action Module to interact with jdiff.

    Args:
        ActionBase (ActionBase): Ansible Action Plugin
    """

    def run(self, tmp=None, task_vars=None):
        """Run of action plugin for interacting with jdiff.

        Args:
            tmp ([type], optional): [description]. Defaults to None.
            task_vars ([type], optional): [description]. Defaults to None.
        """
        # Fail fast if the module-level jdiff import did not succeed.
        if JDIFF_IMPORT_ERROR:
            raise_from(
                AnsibleError("jdiff library must be installed to use this plugin"),
                JDIFF_IMPORT_ERROR,
            )
        self._supports_check_mode = True
        self._supports_async = False
        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp is unused beyond the base-class call
        if result.get("skipped"):
            return None
        if result.get("invocation", {}).get("module_args"):
            # avoid passing to modules in case of no_log
            # should not be set anymore but here for backwards compatibility
            del result["invocation"]["module_args"]
        args = self._task.args
        return main(args=args)
|
import threadingutil
import sys
import random
import time
random.seed(0)
def f(v): # this function must be declared at global scope, in order to make it visible to subprocess.
    """Square `v` after sleeping a random 0-2 s to simulate work."""
    delay = random.random() * 2.0
    time.sleep(delay)
    return v * v
if __name__ == '__main__':
    # NOTE: this script is Python 2 (print statements, xrange).
    usage = "Usage: testthreadingutil.py [NUMWORKER [INPUTSIZE]]"
    # Defaults used when no CLI arguments are supplied.
    numWorker = 4
    inputSize = 30
    if len(sys.argv) >= 2:
        if sys.argv[1] == "-h":
            print usage
            sys.exit(0)
        numWorker = int(sys.argv[1])
    if len(sys.argv) >= 3:
        inputSize = int(sys.argv[2])
    if len(sys.argv) >= 4:
        # Too many arguments: show usage and fail.
        print usage
        sys.exit(1)

    def genargslist(size):
        # Lazily yield one single-element argument tuple per work item.
        for v in xrange(size):
            yield ( v, )

    t1 = time.time()
    #for index, result in threadingutil.multithreading_iter(f, [ args for args in genargslist(inputSize) ], numWorker):
    # Fan the squaring task out across numWorker threads; results stream
    # back as (index, result) pairs.
    for index, result in threadingutil.multithreading_iter(f, genargslist(inputSize), numWorker):
        print "index = ", index, ", result = ", result
    print
    print "NUMWORKER = %d, INPUTSIZE = %d" % ( numWorker, inputSize )
    print "elapsed time: %g" % (time.time() - t1)
|
import numpy as np
import math
from lerArquivo import *
from multistart import multiStart
if __name__ == '__main__':
    # Prefer the argv-driven loader; fall back to the interactive one when
    # no CLI argument was supplied (lerArquivo2 raises IndexError then).
    try:
        matrix, size = lerArquivo2()
    except IndexError:
        matrix, size = lerArquivo()
    multiStart(matrix, size)
|
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated
from .models import Project, Resource
from .serializers import ProjectListItemSerializer, ProjectDetailsSerializer, ResourceSerializer
from .permissions import IsAuthorOrReadOnly
class ProjectViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Project; writes are restricted to the author."""
    queryset = Project.objects.all()
    permission_classes = (IsAuthenticated,IsAuthorOrReadOnly)

    def get_serializer_class(self):
        # Full details on retrieve; a lighter serializer everywhere else.
        if self.action == 'retrieve':
            return ProjectDetailsSerializer
        return ProjectListItemSerializer

    def perform_create(self, serializer):
        # Stamp the requesting user as the project's author on creation.
        serializer.save(author=self.request.user)
class ResourceViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Resource; any authenticated user may access."""
    queryset= Resource.objects.all()
    serializer_class = ResourceSerializer
    permission_classes = (IsAuthenticated,)
# Sphinx configuration for the beets documentation build.
AUTHOR = 'Adrian Sampson'

# General configuration
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.extlinks']
exclude_patterns = ['_build']
source_suffix = '.rst'
master_doc = 'index'

project = 'beets'
copyright = '2016, Adrian Sampson'
version = '1.6'      # short X.Y version
release = '1.6.1'    # full release string
pygments_style = 'sphinx'

# External links to the bug tracker and other sites.
extlinks = {
    'bug': ('https://github.com/beetbox/beets/issues/%s', '#%s'),
    'user': ('https://github.com/%s', '%s'),
    'pypi': ('https://pypi.org/project/%s/', '%s'),
    'stdlib': ('https://docs.python.org/3/library/%s.html', '%s'),
}

# URLs the linkcheck builder should skip (local, rate-limited, or
# bot-blocking hosts).
linkcheck_ignore = [
    r'https://github.com/beetbox/beets/issues/',
    r'https://github.com/[^/]+$',  # ignore user pages
    r'.*localhost.*',
    r'https?://127\.0\.0\.1',
    r'https://www.musixmatch.com/',  # blocks requests
    r'https://genius.com/',  # blocks requests
]

# Options for HTML output
htmlhelp_basename = 'beetsdoc'

# Options for LaTeX output
latex_documents = [
    ('index', 'beets.tex', 'beets Documentation',
     AUTHOR, 'manual'),
]

# Options for manual page output
man_pages = [
    ('reference/cli', 'beet', 'music tagger and library organizer',
     [AUTHOR], 1),
    ('reference/config', 'beetsconfig', 'beets configuration file',
     [AUTHOR], 5),
]
|
from transformers import BertModel, BertTokenizer
import torch
import os
import numpy as np
import pandas as pd

# Script: embed a ~0.2% random sample of each class of Amazon book reviews
# with BERT's [CLS] vector, label them (minority=1, majority=0), save the
# full matrix to CSV, and emit train/valid/test index files.

# create model
tokenizer = BertTokenizer.from_pretrained('bert-large-cased')
model = BertModel.from_pretrained('bert-large-cased')

# read data
root = '/home/alan/Downloads/imbalanced/amz_review'
labels = []

# minority data
cat0_num = 738943  # total line count of amazon_books_cat0.txt
index = np.arange(cat0_num)
np.random.shuffle(index)  # NOTE(review): no fixed seed — the sample is not reproducible
index = set(index.tolist()[:int(cat0_num * 0.002)])  # keep ~0.2% of rows
cat0 = []
with open(os.path.join(root, 'amazon_books_cat0.txt')) as f:
    i = 0
    for line in f:
        print("line: {}/{} {:.2f}%".format(i, cat0_num, i/cat0_num*100), end='\r')
        if i in index:
            # NOTE(review): max_length without truncation=True may warn or
            # not truncate on newer transformers versions — confirm.
            tokens = tokenizer.encode(line, add_special_tokens=True, max_length=512)
            input_ids = torch.tensor([tokens])
            with torch.no_grad():
                last_hidden_state = model(input_ids)[0]
            embedding = last_hidden_state[:, 0, :]  # [CLS] token vector
            cat0.append(embedding)
        i += 1
cat0 = torch.cat(cat0, dim=0)
labels += [1] * len(cat0)  # minority class labelled 1
print()
print(cat0.shape, len(labels))

# majority class
cat1_num = 7203909  # total line count of amazon_books_cat1.txt
index = np.arange(cat1_num)
np.random.shuffle(index)
index = set(index.tolist()[:int(cat1_num * 0.002)])
cat1 = []
with open(os.path.join(root, 'amazon_books_cat1.txt')) as f:
    i = 0
    for line in f:
        print("line: {}/{} {:.2f}%".format(i, cat1_num, i/cat1_num*100), end='\r')
        if i in index:
            tokens = tokenizer.encode(line, add_special_tokens=True, max_length=512)
            input_ids = torch.tensor([tokens])
            with torch.no_grad():
                last_hidden_state = model(input_ids)[0]
            embedding = last_hidden_state[:, 0, :]
            cat1.append(embedding)
        i += 1
cat1 = torch.cat(cat1, dim=0)
labels += [0] * len(cat1)  # majority class labelled 0
print()
print(cat1.shape, len(labels)-len(cat0))

# Stack embeddings and append the label as the final column.
x = torch.cat((cat0, cat1), dim=0)
labels = torch.from_numpy(np.array(labels).reshape(-1, 1))
print(x.shape, labels.shape)
data = torch.cat((x, labels), dim=1).numpy()
print(data.shape)

# save
pd.DataFrame(data).to_csv(os.path.join(root, 'books2.csv'), header=None, index=False)

# train valid test split (60/10/30 by shuffled row index)
x, labels = data[:, :-1], data[:, -1]
train_ratio = 0.6
valid_ratio = 0.1
index = np.arange(len(data))
np.random.shuffle(index)
train_index = index[:int(len(index)*train_ratio)]
valid_index = index[int(len(index)*train_ratio):int(len(index)*(train_ratio+valid_ratio))]
test_index = index[int(len(index)*(train_ratio+valid_ratio)):]

# print class distribution
from collections import Counter  # NOTE(review): mid-file import; conventionally belongs at the top
print(Counter(labels[train_index]))
print(Counter(labels[valid_index]))
print(Counter(labels[test_index]))
pd.DataFrame(train_index).to_csv(os.path.join(root, 'books2_{}_train_idx.csv'.format(train_ratio)), header=None, index=False)
pd.DataFrame(valid_index).to_csv(os.path.join(root, 'books2_{}_valid_idx.csv'.format(train_ratio)), header=None, index=False)
pd.DataFrame(test_index).to_csv(os.path.join(root, 'books2_{}_test_idx.csv'.format(train_ratio)), header=None, index=False)
import re
import sys
def nhx_to_newick(input_tree, output_tree):
    """Strip NHX-style bracketed annotations ("[...]") from the first line
    of `input_tree` and write the plain Newick string to `output_tree`."""
    # FIX: read through a context manager so the input handle is always
    # closed (the old open(...).readlines() leaked the file object), and
    # use a raw string for the regex (non-raw "\[" is an invalid-escape
    # warning on modern Python).
    with open(input_tree) as reader:
        first_line = reader.readline()
    res = re.sub(r"\[.*?\]", "", first_line.replace("\n", ""))
    with open(output_tree, "w") as writer:
        writer.write(res)
if (__name__ == "__main__"):
    # CLI: nhx_to_newick.py <input_tree> <output_tree>
    if (len(sys.argv) != 3):
        print("Syntax: input_tree output_tree")
        exit(1)
    nhx_to_newick(sys.argv[1], sys.argv[2])
|
import pandas as pd
import pdb
import os
import numpy as np
from scipy import interpolate
import matplotlib.pyplot as plt
from .helpers import unique_output_filename
def load_ADL(fname):
    """Read an ADL object-annotation file (space-separated, headerless)
    into a DataFrame with named columns."""
    column_names = [
        "objectTrackID",
        "x1",
        "y1",
        "x2",
        "y2",
        "frameNumber",
        "active",
        "objectLabel",
    ]
    return pd.read_csv(
        fname,
        sep=' ',
        header=None,
        names=column_names,
        engine='python',
        index_col=False,
    )
def ADL_to_MOT(ADL):
    """Convert ADL annotations to the MOT layout.

    Coordinates are doubled (presumably rescaling to full resolution —
    confirm against the source videos) and fixed Confidence/ClassId/
    Visibility columns are filled in.
    """
    mot_columns = ['FrameId', 'Id', 'X', 'Y', 'Width', 'Height',
                   'Confidence', 'ClassId', 'Visibility']
    converted = pd.DataFrame(columns=mot_columns)
    converted["X"] = ADL["x1"] * 2
    converted["Y"] = ADL["y1"] * 2
    converted["Width"] = (ADL["x2"] - ADL["x1"]) * 2
    converted["Height"] = (ADL["y2"] - ADL["y1"]) * 2
    converted["FrameId"] = ADL["frameNumber"]
    converted["Id"] = ADL["objectTrackID"]
    converted["Confidence"] = 1
    converted["ClassId"] = 1  # this could be changed
    converted["Visibility"] = 1
    return converted
def load_MOT(fname):
    """Read a space-separated, headerless MOT annotation file into a
    DataFrame with the standard MOT column names."""
    return pd.read_csv(
        fname,
        sep=' ',
        header=None,
        names=['FrameId', 'Id', 'X', 'Y', 'Width', 'Height',
               'Confidence', 'ClassId', 'Visibility'],
        engine='python',
        index_col=False,
    )
def remove_intermediate_frames(gt, pred):
    """
    Keep only the prediction rows whose FrameId also appears in the ground
    truth. Both inputs are DataFrames; a filtered view of `pred` is
    returned.
    """
    annotated_frames = gt["FrameId"].unique()
    return pred[pred["FrameId"].isin(annotated_frames)]
def run_remove_imovable():
    """Batch job: for the 20 ADL recordings, strip imovable-object tracks
    from both ground truth and predictions, drop prediction frames without
    ground truth, and write the filtered files out to fixed paths."""
    WRITE_OUT_GTS = True
    PRED_OUT = "/usr0/home/drussel1/dev/TTM/TTM/data/CVPR/no_imovables/not_shifted_BAD"
    if not os.path.isdir(PRED_OUT):
        os.system("mkdir -p {}".format(PRED_OUT))
    for i in range(1, 21):
        GT_IN = "/home/drussel1/data/ADL/ADL_annotations/just_object_annotations/object_annot_P_{:02d}.txt".format(
            i)
        PRED_IN = "/usr0/home/drussel1/dev/TTM/TTM/data/CVPR/scaled_wrong/not_shifted/P_{:02d}.txt".format(
            i)
        ADL_gt = load_ADL(GT_IN)
        pred = load_MOT(PRED_IN)
        # TODO filter these again to get only the ones which line up
        gt, pred = remove_imovable_objects(ADL_gt, pred)
        pred = remove_intermediate_frames(gt, pred)
        pred.to_csv(
            os.path.join(
                PRED_OUT,
                "P_{:02d}.txt".format(i)),
            sep=" ",
            header=False,
            index=False)
        if WRITE_OUT_GTS:
            # Mirror the MOT ground-truth directory layout: <seq>/gt/gt.txt.
            GT_OUT = "/usr0/home/drussel1/dev/TTM/TTM/data/CVPR/no_imovables/gt/P_{:02d}/gt/".format(
                i)
            if not os.path.isdir(GT_OUT):
                os.system("mkdir -p {}".format(GT_OUT))
            gt.to_csv(
                os.path.join(
                    GT_OUT,
                    "gt.txt"),
                sep=" ",
                header=False,
                index=False)  # don't write a header or index column
def remove_imovable_objects(gt_ADL, pred):
    """
    Drop every track belonging to an imovable object class.

    Takes ground truth in ADL format and predictions in MOT format,
    removes all tracks whose ADL label is in the imovable list from both,
    and returns (ground truth converted to MOT, filtered predictions).
    Mismatched track-id sets between the two are reported but not fatal.
    """
    IMOVABLE_TYPES = {'bed',
                      'keyboard',
                      'tap',
                      'oven/stove',
                      'thermostat',
                      'person',
                      'blanket',
                      'tv',
                      'microwave',
                      'door',
                      'trash_can',
                      'fridge',
                      'washer/dryer',
                      'monitor'}
    imovable_mask = gt_ADL["objectLabel"].isin(IMOVABLE_TYPES)
    if sorted(gt_ADL["objectTrackID"].unique()) != sorted(pred["Id"].unique()):
        print("Detected inequality in IDs in the first stage")
    # Keep only movable tracks (~ negates the boolean mask).
    gt_ADL = gt_ADL[~imovable_mask]
    movable_ids = gt_ADL["objectTrackID"].unique()
    pred = pred[pred["Id"].isin(movable_ids)]
    if sorted(gt_ADL["objectTrackID"].unique()) != sorted(pred["Id"].unique()):
        print("Detected inequality in IDs in the second stage")
    return ADL_to_MOT(gt_ADL), pred
def get_all_classes():
    """
    Collect every object label used across the 20 ADL annotation files and
    report which of them are not in the hand-picked `movable` set.

    these are {'oven/stove', 'shoe', 'kettle', 'tv', 'microwave', 'food/snack',
    'person', 'towel', 'thermostat', 'vacuum', 'comb', 'tooth_paste', 'cloth', 'cell_phone', 'container', 'pills', 'bottle', 'laptop', 'elec_keys', 'mop', 'detergent', 'monitor', 'tap', 'knife/spoon/fork', 'trash_can', 'blanket', 'washer/dryer', 'keyboard', 'tv_remote', 'book', 'shoes', 'bed', 'dish', 'door', 'basket', 'electric_keys', 'milk/juice', 'tooth_brush', 'pan', 'mug/cup', 'large_container', 'cell', 'dent_floss', 'pitcher', 'perfume', 'tea_bag', 'fridge', 'soap_liquid'}
    The ones that it seems reasonable to track are the `movable` set below.
    """
    unique_objects = set()
    for i in range(1, 21):
        GT_IN = "/home/drussel1/data/ADL/ADL_annotations/just_object_annotations/object_annot_P_{:02d}.txt".format(
            i)
        gt = load_ADL(GT_IN)
        new_objects = gt["objectLabel"].unique().tolist()
        unique_objects.update(new_objects)
    # Hand-picked classes considered worth tracking.
    movable = set(['kettle',
                   'shoe',
                   'food/snack',
                   'towel',
                   'vacuum',
                   'comb',
                   'tooth_paste',
                   'cloth',
                   'cell_phone',
                   'container',
                   'pills',
                   'bottle',
                   'laptop',
                   'elec_keys',
                   'mop',
                   'detergent',
                   'knife/spoon/fork',
                   'tv_remote',
                   'book',
                   'shoes',
                   'basket',
                   'electric_keys',
                   'milk/juice',
                   'tooth_brush',
                   'pan',
                   'mug/cup',
                   'large_container',
                   'cell',
                   'dent_floss',
                   'pitcher',
                   'perfume',
                   'tea_bag',
                   'dish',
                   'soap_liquid'])
    not_movable = unique_objects - movable
    print(not_movable)
    combination = movable | not_movable
    # BUGFIX: removed a leftover pdb.set_trace() debug breakpoint that halted
    # every run here. Sanity check: every observed label must be accounted
    # for once the movable/not-movable partition is recombined.
    assert combination == unique_objects
def interpolate_MOT(df, method="cubic"):
    """
    fill in the blanks between frames

    df : pd.dataframe
        The MOT annotations
    method : str
        The interpolation method, forwarded to interpolate_track
        ("linear" or "cubic")

    Returns the interpolated annotations with the original column order.
    """
    interpolated_tracks = []
    track_IDs = df['Id']
    original_columns = df.columns
    # Interpolate each track independently, then recombine.
    for track_ID in track_IDs.unique():
        # Boolean mask selecting the rows belonging to this track.
        track_inds = (track_IDs == track_ID).values
        one_track = df.iloc[track_inds, :]
        interpolated_tracks.append(interpolate_track(one_track, method=method,
                                                     vis=False))
    all_tracks = pd.concat(interpolated_tracks, sort=False)
    # rearange the columns so it's consistent with the input ordering
    all_tracks = all_tracks[original_columns]
    print("The number of rows increased by a factor of {:.2f}".format(
        len(all_tracks) / len(df)))
    return all_tracks
def interpolate_track(track, method="cubic", vis=True,
                      vis_chance=0.01, longest_break=30, output_folder="data/vis/interpolation"):
    """
    Interpolate a dataframe containing a single track.

    track : pd.DataFrame
        A dataframe containing a single track (assumed sorted by FrameId)
    method : str
        Interpolation method, "linear" or "cubic"
    vis : bool
        If True, show the visualization interactively; otherwise save to disk
    vis_chance : float
        Probability of visualizing any given track
    longest_break : int
        The longest frame gap to fill by interpolation; longer gaps split the
        track into independently interpolated sections
    output_folder : str
        Where to write the visualizations

    Returns
    -------
    pd.DataFrame
        The track with every intermediate frame filled in.

    Raises
    ------
    ValueError
        If `method` is unknown or the per-track-constant columns vary.
    """
    # A single detection (or empty track) cannot be interpolated.
    if len(track) <= 1:
        return track
    frame_Ids = track['FrameId'].values
    interpolated_dists = np.diff(frame_Ids)
    long_breaks = interpolated_dists > longest_break
    if np.any(long_breaks):
        # Split at every long gap and recurse on each piece, so boxes are not
        # hallucinated across long occlusions.
        long_break_locs = np.where(long_breaks)[0]
        # +1 because np.diff shifts indices back by one relative to `track`.
        split_tracks = np.array_split(track, long_break_locs + 1)
        interpolated_subsections = []
        for sub_track in split_tracks:
            # BUGFIX: propagate output_folder into the recursion; previously a
            # caller-supplied folder was silently ignored for sub-sections.
            interpolated_sub = interpolate_track(sub_track,
                                                 method=method,
                                                 vis=vis,
                                                 vis_chance=vis_chance,
                                                 longest_break=longest_break,
                                                 output_folder=output_folder)
            interpolated_subsections.append(interpolated_sub)
        concatenated_tracks = pd.concat(interpolated_subsections, sort=False)
        return concatenated_tracks
    else:
        start = np.min(frame_Ids)
        end = np.max(frame_Ids)
        # The places we'll interpolate: every frame in the span, inclusive.
        sampling_locations = np.arange(start, end + 1)
        # Interpolate the corner coordinates (x1, y1, x2, y2) rather than
        # width/height directly.
        X1 = track['X'].values
        Y1 = track['Y'].values
        X2 = X1 + track['Width'].values
        Y2 = Y1 + track['Height'].values
        locs = np.vstack((X1, Y1, X2, Y2)).transpose()
        if method == "linear":
            f = interpolate.interp1d(frame_Ids, locs, axis=0)
        elif method == "cubic":
            f = interpolate.CubicSpline(frame_Ids, locs)
        else:
            raise ValueError(
                "Method : {} has not been implemented".format(method))
        interpolated = f(sampling_locations)
        # Occasionally visualize the interpolation for spot checking.
        if (np.random.rand() < vis_chance):
            plt.clf()
            for i in range(4):
                plt.plot(sampling_locations, interpolated[:, i])
                plt.scatter(frame_Ids, locs[:, i])
            plt.legend(["x1", "y1", "x2", "y2"])
            plt.xlabel("Frame number")
            plt.ylabel("Pixel location")
            format_str = method + "_interpolation_{:03d}.png"
            vis_filename = unique_output_filename(output_folder, format_str)
            if vis:
                plt.pause(2)
            else:
                plt.savefig(vis_filename)
        # Convert the interpolated corners back to x/y/width/height.
        X1 = interpolated[:, 0]
        Y1 = interpolated[:, 1]
        W = interpolated[:, 2] - X1
        H = interpolated[:, 3] - Y1
        interpolated_track = pd.DataFrame({"X": X1.astype(int),
                                           "Y": Y1.astype(int),
                                           "Width": W.astype(int),
                                           "Height": H.astype(int)})
        confidence = track.Confidence.unique()
        class_ID = track.ClassId.unique()
        visibility = track.Visibility.unique()
        Id = track.Id.unique()
        # BUGFIX: the last clause previously read `len(Id)` (truthy for any
        # non-empty track) instead of `len(Id) == 1`, so duplicate Ids were
        # never detected.
        if not (len(confidence) == 1 and len(class_ID) == 1
                and len(visibility) == 1 and len(Id) == 1):
            raise ValueError(
                "There is variation in the track-constant columns over the course of the track")
        interpolated_track["Confidence"] = confidence[0]
        interpolated_track["ClassId"] = class_ID[0]
        interpolated_track["Visibility"] = visibility[0]
        interpolated_track["Id"] = Id[0]
        interpolated_track["FrameId"] = sampling_locations
        return interpolated_track
|
# Interactive cluster bring-up: start a Dask worker via SLURM, poll the client,
# then launch JupyterLab on the scheduler node and print the SSH tunnel command.
#srun --ntasks=1 --cpus-per-task=2 --mem=2gb -t 90 --pty bash -i
from dask_jobqueue import SLURMCluster
from datetime import datetime
from time import sleep
cluster = SLURMCluster(project='ewhite', death_timeout=100)
# NOTE(review): start_workers() is the older dask_jobqueue API; newer releases
# use cluster.scale() — confirm against the installed version.
cluster.start_workers(1)
print(cluster.job_script())
from dask.distributed import Client
client = Client(cluster)
client
# Poll for ~200s so the SLURM job has time to start and register workers.
counter = 0
while counter < 10:
    print(datetime.now().strftime("%a, %d %B %Y %I:%M:%S"))
    print(client)
    sleep(20)
    counter += 1
import socket
# Hostname of the scheduler node, used for the JupyterLab bind address below.
host = client.run_on_scheduler(socket.gethostname)
def start_jlab(dask_scheduler):
    # Runs ON the scheduler: spawn JupyterLab bound to the scheduler's host
    # (reads the module-level `host`) and stash the process on the scheduler
    # object so it stays alive.
    import subprocess
    proc = subprocess.Popen(['jupyter', 'lab', '--ip', host, '--no-browser'])
    dask_scheduler.jlab_proc = proc
client.run_on_scheduler(start_jlab)
print("ssh -N -L 8787:%s:8787 -L 8888:%s:8888 -l b.weinstein hpg2.rc.ufl.edu" % (host, host))
# -*-coding:utf-8 -*-
#评论文件
from flask import jsonify,request,g,abort,url_for,current_app
from .. import db
from ..models import Post,Permission,Comment
from . import api
from .decorators import permission_required
@api.route('/comments/<int:id>')
def get_comment(id):
    """Return a single comment as JSON; 404 if it does not exist."""
    comment = Comment.query.get_or_404(id)
    # BUGFIX: to_json is a method and must be called; previously the bound
    # method object itself was passed to jsonify (consistent with
    # new_post_comment, which calls to_json()).
    return jsonify(comment.to_json())
@api.route('/posts/<int:id>/comments/', methods=['POST'])
@permission_required(Permission.COMMENT)
def new_post_comment(id):
    """Create a comment on post `id` from the JSON request body.

    Returns the created comment as JSON with a 201 status and a Location
    header pointing at the new resource.
    """
    parent_post = Post.query.get_or_404(id)
    new_comment = Comment.from_json(request.json)
    new_comment.author = g.current_user
    new_comment.post = parent_post
    db.session.add(new_comment)
    db.session.commit()
    location = url_for('api.get_comment', id=new_comment.id, _external=True)
    return jsonify(new_comment.to_json()), 201, {'Location': location}
# Paginated resource: list all comments.
@api.route('/comments/')
def get_comments():
    """Return one page of comments with prev/next navigation links.

    BUGFIXES relative to the original:
    - `reuqest` -> `request` (NameError)
    - `perv` -> `prev` so the initialized variable is the one later read
    - `page==page-1` (a positional boolean) -> `page=page-1` keyword arg
    - pagination items were bound to `posts` while the loop read an
      undefined `comments`; the comprehension also iterated `post` but
      called `comment.to_json()`
    """
    page = request.args.get('page', 1, type=int)
    pagination = Comment.query.paginate(
        page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'], error_out=False)
    comments = pagination.items
    prev = None
    if pagination.has_prev:
        prev = url_for('api.get_comments', page=page - 1, _external=True)
    next = None
    if pagination.has_next:
        next = url_for('api.get_comments', page=page + 1, _external=True)
    return jsonify({
        'comments': [comment.to_json() for comment in comments],
        'prev': prev,
        'next': next,
        'count': pagination.total
    })
@api.route('/posts/<int:id>/comments/')
def get_post_comments(id):
    """Return one page of post `id`'s comments, oldest first.

    BUGFIXES relative to the original: `reuqest` -> `request`,
    `perv` -> `prev`, `page==page-1` -> `page=page-1`, and the response
    comprehension iterated `post` while calling `comment.to_json()`.
    """
    post = Post.query.get_or_404(id)
    page = request.args.get('page', 1, type=int)
    pagination = post.comments.order_by(Comment.timestamp.asc()).paginate(
        page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'], error_out=False)
    comments = pagination.items
    prev = None
    if pagination.has_prev:
        prev = url_for('api.get_post_comments', page=page - 1, _external=True)
    next = None
    if pagination.has_next:
        next = url_for('api.get_post_comments', page=page + 1, _external=True)
    return jsonify({
        'comments': [comment.to_json() for comment in comments],
        'prev': prev,
        'next': next,
        'count': pagination.total
    })
|
# Spotify OAuth application credentials and redirect URI.
# NOTE(review): client secrets are hard-coded in source; they should be loaded
# from environment variables / untracked config, and any committed secrets
# should be rotated.
# Previous local-development credentials, kept commented out by the author:
# CLIENT_ID = "a306199ae27744c1a3defcc6ca25fd5d"
# CLIENT_SECRET = "cbf79c4ebe5542feb670c14a17c040e3"
# REDIRECT_URI = "http://127.0.0.1:8000/spotify/redirect"
CLIENT_ID = "7c12ce6d3f524689b25884e14682d76d"
CLIENT_SECRET = "dc7df330b2ff4bcd8a9ad811519ac5cb"
REDIRECT_URI = "https://music-expanse.herokuapp.com/spotify/redirect"
|
# Generate uplink pilot + message signals through a RIS-assisted channel and
# save the transmitted/received arrays for later training.
from Generate_signal_func import *
import scipy.io as sio
# Load CSI (shapes per the author's notes):
# H_G (Num_of_Data,Ni,Nt)
# H_r (Num_of_Data,Num_of_user,Ni) -> single antenna user
hr_train = np.load('./Data/Channel/Channel_Hr.npy')
g_train = np.load('./Data/Channel/Channel_G.npy')
phi_up_init = np.load('./Data/Channel/Phase_uplink_init.npy')
# Initial RIS phase configuration as a diagonal matrix.
diag_phi_up_init = tf.linalg.diag(phi_up_init)
# shape check (10000, 100, 64) (10000, 4, 100) (10000, 4, 64)
# up-link channel: cast everything to complex64 for TF matmul
diag_phi_up_init = tf.cast(diag_phi_up_init, tf.complex64)
hr_train = tf.constant(hr_train, tf.complex64)
g_train = tf.constant(g_train, tf.complex64)
# Split real/imag parts (concatenated along the last axis) for network input.
hr_train_real = tf.math.real(hr_train)
hr_train_imag = tf.math.imag(hr_train)
hr_train_AsInput = tf.concat([hr_train_real, hr_train_imag], 2)
g_train_real = tf.math.real(g_train)
g_train_imag = tf.math.imag(g_train)
g_train_AsInput = tf.concat([g_train_real, g_train_imag], 2)
diag_phi_up_init_real = tf.math.real(diag_phi_up_init)
diag_phi_up_init_imag = tf.math.imag(diag_phi_up_init)
# Combined channel H = Hr * diag(phi) * G, computed separately on real and
# imaginary parts.
# NOTE(review): multiplying the real parts and imaginary parts independently
# is NOT a true complex matrix product ((a+bi)(c+di) has cross terms) —
# confirm this approximation is intentional.
H_train_real = tf.matmul(hr_train_real, diag_phi_up_init_real)
H_train_real = tf.matmul(H_train_real, g_train_real)
H_train_imag = tf.matmul(hr_train_imag, diag_phi_up_init_imag)
H_train_imag = tf.matmul(H_train_imag, g_train_imag)
H_train = tf.complex(H_train_real, H_train_imag)  # (10000,4,64)
## combined channel shape is (Num_Data, Num_User, Num_Ant_BS)
H_train_T = tf.transpose(H_train, [0, 2, 1])  # (10000,64,4) up-link
### Generate pilot signals and message signals
counts = np.ones((Num_Data, K*mu))
# Probability of success.
probs = [0.5]
# NOTE(review): np.random.randint((1,2)) treats (1,2) as the `high` array and
# yields two values drawn below 1 and 2 respectively; a fixed two-element
# seed for stateless_binomial was probably intended — verify.
seeds = np.random.randint((1,2))
binomial_samples = tf.random.stateless_binomial(
    shape=[Num_Data, K*mu], seed=seeds, counts=counts, probs=probs)
#n=1, p=0.5, size=(K * mu, ))
# First half of the last axis is the real part, second half the imaginary.
Pilots_matrix_real = Pilots_matrix[:, 0:K]
Pilots_matrix_imag = Pilots_matrix[:, K:K*mu]
binomial_samples_real = binomial_samples[:, 0:K]
binomial_samples_imag = binomial_samples[:, K:K*mu]
Signal_real = tf.concat([Pilots_matrix_real, binomial_samples_real], 1)
Signal_imag = tf.concat([Pilots_matrix_imag, binomial_samples_imag], 1)
#Signal [real[pilot,signal],imag[pilot,signal]]
Signal = tf.concat([Signal_real, Signal_imag], 1)
code_words = tf.concat([Pilots_matrix, binomial_samples], 1)
# Push the signals through the OFDM simulator in 100 chunks to bound memory.
step = int(Num_Data/100)
signal_y = np.zeros((Num_Data, K*2*2))
for index in range(100):
    print(index)
    signal_y[index*step:(index+1)*step, :] = ofdm_simulate(Signal[index*step:(index+1)*step, :],\
        H_train_T[index*step:(index+1)*step, :, :], step)
# Split the received signal back into pilot and data parts.
signal_y_real = signal_y[:, 0:K*mu]
signal_y_imag = signal_y[:, K*mu:K*mu*2]
rec_pilot_real = signal_y_real[:, 0:K]
rec_pilot_imag = signal_y_imag[:, 0:K]
rec_pilot = tf.concat([rec_pilot_real, rec_pilot_imag], 1)
rec_signal_real = signal_y_real[:, K:K*mu]
rec_signal_imag = signal_y_imag[:, K:K*mu]
rec_signal = tf.concat([rec_signal_real, rec_signal_imag], 1)
print('end')
print('shape of signal', signal_y.shape)
# Persist everything for the downstream training pipeline.
np.save('./Data/Signal/Received_at_BS.npy', signal_y)
np.save('./Data/Signal/Transmitted_at_User.npy', Signal)
np.save('./Data/Signal/Received_pilot.npy', rec_pilot)
np.save('./Data/Signal/Received_signal.npy', rec_signal)
np.save('./Data/Signal/Transmitted_pilot.npy', Pilots_matrix)
print('pilots shape', Pilots_matrix.shape)
|
# Multi agent DDPG (MADDPG): per-agent actor/critic pairs with one shared
# replay buffer.
from ddpg import Agent
from BufferNoise import ReplayBuffer
import numpy as np
import torch

# Hyper-parameters shared by all agents.
BUFFER_SIZE = int(1e6)   # replay buffer capacity
BATCH_SIZE = 256         # minibatch size drawn from the buffer
GAMMA = 0.99             # reward discount factor
TAU = 1e-2  # for soft update of target parameters, different from single agent ddpg
UPDATE_EVERY = 15  # Frequency at which to update
LEARNING_REPEATS = 5  # Number of learning updates
def loss_actor(output, target):
    """
    Element-wise squared-error loss.

    PARAMS
    =====
    target: The target tensor
    output: The output tensor
    """
    difference = target - output
    return difference * difference
def policy(states, vector_agent):
    """
    Policy to be used for a single agent
    PARAMS
    =====
    states: The states array
    vector_agent: The agent
    """
    # NOTE(review): `next_states` and `self` are not defined in this
    # module-level function, so calling it raises NameError as written. It
    # presumably should build the joint action from `states` using the full
    # list of agents (compare MultiAgent.learn) — confirm intent before use.
    # Take the composite function
    pred_actions = torch.stack([agent.actor_local(next_states[:, j, :]) for j, agent in enumerate(self.agents)], dim=1)
    # We will take the derivative with respect to the vector agent
    f = vector_agent.critic_local(states, pred_actions)
    return f
class MultiAgent():
    """
    Multi-Agent DDPG (MADDPG) controller.

    Holds one DDPG `Agent` per environment agent and a single shared replay
    buffer; coordinates experience collection, centralized-critic learning,
    and soft target-network updates.
    """
    def __init__(self, n_agents, state_size, action_size, seed):
        """
        Initializes a MultiAgent object
        PARAMS
        =====
        n_agents: Number of agents
        state_size: The dimension of the state space
        action_size: The dimensions of the action space
        seed: The seed to use
        """
        self.n_agents = n_agents
        self.state_size = state_size
        self.action_size = action_size
        self.seed = seed
        self.agents = [Agent(self.state_size, self.action_size, self.seed) for i in range(n_agents)]
        # Single replay memory shared by all agents.
        self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
        # Discount factor (kept as an attribute for external inspection).
        self.Gamma = GAMMA
        self.t_step = 0

    def step(self, state, action, reward, next_state, done):
        """
        Add one sample to experience and Learn if warranted
        PARAMS
        =====
        state: The array of states
        action: The array of actions
        reward: The array of rewards
        next_state: The array of next states
        done: The array of dones
        """
        # Save experience in replay memory.
        self.t_step = (self.t_step + 1) % UPDATE_EVERY
        self.memory.add(state, action, reward, next_state, done)
        # Every UPDATE_EVERY steps, if enough samples are available, run
        # several learning passes followed by a soft target update.
        if self.t_step == 0:
            if len(self.memory) > BATCH_SIZE:
                for i in range(LEARNING_REPEATS):
                    # BUGFIX: learn() previously required a GAMMA argument but
                    # was called here without one, raising TypeError; the
                    # parameter now defaults to the module-level constant.
                    self.learn()
                self.update()

    def act(self, state, eps=0.):
        """Returns actions for given state as per current policy.
        Params
        ======
        state (array_like): current state, indexed per agent as state[i]
        eps: The multiplier going inside the noise component for each agent
        """
        # Stack each agent's action into one array of shape (n_agents, ...).
        return np.vstack([agent.act(state[i], eps) for i, agent in enumerate(self.agents)])

    def learn(self, GAMMA=GAMMA):
        """ Make the agents learn according to MADDPG
        PARAMS
        =====
        GAMMA: The discount factor (defaults to the module-level constant)
        """
        experiences = self.memory.sample()
        for i, agent in enumerate(self.agents):
            states, actions, rewards, next_states, dones = experiences
            # Each agent's TARGET actor proposes the next action for its own
            # state slice; the centralized critic conditions on the joint
            # predicted action of all agents.
            pred_actions = torch.stack([agent.actor_target(next_states[:, j, :]) for j, agent in enumerate(self.agents)], dim=1)
            # TD target: start from this agent's reward...
            pred_critic = rewards[:, i].view(-1, 1)
            # ...and add the discounted target-critic value only where this
            # agent's episode is not done.
            false_idx = dones[:, :, i] == False
            pred_critic[false_idx] += GAMMA * agent.critic_target(next_states, pred_actions)[false_idx]
            # Critic update: squared error between local critic and TD target.
            pred_critic_local = agent.critic_local(states, actions)
            L_critic = loss_actor(pred_critic_local, pred_critic).mean()
            agent.optim_critic.zero_grad()
            L_critic.backward(retain_graph=True)
            # Clip the gradient to stabilize training.
            torch.nn.utils.clip_grad_norm_(agent.critic_local.parameters(), 1)
            agent.optim_critic.step()
            # Actor update: maximize the critic's value (negate for the
            # minimizing optimizer).
            L_actor = - policy(states, agent).mean()
            agent.optim_actor.zero_grad()
            L_actor.backward()
            agent.optim_actor.step()

    def update(self):
        """
        Soft-update every agent's target networks toward its local networks.
        """
        for agent in self.agents:
            agent.soft_update(agent.actor_local, agent.actor_target, TAU)
            agent.soft_update(agent.critic_local, agent.critic_target, TAU)
|
import requests #pip install requests if you don't already have this
from lxml import html # also with lxml if this is not available
import os
from tqdm import tqdm #I want a progress bar...
import requests
import xlrd
import csv
# from TDQM docs https://github.com/tqdm/tqdm#hooks-and-callbacks
# this one seems to work the best
# https://gist.github.com/wy193777/0e2a4932e81afc6aa4c8f7a2984f34e2
def download_from_url(url, dst, chunk_size=1024*2):
    """
    Download `url` to `dst`, resuming a partial download if `dst` exists.

    url : str
        URL to fetch; the server must honor HTTP Range requests for resume
    dst : str
        Destination file path (opened in append mode)
    chunk_size : int
        Number of bytes streamed per iteration

    Returns the total file size in bytes as reported by the server.
    """
    file_size = int(requests.head(url).headers["Content-Length"])
    # Resume from however many bytes are already on disk.
    if os.path.exists(dst):
        first_byte = os.path.getsize(dst)
    else:
        first_byte = 0
    # Already fully downloaded — nothing to do.
    if first_byte >= file_size:
        return file_size
    header = {"Range": "bytes=%s-%s" % (first_byte, file_size)}
    pbar = tqdm(total=file_size, initial=first_byte, unit='B', unit_scale=True, desc=url.split('/')[-1])
    req = requests.get(url, headers=header, stream=True)
    with(open(dst, 'ab')) as f:
        for chunk in req.iter_content(chunk_size):
            if chunk:
                f.write(chunk)
                # BUGFIX: advance by the bytes actually received; the final
                # chunk is usually smaller than chunk_size, so updating by
                # chunk_size over-counted progress.
                pbar.update(len(chunk))
    pbar.close()
    return file_size
# lets see if I can figure out whats available to download from the html on the page
# apparently yes... see : https://docs.python-guide.org/scenarios/scrape/
def decodePage(url):
    """
    Scrape a data-portal page for downloadable .zip/.csv links and download
    the first one found into ./data.

    url : str
        Page URL to scrape.

    Returns the list of <a> elements found in the page's tables.
    """
    # make the data dir
    os.system('mkdir -p data')
    r = requests.get(url)
    tree = html.fromstring(r.content)
    xmlStr = '//a[@data-ga-event=\"download\"]/text()'
    # This will create a list of files available to download
    tagInfo = tree.xpath(xmlStr)
    # https://lxml.de/3.1/api/private/lxml.html.HtmlElement-class.html
    xmlStr = '//table//tr//td//a'
    tagInfo = tree.xpath(xmlStr)
    fileIndex = 0
    for tag in tagInfo:
        label = tag.get('aria-label')
        refUrl = tag.get('href')
        # BUGFIX: the original condition was
        #   '.zip' in refUrl or '.csv' in refUrl and label != None
        # which, because `and` binds tighter than `or`, let .zip links with a
        # missing label through and then crashed on label.split below.
        if ('.zip' in refUrl or '.csv' in refUrl) and label is not None:
            # figure out a smarter way to label these..
            refUrl_split = refUrl.split('.')
            print(label, ' - ', refUrl)
            labels = (label.split('dataset:'))
            description = labels[len(labels)-1].split('-')[0].strip().replace(' ', '_')
            print(description)
            # Name files f0.<ext>, f1.<ext>, ... using the URL's extension.
            fileName = 'data/f%d.%s' % (fileIndex, refUrl_split[len(refUrl_split)-1])
            # Only the first matching file is actually downloaded.
            if (fileIndex == 0):
                download_from_url(refUrl, fileName, 1024*32)
            fileIndex += 1
    return tagInfo
def csv_from_excel():
    """Convert 'Sheet1' of your_workbook.xls into your_csv_file.csv with all
    fields quoted.

    BUGFIXES: this file is Python 3 (print() is used elsewhere), but this
    function used Python 2 idioms — `xrange` (NameError on py3) and a csv
    file opened in 'wb' (TypeError on py3, where csv expects text mode with
    newline=''). Files are now also closed via context managers.
    """
    wb = xlrd.open_workbook('your_workbook.xls')
    sh = wb.sheet_by_name('Sheet1')
    with open('your_csv_file.csv', 'w', newline='') as your_csv_file:
        wr = csv.writer(your_csv_file, quoting=csv.QUOTE_ALL)
        for rownum in range(sh.nrows):
            wr.writerow(sh.row_values(rownum))
|
from microqiskit import QuantumCircuit, simulate
import numpy as np
def make_line ( length ):
    """Return a list of at least `length` bit strings ordered as a reflected
    Gray code, i.e. consecutive strings differ in exactly one bit."""
    # number of bits needed so that 2**n >= length
    n = int(np.ceil(np.log(length)/np.log(2)))
    # seed Gray code over a single bit
    line = ['0','1']
    # Each pass doubles the list: reflect it, then suffix '0' onto the first
    # half and '1' onto the second half.
    for _ in range(n-1):
        reflected = line + line[::-1]
        half = len(reflected) // 2
        line = ([bits + '0' for bits in reflected[:half]]
                + [bits + '1' for bits in reflected[half:]])
    return line
def height_to_circuit( height ):
    """Encode a height map (sequence of non-negative values) into a quantum
    circuit whose statevector carries sqrt(height) amplitudes laid out along
    a Gray-code line (see make_line)."""
    line = make_line( len(height) )
    # Qubits needed to index every string on the line.
    n = int(np.ceil(np.log(len(line))/np.log(2)))
    # L2-normalization factor so the amplitudes form a valid quantum state.
    renorm = np.sqrt(sum(height))
    real_vec = [0]*(2**n)
    for j,h in enumerate(height):
        # Place sqrt(h) at the basis state addressed by the j-th Gray-code
        # bit string (interpreted as a binary integer).
        real_vec[int(line[j],2)] = np.sqrt(h)/renorm
    qc = QuantumCircuit(n)
    qc.initialize( real_vec )
    return qc
def circuit_to_height( qc ):
    """Decode a circuit back into a height map: simulate it, square the
    amplitudes along the Gray-code line, and rescale to a max of 1."""
    n = qc._n
    line = make_line( 2**n )
    real_vec = simulate(qc,get='statevector')
    height = [0]*(2**n)
    for j,amp in enumerate(real_vec):
        # Zero-pad the basis-state index to n bits, then find its position on
        # the Gray-code line.
        string = "{0:b}".format(j)
        string = '0'*(n-len(string)) + string
        k = line.index(string)
        # amp appears to be a (real, imag) pair, so amp[0]**2 keeps only the
        # real component's probability — confirm against microqiskit's
        # statevector format.
        height[k] = amp[0]**2
    # Rescale so the tallest entry is exactly 1.
    max_prob = max(height)
    for j,h in enumerate(height):
        height[j] = h/max_prob
    return height
import os
import pickle
import numpy as np
import tensorflow as tf
import argparse
import tf_util
import gym_util
import sklearn
from sklearn.model_selection import train_test_split
class TrainConfig(object):
    """Hyper-parameter bundle for behavioral-cloning training.

    hidden_sizes : list of int or None
        Widths of the hidden layers; None selects the default [128, 64, 32]
    dropout_rate : float
        Dropout probability applied after each hidden layer (0 disables)
    learning_rate : float
        Adam learning rate
    epochs : int
        Number of passes over the training data
    batch_size : int
        Minibatch size
    """
    def __init__(self, hidden_sizes=None, dropout_rate=0, learning_rate=1e-3, epochs=10, batch_size=32):
        # BUGFIX: the default was a shared mutable list ([128,64,32]) —
        # mutating one config's hidden_sizes would silently change every
        # other default-constructed config. Build a fresh list per instance.
        self.hidden_sizes = [128, 64, 32] if hidden_sizes is None else hidden_sizes
        self.dropout_rate = dropout_rate
        self.learning_rate = learning_rate
        self.epochs = epochs
        self.batch_size = batch_size
# Per-environment training configurations; Humanoid gets a larger network and
# longer training, everything else uses the defaults.
configs = {
    "Ant-v2": TrainConfig(),
    "HalfCheetah-v2": TrainConfig(),
    "Hopper-v2": TrainConfig(),
    "Humanoid-v2": TrainConfig(hidden_sizes=[256,128,64,32],epochs=100),
    "Reacher-v2": TrainConfig(),
    "Walker2d-v2": TrainConfig(),
}
def load_data(data_file):
    """Load a pickled expert-data dict and report its dimensions.

    data_file : str
        Path to a pickle containing {'observations': array, 'actions': array}
        with matching first (sample) dimensions.

    Returns (data, input_size, output_size) where the sizes are the last
    dimensions of the observation and action arrays respectively.
    """
    # BUGFIX: the file handle was previously never closed; use a context
    # manager so it is released deterministically.
    with open(data_file, 'rb') as f:
        data = pickle.load(f)
    assert data['observations'].shape[0] == data['actions'].shape[0]
    input_size = data['observations'].shape[-1]
    output_size = data['actions'].shape[-1]
    return data, input_size, output_size
class Model(object):
    """Feed-forward policy network for behavioral cloning (TF1 graph mode).

    Builds input -> [dense + relu (+ optional dropout)]* -> linear output,
    plus an Adam training op minimizing MSE against expert actions.
    """
    def __init__(self, input_size, output_size, train_config):
        # input_size: dimension of one observation vector
        # output_size: dimension of one action vector
        # train_config: TrainConfig supplying layer sizes and rates
        self.input_size = input_size
        self.output_size = output_size
        self.train_config = train_config
        # Placeholders: observations, expert actions, and a flag that turns
        # dropout on only during training.
        self.input = tf.placeholder(tf.float32, shape=[None, input_size], name="input")
        self.label = tf.placeholder(tf.float32, [None, output_size], name="label")
        self.is_training_phase = tf.placeholder_with_default(False, shape=(), name="is_training_phase_input")
        self._build_graph()
        self._build_train_ops()

    def _build_graph(self):
        # Stack dense+relu(+dropout) blocks, then a final linear layer.
        self.output = self.input
        if self.train_config.hidden_sizes:
            for i, hidden_size in enumerate(self.train_config.hidden_sizes):
                self.output = tf_util.dense(self.output, hidden_size, "hidden_{}".format(i))
                self.output = tf.nn.relu(self.output)
                if self.train_config.dropout_rate > 0:
                    # tf_util.dropout takes a KEEP probability.
                    self.output = tf_util.dropout(self.output, 1.0 - self.train_config.dropout_rate, self.is_training_phase)
        self.output = tf_util.dense(self.output, self.output_size, "last_layer")

    def _build_train_ops(self):
        # Mean squared error against the expert action, minimized with Adam.
        self.loss = tf_util.mean(tf.square(self.output - self.label))
        optimizer = tf.train.AdamOptimizer(self.train_config.learning_rate)
        self.train_op = optimizer.minimize(self.loss)
def train_model(sess, model, X, y, test_size, train_config, verbose=True):
    """Train `model` on (X, y) with minibatch SGD inside an open TF session.

    sess : tf.Session with variables already initialized
    model : Model exposing train_op, loss, and the feed placeholders
    X, y : observation and action arrays (first axis = samples)
    test_size : fraction held out for a final test-loss report; 0 disables
    train_config : TrainConfig providing epochs and batch_size
    verbose : print running mean loss every 100 steps
    """
    if test_size > 0:
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
    else:
        X_train, y_train, X_test, y_test = X, y, None, None
    print("Train size: {}, test size: {}".format(len(X_train), 0 if X_test is None else len(X_test)))
    batch_size = train_config.batch_size
    steps = len(X_train) // batch_size
    for i in range(train_config.epochs):
        # Reshuffle each epoch so minibatches differ between epochs.
        X_train, y_train = sklearn.utils.shuffle(X_train, y_train)
        losses = []
        for j in range(steps):
            X_batch = X_train[j*batch_size:(j+1)*batch_size]
            # squeeze drops extra singleton dims — presumably expert actions
            # are stored (N, 1, act_dim); confirm against the data files.
            y_batch = np.squeeze(y_train[j*batch_size:(j+1)*batch_size])
            _, loss = sess.run([model.train_op, model.loss], feed_dict={model.input: X_batch, model.label: y_batch, model.is_training_phase: True})
            losses.append(loss)
            if j % 100 == 0 and verbose:
                print("Epoch {}, step {}, loss: {}".format(i, j, np.mean(losses)))
                losses = []
        if batch_size * steps != len(X_train):
            # train on remainder that did not fill a whole batch
            X_batch = X_train[steps * batch_size:]
            y_batch = np.squeeze(y_train[steps * batch_size:])
            _, loss = sess.run([model.train_op, model.loss], feed_dict={model.input: X_batch, model.label: y_batch, model.is_training_phase: True})
    if X_test is not None:
        # Evaluate once at the end with dropout disabled.
        test_loss = sess.run(model.loss, feed_dict={model.input: X_test, model.label: np.squeeze(y_test), model.is_training_phase: False})
        print("Test loss is: {}".format(test_loss))
def train(args):
    """Train a cloning policy for args.env_name and checkpoint it to models/."""
    data_file = os.path.join('expert_data', args.env_name + ".pkl")
    data, input_size, output_size = load_data(data_file)
    print("For environment {}, input size: {}, output size: {}".format(args.env_name, input_size, output_size))
    X, y = data['observations'], data['actions']
    train_config = configs[args.env_name]
    model = Model(input_size, output_size, train_config)
    saver = tf.train.Saver()
    saver_path = os.path.join('models', args.env_name)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Hold out 10% of the expert data for a final test-loss report.
        train_model(sess, model, X, y, test_size=0.1, train_config=train_config, verbose=True)
        saver.save(sess, saver_path)
def run(args):
    """Restore the trained policy for args.env_name and roll it out in gym."""
    # The data file is loaded only to recover the input/output dimensions
    # needed to rebuild the graph before restoring weights.
    data_file = os.path.join('expert_data', args.env_name + ".pkl")
    data, input_size, output_size = load_data(data_file)
    model = Model(input_size, output_size, configs[args.env_name])
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, os.path.join('models', args.env_name))
        # Wrap the graph as a callable obs -> action policy.
        policy_fn = tf_util.function([model.input], model.output)
        #test_in = data['observations'][0]
        #action = policy_fn(test_in[None, :])
        gym_util.run_gym(args.env_name, policy_fn, num_rollouts=10)
def main():
    """CLI entry point: dispatch to training or rollout based on --mode."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--env_name', type=str)
    parser.add_argument('--mode', type=str, default='train')
    args = parser.parse_args()
    # Dispatch table instead of an if/elif chain; unknown modes are a no-op,
    # matching the original behavior.
    handlers = {'train': train, 'run': run}
    handler = handlers.get(args.mode)
    if handler is not None:
        handler(args)


if __name__ == "__main__":
    main()
|
import numpy as np
import pytest
import math
from sklearn.base import clone
from sklearn.linear_model import ElasticNet
import doubleml as dml
from ._utils import draw_smpls
from ._utils_pliv_partial_z_manual import fit_pliv_partial_z, boot_pliv_partial_z, tune_nuisance_pliv_partial_z
# Module-scoped parametrized fixtures spanning the test grid for the
# partial-Z PLIV model.
@pytest.fixture(scope='module',
                params=[ElasticNet()])
def learner_r(request):
    # Learner used for the nuisance function r.
    return request.param


@pytest.fixture(scope='module',
                params=['partialling out'])
def score(request):
    # Score function variant.
    return request.param


@pytest.fixture(scope='module',
                params=['dml2'])
def dml_procedure(request):
    # DML aggregation procedure.
    return request.param


@pytest.fixture(scope='module',
                params=[True, False])
def tune_on_folds(request):
    # Whether hyperparameters are tuned per fold or once on the full sample.
    return request.param
def get_par_grid(learner):
    """Return the hyperparameter grid for the given learner.

    Only ElasticNet learners are supported in this test module.
    """
    assert learner.__class__ == ElasticNet
    return {
        'l1_ratio': [.1, .5, .7, .9, .95, .99, 1],
        'alpha': np.linspace(0.05, 1., 7),
    }
@pytest.fixture(scope='module')
def dml_pliv_partial_z_fixture(generate_data_pliv_partialZ, learner_r, score, dml_procedure, tune_on_folds):
    """Fit DoubleMLPLIV (partial-Z) with tuned hyperparameters via the package
    AND via the manual reference implementation, returning both sets of
    results for comparison in the tests below. Seeds are reset before each
    stage so the package and manual fits draw identical sample splits."""
    par_grid = {'ml_r': get_par_grid(learner_r)}
    n_folds_tune = 4
    boot_methods = ['Bayes', 'normal', 'wild']
    n_folds = 2
    n_rep_boot = 503

    # collect data
    data = generate_data_pliv_partialZ
    x_cols = data.columns[data.columns.str.startswith('X')].tolist()
    z_cols = data.columns[data.columns.str.startswith('Z')].tolist()

    # Set machine learning methods for r
    ml_r = clone(learner_r)

    np.random.seed(3141)
    obj_dml_data = dml.DoubleMLData(data, 'y', ['d'], x_cols, z_cols)
    dml_pliv_obj = dml.DoubleMLPLIV._partialZ(obj_dml_data,
                                              ml_r,
                                              n_folds,
                                              dml_procedure=dml_procedure)
    # tune hyperparameters
    _ = dml_pliv_obj.tune(par_grid, tune_on_folds=tune_on_folds, n_folds_tune=n_folds_tune)
    dml_pliv_obj.fit()

    # Re-seed so the manual fit uses the same sample splits as the package.
    np.random.seed(3141)
    y = data['y'].values
    x = data.loc[:, x_cols].values
    d = data['d'].values
    z = data.loc[:, z_cols].values
    n_obs = len(y)
    all_smpls = draw_smpls(n_obs, n_folds)
    smpls = all_smpls[0]

    if tune_on_folds:
        r_params = tune_nuisance_pliv_partial_z(y, x, d, z,
                                                clone(learner_r),
                                                smpls, n_folds_tune,
                                                par_grid['ml_r'])
    else:
        # Tune once on the full sample and replicate the chosen parameters
        # across all folds.
        xx = [(np.arange(len(y)), np.array([]))]
        r_params = tune_nuisance_pliv_partial_z(y, x, d, z,
                                                clone(learner_r),
                                                xx, n_folds_tune,
                                                par_grid['ml_r'])
        r_params = r_params * n_folds

    res_manual = fit_pliv_partial_z(y, x, d, z,
                                    clone(learner_r),
                                    all_smpls, dml_procedure, score,
                                    r_params=r_params)

    res_dict = {'coef': dml_pliv_obj.coef,
                'coef_manual': res_manual['theta'],
                'se': dml_pliv_obj.se,
                'se_manual': res_manual['se'],
                'boot_methods': boot_methods}

    for bootstrap in boot_methods:
        # Same seed before each bootstrap so the manual and package draws
        # coincide.
        np.random.seed(3141)
        boot_theta, boot_t_stat = boot_pliv_partial_z(y, d, z, res_manual['thetas'], res_manual['ses'],
                                                      res_manual['all_r_hat'],
                                                      all_smpls, score, bootstrap, n_rep_boot)
        np.random.seed(3141)
        dml_pliv_obj.bootstrap(method=bootstrap, n_rep_boot=n_rep_boot)
        res_dict['boot_coef' + bootstrap] = dml_pliv_obj.boot_coef
        res_dict['boot_t_stat' + bootstrap] = dml_pliv_obj.boot_t_stat
        res_dict['boot_coef' + bootstrap + '_manual'] = boot_theta
        res_dict['boot_t_stat' + bootstrap + '_manual'] = boot_t_stat

    return res_dict
def test_dml_pliv_coef(dml_pliv_partial_z_fixture):
    # Package coefficient must match the manual reference implementation.
    assert math.isclose(dml_pliv_partial_z_fixture['coef'],
                        dml_pliv_partial_z_fixture['coef_manual'],
                        rel_tol=1e-9, abs_tol=1e-4)
def test_dml_pliv_se(dml_pliv_partial_z_fixture):
    # Package standard error must match the manual reference implementation.
    assert math.isclose(dml_pliv_partial_z_fixture['se'],
                        dml_pliv_partial_z_fixture['se_manual'],
                        rel_tol=1e-9, abs_tol=1e-4)
def test_dml_pliv_boot(dml_pliv_partial_z_fixture):
    # For each bootstrap method, package draws must match the manual ones
    # (both sides are seeded identically inside the fixture).
    for bootstrap in dml_pliv_partial_z_fixture['boot_methods']:
        assert np.allclose(dml_pliv_partial_z_fixture['boot_coef' + bootstrap],
                           dml_pliv_partial_z_fixture['boot_coef' + bootstrap + '_manual'],
                           rtol=1e-9, atol=1e-4)
        assert np.allclose(dml_pliv_partial_z_fixture['boot_t_stat' + bootstrap],
                           dml_pliv_partial_z_fixture['boot_t_stat' + bootstrap + '_manual'],
                           rtol=1e-9, atol=1e-4)
|
import parmed as pmd
class System(object):
    """A simulation system rooted at a build directory; exposes the periodic
    box read from the coordinate file."""
    def __init__(self, path, name, cores):
        # path: root directory expected to contain build/complex.crd
        # name: identifier for this system
        # cores: number of cores to run the system with
        self.path = path
        self.name = name
        self.cores = cores
        # Parse the coordinate file to recover the box vectors.
        structure = pmd.load_file(path+'/build/complex.crd')
        self.box = structure.box

    @property
    def descriptors(self):
        # Fixed list of descriptor file names associated with every system.
        return ['cons.pdb', 'tags.pdb']
|
# NOTE(review): 'ttypess' and 'constant' look like typos for the usual
# Thrift-generated module names 'ttypes' and 'constants' — verify against the
# package contents before relying on `from package import *`.
__all__ = ['ttypess', 'constant']
from collections import defaultdict
from get_valid_data import get_valid_data
def get_ratio(data_table_1, data_table_2):
    """ get_ratio
    Get ratio which is amount1 / amount2 for every country present in both
    tables.
    Arguements: two dictionaries, {country1: amount1, country2: amount2, ...}
    Returns: an unsorted list [(ratio1, country1), (ratio2, country2), ...]
    """
    # Iterate the first table so the output preserves its insertion order.
    return [
        (data_table_1[country] / data_table_2[country], country)
        for country in data_table_1
        if country in data_table_2
    ]
# Test Codes
if __name__ == "__main__":
    # Smoke test: ratio of agricultural land to forest area per country,
    # restricted by get_valid_data to countries with data in both years.
    agricultural_land_filename = 'API_AG.LND.AGRI.K2_DS2_en_csv_v2.csv'
    forest_filename = 'API_AG.LND.FRST.K2_DS2_en_csv_v2.csv'
    year_1 = 1992
    year_2 = 1999
    agricultural = get_valid_data(agricultural_land_filename, year_1, year_2)
    forest = get_valid_data(forest_filename, year_1, year_2)
    print(get_ratio(agricultural, forest))
|
## gfal 2.0 tools core logic of checksum
## @author Adrien Devresse <adevress@cern.ch> CERN
## @license GPLv3
##
import gfal2
import sys
from gfal2_utils_arg_parser import *
from gfal2_utils_parameters import applys_option
from gfal2_utils_verbose import set_verbose_mode
from gfal2_utils_errors import gfal_catch_gerror
def setup_verbose_from_opt(params):
    # Announce verbose mode when the parsed options request any verbosity.
    # NOTE(review): this only prints a banner; the actual verbosity level is
    # applied by set_verbose_mode() elsewhere.
    vlvl = params.verbose
    if(vlvl > 0):
        print "verbose mode"
def create_gfal_sum_parser():
    # Extend the shared gfal argument parser with the two positional
    # arguments gfal-sum needs: the file URI and the checksum algorithm.
    p = create_basic_parser()
    p.add_argument( 'FILE', nargs=1, type=str, help="file uri to use for checksum calculation")
    p.add_argument( 'CHECKSUM_TYPE', nargs=1, type=str, help="checksum algorithm to use ( ex: ADLER32, CRC32, MD5, etc.. )")
    return p
@gfal_catch_gerror
def gfal_sum_main():
    # Entry point: parse CLI args, create a gfal2 context, compute the
    # requested checksum and print "<uri><TAB><checksum>".
    # gfal errors are translated by the gfal_catch_gerror decorator.
    params = create_gfal_sum_parser().parse_args(sys.argv[1:])
    set_verbose_mode(params)
    c = gfal2.creat_context()
    applys_option(c,params)
    my_file = params.FILE[0]
    r= c.checksum(my_file, params.CHECKSUM_TYPE[0])
    print "%s\t%s"%(my_file, r)
    return 0
|
"""add tag description
Revision ID: 13a6be76ac3b
Revises: 55954d2561cb
Create Date: 2021-03-21 14:14:59.291750
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# Alembic reads these module-level names to place this migration in the
# version graph: this revision (13a6be76ac3b) directly follows 55954d2561cb.
revision = "13a6be76ac3b"
down_revision = "55954d2561cb"
branch_labels = None
depends_on = None
def upgrade():
    # Intentionally empty: this revision applies no schema changes.
    pass
def downgrade():
    # Intentionally empty: nothing to revert for this revision.
    pass
|
# -*- coding: utf-8 -*-
import argparse
import collections
import datetime
import json
import sys
import time
def parse_datetime(str_time=None, fmt='%Y-%m-%d %H:%M:%S'):
    """Parse a time string into a ``datetime``.

    Tries *fmt* first, then a fixed list of fallback formats.  ISO-8601 UTC
    strings (trailing ``Z``) are shifted by +8 hours to Beijing time, as in
    the original fallbacks.

    :param str_time: time string; when empty/None the current time is returned
    :param fmt: preferred strptime format to try first
    :return: datetime.datetime

    Bug fix: previously an empty or unparseable input hit ``t = t`` with
    ``t`` never assigned, raising UnboundLocalError; the commented-out
    ``datetime.datetime.now()`` fallback is now restored.
    """
    t = datetime.datetime.now()
    if str_time:
        # (pattern, is_utc) pairs, tried in order; first success wins.
        candidates = (
            (fmt, False),
            ('%Y/%m/%d %H:%M:%S', False),
            ('%Y-%m-%dT%H:%M:%SZ', True),
            ('%Y-%m-%dT%H:%M:%S.%fZ', True),
        )
        for pattern, is_utc in candidates:
            try:
                t = datetime.datetime.strptime(str_time, pattern)
            except (ValueError, TypeError):
                continue
            if is_utc:
                # UTC -> Beijing time requires adding the 8 hour offset.
                t += datetime.timedelta(hours=8)
            break
    return t
def str_to_timestamp(str_time=None, fmt='%Y-%m-%d %H:%M:%S'):
    """Convert a time string (or second/millisecond timestamp) to a unix timestamp.

    Digit strings of length 10 are treated as seconds, longer digit strings
    as milliseconds.  Anything else is parsed with :func:`parse_datetime`
    and converted via the local timezone.  Falsy input passes through as str.

    :return: int timestamp in seconds, or the stringified input on failure
    """
    str_time = str(str_time)
    convert_timestamp = str_time
    if str_time:
        if str_time.isdigit():
            if len(str_time) == 10:
                convert_timestamp = int(str_time)
            else:
                # Millisecond input: integer division keeps the result an
                # int of whole seconds ('/' previously produced a float).
                convert_timestamp = int(str_time) // 1000
        else:
            d = parse_datetime(str_time, fmt)
            convert_timestamp = int(time.mktime(d.timetuple()))
    return convert_timestamp
def timestamp2str(ts, fmt='%Y-%m-%d %H:%M:%S'):
    """Format a unix timestamp as a local-time string.

    Only the first 10 digits (seconds) are used; non-digit input is
    returned unchanged, as a string.
    """
    text = str(ts)
    if not text.isdigit():
        return text
    seconds = int(text[:10])
    return time.strftime(fmt, time.localtime(seconds))
def main():
    """CLI filter: read text from *infile* (or stdin) and pretty-print to
    *outfile* (or stdout).

    The content is tried, in order, as: JSON (optionally containing escaped,
    undecoded utf-8 byte sequences), a unicode-escaped string, a datetime
    string (converted to a timestamp), or a timestamp (converted to a
    datetime string).  Anything unrecognised is echoed unchanged.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('infile', nargs='?', type=argparse.FileType())
    parser.add_argument('outfile', nargs='?', type=argparse.FileType('w'))
    options = parser.parse_args()
    infile = options.infile or sys.stdin
    outfile = options.outfile or sys.stdout
    with infile:
        try:
            content = infile.read()
            # Re-decode escaped, undecoded utf-8 byte sequences (e.g. "\x..").
            if "\\x" in content:
                content = content.replace("'", "\"")\
                    .replace("\r\n","")\
                    .replace("\n","")\
                    .replace("\r","")
                # SECURITY NOTE(review): eval() on raw input can execute
                # arbitrary code — consider ast.literal_eval here.
                content = eval("b'" + content + "'").decode('utf8')
            obj = json.loads(content, object_pairs_hook=collections.OrderedDict)
        except Exception as e:
            # raise SystemExit(e)
            try:
                # Unicode-escaped input.
                if "\\u" in content:
                    obj = json.loads("u'" + content.strip() + "'")
                else:
                    # Datetime string -> timestamp.
                    obj = str(str_to_timestamp(content))
            except:
                obj = content
            # Timestamp -> datetime string.
            if isinstance(obj, int):
                obj = timestamp2str(int(content))
            # raise SystemExit(e)
    with outfile:
        if isinstance(obj, dict) or isinstance(obj, list):
            json.dump(
                obj,
                outfile,
                indent=4,
                ensure_ascii=False
            )
            outfile.write('\n')
        elif isinstance(obj, str) or isinstance(obj, int):
            outfile.write(str(obj))
            outfile.write('\n')
|
from django import forms
from django.core.exceptions import ValidationError
import magic
import sys
class pdfFileUpload(forms.FileField):
    """Form field that only accepts PDF uploads up to 25 MB.

    The MIME type is sniffed from the file content with libmagic rather
    than trusting the client-supplied Content-Type, so renamed non-PDF
    files are rejected as well.
    """

    def to_python(self, value):
        # Keep the uploaded file object as-is; all checks live in validate().
        return value

    def validate(self, value):
        # Sniff the mime type from the in-memory buffer.
        mime_type = magic.from_buffer(value.file.getvalue(), mime=True)
        # Reject anything whose sniffed type is not PDF.
        if('application/pdf' not in mime_type):
            raise ValidationError('Unsupported File Type ..', code='unsupported_ftype')
        # Enforce the 25 MB (2.5e7 byte) size cap.
        total_file_size = sys.getsizeof(value.file.getvalue())
        if(total_file_size > 2.5e7):
            # Fixed: the message previously claimed a "2 MB" limit although
            # the enforced cap is 2.5e7 bytes (25 MB).
            raise ValidationError("Total file size should be less than 25 MB", code="filesize_limit_exceed")
import os
import definitions
from definitions import NOMENCLATURES_DIR
import wsdm.ts.helpers.persons.persons as p_lib
import wsdm.ts.helpers.nationalities.nationalities as nat_lib
import wsdm.ts.helpers.professions.professions as prof_lib
def init_persons(persons_file=None):
    """Load the person nomenclature.

    Each line is expected to start with the person identifier followed by a
    space; only that first token is kept.

    :param persons_file: optional path override; defaults to
        ``<NOMENCLATURES_DIR>/persons.txt`` (backward compatible).
    :return: list of person identifiers, in file order
    """
    if persons_file is None:
        persons_file = os.path.join(NOMENCLATURES_DIR, "persons.txt")
    with open(persons_file, encoding='utf8') as persons_f:
        return [line.split(' ', 1)[0] for line in persons_f]
def get_train_lines(data, is_positive):
    """Render "<item> <key> <score>" training lines from a dict of lists.

    :param data: mapping key -> list of items (renamed from ``dict``, which
        shadowed the builtin)
    :param is_positive: True emits score 7, False emits score 0
    :return: list of formatted lines; keys with empty lists contribute nothing
    """
    score = "7" if is_positive else "0"
    # The empty-list guard of the original is redundant: iterating an empty
    # list simply adds nothing.
    return ["{0} {1} {2}".format(item, key, score)
            for key, val in data.items()
            for item in val]
def save_train_data(positive_dict, negative_dict, train_file):
    """Write alternating positive/negative training lines to *train_file*.

    NOTE(review): ``zip`` truncates to the shorter of the two line lists, so
    any surplus positive or negative lines are silently dropped — confirm
    this 1:1 balancing is intentional.
    """
    positive_lines = get_train_lines(positive_dict, True)
    negative_lines = get_train_lines(negative_dict, False)
    with open(train_file, encoding='utf8', mode='w') as fw:
        for (pl, nl) in zip(positive_lines, negative_lines):
            fw.write(pl + "\n")
            fw.write(nl + "\n")
def init_nationalities_empty_dict(nationalities_file=None):
    """Build ``{nationality: []}`` from the nationalities nomenclature.

    :param nationalities_file: optional path override; defaults to
        ``<NOMENCLATURES_DIR>/nationalities.txt`` (backward compatible).
    :return: dict mapping each (rstripped) line to a fresh empty list
    """
    if nationalities_file is None:
        nationalities_file = os.path.join(NOMENCLATURES_DIR, "nationalities.txt")
    with open(nationalities_file, encoding='utf8', mode='r') as fr:
        return {line.rstrip(): [] for line in fr}
def init_professions_empty_dict(professions_file=None):
    """Build ``{profession: []}`` from the professions nomenclature.

    :param professions_file: optional path override; defaults to
        ``<NOMENCLATURES_DIR>/professions.txt`` (backward compatible).
    :return: dict mapping each (rstripped) line to a fresh empty list
    """
    if professions_file is None:
        professions_file = os.path.join(NOMENCLATURES_DIR, "professions.txt")
    with open(professions_file, encoding='utf8', mode='r') as fr:
        return {line.rstrip(): [] for line in fr}
def is_nationality_negative(person, nationality):
    """True when *nationality* is NOT mentioned in the person's article.

    Nationality synonyms are normalised to canonical names before the
    substring test.  Missing article files count as negative.
    """
    person_file = os.path.join(definitions.PERSONS_DIR, p_lib.remove_spaces(person) + ".txt")
    if not os.path.isfile(person_file):
        return True
    with open(person_file, 'r', encoding='utf8') as fr:
        text = fr.read()
    for synonym, canonical in nat_lib.nationalities_dict.items():
        text = text.replace(synonym, canonical)
    return nationality not in text
def get_positive_nationality(person):
    """Return the person's nationality when the article names exactly one.

    Synonyms are normalised via ``nat_lib.nationalities_dict`` first.  A
    single mentioned nationality counts only when it also appears in the
    article's first line; when exactly two are mentioned and one of them is
    'Republic of Ireland', that one wins.  Returns None otherwise (including
    when the article file is missing).
    """
    nationalities_empty_dict = init_nationalities_empty_dict()
    person_file = os.path.join(definitions.PERSONS_DIR, p_lib.remove_spaces(person) + ".txt")
    if os.path.isfile(person_file):
        with open(person_file, 'r', encoding='utf8') as fr:
            first_line = fr.readline()
            fr.seek(0)
            content = fr.read()
            # Replace synonyms in both views of the text so the membership
            # tests below see canonical country names only.
            for synonym, coun in nat_lib.nationalities_dict.items():
                first_line = first_line.replace(synonym, coun)
                content = content.replace(synonym, coun)
            mentioned_nationalities = tuple(
                temp_nationality for temp_nationality in nationalities_empty_dict if temp_nationality in content)
            # 'Ireland' is a substring of 'Republic of Ireland', so both may
            # match together; prefer the more specific one.
            if len(mentioned_nationalities) == 2 and 'Republic of Ireland' in mentioned_nationalities:
                mentioned_nationalities = ['Republic of Ireland']
            if len(mentioned_nationalities) == 1 and mentioned_nationalities[0] in first_line:
                return mentioned_nationalities[0]
    return None
def is_profession_negative(person, profession):
    """True when none of the profession's similarity words occur in the
    person's article.  Missing article files count as negative.
    """
    words = prof_lib.get_similarity_words(profession)
    person_file = os.path.join(definitions.PERSONS_DIR, p_lib.remove_spaces(person) + ".txt")
    if not os.path.isfile(person_file):
        return True
    with open(person_file, 'r', encoding='utf8') as fr:
        text = fr.read()
    return not any(word in text for word in words)
def get_positive_profession(person):
    """Return the person's profession when the article supports exactly one.

    A profession counts as mentioned when ALL of its similarity words occur
    in the text.  The profession is returned only when exactly one is
    mentioned in the whole article AND exactly one in the first line;
    otherwise (or when the article file is missing) None is returned.
    """
    professions_empty_dict = init_professions_empty_dict()
    person_file = os.path.join(definitions.PERSONS_DIR, p_lib.remove_spaces(person) + ".txt")
    if os.path.isfile(person_file):
        with open(person_file, 'r', encoding='utf8') as fr:
            first_line = fr.readline()
            fr.seek(0)
            content = fr.read()
            mentioned_professions = []
            mentioned_professions_first_sentence = []
            for profession in professions_empty_dict:
                similarity_words = prof_lib.get_similarity_words(profession)
                if all(x in content for x in similarity_words):
                    mentioned_professions.append(profession)
                if all(x in first_line for x in similarity_words):
                    mentioned_professions_first_sentence.append(profession)
            if len(mentioned_professions) == 1 and len(mentioned_professions_first_sentence) == 1:
                return mentioned_professions_first_sentence[0]
    return None
|
# Read and print the fruits file.  A context manager guarantees the handle
# is closed even on errors — the original opened the file and never closed
# it (the close() call was commented out).
with open("files/fruits.txt") as myfile:
    # storing the file content into a variable
    content = myfile.read()
print(content)
|
#! /usr/bin/env python3
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
#
# Parts of code and comments contained in this file are taken from
# the official Hyperledger Sawtooth documentation
# https://sawtooth.hyperledger.org/docs/core/releases/1.1.4/contents.html
# and from example projects from developer ``danintel'':
# https://github.com/danintel/sawtooth-cookiejar
#
'''Attestation Family event client
To run, start the validator then type the following on the command line:
./events_client.py
For more information, see
https://sawtooth.hyperledger.org/docs/core/releases/latest/app_developers_guide/event_subscriptions.html
'''
import sys
import traceback
import attmgr_client
from sawtooth_sdk.messaging.stream import Stream
from sawtooth_sdk.protobuf import events_pb2
from sawtooth_sdk.protobuf import client_event_pb2
from sawtooth_sdk.protobuf.validator_pb2 import Message
# hard-coded for simplicity (otherwise get the URL from the args in main):
# For localhost access:
#DEFAULT_VALIDATOR_URL = 'tcp://localhost:4004'
# For Docker access:
DEFAULT_VALIDATOR_URL = 'tcp://validator:4004'
# Calculated from the 1st 6 characters of SHA-512("attestation"):
ATTESTATION_TP_ADDRESS_PREFIX = 'fadc96'
# Method to subscribe to the desired events
def subscribe_to_events(delta_filters=None):
    '''Listen to attestation state-delta events.

    Registers subscriptions for the "attestation/trustpath" and
    "attestation/entrypoint" event types with the validator and asserts the
    validator acknowledged them.  NOTE(review): the Stream opened here is
    never closed in this function — confirm the SDK reclaims it.
    '''
    # Subscribe to events
    trust_path_subscription = events_pb2.EventSubscription(
        event_type="attestation/trustpath", filters=delta_filters)
    trust_entry_subscription = events_pb2.EventSubscription(
        event_type="attestation/entrypoint", filters=delta_filters)
    request = client_event_pb2.ClientEventsSubscribeRequest(
        subscriptions=[trust_path_subscription, trust_entry_subscription])
    # Send the subscription request
    stream = Stream(DEFAULT_VALIDATOR_URL)
    msg = stream.send(message_type=Message.CLIENT_EVENTS_SUBSCRIBE_REQUEST,
                      content=request.SerializeToString()).result()
    assert msg.message_type == Message.CLIENT_EVENTS_SUBSCRIBE_RESPONSE
    # Parse the subscription response
    response = client_event_pb2.ClientEventsSubscribeResponse()
    response.ParseFromString(msg.content)
    assert response.status == \
           client_event_pb2.ClientEventsSubscribeResponse.OK
# Called from the client after submitting a transaction
def listen_to_events():
    '''Print every event batch received from the validator, forever.

    Blocks the calling thread in an infinite receive loop; intended to be
    called by the client after submitting a transaction.
    '''
    # Listen for events in an infinite loop
    stream = Stream(DEFAULT_VALIDATOR_URL)
    print("Listening to events.")
    while True:
        msg = stream.receive().result()
        assert msg.message_type == Message.CLIENT_EVENTS
        # Parse the response
        event_list = events_pb2.EventList()
        event_list.ParseFromString(msg.content)
        print("Received the following events: ----------")
        for event in event_list.events:
            print(event)
# Unsubscription method
def unsubscribe_from_events():
    '''Send an unsubscribe request and assert the validator accepted it.'''
    # Unsubscribe from events
    stream = Stream(DEFAULT_VALIDATOR_URL)
    request = client_event_pb2.ClientEventsUnsubscribeRequest()
    msg = stream.send(Message.CLIENT_EVENTS_UNSUBSCRIBE_REQUEST,
                      request.SerializeToString()).result()
    assert msg.message_type == Message.CLIENT_EVENTS_UNSUBSCRIBE_RESPONSE
    # Parse the unsubscribe response
    response = client_event_pb2.ClientEventsUnsubscribeResponse()
    response.ParseFromString(msg.content)
    assert response.status == \
           client_event_pb2.ClientEventsUnsubscribeResponse.OK
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import bottleneck
import collections
import cPickle as pickle
import datetime
import heapq
import itertools
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import numpy.matlib
np.matlib = numpy.matlib
import pdb
import scipy.sparse
np.set_printoptions(linewidth=225)
# np.seterr(all='raise')
class UtilityMatrix:
    """Rating ("utility") matrix plus the derived statistics recommenders use.

    Holds the full sparse rating matrix ``r`` and a training copy ``rt``
    from which a share of the known ratings has been hidden as a held-out
    test set, plus baseline statistics (global mean ``mu``, item bias
    ``b_i``, user bias ``b_u``), item co-rating counts and — optionally —
    shrunk item-item cosine similarities.  (Python 2 code: uses
    ``itertools.izip``.)
    """
    def __init__(self, r, beta=1, hidden=None, similarities=True):
        # r: sparse user x item rating matrix; beta: similarity shrinkage
        # constant; hidden: optional precomputed 2 x n indices of ratings to
        # hide; similarities=False skips the expensive similarity step.
        print('UtilityMatrix.__init__')
        self.beta = beta
        self.r = r.astype(float) # rating matrix (=utility matrix)
        self.r_coo = self.r.tocoo()
        print('getting training data...')
        self.rt, self.rt_coo, self.hidden_indices, self.hidden_vals =\
            self.get_training_data(hidden)
        self.mu, self.b_i, self.b_u = self.entrymean(self.rt)
        print('getting coratings...')
        self.coratings_r = self.get_coratings(self.r)
        self.coratings_rt = self.get_coratings(self.rt)
        if similarities:
            print('getting similarities...')
            self.s_r = self.get_similarities(self.r, self.coratings_r)
            self.s_rt = self.get_similarities(self.rt, self.coratings_rt)
        self.sirt_cache = {}  # memo for similar_items() results
    def get_training_data(self, hidden, training_share=0.2):
        """Hide *training_share* of the stored ratings as a test set.

        Returns (rt_csr, rt_coo, hidden_indices, hidden_vals) where the
        hidden entries have been deleted from the returned matrices.
        """
        if hidden is None: # take some of the input as training data
            i, j = self.r_coo.row, self.r_coo.col
            rands = np.random.choice(
                len(i),
                int(training_share * len(i)),
                replace=False
            )
            hidden = np.vstack((i[rands], j[rands]))
        # DOK format allows cheap per-entry reads and deletions.
        r_dok = self.r_coo.todok()
        vals = np.zeros(hidden.shape[1])
        for idx in range(hidden.shape[1]):
            print('\r ', idx, '/', hidden.shape[1], end='')
            vals[idx] = r_dok[hidden[0, idx], hidden[1, idx]]
            del r_dok[hidden[0, idx], hidden[1, idx]]
        print()
        r_coo = r_dok.tocoo()
        return r_coo.tocsr(), r_coo, hidden.T, vals
    def entrymean(self, m, axis=None):
        """Average a matrix over the given axis. If the axis is None,
        average over both rows and columns, returning a scalar.
        (via some SciPy function)

        Returns (mu, b_i, b_u): the global mean over the stored entries and
        the per-item / per-user mean offsets from it (0 where a column/row
        has no stored ratings).
        """
        # Mimic numpy's casting. The int32/int64 check works around numpy
        # 1.5.x behavior of np.issubdtype, see gh-2677.
        if (np.issubdtype(m.dtype, np.float_) or
                np.issubdtype(m.dtype, np.int_) or
                m.dtype in [np.dtype('int32'), np.dtype('int64')] or
                np.issubdtype(m.dtype, np.bool_)):
            res_dtype = np.float_
        elif np.issubdtype(m.dtype, np.complex_):
            res_dtype = np.complex_
        else:
            res_dtype = m.dtype
        m = m.astype(res_dtype)
        mu = m.sum(None) / m.getnnz()
        # if user or item has no ratings (stripped from training data), set to 0
        b_i = m.sum(0)
        b_u = m.sum(1)
        with np.errstate(invalid='ignore'):
            b_i = (b_i / m.getnnz(axis=0)) - mu
            b_u = (b_u.T / m.getnnz(axis=1)) - mu
        b_i[np.isnan(b_i)] = 0
        b_u[np.isnan(b_u)] = 0
        return mu, np.array(b_i)[0], np.array(b_u)[0]
    def get_coratings(self, r):
        """Item x item counts of users who rated both items (diagonal zeroed)."""
        um = scipy.sparse.csr_matrix(r)
        # Binarise the ratings so the dot product counts co-occurrences.
        um.data = np.ones(um.data.shape[0])
        coratings = um.T.dot(um)
        coratings.setdiag(0)
        return coratings
    def get_similarities(self, r, coratings):
        """Shrunk item-item cosine similarities of mean-centered ratings.

        Each similarity is shrunk towards 0 by its support:
        s <- coratings / (coratings + beta) * s.
        """
        print('centering...')
        # use the centered version for similarity computation
        um_centered = r.toarray().astype(np.float32)
        um_centered[np.where(um_centered == 0)] = np.nan
        um_centered = um_centered - np.nanmean(um_centered, axis=0)[np.newaxis, :]
        um_centered[np.where(np.isnan(um_centered))] = 0
        print('computing similarities...')
        A = scipy.sparse.csr_matrix(um_centered)
        print('\r', 1, end='')
        # transpose, as the code below compares rows
        A = A.T
        print('\r', 2, end='')
        # base similarity matrix (all dot products)
        similarity = A.dot(A.T)
        print('\r', 3, end='')
        # squared magnitude of preference vectors (number of occurrences)
        square_mag = similarity.diagonal()
        print('\r', 4, end='')
        # inverse squared magnitude
        inv_square_mag = 1 / square_mag
        print('\r', 5, end='')
        # if it doesn't occur, set the inverse magnitude to 0 (instead of inf)
        inv_square_mag[np.isinf(inv_square_mag)] = 0
        print('\r', 6, end='')
        # inverse of the magnitude
        inv_mag = np.sqrt(inv_square_mag)
        print('\r', 7, end='')
        # cosine similarity (elementwise multiply by inverse magnitudes)
        col_ind = range(len(inv_mag))
        row_ind = np.zeros(len(inv_mag))
        inv_mag2 = scipy.sparse.csr_matrix((inv_mag, (col_ind, row_ind)))
        print('\r', 8, end='')
        cosine = similarity.multiply(inv_mag2)
        print('\r', 9, end='')
        cosine = cosine.T.multiply(inv_mag2)
        print('\r', 10, end='')
        cosine.setdiag(0)
        s = cosine.toarray()
        # shrink similarities
        # this is "s = (coratings * s) / (coratings + self.beta)" in sparse form
        coratings_shrunk = scipy.sparse.csr_matrix((
            coratings.data / (coratings.data + self.beta),
            coratings.indices,
            coratings.indptr)
        )
        print()
        return scipy.sparse.csr_matrix(coratings_shrunk.multiply(s))
    # @profile
    def similar_items(self, u, i, k, use_all=False):
        """Top-k items most similar to *i* among those rated by user *u*.

        Only items with strictly positive similarity are kept.  Results are
        memoised in ``sirt_cache`` keyed by (u, i, k); ``use_all`` selects
        the full matrix instead of the training matrix.
        """
        try:
            return self.sirt_cache[(u, i, k)]
        except KeyError:
            if use_all: # use entire matrix
                m = self.r
                s = self.s_r
                # r_u = self.r[u, :] # user ratings
                # r_u = self.r.getrow(u) # user ratings
                # s_i = np.copy(self.s_r[i, :]) # item similarity
                # s_i = self.s_r.getrow(i) # item similarity
                # s_i = self.s_r[i, :]
            else: # use training matrix
                # r_u = self.rt[u, :] # user ratings
                # r_u = self.rt.getrow(u) # user ratings
                # s_i = np.copy(self.s_rt[i, :]) # item similarity
                # s_i = self.s_rt.getrow(i) # item similarity
                # s_i = self.s_rt[i, :]
                m = self.rt
                s = self.s_rt
            # old version
            # 12.41, 12.25, 12.81
            # r_u = m[u, :].toarray()[0]
            # s_i = s[i, :].toarray()[0]
            # r_u[r_u == 0] = np.nan
            # s_i[s_i <= 0] = np.nan
            # # if (u, i) == (0, 0):
            # #     pdb.set_trace()
            # # s_i[s_i < 0.0] = np.nan # mask only to similar items
            # s_i[i] = np.nan # mask the item
            # s_i[np.isnan(r_u)] = np.nan # mask to items rated by the user
            # nn = np.isnan(s_i).sum() # how many invalid items
            # s_i_sorted = np.argsort(s_i)
            # s_i_k = s_i_sorted[-k - nn:-nn]
            # new version
            # ML(255): 11.54, 11.47, 12.12
            # ML: 13:51
            # r_u = m.getrow(u)
            # s_i = s.getrow(i)
            # nz = r_u.nonzero()
            # if nz[1].any():
            #     vals = zip(np.array(s_i[nz])[0], nz[1])
            #     vals = [t for t in vals if t[0] > 0]
            #     s_i_k = heapq.nlargest(k, vals)
            #     s_i_k = tuple(e[1] for e in s_i_k)
            # else:
            #     s_i_k = ()
            # new and faster version
            # ML(255): 6.92, 6.31, 7.01, 6.04
            # ML: 8:06
            r_u = m.getrow(u)
            s_i = s.getrow(i)
            nnz = set(r_u.nonzero()[1])
            s_i_filtered = s_i.toarray()
            # NOTE(review): argsort is ascending, so this walks the LEAST
            # similar items first — confirm the intended order here.
            s_i_sorted = np.argsort(s_i_filtered)
            top = []
            for el in s_i_sorted[0]:
                if el in nnz and el != i:
                    top.append(el)
                    if len(top) == k:
                        break
            s_i_k = tuple(v for v in top if s_i[0, v] > 0)
            # new new and slow version
            # extremly slow!
            # r_u = m.getrow(u)
            # s_i = s.getrow(i)
            # nnz = set(r_u.nonzero()[1])
            # s_i_array = s_i.toarray()
            # top = []
            # k_top = k
            # while len(top) < k or k_top < s_i_array.shape[1]:
            #     k_top = min(k_top + 10, s_i_array.shape[1])
            #     s_i_sorted = bottleneck.argpartsort(s_i_array, k_top)
            #
            #     for el in s_i_sorted[0][:k_top]:
            #         if el in nnz:
            #             top.append(el)
            #             if len(top) == k:
            #                 break
            # s_i_k = tuple(top)
            self.sirt_cache[(u, i, k)] = tuple(s_i_k)
            return self.sirt_cache[(u, i, k)]
    def rt_not_nan_iterator(self, idx=False):
        """Yield (u, i, v) triples of the stored training ratings,
        optionally enumerated as (idx, (u, i, v))."""
        if idx:
            for idx, (u, i, v) in enumerate(itertools.izip(
                self.rt_coo.row, self.rt_coo.col, self.rt_coo.data
            )):
                yield idx, (u, i, v)
        else:
            for u, i, v in itertools.izip(
                self.rt_coo.row, self.rt_coo.col, self.rt_coo.data
            ):
                yield (u, i, v)
    def r_not_nan_iterator(self):
        """Yield (u, i, v) triples of all stored ratings in the full matrix."""
        for u, i, v in itertools.izip(
            self.r_coo.row, self.r_coo.col, self.r_coo.data
        ):
            yield (u, i, v)
class Recommender:
    """Base class for all recommenders: holds the UtilityMatrix and the
    dense baseline predictions, and provides RMSE evaluation helpers."""
    def __init__(self, m):
        # m is a UtilityMatrix instance.
        self.m = m
        self.rmse = []  # per-iteration training RMSE history (for plotting)
        self.rt_predicted = None
        self.rt_nnz = self.m.rt.nnz
        self.r_t_predicted = self.get_baseline_predictions()
    def get_baseline_predictions(self):
        """Dense baseline mu + b_i + b_u for every (user, item) cell."""
        P = np.ones(self.m.rt.shape) * self.m.mu
        P += np.matlib.repmat(self.m.b_i, self.m.rt.shape[0], 1)
        P += np.matlib.repmat(self.m.b_u, self.m.rt.shape[1], 1).T
        return P
    def predict(self, u, i, dbg=False):
        """Predict user *u*'s rating of item *i* (subclasses override)."""
        raise NotImplementedError
    def training_error(self):
        """RMSE of predict() over the (non-hidden) training ratings."""
        sse = 0.0
        for idx, (u, i, v) in self.m.rt_not_nan_iterator(idx=True):
            # print('\r', idx, '/', nnz, end='')
            err = v - self.predict(u, i)
            sse += err ** 2
        print()
        rmse = np.sqrt(sse / self.rt_nnz)
        return rmse
    def test_error(self):
        """RMSE of predict() over the hidden (held-out) ratings."""
        # sse_old = 0.0
        # errs = []
        # no_hidden = self.m.hidden_indices.shape[0]
        # for idx, (u, i) in enumerate(self.m.hidden_indices):
        #     print('\r    ', idx+1, '/', no_hidden, end='')
        #     err = self.m.r[u, i] - self.predict(u, i)
        #     sse_old += err ** 2
        #     # errs.append(err)
        #     # print(self.m.r[u, i], self.predict(u, i), err)
        #
        #     # if err > 100:
        #     #     print(err, self.m.r[u, i], self.predict(u, i))
        #     #     self.predict(u, i, dbg=True)
        # print()
        #
        # rmse_old = np.sqrt(sse_old / self.m.hidden_indices.shape[0])
        #
        # predictions = np.zeros(self.m.hidden_vals.shape)
        # no_hidden = self.m.hidden_vals.shape[0]
        # for idx, (u, i) in enumerate(self.m.hidden_indices):
        #     print('\r', idx+1, no_hidden, end='')
        #     predictions[idx] = self.predict(u, i)
        # print()
        # sse = sum((predictions - self.m.hidden_vals) ** 2)
        # rmse = np.sqrt(sse / no_hidden)
        ####
        sses = 0
        no_hidden = self.m.hidden_vals.shape[0]
        for idx, (u, i) in enumerate(self.m.hidden_indices):
            print('\r', idx+1, no_hidden, end='')
            sses += (self.predict(u, i) - self.m.hidden_vals[idx]) ** 2
        print()
        rmse = np.sqrt(sses / no_hidden)
        # print((predictions - self.m.hidden_vals) ** 2)
        # pdb.set_trace()
        return rmse
    def print_test_error(self):
        """Convenience wrapper: print the test RMSE with the class name."""
        print('%.3f - Test Error %s' %
              (self.test_error(), self.__class__.__name__))
    def plot_rmse(self, title='', suffix=None):
        """Plot the recorded training-RMSE history and save it as a PNG."""
        plt.plot(range(len(self.rmse)), self.rmse)
        plt.xlabel("iteration")
        plt.ylabel("RMSE")
        plt.title(title + ' | ' + '%.4f' % self.rmse[-1] +
                  ' | ' + datetime.datetime.now().strftime("%H:%M:%S"))
        plt.savefig('rmse' + ('_' + suffix if suffix is not None else '') +
                    '.png')
class GlobalAverageRecommender(Recommender):
    """Baseline: always predicts the global mean rating."""
    def __init__(self, m):
        Recommender.__init__(self, m)
    def predict(self, u, i, dbg=False):
        # u and i are deliberately ignored.
        return self.m.mu
class UserItemAverageRecommender(Recommender):
    """Baseline: predicts mu + item bias + user bias."""
    def __init__(self, m):
        Recommender.__init__(self, m)
    def predict(self, u, i, dbg=False):
        # r_t_predicted is the dense baseline built in Recommender.__init__.
        return self.r_t_predicted[u, i]
class CFNN(Recommender):
    """Item-based k-nearest-neighbour collaborative filtering, using the
    shrunk item-item similarities of the training matrix as weights."""
    def __init__(self, m, k):
        Recommender.__init__(self, m)
        print('k =', k)
        self.w = self.m.s_rt  # interpolation weights = training similarities
        self.k = k
        self.normalize = True
    def predict_basic_old(self, u, i, dbg=False):
        """Older variant: weighted average of the user's raw ratings on the
        k most similar items (no baseline offsets)."""
        # pdb.set_trace()
        n_u_i = self.m.similar_items(u, i, self.k)
        r = 0
        for j in n_u_i:
            if self.w[i, j] < 0: # resolve problems with near-zero weight sums
                continue
            r += self.w[i, j] * self.m.r[u, j]
        if self.normalize:
            if r != 0:
                s = sum(self.w[i, j] for j in n_u_i
                        # resolve problems with near-zero weight sums
                        if self.w[i, j] > 0
                        )
                if not np.isfinite(r/sum(self.w[i, j] for j in n_u_i)) or\
                        np.isnan(r/sum(self.w[i, j] for j in n_u_i)):
                    pdb.set_trace()
                r /= s
        return r
    def predict(self, u, i, dbg=False):
        """Baseline prediction plus the weighted average of the user's
        baseline-centered ratings on the k most similar items."""
        # the > 0 resolves problems with near-zero weight sums
        n_u_i = self.m.similar_items(u, i, self.k)
        r = 0
        for j in n_u_i:
            if self.w[i, j] < 0: # resolve problems with near-zero weight sums
                continue
            # diff = self.m.r[u, j] - (self.m.mu + self.m.b_u[u] + self.m.b_i[j])
            diff = self.m.r[u, j] - self.r_t_predicted[u, j]
            r += self.w[i, j] * diff
        # if (u, i) == (0, 0):
        #     print('    ', r)
        #     pdb.set_trace()
        # if dbg:
        #     print('r =', r)
        #     print('r (normalized) =', r / sum(self.w[i, j] for j in n_u_i))
        #     s = sum(self.w[i, j] for j in n_u_i)
        #     print('s =', s)
        #     pdb.set_trace()
        if self.normalize:
            if r != 0:
                s = sum(self.w[i, j] for j in n_u_i
                        # resolve problems with near-zero weight sums
                        if self.w[i, j] > 0
                        )
                if not np.isfinite(r/sum(self.w[i, j] for j in n_u_i)) or\
                        np.isnan(r/sum(self.w[i, j] for j in n_u_i)):
                    pdb.set_trace()
                r /= s
        return self.r_t_predicted[u, i] + r
class Factors(Recommender):
    """Latent-factor recommender (matrix factorization r ~ p . q^T) trained
    by batch gradient descent with an optional bold-driver learning rate."""
    def __init__(self, m, k, eta_type, nsteps=500, eta=0.000004,
                 regularize=False, newton=False, tol=0.5*1e-5, lamda=0.05,
                 init='random', reset_params=False):
        # k: number of latent factors; eta_type: 'constant', 'increasing' or
        # 'bold_driver'; init: 'svd' | 'random' | 'random_small' | 'zeros';
        # reset_params: undo the last step when the RMSE got worse.
        Recommender.__init__(self, m)
        self.k = k
        self.nsteps = nsteps
        self.eta = eta
        self.eta_type = eta_type
        self.regularize = regularize
        self.newton = newton
        self.tol = tol
        self.lamda = lamda
        self.reset_params = reset_params
        print(1)
        if init == 'svd':
            # init by Singular Value Decomposition
            m = self.m.rt.toarray()
            m[np.where(np.isnan(m))] = 0
            ps, ss, vs = np.linalg.svd(m)
            self.p = ps[:, :self.k]
            self.q = np.dot(np.diag(ss[:self.k]), vs[:self.k, :]).T
        elif init == 'random':
            # init randomly
            self.eta *= 15 # use a higher eta for random initialization
            self.p = np.random.random((self.m.rt.shape[0], self.k))
            self.q = np.random.random((self.m.rt.shape[1], self.k))
        elif init == 'random_small':
            self.eta *= 100 # use a higher eta for random initialization
            self.p = np.random.random((self.m.rt.shape[0], self.k)) / 100
            self.q = np.random.random((self.m.rt.shape[1], self.k)) / 100
        elif init == 'zeros':
            self.p = np.zeros((self.m.rt.shape[0], self.k))
            self.q = np.zeros((self.m.rt.shape[1], self.k))
        else:
            print('init method not supported')
            pdb.set_trace()
        print('init =', init)
        print('k =', k)
        print('lamda =', self.lamda)
        self.eta_init = self.eta
        print('eta = ', self.eta_init)
        print('eta_type = ', self.eta_type)
        self.factorize()
        # self.factorize_biased()
        print('init =', init)
        print('k =', k)
        print('lamda =', self.lamda)
        print('eta = ', self.eta_init)
        print('eta_type = ', self.eta_type)
        # self.plot_rmse('%.4f' % diff, suffix='init')
        print('test error: %.4f' % self.test_error())
    def predict(self, u, i, dbg=False):
        """Dot product of the user and item factor vectors."""
        p_u = self.p[u, :]
        q_i = self.q[i, :]
        return np.dot(p_u, q_i.T)
    def predict_biased(self, u, i):
        """Baseline (mu + b_u + b_i) plus the factor dot product; falls back
        to whatever biases are available when one of them is NaN."""
        b_xi = self.m.mu + self.m.b_u[u] + self.m.b_i[i]
        if np.isnan(b_xi):
            if np.isnan(self.m.b_u[u]) and np.isnan(self.m.b_i[i]):
                return self.m.mu
            elif np.isnan(self.m.b_u[u]):
                return self.m.mu + self.m.b_i[i]
            else:
                return self.m.mu + self.m.b_u[u]
        p_u = self.p[u, :]
        q_i = self.q[i, :]
        return b_xi + np.dot(p_u, q_i.T)
    def factorize(self):
        """Batch gradient descent on p and q over the masked training matrix.

        Stops when the RMSE improvement drops below ``tol`` or after
        ``nsteps``; the learning rate is adapted according to ``eta_type``.
        """
        test_rmse = []
        mrt = self.m.rt.toarray()
        masked = np.ma.array(mrt, mask=np.isnan(mrt))
        for m in xrange(self.nsteps):
            err = np.dot(self.p, self.q.T) - masked
            delta_p = np.ma.dot(err, self.q)
            delta_q = np.ma.dot(err.T, self.p)
            if self.regularize:
                delta_p += self.lamda * self.p
                delta_q += self.lamda * self.q
            self.p -= 2 * self.eta * delta_p
            self.q -= 2 * self.eta * delta_q
            self.rmse.append(self.training_error())
            print(m, 'eta = %.8f, rmse = %.8f' % (self.eta, self.rmse[-1]))
            # print(m, 'eta = %.8f, training_rmse = %.8f, test_rmse = %.8f' %
            #       (self.eta, self.rmse[-1], self.test_error()))
            if len(self.rmse) > 1:
                if abs(self.rmse[-1] - self.rmse[-2]) < self.tol:
                    break
                if self.rmse[-1] > self.rmse[-2]:
                    print('RMSE getting larger')
                    if self.reset_params:
                        self.p += 2 * self.eta * delta_p # reset parameters
                        self.q += 2 * self.eta * delta_q # reset parameters
                        del self.rmse[-1] # reset last error value
                    self.eta *= 0.5
                    if self.eta_type == 'constant':
                        break
                    elif self.eta_type == 'increasing':
                        break
                else:
                    if self.eta_type == 'constant':
                        pass
                    else: # 'increasing' or 'bold_driver'
                        self.eta *= 1.1
            if (m % 100) == 0:
                test_rmse.append(self.test_error())
                print('    TEST RMSE:')
                for idx, err in enumerate(test_rmse):
                    print('    %d | %.8f' % (idx * 100, err))
        print('    TEST RMSE:')
        for idx, err in enumerate(test_rmse):
            print('    %d | %.8f' % (idx * 100, err))
    def factorize_biased(self):
        """Variant of factorize() that trains around the mu + b_u + b_i
        baseline; switches predict() to predict_biased."""
        self.predict = self.predict_biased
        ucount = self.m.rt.shape[0]
        icount = self.m.rt.shape[1]
        B_u = np.tile(self.m.b_u, (icount, 1)).T
        B_i = np.tile(self.m.b_i, (ucount, 1))
        for m in xrange(self.nsteps):
            masked = np.ma.array(self.m.rt, mask=np.isnan(self.m.rt))
            err = np.dot(self.p, self.q.T) + self.m.mu + B_u + B_i - masked
            delta_p = np.ma.dot(err, self.q)
            delta_q = np.ma.dot(err.T, self.p)
            if self.regularize:
                delta_p += self.lamda * self.p
                delta_q += self.lamda * self.q
            self.p -= 2 * self.eta * delta_p
            self.q -= 2 * self.eta * delta_q
            self.rmse.append(self.training_error())
            # print(m, 'eta = %.8f, rmse = %.8f' % (self.eta, self.rmse[-1]))
            print(m, 'eta = %.8f, training_rmse = %.8f, test_rmse = %.8f' %
                  (self.eta, self.rmse[-1], self.test_error()))
            if len(self.rmse) > 1:
                if abs(self.rmse[-1] - self.rmse[-2]) < self.tol:
                    break
                if self.rmse[-1] > self.rmse[-2]:
                    print('RMSE getting larger')
                    self.p += 2 * self.eta * delta_p # reset parameters
                    self.q += 2 * self.eta * delta_q # reset parameters
                    del self.rmse[-1] # reset last error value
                    self.eta *= 0.5
                    if self.eta_type == 'constant':
                        break
                    elif self.eta_type == 'increasing':
                        break
                else:
                    if self.eta_type == 'constant':
                        pass
                    else: # 'increasing' or 'bold_driver'
                        self.eta *= 1.1
    def factorize_iterate(self):
        """Per-rating (element-wise) gradient descent variant.

        NOTE(review): reads ``self.m.rt_not_nan_indices``, which is not
        defined on UtilityMatrix in this file — confirm where it is set
        before using this method.
        """
        for m in xrange(self.nsteps):
            print(m, end='\r')
            delta_p = np.zeros((self.m.rt.shape[0], self.k))
            delta_q = np.zeros((self.m.rt.shape[1], self.k))
            for u, i in self.m.rt_not_nan_indices:
                error = np.dot(self.p[u, :], self.q[i, :]) - self.m.rt[u, i]
                for k in range(self.k):
                    delta_p[u, k] = error * self.p[u, k]
                    delta_q[i, k] = error * self.q[i, k]
            self.p -= 2 * self.eta * delta_p
            self.q -= 2 * self.eta * delta_q
            self.rmse.append(self.training_error())
            print(self.rmse[-1])
            if len(self.rmse) > 1:
                if abs(self.rmse[-1] - self.rmse[-2]) < self.tol:
                    break
class WeightedCFNN(CFNN):
    """CFNN variant that LEARNS the item-item interpolation weights by
    gradient descent instead of using cosine similarities directly."""
    def __init__(self, m, k, eta_type, init, nsteps=500, eta=0.00075,
                 tol=0.5*1e-5, lamda=0.05, regularize=False):
        # init: 'sim' (start from the similarities), 'random' or 'zeros'.
        Recommender.__init__(self, m)
        self.k = k
        self.nsteps = nsteps
        self.eta = eta
        self.eta_type = eta_type
        self.tol = tol
        self.regularize = regularize
        self.lamda = lamda
        self.normalize = False
        if init == 'sim':
            self.w = np.copy(self.m.s_rt)
        elif init == 'random':
            self.w = np.random.random((self.m.rt.shape[1], self.m.rt.shape[1]))
        elif init == 'zeros':
            self.w = np.zeros((self.m.rt.shape[1], self.m.rt.shape[1]))
        else:
            print('init method not supported')
        # w_init = np.copy(self.w)
        print('init =', init)
        print('k =', k)
        print('lamda =', self.lamda)
        print('eta =', self.eta)
        print('eta_type =', self.eta_type)
        self.interpolate_weights_old()
        print('init =', init)
        print('k =', k)
        print('lamda =', self.lamda)
        print('eta = ', self.eta)
        print('eta_type =', self.eta_type)
        # diff = np.linalg.norm(w_init - self.w)
        # self.plot_rmse('%.4f' % diff, suffix=init)
        print(self.__class__.__name__)
        print('test error: %.4f' % self.test_error())
    def interpolate_weights_old(self):
        """Learn w[i, j] by gradient descent on the training ratings.

        NOTE(review): relies on ``self.m.rt_nan_indices``, which is not
        defined on UtilityMatrix in this file — confirm where it is set.
        """
        print('ATTENTION not resetting larger values')
        icount = self.m.rt.shape[1]
        rt_nan_indices = set(self.m.rt_nan_indices)
        ucount = self.m.rt.shape[0]
        m = self.m
        test_rmse = []
        for step in xrange(self.nsteps):
            print(step, end='\r')
            delta_w_i_j = np.zeros((icount, icount))
            for i in xrange(icount):
                for u in xrange(ucount):
                    if (u, i) in rt_nan_indices:
                        continue
                    s_u_i = m.similar_items(u, i, self.k)
                    error = sum(self.w[i, k] * m.rt[u, k] for k in s_u_i) -\
                        m.rt[u, i]
                    for j in s_u_i:
                        delta_w_i_j[i, j] += error * m.rt[u, j]
                        if self.regularize:
                            delta_w_i_j[i, j] += self.lamda * self.w[i, j]
            self.w -= 2 * self.eta * delta_w_i_j
            self.rmse.append(self.training_error())
            print(step, 'eta = %.8f, rmse = %.8f' % (self.eta, self.rmse[-1]))
            if len(self.rmse) > 1:
                if abs(self.rmse[-1] - self.rmse[-2]) < self.tol:
                    break
                if self.rmse[-1] > self.rmse[-2]:
                    print('RMSE getting larger')
                    # self.w += 2 * self.eta * delta_w_i_j # reset parameters
                    # del self.rmse[-1]
                    self.eta *= 0.5
                    if self.eta_type == 'constant':
                        break
                    elif self.eta_type == 'increasing':
                        break
                else:
                    if self.eta_type == 'constant':
                        pass
                    else: # 'increasing' or 'bold_driver'
                        self.eta *= 1.05
            if (step % 100) == 0:
                test_rmse.append(self.test_error())
                print('    TEST RMSE:')
                for idx, err in enumerate(test_rmse):
                    print('    %d | %.8f' % (idx * 100, err))
        print('ATTENTION not resetting larger values')
        print('    TEST RMSE:')
        for idx, err in enumerate(test_rmse):
            print('    %d | %.8f' % (idx * 100, err))
    def interpolate_weights_new(self):
        """Baseline-centered variant of the weight learning.

        NOTE(review): also relies on the undefined ``rt_nan_indices``;
        additionally stores helper matrices b / rtb onto the shared
        UtilityMatrix instance as a side effect.
        """
        rt_nan_indices = set(self.m.rt_nan_indices)
        ucount = self.m.rt.shape[0]
        icount = self.m.rt.shape[1]
        B_u = np.tile(self.m.b_u, (icount, 1)).T
        B_i = np.tile(self.m.b_i, (ucount, 1))
        m = self.m
        m.b = self.m.mu + B_u + B_i
        m.rtb = self.m.rt - m.b
        for step in xrange(self.nsteps):
            print(step, end='\r')
            delta_w_i_j = np.zeros((icount, icount))
            for i in xrange(icount):
                for u in xrange(ucount):
                    if (u, i) in rt_nan_indices:
                        continue
                    s_u_i = m.similar_items(u, i, self.k)
                    error = m.b[u, i] - m.rt[u, i] +\
                        sum(self.w[i, k] * m.rtb[u, k] for k in s_u_i)
                    for j in s_u_i:
                        delta_w_i_j[i, j] += error * m.rtb[u, j]
                        if self.regularize:
                            delta_w_i_j[i, j] += self.lamda * self.w[i, j]
            self.w -= 2 * self.eta * delta_w_i_j
            self.rmse.append(self.training_error())
            print(step, 'eta = %.8f, rmse = %.8f' % (self.eta, self.rmse[-1]))
            if len(self.rmse) > 1:
                if abs(self.rmse[-1] - self.rmse[-2]) < self.tol:
                    break
                if self.rmse[-1] > self.rmse[-2]:
                    print('RMSE getting larger')
                    self.w += 2 * self.eta * delta_w_i_j # reset parameters
                    self.eta *= 0.5
                    del self.rmse[-1]
                    if self.eta_type == 'constant':
                        break
                    elif self.eta_type == 'increasing':
                        break
                else:
                    if self.eta_type == 'constant':
                        pass
                    else: # 'increasing' or 'bold_driver'
                        self.eta *= 1.1
class WeightedCFNNUnbiased(CFNN):
    """Item-based CF recommender that learns interpolation weights w by
    gradient descent on raw (unbiased) ratings."""

    def __init__(self, m, k, regularize, eta, eta_type, init,
                 nsteps=1000, tol=1e-5, lamda=0.05):
        # k: neighbourhood size; eta: learning rate; eta_type: 'constant' |
        # 'increasing' | 'bold_driver'; init: weight initialisation scheme.
        Recommender.__init__(self, m)
        self.k = k
        self.nsteps = nsteps
        self.eta = eta
        self.eta_type = eta_type
        self.tol = tol
        self.normalize = False
        self.regularize = regularize
        self.lamda = lamda
        if init == 'sim':
            # self.w = np.copy(self.m.s_rt)
            self.w = self.m.s_rt.toarray()
        elif init == 'random':
            self.w = np.random.random((self.m.rt.shape[1], self.m.rt.shape[1]))
        elif init == 'random_small':
            self.w = np.random.random((self.m.rt.shape[1], self.m.rt.shape[1]))/100
        elif init == 'zeros':
            self.w = np.zeros((self.m.rt.shape[1], self.m.rt.shape[1]))
        else:
            print('init method not supported')
        print('k =', k)
        print('eta =', self.eta)
        print('eta_type =', self.eta_type)
        print('init = ', init)
        self.interpolate_weights()
        print('k =', k)
        print('eta = ', self.eta)
        print('eta_type =', self.eta_type)
        print('init = ', init)
        print(self.__class__.__name__)
        print('test error: %.4f' % self.test_error())

    def predict(self, u, i, dbg=False):
        # Weighted sum of the user's ratings over the k items most similar to i.
        n_u_i = self.m.similar_items(u, i, self.k)
        r = sum(self.w[i, j] * self.m.r[u, j] for j in n_u_i)
        if self.normalize and r > 0:
            r /= sum(self.w[i, j] for j in n_u_i)
        return r

    def interpolate_weights(self):
        """Fit self.w by batch gradient descent, clamping weights to >= 0."""
        test_rmse = []
        # rt_nan_indices = set(self.m.rt_nan_indices)
        ucount = self.m.rt.shape[0]
        icount = self.m.rt.shape[1]
        m = self.m
        for step in xrange(self.nsteps):
            # print(step, end='\r')
            print(step)
            delta_w_i_j = np.zeros((icount, icount))
            # for i in xrange(icount):
            #     for u in xrange(ucount):
            #         if (u, i) in rt_nan_indices:
            #             continue
            for idx, (u, i, v) in self.m.rt_not_nan_iterator(idx=True):
                s_u_i = m.similar_items(u, i, self.k)
                error = sum(self.w[i, k] * m.rt[u, k] for k in s_u_i) -\
                    m.rt[u, i]
                for j in s_u_i:
                    delta_w_i_j[i, j] += error * m.rt[u, j]
                    if self.regularize:
                        delta_w_i_j[i, j] += self.lamda * self.w[i, j]
            # # update weights
            self.w -= 2 * self.eta * delta_w_i_j
            # # ensure weights >= 0
            self.w[self.w < 0] = 0
            self.rmse.append(self.training_error())
            print(step, 'eta = %.8f, training_rmse = %.8f' %
                  (self.eta, self.rmse[-1]))
            if len(self.rmse) > 1:
                if abs(self.rmse[-1] - self.rmse[-2]) < self.tol:
                    break  # converged
                if self.rmse[-1] > self.rmse[-2]:
                    # Diverging: undo the step and halve the learning rate.
                    print('RMSE getting larger')
                    self.w += 2 * self.eta * delta_w_i_j  # reset parameters
                    del self.rmse[-1]
                    self.eta *= 0.5
                    if self.eta_type == 'constant':
                        break
                    elif self.eta_type == 'increasing':
                        break
                else:
                    if self.eta_type == 'constant':
                        pass
                    else:  # 'increasing' or 'bold_driver'
                        # NOTE(review): eta is multiplied twice here (1.05 and
                        # 1.1) unlike the sibling implementations -- looks like
                        # a leftover duplicate; confirm the intended factor.
                        self.eta *= 1.05
                        self.eta *= 1.1
            if (step % 10) == 0:
                test_rmse.append(self.test_error())
                print(' TEST RMSE:')
                for idx, err in enumerate(test_rmse):
                    print(' %d | %.8f' % (idx * 10, err))
class WeightedCFNNBiased(CFNN):
    """Item-based CF recommender that learns interpolation weights by gradient
    descent on bias-corrected ratings (rt - (mu + b_u + b_i))."""

    def __init__(self, m, k, eta_type, init, nsteps=500, eta=0.00075,
                 tol=1e-5, lamda=0.05, regularize=False, reset_params=True, w=None):
        # w: optional pre-trained weight matrix; when given, `init` is ignored.
        Recommender.__init__(self, m)
        self.k = k
        self.nsteps = nsteps
        self.eta = eta
        self.eta_type = eta_type
        self.tol = tol
        self.regularize = regularize
        self.lamda = lamda
        self.normalize = False
        self.reset_params = reset_params
        if w is not None:
            self.w = w
        else:
            if init == 'sim':
                # self.w = np.copy(self.m.s_rt)
                self.w = self.m.s_rt.toarray()
            elif init == 'random':
                self.w = np.random.random((self.m.rt.shape[1], self.m.rt.shape[1]))
            elif init == 'random_small':
                self.w = np.random.random((self.m.rt.shape[1], self.m.rt.shape[1])) / 1000
            elif init == 'zeros':
                self.w = np.zeros((self.m.rt.shape[1], self.m.rt.shape[1]))
            else:
                print('init method not supported')
                pdb.set_trace()
        # w_init = np.copy(self.w)
        print('init =', init)
        print('k =', k)
        print('lamda =', self.lamda)
        print('eta =', self.eta)
        print('eta_type =', self.eta_type)
        print('nsteps = ', nsteps)
        self.interpolate_weights()
        print('init =', init)
        print('k =', k)
        print('lamda =', self.lamda)
        print('eta = ', self.eta)
        print('eta_type =', self.eta_type)
        print('nsteps = ', nsteps)
        # diff = np.linalg.norm(w_init - self.w)
        # self.plot_rmse('%.4f' % diff, suffix=init)
        print(self.__class__.__name__)
        print('test error: %.4f' % self.test_error())

    def predict(self, u, i, dbg=False):
        # predict an item-based CF rating based on the training data
        # Baseline prediction plus a weighted sum of neighbour residuals.
        n_u_i = self.m.similar_items(u, i, self.k)
        r = 0
        for j in n_u_i:
            diff = self.m.r[u, j] - self.r_t_predicted[u, j]
            r += self.w[i, j] * diff
        if self.normalize:
            if r != 0:
                s = sum(self.w[i, j] for j in n_u_i)
                if not np.isfinite(r/sum(self.w[i, j] for j in n_u_i)) or\
                        np.isnan(r/sum(self.w[i, j] for j in n_u_i)):
                    pdb.set_trace()
                r /= s
        return self.r_t_predicted[u, i] + r

    def interpolate_weights(self):
        """Fit self.w by batch gradient descent on the biased residuals."""
        test_rmse = []
        # rt_nan_indices = set(self.m.rt_nan_indices)
        ucount = self.m.rt.shape[0]
        icount = self.m.rt.shape[1]
        B_u = np.tile(self.m.b_u, (icount, 1)).T
        B_i = np.tile(self.m.b_i, (ucount, 1))
        m = self.m
        m.b = self.m.mu + B_u + B_i   # baseline prediction matrix
        m.rtb = self.m.rt - m.b       # residuals after baseline removal
        nnz = self.m.rt.nnz
        for step in xrange(self.nsteps):
            delta_w_i_j = np.zeros((icount, icount))
            # for i in xrange(icount):
            #     for u in xrange(ucount):
            #         if (u, i) in rt_nan_indices:
            #             continue
            for idx, (u, i, v) in self.m.rt_not_nan_iterator(idx=True):
                # print('\r ', idx, '/', nnz, end='')
                s_u_i = m.similar_items(u, i, self.k)
                error = m.b[u, i] - m.rt[u, i] + sum(self.w[i, k] * m.rtb[u, k] for k in s_u_i)
                for j in s_u_i:
                    delta_w_i_j[i, j] += error * m.rtb[u, j]
                    if self.regularize:
                        delta_w_i_j[i, j] += self.lamda * self.w[i, j]
            self.w -= 2 * self.eta * delta_w_i_j
            self.rmse.append(self.training_error())
            # print(step, 'eta = %.8f, training_rmse = %.8f, test_rmse = %.8f' %
            #       (self.eta, self.rmse[-1], self.test_error()))
            print(step, 'eta = %.8f, training_rmse = %.8f' %
                  (self.eta, self.rmse[-1]))
            if len(self.rmse) > 1:
                if abs(self.rmse[-1] - self.rmse[-2]) < self.tol:
                    break  # converged
                if self.rmse[-1] > self.rmse[-2]:
                    print('RMSE getting larger')
                    # NOTE(review): reconstruction assumes reset_params guards
                    # the whole undo (weights, eta, rmse) -- confirm nesting.
                    if self.reset_params:
                        self.w += 2 * self.eta * delta_w_i_j  # reset parameters
                        self.eta *= 0.5
                        del self.rmse[-1]
                    if self.eta_type == 'constant':
                        break
                    elif self.eta_type == 'increasing':
                        break
                else:
                    if self.eta_type == 'constant':
                        pass
                    else:  # 'increasing' or 'bold_driver'
                        self.eta *= 1.1
            if (step % 10) == 0:
                test_rmse.append(self.test_error())
                print(' {}'.format(datetime.datetime.now()))
                print(' TEST RMSE:')
                for idx, err in enumerate(test_rmse):
                    print(' %d | %.8f' % (idx * 10, err))
def read_movie_lens_data(filename='u1.test'):
    """Read a MovieLens-style tab-separated ratings file into a dense matrix.

    Parameters
    ----------
    filename : str
        Path to a file with lines ``user<TAB>movie<TAB>rating``.
        Defaults to ``'u1.test'`` (the original hard-coded name).

    Returns
    -------
    np.ndarray
        A (num_users x num_movies) float matrix, NaN where a user did not
        rate a movie. Movie columns are sorted by movie id so the layout is
        deterministic (the original relied on arbitrary set iteration order).
    """
    import csv
    ratings = {}
    movies = set()
    # Context manager closes the handle (the original leaked it).
    with open(filename, 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter='\t')
        for row in reader:
            user, movie, rating = row[0], row[1], row[2]
            ratings.setdefault(user, []).append((movie, rating))
            movies.add(movie)
    # O(1) dict lookup per rating instead of O(n) list.index.
    col = {movie: j for j, movie in enumerate(sorted(movies))}
    r = np.zeros([len(ratings), len(movies)])
    r.fill(np.nan)
    for i, user in enumerate(ratings):
        for movie, rating in ratings[user]:
            r[i, col[movie]] = rating  # numpy coerces the rating string to float
    return r
if __name__ == '__main__':
    # Driver script: build a UtilityMatrix from one of three data sources
    # (selected by the hard-coded if/elif constants) and train a recommender.
    # start_time = datetime.datetime.now()
    np.set_printoptions(precision=2)
    # np.random.seed(0)
    similarities = True
    dataset = ''
    if 0:  # disabled: load a pre-computed sparse rating matrix from disk
        # dataset = 'movielens'
        dataset = 'imdb'
        m = np.load(
            'data/' + dataset +
            '/recommendation_data/RatingBasedRecommender_um_sparse.obj.npy'
        )
        m = m.item()
        m = m.astype(float)
        # pdb.set_trace()
        # pdb.set_trace()
        # data = [m[u, :].nonzero()[0].shape[0] for u in range(m.shape[0])]
        um = UtilityMatrix(m, similarities=similarities)
    elif 0:  # disabled: tiny hand-written matrix with held-out test entries
        m = scipy.sparse.csr_matrix(np.array([  # simple test case
            [5, 1, 0, 2, 2, 4, 3, 2],
            [1, 5, 2, 5, 5, 1, 1, 4],
            [2, 0, 3, 5, 4, 1, 2, 4],
            [4, 3, 5, 3, 0, 5, 3, 0],
            [2, 0, 1, 3, 0, 2, 5, 3],
            [4, 1, 0, 1, 0, 4, 3, 2],
            [4, 2, 1, 1, 0, 5, 4, 1],
            [5, 2, 2, 0, 2, 5, 4, 1],
            [4, 3, 3, 0, 0, 4, 3, 0]
        ]))
        hidden = np.array([
            [6, 2, 0, 2, 2, 5, 3, 0, 1, 1],
            [1, 2, 0, 4, 5, 3, 2, 3, 0, 4]
        ])
        um = UtilityMatrix(m, hidden=hidden, similarities=similarities)
    elif 1:  # active: read ratings from a small MovieLens-style tsv file
        import csv
        csvfile = open('data/ml_small.csv', 'r')
        reader = csv.reader(csvfile, delimiter='\t')
        ratings = {}
        movies = set()
        for row in reader:
            user = row[0]
            movie = row[1]
            rating = row[2]
            if user not in ratings:
                ratings[user] = [(movie, rating)]
            else:
                ratings[user].append((movie, rating))
            movies.add(movie)
        m = list(movies)
        r = np.zeros([len(ratings), len(movies)])
        # r.fill(np.nan)
        i = 0
        for user in ratings:
            uratings = ratings[user]
            for rating in uratings:
                r[i, m.index(rating[0])] = rating[1]
            i += 1
        um = UtilityMatrix(scipy.sparse.csr_matrix(r), similarities=similarities)
    # m = np.array([  # simple test case 2
    #     [1, 5, 5, np.NAN, np.NAN, np.NAN],
    #     [2, 4, 3, np.NAN, np.NAN, np.NAN],
    #     [1, 4, 5, np.NAN, np.NAN, np.NAN],
    #     [1, 5, 5, np.NAN, np.NAN, np.NAN],
    #
    #     [np.NAN, np.NAN, np.NAN, 1, 2, 3],
    #     [np.NAN, np.NAN, np.NAN, 2, 1, 3],
    #     [np.NAN, np.NAN, np.NAN, 3, 2, 2],
    #     [np.NAN, np.NAN, np.NAN, 4, 3, 3],
    # ])
    # hidden = np.array([
    #     [0, 1, 3, 4, 5],
    #     [1, 2, 0, 4, 5]
    # ])
    # um = UtilityMatrix(m, hidden=hidden)
    # cfnn = CFNN(um, k=5); cfnn.print_test_error()
    # f = Factors(um, k=5, nsteps=500, eta_type='increasing', regularize=True, eta=0.00001, init='random')
    # w = WeightedCFNN(um, eta_type='increasing', k=5, eta=0.000001, regularize=True, init='random')
    # w = WeightedCFNN(um, eta_type='increasing', k=5, eta=0.001, regularize=True, init_sim=True)
    # w = WeightedCFNN(um, eta_type='bold_driver', k=5, eta=0.001, regularize=True, init_sim=False)
    start_time = datetime.datetime.now()
    print(dataset)
    # gar = GlobalAverageRecommender(um); gar.print_test_error()
    # uiar = UserItemAverageRecommender(um); uiar.print_test_error()
    #
    # for k in [
    #     1,
    #     2,
    #     5,
    #     10,
    #     15,
    #     20,
    #     25,
    #     40,
    #     50,
    #     60,
    #     80,
    #     100
    # ]:
    #     cfnn = CFNN(um, k=k); cfnn.print_test_error()
    # f = Factors(
    #     um,
    #     k=25,
    #     nsteps=1000,
    #     eta_type='bold_driver',
    #     eta=0.0000001,
    #     init='random',
    #     regularize=True,
    # )
    # wf = WeightedCFNNUnbiased(um, k=5, eta=0.0001, regularize=True,
    #                           eta_type='bold_driver', init='random')
    # Train the biased weighted model and report the learned weights/timing.
    wf = WeightedCFNNBiased(
        um,
        k=25,
        eta_type='bold_driver',
        eta=0.00001,
        init='random',
        regularize=True
    )
    print(wf.w)
    print(dataset)
    end_time = datetime.datetime.now()
    print('Duration: {}'.format(end_time - start_time))
|
def stopgo(status: str):
    """Map a traffic command ('stop'/'go', any letter case) to a light colour.

    Returns 'red' for 'stop', 'green' for 'go', and None for anything else
    (same implicit behaviour as the original if/elif chain).
    """
    colours = {'stop': 'red', 'go': 'green'}
    return colours.get(status.lower())


print(stopgo('stop'))
print(stopgo('go'))
"""OctreeDisplayOptions, NormalNoise and OctreeMetadata classes.
"""
from __future__ import annotations
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, NamedTuple
import numpy as np
from napari.utils.config import octree_config
if TYPE_CHECKING:
from napari.components.experimental.chunk import LayerRef
def _get_tile_size() -> int:
    """Return the configured default tile size, falling back to 256.

    Returns
    -------
    int
        ``octree_config['octree']['tile_size']`` when octree config is
        enabled, otherwise 256.
    """
    if octree_config:
        return octree_config['octree']['tile_size']
    return 256
@dataclass
class OctreeDisplayOptions:
    """Options for how to display the octree.

    Attributes
    -----------
    tile_size : int
        The size of the display tiles, for example 256.
    freeze_level : bool
        If True we do not automatically pick the right data level.
    track_view : bool
        If True the displayed tiles track the view, the normal mode.
    show_grid : bool
        If True draw a grid around the tiles for debugging or demos.
    """
    # NOTE(review): the class defines an explicit __init__, so @dataclass does
    # NOT generate one; the field declarations below act as class-level
    # defaults only. Confirm this interplay is intended.
    def __init__(self) -> None:
        self._show_grid = True
        # TODO_OCTREE we set this after __init__ which is messy.
        self.loaded_event = None  # callable invoked to trigger a redraw

    @property
    def show_grid(self) -> bool:
        """True if we are drawing a grid on top of the tiles.

        Returns
        -------
        bool
            True if we are drawing a grid on top of the tiles.
        """
        return self._show_grid

    @show_grid.setter
    def show_grid(self, show: bool) -> None:
        """Set whether we should draw a grid on top of the tiles.

        Parameters
        ----------
        show : bool
            True if we should draw a grid on top of the tiles.
        """
        if self._show_grid != show:
            self._show_grid = show
            self.loaded_event()  # redraw

    tile_size: int = field(default_factory=_get_tile_size)
    freeze_level: bool = False
    track_view: bool = True
class NormalNoise(NamedTuple):
    """Parameters of normally distributed noise."""

    mean: float = 0
    std_dev: float = 0

    @property
    def is_zero(self) -> bool:
        """Whether this configuration produces no noise at all.

        Returns
        -------
        bool
            True when both mean and standard deviation are zero.
        """
        return (self.mean, self.std_dev) == (0, 0)

    @property
    def get_value(self) -> float:
        """One random sample drawn from N(mean, std_dev).

        Returns
        -------
        float
            The sampled value.
        """
        return np.random.normal(self.mean, self.std_dev)
class OctreeMetadata(NamedTuple):
    """Metadata describing one octree.

    Attributes
    ----------
    layer_ref : LayerRef
        Reference to the layer this octree belongs to.
    base_shape : np.ndarray
        Full-resolution [height, width] shape of the whole image.
    num_levels : int
        Number of levels in the octree.
    tile_size : int
        Default tile size; an individual OctreeLevel may override it
        (e.g. a root level sized to hold the image in one tile).
        TODO_OCTREE: larger per-level tiles are not supported yet.
    """

    layer_ref: LayerRef
    base_shape: np.ndarray
    num_levels: int
    tile_size: int

    @property
    def aspect_ratio(self):
        """Width:height ratio of the base image (e.g. 16:9 -> ~1.77)."""
        height, width = self.base_shape[0], self.base_shape[1]
        return width / height
def spiral_index(row_range, col_range):
    """Yield (row, col) tuples in spiral order over the given index ranges.

    The spiral starts at the center cell and winds outwards; the first move
    is downwards.

    Parameters
    ----------
    row_range : range
        Range of rows to be accessed.
    col_range : range
        Range of columns to be accessed.

    Yields
    ------
    tuple
        (row, column) pairs in spiral order.
    """
    n_rows = row_range.stop - row_range.start
    n_cols = col_range.stop - col_range.start
    # Center cell offset (rounded up, then shifted back by one).
    mid_row = int(np.ceil((row_range.stop + row_range.start) / 2) - 1)
    mid_col = int(np.ceil((col_range.stop + col_range.start) / 2) - 1)
    r, c = 0, 0
    dr, dc = 0, -1  # first step moves down
    for _ in range(max(n_rows, n_cols) ** 2):
        inside_rows = -n_rows // 2 < r <= n_rows // 2
        inside_cols = -n_cols // 2 < c <= n_cols // 2
        if inside_rows and inside_cols:
            yield (mid_row + r, mid_col + c)
        # Rotate direction at the spiral's corner cells.
        if r == c or (r < 0 and r == -c) or (r > 0 and r == 1 - c):
            dr, dc = -dc, dr
        r, c = r + dr, c + dc
def linear_index(row_range, col_range):
    """Yield (row, col) tuples in raster (row-major) order.

    Parameters
    ----------
    row_range : range
        Range of rows to be accessed.
    col_range : range
        Range of columns to be accessed.

    Yields
    ------
    tuple
        (row, column) pairs, top-left first, scanning each row left to right.
    """
    for row in row_range:
        for col in col_range:
            yield (row, col)
|
'''Restful API request to coinflex
Return coinflex exchange data requested thru RESTFUL api
Author: Tang Wei <tw7613781@gmail.com>
Created: Jan 21, 2022
'''
import os
import base64
import hmac
import hashlib
import datetime
import requests
from urllib.parse import urlencode
from dotenv import load_dotenv
from utils import print_error, current_milli_ts
load_dotenv()
TERM_RED = '\033[1;31m'
TERM_NFMT = '\033[0;0m'
TERM_BLUE = '\033[1;34m'
TERM_GREEN = '\033[1;32m'
HOST= 'https://v2api.coinflex.com'
PATH= 'v2api.coinflex.com'
api_key = os.getenv('APIKEY')
api_secret = os.getenv('APISECRET')
nonce = 888888
def private_call(method, options=None, action='GET'):
    '''
    Sign and send an authenticated request to the exchange.

    method: private call path, e.g. '/v2/balances'
    options: optional dict of query parameters, e.g.
             {'key1': 'value1', 'key2': 'value2'}
    action: HTTP verb, 'GET' or 'POST'

    Returns the decoded JSON response.
    '''
    # BUG FIX: the default was a mutable dict ({}), which is shared across
    # calls; use None and create a fresh dict per call instead.
    if options is None:
        options = {}
    ts = datetime.datetime.utcnow().isoformat()
    body = urlencode(options)
    # Parameters travel in the query string (also for POST, per the API spec).
    path = method + '?' + body if options else method
    # Signature covers timestamp, nonce, verb, host, path and body.
    msg_string = '{}\n{}\n{}\n{}\n{}\n{}'.format(ts, nonce, action, PATH, method, body)
    sig = base64.b64encode(hmac.new(api_secret.encode('utf-8'), msg_string.encode('utf-8'), hashlib.sha256).digest()).decode('utf-8')
    header = {'Content-Type': 'application/json', 'AccessKey': api_key,
              'Timestamp': ts, 'Signature': sig, 'Nonce': str(nonce)}
    if action == 'GET':
        resp = requests.get(HOST + path, headers=header)
    elif action == 'POST':
        resp = requests.post(HOST + path, headers=header)
    print(HOST + path)
    return resp.json()
def isAlive() -> bool:
    'public GET /v2/ping'
    try:
        response = requests.get(HOST + '/v2/ping')
        return response.json()['success']
    except Exception as err:
        print_error('isAlive', err)
def getOrderBook(market, depth):
    'get order books of specific trading market with specific depth'
    try:
        return requests.get(f'{HOST}/v2/depth/{market}/{depth}').json()
    except Exception as err:
        print_error('getOrderBook', err)
def getBalance():
    '''
    Fetch all account balances (private GET /v2/balances).
    '''
    try:
        return private_call('/v2/balances')
    except Exception as err:
        print_error('getBalance', err)
def getBalanceBySymbol(symbol):
    '''
    Fetch the account balance for one symbol (private GET /v2/balances/{symbol}).
    '''
    try:
        return private_call(f'/v2/balances/{symbol}')
    except Exception as err:
        print_error('getBalanceBySymbol', err)
def getPositions():
    '''
    Fetch all account positions (private GET /v2/positions).
    '''
    try:
        return private_call('/v2/positions')
    except Exception as err:
        print_error('getPositions', err)
def getPositionsBySymbol(symbol):
    '''
    Fetch the account position for one symbol (private GET /v2/positions/{symbol}).
    '''
    try:
        return private_call(f'/v2/positions/{symbol}')
    except Exception as err:
        print_error('getPositionsBySymbol', err)
def getOrders():
    '''
    Fetch the account's unfilled orders (private GET /v2/orders).
    '''
    try:
        return private_call('/v2/orders')
    except Exception as err:
        print_error('getOrders', err)
def getOrdersByMarket(market):
    '''
    Fetch all of the account's orders for one market (private GET /v2.1/orders).
    '''
    try:
        return private_call('/v2.1/orders', {'marketCode': market})
    except Exception as err:
        print_error('getOrdersByMarket', err)
# def placeLimitOrder(market, side, quantity, price):
# '''
# place a order with options
# '''
# try:
# endpoint = '/v2/orders/place'
# return(private_call(endpoint, {
# 'responseType': 'FULL',
# 'orders': [
# {
# 'clientOrderId': str(current_milli_ts()),
# 'marketCode': market,
# 'side': side,
# 'quantity': quantity,
# 'orderType': 'LIMIT',
# 'price': price
# }
# ]
# }, 'POST'))
# except Exception as err:
# print_error('placeLimitOrder', err)
if __name__ == '__main__':
    # Smoke-test each endpoint in turn. Private calls require the APIKEY and
    # APISECRET environment variables (loaded via dotenv at module import).
    # print(placeLimitOrder('FLEX-USD', 'BUY', '1', '4.5'))
    print(f'{TERM_BLUE}1. public /v2/ping{TERM_NFMT}')
    print(isAlive())
    print(f'{TERM_BLUE}2. public /v2/depth/FLEX-USD/10{TERM_NFMT}')
    print(getOrderBook('FLEX-USD', 10))
    print(f'{TERM_BLUE}3. private /v2/balances{TERM_NFMT}')
    print(getBalance())
    print(f'{TERM_BLUE}4. private /v2/balances/USD{TERM_NFMT}')
    print(getBalanceBySymbol('USD'))
    print(f'{TERM_BLUE}5. private /v2/positions{TERM_NFMT}')
    print(getPositions())
    print(f'{TERM_BLUE}6. private /v2/positions/ETH{TERM_NFMT}')
    print(getPositionsBySymbol('ETH-USD-SWAP-LIN'))
    print(f'{TERM_BLUE}7. private /v2/orders{TERM_NFMT}')
    print(getOrders())
    print(f'{TERM_BLUE}8. private /v2.1/orders?marketCode=FLEX-USD{TERM_NFMT}')
    print(getOrdersByMarket('FLEX-USD'))
|
def find_missing_letter(chars):
    """Return the single missing letter in a run of consecutive letters.

    `chars` is a list of increasing, same-case letters with exactly one
    letter missing (e.g. ['a','b','c','d','f'] -> 'e').
    """
    # BUG FIX: the original also had an `ord(cur) - ord(next) == 1` branch
    # which can never describe an increasing sequence and would return a
    # wrong letter if it ever fired; for valid input the only signal is a
    # gap of exactly 2 between adjacent letters.
    for left, right in zip(chars, chars[1:]):
        if ord(right) - ord(left) == 2:
            return chr(ord(left) + 1)
'''
#Find the missing letter
Write a method that takes an array of consecutive (increasing) letters as
input and that returns the missing letter in the array.
You will always get a valid array, and exactly one letter will always be missing.
The length of the array will always be at least 2.
The array will always contain letters in only one case.
Example:
['a','b','c','d','f'] -> 'e'
['O','Q','R','S'] -> 'P'
(Use the English alphabet with 26 letters!)
Have fun coding it and please don't forget to vote and rank this kata! :-)
I have also created other katas. Take a look if you enjoyed this kata!
'''
|
# -*- coding: utf-8 -*-
from collections import deque
class Solution:
    def maxSlidingWindow(self, nums, k):
        """Return the maximum of every length-k sliding window of nums.

        Uses a monotonic deque of indices whose values are decreasing, so
        the window maximum is always at the front. O(n) overall.
        """
        window = deque()  # indices into nums; values strictly decreasing
        maxima = []
        for idx, value in enumerate(nums):
            # Drop indices whose values can never be a window maximum again.
            while window and value >= nums[window[-1]]:
                window.pop()
            window.append(idx)
            # Evict the front index once it slides out of the window.
            if idx >= k and window and window[0] == idx - k:
                window.popleft()
            # Record a result once the first full window is formed.
            if idx >= k - 1:
                maxima.append(nums[window[0]])
        return maxima
if __name__ == "__main__":
solution = Solution()
assert [3, 3, 5, 5, 6, 7] == solution.maxSlidingWindow(
[1, 3, -1, -3, 5, 3, 6, 7], 3
)
|
from SpeechRecognition import speech,textTospeech
from sel import search,open_url
from execute import fileopen
from reminder import reminder
from mailSystem import mailInforamtion
from image import imageCaputre
def assist():
    """Listen for one spoken command and dispatch it to the matching action.

    Recognised commands: 'search <query>', 'open <url-with-dot>',
    'open <filename>', 'set reminder', 'send mail', 'click photo'.
    Anything else triggers a spoken help message.
    """
    spoken = speech()
    print("You said : ", spoken)
    words = spoken.lower().split(" ")
    phrase = " ".join(words)
    rest = ' '.join(words[1:])
    if words[0] == 'search':
        search(rest)
    elif words[0] == 'open' and "." in words[-1]:
        # A dot in the last token marks a URL rather than a file.
        open_url(rest)
    elif words[0] == 'open':
        fileopen(rest)
    elif phrase == 'set reminder':
        reminder()
    elif phrase == 'send mail':
        mailInforamtion()
    elif phrase == 'click photo':
        imageCaputre()
    else:
        textTospeech(
            "Sorry Unable to recognize any command try including" + '\n\n'
            + "search than sentence you want to search " + '\n'
            + "open than filename" + '\n' + ''
        )
if __name__ == '__main__':
    # Run a single listen/dispatch cycle when executed as a script.
    assist()
import os
import subprocess
#OS selection
def os_select():
    """Clear the screen, show the OS menu and return the user's numeric choice.

    Returns 1 for Windows, 2 for Linux-Debian (whatever integer the user types).
    """
    os.system("clear")
    print("\t\t\t\t OS SELECTION \n")
    print("Supported OS: \n 1.Windows \n 2.Linux-Debian \n")
    return int(input("Enter the choice: "))
# data input: project location, name, and whether to push to GitHub
prj_path = str(input("\nFile path: "))
prj_name = str(input("Project Name: "))
file_statement = "mkdir " + prj_path + prj_name
init_statement = "cd " + prj_path + prj_name + " && git init"
print("Are you going to push this to Github? (Y|N): ")
opt = str(input())
# BUG FIX: `str.lower(opt)` returned a lowered copy that was discarded, so an
# upper-case 'Y'/'N' answer was never recognised; keep the lowered value.
opt = opt.lower()
if opt == 'y':
    push_code = str(input("PUSH CODE: "))
    repos = str(input("Remote Repos Name: "))
    selection = os_select()
elif opt == 'n':
    selection = os_select()
else:
    os.system("clear")
    print("ERORR")
#Git Push
def push(statement):
os.system("cd "+prj_path + prj_name + " && git remote add " +repos+" " + statement + " && git push -u "+repos+ " master")
# process: create the project folder, init git, add/commit files, optionally push
if selection == 1:
    print("windows")  # Windows flow not implemented
elif selection == 2:
    # folder create
    os.system(file_statement)
    print("folder created\n")
    # git initialise
    os.system(init_statement)
    # file create; BUG FIX: answers were compared against 'y' without lowering,
    # so 'Y' was silently treated as 'no' -- lower each answer once.
    file_name = ""  # BUG FIX: avoid NameError below when no file is created
    c_opt = str(input("Do you want to create a file (Y|N): ")).lower()
    while c_opt == 'y':
        os.system("clear")
        file_name = str(input("File name: "))
        os.system("touch " + prj_path + prj_name + "/" + file_name)  # create an empty file
        c_opt = str(input("\n Do you want to create one more file? (Y|N): ")).lower()
    e = 'y'
    git_status = "clear" + "&& cd " + prj_path + prj_name + "&& git status"
    git_commit = "cd " + prj_path + prj_name + " && git commit " + file_name + " -m "
    git_log = "cd " + prj_path + prj_name + " && git log"
    os.system(git_status)  # list the files
    while e == 'y':
        info = str(input("Enter the file name to add: "))
        os.system("cd " + prj_path + prj_name + "&& git add " + info)
        e = str(input("Do u want to add one more file (Y|N): ")).lower()
    # Push to remote
    if opt == 'y':
        os.system("clear")
        os.system(git_status)
        print("Project has been created and files as beed added")
        # git commit
        commit_msg = input("Enter the commit mssg: ")
        os.system(git_commit + '"' + commit_msg + '"')
        print(git_commit + '"' + commit_msg + '"')
        os.system(git_status)
        os.system(git_log)
        print("\n\n Pushing the repos\n\n")
        push(push_code)
    elif opt == 'n':
        os.system("clear")
        os.system(git_status)
        print("Project has been created and files as beed added")
        o = input("Do you want to commit it now(Y|N): ")
        # BUG FIX: `str.lower(o)` discarded its result; keep the lowered value.
        o = o.lower()
        # git commit
        if o == 'y':
            commit_msg = input("Enter the commit mssg: ")
            os.system(git_commit + '"' + commit_msg + '"')
            print(git_commit + '"' + commit_msg + '"')
            os.system(git_status)
            os.system(git_log)
else:
    print("Error")
|
#!/usr/bin/env python3
import minimalmodbus
import time
import random
# Modbus slave under test and serial-link configuration.
slave_id = 0x10
slave_baudrate = 9600
serial_port = '/dev/ttyUSB0'
# Which test groups to run below.
test_M90E26 = False
test_many_read = True
test_many_write = True
# Open the RTU instrument on the serial port with a 1 s response timeout.
instrumentA = minimalmodbus.Instrument(serial_port, slave_id)
instrumentA.serial.baudrate = slave_baudrate
instrumentA.serial.timeout = 1
instrumentA.mode = minimalmodbus.MODE_RTU
def modbus_read(address, instrument, data_count=None, errors=False, attempts=10):
    """Read one register (or `data_count` registers) with retries.

    On slave-reported errors the read is retried immediately; on missing
    responses it retries after a 5 s pause. With errors=True the exception
    is re-raised instead. Returns the register value(s), or the string
    "error" when every attempt failed.
    """
    for _ in range(attempts):
        try:
            if data_count:
                result = instrument.read_registers(address, data_count)
            else:
                result = instrument.read_register(address, 0)
        except minimalmodbus.SlaveReportedException as e:
            print(e)
            if errors:
                raise
        except minimalmodbus.NoResponseError as e:
            print(e)
            if errors:
                raise
            time.sleep(5)
        else:
            return result
    return "error"
def modbus_write(address, data, instrument, errors=False, attempts=10):
    """Write `data` to a single holding register (function code 6) with retries.

    Slave-reported errors retry immediately; missing responses retry after a
    5 s pause. With errors=True the exception is re-raised. Prints
    'modbus write error' if every attempt failed; returns None either way.
    """
    for _ in range(attempts):
        try:
            instrument.write_register(address, data, functioncode=6)
        except minimalmodbus.SlaveReportedException as e:
            print(e)
            if errors:
                raise
        except minimalmodbus.NoResponseError as e:
            print(e)
            if errors:
                raise
            time.sleep(5)
        else:
            return
    print('modbus write error')
def modbus_write_many(address, data, instrument, errors=False, attempts=10):
    """Write a list of values to consecutive registers starting at `address`.

    Retry semantics match modbus_write: slave-reported errors retry
    immediately, missing responses retry after a 5 s pause, errors=True
    re-raises. Prints 'modbus write error' if all attempts fail.
    """
    for _ in range(attempts):
        try:
            instrument.write_registers(address, data)
        except minimalmodbus.SlaveReportedException as e:
            print(e)
            if errors:
                raise
        except minimalmodbus.NoResponseError as e:
            print(e)
            if errors:
                raise
            time.sleep(5)
        else:
            return
    print('modbus write error')
# NOTE(review): indentation reconstructed from context; confirm which
# sections are nested under which test_* flag.
if test_M90E26:
    # Exercise the M90E26 energy-meter registers (1-3) and peripherals.
    print('M90E26')
    print(modbus_read(1, instrumentA))
    print(modbus_read(2, instrumentA))
    print(modbus_read(3, instrumentA))
    modbus_write(3, 7531, instrumentA)
    print(modbus_read(3, instrumentA))
    print('relays')
    print(modbus_read(128, instrumentA))
    print(modbus_read(129, instrumentA))
    print('ds18b20')
    print(modbus_read(132, instrumentA))
    print(modbus_read(133, instrumentA))
    print('debuging')
    print('OSCCAL')
    print('OSCCAL register value: {}'.format(modbus_read(144, instrumentA)))
    print('OSCCAL EEPROM value: {}'.format(modbus_read(145, instrumentA)))
    #instrumentA.write_register(145, 165, functioncode=6)
    #instrumentA.write_register(145, 255, functioncode=6)
    #print('OSCCAL EEPROM value: {}'.format(instrumentA.read_register(145, 1)*10))
if test_many_read:
    # Multi-register reads.
    print('read many')
    print(modbus_read(132, instrumentA, data_count=2))
    print(modbus_read(144, instrumentA, data_count=2))
    print(modbus_read(136, instrumentA, data_count=4))
if test_many_write:
    # Multi-register write of two random relay states, then read back.
    print('write many')
    modbus_write_many(128, [random.randint(0,1),random.randint(0,1)], instrumentA)
    time.sleep(0.2)
    print('{} {}'.format(modbus_read(128, instrumentA),modbus_read(129, instrumentA)))
    modbus_write_many(128, [0,0], instrumentA)
print('relays')
for i in range(5):
    # Toggle both relays randomly and read back their state.
    modbus_write(128, random.randint(0,1), instrumentA)
    modbus_write(129, random.randint(0,1), instrumentA)
    time.sleep(0.2)
    print('{} {}'.format(modbus_read(128, instrumentA),modbus_read(129, instrumentA)))
modbus_write(128, 0, instrumentA)
modbus_write(129, 0, instrumentA)
print('errors')
# Diagnostic error counters kept by the slave firmware.
print('M90E26 read errors: {}'.format(modbus_read(136, instrumentA)))
print('M90E26 write errors: {}'.format(modbus_read(137, instrumentA)))
print('modbus all errors: {}'.format(modbus_read(138, instrumentA)))
print('modbus CRC errors: {}'.format(modbus_read(139, instrumentA)))
print('exceptions testing')
print(modbus_read(145, instrumentA, data_count=2, attempts=1))
|
import logging
from logging.handlers import QueueHandler
import os, time, atexit
from threading import Thread
from multiprocessing import Process, Event, Queue
import cdsapi
###############################################################################
def downloader(runEvent, dlEvent, requestQueue, logQueue, retry = 3):
    """Worker-process loop: pull (name, request, target) jobs off the queue
    and download them via the CDS API, retrying up to `retry` times.

    runEvent  : cleared by the parent to make the worker exit.
    dlEvent   : set while this worker is actively downloading a file.
    requestQueue : multiprocessing.Queue of (name, request, target) tuples.
    logQueue  : queue feeding worker log records back to the parent process.
    """
    log = logging.getLogger()
    log.setLevel(logging.DEBUG)
    log.addHandler( QueueHandler( logQueue ) )
    c = cdsapi.Client(debug = True)
    while runEvent.is_set():
        try:
            name, request, target = requestQueue.get( timeout = 0.1 )
        # BUG FIX: bare `except:` also swallowed KeyboardInterrupt/SystemExit,
        # making the worker unkillable; only expected failures (queue.Empty on
        # timeout) should be ignored here.
        except Exception:
            continue
        else:
            log.debug( '{}, {}, {}'.format(name, request, target) )
            dlEvent.set()
            attempt = 0
            while attempt < retry:
                log.info( 'Download attempt {:3d} of {:3d}: {}'.format(attempt+1, retry, target) )
                try:
                    c.retrieve( name, request, target )
                # BUG FIX: same bare `except:` narrowed to Exception.
                except Exception:
                    log.error( 'Download attempt {:3d} of {:3d} FAILED: {}'.format(attempt+1, retry, target) )
                    attempt += 1
                else:
                    log.info( 'Download attempt {:3d} of {:3d} SUCESS: {}'.format(attempt+1, retry, target) )
                    attempt = retry+1  # sentinel: skip the failure branch below
            if (attempt == retry):
                # All attempts failed: warn and remove any partial file.
                log.warning('Failed to download file: {}'.format(target))
                if os.path.isfile( target ):
                    os.remove( target )
            dlEvent.clear()
###############################################################################
class ERA5_Downloader( object ):
    """Pool of worker processes that download CDS/ERA5 requests in parallel,
    funnelling worker log records back to the parent through a queue."""
    def __init__(self, nThreads = 2):
        # nThreads: number of downloader worker processes to spawn.
        self.log = logging.getLogger(__name__)
        self.runEvent = Event()   # cleared to tell workers to exit
        self.reqQueue = Queue()   # pending (name, request, target) jobs
        self.logQueue = Queue()   # log records produced by worker processes
        self.logThread = Thread( target = self._mpLogger )
        self.logThread.start()
        self.runEvent.set()
        self.procs = []
        self.dlEvents = []
        for i in range( nThreads ):
            dlEvent = Event()     # set while that worker is mid-download
            args = (self.runEvent, dlEvent, self.reqQueue, self.logQueue,)
            proc = Process(target = downloader, args=args)
            proc.start()
            self.procs.append( proc )
            self.dlEvents.append( dlEvent )
        atexit.register( self._quit )  # ensure workers shut down at exit
    def retrieve(self, name, request, target = None):
        # Queue one download; request is copied so later caller mutation
        # cannot affect the queued job.
        self.reqQueue.put( (name, request.copy(), target,) )
    def wait(self):
        # Block until the queue is drained and no worker is mid-download.
        while not self.reqQueue.empty(): time.sleep(0.1) # While the queue is NOT empty, sleep for 100 ms
        for dlEvent in self.dlEvents: # Iterate over the download events
            dl = dlEvent.wait(timeout=0.1); # Wait for the download event to be set; time out after 100 ms
            if dl: # If did NOT timeout
                while dlEvent.is_set(): time.sleep(0.1) # While the file is downloading, sleep 100 ms
    def _quit(self):
        # Stop the logger thread (None sentinel) and signal workers to exit.
        self.logQueue.put(None)
        self.runEvent.clear()
    def _mpLogger(self):
        # Drain worker log records and replay them on the parent's handlers;
        # a None record is the shutdown sentinel.
        while True:
            try:
                record = self.logQueue.get( timeout = 1.0 )
            except:
                pass
            else:
                if record is None: break
                self.log.callHandlers( record )
|
# Copyright 2021 DAI Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Dict, Union, Optional
from ethtx.models.decoded_model import DecodedEvent, Proxy, AddressInfo
from ethtx.models.objects_model import BlockMetadata, TransactionMetadata, Event
from ethtx.semantics.standards.erc20 import ERC20_EVENTS
from ethtx.semantics.standards.erc721 import ERC721_EVENTS
from .abc import ABISubmoduleAbc
from .helpers.utils import decode_event_abi_name_with_external_source
from ..decoders.parameters import decode_event_parameters
class ABIEventsDecoder(ABISubmoduleAbc):
    """ABI Events Decoder."""

    def decode(
        self,
        events: Union[Event, List[Event]],
        block: BlockMetadata,
        transaction: TransactionMetadata,
        proxies: Optional[Dict[str, Proxy]] = None,
        chain_id: Optional[str] = None,
    ) -> Union[DecodedEvent, List[DecodedEvent]]:
        """Return a decoded event, or a (possibly empty) list of decoded events."""
        if isinstance(events, list):
            return (
                [
                    self.decode_event(event, block, transaction, proxies, chain_id)
                    for event in events
                ]
                if events
                else []
            )
        return self.decode_event(events, block, transaction, proxies, chain_id)

    def decode_event(
        self,
        event: Event,
        block: BlockMetadata,
        transaction: TransactionMetadata,
        proxies: Optional[Dict[str, Proxy]] = None,
        chain_id: Optional[str] = None,
    ) -> DecodedEvent:
        """Decode a single raw log into a DecodedEvent.

        Resolution order for the event ABI: repository lookup by signature,
        single anonymous event in the contract ABI, delegate-called (proxy)
        contracts, then the standard ERC20/ERC721 event tables.
        """
        # The first topic (when present) is the event signature hash.
        event_signature = event.topics[0] if event.topics else None

        anonymous, guessed = False, False
        chain_id = chain_id or self._default_chain

        event_abi = self._repository.get_event_abi(
            chain_id, event.contract, event_signature
        )

        if not event_abi:
            # If the signature is not known but there is exactly one anonymous
            # event in the ABI, we can assume that this is the anonymous one
            # (e.g. Maker's LogNote).  (A redundant duplicated
            # ``if not event_abi:`` guard was removed here.)
            event_abi = self._repository.get_anonymous_event_abi(
                chain_id, event.contract
            )
            if event_abi:
                anonymous = True

        # Guard against ``proxies`` being None (both public entry points
        # default it to None).
        if not event_abi and proxies and event.contract in proxies:
            # try to find signature in delegate-called contracts
            for semantic in proxies[event.contract].semantics:
                event_abi = (
                    semantic.contract.events[event_signature]
                    if event_signature in semantic.contract.events
                    else None
                )
                if event_abi:
                    break

        if not event_abi and event_signature in ERC20_EVENTS:
            # try standard ERC20 events; the number of indexed parameters
            # must match the number of non-signature topics
            if (
                len(
                    [
                        parameter
                        for parameter in ERC20_EVENTS[event_signature].parameters
                        if parameter.indexed
                    ]
                )
                == len([topic for topic in event.topics if topic]) - 1
            ):
                event_abi = ERC20_EVENTS[event_signature]
        elif not event_abi and event_signature in ERC721_EVENTS:
            # try standard ERC721 events (``not event_abi`` added so an ABI
            # already resolved above is never overwritten by the generic one)
            if (
                len(
                    [
                        parameter
                        for parameter in ERC721_EVENTS[
                            event_signature
                        ].parameters
                        if parameter.indexed
                    ]
                )
                == len([topic for topic in event.topics if topic]) - 1
            ):
                event_abi = ERC721_EVENTS[event_signature]

        contract_name = self._repository.get_address_label(
            chain_id, event.contract, proxies
        )
        event_name = event_abi.name if event_abi else event_signature

        parameters = decode_event_parameters(
            event.log_data, event.topics, event_abi, anonymous
        )

        # Unknown signature: try to resolve a readable name from an external
        # signature database.  (``event_name`` may be None when the log had
        # no topics, hence the extra truthiness guard.)
        if event_name and event_name.startswith("0x") and len(event_name) > 2:
            guessed, event_name = decode_event_abi_name_with_external_source(
                signature=event_signature
            )

        return DecodedEvent(
            chain_id=chain_id,
            tx_hash=transaction.tx_hash,
            timestamp=block.timestamp,
            contract=AddressInfo(address=event.contract, name=contract_name),
            index=event.log_index,
            call_id=event.call_id,
            event_signature=event_signature,
            event_name=event_name,
            parameters=parameters,
            event_guessed=guessed,
        )
|
# -*- coding: UTF-8 -*-
import json
import time
import logging
# Module-level logger.  Fix: the original passed the literal string
# "__name__" instead of the __name__ variable, creating a logger literally
# named "__name__" for every module that made this mistake.
LOG = logging.getLogger(__name__)
class Room(object):
    """A chat room tracking its live connections and member count."""

    def __init__(self, room_id):
        self.room_id = room_id
        self.current_members = 0
        self.connections = []

    def add(self, conn):
        """Register a connection with this room."""
        self.connections.append(conn)
        self.current_members += 1

    def remove(self, conn):
        """Drop a connection from this room."""
        self.connections.remove(conn)
        self.current_members -= 1

    def broadcast(self, msg):
        """Send ``msg`` to every connection, pruning ones that fail to write."""
        try:
            encrypt = ""
            if "encrypt" in msg:
                # Blank the shared payload; each member gets its own ciphertext.
                encrypt = msg["encrypt"]
                msg["encrypt"] = ""

            dead = []
            for member in self.connections:
                try:
                    if "encrypt" in msg:
                        msg["msg"] = member.encrypt_msg(encrypt)
                    member.write_message(msg)
                except Exception as exc:
                    LOG.exception(exc)
                    dead.append(member)

            # NOTE(review): current_members is not decremented for pruned
            # connections — confirm whether that is intentional.
            for member in dead:
                member.close()
                self.connections.remove(member)
        except Exception as exc:
            LOG.exception(exc)
class Rooms(object):
    """A fixed pool of Room objects indexed by id (0 through room_num inclusive)."""

    def __init__(self, room_num=100):
        # Note: creates room_num + 1 rooms, ids 0..room_num.
        self.rooms = {rid: Room(rid) for rid in range(room_num + 1)}

    def __getitem__(self, room_id):
        return self.rooms[room_id]
|
from pynamodb.attributes import UnicodeAttribute, BooleanAttribute, UTCDateTimeAttribute, NumberAttribute, MapAttribute, ListAttribute
from pynamodb.models import Model
import acsys_system.env as env
from acsys_system.models import attributes
class AccountModel(Model):
    """DynamoDB model for a user account."""

    class Meta:
        table_name = "Account"
        aws_access_key_id = env.AWS_ACCESS_KEY_ID
        aws_secret_access_key = env.AWS_SECRET_ACCESS_KEY
        region = env.AWS_REGION

    AccountID = NumberAttribute(hash_key=True)
    AccountName = UnicodeAttribute(null=False)
    AccountHeight = NumberAttribute(null=False)
    AccountWeight = NumberAttribute(null=False)
    AccountBirthday = UTCDateTimeAttribute(null=False)
    AccountGender = BooleanAttribute(null=False)
    AccountLevel = NumberAttribute(null=False)
    AccountAddress = UnicodeAttribute(null=False)
    AccountPass = UnicodeAttribute(null=False)

    def __iter__(self):
        """Yield (attribute_name, serialized_value) pairs.

        Fix: PynamoDB's API is ``get_attributes()`` (plural); the original
        called a non-existent ``get_attribute()``, which raised
        AttributeError at runtime.
        """
        for name, attr in self.get_attributes().items():
            yield name, attr.serialize(getattr(self, name))
class ScheduleModel(Model):
    """DynamoDB model for a daily training/diet schedule entry."""

    class Meta:
        table_name = "Schedule"
        aws_access_key_id = env.AWS_ACCESS_KEY_ID
        aws_secret_access_key = env.AWS_SECRET_ACCESS_KEY
        region = env.AWS_REGION

    AccountID = NumberAttribute(hash_key=True)
    ScheduleDate = UTCDateTimeAttribute(null=False)
    ScheduleTime = UTCDateTimeAttribute(null=True)
    CaloriesBurned = NumberAttribute(null=True)
    CaloriesIntake = NumberAttribute(null=True)
    Ingestion = UnicodeAttribute(null=True)
    TrainingMemory = UnicodeAttribute(null=True)

    def __iter__(self):
        """Yield (attribute_name, serialized_value) pairs.

        Fix: ``get_attributes()`` (plural) is the PynamoDB API; the original
        ``get_attribute()`` does not exist.
        """
        for name, attr in self.get_attributes().items():
            yield name, attr.serialize(getattr(self, name))
class FoodModel(Model):
    """DynamoDB model mapping a food name to its calorie content."""

    class Meta:
        table_name = "Food"
        aws_access_key_id = env.AWS_ACCESS_KEY_ID
        aws_secret_access_key = env.AWS_SECRET_ACCESS_KEY
        region = env.AWS_REGION

    FoodName = UnicodeAttribute(null=False)
    FoodCalorie = NumberAttribute(null=False)

    def __iter__(self):
        """Yield (attribute_name, serialized_value) pairs.

        Fix: ``get_attributes()`` (plural) is the PynamoDB API; the original
        ``get_attribute()`` does not exist.
        """
        for name, attr in self.get_attributes().items():
            yield name, attr.serialize(getattr(self, name))
class MotionModel(Model):
    """DynamoDB model mapping an exercise name to calories burned."""

    class Meta:
        table_name = "Motion"
        aws_access_key_id = env.AWS_ACCESS_KEY_ID
        aws_secret_access_key = env.AWS_SECRET_ACCESS_KEY
        region = env.AWS_REGION

    MotionName = UnicodeAttribute(null=False)
    MotionCalorie = NumberAttribute(null=False)

    def __iter__(self):
        """Yield (attribute_name, serialized_value) pairs.

        Fix: ``get_attributes()`` (plural) is the PynamoDB API; the original
        ``get_attribute()`` does not exist.
        """
        for name, attr in self.get_attributes().items():
            yield name, attr.serialize(getattr(self, name))
import logging
import typing
import json
import random
import sys
import boto3
import os
from pydub import AudioSegment
from aiohttp import request
import aiohttp
from aiogram import Bot, Dispatcher, executor, types, filters
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from aiogram.contrib.middlewares.logging import LoggingMiddleware
from aiogram.types.message import ContentType
from aiogram.utils.callback_data import CallbackData
from aiogram.utils.exceptions import MessageNotModified
from aiogram.dispatcher import FSMContext
from asyncio import AbstractEventLoop
import asyncio
from google.cloud import speech_v1 as speech
from messages import MESSAGES, QUESTIONS
from utils import InterviewStates
# --- Bot bootstrap -----------------------------------------------------------
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

# Dedicated event loop shared by the bot and all aiohttp traffic.
loop: AbstractEventLoop = asyncio.new_event_loop()
asyncio.set_event_loop(loop=loop)

bot = Bot(token=os.environ["BOT_TOKEN"], loop=loop)  # BOT_TOKEN must be set in the environment
dp = Dispatcher(bot, storage=MemoryStorage())  # in-memory FSM storage for interview state
dp.middleware.setup(LoggingMiddleware())

# Shared HTTP session, closed in shutdown().
session: aiohttp.ClientSession = aiohttp.ClientSession()

# Filesystem locations for voice processing and the Google SR credentials.
download_voices_path = '/home/app/voices/ogg/'
converted_path = '/home/app/voices/flac/'
api_key_path = '/home/app/google_sr_token.json'
def cancel_keyboard():
    """Build a one-row reply keyboard containing only the Cancel button."""
    markup = types.ReplyKeyboardMarkup()
    markup.row(types.KeyboardButton('🙅 Cancel'))
    return markup
def list_keyboard():
    """Build a reply keyboard with one row per interview question plus a Cancel row."""
    markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
    for question in QUESTIONS:
        markup.row(types.KeyboardButton(question))
    markup.row(types.KeyboardButton('🙅 Cancel'))
    return markup
def speech_to_text(config, audio):
    """Run Google Speech-to-Text on ``audio`` and return the best transcripts."""
    client = speech.SpeechClient.from_service_account_json(api_key_path)
    return get_transcript(client.recognize(config=config, audio=audio))
def get_transcript(response):
    """Extract the top-ranked transcript from each recognition result."""
    return [result.alternatives[0].transcript for result in response.results]
def convert_voice(download_voices_path, message):
    """Convert a downloaded .ogg voice note to .flac and return the new path."""
    stem = str(message.message_id)
    audio = AudioSegment.from_ogg(download_voices_path + stem + '.ogg')
    flac_path = converted_path + stem + '.flac'
    audio.export(flac_path, format='flac')
    return flac_path
async def check_answer(answer):
    """POST ``answer`` to the LanguageTool API and return the parsed JSON report."""
    headers = {
        'Content-Type': 'application/x-www-form-urlencoded',
        'accept': 'application/json',
    }
    params = (
        ('text', answer),
        ('language', 'en-US'),
        ('enabledOnly', 'false'),
    )
    async with request('post', 'https://api.languagetoolplus.com/v2/check', params=params, headers=headers) as response:
        report = await response.json()
        logger.debug('Checking results: %r', json.dumps(report, indent=4, sort_keys=True))
        return report
def format_errors_explanation(errors):
    """Render a LanguageTool report as a human-readable Markdown message.

    Bug fix: the original returned from *inside* the loop whenever an error
    had no replacement suggestions, silently dropping every later error;
    such errors are now listed and the loop continues.
    """
    if len(errors['matches']) == 0:
        return MESSAGES['no_errors']

    message = ''
    # Warn when LanguageTool detected a different language than requested.
    if errors['language']['code'] != errors['language']['detectedLanguage']['code']:
        message += f"Are you sure, it's in English? I guess it's {errors['language']['detectedLanguage']['name']} with " \
            + f"{errors['language']['detectedLanguage']['confidence']}% confidence.\n" \
            + "Anyway, if it is English, "
    message += 'I have noticed following errors in your speech:\n\n'

    for ind, error in enumerate(errors['matches']):
        message += str(ind + 1) + '. ' \
            + '*Error type:* ' + error['rule']['issueType'] \
            + '. ' + error['message']
        if error['replacements']:
            # Show the offending sentence, the suggestions, and the sentence
            # with the first suggestion spliced in.
            message += '\n*Original:* ' + error['sentence'] \
                + '\n*Advice:* Probably, you should use: ' \
                + ' / '.join(x['value'] for x in error['replacements']) \
                + '\n*Correct sentences:* ' \
                + error['context']['text'][:error['context']['offset']] \
                + error['replacements'][0]['value'] \
                + error['context']['text'][error['context']['offset'] + error['context']['length']:] \
                + '\n\n'
        else:
            # No suggestions for this error: just separate it from the next one.
            message += '\n\n'
    return message
async def process_voice(message):
    """Download, convert, and transcribe a Telegram voice message.

    Returns the first transcript, or None when nothing was recognized.
    Temporary audio files are removed in both cases.
    """
    logger.debug(message)
    ogg_path = download_voices_path + str(message.message_id) + '.ogg'
    await message.voice.download(ogg_path)
    converted_file_path = convert_voice(download_voices_path, message)

    config = dict(
        language_code="en-US",
        enable_automatic_punctuation=True,
    )
    with open(converted_file_path, 'rb') as f:
        audio = dict(content=f.read())

    os.remove(ogg_path)
    os.remove(converted_file_path)

    try:
        return speech_to_text(config, audio)[0]
    except IndexError:
        # Recognition produced no results at all.
        return None
@dp.message_handler(state='*', commands='start')
async def start_cmd_handler(message: types.Message, state: FSMContext):
    """Handle /start from any FSM state: reset the state and greet the user.

    Bug fix: ``state.reset_state()`` is a coroutine and was never awaited,
    so the FSM state was not actually reset (it is awaited everywhere else
    in this module).
    """
    await state.reset_state()
    logger.debug(message)
    await message.reply(f"Hi, {message.from_user.first_name}! " + MESSAGES['welcome_message'],
                        reply_markup=types.ReplyKeyboardMarkup().row(*(
                            types.KeyboardButton('😃 Yeah, great!'),
                            types.KeyboardButton('🤨 You? Teaching me? I\'d better die!')
                        )),
                        parse_mode='Markdown')
@dp.message_handler(commands='random')
async def random_cmd_handler(message: types.Message):
    """Reply with one randomly chosen interview question as an exercise."""
    logger.debug(message)
    question = random.choice(QUESTIONS)
    await message.reply(f"*{question}*" + MESSAGES['exercise'], parse_mode='Markdown', reply=False)
@dp.message_handler(state='*', commands='interview')
async def interview_cmd_handler(message: types.Message, state: FSMContext):
    """Start an interview session and ask the first question."""
    logger.debug(message)
    # Seed the accumulated report; MESSAGES['interview_done'] is its header.
    await InterviewStates.results.set()
    async with state.proxy() as data:
        data['results'] = [MESSAGES['interview_done']]
        # Switch to the answering state; question_number indexes QUESTIONS.
        await InterviewStates.question_number.set()
        data['question_number'] = 0
        await message.reply("Answer the question below via TEXT or VOICE" \
            +" or click 'Cancel' to finish interview immediately. \n\n*" \
            + QUESTIONS[0] + "*",
            reply_markup=cancel_keyboard(),
            parse_mode='Markdown',
            reply=False)
@dp.message_handler(commands='list')
async def list_cmd_handler(message: types.Message):
    """Show the full question list as a reply keyboard."""
    logger.debug(message)
    await message.reply("Let's pick a question you want to answer:",
                        reply_markup=list_keyboard(),
                        reply=False)
@dp.message_handler(text='😃 Yeah, great!')
async def msg_handler(message: types.Message):
    """User accepted the intro pitch: explain how to practice."""
    logger.debug(message)
    logger.debug('The answer is %r', message.text)
    reply_markup = types.ReplyKeyboardRemove()
    await message.reply(MESSAGES['agree_practice'], reply_markup=reply_markup)
@dp.message_handler(text='🤨 You? Teaching me? I\'d better die!')
async def msg_handler(message: types.Message):
    """User declined the intro pitch: ask why."""
    logger.debug(message)
    logger.debug('The answer is %r', message.text)
    await message.reply("Oh no! Why?", reply_markup=types.ReplyKeyboardRemove())
@dp.message_handler(text='🤨 You now... Fuck off, pal, I\'d handle it by myself.')
async def msg_handler(message: types.Message):
    """User rudely declined: send the parting shot."""
    logger.debug(message)
    logger.debug('The answer is %r', message.text)
    await message.reply("Good luck with your application, you dick🤪",
                        reply_markup=types.ReplyKeyboardRemove())
@dp.message_handler(state = InterviewStates.question_number)
async def interview_questions_handler(message: types.Message, state: FSMContext):
    """Handle a TEXT answer during an interview (or the Cancel button)."""
    if message.text == '🙅 Cancel':
        logger.debug(message)
        async with state.proxy() as data:
            # Close the report and send everything accumulated so far.
            data['results'].append(MESSAGES['canceled_interview'])
            await message.reply(text="".join(data['results']),
                reply_markup=types.ReplyKeyboardRemove(),
                parse_mode='Markdown')
        await state.reset_state()
    else:
        current_state = await state.get_state()
        logger.debug(message)
        logger.debug(current_state)
        async with state.proxy() as data:
            # Append question, raw answer and the LanguageTool verdict.
            data['results'].append("\n\n" + "*Question:* " + QUESTIONS[data['question_number']] + "\n" \
                + "*Your answer:* " + message.text + "\n" \
                + "*Result:* " + format_errors_explanation(await check_answer(message.text)))
            data['question_number'] += 1
            try:
                # Ask the next question...
                await message.reply(text=QUESTIONS[data['question_number']]+ MESSAGES['exercise'],
                    reply_markup=cancel_keyboard(),reply=False)
            except IndexError:
                # ...or, once questions are exhausted, send the final report.
                data['results'].append(MESSAGES['next_step'])
                await message.reply("".join(data['results']),
                    reply_markup=types.ReplyKeyboardRemove(),
                    parse_mode='Markdown')
                await state.reset_state()
@dp.message_handler()
async def all_msg_handler(message: types.Message):
    """Fallback TEXT handler outside the interview flow."""
    logger.debug(message)
    logger.debug('The answer is %r', message.text)
    if message.text in QUESTIONS:
        # User picked a question from the /list keyboard: restate it as a task.
        reply_message = f"*{message.text}*" + MESSAGES['exercise']
        await message.reply(reply_message,
                parse_mode='Markdown',
                reply_markup=types.ReplyKeyboardRemove(),
                reply=False)
    elif message.text == '🙅 Cancel':
        await message.reply(MESSAGES['next_step'],
                parse_mode='Markdown',
                reply_markup=types.ReplyKeyboardRemove()
                )
    else:
        # Any other text: grammar-check it and report the findings.
        await message.reply(format_errors_explanation(await check_answer(message.text)) \
                + MESSAGES['next_step'],
                parse_mode='Markdown',
                reply_markup=types.ReplyKeyboardRemove())
@dp.message_handler(state=InterviewStates.question_number, content_types=ContentType.VOICE)
async def interview_questions_handler(message: types.Message, state: FSMContext):
    """Handle a VOICE answer during an interview."""
    text_from_voice = await process_voice(message)
    if text_from_voice == None:
        # Recognition failed: record a placeholder answer.
        # NOTE(review): the placeholder itself is then grammar-checked below —
        # confirm this is intended.
        text_from_voice = "Sorry, can't recognize"
    current_state = await state.get_state()
    async with state.proxy() as data:
        # Append question, transcript and the LanguageTool verdict.
        data['results'].append("\n\n" + "*Question:* " + QUESTIONS[data['question_number']] + "\n" \
            + "*Your answer:* " + text_from_voice + "\n" \
            + "*Result:* " + format_errors_explanation(await check_answer(text_from_voice)))
        data['question_number'] += 1
        try:
            # Ask the next question...
            await message.reply(text=QUESTIONS[data['question_number']],
                reply_markup=cancel_keyboard(), reply=False)
        except IndexError:
            # ...or send the final report when the question list is exhausted.
            data['results'].append(MESSAGES['next_step'])
            await message.reply(text="".join(data['results']),
                reply_markup=types.ReplyKeyboardRemove(),
                parse_mode='Markdown')
            await state.reset_state()
@dp.message_handler(content_types=ContentType.VOICE)
async def voices_handler(message: types.Message):
    """Transcribe a standalone voice message and grammar-check it.

    Bug fixes: when recognition failed, the original fell through and ran
    the grammar check on the literal apology text — it now returns right
    after apologizing; the LanguageTool API was also called twice per
    message — the report is now fetched once and reused.
    """
    text_from_voice = await process_voice(message)
    if text_from_voice is None:
        await message.reply("Sorry, can't recognize",
                            parse_mode='Markdown',
                            reply_markup=types.ReplyKeyboardRemove())
        return
    logger.debug(text_from_voice)
    await message.reply('If I understood you correctly, you said:\n\n' + text_from_voice)
    report = await check_answer(text_from_voice)
    logger.debug('Checking results: %r', report)
    await message.reply(format_errors_explanation(report),
                        parse_mode='Markdown',
                        reply_markup=types.ReplyKeyboardRemove())
async def load_google_api_key(api_key_path):
    """Fetch the Google SR service-account JSON from S3 and write it locally.

    NOTE(review): boto3 calls are blocking; they will stall the event loop
    while the download runs — confirm this is acceptable at startup.
    """
    s3 = boto3.client(
        's3',
        aws_access_key_id=os.environ["AWS_ACCESS_KEY"],
        aws_secret_access_key=os.environ["AWS_SECRET_KEY"],
        region_name='eu-west-2',
    )
    body = s3.get_object(Bucket='englybot', Key='google_sr_token.json')['Body']
    with open(api_key_path, "w") as f:
        f.write(body.read().decode('utf-8'))
async def on_startup(_):
    """Dispatcher startup hook: fetch Google credentials in the background."""
    asyncio.create_task(load_google_api_key(api_key_path))
async def shutdown(dp):
    """Dispatcher shutdown hook: close FSM storage and the shared HTTP session."""
    await dp.storage.close()
    await dp.storage.wait_closed()
    await session.close()
if __name__ == '__main__':
    print("start")
    # Blocking long-poll loop; startup/shutdown hooks are wired above.
    executor.start_polling(dp, on_shutdown=shutdown, loop=loop, on_startup=on_startup)
#!/usr/bin/env python
'''All form models'''
__author__ = 'Denys Tarnavskyi'
__copyright__ = 'Copyright 2018, RPD site project'
__license__ = 'MIT'
__version__ = '1.0'
__email__ = 'marzique@gmail.com'
__status__ = 'Development'
from flask_wtf import FlaskForm, RecaptchaField
from flask_wtf.file import FileField, FileAllowed
from flask_login import current_user
from wtforms import StringField, PasswordField, SubmitField, BooleanField, TextAreaField, SelectField
from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError
from .models import User
from .helpers import get_all_roles, get_role_tuples
from .constants import LIST_ALLOWED_FILE_EXTENSIONS
class RegistrationForm(FlaskForm):
    """New-user registration form with username/email uniqueness checks."""

    # Double-quoted: the label contains an apostrophe, which broke the
    # original single-quoted literal (SyntaxError).
    username = StringField("Ім'я користувача",
                           validators=[DataRequired(), Length(min=2, max=20)])
    email = StringField('Email',
                        validators=[DataRequired(), Email()])
    password = PasswordField('Пароль', validators=[DataRequired()])
    confirm_password = PasswordField('Пароль ще раз',
                                     validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField('Підтвердити')

    def validate_username(self, username):
        """Reject usernames that are already registered."""
        user = User.query.filter_by(username=username.data).first()
        if user:
            raise ValidationError(
                'Користувач з таким іменем вже зареєстрований')

    def validate_email(self, email):
        """Reject e-mail addresses that are already registered."""
        user = User.query.filter_by(email=email.data).first()
        if user:
            raise ValidationError(
                'Користувач з такою поштовою скринькою вже зареєстрований')
# Email+password authentication model
class LoginForm(FlaskForm):
    """Login form (email + password) protected by a reCAPTCHA."""

    email = StringField('Email',
                        validators=[DataRequired(), Email()])
    password = PasswordField('Пароль', validators=[DataRequired()])
    # Double-quoted: the label contains an apostrophe, which broke the
    # original single-quoted literal (SyntaxError).
    remember = BooleanField("Запам'ятати")
    submit = SubmitField('Увійти')
    recaptcha = RecaptchaField()
class UpdateAccountForm(FlaskForm):
    """Profile update form; uniqueness is only checked for changed values."""

    # Double-quoted: the label contains an apostrophe, which broke the
    # original single-quoted literal (SyntaxError).
    username = StringField("Ім'я користувача",
                           validators=[DataRequired(), Length(min=2, max=20)])
    email = StringField('Email',
                        validators=[DataRequired(), Email()])
    submit = SubmitField('Змінити')

    def validate_username(self, username):
        """Reject a changed username that another user already holds."""
        if username.data != current_user.username:
            user = User.query.filter_by(username=username.data).first()
            if user:
                raise ValidationError(
                    'Користувач з таким іменем вже зареєстрований')

    def validate_email(self, email):
        """Reject a changed e-mail that another user already holds."""
        if email.data != current_user.email:
            user = User.query.filter_by(email=email.data).first()
            if user:
                raise ValidationError(
                    'Користувач з такою поштовою скринькою вже зареєстрований')
class UpdatePicture(FlaskForm):
    """Form for replacing the user's profile picture (jpg/jpeg/png only)."""
    picture = FileField('Змінити зображення користувача', validators=[
        FileAllowed(['jpg', 'jpeg', 'png'])])
    submit = SubmitField('Змінити')
class PostForm(FlaskForm):
    """Form for creating a news post with an optional image."""
    title = StringField('Назва', validators=[
        DataRequired(), Length(min=5, max=100)])
    content = TextAreaField('Зміст', validators=[DataRequired()])
    picture = FileField('Зображення', validators=[
        FileAllowed(['jpg', 'jpeg', 'png'])])
    submit = SubmitField('Додати новину')
class ResetRequest(FlaskForm):
    """Password-reset request form (e-mail + reCAPTCHA)."""
    email = StringField('Email',
                        validators=[DataRequired(), Email()])
    recaptcha = RecaptchaField()
    submit = SubmitField('Надіслати')
class ResetPassword(FlaskForm):
    """New-password form shown after a reset link is followed."""
    # NOTE(review): 'Новый пароль' is Russian while the rest of the labels are
    # Ukrainian — possibly unintended, but it is a runtime string so it is
    # left unchanged here.
    password = PasswordField('Новый пароль', validators=[DataRequired()])
    confirm_password = PasswordField('Пароль ще раз',
                                     validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField('Змінити пароль')
class NewRole(FlaskForm):
    """Form for creating a new role by name."""
    role = StringField('Назва Ролі',
                       validators=[DataRequired(), Length(min=4, max=15)])
    submit = SubmitField('Додати')
class AddRole(FlaskForm):
    """Form for granting an existing role to a user."""
    # NOTE(review): get_role_tuples() runs once at import time, so roles added
    # later will not appear until the app restarts — confirm this is intended.
    role = SelectField(label='Роль', choices=get_role_tuples())
    submit = SubmitField('Надати роль')
class UploadFile(FlaskForm):
    '''
    Uploads files for students
    '''
    name = StringField('Назва', validators=[
        DataRequired(), Length(min=5, max=100)])
    course = StringField('Предмет', validators=[
        DataRequired(), Length(min=5, max=100)])
    # TODO: what extensions we will allow here?
    file_uploaded = FileField('Файл', validators=[DataRequired(),
                              FileAllowed(LIST_ALLOWED_FILE_EXTENSIONS)])
    submit = SubmitField('Завантажити')
|
import csv
import pathlib
from typing import Any, BinaryIO, Dict, Iterator, List, Optional, Sequence, Tuple, Union
import torch
from torchdata.datapipes.iter import Filter, IterDataPipe, IterKeyZipper, Mapper, Zipper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, GDriveResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_accessor,
)
from torchvision.prototype.tv_tensors import Label
from torchvision.tv_tensors import BoundingBoxes
from .._api import register_dataset, register_info
# CelebA annotation files are space-delimited, possibly with runs of spaces.
csv.register_dialect("celeba", delimiter=" ", skipinitialspace=True)
class CelebACSVParser(IterDataPipe[Tuple[str, Dict[str, str]]]):
    """Parse CelebA's space-delimited annotation files into (image_id, row) pairs."""

    def __init__(
        self,
        datapipe: IterDataPipe[Tuple[Any, BinaryIO]],
        *,
        fieldnames: Optional[Sequence[str]] = None,
    ) -> None:
        # fieldnames may be supplied explicitly for header-less files;
        # otherwise they are read from the file's header line.
        self.datapipe = datapipe
        self.fieldnames = fieldnames

    def __iter__(self) -> Iterator[Tuple[str, Dict[str, str]]]:
        for _, file in self.datapipe:
            try:
                lines = (line.decode() for line in file)

                if self.fieldnames:
                    fieldnames = self.fieldnames
                else:
                    # The first row is skipped, because it only contains the number of samples
                    next(lines)
                    # Empty field names are filtered out, because some files have an extra white space after the header
                    # line, which is recognized as extra column
                    fieldnames = [name for name in next(csv.reader([next(lines)], dialect="celeba")) if name]
                    # Some files do not include a label for the image ID column
                    if fieldnames[0] != "image_id":
                        fieldnames.insert(0, "image_id")

                for line in csv.DictReader(lines, fieldnames=fieldnames, dialect="celeba"):
                    yield line.pop("image_id"), line
            finally:
                # Always release the underlying file handle.
                file.close()
NAME = "celeba"


@register_info(NAME)
def _info() -> Dict[str, Any]:
    """Static metadata for the CelebA dataset (nothing beyond the name)."""
    return {}
@register_dataset(NAME)
class CelebA(Dataset):
    """
    CelebA faces dataset (images + identity/attribute/bbox/landmark annotations).

    - **homepage**: https://mmlab.ie.cuhk.edu.hk/projects/CelebA.html
    """

    def __init__(
        self,
        root: Union[str, pathlib.Path],
        *,
        split: str = "train",
        skip_integrity_check: bool = False,
    ) -> None:
        self._split = self._verify_str_arg(split, "split", ("train", "val", "test"))

        super().__init__(root, skip_integrity_check=skip_integrity_check)

    def _resources(self) -> List[OnlineResource]:
        # Each resource is a Google-Drive-hosted file pinned by its sha256.
        splits = GDriveResource(
            "0B7EVK8r0v71pY0NSMzRuSXJEVkk",
            sha256="fc955bcb3ef8fbdf7d5640d9a8693a8431b5f2ee291a5c1449a1549e7e073fe7",
            file_name="list_eval_partition.txt",
        )
        images = GDriveResource(
            "0B7EVK8r0v71pZjFTYXZWM3FlRnM",
            sha256="46fb89443c578308acf364d7d379fe1b9efb793042c0af734b6112e4fd3a8c74",
            file_name="img_align_celeba.zip",
        )
        identities = GDriveResource(
            "1_ee_0u7vcNLOfNLegJRHmolfH5ICW-XS",
            sha256="c6143857c3e2630ac2da9f782e9c1232e5e59be993a9d44e8a7916c78a6158c0",
            file_name="identity_CelebA.txt",
        )
        attributes = GDriveResource(
            "0B7EVK8r0v71pblRyaVFSWGxPY0U",
            sha256="f0e5da289d5ccf75ffe8811132694922b60f2af59256ed362afa03fefba324d0",
            file_name="list_attr_celeba.txt",
        )
        bounding_boxes = GDriveResource(
            "0B7EVK8r0v71pbThiMVRxWXZ4dU0",
            sha256="7487a82e57c4bb956c5445ae2df4a91ffa717e903c5fa22874ede0820c8ec41b",
            file_name="list_bbox_celeba.txt",
        )
        landmarks = GDriveResource(
            "0B7EVK8r0v71pd0FJY3Blby1HUTQ",
            sha256="6c02a87569907f6db2ba99019085697596730e8129f67a3d61659f198c48d43b",
            file_name="list_landmarks_align_celeba.txt",
        )
        return [splits, images, identities, attributes, bounding_boxes, landmarks]

    def _filter_split(self, data: Tuple[str, Dict[str, str]]) -> bool:
        # The split file encodes train/val/test as "0"/"1"/"2".
        split_id = {
            "train": "0",
            "val": "1",
            "test": "2",
        }[self._split]
        return data[1]["split_id"] == split_id

    def _prepare_sample(
        self,
        data: Tuple[
            Tuple[str, Tuple[Tuple[str, List[str]], Tuple[str, BinaryIO]]],
            Tuple[
                Tuple[str, Dict[str, str]],
                Tuple[str, Dict[str, str]],
                Tuple[str, Dict[str, str]],
                Tuple[str, Dict[str, str]],
            ],
        ],
    ) -> Dict[str, Any]:
        """Assemble one zipped record into the final sample dict."""
        split_and_image_data, ann_data = data
        _, (_, image_data) = split_and_image_data
        path, buffer = image_data

        image = EncodedImage.from_file(buffer)
        (_, identity), (_, attributes), (_, bounding_boxes), (_, landmarks) = ann_data

        return dict(
            path=path,
            image=image,
            identity=Label(int(identity["identity"])),
            # Attribute values arrive as "1"/"-1" strings; convert to bools.
            attributes={attr: value == "1" for attr, value in attributes.items()},
            bounding_boxes=BoundingBoxes(
                [int(bounding_boxes[key]) for key in ("x_1", "y_1", "width", "height")],
                format="xywh",
                spatial_size=image.spatial_size,
            ),
            # Landmark columns come in "<name>_x"/"<name>_y" pairs.
            landmarks={
                landmark: torch.tensor((int(landmarks[f"{landmark}_x"]), int(landmarks[f"{landmark}_y"])))
                for landmark in {key[:-2] for key in landmarks.keys()}
            },
        )

    def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
        splits_dp, images_dp, identities_dp, attributes_dp, bounding_boxes_dp, landmarks_dp = resource_dps

        # Keep only the rows belonging to the requested split.
        splits_dp = CelebACSVParser(splits_dp, fieldnames=("image_id", "split_id"))
        splits_dp = Filter(splits_dp, self._filter_split)
        splits_dp = hint_shuffling(splits_dp)
        splits_dp = hint_sharding(splits_dp)

        # Zip the four annotation files in lockstep (all keyed by image_id).
        anns_dp = Zipper(
            *[
                CelebACSVParser(dp, fieldnames=fieldnames)
                for dp, fieldnames in (
                    (identities_dp, ("image_id", "identity")),
                    (attributes_dp, None),
                    (bounding_boxes_dp, None),
                    (landmarks_dp, None),
                )
            ]
        )

        # Join split rows with image files by file name.
        dp = IterKeyZipper(
            splits_dp,
            images_dp,
            key_fn=getitem(0),
            ref_key_fn=path_accessor("name"),
            buffer_size=INFINITE_BUFFER_SIZE,
            keep_key=True,
        )
        # Then join with the zipped annotations by image_id.
        dp = IterKeyZipper(
            dp,
            anns_dp,
            key_fn=getitem(0),
            ref_key_fn=getitem(0, 0),
            buffer_size=INFINITE_BUFFER_SIZE,
        )
        return Mapper(dp, self._prepare_sample)

    def __len__(self) -> int:
        # Known per-split sample counts.
        return {
            "train": 162_770,
            "val": 19_867,
            "test": 19_962,
        }[self._split]
|
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 28 19:51:58 2018
@author: PPAGACZ
"""
from packets import *
class DifferencingPipe(IPipe):
    """Pipe that applies the differencing filter when its input is present."""

    def runFilter(self):
        """Run DifferencingFilter on this pipe if preconditions hold."""
        if DifferencingPipe.checkConditions(self.data):
            DifferencingFilter.process(self)

    @staticmethod
    def checkConditions(data):
        """Precondition: the source series must be present.

        Fix: declared as @staticmethod — the original had no ``self``
        parameter, so it only worked when accessed via the class, not an
        instance.
        NOTE(review): 'orginal' looks like a typo for 'original', but it must
        match the attribute name set by the data producer — verify before
        renaming.
        """
        return data.orginal is not None
import os
from py2neo import Graph
from py2neo.ogm import GraphObject, Property, RelatedFrom, RelatedTo
# Lazily-initialized module-wide connection handle (see get_graph()).
GRAPH: Graph = None

# Relationship type names shared by the models below.
REFERENCES_RELATIONSHIP = "REFERENCES"
WROTE_RELATIONSHIP = "WROTE"
HAS_KEYWORD_RELATIONSHIP = "HAS_KEYWORD"
PART_OF_RELATIONSHIP = "PART_OF"
def get_graph():
    """Return the shared Graph handle, connecting lazily on first use.

    Connection parameters are read from the NEO4J_* environment variables.
    """
    global GRAPH
    if not GRAPH:
        GRAPH = Graph(
            host=os.getenv("NEO4J_URL"),
            user=os.getenv("NEO4J_USER"),
            password=os.getenv("NEO4J_PASS"),
            port=int(os.getenv("NEO4J_PORT")),
            secure=True,
        )
    return GRAPH
class BaseModel(GraphObject):
    """Common base for graph models: timestamp property plus save/all helpers."""

    _time = Property()

    def __init__(self, **kwargs):
        # Per-instance overflow store for kwargs that are not declared
        # properties.  Fix: the original used a class-level dict, which was
        # silently shared (and mutated) across every model instance.
        self.__other_properties_dict = dict()
        for key, value in kwargs.items():
            if hasattr(self, key):
                setattr(self, key, value)
            else:
                self.__other_properties_dict[key] = value

    def all(self):
        """Match all nodes of this model's label."""
        graph = get_graph()
        return self.match(graph)

    def save(self):
        """Push local changes for this object to the database."""
        graph = get_graph()
        graph.push(self)
class Paper(BaseModel):
    """A publication node keyed by its bibliography ID."""

    __primarykey__ = "ID"

    # BibTeX-derived scalar properties.
    ID = Property()
    title = Property()
    address = Property()
    acmid = Property()
    year = Property()
    isbn = Property()
    link = Property()
    _bibtex = Property()  # raw BibTeX entry text
    numpages = Property()
    url = Property()
    pages = Property()
    series = Property()
    ENTRYTYPE = Property()
    publisher = Property()
    location = Property()
    booktitle = Property()
    doi = Property()

    # Graph relationships.
    authors = RelatedFrom("Author", WROTE_RELATIONSHIP)
    keywords = RelatedTo("Keyword", HAS_KEYWORD_RELATIONSHIP)
    projects = RelatedFrom("Project", PART_OF_RELATIONSHIP)
    references = RelatedTo("Paper", REFERENCES_RELATIONSHIP)
    referenced_by = RelatedFrom("Paper", REFERENCES_RELATIONSHIP)

    def asdict(self):
        """Return the scalar properties as a plain dict."""
        return {
            "ID": self.ID,
            "title": self.title,
            "address": self.address,
            "acmid": self.acmid,
            "year": self.year,
            "isbn": self.isbn,
            "link": self.link,
            "_bibtex": self._bibtex,
            "_time": self._time,
            "numpages": self.numpages,
            "url": self.url,
            "pages": self.pages,
            "series": self.series,
            "ENTRYTYPE": self.ENTRYTYPE,
            "publisher": self.publisher,
            "location": self.location,
            "booktitle": self.booktitle,
            "doi": self.doi,
        }

    def fetch(self):
        """Load this paper from the database by primary key."""
        graph = get_graph()
        return Paper.match(graph, self.ID).first()

    def fetch_authors(self):
        """Return author dicts merged with each relationship's properties."""
        return [
            {**author[0].asdict(), **author[1]}
            for author in self.authors._related_objects
        ]

    def fetch_keywords(self):
        """Return keyword dicts merged with each relationship's properties."""
        return [{**kw[0].asdict(), **kw[1]} for kw in self.keywords._related_objects]

    def fetch_projects(self):
        """Return project dicts merged with each relationship's properties."""
        return [
            {**proj[0].asdict(), **proj[1]} for proj in self.projects._related_objects
        ]

    def fetch_references(self):
        """Return dicts for the papers this paper references."""
        return [{**proj[0].asdict()} for proj in self.references._related_objects]
class Author(BaseModel):
    """An author node, keyed by name."""

    __primarykey__ = "name"

    name = Property()
    papers = RelatedTo("Paper", WROTE_RELATIONSHIP)

    def fetch(self):
        """Load this author from the database by primary key."""
        return Author.match(get_graph(), self.name).first()

    def fetch_papers(self):
        """Return dicts for all papers written by this author."""
        return [{**paper[0].asdict()} for paper in self.papers._related_objects]

    def asdict(self):
        return {"name": self.name}
class Keyword(BaseModel):
    """A keyword node, keyed by its value."""

    __primarykey__ = "value"

    value = Property()
    papers = RelatedFrom("Paper", HAS_KEYWORD_RELATIONSHIP)

    def fetch(self):
        """Load this keyword from the database by primary key."""
        return Keyword.match(get_graph(), self.value).first()

    def fetch_papers(self):
        """Return dicts for all papers tagged with this keyword."""
        return [{**paper[0].asdict()} for paper in self.papers._related_objects]

    def asdict(self):
        return {"value": self.value}
class Project(BaseModel):
    """A project node, keyed by name."""

    __primarykey__ = "name"

    name = Property()
    papers = RelatedTo("Paper", PART_OF_RELATIONSHIP)

    def fetch(self):
        """Load this project from the database by primary key."""
        return Project.match(get_graph(), self.name).first()

    def fetch_papers(self):
        """Return dicts for all papers belonging to this project."""
        return [{**paper[0].asdict()} for paper in self.papers._related_objects]

    def asdict(self):
        return {"name": self.name}
|
#!/usr/bin/env python3
"""
Analysis task base class.
Author: James Mulligan (james.mulligan@berkeley.edu)
"""
from __future__ import print_function
# General
import os
import sys
import time
# Data analysis and plotting
import ROOT
import yaml
# Analysis utilities
from pyjetty.alice_analysis.process.base import common_base
from pyjetty.alice_analysis.analysis.base import analysis_utils
#import base
#import analysis_utils
################################################################
class AnalysisBase(common_base.CommonBase):
    """Analysis task base class: stores I/O locations and loads the YAML config."""

    #---------------------------------------------------------------
    # Constructor
    #---------------------------------------------------------------
    def __init__(self, input_file_data='', input_file_response='', config_file='', output_dir='', file_format='', **kwargs):
        super(AnalysisBase, self).__init__(**kwargs)

        self.input_file_data = input_file_data
        self.input_file_response = input_file_response
        self.config_file = config_file
        self.output_dir = output_dir
        self.file_format = file_format

        # Create output dir (normalized to end with a slash)
        if not self.output_dir.endswith("/"):
            self.output_dir = self.output_dir + "/"
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)

        # Initialize utils class
        self.utils = analysis_utils.AnalysisUtils()

    #---------------------------------------------------------------
    # Initialize config file into class members
    #---------------------------------------------------------------
    def initialize_config(self):
        """Load the jet-radius list and debug level from the YAML config file."""
        # Read config file
        with open(self.config_file, 'r') as stream:
            config = yaml.safe_load(stream)

        self.jetR_list = config['jetR']
        self.debug_level = config['debug_level']
|
# Mapping of teen-code abbreviations to their meanings.
teen_dicti = {
    'rh' : 'Rảnh rỗi',
    'cn' : 'con',
    'kcj' : 'Không có gì',
    'ps' : 'tái bút',
    'kqt' : 'Không Quan Tâm',
    'wtf' : 'Cái đ*o gì vậy',
    'r' : 'ròi',
}

# Interactive lookup loop: translate known codes, offer to add unknown ones,
# and exit when the user declines to add one.
while True:
    print(*teen_dicti)
    your_code = input("Your Code? ")
    if your_code in teen_dicti:
        print(teen_dicti[your_code])
    else:
        # Fix: uppercase once here — the original applied .upper() a second
        # time (redundantly) in the comparison, and ended the line with a
        # stray semicolon.
        your_choice = input("You want to update it (y/n)?").upper()
        if your_choice == 'Y':
            your_add = input("Your Data?")
            teen_dicti[your_code] = your_add
            print(teen_dicti)
        else:
            break
|
# Fix: import from the public API paths — the private modules used
# originally (sklearn.datasets.base, sklearn.linear_model.logistic,
# sklearn.model_selection._split/_validation) were deprecated and removed
# in modern scikit-learn.
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, KFold, cross_val_score

iris = load_iris()

# Hold-out evaluation.  (A redundant unused ``LogisticRegression()``
# assignment before the fit was removed.)
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=0)
logreg = LogisticRegression().fit(X_train, y_train)
print("test accuracy: {:.2f}".format(logreg.score(X_test, y_test)))

# 3-fold cross-validation with the default splitter.
scores = cross_val_score(logreg, iris.data, iris.target, cv=3)
print("cross validation: {}".format(scores))

# Shuffled 3-fold cross-validation with a fixed seed.
kfold = KFold(n_splits=3, shuffle=True, random_state=0)
scores = cross_val_score(logreg, iris.data, iris.target, cv=kfold)
print("cross validation: {}".format(scores))
import uuid
from django.db import models
class Participant(models.Model):
    """A participant with a display name and a non-editable random secret."""
    name = models.CharField(max_length=64)
    # Generated once at creation; excluded from forms/admin (editable=False).
    secret = models.UUIDField(default=uuid.uuid4, editable=False)

    def __unicode__(self):
        # Legacy Python 2 / old-Django representation (kept for compatibility).
        return self.name

    def __str__(self):
        # Fix: on Python 3, __unicode__ is never called; without __str__ the
        # admin and shell show an unhelpful default representation.
        return self.name
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 31 16:48:59 2019
@author: Raghav
"""
import os
import random
# Characters for which training images are generated.
CHARACTERS = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
              'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y',
              'Z', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0']
FONTS = os.listdir('fonts/')

# Bug fix: the ImageMagick option is "-ping"; the original wrote "ping "
# (no dash), which convert parses as a filename, so the command failed and
# backgroundDimensions stayed empty.
backgroundDimensions = os.popen('convert ' + 'background.jpg ' + '-ping -format "%w %h" info:').read()
backgroundDimensions = backgroundDimensions.split(' ')
backgroundWidth = backgroundDimensions[0]
backgroundHeight = backgroundDimensions[1]

# Crop a 20x20 patch from the top-left corner to use as the glyph background.
backgroundOutfile = 'background_outfile.jpg'
command = "magick convert " + "background.jpg " + "-crop 20x20+0+0 " + backgroundOutfile
os.system(str(command))

# Render 1000 variants per character: random font, random blur (0-3) and a
# +/-1px random offset from center.
for i in range(0, len(CHARACTERS)):
    char_output_dir = 'training_data/' + CHARACTERS[i] + '/'
    if not os.path.exists(char_output_dir):
        os.makedirs(char_output_dir)
    print('Generating Data ' + char_output_dir)
    for j in range(0, 1000):
        font = 'fonts/' + random.choice(FONTS)
        # Get random blur amount
        blur = random.randint(0, 3)
        # Add random shifts from the center
        x = str(random.randint(-1, 1))
        y = str(random.randint(-1, 1))
        command = "magick convert " + str(backgroundOutfile) + " -fill " + str('white') + " -font " + \
            str(font) + " -weight 900 -pointsize 20 " + "-gravity center" + " -blur 0x" + str(blur) \
            + " -annotate +" + x + "+" + y + " " + str(CHARACTERS[i]) + " " + char_output_dir + "output_file" + str(i) + str(j) + ".jpg"
        # NOTE(review): os.popen does not wait for the command to finish;
        # os.system would serialize the renders — confirm intended.
        os.popen(str(command))
|
# encoding = UTF-8
# Autor: Silvia Ferman
# Mision 10
import matplotlib.pyplot as plot # libreria para graficar
# Function that returns the team names, uppercased and in alphabetical order
def listarEquiposOrdenados(nombreArchivo):
    """Return every team name (first '&'-separated field), uppercased and sorted."""
    entrada = open(nombreArchivo, "r")
    entrada.readline()  # skip title line
    entrada.readline()  # skip header line
    listaEquipos = [linea.split("&")[0].upper() for linea in entrada]
    entrada.close()
    listaEquipos.sort()
    return listaEquipos
# Function that returns each team together with its current points
def mostrarEquiposPuntos(nombre):
    """Return a flat list alternating team name (str) and points (int, field 8)."""
    entrada = open(nombre, "r")
    entrada.readline()  # skip title line
    entrada.readline()  # skip header line
    resultado = []
    for linea in entrada:
        campos = linea.split("&")
        resultado.extend([campos[0], int(campos[8])])
    entrada.close()
    return resultado
# Function that returns the teams that have lost 3 or fewer matches
def mostrarEquiposPerdedores(nombre):
    """Return names of teams whose lost-match count (field 4) is <= 3."""
    entrada = open(nombre, "r")
    entrada.readline()  # skip title line
    entrada.readline()  # skip header line
    filas = (linea.split("&") for linea in entrada)
    perdedores = [campos[0] for campos in filas if int(campos[4]) <= 3]
    entrada.close()
    return perdedores
# Function that returns the teams whose reported points are inconsistent
def reportarErrorPuntos(nombre):
    """Return teams where reported points (field 8) != 3*wins + 1*draws."""
    entrada = open(nombre, "r")
    entrada.readline()  # skip title line
    entrada.readline()  # skip header line
    conError = []
    for linea in entrada:
        campos = linea.split("&")
        # 3 points per win (field 2) plus 1 per draw (field 3).
        esperado = int(campos[2]) * 3 + int(campos[3]) * 1
        if int(campos[8]) != esperado:
            conError.append(campos[0])
    entrada.close()
    return conError
# Function that returns the team with the smallest goal difference
def mostrarDiferenciaGoles(nombre):
    """Return the team whose goal difference (field 7) is the smallest.

    Bug fixes vs. the original:
    - listaGoles was created as a list but indexed like a dict -> use a dict.
    - 'diferencia' was read before ever being assigned -> use min() instead.
    - the second 'if' was missing its ':' (syntax error).
    On ties, the first team encountered with the minimal value is returned.
    """
    entrada = open(nombre, "r")
    entrada.readline()  # skip title line
    entrada.readline()  # skip header line
    listaGoles = {}
    for linea in entrada:
        datos = linea.split("&")
        listaGoles[datos[0]] = int(datos[7])
    entrada.close()
    resultado = min(listaGoles, key=listaGoles.get)
    return resultado
# Function that shows a TEAMS vs. POINTS chart (it only draws, returns no data)
def graficarPuntos(nombre):
    """Plot teams (x) against their points (y) with matplotlib; returns None.

    NOTE(review): the file handle is never closed here — presumably harmless
    for a short script, but worth confirming.
    """
    entrada = open(nombre, "r")
    entrada.readline()  # skip title line
    entrada.readline()  # skip header line
    listaEquipos = []
    listaPuntos = []
    for linea in entrada:
        datos = linea.split("&")
        listaEquipos.append(datos[0])
        listaPuntos.append(int(datos[8]))
    # Draw the chart (blocks until the window is closed).
    plot.plot(listaEquipos, listaPuntos)
    plot.title("Equipos vs. Puntos")
    plot.xlabel("EQUIPOS")
    plot.ylabel("PUNTOS")
    plot.show()
def main():
    """Run every report function against LigaMX.txt and print the results."""
    ordenados = listarEquiposOrdenados("LigaMX.txt")
    print(ordenados)
    puntaje = mostrarEquiposPuntos("LigaMX.txt")
    print(puntaje)
    perdedores = mostrarEquiposPerdedores("LigaMX.txt")
    print(perdedores)
    errores = reportarErrorPuntos("LigaMX.txt")
    print(errores)
    goles = mostrarDiferenciaGoles("LigaMX.txt")
    print(goles)
    # graficarPuntos only draws; it returns None, so this prints 'None'.
    graficar = graficarPuntos("LigaMX.txt")
    print(graficar)
main()
|
from dataclasses import dataclass
from typing import List
@dataclass
class Superblock:
    """Superblock summary parsed from one CSV record."""
    block_num: int                 # total number of blocks
    inode_num: int                 # total number of i-nodes
    block_size: int                # block size in bytes
    inode_size: int                # i-node size in bytes
    blocks_per_group: int          # blocks per group
    inodes_per_group: int          # i-nodes per group
    first_non_reserved_inode: int  # first non-reserved i-node

    @classmethod
    def from_entries(cls, args):
        """Build from a CSV row; args[0] is the record tag, fields follow."""
        return cls(*(int(field) for field in args[1:8]))
@dataclass
class Group:
    """Block-group summary parsed from one CSV record."""
    group_index: int         # group number
    block_num: int           # total number of blocks in this group
    inode_num: int           # total number of i-nodes in this group
    free_block_num: int      # number of free blocks
    free_inode_num: int      # number of free i-nodes
    block_bitmap_index: int  # block number of the free-block bitmap
    inode_bitmap_index: int  # block number of the free-i-node bitmap
    first_inode_index: int   # block number of the first block of i-nodes

    @classmethod
    def from_entries(cls, args):
        """Build from a CSV row; args[0] is the record tag, fields follow."""
        return cls(*(int(field) for field in args[1:9]))
@dataclass
class Inode:
    """I-node summary parsed from one CSV record."""
    inode_index: int          # i-node number
    file_type: str            # 'f': file, 'd': directory, 's': symlink, '?' others
    mode: int                 # mode (low-order 12 bits, octal; suggested format "%o")
    uid: int                  # owner id
    gid: int                  # group id
    links_count: int          # links count
    creation_time: str        # time of last i-node change (mm/dd/yy hh:mm:ss, GMT)
    modification_time: str    # modification time (mm/dd/yy hh:mm:ss, GMT)
    access_time: str          # time of last access (mm/dd/yy hh:mm:ss, GMT)
    file_size: int            # file size
    block_num: int            # number of blocks of disk space taken up by this file
    block_addresses: List[int]

    @classmethod
    def from_entries(cls, args):
        """Build from a CSV row (tag at args[0]); the 15 block addresses are
        args[12:27].

        NOTE(review): the addresses are kept as the raw (string) entries
        despite the List[int] annotation — confirm callers convert them.
        """
        return cls(
            inode_index=int(args[1]),
            file_type=args[2],
            mode=int(args[3]),
            uid=int(args[4]),
            gid=int(args[5]),
            links_count=int(args[6]),
            creation_time=args[7],
            modification_time=args[8],
            access_time=args[9],
            file_size=int(args[10]),
            block_num=int(args[11]),
            block_addresses=args[12:27],
        )
@dataclass
class Dirent:
    """Directory-entry summary parsed from one CSV record."""
    parent_inode_index: int   # i-node number of the parent directory
    logical_byte_offset: int  # logical byte offset of this entry
    inode_index: int          # i-node number of the referenced file
    entry_length: int         # entry length
    name_length: int          # name length
    name: str                 # entry name

    @classmethod
    def from_entries(cls, args):
        """Build from a CSV row; args[0] is the record tag."""
        numeric = [int(field) for field in args[1:6]]
        return cls(*numeric, args[6])
@dataclass
class Indirect:
    """Indirect-block reference summary parsed from one CSV record."""
    inode_index: int           # i-node number of the owning file (decimal)
    level: int                 # 1: single indirect, 2: double, 3: triple
    logical_block_offset: int  # logical block offset within the file
    block_index: int           # block number
    ref_block_index: int       # reference block number

    @classmethod
    def from_entries(cls, args):
        """Build from a CSV row; args[0] is the record tag."""
        return cls(*(int(field) for field in args[1:6]))
@dataclass
class Block:
    """Block summary"""
    inode_index: int  # i-node that references this block
    offset: int       # logical block offset within the file
    level: int        # indirection level — presumably 0 means direct; confirm
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import logging
from dataclasses import dataclass
from typing import cast
from pants.backend.terraform.partition import partition_files_by_directory
from pants.backend.terraform.target_types import TerraformFieldSet
from pants.backend.terraform.tool import TerraformProcess
from pants.backend.terraform.tool import rules as tool_rules
from pants.core.goals.fmt import FmtResult, FmtTargetsRequest, Partitions
from pants.core.util_rules import external_tool
from pants.core.util_rules.partitions import Partition
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.internals.selectors import Get
from pants.engine.process import ProcessResult
from pants.engine.rules import collect_rules, rule
from pants.option.option_types import SkipOption
from pants.option.subsystem import Subsystem
from pants.util.strutil import pluralize
logger = logging.getLogger(__name__)
class TfFmtSubsystem(Subsystem):
    """Pants subsystem exposing options for the `terraform fmt` formatter."""
    options_scope = "terraform-fmt"
    name = "`terraform fmt`"
    help = "Terraform fmt options."
    # Lets users skip this formatter for the `fmt` and `lint` goals.
    skip = SkipOption("fmt", "lint")
class TffmtRequest(FmtTargetsRequest):
    """Fmt request tying Terraform field sets to the terraform-fmt subsystem."""
    field_set_type = TerraformFieldSet
    tool_subsystem = TfFmtSubsystem
@dataclass(frozen=True)
class PartitionMetadata:
    """Identifies one fmt partition: the directory `terraform fmt` runs in."""
    directory: str

    @property
    def description(self) -> str:
        # Human-readable label Pants shows for this partition.
        return self.directory
@rule
async def partition_tffmt(
    request: TffmtRequest.PartitionRequest, tffmt: TfFmtSubsystem
) -> Partitions:
    """Group the requested Terraform sources into one partition per directory,
    since `terraform fmt` operates on a directory at a time. Returns no
    partitions when the user opted out via --terraform-fmt-skip."""
    if tffmt.skip:
        return Partitions()
    source_files = await Get(
        SourceFiles, SourceFilesRequest([field_set.sources for field_set in request.field_sets])
    )
    return Partitions(
        Partition(tuple(files), PartitionMetadata(directory))
        for directory, files in partition_files_by_directory(source_files.files).items()
    )
@rule(desc="Format with `terraform fmt`")
async def tffmt_fmt(request: TffmtRequest.Batch, tffmt: TfFmtSubsystem) -> FmtResult:
    """Run `terraform fmt` on one partition (a single directory) and capture
    the possibly-reformatted files as the result digest."""
    directory = cast(PartitionMetadata, request.partition_metadata).directory
    result = await Get(
        ProcessResult,
        TerraformProcess(
            args=("fmt", directory),
            input_digest=request.snapshot.digest,
            output_files=request.files,
            description=f"Run `terraform fmt` on {pluralize(len(request.files), 'file')}.",
        ),
    )
    return await FmtResult.create(request, result)
def rules():
    """Expose this module's rules plus the rule sets they depend on."""
    return [
        *collect_rules(),
        *external_tool.rules(),
        *tool_rules(),
        *TffmtRequest.rules(),
    ]
|
from django.db import models
# Create your models here.
class ClueNode(models.Model):
    """One clue in a treasure hunt, linked back to the previous clue.

    The reverse accessor `next_node` gives the following clue.
    """
    title = models.CharField(max_length=140)
    unique_id = models.CharField(max_length=7, unique=True)
    text = models.CharField(max_length=140)
    latitude = models.FloatField()
    longitude = models.FloatField()
    # Fix: on_delete is a required argument since Django 2.0; CASCADE matches
    # the implicit default behavior of older Django versions.
    prev_node = models.OneToOneField("self", related_name="next_node", null=True, blank=True, on_delete=models.CASCADE)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
class TreasureHunt(models.Model):
    """A hunt, identified by its first clue node."""
    # Fix: on_delete is a required argument since Django 2.0; CASCADE matches
    # the implicit default behavior of older Django versions.
    first_node = models.OneToOneField(ClueNode, related_name="treasure_hunt", on_delete=models.CASCADE)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
|
"""
functions.py
This code implements multiplication mod p and solving a linear
equation mod p.
"""
import numpy as np
from .gauss_mod_p import gauss_col
###############################################################################
# Multiply two matrices mod p
def multiply_mod_p(A, B, p):
    """Return the matrix product A @ B with every entry reduced mod p."""
    product = np.matmul(A, B)
    return np.mod(product, p)
###############################################################################
# solve_mod_p
# Function to solve linear equations mod p
def solve_mod_p(A, b, p):
    """
    Find the vector x such that A * x = b (mod p)

    This method assumes that a solution exists to the equation
    A * x = b (mod p). If a solution does not exist, it raises a ValueError
    exception.

    Parameters
    ----------
    A : :obj:`Numpy Array`
        2D array
    b : :obj:`Numpy Array`
        1D array
    p : int(prime)
        Number to mod out by.

    Returns
    -------
    x : :obj:`Numpy Array`
        1D array. Solution to equation.

    Raises
    ------
    ValueError
        If a solution to the equation does not exist.
    """
    # Row-reduce the augmented matrix [A | b] over Z/pZ.
    R, T = gauss_col(np.append(A, np.array([b]).T, axis=1), p)
    # A nonzero entry left in the reduced last column means b is not in the
    # column span of A, so the system is inconsistent.
    if np.any(R[:, -1]):
        # Improvement: carry the message in the exception instead of a
        # separate print() followed by a bare raise.
        raise ValueError("Linear equation has no solution.")
    # Return last column from T without the last row
    return -T[:, -1][:-1] % p
###############################################################################
# solve_matrix_mod_p
#
def solve_matrix_mod_p(A, B, p):
    """
    Same as :meth:`solve_mod_p`, but with B and X being matrices.

    That is, given two matrices A and B, we want to find a matrix X
    such that A * X = B (mod p)

    Parameters
    ----------
    A : :obj:`Numpy Array`
        2D array
    B : :obj:`Numpy Array`
        2D array
    p : int(prime)

    Returns
    -------
    X : :obj:`Numpy Array`
        2D array solution.

    Raises
    ------
    ValueError
        There is no solution to the given equation
    """
    # Row-reduce the augmented matrix [A | B^T] over Z/pZ.
    R, T = gauss_col(np.append(A, B.T, axis=1), p)
    # A nonzero residue in the appended columns means some column of B is not
    # in the span of A's columns, so no X exists.
    if np.any(R[:, np.size(A, 1):]):
        # Improvement: carry the message in the exception instead of a
        # separate print() followed by a bare raise.
        raise ValueError("Linear matrix equation has no solution.")
    # Return matrix X
    return - T[:, np.size(A, 1):][:np.size(A, 1)] % p
|
#读取yaml文件数据
import yaml
import os
class ReadYaml:
    """Read/write a YAML config file located relative to the project root.

    The resolved path is dirname(abspath(".")) + yamlPath, i.e. the *parent*
    of the current working directory plus the (Windows-style) relative path.
    """

    def __init__(self, yamlPath=r"\conf\db1.yaml"):
        # Fix: raw string. The original "\conf\db1.yaml" happened to have the
        # same value, but non-raw backslash sequences like "\c" are invalid
        # escapes (DeprecationWarning, a SyntaxError in future Pythons).
        self.yamlPath = yamlPath

    def getpath(self):
        """Return the absolute path of the yaml file."""
        # Directory the script is run from.
        current_dir = os.path.abspath(".")
        # Parent directory plus the configured relative path.
        path = os.path.dirname(current_dir) + self.yamlPath
        return path

    def readyaml(self):
        """Load the yaml file and return its contents (typically a dict).

        Raises FileNotFoundError when the resolved path does not exist.
        """
        path = self.getpath()
        if not os.path.isfile(path):
            raise FileNotFoundError("文件路径不存在,请检查路径是否正确:%s" % path)
        # Read as UTF-8 text and parse with the full (safe-by-default) loader.
        with open(path, 'r', encoding='utf-8') as f:
            y = f.read()
            d = yaml.load(y, Loader=yaml.FullLoader)
        return d

    def writeyaml(self):
        """Write a sample DB-connection mapping to the yaml file."""
        wpath = self.getpath()
        aproject = {
            "host": "localhost",
            "port": 3306,
            "user": "root",
            "password": "123456",
            "charset": "utf8"
        }
        # For non-ASCII content: encoding='utf-8' on open(), plus
        # default_flow_style=False and allow_unicode=True on dump().
        with open(wpath, 'w', encoding='utf-8') as f:
            yaml.dump(aproject, f, default_flow_style=False, encoding='utf-8', allow_unicode=True)
if __name__ == '__main__':
    # Fix: raw string avoids deprecated invalid-escape sequences ("\c", "\l");
    # the value itself is unchanged.
    ry = ReadYaml(r"\conf\log.yaml").readyaml()
    print(ry)
|
### -*- coding: utf-8 -*- #############################################
# Разработано компанией Стерх (http://sterch.net/)
# Все права защищены, 2010
#
# Developed by Sterch (http://sterch.net/)
# All right reserved, 2010
#######################################################################
""" Tests for the <lock .../> <rlock .../> directive
"""
__author__ = "Maxim Polscha (maxp@sterch.net)"
__license__ = "ZPL"
import sterch.threading
import threading
import zope.app.component
from sterch.threading import interfaces
from StringIO import StringIO
from unittest import TestCase, makeSuite, main
from zope.component import queryUtility
from zope.component.testing import PlacelessSetup
from zope.configuration.xmlconfig import XMLConfig, xmlconfig
class Test(PlacelessSetup, TestCase):
    """Verify that the <lock .../> and <rlock .../> ZCML directives register
    ILock/IRLock utilities, both anonymous and named."""

    def setUp(self):
        super(Test, self).setUp()
        # Load the directive definitions (meta.zcml) before processing the
        # package's own configure.zcml.
        XMLConfig('meta.zcml', zope.app.component)()
        XMLConfig('meta.zcml', sterch.threading)()
        XMLConfig('configure.zcml', sterch.threading)()

    def test_valid_zcml(self):
        # Anonymous and named variants of both directives in one config.
        xml=u"""<configure xmlns="http://namespaces.sterch.net/threading">
    <lock />
    <lock name="My Lock"/>
    <rlock />
    <rlock name="My RLock"/>
    </configure>"""
        xmlconfig(StringIO(xml))
        # Each directive must have registered a utility under the expected
        # interface (and name, when given).
        self.assertTrue(queryUtility(interfaces.ILock) is not None)
        self.assertTrue(queryUtility(interfaces.ILock, name=u"My Lock") is not None)
        self.assertTrue(queryUtility(interfaces.IRLock) is not None)
        self.assertTrue(queryUtility(interfaces.IRLock, name=u"My RLock") is not None)
def test_suite():
    """Return the suite containing all tests in this module."""
    return makeSuite(Test)
if __name__ == '__main__':
    # Run through the suite above so discovery matches Zope conventions.
    main(defaultTest='test_suite')
|
from django.db import models
class Member(models.Model):
    """A registered member; username is the natural primary key."""
    username = models.CharField(max_length=40, primary_key=True)
    name = models.CharField(max_length=30)
    # NOTE(review): stored as plain text — consider Django's auth hashing.
    password = models.CharField(max_length=15)
    regno = models.CharField(max_length=10)
    # 2-char code — presumably a member role/category; confirm with callers.
    type = models.CharField(max_length=2)
    # 1-char flag — presumably tackle participation; confirm with callers.
    tackle = models.CharField(max_length=1)
class Admin(models.Model):
    """An administrator account; username is the primary key."""
    username = models.CharField(max_length=40, primary_key=True)
    name = models.CharField(max_length=30)
    # NOTE(review): stored as plain text — consider Django's auth hashing.
    password = models.CharField(max_length=15)
class TempMember(models.Model):
    """Same fields as Member minus type/tackle — presumably a signup awaiting
    approval; confirm against the views that create it."""
    username = models.CharField(max_length=40, primary_key=True)
    name = models.CharField(max_length=30)
    regno = models.CharField(max_length=10)
    password = models.CharField(max_length=15)
class TackleMember(models.Model):
    """A file uploaded by a member, stored under tackle/<date>/."""
    file = models.FileField(upload_to="tackle/%Y/%m/%d/", blank=True, null=True)
    member = models.ForeignKey(Member, on_delete=models.CASCADE, blank=True, null=True)
class Feedback(models.Model):
    """Feedback text left by a member, keyed by a short string id."""
    member = models.ForeignKey(Member, on_delete=models.CASCADE, blank=True, null=True)
    type = models.CharField(max_length=10)
    text = models.CharField(max_length=200)
    # Replaces Django's auto-increment pk with a 4-char string id.
    id = models.CharField(max_length=4, primary_key=True)
class FileCSV(models.Model):
    """A named CSV upload stored under data/."""
    name = models.CharField(max_length=8)
    file = models.FileField(upload_to="data/")
class Topscores(models.Model):
    """A member's score entry; note the score is stored as a string."""
    member = models.ForeignKey(Member, on_delete=models.CASCADE, blank=True, null=True)
    score = models.CharField(max_length=4)
class Message(models.Model):
    """A message keyed by sender name (one stored message per name)."""
    name = models.CharField(max_length=40, primary_key=True)
    message = models.CharField(max_length=300)
class EventCreator(models.Model):
    """An event definition; eventName is the primary key."""
    # Fix: removed the stray trailing semicolon on this line.
    eventName = models.CharField(max_length=20, primary_key=True)
    description = models.CharField(max_length=300)
    # 1-char flag — presumably whether the event is team-based; confirm.
    team = models.CharField(max_length=1)
class EventMember(models.Model):
    """A member's registration in an event, with slot and team identifiers."""
    username = models.CharField(max_length=40, blank=False, null=False)
    regno = models.CharField(max_length=10, blank=False, null=False)
    slot = models.CharField(max_length=8)
    teamID = models.CharField(max_length=5)
    # References EventCreator.eventName by value, not via a ForeignKey.
    eventName = models.CharField(max_length=20)
from flask_wtf import FlaskForm
from wtforms import PasswordField, SubmitField, StringField
from wtforms.fields.html5 import EmailField
from wtforms.validators import DataRequired
class SignUpForm(FlaskForm):
    """Registration form: nickname, email, password and confirmation.

    Field labels are user-facing Russian strings and are kept as-is.
    """
    nickname = StringField("Имя пользователя", validators=[DataRequired()])
    email = EmailField("Email", validators=[DataRequired()])
    password = PasswordField("Пароль", validators=[DataRequired()])
    repeat_password = PasswordField("Повторите пароль", validators=[DataRequired()])
    submit = SubmitField("Зарегистрироваться")
|
from .blueprint import bp_v1
from . import web
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Qotto, 2019
import ast
from uuid import uuid4
from typing import Dict, Any
class Coffee(object):
    """A coffee order with its lifecycle state and processing context."""

    uuid: str
    cup_type: str
    coffee_type: str
    coffee_for: str
    amount: float
    state: str
    context: Dict[str, Any]

    def __init__(self, coffee_type: str, cup_type: str, coffee_for: str, amount: float, uuid: str = None):
        self.coffee_type = coffee_type
        self.cup_type = cup_type
        self.coffee_for = coffee_for
        self.amount = amount
        # Generate a fresh 32-char hex id unless the caller supplies one.
        self.uuid = uuid4().hex if uuid is None else uuid
        self.state = 'ordered'
        self.context = dict()

    def set_state(self, state: str) -> None:
        """Record a new lifecycle state for the order."""
        self.state = state

    def set_context(self, context: Dict[str, Any]) -> None:
        """Attach processing context metadata to the order."""
        self.context = context

    def __to_dict__(self) -> Dict[str, Any]:
        """Serialize the order fields (without state/context) to a dict."""
        fields = {
            'uuid': self.uuid,
            'cup_type': self.cup_type,
            'coffee_type': self.coffee_type,
            'coffee_for': self.coffee_for,
            'amount': self.amount
        }
        return fields

    def __to_bytes_dict__(self) -> bytes:
        """Serialize all fields (including state/context) as the UTF-8 bytes
        of a Python dict literal (insertion order matches __to_dict__)."""
        full = dict(self.__to_dict__())
        full['state'] = self.state
        full['context'] = self.context
        return str(full).encode('utf-8')

    @classmethod
    def __from_dict_bytes__(cls, data: bytes):
        """Rebuild a Coffee from __to_bytes_dict__ output (literal_eval is
        safe for this trusted, repr-like payload)."""
        payload = ast.literal_eval(data.decode('utf-8'))
        coffee = cls(coffee_type=payload['coffee_type'], cup_type=payload['cup_type'],
                     coffee_for=payload['coffee_for'], amount=payload['amount'], uuid=payload['uuid'])
        coffee.set_state(payload['state'])
        coffee.set_context(payload['context'])
        return coffee
|
from landscapesim import models
from landscapesim.common import config
from landscapesim.common.types import default_int, empty_or_yes_to_bool
from landscapesim.common.utils import get_random_csv
from .base import ImporterBase
from .filters import *
# Sheet specifications passed to ImporterBase._extract_sheet (see
# ProjectImporter below). Each is a 4-tuple — presumably
# (SyncroSim sheet name, LandscapeSim model, field config, per-column
# converters/filters); confirm the layout against ImporterBase.
# NOTE(review): TRANSITION_TYPE_GROUP is defined here but ProjectImporter
# never imports it — confirm it is consumed elsewhere.
TERMINOLOGY = (
    'STSim_Terminology',
    models.Terminology,
    config.TERMINOLOGY,
    (str, str, str, str, str, str, str)
)
DISTRIBUTION_TYPE = (
    'Stats_DistributionType',
    models.DistributionType,
    config.DISTRIBUTION_TYPE,
    (str, str, empty_or_yes_to_bool)
)
STRATUM = (
    'STSim_Stratum',
    models.Stratum,
    config.STRATUM,
    (str, str, str, default_int)
)
SECONDARY_STRATUM = (
    'STSim_SecondaryStratum',
    models.SecondaryStratum,
    config.SECONDARY_STRATUM,
    (str, str, default_int)
)
STATECLASS = (
    'STSim_StateClass',
    models.StateClass,
    config.STATECLASS,
    (str, str, str, str, str, default_int)
)
TRANSITION_TYPE = (
    'STSim_TransitionType',
    models.TransitionType,
    config.TRANSITION_TYPE,
    (str, str, str, default_int)
)
TRANSITION_GROUP = (
    'STSim_TransitionGroup',
    models.TransitionGroup,
    config.TRANSITION_GROUP,
    (str, str)
)
TRANSITION_TYPE_GROUP = (
    'STSim_TransitionTypeGroup',
    models.TransitionTypeGroup,
    config.TRANSITION_TYPE_GROUP,
    (TransitionTypeFilter, TransitionGroupFilter, str)
)
TRANSITION_MULTIPLIER_TYPE = (
    'STSim_TransitionMultiplierType',
    models.TransitionMultiplierType,
    config.TRANSITION_MULTIPLIER_TYPE,
    (str,)
)
ATTRIBUTE_GROUP = (
    'STSim_AttributeGroup',
    models.AttributeGroup,
    config.ATTRIBUTE_GROUP,
    (str, str)
)
STATE_ATTRIBUTE_TYPE = (
    'STSim_StateAttributeType',
    models.StateAttributeType,
    config.STATE_ATTRIBUTE_TYPE,
    (str, str, str, AttributeGroupFilter)
)
TRANSITION_ATTRIBUTE_TYPE = (
    'STSim_TransitionAttributeType',
    models.TransitionAttributeType,
    config.TRANSITION_ATTRIBUTE_TYPE,
    (str, str, str, AttributeGroupFilter)
)
class ProjectImporter(ImporterBase):
    """
    The base Project importer, responsible for converting SyncroSim
    project-related data into the appropriate LandscapeSim project-related
    data.
    """

    related_model = models.Project

    def __init__(self, console, project=None):
        # Extract into a throwaway CSV derived from the library's temp file.
        super().__init__(console, project, get_random_csv(project.library.tmp_file))
        self.sheet_kwargs = {'pid': project.pid, 'overwrite': True, 'orig': True}

    def import_terminology(self):
        """Import the STSim_Terminology sheet."""
        self._extract_sheet(TERMINOLOGY)

    def import_distribution_types(self):
        """Import the Stats_DistributionType sheet."""
        self._extract_sheet(DISTRIBUTION_TYPE)

    def import_stratum(self):
        """Import the STSim_Stratum sheet."""
        self._extract_sheet(STRATUM)

    def import_secondary_stratum(self):
        """Import the STSim_SecondaryStratum sheet."""
        self._extract_sheet(SECONDARY_STRATUM)

    def import_stateclasses(self):
        """Import the STSim_StateClass sheet."""
        self._extract_sheet(STATECLASS)

    def import_transition_types(self):
        """Import the STSim_TransitionType sheet."""
        self._extract_sheet(TRANSITION_TYPE)

    def import_transition_groups(self):
        """Import the STSim_TransitionGroup sheet."""
        self._extract_sheet(TRANSITION_GROUP)

    def import_transition_multiplier_types(self):
        """Import the STSim_TransitionMultiplierType sheet."""
        self._extract_sheet(TRANSITION_MULTIPLIER_TYPE)

    def import_attribute_groups(self):
        """Import the STSim_AttributeGroup sheet."""
        self._extract_sheet(ATTRIBUTE_GROUP)

    def import_state_attribute_types(self):
        """Import the STSim_StateAttributeType sheet."""
        self._extract_sheet(STATE_ATTRIBUTE_TYPE)

    def import_transition_attribute_types(self):
        """Import the STSim_TransitionAttributeType sheet."""
        self._extract_sheet(TRANSITION_ATTRIBUTE_TYPE)

    def process_project_definitions(self):
        """Run every definition importer, preserving the original order."""
        steps = (
            self.import_terminology,
            self.import_distribution_types,
            self.import_stratum,
            self.import_secondary_stratum,
            self.import_stateclasses,
            self.import_transition_types,
            self.import_transition_groups,
            self.import_transition_multiplier_types,
            self.import_attribute_groups,
            self.import_state_attribute_types,
            self.import_transition_attribute_types,
        )
        for run_import in steps:
            run_import()
|
#I pledge my honor that I have abided by the Stevens Honor System. Jill McDonald
#I understand that I may access the course textbook and course lecture notes
#but I am not to access any other resource.
#I also pledge that I worked alone on this exam.
#Quiz 2 Part Two
def numbVowels(string):
    """Count the vowels in *string*, print the summary, and return the count.

    Improvement over the original: the count is returned as well as printed,
    so the function is usable programmatically; existing callers that ignore
    the return value are unaffected.
    """
    count = 0
    for letter in string:
        if letter in "AEIOUaeiou":
            count = count + 1
    print("There are " + str(count) + " vowels in " + string)
    return count
def encrypt(string):
    """Caesar-shift letters by +3 (wrapping within each case), print the
    result, and return it.

    Non-alphabetic characters pass through unchanged. Improvements over the
    original: the encrypted string is returned (callers that ignore the
    return value are unaffected), and the duplicated upper/lower branches
    are collapsed by computing the case's base code point once.
    """
    encrypted = ""
    for letter in string:
        if not letter.isalpha():
            encrypted = encrypted + letter
            continue
        # Shift within the 26-letter alphabet of the letter's own case.
        base = ord("A") if letter.isupper() else ord("a")
        encrypted = encrypted + chr((ord(letter) - base + 3) % 26 + base)
    print("Encrypted string: " + encrypted)
    return encrypted
def main():
    """Interactive menu: math operations (add/sub/mul/div on two floats) or
    string operations (vowel count / Caesar encryption)."""
    print("For Mathematical Functions, Please Enter the Number 1")
    print("For String Operations, Please Enter the Number 2")
    resp = input()
    if resp == "1":
        print("For Addition, Please Enter the Number 1")
        print("For Subtraction, Please Enter the Number 2")
        print("For Multiplication, Please Enter the Number 3")
        print("For Division, Please Enter the Number 4")
        resp = input()
        numb1 = float(input("Please enter the first number: "))
        numb2 = float(input("Please enter the second number: "))
        if resp == "1":
            print(numb1 + numb2)
        if resp == "2":
            print(numb1 - numb2)
        if resp == "3":
            print(numb1 * numb2)
        if resp == "4":
            # NOTE(review): division by zero is unhandled and will raise.
            print(numb1 / numb2)
    if resp == "2":
        print("To Determine the Number of Vowels in a String; Enter the Number 1")
        print("To Encrypt a String; Enter the Number 2")
        resp = input()
        string = input("Please enter a string: ")
        if resp == "1":
            numbVowels(string)
        if resp == "2":
            encrypt(string)
main()
|
from scipy import optimize
from objective_function.main import *
from data_util import get_structure
from initial_prediction import predict_normal
import numpy as np
class MembraneOptimizer:
    """Refine an initial membrane-plane prediction for a PDB structure by
    maximizing the Q-value objective with simulated annealing.

    NOTE(review): assumes predict_normal returns (normal, upper, lower) and
    objective_function returns a 3-tuple whose index [2] is the Q-value —
    confirm against their definitions.
    """

    def __init__(self):
        # Placeholders populated by load_pdb(); 0 means "not loaded yet".
        self.structure, self.helices = 0, 0
        self.init_pred = 0
        self.initial_q = 0
        self.initial_x = 0

    def load_pdb(self, pdb_id):
        """Load the structure for *pdb_id* and compute the initial prediction
        and its Q-value baseline."""
        self.structure, self.helices = get_structure(pdb_id)
        self.init_pred = predict_normal(self.structure, self.helices)
        self.initial_q = objective_function(self.structure, (self.init_pred[1], self.init_pred[2]), self.init_pred[0])[2]
        self.initial_x = 0

    def optimize(self, max_iter=20):
        """Run dual annealing within +/-5 Å bounds of the initial prediction.

        Returns (normal, upper, lower, structure), where normal = upper - lower.
        """
        bounds = self.get_bounds(5.0)
        # Minimizing the negated objective maximizes the Q-value.
        result = optimize.dual_annealing(self.objective_optimization_wrapper, bounds, maxiter=max_iter)
        # print(f"Q-Value Improvement: {-result.fun/self.initial_q - 1}")
        # print(f"Old Normal: {self.initial_x}")
        # print(f"new Normal: {result.x}")
        upper = np.array((result.x[0],result.x[1],result.x[2]))
        lower = np.array((result.x[3],result.x[4],result.x[5]))
        normal = upper - lower
        return normal,upper,lower,self.structure

    def get_bounds(self, range):
        """
        Creates bounds for the upper and lower position inside which the
        optimization is performed.

        NOTE(review): the parameter name 'range' shadows the builtin.

        :param range: Range of the parameter space for each parameter, in angstrom
        :return: Array containing tuples of bounds for the 6 design variables
        """
        upper = self.init_pred[1]
        lower = self.init_pred[2]
        upper_bounds = list(zip(upper - range, upper + range))
        lower_bounds = list(zip(lower - range, lower + range))
        # Remember the starting point (concatenated upper+lower coordinates).
        self.initial_x = np.hstack((upper, lower))
        return upper_bounds + lower_bounds

    def objective_optimization_wrapper(self, x):
        """
        Wrapper for the objective function to achieve a suitable format for an
        optimization algorithm. Parameters describe the coordinates of the two
        points describing the membrane.

        :param x: 1D vector of the 6 coordinates of the two points describing the plane
        :return: negated Q-value (so a minimizer maximizes Q)
        """
        upper = np.asarray(x[:3])
        lower = np.asarray(x[3:])
        axis = (upper, lower)
        normal = upper - lower
        return -objective_function(self.structure, axis, normal)[2]
# first bound 5
# opt = MembraneOptimizer()
# opt.load_pdb("4ikz")
# res = opt.optimize(20)
# normal, upper, lower, structure = opt.optimize(20)
# structure_factor, hydrophobic_factor, q_value = objective_function(structure,(upper,lower), normal)
# print(structure_factor)
# print(hydrophobic_factor)
# print(q_value)
# print(res)
|
from time import sleep
import pyupm_i2clcd as lcd
import urllib2
import json
import re
import random
import mraa
# digital input - button in D6
touchPin = mraa.Gpio(6)
touchPin.dir(mraa.DIR_IN)
# digital output - led in D8
ledPin = mraa.Gpio(8)
ledPin.dir(mraa.DIR_OUT)
ledPin.write(0)  # start with the LED off
# Some invalid json repair: http://stackoverflow.com/questions/15198426/fixing-invalid-json-escape
# NOTE(review): the trailing comment says "up to 3 digits" but the pattern
# allows 1-6 octal digits — confirm which width is intended.
invalid_escape = re.compile(r'\\[0-7]{1,6}') # up to 3 digits for byte values up to FF
def replace_with_byte(match):
    # Decode an octal escape like '\101' to the character it encodes
    # (Python 2: unichr; drop the leading backslash, parse base 8).
    return unichr(int(match.group(0)[1:], 8))
def repair(brokenjson):
    """Replace invalid octal escape sequences in *brokenjson* with the
    characters they encode, so json.loads can parse it."""
    fixed = invalid_escape.sub(replace_with_byte, brokenjson)
    return fixed
#################################################################
# Main loop: on a button press (D6), light the LED, fetch a random quote
# from the Forismatic API, scroll it across the 16x2 RGB LCD, then reset.
# (Python 2 code: print statements, urllib2.)
while 1:
    while touchPin.read() == 1:
        ledPin.write(1)  # LED on while a quote is being handled
        sleep(2)         # debounce / give the user time to release
        print "Getting some quotes online..."
        url = 'http://api.forismatic.com/api/1.0/'
        params = 'method=getQuote&key=457653&format=json&lang=en'
        respJson = urllib2.urlopen(url, params).read()
        try:
            #Repair invalid json escape character
            repairMap = repair(respJson)
            respMap = json.loads(repairMap)
            print "Quotes received!"
            quoteText = respMap["quoteText"]
            quoteAuthor = respMap["quoteAuthor"]
            quote = list(quoteText + ' -' + quoteAuthor)
        except:
            # Fallback quote when the response cannot be parsed.
            print "Illegal character found, using generic"
            quote = list("Today is gonna be a great day.-")
        # LCD on I2C bus 0 (text controller 0x3E, RGB backlight 0x62),
        # with a random backlight color per quote.
        myLcd = lcd.Jhd1313m1(0, 0x3E, 0x62)
        myLcd.setColor(random.randrange(0,255), random.randrange(0,255), random.randrange(0,255))
        myLcd.cursorBlinkOn()
        line = 0
        # Write one character at a time, alternating the two 16-char rows.
        for i in range(len(quote)):
            if i%16 == 0:
                if i%32 == 0:
                    # clear the whole screen if the LCD is fully occupied
                    sleep(0.7)
                    myLcd.setCursor(0,0)
                    myLcd.write(" ")
                    myLcd.setCursor(1,0)
                    myLcd.write(" ")
                if i > 0:
                    line += 1
                    line = line % 2
            myLcd.setCursor(line,i%16)
            myLcd.write(str(quote[i]))
            sleep(0.2)
        # Leave the finished quote visible briefly, then blank the display.
        sleep(3)
        myLcd.clear()
        myLcd.cursorBlinkOff()
        myLcd.setColor(0,0,0)
        ledPin.write(0)
def power(base, exponent=2):
    """Return *base* raised to *exponent* (squares by default)."""
    return base ** exponent
# Demo: the default exponent (2) squares the argument.
print(f' Result: {power(3)}')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.