text stringlengths 8 6.05M |
|---|
#!usr/bin/env python2
import numpy as np
from matplotlib import pyplot as plt
# Paso = (max-min)/(N) -> N=2^B
#A = 1.2 # amplitude of signal
# quantization stepsize
#n = 2000 # number of samples
#x -> entrada
#Q ->paso del cuantizador
#N -> numero de niveles
#B -> numero de bits
#xq = floor(x*(2^B-1)/(max{x}-min{x}))
def uniform_quantizer(X, B):
    """Quantize signal X with a uniform mid-tread quantizer using B bits.

    The step size spans the full range of X over 2**B levels:
    Q = (max(X) - min(X)) / 2**B, and each sample is mapped to
    Q * floor(x/Q + 0.5), i.e. rounded to the nearest level.

    :param X: array-like of samples to quantize
    :param B: number of bits (2**B quantization levels)
    :returns: numpy array of quantized samples
    """
    x = np.asarray(X, dtype=float)
    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float does the same job here.
    Q = float(np.max(x) - np.min(x)) / float(2 ** B)
    # Vectorized round-to-nearest-level (replaces the per-sample Python loop).
    xQ = Q * np.floor(x / Q + 0.5)
    print("se usaron %d bits" % B)
    return xQ
# def bit_stream(xQ,B):
# #xQ-> senal cuantizada
# #B-> num de bits
# Q = np.float(np.max(xQ) - np.min(xQ))/np.float(2**B)
# for i in range(-2**(B-1),2**(B-1)):
# xQ = xQ*((2**B -1)/2)
# bits = []
# for i in range(0,np.size(xQ)):
# if (xQ[i]>0):
# else:
# return bits
def plot_signals(x, xQ,B):
    """Plot the original signal, the quantized signal and the
    quantization error (xQ - x) on one matplotlib figure.

    :param x: original signal (array-like)
    :param xQ: quantized signal, same length as x
    :param B: number of quantizer bits (currently unused in the plot)
    """
    e = xQ - x  # quantization error per sample
    plt.figure(figsize=(10,6))
    plt.plot(x, label=r'original signal $x[k]$',color='blue')
    plt.plot(e, label=r'quantization error $E_Q[k]$',color='orange')
    plt.plot(xQ, label=r'quantized signal $x_Q[k]$',color='red')
    plt.xlabel(r'$k$')
    #plt.axis([0, np.size(x), -1.1*min(x), 1.1*max(x)])
    plt.legend(loc='upper right')
    plt.grid()
    plt.show()
# Demo: quantize a sine wave with a 4-bit uniform quantizer and plot
# the original vs. quantized signal plus the quantization error.
n = 2000  # kept from the original script; not used below
A = 1.2   # kept from the original script; not used below
b = 4     # number of quantizer bits
t = np.linspace(0, 6, 200)
x = np.sin(2 * np.pi * t)  # + np.random.normal(0, 0.5, np.size(t))
# quantize signal
xQ = uniform_quantizer(x, b)
# plot signals
plot_signals(x, xQ, b)
import redis
def get_from_db(key):
    """Fetch the value stored under `key` from the local Redis instance.

    Returns the raw bytes stored by Redis, or None if the key is absent.
    Connects with StrictRedis defaults (localhost:6379, db 0).
    """
    client = redis.StrictRedis()
    flight_data = client.get(key)
    return flight_data
def add_to_db(key, value):
    """Store `value` under `key` in the local Redis instance.

    Connects with StrictRedis defaults (localhost:6379, db 0); any
    existing value under `key` is overwritten.
    """
    client = redis.StrictRedis()
    client.set(key, value)
|
#
# @lc app=leetcode.cn id=121 lang=python3
#
# [121] 买卖股票的最佳时机
#
# @lc code=start
class Solution:
    def maxProfit(self, prices: "List[int]") -> int:
        """Return the best profit from one buy + one later sell.

        Space-optimized DP:
          dp_i_0 = best profit while holding no stock,
          dp_i_1 = best profit while holding stock (i.e. bought at the
                   cheapest price seen so far).
        O(n) time, O(1) space.
        """
        # Guard: no prices -> no transaction -> zero profit
        # (the original indexed prices[0] and raised IndexError).
        if not prices:
            return 0
        dp_i_0, dp_i_1 = 0, -prices[0]
        for p in prices:
            dp_i_0 = max(dp_i_0, dp_i_1 + p)  # sell today, or keep cash
            dp_i_1 = max(dp_i_1, -p)          # buy at the cheapest price so far
        return dp_i_0
# @lc code=end
|
class Node:
    """Binary-tree node holding a value and left/right child links."""

    def __init__(self, val):
        # BUG FIX: the original defined `init` (missing double underscores),
        # so Node(val) raised TypeError; __init__ makes construction work.
        self.right = None
        self.data = val
        self.left = None
# your task is to complete this function
# function should print the level order traversal of the binary tree in spiral order
# Note: You aren't required to print a new line after every test case
def printSpiral(root):
    """Print the tree's level-order traversal in spiral order.

    Levels are collected into {depth: [values left-to-right]} via a
    pre-order walk (left subtree fully before right, which keeps each
    level's list in left-to-right order), then even-numbered levels are
    printed reversed. Values are printed space-separated, no newline.
    """
    if root:
        Q = dict()
        Q[0] = [root.data]

        def bfs(i, root):
            # Append root's children to level i, then recurse one level down.
            if root is None:
                return
            # setdefault replaces the original bare try/except, which
            # would also have swallowed unrelated errors.
            if root.left:
                Q.setdefault(i, []).append(root.left.data)
            if root.right:
                Q.setdefault(i, []).append(root.right.data)
            if root.left:
                bfs(i + 1, root.left)
            if root.right:
                bfs(i + 1, root.right)

        bfs(1, root)
        for i in Q:
            if i % 2 == 0:
                for j in Q[i][::-1]:
                    print(j, end=" ")
            else:
                for j in Q[i]:
                    print(j, end=" ")
        return
# Product catalogue: name -> {'fiyat': unit price, 'miktar': units in stock}.
urunler = {
    'Elma': {'fiyat': 5, 'miktar': 3},
    'Armut': {'fiyat': 7, 'miktar': 9},
    'Mandalina': {'fiyat': 4, 'miktar': 6},
    'Kiraz': {'fiyat': 8, 'miktar': 1},
}
cebimdekiPara = 100  # money left in the wallet
sepet = {}           # basket: name -> {'fiyat', 'miktar'}


def sepeteUrunEkle(urunAdi, miktar):
    """Add `miktar` units of product `urunAdi` to the basket.

    Validates that the product exists, that enough stock is available and
    that the wallet can afford it; on success updates the basket, the
    catalogue stock and the wallet. Prints a message on every failure path.
    """
    global cebimdekiPara
    if urunAdi not in urunler:
        print("Ürün bulunamadı")
        return
    toplamFiyat = urunler[urunAdi]["fiyat"] * miktar
    if urunler[urunAdi]["miktar"] < miktar:
        print("Yeterli ürün yok")
    elif cebimdekiPara < toplamFiyat:
        print("Yeterli paranız yok")
    else:
        if urunAdi in sepet:
            # BUG FIX: the original always added 1 regardless of the
            # requested quantity; add `miktar` units instead.
            sepet[urunAdi]["miktar"] += miktar
        else:
            sepet[urunAdi] = urunler[urunAdi].copy()
            sepet[urunAdi]['miktar'] = miktar
        urunler[urunAdi]['miktar'] -= miktar
        cebimdekiPara = cebimdekiPara - toplamFiyat
def fisYazdir():
    """Print one receipt line per basket item and the grand total."""
    fisToplam = 0
    for urunAdi, kalem in sepet.items():
        satirToplam = kalem["fiyat"] * kalem["miktar"]
        print("Ürün: " + urunAdi + " Fiyat: " + str(kalem["fiyat"]) + " Adet: " +
              str(kalem["miktar"]) + " Toplam: " + str(satirToplam))
        fisToplam += satirToplam
    print("Alışveriş Toplam: " + str(fisToplam))
# Interactive loop: print an (initially empty) receipt, then read product
# names one at a time, adding one unit each, until the user types "Tamam".
fisYazdir()
while True:
    urunAdi = input("Bir ürün adı giriniz:")
    if (urunAdi == "Tamam"):
        fisYazdir()
        break
    else:
        sepeteUrunEkle(urunAdi, 1)
|
"""API v2 tests."""
from django.urls import reverse
from modoboa.lib.tests import ModoAPITestCase
class TransportViewSetTestCase(ModoAPITestCase):
    """API v2: transport endpoint tests."""

    def test_list(self):
        # Expects exactly one transport backend named "relay" to be
        # available -- presumably provisioned by ModoAPITestCase
        # fixtures; verify against the base class setup.
        url = reverse("v2:transport-list")
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        backends = resp.json()
        self.assertEqual(len(backends), 1)
        self.assertEqual(backends[0]["name"], "relay")
|
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# We are given an array A of positive integers, and two positive integers L and R (L <= R).
# Return the number of (contiguous, non-empty) subarrays such that the value of the maximum array element
# in that subarray is at least L and at most R.
# Example :
# Input:
# A = [2, 1, 4, 3]
# L = 2
# R = 3
# Output: 3
# Explanation: There are three subarrays that meet the requirements: [2], [2, 1], [3].
# Note:
# L, R and A[i] will be an integer in the range [0, 10^9].
# The length of A will be in the range of [1, 50000].
# LeetCode Weekly Contest 74.
# 38 / 38 test cases passed.
# Status: Accepted
# Runtime: 48 ms
class Solution(object):
    def numSubarrayBoundedMax(self, A, L, R):
        """
        Count subarrays whose maximum element lies in [L, R].

        :type A: List[int]
        :type L: int
        :type R: int
        :rtype: int

        Single pass with two run lengths:
          inc = length of the current run of elements <= R
          exc = length of the current tail of that run with elements < L
        count_subarrays(n) is the number of subarrays of a run of length n.
        """
        def count_subarrays(n):
            # n*(n+1)/2 subarrays inside a contiguous run of length n.
            return n * (n + 1) // 2
        res, inc, exc = 0, 0, 0
        for num in A:
            if num > R:
                # Run broken: bank subarrays of the run, minus those made
                # only of too-small (< L) elements.
                res += count_subarrays(inc) - count_subarrays(exc)
                inc = exc = 0
            elif num < L:
                inc += 1
                exc += 1
            else:
                inc += 1
                # An in-range element ends the too-small tail.
                res -= count_subarrays(exc)
                exc = 0
        # Flush the final run.
        res += count_subarrays(inc) - count_subarrays(exc)
        return res
# LeetCode Weekly Contest 74.
# 38 / 38 test cases passed.
# Status: Accepted
# Runtime: 44 ms
class Solution(object):
    def numSubarrayBoundedMax(self, A, L, R):
        """
        :type A: List[int]
        :type L: int
        :type R: int
        :rtype: int
        """
        # Subarrays with max in [L, R] =
        #   (subarrays with every element <= R)
        # - (subarrays with every element <= L-1).
        def count(bound):
            # Sum of run lengths = number of subarrays with all
            # elements <= bound.
            total, run = 0, 0
            for value in A:
                run = run + 1 if value <= bound else 0
                total += run
            return total

        return count(R) - count(L - 1)
if __name__ == '__main__':
    # Quick sanity checks -- expected output: 3 and 7.
    print(Solution().numSubarrayBoundedMax([2, 1, 4, 3], 2, 3))
    print(Solution().numSubarrayBoundedMax([2, 9, 2, 5, 6], 2, 8))
|
import time
from zimsoap import utils
from zimsoap import zobjects
from zimsoap.rest import AdminRESTClient
from zimsoap.exceptions import DomainHasNoPreAuthKey
from zimsoap.client import ZimbraAbstractClient
from . import methods
class ZimbraAdminClient(
        ZimbraAbstractClient,
        methods.accounts.MethodMixin,
        methods.config.MethodMixin,
        methods.domains.MethodMixin,
        methods.lists.MethodMixin,
        methods.mailboxes.MethodMixin,
        methods.resources.MethodMixin):
    """ Specialized Soap client to access zimbraAdmin webservice, handling auth.
    API ref is
    http://files.zimbra.com/docs/soap_api/<zimbra version>/api-reference/zimbraAdmin/service-summary.html # noqa
    See mixins in methods directory for API requests implementations.
    """
    NAMESPACE = 'urn:zimbraAdmin'
    LOCATION = 'service/admin/soap'
    REST_PREAUTH = AdminRESTClient  # REST client class used for preauth

    def __init__(self, server_host, server_port='7071',
                 *args, **kwargs):
        # 7071 is Zimbra's default admin service port.
        super(ZimbraAdminClient, self).__init__(
            server_host, server_port,
            *args, **kwargs)

    def _get_or_fetch_id(self, zobj, fetch_func):
        """ Returns the ID of a Zobject wether it's already known or not
        If zobj.id is not known (frequent if zobj is a selector), fetches first
        the object and then returns its ID.
        :type zobj: a zobject subclass
        :type fetch_func: the function to fetch the zobj from server if its id
          is undefined.
        :returns: the object id
        :raises ValueError: if the fetched object has no id either
        """
        try:
            return zobj.id
        except AttributeError:
            try:
                return fetch_func(zobj).id
            except AttributeError:
                raise ValueError('Unqualified Resource')

    def mk_auth_token(self, account, admin=False, duration=0):
        """ Builds an authentification token, using preauth mechanism.
        See http://wiki.zimbra.com/wiki/Preauth
        :param duration: in seconds defaults to 0, which means "use account
          default"
        :param account: an account object to be used as a selector
        :returns: the auth string
        :raises DomainHasNoPreAuthKey: if the account's domain has no
          zimbraPreAuthKey configured
        """
        domain = account.get_domain()
        try:
            preauth_key = self.get_domain(domain)['zimbraPreAuthKey']
        except KeyError:
            raise DomainHasNoPreAuthKey(domain)
        # Zimbra preauth expects milliseconds.
        timestamp = int(time.time())*1000
        expires = duration*1000
        return utils.build_preauth_str(preauth_key, account.name, timestamp,
                                       expires, admin)

    def get_account_authToken(self, account=None, account_name=''):
        """ Use the DelegateAuthRequest to provide a token and his lifetime
        for the provided account.
        If account is provided we use it,
        else we retreive the account from the provided account_name.
        :returns: (authToken, lifetime-in-seconds) tuple
        """
        if account is None:
            account = self.get_account(zobjects.admin.Account(
                name=account_name))
        selector = account.to_selector()
        resp = self.request('DelegateAuth', {'account': selector})
        authToken = resp['authToken']
        lifetime = int(resp['lifetime'])
        return authToken, lifetime

    def delegated_login(self, *args, **kwargs):
        # Admin webservice authenticates directly; delegation is meaningless.
        raise NotImplementedError(
            'zimbraAdmin does not support delegated auth')

    def search_directory(self, **kwargs):
        """
        SearchAccount is deprecated, using SearchDirectory
        :param query: Query string - should be an LDAP-style filter
        string (RFC 2254)
        :param limit: The maximum number of accounts to return
        (0 is default and means all)
        :param offset: The starting offset (0, 25, etc)
        :param domain: The domain name to limit the search to
        :param applyCos: applyCos - Flag whether or not to apply the COS
        policy to account. Specify 0 (false) if only requesting attrs that
        aren't inherited from COS
        :param applyConfig: whether or not to apply the global config attrs to
        account. specify 0 (false) if only requesting attrs that aren't
        inherited from global config
        :param sortBy: Name of attribute to sort on. Default is the account
        name.
        :param types: Comma-separated list of types to return. Legal values
        are: accounts|distributionlists|aliases|resources|domains|coses
        (default is accounts)
        :param sortAscending: Whether to sort in ascending order. Default is
        1 (true)
        :param countOnly: Whether response should be count only. Default is
        0 (false)
        :param attrs: Comma-seperated list of attrs to return ("displayName",
        "zimbraId", "zimbraAccountStatus")
        :return: dict of list of "account" "alias" "dl" "calresource" "domain"
        "cos"
        """
        search_response = self.request('SearchDirectory', kwargs)
        result = {}
        # Map each SOAP response key to the zobject factory that parses it.
        items = {
            "account": zobjects.admin.Account.from_dict,
            "domain": zobjects.admin.Domain.from_dict,
            "dl": zobjects.admin.DistributionList.from_dict,
            "cos": zobjects.admin.COS.from_dict,
            "calresource": zobjects.admin.CalendarResource.from_dict
            # "alias": TODO,
        }
        for obj_type, func in items.items():
            if obj_type in search_response:
                # The SOAP layer returns a bare dict for a single hit and
                # a list for multiple hits; normalize both cases.
                if isinstance(search_response[obj_type], list):
                    result[obj_type] = [
                        func(v) for v in search_response[obj_type]]
                else:
                    result[obj_type] = func(search_response[obj_type])
        return result
|
from unittest import TestCase, main
from tempfile import gettempdir
import svtools.lsort as lsort
class Test_lsort(TestCase):
    """Unit tests for svtools.lsort argument parsing and Lsort defaults."""

    def test_parser(self):
        # Positional files only: batchsize/tempdir fall back to defaults.
        parser = lsort.command_parser()
        args = parser.parse_args('file1 file2 file3'.split())
        self.assertEqual(args.vcf_files, ['file1', 'file2', 'file3'])
        self.assertEqual(args.batchsize, 200)
        self.assertEqual(args.tempdir, gettempdir())
        # Short options override both defaults.
        args2 = parser.parse_args('-b 2 -t temp file1 file2'.split())
        self.assertEqual(args2.batchsize, 2)
        self.assertEqual(args2.tempdir, 'temp')
        self.assertEqual(args2.vcf_files, ['file1', 'file2'])

    def test_lsort_init_defaults(self):
        # Constructor defaults must match the parser defaults above.
        file_list = ['file1', 'file2']
        lsort_class = lsort.Lsort(file_list)
        self.assertEqual(lsort_class.vcf_file_names, file_list)
        self.assertEqual(lsort_class.batchsize, 200)
        self.assertEqual(lsort_class.tempdir, gettempdir())

    def test_lsort_init_full(self):
        # Explicit keyword arguments are stored verbatim.
        file_list = ['file1', 'file2']
        lsort_class = lsort.Lsort(file_list, tempdir='tempydir', batchsize=5 )
        self.assertEqual(lsort_class.vcf_file_names, file_list)
        self.assertEqual(lsort_class.batchsize, 5)
        self.assertEqual(lsort_class.tempdir, 'tempydir')

if __name__ == "__main__":
    main()
|
# Approximate pi with the Leibniz series 4/1 - 4/3 + 4/5 - ...,
# printing the running partial sum after every term.
limite = int(input("Ingrese el limite fraccionario de pi"))
pi = 0
signo = 1  # alternates +/- for successive terms
for denominador in range(1, limite + 1, 2):
    pi += signo * 4 / denominador
    signo = -signo
    print(pi)
print(pi)
|
# Membership tests: `in` on a list scans its elements; on a dict it
# checks the keys. Both prints below output True.
grocery_list = ["Fish", "tomato", 'Apples']
print("tomato" in grocery_list)
grocery_dict = {"fish":1, "tomato":6, 'Apples':3}
print("tomato" in grocery_dict.keys()) # Could also be ("tomato" in grocery_dict), but that only searches the 'keys' part
|
#!/usr/bin/python
# Copyright (c) 2018 Thanos Poulos
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
__version__ = '0.1'
__author__ = 'Thanos Poulos'
__license__ = 'MIT'
import os
'''
This script renames the current computation directory files to a prescribed one
It needs to be launched in the appropriate directory
'''
####### User Inputs #######
old_name = "DemoCase_1_SA_ExtWF_booster_uns"  # To be modified
new_name = "DemoCase_1_SA_ExtWF_booster_uns_res"  # To be modified
####### Main Program ######
# Rename every file in the current directory whose name starts with
# old_name, preserving whatever follows the old prefix.
for filename in os.listdir(os.getcwd()):
    if filename.startswith(old_name):
        new_filename = new_name + filename[len(old_name):]
        # BUG FIX: the original wrote `print("...") % (...)`, which applies
        # the % operator to print's return value (None) and raises
        # TypeError under Python 3.
        print("Renaming %s to %s ..." % (filename, new_filename))
        os.rename(filename, new_filename)
print("Done")
|
import sys
import numpy as np
# Parse an strace log (argv[1]) for the first n_files (argv[2]) file
# descriptors opened under the burst-buffer directory, then collect the
# timestamps of their small (52- and 2104-byte) reads.
log_file = sys.argv[1]
n_files = int(sys.argv[2])
fds = [0] * n_files   # fds of files opened under bb_dir
cn_file = 0
ts = {}               # fd -> list of read timestamps
lustre_dir = "/global/cscratch1/sd/monarin/testxtc2/hsd/smalldata"
bb_dir = "/var/opt/cray/dws/mounts/batch/psana2_hsd_16718487_striped_scratch/hsd/smalldata"
with open(sys.argv[1], "r") as f:
    # First pass over the log: record the fds returned by open() calls
    # on files under bb_dir.
    for data in f:
        if data.find('open("%s' % bb_dir) > 0:
            fds[cn_file] = int(data.split()[5])
            cn_file += 1
            if cn_file == n_files:
                break
    for fd in fds:
        ts[fd] = []
    # Continue from the current file position: collect read timestamps.
    for data in f:
        for fd in fds:
            if data.find('read(%d' % fd) > 0 and data.find('..., 52) = 52') > 0:
                ts[fd].append(data.split()[1])
            elif data.find('read(%d' % fd) > 0 and data.find('..., 2104) = 2104') > 0:
                ts[fd].append(data.split()[1])
            elif data.find('read(%d, <unfinished ...>' % fd) > 0:
                if len(ts[fd]) < 5:
                    ts[fd].append(data.split()[1])
        # Stop scanning once any fd has collected 6 timestamps.
        flag_stop = False
        # BUG FIX: dict.iteritems() is Python 2 only; the rest of the
        # script uses Python 3 print() calls, so use items().
        for key, val in ts.items():
            if len(val) == 6:
                flag_stop = True
        if flag_stop:
            break
for key, val in ts.items():  # BUG FIX: iteritems() -> items() (Python 3)
    print('%d %s' % (key, ' '.join(val)))
|
# Read a target integer and a space-separated list of integers, then
# report how many times the target occurs in the list.
t = int(input())
valores = list(map(int, input().split(' ')))
coincidencias = 0
for valor in valores:
    if valor == t:
        coincidencias += 1
print(coincidencias)
from vcenter_connect import get_all_disknumbers,remove_disc
# Interactive helper (Python 2): list a VM's disk numbers and delete the
# one the user selects via vcenter_connect.remove_disc.
virtualmachine_name = raw_input("enter virtual machine name:")
all_disks_numbers = get_all_disknumbers(virtualmachine_name)
print "All disks number avaliable:"
print ",".join(map(str,all_disks_numbers))
selected_disk_number = int(raw_input("Selec a disk number:"))
if selected_disk_number in all_disks_numbers:
    if remove_disc(virtualmachine_name,selected_disk_number):
        print "Disc deleted"
    else:
        print "Something went wrong"
else:
    print "Please select proper disc number"
|
# created by xibai
import tkinter as tk
# Build the main window with a green label whose text is bound to `var`.
window = tk.Tk()
window.title('JJ GUI')
window.geometry('500x300')
var = tk.StringVar()
var.set('你是猪头')
l = tk.Label(window,textvariable=var,bg='green',font=('Arial,12'),width=60,height=1)
l.pack()
on_hit = False  # toggle state used by the button callback below
def hit_me():
    """Button callback: toggle the label text on alternating clicks."""
    global on_hit
    if on_hit == False:
        var.set('果然是猪头')
    else:
        var.set('')
    # BUG FIX: the original never flipped on_hit, so the first branch ran
    # on every click and the label could never be cleared.
    on_hit = not on_hit
# Button triggers hit_me; the Entry masks typed characters with '*'.
b = tk.Button(window,text='不是',width=15,height=2,command=hit_me)
b.pack()
e = tk.Entry(window,show='*')
e.pack()
window.mainloop()
|
import json
import os
import sys
import pandas.io.sql as psql
import requests
crypto_tools_dir = os.getcwd().split('/scripts/')[0] + '/scripts/'
sys.path.append(crypto_tools_dir)
from crypto_tools import *
class PopulateCryptoCoinone(object):
    """Fetch the Coinone order book and insert it into the crypto_test DB.

    SECURITY NOTE(review): database credentials are hardcoded in plaintext
    below and SQL statements are built with %-interpolation (injection
    prone); move credentials to config and use parameterized queries.
    """
    def __init__(self):
        """Connect to the database and look up the coinone exchange id."""
        self.port = 3306
        self.host = "159.89.20.249"
        self.database_name = 'crypto_test'
        self.user = 'toby'
        self.password = 'R1i9p1p1l9e0$'
        # DatabaseConnect comes from the star import of crypto_tools.
        self.database = DatabaseConnect(self.host, self.database_name, self.user, self.password, self.port)
        self.database.database_connect()
        self.get_coinone_exchange_id()

    def get_coinone_exchange_id(self):
        """Cache the DB id of the 'coinone' exchange on self.exchange_id."""
        sql_str = """SELECT id FROM crypto_test.exchange
                  WHERE name = 'coinone' """
        results = psql.read_sql(sql_str,con = self.database.mydb)
        self.exchange_id = results['id'].loc[0]

    def get_coinone_asset_pairs_lookup(self):
        """Load coinone asset pairs and their lookup ids from the DB."""
        sql_str = """SELECT apl.name,apl.id AS asset_pairs_lookup_id
                  FROM crypto_test.asset_pairs_lookup apl
                  INNER JOIN crypto_test.exchange e ON e.id = apl.exchange_id
                  WHERE e.name = 'coinone'"""
        results = psql.read_sql(sql_str,con = self.database.mydb)
        asset_pairs_lookup_dict = {}
        self.asset_pairs_list = results['name'].tolist()
        self.asset_pairs_str = ','.join(self.asset_pairs_list)
        print (self.asset_pairs_str)
        # NOTE(review): DataFrame.T.iteritems() is the pandas<2 API;
        # newer pandas spells this .items().
        for ind,row in results.T.iteritems():
            name = row['name']
            asset_pairs_lookup_dict[name] = row['asset_pairs_lookup_id']
        self.asset_pairs_lookup_dict = asset_pairs_lookup_dict

    def populate_coinone_data(self):
        """Please note that there is no server_time for coinone so default is order_time,
        which is universal across trades
        """
        self.get_coinone_asset_pairs_lookup()
        for coinone_asset_pair in self.asset_pairs_list:
            print (coinone_asset_pair)
            url = """https://api.coinone.co.kr/orderbook?currency=%s"""%(coinone_asset_pair)
            all_response = requests.get(url)
            all_json = all_response.text
            all_json_dict = json.loads(all_json)
            timestamp = all_json_dict['timestamp']
            #coinone timestamp, no order time of order
            # NOTE(review): datetime is assumed to come from the star
            # import of crypto_tools -- confirm, no direct import here.
            order_time = datetime.fromtimestamp(int(timestamp)).strftime('%Y-%m-%d %H:%M:%S')
            bids = all_json_dict['bid']
            asks = all_json_dict['ask']
            asset_pairs_lookup_id = self.asset_pairs_lookup_dict[coinone_asset_pair]
            # order_type_id 1 = bid, 2 = ask.
            bid_ask_list = [[1,bids],[2,asks]]
            for order_type in bid_ask_list:
                order_type_id = order_type[0]
                x = 0  # depth counter within this side of the book
                for order in order_type[1]:
                    x = x + 1
                    price = order['price']
                    quantity = order['qty']
                    #need to remove trailing zeros before and after decimal
                    new_price = '{0:g}'.format(float(price))
                    new_quantity = '{0:g}'.format(float(quantity))
                    ut = datetime.now()
                    # NOTE(review): this interpolates float(quantity), not
                    # float(new_quantity) -- same numeric value, but
                    # new_quantity is otherwise unused; verify intent.
                    sql_str = """INSERT IGNORE INTO crypto_test.order_book(asset_pairs_lookup_id,order_type_id,price,quantity,order_time,server_time,ut)
                              VALUES(%s,%s,%s,%s,"%s","%s","%s")
                              """%(asset_pairs_lookup_id,order_type_id,float(new_price),float(quantity),order_time,order_time,ut)
                    self.database.cursor.execute(sql_str)
                    try:
                        self.database.mydb.commit()
                    except:
                        self.database.mydb.rollback()
                    # Mirror the top 5 levels into the "live" table.
                    if x < 6:
                        ut = datetime.now()
                        ob_last_row_id = self.database.cursor.lastrowid
                        sql_str = """INSERT IGNORE INTO crypto_test.order_book_live(order_book_id,ut)
                                  VALUES(%s,"%s")
                                  """%(ob_last_row_id,ut)
                        self.database.cursor.execute(sql_str)
                        try:
                            self.database.mydb.commit()
                        except:
                            self.database.mydb.rollback()
def main():
    """Entry point: connect to the DB and load the Coinone order book."""
    populator = PopulateCryptoCoinone()
    populator.populate_coinone_data()


if __name__ == "__main__":
    main()
import sys
import os
# Offline AtCoder helper: feed a local input file to stdin.
# NOTE(review): the hardcoded Windows path only works on the author's box.
f = open("C:/Users/user/Documents/python/atcoder/ABC053/import.txt","r")
sys.stdin = f
# -*- coding: utf-8 -*-
import math
# Minimum number of moves: each pair of moves covers up to 11 units
# (6 + 5); leftovers of more than 6 units need two extra moves.
x = int(input())
temp = x // 11
move = temp * 2
if x % 11 == 0:
    pass
else:
    move += 1
    if x % 11 > 6:
        move += 1
print(move)
|
import DivisibleBy
## This is the universal code used by both addition and subtraction
## It gets all necessary info to solve either problem
## NOTE: This is not a standalone program, just contains info to be called elsewhere
## Gets the coeffecients of the quadratic in the order of ax^2, bx, c
def GetNumbers():
    # NOTE(review): input() returns strings, so this list holds str values;
    # callers presumably convert them to numbers -- verify at call sites.
    numbers = [input('Enter a number: ') for i in range(3)]
    return numbers
## Take out a common factor if possible
def GetSimplified(a, b, c):
    """Divide a, b, c by their greatest common factor (> 1), if any.

    :returns: [a/g, b/g, c/g, g] when a common factor g > 1 exists,
              otherwise None (implicitly), matching the original contract.
    """
    testNums = []
    # BUG FIX: the original iterated range(int(max(numbers))), which stops
    # one short of the largest coefficient, so a common factor equal to it
    # was missed (e.g. GetSimplified(4, 4, 4) found nothing). Iterate
    # inclusively, starting at 2 (1 is not a useful common factor).
    for x in range(2, int(max(a, b, c)) + 1):
        if a % x == 0 and b % x == 0 and c % x == 0:
            testNums.append(x)
    # Factor out the GREATEST common coefficient, then return the reduced
    # coefficients followed by the factor itself.
    if testNums:
        trueTest = max(testNums)
        return [a / trueTest, b / trueTest, c / trueTest, trueTest]
## Returns the factors of any given number
## This would be redundant if I imported DivisibleBy into FactorQuadraticsAddition
## This is to keep from having to import DivisibleBy into FactorQuadraticsAddition
def GetFactors(givenNumber):
    # Delegates to DivisibleBy.Main; presumably yields the divisors of
    # givenNumber in ascending order -- confirm against DivisibleBy.
    factors = list( DivisibleBy.Main(givenNumber) )
    return factors
## Returns the 'front factors' or the lesser of two numbers in a multiplication equation
## Ex. 3*8=24: 'front factor' = 3
def GetFirstSet(factors, half):
    """Return the first `half` entries of `factors` (the smaller divisors)."""
    return factors[:half]
## Returns the 'latter factors' or the greater of two numbers in a multiplication equation
## Ex. 8*3=24: 'latter factor' = 8
def GetSecondSet(factors, firstSet, half):
    """Return the factors not present in `firstSet`, reversed so each
    entry pairs up with the same index of the first set.

    Ex. factors of 24 = 1,2,3,4 || 6,8,12,24:
    4 * 6 = 24 with firstSet[3] = 4 and secondSet[3] = 6.
    """
    remaining = [value for value in factors if value not in firstSet]
    remaining.reverse()
    return remaining
|
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 28 21:14:41 2018
@author: PPAGACZ
"""
from packets import *
class ForecastPipe(IPipe):
    """Pipe stage that runs the forecast filter when preconditions hold."""

    def runFilter(self):
        # Gate the filter on checkConditions (currently always True).
        if(ForecastPipe.checkConditions(self.data)):
            ForecastFilter.process(self)

    # NOTE(review): defined without `self` and called through the class
    # with the data as the single argument -- works in Python 3, but a
    # @staticmethod decorator would make the intent explicit.
    def checkConditions(data):
        return True
from django.utils import timezone
from borg_utils.singleton import SingletonMetaclass,Singleton
class PublishStatusMetaclass(SingletonMetaclass):
    """
    A metaclass for Publish Status class
    """
    # Registry of every concrete PublishStatus subclass seen so far.
    _classes = []
    def __init__(cls,name,base,dct):
        """
        cache all publish status classes.
        """
        super(PublishStatusMetaclass,cls).__init__(name,base,dct)
        # Register every subclass, but not the abstract base itself.
        if name != 'PublishStatus':
            PublishStatusMetaclass._classes.append(cls)

    @property
    def all_classes(self):
        # All registered publish status classes.
        return PublishStatusMetaclass._classes
class PublishStatus(Singleton):
    """
    super class for publish status
    """
    # NOTE(review): `__metaclass__ = ...` is Python 2 syntax; under
    # Python 3 it is ignored and the metaclass registry never fills --
    # confirm which interpreter this module targets.
    __metaclass__ = PublishStatusMetaclass

    # Lazily-built caches of the status singletons (see _initialize).
    _all_status = None
    _status_dict = None
    _all_options = None
    _publish_enabled = False

    @staticmethod
    def _initialize():
        # Build the caches once, on first use.
        if not PublishStatus._all_status:
            PublishStatus._all_status = [c() for c in PublishStatus.all_classes]
            PublishStatus._status_dict = dict([(o.name.lower(),o) for o in PublishStatus._all_status])
            PublishStatus._all_options = tuple([(o.name,o.name) for o in PublishStatus._all_status])

    @staticmethod
    def all_status():
        """
        return all possible publish status
        """
        PublishStatus._initialize()
        return PublishStatus._all_status

    @staticmethod
    def get_status(status):
        """
        if status is correct, return the publish status instance.
        otherwise throw exception
        """
        if isinstance(status, PublishStatus):
            #status is a PublishStatus instance, return directly
            return status
        PublishStatus._initialize()
        try:
            return PublishStatus._status_dict[status.lower()]
        except:
            raise ValueError("The publish status {0} is not recognized.".format(status))

    @property
    def publish_enabled(self):
        # Whether publishing is allowed while in this status.
        return self._publish_enabled

    @property
    def name(self):
        # Display name, supplied by each concrete subclass.
        return self._name

    @staticmethod
    def all_options():
        # (name, name) tuples, e.g. for Django model field choices.
        PublishStatus._initialize()
        return PublishStatus._all_options

    def __str__(self):
        return self._name
class EnabledStatus(PublishStatus):
    # Publishing is allowed in this status.
    _name = "Enabled"
    _publish_enabled = True

class DisabledStatus(PublishStatus):
    # Publishing is blocked in this status.
    _name = "Disabled"
    _publish_enabled = False
|
import bpy
import math
from utils import printf, start, end
def insert():
    """Log the truncated x/y coordinates of Blender's 3D cursor."""
    C = bpy.context
    cl = C.scene.cursor_location
    start()
    # Truncate toward zero to get integer grid coordinates.
    x = math.trunc(cl[0])
    y = math.trunc(cl[1])
    printf("x")  # NOTE(review): looks like a leftover debug label
    printf(x)
    printf(y)
    end()
# insert()
|
''' Retrieve publications from Scopus APIs and add them to the database.
'''
import os
import sys
import requests
import time
import xml.etree.ElementTree as et
import re
import json
from datetime import datetime
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import sessionmaker
from collections import defaultdict
#engine = create_engine('postgresql+psycopg2://likit@localhost/research_dev')
engine = create_engine('postgresql+psycopg2://likit@localhost/research_dev')
Base = automap_base()
Base.prepare(engine, reflect=True)
Session = sessionmaker(bind=engine)
session = Session()
#print(Base.classes.keys())
Abstracts = Base.classes.scopus_abstracts
Authors = Base.classes.scopus_authors
Affiliations = Base.classes.scopus_affiliations
API_KEY = '871232b0f825c9b5f38f8833dc0d8691'
ITEM_PER_PAGE = 25
SLEEPTIME = 5
def add_author(authors, abstract):
    """Look up each author on the Scopus author API and link them (and
    their current affiliation) to `abstract` in the database.

    :param authors: list of dicts each holding an 'authid' Scopus id,
        or None/empty (in which case nothing is done)
    :param abstract: an Abstracts ORM instance to attach authors to
    """
    url = 'http://api.elsevier.com/content/search/author'
    if not authors:
        return None
    for au in authors:
        params = {'apiKey': API_KEY,
                  'query': 'auid({})'.format(au['authid']),
                  'httpAccept': 'application/json'}
        author = requests.get(url, params=params).json()
        # The auid query returns exactly one entry for a valid id.
        author = author['search-results']['entry'][0]
        cur_affil = author.get('affiliation-current', {})
        preferred_name=author['preferred-name']['surname']+ ' ' +\
                author['preferred-name']['given-name']
        new_author = Authors(initials=author['preferred-name'].get('initials', ''),
                surname=author['preferred-name'].get('surname', ''),
                given_name=author['preferred-name'].get('given-name', ''),
                preferred_name=preferred_name,
                url=author.get('prism:url', ''))
        # get an affiliation of the author
        new_affil = Affiliations(name=cur_affil.get('affiliation-name', ''),
                city=cur_affil.get('affiliation-city', ''),
                country=cur_affil.get('affiliation-country', ''),
                scopus_affil_id=cur_affil.get('affiliation-id', ''))
        # search for the affiliation in the db
        existing_affil = session.query(Affiliations).filter_by(
                scopus_affil_id=new_affil.scopus_affil_id).first()
        if existing_affil:
            # if the affiliation exists, get its ID
            affil_id = existing_affil.id
        else:
            # if the affiliation not exists, insert it to the db
            session.add(new_affil)
            session.commit()
            affil_id = new_affil.id
        # Match authors by name; ids are not stored on the Authors row.
        author = session.query(Authors).filter_by(
                given_name=new_author.given_name,
                surname=new_author.surname).first()
        if not author:
            new_author.affil_id = affil_id # assign affiliation ID to the author
            new_author.scopus_abstracts_collection.append(abstract)
            abstract.scopus_authors_collection.append(new_author)
            print('new author, {}, added.'.format(
                new_author.preferred_name.encode('utf8')))
            session.add(new_author)
        else:
            if affil_id != author.affil_id:
                author.affil_id = affil_id # update an affiliation
            author.scopus_abstracts_collection.append(abstract)
            abstract.scopus_authors_collection.append(author)
            print('new article added to {}'.format(
                author.preferred_name.encode('utf8')))
            session.add(author)
        session.commit()
def update(year):
    """Fetch all Scopus articles for the faculty for `year`, paginating
    through the search API, and insert/update them in the database.

    NOTE(review): uses Python 2 `print >> sys.stderr` statements -- this
    module only runs under Python 2.
    """
    query = 'AFFILORG("faculty of medical technology" "mahidol university")' \
            'AND PUBYEAR IS %s' % year
    params = {'apiKey': API_KEY, 'query': query, 'httpAccept': 'application/json'}
    apikey = {'apiKey' : API_KEY}  # NOTE(review): unused
    url = 'http://api.elsevier.com/content/search/scopus'
    # First request only to learn the total result count.
    r = requests.get(url, params=params).json()
    total_results = int(r['search-results']['opensearch:totalResults'])
    page = 0
    article_no = 0
    print('Total articles %d' % total_results)
    for start in range(0, total_results+1, ITEM_PER_PAGE):
        page += 1
        print >> sys.stderr, \
                'Waiting %d sec to download from page %d... (%d articles/page)' \
                % (SLEEPTIME, page, ITEM_PER_PAGE)
        # Throttle to stay under the API rate limit.
        time.sleep(SLEEPTIME)
        params = {'apiKey': API_KEY,
                  'query': query,
                  'start': start,
                  'httpAccept': 'application/json',
                  'view': 'COMPLETE',
                  'count': ITEM_PER_PAGE}
        articles = requests.get(url, params=params).json()['search-results']['entry']
        for n, entry in enumerate(articles, start=1):
            article_no += 1
            print >> sys.stderr, '%d) %s..%s' \
                    % (article_no, entry['dc:title'][:80], entry['dc:creator'][:30])
            new_abstract = Abstracts(url=entry.get('prism:url', ''),
                    title=entry.get('dc:title', ''),
                    identifier=entry.get('dc:identifier', ''),
                    pii=entry.get('pii', ''),
                    doi=entry.get('prism:doi', ''),
                    eid=entry.get('eid', ''),
                    publication_name=entry.get('prism:publicationName', ''),
                    citedby_count=entry.get('citedby-count', ''),
                    cover_date=entry.get('prism:coverDate', ''),
                    description=entry.get('dc:description', '')
                    )
            existing_abstract = session.query(Abstracts).filter_by(
                    doi=entry.get('prism:doi')).first()
            if existing_abstract:
                print('Article already in the database. Updating number of citations..')
                # NOTE(review): key 'citedby_count' (underscore) never
                # matches the API's 'citedby-count' (hyphen, used above),
                # so this always keeps the old value -- verify.
                existing_abstract.citedby_count = \
                        entry.get('citedby_count', existing_abstract.citedby_count)
            else:
                print('New article loaded.')
                session.add(new_abstract)
                session.flush()
                add_author(entry.get('author'), new_abstract)
            session.commit()
if __name__=='__main__':
    # CLI: python <script> <year>
    year = int(sys.argv[1])
    entry = update(year)  # NOTE(review): update() returns None; `entry` unused
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth import logout
from .forms import AdminKelolaRegistrationForm
from perwakilan_penghuni.forms import PerwakilanPenghuniForm
from account.forms import RegisterForm, LoginForm
from django.db import transaction
from perwakilan_penghuni.models import PerwakilanPenghuni
from serah_terima.models import Dokumen
from laporan.models import BerkasLaporan
from serah_terima.forms import DokumenForm
from laporan.forms import BerkasLaporanForm
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from account.decorators import admin_kelola_required, anonymous_required
from django.contrib import messages
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required(login_url='/admin_kelola/login')
@admin_kelola_required
def index(request):
    """Admin dashboard: paginated lists of documents, reports and
    resident representatives (10 per page each).

    NOTE(review): the single `page` query parameter drives all three
    paginators at once -- paging one table pages the other two as well.
    """
    page = request.GET.get('page', 1)
    dokumen_list = Dokumen.objects.select_related()
    paginator = Paginator(dokumen_list, 10)
    try:
        dokumens = paginator.page(page)
    except PageNotAnInteger:
        dokumens = paginator.page(1)
    except EmptyPage:
        dokumens = paginator.page(paginator.num_pages)
    semua_laporan = BerkasLaporan.objects.all()
    paginator_laporan = Paginator(semua_laporan, 10)
    try:
        laporans = paginator_laporan.page(page)
    except PageNotAnInteger:
        laporans = paginator_laporan.page(1)
    except EmptyPage:
        laporans = paginator_laporan.page(paginator_laporan.num_pages)
    semua_perwakilan_penghuni = PerwakilanPenghuni.objects.all()
    paginator_penghuni = Paginator(semua_perwakilan_penghuni, 10)
    try:
        perwakilan_penghunis = paginator_penghuni.page(page)
    except PageNotAnInteger:
        perwakilan_penghunis = paginator_penghuni.page(1)
    except EmptyPage:
        perwakilan_penghunis = paginator_penghuni.page(paginator_penghuni.num_pages)
    return render(request, "admin_kelola/index.html", {
        'dokumens' : dokumens,
        'laporans': laporans,
        'perwakilan_penghunis': perwakilan_penghunis
    })
@transaction.atomic
def register_admin_kelola(request):
    """Register a managing-admin account (user_type 1): create the auth
    user and the linked AdminKelola profile atomically, then redirect
    to the login page."""
    account = RegisterForm(request.POST or None, prefix='account')
    admin_kelola = AdminKelolaRegistrationForm(request.POST or None, prefix='admin_kelola')
    context = {
        "admin_kelola_form" : admin_kelola,
        "account_form" : account
    }
    if account.is_valid() and admin_kelola.is_valid():
        user = account.save(commit=False)
        user.user_type = 1  # 1 = managing admin
        user.save()
        # Attach the profile to the freshly created user before saving.
        admin_kelola_data = admin_kelola.save(commit=False)
        admin_kelola_data.user = user
        admin_kelola_data.save()
        return redirect('admin_kelola:login')
    return render(request, 'admin_kelola/auth/register.html', context)
@anonymous_required
def login_admin_kelola(request):
    """Show the admin login form; redirect to the dashboard on success."""
    login_form = LoginForm(data=request.POST or None, request=request, prefix='admin')
    if login_form.is_valid():
        return redirect('admin_kelola:index')
    return render(request, 'admin_kelola/auth/login.html', {"form": login_form})
@login_required(login_url='/admin_kelola/login')
@admin_kelola_required
def logout_admin_kelola(request):
    """Log the current admin out and redirect to the public home page."""
    logout(request)
    return redirect('home')
@login_required(login_url='/admin_kelola/login')
@admin_kelola_required
def perwakilan_penghuni_tambah(request):
    """Create a resident-representative account (user_type == 2) from the admin panel."""
    account_form = RegisterForm(request.POST or None, prefix='account')
    representative_form = PerwakilanPenghuniForm(request.POST or None, prefix='perwakilan_penghuni')
    if account_form.is_valid() and representative_form.is_valid():
        # Save the auth user first so the representative profile can reference it.
        new_user = account_form.save(commit=False)
        new_user.user_type = 2
        new_user.save()
        representative = representative_form.save(commit=False)
        representative.user = new_user
        representative.save()
        return redirect('admin_kelola:index')
    return render(request, 'admin_kelola/perwakilan_penghuni/tambah.html', {
        "perwakilan_penghuni_form": representative_form,
        "account_form": account_form,
    })
# SERAH TERIMA
@login_required(login_url='/admin_kelola/login')
@admin_kelola_required
def serah_terima_tambah(request):
    """Create a handover (serah terima) document owned by the current admin.

    Fixes: removed the unused local ``nama_psu`` and the pointless f-prefix
    on a message with no placeholders.
    """
    if request.method == 'POST':
        form = DokumenForm(request.POST, request.FILES)
        if form.is_valid():
            # Attach the logged-in admin as the owner before saving.
            admin = AdminKelola.objects.get(pk=request.user.id)
            dokumen = form.save(commit=False)
            dokumen.admin_kelola = admin
            dokumen.save()
            messages.success(request, 'Dokumen berhasil ditambahkan.', extra_tags='serah_terima')
            return redirect('admin_kelola:index')
    else:
        form = DokumenForm()
    return render(request, "admin_kelola/serah_terima/tambah.html", {
        'form': form
    })
@login_required(login_url='/admin_kelola/login')
@admin_kelola_required
def serah_terima_tampil(request, id):
    """Display a single handover document.

    Unknown ids redirect to the index — consistent with serah_terima_ubah /
    serah_terima_hapus, instead of an unhandled DoesNotExist (HTTP 500).
    """
    try:
        dokumen = Dokumen.objects.get(id=id)
    except Dokumen.DoesNotExist:
        return redirect('admin_kelola:index')
    return render(request, "admin_kelola/serah_terima/tampil.html", {
        'dokumen': dokumen
    })
@login_required(login_url='/admin_kelola/login')
@admin_kelola_required
def serah_terima_ubah(request, id):
    """Edit an existing handover document; unknown ids return to the index.

    Fixes: removed the stray ``print(form.errors)`` debug statement and the
    pointless f-prefix on a placeholder-free message.
    """
    try:
        dokumen = Dokumen.objects.get(id=id)
    except Dokumen.DoesNotExist:
        return redirect('admin_kelola:index')
    form = DokumenForm(request.POST or None, request.FILES or None, instance=dokumen)
    if form.is_valid():
        # Re-attach the current admin as the document owner on every edit.
        admin = AdminKelola.objects.get(pk=request.user.id)
        updated = form.save(commit=False)
        updated.admin_kelola = admin
        updated.save()
        messages.success(request, 'Dokumen berhasil diperbarui.', extra_tags='serah_terima')
        return redirect('admin_kelola:index')
    return render(request, "admin_kelola/serah_terima/ubah.html", {
        'form': form, 'dokumen': dokumen
    })
@login_required(login_url='/admin_kelola/login')
@admin_kelola_required
def serah_terima_hapus(request, id):
    """Delete a handover document; unknown ids silently return to the index.

    NOTE(review): deletion happens on GET; consider requiring POST to avoid
    accidental or CSRF-style deletions.
    """
    try:
        dokumen = Dokumen.objects.get(id=id)
    except Dokumen.DoesNotExist:
        return redirect('admin_kelola:index')
    dokumen.delete()
    # f-prefix dropped: the message has no placeholders.
    messages.success(request, 'Dokumen berhasil dihapus.', extra_tags='serah_terima')
    return redirect('admin_kelola:index')
# LAPORAN
@login_required(login_url='/admin_kelola/login')
@admin_kelola_required
def laporan_tambah(request):
    """Create a report (laporan), initially marked as not yet handled ('BELUM')."""
    if request.method != 'POST':
        return render(request, "admin_kelola/laporan/tambah.html", {
            'form': BerkasLaporanForm()
        })
    form = BerkasLaporanForm(request.POST, request.FILES)
    if form.is_valid():
        laporan = form.save(commit=False)
        laporan.status_laporan = 'BELUM'
        laporan.user_created = request.user
        laporan.save()
        nama_psu_laporan = form.cleaned_data.get('nama_psu_laporan')
        messages.success(request, f'Laporan {nama_psu_laporan} berhasil ditambahkan.', extra_tags='laporan')
        return redirect('admin_kelola:index')
    return render(request, "admin_kelola/laporan/tambah.html", {
        'form': form
    })
@login_required(login_url='/admin_kelola/login')
@admin_kelola_required
def laporan_tampil(request, id):
    """Display a single report.

    Unknown ids redirect to the index — consistent with laporan_ubah /
    laporan_hapus, instead of an unhandled DoesNotExist (HTTP 500).
    """
    try:
        laporan = BerkasLaporan.objects.get(id=id)
    except BerkasLaporan.DoesNotExist:
        return redirect('admin_kelola:index')
    return render(request, "admin_kelola/laporan/tampil.html", {
        'laporan': laporan
    })
@login_required(login_url='/admin_kelola/login')
@admin_kelola_required
def laporan_ubah(request, id):
    """Update an existing report; unknown ids return to the index."""
    laporan = BerkasLaporan.objects.filter(id=id).first()
    if laporan is None:
        return redirect('admin_kelola:index')
    form = BerkasLaporanForm(request.POST or None, request.FILES or None, instance=laporan)
    if form.is_valid():
        form.save()
        nama_psu = form.cleaned_data.get('nama_psu_laporan')
        messages.success(request, f'Laporan {nama_psu} berhasil diperbarui.', extra_tags='laporan')
        return redirect('admin_kelola:index')
    return render(request, 'admin_kelola/laporan/ubah.html', {
        'form': form
    })
@login_required(login_url='/admin_kelola/login')
@admin_kelola_required
def laporan_hapus(request, id):
    """Delete a report and confirm via a flash message."""
    laporan = BerkasLaporan.objects.filter(id=id).first()
    if laporan is None:
        return redirect('admin_kelola:index')
    # Capture the name before deleting so the message matches the record.
    nama = laporan.nama_psu_laporan
    laporan.delete()
    messages.success(request, f'Laporan {nama} berhasil dihapus.', extra_tags='laporan')
    return redirect('admin_kelola:index')
# Demo (Python 2): splitting a string on multiple delimiters with re.split.
line = 'asdf fjdk; afed, fjek,asdf, foo'
import re
# Delimiter is ';', ',' or whitespace, optionally followed by more whitespace.
print re.split(r'[;,\s]\s*', line)
# A capturing group makes re.split keep the matched delimiters in the result.
fields = re.split(r'(;|,|\s)\s*', line)
print fields
# Even slots are values, odd slots the delimiter that followed each value.
values = fields[::2]
print values
# Pad with '' so values and delimiters zip to equal length.
delimiters = fields[1::2] + ['']
print delimiters
# Reassemble the original line from value/delimiter pairs.
print ''.join(v+d for v,d in zip(values, delimiters))
# (?:...) groups without capturing, so the delimiters are dropped again.
print re.split(r'(?:,|;|\s)\s*', line)
|
from pyalgotrade.broker.backtesting import TradePercentage
from pyalgotrade.broker.fillstrategy import DefaultStrategy
from pyalgotrade import strategy
from pyalgotrade.technical import ma
from pyalgotrade.technical import cross
class mystrategy(strategy.BacktestingStrategy):
    """SMA(10)/SMA(20) crossover strategy.

    Enters long when the fast SMA crosses above the slow SMA and exits on
    the opposite (downward) cross.
    """
    def __init__(self, feed, instrument):
        super(mystrategy, self).__init__(feed)
        self.__instrument = instrument
        # Fill/commission model: default fill strategy, 0.1% per trade.
        self.getBroker().setFillStrategy(DefaultStrategy(None))
        self.getBroker().setCommission(TradePercentage(0.001))
        self.__position = None
        self.__prices = feed[instrument].getPriceDataSeries()
        self.__ma1len = 10
        self.__ma2len = 20
        self.__ma1 = ma.SMA(self.__prices, self.__ma1len)
        self.__ma2 = ma.SMA(self.__prices, self.__ma2len)

    def onBars(self, bars):
        # Wait until the slow SMA has enough data.
        if self.__ma2[-1] is None:
            return
        # Close the position on a downward cross. The original tested
        # cross_above here — the very signal that opens a position — so a
        # freshly opened position would be exited on the same condition.
        if self.__position is not None:
            if not self.__position.exitActive() and cross.cross_below(self.__ma1, self.__ma2) > 0:
                self.__position.exitMarket()
        # Open a long position on an upward cross, sizing to ~20% of equity.
        if self.__position is None:
            if cross.cross_above(self.__ma1, self.__ma2) > 0:
                shares = int(self.getBroker().getEquity() * 0.2 / bars[self.__instrument].getPrice())
                self.__position = self.enterLong(self.__instrument, shares)
                print(bars[self.__instrument].getDateTime(), bars[self.__instrument].getPrice())

    def onEnterOk(self, position):
        pass

    def onEnterCanceled(self, position):
        # Entry order failed: forget the position so onBars can retry.
        self.__position = None

    def onExitOk(self, position):
        self.__position = None

    def onExitCanceled(self, position):
        # Exit order was canceled; resubmit it.
        self.__position.exitMarket()
import re
import logging
logger = logging.getLogger(__name__)
class ParserException(Exception):
    """Raised when a line cannot be parsed as an IRC message.

    Derives from Exception (the original used BaseException, which generic
    ``except Exception`` handlers at application boundaries would miss).
    """
    pass
class IrcMessage:
    """A single parsed IRC protocol line (RFC 2812 shape).

    Attributes:
        prefix: optional source prefix (without the leading ':'), or None.
        command: the command word or numeric reply.
        params: list of parameters; a trailing parameter loses its ':'.
    """

    # Compiled once at class-creation time instead of on every instantiation.
    _PATTERN = re.compile(r"""(?::(
            ([^@!\ ]*)
            (?:
                (?:
                    !([^@]*)
                )?
                @([^\ ]*)
            )?
        )\ )?
        ([^\ ]+)
        (
            (?:
                \ [^:\ ][^\ ]*
            ){0,14}
        )
        (?:\ :?(.*))?
        $""", re.VERBOSE)

    def __init__(self, msg):
        """Parse *msg*; raises ParserException when it is not a valid line."""
        res = self._PATTERN.match(msg)
        if res is None:
            raise ParserException()
        # Plain instance attributes. The original declared class-level
        # defaults, including a shared mutable list (params = []).
        self.prefix = res.group(1)
        self.command = res.group(5)
        self.params = res.group(6).split()
        if res.group(7) is not None:
            self.params.append(res.group(7))

    def __str__(self):
        """Re-serialize; the last parameter is always emitted as trailing (':...')."""
        ret = ""
        if self.prefix is not None:
            ret += ":%s " % self.prefix
        ret += "%s " % self.command
        if self.params:
            for arg in self.params[:-1]:
                ret += "%s " % arg
            ret += ":%s" % self.params[-1]
        ret += "\n"
        return ret
|
from __future__ import division
import numpy as np
import numpy.random as npr
from svae.util import add, scale, rand_dir_like, contract
from svae.svae import make_gradfun
# Default step size and tolerances for the finite-difference gradient check.
EPS, RTOL, ATOL = 1e-4, 1e-4, 1e-6

def grad_check(fun, gradfun, arg, eps=EPS, rtol=RTOL, atol=ATOL, rng=None):
    """Check *gradfun* against a numerical directional derivative of *fun*.

    Picks a random direction d, forms the scalar map t -> fun(arg + t*d),
    and compares its central finite difference at t=0 with the directional
    derivative implied by gradfun(arg).

    NOTE(review): the ``rng`` parameter is accepted but never used here.
    """
    def scalar_nd(f, x, eps):
        # Central finite difference: (f(x+eps/2) - f(x-eps/2)) / eps.
        return (f(x + eps/2) - f(x - eps/2)) / eps
    random_dir = rand_dir_like(arg)
    scalar_fun = lambda x: fun(add(arg, scale(x, random_dir)))
    numeric_grad = scalar_nd(scalar_fun, 0.0, eps=eps)
    # Computed a second time with identical inputs — presumably a
    # determinism/noise check on `fun`; TODO confirm (a different eps may
    # have been intended).
    numeric_grad2 = scalar_nd(scalar_fun, 0.0, eps=eps)
    analytic_grad = contract(gradfun(arg), random_dir)
    assert np.isclose(numeric_grad, numeric_grad2, rtol=rtol, atol=atol)
    assert np.isclose(numeric_grad, analytic_grad, rtol=rtol, atol=atol)
def make_wrappers(seed, eta, y, value_and_grad_fun):
    """Wrap *value_and_grad_fun* into (objective, gradient) closures over (phi, psi).

    Both closures reseed numpy's global RNG with *seed* so the stochastic
    objective is evaluated on the same random draw every call.
    """
    def objective(params):
        phi, psi = params
        npr.seed(seed)  # fix the random function being evaluated
        return value_and_grad_fun(y, 1, 1, eta, phi, psi)[0]

    def gradient(params):
        phi, psi = params
        npr.seed(seed)  # fix the random function being evaluated
        # Keep only the (phi, psi) components of the gradient tuple.
        return value_and_grad_fun(y, 1, 1, eta, phi, psi)[1][-2:]

    return objective, gradient
# NOTE: I commented these out because they test with full-matrix node
# potentials, which the python code supports but the cython code doesn't.
# Refactoring the lds_svae and slds_svae code to have both python and cython
# versions looked like a pain, so I'm just leaving them untested. These tests
# really only check that autograd (plus any manual gradients) is working anyway,
# and the other tests cover that well.
# def test_lds_svae():
# from svae.models.lds_svae import run_inference, make_prior_natparam, \
# generate_test_model_and_data, linear_recognition_params_from_lds
# from svae.recognition_models import linear_recognize as recognize
# from svae.forward_models import linear_loglike as loglike
# def make_experiment(seed):
# npr.seed(seed) # seed for random model/data generation
# n, p, T = npr.randint(1,5), npr.randint(1,5), npr.randint(10, 20)
# # set up a model, data, and recognition network
# prior_natparam = make_prior_natparam(n)
# lds, data = generate_test_model_and_data(n, p, T)
# phi = psi = linear_recognition_params_from_lds(lds)
# # variational eta at which gradient is evaluated
# eta = make_prior_natparam(n, random=True)
# # value-and-grad function that gets wrapped for testing
# value_and_grad_fun = make_gradfun(run_inference, recognize, loglike, prior_natparam)
# # create wrappers
# fun, gradfun = make_wrappers(seed, eta, data, value_and_grad_fun)
# return fun, gradfun, phi, psi
# for i in xrange(10):
# fun, gradfun, phi, psi = make_experiment(i)
# yield grad_check, fun, gradfun, (phi, psi)
# def test_slds_svae():
# from svae.models.slds_svae import run_inference, make_slds_global_natparam
# from svae.recognition_models import linear_recognize as recognize
# from svae.forward_models import linear_loglike as loglike
# from svae.models.lds_svae import linear_recognition_params_from_lds, \
# generate_test_model_and_data
# def make_experiment(seed):
# npr.seed(seed) # seed for random model/data generation
# k, n, p, T = npr.randint(1,3), npr.randint(1,3), npr.randint(1,3), npr.randint(5, 10)
# # set up a model, data, and recognition network
# prior_natparam = make_slds_global_natparam(k, n)
# lds, data = generate_test_model_and_data(n, p, T)
# phi = psi = linear_recognition_params_from_lds(lds)
# # variational eta at which gradient is evaluated
# eta = make_slds_global_natparam(k, n, random=False) # TODO make this random
# # value-and-grad function that gets wrapped for testing
# value_and_grad_fun = make_gradfun(run_inference, recognize, loglike, prior_natparam)
# # create wrappers
# fun, gradfun = make_wrappers(seed, eta, data, value_and_grad_fun)
# return fun, gradfun, phi, psi
# for i in xrange(10):
# fun, gradfun, phi, psi = make_experiment(i)
# yield grad_check, fun, gradfun, (phi, psi)
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that bundles that have no 'sources' (pure resource containers) work.
"""
import TestGyp
import sys
# Skip on macOS while the upstream breakage is unresolved (crbug.com/483696).
if sys.platform == 'darwin':
  print "This test is currently disabled: https://crbug.com/483696."
  sys.exit(0)

test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])

test.run_gyp('test.gyp', chdir='sourceless-module')

# Just needs to build without errors.
test.build('test.gyp', 'empty_bundle', chdir='sourceless-module')
# A bundle with no sources and no resources should not be emitted at all.
test.built_file_must_not_exist(
    'empty_bundle.bundle', chdir='sourceless-module')

# Needs to build, and contain a resource.
test.build('test.gyp', 'resource_bundle', chdir='sourceless-module')

test.built_file_must_exist(
    'resource_bundle.bundle/Contents/Resources/foo.manifest',
    chdir='sourceless-module')
# No sources means no executable inside the bundle.
test.built_file_must_not_exist(
    'resource_bundle.bundle/Contents/MacOS/resource_bundle',
    chdir='sourceless-module')

# Build an app containing an actionless bundle.
test.build(
    'test.gyp',
    'bundle_dependent_on_resource_bundle_no_actions',
    chdir='sourceless-module')

test.built_file_must_exist(
    'bundle_dependent_on_resource_bundle_no_actions.app/Contents/Resources/'
    'mac_resource_bundle_no_actions.bundle/Contents/Resources/empty.txt',
    chdir='sourceless-module')

# Needs to build and cause the bundle to be built.
test.build(
    'test.gyp', 'dependent_on_resource_bundle', chdir='sourceless-module')
test.built_file_must_exist(
    'resource_bundle.bundle/Contents/Resources/foo.manifest',
    chdir='sourceless-module')
test.built_file_must_not_exist(
    'resource_bundle.bundle/Contents/MacOS/resource_bundle',
    chdir='sourceless-module')

# TODO(thakis): shared_libraries that have no sources but depend on static
# libraries currently only work with the ninja generator. This is used by
# chrome/mac's components build.
if test.format == 'ninja':
  # Check that an executable depending on a resource framework links fine too.
  test.build(
      'test.gyp', 'dependent_on_resource_framework', chdir='sourceless-module')
  test.built_file_must_exist(
      'resource_framework.framework/Resources/foo.manifest',
      chdir='sourceless-module')
  test.built_file_must_exist(
      'resource_framework.framework/resource_framework',
      chdir='sourceless-module')

test.pass_test()
|
from django.core.management import BaseCommand
import time
from random import randint
from channels import Group
from threading import Thread, Lock, active_count
import socket
from django.http import HttpResponse
from django.shortcuts import render,redirect,HttpResponseRedirect,HttpResponse
#from django.contrib.sessions.backends.db import SessionStore
#The class must be named Command, and subclass BaseCommand
class Command(BaseCommand):
    """Listens on TCP port 10000 and relays sensor values to Channels groups.

    Protocol per client connection (16-byte reads):
      * '1' + id     -> set the Channels group identifier for this connection
      * '0' + value  -> forward the value to Group(identifier)
    """
    # Shown when the user types help. The original executed print(...) in the
    # class body, which ran at import time instead of using Django's `help`
    # attribute.
    help = "Simulates data values from a beddit sensor."

    def handle(self, *args, **options):
        def message_handler(message_string, identifier):
            # Dispatch one wire message; returns the (possibly updated)
            # identifier for this connection. Payloads may be empty strings.
            try:
                message_type = message_string[0]
                message_string = message_string[1:]
                if message_type == '0':
                    # message_string is a graph value: broadcast it.
                    print("message received", message_string)
                    Group(identifier).send({'text': message_string})
                elif message_type == '1':
                    # message_string is the new identifier.
                    identifier = message_string
                    print("identifier set to", message_string)
                else:
                    print("unknown message_type")
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt /
                # SystemExit are not swallowed; malformed frames are logged.
                print("bad message!")
                print(identifier)
            return identifier

        def thread_client_socket(conn, addr):
            # One thread per client; mutex_print serializes console output.
            identifier = '0'
            mutex_print.acquire()
            try:
                print(str(addr) + ' connected')
            finally:
                mutex_print.release()
            while True:
                try:
                    while True:
                        data = conn.recv(16)
                        mutex_print.acquire()
                        try:
                            identifier = message_handler(str(data.decode()), identifier)
                        finally:
                            mutex_print.release()
                        if not data:
                            # Empty read: the peer closed the connection.
                            break
                finally:
                    conn.close()
                    break
            mutex_print.acquire()
            try:
                print(str(addr) + ' disconnected')
            finally:
                mutex_print.release()

        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server_address = ('', 10000)
        sock.bind(server_address)
        sock.listen(5)
        mutex_print = Lock()
        while True:
            # accept() blocks until a client connects.
            connection, client_address = sock.accept()
            thread = Thread(target=thread_client_socket, args=(connection, client_address))
            thread.start()
            mutex_print.acquire()
            try:
                print('number of threads: ' + str(active_count()))
            finally:
                mutex_print.release()
|
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# 下面理清楚一些数学概念:
# 因数:一个数,如果存在可以被它整除的数,则这些数都是该数的因数。
# 规定0没有因数,1的因数是1,其他的比如4的因数有“1”、“2”、“4
# 因子:一个数,如果存在可以被它整除的数且这些数不包括它本身,则这些书都是该数的因子。
# 规定0没有因子,1的因子是1,其他的比如4的因子有“1”、“2”
# 质因子:一个数,如果可以分解成n个质数相乘,则n个质数成为该数的质因子。
# 规定0和1没有质因子,质数的质因子为其本身
# 完数:一个数的因子之和等于它本身,则该数为完数。
# 1. Given a non-negative number n, return all factors of it.
class Solution(object):
    def allFactor(self, n):
        """Return all factors of a positive integer n, including 1 and n, sorted.

        Fixes the original, which stopped the trial loop before sqrt(n)
        (e.g. returned [1] for 4 and missed 3/4 for 12), never included n
        itself, and would have duplicated sqrt(n) for perfect squares.

        :type n: int
        :rtype: List[int]
        """
        if n <= 0:
            # By the convention in the comments above: 0 has no factors.
            return []
        if n == 1:
            return [1]
        factors = [1, n]
        for i in range(2, int(n ** 0.5) + 1):
            if n % i == 0:
                factors.append(i)
                if i != n // i:  # avoid duplicating sqrt(n) for perfect squares
                    factors.append(n // i)
        return sorted(factors)
# 2. Given a non-negative number n, return all common factors both of A and B.
# Solution:
# Using the Euclidean algorithm to get maximum common factor, then get all factor of it.
class Solution(object):
    def allCommonFactor(self, A, B):
        """Return the common factors of A and B as a set, excluding A and B themselves.

        Every common factor of A and B divides gcd(A, B), so the gcd is
        computed first (iteratively) and then all of its divisors are
        collected by trial division.
        """
        a, b = A, B
        while b:  # iterative Euclidean algorithm
            a, b = b, a % b
        g = a
        factors = {1}
        for d in range(2, g + 1):
            if g % d == 0:
                factors.add(d)
                factors.add(g // d)
        return factors - {A, B}
if __name__ == '__main__':
    # Quick manual checks (the second Solution class shadows the first).
    # print(Solution().allFactor(100))
    print(Solution().allCommonFactor(20, 8))
    print(Solution().allCommonFactor(30, 15))
|
import discord
from discord.ext import commands
class Ping(commands.Cog):
    """Cog exposing a single `ping` command that reports gateway latency."""

    def __init__(self, client):
        self.client = client

    @commands.command()
    async def ping(self, ctx):
        # client.latency is in seconds; report whole milliseconds.
        latency_ms = round(self.client.latency * 1000)
        embed = discord.Embed(color=0xffff00, description=f'⏳ | {latency_ms}ms')
        await ctx.send(embed=embed)
def setup(client):
    # Extension entry point: discord.py calls this when the extension loads.
    client.add_cog(Ping(client))
import requests
import json
def received_message(event, token):
    """Handle one Messenger webhook event: show typing, then greet the sender by first name."""
    sender_id = event['sender']['id']
    # These lookups are kept for their KeyError-on-missing behavior even
    # though the values are unused downstream.
    recipient_id = event['recipient']['id']
    time_message = event['timestamp']
    text = event['message']['text']
    call_send_API(typing_message(sender_id), token)
    profile = call_user_API(sender_id, token)
    print(list(profile.keys()))
    greeting = 'Hola {} , ¿Cómo estas?'.format(profile['first_name'])
    call_send_API(text_message(sender_id, greeting), token)
def typing_message(recipient_id):
    """Build a Send API payload that turns on the typing indicator for *recipient_id*."""
    return {
        'recipient': {'id': recipient_id},
        'sender_action': 'typing_on',
    }
def text_message(recipient_id, message_text):
    """Build a Send API payload for a plain-text message to *recipient_id*."""
    return {
        'recipient': {'id': recipient_id},
        'message': {'text': message_text},
    }
def call_send_API(data, token):
    """POST a message payload to the Messenger Send API and log on success."""
    response = requests.post(
        'https://graph.facebook.com/v2.6/me/messages',
        params={'access_token': token},
        data=json.dumps(data),
        headers={'Content-type': 'application/json'},
    )
    if response.status_code == 200:
        print("El mensaje fue enviado exitoso")
def call_user_API(user_id, token):
    """Fetch the public profile of a Messenger user via the Graph API."""
    response = requests.get(
        'https://graph.facebook.com/v2.6/' + user_id,
        params={'access_token': token},
    )
    return json.loads(response.text)
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import click
from wadebug import results
from wadebug.config import Config
# Column widths used when laying out tabular CLI output.
table_left_alignment = 63
# NOTE(review): 'aligment' typo kept — renaming could break external users.
table_right_aligment = 17

# Result class -> click color name for the status indicator.
color_map = {
    results.OK: "green",
    results.Warning: "yellow",
    results.Problem: "red",
    results.Skipped: "blue",
    results.WADebugError: "red",
}

# Result class -> one-character status icon shown in brackets.
indicator_icon_map = {
    results.OK: "✓",
    results.Warning: "!",
    results.Problem: "✗",
    results.Skipped: "-",
    results.WADebugError: "✗",
}
def print_program_header():
    """Print the bold summary prefix, leaving the cursor on the same line."""
    click.secho("WADebug summary: ", nl=False, bold=True)
def print_dev_mode_header():
    """Announce that development mode is enabled (appended after the summary prefix)."""
    click.secho("(DEV mode enabled)")
def print_result_header(result):
    """Print one result line: a colored [icon] then the action name and description."""
    result_type = result.__class__
    icon = get_result_indicator_icon(result_type)
    color = get_result_color(result_type)
    click.secho("[{}] ".format(icon), fg=color, nl=False)
    header = "{} - {}".format(
        result.action.user_facing_name, result.action.short_description
    )
    click.secho(header)
def get_result_color(result_type):
    """Map a result class to its display color, defaulting to white.

    Uses dict.get with a default instead of the original lookup-then-test.
    """
    return color_map.get(result_type, "white")
def get_result_indicator_icon(result_type):
    """Map a result class to its status icon, defaulting to a blank space.

    Uses dict.get with a default instead of the original lookup-then-test.
    """
    return indicator_icon_map.get(result_type, " ")
def print_result_details(result):
    """Print the indented message/details/remediation, plus traceback in dev mode."""
    for section in (result.message, result.details, result.remediation):
        click.echo(add_indentation_to_result_field(section))
    if Config().development_mode and hasattr(result, "traceback"):
        click.echo(add_indentation_to_result_field(result.traceback))
def add_indentation_to_result_field(text):
    """Indent every line of *text* by one space.

    The parameter was renamed from ``str``, which shadowed the builtin;
    callers in this module pass it positionally.
    """
    return "\n".join(" " + line for line in text.split("\n"))
def print_invalid_config_message(config_file_path, ex):
    """Message to print when invalid yaml file is provided as config."""
    # Error line in red, with the absolute path for easy copy-paste.
    click.secho(
        "\nConfig file at {config_file_path} is invalid.".format(
            config_file_path=os.path.join(os.getcwd(), config_file_path)
        ),
        fg="red",
    )
    click.secho(
        "Make sure file contains valid yaml or rename it and wadebug will "
        "create a new empty config file. Execute command below to make a backup:\n"
    )
    # Ready-to-run backup command, emphasized.
    click.secho(
        "\tmv {config_file_path} {config_file_path}.bak\n".format(
            config_file_path=config_file_path
        ),
        bold=True,
    )
    # Surface the original parser error for debugging.
    click.secho("Exception found:", bold=True)
    click.secho("{parsing_ex}\n".format(parsing_ex=ex))
|
# Generated by Django 2.1.4 on 2018-12-19 11:47
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make Contacts.created_time set automatically on creation."""

    dependencies = [
        ('contacts', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='contacts',
            name='created_time',
            field=models.DateTimeField(auto_now_add=True, verbose_name='发送时间'),
        ),
    ]
|
# Alarms are a special sort of signal, where your program asks the OS to notify it
# after some period of time has elapsed.
import signal
import time
def received_alarm(signum, stack):
    # Signal handler: runs when SIGALRM is delivered to the process.
    print 'Alarm:', time.ctime()

# Call received_alarm in 2 seconds.
signal.signal(signal.SIGALRM, received_alarm)
signal.alarm(2)

print 'Before:', time.ctime()
time.sleep(4)
print 'After:', time.ctime()

# The call to sleep() does not last the full 4 seconds.
# SIGALRM interrupts it, as the sample output below shows (Python 2).
"""
Before: Sun Jan 18 13:57:25 2015
Alarm: Sun Jan 18 13:57:27 2015
After: Sun Jan 18 13:57:27 2015
"""
|
#encoding=utf-8
import cv2
import numpy as ny #别名
img = cv2.imread("./images/bear.jpg")
# NumPy image shape is (rows, cols) = (height, width). The original assigned
# w = shape[0] and h = shape[1], which are swapped and would distort the
# output size of warpAffine for non-square images.
h = img.shape[0]
w = img.shape[1]
# Translate the image: +100 px along x, +50 px along y.
M = ny.float32([[1, 0, 100], [0, 1, 50]])
dst = cv2.warpAffine(img, M, (w, h))  # dsize is (width, height)
cv2.imshow("Hello", dst)
cv2.imshow("HelloCV", img)
# Rotation matrix: center at the image middle, 45 degrees, scale 0.75.
N = cv2.getRotationMatrix2D((0.5 * w, 0.5 * h), 45, 0.75)
dst1 = cv2.warpAffine(img, N, (w, h))
cv2.imshow("HelloCV1", dst1)
cv2.waitKey(0)
|
def format_solution(d, p, s):
    """Render one Advent-style answer line: day, part, then the solution after a tab."""
    return "Day {:1} (part {}):\t{}".format(d, p, s)
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize
from sklearn.metrics import r2_score
# Observed samples to fit (y) against evenly spaced integer indices (x).
y = np.array([2.632214144151, 3.272355, 2.7607268, 3.4447693, 3.6799583, 3.5055826, 2.9330038, 2.8568107, 3.3276575, 2.8926306, 2.7319663, 3.5326295, 3.160792, 2.8481617, 3.2420505, 3.0637345, 3.0162536, 3.3754508, 3.489457, 2.8902778, 3.2010884])
x = np.array(list(range(len(y))))
def test_func(x, a, b, c, d):
return (a * np.sin(b * (x + c))) + d
params, params_covariance = optimize.curve_fit(test_func, x, y,
p0=[2, 2, 2, 2])
print("R^2: {}".format(r2_score(y, test_func(x, params[0], params[1], params[2], params[3]))))
plt.figure(figsize=(6, 4))
plt.scatter(x, y, label='Data point')
plt.plot(x, y, label='Data')
print(params[0], params[1], params[2], params[3])
plt.plot(x, test_func(x, params[0], params[1], params[2], params[3]),
label='Fitted function')
'''
# prediction section below:
t = np.array(list(range(20, 80)))
plt.plot(t, test_func(t, params[0], params[1], params[2], params[3]),
label='Predicted trend')
'''
plt.legend(loc='best')
plt.show()
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy.loader import ItemLoader
from scrapy.loader.processors import TakeFirst, MapCompose, Join
class ProductLoader(ItemLoader):
    """Loader defaults: first extracted value out; `name` joined, inputs passed through."""
    # Take the first extracted value for every field unless overridden.
    default_output_processor = TakeFirst()
    # Identity input mapping; name values are concatenated on output.
    name_in = MapCompose()
    name_out = Join()
    price_in = MapCompose()
class EdinburghItem(scrapy.Item):
    """Scraped degree-programme record; multi-part text fields are newline-joined."""
    # Single-valued programme name (no processors: raw extracted value).
    programme = scrapy.Field()
    # Each remaining field passes values through unchanged and joins the
    # extracted fragments with newlines.
    description = scrapy.Field(input_processor=MapCompose(),
                               output_processor=Join('\n'))
    Compulsory_Courses = scrapy.Field(input_processor=MapCompose(),
                                      output_processor=Join('\n'))
    Option_Courses = scrapy.Field(input_processor=MapCompose(),
                                  output_processor=Join('\n'))
    learning_outcomes = scrapy.Field(input_processor=MapCompose(),
                                     output_processor=Join('\n'))
    career_opportunities = scrapy.Field(input_processor=MapCompose(),
                                        output_processor=Join('\n'))
    bachelor_requirements = scrapy.Field(input_processor=MapCompose(),
                                         output_processor=Join('\n'))
    language_requirements = scrapy.Field(input_processor=MapCompose(),
                                         output_processor=Join('\n'))
    application_deadlines = scrapy.Field(input_processor=MapCompose(),
                                         output_processor=Join('\n'))
    full_time_fee = scrapy.Field(input_processor=MapCompose(),
                                 output_processor=Join('\n'))
|
from collections import Counter
from collections import OrderedDict
from sklearn.model_selection import train_test_split
# player name -> {pattern tuple: weight} learned during training.
player_reference = {}
pattern_reference = OrderedDict()

# data generation
print("Data generation")
x_train = []
x_test = []
y_train = []
y_test = []
# Each CSV row is "player;comma-separated-game-actions"; the header is skipped.
with open('data/trainlight.csv', 'r') as file:
    next(file)
    x = []
    y = []
    for line in file:
        line = line.split(';')
        player_name = line[0].strip(' \t\n\r')
        y.append(player_name)
        x.append(line[1].split(','))
# 90/10 split with a fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=0)
################
def histogram_compararator(g1, g2):
    """Mean absolute difference between two pattern histograms (lower = closer).

    Averages |g1[p] - g2[p]| over the union of patterns in both histograms.
    The original divided by ``len(pattern)`` — the length of whichever
    pattern *tuple* the loop happened to see last (set-order dependent),
    and raised NameError when both histograms were empty.
    """
    bins = set(g1) | set(g2)
    if not bins:
        return 0
    score = sum(abs(g1.get(p, 0) - g2.get(p, 0)) for p in bins)
    return score / len(bins)
def actions_to_patterns(game):
    """Return every contiguous subsequence of *game* with length 2..6, as tuples.

    Fixes the original, which ignored its parameter entirely and read the
    global ``actions`` left over from the enclosing training loop.
    """
    patterns = []
    # for current_p_size in range(3, int(len(game)/100)):
    for size in range(2, 7):
        for start in range(len(game) - size + 1):
            patterns.append(tuple(game[start:start + size]))
    return patterns
# Training: build a per-player histogram of action patterns.
print("Training")
for player_index in range(len(y_train)):
    player_name = y_train[player_index]
    game = x_train[player_index]
    if player_name not in player_reference:
        player_reference[player_name] = {}
    # Games interleave race/metadata with actions; even slot 0 is the race.
    race = game[0].strip(' \t\n\r')
    actions = game[1::2]
    temp = Counter(actions_to_patterns(actions))
    # NOTE(review): dict.update *replaces* per-pattern counts with the latest
    # game's counts rather than summing across games — confirm intended.
    player_reference[player_name].update(temp)
# Keep 15 patterns per player and normalize their counts to sum to 1.
for player in list(player_reference.keys()):
    # NOTE(review): ascending sort keeps the 15 *least* frequent patterns;
    # most-frequent was probably intended — confirm.
    sorted_patterns = sorted(player_reference[player].items(), key=lambda x: x[1])
    player_reference[player] = dict(sorted_patterns[:15])
    s = sum(player_reference[player].values())
    player_reference[player] = {k: v/s for k, v in player_reference[player].items()}
# Test: classify each held-out game by nearest player histogram.
print("test " + str(len(y_test)))
accuracy = 0
total = len(x_test)
for test_index in range(len(x_test)):
    print(str(test_index))
    player_test = y_test[test_index]
    game_test = x_test[test_index]
    if player_test not in player_reference.keys():
        print('not in')
        total -= 1
        # NOTE(review): `break` aborts the whole evaluation on the first
        # unseen player; `continue` was probably intended — confirm.
        break
    test_pattern = game_test[1::2]
    # Sentinel larger than any achievable distance.
    best_fit = {'score': 2**10000, 'player': ''}
    # NOTE(review): `game` is the stale loop variable left over from the
    # training loop above — `test_pattern` was probably intended; confirm.
    game_test_patterns = Counter(actions_to_patterns(game))
    # Same keep-15-and-normalize treatment as in training.
    sorted_patterns = sorted(game_test_patterns.items(), key=lambda x: x[1])
    game_test_patterns = dict(sorted_patterns[:15])
    s = sum(game_test_patterns.values())
    game_test_patterns = {k: v/s for k, v in game_test_patterns.items()}
    for player, player_patterns in player_reference.items():
        score = histogram_compararator(game_test_patterns, player_patterns)
        if score <= best_fit['score']:
            best_fit['score'] = score
            best_fit['player'] = player
    if best_fit['player'] == player_test:
        print('match')
        accuracy += 1
print(accuracy/total*100)
|
"""
stringjumble.py
Author: Eamon
Credit: stack overflow
Assignment:string jumble
The purpose of this challenge is to gain proficiency with
manipulating lists.
Write and submit a Python program that accepts a string from
the user and prints it back in three different ways:
* With all letters in reverse.
* With words in reverse order, but letters within each word in
the correct order.
* With all words in correct order, but letters reversed within
the words.
Output of your program should look like this:
Please enter a string of text (the bigger the better): There are a few techniques or tricks that you may find handy
You entered "There are a few techniques or tricks that you may find handy". Now jumble it:
ydnah dnif yam uoy taht skcirt ro seuqinhcet wef a era erehT
handy find may you that tricks or techniques few a are There
erehT era a wef seuqinhcet ro skcirt taht uoy yam dnif ydnah
"""
def tnirp(b):
    """Print *b* reversed ('tnirp' is 'print' backwards)."""
    reversed_text = b[::-1]
    print(reversed_text)
a = 0
#reverse the text
string = input("Please enter a string of text (the bigger the better): ")
# Prepend a space so every word is preceded by a space-delimited boundary.
stringg = " " + string
unedit = list(stringg)
revcor = []
revrev=[]
n = (len(unedit))
# u counts the spaces (== number of words thanks to the prepended space).
u = 0
for b in range(0,n-1):
    if unedit[b] == ' ':
        u = u + 1
e = 0
# NOTE(review): `list` shadows the builtin from here on.
list = [s for s in range(0,u)]
# Record the index of each space in the input.
for q in range(0,n-1):
    if unedit[q] == ' ':
        list[e] = q
        e = e + 1
spaces = list[:]
w = len(spaces)
spaces.append(n)
# Prepend each [space + word] segment: words end up in reverse order,
# letters within each word untouched.
for t in range(1,w+1):
    list = unedit[spaces[t-1]:spaces[t]]
    revcor = list + revcor
list = revcor
# Drop the artificial leading space.
revcor.pop(0)
# Same accumulation again; printing its reverse later yields words in the
# correct order with letters reversed within each word.
for t in range(1,w+1):
    list = unedit[spaces[t-1]:spaces[t]]
    revrev = list + revrev
revrev.pop(0)
#print(n)
#print(u)
#print("spaces: " + str(spaces))
#print("revcor: " + str(revcor))
#print("unedit: " + str(unedit))
# Flatten the character lists back into strings.
revcorr = ""
for o in revcor:
    revcorr = revcorr + str(o)
quotes = '"'
revrevv = ""
for o in revrev:
    revrevv = revrevv + str(o)
print("You entered " + str(quotes) + str(string) + str(quotes) + ". Now jumble it:")
tnirp(string)
print(revcorr)
tnirp(revrevv)
|
import numpy as np
from sklearn.tree import DecisionTreeClassifier
import pandas as pn
# Load the Titanic dataset keyed by passenger id.
data = pn.read_csv('./DATA/titanic.csv', index_col='PassengerId')
# Keep only the modeling columns and drop rows with missing values.
data_for_tree = pn.DataFrame(data, columns=['Pclass','Fare','Age','Sex','Survived']).dropna()
# data.dropna()
# Separate the target column from the features.
surv = pn.DataFrame(data_for_tree, columns=['Survived'])
del data_for_tree['Survived']
# Encode Sex numerically: male = 1, female = 0.
data_for_tree.Sex = data_for_tree.Sex.map(lambda x: 1 if x == 'male' else 0)
# data_for_tree.Sex = [1 for i in data_for_tree.Sex if i == 'male']
#преобразуем строку в числа: male = 1, female = 0
# for i in data_for_tree.index:
#     if data_for_tree.get_value(index=i,col='Sex') == 'male':
#         data_for_tree.set_value(index=i,col='Sex', value=1)
#     else:
#         data_for_tree.set_value(index=i,col='Sex', value=0)
#
#
# clf = DecisionTreeClassifier(random_state=241)
# clf = clf.fit(data_for_tree,surv)
#
# imp = clf.feature_importances_
print(data_for_tree.Age)
|
from django.shortcuts import render, redirect
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404, HttpResponseForbidden
from list_app.models import Entry, List
from list_app.forms import EntryForm, ListForm
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
def home_page(request):
    """Landing page with an empty list-creation form."""
    context = {'form': ListForm()}
    return render(request, 'home.html', context)
@login_required(login_url='/accounts/login/')
def my_lists(request, username):
    """Show the lists page for *username*; only the owner may view it."""
    owner = User.objects.get(username=username)
    # Guard clause: anyone other than the owner is rejected.
    if owner != request.user:
        return HttpResponseForbidden()
    return render(request, 'my_lists.html', {'user': owner})
@login_required(login_url='/accounts/login/')
def new_list(request):
    """Create a list from POSTed data, or re-render home with the bound form."""
    form = ListForm(data=request.POST)
    if not form.is_valid():
        return render(request, 'home.html', {"form": form})
    list_ = List()
    list_.title = form.cleaned_data['title']
    list_.user = request.user
    list_.save()
    return redirect(list_)
@login_required(login_url='/accounts/login/')
def view_list(request, list_id, list_slug):
    """Show one list (owner only); on POST, add an entry and enrich it.

    After saving a new entry, a chain of lookups populates external metadata
    (Last.fm artist info, RateYourMusic URL, Spotify URLs/URIs) before
    redirecting back to the list.
    """
    list_ = List.objects.get(id=list_id, slug=list_slug)
    form = EntryForm()
    if list_.user == request.user:
        if request.method == 'POST':
            form = EntryForm(data=request.POST)
            if form.is_valid():
                entry = form.save(for_list=list_)
                # Enrichment calls in original order — NOTE(review): later
                # calls may depend on fields set by earlier ones; confirm
                # before reordering.
                entry.lastfm_artist_getInfo()
                entry.rym_artist_geturl()
                entry.spotify_artist_geturl()
                entry.spotify_album_geturl()
                entry.spotify_artist_geturi()
                entry.spotify_album_geturi()
                return redirect(list_)
        return render(request, 'list.html', {"list": list_, "form": form})
    else:
        return HttpResponseForbidden()
@login_required(login_url='/accounts/login/')
def delete_entry(request, list_id, entry_id):
    """Delete one entry from a list owned by the current user.

    Returns 403 when the list belongs to another user, and 404 when
    either the list or the entry does not exist.
    """
    try:
        list_ = List.objects.get(id=list_id)
        if list_.user == request.user:
            entry = Entry.objects.get(id=entry_id, list=list_)
            entry.delete()
            return redirect(list_)
        else:
            return HttpResponseForbidden()
    except ObjectDoesNotExist:
        raise Http404('Błąd podczas usuwania - Wpis nie istnieje')
|
from rest_framework import serializers
from .models import Users, Blog, Comment
class UserSerializer(serializers.ModelSerializer):
    """Serializer for Users accounts.

    NOTE(review): `password`, `security_question` and `security_answer`
    are included in serialized output as well as input; consider marking
    them write_only — confirm whether any consumer relies on reading them.
    """
    class Meta:
        model = Users
        fields = ('firstname', 'lastname', 'emailid', 'mobile', 'username', 'password',
                  'security_question','security_answer')
class BlogSerializer(serializers.ModelSerializer):
    """Serializer exposing every field of Blog."""
    class Meta:
        model = Blog
        fields='__all__'
class CommentSerializer(serializers.ModelSerializer):
    """Serializer for Comment: text plus its author and parent post."""
    class Meta:
        model = Comment
        fields = ('text', 'author', 'post')
|
from django.db import models
# Create your models here.
class Produto (models.Model):
    """A product, identified by its unique name."""
    nome = models.CharField("Nome", max_length=100, unique=True)
    def __str__(self):
        return self.nome
class InformacaoPedido (models.Model):
    """Order header: the city and how many people the order serves."""
    cidade = models.CharField("Cidade", max_length=500)
    quantidadePessoa = models.DecimalField("Quantidade pessoa", decimal_places=2, max_digits = 9)
class Pedido (models.Model):
    """One order line: a product with quantity and average price.

    `produto` is kept (set to NULL) when the product is deleted; the line
    itself is removed when its InformacaoPedido header is deleted.
    """
    produto = models.ForeignKey(Produto, on_delete=models.SET_NULL, blank = True, null = True)
    lista = models.ForeignKey(InformacaoPedido, on_delete=models.CASCADE, blank = True, null = True)
    quantidade = models.DecimalField("Quantidade",decimal_places=2, max_digits=9)
    preco = models.DecimalField("Preço médio",decimal_places=2, max_digits=9)
|
from flask_restplus import Resource, Api
from .. import api
from server.operation.register import Register
import server.event as event
import server.operation as operation
import server.document as document
ns = api.namespace('RestoreList', description="列表")
class ExportRestoreList(Resource):
    """Restore-list resource.

    GET returns a placeholder payload; the decorators attach search-request
    documentation and route execution through the "restore" export event.
    """
    @document.request_search
    @event.Event.execute(name="restore", controler=operation.EventModelExport)
    def get(self):
        # BUG FIX: `get` had no `self` parameter.  Flask-RESTPlus dispatches
        # resource methods as bound methods, so the instance is always passed
        # and a zero-argument `get` raises TypeError at request time.
        # (Assumes the Event.execute decorator forwards arguments unchanged —
        # TODO confirm against its implementation.)
        return {"test": ""}
ns.add_resource(ExportRestoreList, '/')
|
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
import data.climate.window_generator as wg
# https://www.tensorflow.org/tutorials/structured_data/time_series
# Global plot defaults for the window-generator plots below.
mpl.rcParams['figure.figsize'] = (8, 6)
mpl.rcParams['axes.grid'] = False
# Pre-split Jena climate dataset (train/val/test CSVs produced elsewhere).
train_df = pd.read_csv("jena_climate_2009_2016_train.csv")
val_df = pd.read_csv("jena_climate_2009_2016_val.csv")
test_df = pd.read_csv("jena_climate_2009_2016_test.csv")
# 24-step input window predicting the next 24 temperature labels, shifted by 1.
wide_window = wg.WindowGenerator(train_df=train_df, val_df=val_df, test_df=test_df,
                                 input_width=24, label_width=24, shift=1,
                                 label_columns=['T (degC)'])
lstm_model = tf.keras.models.Sequential([
    # Shape [batch, time, features] => [batch, time, lstm_units]
    tf.keras.layers.LSTM(32, return_sequences=True),
    # Long Short Term Memory
    # Shape => [batch, time, features]
    tf.keras.layers.Dense(units=1)
])
# compile_and_fit comes from the window-generator helper module.
history = wg.compile_and_fit(lstm_model, wide_window)
lstm_model.save("h5/lstm_32_24_19__32_24_1.h5")
wide_window.plot(lstm_model)
plt.show()
|
from agagd_core.models import Chapters, Country, Game, Member, MembersRegions, Membership
from django.contrib import admin
class MemberAdmin(admin.ModelAdmin):
    """Admin list view for Member: id, name, join date and chapter columns."""
    list_display = ('member_id', 'full_name', 'join_date', 'chapter', 'chapter_id')
# Register all AGAGD models with the default admin site; Member uses the
# customized list display above.
admin.site.register(Chapters)
admin.site.register(Country)
admin.site.register(Game)
admin.site.register(Member, MemberAdmin)
admin.site.register(Membership)
admin.site.register(MembersRegions)
|
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch_ros.actions import Node
qomolo_robot_id = os.environ.get("QOMOLO_ROBOT_ID","id")
def generate_launch_description():
    """Build the launch description for the point-cloud decompress node.

    Loads config.yaml from the package share directory and starts the
    `decompress` executable under the robot-id namespace.
    """
    config_path = os.path.join(
        get_package_share_directory('compressed_pointcloud_transport'),
        'config',
        'config.yaml',
    )
    decompress_node = Node(
        package='compressed_pointcloud_transport',
        executable='decompress',
        namespace=qomolo_robot_id,
        name='decompress',
        parameters=[config_path],
    )
    description = LaunchDescription()
    description.add_action(decompress_node)
    return description
|
from flask import Flask
from flask import jsonify
import requests
app = Flask(__name__)
# This is for Elastic Beanstalk
application = app
@app.route('/')
def index():
    """Root endpoint: plain-text greeting, doubles as a liveness check."""
    return 'Hello, world'
@app.route('/weather')
def weather():
    """Proxy the current Vantaa weather from wttr.in as JSON.

    A timeout keeps a slow upstream from hanging the worker, and
    raise_for_status surfaces upstream HTTP errors instead of trying to
    JSON-decode an error page.
    """
    response = requests.get('http://wttr.in/Vantaa?format=j1', timeout=10)
    response.raise_for_status()
    return response.json()
@app.route('/health')
def health():
    """Health-check endpoint; static body with Flask's implicit HTTP 200."""
    return '200 - OK'
if __name__ == '__main__':
app.run(debug=True) |
import datetime
import string
import random
from django.contrib.auth.models import User
from django.db.models import Model, CharField, ForeignKey, CASCADE, IntegerField, DateField, TextField, ImageField, \
OneToOneField, BooleanField
from image_cropping import ImageRatioField
def random_generator(size=16, chars=string.ascii_uppercase + string.digits):
    """Return a random string of *size* characters drawn from *chars*.

    :param size: The length of the string.
    :param chars: The characters to draw from (default: A-Z plus digits).
    :return: A random string of length *size*.

    NOTE(review): `random` is not cryptographically secure; if these values
    must be unguessable (they are used as Transaction identifiers), switch
    to the `secrets` module.
    """
    # `_` instead of an unused loop variable.
    return ''.join(random.choice(chars) for _ in range(size))
class Product(Model):
    """A book listed for sale, with seller, pricing and a croppable image."""
    title = CharField(max_length=70, default="")
    author = CharField(max_length=70, default="")
    seller = ForeignKey(User, related_name="selling", on_delete=CASCADE)
    # Price appears to be stored in cents — dollar() divides by 100.
    price = IntegerField(default=1000)
    lowest_price = IntegerField(default=999)
    condition = TextField(default="average")
    location = CharField(max_length=50, default="America")
    date_listed = DateField(default=datetime.date.today)
    image = ImageField(upload_to="products/images/", null=True)
    # 250x150 crop ratio handled by django-image-cropping.
    cropping = ImageRatioField('image', '250x150')
    def dollar(self):
        """Return the price converted from cents to dollars."""
        return float(self.price / 100)
    @property
    def serialize(self):
        """Lightweight dict representation for JSON responses."""
        return {'title': self.title, 'author': self.author, 'price': self.price}
    class Meta:
        db_table = "books"
        ordering = ["title", "price"]
class Transaction(Model):
    """A purchase of one Product: buyer/shipping details plus Stripe id
    and fulfilment flags."""
    # BUG FIX: on_delete is required for relational fields since Django 2.0;
    # CASCADE is explicit here and matches the pre-2.0 implicit default, so
    # behavior is unchanged on older Django versions.
    product = OneToOneField(Product, related_name="transaction", on_delete=CASCADE)
    name = CharField(max_length=255, default="")
    email = CharField(max_length=255, default="")
    address = CharField(max_length=255, default="")
    city = CharField(max_length=255, default="")
    zipcode = CharField(max_length=10, default="")
    stripe_id = CharField(max_length=255, default="")
    started_at = DateField(default=datetime.date.today)
    confirmed = BooleanField(default=False)
    has_sent = BooleanField(default=False)
    has_arrived = BooleanField(default=False)
    # Random 16-char hash generated per row (callable default).
    identification = CharField(default=random_generator, max_length=17)
|
from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
from api import views
# DRF router auto-generates list/detail routes for the blog viewset.
router = DefaultRouter()
# NOTE(review): `base_name` was renamed to `basename` in DRF 3.9+ — confirm
# the installed DRF version before upgrading.
router.register(r'blog', views.BlogViewSet, base_name='blog')
urlpatterns = [
    url(r'^', include(router.urls)),
]
|
def function(a, b, c):
    """Print the three arguments in declaration order (demo of call styles)."""
    print(a, b, c)
function(1, 2, 3)  # arguments matching by position
function(c=6, a=4, b=5)  # arguments matching by name
|
"""
Написать функцию is_prime, принимающую 1 аргумент — число от 2 до 1000,
и возвращающую True, если оно простое, и False - иначе.
"""
def is_prime(x):
    """Return True if *x* (an integer in [2, 1000]) is prime, else False.

    The module docstring specifies a True/False return; the previous
    implementation returned descriptive strings instead, so every truthiness
    check on the result was wrong (non-empty strings are always truthy).

    :raises ValueError: if x is outside the supported range 2..1000.
    """
    if x < 2 or x > 1000:
        raise ValueError('Number is out of range from 2 to 1000')
    # Trial division up to sqrt(x) suffices: any composite has a divisor
    # no larger than its square root.
    for i in range(2, int(x ** 0.5) + 1):
        if x % i == 0:
            return False
    return True
print(is_prime(7))
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 1 14:05:28 2019
@author: Xi Yu
"""
import tensorflow as tf
import numpy as np
import pandas as pd
#import tensorflow.contrib.eager as tfe
print(tf.__version__)
print(tf.__git_version__)
tf.compat.v1.enable_eager_execution()
#%%
# Synthetic LDA corpus: K topics over a V-word vocabulary, D documents of
# N words each, generated from Dirichlet priors with a fixed seed.
# set data dimensions
K = 3
V = 5
D = 100
N = 100
# set the seed
np.random.seed(2014)
# beta prior parameters
eta = np.ones(V) * 1e-1
# beta profiles
beta = np.random.dirichlet(alpha=eta, size=K)
# theta prior parameters
alpha = np.ones(K) * 1e-1
# alpha[0] = 10
# document's prior topic allocation
theta = np.random.dirichlet(alpha=alpha, size=D)
# word's topic membership
z = [np.random.choice(K, size=N, replace=True, p=theta[d, :]) for d in range(D)]
z = np.vstack(z)
# actual words and counts
# Appending list(range(V)) puts every vocabulary entry in each document once,
# so np.unique yields exactly V counts per document and nw stacks cleanly.
w = [np.array([np.random.choice(V, size=1, p=beta[k,:])[0] for k in z[d, :]] + list(range(V))) for d in range(D)]
nw = [np.unique(w[d], return_counts=True)[1] for d in range(D)]
nw = np.vstack(nw)
w = np.vstack(w)
# Word-count matrix as a [V, D] tf.Variable (transposed from [D, V]).
nw = tf.convert_to_tensor(nw, dtype=tf.float32)
nw = tf.Variable(initial_value=tf.transpose(nw),
                 name="nw_vd")
#%%
print("beta:")
pd.DataFrame(np.round(np.transpose(beta), decimals=3))
#%%
print("theta:")
pd.DataFrame(np.round(theta, decimals=3)).head(6)
#%%
print("documents word counts:")
pd.DataFrame(tf.transpose(nw).numpy()).head(6)
#%%
# initialize LDA parameters
def initialize_variables(K, V, D, alpha=1e-1, eta=1e-1, seed=2014):
    """
    Initialize parameters of LDA model returning adequate Tensors.
    args:
        K (int): number of LDA components
        V (int): vocabulary size
        D (int): number of documents
        alpha (float): hyperparameter for theta prior (returned as a [K] tensor)
        eta (float): hyperparameter for beta prior (returned as a [V] tensor)
        seed (int): seed for tf.random
    returns:
        eta: [V] tensor with prior parameters for beta
        alpha: [K] tensor with prior parameters for theta
        lam: [K, V] tensor with posterior word distribution per class
        phi: [K, V, D] tensor with vocabulary membership per document,
             normalized over topics via softmax
        gamma: [K, D] tensor with per-document topic posteriors
        e_log_beta, e_log_theta: [K, V, D] tensors, zero-initialized
            (random draw multiplied by .0) and filled in by the updates
    """
    tf.random.set_seed(seed)
    eta = tf.Variable(initial_value=tf.ones(V) * eta,
                      name="eta_v")
    alpha = tf.Variable(initial_value=tf.ones(K) * alpha,
                        name="alpha_k")
    # abs() keeps the Dirichlet-style parameters non-negative.
    lam = tf.Variable(tf.abs(tf.random.normal(shape=(K, V))),
                      name="lambda_kv")
    phi = tf.Variable(initial_value=tf.random.normal(shape=(K, V, D)),
                      name="phi_kvd")
    phi.assign(value=tf.nn.softmax(phi, axis=0))
    gamma = tf.Variable(initial_value=tf.abs(tf.random.normal(shape=(K, D))),
                        name="gamma_kd")
    e_log_beta = tf.Variable(initial_value=tf.abs(tf.random.normal(shape=(K, V, D))) * .0,
                             name="e_log_beta_kvd")
    e_log_theta = tf.Variable(initial_value=tf.abs(tf.random.normal(shape=(K, V, D))) * .0,
                              name="e_log_theta_kvd")
    return eta, alpha, lam, phi, gamma, e_log_beta, e_log_theta
#%%
# test
eta, alpha, lam, phi, gamma, e_log_beta, e_log_theta = initialize_variables(K, V, D)
#%%
def update_lambda(lam, eta, phi, nw):
    """Variational update for lambda (topic-word posterior), in place.

    For each topic k: lam[k] <- eta + sum over documents of phi[k] * nw,
    written row-by-row into the tf.Variable via tensor_scatter_nd_update.
    """
    K = lam.shape.as_list()[0]
    # NOTE(review): `num_k` is actually the vocabulary size V (second dim).
    num_k = lam.shape.as_list()[1]
    for k in range(K):
        lam.assign(tf.tensor_scatter_nd_update(lam,
                                               indices=tf.constant([[k,i] for i in range(num_k)]),
                                               updates=tf.reduce_sum(tf.multiply(phi[k], nw), axis=1) + eta))
    return lam
#%%
update_lambda(lam, eta, phi, nw)
print(lam)
#%%
# gamma update
# gamma update
def update_gamma(gamma, alpha, phi, nw):
    """Variational update for gamma (document-topic posterior), in place.

    For each topic k: gamma[k] <- alpha[k] + sum over vocabulary of
    phi[k] * nw, written row-by-row into the tf.Variable.
    """
    K = gamma.shape.as_list()[0]
    # NOTE(review): `num_k` is actually the number of documents D (second dim).
    num_k = gamma.shape.as_list()[1]
    for k in range(K):
        gamma.assign(tf.tensor_scatter_nd_update(gamma,
                                                 indices=tf.constant([[k,i] for i in range(num_k)]),
                                                 updates=tf.reduce_sum(tf.multiply(phi[k], nw), axis=0) + alpha[k]))
    return gamma
tmp = gamma.value()
update_gamma(gamma, alpha, phi, nw)
print(gamma)
#%%
def update_e_log_beta(e_log_beta, lam):
    """Update E[log beta] from lambda, in place.

    For each topic k the digamma expectation of lam[k] (a [V] vector) is
    tiled across the document axis into e_log_beta[k] ([V, D]).

    FIX: the tile width previously read the module-level global `D`; it is
    now derived from e_log_beta's own shape, so the function no longer
    depends on hidden global state.
    """
    K = lam.shape.as_list()[0]
    num_v = lam.shape.as_list()[1]  # vocabulary size V
    num_d = e_log_beta.shape.as_list()[2]  # number of documents (was global D)
    for k in range(K):
        e_log_beta.assign(tf.tensor_scatter_nd_update(e_log_beta,
                                                      indices=tf.constant([[k,i] for i in range(num_v)]),
                                                      updates=tf.tile(tf.expand_dims(tf.math.digamma(lam[k]) - tf.math.digamma(tf.reduce_sum(lam[k])), axis=1), multiples=[1, num_d])))
    return e_log_beta
print(e_log_beta)
update_e_log_beta(e_log_beta, lam);
print(e_log_beta)
#%%
def update_e_log_theta(e_log_theta, gamma):
    """Update E[log theta] from gamma, in place.

    The digamma expectation of gamma ([K, D]) is tiled across the
    vocabulary axis into e_log_theta ([K, V, D]).

    FIX: the tile width previously read the module-level global `V`; it is
    now derived from e_log_theta's own shape, removing the hidden global
    dependency (matching the fix in update_e_log_beta).
    """
    num_v = e_log_theta.shape.as_list()[1]  # vocabulary size (was global V)
    e_log_theta.assign(value=tf.tile(tf.expand_dims(tf.math.digamma(gamma) -
                                                    tf.math.digamma(tf.reduce_sum(gamma, axis=0)), axis=1), multiples=[1, num_v, 1]))
    return e_log_theta
update_e_log_theta(e_log_theta, gamma)
#%%
import time
start = time.time()
def update_phi(e_log_beta, e_log_theta):
    """Update phi as softmax(E[log beta] + E[log theta]) over topics.

    NOTE(review): this assigns to the module-level `phi` variable rather
    than a parameter — the function depends on global state; confirm this
    is intentional before reusing it elsewhere.
    """
    phi.assign(value=e_log_beta + e_log_theta)
    phi.assign(value=tf.nn.softmax(logits=phi, axis=0))
    return phi
update_phi(e_log_beta, e_log_theta)
end = time.time()
print(end - start)
print(phi)
#%%
nw_kvd = tf.tile(tf.expand_dims(nw / tf.reduce_sum(nw), axis=0),
multiples=[K, 1, 1])
nw_kvd
#%%
def elbo(phi, e_log_beta, e_log_theta, nw_kvd):
    """Return the word-count-weighted ELBO term as a Python float.

    Sums nw * phi * (E[log beta] + E[log theta] - log phi); the 1e-6
    offset guards log(0).  Requires eager execution for .numpy().
    """
    A = tf.reduce_sum(nw_kvd * phi * (e_log_beta + e_log_theta - tf.math.log(phi + 1e-6)))
    return A.numpy()
#elbo(phi, e_log_beta, e_log_theta, nw_kvd)
#%%
# Variational EM driver loop.
# NOTE(review): `seed` is mutated but never passed to initialize_variables,
# so the default seed (2014) is always used — confirm intent.
seed = 1
seed += 1
eta, alpha, lam, phi, gamma, e_log_beta, e_log_theta = initialize_variables(K, V, D)
prev_elbo = 0.0
next_elbo = 0.0
# NOTE(review): `iter` shadows the builtin of the same name.
iter = 0
for i in range(100000):
    # Inner loop: E-step repeated until gamma stops moving.
    for j in range(100000):
        # E-Step:
        update_e_log_beta(e_log_beta, lam);
        update_e_log_theta(e_log_theta, gamma);
        update_phi(e_log_theta=e_log_theta, e_log_beta=e_log_beta)
        gamma_prev = gamma.value()
        update_gamma(gamma, alpha, phi, nw)
        diff = tf.reduce_mean(tf.abs(gamma_prev - gamma.value()))
        if diff < 1e-6:
            break
    # M-Step:
    update_lambda(lam, eta, phi, nw)
    next_elbo = elbo(phi, e_log_beta, e_log_theta, nw_kvd)
    # next_elbo = 0.0
    print("Iteration:", iter, "ELBO:", next_elbo)
    # Outer convergence check on the ELBO change.
    diff = np.abs(next_elbo - prev_elbo)
    if diff < 1e-6:
        print("Converged!")
        break
    else:
        iter += 1
        prev_elbo = next_elbo
|
from abc import ABC, abstractmethod
class Animal(ABC):
    """Abstract base class for animals; cannot be instantiated directly.

    `make_sound` is abstract yet has a body: subclasses must override it,
    but may call `super().make_sound()` to reuse the shared part.
    """
    @abstractmethod
    def make_sound(self):
        print("Some implementation!")
    def display_values(self):
        pass
    def walk(self):
        pass
    def jump(self):
        pass
class Dog(Animal):
    """Concrete Animal that barks, reusing the abstract method's body."""
    def __init__(self):
        self.name = 'dog'
    def make_sound(self):
        super().make_sound()
        print("I bark")
class Human(Animal):
    """Concrete Animal that talks, reusing the abstract method's body."""
    def make_sound(self):
        super().make_sound()
        print("I talk")
# BUG FIX: `Animal()` raises TypeError because Animal is an ABC with an
# abstract method — the script crashed here.  Demonstrate both the failed
# instantiation and a working concrete subclass instead.
try:
    x = Animal()
except TypeError as err:
    print("Cannot instantiate abstract class:", err)
x = Dog()
x.make_sound()
# class Phone:
# def __init__(self):
# self.name = 'abc'
#
# def switch_off(self):
# pass
#
#
# class OnePlus(Phone):
# def switch_off(self):
# pass
#
#
# class iPhone(Phone):
# def switch_off(self):
# pass
# class Parent:
# def __init__(self):
# self.name = 'abc'
# self.age = 25
# self.spouse_name = 'xyz'
#
#
# def display_parent_name_with_spouse_name(self):
# self.__replace_letters()
#
# def __replace_letters(self):
# pass
#
#
# p = Parent()
# p.display_parent_name_with_spouse_name()
# what is abstraction
# why abstraction
# how abstraction can be achieved
# what are abstract classes
# difference between class and abstract class
# how abstract classes help in data hiding (cannot instantiate base class)
# difference between subclassing and abstract class
|
class Book:
    """A book identified by (year, name, author), with user reviews."""

    def __init__(self, year, name, author):
        self.year = year
        self.name = name
        self.author = author
        self.reviews = []

    def __eq__(self, other):
        # BUG FIX: __eq__ used to print True/False and implicitly return
        # None, which breaks every consumer of `==` (if-tests, `in`,
        # list.remove, ...).  Return the comparison result instead, and
        # defer to the other operand for non-Book comparisons.
        if not isinstance(other, Book):
            return NotImplemented
        return (self.year, self.name, self.author) == \
               (other.year, other.name, other.author)

    def add_review(self, text):
        """Append a review text to this book."""
        self.reviews.append(text)

    def show_reviews(self):
        """Print all reviews, numbered from 1."""
        for counter, review in enumerate(self.reviews, start=1):
            print('{}.\n{}\n'.format(counter, review))
# BUG FIX: the constructor signature is (year, name, author), but the
# original calls passed the title first, leaving the title in `year` and
# the year in `name`.  Pass the fields in the declared order, and print
# the comparison results explicitly.
book1 = Book(1949, 'Nineteen Eighty-Four', 'George Orwell')
book2 = Book(1949, 'Nineteen Eighty-Four', 'George Orwell')
book3 = Book(1951, 'Над пропастью во ржи', 'Jerome David Salinger')
print(book1 == book2)
print(book1 == book3)
book1.add_review('Cool!!')
book1.add_review('Not bad')
book1.show_reviews()
|
#!/usr/bin/env python
import os, sys, os.path
from collections import defaultdict
from pixelterm.xtermcolors import xterm_colors
from PIL import Image, PngImagePlugin
try:
import re2 as re
except:
import re
def parse_escape_sequence(seq):
    """Decode one SGR escape sequence ('\\x1b[...;...m') into (fg, bg).

    Each channel is an (r, g, b, a) tuple, or None when the sequence does
    not mention it.  256-color selectors (38;5;N / 48;5;N) are resolved via
    the xterm palette; 39/49 reset one channel and 0 resets both to
    transparent.  Unknown codes are ignored.
    """
    codes = [int(part) for part in seq[2:-1].split(';')]
    fg = None
    bg = None
    pos = 0
    while pos < len(codes):
        code = codes[pos]
        if code in (38, 48):
            # Only the 256-color form (next code == 5) is supported.
            if codes[pos + 1] == 5:
                color = xterm_colors[codes[pos + 2]]
                if code == 38:
                    fg = color
                else:
                    bg = color
                pos += 2
        elif code == 39:
            fg = (0, 0, 0, 0)
        elif code == 49:
            bg = (0, 0, 0, 0)
        elif code == 0:
            fg = (0, 0, 0, 0)
            bg = (0, 0, 0, 0)
        pos += 1
    return fg, bg
def unpixelterm(text):
    """Convert pixelterm ANSI half-block art back into an RGBA PIL image.

    Returns (img, metadata): the reconstructed image (2 pixel rows per text
    line, since each character cell encodes two vertically stacked pixels)
    and a dict of metadata parsed from an optional '$$$'-delimited section.
    """
    lines = text.split('\n')
    metadata = defaultdict(list)
    # Metadata block: lines between two '$$$' markers, as 'KEY: value'
    # pairs; anything after the first non-pair line becomes '_comment'.
    # NOTE(review): the bare except silently ignores a malformed block.
    try:
        first = lines.index('$$$')
        second = lines[first+1:].index('$$$')
        metadataarea = lines[first+1:second+1]
        for i,l in enumerate(metadataarea):
            parts = l.split(': ')
            if len(parts) == 2:
                k,v = parts
                if k not in ['WIDTH', 'HEIGHT']:
                    metadata[k.lower()] += [v]
            else:
                metadata['_comment'] = '\n'.join(metadataarea[i:])
                break
        lines[first:] = lines[first+1+second+1:]
    except:
        pass
    # Drop a trailing reset-only line.
    if lines[-1] == '\x1b[0m':
        lines = lines[:-1]
    # Width = longest line with escapes and placeholders stripped.
    h = len(lines)*2
    w = max([ len(re.sub(r'\x1b\[[0-9;]+m|\$balloon.*\$|\$', '', line)) for line in lines ])
    # NOTE(review): re.search returns None when no '$balloon...$' marker is
    # present, which would raise AttributeError here — confirm all inputs
    # contain a balloon marker.
    bw = int(re.search(r'\$balloon([0-9]*)\$', text).group(1) or '1')
    if bw > w: #Fuck special cases.
        w = bw
    img = Image.new('RGBA', (w, h))
    fg, bg = (0,0,0,0), (0,0,0,0)
    x, y = 0, 0
    # Walk each line cell by cell: escape sequences change the current
    # colors, $...$ placeholders paint translucent markers, and the
    # half-block characters map (fg, bg) onto the two stacked pixels.
    for line in lines:
        for escapeseq, specialstr, char in re.findall(r'(\x1b\[[0-9;]+m)|(\$[^$]+\$)|(.)', line, re.DOTALL):
            if escapeseq:
                nfg, nbg = parse_escape_sequence(escapeseq)
                fg, bg = nfg or fg, nbg or bg
            elif specialstr:
                if specialstr == '$\\$':
                    img.putpixel((x, y), (255, 0, 0, 127))
                    img.putpixel((x, y+1), (255, 0, 0, 127))
                    x += 1
                elif specialstr == '$/$':
                    img.putpixel((x, y), (0, 0, 255, 127))
                    img.putpixel((x, y+1), (0, 0, 255, 127))
                    x += 1
                else: #(should be a) balloon
                    for i in range(x, x+bw):
                        img.putpixel((i, y), (0, 255, 0, 127))
                        img.putpixel((i, y+1), (0, 255, 0, 127))
                    x += bw
            elif char:
                #Da magicks: ▀█▄
                # space = both background, full block = both foreground,
                # upper/lower half block = the corresponding split.
                c = {' ': (bg, bg),
                     '█': (fg, fg),
                     '▀': (fg, bg),
                     '▄': (bg, fg)}[char]
                img.putpixel((x, y), c[0])
                img.putpixel((x, y+1), c[1])
                x += 1
        x, y = 0, y+2
    return img, metadata
|
from common.run_method import RunMethod
import allure
@allure.step("极师通/获取学生所有班级")
def student_class_getAllClass_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''Fetch all classes of a student.

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param return_json: return the response parsed as JSON (default True)
    :param header: request headers
    :param host: target environment/host, passed via **kwargs
    :return: JSON-parsed response by default; raw response when return_json=False
    '''
    name = "极师通/获取学生所有班级"
    url = f"/service-profile/student/class/getAllClass"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极客数学帮(家长APP)/用户管理/获取学生基本信息")
def student_studentId_basicInfo_get(studentId, params=None, header=None, return_json=True, **kwargs):
    '''Fetch a student's basic profile information.

    :param params: query-string parameters appended to the URL
    :param return_json: return the response parsed as JSON (default True)
    :param header: request headers
    :param host: target environment/host, passed via **kwargs
    :return: JSON-parsed response by default; raw response when return_json=False
    '''
    name = "极客数学帮(家长APP)/用户管理/获取学生基本信息"
    url = f"/service-profile/student/{studentId}/basicInfo"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极客数学帮(家长APP)/用户管理/修改当前学生用户一个或多个基础属性值")
def student_studentId_attribute_put(studentId, params=None, body=None, header=None, return_json=True, **kwargs):
    '''Update one or more basic attributes of the given student.

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param return_json: return the response parsed as JSON (default True)
    :param header: request headers
    :param host: target environment/host, passed via **kwargs
    :return: JSON-parsed response by default; raw response when return_json=False
    '''
    name = "极客数学帮(家长APP)/用户管理/修改当前学生用户一个或多个基础属性值"
    url = f"/service-profile/student/{studentId}/attribute"
    res = RunMethod.run_request("PUT", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/前台业务/学生信息/查询学生明细")
def student_class_detail_get(params=None, header=None, return_json=True, **kwargs):
    '''Query student detail records.

    :param params: query-string parameters appended to the URL
    :param return_json: return the response parsed as JSON (default True)
    :param header: request headers
    :param host: target environment/host, passed via **kwargs
    :return: JSON-parsed response by default; raw response when return_json=False
    '''
    name = "极运营/前台业务/学生信息/查询学生明细"
    url = f"/service-profile/student/class/detail"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极客数学帮(家长APP)/用户行课/某个学生用户主动发起调课请求")
def student_studentId_transferringClass_post(studentId, params=None, body=None, header=None, return_json=True, **kwargs):
    '''Submit a class-transfer request initiated by the student.

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param return_json: return the response parsed as JSON (default True)
    :param header: request headers
    :param host: target environment/host, passed via **kwargs
    :return: JSON-parsed response by default; raw response when return_json=False
    '''
    name = "极客数学帮(家长APP)/用户行课/某个学生用户主动发起调课请求"
    url = f"/service-profile/student/{studentId}/transferringClass"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极客数学帮(家长APP)/用户管理/获取学生电子账户")
def student_studentId_electronicAccount_get(studentId, params=None, header=None, return_json=True, **kwargs):
    '''Fetch the student's electronic account.

    :param params: query-string parameters appended to the URL
    :param return_json: return the response parsed as JSON (default True)
    :param header: request headers
    :param host: target environment/host, passed via **kwargs
    :return: JSON-parsed response by default; raw response when return_json=False
    '''
    name = "极客数学帮(家长APP)/用户管理/获取学生电子账户"
    url = f"/service-profile/student/{studentId}/electronicAccount"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极客数学帮(家长APP)/用户管理/获取学生的续报类型")
def student_studentId_signUpType_get(studentId, params=None, header=None, return_json=True, **kwargs):
    '''Fetch the student's re-enrollment (renewal) type.

    :param params: query-string parameters appended to the URL
    :param return_json: return the response parsed as JSON (default True)
    :param header: request headers
    :param host: target environment/host, passed via **kwargs
    :return: JSON-parsed response by default; raw response when return_json=False
    '''
    name = "极客数学帮(家长APP)/用户管理/获取学生的续报类型"
    url = f"/service-profile/student/{studentId}/signUpType"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/前台业务/学生信息/获取潜在学生基本信息")
def student_potential_items_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''Fetch basic information for prospective (potential) students.

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param return_json: return the response parsed as JSON (default True)
    :param header: request headers
    :param host: target environment/host, passed via **kwargs
    :return: JSON-parsed response by default; raw response when return_json=False
    '''
    name = "极运营/前台业务/学生信息/获取潜在学生基本信息"
    url = f"/service-profile/student/potential/items"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/前台业务/学生信息/获取在读学生基本信息")
def student_reading_items_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''Fetch basic information for currently enrolled students.

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param return_json: return the response parsed as JSON (default True)
    :param header: request headers
    :param host: target environment/host, passed via **kwargs
    :return: JSON-parsed response by default; raw response when return_json=False
    '''
    name = "极运营/前台业务/学生信息/获取在读学生基本信息"
    url = f"/service-profile/student/reading/items"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极客数学帮(家长APP)/用户行课/判断学生将要报名的课程和已经报名的课程是否有排课冲突")
def student_studentId_class_classId_Conflict_get(studentId, classId, params=None, header=None, return_json=True, **kwargs):
    '''Check for schedule conflicts between a class to be enrolled and existing classes.

    :param params: query-string parameters appended to the URL
    :param return_json: return the response parsed as JSON (default True)
    :param header: request headers
    :param host: target environment/host, passed via **kwargs
    :return: JSON-parsed response by default; raw response when return_json=False
    '''
    name = "极客数学帮(家长APP)/用户行课/判断学生将要报名的课程和已经报名的课程是否有排课冲突"
    url = f"/service-profile/student/{studentId}/class/{classId}/Conflict"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/前台业务/学生信息/获取已结业学生基本信息")
def student_completed_items_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''Fetch basic information for graduated (completed) students.

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param return_json: return the response parsed as JSON (default True)
    :param header: request headers
    :param host: target environment/host, passed via **kwargs
    :return: JSON-parsed response by default; raw response when return_json=False
    '''
    name = "极运营/前台业务/学生信息/获取已结业学生基本信息"
    url = f"/service-profile/student/completed/items"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/前台业务/学生信息/获取学生基本信息")
def student_all_items_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''Fetch basic information for all students.

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param return_json: return the response parsed as JSON (default True)
    :param header: request headers
    :param host: target environment/host, passed via **kwargs
    :return: JSON-parsed response by default; raw response when return_json=False
    '''
    name = "极运营/前台业务/学生信息/获取学生基本信息"
    url = f"/service-profile/student/all/items"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/班主任机制/学员管理/学生内页/查询学生考试记录--成长轨迹")
def student_administration_exam_student_id_get(id, params=None, header=None, return_json=True, **kwargs):
    '''Fetch a student's exam records (growth trajectory).

    :param params: query-string parameters appended to the URL
    :param return_json: return the response parsed as JSON (default True)
    :param header: request headers
    :param host: target environment/host, passed via **kwargs
    :return: JSON-parsed response by default; raw response when return_json=False
    '''
    name = "极运营/班主任机制/学员管理/学生内页/查询学生考试记录--成长轨迹"
    url = f"/service-profile/student/administration/exam/student/{id}"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/班主任机制/学员管理/学生内页/根据学生ID查询学生页标签信息")
def student_administration_labels_student_id_get(id, params=None, header=None, return_json=True, **kwargs):
    '''Fetch student-page label information by student ID.

    :param params: query-string parameters appended to the URL
    :param return_json: return the response parsed as JSON (default True)
    :param header: request headers
    :param host: target environment/host, passed via **kwargs
    :return: JSON-parsed response by default; raw response when return_json=False
    '''
    name = "极运营/班主任机制/学员管理/学生内页/根据学生ID查询学生页标签信息"
    url = f"/service-profile/student/administration/labels/student/{id}"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/班主任机制/学员管理/查询学员明细")
def student_administration_classes_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''Query student detail records for homeroom-teacher administration.

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param return_json: return the response parsed as JSON (default True)
    :param header: request headers
    :param host: target environment/host, passed via **kwargs
    :return: JSON-parsed response by default; raw response when return_json=False
    '''
    name = "极运营/班主任机制/学员管理/查询学员明细"
    url = f"/service-profile/student/administration/classes"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
|
# coding=utf-8
import os
import sys
import unittest
from time import sleep
from selenium import webdriver
sys.path.append(os.environ.get('PY_DEV_HOME'))
from webTest_pro.common.initData import init
from webTest_pro.common.model.baseActionAdd import user_login, add_model
from webTest_pro.common.model.baseActionModify import update_model
from webTest_pro.common.model.baseActionSearch import search_model
from webTest_pro.common.model.baseActionDel import del_model
reload(sys)
sys.setdefaultencoding("utf-8")
# Test fixture: one column record used by the add/update/delete test cases.
modelData = [{
    "addname":u"测试名称",
    "addurl":u"测试地址",
    "addremark":u"测试描述",
    'addnId': u'在线课堂'
}]
class model(unittest.TestCase):
    '''Column management test suite: add, update and delete a column.

    Python 2 / Selenium.  Method names are alphabetically ordered
    (add / bupdate / cdel) so unittest runs them in CRUD order.
    '''
    def setUp(self):
        # Start a local Chrome or connect to a remote Selenium grid,
        # depending on the configured execution environment.
        if init.execEnv['execType'] == 'local':
            print "\n", "=" * 20, "local exec testcase", "=" * 19
            self.driver = webdriver.Chrome()
            self.driver.implicitly_wait(8)
            self.verificationErrors = []
            self.accept_next_alert = True
            print "start model..."
        else:
            print "\n", "=" * 20, "remote exec testcase", "=" * 18
            browser = webdriver.DesiredCapabilities.CHROME
            self.driver = webdriver.Remote(command_executor=init.execEnv['remoteUrl'], desired_capabilities=browser)
            self.driver.implicitly_wait(8)
            self.verificationErrors = []
            self.accept_next_alert = True
            print "start model..."
    def tearDown(self):
        # Quit the browser and fail the test if any soft errors were recorded.
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)
        print "model end!"
        print "=" * 60
    def test_add_model(self):
        '''Add each fixture column, then verify it can be found.'''
        print "exec:test_add_model..."
        driver = self.driver
        user_login(driver, **init.loginInfo)
        for itme in modelData:
            add_model(driver, **itme)
            search_model(driver, **itme)
        print "exec:test_add_model success."
    def test_bupdate_model(self):
        '''Update the column added by test_add_model.'''
        print "exec:test_bupdate_model..."
        driver = self.driver
        user_login(driver, **init.loginInfo)
        for itme in modelData:
            update_model(driver, **itme)
        print "exec:test_bupdate_model success."
    def test_cdel_model(self):
        '''Delete the column (runs last due to alphabetical ordering).'''
        print "exec:test_cdel_model..."
        driver = self.driver
        user_login(driver, **init.loginInfo)
        for itme in modelData:
            del_model(driver, **itme)
        print "exec:test_cdel_model success."
if __name__ == '__main__':
unittest.main()
# driver = webdriver.Chrome()
# user_login(driver, **init.loginInfo)
#
# for itme in announcementData:
# search_announcement(driver, **itme)
|
# Optional: debug mode
# NOTE(review): DEBUG must be False in production — confirm this settings
# module is not used for a deployed environment.
DEBUG = True
TEMPLATE_DEBUG = True
# Location of routes (main app.py file)
ROOT_URLCONF = 'app'
# Secret key is required by Django
# NOTE(review): a secret key committed to source control is compromised;
# load it from the environment for any real deployment.
SECRET_KEY = 'r*ll9mlx=d)cko4gp03ms%+tmq51+dlyo06gl2$xbt$w=7$=_8'
|
import tkinter as tk
from tkinter import *
from tkinter import ttk
import cfg_common
import cls_CalibPH
LARGE_FONT= ("Verdana", 12)
class PageAnalogProbes(tk.Frame):
    """Settings page for the MCP3008 analog probes.

    Shows a save button plus one ChannelWidget per ADC channel (0-7),
    all inside a labelled frame for the converter.
    """
    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        self.parent = parent
        self.controller = controller
        label = tk.Label(self, text="Analog Probes", font=LARGE_FONT)
        label.grid(row=0, column=0, sticky=W)
        # save button
        #self.saveimg=PhotoImage(file="images/save-blue-24.png")
        self.saveimg=PhotoImage(file="images/upload-to-cloud-24.png")
        self.btn_save = Button(self, text="Save", image=self.saveimg,
            compound='left', relief=RAISED, command=self.saveChanges)
        self.btn_save.grid(row=1, column=0, sticky=W)
        # mcp3008 analog to digital converter
        self.adcframe = LabelFrame(self, text="MCP3008 10-bit Analog to Digital Converter", relief=GROOVE)
        self.adcframe.grid(row=2, column=0, pady=10, sticky=W)
        # create the channel widgets
        self.channelDict = {}
        for i in range(0,8):
            self.channelDict[i] = ChannelWidget(self.adcframe, self, i)
    def saveChanges(self):
        """Persist every channel's settings to the server."""
        for i in range(0,8):
            self.channelDict[i].saveChanges()
class ChannelWidget(tk.Frame):
def __init__(self, parent, controller, channelID):
tk.Frame.__init__(self, parent)
self.channelID = channelID
self.controller = controller
self.parent = parent
# channel frame
channelText = "Channel " + str(channelID)
self.adcframe = LabelFrame(parent, relief=GROOVE, text=channelText)
if channelID == 0 or channelID == 1:
if channelID == 0:
self.adcframe.grid(row=0, column=0, padx=10, pady=10)
else:
self.adcframe.grid(row=0, column=1, padx=10, pady=10)
elif channelID == 2 or channelID == 3:
if channelID == 2:
self.adcframe.grid(row=1, column=0, padx=10, pady=10)
else:
self.adcframe.grid(row=1, column=1, padx=10, pady=10)
elif channelID == 4 or channelID == 5:
if channelID == 4:
self.adcframe.grid(row=2, column=0, padx=10, pady=10)
else:
self.adcframe.grid(row=2, column=1, padx=10, pady=10)
elif channelID == 6 or channelID == 7:
if channelID == 6:
self.adcframe.grid(row=3, column=0, padx=10, pady=10)
else:
self.adcframe.grid(row=3, column=1, padx=10, pady=10)
# channel name
self.lbl_name = Label(self.adcframe,text="Name:")
self.lbl_name.grid(row=0, column=0, sticky=E)
self.txt_name = Entry(self.adcframe)
self.txt_name.grid(row=0, column=1, columnspan=2)
# channel sensor type drown down list
self.sensortype = StringVar()
self.sensortypelist = ["pH","salinity","raw"]
self.sensortype.set("pH") # default value
self.lbl_sensortype = Label(self.adcframe,text="Sensor Type:")
self.lbl_sensortype.grid(row=1, column=0, sticky=E)
self.sensortypemenu = OptionMenu(self.adcframe,self.sensortype,*self.sensortypelist)
self.sensortypemenu.configure(indicatoron=True, relief=GROOVE)
self.sensortypemenu.grid(row=1, column=1)
# channel calibrate button
self.btn_calibrate = Button(self.adcframe, text="Calibrate", relief=RAISED, command=lambda:self.calibrateSensor(parent))
self.btn_calibrate.grid(row=1, column=2)
# channel enable checkbox
self.Enabled = IntVar()
self.chk_enable = Checkbutton(self.adcframe,text="Enable",
variable=self.Enabled, command=self.enableControls)
self.chk_enable.grid(row=2, column=0, sticky=E)
# get configuration from server
self.getConfig(controller.controller, self.channelID)
# enable/disable controls
self.enableControls()
def enableControls(self):
# channel
if self.Enabled.get() == True:
self.lbl_name.config(state='normal')
self.txt_name.config(state='normal')
self.lbl_sensortype.config(state='normal')
self.sensortypemenu.config(state='normal')
self.btn_calibrate.config(state='normal')
else:
self.lbl_name.config(state='disabled')
self.txt_name.config(state='disabled')
self.lbl_sensortype.config(state='disabled')
self.sensortypemenu.config(state='disabled')
self.btn_calibrate.config(state='disabled')
def calibrateSensor(self, master):
    """Open the calibration dialog for the current sensor type, or warn when
    the selected type ('raw' / 'salinity') has no calibration procedure."""
    sensor = str(self.sensortype.get())
    if sensor in ("raw", "salinity"):
        # Same warning text as for any uncalibratable type, parameterised by name.
        tk.messagebox.showwarning(
            "Calibration",
            "Calibration unavailable for sensor type '" + sensor +
            "' on channel " + str(self.channelID))
    if sensor == "pH":
        # Modal 3-point calibration dialog; blocks until closed.
        strtitle = "3 Point PH Calibration"
        d = Dialog(master, self, self.channelID, str(self.txt_name.get()), title = strtitle)
def getConfig(self, controller, channelID):
    """Populate the channel's widgets from the saved mcp3008 settings.

    Settings keys are "ch<N>_name", "ch<N>_enabled" and "ch<N>_type";
    the second argument to downloadsettings is the per-key default.
    """
    prefix = "ch" + str(channelID) + "_"
    # Channel name -> entry widget
    name = controller.downloadsettings("mcp3008", prefix + "name", "Unnamed")
    self.txt_name.delete(0, END)
    self.txt_name.insert(0, name)
    # Enabled flag is persisted as the string "True"/"False"
    enabled = controller.downloadsettings("mcp3008", prefix + "enabled", "False")
    if str(enabled) == "True":
        self.chk_enable.select()
    else:
        self.chk_enable.deselect()
    # Sensor type -> option menu variable
    self.sensortype.set(controller.downloadsettings("mcp3008", prefix + "type", "raw"))
def saveChanges(self):
    """Persist this channel's widget values to the mcp3008 settings store."""
    # Enabled flag is stored as the string "True"/"False"
    chkstate = "True" if self.Enabled.get() else "False"
    prefix = "ch" + str(self.channelID) + "_"
    pairs = (
        ("name", str(self.txt_name.get())),
        ("enabled", str(chkstate)),
        ("type", str(self.sensortype.get())),
    )
    for suffix, value in pairs:
        self.controller.controller.uploadsettings('mcp3008', prefix + suffix, value)
class Dialog(Toplevel):
    """Modal top-level window hosting a cls_CalibPH pH-calibration panel.

    The constructor grabs all input and blocks in wait_window(), so
    ``Dialog(...)`` does not return until the dialog is closed.
    """
    def __init__(self, parent, controller, channelnum, channelname, title = None):
        Toplevel.__init__(self, parent)
        # Keep the dialog associated with (and stacked above) its parent window.
        self.transient(parent)
        self.controller = controller
        if title:
            self.title(title)
        self.parent = parent
        self.result = None
        self.channelnum = channelnum
        self.channelname = channelname
        body = Frame(self)
        # body() returns the widget that should receive initial focus
        # (the current implementation returns None, so focus falls back to self).
        self.initial_focus = self.body(body)
        body.pack(padx=5, pady=5)
        self.buttonbox()
        # Make the dialog modal: route all application input here.
        self.grab_set()
        if not self.initial_focus:
            self.initial_focus = self
        # Closing via the window manager behaves like pressing Cancel.
        self.protocol("WM_DELETE_WINDOW", self.cancel)
        # Offset the dialog slightly from the parent's top-left corner.
        self.geometry("+%d+%d" % (parent.winfo_rootx()+50,
                      parent.winfo_rooty()+50))
        self.initial_focus.focus_set()
        # Block here until this window is destroyed.
        self.wait_window(self)
    #
    # construction hooks
    def body(self, master):
        """Create the dialog body (a pH calibration panel) inside *master*.

        Should return the widget that gets initial focus; override to change
        the dialog content.
        """
        self.CalibPH = cls_CalibPH.CalibPH(master, self, self.channelnum, self.channelname)
        self.CalibPH.pack()
        pass
    def buttonbox(self):
        """Add the standard Save/Cancel button row; override to customize."""
        box = Frame(self)
        w = Button(box, text="Save", width=10, command=self.ok, default=ACTIVE)
        w.pack(side=LEFT, padx=5, pady=5)
        w = Button(box, text="Cancel", width=10, command=self.cancel)
        w.pack(side=LEFT, padx=5, pady=5)
        #self.bind("<Return>", self.ok)
        self.bind("<Escape>", self.cancel)
        box.pack()
    #
    # standard button semantics
    def ok(self, event=None):
        """Save handler: validate, hide the window, then apply()."""
        if not self.validate():
            self.initial_focus.focus_set() # put focus back
            return
        #self.outlet.saveOutlet()
        self.withdraw()
        self.update_idletasks()
        # apply() also destroys the window (see below).
        self.apply()
        #self.cancel()
    def cancel(self, event=None):
        """Cancel handler (Escape key and window close): confirm, then destroy."""
        #if tk.messagebox.askyesno("Calibration", "Unsaved changes will be lost. Close anyway?", parent=self.CalibPH):
        if tk.messagebox.askyesno("Calibration", "Unsaved changes will be lost. Close anyway?", parent=self):
            # put focus back to the parent window
            self.parent.focus_set()
            self.destroy()
    #
    # command hooks
    def validate(self):
        """Return truthy when the dialog contents are acceptable; override."""
        return 1 # override
    def apply(self):
        """Commit the dialog result; default just restores focus and closes."""
        # put focus back to the parent window
        self.parent.focus_set()
        self.destroy()
        pass # override
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# Importing packages
import pandas as pd
import numpy as np
import seaborn as sns
import textstat as ts
from nltk.corpus import stopwords
from textblob import Word
from textblob import TextBlob
stop = stopwords.words('english')
# In[2]:
#Importing Raw Data (review and restaurant CSVs, Windows-1252 encoded)
reviews_df = pd.read_csv('D:\\NCI Notes\\Thesis\\Data\\Reviews.csv', encoding = 'cp1252')
restaurant_df = pd.read_csv('D:\\NCI Notes\\Thesis\\Data\\Restaurant.csv', encoding = 'cp1252')
# In[3]:
#Checking for Missing values - Reviews Dataset
column_names = reviews_df.columns
print(column_names) #Column names
#FIX: np.product was deprecated and removed in NumPy 2.0; np.prod is the
#supported name and returns the same value.
totalCells = np.prod(reviews_df.shape) #Total number of cells in the dataframe
missingCount = reviews_df.isnull().sum() #Missing values per column
totalMissing = missingCount.sum() #Total number of missing values
print("The Reviews dataset contains", round(((totalMissing/totalCells) * 100), 2), "%", "missing values.") #Percentage of missing values
# In[4]:
#Blank cells count as missing too: convert ' ' to NaN, then drop NaN rows
reviews_df.replace(' ',np.nan, inplace = True)
reviews_df = reviews_df.dropna()
# In[5]:
#Deleting rows flagged as NR and YR (keep only the two classes modelled below)
reviews_df = reviews_df[reviews_df.flagged != 'NR']
reviews_df = reviews_df[reviews_df.flagged != 'YR']
# In[6]:
#Encode the remaining 'flagged' labels into an integer target column
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
reviews_df['flagged_code'] = le.fit_transform(reviews_df['flagged'])
# In[7]:
#Unique Id column taken from the row index
reviews_df['ReviewID'] = reviews_df.index
#Adding review content wordcount feature (whitespace-split token count)
reviews_df['WordCount_Review'] = reviews_df['reviewContent'].apply(lambda comment: len(comment.split()))
#Drop columns not used downstream
reviews_df = reviews_df.drop(columns=['date','reviewID','coolCount','funnyCount'], axis = 1)
#Renaming Columns
reviews_df.rename(columns={'rating':'ReviewRating','usefulCount':'UsefulCount_Review'}, inplace = True)
# In[8]:
#Renaming Columns - Restaurant dataset
restaurant_df.rename(columns={'reviewCount':'TotalReviewCountofRestaurant','filReviewCount':'FakeReviewCountRestaurant','rating':'AggRestaurantRating'}, inplace = True)
rest_df = restaurant_df[['restaurantID','TotalReviewCountofRestaurant','FakeReviewCountRestaurant','AggRestaurantRating']]
# In[9]:
#Inner-join reviews with their restaurant-level aggregates and export
combined_df = pd.merge(reviews_df,rest_df, how = 'inner', on = 'restaurantID')
combined_df = pd.DataFrame(combined_df)
combined_df.to_csv(r'D:\\NCI Notes\\Thesis\\Data\\FinalMergedDataset.csv')
# In[10]:
combined_df.head()
# In[11]:
combined_df.info()
# In[12]:
combined_df.describe()
# In[13]:
#Text preprocessing and new feature addition
#Remove digits from the raw review text
combined_df['reviewContentNew'] = combined_df['reviewContent'].apply(lambda x: ''.join([i for i in x if not i.isdigit()]))
#Remove punctuation.
#FIX: pass regex=True explicitly - since pandas 2.0 str.replace defaults to
#regex=False, which would look for the literal text '[^\w\s]' instead of
#stripping punctuation; the raw string also avoids an invalid-escape warning.
combined_df['reviewContentNew'] = combined_df['reviewContentNew'].str.replace(r'[^\w\s]', '', regex=True)
#Lowercasing every token
combined_df['reviewContentNew'] = combined_df['reviewContentNew'].apply(lambda x: " ".join(x.lower() for x in x.split()))
#Stopword count (computed before the stopwords are removed)
combined_df['stopwordsCount'] = combined_df['reviewContentNew'].apply(lambda x: len([x for x in x.split() if x in stop]))
#Removing stopwords
combined_df['reviewContentNew'] = combined_df['reviewContentNew'].apply(lambda x: " ".join(x for x in x.split() if x not in stop))
#Lemmatizing each remaining token (TextBlob Word)
combined_df['reviewContentNew'] = combined_df['reviewContentNew'].apply(lambda x: " ".join([Word(word).lemmatize() for word in x.split()]))
# In[14]:
combined_df['reviewContentNew'].head()
# In[15]:
#Adding new numeric features - Feature Engineering
#Rating features and text-based statistics
import re
#Absolute deviation of the review's star rating from the restaurant aggregate
combined_df['DeviationfromAggRating'] = abs(combined_df['AggRestaurantRating'] - combined_df['ReviewRating'])
#Character count
combined_df['charCount'] = combined_df['reviewContent'].apply(len)
#Uppercase letter count
combined_df['uppercaseCount'] = combined_df['reviewContent'].apply(lambda comment: sum(1 for c in comment if c.isupper()))
#Special character count.
#BUG FIX: the original summed comment.count(w) for each character of the
#literal string '[\w]+' (i.e. it counted occurrences of '[', '\\', 'w', ']'
#and '+'); count non-word, non-whitespace characters instead.
combined_df['specialCharCount'] = combined_df['reviewContent'].apply(lambda comment: len(re.findall(r'[^\w\s]', comment)))
#Sentence count (textstat)
combined_df['sentenceCount'] = combined_df['reviewContent'].apply(ts.sentence_count)
# In[16]:
#Readability features (textstat) computed on the raw review text;
#grade-level scores estimate how much education is needed to read the text.
combined_df['fleschReadingEase'] = combined_df['reviewContent'].apply(ts.flesch_reading_ease)
combined_df['fleschKincaidGrade'] = combined_df['reviewContent'].apply(ts.flesch_kincaid_grade)
combined_df['fogScale'] = combined_df['reviewContent'].apply(ts.gunning_fog)
combined_df['smogScore'] = combined_df['reviewContent'].apply(ts.smog_index)
combined_df['ARI'] = combined_df['reviewContent'].apply(ts.automated_readability_index)
combined_df['CLI'] = combined_df['reviewContent'].apply(ts.coleman_liau_index)
combined_df['linsearWrite'] = combined_df['reviewContent'].apply(ts.linsear_write_formula)
combined_df['daleChallScore'] = combined_df['reviewContent'].apply(ts.dale_chall_readability_score)
# In[17]:
#Sentiment Score
#NOTE(review): this cell is commented out, so no 'sentimentScore' column is
#ever created - yet later cells drop 'sentimentScore' and will raise KeyError
#unless this is re-enabled or the drops are made tolerant.
#combined_df['sentimentScore'] = combined_df['reviewContent'].apply(lambda x: TextBlob(x).sentiment[0])#SentimentScore-polarity
# In[18]:
#POS Tagging and adding its counts as features
from nltk import word_tokenize, pos_tag
def _count_pos(text, tag_prefix):
    """Count tokens in *text* whose Penn Treebank POS tag starts with *tag_prefix*."""
    return sum(1 for word, pos in pos_tag(word_tokenize(text)) if pos.startswith(tag_prefix))

def count_noun(text):
    """Number of noun tokens (NN*) in *text*."""
    return _count_pos(text, 'NN')

def count_verb(text):
    """Number of verb tokens (VB*) in *text*."""
    return _count_pos(text, 'VB')

def count_adjective(text):
    """Number of adjective tokens (JJ*) in *text*."""
    return _count_pos(text, 'JJ')

def count_adverb(text):
    """Number of adverb tokens (RB*) in *text*."""
    return _count_pos(text, 'RB')

#POS-count features computed on the raw (unprocessed) review text
combined_df['nounCount'] = combined_df['reviewContent'].apply(count_noun)
combined_df['verbCount'] = combined_df['reviewContent'].apply(count_verb)
combined_df['adjectiveCount'] = combined_df['reviewContent'].apply(count_adjective)
combined_df['adverbCount'] = combined_df['reviewContent'].apply(count_adverb)
# In[19]:
combined_df.info()
# In[20]:
#Exporting the PreProcessed Data
combined_df.to_csv(r'D:\\NCI Notes\\Thesis\\Data\\CombinedPreProcessedDataset.csv')
# In[21]:
#Defining functions for Report, Cross Validation
def model_report(y_act, y_pred):
    """Print a full binary-classification report for predictions *y_pred*
    against ground truth *y_act* (confusion matrix, accuracy, precision,
    recall, F1, AUC, Cohen's kappa, error rate) and show the ROC curve."""
    from sklearn.metrics import (confusion_matrix, accuracy_score,
                                 cohen_kappa_score, precision_score,
                                 recall_score, f1_score, roc_curve, auc)
    import matplotlib.pyplot as plt
    accuracy = accuracy_score(y_act, y_pred)
    print("Confusion Matrix: ")
    print(confusion_matrix(y_act, y_pred))
    print("Accuracy = ", accuracy)
    print("Precision = ", precision_score(y_act, y_pred))
    print("Recall = ", recall_score(y_act, y_pred))
    print("F1 Score = ", f1_score(y_act, y_pred))
    fpr, tpr, _ = roc_curve(y_act, y_pred)
    print("AUC Score =", auc(fpr, tpr))
    print("Kappa score = ", cohen_kappa_score(y_act, y_pred))
    print("Error rate = ", 1 - accuracy)
    print("AUC-ROC Curve: ")
    # Diagonal = no-skill reference line; dotted curve = this model.
    plt.plot([0, 1], [0, 1], linestyle='--')
    plt.plot(fpr, tpr, marker='.')
    plt.show()
def cross_validation_report(result):
    """Print the mean and spread of a cross-validation score array.

    NOTE(review): the label says "Variance" but result.std() (the standard
    deviation) is what is printed.
    """
    mean_score = result.mean()
    spread = result.std()
    print("Mean accuracy: ", mean_score)
    print("Variance: ", spread)
def KFold_Cross_Validation(classifier, n):
    """Run n-fold cross-validated accuracy for *classifier* on the
    module-level X_train/y_train and print a summary via
    cross_validation_report."""
    from sklearn.model_selection import cross_val_score
    scores = cross_val_score(estimator=classifier, X=X_train, y=y_train,
                             cv=n, scoring='accuracy')
    cross_validation_report(scores)
# In[22]:
#Checking class balance in the dataset
combined_df['flagged'].value_counts()
# In[23]:
combined_df['flagged_code'].value_counts()
# In[24]:
import seaborn as sns
sns.countplot(x = 'flagged', data = combined_df)
# In[25]:
#Handling Class Imbalance
#Random Undersampling for ngrams model
min_class_length = len(combined_df[combined_df['flagged_code'] == 1])
maj_class_indices = combined_df[combined_df['flagged_code'] == 0].index
random_maj_class_indices = np.random.choice(maj_class_indices,min_class_length,replace = False)
min_class_indices = combined_df[combined_df['flagged_code'] == 1].index
random_usamp_indices = np.concatenate([min_class_indices,random_maj_class_indices])
text_df = combined_df.loc[random_usamp_indices]
# In[26]:
text_df.info()
# In[27]:
import seaborn as sns
sns.countplot(x = 'flagged', data = text_df)
# In[28]:
#Export Random Undersampled Dataset to csv
text_df.to_csv(r'D:\\NCI Notes\\Thesis\\Data\\UndersampledDataset.csv')
# In[29]:
#Handling Class Imbalance
#Up-sampling using SMOTE for only numeric features model(Rating Features and Text Based Features)
numeric_df = combined_df.drop(columns=['reviewerID','reviewContent','flagged','ReviewID','restaurantID','reviewContentNew'])
X = numeric_df.drop(columns=['flagged_code','fleschReadingEase',
'fleschKincaidGrade', 'fogScale', 'smogScore', 'ARI', 'CLI',
'linsearWrite', 'daleChallScore'])
X = X.iloc[:,:].values
y = numeric_df.iloc[:,numeric_df.columns.get_loc('flagged_code')].values
# In[30]:
from imblearn.over_sampling import SMOTE
sm = SMOTE()
#FIX: fit_sample() was removed in imbalanced-learn 0.8; fit_resample()
#(available since 0.4) is the supported name and returns the same X, y.
X, y = sm.fit_resample(X, y)
# In[31]:
#Class instances after upsampling using SMOTE
print("Classes after SMOTE : ")
print("Count of label '1' = {}".format(sum(y==1)))
print("Count of label '0' = {}".format(sum(y==0)))
# In[32]:
#Performing feature selection on numeric features using BorutaPy
from sklearn.ensemble import RandomForestClassifier
from boruta import BorutaPy
rfc = RandomForestClassifier(n_jobs=-1, class_weight=None, max_depth=7, random_state=0)
feat_selector = BorutaPy(rfc, n_estimators='auto',verbose=2, random_state=0)
feat_selector.fit(X,y)
# In[33]:
#Updating feature set for training with selected features
X = X[:,feat_selector.support_]
# In[34]:
#Split Dataset for train and test
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.20,random_state = 123)
# In[35]:
#Gaussian Naive Bayes
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(X_train,y_train)
pred_y = gnb.predict(X_test)
model_report(y_test,pred_y)
KFold_Cross_Validation(gnb,10)
# In[36]:
#XGBoost without parameter Tuning
import xgboost as xgb
xgbc = xgb.XGBClassifier()
xgbc.fit(X_train,y_train)
pred_y = xgbc.predict(X_test)
model_report(y_test,pred_y)
KFold_Cross_Validation(xgbc,10)
# In[37]:
#Hyperparameter Tuning for XGBoost
from sklearn.model_selection import RandomizedSearchCV
parameter = {
'max_depth' : np.arange(3,10,1),
'n_estimators' : range(1,100,1),
'learning_rate' : [0.05, 0.10, 0.15, 0.20, 0.25, 0.30,0.50,0.60],
'eta' : range(1,100,1),
'subsample' : np.arange(0.5,1,0.01),
'min_child_weight': range(1,6,1),
'gamma' : [i/10.0 for i in range(0,5)]
}
rs = RandomizedSearchCV(
estimator = xgbc,
param_distributions = parameter,
n_iter = 20,
scoring ='accuracy',
n_jobs=4,
verbose=10,
random_state=10
)
rs.fit(X_train,y_train)
# In[38]:
rs.cv_results_
# In[39]:
print("Best accuracy Obtained: {0}".format(rs.best_score_))
print("Best Parameters: ")
for key, value in rs.best_params_.items():
print("\t{}:{}".format(key, value))
# In[40]:
#XGBoost after Parameter tuning
import xgboost as xgb
xgbc = xgb.XGBClassifier(subsample = 0.8500000000000003,
n_estimators = 74,
min_child_weight = 2,
max_depth = 8,
learning_rate = 0.3,
gamma = 0.1,
eta = 54)
xgbc.fit(X_train,y_train)
pred_y = xgbc.predict(X_test)
model_report(y_test,pred_y)
KFold_Cross_Validation(xgbc,10)
# In[41]:
#AdaBoost without Parameter tuning
from sklearn.ensemble import AdaBoostClassifier
abc = AdaBoostClassifier()
abc.fit(X_train,y_train)
pred_y = abc.predict(X_test)
model_report(y_test,pred_y)
KFold_Cross_Validation(abc,10)
# In[42]:
#Hyperparameter Tuning for AdaBoost
parameter = {
'n_estimators' : range(1,100,1),
'learning_rate' : [0.05, 0.10, 0.15, 0.20, 0.25, 0.30,0.50,0.60,0.70,0.80,0.90,1.0]
}
rs = RandomizedSearchCV(
estimator = abc,
param_distributions = parameter,
n_iter = 20,
scoring ='accuracy',
n_jobs=4,
verbose=10,
random_state=10
)
rs.fit(X_train,y_train)
# In[43]:
rs.cv_results_
# In[44]:
print("Best accuracy Obtained: {0}".format(rs.best_score_))
print("Best Parameters:")
for key, value in rs.best_params_.items():
print("\t{}:{}".format(key, value))
# In[45]:
#After parameter Tuning - AdaBoost
from sklearn.ensemble import AdaBoostClassifier
abc = AdaBoostClassifier(n_estimators = 72,
learning_rate = 0.9)
abc.fit(X_train,y_train)
pred_y = abc.predict(X_test)
model_report(y_test,pred_y)
KFold_Cross_Validation(abc,10)
# In[46]:
#Gradient Boosting Machine
from sklearn.ensemble import GradientBoostingClassifier
gbc = GradientBoostingClassifier()
gbc.fit(X_train,y_train)
pred_y = gbc.predict(X_test)
model_report(y_test,pred_y)
KFold_Cross_Validation(gbc,10)
# In[47]:
#HyperParameter Tuning for Gradient Boosting Machine
parameter = {
'n_estimators' : range(1,100,1),
'learning_rate' : [0.05, 0.10, 0.15, 0.20, 0.25, 0.30,0.50,0.60,0.70,0.80,0.90,1.0],
'subsample' : np.arange(0.5,1,0.01),
'min_samples_split' : range(50,500,50),
'min_samples_leaf' : range(10,400,20),
'max_depth' : np.arange(3,10,1)
}
rs = RandomizedSearchCV(
estimator = gbc,
param_distributions = parameter,
n_iter = 20,
scoring ='accuracy',
n_jobs=4,
verbose=10,
random_state=10
)
rs.fit(X_train,y_train)
# In[48]:
rs.cv_results_
# In[49]:
print("Best accuracy Obtained: {0}".format(rs.best_score_))
print("Parameters")
for key, value in rs.best_params_.items():
print("\t{}:{}".format(key, value))
# In[50]:
#After parameter Tuning Gradient Boosting Machine
from sklearn.ensemble import GradientBoostingClassifier
gbc = GradientBoostingClassifier(subsample = 0.7900000000000003,
n_estimators = 79,
min_samples_split = 300,
min_samples_leaf = 10,
max_depth = 8,
learning_rate = 0.15)
gbc.fit(X_train,y_train)
pred_y = gbc.predict(X_test)
model_report(y_test,pred_y)
KFold_Cross_Validation(gbc,10)
# In[51]:
#Adding Readability features in the feature set
X = combined_df.drop(columns=['reviewerID','reviewContent','flagged','flagged_code','ReviewID','restaurantID',
'reviewContentNew'])
X = X.iloc[:,:].values
y = combined_df.iloc[:,combined_df.columns.get_loc('flagged_code')].values
# In[52]:
#Class instances after upsampling using SMOTE
print("Classes before SMOTE : ")
print("Count of label '1' = {}".format(sum(y==1)))
print("Count of label '0' = {}".format(sum(y==0)))
# In[53]:
#Handling Class imbalance using SMOTE
from imblearn.over_sampling import SMOTE
sm = SMOTE()
#FIX: fit_sample() was removed in imbalanced-learn 0.8; use fit_resample().
X, y = sm.fit_resample(X, y)
# In[54]:
#Class instances after upsampling using SMOTE
print("Classes after SMOTE : ")
print("Count of label '1' = {}".format(sum(y==1)))
print("Count of label '0' = {}".format(sum(y==0)))
# In[55]:
#Feature Selection using Boruta Py
from sklearn.ensemble import RandomForestClassifier
from boruta import BorutaPy
rfc = RandomForestClassifier(n_jobs=-1, class_weight=None, max_depth=7, random_state=0)
feat_selector = BorutaPy(rfc, n_estimators='auto',verbose=2, random_state=0)
feat_selector.fit(X,y)
# In[56]:
X = X[:,feat_selector.support_]
# In[57]:
#Split Dataset for train and test
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.20,random_state = 123)
# In[58]:
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(X_train,y_train)
pred_y = gnb.predict(X_test)
model_report(y_test,pred_y)
KFold_Cross_Validation(gnb,10)
# In[59]:
#XGBoost before Hyperparameter Tuning
import xgboost as xgb
xgbc = xgb.XGBClassifier()
xgbc.fit(X_train,y_train)
pred_y = xgbc.predict(X_test)
model_report(y_test,pred_y)
KFold_Cross_Validation(xgbc,10)
# In[60]:
#Parameter Tuning
from sklearn.model_selection import RandomizedSearchCV
parameter = {
'max_depth' : np.arange(3,10,1),
'n_estimators' : range(1,100,1),
'learning_rate' : [0.05, 0.10, 0.15, 0.20, 0.25, 0.30,0.50,0.60],
'eta' : range(1,100,1),
'subsample' : np.arange(0.5,1,0.01),
'min_child_weight': range(1,6,1),
'gamma' : [i/10.0 for i in range(0,5)]
}
rs = RandomizedSearchCV(
estimator = xgbc,
param_distributions = parameter,
n_iter = 20,
scoring ='accuracy',
n_jobs=4,
verbose=10,
random_state=10
)
rs.fit(X_train,y_train)
# In[61]:
rs.cv_results_
# In[62]:
print("Best accuracy Obtained: {0}".format(rs.best_score_))
print("Parameters")
for key, value in rs.best_params_.items():
print("\t{}:{}".format(key, value))
# In[63]:
#XGBoost after Hyperparameter Tuning
import xgboost as xgb
xgbc = xgb.XGBClassifier(subsample = 0.8500000000000003,
n_estimators = 74,
min_child_weight = 2,
max_depth = 8,
learning_rate = 0.3,
gamma = 0.1,
eta = 54)
xgbc.fit(X_train,y_train)
pred_y = xgbc.predict(X_test)
model_report(y_test,pred_y)
KFold_Cross_Validation(xgbc,10)
# In[64]:
#AdaBoost without parameter Tuning
from sklearn.ensemble import AdaBoostClassifier
abc = AdaBoostClassifier()
abc.fit(X_train,y_train)
pred_y = abc.predict(X_test)
model_report(y_test,pred_y)
KFold_Cross_Validation(abc,10)
# In[65]:
#Parameter Tuning - AdaBoost
parameter = {
'n_estimators' : range(1,100,1),
'learning_rate' : [0.05, 0.10, 0.15, 0.20, 0.25, 0.30,0.50,0.60,0.70,0.80,0.90,1.0]
}
rs = RandomizedSearchCV(
estimator = abc,
param_distributions = parameter,
n_iter = 20,
scoring ='accuracy',
n_jobs=4,
verbose=10,
random_state=10
)
rs.fit(X_train,y_train)
# In[66]:
rs.cv_results_
# In[67]:
print("Best accuracy Obtained: {0}".format(rs.best_score_))
print("Parameters")
for key, value in rs.best_params_.items():
print("\t{}:{}".format(key, value))
# In[68]:
#AdaBoost after tuning
from sklearn.ensemble import AdaBoostClassifier
abc = AdaBoostClassifier(n_estimators = 72,
learning_rate = 0.9)
abc.fit(X_train,y_train)
pred_y = abc.predict(X_test)
model_report(y_test,pred_y)
KFold_Cross_Validation(abc,10)
# In[69]:
#Gradient Boosting Machine without parameter tuning
from sklearn.ensemble import GradientBoostingClassifier
gbc = GradientBoostingClassifier()
gbc.fit(X_train,y_train)
pred_y = gbc.predict(X_test)
model_report(y_test,pred_y)
KFold_Cross_Validation(gbc,10)
# In[70]:
#Parameter Tuning for Gradient Boosting Machine
parameter = {
'n_estimators' : range(1,100,1),
'learning_rate' : [0.05, 0.10, 0.15, 0.20, 0.25, 0.30,0.50,0.60,0.70,0.80,0.90,1.0],
'subsample' : np.arange(0.5,1,0.01),
'min_samples_split' : range(50,500,50),
'min_samples_leaf' : range(10,400,20),
'max_depth' : np.arange(3,10,1)
}
rs = RandomizedSearchCV(
estimator = gbc,
param_distributions = parameter,
n_iter = 20,
scoring ='accuracy',
n_jobs=4,
verbose=10,
random_state=10
)
rs.fit(X_train,y_train)
# In[71]:
rs.cv_results_
# In[72]:
print("Best accuracy Obtained: {0}".format(rs.best_score_))
print("Parameters")
for key, value in rs.best_params_.items():
print("\t{}:{}".format(key, value))
# In[73]:
#Gradient Boosting after Parameter Tuning
from sklearn.ensemble import GradientBoostingClassifier
gbc = GradientBoostingClassifier( subsample = 0.7900000000000003,
n_estimators = 79,
min_samples_split = 300,
min_samples_leaf = 10,
max_depth = 8,
learning_rate = 0.15)
gbc.fit(X_train,y_train)
pred_y = gbc.predict(X_test)
model_report(y_test,pred_y)
KFold_Cross_Validation(gbc,10)
# In[124]:
#Bi-Grams TFIDF and numeric features Model
from sklearn.feature_extraction.text import TfidfVectorizer
tv = TfidfVectorizer(ngram_range=(2,2), analyzer='word', min_df = 5)
#FIX: fit_transform() both fits the vocabulary and builds the matrix; the
#original's separate tv.fit(...) call fitted the same corpus twice for nothing.
tfidf_bigram_df = tv.fit_transform(text_df['reviewContentNew'])
tfidf_bigram_df = tfidf_bigram_df.toarray()
#FIX: get_feature_names() was removed in scikit-learn 1.2; use get_feature_names_out().
tfidf_bigram_df = pd.DataFrame(tfidf_bigram_df, columns=tv.get_feature_names_out())
tfidf_bigram_df = tfidf_bigram_df.reset_index(drop=True)
#Align row indices before the column-wise concat
text_df = text_df.reset_index(drop=True)
combined_bigram_df = pd.concat([text_df, tfidf_bigram_df], axis=1)
# In[125]:
combined_bigram_df.head()
# In[126]:
#Dropping unwanted columns.
#BUG FIX: 'sentimentScore' is never created (its creation cell is commented
#out earlier), so dropping it unconditionally raises KeyError; errors='ignore'
#makes the drop tolerant of missing columns.
combined_bigram_df = combined_bigram_df.drop(columns=['reviewerID','reviewContent','flagged','restaurantID','ReviewID',
'reviewContentNew','sentimentScore'], errors='ignore')
# In[131]:
combined_bigram_df = combined_bigram_df.abs()
# In[132]:
#Splitting target varibles and independent variables
X = combined_bigram_df.drop(columns=['flagged_code'])
X = X.iloc[:,:].values
y = combined_bigram_df.iloc[:,combined_bigram_df.columns.get_loc('flagged_code')].values
# In[133]:
X.shape
# In[134]:
y.shape
# In[135]:
#Feature selection by using Chi-Square test
#Chi Square feature selection
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
chi_feature = SelectKBest(chi2, k=4000)
X_KBest = chi_feature.fit_transform(X, y)
# In[136]:
#Splitting dataset into train and test after feature selection
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X_KBest,y,test_size = 0.20,random_state = 123)
# In[137]:
#Gaussian Naive Bayes
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(X_train,y_train)
pred_y = gnb.predict(X_test)
model_report(y_test,pred_y)
KFold_Cross_Validation(gnb,10)
# In[84]:
#XGBoost before HyperParameter tuning
import xgboost as xgb
xgbc = xgb.XGBClassifier()
xgbc.fit(X_train,y_train)
pred_y = xgbc.predict(X_test)
model_report(y_test,pred_y)
KFold_Cross_Validation(xgbc,10)
# In[85]:
#Parameter Tuning for XGBoost
from sklearn.model_selection import RandomizedSearchCV
parameter = {
'max_depth' : np.arange(3,10,1),
'n_estimators' : range(1,100,1),
'learning_rate' : [0.05, 0.10, 0.15, 0.20, 0.25, 0.30,0.50,0.60,0.70,0.80,0.90],
'eta' : range(1,100,1),
'subsample' : np.arange(0.5,1,0.01),
'min_child_weight': range(1,6,1),
'gamma' : [i/10.0 for i in range(0,5)]
}
rs = RandomizedSearchCV(
estimator = xgbc,
param_distributions = parameter,
n_iter = 20,
scoring ='accuracy',
n_jobs=4,
verbose=10,
random_state=10
)
rs.fit(X_train,y_train)
# In[86]:
rs.cv_results_
# In[87]:
print("Best accuracy Obtained: {0}".format(rs.best_score_))
print("Parameters")
for key, value in rs.best_params_.items():
print("\t{}:{}".format(key, value))
# In[88]:
#XGBoost after parameter Tuning
import xgboost as xgb
xgbc = xgb.XGBClassifier(subsample = 0.54,
n_estimators = 43,
min_child_weight = 3,
max_depth = 5,
learning_rate = 0.1,
gamma = 0.1,
eta = 77)
xgbc.fit(X_train,y_train)
pred_y = xgbc.predict(X_test)
model_report(y_test,pred_y)
KFold_Cross_Validation(xgbc,10)
# In[89]:
#AdaBoost before parameter tuning
from sklearn.ensemble import AdaBoostClassifier
abc = AdaBoostClassifier()
abc.fit(X_train,y_train)
pred_y = abc.predict(X_test)
model_report(y_test,pred_y)
KFold_Cross_Validation(abc,10)
# In[90]:
#Parameter Tuning AdaBoost
parameter = {
'n_estimators' : range(1,100,1),
'learning_rate' : [0.05, 0.10, 0.15, 0.20, 0.25, 0.30,0.50,0.60,0.70,0.80,0.90,1.0]
}
rs = RandomizedSearchCV(
estimator = abc,
param_distributions = parameter,
n_iter = 20,
scoring ='accuracy',
n_jobs=4,
verbose=10,
random_state=10
)
rs.fit(X_train,y_train)
# In[91]:
rs.cv_results_
# In[92]:
print("Best accuracy Obtained: {0}".format(rs.best_score_))
print("Parameters")
for key, value in rs.best_params_.items():
print("\t{}:{}".format(key, value))
# In[93]:
#After parameter Tuning - AdaBoost
from sklearn.ensemble import AdaBoostClassifier
abc = AdaBoostClassifier(n_estimators = 77,
learning_rate = 0.3)
abc.fit(X_train,y_train)
pred_y = abc.predict(X_test)
model_report(y_test,pred_y)
KFold_Cross_Validation(abc,10)
# In[94]:
#Gradient Boosting Machine before tuning
from sklearn.ensemble import GradientBoostingClassifier
gbc = GradientBoostingClassifier()
gbc.fit(X_train,y_train)
pred_y = gbc.predict(X_test)
model_report(y_test,pred_y)
KFold_Cross_Validation(gbc,10)
# In[95]:
#Parameter Tuning GradientBoosting
parameter = {
'n_estimators' : range(1,100,1),
'learning_rate' : [0.05, 0.10, 0.15, 0.20, 0.25, 0.30,0.50,0.60,0.70,0.80,0.90,1.0],
'subsample' : np.arange(0.5,1,0.01),
'min_samples_split' : range(50,500,50),
'min_samples_leaf' : range(10,400,20),
'max_depth' : np.arange(3,10,1)
}
rs = RandomizedSearchCV(
estimator = gbc,
param_distributions = parameter,
n_iter = 20,
scoring ='accuracy',
n_jobs=4,
verbose=10,
random_state=10
)
rs.fit(X_train,y_train)
# In[96]:
rs.cv_results_
# In[97]:
print("Best accuracy Obtained: {0}".format(rs.best_score_))
print("Parameters")
for key, value in rs.best_params_.items():
print("\t{}:{}".format(key, value))
# In[98]:
#Gradient Boosting Machine after tuning
from sklearn.ensemble import GradientBoostingClassifier
gbc = GradientBoostingClassifier(subsample = 0.7900000000000003,
n_estimators = 79,
min_samples_split = 300,
min_samples_leaf = 10,
max_depth = 8,
learning_rate = 0.15)
gbc.fit(X_train,y_train)
pred_y = gbc.predict(X_test)
model_report(y_test,pred_y)
KFold_Cross_Validation(gbc,10)
# In[99]:
#TFIDF Trigram model
from sklearn.feature_extraction.text import TfidfVectorizer
tv = TfidfVectorizer(ngram_range=(3,3), analyzer='word', min_df = 2)
#FIX: fit_transform() already fits the vocabulary; the original's separate
#tv.fit(...) call fitted the same corpus twice for nothing.
tfidf_trigram_df = tv.fit_transform(text_df['reviewContentNew'])
tfidf_trigram_df = tfidf_trigram_df.toarray()
#FIX: get_feature_names() was removed in scikit-learn 1.2; use get_feature_names_out().
tfidf_trigram_df = pd.DataFrame(tfidf_trigram_df, columns=tv.get_feature_names_out())
tfidf_trigram_df = tfidf_trigram_df.reset_index(drop=True)
#Align row indices before the column-wise concat
text_df = text_df.reset_index(drop=True)
combined_trigram_df = pd.concat([text_df, tfidf_trigram_df], axis=1)
# In[100]:
#Dropping unwanted columns.
#BUG FIX: 'sentimentScore' is never created (its creation cell is commented
#out earlier); errors='ignore' prevents a KeyError on the missing column.
combined_trigram_df = combined_trigram_df.drop(columns=['reviewerID','reviewContent','flagged','restaurantID','ReviewID',
'reviewContentNew','sentimentScore'], errors='ignore')
# In[101]:
combined_trigram_df = combined_trigram_df.abs()
# In[102]:
#Splitting target varibles and independent variables
X = combined_trigram_df.drop(columns=['flagged_code'])
X = X.iloc[:,:].values
y = combined_trigram_df.iloc[:,combined_trigram_df.columns.get_loc('flagged_code')].values
# In[103]:
X.shape
# In[104]:
y.shape
# In[105]:
#Feature selection using Chi-Square test
#Chi Square feature selection
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
chi_feature = SelectKBest(chi2, k=3000)
X_KBest = chi_feature.fit_transform(X, y)
# In[106]:
#Splitting after feature selection
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X_KBest,y,test_size = 0.20,random_state = 123)
# In[107]:
#Gaussian Naive Bayes
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(X_train,y_train)
pred_y = gnb.predict(X_test)
model_report(y_test,pred_y)
KFold_Cross_Validation(gnb,10)
# In[108]:
#XGBoost before HyperParameter tuning
import xgboost as xgb
xgbc = xgb.XGBClassifier()
xgbc.fit(X_train,y_train)
pred_y = xgbc.predict(X_test)
model_report(y_test,pred_y)
KFold_Cross_Validation(xgbc,10)
# In[109]:
#Parameter Tuning - XGBoost
from sklearn.model_selection import RandomizedSearchCV
parameter = {
'max_depth' : np.arange(3,10,1),
'n_estimators' : range(1,100,1),
'learning_rate' : [0.05, 0.10, 0.15, 0.20, 0.25, 0.30,0.50,0.60],
'eta' : range(1,100,1),
'subsample' : np.arange(0.5,1,0.01),
'min_child_weight': range(1,6,1),
'gamma' : [i/10.0 for i in range(0,5)]
}
# Randomized hyper-parameter search over the XGBoost grid defined above.
rs = RandomizedSearchCV(
    estimator = xgbc,
    param_distributions = parameter,
    n_iter = 20,
    scoring ='accuracy',
    n_jobs=4,
    verbose=10,
    random_state=10
)
rs.fit(X_train,y_train)
# In[110]:
rs.cv_results_
# In[111]:
# Report the best CV score and the winning parameter combination.
print("Best accuracy Obtained: {0}".format(rs.best_score_))
print("Parameters")
for key, value in rs.best_params_.items():
    print("\t{}:{}".format(key, value))
# In[112]:
#XGBoost After Tuning
# Refit XGBoost with the parameters found by the randomized search.
import xgboost as xgb
# NOTE(review): 'eta' is an alias for learning_rate in xgboost and its valid
# range is (0, 1]; eta = 83 looks unintended -- confirm against the search output.
xgbc = xgb.XGBClassifier(subsample = 0.8400000000000003,
                        n_estimators = 58,
                        min_child_weight = 3,
                        max_depth = 4,
                        learning_rate = 0.1,
                        gamma = 0.4,
                        eta = 83)
xgbc.fit(X_train,y_train)
pred_y = xgbc.predict(X_test)
model_report(y_test,pred_y)
KFold_Cross_Validation(xgbc,10)
# In[113]:
#AdaBoost
# Baseline AdaBoost with default parameters.
from sklearn.ensemble import AdaBoostClassifier
abc = AdaBoostClassifier()
abc.fit(X_train,y_train)
pred_y = abc.predict(X_test)
model_report(y_test,pred_y)
KFold_Cross_Validation(abc,10)
# In[114]:
#Parameter Tuning AdaBoost
parameter = {
    'n_estimators' : range(1,100,1),
    'learning_rate' : [0.05, 0.10, 0.15, 0.20, 0.25, 0.30,0.50,0.60,0.70,0.80,0.90,1.0]
}
rs = RandomizedSearchCV(
    estimator = abc,
    param_distributions = parameter,
    n_iter = 20,
    scoring ='accuracy',
    n_jobs=4,
    verbose=10,
    random_state=10
)
rs.fit(X_train,y_train)
# In[115]:
rs.cv_results_
# In[116]:
print("Best accuracy Obtained: {0}".format(rs.best_score_))
print("Parameters")
for key, value in rs.best_params_.items():
    print("\t{}:{}".format(key, value))
# In[117]:
#After parameter Tuning - AdaBoost
# Refit AdaBoost with the tuned parameters.
from sklearn.ensemble import AdaBoostClassifier
abc = AdaBoostClassifier(n_estimators = 98,
                        learning_rate = 0.6)
abc.fit(X_train,y_train)
pred_y = abc.predict(X_test)
model_report(y_test,pred_y)
KFold_Cross_Validation(abc,10)
# In[118]:
#GradientBoosting
# Baseline gradient boosting with default parameters.
from sklearn.ensemble import GradientBoostingClassifier
gbc = GradientBoostingClassifier()
gbc.fit(X_train,y_train)
pred_y = gbc.predict(X_test)
model_report(y_test,pred_y)
KFold_Cross_Validation(gbc,10)
# In[119]:
#Parameter Tuning Gradient Boosting Machine
parameter = {
    'n_estimators' : range(1,100,1),
    'learning_rate' : [0.05, 0.10, 0.15, 0.20, 0.25, 0.30,0.50,0.60,0.70,0.80,0.90,1.0],
    'subsample' : np.arange(0.5,1,0.01),
    'min_samples_split' : range(50,500,50),
    'min_samples_leaf' : range(10,400,20),
    'max_depth' : np.arange(3,10,1)
}
rs = RandomizedSearchCV(
    estimator = gbc,
    param_distributions = parameter,
    n_iter = 20,
    scoring ='accuracy',
    n_jobs=4,
    verbose=10,
    random_state=10
)
rs.fit(X_train,y_train)
# In[120]:
rs.cv_results_
# In[121]:
print("Best accuracy Obtained: {0}".format(rs.best_score_))
print("Parameters")
for key, value in rs.best_params_.items():
    print("\t{}:{}".format(key, value))
# In[122]:
#Gradient Boosting Machine after parameter tuning
# Refit GBM with the tuned parameters.
from sklearn.ensemble import GradientBoostingClassifier
gbc = GradientBoostingClassifier(subsample = 0.7000000000000002,
                                n_estimators = 62,
                                min_samples_split = 350,
                                min_samples_leaf = 190,
                                max_depth = 3,
                                learning_rate = 0.15)
gbc.fit(X_train,y_train)
pred_y = gbc.predict(X_test)
model_report(y_test,pred_y)
KFold_Cross_Validation(gbc,10)
# In[123]:
text_df.columns
# In[ ]:
|
class Solution(object):
    """LeetCode-style string reversal."""
    def reverseString(self, s):
        """Return *s* reversed, using an explicit two-pointer swap."""
        chars = list(s)
        lo, hi = 0, len(chars) - 1
        while lo < hi:
            # Tuple assignment swaps the pair in place.
            chars[lo], chars[hi] = chars[hi], chars[lo]
            lo += 1
            hi -= 1
        return ''.join(chars)
    def reverseString2(self, s):
        """Return *s* reversed via slicing."""
        return s[::-1]
# Smoke-test reverseString on a few sample inputs (same outputs, same order).
for sample in ("A", "AB", "ABC", "Hello World", ""):
    print(Solution().reverseString(sample))
|
from scipy import interpolate
from common import *
import csv
class dive_record_set(object):
    """
    Provide an interface to retrieve a set of depth / temp records and do
    stuff with them. If the given start and end datetime objects are naive,
    we'll assume they're in the local time zone as defined in configuration.py
    """
    def __init__(self, start_dt, end_dt, path_to_db):
        # Promote naive datetimes to the configured local time zone.
        if start_dt.tzinfo == None:
            start_dt = make_aware_of_local_tz(start_dt)
        if end_dt.tzinfo == None:
            end_dt = make_aware_of_local_tz(end_dt)
        self.start = start_dt
        self.end = end_dt
        self.db_path = path_to_db
    @property
    def db_rows(self):
        """
        utctime field in db does not store time zone but should always be in
        UTC so we'll make the start and end naive so we don't screw up the
        comparison.
        """
        # stdt = self.start.astimezone(pytz.utc).replace(tzinfo=None)
        # endt = self.end.astimezone(pytz.utc).replace(tzinfo=None)
        t = ( self.start.astimezone(pytz.utc), self.end.astimezone(pytz.utc), )
        conn,cur = connection_and_cursor(self.db_path)
        # (utctime, celsius, depthm) tuples within [start, end].
        rows = cur.execute( "SELECT utctime, celsius, depthm FROM DepthTempLog WHERE utctime >= ? AND utctime <= ?", t ).fetchall()
        cur.close()
        return rows
    @property
    def unique_db_files(self):
        """
        Get the file numbers of the depth logs that contain data associated
        with the current set of photos. If we want to time shift a set of depth
        records, we need this method to only shift the records associated with
        the current photos. Otherwise, we'd shift all the depth records in the
        db.
        """
        t = ( self.start.astimezone(pytz.utc), self.end.astimezone(pytz.utc), )
        conn,cur = connection_and_cursor(self.db_path)
        rows = cur.execute( "SELECT DISTINCT file FROM DepthTempLog WHERE utctime >= ? AND utctime <= ?", t ).fetchall()
        cur.close()
        filenums = [ int(r[0]) for r in rows ]
        return tuple(filenums)
    @property
    def file_device_pairs(self):
        # Distinct (file, device) pairs for every file that has records in range.
        q = "select distinct file, device from depthtemplog where file in " + \
            "(select distinct file from depthtemplog where utctime >= ? and utctime <= ? );"
        t = ( self.start.astimezone(pytz.utc), self.end.astimezone(pytz.utc), )
        conn,cur = connection_and_cursor(self.db_path)
        rows = cur.execute( q, t ).fetchall()
        cur.close()
        return rows
    def time_shift_array(self,t_secs):
        # Build rows of ("<t_secs> seconds", file, device) for executemany below.
        tstr = str(t_secs) + " seconds"
        fdps = np.array( self.file_device_pairs )
        tarr = np.repeat( tstr, fdps.shape[0] )
        #return tarr,fdps
        return np.hstack( (np.expand_dims(tarr,1),fdps) )
    def shift_depth_records(self,t_secs):
        # Shift utctime of all records belonging to the files/devices in range.
        # NOTE(review): the '+?' in the SQL relies on sqlite's unary '+' being a
        # no-op on the bound string -- confirm this executes as intended.
        tsarr = self.time_shift_array(t_secs)
        q = "update depthtemplog set utctime=datetime(utctime, +?) where file = ? and device = ?"
        conn,cur = connection_and_cursor(self.db_path)
        cur.executemany( q, tsarr ).fetchall()
        conn.commit()
        cur.close()
    @property
    def depth_time_list(self):
        """
        datetimes come out of the db in UTC. Convert to local tz before returning.
        """
        return [ ( float(r[2]), local_from_utc( dt_parser.parse(r[0]) ) ) for r in self.db_rows]
    @property
    def depth_time_array(self):
        return np.array(self.depth_time_list)
    def plot_depth_time(self):
        # Quick matplotlib depth profile for this record set.
        y = -1 * self.depth_time_array[:,0] # depths * -1 to make negative values
        x = self.depth_time_array[:,1] # datetimes
        plt.plot_date(x,y,linestyle='-')
        plt.show()
    @property
    def temperature_time_list(self):
        """
        datetimes come out of the db in UTC. Convert to local tz before returning.
        """
        return [ ( float(r[1]), local_from_utc( dt_parser.parse(r[0]) ) ) for r in self.db_rows]
    @property
    def temperature_time_array(self):
        return np.array(self.temperature_time_list)
    @property
    def time_delta(self):
        # Length of the record set's time window.
        return self.end - self.start
def depth_from_pressure(mbars):
    """Return a depth (in meters) for an absolute pressure in millibars.

    Uses 1 atm = 1013.25 millibar of surface pressure and one additional
    atmosphere per 9.9908 m of sea water (i.e. 1013.25 / 9.9908 =
    101.41830484045322 mbar per meter). Assumes diving at sea level with
    an ambient pressure of 1 atm.
    """
    surface_mbar = 1013.25
    mbar_per_meter = 101.41830484045322
    return (mbars - surface_mbar) / mbar_per_meter
def read_depth_temp_log(filepath,path_to_db,verbose=False):
    """Read in a single depth / temp csv file into a sqlite db for persistence
    and easy searching. Records must have a unique combination of device identifier,
    file number, and datetime stamp. If a conflict is found, the old record will be
    overwritten by the new one. This should insure that duplicates will not be
    created if a csv file is loaded in multiple times."""
    # Connect to the db
    conn,cur = connection_and_cursor(path_to_db)
    # Make sure the table is there
    cur.execute("create table if not exists DepthTempLog ( device text, file integer, utctime datetime, kelvin real, celsius real, mbar integer, depthm real, UNIQUE (device, file, utctime) ON CONFLICT REPLACE)")
    # Read the csv file
    if verbose:
        print "About to read %s" % os.path.basename(filepath)
    reader = csv.reader(open(filepath,'rb'),delimiter=',')
    rec_count = 0
    # Expected csv columns: row[1]=device id, row[2]=file number,
    # row[3..8]=Y,M,D,h,m,s of the log start, row[9]=seconds offset of this
    # sample, row[10]=pressure (mbar), row[11]=temperature (kelvin).
    for row in reader:
        device = row[1]
        file_id = int(row[2])
        # put the date and time in a datetime object so it can be manipulated
        start_time = dt(int(row[3]),int(row[4]),int(row[5]),int(row[6]),int(row[7]),int(row[8]))
        # The time comes in as local time. I don't want conflicts with gps utc
        # time so I will store everything as utc time. This is annoying in sqlite
        # it would be better to use Postgresql but I want to keep this small
        # and reduce the difficulty of installation.
        time_offset = td(seconds=float(row[9]))
        record_time = make_aware_of_local_tz( start_time + time_offset )
        # If I store this as timezone aware, then I have trouble parsing the
        # times I pull out of the db. So I will store unaware by taking out tzinfo.
        utc_time = utc_from_local(record_time).replace(tzinfo=None)
        mbar = int(row[10])
        kelvin = float(row[11])
        celsius = kelvin - 273.15
        depthm = depth_from_pressure(mbar)
        t = (device,file_id,utc_time,kelvin,celsius,mbar,depthm)
        if verbose:
            print "--- Just read row %i, putting it in db now." % rec_count
        # stick it in the table
        cur.execute("insert into DepthTempLog values (?,?,?,?,?,?,?)", t)
        rec_count += 1
    conn.commit()
    cur.close()
    return "Read %i records from %s to %s." % (rec_count,os.path.basename(filepath),os.path.basename(path_to_db))
def interpolate_depth(t_secs,t1_secs,t2_secs,d1m,d2m):
    """Given depth d1m at time t1_secs and depth d2m at time t2_secs, interpolate
    to find the depth at time t_secs.

    Fix: the original keyed a dict by time and fed it to scipy's interp1d.
    When t1_secs == t2_secs the dict silently collapsed to one point and
    interp1d failed; and building an interpolator object for a two-point
    linear interpolation is needless overhead. This version computes the
    linear interpolation directly, handles the degenerate equal-time case,
    and (like interp1d's default bounds_error) raises ValueError when t_secs
    lies outside [min(t1,t2), max(t1,t2)]. Argument order of the two samples
    still doesn't matter.
    """
    t = float(t_secs)
    lo, hi = sorted((float(t1_secs), float(t2_secs)))
    if not lo <= t <= hi:
        raise ValueError("t_secs %f outside sample interval [%f, %f]" % (t, lo, hi))
    if lo == hi:
        # Degenerate interval: both samples at the same instant; no slope.
        return float(d1m)
    # Map each depth to its (sorted) endpoint so argument order is irrelevant.
    depth_at = { float(t1_secs): float(d1m), float(t2_secs): float(d2m) }
    d_lo, d_hi = depth_at[lo], depth_at[hi]
    return d_lo + (d_hi - d_lo) * (t - lo) / (hi - lo)
def seconds_since_arbitrary( dt_obj, arbitrary_ordinal=1 ):
    """
    Return seconds since midnight of an arbitrary ordinal date.

    Bug fix: the original used timedelta.seconds, which is only the
    seconds-within-a-day component (always in [0, 86400)), so timestamps on
    different days produced colliding values and broke the time comparisons
    in get_depth_for_time. total_seconds() includes the day component.
    """
    return ( dt_obj - dt.fromordinal( arbitrary_ordinal ) ).total_seconds()
def get_depth_for_time(dt_obj, db_path, verbose=False, reject_threshold=30):
    """For a given datetime object, return the depth from the raw_log db. Go through the
    extra hassle of interpolating the depth if the time falls between two depth measurements.
    If a record is not found within the number of seconds specified by reject_threshold,
    just return False."""
    # Connect to the db
    conn,cur = connection_and_cursor(db_path)
    # For some reason TZ awareness screws up DST
    dt_obj = dt_obj.replace(tzinfo=None)
    # make a tuple with the time handed in so we can pass it to the query
    t = ( dt_obj, )
    # Fetch the two logged records closest in time to dt_obj.
    rows = cur.execute("select utctime, depthm from DepthTempLog order by abs( strftime('%s',?) - strftime('%s',utctime) ) LIMIT 2", t).fetchall()
    t1 = dt.strptime(rows[0][0],'%Y-%m-%d %H:%M:%S')
    t1_secs = seconds_since_arbitrary( t1 )
    t2 = dt.strptime(rows[1][0],'%Y-%m-%d %H:%M:%S')
    t2_secs = seconds_since_arbitrary( t2 )
    d1m = rows[0][1]
    d2m = rows[1][1]
    # Clean up
    cur.close()
    conn.close()
    # It is possible that the two closest time stamps do not sandwich our given
    # time (dt_obj). They could both be before or after. By putting the times in
    # an array, we can easily get the min and max time so we can check.
    times = np.array( [t1_secs,t2_secs] )
    dt_obj_secs = seconds_since_arbitrary( dt_obj ) + dt_obj.microsecond * 1E-6
    # if the closest available time stamp is further away than our threshold
    # then we will return False
    # NOTE(review): despite the docstring and the comment above, the reject
    # branch below returns None, not False -- confirm which value callers expect.
    if verbose:
        print "Min: %i Given: %.3f Max: %i" % (times.min(),dt_obj_secs,times.max())
    if ( abs(times.min() - dt_obj_secs) > reject_threshold ):
        if verbose:
            print "Target time: %s, %s seconds, Closest time: %s, %s seconds, 2nd Closest: %s,%s seconds" % ( dt_obj.strftime('%Y-%m-%d %H:%M:%S'),dt_obj.strftime('%s'),t1.strftime('%Y-%m-%d %H:%M:%S'),t1.strftime('%s'),t2.strftime('%Y-%m-%d %H:%M:%S'),t2.strftime('%s') )
        return None
    elif times.min() < dt_obj_secs < times.max(): # if dt_obj is between the two closest times, interpolate the depth
        return interpolate_depth( dt_obj_secs, t1_secs, t2_secs, d1m, d2m )
    else: # just return the closest depth if our given time is not between the two closest logged times
        return d1m
def get_temp_for_time(dt_obj, db_path, reject_threshold=30):
    """Get a temperature in Celsius for a given time if there is a record within the
    number of seconds specified by reject_threshold. If there's no record that close,
    return None. I'm not going to bother with interpolation here because I don't
    expect temperature to change that quickly relative to the sampling interval."""
    conn,cur = connection_and_cursor(db_path)
    t = ( dt_obj,dt_obj )
    # Closest record by absolute time difference; the first column is the
    # difference in seconds, the second the temperature.
    result = cur.execute("select abs(strftime('%s',?) - strftime('%s',utctime) ), celsius from DepthTempLog order by abs( strftime('%s',?) - strftime('%s',utctime) ) LIMIT 1", t).fetchone()
    # Fix: close the cursor and connection before returning; the original
    # leaked both (the sibling get_depth_for_time closes them).
    cur.close()
    conn.close()
    time_diff = result[0]
    celsius = result[1]
    if time_diff > reject_threshold:
        return None
    else:
        return celsius
def adjust_all_times(time_delta, db_path):
    """Shift the utctime of every record in DepthTempLog by *time_delta*.

    You probably don't want to do this to the whole table, but it was wanted
    once. Caveat: the original adjustment was done directly in the db, so
    this function has never actually been exercised.
    """
    conn, cur = connection_and_cursor(db_path)
    shift = (time_delta.total_seconds(),)
    cur.execute("update DepthTempLog set utctime=datetime(utctime,+?)", shift)
    conn.commit()
    cur.close()
if __name__ == '__main__':
    # CLI entry point: import one csv file, or every file in a directory.
    arg_parser = argparse.ArgumentParser(description='Import a depth/temperature csv file into the database.')
    arg_parser.add_argument('input_path', type=str, help='The directory of csv files or the individual file that you want to import.')
    arg_parser.add_argument('output_db', nargs='?', type=str, help='The database you would like to read the log into. If left blank, the db specified in configuration.py will be used.', default=db_path)
    args = arg_parser.parse_args()
    if not os.path.isdir(args.input_path):
        # A single file was handed in.
        read_depth_temp_log(args.input_path, args.output_db)
    else:
        # A directory was handed in; import each file inside it.
        for fname in os.listdir(args.input_path):
            read_depth_temp_log(os.path.join(args.input_path, fname), args.output_db)
|
"""
Generate some stats data so that if we run mypaas.stats locally,
we have some data to look at, even if it's fake :)
"""
import os
import time
import random
import datetime
from mypaas.stats import Monitor
def generate_test_data(filename, ndays=10):
    """Generate *ndays* of fake Monitor aggregates in *filename*.

    Used to test get_data() and the website locally when no real stats
    exist. Removes any previous file at *filename* first.
    """
    utc = datetime.timezone.utc
    today = time.gmtime()  # UTC
    today = datetime.datetime(today.tm_year, today.tm_mon, today.tm_mday, tzinfo=utc)
    first_day = today - datetime.timedelta(days=ndays)
    one_day = datetime.timedelta(days=1)
    step = 600  # aggregation window in seconds (default 10 min)

    # Start from a clean slate: drop any previous fake-data db.
    # (The original comment said "Refuse if log db exists", but the code
    # deletes it -- deletion is the intended behavior for fake data.)
    if os.path.isfile(filename):
        os.remove(filename)

    monitor = Monitor(filename, step=step)
    # Population of fake visitor ids to draw daily/monthly uniques from.
    # Fix: use a range (a sequence) -- random.sample() raises TypeError on
    # sets since Python 3.11.
    visitor_ids = range(random.randint(10000, 80000))

    # Produce data
    day = first_day
    monitor._monthly_ids = {}
    while day < today:
        day += one_day
        print("Generating for", day)
        monitor._daily_ids = {}
        for b in range(int(86400 / step)):
            with monitor:
                # Generate some request data
                for i in range(random.randint(1000, 2000)):
                    monitor.put("requests|count", 1)
                for i in range(random.randint(300, 1200)):
                    monitor.put("views|count", 1)
                for i in random.sample(visitor_ids, random.randint(10, 80)):
                    monitor.put("visits|dcount", i)
                    monitor.put("visits|mcount", i)
                # Generate some random OS and status data
                for i in range(random.randint(5, 30)):
                    osname = random.choice(["Windows", "Windows", "Linux", "OS X"])
                    browsername = random.choice(
                        ["FF", "FF", "Chrome", "Edge", "Safari"]
                    )
                    monitor.put("browser|cat", browsername + " - " + osname)
                # Generate some cpu and mem data
                for i in range(random.randint(5, 30)):
                    monitor.put("cpu|num|perc", random.randint(10, 70))
                for i in range(random.randint(5, 30)):
                    monitor.put("mem|num|iB", random.randint(2 * 2**30, 8 * 2**30))
            # Write the aggregate under the timestamp of this 10-minute step.
            aggr = monitor._next_aggr()
            t = int(day.timestamp() + b * step)
            key = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(t))
            aggr["time_key"] = key
            aggr["time_start"] = t
            aggr["time_stop"] = t + step
            monitor._write_aggr(aggr)


if __name__ == "__main__":
    generate_test_data(os.path.expanduser("~/_stats/exampledata.db"))
|
from __future__ import print_function
import numpy as np
import tensorflow as tf
import argparse
import time
import os
from six.moves import cPickle
from model import Model
from utils import TextLoader,NumpyLoader
def main():
    """Parse the sampling command line and hand the options to sample()."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--save_dir', type=str, default='save',
                            help='model directory to store checkpointed models')
    arg_parser.add_argument('--data_dir', type=str, default='save',
                            help='data_directory')
    arg_parser.add_argument('-n', type=int, default=500,
                            help='number of characters to sample')
    arg_parser.add_argument('--seq_length', type=int, default=500,
                            help='sequence_length')
    arg_parser.add_argument('--prime', type=str, default=' ',
                            help='prime text')
    arg_parser.add_argument('--batch_size', type=int, default=50,
                            help='when used in conjunction with (--prime_from_file) compute samples'+\
                            'in batches of this size')
    arg_parser.add_argument('--num_leading_chars_range', type=int, default=-1,
                            help='when used in conjunction with (--prime_from_file) makes num_leading_chars' + \
                            'a univorm RV (spanning num_leading_chars +/- num_leading_chars_range ) ')
    arg_parser.add_argument('--force_load_from_savedir', action='store_true',
                            help='continue training from checkpoint')
    arg_parser.add_argument('--prime_from_file', type=str, default=None,
                            help='if defined, will draw samples using primes in a file')
    arg_parser.add_argument('--num_leading_chars', type=int, default=130,
                            help='(used in conjunction with --prime_from_file) how many characters of the '+\
                            'real conversation to prime the rnn with')
    parsed = arg_parser.parse_args()
    sample(parsed)
def sample(args):
    """Restore a checkpointed char-rnn model and draw samples from it.

    Either samples once from args.prime (default), or, when
    args.prime_from_file is set, primes the model from batches of real
    sequences and saves generated/real pairs to save_dir/generated.npy.
    """
    print("V1")
    # Load the training-time configuration and the character vocabulary.
    with open(os.path.join(args.save_dir, 'config.pkl'), 'rb') as f:
        saved_args = cPickle.load(f)
    with open(os.path.join(args.save_dir, 'chars_vocab.pkl'), 'rb') as f:
        chars, vocab = cPickle.load(f)
    print("LOADING MODEL")
    if args.num_leading_chars_range>0:
        assert args.num_leading_chars - args.num_leading_chars_range>0, RuntimeError("Must not allow negative prime lengths")
    saved_args.seq_length = args.seq_length
    if args.prime_from_file is None:
        saved_args.batch_size=1
    # NOTE(review): this unconditional assignment makes the conditional one
    # above redundant and forces batch_size=1 even with --prime_from_file;
    # confirm whether that is intended.
    saved_args.batch_size=1
    print("ARGS ARE "+str(saved_args))
    model = Model(saved_args, True)
    print("READY TO GO")
    with tf.Session() as sess:
        tf.initialize_all_variables().run()
        print("INITIALIZED")
        saver = tf.train.Saver(tf.all_variables())
        print("SAVER INITIALIZED")
        ckpt = tf.train.get_checkpoint_state(args.save_dir)
        print("CHECKPOINT LOADED, SAVE_DIR="+args.save_dir)
        if ckpt and ckpt.model_checkpoint_path:
            # Re-anchor the checkpoint path to save_dir in case the checkpoint
            # file recorded an absolute path from another machine.
            ckpt.model_checkpoint_path = os.path.join(args.save_dir,ckpt.model_checkpoint_path.split('/')[-1])
            print("Loading checkpoint "+ckpt.model_checkpoint_path)
            saver.restore(sess, ckpt.model_checkpoint_path)
            print("SESSION RESTORED")
            from evaluator import EvaluatorDataLoader
            data_loader = EvaluatorDataLoader(args.data_dir, 2, args.seq_length, use_generated_real_pairs=True,
                                              use_generated_coments_in_datadir=False)
            # Debug probe: print probabilities for one real comment, then stop.
            for row in data_loader.real_comments_from_generated_file[600:]:
                print(model.get_probs(sess,row))
                break
            if args.prime_from_file is None:
                # Single sample primed with the literal --prime text.
                print(repr(model.sample(sess=sess, chars=chars, vocab=vocab, num=args.n, prime=args.prime)))
            else:
                # Batch mode: prime from file, save generated/real pairs.
                data_loader = NumpyLoader(args.prime_from_file, args.batch_size, 500)
                data_loader.reset_batch_pointer()
                # Row layout: column 0 is a 0/1 generated/real flag, the
                # remaining 500 columns hold the character ids.
                to_save =np.zeros((2*(data_loader.num_batches-1)*data_loader.batch_size, 500+1),dtype='uint8')
                print("TO_SAVE shape "+str(to_save.shape))
                try:
                    # ioff counts batches skipped due to sampling errors so the
                    # write index i stays contiguous.
                    ioff=0
                    for i in xrange(data_loader.num_batches-1):
                        i=i-ioff
                        n = args.num_leading_chars
                        primes = data_loader.next_batch(return_y=False)
                        if args.num_leading_chars_range>0:
                            # Randomize the prime length within the given range.
                            n = int(np.random.uniform(n-args.num_leading_chars_range, n+
                                                      args.num_leading_chars_range+1))
                        _primes=primes[:,:n]
                        print("PRIMES SHAPE"+str(primes.shape))
                        now=time.time()
                        try:
                            generated = model.sample_many(sess=sess,
                                                          chars=chars,
                                                          primes=_primes,
                                                          num=500)
                        except Exception,e:
                            print ('EXCPEPTION! '+str(e))
                            ioff+=1
                            continue
                        # Human-readable sanity check of the first pair.
                        sc = [chars[int(p)] for p in generated[0]]
                        sc.insert(n, '<GENERATE:>')
                        sc = ''.join(sc).replace(chr(0), '-->').replace(chr(1), '').replace(chr(2), '')
                        actual = primes[0]
                        actual = [chars[int(p)] for p in actual]
                        actual=''.join(actual).replace(chr(0), '-->').replace(chr(1), '').replace(chr(2), '')
                        print("SANITY CHECK! Length %d prime, yields '%s' " % (_primes.shape[1], sc))
                        print("SANITY CHECK was actually '%s' "%actual)
                        print('%d of %d batches completed (%.3f seconds per)'%(i,data_loader.num_batches,time.time()-now))
                        # Interleave generated (flag 0) and real (flag 1) batches.
                        to_save[2*i*data_loader.batch_size:(2*i+1)*data_loader.batch_size,0]=0
                        to_save[2*i*data_loader.batch_size:(2*i+1)*data_loader.batch_size,1:]=generated
                        to_save[(2*i+1)*data_loader.batch_size:(2*i+2)*data_loader.batch_size,0]=1
                        to_save[(2*i+1)*data_loader.batch_size:(2*i+2)*data_loader.batch_size,1:]=primes
                finally:
                    # Persist whatever was completed, trimmed to the filled rows.
                    to_save = to_save[:(2*i+2)*data_loader.batch_size]
                    print("Saving %d generated/ungenerated pairs to %s"%(to_save.shape[0]/2,os.path.join(args.save_dir,'generated.npy')))
                    np.save(os.path.join(args.save_dir,'generated.npy'),to_save)
# Script entry point: parse CLI args and run the sampler.
if __name__ == '__main__':
    main()
|
from sympy.ntheory import totient

# Project Euler 72: the number of reduced proper fractions with denominator
# <= 1,000,000 equals the sum of Euler's totient phi(n) for n = 2..1,000,000.
# (Removed the original's unused `solution = 0` accumulator.)
limit = 1000001
print(sum(totient(n) for n in range(2, limit)))
# coding=utf-8
import random
import shutil
import sys
import tempfile
import unittest
from threading import Thread
import uuid
from persistqueue.sqlackqueue import (
SQLiteAckQueue,
FILOSQLiteAckQueue,
UniqueAckQ,
)
from persistqueue import Empty
class SQLite3AckQueueTest(unittest.TestCase):
    """Exercises SQLiteAckQueue: FIFO ordering, persistence across reopen,
    multithreaded use, and the ack / nack / ack_failed lifecycle."""
    def setUp(self):
        # Fresh on-disk queue directory per test.
        self.path = tempfile.mkdtemp(suffix='sqlackqueue')
        self.auto_commit = True
        self.queue_class = SQLiteAckQueue
    def tearDown(self):
        shutil.rmtree(self.path, ignore_errors=True)
    def test_raise_empty(self):
        q = self.queue_class(self.path, auto_commit=self.auto_commit)
        q.put('first')
        d = q.get()
        self.assertEqual('first', d)
        self.assertRaises(Empty, q.get, block=False)
        # assert with timeout
        self.assertRaises(Empty, q.get, block=True, timeout=1.0)
        # assert with negative timeout
        self.assertRaises(ValueError, q.get, block=True, timeout=-1.0)
    def test_empty(self):
        q = self.queue_class(self.path, auto_commit=self.auto_commit)
        self.assertEqual(q.empty(), True)
        q.put('first')
        self.assertEqual(q.empty(), False)
        q.get()
        self.assertEqual(q.empty(), True)
    def test_full(self):
        # SQL queue `full()` always returns `False` !!
        q = self.queue_class(self.path, auto_commit=self.auto_commit)
        self.assertEqual(q.full(), False)
        q.put('first')
        self.assertEqual(q.full(), False)
        q.get()
        self.assertEqual(q.full(), False)
    def test_open_close_single(self):
        """Write 1 item, close, reopen checking if same item is there"""
        q = self.queue_class(self.path, auto_commit=self.auto_commit)
        q.put(b'var1')
        del q
        q = self.queue_class(self.path)
        self.assertEqual(1, q.qsize())
        self.assertEqual(b'var1', q.get())
    def test_open_close_1000(self):
        """Write 1000 items, close, reopen checking if all items are there"""
        q = self.queue_class(self.path, auto_commit=self.auto_commit)
        for i in range(1000):
            q.put('var%d' % i)
        self.assertEqual(1000, q.qsize())
        del q
        q = self.queue_class(self.path)
        self.assertEqual(1000, q.qsize())
        for i in range(1000):
            data = q.get()
            self.assertEqual('var%d' % i, data)
        # assert adding another one still works
        q.put('foobar')
        data = q.get()
        q.shrink_disk_usage()
        self.assertEqual('foobar', data)
    def test_random_read_write(self):
        """Test random read/write"""
        q = self.queue_class(self.path, auto_commit=self.auto_commit)
        # n tracks how many items should currently be in the queue.
        n = 0
        for _ in range(1000):
            if random.random() < 0.5:
                if n > 0:
                    q.get()
                    n -= 1
                else:
                    self.assertRaises(Empty, q.get, block=False)
            else:
                # UniqueQueue will block at get() if this is not unique
                # uuid.uuid4() should be unique
                q.put('var%s' % uuid.uuid4())
                n += 1
    def test_multi_threaded_parallel(self):
        """Create consumer and producer threads, check parallelism"""
        # self.skipTest("Not supported multi-thread.")
        m_queue = self.queue_class(
            path=self.path, multithreading=True, auto_commit=self.auto_commit
        )
        def producer():
            for i in range(1000):
                m_queue.put('var%d' % i)
        def consumer():
            for i in range(1000):
                x = m_queue.get(block=True)
                self.assertEqual('var%d' % i, x)
        c = Thread(target=consumer)
        c.start()
        p = Thread(target=producer)
        p.start()
        p.join()
        c.join()
        self.assertEqual(0, m_queue.size)
        self.assertEqual(0, len(m_queue))
        self.assertRaises(Empty, m_queue.get, block=False)
    def test_multi_threaded_multi_producer(self):
        """Test sqlqueue can be used by multiple producers."""
        queue = self.queue_class(
            path=self.path, multithreading=True, auto_commit=self.auto_commit
        )
        def producer(seq):
            for i in range(10):
                queue.put('var%d' % (i + (seq * 10)))
        def consumer():
            for _ in range(100):
                data = queue.get(block=True)
                self.assertTrue('var' in data)
        c = Thread(target=consumer)
        c.start()
        producers = []
        for seq in range(10):
            t = Thread(target=producer, args=(seq,))
            t.start()
            producers.append(t)
        for t in producers:
            t.join()
        c.join()
    def test_multiple_consumers(self):
        """Test sqlqueue can be used by multiple consumers."""
        queue = self.queue_class(
            path=self.path, multithreading=True, auto_commit=self.auto_commit
        )
        def producer():
            for x in range(1000):
                queue.put('var%d' % x)
        counter = []
        # Set all to 0
        for _ in range(1000):
            counter.append(0)
        def consumer(index):
            for i in range(200):
                data = queue.get(block=True)
                self.assertTrue('var' in data)
                counter[index * 200 + i] = data
        p = Thread(target=producer)
        p.start()
        consumers = []
        for index in range(5):
            t = Thread(target=consumer, args=(index,))
            t.start()
            consumers.append(t)
        p.join()
        for t in consumers:
            t.join()
        self.assertEqual(0, queue.qsize())
        # Every slot must have been filled by exactly one consumer.
        for x in range(1000):
            self.assertNotEqual(
                0, counter[x], "not 0 for counter's index %s" % x
            )
    def test_protocol_1(self):
        # A brand-new queue picks the pickle protocol for the running
        # interpreter (2 on Python 2, 4 on Python 3).
        shutil.rmtree(self.path, ignore_errors=True)
        q = self.queue_class(path=self.path)
        self.assertEqual(
            q._serializer.protocol, 2 if sys.version_info[0] == 2 else 4
        )
    def test_protocol_2(self):
        # Reopening an existing queue keeps the same protocol choice.
        q = self.queue_class(path=self.path)
        self.assertEqual(
            q._serializer.protocol, 2 if sys.version_info[0] == 2 else 4
        )
    def test_ack_and_clear(self):
        q = self.queue_class(path=self.path)
        ret_list = []
        for _ in range(100):
            q.put("val%s" % _)
        for _ in range(100):
            ret_list.append(q.get())
        for ret in ret_list:
            q.ack(ret)
        self.assertEqual(q.acked_count(), 100)
        # clear_acked_data keeps only the latest N acked rows.
        q.clear_acked_data(keep_latest=10)
        self.assertEqual(q.acked_count(), 10)
        q.shrink_disk_usage()
    def test_ack_unknown_item(self):
        q = self.queue_class(path=self.path)
        q.put("val1")
        val1 = q.get()
        # ack/nack/ack_failed on values never handed out must be no-ops.
        q.ack("val2")
        q.nack("val3")
        q.ack_failed("val4")
        self.assertEqual(q.qsize(), 0)
        self.assertEqual(q.unack_count(), 1)
        q.ack(val1)
        self.assertEqual(q.unack_count(), 0)
    def test_resume_unack(self):
        q = self.queue_class(path=self.path)
        q.put("val1")
        val1 = q.get()
        self.assertEqual(q.empty(), True)
        self.assertEqual(q.qsize(), 0)
        self.assertEqual(q.unack_count(), 1)
        self.assertEqual(q.ready_count(), 0)
        del q
        # With auto_resume=False the unacked item stays parked until
        # resume_unack_tasks() is called explicitly.
        q = self.queue_class(path=self.path, auto_resume=False)
        self.assertEqual(q.empty(), True)
        self.assertEqual(q.qsize(), 0)
        self.assertEqual(q.unack_count(), 1)
        self.assertEqual(q.ready_count(), 0)
        q.resume_unack_tasks()
        self.assertEqual(q.empty(), False)
        self.assertEqual(q.qsize(), 1)
        self.assertEqual(q.unack_count(), 0)
        self.assertEqual(q.ready_count(), 1)
        self.assertEqual(val1, q.get())
        del q
        # With auto_resume=True the unacked item is requeued automatically.
        q = self.queue_class(path=self.path, auto_resume=True)
        self.assertEqual(q.empty(), False)
        self.assertEqual(q.qsize(), 1)
        self.assertEqual(q.unack_count(), 0)
        self.assertEqual(q.ready_count(), 1)
        self.assertEqual(val1, q.get())
    def test_ack_unack_ack_failed(self):
        q = self.queue_class(path=self.path)
        q.put("val1")
        q.put("val2")
        q.put("val3")
        val1 = q.get()
        val2 = q.get()
        val3 = q.get()
        # qsize should be zero when all item is getted from q
        self.assertEqual(q.qsize(), 0)
        self.assertEqual(q.unack_count(), 3)
        # active size should be equal to qsize + unack_count
        self.assertEqual(q.active_size(), 3)
        # nack will let the item requeued as ready status
        q.nack(val1)
        self.assertEqual(q.qsize(), 1)
        self.assertEqual(q.ready_count(), 1)
        # ack failed is just mark item as ack failed
        q.ack_failed(val3)
        self.assertEqual(q.ack_failed_count(), 1)
        # ack should not effect qsize
        q.ack(val2)
        self.assertEqual(q.acked_count(), 1)
        self.assertEqual(q.qsize(), 1)
        # all ack* related action will reduce unack count
        self.assertEqual(q.unack_count(), 0)
        # reget the nacked item
        ready_val = q.get()
        self.assertEqual(ready_val, val1)
        q.ack(ready_val)
        self.assertEqual(q.qsize(), 0)
        self.assertEqual(q.acked_count(), 2)
        self.assertEqual(q.ready_count(), 0)
    def test_put_0(self):
        # Falsy payloads (0) must still round-trip.
        q = self.queue_class(path=self.path)
        q.put(0)
        d = q.get(block=False)
        self.assertIsNotNone(d)
    def test_get_id(self):
        q = self.queue_class(path=self.path)
        q.put("val1")
        val2_id = q.put("val2")
        q.put("val3")
        item = q.get(id=val2_id)
        # item id should be 2
        self.assertEqual(val2_id, 2)
        # item should get val2
        self.assertEqual(item, 'val2')
    def test_get_next_in_order(self):
        q = self.queue_class(path=self.path)
        val1_id = q.put("val1")
        q.put("val2")
        q.put("val3")
        item = q.get(id=val1_id, next_in_order=True)
        # item id should be 1
        self.assertEqual(val1_id, 1)
        # item should get val2
        self.assertEqual(item, 'val2')
        q.nack(item)
        # queue should roll over to begining if next > end
        item = q.get(id=3, next_in_order=True, raw=True)
        q.nack(item)
        self.assertEqual(item.get("pqid"), 1)
    def test_get_raw(self):
        q = self.queue_class(path=self.path)
        q.put("val1")
        item = q.get(raw=True)
        q.nack(item)
        # raw return is a dict carrying the row id plus the payload
        self.assertEqual(True, "pqid" in item)
        self.assertEqual(item.get("data"), 'val1')
    def test_nack_raw(self):
        q = self.queue_class(path=self.path)
        q.put("val1")
        item = q.get(raw=True)
        # nack a raw return
        q.nack(item)
        # size should be 1 after nack
        self.assertEqual(q.qsize(), 1)
    def test_ack_active_size(self):
        q = self.queue_class(path=self.path)
        q.put("val1")
        item = q.get(raw=True)
        # active_size should be 1 as it hasn't been acked
        self.assertEqual(q.active_size(), 1)
        q.ack(item)
        # active_size should be 0 after ack
        self.assertEqual(q.active_size(), 0)
    def test_queue(self):
        q = self.queue_class(path=self.path)
        q.put("val1")
        q.put("val2")
        q.put("val3")
        # queue should get the three items
        d = q.queue()
        self.assertEqual(len(d), 3)
        self.assertEqual(d[1].get("data"), "val2")
    def test_update(self):
        q = self.queue_class(path=self.path)
        qid = q.put("val1")
        # update() replaces the stored payload for an existing row id.
        q.update(id=qid, item="val2")
        item = q.get(id=qid)
        q.nack(item)
        self.assertEqual(item, "val2")
class SQLite3QueueInMemory(SQLite3AckQueueTest):
    """Runs the ack-queue suite against an in-memory sqlite queue;
    persistence- and threading-dependent tests are skipped."""
    def setUp(self):
        self.path = ":memory:"
        self.auto_commit = True
        self.queue_class = SQLiteAckQueue
    def test_open_close_1000(self):
        self.skipTest('Memory based sqlite is not persistent.')
    def test_open_close_single(self):
        self.skipTest('Memory based sqlite is not persistent.')
    def test_multiple_consumers(self):
        self.skipTest(
            'Skipped due to occasional crash during multithreading mode.'
        )
    def test_multi_threaded_multi_producer(self):
        self.skipTest(
            'Skipped due to occasional crash during multithreading mode.'
        )
    def test_multi_threaded_parallel(self):
        self.skipTest(
            'Skipped due to occasional crash during multithreading mode.'
        )
    def test_task_done_with_restart(self):
        self.skipTest('Skipped due to not persistent.')
    def test_protocol_2(self):
        self.skipTest('In memory queue is always new.')
    def test_resume_unack(self):
        self.skipTest('Memory based sqlite is not persistent.')
class FILOSQLite3AckQueueTest(SQLite3AckQueueTest):
    """Re-runs the ack-queue suite for the LIFO variant, overriding the
    tests whose expected ordering differs."""
    def setUp(self):
        self.path = tempfile.mkdtemp(suffix='filo_sqlackqueue')
        self.auto_commit = True
        self.queue_class = FILOSQLiteAckQueue
    def tearDown(self):
        shutil.rmtree(self.path, ignore_errors=True)
    def test_open_close_1000(self):
        """Write 1000 items, close, reopen checking if all items are there"""
        q = self.queue_class(self.path, auto_commit=self.auto_commit)
        for i in range(1000):
            q.put('var%d' % i)
        self.assertEqual(1000, q.qsize())
        del q
        q = self.queue_class(self.path)
        self.assertEqual(1000, q.qsize())
        for i in range(1000):
            data = q.get()
            # LIFO: items come back in reverse insertion order.
            self.assertEqual('var%d' % (999 - i), data)
        # assert adding another one still works
        q.put('foobar')
        data = q.get()
        q.nack(data)
        self.assertEqual('foobar', data)
    def test_multi_threaded_parallel(self):
        """Create consumer and producer threads, check parallelism"""
        # self.skipTest("Not supported multi-thread.")
        m_queue = self.queue_class(
            path=self.path, multithreading=True, auto_commit=self.auto_commit
        )
        def producer():
            for i in range(1000):
                m_queue.put('var%d' % i)
        def consumer():
            # We cannot quarantee what next number will be like in FIFO
            for _ in range(1000):
                x = m_queue.get(block=True)
                self.assertTrue('var' in x)
        c = Thread(target=consumer)
        c.start()
        p = Thread(target=producer)
        p.start()
        p.join()
        c.join()
        self.assertEqual(0, m_queue.size)
        self.assertEqual(0, len(m_queue))
        self.assertRaises(Empty, m_queue.get, block=False)
    def test_get_next_in_order(self):
        q = self.queue_class(path=self.path)
        val1_id = q.put("val1")
        q.put("val2")
        q.put("val3")
        item = q.get(id=val1_id, next_in_order=True)
        q.nack(item)
        # item id should be 1
        self.assertEqual(val1_id, 1)
        # LIFO: "next" after id 1 walks backwards, so we get val3.
        self.assertEqual(item, 'val3')
        # queue should roll over to end if next < begining
        item = q.get(id=1, next_in_order=True, raw=True)
        q.nack(item)
        self.assertEqual(item.get("pqid"), 3)
# Note
# We have to be careful to avoid test cases from SQLite3AckQueueTest having
# duplicate values in their q.put()'s. This could block the test indefinitely
class SQLite3UniqueAckQueueTest(SQLite3AckQueueTest):
    """Runs the ack-queue suite against UniqueAckQ and adds a
    de-duplication check."""
    def setUp(self):
        self.path = tempfile.mkdtemp(suffix='sqlackqueue')
        self.auto_commit = True
        self.queue_class = UniqueAckQ
    def test_add_duplicate_item(self):
        q = self.queue_class(self.path)
        q.put(1111)
        self.assertEqual(1, q.size)
        # put duplicate item
        q.put(1111)
        self.assertEqual(1, q.size)
        q.put(2222)
        self.assertEqual(2, q.size)
        del q
        # De-duplication must survive a close/reopen cycle.
        q = self.queue_class(self.path)
        self.assertEqual(2, q.size)
|
#File Name:- Disk_Check.py
#Service Name:- Disk size
#Purpose: To return the status of Disk Check Qualification criteria.
#Author Name: Roy Bright
#Create Date: 2/Apr/2018
#Modifed By:- Roy Bright
#Last Modify Date: 2/Apr/2019
#Current Version: 1.1
#Summary of Last Change: N/A
#Arguments: Drive/File system name and Min Size of disk required.
import socket, ctypes, platform, os, sys, win32api
def Fun_GetFreeDisk(dirname, size):
    """Check whether drive / filesystem `dirname` has at least `size` GB free.

    :param dirname: Windows drive name (e.g. "C:") or Linux top-level
                    directory name (e.g. "home").
    :param size: minimum required free space in GB.
    :return: (free_gb, status) tuple. status 0 means the disk check passed,
             1 means it failed; free_gb is the string "DNF" (Drive Not
             Found) when the drive/filesystem does not exist.
    """
    from Get_Platform import Fun_platform
    # Calling Platform function to get OS name of this host.
    oss = Fun_platform(socket.gethostname())
    if oss == "Windows":  # Windows OS block
        drives = win32api.GetLogicalDriveStrings()
        # Fetch list of available drives in the server, e.g. ['C:\\', 'D:\\'].
        drives = drives.split('\000')[:-1]
        # BUG fixed: the old flag logic treated an empty drive list as
        # "found"; any() is False for an empty list.
        found = any(dirname in drive for drive in drives)
        if found:
            free_bytes = ctypes.c_ulonglong(0)
            # Calculate free disk space of the drive via the Win32 API.
            ctypes.windll.kernel32.GetDiskFreeSpaceExW(
                ctypes.c_wchar_p(dirname), None, None, ctypes.pointer(free_bytes))
            free_gb = round(((free_bytes.value / 1024) / 1024) / 1024)
            # Returns are normalized to tuples (the Windows branch used to
            # return lists while the Linux branch returned tuples).
            status = 0 if int(free_gb) >= int(size) else 1
            return (free_gb, status)
        return ("DNF", 1)  # Drive Not Found
    else:  # Linux / other POSIX block
        # os.listdir gives an exact-name membership test; the original parsed
        # `ls /` output with a substring check, which could match partial
        # names and relied on the `commands` module (removed in Python 3).
        if dirname in os.listdir('/'):
            fs = "/" + dirname
            st = os.statvfs(fs)
            # Free disk space of the filesystem, in GB.
            ldisk = int(round((st.f_bavail * st.f_frsize / 1024 / 1024) / 1024))
            return (ldisk, 0) if ldisk >= size else (ldisk, 1)
        return ("DNF", 1)
# disk= Fun_GetFreeDisk("C:",500)
# print disk
# if disk[1] == 0:
# print("Disk Check Passed. Free disk space is ", int(disk[0]))
# elif disk[0] == "DNF":
# print("File System not found in the System. ")
# else:
# print("Disk Check Failed. Free disk space is: ", int(disk[0])) |
import gen_strat
import strategy
import util
#util.saveProcessedFromYahoo.download = False
#where = gen_strat.historical()
# Run the multi-asset strategy over the stored "history" dataset.
strategy.multi("history")
|
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
import pkgconfig
# Base compilation flags handed to libclang for every translation unit.
flags = [
    '-std=gnu++14',
    # ...and the same thing goes for the magic -x option which specifies the
    # language that the files to be compiled are written in. This is mostly
    # relevant for c++ headers.
    # For a C project, you would set this to 'c' instead of 'c++'.
    '-x',
    'c++',
    './ClangCompleter',
    '-pthread',
    '-DLINUX=1',
    '-DDEBUG=1',
    '-D_DEBUG=1',
    '-DDONT_SET_USING_JUCE_NAMESPACE=1',
    '-DJUCER_LINUX_MAKE_6D53C8B4=1',
    '-DJUCE_APP_VERSION=0.0.8.10',
    '-DJUCE_APP_VERSION_HEX=0x80a',
    '-lcrypto',
    '-ldl',
    '-lpthread',
    '-lrt',
    '-I./JuceLibraryCode',
    '-I./deps/JUCE/modules',
]

# Libraries whose compile flags are resolved through pkg-config below.
pkgConfigFlags = [
    'NetworkManager',
    'libnm-glib',
    'alsa',
    'freetype2',
    'libssl',
    'gio-2.0',
    'x11',
    'xext',
    'xinerama'
]

# Expand every pkg-config entry into its individual cflags.
for pflag in pkgConfigFlags:
    flagList = pkgconfig.cflags(pflag).split(' ')
    for f in flagList:
        flags.append(f)

# Add every directory under Source/ and Tests/ as an include path.
for dirname, subdirList, fileList in os.walk('./Source'):
    flags.append('-I'+dirname)
for dirname, subdirList, fileList in os.walk('./Tests'):
    flags.append('-I'+dirname)
def DirectoryOfThisScript():
    """Return the absolute directory containing this configuration script."""
    here = os.path.abspath(__file__)
    return os.path.dirname(here)
def FlagsForFile(filename, **kwargs):
    """YouCompleteMe entry point: same flag set for every project file."""
    return dict(
        flags=flags,
        include_paths_relative_to_dir=DirectoryOfThisScript(),
    )
|
import pytest
from ethereum.tools.tester import TransactionFailed
def test_submit_block_valid_key_should_succeed(ethtester, testlang):
    """Submitting with accounts[0] (presumably the operator key) stores the
    block root and timestamp and advances nextChildBlock by 1000."""
    submitter = testlang.accounts[0]
    assert testlang.root_chain.nextChildBlock() == 1000
    blknum = testlang.submit_block([], submitter)
    block_info = testlang.root_chain.blocks(1000)
    # Stored root must match the child chain's view of the same block.
    assert block_info[0] == testlang.child_chain.get_block(blknum).root
    # Stored timestamp must be the chain head's timestamp.
    assert block_info[1] == ethtester.chain.head_state.timestamp
    assert testlang.root_chain.nextChildBlock() == 2000
def test_submit_block_invalid_key_should_fail(testlang):
    """Submitting with accounts[1] (presumably not the operator) must make
    the transaction revert."""
    submitter = testlang.accounts[1]
    with pytest.raises(TransactionFailed):
        testlang.submit_block([], submitter)
|
"""src URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.views.generic.dates import ArchiveIndexView,DateDetailView
from newapp.views import (AsteroideYearArchiveView,
AsteroideMonthArchiveView,
AsteroideWeekArchiveView,
AsteroideDayArchiveView,
AsteroideTodayArchiveView,
AsteroideDetailView,
)
from newapp.models import Asteroide
# URL routes: date-based archive views over the Asteroide model (keyed on
# its `fecha` date field) plus the admin site.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    #url(r'^(?P<slug>[-\w]+)/$', AsteroideDetailView.as_view(), name='asteroide-detail'),
    # Landing page shows today's asteroids.
    url(r'^$',AsteroideTodayArchiveView.as_view(),name="index"),
    url(r'^archive/$',ArchiveIndexView.as_view(model=Asteroide, date_field="fecha"),name="asteroide_archive"),
    url(r'^(?P<year>[0-9]{4})/$',AsteroideYearArchiveView.as_view(),name="asteroide_year_archive"),
    # month_format='%m' makes the month segment numeric (01-12).
    url(r'^(?P<year>[0-9]{4})/(?P<month>[0-9]+)/$',AsteroideMonthArchiveView.as_view(month_format='%m'),name="archive_month_numeric"),
    url(r'^(?P<year>[0-9]{4})/week/(?P<week>[0-9]+)/$',AsteroideWeekArchiveView.as_view(),name="archive_week"),
    url(r'^(?P<year>[0-9]{4})/(?P<month>[0-9]+)/(?P<day>[0-9]+)/$',AsteroideDayArchiveView.as_view(month_format='%m'),name="archive_day"),
    url(r'^today/$',AsteroideTodayArchiveView.as_view(),name="archive_today"),
    # Detail page for one asteroid (pk) on a given date.
    url(r'^(?P<year>[0-9]{4})/(?P<month>[0-9]+)/(?P<day>[0-9]+)/(?P<pk>[0-9]+)/$',DateDetailView.as_view(model=Asteroide,month_format='%m', date_field="fecha"),name="archive_date_detail"),
]
|
import random
import string

# Pool of characters to sample from (a-z, A-Z).
# BUG fixed: string.letters only exists on Python 2; string.ascii_letters
# is the portable spelling.
field = string.ascii_letters


def getletters():
    """Return one group of 4 distinct random letters."""
    return ''.join(random.sample(field, 4))


def connection():
    """Return a code of four 4-letter groups joined by dashes, e.g. 'AbCd-eFgH-...'."""
    return '-'.join(getletters() for i in range(4))


def generate(n):
    """Yield `n` fresh codes."""
    for i in range(n):
        yield connection()


if __name__ == '__main__':
    # BUG fixed: printing the generator object itself showed
    # '<generator object ...>'; materialize it first.
    print(list(generate(20)))
    # BUG fixed: file.write() needs a string, not a generator.
    with open('/Users/jiawei/python/py/yanzheng.txt', 'a') as f:
        f.write('\n'.join(generate(100)))
|
#!/usr/bin/env python2
import tempfile, subprocess, shutil
from xmlinterface import JobPraser
class Editor(object):
    """Edit the shell commands of a job file interactively through vim."""

    @staticmethod
    def edit_commands(filepath):
        """Extract each shell command of the job into its own temp file,
        open them all in vim split windows, then write the edited commands
        back into the job file.

        :param filepath: path of the job XML file understood by JobPraser.
        """
        job = JobPraser(filepath)
        # Keep the NamedTemporaryFile objects alive: closing them deletes
        # the backing files.
        tempfiles = []
        # Parallel list of file names handed to vim.
        templist = []
        for command in job.get_shell_commands():
            temp = tempfile.NamedTemporaryFile(prefix=filepath, suffix='.job')
            # BUG fixed: the original open() handles were never closed; use a
            # context manager so the data is flushed before vim reads it.
            with open(temp.name, 'w') as fh:
                fh.write(command)
            tempfiles.append(temp)
            templist.append(temp.name)
        # Block until the user closes vim.
        subprocess.call(['vim', '-o'] + templist)
        newcommands = []
        for temp in tempfiles:
            with open(temp.name, 'r') as fh:
                newcommands.append(fh.read())
            temp.close()  # also removes the temp file
        job.replace_shell_commands(newcommands)
        job.update_file()
|
from restic.repo import Repo
from restic.snapshot import Snapshot
from restic.core import version, self_update, generate
from restic.config import restic_bin
from restic.test import test_all
|
# print(run.meta)
# run.kmeans(channels=['FSC-A', 'SSC-A', 'FSC-H', 'FSC-W', 'SSC-H', 'SSC-W', 'FITC-A', 'FITC-H', 'PE-A', 'PE-H', 'PE-Cy7-A', 'PE-Cy7-H', 'UV1-A', 'UV1-H', 'UV2-A', 'UV2-H', 'APC-Cy7-A', 'APC-Cy7-H', 'APC-A', 'APC-H', 'PE-Cy5-A', 'PE-Cy5-H'], logx=False, logy=True, transpose=False, nclusters=5)
# run.plot(x="FSC-A", y="SSC-A", kind="scatter", transpose=False)
# run.plot(x="FSC-A", y="SSC-A", yfunc=_log, kind="scatter", transpose=False)
# freq = run.freq("FSC-A", scope=500)
# run.plot(x="FSC-A", y="SSC-A", yfunc=_log, kind="scatter", transpose=False, save=True)
# plt.show()
# plt.savefig("blah.png")
# print(freq[])
# run.plot(x=freq[0], y=run.dataset["FSC-A"], kind="scatter")
# run.saveplots(run.freq, column="FSC-A", scope=500, rdata=True, delimiter='\\')
# run.saveplots(run.plot, x="FSC-A", y="SSC-A", yfunc=_log, kind="scatter", transpose=False, save=True, description="FSC-A_ON_SSC-A")
# print(run.dataset["SSC-A"])
run.saveplots(run.kmeans, channels=['FSC-A', 'SSC-A', 'FSC-H', 'FSC-W', 'SSC-H', 'SSC-W', 'FITC-A', 'FITC-H', 'PE-A', 'PE-H', 'PE-Cy7-A', 'PE-Cy7-H', 'UV1-A', 'UV1-H', 'UV2-A', 'UV2-H', 'APC-Cy7-A', 'APC-Cy7-H', 'APC-A', 'APC-H', 'PE-Cy5-A', 'PE-Cy5-H'], logx=False, logy=True, transpose=False, nclusters=5, description="kmeanS2", limit_dataset=None)
# run.saveplots(run.plot_3d, x="FSC-A", z="APC-A", y="FITC-A", yfunc=_log, kind="scatter", transpose=False, save=True, description="FITC")
# run.limiter(channels=["SSC-A", "FSC-A"], xmax=2000)
# run.plot(x="FSC-A", y="SSC-A", yfunc=_log, kind="scatter")
# run.saveplots(run.limiter, channels=["FSC-A", "SSC-A"], xmax=25000, save=True, description="limite") |
from pywinauto.application import Application
from PIL import Image
from pywinauto import win32structures
import os
class imgproc():
    """Capture screenshots of a process's main window and its (grand)child
    controls, saving PNGs under <cwd>\\temp\\."""

    def __init__(self):
        # Base directory for the temp\*.png output files.
        self.nowdir = os.getcwd()

    def linkprocess(self):
        # NOTE(review): this method was missing although __main__ calls it
        # and capture() reads self.dlg. Attaching pywinauto to self.pid and
        # taking the top window is the presumed intent -- confirm.
        self.app = Application().connect(process=self.pid)
        self.dlg = self.app.top_window()

    def capture(self):
        """Save a screenshot of the main window and every descendant control.

        BUG fixed: this method was declared as capture(cls) while its body
        used `self`, which raised NameError on every call.
        """
        im = self.dlg.capture_as_image()
        im.save("{}\\temp\\main.png".format(self.nowdir))
        i = 0
        for c in self.dlg.children():
            i = i + 1
            im = c.capture_as_image()
            # BUG fixed: the None check used to run AFTER im.save(), so a
            # failed capture crashed before the guard. Check first, and use
            # `is None` for the identity test.
            if im is None:
                continue
            im.save("{}\\temp\\{}.png".format(self.nowdir, i))
            for d in c.children():
                try:
                    i = i + 1
                    im = d.capture_as_image()
                    if im is None:
                        continue
                    im.save("{}\\temp\\{}.png".format(self.nowdir, i))
                    print(d.class_name())
                    for e in d.children():
                        i = i + 1
                        im = e.capture_as_image()
                        print(type(e))
                        if im is None:
                            continue
                        im.save("{}\\temp\\{}.png".format(self.nowdir, i))
                except:
                    # Some controls raise on capture; skip them best-effort.
                    continue

    def getpid(self, processname):
        """Find the pid of `processname` via `tasklist` CSV output (Windows).

        Stores the result in self.pid.
        """
        proc = os.popen('tasklist /NH /FO "csv" /FI "IMAGENAME eq {}"'.format(processname))
        procstrs = proc.read()
        procl = procstrs.splitlines()
        for l in procl:
            ll = l.split(",")
            # CSV fields are double-quoted; parse the pid explicitly instead
            # of eval()-ing external command output.
            self.pid = int(ll[1].strip('"'))
if __name__ == '__main__':
    cap = imgproc()
    # Locate the target process by executable name (sets cap.pid).
    cap.getpid("Fysw.atd")
    #cap.getpid("taskmgr.exe")
    # NOTE(review): linkprocess() presumably attaches pywinauto to cap.pid
    # and sets cap.dlg -- verify imgproc actually defines it.
    cap.linkprocess()
    cap.capture()
|
import random
from random import choice
class Ability:
    """A named ability that deals a random amount of damage per attack."""

    def __init__(self, name, attackStrength):
        # Public attributes, read directly by callers.
        self.name = name
        self.attackStrength = attackStrength

    def attack(self):
        """Return a random damage value between 0 and attackStrength, inclusive."""
        return random.randint(0, self.attackStrength)
if __name__ == "__main__":
    #if you run this file from the terminal
    #this block is executed
    # Demo: build an ability and roll a single attack.
    ability = Ability("Debugging Ability", 20)
    print(ability.name)
    print(ability.attack())
|
#!/usr/bin/env python
"""
@author: Jean-Lou Dupont
"""
__author__ = "Jean-Lou Dupont"
__email = "python (at) jldupont.com"
__fileid = "$Id$"
import os
import sys
from pyjld.os import safe_mkdir, copyFiles, copyUpdatedFiles, safe_copytree
from pyjld.builder import copyEggs, makeEggReleaseDir
from pyjld.builder import findPackage, pprintFiles, keepBaseNames
# pkg_path/trunk/src
# pkg_path/trunk/dist
# pkg_path/tags/$version/eggs
# pkg_path/tags/$version/docs
## add the current path to the system path ##
## --
# NOTE: Python 2 script (print statements, pyjld tooling).
cwd=os.getcwd()
src=cwd+"/src"
sys.path.append(src)
#print sys.path

# Discover the package under src/ and import it to read its version.
pkg_path, ns, package = findPackage()
this_module_name = "%s.%s" % (ns, package)
this_package = __import__( this_module_name )
this_module = getattr(this_package, package)
version = this_module.__version__

# Ensure tags/<version>/ release directory exists.
tags_dir = os.path.join(pkg_path, 'tags')
existed, release_path = makeEggReleaseDir( version, tags_dir )
if not existed:
    print "*** created eggs release directory [%s]" % release_path

print "*** copying eggs to release directory"
#############################################
eggs_path = os.path.join( pkg_path, 'trunk', 'dist' )
files = copyUpdatedFiles(eggs_path, release_path)
files = keepBaseNames(files)
pprintFiles(files, " copied [$src] to release directory" )

print "*** generating documentation"
####################################
docs_source = os.path.join( pkg_path, 'trunk', 'docs', 'source' )
docs_html = os.path.join( pkg_path, 'trunk', 'docs', 'html' )
import sphinx
sphinx.main( ['sphinx', docs_source, docs_html] )

print "*** copying documentation to release directory"
######################################################
docs_html_release_path = os.path.join( pkg_path, 'tags', version)
safe_copytree(docs_html, docs_html_release_path, skip_dirs=['.svn',])
|
# coding: utf-8
####################################
#RSSを取得する例
####################################
#.NET Frameworkのクラスライブラリを使う宣言
# Use the .NET Framework class libraries (IronPython).
import clr

# XML support.
clr.AddReference("System.Xml")
from System.Xml import *

# Fetch the RSS feed.
doc = XmlDocument()
doc.Load("http://codezine.jp/rss/new/20/index.xml")

# ADO.NET data support.
clr.AddReference("System.Data")
from System.Data import *

# BUG fixed: `table` was used below without ever being created, so the loop
# raised NameError. Build a DataTable with the three columns the loop fills.
table = DataTable("rss")
table.Columns.Add("title")
table.Columns.Add("url")
table.Columns.Add("desc")

# Parse each feed item into a table row.
items = doc.SelectNodes("/rss/channel/item")
for item in items:
    row = table.NewRow()
    row["title"] = item.SelectSingleNode("title").InnerText
    row["url"] = item.SelectSingleNode("link").InnerText
    row["desc"] = item.SelectSingleNode("description").InnerText
    table.Rows.Add(row)
|
'''
Once decided which asset, if the asset is worth it and how much to buy/sell,
Theses strategies decide the best way to implement the action
'''
import pandas as pd
from src.functions.trends import moving_average
def twap(df):
    """Time weighted average price.

    Intended effect: reduce impact on the market by splitting an order into
    chunks priced near the 28-period average.

    Computes the per-row mean of close/open/low/high ('daily_twap') and its
    28-period moving average ('twap').

    :param df: DataFrame with 'close', 'open', 'low', 'high' columns.
    :return: DataFrame with columns 'daily_twap' and 'twap'.
    """
    daily = df[['close', 'open', 'low', 'high']].mean(axis=1)
    return pd.DataFrame({
        'daily_twap': daily,
        'twap': moving_average(daily, 28, None),
    })
def vwap(df):
    # TODO: volume weighted average price -- not implemented yet.
    pass
from karbar.models import *
class Madadkar(MyUser):
    """User model extending MyUser with an optional employment date."""
    # Date the madadkar was employed; optional.
    employment_date = models.DateField(null=True, blank=True)
class Receipt(models.Model):
    """A receipt linking a Madadkar with a Hamyar and a Madadju, carrying
    send/receive dates and free-text content."""
    madadkar = models.ForeignKey(Madadkar, on_delete=models.CASCADE)
    hamyar = models.ForeignKey('hamyar.Hamyar', on_delete=models.CASCADE)
    madadju = models.ForeignKey('madadju.Madadju', on_delete=models.CASCADE)
    date_receive = models.DateField()
    date_send = models.DateField()
    content = models.CharField(max_length=500)
|
from .package_analyzer import PackageAnalyzer
from xml.etree.cElementTree import parse
from xml.etree.cElementTree import ParseError
import os
import logging
class ManifestXmlAnalyzer(PackageAnalyzer):
    """
    Analyzer plug-in that analyzes manifest.xml (rosbuild) package files.
    """

    def analyze_file(self, path: str, dependencies: dict) -> dict:
        """
        Analyzes a manifest.xml file.

        :param path: Path to the manifest.xml file.
        :param dependencies: Dictionary (key: package name, value: list of dependency names).
        :return: updated dependencies-dictionary.
        """
        try:
            # `with` guarantees the handle is closed (the original leaked it
            # on the success path).
            with open(path, "r") as file:
                tree = parse(file)
        except ParseError:
            logging.warning("[ManifestXmlAnalyzer]: Could not parse " + path + "; omitting file.")
            return dependencies
        root = tree.getroot()
        # Package name is the directory containing manifest.xml.
        packagename = os.path.basename(os.path.dirname(path))
        for tag in self._settings["manifest_xml_dependency_tags"]:
            # BUG fixed: the original rebound the loop variable `element`,
            # so after the first tag all findall() calls searched a child
            # node instead of the document root.
            for dep in root.findall(tag):
                self.add_dependency(packagename, dep.attrib["package"], dependencies)
        # BUG fixed: the success path previously returned None despite the
        # documented return value.
        return dependencies

    def _analyze(self, path: str) -> dict:
        """Analyze every manifest.xml found under `path`."""
        packages = dict()
        filellist = self.search_files(path, "manifest.xml")
        for filename in filellist:
            logging.info("[ManifestXmlAnalyzer]: Analyzing " + filename)
            self.analyze_file(filename, packages)
        return packages
|
import pwd
import grp
import os
def chown(path, user, recursive=True):
    """chown `path` (and, when recursive, everything under it) to `user`.

    The group is looked up under the same name as the user (the conventional
    per-user group).

    :param path: file or directory to re-own.
    :param user: user (and group) name.
    :param recursive: also walk directories below `path`.
    """
    uid = pwd.getpwnam(user).pw_uid
    gid = grp.getgrnam(user).gr_gid
    # The top-level path is always chowned. (The original repeated this call
    # in an else-branch, which was redundant.)
    os.chown(path, uid, gid)
    if recursive:
        for root, dirs, files in os.walk(path):
            for name in dirs + files:
                os.chown(os.path.join(root, name), uid, gid)
# Give the `consen` user ownership of the uwsgi config tree (needs root).
chown('/etc/uwsgi/', 'consen')
|
import math

# Read two integers and print their least common multiple.
l, u = list(map(int, input().split()))
# lcm(a, b) = a*b // gcd(a, b); replaces the original O(lcm) counting loop
# that incremented by one until it found a common multiple.
res = (l * u) // math.gcd(l, u)
print(res)
|
# BUG fixed: the first line assigned to `aa`, leaving `a` undefined and
# crashing the next line with NameError.
a = "a"
b = a + "b"  # string concatenation, b == "ab"
c = a * 3    # string repetition, c == "aaa"

# File:   exp_demo05.py
# Author: Kaiching Chang
# Time:   July, 2014
|
import os
import torchvision as tv
import numpy as np
from PIL import Image
def get_dataset(args, transform_train, transform_test):
    """Build the (train, test) CIFAR-10 dataset pair.

    When args.validation_exp == "True" (note: string compare), the returned
    "test" set is a validation split carved out of the training data;
    otherwise the real CIFAR-10 test set is used.
    """
    if args.validation_exp == "True":
        # Load once to obtain the labels used for a class-balanced split.
        temp_dataset = Cifar10Train(args, train=True, transform=transform_train, download = args.download)
        train_indexes, val_indexes = train_val_split(args, temp_dataset.train_labels)
        cifar_train = Cifar10Train(args, train=True, transform=transform_train, sample_indexes = train_indexes)
        testset = Cifar10Train(args, train=True, transform=transform_test, sample_indexes = val_indexes)
    else:
        cifar_train = Cifar10Train(args, train=True, transform=transform_train, download = args.download)
        testset = tv.datasets.CIFAR10(root='./data', train=False, download=False, transform=transform_test)
    return cifar_train, testset
def train_val_split(args, train_val):
    """Split sample indices into shuffled train/validation lists.

    Takes args.val_samples / args.num_classes samples per class for the
    validation set; the remainder goes to training. Reproducible through
    args.seed_dataset.
    """
    np.random.seed(args.seed_dataset)
    labels = np.array(train_val)
    train_indexes = []
    val_indexes = []
    per_class_val = int(args.val_samples / args.num_classes)
    for class_id in range(args.num_classes):
        class_idx = np.where(labels == class_id)[0]
        np.random.shuffle(class_idx)
        val_indexes.extend(class_idx[:per_class_val])
        train_indexes.extend(class_idx[per_class_val:])
    np.random.shuffle(train_indexes)
    np.random.shuffle(val_indexes)
    return train_indexes, val_indexes
class Cifar10Train(tv.datasets.CIFAR10):
    """CIFAR-10 wrapper that can be restricted to a subset of indices and
    keeps per-sample tracking arrays.

    NOTE(review): relies on the legacy torchvision attributes
    `train_data` / `train_labels` -- confirm the pinned torchvision version
    still exposes them.
    """

    def __init__(self, args, train=True, transform=None, target_transform=None, sample_indexes = None, download=False):
        super(Cifar10Train, self).__init__(args.train_root, train=train, transform=transform, target_transform=target_transform, download=download)
        self.root = os.path.expanduser(args.train_root)
        self.transform = transform
        self.target_transform = target_transform
        self.args = args
        if sample_indexes is not None:
            # Restrict data/labels to the given subset (e.g. a train split).
            self.train_data = self.train_data[sample_indexes]
            self.train_labels = np.array(self.train_labels)[sample_indexes]
        self.num_classes = self.args.num_classes
        self.data = self.train_data
        # BUG fixed: np.long was removed in NumPy 1.24; np.int64 is the
        # equivalent integer dtype.
        self.labels = np.asarray(self.train_labels, dtype=np.int64)
        # Per-sample tracking state; -1 / ~0 sentinel values mean "unseen".
        self.train_samples_idx = []
        self.train_probs = np.ones(len(self.labels))*(-1)
        self.avg_probs = np.ones(len(self.labels))*(-1)
        self.times_seen = np.ones(len(self.labels))*1e-6

    def __getitem__(self, index):
        """Return (transformed image, label, original index)."""
        img, labels = self.data[index], self.labels[index]
        img = Image.fromarray(img)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            labels = self.target_transform(labels)
        return img, labels, index
import unittest
from katas.kyu_6.dubstep import song_decoder
class SongDecoderTestCase(unittest.TestCase):
    """song_decoder should strip 'WUB' markers and normalize the spacing."""

    def test_equals(self):
        self.assertEqual(song_decoder('AWUBBWUBC'), 'A B C')

    def test_equals_2(self):
        # Runs of consecutive WUBs collapse to a single space.
        self.assertEqual(song_decoder('AWUBWUBWUBBWUBWUBWUBC'), 'A B C')

    def test_equals_3(self):
        # Leading and trailing WUBs are stripped entirely.
        self.assertEqual(song_decoder('WUBAWUBBWUBCWUB'), 'A B C')
|
# Generated by Django 2.1 on 2018-08-12 20:58
import colossus.storage
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the MailingList and
    # SubscriberImport models (tables colossus_mailing_lists and
    # colossus_subscribers_imports).

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='MailingList',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
                ('name', models.CharField(max_length=100, verbose_name='name')),
                ('slug', models.SlugField(max_length=100, unique=True, verbose_name='list short URL')),
                ('subscribers_count', models.PositiveIntegerField(default=0, verbose_name='subscribers')),
                ('open_rate', models.FloatField(default=0.0, verbose_name='opens')),
                ('click_rate', models.FloatField(default=0.0, verbose_name='clicks')),
                ('date_created', models.DateTimeField(auto_now_add=True, verbose_name='created')),
                ('contact_email_address', models.EmailField(blank=True, max_length=254, verbose_name='contact email address')),
                ('website_url', models.URLField(blank=True, help_text='Where did people opt in to this list?', verbose_name='website URL')),
                ('campaign_default_from_name', models.CharField(blank=True, max_length=100, verbose_name='default from name')),
                ('campaign_default_from_email', models.EmailField(blank=True, max_length=254, verbose_name='default from email address')),
                ('campaign_default_email_subject', models.CharField(blank=True, max_length=150, verbose_name='default subject')),
                ('enable_recaptcha', models.BooleanField(default=False, verbose_name='enable reCAPTCHA')),
                ('list_manager', models.EmailField(blank=True, help_text='Email address to handle subscribe/unsubscribe requests.It can be a real email address or an automated route to handle callbacks/webhooks.', max_length=254, verbose_name='list manager')),
                # Per-list SMTP overrides.
                ('smtp_host', models.CharField(blank=True, max_length=200, verbose_name='host')),
                ('smtp_port', models.PositiveIntegerField(blank=True, null=True, verbose_name='port')),
                ('smtp_username', models.CharField(blank=True, max_length=200, verbose_name='username')),
                ('smtp_password', models.CharField(blank=True, max_length=200, verbose_name='password')),
                ('smtp_use_tls', models.BooleanField(default=True, verbose_name='use TLS')),
                ('smtp_use_ssl', models.BooleanField(default=False, verbose_name='use SSL')),
                ('smtp_timeout', models.PositiveIntegerField(blank=True, null=True, verbose_name='timeout')),
                ('smtp_ssl_keyfile', models.TextField(blank=True, verbose_name='SSL keyfile')),
                ('smtp_ssl_certfile', models.TextField(blank=True, verbose_name='SSL certfile')),
                ('forms_custom_css', models.TextField(blank=True, help_text='Custom CSS will be applied to all subscription form pages.', verbose_name='custom CSS')),
                ('forms_custom_header', models.TextField(blank=True, help_text='Header displayed on all subscription form pages. Accepts HTML.If empty, the name of the mailing list will be used.', verbose_name='custom header')),
            ],
            options={
                'verbose_name': 'list',
                'verbose_name_plural': 'lists',
                'db_table': 'colossus_mailing_lists',
            },
        ),
        migrations.CreateModel(
            name='SubscriberImport',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('upload_date', models.DateTimeField(auto_now_add=True, verbose_name='upload date')),
                ('file', models.FileField(storage=colossus.storage.PrivateMediaStorage(), upload_to='uploads', verbose_name='CSV file')),
                ('columns_mapping', models.TextField(blank=True, verbose_name='columns mapping')),
                ('subscriber_status', models.PositiveSmallIntegerField(choices=[(1, 'Pending'), (2, 'Subscribed'), (3, 'Unsubscribed'), (4, 'Cleaned')], default=2, verbose_name='assign status to subscriber')),
                ('status', models.PositiveSmallIntegerField(choices=[(1, 'Pending'), (2, 'Queued'), (3, 'Importing'), (4, 'Completed'), (5, 'Errored'), (6, 'Canceled')], default=1, verbose_name='status')),
                ('size', models.PositiveIntegerField(default=0, verbose_name='size')),
                ('strategy', models.PositiveSmallIntegerField(choices=[(1, 'Create new subscribers only'), (2, 'Update existing subscribers only'), (3, 'Update or create subscribers')], default=3, help_text='The email address will be used as the main subscriber identifier to determine if they are already on the list.', verbose_name='import strategy')),
                ('mailing_list', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='subscribers_imports', to='lists.MailingList', verbose_name='mailing list')),
                ('user', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='subscribers_imports', to=settings.AUTH_USER_MODEL, verbose_name='user')),
            ],
            options={
                'verbose_name': 'subscribers import',
                'verbose_name_plural': 'subscribers imports',
                'db_table': 'colossus_subscribers_imports',
            },
        ),
    ]
|
from collections import defaultdict
from operator import itemgetter
def solution(genres, plays):
    """Return the ids of the (up to) two most-played songs per genre,
    genres ordered by total play count descending.

    Within a genre, songs are ordered by plays descending with ties broken
    by lower song id (via the (-play, song_id) sort key).
    """
    count_list = defaultdict(int)
    total_list = defaultdict(list)
    # enumerate replaces the hand-rolled counter() generator used before.
    for song_id, (genre, play) in enumerate(zip(genres, plays)):
        count_list[genre] += play
        total_list[genre].append((-play, song_id))
    genres_in_order = sorted(count_list.keys(), key=lambda g: count_list[g], reverse=True)
    # (Removed a leftover debug print of count_list.keys().)
    best_songs = []
    for genre in genres_in_order:
        # Sorting the (-play, song_id) pairs puts the most-played first.
        best_songs.extend(song_id for _, song_id in sorted(total_list[genre])[:2])
    return best_songs
def solution2(genres, plays):
    """Same contract as solution(): top-2 song ids per genre, genres sorted
    by total plays descending, songs by plays descending then id ascending."""
    play_count_by_genre = defaultdict(int)
    songs_in_genre = defaultdict(list)
    for song_id, (genre, play) in enumerate(zip(genres, plays)):
        play_count_by_genre[genre] += play
        songs_in_genre[genre].append((-play, song_id))
    ordered_genres = sorted(play_count_by_genre, key=play_count_by_genre.get, reverse=True)
    answer = []
    for genre in ordered_genres:
        top_pairs = sorted(songs_in_genre[genre])[:2]
        answer.extend(song_id for _, song_id in top_pairs)
    return answer
# def solution3(generes, plays):
# total = []
# for song_id, genre, play in zip(counter(), genres, plays):
# total.append([song_id, genre, play])
# print(sorted(total, key=itemgetter()))
def counter():
    """Yield 0, 1, 2, ... forever (an infinite index generator)."""
    n = -1
    while True:
        n += 1
        yield n
# Sample data used to exercise the solutions above.
genres = ["classic", "pop", "classic", "classic", "pop"]
plays = [500, 600, 150, 800, 2500]
# print(counter())
# print(solution(genres, plays))
print(solution2(genres, plays))
# solution3(genres, plays)
|
#!/bin/python3
import os
import sys
import urllib.request
import urllib.parse
import json
import time
import datetime
sys.path.append(os.path.join(os.path.dirname(__file__), 'djangorm'))
import djangorm
from db.models import Cache
threshold = datetime.datetime.now() + datetime.timedelta(days=-7)
def search_online(pkgname):
    """Query pkgstats.archlinux.de for `pkgname`, refresh the local cache,
    and return the updated Cache row."""
    url = 'https://pkgstats.archlinux.de/api/packages/%s/series' % pkgname
    #params = {'startMonth': 201901, 'endMonth': 201901}
    #url = url + '?' + urllib.parse.urlencode(params)
    response = urllib.request.urlopen(url).read().decode('utf-8')
    response = json.loads(response)['packagePopularities']
    if len(response) == 0:
        # Unknown package: store zero count, borrowing the total sample
        # count from a package that is always installed (pacman).
        update_cache(pkgname, 0, search('pacman').total)
    else:
        # Use the first entry of the series -- presumably the relevant one;
        # confirm against the API's ordering.
        response = response[0]
        update_cache(response['name'], response['count'], response['samples'])
    cache = Cache.objects.get(pkgname=pkgname)
    return cache
def update_cache(pkgname, count, total):
    """Insert or update the cached (count, total) row for `pkgname`."""
    try:
        cache = Cache.objects.get(pkgname=pkgname)
        cache.count = count
        cache.total = total
    except:
        # NOTE(review): the bare except also swallows real DB errors;
        # narrowing to Cache.DoesNotExist would be safer.
        cache = Cache(pkgname=pkgname, count=count, total=total)
    cache.save()
def list_cache():
    """Debug helper: dump every cached row to stdout."""
    for cache in Cache.objects.all():
        print(djangorm.object_to_dict(cache))
def search(pkgname, force_online=False):
    """Return cached stats for `pkgname`, fetching online when the cache is
    missing, older than `threshold` (7 days), or force_online is set."""
    try:
        # The assert deliberately routes force_online through the except
        # path, so a forced refresh and a cache miss are handled the same.
        assert not force_online
        cache = Cache.objects.get(pkgname=pkgname, timestamp__gte=threshold)
    except:
        cache = search_online(pkgname)
    return cache
if __name__ == '__main__':
    djangorm.migrate()
    # BUG fixed: sys.argv[1] was read before the length check, so running
    # with no argument crashed with IndexError instead of printing usage.
    if len(sys.argv) < 2:
        print('Usage:\tpython pkgstats.py [pkgname]')
        sys.exit()
    pkgname = sys.argv[1]
    #list_cache()
    result = search(pkgname, True)
    if result:
        print('%d / %d = %.2f%%' % (result.count, result.total, 100 * result.count / result.total))
|
class PID:
    """Discrete PID controller running at a fixed update rate.

    get_output() is expected to be called once per 1/rate seconds; the
    timestep dt is derived from the rate accordingly.
    """

    def __init__(self, Kp_in=-1.0, Ki_in=-1.0, Kd_in=-1.0, rate_in=-1.0):
        # Update rate (calls per second) and the derived timestep.
        # NOTE(review): the -1.0 defaults yield a negative dt and a division
        # by zero for rate_in == 0 -- callers are expected to pass real
        # values; confirm whether defaults should be validated.
        self.rate = rate_in
        self.dt = 1.0 / self.rate
        # PID gains.
        self.Kp = Kp_in
        self.Ki = Ki_in
        self.Kd = Kd_in
        # Controller state carried between calls.
        self.integral = 0.0
        self.previous_error = 0.0

    def set_constants(self, Kp_in, Ki_in, Kd_in):
        """Replace the three PID gains."""
        self.Kp = Kp_in
        self.Ki = Ki_in
        self.Kd = Kd_in

    def remove_buildup(self):
        """Clear integral windup and the stored previous error."""
        self.integral = 0.0
        self.previous_error = 0.0

    def get_output(self, setpoint, current_output):
        """Run one control step and return the actuator output.

        :param setpoint: desired value.
        :param current_output: measured value.
        :return: Kp*e + Ki*integral(e) + Kd*de/dt for e = setpoint - measured.
        """
        error = setpoint - current_output
        self.integral = self.integral + error * self.dt
        derivative = (error - self.previous_error) / self.dt
        # (Removed a dead `output = 0.0` pre-assignment.)
        output = self.Kp * error + self.Ki * self.integral + self.Kd * derivative
        self.previous_error = error
        return output
|
# script to convert yaml data file into json file
# original yaml map data comes from https://github.com/whoenig/libMultiRobotPlanning
import yaml
import json
import os
def convert_yaml_into_json(yaml_file_path, json_file_path):
    """Convert a libMultiRobotPlanning yaml map file into the json format
    used here (starts/goals/obstacles/dimX/dimY/numberofagents).

    :param yaml_file_path: input .yaml map path.
    :param json_file_path: output .json path (overwritten).
    """
    with open(yaml_file_path) as fp:
        # safe_load: yaml.load without an explicit Loader is deprecated and
        # the full loader can construct arbitrary Python objects.
        yaml_map = yaml.safe_load(fp)
    json_map = {
        'starts': [],
        'goals': [],
        'obstacles': [],
        'numberofagents': 0,
    }
    for agent in yaml_map['agents']:
        json_map['numberofagents'] += 1
        json_map['goals'].append(agent['goal'])
        json_map['starts'].append(agent['start'])
    json_map['dimX'] = yaml_map['map']['dimensions'][0]
    json_map['dimY'] = yaml_map['map']['dimensions'][1]
    for o in yaml_map['map']['obstacles']:
        json_map['obstacles'].append(o)
    with open(json_file_path, 'w') as fp:
        json.dump(json_map, fp)
if __name__ == "__main__":
    # Convert every .yaml benchmark map in the directory into a sibling .json.
    directory = os.fsencode('benchmark/8x8_obst12')
    for file in os.listdir(directory):
        filename = os.fsdecode(file)
        if filename.endswith(".yaml"):
            print(os.path.join(os.fsdecode(directory), os.fsdecode(filename)))
            newfilename = filename.replace('.yaml','.json')
            yaml_path = os.path.join(os.fsdecode(directory), os.fsdecode(filename))
            json_path = os.path.join(os.fsdecode(directory), os.fsdecode(newfilename))
            convert_yaml_into_json(yaml_path,json_path)
|
import cv2

# Detect frontal faces in one image with a Haar cascade and outline them.
classificador = cv2.CascadeClassifier('cascades/haarcascade_frontalface_default.xml')
imagem = cv2.imread('pessoas//mprj-01.JPG')
# The cascade classifier operates on grayscale input.
imagemCinza = cv2.cvtColor(imagem, cv2.COLOR_BGR2GRAY)
facesDetectadas = classificador.detectMultiScale(imagemCinza, scaleFactor=1.05, minNeighbors=11, minSize=(10,10))
print(len(facesDetectadas))
print(facesDetectadas)
# Each detection is (x, y, width, height); draw a red 1px rectangle.
for (x, y, l, a) in facesDetectadas:
    print(x, y, l, a)
    cv2.rectangle(imagem, (x, y), (x + l, y + a), (0, 0, 255), 1)
cv2.imshow("Faces detectadas", imagem)
cv2.waitKey()
|
def bubblesort(array):
    """Sort `array` in place from lowest to highest using bubblesort.

    :param array: list to sort (mutated in place).
    :return: the same list object, now sorted.
    """
    n = len(array)
    for sweep in range(n):
        # Each sweep bubbles the largest remaining value to the end, so the
        # final `sweep` elements are already in their places.
        for pos in range(n - sweep - 1):
            if array[pos] > array[pos + 1]:
                array[pos], array[pos + 1] = array[pos + 1], array[pos]
    return array
if __name__ == '__main__':
    # use example from blog for array list
    unsorted_array = [1, 4, 24, 21, 53, 102, 6, 42, 16, 99]
    # BUG fixed: bubblesort sorts in place, so the original printed the
    # already-sorted list under the "Unsorted array" label. Sort a copy so
    # the before/after comparison is meaningful.
    sorted_array = bubblesort(list(unsorted_array))
    # print to console for comparison
    print('Unsorted array: ', unsorted_array)
    print('Sorted array: ', sorted_array)
|
import maya.standalone
import os
import sys
# Start Maya in batch mode
maya.standalone.initialize(name='python')
from maya import cmds
# Skip PyMEL's MEL initialization so the headless interpreter starts
# faster (must be set before pymel.core is imported).
os.environ["PYMEL_SKIP_MEL_INIT"] = "1"
#print os.environ["PYMEL_SKIP_MEL_INIT"]
import pymel.core as pm
import shutil
def load_script():
    # Open a scene passed on the command line, restore the selection, and
    # run the V-Ray rig publish. Expected argv layout (set by the caller):
    #   sys.argv[1] - scene file to open
    #   sys.argv[2] - comma-separated node names to select (trailing comma)
    #   sys.argv[3] - Maya workspace (project) directory
    print sys.argv[3]
    cmds.workspace(sys.argv[3], o=True)
    cmds.file(sys.argv[1], force=True, open=True, loadAllReferences=True)
    cmds.evalDeferred('print cmds.ls(type="reference")')
    sel_list = sys.argv[2].split(",")
    # The selection string ends with a comma, so drop the empty last item.
    del sel_list[-1]
    print sel_list
    cmds.select(sel_list)
    # Re-import the publish helpers fresh so edits are picked up without
    # restarting the batch interpreter (Python 2 reload()).
    from . import fileUtils as fu
    reload(fu)
    fu.publish_vray_rig()
    # Keep the console window open (Windows-only) so output can be read.
    os.system("pause")
def testRun():
    # Smoke test: open the scene from argv[1] in batch mode and dump the
    # DAG to verify that headless file loading works.
    print "file tested"
    cmds.file(sys.argv[1], force=True, open=True)
    print sys.argv[1]
    cmds.evalDeferred('print cmds.ls(dag=True)')
    # Keep the console window open (Windows-only pause).
    os.system("pause")
def publishCurrentFile():
    # Export the selection of a rig scene to the publish directory and
    # copy non-versioned .ma/.mb duplicates into the "noVray" directory.
    # Scene path and selection are passed through environment variables
    # by the launching process.
    file_path = os.environ["MAYA_PUB_FILE"]
    sel = os.environ["MAYA_PUB_SEL"]
    # Open the file with the file command
    cmds.file(file_path, force=True, open=True)
    #file_path = cmds.file(sceneName=True, q=True)
    #now = datetime.datetime.now()
    dir, filename = os.path.split(file_path)
    # Walk up the directory tree; layout assumed to be
    # .../Characters/dev/<asset>/rig/<scenes>/<file> -- TODO confirm.
    rig_dir = os.path.dirname(dir)
    asset_dir = os.path.dirname(rig_dir)
    dev_dir = os.path.dirname(asset_dir)
    characters_dir = os.path.dirname(dev_dir)
    # rig, character, dev, "Characters"
    # Filenames follow the pattern <basename>.<version>.<ext>; this
    # raises ValueError if the name has a different number of dots.
    basename, ver, ext = filename.split(".")
    non_ver = ".".join((basename, ext))
    non_ver_mb = ".".join((basename, "mb"))
    version_dir = os.path.join(rig_dir, "publish")
    nonvray_dir = os.path.join(characters_dir, "noVray")
    # NOTE(review): vray_dir is computed but never used in this function.
    vray_dir = os.path.join(characters_dir, "vray")
    #sel = pm.ls(sl=True)
    if not sel:
        return
    pm.select(sel)
    # NOTE(review): both exports pass the same filename; presumably
    # exportSelected adjusts the extension for mayaBinary -- verify.
    exp_ma = pm.exportSelected(os.path.join(version_dir, filename), constructionHistory=True, f=True)
    exp_mb = pm.exportSelected(os.path.join(version_dir, filename), type="mayaBinary", constructionHistory=True, f=True)
    print ("Exported: %s, %s" % (exp_ma, exp_mb))
    # Non-versioned copies for downstream consumers.
    shutil.copy2(exp_ma, os.path.join(nonvray_dir,non_ver))
    shutil.copy2(exp_mb, os.path.join(nonvray_dir,non_ver_mb))
    # NOTE(review): other code in this file checks >= 2016.0 -- confirm
    # which version threshold is intended.
    if float(cmds.about(v=True)) >= 2017.0:
        maya.standalone.uninitialize()
def publish_vray_rig():
    """Export the selected referenced V-Ray rig to the publish and vray dirs.

    Reads the scene path and selection from the MAYA_PUB_FILE /
    MAYA_PUB_SEL environment variables, imports the reference the
    selection belongs to (stripping namespaces), then exports the
    selection as a versioned .ma into <rig>/publish and copies it,
    renamed after the referenced scene, into <Characters>/vray.
    """
    file_path = os.environ["MAYA_PUB_FILE"]
    sel = os.environ["MAYA_PUB_SEL"]
    # Open the file with the file command
    cmds.file(file_path, force=True, open=True)
    fdir, filename = os.path.split(file_path)
    # Walk up the directory tree; layout assumed to be
    # .../Characters/dev/<asset>/rig/<scenes>/<file> -- TODO confirm.
    rig_dir = os.path.dirname(fdir)
    asset_dir = os.path.dirname(rig_dir)
    dev_dir = os.path.dirname(asset_dir)
    characters_dir = os.path.dirname(dev_dir)
    # Filenames follow the pattern <basename>.<version>.<ext>; this
    # raises ValueError on any other shape, which doubles as validation.
    basename, ver, ext = filename.split(".")
    version_dir = os.path.join(rig_dir, "publish")
    vray_dir = os.path.join(characters_dir, "vray")
    if not sel:
        cmds.warning("Nothing selected")
        return
    pm.select(sel)
    sel = pm.ls(sl=True)
    # Find the reference the selection belongs to and import its contents
    # so the export is self-contained (namespaces removed).
    reference = False
    for s in sel:
        try:
            ref_node = pm.referenceQuery(s, referenceNode=True)
            ref_scene = pm.referenceQuery(s, filename=True, shortName=True)
            pm.FileReference(ref_node).importContents(removeNamespace=True)
            rbasename = ref_scene.split(".")[0]
            reference = True
            break
        except Exception:
            # referenceQuery raises on non-referenced nodes; keep scanning.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
            continue
    if not reference:
        cmds.error("Reference not found")
        return
    pm.select(sel)
    exp_ma = pm.exportSelected(os.path.join(version_dir, filename), type="mayaAscii", constructionHistory=True, f=True)
    # Publish a copy named after the referenced scene into the vray dir.
    shutil.copy2(exp_ma, os.path.join(vray_dir, rbasename + ".ma"))
def assign_default_shader(file_path):
    """Open *file_path* in batch Maya and bind every mesh to the default shader.

    Saves the scene in place when done and shuts the standalone
    interpreter back down on Maya 2016+.
    """
    # Start Maya in batch mode
    maya.standalone.initialize(name='python')
    # BUG FIX: the original used `cmds = __import__('maya.cmds')`, which
    # returns the top-level `maya` package rather than the cmds submodule,
    # so every subsequent cmds.* call raised AttributeError. Import the
    # submodule explicitly instead.
    from maya import cmds
    # Open the file with the file command
    cmds.file(file_path, force=True, open=True)
    # Get all meshes in the scene
    meshes = cmds.ls(type="mesh", long=True)
    for mesh in meshes:
        # Assign the default shader to the mesh by adding the mesh to the
        # default shader set.
        cmds.sets(mesh, edit=True, forceElement='initialShadingGroup')
    # Save the file
    cmds.file(save=True, force=True)
    # Starting Maya 2016, we have to call uninitialize to properly shutdown
    if float(cmds.about(v=True)) >= 2016.0:
        maya.standalone.uninitialize()
|
# Generated by Django 3.0.8 on 2020-08-17 19:33
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds a nullable `status` CharField to the
    # Order model with Polish display choices ("Paid", "To ship",
    # "Delivered"). Nullable so existing rows migrate without a default.
    dependencies = [
        ('fhstore', '0003_auto_20200802_1823'),
    ]
    operations = [
        migrations.AddField(
            model_name='order',
            name='status',
            field=models.CharField(choices=[('Opłacone', 'Opłacone'), ('Do wysyłki', 'Do wysyłki'), ('Dostarczone', 'Dostarczone')], max_length=100, null=True),
        ),
    ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.