blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3090b50b1b2dbeb3ad015b21bfc548109034625b
|
5e5758c4acb2ae534508fb60289e0a11cbdd74db
|
/ABCB prediction/predict.py
|
dec06e3f950227abbd61794e01e14991ccf04cc6
|
[] |
no_license
|
Dannis-ARM/FYP
|
49f048c10600c3614e14e7158fe3d9d5720b4a89
|
a723d4ec8ed7578e890e402b5d9f181f24c4562f
|
refs/heads/main
| 2023-06-17T14:29:47.676789
| 2021-07-02T10:17:26
| 2021-07-02T10:17:26
| 376,296,178
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,123
|
py
|
# @Time : 2021/5/31 22:47
# @Author : CME1809103
# @IDE : PyCharm
# LSTM regression on the OHLC average of the ABCB.US stock series: load the
# data, train (or reload) a small stacked-LSTM model, report train/test RMSE,
# plot the fitted curves, then predict the next value.
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import math
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import LSTM
# from tensorflow.python.keras.models import Sequential
# from tensorflow.python.keras.layers import Dense, Activation, LSTM

# IMPORTING DATASET (columns 3-6; presumably Open/High/Low/Close -- TODO confirm sheet layout)
dataset = pd.read_excel('Data/abcb.us.xlsx',sheet_name="abcb.us",usecols=[3,4,5,6])
# CREATING OWN INDEX FOR FLEXIBILITY
obs = np.arange(1, len(dataset) + 1, 1)
# TAKING DIFFERENT INDICATORS FOR PREDICTION: mean of the four columns per row
OHLC_avg = dataset.mean(axis = 1)
# rsi =
# RSI
# PLOTTING ALL INDICATORS IN ONE PLOT
plt.plot(obs, OHLC_avg, 'g', label = 'OHLC avg')
plt.legend(loc = 'upper right')
plt.show()
# PREPARATION OF TIME SERIES DATASET: scale to [0, 1] for the LSTM
OHLC_avg = np.reshape(OHLC_avg.values, (len(OHLC_avg),1))
scaler = MinMaxScaler(feature_range=(0, 1))
OHLC_avg = scaler.fit_transform(OHLC_avg)
# TRAIN-TEST SPLIT (80/20, chronological)
train_OHLC = int(len(OHLC_avg) * 0.80)
train_OHLC, test_OHLC = OHLC_avg[0:train_OHLC,:], OHLC_avg[train_OHLC:len(OHLC_avg),:]
# TIME-SERIES DATASET (FOR TIME T, VALUES FOR TIME T+1)
import indicators
trainX, trainY = indicators.new_dataset(train_OHLC, 1)
testX, testY = indicators.new_dataset(test_OHLC, 1)
# RESHAPING TRAIN AND TEST DATA to (samples, timesteps, features)
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
step_size = 1
# LSTM MODEL: two stacked LSTM layers + linear output head
model = Sequential()
model.add(LSTM(32, input_shape=(1, step_size), return_sequences = True))
model.add(LSTM(16))
model.add(Dense(1))
model.add(Activation('linear'))
# MODEL COMPILING AND TRAINING (reuse a previously saved model when present)
import os
if os.path.exists("saved_model/my_model"):
    print("Get model from 'saved_model/my_model'")
    model = tf.keras.models.load_model('saved_model/my_model')
    model.summary()
else:
    model.compile(loss='mean_squared_error', optimizer='adam') # Try SGD is bad, adam is overfit??, adagrad really bad.
    model.fit(trainX, trainY, epochs=5, batch_size=10, verbose=2)
    model.save('saved_model/my_model')
    print("Model created")
# PREDICTION
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
# DE-NORMALIZING FOR PLOTTING
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
# TRAINING RMSE
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))
print('Train RMSE: %.2f' % (trainScore))
# TEST RMSE
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))
print('Test RMSE: %.2f' % (testScore))
# CREATING SIMILAR DATASET TO PLOT TRAINING PREDICTIONS
trainPredictPlot = np.empty_like(OHLC_avg)
trainPredictPlot[:, :] = np.nan
trainPredictPlot[step_size:len(trainPredict)+step_size, :] = trainPredict
# CREATING SIMILAR DATASET TO PLOT TEST PREDICTIONS
testPredictPlot = np.empty_like(OHLC_avg)
testPredictPlot[:, :] = np.nan
testPredictPlot[len(trainPredict)+(step_size*2)+1:len(OHLC_avg)-1, :] = testPredict
# DE-NORMALIZING MAIN DATASET
OHLC_avg = scaler.inverse_transform(OHLC_avg)
# PLOT OF MAIN OHLC VALUES, TRAIN PREDICTIONS AND TEST PREDICTIONS
plt.plot(OHLC_avg, 'g', label = 'original dataset')
plt.plot(trainPredictPlot, 'r', label = 'training set')
plt.plot(testPredictPlot, 'b', label = 'predicted stock price/test set')
plt.legend(loc = 'upper right')
plt.xlabel('Time in hours')
plt.ylabel('OHLC Value of ABCB.US Stocks')
plt.show()
# PREDICT FUTURE VALUES
last_val = testPredict[-1]
# NOTE(review): last_val/last_val is always 1.0, so the model is always fed a
# constant; confirm this "rescale by last value" scheme is intended.
last_val_scaled = last_val/last_val
next_val = model.predict(np.reshape(last_val_scaled, (1,1,1)))
# FIX: np.asscalar() was removed in NumPy 1.23; ndarray.item() is the
# documented replacement.
print ("Last hour Value:", last_val.item())
print ("Next hour Value:", (last_val*next_val).item())
# print np.append(last_val, next_val)
|
[
"CME1809103@xmu.edu.my"
] |
CME1809103@xmu.edu.my
|
ffa3fc919b5b7974add0481cf555fa240c603cce
|
631465709a9b8bef0836766e54653c5a65d1e97a
|
/post.py
|
6ead0812f33a642635d5db338df20a5ff94d4702
|
[
"Apache-2.0"
] |
permissive
|
sbreitenbach/market-trends
|
e5ed7adea6351b92f54d3b837b56c8766cc4b2b4
|
4c21cf61953e4872415ca9fbc886f15bac595ee2
|
refs/heads/main
| 2023-09-01T07:02:05.053527
| 2023-08-15T01:42:59
| 2023-08-15T01:42:59
| 332,080,646
| 0
| 0
|
Apache-2.0
| 2023-08-15T01:43:01
| 2021-01-22T22:49:27
|
Python
|
UTF-8
|
Python
| false
| false
| 393
|
py
|
class Post:
    """A scraped post with metadata filled in later by callers.

    `sentiment`, `title`, `tickers` and `url` start empty and are populated
    externally (presumably by the scraping/analysis pipeline -- confirm).
    """

    def __init__(self, id, text, upvotes, type):
        # NOTE: `id` and `type` shadow builtins but are part of the public
        # constructor signature, so they are kept for compatibility.
        self.id = id
        self.text = text
        self.upvotes = upvotes
        self.type = type
        self.sentiment = None
        self.title = None
        self.tickers = []
        self.url = None

    def is_thread(self):
        """Return True when this post's type is exactly "thread"."""
        # FIX (idiom): collapse the `if cond: return True else: return False`
        # pattern into a direct boolean expression.
        return self.type == "thread"
|
[
"scott.breitenbach@gmail.com"
] |
scott.breitenbach@gmail.com
|
f5e3ed467b15386559fc13c23acbe01d5fa2865f
|
3ddda99bfed9777566f7c2cd6e1f4af035a8ca5a
|
/purgeQueue.py
|
bf9f79aff456565eb9f74aba3ca6de0d15fc72e2
|
[] |
no_license
|
kirankarpurapu/TweetMap
|
089fba897275a1c64e878afb704c990c3361b849
|
264e823bb40d728e983806117007b965fc1437fc
|
refs/heads/master
| 2021-01-19T21:54:33.292062
| 2016-11-26T01:20:42
| 2016-11-26T01:20:42
| 74,540,699
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
# One-shot maintenance script: delete every pending message from the
# 'TweetMap' SQS queue. Requires AWS credentials configured for boto3.
import boto3
sqs = boto3.resource('sqs')
queue = sqs.get_queue_by_name(QueueName = 'TweetMap')
# to purge the queue (removes all messages; SQS allows this once per 60s)
queue.purge()
|
[
"kiran@edu.chat"
] |
kiran@edu.chat
|
c057f5b102d9212d662b5029e51e37b64f2075d1
|
bad7f9df662dc156dd8a3c880f470750c741c2fe
|
/Sobel.py
|
9fd8bde5f19aeb84eaf2061a977007306b5d102b
|
[] |
no_license
|
Wenarepo/HOLArepo
|
a41f656c8bc8da6d5299a5dcfc3ce8aff4f9c191
|
75d63c837052b339a9b157f5f7695d612ad8b509
|
refs/heads/master
| 2021-01-18T21:17:36.473923
| 2016-06-07T03:30:36
| 2016-06-07T03:30:36
| 55,083,533
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,004
|
py
|
# Sobel edge-detection demo: compares an 8-bit Sobel output against the
# absolute value of a 64-bit-float Sobel output on the same grayscale image.
# NOTE(review): sklearn/pylab/SimpleCV imports appear unused by the demo below.
from SimpleCV import Camera, Display, Image
import numpy as np
#import sklearn as sk
from sklearn import *
#from scipy.sparse import *
#from scipy import *
from matplotlib import pylab
from matplotlib import pyplot as plt
import cv2

# Webcam handle; only used by foto() -- the demo below reads a file instead.
c = Camera()

def foto(c):
    """Grab one frame from camera `c`, display it, and return the image."""
    img = c.getImage()
    img.show()
    return img

# Read the test image as grayscale (imread flag 0).
img = cv2.imread('L5bc.png',0)
# Output dtype = cv2.CV_8U
sobelx8u = cv2.Sobel(img,cv2.CV_8U,1,0,ksize=3)
# Output dtype = cv2.CV_64F. Then take its absolute and convert to cv2.CV_8U
# (the 64F path keeps negative gradients that the direct 8U output cannot hold).
sobelx64f = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=3)
abs_sobel64f = np.absolute(sobelx64f)
sobel_8u = np.uint8(abs_sobel64f)
# Side-by-side plot: original, clipped 8U Sobel, abs(64F) Sobel.
plt.subplot(1,3,1),plt.imshow(img,cmap = 'gray')
plt.title('Original'), plt.xticks([]), plt.yticks([])
plt.subplot(1,3,2),plt.imshow(sobelx8u,cmap = 'gray')
plt.title('Sobel CV_8U'), plt.xticks([]), plt.yticks([])
plt.subplot(1,3,3),plt.imshow(sobel_8u,cmap = 'gray')
plt.title('Sobel abs(CV_64F)'), plt.xticks([]), plt.yticks([])
plt.show()
|
[
"anselmojara@udec.cl"
] |
anselmojara@udec.cl
|
526f8c46e446117efd192bba50cfc4f23ac26872
|
6fe651c2b014f8b07f36ba64d3b55edfc135caac
|
/comprehension_tutorial/exercises/advanced.py
|
8c831cf3ece1036a2df5f5c5e01dc2e07edf8d4f
|
[] |
no_license
|
aftab82/learn
|
55496c11beaf5e00e85b161c4ba97eea921be21b
|
b592e6d4f5ac6ce6f1d2448c8f21b63674dabc0f
|
refs/heads/master
| 2022-09-15T08:05:28.423468
| 2022-08-31T21:16:46
| 2022-08-31T21:16:46
| 250,903,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,081
|
py
|
"""Advanced exercises"""
from collections import namedtuple
import csv
import random
def matrix_from_string(string):
    """Parse whitespace-separated rows of integers into a list of lists."""
    matrix = []
    for row in string.splitlines():
        matrix.append([int(token) for token in row.split()])
    return matrix
def parse_csv(file_obj):
    """Read a CSV file object into a list of namedtuples.

    The first row supplies the field names; every following row becomes
    one namedtuple instance.
    """
    reader = csv.reader(file_obj)
    header = next(reader)
    Row = namedtuple('Row', header)
    rows = []
    for record in reader:
        rows.append(Row(*record))
    return rows
def get_cards():
    """Build a standard 52-card deck as (rank, suit) namedtuples.

    Suit order: spades, hearts, diamonds, clubs; ranks A, 2-10, J, Q, K.
    """
    Card = namedtuple('Card', 'rank suit')
    ranks = ['A'] + [str(value) for value in range(2, 11)] + ['J', 'Q', 'K']
    deck = []
    for suit in ('spades', 'hearts', 'diamonds', 'clubs'):
        for rank in ranks:
            deck.append(Card(rank, suit))
    return deck
def shuffle_cards(deck):
    """Randomly reorder `deck` in place; returns None like list.sort()."""
    random.shuffle(deck)
def deal_cards(deck, count=5):
    """Pop `count` cards off the end of `deck` and return them in pop order."""
    hand = []
    for _ in range(count):
        hand.append(deck.pop())
    return hand
|
[
"aftab28@gmail.com"
] |
aftab28@gmail.com
|
fad8169480570752dbbffc1c0bc1e2ecb4f7d03e
|
3d952ae35dba7c7399e9278e44f75ee884d97ff6
|
/exchanges/uniswap.py
|
76b8ed359113f7f9bb5b57c6092c76ff523c93dc
|
[
"MIT"
] |
permissive
|
JoaoCampos89/0xbtc-discord-price-bot
|
d90eb909de18b951ade8821fc06958e815e8fd2a
|
6eb6839213dfb0176c0be72c2dda7193d4131750
|
refs/heads/master
| 2020-08-08T09:22:13.036779
| 2019-07-11T04:12:23
| 2019-07-11T04:12:23
| 213,803,946
| 0
| 0
|
MIT
| 2019-10-09T02:39:24
| 2019-10-09T02:39:24
| null |
UTF-8
|
Python
| false
| false
| 5,257
|
py
|
"""
API for Uniswap distributed exchange (uniswap.exchange)
Price info is pulled from the smart contract
https://docs.uniswap.io/api/exchange
"""
import logging
from web3 import Web3
import time
from .base_exchange import BaseExchangeAPI
from .uniswap_abi import exchange_abi
from configuration import ETHEREUM_NODE_URL
from constants import SECONDS_PER_ETH_BLOCK
def wei_to_ether(amount_in_wei):
    """Convert a wei amount (int or numeric string) to a float ether value."""
    wei_per_ether = 1000000000000000000.0
    return int(amount_in_wei) / wei_per_ether
def ether_to_wei(amount_in_ether):
    """Convert an ether amount to wei, truncated to an integer."""
    scaled = amount_in_ether * 1000000000000000000.0
    return int(scaled)
class UniswapAPI(BaseExchangeAPI):
    """Price/volume source backed by a Uniswap V1 exchange contract.

    Prices are read from the on-chain contract; 24h volume is rebuilt from
    roughly one day of the exchange's event logs.
    """

    def __init__(self, currency_symbol):
        """Set up web3 access to the exchange contract for `currency_symbol`.

        Raises:
            RuntimeError: if no exchange address is known for the symbol.
        """
        super().__init__()
        if currency_symbol == "0xBTC":
            self.uniswap_exchange_address = "0x701564Aa6E26816147D4fa211a0779F1B774Bb9B"
            self._decimals = 8
        elif currency_symbol == "XXX":
            self.uniswap_exchange_address = "0x0000000000000000000000000000000000000000"
            self._decimals = 0
        else:
            raise RuntimeError("Unknown currency_symbol {}, need to add address to uniswap.py".format(currency_symbol))
        self.currency_symbol = currency_symbol
        self.exchange_name = "Uniswap"
        self.command_names = ["uniswap"]
        #self.short_url = "https://bit.ly/2PnLAre"  # main uniswap interface
        self.short_url = "http://0xbitcoin.trade"  # 0xbtc version of the ui
        self._time_volume_last_updated = 0
        self._w3 = Web3(Web3.HTTPProvider(ETHEREUM_NODE_URL))
        self._exchange = self._w3.eth.contract(address=self.uniswap_exchange_address, abi=exchange_abi)

    async def _update_24h_volume(self, timeout=10.0):
        """Recompute self.volume_eth from ~24 hours of exchange event logs."""
        # topic0 hashes used to classify the exchange's events
        token_purchase_topic = "0xcd60aa75dea3072fbc07ae6d7d856b5dc5f4eee88854f5b4abf7b680ef8bc50f"
        eth_purchase_topic = "0x7f4091b46c33e918a0f3aa42307641d17bb67029427a5369e54b353984238705"
        transfer_topic = "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"
        remove_liquidity_topic = "0x0fbf06c058b90cb038a618f8c2acbf6145f8b3570fd1fa56abb8f0f3f05b36e8"
        add_liquidity_topic = "0x06239653922ac7bea6aa2b19dc486b9361821d37712eb796adfd38d81de278ca"
        current_eth_block = self._w3.eth.blockNumber
        self.volume_eth = 0
        for event in self._w3.eth.getLogs({
                'fromBlock': current_eth_block - (int(60*60*24 / SECONDS_PER_ETH_BLOCK)),
                'toBlock': current_eth_block - 1,
                'address': self.uniswap_exchange_address}):
            topic0 = self._w3.toHex(event['topics'][0])
            if topic0 == token_purchase_topic:
                # ETH spent buying tokens is carried in topics[2]
                self.volume_eth += wei_to_ether(self._w3.toInt(event['topics'][2]))
            elif topic0 == eth_purchase_topic:
                # ETH received selling tokens is carried in topics[3]
                self.volume_eth += wei_to_ether(self._w3.toInt(event['topics'][3]))
            elif topic0 in (transfer_topic, remove_liquidity_topic, add_liquidity_topic):
                # Transfers and liquidity deposits/withdrawals are not trades.
                # FIX: the original had unreachable parsing code after the
                # `continue` in two of these branches; it has been removed.
                continue
            else:
                # FIX: logging.debug expects a %-style format string; the
                # original passed the value as a bare second positional arg,
                # which the logger cannot format.
                logging.debug('unknown topic txhash %s', self._w3.toHex(event['transactionHash']))
                logging.debug('unknown topic topic0 %s', topic0)
        self._time_volume_last_updated = time.time()

    async def _update(self, timeout=10.0):
        """Refresh self.price_eth; refresh volume at most once per hour."""
        # TODO: The amount of tokens 'purchased' to determine the price should
        # not be a fixed value (200). Ideally, load the amount of tokens
        # available in the contract and use a certain percentage.
        amount_tokens = 200
        eth_amount_buy = wei_to_ether(self._exchange.functions.getEthToTokenOutputPrice(amount_tokens * 10**self._decimals).call())
        eth_amount_sell = wei_to_ether(self._exchange.functions.getTokenToEthInputPrice(amount_tokens * 10**self._decimals).call())
        # quote the midpoint of the buy and sell legs
        average_eth_amount = (eth_amount_buy + eth_amount_sell) / 2
        self.price_eth = average_eth_amount / amount_tokens
        # update volume once every hour since it (potentially) loads eth api
        if time.time() - self._time_volume_last_updated > 60*60:
            await self._update_24h_volume()
# Manual smoke test: one live fetch against the configured Ethereum node.
if __name__ == "__main__":
    e = UniswapAPI('0xBTC')
    e.load_once_and_print_values()
|
[
"x1d@protonmail.com"
] |
x1d@protonmail.com
|
5dde8324a09e3e3b34e9ab6a9e16642d78a7ba41
|
20a466c82abf9e24e01814ea5f4864e09566b3b1
|
/foundryapp/foundryapp/report/committed_production_against_requirement_report/committed_production_against_requirement_report.py
|
a5e217adafec362826ee67e0dbfda0c50138c026
|
[
"MIT"
] |
permissive
|
umaepoch/foundryapp
|
5d038562b5c0da6669b0d107a68eddace3682514
|
f8d43e2ead4de7d6f1a6c30bc3f9c7c390f030e1
|
refs/heads/master
| 2023-08-30T05:09:12.570319
| 2021-11-18T12:36:19
| 2021-11-18T12:36:19
| 399,685,380
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,383
|
py
|
# Copyright (c) 2013, yashwanth and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import datetime
def execute(filters=None):
    """Frappe report entry point: return (columns, rows) for the report."""
    return getColumns(), construct_report(filters.get("scheduled_date"))
def construct_report(filters):
    """Build report rows with per-dispatch-item cumulative totals.

    `filters` is a scheduled-date string ('%Y-%m-%d') or None; when given,
    only rows for that date are returned.
    """
    r_data = []
    # running totals, accumulated per dispatch item
    cum_so_rqd = 0
    cum_pln_disp = 0
    cum_uo_dlv = 0
    cum_prod_com = 0
    dispatch = get_dispatch()
    data = get_production_plan()
    if len(data) > 0 and len(dispatch) > 0:
        for i in dispatch:
            for d in data:
                if i['dispatch_item'] == d['item_code']:
                    cum_so_rqd += d['so_requirement']
                    cum_pln_disp += d['planned_dispatch']
                    cum_uo_dlv += d['under/over_delivery']
                    cum_prod_com += d['committed_production']
                    data[data.index(d)]['cum_so'] = cum_so_rqd
                    data[data.index(d)]['cum_dis'] = cum_pln_disp
                    data[data.index(d)]['cum_uo'] = cum_uo_dlv
                    data[data.index(d)]['cum_prod'] = cum_prod_com
                    data[data.index(d)]['cum_shrt'] = cum_prod_com - cum_pln_disp
            # reset the running totals before the next dispatch item
            # NOTE(review): indentation reconstructed -- confirm these resets
            # belong to the outer loop, not after it.
            cum_so_rqd = 0
            cum_prod_com = 0
            cum_uo_dlv = 0
            cum_pln_disp = 0
    if filters:
        # report stores dates as dd-mm-YYYY; convert the filter to match
        date = datetime.datetime.strptime(filters, '%Y-%m-%d').strftime('%d-%m-%Y')
        for d in data:
            if d['scheduled_shipment_date'] == date:
                r_data.append([d['scheduled_shipment_date'], d['item_code'], d['item_name'],
                    d['concat'], d['so_requirement'], d['planned_dispatch'], d['under/over_delivery'],
                    d['cum_so'], d['cum_dis'], d['cum_uo'], d['committed_production'],
                    d['shortage/excess_production'], d['cum_prod'], d['cum_shrt']])
    if filters is None:
        for d in data:
            r_data.append([d['scheduled_shipment_date'], d['item_code'], d['item_name'],
                d['concat'], d['so_requirement'], d['planned_dispatch'], d['under/over_delivery'],
                d['cum_so'], d['cum_dis'], d['cum_uo'], d['committed_production'],
                d['shortage/excess_production'], d['cum_prod'], d['cum_shrt']])
    return r_data
def getColumns():
    """Return the report column definitions in Frappe's "Label::Width" form.

    FIX: corrected label typos ("Prodction" -> "Production", "theWeek" ->
    "the Week", "Comittment" -> "Commitment"); order and widths unchanged.
    """
    columns = [
        "Scheduled Shipment Date::150",
        "Dispatch Item Code::150",
        "Dispatch Item Name::150",
        "Concat::150",
        "SO Requirement::50",
        "Planned Dispatch::50",
        "Under/Over Delivery::50",
        "Cumulative for the Week - SO Required::50",
        "Cumulative for the Week - Planned Dispatch::50",
        "Cumulative for the Week - Under/Over Delivery::50",
        "Committed Production::50",
        "Shortage/Excess Production::50",
        "Cumulative Production Commitment::50",
        "Cumulative Shortage/Excess Production::50"
    ]
    return columns
def get_production_plan():
    """Load active Committed Production Plan rows and derive per-row metrics.

    Returns a list of dicts shaped for construct_report(); rows without a
    week_ending date are skipped.
    """
    data = []
    cmp_details = frappe.db.sql("""select cmpi.week_ending, cmpi.dispatch_item, cmpi.dispatch_item_name, concat(datediff(cmpi.week_ending, '1900-01-01') + 2,cmpi.dispatch_item) as date_serial_number,
        cmpi.so_requirement, cmpi.container_plan_requirement ,cmpi.production_quantity_committed, cmpi.quantity_in_tonnes
        from `tabCommitted Production Plan Items` as cmpi
        join `tabCommitted Production Plan` as cmp on cmpi.parent = cmp.name
        where cmp.is_active=1
        order by cmpi.week_ending, cmpi.dispatch_item""", as_dict = 1)
    print("Commited : ",cmp_details)
    if cmp_details != None and (len(cmp_details) > 0):
        for cmp in cmp_details:
            if cmp['week_ending']:
                cmp_json = {
                    'scheduled_shipment_date' : cmp['week_ending'].strftime("%d-%m-%Y"),
                    'item_code' : cmp['dispatch_item'],
                    'item_name' : cmp['dispatch_item_name'],
                    # Excel-style serial number + item code, from the SQL concat()
                    'concat' : cmp['date_serial_number'],
                    'so_requirement' : cmp['so_requirement'],
                    'planned_dispatch': cmp['container_plan_requirement'],
                    # positive when planned dispatch exceeds the SO requirement
                    'under/over_delivery': cmp['container_plan_requirement'] - cmp['so_requirement'],
                    'committed_production': cmp['production_quantity_committed'],
                    # positive when committed production exceeds the planned dispatch
                    'shortage/excess_production': cmp['production_quantity_committed'] - cmp['container_plan_requirement']
                }
                data.append(cmp_json)
    return data
def get_dispatch():
    """Return the distinct dispatch items of all active Committed Production
    Plans as a list of dicts (key: 'dispatch_item').

    FIX: return an empty list instead of "" when there are no rows, so the
    return type is consistent (callers only take len() and iterate).
    """
    dispatch = frappe.db.sql("""select cmpi.dispatch_item from `tabCommitted Production Plan Items` as cmpi
        join `tabCommitted Production Plan` as cmp on cmpi.parent = cmp.name
        where cmp.is_active=1
        group by cmpi.dispatch_item
        order by dispatch_item""", as_dict = 1)
    print("Dispatch Items : ", dispatch)
    return dispatch if dispatch else []
|
[
"jyothi@meritsystems.com"
] |
jyothi@meritsystems.com
|
15d7aecf4a95145f8bce53ba7959fd4e048a9fed
|
72350cc086e70f93425640b823bc7461c7cbff46
|
/WIDW3T2.py
|
bfc82776864276eb298abf4ed9d89527e41ad788
|
[] |
no_license
|
Faybeee/Session3-Homework
|
c1529bd4e482eebbc4e71ea34e0f32288838e25b
|
2bee58f255bd2667ec78db6b237b302c33ced9f6
|
refs/heads/main
| 2023-02-21T03:40:39.822766
| 2021-01-24T12:40:46
| 2021-01-24T12:40:46
| 332,446,268
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,650
|
py
|
#Write a program to ask a student for their percentage mark and convert this to a grade.
#The conversion will be done in a function called mark_grade
#Ask the user for their target grade and print this with their mark
# If their target grade > exam grade display a suitable message
# If their target grade = exam grade display a suitable message
# If their target grade < exam grade display a suitable message
def mark_grade(permark):
    """Map a percentage mark to a letter grade: A/B/C/D/E or "FAIL"."""
    if permark >= 90:
        return "A"
    if permark >= 80:
        return "B"
    if permark >= 70:
        return "C"
    if permark >= 60:
        return "D"
    if permark >= 50:
        return "E"
    return "FAIL"
def grade_mark(want, permark):
    """Compare the mark against the target grade.

    Returns "achieved", "exceeded" or "did not achieve" for targets A-E
    (case-insensitive); None for any other target, matching the original.

    FIX: the original's "did not achieve" branches tested `permark > bound`
    where `permark < bound` was required (e.g. target B with mark 70 fell
    through every branch and returned None).
    """
    thresholds = {'a': 90, 'b': 80, 'c': 70, 'd': 60, 'e': 50}
    key = want.lower()
    if key not in thresholds:
        return None
    low = thresholds[key]
    if key == 'a':
        # A has no "exceeded" band
        return "achieved" if permark >= 90 else "did not achieve"
    if permark >= low + 10:
        return "exceeded"
    if permark >= low:
        return "achieved"
    return "did not achieve"
# Interactive driver: ask for the target grade and percentage mark, then
# report the achieved grade versus the target.
print("Hi, I'm here to calculate your grade!")
want = str(input("First though, what grade are you hoping for?"))
permark = int(input("What % mark did you get?"))
grade = mark_grade(int(permark))
wanted = grade_mark(want,permark)
if wanted == "achieved":
    endit = "Congratulations!"
elif wanted == "exceeded":
    endit = "OMG! CONGRATULATIONS! THAT IS EPIC!!!"
elif wanted == "did not achieve":
    endit = "Better luck next time!"
# NOTE(review): if grade_mark() returns None (unrecognised target grade),
# `endit` is never bound and the print below raises NameError.
print("Your grade is", grade, "you", wanted,"the", want, "you wanted.", endit)
|
[
"noreply@github.com"
] |
noreply@github.com
|
c2056bf0c275dfbda836faa8cbf3d26e801cb7a5
|
1cbc03603f3aad9f4eecdd341d58d2f8c910063c
|
/theme_10/task_03/__init__.py
|
01de4b6607260dae5291e6053d6acd7ec0422574
|
[] |
no_license
|
omeH/studies
|
71925b9a2419c152d5a1b8fd07974511851449c2
|
96eb72e20180554c2edc25397a520cd1c5cd7347
|
refs/heads/master
| 2016-08-06T01:12:19.723251
| 2014-12-11T18:45:39
| 2014-12-11T18:45:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25
|
py
|
# Public API of this package: only `safe` is exported by star-imports.
__all__ = [
    'safe'
]
|
[
"hanulllu@gmail.com"
] |
hanulllu@gmail.com
|
95d13e0f751a416bc4b06580bcf2b908508684b6
|
a1b8b807a389fd3971ac235e46032c0be4795ff1
|
/Repo_Files/Zips/plugin.video.streamhub/resources/lib/sources/en/watchfree.py
|
499eb10d07d5e83d78835d4d22adcf9be4794a51
|
[] |
no_license
|
sClarkeIsBack/StreamHub
|
0cd5da4b3229592a4e2cf7ce3e857294c172aaba
|
110983579645313b8b60eac08613435c033eb92d
|
refs/heads/master
| 2020-05-23T09:09:54.898715
| 2020-02-29T12:15:32
| 2020-02-29T12:15:32
| 80,440,827
| 9
| 20
| null | 2017-10-04T07:32:52
| 2017-01-30T16:43:46
|
Python
|
UTF-8
|
Python
| false
| false
| 8,483
|
py
|
# -*- coding: utf-8 -*-
'''
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,base64
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import proxy
class source:
    """Scraper "source" for the watchfree site (Kodi add-on plumbing).

    Python 2 code: relies on `urllib.quote_plus`, the `urlparse` module and
    `str.encode('utf-8')` round-trips. Each public method swallows all
    exceptions and returns None on failure, as the add-on framework expects.
    """

    def __init__(self):
        self.priority = 0
        self.language = ['en']
        self.domains = ['watchfree.to','watchfree.unblockall.org','www6-watchfree6-to.unblocked.lol']
        # NOTE(review): the second assignment immediately overrides the first.
        self.base_link = 'http://watchfree.unblockall.org'
        self.base_link = 'http://www6-watchfree6-to.unblocked.lol'
        self.moviesearch_link = '/?keyword=%s&search_section=1'
        self.tvsearch_link = '/?keyword=%s&search_section=2'

    def movie(self, imdb, title, localtitle, aliases, year):
        """Search for a movie page; return its site-relative URL or None.

        `localtitle` and `aliases` are accepted for interface compatibility
        but unused here.
        """
        try:
            query = self.moviesearch_link % urllib.quote_plus(cleantitle.query(title))
            query = urlparse.urljoin(self.base_link, query)
            result = str(proxy.request(query, 'free movies'))
            # pull in the second results page when pagination is present
            if 'page=2' in result or 'page%3D2' in result: result += str(proxy.request(query + '&page=2', 'free movies'))
            result = client.parseDOM(result, 'div', attrs = {'class': 'item'})
            title = 'watch' + cleantitle.get(title)
            # accept the given year plus one year either side
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in result]
            result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [i for i in result if any(x in i[1] for x in years)]
            r = [(proxy.parse(i[0]), i[1]) for i in result]
            # exact title+year matches first; otherwise dedupe and probe
            match = [i[0] for i in r if title == cleantitle.get(i[1]) and '(%s)' % str(year) in i[1]]
            match2 = [i[0] for i in r]
            match2 = [x for y,x in enumerate(match2) if x not in match2[:y]]
            if match2 == []: return
            # verify up to 5 candidate pages by looking for the imdb id
            for i in match2[:5]:
                try:
                    if len(match) > 0: url = match[0] ; break
                    r = proxy.request(urlparse.urljoin(self.base_link, i), 'free movies')
                    r = re.findall('(tt\d+)', r)
                    if imdb in r: url = i ; break
                except:
                    pass
            # strip the scheme/host, keeping the site-relative path
            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Search for a TV show page; same strategy as movie()."""
        try:
            query = self.tvsearch_link % urllib.quote_plus(cleantitle.query(tvshowtitle))
            query = urlparse.urljoin(self.base_link, query)
            result = str(proxy.request(query, 'free movies'))
            if 'page=2' in result or 'page%3D2' in result: result += str(proxy.request(query + '&page=2', 'free movies'))
            result = client.parseDOM(result, 'div', attrs = {'class': 'item'})
            tvshowtitle = 'watch' + cleantitle.get(tvshowtitle)
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in result]
            result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [i for i in result if any(x in i[1] for x in years)]
            r = [(proxy.parse(i[0]), i[1]) for i in result]
            match = [i[0] for i in r if tvshowtitle == cleantitle.get(i[1]) and '(%s)' % str(year) in i[1]]
            match2 = [i[0] for i in r]
            match2 = [x for y,x in enumerate(match2) if x not in match2[:y]]
            if match2 == []: return
            for i in match2[:5]:
                try:
                    if len(match) > 0: url = match[0] ; break
                    r = proxy.request(urlparse.urljoin(self.base_link, i), 'free movies')
                    r = re.findall('(tt\d+)', r)
                    if imdb in r: url = i ; break
                except:
                    pass
            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Resolve an episode page URL from the show page at `url`.

        Matches by title + air date first, then air date, then the
        season-N-episode-M slug in the link itself.
        """
        try:
            if url == None: return
            url = urlparse.urljoin(self.base_link, url)
            result = proxy.request(url, 'tv_episode_item')
            result = client.parseDOM(result, 'div', attrs = {'class': 'tv_episode_item'})
            title = cleantitle.get(title)
            # convert 'YYYY-MM-DD' to the site's 'Month D YYYY' form
            premiered = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(premiered)[0]
            premiered = '%s %01d %s' % (premiered[1].replace('01','January').replace('02','February').replace('03','March').replace('04','April').replace('05','May').replace('06','June').replace('07','July').replace('08','August').replace('09','September').replace('10','October').replace('11','November').replace('12','December'), int(premiered[2]), premiered[0])
            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'class': 'tv_episode_name'}), client.parseDOM(i, 'span', attrs = {'class': 'tv_num_versions'})) for i in result]
            result = [(i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
            result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
            result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]
            url = [i for i in result if title == cleantitle.get(i[1]) and premiered == i[2]][:1]
            if len(url) == 0: url = [i for i in result if premiered == i[2]]
            if len(url) == 0 or len(url) > 1: url = [i for i in result if 'season-%01d-episode-%01d' % (int(season), int(episode)) in i[0]]
            url = url[0][0]
            url = proxy.parse(url)
            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return

    def sources(self, url, hostDict, hostprDict):
        """Extract hoster links from an episode/movie page.

        Returns a list of dicts with source/quality/language/url keys;
        returns the (possibly empty) list even on failure.
        """
        try:
            sources = []
            if url == None: return sources
            url = urlparse.urljoin(self.base_link, url)
            # NOTE(review): 'link_ite' is a deliberate partial class-name
            # match (see the regex-style attrs below) -- confirm.
            result = proxy.request(url, 'link_ite')
            links = client.parseDOM(result, 'table', attrs = {'class': 'link_ite.+?'})
            for i in links:
                try:
                    url = client.parseDOM(i, 'a', ret='href')
                    # the real target is base64-encoded in the 'gtfo' parameter
                    url = [x for x in url if 'gtfo' in x][-1]
                    url = proxy.parse(url)
                    url = urlparse.parse_qs(urlparse.urlparse(url).query)['gtfo'][0]
                    url = base64.b64decode(url)
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = host.encode('utf-8')
                    quality = client.parseDOM(i, 'div', attrs = {'class': 'quality'})
                    if any(x in ['[CAM]', '[TS]'] for x in quality): quality = 'CAM'
                    else: quality = 'SD'
                    quality = quality.encode('utf-8')
                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
                except:
                    pass
            return sources
        except:
            return sources

    def resolve(self, url):
        """Hoster URLs are already final; return them unchanged."""
        return url
|
[
"mediahubiptv@gmail.com"
] |
mediahubiptv@gmail.com
|
00170ae8d4a933ab33ea078c5a1290a931cf032d
|
b292052312683fe396873ea41bcb50b6b5c0c69b
|
/roots.py
|
4beefb069595e269e0cd9fa67d7769f5a330b5e3
|
[] |
no_license
|
lucas-homer/pyfund
|
277da2eac90412a73fb41f161554a63cc5f16f34
|
d82d8bfa4a3dca05e4ae793f88628e7f089df010
|
refs/heads/master
| 2021-04-15T09:20:37.722881
| 2018-03-22T19:29:36
| 2018-03-22T19:29:36
| 126,384,692
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 851
|
py
|
import sys
def sqrt(x):
    '''Compute square roots using the method of Heron of Alexandria.

    Args:
        x: The number for which the square root is to be computed.

    Returns:
        The square root of x.

    Raises:
        ValueError: If x is negative.
    '''
    if x < 0:
        raise ValueError("Cannot compute square root "
                         "of negative number {}".format(x))
    guess, iterations = x, 0
    # Iterate guess <- mean(guess, x/guess) until it stabilises exactly,
    # capped at 20 rounds to guarantee termination.
    while guess * guess != x and iterations < 20:
        guess = (guess + x / guess) / 2.0
        iterations += 1
    return guess
def main():
    """Demo driver: print two roots, then show ValueError handling."""
    try:
        for value in (9, 2, -1):
            print(sqrt(value))
        print("this is never printed.")
    except ValueError as e:
        print(e, file=sys.stderr)
    print("Program execution continues normally here.")
# Run the demo only when executed as a script.
if __name__ == '__main__':
    main()
|
[
"lucas.homer@gmail.com"
] |
lucas.homer@gmail.com
|
59374bff51a5f4a97d27ad97be962fd2224e9c53
|
a39449d094f1aeb9c7b269b7c32b03ca84462243
|
/src/createCustomVocab.py
|
b9db22f2fa9618636dfef079f58ba6023650ed86
|
[] |
no_license
|
somi198/KISTI-2020-AI-Project
|
4714f8aebe726a14ef9d620af25435d7dada73fb
|
ebcc8a2f907f1ca609fd026efeca5d716f0895a7
|
refs/heads/master
| 2023-07-13T05:14:40.152542
| 2021-08-14T12:11:39
| 2021-08-14T12:11:39
| 320,918,545
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,352
|
py
|
import argparse
import re
import sys
# custom vocab dictionaly
new_vocab = []
# Vocab path
## input Vocab path
MecapVocab_path = "rsc/my_conf/hangul_vocab.txt"
EnglishVocab_path = "rsc/my_conf/ices_eng_vocab_1000.txt"
## output Vocab path
CustomVocab_path = "rsc/my_conf/ices_custom_vocab_v2.txt"
## 입력 text가 한글인지 아닌지 판단.
def isHangul(text):
    """Return True when *text* contains at least one Hangul character.

    A leading WordPiece continuation marker "##" is stripped first.  On
    Python 2, plain byte strings are decoded to unicode before matching;
    on Python 3 the text is used as-is.
    """
    if text.startswith("##"):
        text = text[2:]
    # Check the Python Version
    if sys.version_info >= (3, 0):  # Ver 3 or later
        decoded = text
    elif type(text) is not unicode:  # Ver 2.x byte string
        decoded = text.decode('utf-8')
    else:
        decoded = text
    # Hangul compatibility Jamo block + Hangul syllables block.
    return len(re.findall(u'[\u3130-\u318F\uAC00-\uD7A3]+', decoded)) > 0
def add_korean():
    """Append every Hangul entry from the Mecab vocab file to new_vocab."""
    with open(MecapVocab_path, 'r') as vocab_file:
        entries = vocab_file.readlines()
    print("Total Mecab Vocab size : ", len(entries))
    hangul_count = 0
    for entry in entries:
        # Strip the trailing newline before the Hangul test.
        if isHangul(entry[:-1]):
            new_vocab.append(entry)
            hangul_count += 1
    print("Number of Hangul vocab : {}".format(hangul_count))
    print("Current new_vocab size : {} (한글단어 추가)".format(len(new_vocab)))
def add_english():
    """Append English vocab entries to new_vocab, skipping 5 header lines."""
    with open(EnglishVocab_path, 'r') as vocab_file:
        eng_lines = vocab_file.readlines()
    print("Total English Vocab size : ", len(eng_lines))
    body = eng_lines[5:]
    new_vocab.extend(body)
    print("Number of english vocab : {}".format(len(body)))
    print("Current new_vocab size : {} (영어 추가)".format(len(new_vocab)))
def add_seperater():
    """Prepend the five special BERT tokens so they land at indices 0-4."""
    # Written in final order; the slice assignment reproduces the original
    # sequence of insert(0, ...) calls exactly.
    new_vocab[0:0] = ['[PAD]\n', '[UNK]\n', '[CLS]\n', '[SEP]\n', '[MASK]\n']
    print("Number of seperater : 5")
    print("Current new_vocab size : {} (Seperater 추가)".format(len(new_vocab)))
def add_number():
    """Add digit tokens 0-9, both word-initial and '##'-continuation forms."""
    added = 0
    for digit in range(10):
        new_vocab.extend([str(digit) + '\n', "##{}\n".format(digit)])
        added += 2
    print("Number of type of number : {}".format(added))
    print("Current new_vocab size : {} (숫자 추가)".format(len(new_vocab)))
def add_special_char():
    """Add math/special-character tokens in plain and '##'-continuation forms."""
    used_Special_Char = "+-/*÷=×±∓∘∙∩∪≅∀√%∄∃θπσ≠<>≤≥≡∼≈≢∝≪≫∈∋∉⊂⊃⊆⊇⋈∑∫∏∞x().,%#{}"
    count = 0
    for c in used_Special_Char:
        new_vocab.append(c+'\n')
        new_vocab.append("##{}\n".format(c))
        count+=2
    print("Number of Special Characters : {}".format(count))
    # Fixed log message: the original said "(숫자 추가)" ("numbers added"),
    # copy-pasted from add_number(); this step adds special characters.
    print("Current new_vocab size : {} (특수문자 추가)".format(len(new_vocab)))
def merge_all_vocab():
    """Write the accumulated new_vocab entries to the custom vocab file."""
    with open(CustomVocab_path, 'w') as out_file:
        out_file.write("".join(new_vocab))
def compare_shap_word():
    """Report how many saved entries are '##' continuations vs. word starts."""
    with open(CustomVocab_path, 'r') as in_file:
        entries = in_file.readlines()
    continuation = 0
    word_start = 0
    for entry in entries[5:]:  # skip the five separator tokens
        if entry[:2] == '##':
            continuation += 1
        else:
            word_start += 1
    print("## 붙은 것 : ", continuation)
    print("## 안 붙은 것 : ", word_start)
def str2bool(v):
    """Parse a yes/no command-line flag string into a bool.

    Raises:
        argparse.ArgumentTypeError: if *v* is not a recognised truthy or
            falsy spelling.
    """
    normalized = v.lower()
    if normalized in ('yes', 'true', 't', 'y', '1'):
        return True
    if normalized in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
if __name__ == '__main__':
    # CLI entry point: build the custom vocab from a Mecab vocab file plus an
    # English vocab file, then optionally report the ##-prefix breakdown.
    parser = argparse.ArgumentParser(description='Process korean spelling check')
    parser.add_argument('Mecab', help='Path of file Mecab vocab')
    parser.add_argument('english_only', help='Path of file english vocab')
    parser.add_argument('--custom', default="rsc/my_conf/ices_custom_vocab_v2.txt", help='Path of file final custom vocab')
    parser.add_argument('--check_word', type=str2bool, default="true", help='check ##word and word')
    args = parser.parse_args()
    ## input Vocab path
    # Rebinding these module-level names works because the helper functions
    # read the globals at call time, not at definition time.
    MecapVocab_path = args.Mecab
    EnglishVocab_path = args.english_only
    ## output Vocab path
    CustomVocab_path = args.custom
    add_korean()
    add_english()
    add_seperater()
    add_number()
    add_special_char()
    merge_all_vocab()
    if (args.check_word):
        compare_shap_word()
|
[
"saejin7694@gmail.com"
] |
saejin7694@gmail.com
|
8be593e9228a4956a1fb34a15eadd28289e4ea8e
|
1405f47a6e0715f163439b034987e6e298f74429
|
/top/api/rest/__init__.py
|
4d9cc5110cbc87832c3f2ab2702336280424af59
|
[] |
no_license
|
skee-t/backend
|
5dd7064c62615de16c3fefba34edc19e598df00d
|
941976d99245486790ca91e134b0cbae1a003f1e
|
refs/heads/master
| 2021-05-03T20:13:35.559147
| 2016-12-26T10:45:09
| 2016-12-26T10:45:09
| 69,564,111
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,609
|
py
|
from top.api.rest.AlibabaAliqinFcFlowChargeProvinceRequest import AlibabaAliqinFcFlowChargeProvinceRequest
from top.api.rest.AlibabaAliqinFcFlowChargeRequest import AlibabaAliqinFcFlowChargeRequest
from top.api.rest.AlibabaAliqinFcFlowGradeRequest import AlibabaAliqinFcFlowGradeRequest
from top.api.rest.AlibabaAliqinFcFlowQueryRequest import AlibabaAliqinFcFlowQueryRequest
from top.api.rest.AlibabaAliqinFcSmsNumQueryRequest import AlibabaAliqinFcSmsNumQueryRequest
from top.api.rest.AlibabaAliqinFcSmsNumSendRequest import AlibabaAliqinFcSmsNumSendRequest
from top.api.rest.AlibabaAliqinFcTtsNumSinglecallRequest import AlibabaAliqinFcTtsNumSinglecallRequest
from top.api.rest.AlibabaAliqinFcVoiceNumDoublecallRequest import AlibabaAliqinFcVoiceNumDoublecallRequest
from top.api.rest.AlibabaAliqinFcVoiceNumSinglecallRequest import AlibabaAliqinFcVoiceNumSinglecallRequest
from top.api.rest.AppipGetRequest import AppipGetRequest
from top.api.rest.AreasGetRequest import AreasGetRequest
from top.api.rest.HttpdnsGetRequest import HttpdnsGetRequest
from top.api.rest.KfcKeywordSearchRequest import KfcKeywordSearchRequest
from top.api.rest.TimeGetRequest import TimeGetRequest
from top.api.rest.TopAuthTokenCreateRequest import TopAuthTokenCreateRequest
from top.api.rest.TopAuthTokenRefreshRequest import TopAuthTokenRefreshRequest
from top.api.rest.TopIpoutGetRequest import TopIpoutGetRequest
from top.api.rest.TopSecretGetRequest import TopSecretGetRequest
from top.api.rest.TopatsResultGetRequest import TopatsResultGetRequest
from top.api.rest.TopatsTaskDeleteRequest import TopatsTaskDeleteRequest
|
[
"rensikun@paypalm.cn"
] |
rensikun@paypalm.cn
|
0653972e0dd62e235f1b6c73af6da5b96e246c6f
|
1a812d520fa0788864cab3c6bbd4e2ba0e8872c2
|
/employeedataandprintthatdata.py
|
d97719e66d1ee36ecddc97ae0f16f35d728b4462
|
[] |
no_license
|
manutdmohit/pythonprogramexamples
|
b6f6906a6169ad2ecd9b16d95495474d570b065e
|
06ac4af8ce13872bbe843175a61d7ad77e0f92b6
|
refs/heads/main
| 2023-01-14T13:14:57.468947
| 2020-11-25T05:39:01
| 2020-11-25T05:39:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
# Collect one employee record from stdin and echo it back for confirmation.
eno=int(input('Enter employee number:'))
ename=input('Enter employee name:')
esal=float(input('Enter employee salary:'))
eaddr=input('Enter employee address:')
# Bug fix: the original used bool(input(...)), which is True for ANY
# non-empty answer -- typing "False" still produced True.  Compare the
# entered text explicitly instead.
married=input('Employee married?[True/False]:').strip().lower() in ('true', 'yes', 'y', '1')
print('Please confirm your provided information')
print('Employee Number:',eno)
print('Employee Name:',ename)
print('Employee Salary:',esal)
print('Employee Address:',eaddr)
print('Employee Married?:',married)
|
[
"noreply@github.com"
] |
noreply@github.com
|
961781e9a4421f843daec46bf7d27a5b190cffc6
|
989b3499948137f57f14be8b2c77d0610d5975e6
|
/python-package/daily_study/python/question_python(resolved)/chapter4_conditional_and_loops(완결)/i_is_member.py
|
fb8ea88f0fd87a269fb0ec00839eb849b2386979
|
[] |
no_license
|
namkiseung/python_BasicProject
|
76b4c070934ad4cb9d16ce844efa05f64fb09ac0
|
460d05248b2d1431624aba960e28bece888643e4
|
refs/heads/master
| 2022-12-13T21:12:06.865241
| 2020-04-23T01:30:08
| 2020-04-23T01:30:08
| 142,980,920
| 1
| 1
| null | 2022-12-08T02:27:40
| 2018-07-31T07:49:17
|
Python
|
UTF-8
|
Python
| false
| false
| 550
|
py
|
# -*- coding: utf-8 -*-
def is_member(member_list, mem):
    """ 리스트 member_list 와, 어떤 데이터 mem을 전달받고, mem이 member_list에 포함되어 있는지를 True/False로 반환하는 함수를 작성하자
    (Return True/False for whether mem is contained in member_list.)

    sample in/out:
        is_member([1, 5, 8, 3], 3) -> True
        is_member([5, 8, 3], -1) -> False
    """
    # Linear scan with early exit -- equivalent to `mem in member_list`.
    for candidate in member_list:
        if candidate == mem:
            return True
    return False
if __name__ == "__main__":
print is_member([1, 5, 8, 3], 3)# -> True
print is_member([5, 8, 3], -1) #-> False
pass
|
[
"rlzld100@gmail.com"
] |
rlzld100@gmail.com
|
dd0cf2b1d4f90a284dd76c89ec61fd109ca9df93
|
c99a9a65f451c2af2a1985829d031a40b0c78379
|
/backend/run.py
|
aa950026058d229589ac138c3517b6ef8aebc907
|
[] |
no_license
|
jianchann/GetUP
|
18bdaa4af3330ce643db5633462e0a4597fc84c0
|
b9a900713df157ff05069fc7d4a57ef446e59469
|
refs/heads/master
| 2022-12-13T00:51:12.210559
| 2020-03-12T21:25:14
| 2020-03-12T21:25:14
| 235,525,416
| 0
| 1
| null | 2022-12-11T21:33:12
| 2020-01-22T08:00:32
|
Vue
|
UTF-8
|
Python
| false
| false
| 252
|
py
|
#!/usr/bin/env python
from app import app, db
import os
# Create any missing model tables before the app serves requests.
db.create_all()
if __name__ == '__main__':
    if app.debug:
        # Debug mode: Flask's default port, reachable on all interfaces.
        app.run(host='0.0.0.0')
    else:
        # Deployed mode: bind to the platform-provided PORT (default 5000).
        port = int(os.environ.get("PORT", 5000))
        app.run(host='0.0.0.0', port=port)
|
[
"jianlorenzo_chan@yahoo.com.ph"
] |
jianlorenzo_chan@yahoo.com.ph
|
ad005e7c3c65d9d484b6e2414b855dd7605fbebe
|
28ae5b967328670448b47baa87c5506d573595ac
|
/ex.py
|
5c0db097d191b60fa670863c3721a47bfd4236a4
|
[
"Apache-2.0"
] |
permissive
|
Kagurazaka-Hanasaka/RanmaruWorks_Git
|
f4ea9ae838136f5969f5be1fa39d4eaa0ae1c47d
|
8e327b31b1b71cb231755fe61ffee49fa2d69e69
|
refs/heads/master
| 2020-03-25T03:43:21.121098
| 2018-08-03T00:05:59
| 2018-08-03T00:05:59
| 143,356,493
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,406
|
py
|
# NOTE(review): hard-coded account cookies/credentials below -- these should
# live in a config file or environment variables, not in source control.
import requests, re, json, uuid, glob, sqlite3, time, gc, os, psutil
from bs4 import BeautifulSoup
eoltoken = "null"   # last gallery seen on the previous page (list once set)
merge = []          # accumulated per-gallery metadata dicts
hlistc = 0          # files written since the last rate-limit pause
for pgn in range(5):
    cookd = {
        "igneous": "89540adbd",
        "ipb_member_id": "2237746",
        "ipb_pass_hash": "d99e752060d5e11636d7e427f62a3622",
        "lv": "1533216215-1533216236"
    }
    excook = requests.utils.cookiejar_from_dict(cookd, cookiejar=None, overwrite=True)
    exhead = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,ja;q=0.7",
        "Connection": "keep-alive",
        "Host": "exhentai.org",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"
    }
    eol = []
    hlist = []
    exurl = "https://exhentai.org/?page="+ str(pgn)+ "&f_doujinshi=on&advsearch=1&f_search=language%3Achinese&f_srdd=5&f_sname=on&f_stags=on&f_sr=on&f_sh=on&f_apply=Apply+Filter"
    orig = requests.get(exurl, headers=exhead, cookies=excook).text
    if "No hits found" in orig:
        print("-----Crawling Queue Ends-----")
        break
    else:
        # NOTE(review): BeautifulSoup() without an explicit parser warns and
        # may choose different parsers on different machines.
        BSorig = BeautifulSoup(orig)
        table = BSorig.find("table", {"class": "itg"})
        # Collect [gallery_id, gallery_token] pairs from the result table.
        for link in table.findAll("a", href=re.compile("https://exhentai\.org/g/[0-9]{1,8}/[A-Za-z0-9]{10}/")):
            if "href" in link.attrs:
                link2 = link.attrs["href"]
                hlist.append(link2.split("/")[4:6])
        # Skip everything up to the last gallery handled previously.
        # NOTE(review): on the first pass eoltoken is the string "null" while
        # hlist holds two-element lists, so this test can never match then.
        if eoltoken in hlist:
            eol = hlist.index(eoltoken)
            hlist = hlist[eol+1:len(hlist)]
        eoltoken = hlist[-1]
        req = {
            "method": "gdata",
            "gidlist": hlist,
            "namespace": 1
        }
        # Fetch gallery metadata from the API; the dumps/loads round-trip
        # normalises the response to plain dicts.
        recl = json.loads(json.dumps(requests.post("https://api.e-hentai.org/api.php", data=json.dumps(req, ensure_ascii=False).encode("utf-8")).json(), ensure_ascii=False))['gmetadata']
        for obj in recl:
            # One JSON file per gallery, named with a random UUID.
            with open(str(uuid.uuid4())+".json", "w", encoding="UTF-8") as f:
                json.dump(obj, f, ensure_ascii=False)
            hlistc = hlistc + 1
            # Crude rate limiting: pause 5 seconds after every 5 files.
            if hlistc >4:
                time.sleep(5)
                hlistc = 0
        print("-----Page "+str(pgn)+" Crawling Ends-----")
        print(psutil.virtual_memory())
# Free the per-page working set before the merge phase.
del pgn, exurl, orig, BSorig, table, link, link2, eol, hlist, req, recl, obj, cookd, excook, exhead
gc.collect()
# Merge all per-gallery JSON files into a single fin.json.
for f in glob.glob("*.json"):
    with open(f, "rb") as inf:
        merge.append(json.load(inf))
del f
gc.collect()
with open("fin.json", "w", encoding="UTF-8") as out:
    json.dump(merge, out, ensure_ascii=False, sort_keys=True)
|
[
"noreply@github.com"
] |
noreply@github.com
|
c7cc16f7ebab168223dc1364d956d6a0c8612445
|
e56563ea34f09b2269398cb038256f070fad34b5
|
/py/216_Combination_Sum_III.py
|
7b677bdc65522e1de2d2864dc79e6f0e2db06020
|
[] |
no_license
|
zymov/leetcode
|
95bdefa84c2643933dde3b0826c732a4b9b03431
|
cbf87c5a37543894054040d296741931f8c7b205
|
refs/heads/master
| 2020-03-21T23:12:17.132201
| 2020-02-06T04:24:11
| 2020-02-06T04:24:11
| 139,172,577
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 599
|
py
|
from typing import List
class Solution:
    """LeetCode 216: combinations of k distinct digits 1-9 summing to n."""

    def combinationSum3(self, k: int, n: int) -> List[List[int]]:
        """Return every combination (in DFS/lexicographic order)."""
        results: List[List[int]] = []
        digits = list(range(1, 10))
        self.backtracking(digits, 0, k, n, [], results)
        return results

    def backtracking(self, nums: List[int], index: int, k: int, remain: int,
                     comb: List[int], res: List[List[int]]) -> None:
        """DFS helper: extend comb with digits from nums[index:]."""
        if k < 0 or remain < 0:
            return  # overshot: too many digits picked or sum exceeded
        if remain == 0 and k == 0:
            res.append(comb)
        for pos in range(index, len(nums)):
            # comb + [...] builds a fresh list, so no explicit undo is needed.
            self.backtracking(nums, pos + 1, k - 1, remain - nums[pos],
                              comb + [nums[pos]], res)
|
[
"eyeder@163.com"
] |
eyeder@163.com
|
7868596997fff7f69033313b6a5b4fab5e61c586
|
2d8839935bb79e9190487ab0b9cf17fa97716edf
|
/tas/rng.py
|
ff6c72d9f4cc697b69f3330856b3c5fd6a19363c
|
[] |
no_license
|
yuhasem/poc_utils
|
914ef6069f8f8b22efdffdfce182343539983c73
|
c45ae82f3038432611ccf3336c72e31532843f9e
|
refs/heads/master
| 2023-04-10T17:30:47.059387
| 2023-03-19T23:21:19
| 2023-03-19T23:21:19
| 202,916,199
| 0
| 0
| null | 2020-04-18T18:03:39
| 2019-08-17T18:06:50
|
Python
|
UTF-8
|
Python
| false
| false
| 4,832
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 13 19:24:51 2021
@author: yuhasem
"""
def top(value):
    """Return the upper 16 bits of a 32-bit LCG seed (the output half)."""
    return value // 65536
# Every step of RNG works as seed = 0x41C64E6D * seed + 0x6073
# These lists are precomputing what happens when you repeat this multiple
# times. The entry at index i is what happens when it's repeated 2^i times.
# This makes computing many frames in advance more efficient.
# multiply[i]: composed LCG multiplier for 2**i steps (mod 2**32).
multiply = [
    0x41C64E6D, 0xC2A29A69, 0xEE067F11, 0xCFDDDF21,
    0x5F748241, 0x8B2E1481, 0x76006901, 0x1711D201,
    0xBE67A401, 0xDDDF4801, 0x3FFE9001, 0x90FD2001,
    0x65FA4001, 0xDBF48001, 0xF7E90001, 0xEFD20001,
    0xDFA40001, 0xBF480001, 0x7E900001, 0xFD200001,
    0xFA400001, 0xF4800001, 0xE9000001, 0xD2000001,
    0xA4000001, 0x48000001, 0x90000001, 0x20000001,
    0x40000001, 0x80000001, 0x00000001, 0x00000001]
# add[i]: matching composed LCG addend for 2**i steps (mod 2**32).
add = [
    0x00006073, 0xE97E7B6A, 0x31B0DDE4, 0x67DBB608,
    0xCBA72510, 0x1D29AE20, 0xBA84EC40, 0x79F01880,
    0x08793100, 0x6B566200, 0x803CC400, 0xA6B98800,
    0xE6731000, 0x30E62000, 0xF1CC4000, 0x23988000,
    0x47310000, 0x8E620000, 0x1CC40000, 0x39880000,
    0x73100000, 0xE6200000, 0xCC400000, 0x98800000,
    0x31000000, 0x62000000, 0xC4000000, 0x88000000,
    0x10000000, 0x20000000, 0x40000000, 0x80000000]
def advanceRng(seed, steps):
    """Advance the 32-bit LCG seed by `steps` frames in O(log steps).

    Decomposes `steps` into powers of two and applies the precomputed
    multiplier/addend pair for each set bit (multiply[i]/add[i] encode
    2**i applications of seed = 0x41C64E6D*seed + 0x6073, mod 2**32).
    """
    i = 0
    while (steps > 0):
        if (steps % 2):
            seed = (seed * multiply[i] + add[i]) & 0xFFFFFFFF
        steps >>= 1
        i += 1
        # Bug fix: the original tested `i > 32`, so a step count with bit 32
        # set indexed multiply[32] (IndexError) before the break could fire.
        # Dropping bits >= 32 is safe because the generator's period is 2**32.
        if (i >= 32):
            break
    return seed
class Pokemon():
    """Base class holding a generated nature and the six IV fields."""

    def __repr__(self):
        stats = (self.hp_iv, self.att_iv, self.def_iv,
                 self.spa_iv, self.spd_iv, self.spe_iv)
        return "nat: %d, [%d,%d,%d,%d,%d,%d]" % ((self.nature,) + stats)
class StaticPokemon(Pokemon):
    """Pokemon generated by the static-encounter routine: PID over two RNG
    frames, then two IV frames."""
    def __init__(self, seed):
        # The first step is the usual VBlank
        seed = advanceRng(seed, 2)
        # PID is assembled low half first, then high half.
        pid = top(seed)
        seed = advanceRng(seed, 1)
        pid += top(seed) << 16
        self.nature = pid % 25;
        # First IV frame packs HP/Atk/Def in three 5-bit fields.
        seed = advanceRng(seed, 1)
        ivs = top(seed)
        self.hp_iv = ivs & 0x1F
        ivs >>= 5
        self.att_iv = ivs & 0x1F
        ivs >>= 5
        self.def_iv = ivs & 0x1F
        # Second IV frame packs Spe/SpA/SpD.
        seed = advanceRng(seed, 1)
        ivs = top(seed)
        self.spe_iv = ivs & 0x1F
        ivs >>= 5
        self.spa_iv = ivs & 0x1F
        ivs >>= 5
        self.spd_iv = ivs & 0x1F
class WallyRaltsPokemon(Pokemon):
    """Ralts from the Wally catch tutorial: PIDs are rerolled until male."""
    def __init__(self, seed):
        # VBlank + 2 steps to generate TID (which we don't track)
        seed = advanceRng(seed, 3)
        male = False
        while not male:
            pid = 0
            seed = advanceRng(seed, 1)
            pid = top(seed)
            seed = advanceRng(seed, 1)
            pid += top(seed) << 16
            # Gender nibble > 7 is treated as male here.
            # NOTE(review): confirm the threshold against the species'
            # gender-ratio table.
            male = ((pid & 0xf0) >> 4) > 7
        self.nature = pid % 25
        # Two IV frames, same 5-bit packing as StaticPokemon.
        seed = advanceRng(seed, 1)
        ivs = top(seed)
        self.hp_iv = ivs & 0x1F
        ivs >>= 5
        self.att_iv = ivs & 0x1F
        ivs >>= 5
        self.def_iv = ivs & 0x1F
        seed = advanceRng(seed, 1)
        ivs = top(seed)
        self.spe_iv = ivs & 0x1F
        ivs >>= 5
        self.spa_iv = ivs & 0x1F
        ivs >>= 5
        self.spd_iv = ivs & 0x1F
class WildPokemon(Pokemon):
    """Wild-encounter Pokemon; also counts the RNG advances consumed."""
    def __init__(self, seed):
        # The first step is one to check for Synchronize, which we don't track.
        seed = advanceRng(seed, 2)
        self.advances = 2
        # Wild generation rolls a target nature first, then rerolls PIDs
        # until one matches it.
        self.nature = top(seed) % 25
        tentative_nature = -1
        while tentative_nature != self.nature:
            self.pid = 0
            seed = advanceRng(seed, 1)
            self.pid = top(seed)
            seed = advanceRng(seed, 1)
            self.pid += top(seed) << 16
            tentative_nature = self.pid % 25
            self.advances += 2  # two frames per PID attempt
        # Two IV frames, same 5-bit packing as the other encounter types.
        seed = advanceRng(seed, 1)
        ivs = top(seed)
        self.hp_iv = ivs & 0x1F
        ivs >>= 5
        self.att_iv = ivs & 0x1F
        ivs >>= 5
        self.def_iv = ivs & 0x1F
        seed = advanceRng(seed, 1)
        ivs = top(seed)
        self.spe_iv = ivs & 0x1F
        ivs >>= 5
        self.spa_iv = ivs & 0x1F
        ivs >>= 5
        self.spd_iv = ivs & 0x1F
        # Account for the two IV frames above.
        self.advances += 2
def feebasTilesFromSeed(seed):
    """Return the Feebas fishing tiles generated from `seed`.

    Iterates the LCG with the alternate increment 0x3039 (not advanceRng's
    0x6073) until six tiles >= 4 have been produced.  A raw roll of 0 maps
    to tile 447; rolls below 4 are appended but do not count toward the six.
    """
    tiles = []
    valid = 0
    while valid < 6:
        seed = (0x41c64e6d * seed + 0x3039) % (1 << 32)
        tile = (top(seed) & 0xffff) % 0x1bf
        if tile == 0:
            tile = 447
        if tile >= 4:
            valid += 1
        tiles.append(tile)
    return tiles
def rareCandies(seed, size=6):
    """Count rare candies from a Pickup roll over `size` party slots.

    Each slot advances the RNG once for the 1-in-10 pickup check; on a hit
    it advances again and an item roll in [50, 60) yields a rare candy.
    """
    candies = 0
    for _ in range(size):
        seed = advanceRng(seed, 1)
        if (top(seed) % 10 != 0):
            continue  # no pickup for this slot
        seed = advanceRng(seed, 1)
        roll = top(seed) % 100
        if (50 <= roll < 60):
            candies += 1
    return candies
|
[
"lilyuhas@gmail.com"
] |
lilyuhas@gmail.com
|
49593cfef8190bf81ad085564003ab1b4b9ef236
|
7b7b0a813ad2008d08c32b67aa71e442a592fc38
|
/pytorch_wrapper/modules/sequence_basic_cnn_encoder.py
|
e9890f4aa161f51e15de4877514a7f19d14a9755
|
[
"MIT"
] |
permissive
|
HM102/pytorch-wrapper
|
312f259b0ce2645c1ec60b3c8513b4532e5018d8
|
ff95fb1c7153c67307f1b80349cca15e2c1ab0bf
|
refs/heads/master
| 2020-07-03T14:20:50.540972
| 2019-08-11T11:07:59
| 2019-08-11T11:07:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,441
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .. import functional as pwF
class SequenceBasicCNNEncoder(nn.Module):
    """
    Basic CNN Encoder for sequences (https://arxiv.org/abs/1408.5882).

    Runs several Conv1d filters of different heights over the time axis,
    pools each feature map down to one vector, and concatenates the results.
    """

    def __init__(self,
                 time_step_size,
                 input_activation=None,
                 kernel_heights=(1, 2, 3, 4, 5),
                 out_channels=300,
                 pre_pooling_activation=nn.ReLU,
                 pooling_function=F.max_pool1d,
                 post_pooling_activation=None,
                 post_pooling_dp=0):
        """
        :param time_step_size: Time step size.
        :param input_activation: Callable that creates the activation used on the input.
        :param kernel_heights: Tuple containing filter heights.
        :param out_channels: Number of filters for each filter height.
        :param pre_pooling_activation: Callable that creates the activation used before pooling.
        :param pooling_function: Callable that performs a pooling function before the activation.
        :param post_pooling_activation: Callable that creates the activation used after pooling.
        :param post_pooling_dp: Dropout probability applied after pooling.
        """
        super(SequenceBasicCNNEncoder, self).__init__()

        # Sequences shorter than the tallest kernel get padded in forward().
        self._min_len = max(kernel_heights)
        self._kernel_heights = kernel_heights

        # NOTE(review): stored but never applied in forward() -- possibly
        # intentional; verify upstream usage.
        self._input_activation = None if input_activation is None else input_activation()

        convs = [nn.Conv1d(in_channels=time_step_size,
                           out_channels=out_channels,
                           kernel_size=height)
                 for height in kernel_heights]
        self._convolutional_layers = nn.ModuleList(modules=convs)

        self._pre_pooling_activation = None if pre_pooling_activation is None else pre_pooling_activation()
        self._pooling_function = pooling_function
        self._post_pooling_activation = None if post_pooling_activation is None else post_pooling_activation()
        self._output_dp_layer = nn.Dropout(post_pooling_dp) if post_pooling_dp > 0 else None

    def forward(self, batch_sequences):
        """
        :param batch_sequences: 3D Tensor (batch_size, sequence_length, time_step_size) containing the sequence.
        :return: 2D Tensor (batch_size, len(kernel_heights) * out_channels) containing the encodings.
        """
        seq_len = batch_sequences.shape[1]
        if seq_len < self._min_len:
            # Left-pad so every kernel height fits at least once.
            batch_sequences = pwF.pad(batch_sequences, self._min_len - seq_len, dim=1, pad_at_end=False)

        # Conv1d expects (batch, channels, length).
        channels_first = batch_sequences.transpose(1, 2)
        feature_maps = [conv(channels_first) for conv in self._convolutional_layers]

        if self._pre_pooling_activation is not None:
            feature_maps = [self._pre_pooling_activation(fm) for fm in feature_maps]

        # Pool each feature map over its full length -> (batch, out_channels).
        pooled = [self._pooling_function(fm, fm.shape[2]).squeeze(2) for fm in feature_maps]

        if self._post_pooling_activation is not None:
            pooled = [self._post_pooling_activation(p) for p in pooled]

        encodings = pooled[0] if len(self._kernel_heights) == 1 else torch.cat(pooled, dim=1)

        if self._output_dp_layer is not None:
            encodings = self._output_dp_layer(encodings)

        return encodings
|
[
"jkoutsikakis@gmail.com"
] |
jkoutsikakis@gmail.com
|
9cfe618f17d438eedbd354b0d2ae50576ab8c448
|
ca0ef0a1ed47d75e651fcd7109852c0723a10a3d
|
/msfalcon_beaker.py
|
32b7841b9f9380046793f98e7159a8c25d59a4e6
|
[
"MIT"
] |
permissive
|
stancikcom/test
|
e4066004cbea3802e07f0273d1a90d379375f058
|
4d05aae7cfc6a7f48c103d32910b0514af955088
|
refs/heads/master
| 2016-09-11T12:25:37.887674
| 2015-01-15T20:24:44
| 2015-01-15T20:24:44
| 29,316,442
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,059
|
py
|
from beaker.middleware import SessionMiddleware
def simple_app(environ, start_response):
    """Minimal WSGI app showing Beaker session access."""
    # Beaker's middleware places the session dict in the WSGI environ.
    session = environ['beaker.session']
    # Was the user already flagged as logged in on a previous request?
    logged_in = 'logged_in' in session
    # Demonstrate writing a session value.
    session['user_id'] = 10
    start_response('200 OK', [('Content-type', 'text/plain')])
    return ['User is logged in: %s' % logged_in]
# Configure the SessionMiddleware
# File-backed session storage; cookies expire with the browser session.
session_opts = {
    'session.type': 'file',
    'session.cookie_expires': True,
}
import falcon
class Resource(object):
    """Falcon resource for '/' returning a plain-text greeting."""
    def on_get(self, req, resp):
        # resp.body = '{"message": "Hello world!"}'
        # Beaker exposes the session via the raw WSGI environ.
        session = req.env['beaker.session']
        # NOTE(review): Python 2 print statement -- this module is not
        # Python 3 compatible as written.
        print session
        resp.content_type = 'text/plain'
        resp.body = 'Hello world!'
        resp.status = falcon.HTTP_200
# Build the Falcon API, wrap it in Beaker's session middleware, and serve
# it with the bjoern WSGI server on localhost:8080.
api = application = falcon.API()
api.add_route('/',Resource())
wsgi_app = SessionMiddleware(api, session_opts)
import bjoern
bjoern.listen(wsgi_app, host="127.0.0.1", port=8080)
bjoern.run()
|
[
"info@stancik.com"
] |
info@stancik.com
|
4fedb92719068acc90ab3c0697b69d31c3078c67
|
3e60b7d48d101d6a8057d4b8c5f10cb3d494a98a
|
/addinvoice.py
|
c7bc679fb992f372eae9311cb2434def4121d162
|
[] |
no_license
|
suraj-adewale/SmartAccount
|
15ebdd08954ead735e91b87c4702f4597674181e
|
cc7c0ca04b9a7a2da0cd0c6f8106041dc90e7ad3
|
refs/heads/main
| 2023-06-10T05:33:44.878772
| 2021-07-01T22:33:59
| 2021-07-01T22:33:59
| 378,435,258
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,419
|
py
|
from PyQt5.QtWidgets import QMainWindow,QHBoxLayout,QAction,QTabWidget,QCompleter,QTableWidgetItem,QCalendarWidget,QTableWidget,QAbstractItemView, QApplication,QDialog, QPushButton,QLabel,QMessageBox,\
QWidget,QVBoxLayout,QGridLayout,QComboBox,QLineEdit,QScrollArea,QDateEdit,QButtonGroup,QFormLayout,QTextEdit,QSpinBox
from PyQt5 import QtCore, QtNetwork,QtWidgets
from PyQt5.QtGui import QIcon,QPixmap,QPainter
from PyQt5.QtCore import Qt, QDate,QDateTime,pyqtSignal
from customers import Customers
from addcustomer import AddCustomer
import sys, json,base64
from babel.numbers import format_currency,parse_decimal#,parse_number
from functools import partial
class ImageWidget(QWidget):
    """Widget that paints a fixed pixmap at its top-left corner (used as the
    'delete row' cell icon in the invoice table)."""
    def __init__(self, imagePath, parent):
        super(ImageWidget, self).__init__(parent)
        # Pixmap is loaded once and repainted on every paint event.
        self.picture = QPixmap(imagePath)
    def paintEvent(self, event):
        painter = QPainter(self)
        painter.drawPixmap(0, 0, self.picture)
class ClickableLineEdit(QLineEdit):
    """QLineEdit that emits a custom `clicked` signal on left-button press."""
    # QLineEdit has no clicked() signal of its own.
    clicked=pyqtSignal()
    def mousePressEvent(self,event):
        # Only the left mouse button counts as a click.
        if event.button()==Qt.LeftButton: self.clicked.emit()
class Invoice(QMainWindow):
    def __init__(self,dic, parent=None):
        """Build the invoice window.

        :param dic: previously saved invoice data to edit; {} for a new invoice.
        :param parent: optional parent widget.
        """
        super(Invoice, self).__init__(parent)
        self.title = 'Invoice'
        # Offset the window relative to the current position.
        self.left = (self.x()+230)
        # NOTE(review): uses x() for the vertical offset -- probably meant
        # y(); confirm intended placement.
        self.top = (self.x()+50)
        self.width = 900
        self.height = 550
        self.edit_data=dic
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        # Administrators talk to a local database; plain users use the
        # configured server address.
        usertype=json.load(open("db/usertype.json", "r"))
        if usertype=='Administrator':
            self.ip='localhost'
        if usertype=='User':
            self.ip=json.load(open("db/ipaddress.json", "r"))
        #self.setStyleSheet(open("qss/mainstyle.qss", "r").read())
        self.InvoiceContent()
        self.setCentralWidget(self.widget)
        self.show()
    def window_close(self):
        # Convenience wrapper so other windows can close this one.
        self.close()
    def InvoiceContent(self):
        """Build the whole invoice form: billing/invoice tabs, the line-items
        table, comment tabs, totals, and the action buttons; then pre-fill
        everything from self.edit_data when editing an existing invoice."""
        self.widget=QWidget()
        # widgetDic maps a table row index -> [qty, item, description,
        # unitprice, tax, total] editor widgets for that line item.
        self.widgetDic={}
        self.balance=self.comborow=0
        self.row=10
        #self.row_col='00'
        self.rowCounts=self.row
        self.amt_placeholder=format_currency(0,'NGN', locale='en_US')
        # Lookup data (customers, accounts, next invoice number) prepared
        # elsewhere and cached in this JSON file.
        self.requireddata=json.load(open("db/addinvoice.json", "r"))
        self.MessageBox=QMessageBox()
        mainlayout=QVBoxLayout()
        self.widget.setLayout(mainlayout)
        # Top band: billing tab (left) and invoice tab (right).
        billinglayout=QHBoxLayout()
        mainlayout.addLayout(billinglayout,2)
        billingtab=QTabWidget()
        invoicetab=QTabWidget()
        billinglayout.addWidget(billingtab,3)
        billinglayout.addWidget(invoicetab,2)
        self.billing = QWidget()
        billingform=QFormLayout()
        billingform.setHorizontalSpacing(50)
        self.billing.setLayout(billingform)
        self.billing.setStatusTip("Enter supplier information")
        self.invoice = QWidget()
        invoiceform=QFormLayout()
        invoiceform.setHorizontalSpacing(50)
        self.invoice.setLayout(invoiceform)
        self.invoice.setStatusTip("Enter supplier information")
        billingtab.addTab(self.billing,"Billing")
        invoicetab.addTab(self.invoice,"Invoice")
        # Customer picker with list/edit buttons.
        customerlayout=QGridLayout()
        self.customer=QComboBox()
        self.customer.setEditable(True)
        self.customerbtn=QPushButton("")
        self.customeredit=QPushButton("")
        customerlayout.addWidget(self.customer,0,0,0,4)
        customerlayout.addWidget(self.customerbtn,0,4)
        customerlayout.addWidget(self.customeredit,0,5)
        self.customerbtn.setIcon(QIcon('image/icon/team.png'))
        self.customerbtn.setIconSize(QtCore.QSize(20,20))
        self.customeredit.setIcon(QIcon('image/icon/boy.png'))
        self.customeredit.setIconSize(QtCore.QSize(15,15))
        self.customerbtn.clicked.connect(self.CustomerWindow)
        self.customeredit.clicked.connect(self.CustomerEdit)
        self.address=QTextEdit()
        self.address.setMaximumHeight(50)
        self.po_no=QLineEdit()
        self.customertax=QComboBox()
        self.customertax.addItems(['Default','Exempt'])
        createfromlayout=QGridLayout()
        self.createfrom=QComboBox()
        self.createfrombtn=QPushButton("")
        createfromlayout.addWidget(self.createfrom,0,0,0,4)
        createfromlayout.addWidget(self.createfrombtn,0,4)
        # Creates self.dateedit1 (today's date, calendar popup).
        self.date()
        termlayout= QGridLayout()
        self.term=QComboBox()
        self.term.addItems(["Pay in days","COD"])
        self.spinbox = QSpinBox()
        self.spinbox.setValue(30)
        termlayout.addWidget(self.term,0,0)
        termlayout.addWidget(self.spinbox,0,1)
        self.salesperson=QComboBox()
        self.salesperson.setEditable(True)
        self.invoice_no=QLineEdit()
        self.invoice_no.setReadOnly(True)
        self.createfrom.addItems(["[ New Invoice]","Existing Invoice"])
        # NOTE(review): invoice_no is rebuilt here (losing the read-only
        # flag set above); confirm whether the number should stay editable.
        self.invoice_number=self.requireddata['invoiceno']
        self.invoice_no=QLineEdit(self.invoice_number)
        self.salesaccount=QComboBox()
        self.salesaccount.setEditable(True)
        self.receivableaccount=QComboBox()
        self.receivableaccount.setEditable(True)
        # Populate the customer combo from the cached customer records.
        self.customerdata=self.requireddata['customerdata']
        self.customer.addItem("")
        self.customer.currentTextChanged.connect(self.CustomerChange)
        row=0
        for key in sorted(self.customerdata):
            self.customer.insertItem(row,self.customerdata[key][0])
            row=row+1
        # Revenue accounts combo plus autocompletion.
        self.revenueaccounts=self.requireddata['revenueaccounts']
        self.salesaccount.addItem("")
        self.salesaccount.insertItem(0,'-- Create a new account --')
        row=1
        completerlist=[]
        for key in self.revenueaccounts:
            self.salesaccount.insertItem(row,self.revenueaccounts[key][2])
            row=row+1
            completerlist.append(self.revenueaccounts[key][2])
        completer = QCompleter(completerlist)
        self.salesaccount.setCompleter(completer)
        # Receivable accounts combo plus autocompletion.
        self.receivables=self.requireddata['receivableaccounts']
        self.receivableaccount.addItem("")
        self.receivableaccount.insertItem(0,'-- Create a new account --')
        row=1
        completerlist=[]
        for key in self.receivables:
            self.receivableaccount.insertItem(row,self.receivables[key][2])
            row=row+1
            completerlist.append(self.receivables[key][2])
        completer = QCompleter(completerlist)
        self.receivableaccount.setCompleter(completer)
        billingform.addRow("Customer:",customerlayout)
        billingform.addRow("Billing to:",self.address)
        billingform.addRow("Customer PO No:",self.po_no)
        billingform.addRow("Customer Tax:",self.customertax)
        invoiceform.addRow("Create from:",createfromlayout)
        invoiceform.addRow("Date:",self.dateedit1)
        invoiceform.addRow("Terms:",termlayout)
        invoiceform.addRow("Salesperson:",self.salesperson)
        invoiceform.addRow("Invoice No:",self.invoice_no)
        invoiceform.addRow("Revenue Account:",self.salesaccount)
        invoiceform.addRow("Receivables Account:",self.receivableaccount)
        # Middle band: the line-items table.
        self.addJournalTable()
        textlayout=QGridLayout()
        buttonlayout=QGridLayout()
        mainlayout.addLayout(self.tablelayout,5)
        mainlayout.addLayout(textlayout,2)
        mainlayout.addLayout(buttonlayout,1)
        # Comment tabs plus the subtotal/tax/total summary.
        self.comment=QTextEdit()
        self.comment.setPlaceholderText('[Enter invoice note]')
        self.nocomment=QTextEdit('Please contact us for more information about payment options.')
        self.privatecomment=QTextEdit()
        self.privatecomment.setPlaceholderText('[Enter internal notes]')
        self.footnote=QTextEdit('Thank you for your business.')
        commentgtab=QTabWidget()
        commentgtab.addTab(self.comment,"Comments")
        commentgtab.addTab(self.privatecomment,"Private comments")
        commentgtab.addTab(self.nocomment,"No comment")
        commentgtab.addTab(self.footnote,"Foot Comments")
        totalform=QFormLayout()
        totalform.setVerticalSpacing(5)
        self.subtotal=QLabel(self.amt_placeholder)
        self.tax=QLabel(self.amt_placeholder)
        self.total=QLabel()
        self.total.setText('<b>'+self.amt_placeholder+'</b>')
        totalform.addRow('Subtotal:',self.subtotal)
        totalform.addRow('Tax:',self.tax)
        totalform.addRow('<b>Total</b>',self.total)
        textlayout.addWidget(commentgtab,0,0,1,2)
        textlayout.addWidget(QLabel(''),0,2)
        textlayout.addLayout(totalform,0,3)
        # Bottom band: action buttons.
        self.record=QPushButton('Record')
        self.cancel=QPushButton('Cancel')
        self.help=QPushButton('Help')
        self.record.clicked.connect(self.Save_record)
        self.cancel.clicked.connect(self.close)
        buttonlayout.addWidget(QLabel(),0,0,1,3)
        buttonlayout.addWidget(self.record,0,4)
        buttonlayout.addWidget(self.cancel,0,5)
        buttonlayout.addWidget(self.help,0,6)
        # Editing mode: pre-fill the form from the saved invoice record.
        if self.edit_data !={}:
            edit_data=self.edit_data['0']
            # Saved date is an ISO-style 'YYYY-MM-DD' string.
            date=edit_data['0'][10]
            year=(date.split('-'))[0]
            month=(date.split('-'))[1]
            day=(date.split('-'))[2]
            self.dateedit1.setDate(QDate(int(year),int(month),int(day)))
            self.customer.setCurrentText(edit_data['0'][6])
            self.address.setText(edit_data['0'][7])
            self.invoice_no.setText(edit_data['0'][9])
            self.salesperson.setCurrentText(edit_data['0'][11])
            self.receivableaccount.setCurrentText(edit_data['0'][1])
            self.salesaccount.setCurrentText(edit_data['0'][4])
            # Restore the saved line items into the table.
            edit_data=self.edit_data['1']
            self.UpdateRows(edit_data)
            self.comborow=len(edit_data)
            self.unitprice_changed_function(self.comborow-1)
            self.comborow=len(self.edit_data)
            # Grow the table when the saved invoice has more than 10 lines.
            if self.comborow>10:
                self.rowCounts=(self.comborow+5)
                self.table.setRowCount(self.rowCounts)
                self.table.resizeRowsToContents()
    def CustomerWindow(self):
        # Open the customer list dialog; kept as an attribute so the window
        # is not garbage-collected while open.
        self.customerlist=Customers(self)
        self.customerlist.show()
def CustomerEdit(self):
self.customeredit=AddCustomer({})
self.customeredit.show()
def CustomerChange(self,obj):
try:
index=str(self.customer.currentIndex())
address=(self.customerdata.get(index))[7]
except Exception as e:
address=''
self.address.setText(address)
    def date(self):
        """Create self.dateedit1, a calendar-popup date editor set to today."""
        date = QDate()
        currentdate=date.currentDate()
        self.dateedit1 = QDateEdit()
        # NOTE(review): this names the Invoice window itself, not the date
        # editor -- self.dateedit1.setObjectName was probably intended.
        self.setObjectName("dateedit")
        self.dateedit1.setDate(currentdate)
        self.dateedit1.setDisplayFormat('dd/MM/yyyy')
        self.dateedit1.setCalendarPopup(True)
    def addJournalTable(self):
        """Create the 7-column invoice line-items table inside self.tablelayout."""
        JournalHeader=[" Qty "," Item "," Description "," Unit Price "," Tax "," Total ",""]
        self.tablelayout=QVBoxLayout()
        self.table =QTableWidget()
        self.table.setColumnCount(7) # seven columns, matching JournalHeader
        self.table.setRowCount(self.row)
        self.table.setEditTriggers(QAbstractItemView.AllEditTriggers)
        #self.table.setSizePolicy(QtWidgets.QSizePolicy.Expanding,QtWidgets.QSizePolicy.Minimum)
        self.table.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
        header = self.table.horizontalHeader()
        header.setSectionResizeMode(0, QtWidgets.QHeaderView.ResizeToContents)
        header.setSectionResizeMode(1, QtWidgets.QHeaderView.Stretch)
        header.setSectionResizeMode(2, QtWidgets.QHeaderView.Stretch)
        header.setSectionResizeMode(3, QtWidgets.QHeaderView.ResizeToContents)
        header.setSectionResizeMode(4, QtWidgets.QHeaderView.ResizeToContents)
        # NOTE(review): the second argument below is an arithmetic expression
        # on the enum value, not a resize mode -- confirm the intended mode.
        header.setSectionResizeMode(5,1*(QtWidgets.QHeaderView.Stretch)//2)
        header.setSectionResizeMode(6, QtWidgets.QHeaderView.ResizeToContents)
        self.tablelayout.addWidget(self.table)
        # Every cell click routes through AddJournals (col 0 adds a row of
        # editors, col 6 deletes the clicked row).
        self.table.clicked.connect(self.AddJournals)
        self.table.resizeRowsToContents()
        self.table.setSelectionMode(QAbstractItemView.MultiSelection)
        self.table.setEditTriggers(QAbstractItemView.NoEditTriggers)
        self.table.setShowGrid(True)
        self.table.setHorizontalHeaderLabels(JournalHeader)
        self.table.horizontalHeaderItem(0).setToolTip("Click on any row to add an account")
        self.table.horizontalHeaderItem(1).setToolTip("")
        self.table.horizontalHeaderItem(2).setToolTip("")
        self.table.horizontalHeaderItem(6).setToolTip("Click to delete a row")
def AddJournals(self,item):
    """Handle a click on the line-item table.

    Clicking column 0 materialises a new editable row of widgets (qty, item,
    description, unit price, tax, total, delete icon) at self.comborow and
    registers change handlers that recompute totals.  Clicking column 6
    deletes the clicked row.

    Fixes vs. original: removed the no-op statement `self.rowCounts+5` and
    renamed the local combo so it no longer shadows the `item` parameter.
    """
    currRow=(item.row())
    col=item.column()
    if col==0:
        qty=QComboBox()
        qty.setEditable(True)
        item_box=QComboBox()
        item_box.setEditable(True)
        description=QComboBox()
        description.setEditable(True)
        unitprice=QLineEdit()
        tax=QComboBox()
        tax.setEditable(True)
        total=QLabel()
        image = ImageWidget('image/icon/clear.png', self)
        unitprice.setPlaceholderText(self.amt_placeholder)
        total.setText(self.amt_placeholder)
        if self.comborow not in self.widgetDic:
            # Remember the row's widgets so later edits/deletes can find them.
            widgetList=[]
            widgetList.append(qty)
            widgetList.append(item_box)
            widgetList.append(description)
            widgetList.append(unitprice)
            widgetList.append(tax)
            widgetList.append(total)
            self.widgetDic[self.comborow]=widgetList
        # Recompute this row's total whenever price or quantity changes.
        (self.widgetDic[self.comborow][3]).textChanged.connect(partial(self.unitprice_changed_function,self.comborow))
        (self.widgetDic[self.comborow][0]).currentTextChanged.connect(partial(self.unitprice_changed_function,self.comborow))
        self.table.setCellWidget(self.comborow,0,qty)
        self.table.setCellWidget(self.comborow,1,item_box)
        self.table.setCellWidget(self.comborow,2, description)
        self.table.setCellWidget(self.comborow,3,unitprice)
        self.table.setCellWidget(self.comborow,4,tax)
        self.table.setCellWidget(self.comborow,5, total)
        self.table.setCellWidget(self.comborow, 6, image)
        self.comborow=self.comborow+1
        if self.comborow==self.rowCounts:
            # Table is full — grow it by five blank rows.
            self.rowCounts=(self.rowCounts+5)
            self.table.setRowCount(self.rowCounts)
        self.table.resizeRowsToContents()
    if col==6:
        self.DeleteRow(currRow)
def DeleteRow(self,row):
    """Remove one line-item row and compact the remaining rows.

    Pops the row's widgets, snapshots every surviving row's values into a
    plain dict (re-indexed from 0), rebuilds the table from that snapshot via
    UpdateRows, shrinks the table if it has spare rows, and re-runs the total
    recalculation.
    """
    if row in self.widgetDic.keys():
        self.widgetDic.pop(row)
    invoicedata={}
    index=0
    # Snapshot each remaining row as [qty, item, description, unitprice,
    # tax, total] — combo boxes read via currentText(), line edits via text().
    for key in sorted(self.widgetDic):
        data_list=[]
        for col in range(6):
            if col==0:
                data_list.append((self.widgetDic[key][0]).currentText())
            if col==1:
                data_list.append((self.widgetDic[key][1]).currentText())
            if col==2:
                data_list.append((self.widgetDic[key][2]).currentText())
            if col==3:
                data_list.append((self.widgetDic[key][3]).text())
            if col==4:
                data_list.append((self.widgetDic[key][4]).currentText())
            if col==5:
                data_list.append((self.widgetDic[key][5]).text())
        invoicedata[index]=data_list
        index=index+1
    self.UpdateRows(invoicedata)
    self.comborow=self.comborow-1
    # Keep at least 10 visible rows; otherwise shrink by one.
    if self.rowCounts>10:
        self.rowCounts=(self.rowCounts-1)
        self.table.setRowCount(self.rowCounts)
    self.table.resizeRowsToContents()
    # Recompute totals; row-1 may be -1 here, which the handler rejects.
    self.unitprice_changed_function(row-1)
def UpdateRows(self,invoicedata):
    """Rebuild the line-item table from a snapshot dict.

    `invoicedata` maps row index -> [qty, item, description, unitprice, tax,
    total] (as produced by DeleteRow).  Fresh widgets are created per row,
    repopulated, re-registered in self.widgetDic, and re-wired to the total
    recalculation handler.
    """
    self.table.clearContents()
    self.widgetDic={}
    for keys in sorted(invoicedata):
        try:
            widgetList=[]
            qty=QComboBox()
            qty.setEditable(True)
            item=QComboBox()
            item.setEditable(True)
            description=QComboBox()
            description.setEditable(True)
            unitprice=QLineEdit()
            tax=QComboBox()
            tax.setEditable(True)
            total=QLabel()
            unitprice.setPlaceholderText(self.amt_placeholder)
            qty.setCurrentText(invoicedata[keys][0])
            item.setCurrentText(str(invoicedata[keys][1]))
            description.setCurrentText(invoicedata[keys][2])
            unitprice.setText(invoicedata[keys][3])
            tax.setCurrentText(str(invoicedata[keys][4]))
            total.setText(invoicedata[keys][5])
            self.table.setCellWidget(int(keys),0,qty)
            self.table.setCellWidget(int(keys),1,item)
            self.table.setCellWidget(int(keys),2, description)
            self.table.setCellWidget(int(keys),3,unitprice)
            self.table.setCellWidget(int(keys),4,tax)
            self.table.setCellWidget(int(keys),5, total)
            image = ImageWidget('image/icon/clear.png', self)
            self.table.setCellWidget(int(keys), 6, image)
            widgetList.append(qty)
            widgetList.append(item)
            widgetList.append(description)
            widgetList.append(unitprice)
            widgetList.append(tax)
            widgetList.append(total)
            self.widgetDic[int(keys)]=widgetList
            # Re-wire price/qty edits to keep totals in sync for this row.
            unitprice.textChanged.connect(partial(self.unitprice_changed_function,int(keys)))
            qty.currentTextChanged.connect(partial(self.unitprice_changed_function,int(keys)))
        except Exception as e:
            # Best effort per row: log and continue with the next one.
            print(e)
def unitprice_changed_function(self,currrow):
    """Recompute the row total and the invoice subtotal/grand total.

    Triggered whenever a row's qty or unit price changes.  Non-numeric input
    is cleared; the outer except branch handles a row being emptied by
    subtracting its previous total out of the displayed grand total.
    Control flow is deliberately exception-driven — do not simplify without
    re-testing the clear-a-row path.
    """
    if currrow==-1:
        # Sentinel from DeleteRow when the first row was removed.
        return False
    try:
        float((self.widgetDic[currrow][0]).currentText())
    except Exception as e:
        # Non-numeric quantity — wipe it.
        (self.widgetDic[currrow][0]).setCurrentText('')
    try:
        float((self.widgetDic[currrow][3]).text())
    except Exception as e:
        # Non-numeric unit price — wipe it.
        (self.widgetDic[currrow][3]).setText('')
    try:
        qty=(self.widgetDic[currrow][0]).currentText()
        unitprice=(self.widgetDic[currrow][3]).text()
        if qty=="" or unitprice=="":
            return False
        total_=float(qty)*float(unitprice)
        (self.widgetDic[currrow][5]).setText(format_currency(total_,'NGN', locale='en_US'))
        # Re-sum every row for the invoice subtotal/grand total.
        total=0
        for row in self.widgetDic:
            widget=self.widgetDic[row]
            # NOTE(review): both sides of this `or` test widget[3]; one was
            # presumably meant to test the qty (widget[0]) — confirm.
            if (widget[3]).text()=="" or (widget[3]).text()=="":
                return False
            qty=(widget[0]).currentText()
            unitprice=(widget[3]).text()
            total=total+float(qty)*float(unitprice)
        self.subtotal.setText(format_currency(total,'NGN', locale='en_US'))
        #self.tax=QLabel(self.amt_placeholder)
        self.total.setText('<b>'+format_currency(total,'NGN', locale='en_US')+'</b>')
    except Exception as e:
        # A row became invalid: back its previous amount out of the totals.
        if (self.widgetDic[currrow][5]).text()=="":
            return False
        # Parse "₦x" out of the row label and the "<b>₦y</b>" grand total.
        val1=(((self.widgetDic[currrow][5]).text()).split('₦'))[1]
        val2=((((self.total.text()).split('₦'))[1]).split('</b>'))[0]
        val=float(val2)-float(val1)
        (self.widgetDic[currrow][5]).clear()
        self.subtotal.setText(format_currency(val,'NGN', locale='en_US'))
        #self.tax=QLabel(self.amt_placeholder)
        self.total.setText('<b>'+format_currency(val,'NGN', locale='en_US')+'</b>')
def Save_record(self):
    """Assemble the invoice and post it to the journal service.

    Builds one record per line-item row plus the double-entry journal lines
    (credits per item against the sales account, one balancing debit against
    receivables), base64-encodes the JSON payload, and POSTs it to the
    /journal endpoint.  The async reply is handled in handleResponse.
    """
    # Format the invoice date as YYYY-MM-DD, zero-padding day/month.
    date1=self.dateedit1.date()
    year1=str(date1.year())
    day1=str(date1.day()) if len(str(date1.day()))==2 else '0'+str(date1.day())
    month1=str(date1.month()) if len(str(date1.month()))==2 else '0'+str(date1.month())
    date=(year1+'-'+month1+'-'+day1)
    # NOTE(review): the due date is read from the SAME widget (dateedit1), so
    # duedate always equals date — a separate due-date editor was probably
    # intended; confirm.
    date2=self.dateedit1.date()
    year2=str(date2.year())
    day2=str(date2.day()) if len(str(date2.day()))==2 else '0'+str(date2.day())
    month2=str(date2.month()) if len(str(date2.month()))==2 else '0'+str(date2.month())
    duedate=(year2+'-'+month2+'-'+day2)
    userdb=open("db/user.json", "r")
    user=json.load(userdb)
    journaltype="Sales"
    address=(self.address.toPlainText())
    customer=self.customer.currentText()
    memo="Sales;"+customer
    ref="SLS[AUTO]"
    revenueaccounts=self.salesaccount.currentText()
    receivables=self.receivableaccount.currentText()
    customerdata=self.customer.currentText()
    # Refuse to post with an unselected account or customer.
    if revenueaccounts=="" or receivables=="" or customerdata=="":
        return False
    # Resolve combo selections back to their stored records; the account
    # combos carry an extra leading entry, hence the -1 offset.
    salesaccount=self.revenueaccounts[str(self.salesaccount.currentIndex()-1)]
    receivableaccount=self.receivables[str(self.receivableaccount.currentIndex()-1)]
    customer=self.customerdata[str(self.customer.currentIndex())]
    invoiceDic={}
    total=0
    subtotal=[]
    # One invoice record per table row; amounts are parsed back out of the
    # "₦x" formatted row totals.
    for row in self.widgetDic:
        amnt=(self.widgetDic[row][5]).text()
        amnt=amnt.split('₦')
        invoicelist=[]
        invoicelist.append(receivableaccount[2])
        invoicelist.append(customer[8])
        invoicelist.append(customer[0])
        invoicelist.append(address)
        invoicelist.append(ref)
        invoicelist.append(self.invoice_no.text())
        #invoicelist.append(int(self.requireddata['invoiceid'])+counts)
        invoicelist.append(date)
        invoicelist.append(duedate)
        invoicelist.append(self.salesperson.currentText())
        invoicelist.append((self.widgetDic[row][0]).currentText())
        invoicelist.append((self.widgetDic[row][1]).currentText())
        invoicelist.append((self.widgetDic[row][2]).currentText())
        invoicelist.append((self.widgetDic[row][3]).text())
        invoicelist.append(str(float(parse_decimal(amnt[1],locale='en_US'))))
        invoicelist.append("Not Paid")
        invoicelist.append(user)
        invoicelist.append(salesaccount[2])
        invoiceDic[row]=invoicelist
        total=total+float(parse_decimal(amnt[1],locale='en_US'))
        subtotal.append(float(parse_decimal(amnt[1],locale='en_US')))
    # Journal lines: credit the sales account per item...
    postDic={}
    rw=0
    for sub in subtotal:
        postList=[]
        postList.append(salesaccount[2])
        postList.append(str(sub))
        postList.append('Credit')
        postList.append(ref)
        postList.append(journaltype)
        postList.append(memo)
        postList.append(date)
        postList.append(user)
        postDic[rw]=postList
        rw=rw+1
    # ...and one balancing debit against receivables, carrying the invoice
    # detail as its last element.
    postList=[]
    postList.append(receivableaccount[2])
    postList.append(str(total))
    postList.append('Debit')
    postList.append(ref)
    postList.append(journaltype)
    postList.append(memo)
    postList.append(date)
    postList.append(user)
    postList.append(invoiceDic)
    postDic[rw]=postList
    # Encode and POST asynchronously; handleResponse consumes the reply.
    postDic=json.dumps(postDic)
    postDic=base64.b64encode(postDic.encode())
    data = QtCore.QByteArray()
    data.append("action=postjournal&")
    data.append("invoice=invoice&")
    data.append("journal={}".format(postDic.decode("utf-8")))
    url = "http://{}:5000/journal".format(self.ip)
    req = QtNetwork.QNetworkRequest(QtCore.QUrl(url))
    req.setHeader(QtNetwork.QNetworkRequest.ContentTypeHeader,
    "application/x-www-form-urlencoded")
    self.nam = QtNetwork.QNetworkAccessManager()
    self.nam.finished.connect(self.handleResponse)
    #return False
    self.nam.post(req, data)
def handleResponse(self, reply):
    """Consume the async reply from the journal POST.

    On success, shows a confirmation dialog and bumps the invoice number;
    on network error, shows the error string.

    Fixes vs. original: corrected user-visible typos ("Databese" ->
    "Database", "succesfully" -> "successfully") and dropped the dead
    `if result == Ok: pass` branch.
    """
    er = reply.error()
    if er == QtNetwork.QNetworkReply.NoError:
        bytes_string = reply.readAll()
        json_ar = json.loads(str(bytes_string, 'utf-8'))
        #data = json_ar['form']
        # Server echoes fields keyed by position: '19' status, '30' journal
        # type, '25' reference, '35' date — TODO confirm against the service.
        if json_ar['19']=='Success':
            journaltype=json_ar['30']
            ref=json_ar['25']
            date=json_ar['35']
            self.MessageBox.setWindowTitle('Post Journal')
            self.MessageBox.setText("")
            self.MessageBox.setInformativeText("{j} Journal with Ref: {r} was successfully posted\non {d}. " "\n\nClick Ok to exit.".format(j=journaltype,r=ref,d=date))
            self.MessageBox.setIcon(self.MessageBox.Information)
            self.MessageBox.setStandardButtons(self.MessageBox.Ok)
            self.MessageBox.show()
            # Pre-increment the invoice number for the next invoice.
            self.invoice_no.setText(str(int(self.invoice_no.text())+1))
            self.MessageBox.exec_()
    else:
        QMessageBox.critical(self, 'Database Connection ', "\n {} \n".format(reply.errorString()))
if __name__ == '__main__':
    # Standalone launch: show the invoice window with an empty data payload.
    app = QApplication(sys.argv)
    ex = Invoice({})
    ex.show()
    sys.exit(app.exec_())
|
[
"noreply@github.com"
] |
noreply@github.com
|
4f2d7e9a93ccb1c73bfa12146ad9add11e573b27
|
d07a26e443538c5fc6b0711aff6e233daef79611
|
/LearnPythonGuessGame.py
|
e3a41526a4b12716d27871e2464f08f1855a7ba6
|
[] |
no_license
|
Zahidsqldba07/Python-learn
|
bd602d490ee53f8e5331e70f92919ca315944ff9
|
ffc1608695ed6c7c3d2b6789913e34235dcf468e
|
refs/heads/master
| 2023-03-16T02:18:19.155281
| 2020-09-19T09:12:48
| 2020-09-19T09:12:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 538
|
py
|
def play_guessing_game(secret_word="respect", guess_limit=7, ask=input, say=print):
    """Run the guess-the-word game.

    Prompts until the player guesses `secret_word` or exhausts `guess_limit`
    attempts; after each wrong guess the next letter of the word is revealed
    as a hint.  `ask`/`say` are injectable (default input/print) so the game
    is testable without a console.

    Returns True on a correct guess, False when out of guesses.
    """
    guess = ''
    guess_count = 0
    while guess != secret_word:
        if guess_count >= guess_limit:
            say("All out of guesses, better luck next time!")
            return False
        guess = ask("What's the secret word?: ")
        guess_count += 1
        if guess != secret_word:
            # Reveal the next letter (assumes guess_limit <= len(secret_word)).
            say("Hint: " + secret_word[guess_count - 1])
    say("Nice work!")
    return True


if __name__ == "__main__":
    play_guessing_game()
|
[
"noreply@github.com"
] |
noreply@github.com
|
6bd1bc226750e4fc2f58126a18698d94ddae4c97
|
605caafb8fd74e713d0a95014c559cede9033e8f
|
/selenium/copy_courses.py
|
cf6723011800c93e888f9d7a0a5d23bef93c8ea1
|
[] |
no_license
|
cbaca90/blackboard
|
ed0f2e66f29337a75aa0a8a412edd4b457517e44
|
93f1b1f60884eac9603596256b14e8cf339f2901
|
refs/heads/master
| 2023-08-12T03:45:09.150444
| 2021-09-21T14:41:44
| 2021-09-21T14:41:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,931
|
py
|
from auth import *
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
def copy_courses(driver):
    """Bulk-copy Blackboard courses listed in copy_courses.txt.

    Each line of the file is "source<TAB>destination".  For each pair, the
    copy-course form is filled in, all content checkboxes are selected, and
    the submit is fired via JavaScript (the button itself is off-screen).
    Success/failure is reported per pair on stdout.
    """
    with open('copy_courses.txt', 'r') as copy_list:
        copy_line = copy_list.read().splitlines()
    for copy in copy_line:
        source, destination = copy.split("\t")
        driver.get("https://courses.cscc.edu/webapps/blackboard/execute/copy_content?navItem=copy_course_content_exists&target=yes&type=course")
        source_field = driver.find_element_by_id("sourceCourseId")
        source_field.click()
        source_field.clear()
        source_field.send_keys(source)
        driver.implicitly_wait(1)
        destination_field = driver.find_element_by_id("destinationCourseId")
        destination_field.click()
        destination_field.clear()
        destination_field.send_keys(destination)
        driver.find_element_by_id("bottom_Submit").click()
        driver.implicitly_wait(3)
        # Check every "content to copy" checkbox via the page's own helper.
        driver.execute_script("selectAll(false, true);return false;")
        # Scroll to the bottom so the remaining controls are interactable.
        body = driver.find_element_by_css_selector('body')
        body.click()
        body.send_keys(Keys.CONTROL+Keys.END)
        driver.find_element_by_id("copyLinkToCourseFilesAndCopiesOfContent").click()
        #driver.find_element_by_id("bottom_Submit").click()
        # Submit via JS; the direct click above was unreliable.
        driver.execute_script("document.getElementById('bottom_Submit').click();")
        driver.implicitly_wait(5)
        # NOTE(review): bare except — would be safer as
        # `except NoSuchElementException` (already imported at top of file).
        try:
            print(driver.find_element_by_id("goodMsg1").text)
            print("Success: "+source+" into "+destination)
        except:
            print("Failed: "+source+" into "+destination)
def main():
    """Entry point: authenticate, run the bulk course copy, then log out."""
    session = login()
    copy_courses(session)
    logout(session)


main()
|
[
"hcrites@cscc.edu"
] |
hcrites@cscc.edu
|
3b475b2198f533613949bc998bef4a4c42ea826f
|
5eb13a4e16bd195e9ef823021bc296a747ff98bb
|
/pbsetq4.py
|
3ae1819bfbf979e447b978bf7e4af69530947dcc
|
[] |
no_license
|
Santosh2108/Python
|
59fff6d744ce4a1992489c43d7bacbe45a869a2a
|
b486fc18417d5463852a4f06eeb922aa2f648f6b
|
refs/heads/master
| 2020-03-22T11:22:29.245458
| 2018-07-12T10:37:41
| 2018-07-12T10:37:41
| 139,967,012
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 772
|
py
|
# Exercises (sphere volume, wholesale book order, run finish time),
# refactored into functions so each part is testable on its own.


def sphere_volume(radius):
    """Return the volume of a sphere of the given radius.

    NOTE(review): the original labelled this result "area", but
    (4/3)*pi*r**3 is the *volume* formula.  pi stays 3.14 to preserve the
    original numeric output.
    """
    return (4 / 3.0) * 3.14 * radius ** 3


def wholesale_total(copies=60, cover_price=24.95, discount=0.40,
                    first_shipping=3, extra_shipping=0.75):
    """Return the total cost of a wholesale book order.

    Each copy sells at the cover price minus `discount`; shipping is
    `first_shipping` for the first copy and `extra_shipping` for each
    additional one.
    """
    discounted_price = cover_price - cover_price * discount
    return discounted_price * copies + first_shipping + (copies - 1) * extra_shipping


def finish_time(start_seconds=6 * 3600 + 52 * 60,
                easy_seconds=2 * (8 * 60 + 15),
                tempo_seconds=3 * (7 * 60 + 12)):
    """Return (hours, minutes, seconds) of the arrival time.

    Start time plus two easy miles (8:15/mile) and three tempo miles
    (7:12/mile), all expressed in seconds since midnight.  divmod keeps the
    arithmetic in integers (matching the original Python 2 // behaviour).
    """
    total = start_seconds + easy_seconds + tempo_seconds
    hours, remainder = divmod(total, 3600)
    minutes, seconds = divmod(remainder, 60)
    return hours, minutes, seconds


if __name__ == '__main__':
    # A) sphere "area" (actually volume; see sphere_volume)
    r = int(raw_input('Enter the radius of the sphere: '))
    print ('the area of the sphere is', sphere_volume(r))
    # B) wholesale rate for 60 books
    print ('price for 60 books', wholesale_total())
    # C) time calculation
    hours, minutes, seconds = finish_time()
    print ('Hours:', hours)
    print ('minutes:', minutes)
    print ('seconds:', seconds)
|
[
"noreply@github.com"
] |
noreply@github.com
|
5ca34cd011e6668c0b56e664aa619380e5b92585
|
7d0f2252623d58de13d6c2b4bdb62f789e237aad
|
/tempimage.py
|
63b2148fbd466c62d2f3b9ee71df52629a8795d6
|
[] |
no_license
|
orakelet/Drivhuset
|
51d4270189065546d7e8d603e2147e3623abe00d
|
f7f03827179fb29e116f8dd8565351058882284b
|
refs/heads/master
| 2020-03-28T20:36:56.335258
| 2018-07-24T19:15:37
| 2018-07-24T19:15:37
| 149,087,536
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
# import the necessary packages
import uuid
import os
import time
class TempImage:
    """A throwaway image file whose name is a timestamp under `basePath`."""

    def __init__(self, basePath="/home/pi/OneDrive/Ute", ext=".jpg"):
        # Name the file after the current local time, e.g. 20180724-191537.jpg
        stamp = time.strftime("%Y%m%d-%H%M%S")
        self.path = "{base_path}/{date_String}{ext}".format(
            base_path=basePath, date_String=stamp, ext=ext)

    def cleanup(self):
        """Delete the file from disk."""
        os.remove(self.path)
|
[
"bjorn@steine.me"
] |
bjorn@steine.me
|
4965e4de4a5b88bbbd49f754a5922043ebf947f4
|
c4076305e57b18fed25c3ad08f71cba263b8ded1
|
/ordinaryPython36/migrations/0011_feed_last_updated.py
|
379b49b0cf1e83b81f763b583d9c22bd1b15deb9
|
[] |
no_license
|
ruslandzh61/TerraNews_Backend
|
498b04a0267ae49c852a76880efd02f753863902
|
9b781b8fb1bec0627c4bc19d250f5dfe69997ca5
|
refs/heads/master
| 2021-03-13T03:53:20.994479
| 2019-02-19T01:47:39
| 2019-02-19T01:47:39
| 84,026,935
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 456
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-16 11:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add nullable `last_updated` to Feed."""

    dependencies = [
        ('ordinaryPython36', '0010_articletrain'),
    ]

    operations = [
        migrations.AddField(
            model_name='feed',
            name='last_updated',
            # Nullable so existing Feed rows need no backfill value.
            field=models.DateTimeField(null=True),
        ),
    ]
|
[
"dzhumakaliev_r@auca.kg"
] |
dzhumakaliev_r@auca.kg
|
b7f9e19bc7036222cb812b05a20b982377dc3a8c
|
1147d91ae3552dfa72632727469c136ada3a7e8d
|
/src/Plot.py
|
65c89e1b02a637672c2923464248ed41a29aaa1a
|
[] |
no_license
|
StephanJon/Plot-Graph-Interpolation
|
3e8d23226640cd1e7b6f98e849a9d086af856034
|
0dd0339eed7fe99873a3f757ec6614c65a312bca
|
refs/heads/master
| 2020-04-03T03:55:43.030744
| 2019-03-16T20:23:58
| 2019-03-16T20:55:34
| 154,998,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,150
|
py
|
## @file Plot.py
# @author Stephanus Jonatan
# @date February 20, 2018
from CurveADT import *
import matplotlib.pyplot as win
## @brief Display an x-y line graph of paired data points.
#  @details Blocks on win.show() until the plot window is closed.
#  @param X sequence of x data points
#  @param Y sequence of y data points, same length as X
#  @throws SeqSizeMismatch when X and Y differ in length
def PlotSeq(X, Y):
    if len(X) != len(Y):
        raise SeqSizeMismatch("The sequences are not the same size")
    win.plot(X, Y, 'b')  # 'b' = solid blue line
    win.xlabel("x-axis")
    win.ylabel("y-axis")
    win.show()
## @brief Display an x-y graph of curve c sampled at n equally spaced points.
#  @details Fixes the original implementation: range() was called with a
#  float step (a TypeError whenever (maxD-minD)/n is not an integer), and the
#  two identical order==1 / order==2 branches are collapsed into one.
#  @param c a CurveT exposing minD(), maxD(), order() and eval()
#  @param n number of sampling intervals across [minD(), maxD())
def PlotCurve(c, n):
    interval = (c.maxD() - c.minD()) / n
    # Same sample points as range(minD, maxD, interval) intended: n points
    # from minD (inclusive) stepping by interval, maxD excluded.
    X_data = [c.minD() + i * interval for i in range(n)]
    if c.order() in (1, 2):
        Y_data = [c.eval(x) for x in X_data]
    else:
        # Unsupported order: no Y samples (PlotSeq will then raise
        # SeqSizeMismatch, matching the original behaviour).
        Y_data = []
    PlotSeq(X_data, Y_data)
|
[
"brianjonatan@DESKTOP-EH98R4U.localdomain"
] |
brianjonatan@DESKTOP-EH98R4U.localdomain
|
4c38981263972d95636d6e02fdba40dbd8f2c5a8
|
0f4cd79db1379dc151e74400b6fc6a79d5b52d08
|
/work06/code/server.py
|
3eda8298462d5eed64997dd7e199f250b574a1ff
|
[] |
no_license
|
Detect-er/Coursework
|
3cdffe84a61029e31420a4d89341208937520d02
|
91061dc0b2bed021d092e3da933e716c026ba838
|
refs/heads/master
| 2021-03-22T17:37:39.847713
| 2020-06-13T03:03:56
| 2020-06-13T03:03:56
| 247,388,020
| 3
| 2
| null | 2020-03-22T13:32:24
| 2020-03-15T02:29:43
|
C
|
UTF-8
|
Python
| false
| false
| 1,211
|
py
|
from socket import *
from time import ctime
HOST = '127.0.0.1'
PORT = 4567
BUFSIZ = 1024
ADDR = (HOST,PORT)
# Mount point of the scull character device whose readings we serve.
filename = "/mnt/ext4scull"
# 1. Create the server socket
tcpSerSock = socket(AF_INET,SOCK_STREAM)
# 2. Bind to the configured IP and port
tcpSerSock.bind(ADDR)
# 3. Listen for incoming client connections (backlog of 5)
tcpSerSock.listen(5)
while True:
    print('waiting for connection...')
    # 4. Accept a client connection; returns the per-client socket and address
    tcpCliSock, addr = tcpSerSock.accept() # client socket object and client IP
    print('...connnecting from:', addr)
    while True:
        # 5. Receive a request from the client; empty data means disconnect
        data = tcpCliSock.recv(BUFSIZ).decode()
        if not data:
            break
        print("From client: %s"%data)
        # 6. Read the scull device readings (temperature, humidity) from the file
        with open(filename) as f:
            content = f.read()
        f.close()
        # 7. Reply with the timestamped readings; content is expected to be
        #    "temperature humidity" whitespace-separated — TODO confirm format.
        tcpCliSock.send(('the time is: [%s]\ntemperature is: %s\nhumidity is: %s' % (
        ctime(), content.split()[0], content.split()[1])).encode())
    # 8. Close the client socket after it disconnects.
    # NOTE(review): the server socket close below is unreachable (while True).
    tcpCliSock.close()
tcpSerSock.close()
|
[
"noreply@github.com"
] |
noreply@github.com
|
1817dddcfb6a350fe4323472755486725543c750
|
d70db722710bccf7a834e8e4acdb376b151b20a1
|
/apps/finances/models.py
|
0f4b847dc96b1d4ee9872b62f624905c17cde98f
|
[] |
no_license
|
intentaware/Vader
|
b0d433f640b244d592126b2713506d214dc1d287
|
54d5d799beab1fc5cef99fb90d4e50e00720bfe0
|
refs/heads/master
| 2021-01-20T07:07:11.393929
| 2017-12-06T19:16:53
| 2017-12-06T19:16:53
| 30,995,526
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,356
|
py
|
import shortuuid
from django.db import models
from django.utils.text import slugify, Truncator
from django.contrib.postgres.fields import JSONField
from django_extensions.db.fields import ShortUUIDField
from apps.common.models import *
from apps.common.utils.money import convert_to_cents
from .mixins import Stripe, CURRENCY_CHOICES
class BasePaymentModel(Stripe, TimeStamped):
    """Basic Payment Model, inherits Stripe model, will be used for multiple

    Attributes:
        amount (Decimal): total amount charged to customer
        attempted_on (Time): time on which the charge was attempted
        attempts (Int): Number of times we tried to charge
        charged_on (Time): If charge was succesful, populate the field with current time
        gateway_response (Json): Response from the server
        is_paid (Bool): if charge was succesful
        service_charges (Decimal): Service charges if any, amount is inclusive of service_charges
        taxes (Decimal): Taxes if any, Note: amount is inclusive of taxes
    """
    amount = models.DecimalField(default=0.00, max_digits=20, decimal_places=4)
    currency = models.CharField(
        max_length=4,
        choices=CURRENCY_CHOICES,
        default='USD'
    )
    attempts = models.IntegerField(default=0)
    # service charges (amount above is inclusive of these)
    service_charges = models.DecimalField(
        default=0.00,
        max_digits=20,
        decimal_places=4
    )
    taxes = models.DecimalField(default=0.0, max_digits=20, decimal_places=4)
    #total_amount = models.DecimalField(default=0.00, max_digits=20, decimal_places=4)
    # extra timestamps
    attempted_on = models.DateTimeField(blank=True, null=True)
    charged_on = models.DateTimeField(blank=True, null=True)
    # json mapped response from stripe
    # FIX: default must be the callable `dict`, not a literal {} — a literal
    # is shared across every model instance (Django warns about this).
    gateway_response = JSONField(default=dict)
    is_paid = models.BooleanField(default=False)

    class Meta:
        abstract = True

    @property
    def line_items_total(self):
        # amount is inclusive of service charges and taxes; strip them out.
        return self.amount - self.service_charges - self.taxes
class Invoice(BasePaymentModel):
    """A concrete payment record tied to a company, mirrored in Stripe."""
    stripe_id = models.CharField(
        max_length=256,
        blank=True,
        null=True,
        help_text='id obtained from stripe'
    )
    company = models.ForeignKey('companies.Company', related_name='invoices')
class Module(TimeStamped):
    """A sellable product module, grouped into one of three segments."""
    [CORE, DMP, REPORTING] = range(3)
    SEGMENT_CHOICES = [
        (CORE, 'Core'),
        (DMP, 'Data Management Platform'),
        (REPORTING, 'Reporting'),
    ]
    name = models.CharField(max_length=128, help_text='The name of the module')
    segment = models.IntegerField(
        choices=SEGMENT_CHOICES,
        default=CORE,
        help_text='The segment it is part of'
    )

    # Python 2 style str; this codebase predates __str__-only Django.
    def __unicode__(self):
        return self.name
class Plan(TimeStamped, Stripe):
    """A subscription plan mirrored in Stripe.

    Saving a plan keeps the Stripe side in sync: if amount or currency has
    drifted from the remote plan, a fresh Stripe plan is created (Stripe
    plans are immutable), and a missing remote plan is (re)created.
    NOTE: this file is Python 2 (print statement below).
    """
    [UNTIL_EXPIRY, DAY, WEEK, MONTH, YEAR] = range(5)
    INTERVAL_CHOICES = [
        (UNTIL_EXPIRY, 'untill expiry'),
        (DAY, 'day'),
        (WEEK, 'week'),
        (MONTH, 'month'),
        (YEAR, 'year'),
    ]
    amount = models.DecimalField(default=0.00, max_digits=20, decimal_places=2)
    currency = models.CharField(
        max_length=4,
        choices=CURRENCY_CHOICES,
        default='USD'
    )
    name = models.CharField(max_length=128)
    interval = models.IntegerField(
        choices=INTERVAL_CHOICES,
        default=UNTIL_EXPIRY
    )
    modules = models.ManyToManyField(Module, through='finances.PlanModule')
    # 0 means unlimited for both limits
    limit_campaigns = models.IntegerField(
        default=0,
        help_text='0 means unlimited'
    )
    limit_impressions = models.IntegerField(
        default=0,
        help_text='0 means unlimited'
    )
    stripe_id = ShortUUIDField(blank=True, null=True)

    def __unicode__(self):
        return self.name

    def save(self, *args, **kwargs):
        """Override the default save to hook the plans with Stripe.

        Args:
            *args: arguments, normally plain arguments
            **kwargs: Keyword arguments

        Returns:
            name (obj): Django Plan model object
        """
        plan = None
        sd = self.stripe_dictionary
        if sd and self.stripe_id:
            try:
                plan = self.stripe_plan
                # Stripe plans are immutable: on a price/currency change,
                # mint a new stripe id AND a new local row (self.id = None).
                if int(plan.amount) != convert_to_cents(
                    self.amount
                ) or self.currency.lower() != plan.currency:
                    print 'not equal, creating new account'
                    self.stripe_id = shortuuid.uuid()
                    self.id = None
                    self.create_stripe_plan()
            except self._stripe.error.InvalidRequestError:
                # Remote plan missing — create it.
                self.create_stripe_plan()
        return super(Plan, self).save(*args, **kwargs)

    class Meta:
        ordering = ['amount']

    def create_stripe_plan(self, *args, **kwargs):
        # Push this plan's definition to Stripe.
        return self._stripe.Plan.create(**self.stripe_dictionary)

    @property
    def stripe_plan(self):
        # Fetch the remote Stripe plan by our stored id.
        return self._stripe.Plan.retrieve(self.stripe_id)

    def features(self):
        """Return all modules grouped by segment name, each flagged with
        whether this plan includes it."""
        from itertools import groupby
        modules = Module.objects.all().values('id', 'name', 'segment')
        plan_modules = self.modules.all().values('id', 'name', 'segment')
        for m in modules:
            if m in plan_modules:
                m['is_included'] = True
            else:
                m['is_included'] = False
        doc = dict()
        # groupby relies on the queryset ordering by segment — TODO confirm
        # Module's default ordering keeps equal segments adjacent.
        for k, v in groupby(modules, lambda x: x['segment']):
            doc[Module.SEGMENT_CHOICES[k][1]] = list(v)
        return doc

    @property
    def stripe_dictionary(self):
        """Return the kwargs for Stripe plan creation, or None when the plan
        has no recurring interval (UNTIL_EXPIRY plans are not in Stripe)."""
        doc = None
        if not self.interval == 0:
            doc = {
                'id': self.stripe_id,
                'name': '{name} ({currency})'.format(
                    name=self.name,
                    currency=self.currency
                ),
                'amount': convert_to_cents(self.amount),
                'currency': self.currency,
                'interval': self.INTERVAL_CHOICES[self.interval][1],
                # Stripe caps statement descriptors at 22 characters.
                'statement_descriptor': Truncator(
                    'IA: {name}'.format(
                        name=self.name
                    )
                ).chars(22)
            }
        return doc
class PlanModule(TimeStamped):
    """Through-table linking a Plan to the Modules it includes."""
    plan = models.ForeignKey(Plan)
    module = models.ForeignKey(Module)

    class Meta:
        unique_together = ['plan', 'module']
|
[
"yousuf.jawwad@gmail.com"
] |
yousuf.jawwad@gmail.com
|
ed1d3892b3a5accf4bd2915df77884af4342a114
|
6a233770f9adec1c1258b493b5bd66d89f2c902a
|
/add_data.py
|
e16817d9ae9a405d3afb92d29035548211eac9c0
|
[] |
no_license
|
lionandbull/Movie-website-django
|
a13085701bfe4d30e4012c450c0e919c8936ec94
|
aa8c43e5eb9d9bcdfc9cffc3d2fa9f06d8f7d12d
|
refs/heads/master
| 2020-03-27T16:41:38.310421
| 2018-10-03T21:44:26
| 2018-10-03T21:44:26
| 146,800,333
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,701
|
py
|
from imdbpie import Imdb
import sqlite3
def execute_sql(s):
    """Execute a single SQL statement against movie.db.

    The `with con:` block commits on success / rolls back on error; the
    explicit close in `finally` fixes the original's connection leak (the
    context manager only manages the transaction, not the connection).
    """
    con = sqlite3.connect('movie.db')
    try:
        with con:
            con.execute(s)
    finally:
        con.close()
def single_quote(s):
    """Prepare `s` for embedding in a single-quoted SQL literal.

    Empty input becomes the string 'None'; any single quotes are doubled
    (SQL escaping); otherwise the string is returned unchanged.
    """
    if not s:
        return 'None'
    if "'" in s:
        return s.replace("'", "''")
    return s
# Parse data.csv: each data row is "movie_url,genres".
movie_list = []
movie_genres = {}
# Tracks actor ids already inserted, to avoid duplicate movie_actor rows.
actor_set = {}
with open('data.csv') as f:
    # Skip the header row.
    for row in f.readlines()[1:]:
        columns = row.split(',')
        # IMDb title id is the 5th URL path segment — TODO confirm URL shape.
        movie_id = columns[0].split('/')[4]
        genres = columns[1][:-1]  # drop the trailing newline
        movie_list.append(movie_id)
        movie_genres[movie_id] = genres
imdb = Imdb()
movie_count = 0
for movie_id in movie_list:
    try:
        title = imdb.get_title(movie_id)
        # NOTE(review): SQL is assembled by string formatting with manual
        # quote-escaping (single_quote) — parameterized queries would be
        # safer if any of this data is untrusted.
        sql = (
        '''INSERT INTO movie_movie VALUES (\'{}\',\'{}\',\'{}\',\'{}\',\'{}\',\'{}\',\'{}\',\'{}\',\'{}\')'''.format(
        movie_id,
        single_quote(str(title['base']['title'])),
        title['base']['year'],
        title['base']['runningTimeInMinutes'],
        movie_genres[movie_id],
        title['ratings']['rating'],
        single_quote(title['base']['image']['url']),
        single_quote(str(title['plot']['outline']['text'])),
        single_quote(str(imdb.get_title_videos(movie_id)['videos'][0]['encodings'][0]['play']))
        ))
        execute_sql(sql)
        movie_count += 1
        print("Insert movie: " + movie_id, movie_count)
    except Exception as e:
        # Any API/DB failure for this movie: skip it entirely.
        print('Movie Insert Failure: ' + movie_id, e)
        continue
    actors = imdb.get_title_credits(movie_id)
    actor_length = len(actors['credits']['cast'])
    print('Add Actors: ', end='')
    # Take at most the top five credited cast members.
    for actor in actors['credits']['cast'][:5 if actor_length > 5 else actor_length]:
        actor_id = actor['id'].split('/')[2]
        try:
            # Insert the actor once; actor_set de-duplicates across movies.
            if actor_id not in actor_set:
                sql = ('INSERT INTO movie_actor VALUES (\'{}\',\'{}\',\'{}\')'.format(
                actor_id,
                single_quote(str(actor['name'])),
                single_quote(str(actor['image']['url']))))
                execute_sql(sql)
                actor_set[actor_id] = ''
                print(actor_id + ' success; ', end='')
            else:
                print(actor_id + ' existed; ', end='')
            # Link the actor to this movie either way.
            sql = (
            'INSERT INTO movie_act(actorid_id, movieid_id) VALUES (\'{}\',\'{}\')'.format(actor_id,
            movie_id))
            execute_sql(sql)
        except Exception as e:
            # Per-actor best effort; keep going with the remaining cast.
            print(actor_id + ' failure', e, '; ', end='')
    print('\n')
|
[
"liuweixi0819@gmail.com"
] |
liuweixi0819@gmail.com
|
5bc3cd7e613d2f19a2b9afeab49fed7008c3a986
|
4a344e17523c960a46e0d1d443044dae2505a2bc
|
/pages/contacts/GroupList.py
|
06ceb11c258b01b773f57891dad1426816a0463a
|
[] |
no_license
|
JordMo/andfetion_ui
|
99e024a12bde7b5ae36b6ba8ee0054f2e482bf2c
|
e05d7c1362a4543aa31c39326fb3690d2f7e38fb
|
refs/heads/master
| 2023-05-25T18:51:15.289265
| 2019-09-06T10:47:28
| 2019-09-06T10:47:28
| 206,772,795
| 0
| 0
| null | 2023-05-22T22:17:00
| 2019-09-06T10:41:26
|
Python
|
UTF-8
|
Python
| false
| false
| 30,769
|
py
|
from appium.webdriver.common.mobileby import MobileBy
from selenium.common.exceptions import NoSuchElementException
from library.core.BasePage import BasePage
from library.core.TestLogger import TestLogger
#import preconditions
import time
class GroupListPage(BasePage):
"""群组列表"""
ACTIVITY = 'com.cmcc.cmrcs.android.ui.activities.GroupChatListActivity2'
__locators = {
'移除成员_标题':(MobileBy.ID,'com.chinasofti.rcs:id/title'),
'搜索标签分组成员':(MobileBy.ID,'com.chinasofti.rcs:id/contact_search_bar'),
'刪除_标签名':(MobileBy.ID,'com.chinasofti.rcs:id/ib_label_del'),
'星标图标': (MobileBy.ID, 'com.chinasofti.rcs:id/iv_star'),
'星标': (MobileBy.ID, 'com.chinasofti.rcs:id/star'),
"电话号码":(MobileBy.ID,'com.chinasofti.rcs:id/tv_phone'),
"语音通话": (MobileBy.ID, 'com.chinasofti.rcs:id/tv_voice_call'),
"视频通话": (MobileBy.ID, 'com.chinasofti.rcs:id/tv_video_call'),
"分享名片": (MobileBy.ID, 'com.chinasofti.rcs:id/btn_share_card'),
"邀请使用": (MobileBy.ID, 'com.chinasofti.rcs:id/tv_invitation_to_use'),
"发送_邀请":(MobileBy.ID,'com.android.mms:id/right_btn'),
"信息邀请":(MobileBy.ID,'com.android.mms:id/msg_content'),
"修改标签名称":(MobileBy.ID,"com.chinasofti.rcs:id/label_toolbar_title"),
"标签名称框":(MobileBy.ID,'com.chinasofti.rcs:id/edit_label_group_name'),
"确定3":(MobileBy.ID,"com.chinasofti.rcs:id/tv_label_done"),
"移除成员_标题":(MobileBy.ID,'com.chinasofti.rcs:id/title'),
"多方电话提示框": (MobileBy.XPATH, "//*[@text='多方电话']"),
"飞信电话": (MobileBy.XPATH, "//*[@text='飞信电话']"),
"多方视频": (MobileBy.XPATH, "//*[@text='多方视频']"),
"多方视频图标": (MobileBy.XPATH, "//*[@text='多方视频']"),
'多方通话_图标':(MobileBy.ID,'com.chinasofti.rcs:id/action_multicall'),
'分组联系人':(MobileBy.ID,'com.chinasofti.rcs:id/action_setting'),
'分组联系人_标题':(MobileBy.ID,'com.chinasofti.rcs:id/title'),
'富媒体面板': (MobileBy.ID, 'com.chinasofti.rcs:id/ll_rich_panel'),
'返回': (MobileBy.ID, 'com.chinasofti.rcs:id/left_back'),
'群聊': (MobileBy.ID, 'com.chinasofti.rcs:id/contact_name'),
'新建群组': (MobileBy.ID, 'com.chinasofti.rcs:id/menu_add_btn'),
'搜索群组': (MobileBy.XPATH, '//*[contains(@resource-id,"search")]'),
'com.chinasofti.rcs:id/fragment_container': (MobileBy.ID, 'com.chinasofti.rcs:id/fragment_container'),
'群列表': (MobileBy.ID, 'com.chinasofti.rcs:id/recyclerView'),
'列表项': (MobileBy.ID, 'com.chinasofti.rcs:id/rl_group_list_item'),
'列表项首字母': (MobileBy.ID, 'com.chinasofti.rcs:id/contact_index'),
'群名': (MobileBy.ID, 'com.chinasofti.rcs:id/contact_name'),
'滚动条字符': (MobileBy.XPATH, '//*[@resource-id="com.chinasofti.rcs:id/contact_index_bar_container"]/*'),
'标题新建分组': (MobileBy.ID, 'com.chinasofti.rcs:id/label_toolbar_title'),
'确定': (MobileBy.ID, 'com.chinasofti.rcs:id/tv_sure'),
'为你的分组创建一个名称': (MobileBy.ID, 'com.chinasofti.rcs:id/tv_sub_title'),
'请输入标签分组名称': (MobileBy.ID, 'com.chinasofti.rcs:id/edit_group_name'),
'通讯录': (MobileBy.ID, 'com.chinasofti.rcs:id/tvContact'),
'标签分组': (MobileBy.ID, 'com.chinasofti.rcs:id/second_item'),
'新建分组':(MobileBy.XPATH,'//*[@text="新建分组"]'),
'知道了':(MobileBy.ID,'com.chinasofti.rcs:id/btn_cancel'),
'设置':(MobileBy.ID,'com.chinasofti.rcs:id/iv_label_setting'),
'删除标签':(MobileBy.XPATH,'//*[@text="删除标签"]'),
'移除成员':(MobileBy.XPATH,'//*[@text="移除成员"]'),
'标签名称':(MobileBy.XPATH,'//*[@text="标签名称"]'),
'刪除按钮':(MobileBy.ID,'com.chinasofti.rcs:id/btn_ok'),
'back_contact':(MobileBy.ID,'com.chinasofti.rcs:id/back'),
'联系人列表': (MobileBy.ID, 'com.chinasofti.rcs:id/contact_list_item'),
'back_gouppage':(MobileBy.ID,'com.chinasofti.rcs:id/rl_label_left_back'),
"back_contact2":(MobileBy.ID,'com.chinasofti.rcs:id/label_group_left_back'),
'back_newpage':(MobileBy.ID,'com.chinasofti.rcs:id/iv_back'),
'back_settings': (MobileBy.ID, 'com.chinasofti.rcs:id/label_setting_left_back'),
'aaa':(MobileBy.XPATH,'//*[@text="aaa"]'),
'bbb': (MobileBy.XPATH, '//*[@text="bbb"]'),
'添加成员':(MobileBy.XPATH,'//*[@text="添加成员"]'),
'添加成员菜单': (MobileBy.ID, 'com.chinasofti.rcs:id/tv_first_colum'),
'群发信息': (MobileBy.ID, 'com.chinasofti.rcs:id/tv_second_colum'),
'多方电话': (MobileBy.ID, 'com.chinasofti.rcs:id/tv_third_colum'),
'多方视频': (MobileBy.ID, 'com.chinasofti.rcs:id/tv_fourth_colum'),
'大佬1': (MobileBy.ID, 'com.chinasofti.rcs:id/contact_name'),
'大佬3':(MobileBy.XPATH,'//*[@text="大佬3"]'),
'大佬2': (MobileBy.ID, 'com.chinasofti.rcs:id/title'),
'搜索或输入手机号':(MobileBy.XPATH,"//*[@text='搜索或输入号码']"),
'搜索框-搜索结果':(MobileBy.ID, 'com.chinasofti.rcs:id/contact_list_item'),
'选择联系人':(MobileBy.ID,"com.chinasofti.rcs:id/title"),
'清空搜索框': (MobileBy.ID, 'com.chinasofti.rcs:id/iv_delect'),
'已选择的联系人': (MobileBy.ID, 'com.chinasofti.rcs:id/hor_contact_selection'),
'分组联系人-姓名': (MobileBy.ID, 'com.chinasofti.rcs:id/group_member_name'),
'分组联系人-电话号码': (MobileBy.ID, 'com.chinasofti.rcs:id/group_member_number'),
'移除-已选择联系人': (MobileBy.ID, 'com.chinasofti.rcs:id/image_text'),
'选择和通讯录联系人':(MobileBy.ID,'com.chinasofti.rcs:id/text_hint'),
'删除-搜索':(MobileBy.ID,'com.chinasofti.rcs:id/iv_delect'),
'联系人头像':(MobileBy.ID,'com.chinasofti.rcs:id/contact_icon'),
'允许':(MobileBy.XPATH,'//*[@text="允许"]'),
'和飞信测试':(MobileBy.ID,'com.chinasofti.rcs:id/tv_title_department'),
'和通讯本人': (MobileBy.ID, ' com.chinasofti.rcs:id/tv_name_personal_contactlist'),
'中软国际科技服务有限公司':(MobileBy.XPATH,'//*[@text="中软国际科技服务有限公司"]'),
'广州': (MobileBy.XPATH, '//*[@text=" 广州"]'),
'和通讯联系人': (MobileBy.ID,'com.chinasofti.rcs:id/img_icon_contactlist'),
'我已阅读': (MobileBy.ID,'com.chinasofti.rcs:id/btn_check'),
'已阅读_确定': (MobileBy.ID,'com.chinasofti.rcs:id/dialog_btn_ok'),
'群发_输入框': (MobileBy.ID,'com.chinasofti.rcs:id/et_message'),
'发送': (MobileBy.ID,'com.chinasofti.rcs:id/ib_send'),
'标签设置': (MobileBy.ID, 'com.chinasofti.rcs:id/label_setting_toolbar_title'),
'表情按钮': (MobileBy.ID,"com.chinasofti.rcs:id/ib_expression"),
'表情_微笑': (MobileBy.XPATH,'//*[@text="[微笑1]"]'),
'已转短信送达': (MobileBy.XPATH,'//*[@text="已转短信送达"]'),
'添加图片': (MobileBy.ID,'com.chinasofti.rcs:id/ib_pic'),
'选择图片': (MobileBy.ID,'com.chinasofti.rcs:id/iv_select'),
'图片发送': (MobileBy.ID,'com.chinasofti.rcs:id/button_send'),
'发送失败': (MobileBy.ID,'com.chinasofti.rcs:id/imageview_msg_send_failed'),
'成员头像': (MobileBy.ID,'com.chinasofti.rcs:id/avator'),
"确定_可用": (MobileBy.XPATH,'//*[@text="确定"]'),
"版本更新": (MobileBy.ID,'com.chinasofti.rcs:id/dialog_title'),
"以后再说": (MobileBy.ID,"com.chinasofti.rcs:id/btn_cancel"),
'立即更新': (MobileBy.ID,"com.chinasofti.rcs:id/btn_ok"),
'搜索': (MobileBy.ID,"com.chinasofti.rcs:id/edit_query"),
'索引字母容器': (MobileBy.ID, 'com.chinasofti.rcs:id/contact_index_bar_container'),
}
    @TestLogger.log("修改标签名称")
    def update_label_name(self,name='bbb'):
        """Rename the current label group to *name* and confirm."""
        time.sleep(1)
        self.click_element(self.__locators['标签名称'])
        time.sleep(1)
        self.click_element(self.__locators['标签名称框'])
        time.sleep(1)
        self.input_text(self.__locators['标签名称框'],name)
        time.sleep(1)
        self.click_sure_element()
        time.sleep(1)

    @TestLogger.log("移除按钮")
    def click_move_label(self):
        """Tap the 'remove member' button."""
        time.sleep(1)
        self.click_element(self.__locators['移除成员'])
        time.sleep(1)

    @TestLogger.log("清空搜索框")
    def clear_input_box(self):
        """Tap the clear ('x') icon of the search box."""
        time.sleep(1)
        self.click_element(self.__locators['清空搜索框'])
        time.sleep(1)

    @TestLogger.log("清空搜索框")
    def is_element_present(self, locator='清空搜索框'):
        """Return True if the element named *locator* is present (default: the search-box clear icon)."""
        time.sleep(1)
        return self._is_element_present(self.__locators[locator])

    @TestLogger.log()
    def sure_icon_is_checkable(self):
        """Return True if the confirm button is clickable."""
        return self._is_clickable(self.__class__.__locators['确定'])

    @TestLogger.log("点击已选择联系人头像")
    def click_selected_contacts(self):
        """Tap the strip of already-selected contact avatars."""
        time.sleep(1)
        self.click_element(self.__class__.__locators['已选择的联系人'])
        time.sleep(1)

    @TestLogger.log("删除输入标签名称")
    def delete_label_name(self, name='bbb'):
        """Type *name* into the label-name field, then erase it via the delete icon."""
        time.sleep(1)
        self.click_element(self.__locators['标签名称'])
        time.sleep(1)
        self.click_element(self.__locators['标签名称框'])
        time.sleep(1)
        self.input_text(self.__locators['标签名称框'], name)
        time.sleep(1)
        self.click_element(self.__locators['刪除_标签名'])
        time.sleep(1)

    @TestLogger.log("标签名称")
    def click_label_name(self):
        """Tap the label-name row."""
        time.sleep(1)
        self.click_element(self.__locators['标签名称'])
        time.sleep(1)

    @TestLogger.log("点击设置")
    def click_settings_button(self):
        """Open the settings menu."""
        time.sleep(1)
        self.click_element(self.__locators['设置'])
        time.sleep(1)

    @TestLogger.log("点击群发信息")
    def click_send_message_to_group(self):
        """Tap the 'group message' menu entry."""
        time.sleep(1)
        self.click_element(self.__locators['群发信息'])
        time.sleep(1)

    @TestLogger.log("多方通话_图标")
    def click_mult_call_icon(self):
        """Tap the multi-party call icon."""
        time.sleep(1)
        self.click_element(self.__locators['多方通话_图标'])
        time.sleep(1)

    @TestLogger.log("点击分组_图标")
    def click_divide_group_icon(self):
        """Tap the contact-grouping icon."""
        time.sleep(1)
        self.click_element(self.__locators['分组联系人'])
        time.sleep(1)
    @TestLogger.log('返回')
    def click_back(self):
        """Tap the in-app back button."""
        self.click_element(self.__locators['返回'])

    @TestLogger.log('点击创建群')
    def click_create_group(self):
        """Tap the 'new group' entry."""
        self.click_element(self.__locators['新建群组'])

    @TestLogger.log('搜索群')
    def click_search_input(self):
        """Focus the group search input."""
        self.click_element(self.__locators['搜索群组'])

    @TestLogger.log('判断列表是否存在群XXX')
    def is_group_in_list(self, name):
        """Return True if a group called *name* appears in the group list."""
        groups = self.mobile.list_iterator(self.__locators['群列表'], self.__locators['列表项'])
        for group in groups:
            if group.find_elements(MobileBy.XPATH,
                                   '//*[@resource-id="com.chinasofti.rcs:id/contact_name" and ' +
                                   '@text="{}"]'.format(name)):
                return True
        return False

    @TestLogger.log('点击群')
    def click_group(self, name):
        """Tap the group called *name*; raise NoSuchElementException when it is not listed."""
        if self.is_group_in_list(name):
            self.click_element((MobileBy.XPATH,
                                '//*[@resource-id="com.chinasofti.rcs:id/contact_name" and ' +
                                '@text="{}"]'.format(name)))
        else:
            raise NoSuchElementException('找不到群:{}'.format((MobileBy.XPATH,
                                                           '//*[@resource-id="com.chinasofti.rcs:id/contact_name" and ' +
                                                           '@text="{}"]'.format(name))))

    @TestLogger.log('等待群聊列表页面加载')
    def wait_for_page_load(self, timeout=8, auto_accept_alerts=True):
        """Block until the group-list page is loaded (the 'new group' button is present)."""
        self.wait_until(
            condition=lambda d: self._is_element_present(self.__locators['新建群组']),
            timeout=timeout,
            auto_accept_permission_alert=auto_accept_alerts
        )

    @TestLogger.log('创建群聊')
    def create_group_chats_if_not_exits(self, name, members_list):
        """Create a group chat called *name* with *members_list* unless it already exists.

        :param members_list: iterable of contact names to select
        :param name: group name to search for / create
        """
        self.click_search_input()
        from pages import GroupListSearchPage
        group_search = GroupListSearchPage()
        group_search.input_search_keyword(name)
        if group_search.is_group_in_list(name):
            group_search.click_back()
        else:
            group_search.click_back()
            self.click_create_group()
            from pages import SelectContactPage
            select_page = SelectContactPage()
            select_page.search_and_select_contact(*members_list)
            from pages import BuildGroupChatPage
            build_page = BuildGroupChatPage()
            build_page.create_group_chat(name)
            from pages import ChatWindowPage
            chat = ChatWindowPage()
            if chat.is_tips_display():
                chat.directly_close_tips_alert()
            chat.wait_for_page_load()
            chat.click_back1()

    @TestLogger.log()
    def click_label_grouping(self):
        """Tap the 'label grouping' entry."""
        self.click_element(self.__class__.__locators['标签分组'])
    @TestLogger.log()
    def open_contacts_page(self):
        """Switch to the contacts tab, then open the SIM contacts list."""
        from pages.contacts.Contacts import ContactsPage
        """切换到标签页:通讯录"""
        self.click_element(self.__locators['通讯录'])
        ContactsPage().click_sim_contact()

    @TestLogger.log()
    def check_if_contains_element(self,text="确定"):
        """Assert the element named *text* (default: confirm button) is on the page."""
        return self.page_should_contain_element(self.__locators[text])

    @TestLogger.log("点击确定")
    def click_sure_element(self):
        """Tap the confirm button, falling back to the alternate locator when absent."""
        time.sleep(2)
        if self._is_element_present(self.__class__.__locators['确定']):
            self.click_element(self.__class__.__locators['确定'])
        else:
            self.click_element(self.__class__.__locators['确定3'])

    @TestLogger.log("点击某个联系人")
    def click_contact_element(self,text='大佬3'):
        """Scroll up to four screens looking for contact *text*; tap it and return True, else False."""
        for i in range(4):
            time.sleep(2)
            if self._is_element_present(self.__class__.__locators[text]):
                self.click_element(self.__class__.__locators[text])
                return True
            else:
                self.page_up()
        return False

    @TestLogger.log("点击允许权限")
    def click_allow_button(self):
        """Tap the permission 'allow' button when shown; return True when tapped (None otherwise)."""
        time.sleep(2)
        if self._is_element_present(self.__class__.__locators['允许']):
            self.click_element(self.__class__.__locators['允许'])
            return True

    @TestLogger.log("点击新建分组")
    def click_new_group(self):
        """Tap 'new group'."""
        self.click_element(self.__class__.__locators['新建分组'])

    @TestLogger.log("点击星标")
    def click_star_icon(self):
        """Tap the star icon."""
        self.click_element(self.__class__.__locators['星标图标'])

    @TestLogger.log("点击通讯录星标")
    def click_contact_star_icon(self):
        """Tap the contacts star entry."""
        self.click_element(self.__class__.__locators['星标'])

    @TestLogger.log("点击输入框")
    def click_input_element(self):
        """Focus the label-group name input."""
        self.click_element(self.__class__.__locators['请输入标签分组名称'])

    @TestLogger.log("分享名片")
    def click_share_button(self):
        """Tap 'share contact card'."""
        time.sleep(1)
        self.click_element(self.__class__.__locators['分享名片'])
        time.sleep(1)

    @TestLogger.log("邀请使用")
    def click_innvation_button(self):
        """Send an app invitation when the 'invite' prompt appears.

        When an SMS invite screen opens, background and relaunch the app to
        recover. Returns False only when the SMS invite flow did not appear
        after tapping send; True in every other case.
        """
        time.sleep(1)
        if self._is_element_present(self.__class__.__locators['邀请使用']):
            self.click_element(self.__class__.__locators['邀请使用'])
            time.sleep(1)
            self.click_element(self.__class__.__locators['发送_邀请'])
            time.sleep(2)
            if self._is_element_present(self.__class__.__locators['信息邀请']):
                self.driver.background_app(seconds=10)
                self.driver.launch_app()
                time.sleep(1)
                return True
            else:
                return False
        return True

    @TestLogger.log("发送_邀请")
    def click_send_innvation_button(self):
        """Tap the invitation 'send' button."""
        time.sleep(1)
        self.click_element(self.__class__.__locators['发送_邀请'])
        time.sleep(1)

    @TestLogger.log("点击搜索框")
    def click_search_box(self,text='搜索或输入手机号'):
        """Tap the search box named *text*."""
        self.click_element(self.__class__.__locators[text])

    @TestLogger.log("查看删除按钮是否存在")
    def page_should_contain_element1(self, locator="删除-搜索"):
        """Assert the element named *locator* (default: search delete icon) is on the page."""
        return self.page_should_contain_element(self.__locators[locator])
    @TestLogger.log("输入搜索内容")
    def input_search_text(self,text='dalao2'):
        """Type *text* into the phone-number search box."""
        self.input_text(self.__class__.__locators['搜索或输入手机号'], text)

    @TestLogger.log("搜索分组成员")
    def search_menber_text(self,text='dalao2'):
        """Type *text* into the label-group member search box."""
        self.input_text(self.__class__.__locators['搜索标签分组成员'], text)

    @TestLogger.log("输入内容")
    def input_content(self,text='祝一路顺风幸福美满'):
        """Type *text* into the label-group name input."""
        self.input_text(self.__class__.__locators['请输入标签分组名称'],text)

    # @TestLogger.log("输入内容")
    # def inputing_content(self,text):
    #     self.input_text(self.__class__.__locators['请输入标签分组名称'],text)

    @TestLogger.log("获取标签分组输入框文本")
    def get_text_of_lablegrouping_name(self):
        """Return the current text of the label-group name input."""
        return self.get_text(self.__class__.__locators['请输入标签分组名称'])

    @TestLogger.log('使用坐标点击')
    def click_coordinate(self, x=1/2, y=15/16):
        """Tap the screen at fractional position (*x*, *y*) of its width/height."""
        width = self.driver.get_window_size()["width"]
        height = self.driver.get_window_size()["height"]
        print("width : ",width,height)
        x_start = width*x
        y_end = height*y
        self.tap_coordinate([(x_start, y_end)])

    @TestLogger.log('删除分组标签')
    def delete_group(self, name='祝一路顺风幸福美满'):
        """Delete the label group called *name* when it is present on screen."""
        if self.is_text_present(name):
            self.click_text(name)
            time.sleep(2)
            # Dismiss the one-time "got it" tip if it pops up.
            flag = self._is_element_present(self.__class__.__locators['知道了'])
            if flag:
                self.click_element(self.__class__.__locators['知道了'])
                time.sleep(1)
            self.click_element(self.__class__.__locators['设置'])
            time.sleep(1)
            self.click_element(self.__class__.__locators['删除标签'])
            time.sleep(1)
            self.click_element(self.__class__.__locators['刪除按钮'])
            time.sleep(2)
            if self._is_element_present(self.__class__.__locators['允许']):
                self.click_element(self.__class__.__locators['允许'])
                time.sleep(2)
        else:
            print('标签不存在')

    @TestLogger.log("确认弹框处理")
    def tap_sure_box(self, text='知道了'):
        """Dismiss the confirmation pop-up named *text* when present.

        NOTE(review): presence is always checked against '知道了' even when a
        different *text* is clicked -- confirm this asymmetry is intended.
        """
        time.sleep(2)
        flag = self._is_element_present(self.__class__.__locators['知道了'])
        if flag:
            self.click_element(self.__class__.__locators[text])
        else:
            print('标签不存在')

    @TestLogger.log()
    def click_back_by_android(self, times=1):
        """Press the Android hardware back key *times* times."""
        # times: number of times to press back
        for i in range(times):
            self.driver.back()
            time.sleep(1)

    @TestLogger.log('返回按钮')
    def click_back_button(self,times=1):
        """Tap whichever back-button variant is currently present, *times* times."""
        for i in range(times):
            time.sleep(2)
            if self._is_element_present(self.__class__.__locators['back_contact']):
                self.click_element(self.__class__.__locators['back_contact'])
            elif self._is_element_present(self.__class__.__locators['back_gouppage']):
                self.click_element(self.__class__.__locators['back_gouppage'])
            elif self._is_element_present(self.__class__.__locators['back_contact2']):
                self.click_element(self.__class__.__locators['back_contact2'])
            elif self._is_element_present(self.__class__.__locators['back_settings']):
                self.click_element(self.__class__.__locators['back_settings'])
            else:
                self.click_element(self.__class__.__locators['back_newpage'])
            time.sleep(1)
    @TestLogger.log('获取元素y坐标')
    def get_element_text_y(self,text='新建分组'):
        """Return the on-screen y coordinate of the element named *text*."""
        element=self.get_element(self.__locators[text])
        y=element.location.get('y')
        return y

    @TestLogger.log('获取元素y坐标')
    def get_element_text_x(self, text='新建分组'):
        """Return the on-screen x coordinate of the element named *text*."""
        element = self.get_element(self.__locators[text])
        x = element.location.get('x')
        return x

    @TestLogger.log('判断元素是否存在')
    def page_contain_element(self, locator='添加成员菜单'):
        """Assert the element named *locator* is on the page."""
        return self.page_should_contain_element(self.__class__.__locators[locator])

    @TestLogger.log('判断元素不存在')
    def page_not_contain_element(self, locator='添加成员菜单'):
        """Assert the element named *locator* is NOT on the page."""
        return self.page_should_not_contain_element(self.__class__.__locators[locator])

    @TestLogger.log('判断元素颜色')
    def get_element_color(self, locator='选择联系人'):
        """Sample and return the pixel colour just inside the element named *locator*.

        NOTE(review): the 1440/2560 divisors assume a fixed screen resolution --
        confirm against the devices this suite targets.
        """
        element = self.get_element(self.__locators[locator])
        x=self.get_element_text_x(text=locator)
        y = self.get_element_text_y(text=locator)
        print(x,y)
        x=(x+1)/1440
        y=(y+1)/2560
        color=self.get_coordinate_color_of_element(element,x=x,y=y,by_percent=True)
        print("color = ",color)
        return color

    @TestLogger.log("新建分组")
    def new_group(self,name="aaa"):
        """Create a new label group called *name*, then navigate back twice."""
        time.sleep(1)
        self.click_new_group()
        time.sleep(1)
        self.click_input_element()
        time.sleep(1)
        self.input_content(text=name)
        time.sleep(1)
        self.click_sure_element()
        time.sleep(2)
        self.click_allow_button()
        time.sleep(1)
        self.click_back_button()
        time.sleep(2)
        self.click_back_button()
        time.sleep(2)
@TestLogger.log("添加成员dalao")
def add_member(self,name='dalao5',times=1):
member='大佬5'
time.sleep(1)
self.click_text('添加成员')
time.sleep(1)
self.click_search_box()
time.sleep(1)
self.input_search_text(name)
time.sleep(1)
self.hide_keyboard()
time.sleep(1)
if name is 'dalao6':
member='大佬6'
elif name is 'dalao7':
member='大佬7'
elif name is 'dalao1':
member = '大佬1'
elif name is 'dalao2':
member = '大佬2'
elif name is 'dalao3':
member = '大佬3'
if times==1:
self.click_text(member)
else:
#time=2,点击2次
self.click_text(member)
time.sleep(2)
self.click_text(member)
flag=self.is_toast_exist("该联系人不可选择")
isExist=1 #为是第1次添加该联系人,为2是重复添加该联系人
if flag:
print("联系人不可选")
time.sleep(1)
self.click_back_button()
time.sleep(1)
isExist = 2
else:
time.sleep(1)
self.click_sure_element()
time.sleep(1)
self.click_allow_button()
time.sleep(1)
isExist = 1
return isExist
    @TestLogger.log("群发信息")
    def send_message_to_group(self,message='aaaa'):
        """Open the group-message window (accepting the first-use notice) and send *message*."""
        time.sleep(1)
        self.click_element(self.__class__.__locators["群发信息"])
        time.sleep(2)
        # First use shows an "I have read" agreement dialog.
        flag= self._is_element_present(self.__class__.__locators['我已阅读'])
        if flag:
            self.click_element(self.__class__.__locators['我已阅读'])
            time.sleep(1)
            self.click_element(self.__class__.__locators['已阅读_确定'])
            time.sleep(1)
        self.click_element(self.__class__.__locators["群发_输入框"])
        time.sleep(1)
        self.input_text(self.__class__.__locators["群发_输入框"],message)
        time.sleep(1)
        self.click_element(self.__class__.__locators["发送"])
        time.sleep(2)

    @TestLogger.log("发送表情")
    def send_express_to_group(self, message='aaaa'):
        """Open the group-message window and send one smiley emoji (*message* is unused)."""
        time.sleep(1)
        self.click_element(self.__class__.__locators["群发信息"])
        time.sleep(2)
        flag = self._is_element_present(self.__class__.__locators['我已阅读'])
        if flag:
            self.click_element(self.__class__.__locators['我已阅读'])
            time.sleep(1)
            self.click_element(self.__class__.__locators['已阅读_确定'])
            time.sleep(1)
        self.click_element(self.__class__.__locators["表情按钮"])
        time.sleep(1)
        self.click_element(self.__class__.__locators["表情_微笑"])
        time.sleep(1)
        self.click_element(self.__class__.__locators["发送"])
        time.sleep(2)

    @TestLogger.log("发送图片")
    def send_picture_to_group(self, message='aaaa'):
        """Open the group-message window and send the first gallery picture (*message* is unused)."""
        time.sleep(1)
        self.click_element(self.__class__.__locators["群发信息"])
        time.sleep(2)
        flag = self._is_element_present(self.__class__.__locators['我已阅读'])
        if flag:
            self.click_element(self.__class__.__locators['我已阅读'])
            time.sleep(1)
            self.click_element(self.__class__.__locators['已阅读_确定'])
            time.sleep(1)
        self.click_element(self.__class__.__locators["添加图片"])
        time.sleep(1)
        self.click_element(self.__class__.__locators["选择图片"])
        time.sleep(1)
        self.click_element(self.__class__.__locators["图片发送"])
        time.sleep(15)

    @TestLogger.log("群发信息")
    def enter_group_message(self, message='aaaa'):
        """Open the group-message window, accepting the first-use notice (*message* is unused)."""
        time.sleep(1)
        self.click_element(self.__class__.__locators["群发信息"])
        time.sleep(2)
        flag = self._is_element_present(self.__class__.__locators['我已阅读'])
        if flag:
            self.click_element(self.__class__.__locators['我已阅读'])
            time.sleep(1)
            self.click_element(self.__class__.__locators['已阅读_确定'])
            time.sleep(1)
        time.sleep(1)

    @TestLogger.log("多方电话")
    def enter_mutil_call(self, message='aaaa'):
        """Start a multi-party phone call (*message* is unused)."""
        time.sleep(1)
        self.click_element(self.__class__.__locators["多方电话"])

    @TestLogger.log("多方视频")
    def enter_mutil_video_call(self, message='aaaa'):
        """Start a multi-party video call (*message* is unused)."""
        time.sleep(1)
        self.click_element(self.__class__.__locators["多方视频"])
    def page_down(self):
        """向下滑动"""
        # Swipe from 30% to 70% of screen height over 800 ms.
        self.swipe_by_percent_on_screen(50, 30, 50, 70, 800)

    def find_star_by_name(self, locator, name, times=10):
        """根据联系人名称查找星标

        Return the element matching *locator* whose text ends with *name*.
        Checks the current screen first, then scrolls up and down up to
        *times* screens in each direction; returns None when nothing matches.
        """
        if self._is_element_present(locator):
            els = self.get_elements(locator)
            if els:
                for el in els:
                    if el.text.endswith(name):
                        return el
        c = 0
        while c < times:
            # self.page_down()
            self.page_up()
            if self._is_element_present(locator):
                els = self.get_elements(locator)
                if els:
                    for el in els:
                        if el.text.endswith(name):
                            return el
            c += 1
        c = 0
        while c < times:
            # self.page_up()
            self.page_down()
            if self._is_element_present(locator):
                els = self.get_elements(locator)
                if els:
                    for el in els:
                        if el.text.endswith(name):
                            return el
            c += 1
        return None

    def page_contain_star(self, name):
        """某联系人前是否存在星标

        Assert the star icon is shown for contact *name*; silently returns
        None when the contact cannot be found at all.
        """
        el=self.find_star_by_name((MobileBy.XPATH, '//*[contains(@text,"%s")]' % name), name)
        if el:
            return self.page_contain_element('星标图标')
        else:
            pass

    # def swipe_select_one_member_by_name(self, name):
    #     """通过人名选择一个联系人"""
    #     el=self.get_element((MobileBy.XPATH, '//*[@text ="%s"]' % name)).text
    #     if el:
    #         self.click_text(el)
    #     else:
    #         self.find_star_by_name(el)
    #     time.sleep(2)
    #     self.click_text(el)

    @TestLogger.log("输入群名")
    def input_group_name(self,text):
        """Type *text* into the group search box."""
        self.input_text(self.__class__.__locators['搜索'], text)

    def is_element_present_result(self):
        """Return True when the search-result list element is present."""
        return self._is_element_present(self.__locators['搜索结果展示'])

    @TestLogger.log()
    def is_exists_group_by_name(self, name):
        """是否存在指定群名字的搜索结果"""
        locator = (MobileBy.XPATH, '//*[@resource-id="com.chinasofti.rcs:id/contact_name" and contains(@text, "%s")]' % name)
        return self._is_element_present(locator)

    def get_letters_index(self):
        """获取所有索引字母

        Return the list of index letters shown in the contact index bar.
        """
        container_el = self.get_element(self.__class__.__locators['索引字母容器'])
        letter_els = container_el.find_elements(MobileBy.XPATH, "//android.widget.TextView")
        if not letter_els:
            raise AssertionError("No m005_contacts, please add m005_contacts in address book.")
        letters = []
        for el in letter_els:
            letters.append(el.text)
        return letters

    @TestLogger.log()
    def click_letter_index(self, letter):
        """点击字母索引

        Tap the given *letter* in the contact index bar.
        """
        container_el = self.get_element(self.__class__.__locators['索引字母容器'])
        container_el.find_element(MobileBy.XPATH, "//android.widget.TextView[@text='%s']" % letter).click()
|
[
"mozhuoda@139.com"
] |
mozhuoda@139.com
|
a304f77b1ab57e3a08ec5dc52f5ef0fd366f16de
|
7ccfe901f8cc39ef35b2fb5e5accadf11af8e90a
|
/dask_hpcconfig/__main__.py
|
484be2d8c46103e8b9c2a0fc800672e21778bfe1
|
[
"Apache-2.0"
] |
permissive
|
umr-lops/dask-hpcconfig
|
bbf34bda787b59afde12e4b3b4788b3923374d16
|
be886561ccb3e4574ae2dd3296f0fa771108b33f
|
refs/heads/main
| 2023-08-09T06:40:08.421511
| 2023-07-27T10:33:06
| 2023-07-27T10:33:06
| 452,758,751
| 6
| 2
|
Apache-2.0
| 2023-09-01T11:53:00
| 2022-01-27T16:30:29
|
Python
|
UTF-8
|
Python
| false
| false
| 60
|
py
|
from . import cli

# Package entry point for ``python -m``: delegate to the CLI application object.
if __name__ == "__main__":
    cli.app()
|
[
"keewis@posteo.de"
] |
keewis@posteo.de
|
a7a66ee6bfc9b3d26e5dbb4a0a9df8f27b2a72e3
|
4c44c593048fa4e00fb0334209632a286886efd9
|
/sale_business_unit/models/product_business_unit.py
|
df6f50b9832b5d5adf851f1930983b0a7f67bcba
|
[] |
no_license
|
treytux/trey-addons
|
0c3fec43c584d46bd299b4bca47dcc334bedca60
|
1cda42c0eae702684badce769f9ec053c59d6e42
|
refs/heads/12.0
| 2023-06-08T21:56:09.945084
| 2023-05-29T10:05:53
| 2023-05-29T10:05:53
| 114,281,765
| 19
| 49
| null | 2023-05-29T10:05:55
| 2017-12-14T18:10:39
|
Python
|
UTF-8
|
Python
| false
| false
| 8,333
|
py
|
###############################################################################
# For copyright and license notices, see __manifest__.py file in root directory
###############################################################################
from datetime import date
from odoo import _, fields, models
class ProductBusinessUnit(models.Model):
    """Extend product business units with sale/quotation/invoice KPIs and
    extra dashboard graph data sources."""

    _inherit = 'product.business.unit'

    # --- quotation KPIs: order lines in state draft/sent ---------------------
    quotation_count = fields.Integer(
        compute='_compute_sales',
        string='Quotations',
        readonly=True,
    )
    quotation_order_count = fields.Integer(
        compute='_compute_sales',
        string='Quotation Orders',
        readonly=True,
    )
    quotation_amount = fields.Float(
        compute='_compute_sales',
        string='Quotations Revenues',
        readonly=True,
    )
    # --- confirmed sale KPIs: order lines in state sale/done -----------------
    sale_count = fields.Integer(
        compute='_compute_sales',
        string='Sales',
        readonly=True,
    )
    sale_order_count = fields.Integer(
        compute='_compute_sales',
        string='Sale Orders',
        readonly=True,
    )
    sale_amount = fields.Float(
        compute='_compute_sales',
        string='Sales Revenues',
        readonly=True,
    )
    # --- invoice KPIs: invoice lines not in state cancel/draft ---------------
    invoice_count = fields.Integer(
        compute='_compute_invoices',
        string='Sales',
        readonly=True,
    )
    invoice_order_count = fields.Integer(
        compute='_compute_invoices',
        string='Sale Orders',
        readonly=True,
    )
    invoice_amount = fields.Float(
        compute='_compute_invoices',
        string='Sales Revenues',
        readonly=True,
    )
    # Extra data sources selectable for the kanban dashboard graph.
    dashboard_graph_model = fields.Selection(
        selection_add=[
            ('sale.report', 'Sales'),
            ('account.invoice.report', 'Invoices'),
        ],
    )
    invoiced = fields.Integer(
        compute='_compute_invoices',
        string='Invoiced This Month',
        readonly=True,
        help=(
            'Invoice revenue for the current month. This is the amount the '
            'sales unit has invoiced this month. It is used to compute the '
            'progression ratio of the current and target revenue on the '
            'kanban view.'
        ),
    )
    invoiced_target = fields.Integer(
        string='Invoicing Target',
        help=(
            'Target of invoice revenue for the current month. This is the '
            'amount the sales unit estimates to be able to invoice this '
            'month.'
        ),
    )

    def _compute_sales(self):
        """Compute quotation/sale line counts, distinct order counts and
        untaxed revenue for each unit from its products' sale order lines."""
        for unit in self:
            # NOTE(review): one search per unit (N+1 pattern) -- fine for a
            # handful of units, could be grouped otherwise.
            lines = self.env['sale.order.line'].search([
                ('product_id', '!=', False),
                ('product_id.unit_id', '=', unit.id)])
            quotation_lines = lines.filtered(
                lambda l: l.order_id.state in ['draft', 'sent'])
            sale_lines = lines.filtered(
                lambda l: l.order_id.state in ['sale', 'done'])
            unit.quotation_count = len(quotation_lines)
            unit.quotation_order_count = len(
                quotation_lines.mapped('order_id'))
            unit.quotation_amount = sum(
                quotation_lines.mapped('price_subtotal'))
            unit.sale_count = len(sale_lines)
            unit.sale_order_count = len(
                sale_lines.mapped('order_id'))
            unit.sale_amount = sum(sale_lines.mapped('price_subtotal'))

    def _compute_invoices(self):
        """Compute invoice line/invoice counts, untaxed revenue, and the
        amount invoiced in the current calendar month for each unit."""
        for unit in self:
            lines = self.env['account.invoice.line'].search([
                ('invoice_id.state', 'not in', ['cancel', 'draft']),
                ('product_id', '!=', False),
                ('product_id.unit_id', '=', unit.id)])
            unit.invoice_count = len(lines)
            unit.invoice_amount = sum(lines.mapped('price_subtotal'))
            invoices = lines.mapped('invoice_id')
            unit.invoice_order_count = len(invoices)
            # Invoices dated within the current calendar month (1st..today).
            month_invoices = invoices.filtered(
                lambda i:
                i.date <= date.today()
                and i.date >= date.today().replace(day=1)
            )
            unit.invoiced = sum(month_invoices.mapped('amount_untaxed_signed'))

    def update_invoiced_target(self, value):
        """Persist the monthly invoicing target, rounded to the nearest integer."""
        return self.write({'invoiced_target': round(float(value or 0))})

    def action_view_quotation_lines(self):
        """Open the list of quotation lines (draft/sent orders) for this unit."""
        self.ensure_one()
        lines = self.env['sale.order.line'].search([
            ('order_id.state', 'in', ['draft', 'sent']),
            ('product_id', '!=', False),
            ('product_id.unit_id', '=', self.id)])
        action = self.env.ref(
            'sale_business_unit.sale_order_line_quotation_action').read()[0]
        action['domain'] = [('id', 'in', lines.ids)]
        return action

    def action_view_sale_lines(self):
        """Open the list of confirmed sale lines (sale/done orders) for this unit."""
        self.ensure_one()
        lines = self.env['sale.order.line'].search([
            ('order_id.state', 'in', ['sale', 'done']),
            ('product_id', '!=', False),
            ('product_id.unit_id', '=', self.id)])
        action = self.env.ref(
            'sale_business_unit.sale_order_line_sale_action').read()[0]
        action['domain'] = [('id', 'in', lines.ids)]
        return action

    def action_view_invoice_lines(self):
        """Open the list of posted invoice lines for this unit."""
        self.ensure_one()
        lines = self.env['account.invoice.line'].search([
            ('invoice_id.state', 'not in', ['cancel', 'draft']),
            ('product_id', '!=', False),
            ('product_id.unit_id', '=', self.id)])
        action = self.env.ref(
            'sale_business_unit.account_invoice_line_action').read()[0]
        action['domain'] = [('id', 'in', lines.ids)]
        return action

    def action_view_quotation(self):
        """Open the quotations (draft/sent orders) containing this unit's products."""
        self.ensure_one()
        lines = self.env['sale.order.line'].search([
            ('order_id.state', 'in', ['draft', 'sent']),
            ('product_id', '!=', False),
            ('product_id.unit_id', '=', self.id)])
        action = self.env.ref('sale.action_quotations').read()[0]
        action.update({
            'domain': [('id', 'in', lines.mapped('order_id').ids)],
            'context': {},
        })
        return action

    def action_view_sale(self):
        """Open the confirmed sale orders containing this unit's products."""
        self.ensure_one()
        lines = self.env['sale.order.line'].search([
            ('product_id', '!=', False),
            ('product_id.unit_id', '=', self.id)])
        sale_lines = lines.filtered(
            lambda l: l.order_id.state in ['sale', 'done'])
        action = self.env.ref('sale.action_orders').read()[0]
        action.update({
            'domain': [('id', 'in', sale_lines.mapped('order_id').ids)],
            'context': {},
        })
        return action

    def action_view_invoice(self):
        """Open the posted invoices containing this unit's products."""
        self.ensure_one()
        lines = self.env['account.invoice.line'].search([
            ('product_id', '!=', False),
            ('product_id.unit_id', '=', self.id)])
        invoice_lines = lines.filtered(
            lambda l: l.invoice_id.state not in ['cancel', 'draft'])
        action = self.env.ref('account.action_invoice_tree1').read()[0]
        action.update({
            'domain': [('id', 'in', invoice_lines.mapped('invoice_id').ids)],
            'context': {},
        })
        return action

    def _graph_date_column(self):
        """Return the date column used by the dashboard graph for the selected model."""
        if self.dashboard_graph_model == 'sale.report':
            return 'confirmation_date'
        elif self.dashboard_graph_model == 'account.invoice.report':
            return 'date'
        return super()._graph_date_column()

    def _graph_y_query(self):
        """Return the SQL aggregate for the dashboard graph's y axis."""
        if self.dashboard_graph_model == 'sale.report':
            return 'SUM(price_subtotal)'
        elif self.dashboard_graph_model == 'account.invoice.report':
            return 'SUM(price_total)'
        return super()._graph_y_query()

    def _extra_sql_conditions(self):
        """Return extra SQL filtering for the dashboard graph query."""
        if self.dashboard_graph_model == 'sale.report':
            return "AND state in ('sale', 'done')"
        elif self.dashboard_graph_model == 'account.invoice.report':
            return "AND state in ('open', 'in_payment', 'paid')"
        return super()._extra_sql_conditions()

    def _graph_title_and_key(self):
        """Return [title, legend key] for the dashboard graph."""
        if self.dashboard_graph_model == 'sale.report':
            return ['', _('Sales: Untaxed Total')]
        elif self.dashboard_graph_model == 'account.invoice.report':
            return ['', _('Invoices: Untaxed Total')]
        return super()._graph_title_and_key()
|
[
"roberto@trey.es"
] |
roberto@trey.es
|
cb5a4b34fb49207a33bf8d1192cb7f3761407b26
|
237598dd6cbd3b85f79221195491893814de8574
|
/webservicenew.py
|
6e60576ab4f7d1321564d3d4541d55ebdd81e368
|
[] |
no_license
|
harsha97sahajan/Road-Damage-Detection
|
88ede0cb90f93e9e6ab9df5b72432542c0af0240
|
5c85bb740151e872f027af28ab4a8e53fc2b5a8c
|
refs/heads/main
| 2023-08-30T07:03:41.069394
| 2021-11-12T05:53:38
| 2021-11-12T05:53:38
| 427,242,357
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,188
|
py
|
import os
from flask import *
import pymysql
from werkzeug.utils import secure_filename
from src.classify import predictfn
# Module-level MySQL connection, cursor and Flask app shared by every handler.
# NOTE(review): credentials are hard-coded (root, empty password), and a single
# pymysql connection/cursor is not safe to share across concurrent requests --
# consider per-request connections or a pool.
con = pymysql.connect(host='localhost', port=3306, user='root', passwd='', db='roadsens')
cmd = con.cursor()
app = Flask(__name__)
@app.route('/userreg', methods=['get', 'post'])
def userreg():
    """Register a new user account; rejects usernames that already exist.

    Responds {'task': 'success'} or {'task': 'invalid'} (duplicate username).
    Fix: all queries are parameterized instead of string-concatenated, which
    was vulnerable to SQL injection.
    NOTE(review): passwords are stored in plain text -- hashing would require a
    coordinated change in /login.
    """
    fname = request.form['fname']
    mname = request.form['mname']
    lname = request.form['lname']
    ph = request.form['ph']
    email = request.form['email']
    username = request.form['un']
    pwd = request.form['pwd']
    cmd.execute("select * from login where username=%s and type='user'", (username,))
    if cmd.fetchone() is not None:
        return jsonify({'task': "invalid"})
    cmd.execute("INSERT INTO `login` values(null,%s,%s,'user')", (username, pwd))
    id = con.insert_id()
    cmd.execute(
        "insert into user_reg values(null,%s,%s,%s,%s,%s,%s)",
        (str(id), fname, mname, lname, ph, email))
    con.commit()
    return jsonify({'task': "success"})
@app.route('/login', methods=['POST'])
def login():
    """Authenticate a user by username/password.

    Responds {'task': <login id>, 'type': <account type>} on success and
    {'task': 'invalid'} otherwise.
    Fixes: the query is parameterized (was string-concatenated -> SQL
    injection), and a failure now answers 'invalid' instead of the original's
    misleading fall-through {'task': 'success'}.
    """
    try:
        username = request.form['un']
        pwd = request.form['pwd']
        # NOTE(review): plain-text password comparison, mirrors /userreg storage.
        cmd.execute(
            "select * from login where username=%s and password=%s",
            (username, pwd))
        s = cmd.fetchone()
        print(s)
        if s is not None:
            id = s[0]
            print(id)
            return jsonify({'task': str(id), 'type': s[3]})
        return jsonify({'task': "invalid"})
    except Exception as e:
        # Missing form fields or a DB error are failed logins, not successes.
        print(e)
        return jsonify({'task': "invalid"})
@app.route('/send_spot_complaint', methods=['get', 'post'])
def send_spot_complaint():
    """Store a geotagged complaint with an uploaded photo; status starts 'pending'.

    Fix: the INSERT is parameterized instead of string-concatenated (SQL
    injection). The uploaded filename is sanitized with secure_filename before
    it is used as a path component.
    """
    latitude = request.form['latitude']
    print(latitude)
    longitude = request.form['longitude']
    complaint = request.form['complaint']
    uid = request.form['uid']
    image = request.files['files']
    file = secure_filename(image.filename)
    image.save(os.path.join("./static/image", file))
    cmd.execute(
        "insert into spotcomplaint values(null,%s,%s,%s,%s,'pending',null,%s)",
        (uid, latitude, longitude, complaint, str(file)))
    con.commit()
    return jsonify({'task': "success"})
@app.route('/send_emg_alert', methods=['get', 'post'])
def send_emg_alert():
    """Store an emergency alert (user id, position, description).

    Fix: parameterized INSERT (was string-concatenated -> SQL injection).
    """
    latitude = request.form['latitude']
    longitude = request.form['longitude']
    description = request.form['description']
    uid = request.form['uid']
    cmd.execute(
        "insert into emergency_alert values(null,%s,%s,%s,%s)",
        (uid, latitude, longitude, description))
    con.commit()
    return jsonify({'task': "success"})
@app.route('/view_signal', methods=['POST', 'GET'])
def view_signal():
    """Return traffic signals within ~6.21 miles (10 km) of the given point as JSON.

    Fixes: the Haversine-style query is parameterized instead of built with
    string concatenation (SQL injection), and the duplicate debug print of the
    full SQL text is removed.
    """
    latitude = request.form['latitude']
    longitude = request.form['longitude']
    # 3959 = Earth radius in miles, so user_distance is in miles.
    sql = (
        "select * ,(3959 * ACOS ( COS ( RADIANS(%s) ) * COS( RADIANS(`latitude`) ) * "
        "COS( RADIANS(`longitude`) - RADIANS(%s) ) + SIN ( RADIANS(%s) ) * "
        "SIN( RADIANS(`latitude`) ))) AS user_distance "
        "from trafficsignal_reg HAVING user_distance < 6.2137")
    cmd.execute(sql, (latitude, longitude, latitude))
    rows = cmd.fetchall()
    headers = [col[0] for col in cmd.description]
    return jsonify([dict(zip(headers, row)) for row in rows])
@app.route('/view_important_place', methods=['POST', 'GET'])
def view_important_place():
    """Return important places within ~6.21 miles (10 km) of the given point as JSON.

    Fix: the distance query is parameterized instead of built with string
    concatenation (SQL injection).
    """
    latitude = request.form['latitude']
    longitude = request.form['longitude']
    # 3959 = Earth radius in miles, so user_distance is in miles.
    sql = (
        "select * ,(3959 * ACOS ( COS ( RADIANS(%s) ) * COS( RADIANS(`latitude`) ) * "
        "COS( RADIANS(`longitude`) - RADIANS(%s) ) + SIN ( RADIANS(%s) ) * "
        "SIN( RADIANS(`latitude`) ))) AS user_distance "
        "from imp_place_reg HAVING user_distance < 6.2137")
    cmd.execute(sql, (latitude, longitude, latitude))
    rows = cmd.fetchall()
    headers = [col[0] for col in cmd.description]
    return jsonify([dict(zip(headers, row)) for row in rows])
@app.route('/view_complaint', methods=['POST', 'GET'])
def view_complaint():
    """Return every pending spot complaint joined with its reporter's details, as JSON."""
    cmd.execute(" SELECT `spotcomplaint`.* ,`user_reg`.`fname`,`mname`,`lname`,`phone` FROM `user_reg` JOIN `spotcomplaint` ON `spotcomplaint`.`uid`=`user_reg`.lid where status='pending'")
    rows = cmd.fetchall()
    print(rows)
    columns = [col[0] for col in cmd.description]
    records = [dict(zip(columns, row)) for row in rows]
    print(records)
    return jsonify(records)
@app.route('/view_status', methods=['POST', 'GET'])
def view_status():
    """Return the given user's complaints with status and handling officer details.

    Fix: parameterized query (was string-concatenated -> SQL injection).
    """
    uid = request.form['uid']
    cmd.execute(
        "SELECT `spotcomplaint`.complaint,status,`traffic_police_reg`.`fname`,"
        "`mname`,`lname`,`phone` FROM `traffic_police_reg` JOIN `spotcomplaint` "
        "ON `spotcomplaint`.`policid`=`traffic_police_reg`.lid WHERE uid=%s",
        (uid,))
    rows = cmd.fetchall()
    headers = [col[0] for col in cmd.description]
    return jsonify([dict(zip(headers, row)) for row in rows])
@app.route('/view_emergency_alert', methods=['POST', 'GET'])
def view_emergency_alert():
    """Return every emergency alert together with the reporting user's details, as JSON."""
    # Column name `descripion` matches the (misspelled) database schema.
    cmd.execute("SELECT `emergency_alert`.`descripion` ,`user_reg`.`fname`,`mname`,`lname`,`phone` FROM `user_reg` JOIN `emergency_alert` ON `emergency_alert`.`uid`=`user_reg`.lid ")
    rows = cmd.fetchall()
    print(rows)
    columns = [col[0] for col in cmd.description]
    payload = [dict(zip(columns, row)) for row in rows]
    print(payload)
    return jsonify(payload)
@app.route('/update_status', methods=['POST', 'GET'])
def update_status():
    """Set a spot complaint's status/reply and record the handling officer id.

    Fix: parameterized UPDATE (was string-concatenated -> SQL injection).
    """
    sc_id = request.form['cid']
    reply = request.form['reply']
    pid = request.form['pid']
    cmd.execute(
        "UPDATE `spotcomplaint` SET `spotcomplaint`.`status`=%s,policid=%s "
        "WHERE `spotcomplaint`.`sc_id`=%s",
        (reply, pid, str(sc_id)))
    con.commit()
    return jsonify({'task': "success"})
@app.route('/emergency', methods=['get', 'post'])
def emergency():
    """Record a raw disruption sample (position + speed) timestamped with now().

    Fix: parameterized INSERT (was string-concatenated -> SQL injection).
    """
    latitude = request.form['latitude']
    longitude = request.form['longitude']
    speed = request.form['speed']
    cmd.execute(
        "insert into distruption values(null,%s,%s,%s,now())",
        (latitude, longitude, speed))
    con.commit()
    return jsonify({'task': "success"})
@app.route('/service', methods=['POST'])
def service():
    """Answer whether road disruption persists near (lati, longi).

    Compares disruption samples within ~2 miles from the last 10 days against
    the preceding 10-day window and responds {"task": "yes"} or {"task": "no"}.
    Fix: both distance queries are parameterized (were string-concatenated ->
    SQL injection); the decision logic is unchanged.
    """
    latitude = request.form['lati']
    longitude = request.form['longi']
    # 3959 = Earth radius in miles; strength window filters usable samples.
    recent_sql = (
        "select * ,(3959 * ACOS ( COS ( RADIANS(%s) ) * COS( RADIANS(`latitude`) ) * "
        "COS( RADIANS(`longitude`) - RADIANS(%s) ) + SIN ( RADIANS(%s) ) * "
        "SIN( RADIANS(`latitude`) ))) AS user_distance from distruption "
        "where strength<4440 and strength>1000 and "
        "date>=DATE_ADD(curdate(),interval -10 day) HAVING user_distance < 2 ")
    cmd.execute(recent_sql, (latitude, longitude, latitude))
    recent = cmd.fetchall()
    previous_sql = (
        "select * ,(3959 * ACOS ( COS ( RADIANS(%s) ) * COS( RADIANS(`latitude`) ) * "
        "COS( RADIANS(`longitude`) - RADIANS(%s) ) + SIN ( RADIANS(%s) ) * "
        "SIN( RADIANS(`latitude`) ))) AS user_distance from distruption "
        "where strength<4440 and strength>1000 and "
        "date<DATE_ADD(curdate(),interval -10 day) and "
        "date>DATE_ADD(curdate(),interval -20 day) HAVING user_distance < 2 ")
    cmd.execute(previous_sql, (latitude, longitude, latitude))
    previous = cmd.fetchall()
    if len(previous) < len(recent):
        # Disruption is growing: report "yes" only above the sample threshold.
        return jsonify({"task": "yes" if len(recent) > 5 else "no"})
    if len(previous) > 5:
        # Enough history: "yes" when recent samples exceed half of previous ones.
        ratio = (len(recent) / len(previous)) * 100
        return jsonify({"task": "yes" if ratio > 50.0 else "no"})
    return jsonify({"task": "no"})
@app.route("/capture", methods=['post'])
def capture():
    """Classify an uploaded camera image and record a disruption sample when the
    classifier returns 'normal'.

    Fixes: the decorator was ``@pp.route`` -- an undefined name that raised
    NameError at import time; the INSERT is parameterized (SQL injection).
    """
    img = request.files["files"]
    lt = request.form['latitude']
    lon = request.form['longitude']
    file = secure_filename(img.filename)
    img.save(os.path.join("camimg/image", file))
    # Renamed from ``re`` to avoid shadowing the stdlib module name.
    label = predictfn(os.path.join("camimg/image", file))
    if label == 'normal':
        # 4000 falls inside the strength window (1000..4440) used by /service.
        cmd.execute(
            "insert into distruption values(null,%s,%s,'4000',now())",
            (lt, lon))
        con.commit()
    return jsonify({'task': "success"})
if __name__ == "__main__":
    # Bind to all interfaces so mobile clients on the LAN can reach the API.
    app.run(host='0.0.0.0', port=5000)
|
[
"noreply@github.com"
] |
noreply@github.com
|
19261cb62700033a9cef08d8687bae4821b6f92d
|
21569b68b510b55bdc2acb1ff5ae521b31d44a79
|
/bin/pyrsa-encrypt-bigfile
|
9afaf7317207ef369910d93588778e7aefc825d6
|
[] |
no_license
|
howarder3/Rpi3_study
|
a99faef434ae4f751d4d9f339aca918186f7cb3e
|
533ba60ae4d11b5e3cebc12283e067ccee5a5cfd
|
refs/heads/master
| 2020-03-18T18:11:01.030936
| 2018-05-27T20:46:40
| 2018-05-27T20:46:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
#!/home/pi/myenv/bin/python3
# -*- coding: utf-8 -*-
# Setuptools-generated console-script shim for python-rsa's
# `pyrsa-encrypt-bigfile` command.
# NOTE(review): rsa.cli.encrypt_bigfile only exists in older python-rsa
# releases — this shim assumes the version installed in this venv; confirm.
import re
import sys
from rsa.cli import encrypt_bigfile
if __name__ == '__main__':
    # Strip the Windows-style "-script.py(w)" / ".exe" suffix from argv[0]
    # so the tool reports a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(encrypt_bigfile())
|
[
"howarder3@gmail.com"
] |
howarder3@gmail.com
|
|
58438ab2d36cbce9f24fee5708909e935510e8a4
|
42e67c2ad5ec6500ab8523cfdcd8327997ad8486
|
/Pyproject/controlstructure/usatax.py
|
c6aa806b1efedb6ef2efc1ae2e1b65ad45dd8cff
|
[] |
no_license
|
meg1988/PycharmProjects
|
34f45e36b835492ea2022839ca658e19c03fd58e
|
d8cd1cf262c1a374236ba0b583bb4838069eb3f2
|
refs/heads/master
| 2020-12-30T23:47:29.268050
| 2017-02-02T21:23:41
| 2017-02-02T21:23:41
| 80,572,898
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 239
|
py
|
# Toy US income "tax" demo: a flat federal rate plus a per-state rate.
fedtax = .1
statetax = {"CA": .1, "MA": .15}


def tax_calculate(income, state):
    """Return income scaled by (1 - fedtax + statetax[state]).

    Note: this reproduces the original formula exactly — the state rate is
    ADDED back rather than subtracted.  Raises KeyError for unknown states.
    """
    multiplier = 1 - fedtax + statetax[state]
    return income * multiplier


caincome = tax_calculate(1000, "CA")
print(caincome)
maincome = tax_calculate(1000, "MA")
print(maincome)
|
[
"megharastogi92.8@gmail.com"
] |
megharastogi92.8@gmail.com
|
f1403fe05fb506c6faed6be2e417b0d20f647e3b
|
c8b0f52d76d35986fd97d55857196b50627a5aa6
|
/jarvis.py
|
5fc1b66f26841f8ff8ab3af6fc6d891982376d8a
|
[] |
no_license
|
sakshampathak1508/jarvis
|
08780927e26c7bd71dce198371bac6f8edf0bebb
|
988c2488ab628ce073d36a9200cffd841da8909a
|
refs/heads/master
| 2023-03-08T23:03:54.341407
| 2021-03-01T19:17:04
| 2021-03-01T19:17:04
| 340,014,994
| 1
| 2
| null | 2021-03-01T19:17:05
| 2021-02-18T10:33:05
|
Python
|
UTF-8
|
Python
| false
| false
| 4,195
|
py
|
import pyttsx3
import datetime
import speech_recognition as sr
import os
import webbrowser,wikipedia
import pywhatkit as kit
import smtplib
from googlesearch import search
# time
# search
# wikipedia
# search youtube
# play youtube
# spotify
# send whatsapp message
# Contact book for the "send message" command: name -> phone number
# (numbers are dialled with a hard-coded +91 prefix below).
phone_nums = {
    "Enter name" : "Enter your phone number",
}
# Initialise the text-to-speech engine using the Windows SAPI5 backend and
# select the second installed voice (index 1 — which voice that is depends
# on the machine's installed voices).
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('voice',voices[1].id)
def say(query):
    """Speak *query* aloud via the module-level TTS engine (blocks until done)."""
    engine.say(query)
    engine.runAndWait()
def start():
    """Greet the user according to the current local hour, then introduce
    the assistant."""
    hour = datetime.datetime.now().hour
    # 0-11 -> morning, 12-19 -> afternoon, 20-23 -> evening (same split
    # as the original range checks).
    if hour < 12:
        greeting = "Good morning"
    elif hour <= 19:
        greeting = "Good Afternoon"
    else:
        greeting = "Good Evening"
    say(greeting)
    say("Hi i am jarvis. how may i help you")
def take_command():
    """Capture one utterance from the default microphone and return it as text.

    Uses Google's free web speech API with Indian English ('en-in').
    Returns the literal string "none" when recognition fails for any reason.
    """
    rec = sr.Recognizer()
    with sr.Microphone() as source:
        print("Getting Your Voice...")
        rec.pause_threshold = 1  # seconds of silence that ends the phrase
        audio = rec.listen(source)
        try:
            print("Listening")
            query = rec.recognize_google(audio,language='en-in')
            print(f"You said: {query}\n")
        except Exception as e:
            # Covers both UnknownValueError and network/RequestError cases.
            print("Not able to hear say that again please")
            return "none"
    return query
def send_email(to,content):
    """Send *content* to address *to* through Gmail SMTP (STARTTLS, port 587).

    NOTE(review): the credentials are hard-coded placeholders; they should be
    supplied via config/env, and Gmail requires an app password here.
    """
    server = smtplib.SMTP('smtp.gmail.com', 587)
    server.ehlo()
    server.starttls()
    server.login('YourEMAILId@gmail.com', 'Your Password')
    server.sendmail('YourEMAILId@gmail.com', to, content)
    server.close()
if __name__ == "__main__":
    # Wait for a wake phrase, then loop on voice commands.
    data = take_command().lower()
    # BUG FIX: the original condition was
    #     if 'hi jarvis' or 'hey jarvis' in data:
    # which is ALWAYS true — the non-empty literal 'hi jarvis' is truthy and
    # short-circuits the `or`. Each phrase must be tested against the text.
    if 'hi jarvis' in data or 'hey jarvis' in data:
        start()
        while True:
            my_query = take_command().lower()
            if 'search youtube' in my_query:
                say("What you want to search")
                data = take_command().lower()
                webbrowser.open('https://www.youtube.com/results?search_query='+data)
            elif 'play youtube' in my_query:
                say("What you want to play")
                data = take_command().lower()
                kit.playonyt(data)
            elif 'open' in my_query:
                # Opens "<spoken word>.com" in the default browser.
                web = take_command()
                webbrowser.open(web+".com")
            elif 'search' in my_query:
                say("Here are the browser results")
                # my_query[6:] strips the leading "search " prefix.
                webbrowser.open("https://www.google.com/?#q="+my_query[6:])
                for j in search(my_query, tld="co.in", num=10, stop=10, pause=2):
                    print(j)
            elif 'wikipedia' in my_query:
                say('Searching Wikipedia...')
                my_query = my_query.replace("wikipedia", "")
                results = wikipedia.summary(my_query, sentences=1)
                say("According to Wikipedia")
                print(results)
                say(results)
            elif 'the time' in my_query:
                strTime = datetime.datetime.now().strftime("%H:%M:%S")
                print(f"Sir, the time is {strTime}")
                say(f"Sir, the time is {strTime}")
            elif 'play spotify' in my_query:
                # Machine-specific install path — adjust per user.
                path = "C:\\Users\\Vivek\\AppData\\Roaming\\Spotify\\Spotify.exe"
                os.startfile(path)
            elif 'send message' in my_query:
                try:
                    say("Who is the reciever")
                    reciever = take_command().lower()
                    say("What is the message")
                    msg = take_command()
                    if reciever in phone_nums:
                        # Schedule the WhatsApp message one minute from now.
                        kit.sendwhatmsg("+91"+phone_nums[reciever],msg,int(datetime.datetime.now().hour), int(datetime.datetime.now().minute)+1)
                    else:
                        say("The reciever is not in your contact list")
                except Exception as e:
                    say("sorry sir . i am not able to send this message ")
            elif 'send email' in my_query:
                try:
                    say("What should I say?")
                    content = take_command()
                    to = input("Enter email address: ")
                    send_email(to, content)
                    say("Email has been sent!")
                except Exception as e:
                    print(e)
                    say("Sorry sir . I am not able to send this email")
            elif 'quit' in my_query:
                say("thank you i would be pleased to help again")
                exit(1)
|
[
"sakshamvpathak@gmail.com"
] |
sakshamvpathak@gmail.com
|
e11e11e8f056afa4c697607dfc0b4a9a999a6217
|
3f31e3cf84277f48fe5c646bf383b9b8c36a19bc
|
/basics/Instrukcje/6.py
|
812a5508addb3bd0db0e213b51b8c8227f363b0d
|
[] |
no_license
|
kchmielewski/python_basics
|
046e0786bd0eb1668daaa2e6b196db7b2569d3f2
|
2d14c12617e4276da515c2dde6e64fabc92f24b4
|
refs/heads/master
| 2021-01-13T16:16:05.152126
| 2018-10-26T15:36:34
| 2018-10-26T15:36:34
| 81,139,210
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 506
|
py
|
"""Exercise: draw 4 unique random integers from [0, 5).

Print "big number" when their sum exceeds 6, otherwise "small number",
using a single conditional (ternary) expression.
"""
import random

l = random.sample(range(5), 4)
print(l)
total = sum(l)
print("big number" if total > 6 else "small number")
|
[
"chmielewski.karol.96@gmail.com"
] |
chmielewski.karol.96@gmail.com
|
3e5af7c3636c13d85734ff1e08ce448c0397d9ba
|
74651a896dad75ddc8ba3e2e29e778049a349aff
|
/whatplane/models/predict_model.py
|
9ddd0ca63ac2c0f603594148a6d8c889eb71f6f7
|
[
"BSD-3-Clause"
] |
permissive
|
what-plane/what-plane-api
|
537eb5734adb7caa2c76fc18ecd8de9b25c8e0d3
|
fbd8ec8d59437cb8bcc0c55275850c653a7a902a
|
refs/heads/main
| 2023-03-01T03:02:11.459691
| 2021-02-14T21:40:39
| 2021-02-14T21:40:39
| 313,305,645
| 0
| 0
|
BSD-3-Clause
| 2021-02-11T00:08:26
| 2020-11-16T13:08:11
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,031
|
py
|
from pathlib import Path
from typing import List, Tuple
from PIL.Image import Image
import torch
from torchvision.models.densenet import DenseNet
from .data_helpers import process_image_data, process_image_file
def test(dataloaders, model, criterion):
# TODO Refactor this
# monitor test loss and accuracy
test_dataloader = dataloaders["test"]
test_loss = 0.0
test_accuracy = 0.0
predicted_classes = []
correct_classes = []
model.eval()
with torch.no_grad():
for images, labels in test_dataloader:
images, labels = images.to(model.device), labels.to(model.device)
outputs = model(images)
loss = criterion(outputs, labels)
test_loss += loss.item()
_, preds = torch.max(outputs, 1)
correct = preds == labels.view(*preds.shape)
test_accuracy += torch.mean(correct.type(torch.FloatTensor)).item()
predicted_classes.extend(preds.cpu().numpy().tolist())
correct_classes.extend(labels.cpu().numpy().tolist())
test_loss /= len(test_dataloader)
test_accuracy /= len(test_dataloader)
print("Test Loss: {:.6f}".format(test_loss))
print("Test Accuracy: {:.2f}".format(100 * test_accuracy))
return test_loss, test_accuracy, predicted_classes, correct_classes
def predict_image_data(
image_data: Image, model: DenseNet, topk: int = 1
) -> Tuple[List[float], List[str]]:
image = process_image_data(image_data).float().unsqueeze(0)
return predict_normalized(image, model, topk)
def predict(image_path: Path, model: DenseNet, topk: int = 1) -> Tuple[List[float], List[str]]:
image = process_image_file(image_path).float().unsqueeze(0)
return predict_normalized(image, model, topk)
def predict_normalized(processed_image: torch.Tensor, model: DenseNet, topk: int) -> Tuple[List[float], List[str]]:
""" Predict the class (or classes) of an image using a trained deep learning model.
Args:
image_path (str): Location of the image file
model (object): A trained PyTorch model
cat_to_name (dict): Dict which maps category numbers to category names
top_k (int): Number of top classes to return
device (obj): Device to perform inference on
Returns:
prediction_dict (dict): Dictionary of top classes predicted for that image
Example:
>>> result = predict('images/flower.jpg', model, cat_to_name, 5, torch.device('cpu'))
"""
processed_image = processed_image.to(model.device)
model.eval()
with torch.set_grad_enabled(False):
output = model(processed_image)
probs = torch.nn.functional.softmax(output, dim=1)
top_probs, top_classes = probs.topk(topk)
top_probs = top_probs.cpu().numpy().tolist()[0]
top_classes = [model.class_names[i] for i in top_classes.cpu().numpy().tolist()[0]]
return top_probs, top_classes
def predict_aircraft(image_path, model):
_, classes = predict(image_path, model)
return classes[0]
|
[
"parr.will.08@gmail.com"
] |
parr.will.08@gmail.com
|
2eb8f88b7861661d8799f529e1d447d7eb3f13dc
|
e4529d420f55200646f3891448f82c3907804a77
|
/test.py
|
6d112f98fb53945ac6b21fe0ca545fb771568b22
|
[] |
no_license
|
heyuanYao-pku/SynMapping
|
8779dff77735f6abcc1c11e7effb59b1a9915474
|
aab2d7d9b08b7b4d578a06e4b915da0d2178a6ba
|
refs/heads/master
| 2022-04-06T09:20:07.929773
| 2019-12-11T02:07:07
| 2019-12-11T02:07:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,069
|
py
|
import json
import cv2
import os
import numpy as np
import TensorMap
# Demo driver: load pairwise point maps from example/data.json, assemble the
# full map matrix, solve for a globally consistent joint map with
# TensorMap.SynTensorMap, and visualise the result on the bundled images.
data_path = 'example\\data.json'
data = json.load(open( data_path,'r') )
print(data.keys())
mList = data['mlist'][:35]  # point counts per image; only the first 35 images
tmp = data['P']             # pairwise map matrices as nested JSON lists
image_list_path = 'example\\image_list.npy'
image_list = np.load(image_list_path)
#mList = mList[0:20]
#Plist = np.array(Plist)
#Plist = Plist[0:20,0:20]
#image_list = image_list[0:20]
#print( np.shape(Plist) )
n = np.size(mList)
# Object array holding one map matrix per (i, j) image pair.
Plist = np.zeros(np.shape(tmp), np.ndarray)
print( n )
#print(mList[0],mList[100],mList[2],mList[102] )
for i in range(n):
    for j in range(n):
        if i==j:
            Plist[i][j] = np.eye(mList[i])  # an image maps to itself by identity
            continue
        # NOTE(review): indices are transposed here (tmp[j][i] -> Plist[i][j]);
        # presumably data.json stores maps in the opposite orientation — confirm.
        Plist[i][j] = np.array( tmp[j][i])
tensor = TensorMap.SynTensorMap(n,mList,Plist)
Q = tensor.solution()
np.save('example\\Q.npy',Q)
Q = tensor.rounded_solution(0.5,Q)  # binarise the continuous solution at 0.5
#Q = np.load('example\\Q.npy')
print(mList[0:2],Q)
draw = TensorMap.TensorMapVis(image_list,mList,Q).draw_image
save_path = 'example\\'
for i in range(n):
    cv2.imwrite(os.path.join(save_path,'%d.png'%i),draw[i])
|
[
"18811711520@163.com"
] |
18811711520@163.com
|
5e36f42372dbf6feef4dfe79e1f36c07e3c53a73
|
070e299ab646eef31ac0c767b14e45e9e591683b
|
/16_1281_SubtracttheProductandSumofDigitsofanInteger.py
|
c65601301a71556d0b417b7421542747d655d5e2
|
[] |
no_license
|
benny84416/test01
|
b6e4deffb4936db0d73c0b64e3da4da1e1015db9
|
f391548376e9a9f88e7d6c01581a8ce3feed8f71
|
refs/heads/main
| 2023-04-21T02:58:10.293615
| 2021-04-10T16:50:03
| 2021-04-10T16:50:03
| 348,393,842
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
# LeetCode 1281: subtract the sum of the digits of n from their product.
# Example: n = 234 -> product 2*3*4 = 24, sum 2+3+4 = 9, result 24 - 9 = 15.
n = 705
List = list(str(n))
digits = [int(c) for c in List]
Sum = sum(digits)
Product = 1
for d in digits:
    Product *= d
print(Product - Sum)
|
[
"gasry691@yahoo.com.tw"
] |
gasry691@yahoo.com.tw
|
cba4f7f7a86bbd2c06e4b92208fe3e95d44f31ac
|
99b84337ae66ad2877544fd158f20e7f4cd96520
|
/day01-10/day04/晚间作业/4_求分数和.py
|
88cd4c266f76603fc47dbe64b8a9ae5e47a016b0
|
[] |
no_license
|
jiajiabin/python_study
|
cf145d54cabce2cb98914b3448ed7d0e5c1c146c
|
b4faaff26ee9728af2e80942ba6a7c7f6a8b0f86
|
refs/heads/master
| 2020-06-21T21:31:26.034978
| 2019-08-26T11:39:34
| 2019-08-26T11:39:34
| 197,556,254
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 93
|
py
|
# Compute the partial harmonic sum 1/1 + 1/2 + ... + 1/10.
a = 10
b = sum(1 / i for i in range(1, a + 1))
print(b)
|
[
"2592668397@qq.com"
] |
2592668397@qq.com
|
5979b6ffe880e35199d444411456ee90b6941755
|
4cf053a451cd0436411912268c9140a6fc7af808
|
/Python/Lab3/LogisticRegression.py
|
325da6c13d69c4491db26c396f78300ba4b6b326
|
[] |
no_license
|
Chaitanyaperavali/python-deeplearning
|
de63d5595df49cca8b0eb0d55ebc5a41f3eaf5ee
|
ea4863d41455b1ba451291ffdbd6acb9f7296110
|
refs/heads/master
| 2021-05-09T11:38:33.299206
| 2018-04-07T15:22:17
| 2018-04-07T15:22:17
| 118,994,220
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 636
|
py
|
import numpy as np
from sklearn import cross_validation  # NOTE(review): removed in scikit-learn 0.20+; modern code uses sklearn.model_selection
from sklearn.linear_model import LogisticRegression
# Logistic regression model (original comment said "LDA model" — it is not).
lg = LogisticRegression()
# Load wine data from input file; first row is a header, comma-separated.
d = np.loadtxt('wineinput.data',delimiter=',',skiprows=1)
# Split: column 0 is the response/class label, remaining columns are features.
x = d[:,1:]
y = d[:,0]
# Perform 3-fold shuffled cross validation on the data.
k_fold = cross_validation.KFold(len(x), 3, shuffle=True)
print('Logistic regression Results: ')
for (train, test) in k_fold:
    lg.fit(x[train], y[train])
    # Accuracy of the fitted model on the held-out fold.
    outVal = lg.score(x[test], y[test])
    # Print the per-fold score.
    print('Score: ' + str(outVal))
|
[
"Chaitanya Peravali"
] |
Chaitanya Peravali
|
c01a071eaa33b53d906dd544cdf2f54e10c8fb4b
|
8243514e8d95ccb7648d37a89accc8b91a602cc6
|
/setup.py
|
cb813da71ca0b01eb71d04572d790db37ff780f8
|
[] |
no_license
|
nelastrnadova/pipeline
|
12d9f02a294762f86a52ca708b5bdf5ef5f7c085
|
0fba29d9c1a9cde7a5471eb42d2088639c2f8a78
|
refs/heads/main
| 2023-03-18T05:59:59.774426
| 2021-03-15T14:55:45
| 2021-03-15T14:55:45
| 347,218,966
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 358
|
py
|
import argparse
from glob import glob
from database import Database
from file import File
from yaml import Yaml  # NOTE(review): imported but never used here — confirm before removing
# Bootstrap script: executes the contents of every file under initial_data/
# against the database given by -db (default: database.db).
parser = argparse.ArgumentParser()
parser.add_argument("-db", type=str, help="Path to db file", default="database.db")
args = parser.parse_args()
db = Database(args.db)
for file in glob("initial_data/*"):
    db.exec(File(file).get_content())
|
[
"strnadova.nela@linuxmail.org"
] |
strnadova.nela@linuxmail.org
|
c05db23369700bf9bda51685ae774bb8e46349af
|
ef0f418e5ce98d2b85a7493b4dd4a9dd7672d3fe
|
/poc/vxworks_rpc_udp_17185_probe.py
|
0d83c724c26d645c76d6225ee5ca4f3558273d18
|
[] |
no_license
|
knownsec/VxPwn
|
481e3dbfd2d524c1cf8fd9a32cd24fd9042eea0d
|
6555f49675f0317d4a48568a89d0ec4332658402
|
refs/heads/master
| 2021-01-10T08:28:45.948113
| 2016-01-16T11:12:46
| 2016-01-16T11:12:46
| 49,614,826
| 160
| 96
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,195
|
py
|
#!/usr/bin/env python
# coding=utf-8
# probe on udp port 17185, VxWorks WDBRPC V1 & V2
# By dog2@404
import socket
import struct
def scanV1(host, port=17185, timeout=5):
    """Probe *host* for the VxWorks WDBRPC v1 debug agent over UDP.

    Sends a canned v1 request (captured as a hex string) and reports whether
    the reply contains the "vxworks" banner. Returns (is_vxworks, banner),
    or (None, '') when the UDP exchange fails. Python 2 only: relies on
    str.decode('hex').
    """
    payload_hex = 'cc6ff7e2000000000000000255555555000000010000000100000000000000000000000000000000ffff2e700000003026b00001'
    probe_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    probe_sock.settimeout(timeout)
    try:
        probe_sock.sendto(payload_hex.decode('hex'), (host, port))
        banner = probe_sock.recv(65536)
    except socket.error:
        return None, ''
    return 'vxworks' in banner.lower(), banner
def scanV2(host, port=17185, timeout=5):
    """Probe *host* for the VxWorks WDBRPC v2 debug agent over UDP.

    Sends WDB_TARGET_CONNECT2 then WDB_TGT_INFO_GET and returns
    (is_vxworks, connect_response, info_response); (None, '', '') if the
    first exchange fails. All header fields are big-endian. Python 2 only:
    str literals are joined with struct.pack output as byte strings.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.settimeout(timeout)
    connReq = ''.join([
        struct.pack('>I', 0), # msgid
        '\x00' * 4, # msgcall
        '\x00\x00\x00\x02', # rpc version
        '\x55' * 4, # wdb programe number
        '\x00\x00\x00\x01', # programe version
        struct.pack('>I', 122), # function number: WDB_TARGET_CONNECT2 = 122
        '\x00' * 16,
        '\x00' * 4,
        '\x00\x00\x00\x30', # packet length
        struct.pack('>I', 0), # msg seq
    ])
    try:
        sock.sendto(connReq, (host, port))
        resp1 = sock.recv(65536)
    except socket.error as err:
        # No response to the connect request at all: report unknown.
        return None, '', ''
    infoReq = ''.join([
        struct.pack('>I', 1), # msgid
        '\x00' * 4, # msgcall
        '\x00\x00\x00\x02', # rpc version
        '\x55' * 4, # wdb programe number
        '\x00\x00\x00\x01', # programe version
        struct.pack('>I', 123), # function number: WDB_TGT_INFO_GET = 123
        '\x00' * 16,
        '\x00' * 4,
        '\x00\x00\x00\x44', # packet length
        struct.pack('>I', 1), # msg seq
        '\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00', # parameter
    ])
    try:
        sock.sendto(infoReq, (host, port))
        resp2 = sock.recv(65536)
    except socket.timeout as err:
        # Only a timeout is tolerated here; the banner check then sees ''.
        resp2 = ''
    return 'vxworks' in resp2.lower(), resp1, resp2
if __name__ == '__main__':
    import sys
    from pprint import pprint as pr
    # BUG FIX: both probes should target the host given as the single CLI
    # argument; the original read sys.argv[2] for the v2 scan, which raised
    # IndexError unless a redundant second argument was supplied.
    pr(scanV1(sys.argv[1]))
    print('')  # blank separator line (was a bare Python-2 `print`)
    pr(scanV2(sys.argv[1]))
|
[
"dog2@dog2deMacBook-Pro.local"
] |
dog2@dog2deMacBook-Pro.local
|
be67fef614ce58137d6e137d9650ea154eee845d
|
b00846dbec1fdc932d5a7856ff42b24bc93ae9bf
|
/rec/__init__.py
|
0aa1a16fb15cfd6d68687099370138cf4b1c0e1e
|
[
"MIT"
] |
permissive
|
iawia002/rec
|
3c533536c94a75eeb7a3041f732ce67136cd093f
|
04637c2d68e5997efbd326eae912ef3b585b4dd8
|
refs/heads/master
| 2021-09-05T10:23:55.699510
| 2018-01-26T11:26:20
| 2018-01-26T11:26:20
| 111,183,941
| 2
| 0
|
MIT
| 2018-01-14T09:14:09
| 2017-11-18T06:54:31
|
Python
|
UTF-8
|
Python
| false
| false
| 46
|
py
|
# coding=utf-8
from .domain import * # noqa
|
[
"z2d@jifangcheng.com"
] |
z2d@jifangcheng.com
|
ba6088e55bae994ac54dc793a94b87326b66b35e
|
509b4121e18517cff4bbaa7ba1a4d2a24db8395d
|
/util.py
|
a7b87f35ab0e0400d050fc443f5d50bab29053af
|
[] |
no_license
|
jkim796/assembler
|
eec7158e8206fd34fe0fd0280f49e887c557f77d
|
d78d5a01cc3c020407079987f779bdff8f5d9e06
|
refs/heads/master
| 2021-05-07T01:02:38.217759
| 2017-11-11T05:16:47
| 2017-11-11T05:16:47
| 43,766,684
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,748
|
py
|
import re
import tables
### TODO: Error message when R-type, S-type are used wrong ###
# Symbol tables populated during assembly passes.
nameTable = {}   # .NAME symbol -> lowercase hex value string (see updateNameTable)
labelTable = {}  # label -> byte-address hex string (see updateLabelTable)
wordTable = {}   # .WORD entries — never written in this file; presumably filled elsewhere (TODO confirm)
# Read a source file and return its lines (newline characters preserved).
def readFrom(path):
    """Return the list of lines in the file at *path*."""
    with open(path, 'r') as src:
        return src.readlines()
def writeTo(path, hex):
    """Write the assembled hex string to *path*, replacing any existing file.

    (The parameter name `hex` shadows the builtin — kept for API stability.)
    """
    with open(path, 'w') as dst:
        dst.write(hex)
# Record a `.NAME symbol=value` directive: e.g. `.NAME IOBASE=0xF0000000`
# stores nameTable['IOBASE'] = '0xf0000000'.
def updateNameTable(line):
    """Parse a .NAME directive line and store the (symbol, value) pair."""
    tokens = re.findall(r'\w+', line)
    # tokens[0] is 'NAME'; tokens[1] is the symbol, tokens[2] its value.
    nameTable[tokens[1]] = tokens[2].lower()
# equivalent of parseName. Don't really understand what this does, and haven't seen any examples yet...
def parseWord(line):
    """Handle a .WORD directive. TODO: unimplemented stub."""
    pass
# Will figure out what this actually does later
def parseOrig(line):
    """Handle a .ORIG directive. TODO: unimplemented stub."""
    pass
# Returns True if the current line is an assembler directive (like .NAME).
def isDirective(line):
    """Return True iff *line* begins with '.', marking it as a directive."""
    # Idiom: return the predicate directly instead of an if/else on it.
    return line.startswith('.')
# Will expand more on this later. For now, only checks if the beginning of a line is a comment.
def isComment(line):
    """Return True iff *line* begins with ';' (an assembly comment)."""
    # Idiom: return the predicate directly instead of an if/else on it.
    return line.startswith(';')
# Returns True if the current line is a label definition (like MainLoop:).
def isLabelDef(line):
    """Return True iff *line* ends with ':' (a label definition)."""
    # Idiom: return the predicate directly instead of an if/else on it.
    return line.endswith(':')
# Determines whether the given instruction is a pseudo instruction.
def isPseudoInstr(line):
    """Return True iff the first word of *line* is a pseudo-instruction opcode."""
    # re.findall with the module-level pattern cache replaces the per-call
    # re.compile; returning the membership test replaces the if/else.
    opcode = re.findall(r'\w+', line)[0].lower()
    return opcode in tables.pseudoTable
# Replaces pseudo instruction with regular instruction
def replacePseudoInstr(line):
    """Expand a pseudo instruction into one or two real instructions.

    Returns (replacedLine, hasMoreLines) where replacedLine is a list of
    instruction strings and hasMoreLines is True when the expansion produced
    more than one instruction (ble/bge). tables.pseudoTable maps each pseudo
    opcode to the real opcode used for its first expanded instruction.
    """
    wordsRE = re.compile('\w+')
    words = wordsRE.findall(line)
    opcode = words[0].lower()
    replacedLine = []
    hasMoreLines = False
    if opcode == 'br':
        # br imm  ->  <real> s0, s0, imm
        imm = words[-1]
        rest = 's0' + ', ' + 's0' + ', ' + imm
        replacedLine.append(tables.pseudoTable[opcode] + ' ' + rest)
    elif opcode == 'not':
        # not rd, rs  ->  <real> rd, rs, rs
        rd = words[-2]
        rs = words[-1]
        rest = rd + ', ' + rs + ', ' + rs
        replacedLine.append(tables.pseudoTable[opcode] + ' ' + rest)
    elif opcode == 'ble':
        # ble rs1, rs2, imm  ->  <real> s0, rs1, rs2 ; bnez s0, imm
        rs1 = words[-3]
        rs2 = words[-2]
        imm = words[-1]
        rest = 's0' + ', ' + rs1 + ', ' + rs2
        replacedLine.append(tables.pseudoTable[opcode] + ' ' + rest)
        rest = 's0' + ', ' + imm
        replacedLine.append('bnez' + ' ' + rest)
        hasMoreLines = True
    elif opcode == 'bge':
        # bge rs1, rs2, imm  ->  <real> s0, rs1, rs2 ; bnez s0, imm
        rs1 = words[-3]
        rs2 = words[-2]
        imm = words[-1]
        rest = 's0' + ', ' + rs1 + ', ' + rs2
        replacedLine.append(tables.pseudoTable[opcode] + ' ' + rest)
        rest = 's0' + ', ' + imm
        replacedLine.append('bnez' + ' ' + rest)
        hasMoreLines = True
    elif opcode == 'call':
        # call imm(rs1)  ->  <real> ra, imm(rs1)
        rs1 = words[-1]
        imm = words[-2]
        rest = 'ra' + ', ' + imm + '(' + rs1 + ')'
        replacedLine.append(tables.pseudoTable[opcode] + ' ' + rest)
    elif opcode == 'ret':
        # ret  ->  <real> r9, 0(ra)
        rest = 'r9' + ', ' + '0' + '(' + 'ra' + ')'
        replacedLine.append(tables.pseudoTable[opcode] + ' ' + rest)
    elif opcode == 'jmp':
        # jmp imm(rs1)  ->  <real> r9, imm(rs1)
        imm = words[-2]
        rs1 = words[-1]
        rest = 'r9' + ', ' + imm + '(' + rs1 + ')'
        replacedLine.append(tables.pseudoTable[opcode] + ' ' + rest)
    return replacedLine, hasMoreLines
# This puts label and the location it was defined in a table. Location is in hex string, based on the 0x40 byte addressable address (instead of the 2-bit shifted address)
def updateLabelTable(label, origOffset, origAddr):
    """Record *label* -> its byte address (hex string, zero-extended to 8 digits).

    origAddr is the .ORIG base address as a hex string; origOffset counts
    instructions (4 bytes each) from that base.
    """
    location = '0x' + zext(hex(int(origAddr, 16) + 4 * origOffset), 8)
    # or this: location = hex(0x10 + offsetFromORIG)[2:0]
    labelTable[label] = location
# Zero-extend a hex literal to a fixed number of hex digits.
def zext(imm, size):
    """Strip the '0x' prefix from *imm* and left-pad with zeros to *size*
    digits; values already longer than *size* are returned unchanged."""
    # str.zfill pads to the target width and leaves longer strings alone —
    # exactly the original zeros-prefix behavior.
    return imm[2:].zfill(size)
# Given a hex literal, trim it down to the given number of hex digits.
def trim(imm, size):
    """Return the *size* least-significant hex digits of *imm* ('0x...').

    GENERALIZATION: the original hard-coded the last 4 digits with a TODO
    ("need to figure out which part to trim out"); using *size* honors the
    parameter the caller (format) already passes. Behavior is unchanged for
    the size=4 case.
    """
    digits = imm[2:]  # strip the '0x' prefix
    return digits[-size:]
def format(imm, size):
    """Normalise hex literal *imm* ('0x...') to exactly *size* digits,
    trimming when too long and zero-extending when too short.

    (The name shadows the builtin `format` — kept for API stability.)
    """
    digits = imm[2:]
    if len(digits) > size:
        return trim(imm, size)
    if len(digits) < size:
        return zext(imm, size)
    return digits
# Returns True if the given input is entirely a decimal number string.
def isDecimalOffset(imm):
    """Return True iff *imm* is a non-empty string of ASCII decimal digits."""
    # re.fullmatch collapses the original match-then-compare-lengths dance
    # into one anchored test; empty strings and mixed strings both fail.
    return re.fullmatch('[0-9]+', imm) is not None
# Dispatch an assembler directive line to the matching handler.
def parseDirective(line):
    """Route a '.NAME'/'.ORIG'/'.WORD' line to its handler; raise otherwise."""
    # \w+ does not match the leading '.', so the first token is the keyword.
    directive = re.findall(r'\w+', line)[0]
    if directive == 'NAME':
        updateNameTable(line)
    elif directive == 'ORIG':
        parseOrig(line)
    elif directive == 'WORD':
        parseWord(line)
    else:
        raise Exception('Not a valid assembler directive!')
# parses each line to opcode, registers, (and possibly labels)
def parseLine(line):
    """Split an instruction line into (opcode, regs, label).

    Immediate-type opcodes carry a label/immediate operand; R-type lines get
    a dummy 12-char label. Raises on unknown opcodes or registers.
    NOTE: contains Python-2 `print reg` — this module is Python 2 only.
    """
    lineArr = line.split(' ', 1)
    opcode = lineArr[0].lower()
    stripOpcode = lineArr[1]
    splitComma = stripOpcode.split(',')
    stripSplit = []
    for split in splitComma:
        stripSplit.append(split.strip().lower()) #get rid of whitespace if any
    #throw an error if it's not a valid opcode
    if opcode not in tables.opcodeTable:
        raise Exception('Invalid instruction opcode!')
    #then, based on which opcode it is,
    #look at how many registers it requires
    if isImmType(opcode) == False:
        #there are three registers
        regs = [stripSplit[0], stripSplit[1], stripSplit[2]]
        label = '000000000000' #don't care what this is
    else:
        #its either one or two registers
        if opcode == 'mvhi' or opcode == 'bltz' or opcode == 'bltez' or opcode == 'bnez' or opcode == 'bgtez' or opcode == 'bgtz':
            #its one register
            regs = [stripSplit[0]]
            label = stripSplit[1]
        else:
            #its two registers
            if opcode == 'jal' or opcode == 'lw' or opcode == 'sw':
                # memory-style operand: rd, imm(rs1)
                beginIndex = stripSplit[1].find("(")
                endIndex = stripSplit[1].find(")")
                regs = [stripSplit[0], stripSplit[1][beginIndex+1:endIndex]]
                label = stripSplit[1][:beginIndex]
            else:
                regs = [stripSplit[0], stripSplit[1]]
                label = stripSplit[2]
    #check validity of registers
    for reg in regs:
        if reg not in tables.regTable:
            print reg
            raise Exception('Invalid register(s)!')
    #check label validity partially
    # this always throws an erro so im commenting it out for now
    # if(label.startswith('0x')):
    # #make sure its valid hex
    # hexcheck = re.match('[0-9a-fA-F]{1,4}', label[1:])
    # if hexcheck == None:
    # raise Exception('Invalid Immediate value!')
    return opcode, regs, label
# translate opcode to hex
def transOpcode(opcode):
    """Return the hex encoding for *opcode*; raises KeyError when unknown."""
    return tables.opcodeTable[opcode]
# translate reg to hex
def transReg(reg):
    """Return the hex encoding for register name *reg*; raises KeyError when unknown."""
    return tables.regTable[reg]
# For now, there's no checking on if there are too many or not enough registers (there should only be 2 or 3 registers)
def transRegs(regs):
    """Concatenate the hex encodings of every register in *regs*."""
    return ''.join(transReg(reg) for reg in regs)
# Returns True if the instruction is Immediate type, otherwise False. In the
# False case the translated instruction gets a 0x000 immediate field.
def isImmType(opcode):
    """Return True iff *opcode* takes an immediate operand: any '...i'
    opcode, any branch ('b...'), or jal/lw/sw."""
    # Idiom: return the boolean expression directly; membership test
    # replaces the chained == comparisons.
    return opcode.endswith('i') or opcode.startswith('b') or opcode in ('jal', 'lw', 'sw')
# calculates the Imm value from label
def calcLabelOffset(labelDefAddr, currAddr):
    """Return the word offset (hex string) from the instruction after
    *currAddr* to *labelDefAddr*; backward offsets are encoded as 16-bit
    two's complement via the & 0xffff mask.

    NOTE: relies on Python-2 integer `/` (floor) division — under Python 3
    the result would be a float and hex() would raise; this module is
    Python 2 only.
    """
    labelDefAddr = int(labelDefAddr, 16)
    currAddr = int(currAddr, 16)
    if labelDefAddr > currAddr:
        # Forward branch: offset relative to PC+4, in 4-byte words.
        return hex((labelDefAddr - (currAddr + 4)) / 4)
    elif labelDefAddr < currAddr:
        # Backward branch: same computation, masked to 16 bits.
        return hex(((labelDefAddr - (currAddr + 4)) / 4) & 0xffff)
|
[
"jkim796@gatech.edu"
] |
jkim796@gatech.edu
|
667907153fb3690183536d53d10538fd0e5ee2f8
|
bfc25f1ad7bfe061b57cfab82aba9d0af1453491
|
/data/external/repositories_2to3/197978/Grasp-and-lift-EEG-challenge-master/genInfos.py
|
3fe287f7ae615d7d863ba13934411a5cad7ad2b9
|
[
"MIT"
] |
permissive
|
Keesiu/meta-kaggle
|
77d134620ebce530d183467202cf45639d9c6ff2
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
refs/heads/master
| 2020-03-28T00:23:10.584151
| 2018-12-20T19:09:50
| 2018-12-20T19:09:50
| 147,406,338
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,710
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 13 21:35:28 2015.
@author: fornax

Builds the (labels, subject, series) lookup arrays for the validation split
(training series 7&8) and the (id, subject, series) arrays for the test
split (series 9&10), saved as infos_val.npy / infos_test.npy.
"""
import numpy as np
import pandas as pd
from glob import glob
from mne import concatenate_raws
from preprocessing.aux import creat_mne_raw_object
# #### define lists #####
subjects = list(range(1, 13))
lbls_tot = []
subjects_val_tot = []
series_val_tot = []
ids_tot = []
subjects_test_tot = []
series_test_tot = []
# #### generate predictions #####
for subject in subjects:
    print('Loading data for subject %d...' % subject)
    # ############### READ DATA ###############################################
    fnames = glob('data/train/subj%d_series*_data.csv' % (subject))
    fnames.sort()
    # The last two training series (7 & 8 after lexicographic sort) are the
    # held-out validation series.
    fnames_val = fnames[-2:]
    fnames_test = glob('data/test/subj%d_series*_data.csv' % (subject))
    fnames_test.sort()
    raw_val = concatenate_raws([creat_mne_raw_object(fname, read_events=True)
                                for fname in fnames_val])
    raw_test = concatenate_raws([creat_mne_raw_object(fname, read_events=False)
                                 for fname in fnames_test])
    # extract labels for series 7&8: rows 32+ of the raw data matrix hold the
    # event channels appended by creat_mne_raw_object(read_events=True)
    # — presumably 32 EEG channels come first; confirm in preprocessing.aux.
    labels = raw_val._data[32:]
    lbls_tot.append(labels.transpose())
    # aggregate infos for validation (series 7&8)
    raw_series7 = creat_mne_raw_object(fnames_val[0])
    raw_series8 = creat_mne_raw_object(fnames_val[1])
    series = np.array([7] * raw_series7.n_times +
                      [8] * raw_series8.n_times)
    series_val_tot.append(series)
    subjs = np.array([subject]*labels.shape[1])
    subjects_val_tot.append(subjs)
    # aggregate infos for test (series 9&10). Lexicographic sort puts
    # 'series10' BEFORE 'series9', hence fnames_test[0] is series 10 and the
    # [10]*... + [9]*... concatenation below mirrors the ids order.
    ids = np.concatenate([np.array(pd.read_csv(fname)['id'])
                          for fname in fnames_test])
    ids_tot.append(ids)
    raw_series9 = creat_mne_raw_object(fnames_test[1], read_events=False)
    raw_series10 = creat_mne_raw_object(fnames_test[0], read_events=False)
    series = np.array([10] * raw_series10.n_times +
                      [9] * raw_series9.n_times)
    series_test_tot.append(series)
    subjs = np.array([subject]*raw_test.n_times)
    subjects_test_tot.append(subjs)
# save validation infos: columns = 6 label traces, subject, series
subjects_val_tot = np.concatenate(subjects_val_tot)
series_val_tot = np.concatenate(series_val_tot)
lbls_tot = np.concatenate(lbls_tot)
toSave = np.c_[lbls_tot, subjects_val_tot, series_val_tot]
np.save('infos_val.npy', toSave)
# save test infos: columns = sample id, subject, series
subjects_test_tot = np.concatenate(subjects_test_tot)
series_test_tot = np.concatenate(series_test_tot)
ids_tot = np.concatenate(ids_tot)
toSave = np.c_[ids_tot, subjects_test_tot, series_test_tot]
np.save('infos_test.npy', toSave)
|
[
"keesiu.wong@gmail.com"
] |
keesiu.wong@gmail.com
|
06f746d46e92260fad3bdfc7fdedbc74a1648ad6
|
a6002f605d2f906cf86a22c467ee7f72cb30f314
|
/sambuca/free_parameters.py
|
f3360d70b710af5e43da8e723a24c998460f84a9
|
[
"MIT"
] |
permissive
|
stevesagar/sambuca
|
c41e6d56d2e65a7b9b8ad47fc2564efa5dfec4df
|
d153684d48c31ec71c6df5c6c86b9c80da332793
|
refs/heads/master
| 2020-05-23T11:20:13.793098
| 2018-07-10T03:20:22
| 2018-07-10T03:20:22
| 80,386,653
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,058
|
py
|
""" Sambuca Free Parameters
Defines the default set of free parameters for use with the
default parameter estimation function.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals)
from builtins import *
from collections import namedtuple
FreeParameters = namedtuple(
    'FreeParameters',
    ['chl', 'cdom', 'nap', 'depth', 'substrate_fraction'],
)
""" namedtuple containing the default Sambuca free parameters.

Attributes:
    chl (float): Concentration of chlorophyll (algal organic particulates).
    cdom (float): Concentration of coloured dissolved organic particulates
        (CDOM).
    nap (float): Concentration of non-algal particulates (NAP).
    depth (float): Water column depth.
    substrate_fraction (float): relative proportion of substrate1
        and substrate2.
"""
|
[
"daniel.collins@csiro.au"
] |
daniel.collins@csiro.au
|
7d61b22340803854812ce2fb50445f429aebeeb0
|
df44affab179c2546fb3e0d1dc29eebcfdf51c1c
|
/toughradius/common/smsapi.py
|
06534cfab472a0cec1e3f4ef54e6c8980f14269e
|
[] |
no_license
|
sailorhdx/taurusradius
|
121c508e7faffaddcd5326d2b6d3710eaf0ed08e
|
92d30820611a0c9102ae41713ea3c35437a3c6ee
|
refs/heads/master
| 2021-01-22T02:28:31.543338
| 2017-06-17T02:15:33
| 2017-06-17T02:15:33
| 92,362,551
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,021
|
py
|
#!/usr/bin/env python
# coding=utf-8
import time
import json
import base64
from urllib import urlencode
from toughradius.toughlib import apiutils
from toughradius.toughlib import logger
from toughradius.toughlib import utils
from toughradius.toughlib.smsutils import smscn
from toughradius.toughlib.smsutils import qcloud
from toughradius.toughlib.smsutils import sendcloud
from toughradius.toughlib.smsutils import toughcloud
from toughradius.toughlib.btforms import rules
from cyclone import httpclient
from twisted.internet import defer
class SmsApi(object):
    """Lazy factory/dispatcher over the supported SMS gateway back-ends.

    One client object per gateway is created on first use and cached for
    the lifetime of the process.
    """
    def __init__(self):
        # Gateways accepted by send_sms(); anything else raises ValueError.
        self.gateways = ['toughcloud',
         'smscn',
         'qcloud',
         'sendcloud']
        self.smscalls = {}  # gateway name -> cached client instance
    def get_instance(self, gateway, apikey, apisecret):
        """Return (creating and caching on first use) the client for *gateway*.

        NOTE(review): on a cache hit the apikey/apisecret arguments are
        ignored, so credentials cannot change after first use — confirm
        intended. Unknown gateways fall through and return None.
        """
        if gateway in self.smscalls:
            return self.smscalls[gateway]
        if gateway == 'smscn':
            self.smscalls[gateway] = smscn.SmsApi(apikey, apisecret)
        elif gateway == 'qcloud':
            self.smscalls[gateway] = qcloud.SmsApi(apikey, apisecret)
        elif gateway == 'sendcloud':
            self.smscalls[gateway] = sendcloud.SmsApi(apikey, apisecret)
        elif gateway == 'toughcloud':
            self.smscalls[gateway] = toughcloud.SmsApi(apikey, apisecret)
        return self.smscalls.get(gateway)
    @defer.inlineCallbacks
    def send_sms(self, gateway, apikey, apisecret, sendphone, tplid, args = [], kwargs = {}):
        """Send template *tplid* to *sendphone* via *gateway* (Twisted Deferred).

        Validates the gateway name and the mobile number, then delegates to
        the cached gateway client. Yields the gateway response, or False if
        sending raised (the error is logged, not propagated).
        NOTE(review): mutable defaults (args=[], kwargs={}) are shared
        across calls — harmless while only read, but fragile.
        """
        if gateway not in self.gateways:
            raise ValueError(u'gateway [%s] not support' % gateway)
        if not rules.is_mobile.valid(sendphone):
            raise ValueError(u'sendsms: %s mobile format error' % sendphone)
        try:
            api = self.get_instance(gateway, apikey, apisecret)
            resp = yield api.send_sms(sendphone, tplid, args=args, kwargs=kwargs)
            defer.returnValue(resp)
        except Exception as err:
            logger.exception(err)
            defer.returnValue(False)
# Module-level singleton plus a convenience alias, so callers can simply
# `from toughradius.common.smsapi import send_sms`.
_smsapi = SmsApi()
send_sms = _smsapi.send_sms
|
[
"sailorhdx@hotmail.com"
] |
sailorhdx@hotmail.com
|
c8d81a928a94485e734bc350a35135eadc866d64
|
8683ea0e2a450f163737740bad07a6ca805120ce
|
/run.py
|
110ccab594694465d738cd4b00f02f5ce425814e
|
[] |
no_license
|
AllanVieira/flask-microblog
|
250f5bfe54707e34cbaba314e0c9970555f7df9d
|
88a846a22ac6ba49dad3ced36188cda4d4dca0d6
|
refs/heads/master
| 2021-01-25T09:53:31.980809
| 2015-07-11T15:57:46
| 2015-07-11T15:57:46
| 38,900,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 72
|
py
|
#!flask-microblog/Scripts/python
# Entry-point script: start the Flask development server for the app package.
from app import app
# NOTE(review): debug=True enables the interactive debugger and reloader;
# it must not be used for a production deployment.
app.run(debug=True)
|
[
"vieira.allanc@gmail.com"
] |
vieira.allanc@gmail.com
|
5818777c607c788982754ba5803d7c4594917e2c
|
4e06ed7a0438a7ed8289c4b77fce840750e8f631
|
/ProgrammingProblems/src/project_euler/P002_even_fibonacci_numbers/solution_02_best.py
|
f7d29892de8249997bff4a27b61976144d8b0c68
|
[
"MIT"
] |
permissive
|
PavanKumar-K-A/Notes2Myself
|
bdb3793cc5de32f994998116e86eeffbe0447059
|
8c4fdde1a4abd5fb444f5740af460792d345d964
|
refs/heads/master
| 2020-12-02T16:34:42.433654
| 2016-07-29T11:27:39
| 2016-07-29T11:27:39
| 64,475,802
| 1
| 1
| null | 2016-07-29T11:31:07
| 2016-07-29T11:31:07
| null |
UTF-8
|
Python
| false
| false
| 1,152
|
py
|
"""
Technique
- A slight improvement over the previous technique and a slight but unnoticeable improvement in performance as well.
- Since the series starts with 2 odd number, the third one will be even because it is the sum of two odd numbers. The
next two will again be odd since one odd number gets added to an even number. In short, every third number in series
will be even. Example
1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
a, b, c, a, b, c, a, b, c, a, b, ...
- This simple observation can help us to solve this puzzle without the need to divide by 2 and find the reminder each
time.
Note
- Simple and best solutions
Instrumentation
- System Details: 8x Intel Core i7-3630QM CPU @ 2.40GHz, 16GB RAM, Ubuntu 14.04
- Input Details: UPPER_BOUND = 1 Billion
- Time for 100 runs: Minimum - 0.0 sec, Average - 0.0 sec, Maximum 0.0 sec
"""
def answer(upper_bound):
    """Sum the even Fibonacci numbers strictly below *upper_bound*.

    Exploits the fact that every third Fibonacci number is even: the series
    is advanced three steps at a time and only the even member (the one the
    loop lands on) is accumulated, so no parity test is needed.
    """
    total = 0
    prev, curr = 1, 2
    while curr < upper_bound:
        total += curr
        # Advance three positions; curr lands on the next even Fibonacci.
        for _ in range(3):
            prev, curr = curr, curr + prev
    return total
|
[
"mrohith@perfios.com"
] |
mrohith@perfios.com
|
a890d4144bf7a932912ac6c94a213693f7940cdf
|
e9ceabd8414cdc90b374ee0b39c522b9df80c074
|
/tools/occam/occam/targets/interface.py
|
d8c64495b2edc048df9224861f7e4549932c0f8b
|
[] |
no_license
|
Wajihulhassan/SelfContainedPrevirt
|
275605079442ce97111350e999b1a6cdbbadb7c7
|
ea2f9127921e3bb3e72a72436f49ec3698137073
|
refs/heads/master
| 2020-05-17T23:52:39.022564
| 2013-06-09T23:26:35
| 2013-06-09T23:26:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,659
|
py
|
# ------------------------------------------------------------------------------
# OCCAM
#
# Copyright © 2011-2012, SRI International
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of SRI International nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ------------------------------------------------------------------------------
from occam import passes
from occam import interface, formats
from occam import target
import sys
import getopt
import tempfile
def deep(libs, iface):
    # Iterate the per-library interface computation to a fixed point: keep
    # re-running over all libraries until no pass contributes anything new.
    # A named (delete=False) temp file carries the interface between the
    # external pass and this process.
    tf = tempfile.NamedTemporaryFile(suffix='.iface', delete=False)
    tf.close()
    if not (iface is None):
        interface.writeInterface(iface, tf.name)
    else:
        iface = interface.emptyInterface()
    progress = True
    while progress:
        progress = False
        for l in libs:
            passes.interface(l, tf.name, [tf.name], quiet=True)
            x = interface.parseInterface(tf.name)
            # joinInterfaces presumably returns truthy when the join added
            # something new -- that is what keeps the fixed-point loop going.
            progress = interface.joinInterfaces(iface, x) or progress
        interface.writeInterface(iface, tf.name)
    # NOTE(review): relies on NamedTemporaryFile exposing os.unlink as an
    # attribute -- confirm on the targeted Python version.
    tf.unlink(tf.name)
    return iface
def shallow(libs, iface):
    # Single-pass variant of deep(): run the interface pass once per library
    # and fold each result into the accumulated interface, without iterating
    # to a fixed point.
    tf = tempfile.NamedTemporaryFile(suffix='.iface', delete=False)
    tf.close()
    if not (iface is None):
        interface.writeInterface(iface, tf.name)
    else:
        iface = interface.emptyInterface()
    for l in libs:
        passes.interface(l, tf.name, [tf.name], quiet=True)
        x = interface.parseInterface(tf.name)
        interface.joinInterfaces(iface, x)
    # NOTE(review): relies on NamedTemporaryFile exposing os.unlink as an
    # attribute -- confirm on the targeted Python version.
    tf.unlink(tf.name)
    return iface
def parse(fn):
    # '@main' is a pseudo filename standing for the built-in default
    # entry-point interface (a single call to main).
    if fn == '@main':
        return interface.mainInterface()
    else:
        # Python 2 print statement: echo the file being parsed.
        print fn
        return interface.parseInterface(fn)
class InterfaceTool (target.Target):
    """occam command-line target that computes and joins .iface files."""
    def opts(self, args):
        # -o <file> plus the boolean long options --deep and --join.
        return getopt.getopt(args, 'o:', ['deep', 'join'])
    def usage(self):
        return '\n'.join(
            ["%s [-o <output.iface>] <interface.iface> <input.bc>+" % self.name,
             "%s [-o <output.iface>] --deep <interface.iface> <input.bc>+" % self.name,
             "%s [-o <output.iface>] --join <interfaces.iface>+" % self.name])
    def desc(self):
        # BUG FIX: the help text used to say '!main!', but parse() only
        # recognises the '@main' pseudo filename.
        return '\n'.join(
            ["  This tool computes the minimal interfaces accross all libraries.",
             "  @main can be used as any interface file name and it will insert",
             "  the interface that has a single call to main(?,?)",
             "  which is the default entry point.",
             "  NOTE: This is only safe if there are no calls into these",
             "  libraries from modules that are not listed.",
             "  The tool supports the following usages:",
             "%s <output.iface> <input.bc> [<interfaces.iface>+]" % self.name,
             "  compute the functions required for input.bc given the",
             "  calls in the given interface files are the entry points",
             "%s --deep <output.iface> <input.bc>+ --with <interfaces.iface>+" % self.name,
             "  recursively compute the minimal interfaces needed for the input",
             "  bc files and write the cumulative interface to output.iface.",
             "  The --with parameters specify input interfaces",
             "%s --join <output.iface> <interfaces.iface>+" % self.name,
             "  Join the given interfaces into a single interface,",
             "  write the combined interface to stdout"])
    def run(self, cfg, flags, args):
        """Execute the tool; *flags* is the (option, value) pair list from getopt."""
        output = target.flag(flags, '-o', '-')
        if ('--join','') in flags:
            if len(args) < 1:
                raise target.ArgError()
            ifs = [parse(x) for x in args]
            result = ifs[0]
            for x in ifs[1:]:
                interface.joinInterfaces(result, x)
        else:
            # This is computing the interface
            if len(args) < 1:
                raise target.ArgError()
            if args[0] == '@*':
                iface = None
            else:
                iface = parse(args[0])
            libs = args[1:]
            # BUG FIX: flags holds (option, value) tuples, so the original
            # test `'--deep' in flags` could never be true and --deep
            # silently fell back to the shallow computation.
            if ('--deep', '') in flags:
                result = deep(libs, iface)
            else:
                result = shallow(libs, iface)
        interface.writeInterface(result, output)
        return 0
# Register this tool under the name 'interface' so the occam driver can find it.
target.register('interface', InterfaceTool('interface'))
|
[
"scott@thinkmoore.net"
] |
scott@thinkmoore.net
|
d2fee084c8ea4287d28cdf169c8e321a67e58e32
|
44cbc40e66c5a81507c74f0afd8f63b8d1de42c5
|
/sequence_solver/sequence_solver.py
|
c59087fcf4c862938c20e1e212eff6517ebe6084
|
[
"MIT"
] |
permissive
|
jonathan-hepp/Sequence-Solver
|
3453666d68b5a6ea25f49783ae113b957c00664b
|
0f7e7ba287b9a90c181755f757bffa9c54d207f4
|
refs/heads/master
| 2021-01-19T13:46:39.600306
| 2017-02-27T18:26:05
| 2017-02-27T18:26:05
| 82,417,094
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 876
|
py
|
# -*- coding: utf-8 -*-
import strategies
class SequenceSolver:
    """Try every registered strategy against a sequence, returning the first answer."""
    def __init__(self, sequence):
        # The sequence whose continuation/solution we want to compute.
        self.sequence = sequence
    def solve(self):
        """Return the first strategy result that applies to the sequence.

        Strategies signal "not applicable" by raising; those failures are
        deliberately ignored so the next strategy can be tried.  Returns
        None when no strategy succeeds.
        """
        for strategy in StrategiesFactory():
            try:
                return strategy.solve(self.sequence)
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit.  Exception keeps the
                # best-effort behaviour without masking interpreter exits.
                continue
class StrategiesFactory:
    """Iterable factory that instantiates every registered strategy subclass."""
    def __init__(self):
        # Cursor for the iterator protocol implemented below.
        self.index = 0
        self.strategies = StrategiesFactory.create_all()
    @staticmethod
    def create_all():
        """Instantiate one object per BaseStrategy subclass.

        Marked @staticmethod (BUG FIX): the original plain function only
        worked when accessed through the class on Python 3 and would break
        if called on an instance.
        """
        return [strategy() for strategy in strategies.BaseStrategy.__subclasses__()]
    def __iter__(self):
        return self
    def __next__(self):
        # BUG FIX: was a bare `except:`; only an exhausted list should end
        # the iteration, so catch IndexError specifically.
        try:
            strategy = self.strategies[self.index]
        except IndexError:
            raise StopIteration()
        self.index += 1
        return strategy
|
[
"jonathan.hepp@gmail.com"
] |
jonathan.hepp@gmail.com
|
c10252bfec21903bc8ae0d91f331ff0f08794f14
|
95ee2471fd393c9cb9807a867dbf6bc000c83484
|
/Python/extraset.py
|
f4b2b970cfc213c7388b6e33f581b6bff00581b5
|
[] |
no_license
|
Hugens25/School-Projects
|
6c7be0e3f96c651162595bb467de2334d2f91152
|
b8d40c4cfcf811e7a62f6d5e1c2bf5bd8d5b1480
|
refs/heads/master
| 2020-07-02T15:01:39.898406
| 2019-08-10T02:34:21
| 2019-08-10T02:34:21
| 201,565,483
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,238
|
py
|
#import itertools
def compare_vals(a, b):
    """Return the value from {0, 1, 2} that is neither *a* nor *b*.

    Raises ValueError (propagated from list.remove) when a or b lies
    outside {0, 1, 2} or when they are equal.
    """
    leftover = [0, 1, 2]
    for taken in (a, b):
        leftover.remove(taken)
    return leftover[0]
# Appears to implement a "Set"-card-game count: for each test case, count the
# complete 3-card sets where every attribute is all-equal or all-different.
num_cases = int(input())
for i in range(num_cases):
    # First line of a case: <number of attributes> <number of cards>.
    info = list(map(int, input().split()))
    attributes = info[0]
    total_cards = info[1]
    all_cards = []
    count = 0
    for j in range(total_cards):
        all_cards.append(list(map(int, input().split())))
    #combos = itertools.combinations(all_cards, 2)
    needed_card = []
    # NOTE(review): this inner loop reuses the name `i`, shadowing the case
    # counter above; harmless here because the outer for-loop keeps its own
    # iterator, but worth renaming.
    for i in range(len(all_cards)):
        for j in range(i+1,len(all_cards)):
            # Build the unique third card that completes a set with cards i
            # and j: per attribute, repeat the shared value or take the
            # remaining one via compare_vals.
            for k in range(attributes):
                possible_values = [0,1,2]  # leftover variable, never used
                if all_cards[i][k] == all_cards[j][k]:
                    needed_card.append(all_cards[i][k])
                if all_cards[i][k] != all_cards[j][k]:
                    needed_card.append(compare_vals(all_cards[i][k],all_cards[j][k]))
            #print(needed_card)
            if needed_card in all_cards:
                count += 1
                #print(needed_card)
                needed_card.clear()
            else:
                needed_card.clear()
    # Each 3-card set is discovered once per pair it contains (3 pairs).
    print(int(count/3))
|
[
"noreply@github.com"
] |
noreply@github.com
|
8be063a8d6e92ff4b638faa109def6bca20d490b
|
1506063b93cbb1f9978ef78f822c797df4058a10
|
/test_circle.py
|
80508deed67561efcb6286df8d66abe910b5e627
|
[] |
no_license
|
renanstn/unit-test-python
|
37c39c0fa4274110a0acfe77ae0689a7b498fd70
|
1719123119cf039c5075c9120b5af43cb9f59fd7
|
refs/heads/master
| 2022-11-30T08:34:18.265902
| 2020-08-05T21:17:03
| 2020-08-05T21:17:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,008
|
py
|
import unittest
from circle import circle_area
from math import pi
'''
Para rodar os testes, executar em linha de comando:
python -m unittest test_circle
ou somente
python -m unittest
Para obter mais informações sobre os assert methods:
Entre no python,
import unittest,
help(unittest.assertSetEqual),
'''
class TestCircleArea(unittest.TestCase):
    """Unit tests for circle.circle_area."""
    def test_area(self):
        # Known areas for valid (non-negative) radii.
        for radius, expected in ((1, pi), (0, 0), (2.1, pi * 2.1 ** 2)):
            self.assertAlmostEqual(circle_area(radius), expected)
    def test_values(self):
        # A negative radius is a value error.
        self.assertRaises(ValueError, circle_area, -2)
    def test_types(self):
        # Non-real radii must raise TypeError.
        for bad_radius in (3 + 5j, True, "radius"):
            self.assertRaises(TypeError, circle_area, bad_radius)
|
[
"renan_ota@hotmail.com"
] |
renan_ota@hotmail.com
|
b8a0233512848689eab5dea8d359062c641e2a1d
|
6a2bda031f53b057e7aac3aeebd070151f5923f1
|
/zmqpy/zmqpy.py
|
d012bf64ed8ad12196b425161f1ed1cec45fec26
|
[
"BSD-2-Clause"
] |
permissive
|
pfw/zmqpy
|
ab34b9f9f7e662e5d056a5a35078c27f4c9b5d9b
|
185758349176709da43327e1f9b7c7c04d4ca850
|
refs/heads/master
| 2020-12-24T20:14:41.117019
| 2012-12-10T18:43:17
| 2012-12-10T18:43:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,024
|
py
|
# coding: utf-8
from ._cffi import C, ffi, zmq_version, new_uint64_pointer, \
new_int64_pointer, \
new_int_pointer, \
new_binary_data, \
value_uint64_pointer, \
value_int64_pointer, \
value_int_pointer, \
value_binary_data
from .constants import *
from .error import *
from .utils import jsonapi
class Context(object):
    """0MQ context wrapper.

    NOTE: every instance shares one state dict (Borg pattern) via
    ``self.__dict__ = self._state``, so constructing a second Context
    re-initialises the shared zmq context.
    """
    _state = {}
    def __init__(self, iothreads=1):
        """Initialise the (shared) 0MQ context with *iothreads* I/O threads.

        :raises ZMQError: EINVAL when iothreads is not positive.
        """
        if not iothreads > 0:
            raise ZMQError(EINVAL)
        # Borg pattern: all Context instances share this single state dict.
        self.__dict__ = self._state
        self.zmq_ctx = C.zmq_init(iothreads)
        self.iothreads = iothreads
        self._closed = False
        self.n_sockets = 0
        self.max_sockets = 32
        self._sockets = {}
        # Socket options applied to every socket created from this context.
        self.sockopts = {LINGER: 1}
        self.linger = 1
    def term(self):
        """Close all tracked sockets and terminate the 0MQ context (idempotent)."""
        if self.closed:
            return
        # BUG FIX: iterate over a snapshot -- deleting dict entries while
        # iterating .items() raises RuntimeError on Python 3.
        for k, s in list(self._sockets.items()):
            if not s.closed:
                s.close()
            del self._sockets[k]
        C.zmq_term(self.zmq_ctx)
        self.zmq_ctx = None
        self._closed = True
        self.n_sockets = 0
    @property
    def closed(self):
        # True once term() has run.
        return self._closed
    def _add_socket(self, socket):
        # Track a socket under the current count and return the new count.
        # NOTE(review): the return value is one greater than the dict key the
        # socket was stored under, so _rm_socket(socket.n) would miss --
        # confirm whether _rm_socket is ever used with Socket.n.
        self._sockets[self.n_sockets] = socket
        self.n_sockets += 1
        return self.n_sockets
    def _rm_socket(self, n):
        del self._sockets[n]
    def socket(self, sock_type):
        """Create a Socket of *sock_type* with the context's default sockopts.

        :raises ZMQError: ENOTSUP when the context is already terminated.
        """
        if self._closed:
            raise ZMQError(ENOTSUP)
        socket = Socket(self, sock_type)
        for option, option_value in self.sockopts.items():
            socket.setsockopt(option, option_value)
        return socket
    def set_linger(self, value):
        # Record the default LINGER applied to newly created sockets.
        self.sockopts[LINGER] = value
        self.linger = value
def new_pointer_from_opt(option, length=0):
    """Allocate a fresh out-pointer sized for *option*'s value type."""
    if option in uint64_opts:
        return new_uint64_pointer()
    if option in int64_opts:
        return new_int64_pointer()
    if option in int_opts:
        return new_int_pointer()
    if option in binary_opts:
        # Binary options need a caller-supplied buffer length.
        return new_binary_data(length)
    raise ValueError('Invalid option')
def value_from_opt_pointer(option, opt_pointer, length=0):
    """Read a Python value back out of a pointer filled in for *option*."""
    # All three integer-typed option families decode the same way.
    if option in uint64_opts or option in int64_opts or option in int_opts:
        return int(opt_pointer[0])
    if option in binary_opts:
        return ffi.string(opt_pointer)
    raise ValueError('Invalid option')
def initialize_opt_pointer(option, value, length=0):
    """Build a pointer pre-loaded with *value*, typed according to *option*."""
    for opt_set, make in ((uint64_opts, value_uint64_pointer),
                          (int64_opts, value_int64_pointer),
                          (int_opts, value_int_pointer)):
        if option in opt_set:
            return make(value)
    if option in binary_opts:
        return value_binary_data(value, length)
    raise ValueError('Invalid option')
class Socket(object):
    """Wrapper around a raw zmq socket handle created from a Context."""
    def __init__(self, context, sock_type):
        self.context = context
        self.sock_type = sock_type
        self.zmq_socket = C.zmq_socket(context.zmq_ctx, sock_type)
        if not self.zmq_socket:
            raise ZMQError()
        self._closed = False
        self._attrs = {}
        # Slot count handed back by the context's socket registry.
        self.n = self.context._add_socket(self)
        # errno of the most recent failed call, for callers that check it.
        self.last_errno = None
    @property
    def closed(self):
        return self._closed
    def close(self):
        # Idempotent: only the first call releases the underlying handle.
        if not self._closed:
            C.zmq_close(self.zmq_socket)
            self._closed = True
    def bind(self, address):
        """Bind the socket to *address*; returns the raw zmq return code."""
        ret = C.zmq_bind(self.zmq_socket, address)
        return ret
    def connect(self, address):
        """Connect the socket to *address*; returns the raw zmq return code."""
        ret = C.zmq_connect(self.zmq_socket, address)
        return ret
    def setsockopt(self, option, value):
        """Set a socket option; returns the raw zmq return code."""
        length = None
        if isinstance(value, str):
            length = len(value)
        low_level_data = initialize_opt_pointer(option, value, length)
        low_level_value_pointer = low_level_data[0]
        low_level_sizet = low_level_data[1]
        ret = C.zmq_setsockopt(self.zmq_socket,
                               option,
                               ffi.cast('void*', low_level_value_pointer),
                               low_level_sizet)
        return ret
    def getsockopt(self, option, length=0):
        """Get a socket option value, or -1 on failure (errno in last_errno)."""
        low_level_data = new_pointer_from_opt(option, length=length)
        low_level_value_pointer = low_level_data[0]
        low_level_sizet_pointer = low_level_data[1]
        ret = C.zmq_getsockopt(self.zmq_socket,
                               option,
                               low_level_value_pointer,
                               low_level_sizet_pointer)
        if ret < 0:
            self.last_errno = C.zmq_errno()
            return -1
        return value_from_opt_pointer(option, low_level_value_pointer)
    def send(self, message, flags=0, copy=False):
        """Send *message* bytes; returns the raw zmq return code (<0 on error)."""
        zmq_msg = ffi.new('zmq_msg_t*')
        c_message = ffi.new('char[]', message)
        C.zmq_msg_init_size(zmq_msg, len(message))
        C.memcpy(C.zmq_msg_data(zmq_msg), c_message, len(message))
        if zmq_version == 2:
            ret = C.zmq_send(self.zmq_socket, zmq_msg, flags)
        else:
            # FIX: was `self. zmq_socket` (stray space) -- legal but confusing.
            ret = C.zmq_sendmsg(self.zmq_socket, zmq_msg, flags)
        C.zmq_msg_close(zmq_msg)
        if ret < 0:
            self.last_errno = C.zmq_errno()
        return ret
    def recv(self, flags=0):
        """Receive one message and return its bytes; raises ZMQError on failure."""
        zmq_msg = ffi.new('zmq_msg_t*')
        C.zmq_msg_init(zmq_msg)
        if zmq_version == 2:
            ret = C.zmq_recv(self.zmq_socket, zmq_msg, flags)
        else:
            ret = C.zmq_recvmsg(self.zmq_socket, zmq_msg, flags)
        if ret < 0:
            C.zmq_msg_close(zmq_msg)
            # BUG FIX: the original raised `zmqpy.ZMQError`, but this module
            # *is* zmqpy and never imports that name -> NameError at runtime.
            raise ZMQError(_errno=C.zmq_errno())
        value = ffi.buffer(C.zmq_msg_data(zmq_msg), int(C.zmq_msg_size(zmq_msg)))[:]
        C.zmq_msg_close(zmq_msg)
        return value
def make_zmq_pollitem(socket, flags):
    """Build a zmq_pollitem_t struct watching *socket* for *flags* events."""
    item = ffi.new('zmq_pollitem_t*')
    item.socket = socket.zmq_socket
    item.fd = 0
    item.events = flags
    item.revents = 0
    # Dereference: callers want the struct value, not the owning pointer.
    return item[0]
def _poll(zmq_pollitem_list, poller, timeout=-1):
    """Run zmq_poll over the given pollitems and return (Socket, revents) pairs."""
    if zmq_version == 2:
        # libzmq 2.x expects the timeout in microseconds (3.x uses ms),
        # hence the x1000 scaling.
        timeout *= 1000
    items = ffi.new('zmq_pollitem_t[]', zmq_pollitem_list)
    C.zmq_poll(items,
               ffi.cast('int', len(zmq_pollitem_list)),
               ffi.cast('long', timeout))
    ready = []
    for index in range(len(items)):
        entry = items[index]
        if entry.revents > 0:
            # Map the raw zmq handle back to its wrapping Socket.
            ready.append((poller._sockets[entry.socket], entry.revents))
    return ready
# Code From PyZMQ
class Poller(object):
    """Minimal poller mirroring pyzmq's zmq.Poller API."""
    def __init__(self):
        # socket -> registered event mask
        self.sockets = {}
        # raw zmq handle -> wrapping Socket (used to map poll results back)
        self._sockets = {}
        # socket -> prepared zmq_pollitem_t struct
        self.c_sockets = {}
    def register(self, socket, flags=POLLIN|POLLOUT):
        """Register *socket* for *flags*; a zero mask unregisters it."""
        if flags:
            self.sockets[socket] = flags
            self._sockets[socket.zmq_socket] = socket
            self.c_sockets[socket] = make_zmq_pollitem(socket, flags)
        elif socket in self.sockets:
            # unregister sockets registered with no events
            self.unregister(socket)
        else:
            # ignore new sockets with no events
            pass
    def modify(self, socket, flags=POLLIN|POLLOUT):
        """Re-register *socket* with a new event mask."""
        self.register(socket, flags)
    def unregister(self, socket):
        """Forget *socket* entirely; raises KeyError if it was never registered."""
        del self.sockets[socket]
        del self._sockets[socket.zmq_socket]
        del self.c_sockets[socket]
    def poll(self, timeout=None):
        """Poll registered sockets; timeout in ms, None/negative blocks forever."""
        if timeout is None:
            timeout = -1
        timeout = int(timeout)
        if timeout < 0:
            timeout = -1
        # BUG FIX: pass a real list -- on Python 3 dict.values() is a view,
        # which cffi's array initializer inside _poll rejects.
        items = _poll(list(self.c_sockets.values()),
                      self,
                      timeout=timeout)
        return items
|
[
"felipecruz@loogica.net"
] |
felipecruz@loogica.net
|
5a5f34191a28725a3827b90ff01bbdf2a66566b0
|
38b2cf51cbd5cdbb062bf1393dae12237dd4f071
|
/二叉搜索树与双向链表.py
|
d9ad708f048d710bb74d2d5c775321a945865022
|
[] |
no_license
|
Wsssssss/target-offer
|
4986ab6ec45a0dd123d4d578c041057df98f81ce
|
c595e5abee5244602adf9a285380bec9816e17e5
|
refs/heads/master
| 2020-06-17T03:33:46.318935
| 2019-09-16T15:48:57
| 2019-09-16T15:48:57
| 195,782,291
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,375
|
py
|
# Problem description:
# Given a binary search tree, convert it into a sorted doubly linked list.
# No new nodes may be created; only the pointers of existing nodes may be adjusted.
class Solution:
    """Convert a BST into a sorted doubly linked list, reusing the tree nodes.

    `left` plays the role of the backward pointer and `right` the forward
    pointer in the resulting list.  Both variants return the list head.
    """
    def Convert(self, pRootOfTree):
        """Recursive variant: convert each subtree, then splice the root in."""
        if not pRootOfTree:
            return None
        if not (pRootOfTree.left or pRootOfTree.right):
            # A leaf is already a one-element list.
            return pRootOfTree
        head = self.Convert(pRootOfTree.left)
        if head:
            # Walk to the tail of the left list and append the root.
            tail = head
            while tail.right:
                tail = tail.right
            tail.right = pRootOfTree
            pRootOfTree.left = tail
        right_head = self.Convert(pRootOfTree.right)
        if right_head:
            # Hook the right list directly after the root.
            pRootOfTree.right = right_head
            right_head.left = pRootOfTree
        return head if head else pRootOfTree
    def Convert2(self, pRootOfTree):
        """Iterative variant: collect nodes in-order, then relink neighbours."""
        if not pRootOfTree:
            return None
        ordered = []
        pending = []
        node = pRootOfTree
        # Standard iterative in-order traversal.
        while node or pending:
            while node:
                pending.append(node)
                node = node.left
            node = pending.pop()
            ordered.append(node)
            node = node.right
        head = ordered[0]
        for prev_node, next_node in zip(ordered, ordered[1:]):
            prev_node.right = next_node
            next_node.left = prev_node
        return head
|
[
"569467716@qq.com"
] |
569467716@qq.com
|
79194914e7ccad5fd5e4bcf44079d6386cb961dd
|
1b01d3c77d30cf6fb6aea9be42ff726960191092
|
/build/laser_filters-indigo-devel/catkin_generated/pkg.develspace.context.pc.py
|
704444c1ebf8c08a9dbeb1b577615230ec778fa5
|
[] |
no_license
|
13386030098/laser_navigation
|
849f6e70bca212a6a37554181a617c3962294e4c
|
471aa4e3ce649a144b60aa77e808467cb208b443
|
refs/heads/master
| 2022-11-28T15:45:33.071171
| 2020-08-19T09:18:20
| 2020-08-19T09:18:20
| 267,012,401
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 668
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# NOTE: machine-generated by catkin for the devel space; do not edit by hand --
# regenerate by rebuilding the workspace instead.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/zzz/laser_navigation/src/laser_filters-indigo-devel/include".split(';') if "/home/zzz/laser_navigation/src/laser_filters-indigo-devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "sensor_msgs;roscpp;tf;filters;message_filters;laser_geometry;pluginlib;angles".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lpointcloud_filters;-llaser_scan_filters".split(';') if "-lpointcloud_filters;-llaser_scan_filters" != "" else []
PROJECT_NAME = "laser_filters"
PROJECT_SPACE_DIR = "/home/zzz/laser_navigation/devel"
PROJECT_VERSION = "1.8.8"
|
[
"2668920514@qq.com"
] |
2668920514@qq.com
|
a1d0620331051435678cf0a9e29082e724a97bc7
|
8a907374778da9b7ea37bed9d1de862a28dcd30e
|
/appserver/community/migrations/0003_community_city.py
|
7af70f9f06772723fdc2b5d9574dba69a1a403f6
|
[
"MIT"
] |
permissive
|
onebitme/SWE574-Horuscope
|
f1375f1a233c8959fd1369a6dcebb958a267b057
|
9725dd356cbfd19f0ce88d4a208c872be765bd88
|
refs/heads/master
| 2022-12-01T06:05:21.130558
| 2020-08-07T11:04:37
| 2020-08-07T11:04:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 398
|
py
|
# Generated by Django 2.1.1 on 2020-06-09 23:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add an optional 'city' CharField to Community."""
    dependencies = [
        ('community', '0002_community_author'),
    ]
    operations = [
        migrations.AddField(
            model_name='community',
            name='city',
            # blank=True makes the field optional in forms/validation.
            field=models.CharField(blank=True, max_length=100),
        ),
    ]
|
[
"ersuns@gmail.com"
] |
ersuns@gmail.com
|
e3081826c16e08d1104471dac1a6abd7e415551c
|
b8a195eff34bb7a03012b27356536f9713f18ff6
|
/I0320063_exercise9.5.py
|
957b970e95aadabddc1e142216cb19f8725219a3
|
[] |
no_license
|
AfinFirnas/Muhammad-Firnas-Balisca-Putra_I0320063_Abyan_Tugas9
|
f66711831a01f0b30af1779093051fcb43fc0b25
|
89083ee2c37f73fa9a218e12bb0a92db353bf8cc
|
refs/heads/main
| 2023-04-17T14:48:59.361287
| 2021-04-30T11:51:38
| 2021-04-30T11:51:38
| 363,116,993
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 203
|
py
|
# 3-D nested list: A[i][j][k] selects block i, row j, element k.
A = [
    [
        [10,20,30],
        [40,50,60]
    ],
    [
        [11,21,31],
        [41,51,61]
    ]
]
# access element 10 (block 0, row 0, column 0)
print(A[0][0][0])
# access element 50 (block 0, row 1, column 1)
print(A[0][1][1])
|
[
"noreply@github.com"
] |
noreply@github.com
|
24da100dd2dcfb1fbf2dc0f990d2db5cecb40f9e
|
a838d4bed14d5df5314000b41f8318c4ebe0974e
|
/sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2021_01_01/operations/_blob_services_operations.py
|
0d43959e0413d45f681583c3efd5aacfe3752027
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
scbedd/azure-sdk-for-python
|
ee7cbd6a8725ddd4a6edfde5f40a2a589808daea
|
cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a
|
refs/heads/master
| 2023-09-01T08:38:56.188954
| 2021-06-17T22:52:28
| 2021-06-17T22:52:28
| 159,568,218
| 2
| 0
|
MIT
| 2019-08-11T21:16:01
| 2018-11-28T21:34:49
|
Python
|
UTF-8
|
Python
| false
| false
| 13,580
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class BlobServicesOperations(object):
"""BlobServicesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2021_01_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Wire up the pipeline client and (de)serializers; see the class
        # docstring for parameter descriptions.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.BlobServiceItems"]
        """List blob services of storage account. It returns a collection of one object named default.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either BlobServiceItems or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.storage.v2021_01_01.models.BlobServiceItems]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # AutoRest-generated ARM list operation; hand edits are lost on regeneration.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BlobServiceItems"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-01-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Closure: build the GET request -- the fully templated URL for the
            # first page, or the service-provided next_link verbatim afterwards.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
                    'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Closure: deserialize one page; returns (continuation_token, items).
            deserialized = self._deserialize('BlobServiceItems', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return None, iter(list_of_elem)
        def get_next(next_link=None):
            # Closure: fetch one page, mapping HTTP errors to typed exceptions.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices'}  # type: ignore
    def set_service_properties(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        parameters,  # type: "_models.BlobServiceProperties"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.BlobServiceProperties"
        """Sets the properties of a storage account’s Blob service, including properties for Storage
        Analytics and CORS (Cross-Origin Resource Sharing) rules.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :param parameters: The properties of a storage account’s Blob service, including properties for
         Storage Analytics and CORS (Cross-Origin Resource Sharing) rules.
        :type parameters: ~azure.mgmt.storage.v2021_01_01.models.BlobServiceProperties
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BlobServiceProperties, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2021_01_01.models.BlobServiceProperties
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # AutoRest-generated ARM PUT operation; hand edits are lost on regeneration.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BlobServiceProperties"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-01-01"
        # The service exposes exactly one blob service per account, named "default".
        blob_services_name = "default"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.set_service_properties.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'BlobServicesName': self._serialize.url("blob_services_name", blob_services_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'BlobServiceProperties')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('BlobServiceProperties', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    set_service_properties.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/{BlobServicesName}'}  # type: ignore
    def get_service_properties(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.BlobServiceProperties"
        """Gets the properties of a storage account’s Blob service, including properties for Storage
        Analytics and CORS (Cross-Origin Resource Sharing) rules.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BlobServiceProperties, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2021_01_01.models.BlobServiceProperties
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BlobServiceProperties"]
        # Status-code -> exception mapping used by map_error below; callers may
        # extend or override it via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-01-01"
        # The Blob service resource is a singleton: its URL segment is always "default".
        blob_services_name = "default"
        accept = "application/json"

        # Construct URL
        url = self.get_service_properties.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'BlobServicesName': self._serialize.url("blob_services_name", blob_services_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Run the GET request through the client pipeline (auth, retry, transport).
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('BlobServiceProperties', pipeline_response)

        # A caller-supplied response hook, if any, shapes the return value.
        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_service_properties.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/{BlobServicesName}'}  # type: ignore
|
[
"noreply@github.com"
] |
noreply@github.com
|
6efada3b5038ccf2cd2a1a11da2d52c3112ff747
|
013c6e0fc00ce9d0775b7c80466a65e9576ae163
|
/ImageDownload/imgLoader.py
|
948dfe40a7e686b356c1e68d619627a645b14007
|
[] |
no_license
|
DipuTut/Python
|
974f120370568e35df386ac6c064827fd1eeb270
|
ae425203dcfd9c84bef6730e61bd43a4ceca0f3c
|
refs/heads/master
| 2020-04-18T04:29:43.504804
| 2016-08-19T08:25:38
| 2016-08-19T08:25:38
| 66,065,733
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 327
|
py
|
# Retrieving Image data (keeping original names unchanged) from URLs in
# a Text file and store them in local hard disk.
# Usage: python imgLoader.py urls.txt   (or pipe URLs on stdin)
import urllib.request
import fileinput

for line in fileinput.input():
    # strip() also removes trailing '\r' from Windows line endings, which the
    # original '\n'-only replace left embedded in the URL.
    URL = line.strip()
    if not URL:
        # Skip blank lines instead of handing an empty URL to urlretrieve.
        continue
    # Keep the original file name: everything after the last '/'.
    # [-1] (instead of [1]) also survives inputs that contain no '/'.
    IMAGE = URL.rsplit('/', 1)[-1]
    urllib.request.urlretrieve(URL, IMAGE)
|
[
"mddipu@gmail.com"
] |
mddipu@gmail.com
|
5f7a9d0592105bfbf7bb5cee5e5f926ff19e1c7b
|
e161cc9ad82699f83461985a5ffb930c63c4d16f
|
/app.py
|
4acd70d0ca49f27e4512737cd4c8a1cd76c61aef
|
[] |
no_license
|
karanbajrang07/GOCORONA
|
acb3acefc5c00da1e56959a0c4e30aa48e209a19
|
e9b5437d3dfbc9d9f8fa1f6431a67ddffdca841a
|
refs/heads/master
| 2023-05-25T09:44:02.543056
| 2020-03-22T10:15:42
| 2020-03-22T10:15:42
| 249,150,188
| 0
| 0
| null | 2023-05-22T22:42:35
| 2020-03-22T09:24:50
|
Python
|
UTF-8
|
Python
| false
| false
| 1,604
|
py
|
"""Dash dashboard showing Indian COVID-19 counts scraped via scrap.update_data()."""
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.graph_objects as go
import pandas as pd
import os
import re
import urllib.request
import datetime

from scrap import *

global df

# Scrape the latest numbers once at startup, then read them back from the
# CSV cache that update_data() writes.
df = update_data()
df = pd.read_csv("data_covid1.csv")

app = dash.Dash()

labels = df["Name"]
values = df["Total Confirmed cases(Indian)"]

# Metrics the user can pick from in the dropdown.
options = ['Total Confirmed cases(Indian)', 'Total Confirmed cases(Foreign)', 'Cured', 'Death']
drop_down = [{'label': str(i), 'value': i} for i in options]

app.layout = html.Div([
    html.Div(id='last-update', style={'display': 'none'}),
    dcc.Dropdown(id='count_case', options=drop_down, value='Total Confirmed cases(Indian)'),
    dcc.Graph(
        id='graph'
    ),
    html.Div([html.Button('Refresh Data', id='refresh-data')]),
])


@app.callback(Output('graph', 'figure'),
              [Input('count_case', 'value')])
def figure_update(selected_value):
    """Redraw the pie chart for the selected metric, keeping only rows > 0."""
    subset = df[df[selected_value] > 0]
    return go.Figure(go.Pie(labels=subset["Name"], values=subset[selected_value],
                            textinfo='label', textposition='inside'))


@app.callback(
    Output('last-update', 'children'),
    [Input('refresh-data', 'n_clicks')])
def refresh_data(value):
    """Re-scrape the data and return the refresh timestamp.

    Bug fixes vs. the original:
    - `datetime.now()` raised AttributeError because the *module* datetime is
      imported; the class must be qualified as `datetime.datetime`.
    - `connection.close()` referenced a name never defined anywhere in this
      module and raised NameError on every refresh; it has been removed.
    """
    global df
    df = update_data()
    df = pd.read_csv("data_covid1.csv")
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


if __name__ == '__main__':
    app.run_server()
|
[
"vivekkhanna987@gmail.com"
] |
vivekkhanna987@gmail.com
|
1f0ecf8970323ca6e90866fda6313437fd553ddf
|
6e0363f62f8432bdbf84a6b076af8a6e64ffb77d
|
/database.py
|
310c9cd088eea54dae102af68caceefd5595b825
|
[] |
no_license
|
bandirenuka/Company_Info
|
63d1477b06a0cf5d7b7da2a763e008525144e911
|
54a682cde999d8be6022d1649700fd95d470424c
|
refs/heads/master
| 2023-06-26T20:26:59.200126
| 2021-08-01T17:36:23
| 2021-08-01T17:36:23
| 391,698,909
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 933
|
py
|
from model import Company
import motor.motor_asyncio
from typing import List
client=motor.motor_asyncio.AsyncIOMotorClient('mongodb://localhost:27017/')
database=client.Companyd
collection=database.Companyt
async def create_company(company):
    """Insert *company* into the collection and return it (with its new _id)."""
    await collection.insert_one(company)
    return company
async def fetch_one_company(name):
    """Return the first company document matching *name*, or None."""
    return await collection.find_one({"name": name})
async def fetch_all_companys():
    """Return every stored company wrapped in the Company model."""
    return [Company(**document) async for document in collection.find({})]
async def update_company(name, employee_size):
    """Set employee_size on the named company and return the updated document."""
    query = {"name": name}
    await collection.update_one(query, {"$set": {"employee_size": employee_size}})
    return await collection.find_one(query)
async def remove_Compnay(name):
    """Delete the named company document; always returns True."""
    # (Name typo kept intentionally: callers import it as remove_Compnay.)
    await collection.delete_one({"name": name})
    return True
|
[
"71758275+bandirenuka@users.noreply.github.com"
] |
71758275+bandirenuka@users.noreply.github.com
|
a0ce3ae8e6e2426106cc017889777f243291f359
|
7b725bfc551953331c2624156738772b55ad5e03
|
/server/tests/test_transformers.py
|
c86c8984e49fb23a1e8cecbbcd4816627bb83a89
|
[] |
no_license
|
rbarbaresco/weathermap
|
5f0b32498fead2f77f45e1656d79ba95c66d6a9d
|
b298afdafc7ff8a9350284247ff206783a68977d
|
refs/heads/master
| 2021-07-13T04:54:31.662211
| 2017-10-19T12:29:14
| 2017-10-19T12:29:14
| 106,970,031
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
import unittest
import json
from server.weathermap.transformers import openweathertransform
class TestTransformers(unittest.TestCase):
    # Fixture read once at class-definition time: a saved OpenWeather forecast
    # response (JSON text) for Florianopolis.  NOTE(review): the relative path
    # means tests must be run from the directory containing the file.
    with open('florianopolis.json', 'r') as file:
        floripa_forecast = file.read()

    def test_should_bring_five_days(self):
        """openweathertransform should collapse the raw forecast into 5 daily entries."""
        response = json.loads(openweathertransform(self.floripa_forecast))
        self.assertEqual(5, len(response))
|
[
"barbaresco.rodrigo@gmail.com"
] |
barbaresco.rodrigo@gmail.com
|
650d4bc537b1b13cc59ad77a6032fd9401d7bda6
|
6a368201fab627448fc2165ecd533ad8c33d262f
|
/thucuong.py
|
da25c9def854e9f6fee1db302a6792d795308b62
|
[] |
no_license
|
thanhtrung5763/DoAnOOP_Bai2_Python
|
c05674475030f238058d604c202b662a876091aa
|
97f05d76e7fb5dfe6698d94aed9937f218a667fa
|
refs/heads/master
| 2023-02-21T09:32:28.939721
| 2021-01-27T05:42:20
| 2021-01-27T05:42:20
| 328,703,563
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 536
|
py
|
from thucpham import ThucPham
from quanlithucdon import QuanLiThucDon
class ThucUong(ThucPham):
    """A drink ("thuc uong") menu item: extends ThucPham with an ice option
    and registers every instance in the menu manager's lists on construction."""

    def __init__(self, Ten=None, Gia=0, TinhTrang=None, ThoiDiemBan=None, Da=None):
        # Ten: name, Gia: price, TinhTrang: status, ThoiDiemBan: serving time,
        # Da: whether the drink is served with ice.
        super().__init__(Ten, Gia, TinhTrang, ThoiDiemBan)
        self.Da = Da
        # NOTE(review): registration in class-level lists is a global side
        # effect of construction — every ThucUong created is tracked forever.
        QuanLiThucDon.lThucPham.append(self)
        QuanLiThucDon.lThucUong.append(self)

    def TaoThucUong(self):
        """Interactively fill in the base fields, then prompt for the ice choice."""
        super().TaoMonAn()
        self.Da = input("Da Hay Khong Da: ")

    def HienThi(self):
        """Print the base item info followed by the ice field."""
        super().HienThi()
        print(f"{self.Da:<10}")
|
[
"thanhtrung5763@gmail.com"
] |
thanhtrung5763@gmail.com
|
96faf968490377dd49d59ec2795364de9ee2223c
|
5d0c9055d52dae6dc0e76003213165ec843a950d
|
/mercadolibre/mercadolibre/settings.py
|
4b3d249ca8407257447a809a5c5a9c4a5f760487
|
[] |
no_license
|
gmtw/curso_web_scrapping
|
bdfac8ddb37c980b82dd6980813cefd8dae6fdc7
|
4bae3d747b644f208febc502389e76bc82860542
|
refs/heads/master
| 2020-12-21T10:43:33.142999
| 2020-01-27T03:00:14
| 2020-01-27T03:00:14
| 236,407,889
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,197
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for mercadolibre project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'mercadolibre'

SPIDER_MODULES = ['mercadolibre.spiders']
NEWSPIDER_MODULE = 'mercadolibre.spiders'

# Bug fix: Scrapy requires ITEM_PIPELINES to be a dict mapping the pipeline
# class path to an order value (0-1000).  The original assignment was a plain
# string — parentheses without a trailing comma do not create a tuple — which
# Scrapy rejects at startup.
ITEM_PIPELINES = {
    'mercadolibre.pipelines.MercadoPipeline': 300,
}

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'mercadolibre (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'mercadolibre.middlewares.MercadolibreSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'mercadolibre.middlewares.MercadolibreDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'mercadolibre.pipelines.MercadolibrePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"julio_somo@ciencias.unam.mx"
] |
julio_somo@ciencias.unam.mx
|
f52a29a93a837fa8d7b68371fbafd667d115df5c
|
13525a82e1b1545b81d155956a1d310fa20a06fc
|
/Section-18/Python-data-structures/22_sum_floats/sum_floats.py
|
7db779a1fe6835ce94d877cd9d4dd1b8920a1e04
|
[] |
no_license
|
trevormcdonald/Springboard-Exercises
|
55cd9f3a4edf560632fe46c23dcccaddced95878
|
7dffcb0b524ae4fee0989af7b5e5b25808276d68
|
refs/heads/master
| 2023-02-21T05:24:00.269708
| 2021-01-19T00:57:46
| 2021-01-19T00:57:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 301
|
py
|
def sum_floats(nums):
    """Return sum of floating point numbers in nums.

    Non-float items (ints, bools, strings, lists, ...) are ignored, so an
    input with no floats sums to the int 0.

    >>> sum_floats([1.5, 2.4, 'awesome', [], 1])
    3.9

    >>> sum_floats([1, 2, 3])
    0
    """
    # Use the builtin sum over a generator instead of the original manual
    # accumulator loop, which shadowed the builtin name `sum`.
    return sum(num for num in nums if isinstance(num, float))
|
[
"mrm257@cornell.edu"
] |
mrm257@cornell.edu
|
11b3528b2e69e8e20b3ffec5e3cabb26665f60f8
|
7653ddbbc2256fae9cc62251f0241d0e9696df7d
|
/pyshtools/spectralanalysis/cross_spectrum.py
|
5b48e4b63cf25c38d0ad3ff3a882735c27d890b2
|
[
"BSD-3-Clause"
] |
permissive
|
SHTOOLS/SHTOOLS
|
c3415b38da290805ecdfd59699587e5ac5233cc8
|
93e77dcc6b36b2363f07d79d07ec47d86e6cba65
|
refs/heads/master
| 2023-08-31T01:35:49.211882
| 2023-08-28T10:50:08
| 2023-08-28T10:50:08
| 24,725,612
| 315
| 117
|
BSD-3-Clause
| 2023-08-28T10:50:10
| 2014-10-02T15:53:36
|
Python
|
UTF-8
|
Python
| false
| false
| 6,773
|
py
|
import numpy as _np
from scipy.special import factorial as _factorial
def cross_spectrum(clm1, clm2, normalization='4pi', degrees=None, lmax=None,
convention='power', unit='per_l', base=10.):
"""
Return the cross-spectrum of the spherical harmonic coefficients as a
function of spherical harmonic degree.
Usage
-----
array = cross_spectrum(clm1, clm2, [normalization, degrees, lmax,
convention, unit, base])
Returns
-------
array : ndarray, shape (len(degrees))
1-D ndarray of the spectrum.
Parameters
----------
clm1 : ndarray, shape (2, lmax + 1, lmax + 1)
ndarray containing the first set of spherical harmonic coefficients.
clm2 : ndarray, shape (2, lmax + 1, lmax + 1)
ndarray containing the second set of spherical harmonic coefficients.
normalization : str, optional, default = '4pi'
'4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized,
orthonormalized, Schmidt semi-normalized, or unnormalized coefficients,
respectively.
lmax : int, optional, default = len(clm[0,:,0]) - 1.
Maximum spherical harmonic degree to output.
degrees : ndarray, optional, default = numpy.arange(lmax+1)
Array containing the spherical harmonic degrees where the spectrum
is computed.
convention : str, optional, default = 'power'
The type of spectrum to return: 'power' for power spectrum, 'energy'
for energy spectrum, and 'l2norm' for the l2-norm spectrum.
unit : str, optional, default = 'per_l'
If 'per_l', return the total contribution to the spectrum for each
spherical harmonic degree l. If 'per_lm', return the average
contribution to the spectrum for each coefficient at spherical
harmonic degree l. If 'per_dlogl', return the spectrum per log
interval dlog_a(l).
base : float, optional, default = 10.
The logarithm base when calculating the 'per_dlogl' spectrum.
Notes
-----
This function returns either the cross-power spectrum, cross-energy
spectrum, or l2-cross-norm spectrum. Total cross-power is defined as the
integral of the clm1 times the conjugate of clm2 over all space, divided
by the area the functions span. If the mean of the functions is zero,
this is equivalent to the covariance of the two functions. The total
cross-energy is the integral of clm1 times the conjugate of clm2 over all
space and is 4pi times the total power. The l2-cross-norm is the
sum of clm1 times the conjugate of clm2 over all angular orders as a
function of spherical harmonic degree.
The output spectrum can be expresed using one of three units. 'per_l'
returns the contribution to the total spectrum from all angular orders
at degree l. 'per_lm' returns the average contribution to the total
spectrum from a single coefficient at degree l, and is equal to the
'per_l' spectrum divided by (2l+1). 'per_dlogl' returns the contribution to
the total spectrum from all angular orders over an infinitessimal
logarithmic degree band. The contrubution in the band dlog_a(l) is
spectrum(l, 'per_dlogl')*dlog_a(l), where a is the base, and where
spectrum(l, 'per_dlogl) is equal to spectrum(l, 'per_l')*l*log(a).
"""
if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
raise ValueError("The normalization must be '4pi', 'ortho', " +
"'schmidt', or 'unnorm'. Input value was {:s}."
.format(repr(normalization)))
if convention.lower() not in ('power', 'energy', 'l2norm'):
raise ValueError("convention must be 'power', 'energy', or " +
"'l2norm'. Input value was {:s}"
.format(repr(convention)))
if unit.lower() not in ('per_l', 'per_lm', 'per_dlogl'):
raise ValueError("unit must be 'per_l', 'per_lm', or 'per_dlogl'." +
"Input value was {:s}".format(repr(unit)))
if _np.iscomplexobj(clm1) is not _np.iscomplexobj(clm2):
raise ValueError('clm1 and clm2 must both be either real or ' +
'complex. \nclm1 is complex : {:s}\n'
.format(repr(_np.iscomplexobj(clm1))) +
'clm2 is complex : {:s}'
.format(repr(_np.iscomplexobj(clm2))))
if lmax is None:
lmax = len(clm1[0, :, 0]) - 1
if degrees is None:
degrees = _np.arange(lmax+1)
if _np.iscomplexobj(clm1):
array = _np.empty(len(degrees), dtype=_np.complex128)
else:
array = _np.empty(len(degrees))
if normalization.lower() == 'unnorm':
if convention.lower() == 'l2norm':
raise ValueError("convention can not be set to 'l2norm' when " +
"using unnormalized harmonics.")
for i, l in enumerate(degrees):
ms = _np.arange(l+1)
conv = _factorial(l+ms) / (2. * l + 1.) / _factorial(l-ms)
if _np.iscomplexobj(clm1):
array[i] = (conv[0:l + 1] * clm1[0, l, 0:l + 1] *
clm2[0, l, 0:l + 1].conjugate()).real.sum() + \
(conv[1:l + 1] * clm1[1, l, 1:l + 1] *
clm2[1, l, 1:l + 1].conjugate()).real.sum()
else:
conv[1:l + 1] = conv[1:l + 1] / 2.
array[i] = (conv[0:l + 1] * clm1[0, l, 0:l+1]**2).sum() + \
(conv[1:l + 1] * clm2[1, l, 1:l+1]**2).sum()
else:
for i, l in enumerate(degrees):
if _np.iscomplexobj(clm1):
array[i] = (clm1[0, l, 0:l + 1] *
clm2[0, l, 0:l + 1].conjugate()).sum() + \
(clm1[1, l, 1:l + 1] *
clm2[1, l, 1:l + 1].conjugate()).sum()
else:
array[i] = (clm1[0, l, 0:l + 1] * clm2[0, l, 0:l + 1]).sum() \
+ (clm1[1, l, 1:l + 1] * clm2[1, l, 1:l + 1]).sum()
if convention.lower() == 'l2norm':
return array
else:
if normalization.lower() == '4pi':
pass
elif normalization.lower() == 'schmidt':
array /= (2. * degrees + 1.)
elif normalization.lower() == 'ortho':
array /= (4. * _np.pi)
if convention.lower() == 'energy':
array *= 4. * _np.pi
if unit.lower() == 'per_l':
pass
elif unit.lower() == 'per_lm':
array /= (2. * degrees + 1.)
elif unit.lower() == 'per_dlogl':
array *= degrees * _np.log(base)
return array
|
[
"mark.a.wieczorek@gmail.com"
] |
mark.a.wieczorek@gmail.com
|
047f00770d20b8038d7afcd98a956810a9380d97
|
fa305ce263851654d122d2b2074ed08b0910c952
|
/bot.py
|
98fd1e289164908842495702378813acfad8959f
|
[] |
no_license
|
kimelecta/bustabot
|
8a413d328de07fb8a3aa696f7ddc6eeb0c6d1ce0
|
c613b5b33d43270a699ae76442bc28c79882ffa1
|
refs/heads/master
| 2022-11-06T22:03:05.376727
| 2020-06-17T13:55:11
| 2020-06-17T13:55:11
| 272,973,652
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,466
|
py
|
from http.server import HTTPServer, BaseHTTPRequestHandler
import os
import time
from selenium import webdriver
import selenium.webdriver.support.ui as ui
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import sys
HOST = ''  # bind on all interfaces
PORT = os.environ['PORT']  # Heroku-assigned port (string; converted with int() at bind time)
URL = os.environ['HEROKU_APP_URL']  # public app URL used by the keep-alive ping in Bustabit._run
SIMULATION = False  # when True, run the bustabit script in simulation mode with a fake balance
class MyHTTPHandler(BaseHTTPRequestHandler):
    """The request handler class for our server.
    It is instantiated once per connection to the server.

    Any GET returns a plain-text status report (username, profit, balance and
    the game log) scraped live from the bustabit page through the webdriver
    that Server attached to self.server.
    """

    def do_GET(self):
        self.send_response(200, 'OK')
        self.send_header('Content-type', 'text/plain')
        self.end_headers()

        # Modify log container height to get more log
        self.server.webdriver.execute_script("document.getElementsByClassName('logListContainer')[0].style.height = '400px'")  # modify the height to have more log
        log = self.server.webdriver.find_element_by_xpath("//div[@class='ReactVirtualized__Grid__innerScrollContainer']").text

        # Open profile screen
        self.server.webdriver.find_element_by_xpath("//a[@href='/account/overview']").click()

        # Wait for the information table to show
        wait = ui.WebDriverWait(self.server.webdriver, 1)
        try:
            wait.until(EC.presence_of_element_located((By.XPATH, "//table[@class='table-light table table-condensed table-hover']")))
        except:
            # NOTE(review): bare except swallows the timeout type; the HTTP
            # client only sees the literal body 'error'.
            self.wfile.write(b'error')
            return

        # Get usefull player informations
        game_profit = self.server.webdriver.find_element_by_xpath("//table[@class='table-light table table-condensed table-hover']/tbody/tr[7]/td[2]").text
        username = self.server.webdriver.find_element_by_xpath("//div[@class='account-header']/h3").text
        balance = self.server.webdriver.find_element_by_xpath("//table[@class='table-light table table-condensed table-hover']/tbody/tr[8]/td[2]").text

        # Close profile screen
        self.server.webdriver.find_element_by_xpath("//button[@class='close']").click()

        msg = 'Username : ' + username + '\nProfit : ' + game_profit + '\nBalance : ' + balance + '\n\n' + log
        self.wfile.write(bytes(msg, 'utf-8'))
class Server:
    """This class deserve the Heroku $PORT environnement variable
    It must be instantiated only once
    """
    _httpd = None

    def __init__(self, webdriver):
        # Binds the listening socket immediately; PORT is the Heroku-provided
        # environment variable read at module import.
        self._httpd = HTTPServer((HOST, int(PORT)), MyHTTPHandler)
        # Expose the webdriver to request handlers through the server object
        # (handlers read it as self.server.webdriver).
        self._httpd.webdriver = webdriver

    def run(self):
        # Blocks forever, serving status requests.
        self._httpd.serve_forever()
class Bustabit:
    """The Bustabit class is the core of this project
    It instantiate and run the selenium's webdriver used to communicate with the bustabit site
    """
    _error = False
    _webdriver = None
    _script = None

    def __init__(self, profile_folder, script_name):
        # Read the user's betting script once; it is injected verbatim into
        # the page's script editor later in _auto_bet().
        fd = open(script_name, "r")
        self._script = fd.read()
        fd.close()
        # Launch Firefox GUI in headless mode
        # NOTE(review): the profile folder is expected to contain an already
        # logged-in bustabit session — _connect() fails otherwise.
        opt = webdriver.FirefoxOptions()
        opt.headless = True
        self._webdriver = webdriver.Firefox(firefox_profile=profile_folder, firefox_options=opt)
        return

    def _connect(self):
        """Init webdriver"""
        self._webdriver.get('https://www.bustabit.com/play')
        # Wait until we find the presence of the 'auto' button
        try:
            wait = ui.WebDriverWait(self._webdriver, 5)
            wait.until(EC.presence_of_element_located((By.XPATH, "//li[@class='' and @role='presentation']/a[@role='button' and @href='#']")))
        except:
            # Most likely cause: the Firefox profile is not logged in.
            print('Are you sure you are logged with your profile ?')
            self._error = True
        return

    def _auto_bet(self):
        """Starting auto bet with the user script (butabit_script.js)"""
        # Get and click on 'Auto' button
        self._webdriver.find_element_by_xpath("//li[@class='' and @role='presentation']/a[@role='button' and @href='#']").click()
        # Get and click on the eye button
        self._webdriver.find_element_by_xpath("//button[@class='btn btn-xs btn-info']/i[@class='fa fa-eye']").click()
        time.sleep(1)  # Wait for the popup to dislay
        # Fill the text area with the user script
        text_area = self._webdriver.find_element_by_xpath("//textarea[@class='form-control']")
        text_area.click()
        text_area.send_keys(Keys.CONTROL, 'a')
        text_area.send_keys(Keys.RETURN)
        text_area.send_keys(self._script)
        # Get and click on the 'Save Script' button
        self._webdriver.find_element_by_xpath("//button[@class='btn btn-success' and @type='submit']").click()
        time.sleep(1)
        # Get and click on the 'Down arrow' button
        self._webdriver.find_element_by_xpath("//button[@class='btn btn-xs btn-default']").click()
        if (SIMULATION):
            # Get and click on 'Simulation' checkbox
            self._webdriver.find_element_by_xpath("//div[@class='checkbox simCheckbox']/label/input[@type='checkbox']").click()
            # Get and fill the 'simulated balance'
            SIMULATED_BALANCE = 100000
            simulated_balance_textbox = self._webdriver.find_element_by_name("simulatedBalance")
            simulated_balance_textbox.clear()
            simulated_balance_textbox.send_keys(str(SIMULATED_BALANCE))
        # Get and click on the 'Run script' button
        self._webdriver.find_element_by_xpath("//button[@class='btn btn-success' and @type='submit']").click()
        return

    def _run(self):
        """Infinite loop"""
        # Trick to keep this heroku app alive
        # 60 * 1000 = 1 minute
        # Injected JS fetches our own public URL every 10 minutes so the
        # Heroku dyno is never idled out.
        self._webdriver.execute_script("""setInterval(function(){
            fetch('""" + URL + """')
        }, 60 * 1000 * 10)
        """)
        # Serve status requests forever (also satisfies Heroku's $PORT bind).
        s = Server(self._webdriver)
        s.run()

    def start(self):
        """Start the Bustabit bot"""
        self._connect()
        if (self._error):
            self._webdriver.quit()
            return
        self._auto_bet()
        self._run()
        return
# Paths are relative to the working directory the bot is launched from.
FIREFOX_DIR = "firefox_profile"  # pre-logged-in Firefox profile directory
SCRIPT_NAME = "bustabit_script.js"  # user betting script injected into the page

if __name__ == "__main__":
    # Validate inputs up front so selenium never starts with bad paths.
    if not os.path.isdir(FIREFOX_DIR):
        print(FIREFOX_DIR + ' must be a directory')
        # Bug fix: use sys.exit() instead of the interactive-only `exit`
        # helper, which the site module may not install (e.g. frozen apps).
        sys.exit(1)
    if not os.path.isfile(SCRIPT_NAME):
        print(SCRIPT_NAME + ' must be a file')
        sys.exit(1)
    bot = Bustabit(FIREFOX_DIR, SCRIPT_NAME)
    bot.start()
    sys.exit(0)
|
[
"noreply@github.com"
] |
noreply@github.com
|
e1906118e59973542cece93528583102ab8c6cf2
|
96d8cf1ee8835fc1909e35de462e810019f49997
|
/02_sl_full_adv_v13/sl_full_linf_pgd7_v11_cifar10/core/attacks_delta.py
|
133217511fef98934a83f68731e51e607c6c2260
|
[] |
no_license
|
tnguyen9210/semi-supervised-learning-robustness-pytorch
|
d57913b7d3a71249f791563a1f232f25a4a6960b
|
1d51c77dfa9c6d80cc188227ddf9506b18545a4b
|
refs/heads/master
| 2022-10-19T04:57:25.733110
| 2020-06-05T08:51:37
| 2020-06-05T08:51:37
| 258,005,596
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,458
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from core.utils import *
class GradientSignAttack(object):
    """One-step FGSM: x_adv = clip(x + eps * sign(grad_x loss), clip_min, clip_max)."""

    def __init__(self, loss_fn, eps, clip_min=0., clip_max=1.):
        self.loss_fn = loss_fn
        self.eps = eps
        self.clip_min = clip_min
        self.clip_max = clip_max

    def perturb(self, net, x, y=None):
        """Return (x_adv, 0); when y is None, use the model's own predictions."""
        if y is None:
            with torch.no_grad():
                y = net(x).argmax(dim=1)
        y = y.detach().clone()

        # Forward pass on a leaf copy of the input so we can take its gradient.
        leaf = x.detach().clone()
        leaf.requires_grad = True
        loss = self.loss_fn(net(leaf), y)
        grad = torch.autograd.grad(loss, leaf)[0]

        # Single signed step of size eps, clamped back to the valid range.
        perturbed = torch.clamp(leaf + self.eps * grad.sign(),
                                min=self.clip_min, max=self.clip_max)
        return perturbed.detach().clone(), 0
class LinfPGDAttack(object):
    """Iterative PGD attack constrained to an L-infinity ball of radius eps.

    Each step moves by eps_iter * sign(grad), clamps the perturbation delta
    to [-eps, eps], then re-clamps the image to [clip_min, clip_max].
    """

    def __init__(
            self, loss_fn, num_iters, eps, eps_iter,
            rand_init=True, clip_min=0., clip_max=1.):
        self.loss_fn = loss_fn        # criterion maximized w.r.t. the input
        self.num_iters = num_iters    # number of PGD steps
        self.eps = eps                # L-inf radius of the perturbation ball
        self.eps_iter = eps_iter      # per-step step size
        self.rand_init = rand_init
        self.clip_min = clip_min
        self.clip_max = clip_max

    def perturb(self, net, x, y=None):
        """Return (x_adv, per-step losses); y defaults to the model's predictions."""
        if y is None:
            with torch.no_grad():
                logit = net(x)
                y = torch.argmax(logit, dim=1)
        x_nat = x.detach().clone()
        y = y.detach().clone()

        # init perturb
        # NOTE(review): if rand_init is False, `delta` is never initialized
        # and the loop below raises NameError — confirm rand_init=True is the
        # only supported mode.
        if self.rand_init:
            delta = torch.zeros_like(x).uniform_(-1, 1)
            delta = self.eps*delta
            # Clamp the start point to the valid range, then re-derive delta.
            x_adv = torch.clamp(x_nat + delta, min=self.clip_min, max=self.clip_max)
            delta = (x_adv - x_nat).detach().clone()

        # pgd iterations
        losses = []
        for it in range(self.num_iters):
            delta.requires_grad = True
            # feed x_adv and compute grad
            x_adv = x_nat + delta
            logit_yadv = net(x_adv)
            loss = self.loss_fn(logit_yadv, y)
            grad = torch.autograd.grad(loss, delta)[0]

            # compute delta: signed ascent step, projected back into the
            # eps-ball and the valid pixel range.
            delta = delta + self.eps_iter*grad.sign()
            delta = torch.clamp(delta, min=-self.eps, max=self.eps)
            x_adv = torch.clamp(x_nat+delta, min=self.clip_min, max=self.clip_max)
            delta = (x_adv-x_nat).detach().clone()

            losses.append(round(loss.item(), 4))

        x_adv = x_nat + delta
        return x_adv, losses
class GradientAttack(object):
    """One-step gradient attack: a step of size eps along the L2-normalized gradient."""

    def __init__(self, loss_fn, eps, clip_min=0., clip_max=1.):
        self.loss_fn = loss_fn
        self.eps = eps
        self.clip_min = clip_min
        self.clip_max = clip_max

    def perturb(self, net, x, y=None):
        """Return (x_adv, 0); when y is None, use the model's own predictions."""
        if y is None:
            with torch.no_grad():
                logit = net(x)
                y = torch.argmax(logit, dim=1)
        x_adv = x.detach().clone()
        y = y.detach().clone()

        # feed x_adv and compute grad
        x_adv.requires_grad = True
        logit_yadv = net(x_adv)
        loss = self.loss_fn(logit_yadv, y)
        x_grad = torch.autograd.grad(loss, x_adv)[0]

        # normalize_by_pnorm comes from core.utils; presumably it rescales the
        # gradient to unit L2 norm so the step length is eps — TODO confirm.
        x_grad = normalize_by_pnorm(x_grad, 2)
        x_adv = x_adv + self.eps*x_grad
        x_adv = torch.clamp(x_adv, min=self.clip_min, max=self.clip_max)
        x_adv = x_adv.detach().clone()
        return x_adv, 0
class L2PGDAttack(object):
    """Iterative PGD attack constrained to an L2 ball of radius eps.

    Each step moves by eps_iter along the L2-normalized gradient, projects
    delta back into the eps L2-ball (clamp_by_pnorm from core.utils), then
    re-clamps the image to [clip_min, clip_max].
    """

    def __init__(
            self, loss_fn, num_iters, eps, eps_iter,
            rand_init=True, clip_min=0., clip_max=1.):
        self.loss_fn = loss_fn        # criterion maximized w.r.t. the input
        self.num_iters = num_iters    # number of PGD steps
        self.eps = eps                # L2 radius of the perturbation ball
        self.eps_iter = eps_iter      # per-step step size
        self.rand_init = rand_init
        self.clip_min = clip_min
        self.clip_max = clip_max

    def perturb(self, net, x, y=None):
        """Return (x_adv, per-step losses); y defaults to the model's predictions."""
        if y is None:
            with torch.no_grad():
                logit = net(x)
                y = torch.argmax(logit, dim=1)
        x_nat = x.detach().clone()
        y = y.detach().clone()

        # init perturb
        # NOTE(review): if rand_init is False, `delta` is never initialized
        # and the loop below raises NameError — confirm rand_init=True is the
        # only supported mode.
        if self.rand_init:
            x_adv = torch.zeros_like(x).uniform_(self.clip_min, self.clip_max)
            delta = x_adv - x_nat
            delta = clamp_by_pnorm(delta, 2, self.eps)
            x_adv = torch.clamp(x_nat + delta, min=self.clip_min, max=self.clip_max)
            delta = (x_adv - x_nat).detach().clone()

        # pgd iterations
        losses = []
        for it in range(self.num_iters):
            delta.requires_grad = True
            # feed x_adv and compute grad
            x_adv = x_nat + delta
            logit_yadv = net(x_adv)
            loss = self.loss_fn(logit_yadv, y)
            grad = torch.autograd.grad(loss, delta)[0]

            # compute delta: normalized ascent step, projected back into the
            # eps L2-ball and the valid pixel range.
            grad = normalize_by_pnorm(grad, 2)
            delta = delta + self.eps_iter*grad
            delta = clamp_by_pnorm(delta, 2, self.eps)
            x_adv = torch.clamp(x_nat+delta, min=self.clip_min, max=self.clip_max)
            delta = (x_adv-x_nat).detach().clone()

            losses.append(round(loss.item(), 4))

        x_adv = x_nat + delta
        return x_adv, losses
|
[
"tnguyen9210@gmail.com"
] |
tnguyen9210@gmail.com
|
0e10c4dc821d92d19a19781c29184f2c21a3a2f8
|
a2e0e03e31f892454e537df32e3e1e1d94764fa0
|
/virtual/bin/gunicorn_paster
|
a5acb6c5f33e8c0f85c1fc0f5f42198bd48c6b30
|
[
"MIT"
] |
permissive
|
MichelAtieno/Instagram-Clone
|
557272585a3fff6f7a7c552b08cc5ef5e2c129da
|
7250579e4f91084ad9bf8bd688df3f556dfef64a
|
refs/heads/master
| 2020-03-30T16:23:19.351522
| 2018-10-09T09:42:05
| 2018-10-09T09:42:05
| 151,406,356
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 273
|
#!/home/michel/Desktop/Python-Django/Instagram/virtual/bin/python3.6
# -*- coding: utf-8 -*-
# Auto-generated console-script shim that delegates to gunicorn's paster app.
import re
import sys

from gunicorn.app.pasterapp import run

if __name__ == '__main__':
    # Strip the -script.py / .exe suffixes setuptools adds on Windows so the
    # program name reported by gunicorn is clean, then hand off to run().
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run())
|
[
"mishqamish@gmail.com"
] |
mishqamish@gmail.com
|
|
6893b1b04629476fddf2845af7cfe5908b9cb720
|
72e11a80587342b3f278d4df18406cd4ce7531e8
|
/hgdemandimport/demandimportpy3.py
|
e2ea27fa0f1166fc55324efb1bbdaf6c4a5029c6
|
[] |
no_license
|
EnjoyLifeFund/Debian_py36_packages
|
740666f290cef73a4f634558ccf3fd4926addeda
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
refs/heads/master
| 2021-08-24T02:17:24.349195
| 2017-12-06T06:18:35
| 2017-12-06T06:18:35
| 113,167,612
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,498
|
py
|
# demandimportpy3 - global demand-loading of modules for Mercurial
#
# Copyright 2017 Facebook Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""Lazy loading for Python 3.6 and above.
This uses the new importlib finder/loader functionality available in Python 3.5
and up. The code reuses most of the mechanics implemented inside importlib.util,
but with a few additions:
* Allow excluding certain modules from lazy imports.
* Expose an interface that's substantially the same as demandimport for
Python 2.
This also has some limitations compared to the Python 2 implementation:
* Much of the logic is per-package, not per-module, so any packages loaded
before demandimport is enabled will not be lazily imported in the future. In
practice, we only expect builtins to be loaded before demandimport is
enabled.
"""
# This line is unnecessary, but it satisfies test-check-py3-compat.t.
from __future__ import absolute_import
import contextlib
import importlib.abc
import importlib.machinery
import importlib.util
import sys
_deactivated = False
class _lazyloaderex(importlib.util.LazyLoader):
    """A LazyLoader that additionally honors the module-level ``_deactivated``
    flag and the ``ignore`` list of always-eager module names.
    """
    def exec_module(self, module):
        """Execute eagerly when lazy imports are suspended or the module is excluded."""
        load_eagerly = _deactivated or module.__name__ in ignore
        if load_eagerly:
            self.loader.exec_module(module)
        else:
            super().exec_module(module)
# This is 3.6+ because with Python 3.5 it isn't possible to lazily load
# extensions. See the discussion in https://python.org/sf/26186 for more.
# One lazy-loader variant per loader kind core Python knows about.
_extensions_loader = _lazyloaderex.factory(
    importlib.machinery.ExtensionFileLoader)
_bytecode_loader = _lazyloaderex.factory(
    importlib.machinery.SourcelessFileLoader)
_source_loader = _lazyloaderex.factory(importlib.machinery.SourceFileLoader)
def _makefinder(path):
    """Return a FileFinder for *path* whose loaders wrap modules lazily."""
    # Same loader precedence that core Python uses when building finders.
    loader_details = (
        (_extensions_loader, importlib.machinery.EXTENSION_SUFFIXES),
        (_source_loader, importlib.machinery.SOURCE_SUFFIXES),
        (_bytecode_loader, importlib.machinery.BYTECODE_SUFFIXES),
    )
    return importlib.machinery.FileFinder(path, *loader_details)
# Module names that must always be imported eagerly.
ignore = []
def init(ignorelist):
    """Set the list of module names excluded from lazy importing."""
    global ignore
    ignore = ignorelist
def isenabled():
    # Lazy importing is on iff our finder hook is installed and not suspended.
    return _makefinder in sys.path_hooks and not _deactivated
def disable():
    """Remove every installed copy of the lazy finder from sys.path_hooks."""
    # Slice assignment filters in place, so other references to the
    # sys.path_hooks list object stay valid.
    sys.path_hooks[:] = [hook for hook in sys.path_hooks if hook != _makefinder]
def enable():
    # Insert at index 0 so the lazy finder wins over the default path hooks.
    sys.path_hooks.insert(0, _makefinder)
@contextlib.contextmanager
def deactivated():
    """Context manager that temporarily forces eager (non-lazy) imports."""
    # This implementation is a bit different from Python 2's. Python 3
    # maintains a per-package finder cache in sys.path_importer_cache (see
    # PEP 302). This means that we can't just call disable + enable.
    # If we do that, in situations like:
    #
    # demandimport.enable()
    # ...
    # from foo.bar import mod1
    # with demandimport.deactivated():
    # from foo.bar import mod2
    #
    # mod2 will be imported lazily. (The converse also holds -- whatever finder
    # first gets cached will be used.)
    #
    # Instead, have a global flag the LazyLoader can use.
    global _deactivated
    demandenabled = isenabled()
    if demandenabled:
        _deactivated = True
    try:
        yield
    finally:
        # Restore only if we actually flipped the flag above.
        if demandenabled:
            _deactivated = False
|
[
"raliclo@gmail.com"
] |
raliclo@gmail.com
|
e719552d07f6604b77bce83362de1ffe0652ab54
|
4491c65a31063f9282a504601866f63e52fe2c75
|
/tts.py
|
aa6a27419a7cdfd623a87739619acb6a33224752
|
[] |
no_license
|
Pranad17/text-to
|
904e66259319a3f9aaf0d660768ba8fcd8d4b700
|
b9ed2cb55765fe616482aee46b59375a11258877
|
refs/heads/main
| 2023-06-01T18:50:50.662538
| 2021-06-16T10:20:11
| 2021-06-16T10:20:11
| 377,455,418
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 864
|
py
|
import tkinter as tk
import pyttsx3
engine = pyttsx3.init()
class Widget():
    """Minimal Tk text-to-speech window: one entry box plus a SPEAK button
    that feeds the text to the module-level pyttsx3 engine.

    NOTE: the constructor calls mainloop(), so __init__ blocks until the
    window is closed.
    """
    def __init__(self):
        self.root = tk.Tk()
        self.root.title("TTS")
        # Fixed-size window (not resizable in either direction).
        self.root.resizable(0,0)
        self.root.configure(background="cyan")
        self.label = tk.Label(text="What you want me to speak?",bg="cyan",fg="black",font="Arial 35 bold")
        self.label.pack()
        self.entry = tk.Entry(font="Arial 25",width=30)
        self.entry.pack()
        self.button = tk.Button(text="SPEAK",bg="royalblue",fg="brown",font="Arial 30 bold",command=self.clicked)
        self.button.pack()
        # Blocks here until the user closes the window.
        self.root.mainloop()
    def clicked(self):
        # Button callback: speak whatever is currently typed in the entry box.
        text = self.entry.get()
        self.speak(text)
    def speak(self,text):
        # Synchronous speech: runAndWait() returns after playback finishes.
        engine.say(text)
        engine.runAndWait()
if __name__ == "__main__":
temp = Widget()
|
[
"noreply@github.com"
] |
noreply@github.com
|
c9024f3ef48275c9eb9cfc94034cbd45602e30a9
|
48656e636c3992336f9acbc256cece0ce2d73d61
|
/PickANumber_A.py
|
915865e32acd5ce07d31a1a815afd6261dbe4ba3
|
[] |
no_license
|
KatherineWinter/python
|
dd38cd51d2845fb5005294bbe96aae511f380ae8
|
8957bc3af0845c309c12fbc754af775614578cce
|
refs/heads/master
| 2021-07-14T00:49:34.447904
| 2020-07-25T19:26:47
| 2020-07-25T19:26:47
| 188,038,687
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,480
|
py
|
# NOTE(review): this is Python 2 code (raw_input, print statement); it will
# not run under Python 3 without porting.
import random
isPlaying = True
answer = random.randint(0, 9)
guessCount = 0
playerGuess = 0
# Play until the user can play no longer
while isPlaying:
    # Only show the initial prompt once a game.
    if guessCount == 0:
        playerGuess = raw_input("I'm thinking of a number between 0-9... Guess!\n")
    # Track how many times we have been in this loop
    # NOTE(review): invalid (non-numeric / out-of-range) inputs also increment
    # guessCount because the increment precedes validation -- confirm intended.
    guessCount += 1
    ## Check for user error.
    # User must input a number. This could have been done in a try/catch, but those are expensive.
    if playerGuess.isdigit() == False:
        playerGuess = raw_input(
            "Your guess must be a whole number between 0-9.\nTry again!\n")
        continue
    # If the input was a number... Check to see if the number was in range, and if the guess was correct
    playerGuessAsInt = int(playerGuess)
    if playerGuessAsInt < 0 or playerGuessAsInt > 9:
        playerGuess = raw_input(
            "Your guess must be a whole number between 0-9.\nTry again!\n")
        continue
    elif playerGuessAsInt != answer:
        playerGuess = raw_input("Close, but not close enough. \nTry again!\n")
        continue
    print "Winner! You guessed in", guessCount, "tries! Woo!\n\n"
    # Reset the game in the event the user wants to play again.
    answer = random.randint(0, 9)
    guessCount = 0
    # Ask the user if they want to play again. Exit the game loop if they don't
    while True:
        playAgainInput = raw_input("Want to play again? (y/n) ")
        if playAgainInput == 'n':
            isPlaying = False
            break
        elif playAgainInput == 'y':
            isPlaying = True
            break
|
[
"katherine.m.winter@gmail.com"
] |
katherine.m.winter@gmail.com
|
d3218762cdff63ff7e0cd0f38c3aef5d2b9d8cda
|
840eff9a6db3324212851fde5a22d40057f0471a
|
/assignment3/inverted_index.py
|
d90ff7e3158d1d0f5c96ffd76cb7ece725f353a2
|
[] |
no_license
|
josyulakrishna/datascience_coursera
|
f013eded182711df956dc65043ffecad28dd8c41
|
ef544a431fafddaa1beab6851a6acf214a9f8c19
|
refs/heads/master
| 2021-01-13T01:25:12.222915
| 2013-08-16T06:34:44
| 2013-08-16T06:34:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 650
|
py
|
'''
Created on May 22, 2013
@author: Josyula
'''
# Part 1
import MapReduce
import sys
mr = MapReduce.MapReduce()
# Part 2
def mapper(record):
    """Emit (word, document_id) for every whitespace token in the document body."""
    # record = [document identifier, document contents]
    doc_id, body = record[0], record[1]
    for token in body.split():
        mr.emit_intermediate(token, doc_id)
# Part 3
def reducer(key, list_of_values):
    """Emit (word, list of distinct documents containing that word)."""
    # Deduplicate via a set; list order follows set iteration order, exactly
    # as in the original implementation.
    mr.emit((key, list(set(list_of_values))))
# Part 4
# NOTE(review): hard-coded absolute Windows path; the file handle is never
# closed explicitly (left to interpreter shutdown).
dpath = 'E:\\Data Science\\assignment3\\data\\books.json'
inputdata = open(dpath)
mr.execute(inputdata, mapper, reducer)
|
[
"josyula008@gmail.com"
] |
josyula008@gmail.com
|
d070ea5b57e7c9f251743e491b019532adcef562
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_indispositions.py
|
fabc644b17f9f752e045cebb4233bf3276caa5da
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
py
|
#calss header
class _INDISPOSITIONS():
def __init__(self,):
self.name = "INDISPOSITIONS"
self.definitions = indisposition
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['indisposition']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
d81f83f024e616bcef55ea18a79615a4a2ba197b
|
9763c89a87ab4d3c4c1aeed3b041f50cb63ca248
|
/pythonProject1/descriptions/descriptions/spiders/descriptions.py
|
2f911f1449246b06bdf8e5a18a4f2fe45b80bd0c
|
[] |
no_license
|
01Skymoon01/Scrapy
|
4423642b5c1ff8619ceab1e780bf9dfabcde933c
|
3726fe6f079bc35b4698eeadacb4ce84f47382b6
|
refs/heads/master
| 2022-11-30T06:38:04.644565
| 2020-08-21T04:21:36
| 2020-08-21T04:21:36
| 289,175,470
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 587
|
py
|
import scrapy
from ..items import DescriptionsItem
class QuoteSpider(scrapy.Spider):
    """Scrapes description text from an IBM X-Force Exchange vulnerability page.

    NOTE(review): despite the spider name 'quotes', the selectors target
    'div.instantresults' / '.description' -- presumably adapted from a
    quotes-scraper template; verify the selectors against the live page.
    """
    name = 'quotes'
    # Single seed URL. Written as a set literal rather than the usual list.
    start_urls = {
        'https://exchange.xforce.ibmcloud.com/vulnerabilities/184190'
    }
    def parse(self, response):
        """Yield one {'title': [...]} dict per matched result block."""
        # title = response.css('title').extract()
        # yield {'titleText': title}
        # NOTE(review): `items` is instantiated but never used or yielded.
        items = DescriptionsItem()
        all_div_quotes = response.css("div.instantresults")
        for quotes in all_div_quotes:
            title = quotes.css(".description::text").extract()
            yield {
                'title': title
            }
|
[
"48031994+01Skymoon01@users.noreply.github.com"
] |
48031994+01Skymoon01@users.noreply.github.com
|
661af640e8e5f910169f00e38340591ad0fbe6a2
|
a0d2a1315b90ba54cf956aa83f72512d5d4a6019
|
/_createValueNetwork.py
|
ba5a68930563dd49dc7d6c8b04aa6a2ca2ea5ead
|
[] |
no_license
|
Silmaril64/Alphago
|
6a86cfff71802dabf33c647ba3d9771d1cad6edb
|
5a10807c4726af4060511bf6c15c7d8d235c6975
|
refs/heads/master
| 2023-03-03T08:59:43.576890
| 2021-02-09T19:43:16
| 2021-02-09T19:43:16
| 326,620,761
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,911
|
py
|
import gzip, os.path
import json
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, BatchNormalization, MaxPooling2D, Reshape
import numpy as np
# Value network: a 14-layer convolutional tower over 9x9 board feature
# planes, flattened into a dense head with a single tanh output in [-1, 1]
# (a scalar position evaluation).
# NOTE(review): data_format='channels_first' combined with
# input_shape=(9,9,11) makes Keras treat the FIRST axis (size 9) as the
# channel axis, while the trailing 11 looks like the feature-plane count --
# confirm the intended tensor layout before training.
model = Sequential([
    Conv2D(192, 5, padding='same', activation = 'relu', data_format='channels_first', input_shape=(9,9,11)),
    Conv2D(192, 3, padding='same', activation = 'relu', data_format='channels_first'),
    Conv2D(192, 3, padding='same', activation = 'relu', data_format='channels_first'),
    Conv2D(192, 3, padding='same', activation = 'relu', data_format='channels_first'),
    Conv2D(192, 3, padding='same', activation = 'relu', data_format='channels_first'),
    Conv2D(192, 3, padding='same', activation = 'relu', data_format='channels_first'),
    Conv2D(192, 3, padding='same', activation = 'relu', data_format='channels_first'),
    Conv2D(192, 3, padding='same', activation = 'relu', data_format='channels_first'),
    Conv2D(192, 3, padding='same', activation = 'relu', data_format='channels_first'),
    Conv2D(192, 3, padding='same', activation = 'relu', data_format='channels_first'),
    Conv2D(192, 3, padding='same', activation = 'relu', data_format='channels_first'),
    Conv2D(192, 3, padding='same', activation = 'relu', data_format='channels_first'),
    Conv2D(192, 3, padding='same', activation = 'relu', data_format='channels_first'),
    # 1x1 convolution collapses the feature maps before the dense head.
    Conv2D(1 , 1, padding='same', activation = 'relu', data_format='channels_first'),
    Flatten(),
    Dense(256, activation='relu'),
    Dense(1, activation='tanh')
])
# Regression setup: MSE loss with Adam, tracking MSE and MAE.
model.compile(loss='mse', optimizer='adam', metrics=['mse', 'mae'])
model.summary()
# Persist the (untrained) architecture for later loading/training.
model.save('./models/valueNetwork')
print("Value Network Created Successfully")
|
[
"csj0oe@gmail.com"
] |
csj0oe@gmail.com
|
f061753cf42f736ed0c97e963eb4432f17a31c35
|
14ac1cd2a1e1d5366ef06e26fadc3d34ff5d5fad
|
/apps/payinfo/urls.py
|
3af1a70532c586eebd4f77a1cce384f0f9595b75
|
[] |
no_license
|
wangdawei0515/django_project
|
1c5b2384eaab112cf65da032ed3d5fccd7e27c70
|
7834237a8c19b4b854e8450e2d64458a17584d36
|
refs/heads/master
| 2020-03-24T02:50:46.708396
| 2018-07-29T14:31:21
| 2018-07-29T14:31:21
| 140,164,573
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 365
|
py
|
#encoding: utf-8
from django.urls import path
from . import views
# URL namespace for reversing, e.g. reverse('payinfo:index').
app_name = 'payinfo'
# Route table: landing page, order creation, async payment notification
# callback, and payinfo download.
urlpatterns = [
    path('',views.index,name='index'),
    path('payinfo_order/',views.payinfo_order,name='payinfo_order'),
    path('notify_view/',views.notify_view,name='notify_view'),
    path('download_payinfo/',views.download_payinfo,name='download_payinfo')
]
|
[
"wangdawei_@outlook.com"
] |
wangdawei_@outlook.com
|
cc20d5ddabeb4b62b1d598fca3a72d742feb2a74
|
202bb7c5e37d3f117315e8bba3bd21e84b48fe6b
|
/alpha/WHSZIWHEN11.py
|
2ee339eed264849e5d11f95226f1fdd2cfbb9e8e
|
[] |
no_license
|
haishuowang/work_whs
|
897cd10a65035191e702811ed650061f7109b9fa
|
b6a17aefc5905ad9c11dba4d745591ed92b1e386
|
refs/heads/master
| 2020-07-03T10:30:14.231858
| 2020-06-09T08:47:18
| 2020-06-09T08:47:18
| 201,877,822
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 74,134
|
py
|
import numpy as np
import pandas as pd
import os
import sys
from itertools import product, permutations, combinations
from datetime import datetime
import time
import matplotlib.pyplot as plt
from collections import OrderedDict
import sys
sys.path.append("/mnt/mfs/LIB_ROOT")
import open_lib.shared_paths.path as pt
from open_lib.shared_tools import send_email
def plot_send_result(pnl_df, sharpe_ratio, subject, text=''):
    """Plot cumulative pnl, save the figure as a PNG, and e-mail it.

    :param pnl_df: daily pnl series (index = dates)
    :param sharpe_ratio: value displayed in the plot legend
    :param subject: e-mail subject, also used as the PNG file name
    :param text: optional e-mail body text
    """
    figure_save_path = os.path.join('/mnt/mfs/dat_whs', 'tmp_figure')
    plt.figure(figsize=[16, 8])
    plt.plot(pnl_df.index, pnl_df.cumsum(), label='sharpe_ratio={}'.format(sharpe_ratio))
    plt.grid()
    plt.legend()
    plt.savefig(os.path.join(figure_save_path, '{}.png'.format(subject)))
    plt.close()
    to = ['whs@yingpei.com']
    filepath = [os.path.join(figure_save_path, '{}.png'.format(subject))]
    send_email.send_email(text, to, filepath, subject)
class BackTest:
    """Backtesting helper toolkit: csv loading, performance metrics (Sharpe,
    drawdown, IC/IR, pot, leverage/fit ratios), rolling statistics and
    position/locking utilities.

    Fixes versus the original:
      * AZ_split_stock called the nonexistent ``str.endwith`` (AttributeError).
      * AZ_Time_cost sampled both timestamps at decoration time, so it never
        measured the wrapped call.
      * AZ_Catch_error used a bare ``except:``; narrowed to ``Exception`` so
        KeyboardInterrupt/SystemExit still propagate.
    """

    @staticmethod
    def AZ_Load_csv(target_path, index_time_type=True):
        """Read a '|'-separated table; parse the index as dates unless disabled."""
        if index_time_type:
            target_df = pd.read_table(target_path, sep='|', index_col=0, low_memory=False, parse_dates=True)
        else:
            target_df = pd.read_table(target_path, sep='|', index_col=0, low_memory=False)
        return target_df

    @staticmethod
    def AZ_Catch_error(func):
        """Decorator: swallow exceptions from func, print and return sys.exc_info()."""
        def _deco(*args, **kwargs):
            try:
                ret = func(*args, **kwargs)
            # Narrowed from a bare `except:` so interrupt/exit still propagate.
            except Exception:
                ret = sys.exc_info()
                print(ret[0], ":", ret[1])
            return ret
        return _deco

    @staticmethod
    def AZ_Time_cost(func):
        """Decorator: print the wall-clock duration of every call.

        Bug fix: the original measured t1/t2 (and printed) at decoration time,
        outside _deco, so the reported cost was meaningless and printed once.
        """
        def _deco(*args, **kwargs):
            t1 = time.time()
            ret = func(*args, **kwargs)
            t2 = time.time()
            print(f'cost_time: {t2 - t1}')
            return ret
        return _deco

    @staticmethod
    def AZ_Sharpe_y(pnl_df):
        """Annualized (250-day) Sharpe ratio of a daily pnl series, 4 decimals."""
        return round((np.sqrt(250) * pnl_df.mean()) / pnl_df.std(), 4)

    @staticmethod
    def AZ_MaxDrawdown(asset_df):
        """Drawdown series: asset minus its running maximum (values <= 0)."""
        return asset_df - np.maximum.accumulate(asset_df)

    def AZ_Col_zscore(self, df, n, cap=None, min_periods=1):
        """Column-wise rolling z-score over window n, optionally capped at +/-cap."""
        df_mean = self.AZ_Rolling_mean(df, n, min_periods=min_periods)
        df_std = df.rolling(window=n, min_periods=min_periods).std()
        target = (df - df_mean) / df_std
        if cap is not None:
            target[target > cap] = cap
            target[target < -cap] = -cap
        return target

    @staticmethod
    def AZ_Row_zscore(df, cap=None):
        """Cross-sectional (row-wise) z-score, optionally capped at +/-cap."""
        df_mean = df.mean(axis=1)
        df_std = df.std(axis=1)
        target = df.sub(df_mean, axis=0).div(df_std, axis=0)
        if cap is not None:
            target[target > cap] = cap
            target[target < -cap] = -cap
        return target

    @staticmethod
    def AZ_Rolling(df, n, min_periods=1):
        """Shorthand for df.rolling(window=n, min_periods=min_periods)."""
        return df.rolling(window=n, min_periods=min_periods)

    @staticmethod
    def AZ_Rolling_mean(df, n, min_periods=1):
        """Rolling mean; the first n-1 rows are forced to NaN despite min_periods."""
        target = df.rolling(window=n, min_periods=min_periods).mean()
        target.iloc[:n - 1] = np.nan
        return target

    @staticmethod
    def AZ_Rolling_sharpe(pnl_df, roll_year=1, year_len=250, min_periods=1, cut_point_list=None, output=False):
        """Rolling annualized Sharpe; returns its quantiles at cut_point_list
        (and the rolling series itself when output=True)."""
        if cut_point_list is None:
            cut_point_list = [0.05, 0.33, 0.5, 0.66, 0.95]
        rolling_sharpe = pnl_df.rolling(int(roll_year * year_len), min_periods=min_periods) \
            .apply(lambda x: np.sqrt(year_len) * x.mean() / x.std(), raw=True)
        # Force a full first window regardless of min_periods.
        rolling_sharpe.iloc[:int(roll_year * year_len) - 1] = np.nan
        cut_sharpe = rolling_sharpe.quantile(cut_point_list)
        if output:
            return rolling_sharpe, cut_sharpe.round(4)
        else:
            return cut_sharpe.round(4)

    @staticmethod
    def AZ_Pot(pos_df, asset_last):
        """pnl per unit of turnover, scaled by 10000 -- gauges trading-cost impact.

        :param pos_df: position information
        :param asset_last: final cumulative pnl
        :return: rounded pot value (0 when there was no trading)
        """
        trade_times = pos_df.diff().abs().sum().sum()
        if trade_times == 0:
            return 0
        else:
            pot = asset_last / trade_times * 10000
            return round(pot, 2)

    @staticmethod
    def AZ_Normal_IC(signal, pct_n, min_valids=None, lag=0):
        """Daily cross-sectional correlation (IC) between lagged signal and returns.

        Rows with fewer than min_valids valid signals are set to NaN.
        """
        signal = signal.shift(lag)
        signal = signal.replace(0, np.nan)
        corr_df = signal.corrwith(pct_n, axis=1).dropna()
        if min_valids is not None:
            signal_valid = signal.count(axis=1)
            signal_valid[signal_valid < min_valids] = np.nan
            signal_valid[signal_valid >= min_valids] = 1
            corr_signal = corr_df * signal_valid
        else:
            corr_signal = corr_df
        return round(corr_signal, 6)

    def AZ_Normal_IR(self, signal, pct_n, min_valids=None, lag=0):
        """Information ratio (mean IC / std IC) plus the daily IC series."""
        corr_signal = self.AZ_Normal_IC(signal, pct_n, min_valids, lag)
        ic_mean = corr_signal.mean()
        ic_std = corr_signal.std()
        ir = ic_mean / ic_std
        return ir, corr_signal

    @staticmethod
    def AZ_Leverage_ratio(asset_df):
        """Mean 250-day return divided by the negative worst (or best, when
        losing) 20-day return.

        :param asset_df: cumulative asset (pnl) series
        """
        asset_20 = asset_df - asset_df.shift(20)
        asset_250 = asset_df - asset_df.shift(250)
        if asset_250.mean() > 0:
            return asset_250.mean() / (-asset_20.min())
        else:
            return asset_250.mean() / (-asset_20.max())

    @staticmethod
    def AZ_Locked_date_deal(position_df, locked_df):
        """Freeze positions on days when trading is locked (suspension,
        limit up/down).

        :param position_df: raw position information
        :param locked_df: tradable days marked 1, untradable days NaN
        :return: adjusted positions, forward-filled over locked days
        """
        position_df_adj = (position_df * locked_df).dropna(how='all', axis=0) \
            .fillna(method='ffill')
        return position_df_adj

    @staticmethod
    def AZ_Path_create(target_path):
        """Create the directory (and parents) if it does not already exist."""
        if not os.path.exists(target_path):
            os.makedirs(target_path)

    @staticmethod
    def AZ_split_stock(stock_list):
        """Select the A-share codes from stock_list.

        Bug fix: the original called the nonexistent ``str.endwith``, which
        raised AttributeError on every invocation.
        """
        eqa = [x for x in stock_list if (x.startswith('0') or x.startswith('3')) and x.endswith('SZ')
               or x.startswith('6') and x.endswith('SH')]
        return eqa

    @staticmethod
    def AZ_add_stock_suffix(stock_list):
        """Append the exchange suffix to bare numeric A-share codes.

        e.g. '000001' -> '000001.SZ', '600000' -> '600000.SH'
        """
        return list(map(lambda x: x + '.SH' if x.startswith('6') else x + '.SZ', stock_list))

    @staticmethod
    def AZ_Delete_file(target_path, except_list=None):
        """Delete every file in target_path except those named in except_list."""
        if except_list is None:
            except_list = []
        assert type(except_list) == list
        file_list = os.listdir(target_path)
        file_list = list(set(file_list) - set(except_list))
        for file_name in sorted(file_list):
            os.remove(os.path.join(target_path, file_name))

    @staticmethod
    def AZ_turnover(pos_df):
        """Total traded amount divided by total position size (0 when flat)."""
        diff_sum = pos_df.diff().abs().sum().sum()
        pos_sum = pos_df.abs().sum().sum()
        if pos_sum == 0:
            return .0
        return diff_sum / float(pos_sum)

    @staticmethod
    def AZ_annual_return(pos_df, return_df):
        """Annualized return of the positions: 250 * total pnl / total exposure."""
        temp_pnl = (pos_df * return_df).sum().sum()
        temp_pos = pos_df.abs().sum().sum()
        if temp_pos == 0:
            return .0
        else:
            return temp_pnl * 250.0 / temp_pos

    def AZ_fit_ratio(self, pos_df, return_df):
        """Sharpe * sqrt(|annualized return| / turnover); 0 when turnover is 0.

        :param pos_df: daily positions
        :param return_df: daily returns
        """
        sharp_ratio = self.AZ_Sharpe_y((pos_df * return_df).sum(axis=1))
        ann_return = self.AZ_annual_return(pos_df, return_df)
        turnover = self.AZ_turnover(pos_df)
        if turnover == 0:
            return .0
        else:
            return round(sharp_ratio * np.sqrt(abs(ann_return) / turnover), 2)

    def AZ_fit_ratio_rolling(self, pos_df, pnl_df, roll_year=1, year_len=250, min_periods=1, cut_point_list=None,
                             output=False):
        """Quantiles of the rolling fit ratio: rolling Sharpe scaled by
        sqrt(|rolling return| / rolling turnover); inf values dropped."""
        if cut_point_list is None:
            cut_point_list = [0.05, 0.33, 0.5, 0.66, 0.95]
        rolling_sharpe, cut_sharpe = self.AZ_Rolling_sharpe(pnl_df, roll_year=roll_year, year_len=year_len,
                                                            min_periods=min_periods, cut_point_list=cut_point_list,
                                                            output=True)
        rolling_return = pnl_df.rolling(int(roll_year * year_len), min_periods=min_periods).apply(
            lambda x: 250.0 * x.sum().sum())
        rolling_diff_pos = pos_df.diff().abs().sum(axis=1).rolling(int(roll_year * year_len),
                                                                   min_periods=min_periods).apply(
            lambda x: x.sum().sum())
        rolling_return.iloc[:int(roll_year * year_len) - 1] = np.nan
        rolling_diff_pos.iloc[:int(roll_year * year_len) - 1] = np.nan
        rolling_fit_ratio = rolling_sharpe * np.sqrt(abs(rolling_return) / rolling_diff_pos)
        rolling_fit_ratio = rolling_fit_ratio.replace(np.inf, np.nan)
        rolling_fit_ratio = rolling_fit_ratio.replace(-np.inf, np.nan)
        cut_fit = rolling_fit_ratio.quantile(cut_point_list)
        return cut_fit.round(4)

    @staticmethod
    def AZ_VAR(pos_df, return_df, confidence_level, backward_len=500, forwward_len=250):
        """Historical VaR quantiles of the positions over a window of
        backward_len past plus forwward_len future days per trade date.

        NOTE(review): parameter name ``forwward_len`` (sic) kept for
        call-compatibility with existing keyword callers.
        """
        tradeDayList = pos_df.index[:-forwward_len]
        col01 = return_df.columns[0]
        varList = []
        cut_point_list = [0.05, 0.33, 0.5, 0.66, 0.95]
        if len(tradeDayList) == 0:
            print('数据量太少')
        else:
            for tradeDay in tradeDayList:
                tempPos = pos_df.loc[tradeDay, :]
                dayIndex = list(return_df.loc[:tradeDay, col01].index[-backward_len:]) + list(
                    return_df.loc[tradeDay:, col01].index[:forwward_len])
                return_df_c = return_df[list(tempPos.index)]
                historyReturn = list(return_df_c.mul(tempPos, axis=1).loc[dayIndex[0]:dayIndex[-1], :].sum(axis=1))
                historyReturn.sort()
                varList.append(historyReturn[int(len(historyReturn) * confidence_level)])
        var = pd.DataFrame({'var': varList}, index=tradeDayList)
        var = var.dropna()
        var_fit = var.quantile(cut_point_list)
        return list(var_fit['var'])
bt = BackTest()
def filter_all(cut_date, pos_df_daily, pct_n, if_return_pnl=False, if_only_long=False):
    """Score a daily position frame against returns, split at cut_date.

    Computes in-sample statistics (rolling-Sharpe quantiles, pot, leverage
    ratio, fit ratio, mean IC) and the out-of-sample Sharpe pass/fail.

    :param cut_date: boundary between in-sample and out-of-sample periods
    :param pos_df_daily: daily positions (dates x stocks)
    :param pct_n: daily returns aligned with pos_df_daily
    :param if_return_pnl: when True, also return the full pnl series
    :param if_only_long: evaluate only the long-side acceptance condition
    """
    pnl_df = (pos_df_daily * pct_n).sum(axis=1)
    pnl_df = pnl_df.replace(np.nan, 0)
    # pnl_df = pd.Series(pnl_df)
    # In-sample performance
    return_in = pct_n[pct_n.index < cut_date]
    pnl_df_in = pnl_df[pnl_df.index < cut_date]
    asset_df_in = pnl_df_in.cumsum()
    last_asset_in = asset_df_in.iloc[-1]
    pos_df_daily_in = pos_df_daily[pos_df_daily.index < cut_date]
    pot_in = AZ_Pot(pos_df_daily_in, last_asset_in)
    leve_ratio = AZ_Leverage_ratio(asset_df_in)
    # Negative leverage ratio is treated as "no meaningful drawdown" -> 100.
    if leve_ratio < 0:
        leve_ratio = 100
    sharpe_q_in_df = bt.AZ_Rolling_sharpe(pnl_df_in, roll_year=1, year_len=250, min_periods=1,
                                          cut_point_list=[0.3, 0.5, 0.7], output=False)
    sp_in = bt.AZ_Sharpe_y(pnl_df_in)
    fit_ratio = bt.AZ_fit_ratio(pos_df_daily_in, return_in)
    ic = round(bt.AZ_Normal_IC(pos_df_daily_in, pct_n, min_valids=None, lag=0).mean(), 6)
    # u/m/d = 30%/50%/70% quantiles of the rolling in-sample Sharpe.
    sharpe_q_in_df_u, sharpe_q_in_df_m, sharpe_q_in_df_d = sharpe_q_in_df.values
    in_condition_u = sharpe_q_in_df_u > 0.9 and leve_ratio > 1
    in_condition_d = sharpe_q_in_df_d < -0.9 and leve_ratio > 1
    # Two-sided vs long-only acceptance
    if if_only_long:
        in_condition = in_condition_u
    else:
        in_condition = in_condition_u | in_condition_d
    # Trade direction follows the sign of the median rolling Sharpe.
    if sharpe_q_in_df_m > 0:
        way = 1
    else:
        way = -1
    # Out-of-sample performance
    pnl_df_out = pnl_df[pnl_df.index >= cut_date]
    out_condition, sharpe_q_out = out_sample_perf_c(pnl_df_out, way=way)
    if if_return_pnl:
        return in_condition, out_condition, ic, sharpe_q_in_df_u, sharpe_q_in_df_m, sharpe_q_in_df_d, pot_in, \
               fit_ratio, leve_ratio, sp_in, sharpe_q_out, pnl_df
    else:
        return in_condition, out_condition, ic, sharpe_q_in_df_u, sharpe_q_in_df_m, sharpe_q_in_df_d, pot_in, \
               fit_ratio, leve_ratio, sp_in, sharpe_q_out
def mul_fun(a, b):
    """Sign-aware product: (long side of a * long side of b) minus
    (short side of a * short side of b)."""
    long_leg = a.where(a > 0, 0) * b.where(b > 0, 0)
    short_leg = a.where(a < 0, 0) * b.where(b < 0, 0)
    return long_leg - short_leg
def sub_fun(a, b):
    """Element-wise difference a - b."""
    return a - b
def add_fun(a, b):
    """Element-wise sum a + b."""
    return a + b
def AZ_Cut_window(df, begin_date, end_date=None, column=None):
    """Slice rows where the index (or the given column) is strictly inside
    (begin_date, end_date); the upper bound is skipped when end_date is None."""
    key = df.index if column is None else df[column]
    mask = key > begin_date
    if end_date is not None:
        mask = mask & (key < end_date)
    return df[mask]
def AZ_Leverage_ratio(asset_df):
    """Mean 250-day return divided by the negative worst (or, when losing,
    best) 20-day return, rounded to 2 decimals.

    :param asset_df: cumulative asset (pnl) series
    """
    gain_1m = asset_df - asset_df.shift(20)
    gain_1y = asset_df - asset_df.shift(250)
    annual_mean = gain_1y.mean()
    denominator = -gain_1m.min() if annual_mean > 0 else -gain_1m.max()
    return round(annual_mean / denominator, 2)
def pos_daily_fun(df, n=5):
    """Rolling n-day sum (partial windows allowed at the start)."""
    return df.rolling(n, min_periods=1).sum()
def AZ_Pot(pos_df_daily, last_asset):
    """pnl per unit of turnover, scaled by 10000 (0 when there was no trading)."""
    turnover = pos_df_daily.diff().abs().sum().sum()
    if turnover == 0:
        return 0
    return round(last_asset / turnover * 10000, 2)
def out_sample_perf_c(pnl_df_out, way=1):
    """Judge out-of-sample performance by the direction-adjusted Sharpe ratio.

    :param pnl_df_out: out-of-sample pnl series
    :param way: trade direction, 1 (long signal) or -1 (flipped)
    :return: (passes 0.8 Sharpe threshold, direction-adjusted Sharpe)
    """
    # Flip the pnl sign for short-way signals before computing Sharpe.
    oriented_pnl = pnl_df_out if way == 1 else -pnl_df_out
    sharpe_out = bt.AZ_Sharpe_y(oriented_pnl)
    out_condition = sharpe_out > 0.8
    return out_condition, round(sharpe_out * way, 2)
def create_fun_set_2(fun_set):
    """Build every two-step composite of the operators in fun_set.

    For each ordered pair (f, g) this generates, via exec, a function
    ``f_g_fun(a, b, c) = g(f(a, b), c)`` named from the operators'
    __name__ prefixes, and returns them as a list.
    """
    mix_fun_set = []
    for fun_1, fun_2 in product(fun_set, repeat=2):
        # Source template for the composed operator; {0}/{1} are the name
        # prefixes (e.g. 'add', 'mul'), resolved from globals at call time.
        exe_str_1 = """def {0}_{1}_fun(a, b, c):
            mix_1 = {0}_fun(a, b)
            mix_2 = {1}_fun(mix_1, c)
            return mix_2
            """.format(fun_1.__name__.split('_')[0], fun_2.__name__.split('_')[0])
        exec(compile(exe_str_1, '', 'exec'))
        # Second exec appends the just-defined function; both execs share this
        # frame's locals(), which is what makes the name visible here.
        exec('mix_fun_set += [{0}_{1}_fun]'.format(fun_1.__name__.split('_')[0], fun_2.__name__.split('_')[0]))
    return mix_fun_set
def create_fun_set_2_(fun_set):
    """Same as create_fun_set_2, but returns a dict keyed by the generated
    function name instead of a list."""
    mix_fun_set = {}
    for fun_1, fun_2 in product(fun_set, repeat=2):
        # Template for g(f(a, b), c), named from the operators' name prefixes.
        exe_str_1 = """def {0}_{1}_fun(a, b, c):
            mix_1 = {0}_fun(a, b)
            mix_2 = {1}_fun(mix_1, c)
            return mix_2
            """.format(fun_1.__name__.split('_')[0], fun_2.__name__.split('_')[0])
        exec(compile(exe_str_1, '', 'exec'))
        # Register under its generated name; relies on the execs sharing locals().
        exec('mix_fun_set[\'{0}_{1}_fun\'] = {0}_{1}_fun'
             .format(fun_1.__name__.split('_')[0], fun_2.__name__.split('_')[0]))
    return mix_fun_set
def create_fun_set_2_crt():
    """Composites with the second step fixed to mul_fun:
    mul(f(a, b), c) for f in (add, sub, mul); returns a list."""
    fun_2 = mul_fun
    mix_fun_set = []
    for fun_1 in [add_fun, sub_fun, mul_fun]:
        # Template for mul(f(a, b), c), named from the operators' prefixes.
        exe_str_1 = """def {0}_{1}_fun(a, b, c):
            mix_1 = {0}_fun(a, b)
            mix_2 = {1}_fun(mix_1, c)
            return mix_2
            """.format(fun_1.__name__.split('_')[0], fun_2.__name__.split('_')[0])
        exec(compile(exe_str_1, '', 'exec'))
        # Append the just-defined function; relies on execs sharing locals().
        exec('mix_fun_set += [{0}_{1}_fun]'.format(fun_1.__name__.split('_')[0], fun_2.__name__.split('_')[0]))
    return mix_fun_set
def create_fun_set_2_crt_():
    """Same as create_fun_set_2_crt, but returns a dict keyed by the
    generated function name instead of a list."""
    fun_2 = mul_fun
    mix_fun_set = dict()
    for fun_1 in [add_fun, sub_fun, mul_fun]:
        # Template for mul(f(a, b), c), named from the operators' prefixes.
        exe_str_1 = """def {0}_{1}_fun(a, b, c):
            mix_1 = {0}_fun(a, b)
            mix_2 = {1}_fun(mix_1, c)
            return mix_2
            """.format(fun_1.__name__.split('_')[0], fun_2.__name__.split('_')[0])
        exec(compile(exe_str_1, '', 'exec'))
        # Register under its generated name; relies on execs sharing locals().
        exec('mix_fun_set[\'{0}_{1}_fun\'] = {0}_{1}_fun'
             .format(fun_1.__name__.split('_')[0], fun_2.__name__.split('_')[0]))
    return mix_fun_set
class FactorTest:
def __init__(self, root_path, if_save, if_new_program, begin_date, cut_date, end_date, time_para_dict, sector_name,
hold_time, lag, return_file, if_hedge, if_only_long, if_weight=0.5, ic_weight=0.5,
para_adj_set_list=None):
self.root_path = root_path
self.if_save = if_save
self.if_new_program = if_new_program
self.begin_date = begin_date
self.cut_date = cut_date
self.end_date = end_date
self.time_para_dict = time_para_dict
self.sector_name = sector_name
self.hold_time = hold_time
self.lag = lag
self.return_file = return_file
self.if_hedge = if_hedge
self.if_only_long = if_only_long
self.if_weight = if_weight
self.ic_weight = ic_weight
if para_adj_set_list is None:
self.para_adj_set_list = [
{'pot_in_num': 50, 'leve_ratio_num': 2, 'sp_in': 1.5, 'ic_num': 0.0, 'fit_ratio': 2},
{'pot_in_num': 40, 'leve_ratio_num': 2, 'sp_in': 1.5, 'ic_num': 0.0, 'fit_ratio': 2},
{'pot_in_num': 50, 'leve_ratio_num': 2, 'sp_in': 1, 'ic_num': 0.0, 'fit_ratio': 1},
{'pot_in_num': 50, 'leve_ratio_num': 1, 'sp_in': 1, 'ic_num': 0.0, 'fit_ratio': 2},
{'pot_in_num': 50, 'leve_ratio_num': 1, 'sp_in': 1, 'ic_num': 0.0, 'fit_ratio': 1},
{'pot_in_num': 40, 'leve_ratio_num': 1, 'sp_in': 1, 'ic_num': 0.0, 'fit_ratio': 1}]
return_choose = self.load_return_data()
self.xinx = return_choose.index
sector_df = self.load_sector_data()
self.xnms = sector_df.columns
return_choose = return_choose.reindex(columns=self.xnms)
self.sector_df = sector_df.reindex(index=self.xinx)
# print('Loaded sector DataFrame!')
if if_hedge:
if ic_weight + if_weight != 1:
exit(-1)
else:
if_weight = 0
ic_weight = 0
index_df_1 = self.load_index_data('000300').fillna(0)
# index_weight_1 = self.load_index_weight_data('000300')
index_df_2 = self.load_index_data('000905').fillna(0)
# index_weight_2 = self.load_index_weight_data('000905')
#
# weight_df = if_weight * index_weight_1 + ic_weight * index_weight_2
hedge_df = if_weight * index_df_1 + ic_weight * index_df_2
self.return_choose = return_choose.sub(hedge_df, axis=0)
# print('Loaded return DataFrame!')
suspendday_df, limit_buy_sell_df = self.load_locked_data()
limit_buy_sell_df_c = limit_buy_sell_df.shift(-1)
limit_buy_sell_df_c.iloc[-1] = 1
suspendday_df_c = suspendday_df.shift(-1)
suspendday_df_c.iloc[-1] = 1
self.suspendday_df_c = suspendday_df_c
self.limit_buy_sell_df_c = limit_buy_sell_df_c
# print('Loaded suspendday_df and limit_buy_sell DataFrame!')
def reindex_fun(self, df):
return df.reindex(index=self.xinx, columns=self.xnms)
@staticmethod
def create_log_save_path(target_path):
top_path = os.path.split(target_path)[0]
if not os.path.exists(top_path):
os.mkdir(top_path)
if not os.path.exists(target_path):
os.mknod(target_path)
@staticmethod
def row_extre(raw_df, sector_df, percent):
raw_df = raw_df * sector_df
target_df = raw_df.rank(axis=1, pct=True)
target_df[target_df >= 1 - percent] = 1
target_df[target_df <= percent] = -1
target_df[(target_df > percent) & (target_df < 1 - percent)] = 0
return target_df
@staticmethod
def pos_daily_fun(df, n=5):
return df.rolling(window=n, min_periods=1).sum()
def check_factor(self, name_list, file_name):
load_path = os.path.join('/mnt/mfs/dat_whs/data/new_factor_data/' + self.sector_name)
exist_factor = set([x[:-4] for x in os.listdir(load_path)])
print()
use_factor = set(name_list)
a = use_factor - exist_factor
if len(a) != 0:
print('factor not enough!')
print(a)
print(len(a))
send_email.send_email(f'{file_name} factor not enough!', ['whs@yingpei.com'], [], 'Factor Test Warning!')
@staticmethod
def create_all_para(tech_name_list, funda_name_list):
target_list_1 = []
for tech_name in tech_name_list:
for value in combinations(funda_name_list, 2):
target_list_1 += [[tech_name] + list(value)]
target_list_2 = []
for funda_name in funda_name_list:
for value in combinations(tech_name_list, 2):
target_list_2 += [[funda_name] + list(value)]
target_list = target_list_1 + target_list_2
return target_list
# 获取剔除新股的矩阵
def get_new_stock_info(self, xnms, xinx):
new_stock_data = bt.AZ_Load_csv(os.path.join(self.root_path, 'EM_Tab01/CDSY_SECUCODE/LISTSTATE.csv'))
new_stock_data.fillna(method='ffill', inplace=True)
# 获取交易日信息
return_df = bt.AZ_Load_csv(os.path.join(self.root_path, 'EM_Funda/DERIVED_14/aadj_r.csv')).astype(float)
trade_time = return_df.index
new_stock_data = new_stock_data.reindex(index=trade_time).fillna(method='ffill')
target_df = new_stock_data.shift(40).notnull().astype(int)
target_df = target_df.reindex(columns=xnms, index=xinx)
return target_df
# 获取剔除st股票的矩阵
def get_st_stock_info(self, xnms, xinx):
data = bt.AZ_Load_csv(os.path.join(self.root_path, 'EM_Tab01/CDSY_CHANGEINFO/CHANGEA.csv'))
data = data.reindex(columns=xnms, index=xinx)
data.fillna(method='ffill', inplace=True)
data = data.astype(str)
target_df = data.applymap(lambda x: 0 if 'ST' in x or 'PT' in x else 1)
return target_df
def load_return_data(self):
return_choose = bt.AZ_Load_csv(os.path.join(self.root_path, 'EM_Funda/DERIVED_14/aadj_r.csv'))
return_choose = return_choose[(return_choose.index >= self.begin_date) & (return_choose.index < self.end_date)]
return return_choose
# 获取sector data
def load_sector_data(self):
market_top_n = bt.AZ_Load_csv(os.path.join(self.root_path, 'EM_Funda/DERIVED_10/' + self.sector_name + '.csv'))
market_top_n = market_top_n.reindex(index=self.xinx)
market_top_n.dropna(how='all', axis='columns', inplace=True)
xnms = market_top_n.columns
xinx = market_top_n.index
new_stock_df = self.get_new_stock_info(xnms, xinx)
st_stock_df = self.get_st_stock_info(xnms, xinx)
sector_df = market_top_n * new_stock_df * st_stock_df
sector_df.replace(0, np.nan, inplace=True)
return sector_df
def load_index_weight_data(self, index_name):
index_info = bt.AZ_Load_csv(self.root_path + f'/EM_Funda/IDEX_YS_WEIGHT_A/SECURITYNAME_{index_name}.csv')
index_info = self.reindex_fun(index_info)
index_mask = (index_info.notnull() * 1).replace(0, np.nan)
mkt_cap = bt.AZ_Load_csv(os.path.join(self.root_path, 'EM_Funda/LICO_YS_STOCKVALUE/AmarketCapExStri.csv'))
mkt_roll = mkt_cap.rolling(250, min_periods=0).mean()
mkt_roll = self.reindex_fun(mkt_roll)
mkt_roll_qrt = np.sqrt(mkt_roll)
mkt_roll_qrt_index = mkt_roll_qrt * index_mask
index_weight = mkt_roll_qrt_index.div(mkt_roll_qrt_index.sum(axis=1), axis=0)
return index_weight
# 涨跌停都不可交易
def load_locked_data(self):
    """Return (suspendday_df, limit_buy_sell_df): 1/NaN masks where NaN marks
    a day the name cannot be traded (suspension, or a limit move)."""
    suspend_raw = bt.AZ_Load_csv(
        os.path.join(self.root_path, 'EM_Funda/TRAD_TD_SUSPENDDAY/SUSPENDREASON.csv'))
    # A null suspension reason means "not suspended" -> 1 (tradable).
    suspendday_df = suspend_raw.isnull().astype(int)
    suspendday_df = suspendday_df.reindex(columns=self.xnms, index=self.xinx, fill_value=True)
    suspendday_df.replace(0, np.nan, inplace=True)
    daily_ret = bt.AZ_Load_csv(os.path.join(self.root_path, 'EM_Funda/DERIVED_14/aadj_r.csv')).astype(float)
    # |return| >= 9.5% is taken as a limit-up/limit-down move.
    limit_buy_sell_df = (daily_ret.abs() < 0.095).astype(int)
    limit_buy_sell_df = limit_buy_sell_df.reindex(columns=self.xnms, index=self.xinx, fill_value=1)
    limit_buy_sell_df.replace(0, np.nan, inplace=True)
    return suspendday_df, limit_buy_sell_df
# Load index return data
def load_index_data(self, index_name):
    """Daily change of one index, converted from percent to a fraction."""
    chg_table = bt.AZ_Load_csv(os.path.join(self.root_path, 'EM_Funda/INDEX_TD_DAILYSYS/CHG.csv'))
    index_chg = chg_table[index_name].reindex(index=self.xinx)
    return index_chg * 0.01
# Load a subset of pre-computed factors
def load_part_factor(self, sector_name, xnms, xinx, file_list):
    """Load the named pre-computed factors, each aligned to (xinx, xnms)
    with missing values replaced by 0; order follows file_list."""
    load_path = os.path.join('/mnt/mfs/dat_whs/data/new_factor_data/' + sector_name)
    factor_set = OrderedDict(
        (file_name,
         pd.read_pickle(os.path.join(load_path, file_name + '.pkl'))
         .reindex(columns=xnms, index=xinx).fillna(0))
        for file_name in file_list)
    return factor_set
# Load a single pre-computed factor
def load_factor(self, file_name):
    """Load one pre-computed factor as a single-entry OrderedDict keyed by
    file_name, aligned to (self.xinx, self.xnms) with NaN -> 0."""
    pkl_path = os.path.join('/mnt/mfs/dat_whs/data/new_factor_data/' + self.sector_name,
                            file_name + '.pkl')
    aligned = pd.read_pickle(pkl_path).reindex(columns=self.xnms, index=self.xinx).fillna(0)
    return OrderedDict([(file_name, aligned)])
def deal_mix_factor(self, mix_factor):
    """Turn a raw mixed factor into a daily position frame.

    Pipeline: optional long-only filter -> zero-fill -> entry tradability
    masks -> L1 row-normalisation with a +/-5% per-name cap -> hold-time
    smoothing via pos_daily_fun -> exit tradability masks with forward fill.
    The statement order matters: the cap is applied after normalisation,
    and the final ffill keeps yesterday's position on untradable exit days.
    """
    if self.if_only_long:
        mix_factor = mix_factor[mix_factor > 0]
    # Orders on signal dates; NaN means "no signal" and becomes zero weight.
    order_df = mix_factor.replace(np.nan, 0)
    # Remove entries that cannot be traded at entry (outside sector,
    # limit-up/down, or suspended).
    order_df = order_df * self.sector_df * self.limit_buy_sell_df_c * self.suspendday_df_c
    # Normalise each day to unit gross exposure; empty rows become NaN.
    order_df = order_df.div(order_df.abs().sum(axis=1).replace(0, np.nan), axis=0)
    # Cap single-name weight at 5% either side.
    order_df[order_df > 0.05] = 0.05
    order_df[order_df < -0.05] = -0.05
    daily_pos = pos_daily_fun(order_df, n=self.hold_time)
    daily_pos.fillna(0, inplace=True)
    # Remove exits that cannot be traded; ffill then carries the previous
    # day's position through those locked days.
    daily_pos = daily_pos * self.limit_buy_sell_df_c * self.suspendday_df_c
    daily_pos.fillna(method='ffill', inplace=True)
    return daily_pos
def save_load_control(self, tech_name_list, funda_name_list, suffix_name, file_name):
    """Resolve the parameter grid and output paths for a test run.

    New runs (self.if_new_program) build the full tech x funda grid, derive
    a timestamped file name, and optionally persist the grid; resumed runs
    reload the saved grid and drop combinations already present in the log.

    Returns (para_ready_df, log_save_file, result_save_file, total_para_num).
    """
    # Path control for parameter saving and loading.
    result_save_path = '/mnt/mfs/dat_whs/result'
    if self.if_new_program:
        now_time = datetime.now().strftime('%Y%m%d_%H%M')
        if self.if_only_long:
            file_name = '{}_{}_{}_hold_{}_{}_{}_long.txt' \
                .format(self.sector_name, self.if_hedge, now_time, self.hold_time, self.return_file, suffix_name)
        else:
            file_name = '{}_{}_{}_hold_{}_{}_{}.txt' \
                .format(self.sector_name, self.if_hedge, now_time, self.hold_time, self.return_file, suffix_name)
        log_save_file = os.path.join(result_save_path, 'log', file_name)
        result_save_file = os.path.join(result_save_path, 'result', file_name)
        para_save_file = os.path.join(result_save_path, 'para', file_name)
        para_dict = dict()
        para_ready_df = pd.DataFrame(list(self.create_all_para(tech_name_list, funda_name_list)))
        total_para_num = len(para_ready_df)
        if self.if_save:
            self.create_log_save_path(log_save_file)
            self.create_log_save_path(result_save_file)
            self.create_log_save_path(para_save_file)
            para_dict['para_ready_df'] = para_ready_df
            para_dict['tech_name_list'] = tech_name_list
            para_dict['funda_name_list'] = funda_name_list
            pd.to_pickle(para_dict, para_save_file)
    else:
        log_save_file = os.path.join(result_save_path, 'log', file_name)
        result_save_file = os.path.join(result_save_path, 'result', file_name)
        para_save_file = os.path.join(result_save_path, 'para', file_name)
        para_tested_df = pd.read_table(log_save_file, sep='|', header=None, index_col=0)
        # NOTE(review): the pickle written above holds a plain dict, yet it is
        # used here as if it were a DataFrame (.index / .loc) — verify the
        # saved object's type (possibly should load para_dict['para_ready_df']).
        para_all_df = pd.read_pickle(para_save_file)
        total_para_num = len(para_all_df)
        para_ready_df = para_all_df.loc[sorted(list(set(para_all_df.index) - set(para_tested_df.index)))]
    print(file_name)
    print(f'para_num:{len(para_ready_df)}')
    return para_ready_df, log_save_file, result_save_file, total_para_num
@staticmethod
def create_all_para_(change_list, ratio_list, tech_list):
target_list = list(product(change_list, ratio_list, tech_list))
return target_list
def save_load_control_(self, change_list, ratio_list, tech_list, suffix_name, file_name):
    """Variant of save_load_control for the (change, ratio, tech) grid.

    Same flow: new runs build and optionally persist the full grid under a
    timestamped name; resumed runs reload it and drop already-tested rows.
    Returns (para_ready_df, log_save_file, result_save_file, total_para_num).
    """
    # Path control for parameter saving and loading.
    result_save_path = '/mnt/mfs/dat_whs/result'
    if self.if_new_program:
        now_time = datetime.now().strftime('%Y%m%d_%H%M')
        if self.if_only_long:
            file_name = '{}_{}_{}_hold_{}_{}_{}_long.txt' \
                .format(self.sector_name, self.if_hedge, now_time, self.hold_time, self.return_file, suffix_name)
        else:
            file_name = '{}_{}_{}_hold_{}_{}_{}.txt' \
                .format(self.sector_name, self.if_hedge, now_time, self.hold_time, self.return_file, suffix_name)
        log_save_file = os.path.join(result_save_path, 'log', file_name)
        result_save_file = os.path.join(result_save_path, 'result', file_name)
        para_save_file = os.path.join(result_save_path, 'para', file_name)
        para_dict = dict()
        para_ready_df = pd.DataFrame(list(self.create_all_para_(change_list, ratio_list, tech_list)))
        total_para_num = len(para_ready_df)
        if self.if_save:
            self.create_log_save_path(log_save_file)
            self.create_log_save_path(result_save_file)
            self.create_log_save_path(para_save_file)
            para_dict['para_ready_df'] = para_ready_df
            para_dict['change_list'] = change_list
            para_dict['ratio_list'] = ratio_list
            para_dict['tech_list'] = tech_list
            pd.to_pickle(para_dict, para_save_file)
    else:
        log_save_file = os.path.join(result_save_path, 'log', file_name)
        result_save_file = os.path.join(result_save_path, 'result', file_name)
        para_save_file = os.path.join(result_save_path, 'para', file_name)
        para_tested_df = pd.read_table(log_save_file, sep='|', header=None, index_col=0)
        # NOTE(review): as in save_load_control, the saved pickle is a dict
        # but is consumed as a DataFrame here — confirm the intended type.
        para_all_df = pd.read_pickle(para_save_file)
        total_para_num = len(para_all_df)
        para_ready_df = para_all_df.loc[sorted(list(set(para_all_df.index) - set(para_tested_df.index)))]
    print(file_name)
    print(f'para_num:{len(para_ready_df)}')
    return para_ready_df, log_save_file, result_save_file, total_para_num
class FactorTestSector(FactorTest):
    """FactorTest specialised for sector universes.

    Adds the per-source factor loaders (tech / daily / jerry / whs / remy)
    and the single-factor and multi-factor test drivers. The loaders share
    two private helpers to remove the duplication the originals had between
    load_jerry_factor / load_remy_factor and the repeated long-only filter.
    """

    def __init__(self, *args):
        super(FactorTestSector, self).__init__(*args)

    def _long_only_filter(self, target_df):
        # In long-only mode negative signals are dropped entirely.
        if self.if_only_long:
            target_df = target_df[target_df > 0]
        return target_df

    def _row_extre_if_continuous(self, raw_df):
        # Heuristic: if the last row's first 100 entries take more than 5
        # distinct values, the factor looks continuous and gets the
        # row-extreme transform; otherwise it is already discrete.
        a = list(set(raw_df.iloc[-1, :100].dropna().values))
        tmp_df = raw_df.reindex(index=self.xinx, columns=self.xnms)
        if len(a) > 5:
            return self.row_extre(tmp_df, self.sector_df, 0.3)
        return tmp_df

    def load_tech_factor(self, file_name):
        """Pre-computed technical factor stored as a pickle."""
        load_path = os.path.join('/media/hdd1/DAT_PreCalc/PreCalc_whs/' + self.sector_name)
        target_df = pd.read_pickle(os.path.join(load_path, file_name + '.pkl')) \
            .reindex(index=self.xinx, columns=self.xnms)
        return self._long_only_filter(target_df)

    def load_daily_factor(self, file_name):
        """Daily fundamental factor, row-extreme transformed."""
        load_path = f'{self.root_path}/EM_Funda/daily/'
        tmp_df = bt.AZ_Load_csv(os.path.join(load_path, file_name + '.csv')) \
            .reindex(index=self.xinx, columns=self.xnms)
        return self._long_only_filter(self.row_extre(tmp_df, self.sector_df, 0.3))

    def load_jerry_factor(self, file_name):
        """Signal file from the jerry factor pool."""
        factor_path = '/mnt/mfs/temp/dat_jerry/signal'
        raw_df = bt.AZ_Load_csv(f'{factor_path}/{file_name}')
        return self._long_only_filter(self._row_extre_if_continuous(raw_df))

    def load_whs_factor(self, file_name):
        """whs fundamental factor, row-extreme transformed."""
        load_path = f'{self.root_path}/EM_Funda/dat_whs/'
        tmp_df = bt.AZ_Load_csv(os.path.join(load_path, file_name + '.csv')) \
            .reindex(index=self.xinx, columns=self.xnms)
        return self._long_only_filter(self.row_extre(tmp_df, self.sector_df, 0.3))

    def load_remy_factor(self, file_name):
        """remy factor from DERIVED_F1."""
        load_path = f'{self.root_path}/EM_Funda/DERIVED_F1'
        raw_df = bt.AZ_Load_csv(f'{load_path}/{file_name}')
        return self._long_only_filter(self._row_extre_if_continuous(raw_df))

    def single_test(self, name_1):
        """Backtest one factor and return its trade direction: +1 if its pnl
        Sharpe is positive, otherwise -1."""
        factor_1 = getattr(self, my_factor_dict[name_1])(name_1)
        daily_pos = self.deal_mix_factor(factor_1).shift(2)
        in_condition, out_condition, ic, sharpe_q_in_df_u, sharpe_q_in_df_m, sharpe_q_in_df_d, pot_in, \
            fit_ratio, leve_ratio, sp_in, sharpe_q_out, pnl_df = filter_all(
                self.cut_date, daily_pos, self.return_choose,
                if_return_pnl=True, if_only_long=self.if_only_long)
        # The sign of the Sharpe ratio decides long vs short usage.
        if bt.AZ_Sharpe_y(pnl_df) > 0:
            return 1
        else:
            return -1

    def single_test_c(self, name_list):
        """Sum the sign-adjusted raw factors of name_list into one mix."""
        mix_factor = pd.DataFrame()
        for tmp_name in name_list:
            buy_sell_way = self.single_test(tmp_name)
            tmp_factor = getattr(self, my_factor_dict[tmp_name])(tmp_name)
            mix_factor = mix_factor.add(tmp_factor * buy_sell_way, fill_value=0)
        return mix_factor

    def single_test_real(self, name_list):
        """Sum sign-adjusted daily *positions* (not raw factors), print the
        combined backtest statistics, and return the aggregate position."""
        mix_factor = pd.DataFrame()
        for tmp_name in name_list:
            buy_sell_way = self.single_test(tmp_name)
            tmp_factor = getattr(self, my_factor_dict[tmp_name])(tmp_name)
            part_daily_pos = self.deal_mix_factor(tmp_factor).shift(2)
            mix_factor = mix_factor.add(part_daily_pos * buy_sell_way, fill_value=0)
        daily_pos = mix_factor / len(name_list)
        in_condition, out_condition, ic, sharpe_q_in_df_u, sharpe_q_in_df_m, sharpe_q_in_df_d, pot_in, \
            fit_ratio, leve_ratio, sp_in, sharpe_q_out, pnl_df = \
            filter_all(self.cut_date, daily_pos, self.return_choose, if_return_pnl=True, if_only_long=False)
        print(in_condition, out_condition, ic, sharpe_q_in_df_u, sharpe_q_in_df_m, sharpe_q_in_df_d,
              pot_in, fit_ratio, leve_ratio, sp_in, sharpe_q_out)
        return mix_factor
def load_index_data(index_name, xinx):
    """Daily return series of one index (fraction, not percent), aligned to xinx."""
    chg = bt.AZ_Load_csv(os.path.join('/mnt/mfs/DAT_EQT', 'EM_Tab09/INDEX_TD_DAILYSYS/CHG.csv'))
    return chg[index_name].reindex(index=xinx) * 0.01
def get_corr_matrix(cut_date=None):
    """Daily pnl series for every live WHS position file, hedged by the index
    legs each file carries.

    Returns a DataFrame with one pnl column per position file, optionally
    truncated to dates strictly after cut_date.
    """
    pos_file_list = [x for x in os.listdir('/mnt/mfs/AAPOS') if x.startswith('WHS')]
    return_df = bt.AZ_Load_csv('/mnt/mfs/DAT_EQT/EM_Funda/DERIVED_14/aadj_r.csv').astype(float)
    index_df_1 = load_index_data('000300', return_df.index).fillna(0)
    index_df_2 = load_index_data('000905', return_df.index).fillna(0)
    sum_pnl_df = pd.DataFrame()
    for pos_file_name in pos_file_list:
        pos_df = bt.AZ_Load_csv('/mnt/mfs/AAPOS/{}'.format(pos_file_name))
        has_if = 'IF01' in pos_df.columns
        has_ic = 'IC01' in pos_df.columns
        # The hedge leg(s) present in the position file decide the benchmark.
        if has_if and has_ic:
            hedge_df = 0.5 * index_df_1 + 0.5 * index_df_2
        elif has_if:
            hedge_df = index_df_1
        elif has_ic:
            hedge_df = index_df_2
        else:
            print('alpha hedge error')
            continue
        return_df_c = return_df.sub(hedge_df, axis=0)
        pnl_df = (pos_df.shift(2) * return_df_c).sum(axis=1)
        pnl_df.name = pos_file_name
        sum_pnl_df = pd.concat([sum_pnl_df, pnl_df], axis=1)
    if cut_date is not None:
        sum_pnl_df = sum_pnl_df[sum_pnl_df.index > cut_date]
    return sum_pnl_df
def get_all_pnl_corr(pnl_df, col_name):
    """Correlations (last 600 rows) of col_name against the shared pnl
    library; only entries above 0.71 are returned."""
    all_pnl_df = pd.read_csv('/mnt/mfs/AATST/corr_tst_pnls', sep='|', index_col=0, parse_dates=True)
    combined = pd.concat([all_pnl_df, pnl_df], axis=1)
    recent_corr = combined.iloc[-600:].corr()[col_name]
    return recent_corr[recent_corr > 0.71]
def corr_test_fun(pnl_df, alpha_name):
    """Check the new alpha's pnl correlation against existing strategies and
    email the verdict. Rejection when it correlates above 0.7 with two or
    more series in either the local or the shared pnl pool."""
    sum_pnl_df = get_corr_matrix(cut_date=None)
    sum_pnl_df_c = pd.concat([sum_pnl_df, pnl_df], axis=1)
    corr_self = sum_pnl_df_c.corr()[[alpha_name]]
    other_corr = get_all_pnl_corr(pnl_df, alpha_name)
    print(other_corr)
    self_corr = corr_self[corr_self > 0.7].dropna(axis=0)
    print(self_corr)
    # self_corr always contains the alpha itself (corr == 1), hence >= 2.
    rejected = len(self_corr) >= 2 or len(other_corr) >= 2
    verdict = 'FAIL!' if rejected else 'SUCCESS!'
    print(verdict)
    send_email.send_email(verdict + '\n' + self_corr.to_html(),
                          ['whs@yingpei.com'],
                          [],
                          '[RESULT DEAL]' + alpha_name)
    print('______________________________________')
    return 0
def config_test():
    """Run one hand-picked factor combination end to end: build positions,
    add the index hedge legs, plot/send the pnl, and run the correlation
    gate. Returns the final position DataFrame.

    NOTE(review): every candidate `factor_str` / `info_str` below is
    commented out, so `factor_str.split('|')` raises NameError unless one
    configuration is uncommented (or the names exist globally) before this
    function is called — confirm intended usage.
    """
    # pass 132.43 5.4 5.66 2.9698 2.58
    # factor_str = 'vr_original_45days.csv|R_NETPROFIT_s_QYOY|REMFF.24|wgt_return_p120d_0.2|RQYE_p60d_col_extre_0.2' \
    #              '|R_NETPROFIT_s_QYOY_and_QTTM_0.3|RQMCL_p345d_continue_ud|RZYE_row_extre_0.2|REMTK.11|M1_p1|M1'
    # info_str = 'market_top_300plus_industry_10_15|20|False'
    # pass 97.91 4.07 4.34 2.601 3.41
    # factor_str = 'news_num_df_20|turn_p120d_0.2|RQMCL_p345d_continue_ud|RQYE_p20d_col_extre_0.2' \
    #              '|R_FairValChg_TotProfit_s_First|MA_LINE_10_5|vr_afternoon_10min_20days|REMTK.06' \
    #              '|R_NetCashflowPS_s_First|REMFF.06|M1_p1'
    # info_str = 'market_top_300plus_industry_10_15|20|False'
    # pass 99.89 3.8 3.26 2.4056 3.04 ?????
    # factor_str = 'TotRev_and_mcap_QYOY_Y3YGR_0.3|RQMCL_p345d_continue_ud|RQYE_p10d_col_extre_0.2' \
    #              '|R_OPEX_sales_QYOY_and_QTTM_0.3|RZYE_p10d_col_extre_0.2' \
    #              '|TVOL_row_extre_0.2|R_NETPROFIT_s_QYOY_and_QTTM_0.3'
    # info_str = 'market_top_300plus_industry_10_15|20|False'
    # pass 105.39 4.18 2.92 2.5765 2.71 ziwhen10
    # factor_str = 'M1|turn_p150d_0.18|ab_sale_mng_exp|REMFF.24|RZCHE_row_extre_0.2|R_ParentProfit_s_YOY_First' \
    #              '|RQMCL_p345d_continue_ud|evol_p10d|TVOL_row_extre_0.2|REMTK.06|RZYE_p10d_col_extre_0.2'
    # info_str = 'market_top_300plus_industry_10_15|20|False'
    # 130.81 5.84 5.78 3.2277 2.54
    # factor_str = 'REMFF.08|vr_original_45days.csv|RQYE_row_extre_0.2|evol_p10d|M1|R_Cashflow_s_YOY_First|' \
    #              'news_num_df_20|wgt_return_p60d_0.2|R_OPEX_sales_QYOY_and_QTTM_0.3|RQYE_p20d_col_extre_0.2' \
    #              '|vr_afternoon_10min_20days'
    # factor_str = 'REMFF.08|RQYE_row_extre_0.2|evol_p10d|R_Cashflow_s_YOY_First|' \
    #              'news_num_df_20|wgt_return_p60d_0.2|R_OPEX_sales_QYOY_and_QTTM_0.3|RQYE_p20d_col_extre_0.2'
    # info_str = 'market_top_300plus_industry_10_15|20|False'
    # factor_str = 'TotRev_and_mcap_QYOY_Y3YGR_0.3|bulletin_num_df_20|RQYE_p20d_col_extre_0.2|REMWB.03' \
    #              '|bias_turn_p120d|evol_p20d|wgt_return_p20d_0.2|ADX_40_20_10|RZYE_row_extre_0.2|M1_p2|REMWB.05'
    # info_str = 'market_top_300plus_industry_20_25_30_35|20|False'
    # pass 99.64 5.33 8.85 3.3766 2.45
    # factor_str = 'R_EPS_s_YOY_First|continue_ud_p200d|RQYE_p10d_col_extre_0.2|REMFF.20|LIQ_mix.csv|REMWB.03|REMTK.13' \
    #              '|aadj_r_p345d_continue_ud|wgt_return_p20d_0.2|ADX_40_20_10|REMTK.11'
    # info_str = 'market_top_300plus_industry_20_25_30_35|20|False'
    # pass 142.46 5.21 3.62 2.7607 2.59
    # factor_str = 'aadj_r_p60d_col_extre_0.2|PE_TTM_row_extre_0.2|continue_ud_p20d|TotRev_and_asset_Y3YGR_Y5YGR_0.3' \
    #              '|R_EBITDA2_QYOY_and_QTTM_0.3|R_OTHERLASSET_QYOY_and_QTTM_0.3|REMTK.16|aadj_r_p10d_col_extre_0.2' \
    #              '|RQMCL_p345d_continue_ud|R_WorkCapital_QYOY|wgt_return_p20d_0.2'
    #
    # info_str = 'market_top_300plus_industry_45_50|20|False'
    # pass 174.61 5.44 5.15 2.6052 2.67
    # factor_str = 'REMTK.21|continue_ud_p20d|REMFF.40|continue_ud_p100d' \
    #              '|TVOL_p345d_continue_ud|BBANDS_10_1|R_INVESTINCOME_s_QYOY|R_OTHERLASSET_QYOY_and_QTTM_0.3' \
    #              '|REMFF.20|tab2_9_row_extre_0.3'
    # info_str = 'market_top_300plus_industry_45_50|20|False'
    # pass 148.13 5.44 3.13 2.8275 1.49
    # factor_str = 'REMFF.11|R_WorkCapital_QYOY_and_QTTM_0.3|continue_ud_p100d|aadj_r_p60d_col_extre_0.2' \
    #              '|R_LOANREC_s_QYOY_and_QTTM_0.3|TVOL_p345d_continue_ud|REMTK.32' \
    #              '|R_OTHERLASSET_QYOY_and_QTTM_0.3|wgt_return_p20d_0.2'
    # info_str = 'market_top_300plus_industry_45_50|20|False'
    # pass 117.41 4.48 2.87 2.6127 2.65
    # factor_str = 'REMFF.20|R_INVESTINCOME_s_QYOY|REMTK.32|aadj_r_p10d_col_extre_0.2' \
    #              '|TotRev_and_mcap_intdebt_Y3YGR_Y5YGR_0.3|TVOL_p345d_continue_ud' \
    #              '|aadj_r_p120d_col_extre_0.2|R_NetAssets_s_YOY_First|continue_ud_p90d'
    # info_str = 'market_top_300plus_industry_45_50|20|False'
    # pass 152.11 5.24 2.64 2.6867 2.87
    # factor_str = 'continue_ud_p100d|REMFF.26|turn_p20d_0.2|aadj_r_p120d_col_extre_0.2|REMTK.06' \
    #              '|R_LOANREC_s_QYOY_and_QTTM_0.3|TVOL_p345d_continue_ud|R_OTHERLASSET_QYOY_and_QTTM_0.3' \
    #              '|RQMCL_p345d_continue_ud|wgt_return_p20d_0.2'
    # info_str = 'market_top_300plus_industry_45_50|20|False'
    # pass 67.37 3.78 4.38 2.9121 2.72
    # factor_str = 'PS_TTM_row_extre_0.2|R_WorkCapital_QYOY_and_QTTM_0.3|REMTK.32' \
    #              '|R_TangAssets_IntDebt_QYOY_and_QTTM_0.3|aadj_r_p120d_col_extre_0.2|R_INVESTINCOME_s_QYOY' \
    #              '|bar_num_7_df|wgt_return_p20d_0.2|OPCF_and_asset_Y3YGR_Y5YGR_0.3|R_GrossProfit_TTM_QYOY_and_QTTM_0.3'
    # info_str = 'market_top_300plus_industry_45_50|5|False'
    factor_name_list = factor_str.split('|')
    alpha_name = 'WHSZIWHEN11'
    # info_str encodes "sector|hold_time|if_only_long".
    sector_name, hold_time, if_only_long = info_str.split('|')
    hold_time = int(hold_time)
    if if_only_long == 'True':
        if_only_long = True
    else:
        if_only_long = False
    cut_date = '20180601'
    begin_date = pd.to_datetime('20130101')
    end_date = datetime.now()
    root_path = '/media/hdd1/DAT_EQT'
    # root_path = '/mnt/mfs/DAT_EQT'
    if_save = False
    if_new_program = True
    lag = 2
    return_file = ''
    if_hedge = True
    # Hedge weights: IF (CSI300) vs IC (CSI500) legs chosen by the sector.
    if sector_name.startswith('market_top_300plus'):
        if_weight = 1
        ic_weight = 0
    elif sector_name.startswith('market_top_300to800plus'):
        if_weight = 0
        ic_weight = 1
    else:
        if_weight = 0.5
        ic_weight = 0.5
    time_para_dict = dict()
    main = FactorTestSector(root_path, if_save, if_new_program, begin_date, cut_date, end_date, time_para_dict,
                            sector_name, hold_time, lag, return_file, if_hedge, if_only_long, if_weight, ic_weight)
    # mix_factor = main.single_test_c(factor_name_list)
    # sum_pos_df_new = main.deal_mix_factor(mix_factor)
    sum_pos_df_new = main.single_test_real(factor_name_list)
    # Add the offsetting futures legs so net exposure is hedged.
    if if_weight != 0:
        sum_pos_df_new['IF01'] = -if_weight * sum_pos_df_new.sum(axis=1)
    if ic_weight != 0:
        sum_pos_df_new['IC01'] = -ic_weight * sum_pos_df_new.sum(axis=1)
    pnl_df = (sum_pos_df_new.shift(2) * main.return_choose).sum(axis=1)
    pnl_df.name = alpha_name
    plot_send_result(pnl_df, bt.AZ_Sharpe_y(pnl_df), alpha_name)
    corr_test_fun(pnl_df, alpha_name)
    # sum_pos_df_new.round(10).fillna(0).to_csv(f'/mnt/mfs/AAPOS/{alpha_name}.pos', sep='|', index_label='Date')
    return sum_pos_df_new
my_factor_dict = dict({
'RZCHE_p120d_col_extre_0.2': 'load_tech_factor',
'RZCHE_p60d_col_extre_0.2': 'load_tech_factor',
'RZCHE_p20d_col_extre_0.2': 'load_tech_factor',
'RZCHE_p10d_col_extre_0.2': 'load_tech_factor',
'RZCHE_p345d_continue_ud': 'load_tech_factor',
'RZCHE_row_extre_0.2': 'load_tech_factor',
'RQCHL_p120d_col_extre_0.2': 'load_tech_factor',
'RQCHL_p60d_col_extre_0.2': 'load_tech_factor',
'RQCHL_p20d_col_extre_0.2': 'load_tech_factor',
'RQCHL_p10d_col_extre_0.2': 'load_tech_factor',
'RQCHL_p345d_continue_ud': 'load_tech_factor',
'RQCHL_row_extre_0.2': 'load_tech_factor',
'RQYL_p120d_col_extre_0.2': 'load_tech_factor',
'RQYL_p60d_col_extre_0.2': 'load_tech_factor',
'RQYL_p20d_col_extre_0.2': 'load_tech_factor',
'RQYL_p10d_col_extre_0.2': 'load_tech_factor',
'RQYL_p345d_continue_ud': 'load_tech_factor',
'RQYL_row_extre_0.2': 'load_tech_factor',
'RQYE_p120d_col_extre_0.2': 'load_tech_factor',
'RQYE_p60d_col_extre_0.2': 'load_tech_factor',
'RQYE_p20d_col_extre_0.2': 'load_tech_factor',
'RQYE_p10d_col_extre_0.2': 'load_tech_factor',
'RQYE_p345d_continue_ud': 'load_tech_factor',
'RQYE_row_extre_0.2': 'load_tech_factor',
'RQMCL_p120d_col_extre_0.2': 'load_tech_factor',
'RQMCL_p60d_col_extre_0.2': 'load_tech_factor',
'RQMCL_p20d_col_extre_0.2': 'load_tech_factor',
'RQMCL_p10d_col_extre_0.2': 'load_tech_factor',
'RQMCL_p345d_continue_ud': 'load_tech_factor',
'RQMCL_row_extre_0.2': 'load_tech_factor',
'RZYE_p120d_col_extre_0.2': 'load_tech_factor',
'RZYE_p60d_col_extre_0.2': 'load_tech_factor',
'RZYE_p20d_col_extre_0.2': 'load_tech_factor',
'RZYE_p10d_col_extre_0.2': 'load_tech_factor',
'RZYE_p345d_continue_ud': 'load_tech_factor',
'RZYE_row_extre_0.2': 'load_tech_factor',
'RZMRE_p120d_col_extre_0.2': 'load_tech_factor',
'RZMRE_p60d_col_extre_0.2': 'load_tech_factor',
'RZMRE_p20d_col_extre_0.2': 'load_tech_factor',
'RZMRE_p10d_col_extre_0.2': 'load_tech_factor',
'RZMRE_p345d_continue_ud': 'load_tech_factor',
'RZMRE_row_extre_0.2': 'load_tech_factor',
'RZRQYE_p120d_col_extre_0.2': 'load_tech_factor',
'RZRQYE_p60d_col_extre_0.2': 'load_tech_factor',
'RZRQYE_p20d_col_extre_0.2': 'load_tech_factor',
'RZRQYE_p10d_col_extre_0.2': 'load_tech_factor',
'RZRQYE_p345d_continue_ud': 'load_tech_factor',
'RZRQYE_row_extre_0.2': 'load_tech_factor',
'WILLR_200_40': 'load_tech_factor',
'WILLR_200_30': 'load_tech_factor',
'WILLR_200_20': 'load_tech_factor',
'WILLR_140_40': 'load_tech_factor',
'WILLR_140_30': 'load_tech_factor',
'WILLR_140_20': 'load_tech_factor',
'WILLR_100_40': 'load_tech_factor',
'WILLR_100_30': 'load_tech_factor',
'WILLR_100_20': 'load_tech_factor',
'WILLR_40_40': 'load_tech_factor',
'WILLR_40_30': 'load_tech_factor',
'WILLR_40_20': 'load_tech_factor',
'WILLR_20_40': 'load_tech_factor',
'WILLR_20_30': 'load_tech_factor',
'WILLR_20_20': 'load_tech_factor',
'WILLR_10_40': 'load_tech_factor',
'WILLR_10_30': 'load_tech_factor',
'WILLR_10_20': 'load_tech_factor',
'BBANDS_10_2': 'load_tech_factor',
'BBANDS_10_1.5': 'load_tech_factor',
'BBANDS_10_1': 'load_tech_factor',
'MACD_20_60_18': 'load_tech_factor',
'BBANDS_200_2': 'load_tech_factor',
'BBANDS_200_1.5': 'load_tech_factor',
'BBANDS_200_1': 'load_tech_factor',
'BBANDS_140_2': 'load_tech_factor',
'BBANDS_140_1.5': 'load_tech_factor',
'BBANDS_140_1': 'load_tech_factor',
'BBANDS_100_2': 'load_tech_factor',
'BBANDS_100_1.5': 'load_tech_factor',
'BBANDS_100_1': 'load_tech_factor',
'BBANDS_40_2': 'load_tech_factor',
'BBANDS_40_1.5': 'load_tech_factor',
'BBANDS_40_1': 'load_tech_factor',
'BBANDS_20_2': 'load_tech_factor',
'BBANDS_20_1.5': 'load_tech_factor',
'BBANDS_20_1': 'load_tech_factor',
'MA_LINE_160_60': 'load_tech_factor',
'MA_LINE_120_60': 'load_tech_factor',
'MA_LINE_100_40': 'load_tech_factor',
'MA_LINE_60_20': 'load_tech_factor',
'MA_LINE_10_5': 'load_tech_factor',
'MACD_12_26_9': 'load_tech_factor',
'intra_up_vwap_col_score_row_extre_0.3': 'load_tech_factor',
'intra_up_vol_col_score_row_extre_0.3': 'load_tech_factor',
'intra_up_div_dn_col_score_row_extre_0.3': 'load_tech_factor',
'intra_up_div_daily_col_score_row_extre_0.3': 'load_tech_factor',
'intra_up_15_bar_vwap_col_score_row_extre_0.3': 'load_tech_factor',
'intra_up_15_bar_vol_col_score_row_extre_0.3': 'load_tech_factor',
'intra_up_15_bar_div_dn_15_bar_col_score_row_extre_0.3': 'load_tech_factor',
'intra_up_15_bar_div_daily_col_score_row_extre_0.3': 'load_tech_factor',
'intra_dn_vwap_col_score_row_extre_0.3': 'load_tech_factor',
'intra_dn_vol_col_score_row_extre_0.3': 'load_tech_factor',
'intra_dn_div_daily_col_score_row_extre_0.3': 'load_tech_factor',
'intra_dn_15_bar_vwap_col_score_row_extre_0.3': 'load_tech_factor',
'intra_dn_15_bar_vol_col_score_row_extre_0.3': 'load_tech_factor',
'intra_dn_15_bar_div_daily_col_score_row_extre_0.3': 'load_tech_factor',
'intra_up_vwap_row_extre_0.3': 'load_tech_factor',
'intra_up_vol_row_extre_0.3': 'load_tech_factor',
'intra_up_div_dn_row_extre_0.3': 'load_tech_factor',
'intra_up_div_daily_row_extre_0.3': 'load_tech_factor',
'intra_up_15_bar_vwap_row_extre_0.3': 'load_tech_factor',
'intra_up_15_bar_vol_row_extre_0.3': 'load_tech_factor',
'intra_up_15_bar_div_dn_15_bar_row_extre_0.3': 'load_tech_factor',
'intra_up_15_bar_div_daily_row_extre_0.3': 'load_tech_factor',
'intra_dn_vwap_row_extre_0.3': 'load_tech_factor',
'intra_dn_vol_row_extre_0.3': 'load_tech_factor',
'intra_dn_div_daily_row_extre_0.3': 'load_tech_factor',
'intra_dn_15_bar_vwap_row_extre_0.3': 'load_tech_factor',
'intra_dn_15_bar_vol_row_extre_0.3': 'load_tech_factor',
'intra_dn_15_bar_div_daily_row_extre_0.3': 'load_tech_factor',
'tab5_15_row_extre_0.3': 'load_tech_factor',
'tab5_14_row_extre_0.3': 'load_tech_factor',
'tab5_13_row_extre_0.3': 'load_tech_factor',
'tab4_5_row_extre_0.3': 'load_tech_factor',
'tab4_2_row_extre_0.3': 'load_tech_factor',
'tab4_1_row_extre_0.3': 'load_tech_factor',
'tab2_11_row_extre_0.3': 'load_tech_factor',
'tab2_9_row_extre_0.3': 'load_tech_factor',
'tab2_8_row_extre_0.3': 'load_tech_factor',
'tab2_7_row_extre_0.3': 'load_tech_factor',
'tab2_4_row_extre_0.3': 'load_tech_factor',
'tab2_1_row_extre_0.3': 'load_tech_factor',
'tab1_9_row_extre_0.3': 'load_tech_factor',
'tab1_8_row_extre_0.3': 'load_tech_factor',
'tab1_7_row_extre_0.3': 'load_tech_factor',
'tab1_5_row_extre_0.3': 'load_tech_factor',
'tab1_2_row_extre_0.3': 'load_tech_factor',
'tab1_1_row_extre_0.3': 'load_tech_factor',
'RSI_200_30': 'load_tech_factor',
'RSI_140_30': 'load_tech_factor',
'RSI_100_30': 'load_tech_factor',
'RSI_40_30': 'load_tech_factor',
'RSI_200_10': 'load_tech_factor',
'RSI_140_10': 'load_tech_factor',
'RSI_100_10': 'load_tech_factor',
'RSI_40_10': 'load_tech_factor',
'ATR_200_0.2': 'load_tech_factor',
'ATR_140_0.2': 'load_tech_factor',
'ATR_100_0.2': 'load_tech_factor',
'ATR_40_0.2': 'load_tech_factor',
'ADOSC_60_160_0': 'load_tech_factor',
'ADOSC_60_120_0': 'load_tech_factor',
'ADOSC_40_100_0': 'load_tech_factor',
'ADOSC_20_60_0': 'load_tech_factor',
'MFI_200_70_30': 'load_tech_factor',
'MFI_140_70_30': 'load_tech_factor',
'MFI_100_70_30': 'load_tech_factor',
'MFI_40_70_30': 'load_tech_factor',
'CMO_200_0': 'load_tech_factor',
'CMO_140_0': 'load_tech_factor',
'CMO_100_0': 'load_tech_factor',
'CMO_40_0': 'load_tech_factor',
'AROON_200_80': 'load_tech_factor',
'AROON_140_80': 'load_tech_factor',
'AROON_100_80': 'load_tech_factor',
'AROON_40_80': 'load_tech_factor',
'ADX_200_20_10': 'load_tech_factor',
'ADX_140_20_10': 'load_tech_factor',
'ADX_100_20_10': 'load_tech_factor',
'ADX_40_20_10': 'load_tech_factor',
'TotRev_and_mcap_intdebt_QYOY_Y3YGR_0.3': 'load_tech_factor',
'TotRev_and_asset_QYOY_Y3YGR_0.3': 'load_tech_factor',
'TotRev_and_mcap_QYOY_Y3YGR_0.3': 'load_tech_factor',
'TotRev_and_mcap_intdebt_Y3YGR_Y5YGR_0.3': 'load_tech_factor',
'TotRev_and_asset_Y3YGR_Y5YGR_0.3': 'load_tech_factor',
'TotRev_and_mcap_Y3YGR_Y5YGR_0.3': 'load_tech_factor',
'NetProfit_and_mcap_intdebt_QYOY_Y3YGR_0.3': 'load_tech_factor',
'NetProfit_and_asset_QYOY_Y3YGR_0.3': 'load_tech_factor',
'NetProfit_and_mcap_QYOY_Y3YGR_0.3': 'load_tech_factor',
'NetProfit_and_mcap_intdebt_Y3YGR_Y5YGR_0.3': 'load_tech_factor',
'NetProfit_and_asset_Y3YGR_Y5YGR_0.3': 'load_tech_factor',
'NetProfit_and_mcap_Y3YGR_Y5YGR_0.3': 'load_tech_factor',
'EBIT_and_mcap_intdebt_QYOY_Y3YGR_0.3': 'load_tech_factor',
'EBIT_and_asset_QYOY_Y3YGR_0.3': 'load_tech_factor',
'EBIT_and_mcap_QYOY_Y3YGR_0.3': 'load_tech_factor',
'EBIT_and_mcap_intdebt_Y3YGR_Y5YGR_0.3': 'load_tech_factor',
'EBIT_and_asset_Y3YGR_Y5YGR_0.3': 'load_tech_factor',
'EBIT_and_mcap_Y3YGR_Y5YGR_0.3': 'load_tech_factor',
'OPCF_and_mcap_intdebt_QYOY_Y3YGR_0.3': 'load_tech_factor',
'OPCF_and_asset_QYOY_Y3YGR_0.3': 'load_tech_factor',
'OPCF_and_mcap_QYOY_Y3YGR_0.3': 'load_tech_factor',
'OPCF_and_mcap_intdebt_Y3YGR_Y5YGR_0.3': 'load_tech_factor',
'OPCF_and_asset_Y3YGR_Y5YGR_0.3': 'load_tech_factor',
'OPCF_and_mcap_Y3YGR_Y5YGR_0.3': 'load_tech_factor',
'R_OTHERLASSET_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_WorkCapital_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_TangAssets_IntDebt_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_SUMLIAB_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_ROE1_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_OPEX_sales_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_OperProfit_YOY_First_and_QTTM_0.3': 'load_tech_factor',
'R_OperCost_sales_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_OPCF_TTM_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_NETPROFIT_s_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_NetInc_s_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_NetAssets_s_YOY_First_and_QTTM_0.3': 'load_tech_factor',
'R_LOANREC_s_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_LTDebt_WorkCap_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_INVESTINCOME_s_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_IntDebt_Mcap_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_GSCF_sales_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_GrossProfit_TTM_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_FINANCEEXP_s_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_FairVal_TotProfit_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_ESTATEINVEST_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_EPSDiluted_YOY_First_and_QTTM_0.3': 'load_tech_factor',
'R_EBITDA2_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_CostSales_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_CFO_s_YOY_First_and_QTTM_0.3': 'load_tech_factor',
'R_Cashflow_s_YOY_First_and_QTTM_0.3': 'load_tech_factor',
'R_ASSETDEVALUELOSS_s_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_ACCOUNTREC_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_ACCOUNTPAY_QYOY_and_QTTM_0.3': 'load_tech_factor',
'CCI_p150d_limit_12': 'load_tech_factor',
'CCI_p120d_limit_12': 'load_tech_factor',
'CCI_p60d_limit_12': 'load_tech_factor',
'CCI_p20d_limit_12': 'load_tech_factor',
'MACD_40_160': 'load_tech_factor',
'MACD_40_200': 'load_tech_factor',
'MACD_20_200': 'load_tech_factor',
'MACD_20_100': 'load_tech_factor',
'MACD_10_30': 'load_tech_factor',
'bias_turn_p120d': 'load_tech_factor',
'bias_turn_p60d': 'load_tech_factor',
'bias_turn_p20d': 'load_tech_factor',
'turn_p150d_0.18': 'load_tech_factor',
'turn_p30d_0.24': 'load_tech_factor',
'turn_p120d_0.2': 'load_tech_factor',
'turn_p60d_0.2': 'load_tech_factor',
'turn_p20d_0.2': 'load_tech_factor',
'log_price_0.2': 'load_tech_factor',
'wgt_return_p120d_0.2': 'load_tech_factor',
'wgt_return_p60d_0.2': 'load_tech_factor',
'wgt_return_p20d_0.2': 'load_tech_factor',
'return_p90d_0.2': 'load_tech_factor',
'return_p30d_0.2': 'load_tech_factor',
'return_p120d_0.2': 'load_tech_factor',
'return_p60d_0.2': 'load_tech_factor',
'return_p20d_0.2': 'load_tech_factor',
'PBLast_p120d_col_extre_0.2': 'load_tech_factor',
'PBLast_p60d_col_extre_0.2': 'load_tech_factor',
'PBLast_p20d_col_extre_0.2': 'load_tech_factor',
'PBLast_p10d_col_extre_0.2': 'load_tech_factor',
'PBLast_p345d_continue_ud': 'load_tech_factor',
'PBLast_row_extre_0.2': 'load_tech_factor',
'PS_TTM_p120d_col_extre_0.2': 'load_tech_factor',
'PS_TTM_p60d_col_extre_0.2': 'load_tech_factor',
'PS_TTM_p20d_col_extre_0.2': 'load_tech_factor',
'PS_TTM_p10d_col_extre_0.2': 'load_tech_factor',
'PS_TTM_p345d_continue_ud': 'load_tech_factor',
'PS_TTM_row_extre_0.2': 'load_tech_factor',
'PE_TTM_p120d_col_extre_0.2': 'load_tech_factor',
'PE_TTM_p60d_col_extre_0.2': 'load_tech_factor',
'PE_TTM_p20d_col_extre_0.2': 'load_tech_factor',
'PE_TTM_p10d_col_extre_0.2': 'load_tech_factor',
'PE_TTM_p345d_continue_ud': 'load_tech_factor',
'PE_TTM_row_extre_0.2': 'load_tech_factor',
'volume_moment_p20120d': 'load_tech_factor',
'volume_moment_p1040d': 'load_tech_factor',
'volume_moment_p530d': 'load_tech_factor',
'moment_p50300d': 'load_tech_factor',
'moment_p30200d': 'load_tech_factor',
'moment_p40200d': 'load_tech_factor',
'moment_p20200d': 'load_tech_factor',
'moment_p20100d': 'load_tech_factor',
'moment_p10100d': 'load_tech_factor',
'moment_p1060d': 'load_tech_factor',
'moment_p510d': 'load_tech_factor',
'continue_ud_p200d': 'load_tech_factor',
'evol_p200d': 'load_tech_factor',
'vol_count_down_p200d': 'load_tech_factor',
'vol_p200d': 'load_tech_factor',
'continue_ud_p100d': 'load_tech_factor',
'evol_p100d': 'load_tech_factor',
'vol_count_down_p100d': 'load_tech_factor',
'vol_p100d': 'load_tech_factor',
'continue_ud_p90d': 'load_tech_factor',
'evol_p90d': 'load_tech_factor',
'vol_count_down_p90d': 'load_tech_factor',
'vol_p90d': 'load_tech_factor',
'continue_ud_p50d': 'load_tech_factor',
'evol_p50d': 'load_tech_factor',
'vol_count_down_p50d': 'load_tech_factor',
'vol_p50d': 'load_tech_factor',
'continue_ud_p30d': 'load_tech_factor',
'evol_p30d': 'load_tech_factor',
'vol_count_down_p30d': 'load_tech_factor',
'vol_p30d': 'load_tech_factor',
'continue_ud_p120d': 'load_tech_factor',
'evol_p120d': 'load_tech_factor',
'vol_count_down_p120d': 'load_tech_factor',
'vol_p120d': 'load_tech_factor',
'continue_ud_p60d': 'load_tech_factor',
'evol_p60d': 'load_tech_factor',
'vol_count_down_p60d': 'load_tech_factor',
'vol_p60d': 'load_tech_factor',
'continue_ud_p20d': 'load_tech_factor',
'evol_p20d': 'load_tech_factor',
'vol_count_down_p20d': 'load_tech_factor',
'vol_p20d': 'load_tech_factor',
'continue_ud_p10d': 'load_tech_factor',
'evol_p10d': 'load_tech_factor',
'vol_count_down_p10d': 'load_tech_factor',
'vol_p10d': 'load_tech_factor',
'volume_count_down_p120d': 'load_tech_factor',
'volume_count_down_p60d': 'load_tech_factor',
'volume_count_down_p20d': 'load_tech_factor',
'volume_count_down_p10d': 'load_tech_factor',
'price_p120d_hl': 'load_tech_factor',
'price_p60d_hl': 'load_tech_factor',
'price_p20d_hl': 'load_tech_factor',
'price_p10d_hl': 'load_tech_factor',
'aadj_r_p120d_col_extre_0.2': 'load_tech_factor',
'aadj_r_p60d_col_extre_0.2': 'load_tech_factor',
'aadj_r_p20d_col_extre_0.2': 'load_tech_factor',
'aadj_r_p10d_col_extre_0.2': 'load_tech_factor',
'aadj_r_p345d_continue_ud': 'load_tech_factor',
'aadj_r_p345d_continue_ud_pct': 'load_tech_factor',
'aadj_r_row_extre_0.2': 'load_tech_factor',
'TVOL_p90d_col_extre_0.2': 'load_tech_factor',
'TVOL_p30d_col_extre_0.2': 'load_tech_factor',
'TVOL_p120d_col_extre_0.2': 'load_tech_factor',
'TVOL_p60d_col_extre_0.2': 'load_tech_factor',
'TVOL_p20d_col_extre_0.2': 'load_tech_factor',
'TVOL_p10d_col_extre_0.2': 'load_tech_factor',
'TVOL_p345d_continue_ud': 'load_tech_factor',
'TVOL_row_extre_0.2': 'load_tech_factor',
'R_ACCOUNTPAY_QYOY': 'load_daily_factor',
'R_ACCOUNTREC_QYOY': 'load_daily_factor',
'R_ASSETDEVALUELOSS_s_QYOY': 'load_daily_factor',
'R_AssetDepSales_s_First': 'load_daily_factor',
'R_BusinessCycle_First': 'load_daily_factor',
'R_CFOPS_s_First': 'load_daily_factor',
'R_CFO_TotRev_s_First': 'load_daily_factor',
'R_CFO_s_YOY_First': 'load_daily_factor',
'R_Cashflow_s_YOY_First': 'load_daily_factor',
'R_CostSales_QYOY': 'load_daily_factor',
'R_CostSales_s_First': 'load_daily_factor',
'R_CurrentAssetsTurnover_QTTM': 'load_daily_factor',
'R_DaysReceivable_First': 'load_daily_factor',
'R_DebtAssets_QTTM': 'load_daily_factor',
'R_DebtEqt_First': 'load_daily_factor',
'R_EBITDA2_QYOY': 'load_daily_factor',
'R_EBITDA_IntDebt_QTTM': 'load_daily_factor',
'R_EBITDA_sales_TTM_First': 'load_daily_factor',
'R_EBIT_sales_QTTM': 'load_daily_factor',
'R_EPS_s_First': 'load_daily_factor',
'R_EPS_s_YOY_First': 'load_daily_factor',
'R_ESTATEINVEST_QYOY': 'load_daily_factor',
'R_FCFTot_Y3YGR': 'load_daily_factor',
'R_FINANCEEXP_s_QYOY': 'load_daily_factor',
'R_FairValChgPnL_s_First': 'load_daily_factor',
'R_FairValChg_TotProfit_s_First': 'load_daily_factor',
'R_FairVal_TotProfit_QYOY': 'load_daily_factor',
'R_FairVal_TotProfit_TTM_First': 'load_daily_factor',
'R_FinExp_sales_s_First': 'load_daily_factor',
'R_GSCF_sales_s_First': 'load_daily_factor',
'R_GrossProfit_TTM_QYOY': 'load_daily_factor',
'R_INVESTINCOME_s_QYOY': 'load_daily_factor',
'R_LTDebt_WorkCap_QTTM': 'load_daily_factor',
'R_MgtExp_sales_s_First': 'load_daily_factor',
'R_NETPROFIT_s_QYOY': 'load_daily_factor',
'R_NOTICEDATE_First': 'load_daily_factor',
'R_NetAssets_s_POP_First': 'load_daily_factor',
'R_NetAssets_s_YOY_First': 'load_daily_factor',
'R_NetCashflowPS_s_First': 'load_daily_factor',
'R_NetIncRecur_QYOY': 'load_daily_factor',
'R_NetIncRecur_s_First': 'load_daily_factor',
'R_NetInc_TotProfit_s_First': 'load_daily_factor',
'R_NetInc_s_First': 'load_daily_factor',
'R_NetInc_s_QYOY': 'load_daily_factor',
'R_NetMargin_s_YOY_First': 'load_daily_factor',
'R_NetProfit_sales_s_First': 'load_daily_factor',
'R_NetROA_TTM_First': 'load_daily_factor',
'R_NetROA_s_First': 'load_daily_factor',
'R_NonOperProft_TotProfit_s_First': 'load_daily_factor',
'R_OPCF_NetInc_s_First': 'load_daily_factor',
'R_OPCF_TTM_QYOY': 'load_daily_factor',
'R_OPCF_TotDebt_QTTM': 'load_daily_factor',
'R_OPCF_sales_s_First': 'load_daily_factor',
'R_OPEX_sales_TTM_First': 'load_daily_factor',
'R_OPEX_sales_s_First': 'load_daily_factor',
'R_OTHERLASSET_QYOY': 'load_daily_factor',
'R_OperCost_sales_s_First': 'load_daily_factor',
'R_OperProfit_YOY_First': 'load_daily_factor',
'R_OperProfit_s_POP_First': 'load_daily_factor',
'R_OperProfit_s_YOY_First': 'load_daily_factor',
'R_OperProfit_sales_s_First': 'load_daily_factor',
'R_ParentProfit_s_POP_First': 'load_daily_factor',
'R_ParentProfit_s_YOY_First': 'load_daily_factor',
'R_ROENetIncRecur_s_First': 'load_daily_factor',
'R_ROE_s_First': 'load_daily_factor',
'R_RecurNetProft_NetProfit_s_First': 'load_daily_factor',
'R_RevenuePS_s_First': 'load_daily_factor',
'R_RevenueTotPS_s_First': 'load_daily_factor',
'R_Revenue_s_POP_First': 'load_daily_factor',
'R_Revenue_s_YOY_First': 'load_daily_factor',
'R_SUMLIAB_QYOY': 'load_daily_factor',
'R_SUMLIAB_Y3YGR': 'load_daily_factor',
'R_SalesCost_s_First': 'load_daily_factor',
'R_SalesGrossMGN_QTTM': 'load_daily_factor',
'R_SalesGrossMGN_s_First': 'load_daily_factor',
'R_SalesNetMGN_s_First': 'load_daily_factor',
'R_TangAssets_TotLiab_QTTM': 'load_daily_factor',
'R_Tax_TotProfit_QTTM': 'load_daily_factor',
'R_Tax_TotProfit_s_First': 'load_daily_factor',
'R_TotAssets_s_YOY_First': 'load_daily_factor',
'R_TotLiab_s_YOY_First': 'load_daily_factor',
'R_TotRev_TTM_Y3YGR': 'load_daily_factor',
'R_TotRev_s_POP_First': 'load_daily_factor',
'R_TotRev_s_YOY_First': 'load_daily_factor',
'R_WorkCapital_QYOY': 'load_daily_factor',
'bar_num_7_df': 'load_whs_factor',
'bar_num_12_df': 'load_whs_factor',
'repurchase': 'load_whs_factor',
'dividend': 'load_whs_factor',
'repurchase_news_title': 'load_whs_factor',
'repurchase_news_summary': 'load_whs_factor',
'dividend_news_title': 'load_whs_factor',
'dividend_news_summary': 'load_whs_factor',
'staff_changes_news_title': 'load_whs_factor',
'staff_changes_news_summary': 'load_whs_factor',
'funds_news_title': 'load_whs_factor',
'funds_news_summary': 'load_whs_factor',
'meeting_decide_news_title': 'load_whs_factor',
'meeting_decide_news_summary': 'load_whs_factor',
'restricted_shares_news_title': 'load_whs_factor',
'restricted_shares_news_summary': 'load_whs_factor',
'son_company_news_title': 'load_whs_factor',
'son_company_news_summary': 'load_whs_factor',
'suspend_news_title': 'load_whs_factor',
'suspend_news_summary': 'load_whs_factor',
'shares_news_title': 'load_whs_factor',
'': 'load_whs_factor',
'shares_news_summary': 'load_whs_factor',
'ab_inventory': 'load_whs_factor',
'ab_rec': 'load_whs_factor',
'ab_others_rec': 'load_whs_factor',
'ab_ab_pre_rec': 'load_whs_factor',
'ab_sale_mng_exp': 'load_whs_factor',
'ab_grossprofit': 'load_whs_factor',
'lsgg_num_df_5': 'load_whs_factor',
'lsgg_num_df_20': 'load_whs_factor',
'lsgg_num_df_60': 'load_whs_factor',
'bulletin_num_df': 'load_whs_factor',
'bulletin_num_df_5': 'load_whs_factor',
'bulletin_num_df_20': 'load_whs_factor',
'bulletin_num_df_60': 'load_whs_factor',
'news_num_df_5': 'load_whs_factor',
'news_num_df_20': 'load_whs_factor',
'news_num_df_60': 'load_whs_factor',
'staff_changes': 'load_whs_factor',
'funds': 'load_whs_factor',
'meeting_decide': 'load_whs_factor',
'restricted_shares': 'load_whs_factor',
'son_company': 'load_whs_factor',
'suspend': 'load_whs_factor',
'shares': 'load_whs_factor',
'buy_key_title__word': 'load_whs_factor',
'sell_key_title_word': 'load_whs_factor',
'buy_summary_key_word': 'load_whs_factor',
'sell_summary_key_word': 'load_whs_factor',
})
# Remy factor table: REMTK.01-40, REMFF.01-40 and REMWB.01-12, all fetched
# through 'load_remy_factor'.  Generated programmatically; the group order
# below reproduces the original hand-written listing exactly
# (TK 40..31, FF 40..31, WB 12..01, TK 30..01, FF 30..01).
my_factor_dict_2 = {
    '%s.%02d' % (prefix, num): 'load_remy_factor'
    for prefix, nums in (('REMTK', range(40, 30, -1)),
                         ('REMFF', range(40, 30, -1)),
                         ('REMWB', range(12, 0, -1)),
                         ('REMTK', range(30, 0, -1)),
                         ('REMFF', range(30, 0, -1)))
    for num in nums
}
# Jerry's liquidity / momentum / volume-ratio factors, all loaded via
# 'load_jerry_factor'.  NOTE(review): some names carry a '.csv' suffix and
# some do not (e.g. 'vr_afternoon_10min_20days' vs the '_last10min' variant);
# kept verbatim since the names must match the upstream loader exactly.
jerry_factor_dict = {
    factor_name: 'load_jerry_factor'
    for factor_name in (
        'LIQ_all_original.csv',
        'LIQ_all_pure.csv',
        'LIQ_mix.csv',
        'LIQ_p1_original.csv',
        'LIQ_p1_pure.csv',
        'LIQ_p2_original.csv',
        'LIQ_p2_pure.csv',
        'LIQ_p3_original.csv',
        'LIQ_p3_pure.csv',
        'LIQ_p4_original.csv',
        'LIQ_p4_pure.csv',
        'M0',
        'M1',
        'M1_p1',
        'M1_p2',
        'M1_p3',
        'M1_p4',
        'vr_afternoon_10min_20days',
        'vr_afternoon_last10min_20days.csv',
        'vr_original_20days.csv',
        'vr_original_45days.csv',
        'vr_original_75days.csv',
    )
}
# Fold the auxiliary factor tables into the master lookup table.
my_factor_dict.update(my_factor_dict_2)
my_factor_dict.update(jerry_factor_dict)

if __name__ == '__main__':
    # Run the configured backtest and report the wall-clock time in seconds.
    t1 = time.time()
    sum_pos_df = config_test()
    t2 = time.time()
    print(round(t2 - t1, 4))
|
[
"1612255875@qq.com"
] |
1612255875@qq.com
|
68b377124eb26ae187dc04f00fc3c6cc81fed129
|
a21d2fb3f111f30b842a4c3a5c6940d1a003b94d
|
/Python3/Foundation/Day 8/进程 join.py
|
883a86cf1f1208853b0f0f2f71b0dd2e70e1d4ae
|
[] |
no_license
|
hygnic/MyPython
|
438f16206770a006a3b7bcf2ada9150c71ce8af9
|
26aaa57728ad545af5920ff2015eae258712d077
|
refs/heads/master
| 2021-07-01T02:10:15.810495
| 2021-02-26T08:07:54
| 2021-02-26T08:07:54
| 220,612,238
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,107
|
py
|
# User: hygnic
# Date: 2018/9/8
# User: hygnic
# Date: 2018/9/8
import os
import time
from multiprocessing import Process
# help(os)
def func1(args):
    """Child-process worker: echo *args*, pause two seconds, then print
    the PID of the process executing it (the child's PID, not the parent's).
    """
    print(args)
    time.sleep(2)
    print('son process: ', os.getpid())
def func2(filename, content):
    """Write *content* to the file *filename*, truncating any existing file."""
    with open(filename, 'w') as handle:
        handle.write(content)
if __name__ == '__main__':
    # Register and start ten child processes running func1.
    children = []
    for i in range(10):
        # A single-argument tuple needs the trailing comma.
        worker = Process(target=func1, args=('*' * i,))
        worker.start()  # starting the child invokes its run() internally
        children.append(worker)
    writer = Process(target=func2, args=('info', 'func2 content'))
    writer.start()
    # print(children)
    # join() blocks the parent until each child in the list has finished,
    # turning the asynchronous spawns into a synchronous wait.
    for worker in children:
        worker.join()
    print('Done! father process: ', os.getpid())
|
[
"hygnic@outlook.com"
] |
hygnic@outlook.com
|
ea9dec07d951070f1a5289ec38c7b9b3c3248485
|
de8b2b0de2ba522493e2c86fa055df8c7c40aa69
|
/e008-largest-product.py
|
e1ab6fb66b56899bf4385b65ab78fb2590571507
|
[
"Unlicense"
] |
permissive
|
bayramcicek/mini-programs
|
56edbd2013704813d6730ecaf684baf9042d21ab
|
3f876e3274b7beeb5e7413ac9c5275813d9f0d2d
|
refs/heads/master
| 2021-07-03T03:57:02.874127
| 2020-09-27T11:09:07
| 2020-09-27T11:09:07
| 138,440,620
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,323
|
py
|
#!/usr/bin/python3.6
# created by cicek on 12.10.2018 15:09
# Project Euler 8: greatest product of thirteen adjacent digits.
digits = "7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450"

# Slide a 13-digit window over the string, tracking the running maximum
# instead of collecting and sorting every window product.
best = 0
for start in range(len(digits) - 12):
    window_product = 1
    for ch in digits[start:start + 13]:
        window_product *= int(ch)
    if window_product > best:
        best = window_product

print(best)
|
[
"noreply@github.com"
] |
noreply@github.com
|
35e9113444ac17cadf33e735da85c9f5274af0c6
|
ce9a89a1988b9cd3738cc0ee310f8d9a262058b6
|
/carchive/h5data.py
|
1b4ba5a442ad96088e7d2dc7b17cbd7607ae82ab
|
[] |
no_license
|
epicsdeb/carchivetools
|
64b9d7a2783b5f3bde674fec0699a414f3ee21c4
|
4c37624a22ba863469ba87c29aa44bd367825525
|
refs/heads/master
| 2022-02-24T20:13:23.418035
| 2022-01-02T19:01:32
| 2022-01-02T19:30:13
| 10,649,869
| 5
| 6
| null | 2022-02-15T19:16:48
| 2013-06-12T19:04:35
|
Python
|
UTF-8
|
Python
| false
| false
| 3,391
|
py
|
# -*- coding: utf-8 -*-
"""
Copyright 2015 Brookhaven Science Assoc.
as operator of Brookhaven National Lab.
"""
import logging
_log = logging.getLogger("carchive.h5data")
import h5py, numpy
__all__=['h5open','sevr2str']
# Numeric severity codes -> display strings.  The high codes are
# archiver-specific markers rather than EPICS alarm severities.
_sevr = {
    0: '', 1: 'MINOR', 2: 'MAJOR', 3: 'INVALID',
    3968: 'Est_Repeat', 3856: 'Repeat', 3904: 'Disconnect',
    3872: 'Archive_Off', 3848: 'Archive_Disable',
}

def sevr2str(S):
    """Map a numeric severity code to its display string.

    Unknown codes fall back to their plain string representation.
    """
    return _sevr.get(S, str(S))
class H5PV(object):
    """The dataset(s) for a single PV.

    Provides attributes: value, severity, status, and (lazily built) time.
    """

    def __init__(self, name, G):
        self.name = name
        self.value = G['value']
        self.meta = G['meta']
        self.status = self.meta['status']
        self.severity = self.meta['severity']
        # An (N, 1) value array marks a scalar PV.
        self.scalar = self.value.shape[1] == 1

    def __len__(self):
        return self.meta.shape[0]

    @property
    def time(self):
        """POSIX timestamps (float seconds) built from the sec/ns fields."""
        try:
            return self.__posix
        except AttributeError:
            # Compute once on first access and cache on the instance.
            self.__posix = self.meta['sec'] + 1e-9 * self.meta['ns']
            return self.__posix

    def plotdata(self):
        """Return plot-able step data.

        Returns a tuple (time, value) where each array has 2*len(self)-1
        points.  Between every adjacent pair of samples an extra point is
        inserted that repeats the earlier value at a time 1 ns before the
        following sample, producing a staircase trace.

        Analogous to
          Input=[(T0,Y0),(T1,Y1)]
          Output[(T0,Y0),(T1-1e-9,Y0),(T1,Y1)]
        """
        if len(self) <= 1:
            return self.time, self.value
        count, width = self.value.shape
        steps_t = numpy.ndarray((2 * count - 1,), dtype=self.time.dtype)
        steps_v = numpy.ndarray((2 * count - 1, width), dtype=self.value.dtype)
        # Even slots carry the original samples ...
        steps_t[0::2] = self.time
        steps_v[0::2] = self.value
        # ... odd slots repeat the preceding value just before the next time.
        steps_t[1::2] = self.time[1:] - 1e-9
        steps_v[1::2] = self.value[:-1, :]
        return steps_t, steps_v
class H5Data(object):
    """Access an HDF5 file containing data retrieved from PVs.

    >>> pvset = h5open('mydata.h5')
    >>> assert 'pvone' in pvset
    >>> pv1 = pvset['pvone']
    >>> allpvs = pvset.astuple()
    >>> assert pv1 in allpvs
    >>> val1 = pv1.value
    """
    def __init__(self, fname, mode='r'):
        # 'file.h5:/group' syntax: text after the first ':' selects the HDF5
        # group to read from; the default is the file root.
        name, _, path = fname.partition(':')
        self.__F=h5py.File(name, mode)
        self.__G=self.__F[path or '/']
        # Sanity check: at least one child group must look like PV data,
        # i.e. provide 'value' and 'meta' datasets with equal first dimension.
        haspv=False
        for pv in self.__G:
            P = self.__G[pv]
            V, M = P.get('value',None), P.get('meta', None)
            if V and M and V.shape[0]==M.shape[0]:
                haspv=True
            elif not V and not M: # ignore unrelated
                continue
            else:
                # One of the two datasets is missing, or their lengths differ.
                _log.warn("%s/%s has incorrectly formatted data", fname, pv)
        if not haspv:
            raise ValueError("%s contains no data"%fname)
    def __len__(self):
        # Number of child entries (PV groups) under the selected group.
        return len(self.__G)
    def __iter__(self):
        # Iterates PV names.
        return iter(self.__G)
    def __contains__(self, key):
        return key in self.__G
    def __getitem__(self, key):
        # Wrap the named child group in an H5PV accessor.
        return H5PV(key, self.__G[key])
    def astuple(self):
        """Return a tuple of H5PV instances.
        The order is established by sorting the PV names.
        """
        pvs = list(self.__G)
        pvs.sort()
        return tuple(map(self.__getitem__, pvs))
# Function-style alias: h5open('file.h5') constructs an H5Data instance.
h5open = H5Data
|
[
"mdavidsaver@bnl.gov"
] |
mdavidsaver@bnl.gov
|
486b7a6679e963cbbe74c87c996446b130cafa06
|
cc076b26000e177a5e0a9c50257dc173620bfc9c
|
/deep_RL_benchmarks/eval_best_RL.py
|
44b681922cf449be3244136b6299c0a9fd270be7
|
[
"BSD-2-Clause"
] |
permissive
|
Mabrokma/deps_arXiv2020
|
73dc16f8493bf59ce3bc63a33e5d7a29c5295ef1
|
8f9bd9436e2a2ed94039a4cab83620d7b28f2964
|
refs/heads/master
| 2023-05-08T02:21:07.469279
| 2021-06-04T02:50:59
| 2021-06-04T02:50:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,005
|
py
|
import warnings
warnings.filterwarnings("ignore")
import matplotlib
# matplotlib.use("Agg")
import matplotlib.pyplot as plt
import argparse
import mlflow
import os
from scipy.io import loadmat
from gym import spaces, Env
from stable_baselines.common.env_checker import check_env
from stable_baselines.common.policies import MlpPolicy
from stable_baselines import PPO2, A2C, ACKTR
# from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines.sac.policies import MlpPolicy as SacMlpPolicy
import numpy as np
import numpy.matlib
import tensorflow as tf
import time
# def min_max_norm(M):
# return (M - M.min(axis=0).reshape(1, -1))/(M.max(axis=0) - M.min(axis=0)).reshape(1, -1)
def normalize(M, Mmin=None, Mmax=None):
    """Column-wise min-max normalization of a 2-d array.

    :param M: (2-d np.array) Data to be normalized
    :param Mmin: Optional per-column minima; inferred from M when omitted.
    :param Mmax: Optional per-column maxima; inferred from M when omitted.
    :return: (2-d np.array) Normalized data.  NaNs arising from zero-range
        (constant) columns are replaced with 0.
    """
    if Mmin is None:
        Mmin = M.min(axis=0).reshape(1, -1)
    if Mmax is None:
        Mmax = M.max(axis=0).reshape(1, -1)
    scaled = (M - Mmin) / (Mmax - Mmin)
    return np.nan_to_num(scaled)
def min_max_denorm(M, Mmin, Mmax):
    """Invert min-max normalization.

    :param M: (2-d np.array) Normalized data
    :param Mmin: (int) Minimum value
    :param Mmax: (int) Maximum value
    :return: (2-d np.array) Data on the original scale; NaNs mapped to 0.
    """
    return np.nan_to_num(M * (Mmax - Mmin) + Mmin)
def control_profile(max_input=4e3, samples_day=288, sim_days=7):
    """Build a sinusoidal daily control profile repeated over *sim_days*.

    :param max_input: amplitude of the sine wave
    :param samples_day: samples per day (288 == 5-minute sampling)
    :param sim_days: number of days to tile
    :return: (samples_day*sim_days, 1) column vector
    """
    phase = np.arange(0, 2 * np.pi, 2 * np.pi / samples_day)
    daily = max_input * np.sin(phase)
    return np.tile(daily, sim_days).reshape(-1, 1)
def disturbance(file='../../TimeSeries/disturb.mat', n_sim=8064):
    """Load the measured disturbance series from a MAT-file.

    The MAT variable 'D' is stored as (3, N); the first *n_sim* columns are
    returned transposed to shape (n_sim, 3).
    """
    return loadmat(file)['D'][:, :n_sim].T  # n_sim X 3
class ToyBuilding(Env):
    """Custom gym Environment: 4-state linear building model.

    State update: x' = (theta ∘ A) x + B u + E d + w, with optional additive
    noise w and element-wise parametric noise theta (both uniform).  The
    scalar output y is the fourth state (C = [0, 0, 0, 1]).  Observations
    concatenate y (or the full state when ``fully_observable``) with the
    disturbance, the reference and the constraint-slack terms.
    """
    metadata = {'render.modes': ['human']}

    def __init__(self, fully_observable=True, obs_norm=True, act_denorm=True,
                 w_mean=0.0, w_var=0.0, theta_mean=0.0, theta_var=0.0):
        # fully_observable: expose the full state x instead of y only.
        # obs_norm: min-max normalize observations returned by step().
        # act_denorm: rescale incoming (normalized) actions to [0, 5000].
        # w_mean/w_var: center/half-width of the additive state noise.
        # theta_mean/theta_var: center/half-width of the multiplicative
        #   perturbation applied element-wise to A.
        super().__init__()
        self.nsim, nsim = 8064, 8064  # simulation horizon (28 days @ 5 min)
        self.fully_observable = fully_observable
        self.act_denorm = act_denorm
        self.obs_norm = obs_norm
        self.w_mean = w_mean
        self.w_var = w_var
        self.theta_mean = theta_mean
        self.theta_var = theta_var
        # State / output / input / disturbance dimensions.
        self.nx, self.ny, self.nu, self.nd = 4, 1, 1, 3
        # self.action_space = spaces.Box(-np.inf, np.inf, shape=(self.nu,), dtype=np.float32)
        self.action_space = spaces.Box(0, 5000, shape=(self.nu,), dtype=np.float32)
        # Observation width depends on whether y or the full x is exposed.
        self.observation_space = spaces.Box(-np.inf, np.inf,
                                            shape=[(self.ny+self.nd+self.ny+2*self.ny+2*self.nu,
                                                    self.nx+self.nd+self.ny+2*self.ny+2*self.nu)[fully_observable]],
                                            dtype=np.float32)
        # Discrete-time LTI model matrices (hard-coded identified model).
        self.A = np.matrix([[0.9950, 0.0017, 0.0000, 0.0031], [0.0007, 0.9957, 0.0003, 0.0031],
                            [0.0000, 0.0003, 0.9834, 0.0000], [0.2015, 0.4877, 0.0100, 0.2571]])
        self.B = np.matrix([[1.7586e-06], [1.7584e-06],
                            [1.8390e-10], [5.0563e-04]])
        self.E = np.matrix([[0.0002, 0.0000, 0.0000], [0.0002, 0.0000, 0.0000],
                            [0.0163, 0.0000, 0.0000], [0.0536, 0.0005, 0.0001]])
        self.C = np.matrix([[0.0, 0.0, 0.0, 1.0]])
        self.x = 20 * np.ones(4, dtype=np.float32)
        self.tstep = 0
        self.y = self.C*np.asmatrix(self.x).T
        # Open-loop excitation + disturbances produce the nominal state and
        # output trajectories X, Y via loop().
        self.U = control_profile(samples_day=288, sim_days=nsim//288)
        nsim = self.U.shape[0]
        self.D = disturbance(file='disturb.mat', n_sim=nsim)
        self.X, self.Y = self.loop(8064, self.U, self.D)
        # Skip the first week (2016 samples) as warm-up.
        self.x, self.y = self.X[2016], self.Y[2016]
        self.init_idx = {'train': 0, 'dev': 2015, 'test': 4031}
        self.X, self.Y = self.X[2016:], self.Y[2016:]
        self.X_out = np.empty(shape=[0, 4])
        print(self.X.shape)
        plot_trajectories([self.X[:, k] for k in range(self.X.shape[1])], [self.X[:, k] for k in range(self.X.shape[1])], ['$x_1$', '$x_2$', '$x_3$', '$x_4$'])
        # constraints and references
        self.ymin_val = 19
        self.ymax_val = 25
        self.umin_val = 0
        self.umax_val = 5000
        # Initial constraint slacks (ReLU distance outside the bounds).
        self.s_ymin = self.ReLU(-self.y + self.ymin_val)
        self.s_ymax = self.ReLU(self.y - self.ymax_val)
        self.s_umin = self.ReLU(-np.array([0]) + self.umin_val)
        self.s_umax = self.ReLU(np.array([0]) - self.umax_val)
        samples_day = 288  # 288 samples per day with 5 min sampling
        # R_day_train = 15 + 10 * np.sin(np.arange(0, 2 * np.pi, 2 * np.pi / samples_day)) # daily control profile
        R_day_train = 20 + 2 * np.sin(np.arange(0, 2 * np.pi, 2 * np.pi / samples_day))  # daily control profile
        Sim_days = 35  # number of simulated days
        self.Ref = np.matlib.repmat(R_day_train, 1, Sim_days).T  # Sim_days control profile
        self.reference = self.Ref[2016]
        # self.Ref_train = self.Ref[2016:4032].squeeze() # ad hoc fix
        # self.Ref_train = 15 + 25 * np.random.rand(2016)
        # weights - the same as for deep MPC
        self.Q_con_u = 5e-7
        self.Q_con_x = 50
        self.Q_con_y = 50
        self.Q_u = 1e-7
        # NOTE(review): Q_u is immediately reassigned below; 1e-6 is the
        # value in effect — confirm the 1e-7 line above is dead on purpose.
        self.Q_u = 1e-6
        self.Q_ref = 20
        self.alpha_con = 0

    def xtrue(self, dset):
        """Return the one-week (2016-sample) true-state slice for *dset*."""
        start = self.init_idx[dset]
        return self.X[start:start+2016]

    def loop(self, nsim, U, D):
        """
        Open-loop response of the nominal (noise-free) model.

        :param nsim: (int) Number of steps for open loop response
        :param U: (ndarray, shape=(nsim, self.nu)) Control profile matrix
        :param D: (ndarray, shape=(nsim, self.nd)) Disturbance matrix
        :return: (X, Y) response matrices, aligned so that X[k] is the state
            of the system that Y[k] is indicating
        """
        Y = np.zeros((nsim+1, 1))  # output trajectory placeholders
        X = np.zeros((nsim+1, 4))
        X[0] = self.x
        for k in range(nsim):
            Y[k+1] = self.C*np.asmatrix(X[k]).T
            d = np.asmatrix(D[k]).T
            u = np.asmatrix(U[k]).T
            x = self.A*np.asmatrix(X[k]).T + self.B*u + self.E*d
            X[k+1] = x.flatten()
        return X, Y

    def obs_normalize(self, obs):
        """Min-max normalize an observation vector, segment by segment
        (state/output, disturbance, reference, output slacks, input slacks)."""
        ###### Normalize min max bounds
        ymin = 0
        ymax = 40
        umin = 0
        umax = 5000
        dmin = np.min(self.D, 0)
        dmax = np.max(self.D, 0)
        rmin = np.min(self.Ref, 0)
        rmax = np.max(self.Ref, 0)
        # Width of the leading segment: full state vs scalar output.
        if self.fully_observable is True:
            ny = self.nx
        else:
            ny = self.ny
        y_norm = normalize(obs[0:ny], ymin, ymax)
        d_norm = normalize(obs[ny:ny+self.nd], dmin, dmax)
        r_norm = normalize(obs[ny + self.nd:ny + self.nd+self.ny], rmin, rmax)
        sy_norm = normalize(obs[ny + self.nd + self.ny:ny + self.nd + 3*self.ny], ymin, ymax)
        su_norm = normalize(obs[ny + self.nd + 3 * self.ny:], umin, umax)
        obs_norm = np.concatenate([y_norm, d_norm, r_norm, sy_norm, su_norm])
        return obs_norm

    def action_denorm(self, action):
        """Rescale a normalized action in [0, 1] to physical units [0, 5000]."""
        umin = 0
        umax = 5000
        action = min_max_denorm(action, umin, umax)
        return action

    def ReLU(self, x):
        """Element-wise rectifier, used to compute constraint slacks."""
        return x * (x > 0)

    def step(self, action):
        """Advance the simulation by one step.

        Applies the (optionally denormalized) *action*, injects additive (w)
        and multiplicative (theta) uncertainty, and returns the gym tuple
        (observation, reward, done, info); ``info['xout']`` carries the
        accumulated state trajectory.
        """
        if self.act_denorm is True:
            action = self.action_denorm(action)
        # Uniform additive noise w in [w_mean - w_var, w_mean + w_var].
        w = (self.w_mean - self.w_var) + (2 * self.w_var) * np.asmatrix(
            np.random.rand(self.nx, 1))  # additive uncertainty
        # Uniform multiplicative factor around 1 for each entry of A.
        theta = (1 + self.theta_mean - self.theta_var) + (2 * self.theta_var) * np.asmatrix(
            np.random.rand(self.nx, self.nx))  # parametric uncertainty
        self.d = self.D[2016+self.tstep].reshape(3,1)
        # self.x = self.A*np.asmatrix(self.x).reshape(4, 1) + self.B*action.T + self.E*self.d
        self.x = np.multiply(theta, self.A)*np.asmatrix(self.x).reshape(4, 1) + self.B*action.T + self.E*self.d + w
        self.y = self.C * np.asmatrix(self.x)
        self.reference = self.Ref[2016+self.tstep]
        self.tstep += 1
        # Original features in deep MPC: xi = torch.cat((x, d, r, symin, symax, umin, umax), 1)
        y_obsv = np.concatenate([np.array(self.y).flatten(), self.d.flatten(), self.reference,
                                 np.array(self.s_ymin).flatten(), np.array(self.s_ymax).flatten(),
                                 np.array(self.s_umin).flatten(), np.array(self.s_umax).flatten()])
        x_obsv = np.concatenate([np.array(self.x).flatten(), self.d.flatten(), self.reference,
                                 np.array(self.s_ymin).flatten(), np.array(self.s_ymax).flatten(),
                                 np.array(self.s_umin).flatten(), np.array(self.s_umax).flatten()])
        # y_obsv = np.concatenate((np.array(self.y).flatten(), self.d.flatten(), self.reference))
        # x_obsv = np.concatenate((np.array(self.x).flatten(), self.d.flatten(), self.reference))
        observation = (y_obsv, x_obsv)[self.fully_observable].astype(np.float32)
        if self.obs_norm is True:
            observation = self.obs_normalize(observation)
        self.X_out = np.concatenate([self.X_out, np.array(self.x.reshape([1, 4]))])
        self.action = action
        # Constraint slacks carried into the NEXT observation/reward.
        self.s_ymin = self.ReLU(-self.y + self.ymin_val)
        self.s_ymax = self.ReLU(self.y - self.ymax_val)
        self.s_umin = self.ReLU(-action + self.umin_val)
        self.s_umax = self.ReLU(action - self.umax_val)
        return np.array(observation).flatten(), self.reward(), self.tstep == self.X.shape[0], {'xout': self.X_out}

    def reward(self):
        """Negative weighted tracking error minus (alpha_con-scaled)
        constraint/effort penalties."""
        # return -np.mean((np.array(self.y - self.Y[self.tstep]))**2)
        con_penalties = self.Q_u * np.mean((np.array(self.action))**2) \
                        + self.Q_con_y * np.mean((np.array(self.s_ymin))**2) \
                        + self.Q_con_y * np.mean((np.array(self.s_ymax))**2) \
                        + self.Q_con_u * np.mean((np.array(self.s_umin))**2) \
                        + self.Q_con_u * np.mean((np.array(self.s_umax))**2)
        r = -self.Q_ref * np.mean((np.array(self.y - self.Ref[2016+self.tstep]))**2) \
            - self.alpha_con*con_penalties
        return r

    def reset(self, dset='train'):
        """Reset to a random initial state around 15 and return the first
        observation.

        :param dset: 'train' | 'dev' | 'test' — selects the starting index.
        """
        self.x = 15+5*np.random.randn(self.nx).reshape([-1,1])
        self.y = self.x[3].reshape([-1,1])
        self.reference = self.Ref[2016+self.init_idx[dset]]
        self.d = self.D[2016+self.init_idx[dset]]
        self.tstep = self.init_idx[dset]
        # NOTE(review): __init__/step() compute this slack as
        # ReLU(-self.y + ymin_val); the missing minus sign on self.y here
        # looks unintentional — confirm.
        self.s_ymin = self.ReLU(self.y + self.ymin_val)
        self.s_ymax = self.ReLU(self.y - self.ymax_val)
        y_obsv = np.concatenate([np.array(self.y).flatten(), self.d.flatten(), self.reference,
                                 np.array(self.s_ymin).flatten(), np.array(self.s_ymax).flatten(),
                                 np.array([self.umin_val]), np.array([self.umax_val])])
        x_obsv = np.concatenate([np.array(self.x).flatten(), self.d.flatten(), self.reference,
                                 np.array(self.s_ymin).flatten(), np.array(self.s_ymax).flatten(),
                                 np.array([self.umin_val]), np.array([self.umax_val])])
        # y_obsv = np.concatenate((self.y.flatten(), self.reference))
        # x_obsv = np.concatenate((self.x.flatten(), self.reference))
        observation = (y_obsv, x_obsv)[self.fully_observable].astype(np.float32)
        self.X_out = np.empty(shape=[0, 4])
        return np.array(observation).flatten()

    def render(self, mode='human'):
        # Rendering is not implemented; the call is only logged to stdout.
        print('render')
def plot_control(R, Y, U, D, Ymax=None, Ymin=None, Umax=None, Umin=None, figname='test.png'):
    """Plot reference vs. output, control input and disturbance traces
    in three stacked subplots and save the figure to *figname*.

    Optional bound arrays (Ymax/Ymin/Umax/Umin) are drawn as dashed black
    lines on the corresponding panel when provided.
    """
    fig, axes = plt.subplots(3, 1, figsize=(8, 8))
    axes[0].plot(R, '--', label='R')
    axes[0].plot(Y, label='Y')
    if Ymax is not None:
        axes[0].plot(Ymax, 'k--')
    if Ymin is not None:
        axes[0].plot(Ymin, 'k--')
    axes[0].set(ylabel='Y')
    axes[1].plot(U, label='U')
    if Umax is not None:
        axes[1].plot(Umax, 'k--')
    if Umin is not None:
        axes[1].plot(Umin, 'k--')
    axes[1].set(ylabel='U')
    axes[2].plot(D, label='D')
    axes[2].set(ylabel='D')
    plt.tight_layout()
    plt.savefig(figname)
def plot_trajectories(traj1, traj2, labels, figname='test.png'):
    """Render one stacked subplot per trajectory and save to *figname*.

    Each row plots an entry of *traj1* (solid, 'True') against the matching
    entry of *traj2* (dashed, 'Pred') when the latter is not None.  The x
    axis is re-labelled in days assuming 288 samples per day, starting at
    day 7; only the bottom panel keeps its x-axis label.
    """
    fig, axes = plt.subplots(len(traj1), 1)
    for row, (truth, pred, label) in enumerate(zip(traj1, traj2, labels)):
        panel = axes[row]
        if pred is None:
            panel.plot(truth)
        else:
            panel.plot(truth.flatten(), label='True')
            panel.plot(pred.flatten(), '--', label='Pred')
        steps = range(0, truth.shape[0] + 1, 288)
        days = np.array(list(range(len(steps)))) + 7
        panel.set(xticks=steps,
                  xticklabels=days,
                  ylabel=label,
                  xlim=(0, len(truth)))
        # Two consecutive calls: the second overrides the first, leaving the
        # tick labels visible on every row (kept verbatim from the original).
        panel.tick_params(labelbottom=False)
        panel.tick_params(labelbottom=True)
    # Only the last (bottom) panel gets the x-axis title.
    panel.set_xlabel('Day')
    plt.tight_layout()
    plt.savefig(figname)
def parse_args():
    """Build and parse the command-line arguments for the evaluation run.

    Returns the populated argparse.Namespace.  NOTE(review): both '--gpu'
    (type=int) and '-gpu' (type=str) are registered and share the dest
    ``gpu``; whichever flag appears on the command line sets args.gpu —
    confirm only one was intended.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, default=None,
                        help="Gpu to use")
    # OPTIMIZATION PARAMETERS
    # Reference signatures of the stable-baselines algorithms, kept for
    # quick lookup of the tunable hyper-parameters:
    # stable_baselines.ppo2.PPO2(policy, env, gamma=0.99, n_steps=128, ent_coef=0.01,
    #                            learning_rate=0.00025, vf_coef=0.5, max_grad_norm=0.5,
    #                            lam=0.95, nminibatches=4, noptepochs=4, cliprange=0.2,
    #                            cliprange_vf=None, verbose=0, tensorboard_log=None,
    #                            _init_setup_model=True, policy_kwargs=None,
    #                            full_tensorboard_log=False, seed=None, n_cpu_tf_sess=None)
    # https://arxiv.org/abs/1707.06347
    # stable_baselines.a2c.A2C(policy, env, gamma=0.99, n_steps=5, vf_coef=0.25, ent_coef=0.01,
    #                          max_grad_norm=0.5, learning_rate=0.0007, alpha=0.99, momentum=0.0,
    #                          epsilon=1e-05, lr_schedule='constant', verbose=0, tensorboard_log=None,
    #                          _init_setup_model=True, policy_kwargs=None, full_tensorboard_log=False,
    #                          seed=None, n_cpu_tf_sess=None)
    # https://arxiv.org/abs/1602.01783
    # stable_baselines.acktr.ACKTR(policy, env, gamma=0.99, nprocs=None,
    #                              n_steps=20, ent_coef=0.01, vf_coef=0.25,
    #                              vf_fisher_coef=1.0, learning_rate=0.25, max_grad_norm=0.5,
    #                              kfac_clip=0.001, lr_schedule='linear', verbose=0,
    #                              tensorboard_log=None, _init_setup_model=True,
    #                              async_eigen_decomp=False, kfac_update=1, gae_lambda=None,
    #                              policy_kwargs=None, full_tensorboard_log=False, seed=None, n_cpu_tf_sess=1)
    # https://arxiv.org/abs/1708.05144
    opt_group = parser.add_argument_group('OPTIMIZATION PARAMETERS')
    opt_group.add_argument('-epochs', type=int, default=5)
    opt_group.add_argument('-lr', type=float, default=0.01,
                           help='Step size for gradient descent.')
    opt_group.add_argument('-alg', type=str, choices=['PPO2', 'A2C', 'ACKTR'], default='A2C')
    # See the NOTE(review) in the docstring about this duplicate gpu flag.
    parser.add_argument('-gpu', type=str, default=None,
                        help="Gpu to use")
    #################
    # DATA PARAMETERS
    data_group = parser.add_argument_group('DATA PARAMETERS')
    data_group.add_argument('-nsteps', type=int, default=128,
                            help='Number of steps for open loop during training.')
    data_group.add_argument('-constrained', type=float, default=1.0,
                            help='Constrained yes or no.')
    ##################
    # MODEL PARAMETERS
    model_group = parser.add_argument_group('MODEL PARAMETERS')
    # model_group.add_argument('-num_layers', type=int, default=1)
    model_group.add_argument('-bias', action='store_true', help='Whether to use bias in the neural network models.')
    model_group.add_argument('-nx_hidden', type=int, default=10,
                             help='Number of hidden units.')
    ####################
    # LOGGING PARAMETERS
    log_group = parser.add_argument_group('LOGGING PARAMETERS')
    log_group.add_argument('-savedir', type=str, default='test',
                           help="Where should your trained model be saved")
    log_group.add_argument('-modeldir', type=str, default='best_model',
                           help="Best saved models from previous runs")
    log_group.add_argument('-verbosity', type=int, default=10,
                           help="How many epochs in between status updates")
    log_group.add_argument('-exp', default='test',
                           help='Will group all run under this experiment name.')
    log_group.add_argument('-location', default='mlruns',
                           help='Where to write mlflow experiment tracking stuff')
    log_group.add_argument('-run', default='test',
                           help='Some name to tell what the experiment run was about.')
    log_group.add_argument('-logger', choices=['mlflow', 'wandb', 'stdout'],
                           help='Logging setup to use')
    return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
####################################
###### DATA SETUP
####################################
env = ToyBuilding()
# env = DummyVecEnv([ToyBuilding for i in range(4)])
check_env(env)
env.alpha_con = args.constrained
n_hidden = args.nx_hidden
algs = {'PPO2': PPO2, 'A2C': A2C, 'ACKTR': ACKTR}
policies = {'PPO2': MlpPolicy, 'A2C': MlpPolicy, 'ACKTR': MlpPolicy}
policy_kwargs = {'layers': [n_hidden, n_hidden]}
model = algs[args.alg](policies[args.alg], env, n_steps=args.nsteps, verbose=0, learning_rate=args.lr, policy_kwargs=policy_kwargs)
    def openloop(dset='train', x0=None):
        """Roll out the current policy for 2016 steps on dataset split *dset*.

        If *x0* is given, start from that state concatenated with the env
        reference instead of resetting the environment.

        Returns (mse_ref_open, references, states, actions, disturbances)
        where mse_ref_open is the negated mean reward over the rollout.
        """
        if x0 is None:
            obs = env.reset(dset)
        else:
            obs = np.concatenate([x0, env.reference])
        rewards = []
        states = []
        disturbances = []
        references = []
        actions = []
        # Observation layout: [states (ny) | disturbances (nd) | reference].
        if env.fully_observable:
            ny = env.nx
        else:
            ny = env.ny
        for j in range(2016):
            action, _states = model.predict(obs)
            # Clamp the policy output to the normalized action range [0, 1].
            if action < 0:
                action = 0
            elif action > 1:
                action = 1
            env.obs_norm = True
            obs, reward, dones, info = env.step(action)
            # Rewind the env clock and replay the same step with normalization
            # off, so obs_denorm holds physical units for plotting/metrics.
            env.tstep = env.tstep - 1
            # denormalize states and actions for plotting
            env.obs_norm = False
            obs_denorm, _, _, _ = env.step(action)
            rewards.append(reward)
            actions.append(env.action)
            states.append(obs_denorm[0:ny])
            disturbances.append(obs_denorm[ny:ny+env.nd])
            references.append(obs_denorm[ny+env.nd])
        # Reward is negative tracking error, so its negated mean is an MSE-like score.
        mse_ref_open = -np.mean(np.array(rewards))
        return mse_ref_open, np.array(references), np.array(states), np.array(actions), np.array(disturbances)
    ##################################
    # SIMULATE
    ##################################
    Eval_runs = 20  # number of randomized closed-loop simulations, Paper value: 20
    param_uncertainty = True
    add_uncertainty = True
    show_plots = False
    # Additive noise on the dynamics (w) — zero variance disables it.
    if add_uncertainty:
        env.w_mean = 0
        env.w_var = 0.1
    else:
        env.w_mean = 0
        env.w_var = 0.0
    # Multiplicative/parametric model uncertainty (theta).
    if param_uncertainty:
        env.theta_mean = 0
        env.theta_var = 0.01
    else:
        env.theta_mean = 0
        env.theta_var = 0.00
    # Load best model
    # best_model = model.load(os.path.join(args.modeldir, "RL_model_best_ACKTR.h5"))
    # https: // stable - baselines.readthedocs.io / en / master / modules / base.html
    best_model = model.load(os.path.join(args.modeldir, "RL_model_best_ACKTR_constr.h5"))
    # simulate best model
    model = best_model
    # NOTE(review): CPU_mean_time / CPU_max_time are allocated but never
    # assigned in this section — confirm whether timing stats were dropped.
    CPU_mean_time = np.zeros(Eval_runs)
    CPU_max_time = np.zeros(Eval_runs)
    MAE_constr_run = np.zeros(Eval_runs)
    MSE_ref_run = np.zeros(Eval_runs)
    MA_energy_run = np.zeros(Eval_runs)
    for run in range(0, Eval_runs):
        preds = []
        refs = []
        mses = []
        U = []
        D = []
        start_step_time = time.time()
        # Index 0/1/2 of the per-split lists = train/dev/test respectively.
        for dset in ['train', 'dev', 'test']:
            mse, ref, pred, actions, disturb = openloop(dset)
            preds.append(pred)
            refs.append(ref)
            mses.append(mse)
            U.append(actions)
            D.append(disturb)
        eval_time = time.time() - start_step_time
        # Constant constraint/action bound trajectories for plotting and MAE.
        Ymax = env.ymax_val*np.ones([2016,1])
        Ymin = env.ymin_val * np.ones([2016,1])
        Umax = env.umax_val * np.ones([2016,1])
        Umin = env.umin_val * np.ones([2016,1])
        # closed loop simulations plots
        if show_plots:
            plot_control(R=refs[0], Y=preds[0][:, 3], U=U[0], D=D[0],
                         Ymax=Ymax, Ymin=Ymin, Umax=Umax, Umin=Umin,
                         figname=os.path.join(args.savedir, 'control_train.png'))
            plot_control(R=refs[2], Y=preds[2][:, 3], U=U[2], D=D[2],
                         Ymax=Ymax, Ymin=Ymin, Umax=Umax, Umin=Umin,
                         figname=os.path.join(args.savedir, 'control_test.png'))
        # Mean constraint violation above Ymax plus below Ymin on the test split.
        MAE_constr_run[run] = np.mean(np.maximum((preds[2][:, 3] - Ymax.squeeze()), 0)) + \
                              np.mean(np.maximum((-preds[2][:, 3] + Ymin.squeeze()), 0))
        # NOTE(review): test predictions (preds[2]) are compared against the
        # TRAIN references refs[0] — refs[2] looks intended here; confirm.
        MSE_ref_run[run] = np.mean(np.square(preds[2][:, 3] - refs[0]))
        MA_energy_run[run] = np.mean(np.absolute(U[2]))
    # Aggregate metrics over all randomized runs.
    MSE_ref = np.mean(MSE_ref_run)
    MA_energy = np.mean(MA_energy_run)
    MAE_constr = np.mean(MAE_constr_run)
|
[
"jan.drgona@pnnl.gov"
] |
jan.drgona@pnnl.gov
|
ac08a963c69dab17953608a66b69822aa0e3b7e1
|
28e52f6df9a2fe3df78022dc4162a15aecb0e5b9
|
/venv/bin/django-admin.py
|
94fad1eafff3fea0d6dca30ebd3dffc03d597073
|
[] |
no_license
|
garonzhang/genealogy_wechat
|
4b952b307e0e682eceb23521cc5ea8784c368954
|
1cf3f5b29acb3192e57eb47fcb9b22263437f147
|
refs/heads/master
| 2020-03-22T19:31:24.057225
| 2018-11-03T01:10:38
| 2018-11-03T01:10:38
| 140,534,800
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 138
|
py
|
#!/root/wechat/venv/bin/python3
# Virtualenv entry point for Django's command-line utility: forwards
# sys.argv (e.g. "migrate", "runserver") to Django's command dispatcher.
from django.core import management
if __name__ == "__main__":
    management.execute_from_command_line()
|
[
"garonzhang@gmail.com"
] |
garonzhang@gmail.com
|
3a4c7f9d712049cf02648c56d53ff66b940cd9fb
|
05d692469305dd1adb9ebc46080525bb4515b424
|
/Exception handling/tryfinally5.py
|
7a605945006ab61d29eedb42aaf62afea001654d
|
[] |
no_license
|
rajdharmkar/pythoncode
|
979805bc0e672f123ca1460644a4bd71d7854fd5
|
15b758d373f27da5680a711bf12c07e86758c447
|
refs/heads/master
| 2020-08-07T18:30:55.575632
| 2019-10-14T12:46:09
| 2019-10-14T12:46:09
| 213,551,766
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 232
|
py
|
# Python 2 demo: the finally block runs whether or not the try body raises.
# NOTE(review): open() is inside the try, so if open itself fails, 'fob' is
# never bound and the finally's fob.close() raises NameError — open before
# the try (or use a with-statement) in real code.
try:
    fob = open ( 'test.txt', 'w' )
    fob.write ( "It's my test file to verify try-finally in exception handling!!"
                )
    print 'try block executed'
finally:
    fob.close ()
    print 'finally block executed'
|
[
"rajdharmkar@gmail.com"
] |
rajdharmkar@gmail.com
|
e534e727ba2b594e582bdc4dfeba77f66ca78cca
|
57aef2eb6dc11d1e795475fe283aecbc494910bc
|
/sigmoid_draw.py
|
223584f87d20a69d7f022128facf410b1a637c8e
|
[] |
no_license
|
junyeon-no/tensorflow_study
|
2cd4ccd1cdcb4a0ae1cdd06599658d94e13334b2
|
ea70d8f94f1174084738072091190ed5e871e14e
|
refs/heads/master
| 2022-12-22T20:14:29.165576
| 2020-10-02T00:54:33
| 2020-10-02T00:54:33
| 296,458,237
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 427
|
py
|
import numpy as np
import matplotlib.pyplot as plt
from step_func_draw import step_function
def sigmoid(x):
    """Logistic sigmoid 1 / (1 + e**-x); works elementwise on ndarrays."""
    decay = np.exp(-x)
    return 1.0 / (1.0 + decay)
if __name__ == "__main__":
    # Plot the sigmoid and the step function on the same axes for comparison.
    x = np.arange(-5.0, 5.0 , 0.1)
    y = sigmoid(x)
    plt.plot(x, y, label = "sigmoid")
    plt.ylim(-0.1, 1.1)  # pad above/below the [0, 1] range of both curves
    x = np.arange(-5.0, 5.0, 0.1)
    y = step_function(x)
    # BUGFIX: legend label was misspelled "setp".
    plt.plot(x, y, linestyle = "--", label = "step")
    plt.legend()
    plt.show()
|
[
"whwns5203@naver.com"
] |
whwns5203@naver.com
|
97538fec829806a6dc0663c869c8d080db247647
|
f62aa26461e381435c86019ca2f502d10ff75b88
|
/catalog/migrations/0006_auto_20170121_1008.py
|
aa8cfe9522994670ffaf090ae699983f1dd31edd
|
[] |
no_license
|
viktortat/CAP
|
edb2aef09169d9bcf04b541682d8dcb067edf1c6
|
60221d8fa1d1ccb209e40001554cb004480dd2d5
|
refs/heads/master
| 2021-06-12T13:03:33.917809
| 2017-04-30T19:58:38
| 2017-04-30T19:58:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 688
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-21 07:08
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated: adds catalogsite.pub_date (defaulting to now) and
    gives catalogsite.price a default of 0."""

    dependencies = [
        ('catalog', '0005_auto_20170113_0259'),
    ]

    operations = [
        migrations.AddField(
            model_name='catalogsite',
            name='pub_date',
            field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='Дата'),
        ),
        migrations.AlterField(
            model_name='catalogsite',
            name='price',
            field=models.FloatField(default=0),
        ),
    ]
|
[
"l2maximum@mail.ru"
] |
l2maximum@mail.ru
|
66fcec0a1f396ab7a1b7c0d07f995827b2518a3f
|
b482536080ffcb0194c7691464ccbf32be9fcbb9
|
/Modules.py
|
ccffe60cb565105384a2178d74bcd5768e1b8cdc
|
[
"MIT"
] |
permissive
|
CristiSima/StellarisShipBuilder
|
f29f3d9f104773f7a0c7b52654950699e8cef1e9
|
19d40f84c704bad022b9477f51174245aee7dcaa
|
refs/heads/master
| 2021-11-10T17:54:13.817827
| 2021-11-02T11:49:23
| 2021-11-02T11:49:23
| 191,032,766
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,151
|
py
|
class Module:
    """Base class for a ship section (bow/core/stern variant).

    A module owns up to six weapon slots (Weapon1..Weapon6) and six utility
    slots (Utility1..Utility6); unused positions stay None.  Subclasses fill
    the positions with Slot objects in their __init__.  Save/Load/Print/Build
    visit the occupied slots in fixed positional order; Save and Load keep the
    weapon group and the utility group separated by a blank line in the file.
    """

    # Fixed attribute names, in save-file order.  Kept as positional
    # attributes (not a list) so existing subclass code like
    # ``self.Weapon1 = Slot(...)`` keeps working unchanged.
    _WEAPON_ATTRS = ("Weapon1", "Weapon2", "Weapon3", "Weapon4", "Weapon5", "Weapon6")
    _UTILITY_ATTRS = ("Utility1", "Utility2", "Utility3", "Utility4", "Utility5", "Utility6")

    def __init__(self, Type, Name, Ship):
        # Type is the section slot code (e.g. "MCC"); Name is the display name.
        self.Name = Name
        self.Type = Type
        self.Ship = Ship
        # All twelve slot positions start empty.
        for attr in self._WEAPON_ATTRS + self._UTILITY_ATTRS:
            setattr(self, attr, None)

    def _filled(self, attrs):
        """Yield the occupied slots among *attrs*, preserving positional order."""
        for attr in attrs:
            slot = getattr(self, attr)
            if slot:
                yield slot

    def Save(self, File):
        """Write weapon slots, a separator line, utility slots, a separator line."""
        for slot in self._filled(self._WEAPON_ATTRS):
            slot.Save(File)
        File.write("\n")
        for slot in self._filled(self._UTILITY_ATTRS):
            slot.Save(File)
        File.write("\n")

    def Load(self, File):
        """Read the slots back in the same order Save wrote them."""
        for slot in self._filled(self._WEAPON_ATTRS):
            slot.Load(File)
        File.readline()  # consume the separator written between the two groups
        for slot in self._filled(self._UTILITY_ATTRS):
            slot.Load(File)

    def Print(self):
        """Print the module name, then its weapon and utility slots."""
        print(self.Name)
        print()
        for slot in self._filled(self._WEAPON_ATTRS):
            slot.Print()
        print()
        for slot in self._filled(self._UTILITY_ATTRS):
            slot.Print()
        print()
        print()

    def Build(self):
        """Propagate a stats rebuild to every occupied slot (weapons first)."""
        for slot in self._filled(self._WEAPON_ATTRS + self._UTILITY_ATTRS):
            slot.Build()
class Slot:
    """A single equipment socket on a module or ship.

    Type is a two-letter code "AB": A selects the family (W=Weapon,
    U=Utility, M=Module) and B the size/subtype.  Thing holds the equipped
    instance, or None while the slot is empty.
    """
    def __init__(self,Type,Ship):
        self.Type=Type
        self.Ship=Ship
        ### AB
        ### A=[W(Weapon),U(Utility),M(Module)]
        ### B: Specific subtype
        ###
        ###
        self.Thing=None
    def Equip(self,Thing):
        """Instantiate class *Thing* into this slot if its Type code matches.

        NOTE(review): passing None (or a mismatched Type) is silently
        ignored, and the Thing=="None" branch compares against the STRING
        "None", which only matters if callers pass that literal — confirm.
        """
        if(Thing):
            if(Thing=="None"):
                return
            if(self.Type==Thing.Type):
                self.Thing=Thing(self.Ship)
                self.Ship.Build()  # recompute ship stats after equipping
    def Target(self):
        """Return the catalogue (Weapons./Utilitys. group) for this slot's code.

        NOTE(review): codes not listed here (e.g. "WH", "WX") fall through
        to the ERRRROR prints and return None — confirm whether hangar and
        spinal-mount slots need entries.
        """
        if(self.Type[0]=="W"):
            if(self.Type[1]=="S"):
                return Weapons.Small
            if(self.Type[1]=="M"):
                return Weapons.Medium
            if(self.Type[1]=="L"):
                return Weapons.Large
            if(self.Type[1]=="P"):
                return Weapons.Point
            if(self.Type[1]=="G"):
                return Weapons.Explosives
        if(self.Type[0]=="U"):
            if(self.Type[1]=="S"):
                return Utilitys.Small
            if(self.Type[1]=="M"):
                return Utilitys.Medium
            if(self.Type[1]=="L"):
                return Utilitys.Large
            if(self.Type[1]=="A"):
                return Utilitys.Auxilary
        # Unhandled slot code: loudly flag it and implicitly return None.
        print("ERRRROR")
        print("ERRRROR")
        print("ERRRROR")
        print("ERRRROR")
        print("ERRRROR")
    def Save(self,File):
        """Write the equipped class path (or "None"); module slots also save their contents."""
        # Dead earlier format kept for reference (string literal, no effect):
        '''
        if(self.Type[0]=='M'):
            self.Thing.Save(File)
        else:
            if(self.Thing):
                File.write(str(type(self.Thing))[8:-2]+"\n")
            else:
                File.write("None"+"\n")
        '''
        if(self.Thing):
            # str(type(x))[8:-2] strips "<class '" and "'>" to leave the dotted path.
            File.write(str(type(self.Thing))[8:-2]+"\n")
            if(self.Type[0]=='M'):
                self.Thing.Save(File)
        else:
            File.write("None"+'\n')
    def Print(self):
        """Print the equipped thing, or "None" for an empty slot."""
        if(self.Thing):
            self.Thing.Print()
        else:
            print("None")
    def Load(self,File):
        """Read one line naming a class and equip it; module slots then load their contents.

        SECURITY NOTE(review): eval() executes arbitrary text from the save
        file — acceptable only for trusted local saves, never untrusted input.
        """
        None
        '''
        '''
        self.Equip(eval(File.readline()[:-1]))
        if(self.Thing and self.Type[0]=="M"):
            self.Thing.Load(File)
    def Build(self):
        """Forward a stats rebuild to the equipped thing, if any."""
        if(self.Thing):
            self.Thing.Build()
'''
Type="TYPE"
Name="Artillery"
Info=["Weapons: ","Utilitys: "]
def __init__(self,Ship):
Module.__init__(self,"TYPE","Artillery",Ship)
self.Weapon=Slot("W",Ship)
self.Utility=Slot("U",Ship)
'''
class Corvete:
class Core:
Type="MCC"
class Interceptor(Module):
Type="MCC"
Name="Interceptor"
Info=["Weapons: 3S","Utility: 3S 1A"]
def __init__(self,Ship):
Module.__init__(self,"MCC","Interceptor",Ship)
self.Weapon1=Slot("WS",Ship)
self.Weapon2=Slot("WS",Ship)
self.Weapon3=Slot("WS",Ship)
self.Utility1=Slot("US",Ship)
self.Utility2=Slot("US",Ship)
self.Utility3=Slot("US",Ship)
self.Utility4=Slot("UA",Ship)
class MissileBoat(Module):
Type="MCC"
Name="Missile Boat"
Info=["Weapons: 1S 1G","Utility: 3S 1A"]
def __init__(self,Ship):
Module.__init__(self,"MCC","Missile Boat",Ship)
self.Weapon1=Slot("WS",Ship)
self.Weapon2=Slot("WG",Ship)
self.Utility1=Slot("US",Ship)
self.Utility2=Slot("US",Ship)
self.Utility3=Slot("US",Ship)
self.Utility4=Slot("UA",Ship)
class PicketShip(Module):
Type="MCC"
Name="Picket Ship"
Info=["Weapons: 2S 1P","Utility: 3S 1A"]
def __init__(self,Ship):
Module.__init__(self,"MCC","Picket Ship",Ship)
self.Weapon1=Slot("WS",Ship)
self.Weapon2=Slot("WS",Ship)
self.Weapon3=Slot("WP",Ship)
self.Utility1=Slot("US",Ship)
self.Utility2=Slot("US",Ship)
self.Utility3=Slot("US",Ship)
self.Utility4=Slot("UA",Ship)
Variants=[Interceptor,MissileBoat,PicketShip]
Variants=[Core]
class Destroyer:
class Bow:
Type="MDB"
class Artillery(Module):
Type="MDB"
Name="Artillery"
Info=["Weapons: 1L","Utilitys: 6S"]
def __init__(self,Ship):
Module.__init__(self,"MDB","Artillery",Ship)
self.Weapon1=Slot("WL",Ship)
self.Utility1=Slot("US",Ship)
self.Utility2=Slot("US",Ship)
self.Utility3=Slot("US",Ship)
self.Utility4=Slot("US",Ship)
self.Utility5=Slot("US",Ship)
self.Utility6=Slot("US",Ship)
class Gunship(Module):
Type="MDB"
Name="Gunship"
Info=["Weapons: 2S 1M","Utilitys: 6S"]
def __init__(self,Ship):
Module.__init__(self,"MDB","Gunship",Ship)
self.Weapon1=Slot("WM",Ship)
self.Weapon2=Slot("WS",Ship)
self.Weapon3=Slot("WS",Ship)
self.Utility1=Slot("US",Ship)
self.Utility2=Slot("US",Ship)
self.Utility3=Slot("US",Ship)
self.Utility4=Slot("US",Ship)
self.Utility5=Slot("US",Ship)
self.Utility6=Slot("US",Ship)
class PicketShip(Module):
Type="MDB"
Name="Picket Ship"
Info=["Weapons: 2S 1P","Utilitys: 6S"]
def __init__(self,Ship):
Module.__init__(self,"MDB","Picket Ship",Ship)
self.Weapon1=Slot("WP",Ship)
self.Weapon2=Slot("WS",Ship)
self.Weapon3=Slot("WS",Ship)
self.Utility1=Slot("US",Ship)
self.Utility2=Slot("US",Ship)
self.Utility3=Slot("US",Ship)
self.Utility4=Slot("US",Ship)
self.Utility5=Slot("US",Ship)
self.Utility6=Slot("US",Ship)
Variants=[Artillery,Gunship,PicketShip]
class Stern:
Type="MDS"
class Gunship(Module):
Type="MDS"
Name="Gunship"
Info=["Weapons: 1M","Utilitys: 1A"]
def __init__(self,Ship):
Module.__init__(self,"MDS","Gunship",Ship)
self.Weapon1=Slot("WM",Ship)
self.Utility1=Slot("UA",Ship)
class Interceptor(Module):
Type="MDS"
Name="Interceptor"
Info=["Weapons: 2S","Utilitys: 1A"]
def __init__(self,Ship):
Module.__init__(self,"MDS","Interceptor",Ship)
self.Weapon1=Slot("WS",Ship)
self.Weapon2=Slot("WS",Ship)
self.Utility1=Slot("UA",Ship)
class PicketShip(Module):
Type="MDS"
Name="Picket Ship"
Info=["Weapons: 2P","Utilitys: 1A"]
def __init__(self,Ship):
Module.__init__(self,"MDS","Picket Ship",Ship)
self.Weapon1=Slot("WP",Ship)
self.Weapon2=Slot("WP",Ship)
self.Utility1=Slot("UA",Ship)
Variants=[Gunship,Interceptor,PicketShip]
Variants=[Bow,Stern]
class Cruiser:
class Bow:
Type="MCB"
class Artillery(Module):
Type="MCB"
Name="Artillery"
Info=["Weapons: 1L ","Utilitys: 4M"]
def __init__(self,Ship):
Module.__init__(self,"MCB","Artillery",Ship)
self.Weapon1=Slot("WL",Ship)
self.Utility1=Slot("UM",Ship)
self.Utility2=Slot("UM",Ship)
self.Utility3=Slot("UM",Ship)
self.Utility4=Slot("UM",Ship)
class Broadside(Module):
Type="MCB"
Name="Broadside"
Info=["Weapons: 2M ","Utilitys: 4M"]
def __init__(self,Ship):
Module.__init__(self,"MCB","Broadside",Ship)
self.Weapon1=Slot("WM",Ship)
self.Weapon2=Slot("WM",Ship)
self.Utility1=Slot("UM",Ship)
self.Utility2=Slot("UM",Ship)
self.Utility3=Slot("UM",Ship)
self.Utility4=Slot("UM",Ship)
class Torpedo(Module):
Type="MCB"
Name="Torpedo"
Info=["Weapons: 2S 1G ","Utilitys: 4M"]
def __init__(self,Ship):
Module.__init__(self,"MCB","Torpedo",Ship)
self.Weapon1=Slot("WS",Ship)
self.Weapon2=Slot("WS",Ship)
self.Weapon3=Slot("WG",Ship)
self.Utility1=Slot("UM",Ship)
self.Utility2=Slot("UM",Ship)
self.Utility3=Slot("UM",Ship)
self.Utility4=Slot("UM",Ship)
Variants=[Artillery,Broadside,Torpedo]
class Core:
Type="MCC"
class Artillery(Module):
Type="MCC"
Name="Artillery"
Info=["Weapons: 1L 1M","Utilitys: 4M"]
def __init__(self,Ship):
Module.__init__(self,"MCC","Artillery",Ship)
self.Weapon1=Slot("WL",Ship)
self.Weapon2=Slot("WM",Ship)
self.Utility1=Slot("UM",Ship)
self.Utility2=Slot("UM",Ship)
self.Utility3=Slot("UM",Ship)
self.Utility4=Slot("UM",Ship)
class Broadside(Module):
Type="MCC"
Name="Broadside"
Info=["Weapons: 3M","Utilitys: 4M"]
def __init__(self,Ship):
Module.__init__(self,"MCC","Broadside",Ship)
self.Weapon1=Slot("WM",Ship)
self.Weapon2=Slot("WM",Ship)
self.Weapon3=Slot("WM",Ship)
self.Utility1=Slot("UM",Ship)
self.Utility2=Slot("UM",Ship)
self.Utility3=Slot("UM",Ship)
self.Utility4=Slot("UM",Ship)
class Hangar(Module):
Type="MCC"
Name="Hangar"
Info=["Weapons: 2P 1H","Utilitys: 4M"]
def __init__(self,Ship):
Module.__init__(self,"MCC","Hangar",Ship)
self.Weapon1=Slot("WP",Ship)
self.Weapon2=Slot("WP",Ship)
self.Weapon3=Slot("WH",Ship)
self.Utility1=Slot("UM",Ship)
self.Utility2=Slot("UM",Ship)
self.Utility3=Slot("UM",Ship)
self.Utility4=Slot("UM",Ship)
class Torpedo(Module):
Type="MCC"
Name="Torpedo"
Info=["Weapons: 2S 2G","Utilitys: 4M"]
def __init__(self,Ship):
Module.__init__(self,"MCC","Torpedo",Ship)
self.Weapon1=Slot("WS",Ship)
self.Weapon2=Slot("WS",Ship)
self.Weapon3=Slot("WG",Ship)
self.Weapon4=Slot("WG",Ship)
self.Utility1=Slot("UM",Ship)
self.Utility2=Slot("UM",Ship)
self.Utility3=Slot("UM",Ship)
self.Utility4=Slot("UM",Ship)
Variants=[Artillery,Broadside,Hangar,Torpedo]
class Stern:
Type="MCS"
class Broadside(Module):
Type="MCS"
Name="Broadside"
Info=["Weapons: 1M","Utilitys: 2A"]
def __init__(self,Ship):
Module.__init__(self,"MCC","Broadside",Ship)
self.Weapon1=Slot("WM",Ship)
self.Utility1=Slot("UA",Ship)
self.Utility2=Slot("UA",Ship)
class Gunship(Module):
Type="MCC"
Name="Gunship"
Info=["Weapons: 2S","Utilitys: 2A"]
def __init__(self,Ship):
Module.__init__(self,"MCC","Gunship",Ship)
self.Weapon1=Slot("WS",Ship)
self.Weapon2=Slot("WS",Ship)
self.Utility1=Slot("UA",Ship)
self.Utility2=Slot("UA",Ship)
Variants=[Broadside,Gunship]
Variants=[Bow,Core,Stern]
class Battleship:
class Bow:
Type="MBB"
class Artillery(Module):
Type="MBB"
Name="Artillery"
Info=["Weapons: 2L","Utilitys: 3L"]
def __init__(self,Ship):
Module.__init__(self,"MBB","Artillery",Ship)
self.Weapon1=Slot("WL",Ship)
self.Weapon2=Slot("WL",Ship)
self.Utility1=Slot("UL",Ship)
self.Utility2=Slot("UL",Ship)
self.Utility3=Slot("UL",Ship)
class Broadside(Module):
Type="MBB"
Name="Broadside"
Info=["Weapons: 2S 1M 1L","Utilitys: 2A"]
def __init__(self,Ship):
Module.__init__(self,self.Type,self.Name,Ship)
self.Weapon1=Slot("WS",Ship)
self.Weapon2=Slot("WS",Ship)
self.Weapon3=Slot("WM",Ship)
self.Weapon4=Slot("WL",Ship)
self.Utility1=Slot("UL",Ship)
self.Utility2=Slot("UL",Ship)
self.Utility3=Slot("UL",Ship)
class Hangar(Module):
Type="MBB"
Name="Hangar"
Info=["Weapons: 1M 2P 1H","Utilitys: 3L"]
def __init__(self,Ship):
Module.__init__(self,self.Type,self.Name,Ship)
self.Weapon1=Slot("WM",Ship)
self.Weapon2=Slot("WP",Ship)
self.Weapon3=Slot("WP",Ship)
self.Weapon4=Slot("WH",Ship)
self.Utility1=Slot("UL",Ship)
self.Utility2=Slot("UL",Ship)
self.Utility3=Slot("UL",Ship)
class SpinalMount(Module):
Type="MBB"
Name="Spinal Mount"
Info=["Weapons: 1X","Utilitys: 3L"]
def __init__(self,Ship):
Module.__init__(self,self.Type,self.Name,Ship)
self.Weapon1=Slot("WX",Ship)
self.Utility1=Slot("UL",Ship)
self.Utility2=Slot("UL",Ship)
self.Utility3=Slot("UL",Ship)
Variants=[Artillery,Broadside,Hangar,SpinalMount]
class Core:
Type="MBC"
class Artillery(Module):
Type="MBC"
Name="Artillery"
Info=["Weapons: 3L","Utilitys: 3L"]
def __init__(self,Ship):
Module.__init__(self,self.Type,self.Name,Ship)
self.Weapon1=Slot("WL",Ship)
self.Weapon2=Slot("WL",Ship)
self.Weapon3=Slot("WL",Ship)
self.Utility1=Slot("UL",Ship)
self.Utility2=Slot("UL",Ship)
self.Utility3=Slot("UL",Ship)
class Broadside(Module):
Type="MBC"
Name="Broadside"
Info=["Weapons: 2M 2L","Utilitys: 3L"]
def __init__(self,Ship):
Module.__init__(self,self.Type,self.Name,Ship)
self.Weapon1=Slot("WM",Ship)
self.Weapon2=Slot("WM",Ship)
self.Weapon3=Slot("WL",Ship)
self.Weapon4=Slot("WL",Ship)
self.Utility1=Slot("UL",Ship)
self.Utility2=Slot("UL",Ship)
self.Utility3=Slot("UL",Ship)
class Carrier(Module):
Type="MBC"
Name="Carrier"
Info=["Weapons: 2S 2P 2H","Utilitys: 3L"]
def __init__(self,Ship):
Module.__init__(self,self.Type,self.Name,Ship)
self.Weapon1=Slot("WS",Ship)
self.Weapon2=Slot("WS",Ship)
self.Weapon3=Slot("WP",Ship)
self.Weapon4=Slot("WP",Ship)
self.Weapon5=Slot("WH",Ship)
self.Weapon6=Slot("WH",Ship)
self.Utility1=Slot("UL",Ship)
self.Utility2=Slot("UL",Ship)
self.Utility3=Slot("UL",Ship)
class Hangar(Module):
Type="MBC"
Name="Hangar"
Info=["Weapons: 4M 1H","Utilitys: 3L"]
def __init__(self,Ship):
Module.__init__(self,self.Type,self.Name,Ship)
self.Weapon1=Slot("WM",Ship)
self.Weapon2=Slot("WM",Ship)
self.Weapon3=Slot("WM",Ship)
self.Weapon4=Slot("WM",Ship)
self.Weapon5=Slot("WH",Ship)
self.Utility1=Slot("UL",Ship)
self.Utility2=Slot("UL",Ship)
self.Utility3=Slot("UL",Ship)
Variants=[Artillery,Broadside,Carrier,Hangar]
class Stern:
Type="MBS"
class Artillery(Module):
Type="MBS"
Name="Artillery"
Info=["Weapons: 1L","Utilitys: 2A"]
def __init__(self,Ship):
Module.__init__(self,self.Type,self.Name,Ship)
self.Weapon1=Slot("WL",Ship)
self.Utility1=Slot("UA",Ship)
self.Utility2=Slot("UA",Ship)
class Broadside(Module):
Type="MBS"
Name="Broadside"
Info=["Weapons: 2M","Utilitys: 2A"]
def __init__(self,Ship):
Module.__init__(self,self.Type,self.Name,Ship)
self.Weapon1=Slot("WM",Ship)
self.Weapon2=Slot("WM",Ship)
self.Utility1=Slot("UA",Ship)
self.Utility2=Slot("UA",Ship)
Variants=[Artillery,Broadside]
Variants=[Bow,Core,Stern]
if(__loader__!="Modules"):
import Modules
import Utilitys
import Weapons
import Components
|
[
"CristiSima@github.com"
] |
CristiSima@github.com
|
e98b22fa6ef267f696bb0d745c79f47d0d9e171b
|
20f16917c9245aae71cb50fcc4b3e34e1e2a5006
|
/LessonThree/Python07/src/Story_start.py
|
bf42f5f5399b21892641b105ca409e6280efa206
|
[] |
no_license
|
yinsendemogui/Alex
|
f4bce794efb5cacdf547c420d7a3a3c5d27be5c8
|
eeb230b9028ced5c7fc0f293c1d4d7b98c521721
|
refs/heads/master
| 2020-06-11T19:17:41.397658
| 2017-01-07T15:50:48
| 2017-01-07T15:50:48
| 75,628,240
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,646
|
py
|
#!usr/bin/env python
# -*- coding:utf-8 -*-
# auther:Mr.chen
# 描述:
import time,os,sys
sys.path.append('..')
from lib import common
# from lib.Players_model import players_Model
DIR = os.path.dirname(__file__)
DIR = DIR.replace('src','db/')
TAG = True
def Pre_chapter(user):
time.sleep(2)
title = """
* * * * * * * * * * * * * * * * * * * * * * *预章:传说* * * * * * * * * * * * * * * * * * * * * * *
"""
print (title)
time.sleep(5)
text = """
相传很久以前,于古国疆域,有一奇人姓夸名父.
以大力闻于世间,以才智惊于圣贤,以风韵传于万载..
忽一日,慕之者至.询问之,其曰...
吾父乃真之才,生于凡中.无师而达天地...
终其一生教化万民,此乃吾真之所持..
父之事迹.且听我慢慢道来...
"""
for i in text.decode('utf-8'):
if i != ' ':
time.sleep(0.5)
print i.encode('utf-8'),
else:
print i.encode('utf-8'),
The_first_chapter(user)
def The_first_chapter(user):
# dict = common.log_info_read(DIR + 'config_conf')
# for S in dict['students']:
# if S.Name == user.Name:
time.sleep(2)
introduce = """
登场人物介绍
姓名:{0}
年龄:{1}
国籍:{2}
特长:{3}
体力:{4}
武力:{5}
智力:{6}
魅力:{7}
秘籍:无
点评:屌丝,唯撩妹甚
姓名:灵儿
年龄:22
国籍:china
特长:
体力:1000
武力:70
智力:70
魅力:100
秘籍:游戏保护,万法不侵
点评:白富美
""".format(user.Name,user.Age,user.Nationality,user.Specialty,user.Strength,user.Force,user.IQ,user.Charm)
for i in introduce.decode('utf-8'):
if i != ' ':
time.sleep(0.2)
print i.encode('utf-8'),
else:
print i.encode('utf-8'),
time.sleep(2)
title = """
* * * * * * * * * * * * * * * * * * * * * * *第一章:缘启* * * * * * * * * * * * * * * * * * * * * * *
"""
print (title)
time.sleep(5)
text = """
我的父亲叫做{0},本是一介草民,少时机缘之下,
救助了一个跳河自杀之人,本也并无所求,只因
我父那时在河中捕鱼,闲河中波澜太盛,吓跑鱼儿,
故,救之,以安抚鱼心。谁想此人竟是一小门派
掌教之子,因修炼走火,盲目间跌落河中。恰逢我父
出海,机缘所致,掌教有感我父恩德,故收其为徒,
传功授法,指引修行。说来也怪,我父不论武力{1},
智力{1}魅力{2}尽数低于常人,但唯独撩妹能力
极其出众,故派中最小师妹灵儿常伴左右,个中滋味
不足为外人道也。
""".format(user.Name,user.Force,user.IQ,user.Charm)
for i in text.decode('utf-8'):
if i != ' ':
time.sleep(0.5)
print i.encode('utf-8'),
else:
print i.encode('utf-8'),
The_second_chapter(user)
def The_second_chapter(user):
time.sleep(2)
introduce = """
登场人物介绍
姓名:高富帅
年龄:34
国籍:china
特长:有钱有势
体力:1000
武力:70
智力:70
魅力:70
秘籍:无
点评:如其名
"""
for i in introduce.decode('utf-8'):
if i != ' ':
time.sleep(0.2)
print i.encode('utf-8'),
else:
print i.encode('utf-8'),
time.sleep(2)
title = """
* * * * * * * * * * * * * * * * * * * * * * *第二章:幻灭* * * * * * * * * * * * * * * * * * * * * * *
"""
print (title)
time.sleep(5)
text = """
我父和灵儿就这样朝夕相处,日久生情,只待谈婚论嫁之时。
但,世事难料。一日,掌门大寿,宴请四方,祝寿者繁多。
有一人姓高名富帅,乃当朝一品大员之子,见灵儿貌美,
意欲图之。在其下手一刻,幸被我父所阻,于是心生恨意,
命其下人,禀报大员,以圣上赐婚为由,向掌门施压。怎料,
掌门欲息事宁人,遂命灵儿随高富帅回京,奉旨完婚。师命
难违,灵儿纵千般不愿,亦感无可奈何。临行前,挥泪别过,
劝我父放下仇恨,勿思勿念。我父伤心之余,亦感自身渺小。
暗发宏愿,以期报仇雪恨,救灵儿于水火之间。
"""
for i in text.decode('utf-8'):
if i != ' ':
time.sleep(0.5)
print i.encode('utf-8'),
else:
print i.encode('utf-8'),
The_third_chapter(user)
def The_third_chapter(user):
time.sleep(2)
title = """
* * * * * * * * * * * * * * * * * * * * * * *第三章:暗涛* * * * * * * * * * * * * * * * * * * * * * *
"""
print (title)
time.sleep(5)
text = """
灵儿事毕,我父再无心静修,辞别掌教,下山入世。
得一高人指点,拜于一隐门之中,勤学苦练,终得
真传。我父正欲出山报仇,被隐门上士所阻,言道
京城宦官家有一大内高手田伯光,武力高达90有余,
欲胜之需闯本门的锁妖塔拿一绝世宝物(双倍暴击率)
方可成行。
"""
for i in text.decode('utf-8'):
if i != ' ':
time.sleep(0.5)
print i.encode('utf-8'),
else:
print i.encode('utf-8'),
time.sleep(2)
while TAG:
text = """
剧情分支选择如下:
1,听劝
2,不听劝
"""
print (text)
choose = raw_input("请输入索引进行选择")
if choose == '1':
Lock_demon_tower(user)
elif choose == '2':
Fail_ending_one()
else:
print ("你的选择有误!")
def Lock_demon_tower(user):
List = []
dict = common.log_info_read(DIR + 'config_conf')
for pobj in dict['players']:
if pobj.Name == user.Name:
P = pobj
time.sleep(2)
title = """
* * * * * * * * * * * * * * * * * * * * * * *第四章:勇闯锁妖塔* * * * * * * * * * * * * * * * * * * * * * *
"""
print (title)
time.sleep(5)
text = """
反复思量,我父还是决定暂缓报仇,遵从隐士的看法,
独自一人来到锁妖塔前,看者前方雄伟的高达{0}
层的锁妖塔,暗下决心,要尽快完成闯塔拿到宝物.
于是,我父来到了塔下的驿站里...
""".format(str(len(user.Tlist_obj)))
for i in text.decode('utf-8'):
if i != ' ':
time.sleep(0.5)
print i.encode('utf-8'),
else:
print i.encode('utf-8'),
while TAG:
test = """
请问现在你想去哪?
1,闯塔
2,打开背包(吃药) 你还有{0}体力
3,不闯了,直接去报仇
""".format(str(P.Strength))
print (test)
choose = raw_input("请输入索引进行选择:")
num = 0
bum = 0
if choose == '1':
for tobj in dict['towers']:
if P.schedule[tobj] == 100:
schedule = '已达成'
bum += 1
else:
schedule = P.schedule[tobj]
print ("{0},{1},难度系数:{2},进度率:{3}%,创塔次数:{4}次".format(str(num+1),tobj.Lname,tobj.Difficulty,str(schedule),str(P.num[tobj])))
if bum == len(P.Tlist_obj):
print ("{0},锁妖塔顶层,难度系统:0".format(str(num+2)))
num += 1
List.append(str(num))
decide = raw_input("请输入索引进行选择:")
if decide == str(len(P.Tlist_obj)+1) and bum == len(P.Tlist_obj):
Lock_demon_tower_Top(user)
if decide in List:
if P.schedule[dict['towers'][int(decide)-1]] < 100:
for i in range(10):
re = P.Begins(dict['towers'][int(decide)-1])
if re == False:
common.log_info_write(DIR + 'config_conf', dict)
break
else:
common.log_info_write(DIR + 'config_conf', dict)
else:
print ("本层已经闯过了!")
else:
print ("你的输入有误!")
elif choose == '2':
while TAG:
text = """
背囊物品如下: 你还有{0}体力
1,大还丹:{1}个
2,小还丹 {2}个
""".format(str(P.Strength),str(P.Item['大还丹']),str(P.Item['大还丹']))
print (text)
choose = raw_input("请输入索引进行选择:")
if choose == '1':
if P.Item['大还丹'] > 0 :
P.Item['大还丹'] -= 1
P.Strength += 500
common.log_info_write(DIR + 'config_conf', dict)
break
else:
print ("大还丹个数为0")
break
elif choose == '2':
if P.Item['小还丹'] > 0:
P.Item['小还丹'] -= 1
P.Strength += 200
common.log_info_write(DIR + 'config_conf', dict)
break
else:
print ("小还丹个数为0")
break
else:
print ("你的输入有误!请重新输入!")
elif choose == '3':
Fail_ending_one()
else:
print ("你的输入有误!")
def Lock_demon_tower_Top(user):
dict = common.log_info_read(DIR + 'config_conf')
for pobj in dict['players']:
if pobj.Name == user.Name:
P = pobj
time.sleep(2)
title = """
* * * * * * * * * * * * * * * * * * * * * * *第五章:锁妖塔顶* * * * * * * * * * * * * * * * * * * * * * *
"""
print (title)
time.sleep(5)
text = """
克服磨难,吾父终至,锁妖塔顶。与前相比,此地奇静。
地方不大,有水缸一口,两人高有余。好奇之下,
侧身观之,怎料竟有活人居于缸内,遂上前,救出。
原来此人就是灵儿。询问下,方知,那日毕,其心已死,
趁高富帅不备,遂逃出,寻短见,幸被隐门上士所救,居
此疗伤,恰逢我父闯塔,喜得相逢。至此,我父恍然,直呼,
此宝胜万宝也(主角瞬间满怒体力翻倍)
"""
for i in text.decode('utf-8'):
if i != ' ':
time.sleep(0.5)
print i.encode('utf-8'),
else:
print i.encode('utf-8'),
P.Strength = P.Strength * 2
common.log_info_write(DIR + 'config_conf', dict)
Wu_Duo(user)
def Wu_Duo(user):
time.sleep(2)
title = """
* * * * * * * * * * * * * * * * * * * * * * *终章:武夺* * * * * * * * * * * * * * * * * * * * * * *
"""
print (title)
time.sleep(5)
text = """
经过不懈的努力,战胜了诸多困苦(实在懒得编了),
我们的主角终于和美女团结友爱的在一起生活,剧终
"""
for i in text.decode('utf-8'):
if i != ' ':
time.sleep(0.5)
print i.encode('utf-8'),
else:
print i.encode('utf-8'),
exit()
def Fail_ending_one():
time.sleep(2)
title = """
* * * * * * * * * * * * * * * * * * * * * * *终章:武夺* * * * * * * * * * * * * * * * * * * * * * *
"""
print (title)
time.sleep(5)
text = """
报仇心切,我父终是不肯听劝,遂一人趁夜逃出隐门,
数日后,进京踩点,待万事俱备只欠东风之时,奈何
大员祖宅大内高手,先知先觉,早已暗随我父三日有余,
眼见我父正待出手,遂突袭之,我父重伤,感叹报仇无望,
自此隐居山林,不问世事.....BAD END......
"""
for i in text.decode('utf-8'):
if i != ' ':
time.sleep(0.5)
print i.encode('utf-8'),
else:
print i.encode('utf-8'),
exit()
|
[
"215379068@qq.com"
] |
215379068@qq.com
|
7c758c74009c56b967069fdd7c768b6aca99de7e
|
921c6a7a41e318a207140c463778c12e0d4879da
|
/phonesimulator.py
|
f3db75ec0825b1ab419b73d718b0b56516eb1fae
|
[] |
no_license
|
kezzayuno/summerTutoringSolutions
|
79b7352e1cd52504bb95277f828459dab4a3c075
|
73fa7ac97e9076d1f99de35a1bce2cf1007c1b1f
|
refs/heads/main
| 2023-05-03T09:52:20.611134
| 2021-05-27T00:08:57
| 2021-05-27T00:08:57
| 365,408,177
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,815
|
py
|
# there can be errors in the file
# - number would have not an integer
# - duplicates phone number like two tims with the same number
# - two numbers with two different people which imply that these two live together
def createDict(file):
phoneBook = {}
done = False
for line in file:
splitLine = line.rstrip().split('$')
number = formatNumber(splitLine[1])
if len(phoneBook) != 0:
for name in phoneBook.keys():
if phoneBook[name] == number:
saved = name
phoneBook.pop(name)
phoneBook[saved + ', ' + splitLine[0]] = number
break
if done == False:
phoneBook[splitLine[0]] = number
return phoneBook
def formatNumber(number):
newNumber = []
strNumber = ''
for tupleDigit in zip(*([iter(number)] * 3)):
newNumber.append(''.join(tupleDigit))
for num in newNumber:
if num == newNumber[len(newNumber) - 1]:
strNumber += num
else:
strNumber += num + '-'
strNumber += number[len(number) - 1]
return strNumber
def checkPhoneNumber(phoneBook, name):
    """Return the number for *name*, searching inside merged entries too.

    Falls back to a not-found message string when no entry matches.
    """
    for phoneName in phoneBook:
        if phoneName == name:
            return phoneBook[phoneName]
        if ',' in phoneName:
            # Merged keys look like "Alice, Bob".  BUGFIX: strip the space
            # left after each comma — the original compared against " Bob"
            # and so never matched any name after the first.
            for aName in phoneName.split(','):
                if name == aName.strip():
                    return phoneBook[phoneName]
    return "This name does not exist in this phone book."
def main():
    """Load myContacts.txt, then answer one interactive phone-number lookup."""
    with open('myContacts.txt', 'r') as f:
        readFile = f.readlines()
    phoneBook = createDict(readFile)
    getPhoneNumber = input("Whose phone number do you wish to see? >")
    print(checkPhoneNumber(phoneBook, getPhoneNumber))

# Runs unconditionally on import (no __main__ guard in the original).
main()
|
[
"ayuno@ualberta.ca"
] |
ayuno@ualberta.ca
|
d5edeac4cbc477ba3102868460c60680c85531d0
|
b27c5b8d5441cf10774305b79dddd9a35d366f31
|
/recipeapp/settings.py
|
704c9d111c0ab4d452c3bb193dc28bfaf89bf25b
|
[] |
no_license
|
ins099/RecipeWebApp
|
58a925f8f224f5e867212d7a368f84fc3d882087
|
36d6961042a78c074b94ba6c1bf67f968975152c
|
refs/heads/main
| 2023-02-13T22:34:57.569192
| 2021-01-13T23:09:06
| 2021-01-13T23:09:06
| 321,590,791
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,588
|
py
|
"""
Django settings for recipeapp project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '9mt#j3mlm3p90xn$5pf5%l_ngn(4wd88$va6f&2yroj*g2%+#m'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'recipe',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'recipeapp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'recipeapp.wsgi.application'
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
]
}
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTH_USER_MODEL = 'recipe.User'
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
LOGIN_URL = '/login'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
MEDIA_URL = '/images/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'recipe/static/images/')
|
[
"alaminsaram92@gmail.com"
] |
alaminsaram92@gmail.com
|
148b9208b0b1a0d77e8b6ca8105ddce0cca0bb8a
|
379e7d33dc72ebe0c2cdf605174813accc957797
|
/main.py
|
1833697eb5be2b619c469b03e4664800a896552f
|
[] |
no_license
|
Kayt/heartmed
|
8838cea4c07be67b80c0294a6ccd7edeb64589a8
|
7dbbf3be44c912c4a1708cadaa987ef000ae1f73
|
refs/heads/master
| 2021-08-09T01:28:05.447174
| 2017-11-11T19:47:27
| 2017-11-11T19:47:27
| 110,351,336
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,488
|
py
|
from numpy import exp, array, random, dot, asarray
class NeuralNetwork():
    """A single-neuron network trained by gradient descent on a sigmoid unit.

    Fixes relative to the original:
    * ``think`` called ``dot(asarray(inputs, self.synaptic_weights))`` --
      passing the weight matrix as ``asarray``'s *dtype* argument -- instead
      of ``dot(inputs, self.synaptic_weights)``.
    * the weight vector was hard-coded to 100 inputs even though the
      training rows in this file carry 13 features; the input width is now
      a constructor parameter defaulting to 13.
    * ``xrange`` replaced with ``range`` (works on both Python 2 and 3).
    """

    def __init__(self, number_of_inputs=13):
        # Seed the random number generator, so it generates the same numbers
        # every time the program runs.
        random.seed(1)
        # We model a single neuron with ``number_of_inputs`` input
        # connections and 1 output: an (n x 1) weight column with values in
        # the range -1 to 1 and mean 0.
        self.synaptic_weights = 2 * random.random((number_of_inputs, 1)) - 1

    # The Sigmoid function, which describes an S shaped curve.
    # We pass the weighted sum of the inputs through this function to
    # normalise them between 0 and 1.
    def __sigmoid(self, x):
        return 1 / (1 + exp(-x))

    # The derivative of the Sigmoid function, expressed in terms of the
    # sigmoid's own output. It indicates how confident we are about the
    # existing weight.
    def __sigmoid_derivative(self, x):
        return x * (1 - x)

    def train(self, training_set_inputs, training_set_outputs, number_of_training_iterations):
        """Adjust the weights by repeated error-weighted gradient steps."""
        for iteration in range(number_of_training_iterations):
            # Pass the training set through the network (a single neuron).
            output = self.think(training_set_inputs)
            # Error: difference between the desired and predicted output.
            error = training_set_outputs - output
            # Scale the update by the input and by the sigmoid gradient so
            # confident (saturated) weights are adjusted less, and zero
            # inputs cause no change.
            adjustment = dot(training_set_inputs.T, error * self.__sigmoid_derivative(output))
            self.synaptic_weights += adjustment

    def think(self, inputs):
        """Return the neuron's output: sigmoid of the weighted input sum."""
        return self.__sigmoid(dot(inputs, self.synaptic_weights))
if __name__ == "__main__":
#Intialise a single neuron neural network.
neural_network = NeuralNetwork()
print "Random starting synaptic weights: "
print neural_network.synaptic_weights
# The training set. We have 4 examples, each consisting of 3 input values
# and 1 output value.
training_set_inputs = array([
[63,1,1,145,233,1,2,150,0,2.3,3,0,6],
[67,1,4,160,286,0,2,108,1,1.5,2,3,3],
[67,1,4,120,229,0,2,129,1,2.6,2,2,7],
[37,1,3,130,250,0,0,187,0,3.5,3,0,3],
[41,0,2,130,204,0,2,172,0,1.4,1,0,3],
[56,1,2,120,236,0,0,178,0,0.8,1,0,3],
[62,0,4,140,268,0,2,160,0,3.6,3,2,3],
[57,0,4,120,354,0,0,163,1,0.6,1,0,3],
[63,1,4,130,254,0,2,147,0,1.4,2,1,7],
[53,1,4,140,203,1,2,155,1,3.1,3,0,7],
[57,1,4,140,192,0,0,148,0,0.4,2,0,6],
[56,0,2,140,294,0,2,153,0,1.3,2,0,3],
[56,1,3,130,256,1,2,142,1,0.6,2,1,6],
[44,1,2,120,263,0,0,173,0,0,1,0,7],
[52,1,3,172,199,1,0,162,0,0.5,1,0,7],
[57,1,3,150,168,0,0,174,0,1.6,1,0,3],
[48,1,2,110,229,0,0,168,0,1,3,0,7],
[54,1,4,140,239,0,0,160,0,1.2,1,0,3],
[48,0,3,130,275,0,0,139,0,0.2,1,0,3],
[49,1,2,130,266,0,0,171,0,0.6,1,0,3],
[64,1,1,110,211,0,2,144,1,1.8,2,0,3],
[58,0,1,150,283,1,2,162,0,1,1,0,3],
[58,1,2,120,284,0,2,160,0,1.8,2,0,3],
[58,1,3,132,224,0,2,173,0,3.2,1,2,7],
[60,1,4,130,206,0,2,132,1,2.4,2,2,7],
[50,0,3,120,219,0,0,158,0,1.6,2,0,3],
[58,0,3,120,340,0,0,172,0,0,1,0,3],
[66,0,1,150,226,0,0,114,0,2.6,3,0,3],
[43,1,4,150,247,0,0,171,0,1.5,1,0,3],
[40,1,4,110,167,0,2,114,1,2,2,0,7],
[69,0,1,140,239,0,0,151,0,1.8,1,2,3],
[60,1,4,117,230,1,0,160,1,1.4,1,2,7],
[64,1,3,140,335,0,0,158,0,0,1,0,3],
[59,1,4,135,234,0,0,161,0,0.5,2,0,7],
[44,1,3,130,233,0,0,179,1,0.4,1,0,3],
[42,1,4,140,226,0,0,178,0,0,1,0,3],
[43,1,4,120,177,0,2,120,1,2.5,2,0,7],
[57,1,4,150,276,0,2,112,1,0.6,2,1,6],
[55,1,4,132,353,0,0,132,1,1.2,2,1,7],
[61,1,3,150,243,1,0,137,1,1,2,0,3],
[65,0,4,150,225,0,2,114,0,1,2,3,7],
[40,1,1,140,199,0,0,178,1,1.4,1,0,7],
[71,0,2,160,302,0,0,162,0,0.4,1,2,3],
[59,1,3,150,212,1,0,157,0,1.6,1,0,3],
[61,0,4,130,330,0,2,169,0,0,1,0,3],
[58,1,3,112,230,0,2,165,0,2.5,2,1,7],
[51,1,3,110,175,0,0,123,0,0.6,1,0,3],
[50,1,4,150,243,0,2,128,0,2.6,2,0,7],
[65,0,3,140,417,1,2,157,0,0.8,1,1,3],
[53,1,3,130,197,1,2,152,0,1.2,3,0,3],
[41,0,2,105,198,0,0,168,0,0,1,1,3],
[65,1,4,120,177,0,0,140,0,0.4,1,0,7],
[44,1,4,112,290,0,2,153,0,0,1,1,3],
[44,1,2,130,219,0,2,188,0,0,1,0,3],
[60,1,4,130,253,0,0,144,1,1.4,1,1,7],
[54,1,4,124,266,0,2,109,1,2.2,2,1,7],
[50,1,3,140,233,0,0,163,0,0.6,2,1,7],
[41,1,4,110,172,0,2,158,0,0,1,0,7],
[54,1,3,125,273,0,2,152,0,0.5,3,1,3],
[51,1,1,125,213,0,2,125,1,1.4,1,1,3],
[51,0,4,130,305,0,0,142,1,1.2,2,0,7],
[46,0,3,142,177,0,2,160,1,1.4,3,0,3],
[58,1,4,128,216,0,2,131,1,2.2,2,3,7],
[54,0,3,135,304,1,0,170,0,0,1,0,3],
[54,1,4,120,188,0,0,113,0,1.4,2,1,7],
[60,1,4,145,282,0,2,142,1,2.8,2,2,7],
[60,1,3,140,185,0,2,155,0,3,2,0,3],
[54,1,3,150,232,0,2,165,0,1.6,1,0,7],
[59,1,4,170,326,0,2,140,1,3.4,3,0,7],
[46,1,3,150,231,0,0,147,0,3.6,2,0,3],
[65,0,3,155,269,0,0,148,0,0.8,1,0,3],
[67,1,4,125,254,1,0,163,0,0.2,2,2,7]
])
training_set_outputs = array([[0,1,1,0,0,0,1,0,1,1,0,0,1,0,0,0,1,0,0,0,0,0,1,1,1,0,0,0,0,1,0,1,1,0,0,0,1,1,1,0,1,0,0,0,1,1,0,1,0,0,0,0,1,0,1,1,1,1,0,0,1,0,1,0,1,1,1,0,1,1,0,1]]).T
# Train the neural network using a training set.
# Do it 10,000 times and make small adjustments each time.
neural_network.train(training_set_inputs, training_set_outputs, 10000)
print "New synaptic weights after training: "
print neural_network.synaptic_weights
# Test the neural network with a new situation.
print "Considering new situation [65,1,4,110,248,0,2,158,0,0.6,1,2,6] -> ?: "
print neural_network.think(array([65,1,4,110,248,0,2,158,0,0.6,1,2,6]))
|
[
"achim_munene@hotmail.com"
] |
achim_munene@hotmail.com
|
362e9abe387de8ee165bafdca75d22ecb015f9f1
|
40efe4cdb4d7845ce7adca4b3d48aaeb2c0f35fb
|
/rotate_sort.py
|
8412180883b47c121dd7bd07654c069a3a569c9d
|
[] |
no_license
|
viz06/python_codes
|
b663d360d223e549401b5c10534c9085c5def6a9
|
93b067afa4ba02fba73c9709c922983b2bf9ac09
|
refs/heads/master
| 2022-12-19T10:16:42.645697
| 2020-09-20T19:27:10
| 2020-09-20T19:27:10
| 295,674,842
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 430
|
py
|
class Solution:
    count = 0

    def findKRotation(self, arr, n):
        """Return the index of the rotation point of *arr* (length *n*).

        Scans adjacent pairs for the single descent in a rotated sorted
        array; a fully sorted array yields 0.
        """
        idx = 1
        while idx < n:
            if arr[idx - 1] > arr[idx]:
                return idx
            idx += 1
        return 0
if __name__ == '__main__':
    # Read the number of test cases, then for each case: the array length
    # and the space-separated array itself.
    tc = int(input())
    while tc > 0:
        n = int(input())
        a = list(map(int, input().strip().split()))
        ob = Solution()
        # Fix: the original called ob.findKRotations(...) -- a method that
        # does not exist on Solution (AttributeError at runtime); the
        # defined method is findKRotation.
        ans = ob.findKRotation(a, n)
        print(ans)
        tc = tc - 1
|
[
"noreply@github.com"
] |
noreply@github.com
|
306b1205394bae0e6a5f9c63abcc56d5c25288b4
|
5a6ea469a1a6571281e8a23ff8fbc4c3ea205a0f
|
/util/visualizer.py
|
283a78e9dfa482932160f889533973dfff9919bf
|
[] |
no_license
|
123456789lin/Pytorch-CycleGAN-Simplified
|
ee00d32d1db3c5b7daf2bfdb273dcb487ee4e8de
|
364b1c36a0090c0a37e22c95eec4388dc5db90e4
|
refs/heads/master
| 2022-12-10T00:57:48.497532
| 2020-08-30T06:12:54
| 2020-08-30T06:12:54
| 291,289,682
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,896
|
py
|
import numpy as np
import time
import os
import sys
import ntpath
from . import util
from subprocess import Popen, PIPE
# ConnectionError does not exist on Python 2, so fall back to the base
# Exception class there; the chosen alias is what callers catch for
# visdom-style connection failures.
if sys.version_info[0] == 2:
    VisdomExceptionBase = Exception
else:
    VisdomExceptionBase = ConnectionError
def save_images(result_dir, visuals, image_path, aspect_ratio=1.0):
    """Save images to the disk.

    Parameters:
        result_dir (str)      -- root results directory; files are written
                                 to '<result_dir>/images' (created on demand)
        visuals (OrderedDict) -- an ordered dictionary that stores
                                 (name, images (either tensor or numpy)) pairs
        image_path (str)      -- the string is used to create image paths
        aspect_ratio (float)  -- the aspect ratio of saved images

    Each visual is saved as '<basename>_<label>.png'.
    (The unused ims/txts/links locals from the original were removed.)
    """
    image_dir = os.path.join(result_dir, 'images')
    # Derive the output base name from the source image's own file name.
    short_path = ntpath.basename(image_path[0])
    name = os.path.splitext(short_path)[0]
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    for label, im_data in visuals.items():
        im = util.tensor2im(im_data)
        image_name = '%s_%s.png' % (name, label)
        save_path = os.path.join(image_dir, image_name)
        util.save_image(im, save_path, aspect_ratio=aspect_ratio)
class Visualizer():
    """This class includes several functions that can display/save images and print/save logging information.
    """

    def __init__(self, opt):
        """Cache the options and open a new dated section in the loss log."""
        self.opt = opt  # cache the option
        self.name = opt.name
        self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
        stamp = time.strftime("%c")
        with open(self.log_name, "a") as handle:
            handle.write('================ Training Loss (%s) ================\n' % stamp)

    def display_current_results(self, img_dir, visuals, epoch):
        """Save every visual in *visuals* to img_dir as 'epochNNN_<label>.png'."""
        if not os.path.exists(img_dir):
            os.makedirs(img_dir)
        for tag, tensor in visuals.items():
            target = os.path.join(img_dir, 'epoch%.3d_%s.png' % (epoch, tag))
            util.save_image(util.tensor2im(tensor), target)

    def print_current_losses(self, epoch, iters, losses):
        """Print current losses on console and append the same line to the log.

        Parameters:
            epoch (int)           -- current epoch
            iters (int)           -- current training iteration within the epoch
            losses (OrderedDict)  -- training losses as (name, float) pairs
        """
        pieces = ['(epoch: %d, iters: %d) ' % (epoch, iters)]
        pieces.extend('%s: %.3f ' % (tag, val) for tag, val in losses.items())
        message = ''.join(pieces)
        print(message)  # print the message
        with open(self.log_name, "a") as handle:
            handle.write('%s\n' % message)  # save the message
|
[
"noreply@github.com"
] |
noreply@github.com
|
e285fd667bf08064ff25f762ee61ea06d3addb99
|
5a082f78df56e88d0b7096323f16d7a34f7751e0
|
/venv/Scripts/pip3.7-script.py
|
6ab5023b081a735352f9e21f4fc49c712766008b
|
[] |
no_license
|
yonitouitou/PDFMerger
|
36e9f9fde2c25e2bc81b5b49b95f9330097f720a
|
d0f5e06cc1cf2375d9f5a0ee9a08d12e5f253af6
|
refs/heads/master
| 2020-12-09T10:31:25.168385
| 2020-05-10T07:51:08
| 2020-05-10T07:51:08
| 233,278,185
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 428
|
py
|
#!"C:\Users\Yoni Touitou\PycharmProjects\PdfMerger\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
)
|
[
"yonitouitouu@gmail.com"
] |
yonitouitouu@gmail.com
|
a7cab9582745010178a2ff7a604325a6acc85ace
|
f2d961fe88b67f9d1eb52170d4df7c0363fae073
|
/paper/plottraces_wt.py
|
0f6fb632d5797568f4da3376d5ce5e68dd5a2795
|
[] |
no_license
|
acvmanzo/mn
|
12ffbf1aae9c8088a8461bb2e7f4823e31329338
|
c1d52e65e0bdec504d4e3954ad02ffe91f7f31ad
|
refs/heads/master
| 2021-01-23T04:23:47.366109
| 2014-06-08T11:05:11
| 2014-06-08T11:05:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,820
|
py
|
import mn.dftf.dftf as dftf
import os
import matplotlib.pyplot as plt
import numpy as np
import operator
from mn.cmn.cmn import *
import matplotlib
import pickle
import matplotlib as mpl
# Plots raw traces on one graph from the movies specified below.
# Run from the 'data' folder; in the 'data' folder are individual movie folders
# (similar to the experiment/data folders).
# Same values as in dftf.py, but only plotting roi1.
DFTSIZE=10000
RESULTS_FILE = 'results1.txt'
PARAMS_FILE = 'params'
CORRPARAMS_FILE = 'corrparams'
HZ_BOUND1 = 0.5
HZ_BOUND2 = 'end'
KEYLIST = 'keylist'
COLS= ['Mean1']
ROIS = ['roi1']
# Selects which pipeline runs at the bottom of this file.
TYPE = 'raw' # Choose 'dft' or 'raw'
if TYPE == 'dft':
    PLOTNAME = 'dfttraces.png'
    YLABEL = 'Amplitude'
    XLABEL = 'Hz'
    YMIN = 0
    YLIM = 4
if TYPE == 'raw':
    PLOTNAME = 'rawtraces'
    YLABEL = 'Arbitrary Intensity'
    XLABEL = 'Time (s)'
    YMIN = -5
    YLIM = 90
FONTSIZE = 6.7 # Font size for tick labels, axis labels.
FIGW = 1.75 # Figure width in inches
FIGH = 2.5 # Figure height in inches
FIGDPI = 600 # Figure dpi
BORDER = 'no'
YAXISTICKS = 2
TIME = 1 # Length of time the traces show.
XLIMHZ = 10
LINEWIDTH = 0.75
# Dictionary where the keys are the movie names and the values are the condition, the y offset of
# the trace (so that they aren't on top of each other), and the color the of the trace.
# (The active MOVIES below also carries a 4th element: the '(i) '-style label
# that oneplot() draws next to each trace.)
#MOVIES = {'mov_20101130_200135': ['112648-GAL4', 32.5+1, 'k'], 'mov_20110803_190537': ['UAS-TNT', 14+1, 'b'], 'mov_20101213_193258': ['112648 x TNT', 0, 'r']}
#DFT_MOVIES = {'mov_20101130_200135': ['112648-GAL4', 3.1-0.25, 'k'], 'mov_20110803_190537': ['UAS-TNT', 1.8-0.25, 'b'], 'mov_20101213_193258': ['112648 x TNT', 0.25, 'r']}
#MOVIES = {'mov_20110518_184507': ['24', 70, 'k'], 'mov_20110518_185105': ['30', 20, 'b'], 'mov_20110518_184217': ['24', 50, 'k'], 'mov_20110518_184849': ['30', 0, 'b']}
#MOVIES = {'mov_20101130_200533': ['control', 45, 'k'], 'mov_20110518_191243': ['112648 x dtrpa1 - 24', 30, 'b'], 'mov_20110527_163607_part2' :['112648 x dtrpa1 - 32', 15, 'r'], 'mov_20110518_192012': ['112648 x dtrpa1 - 32', -5, 'r']}
#MOVIES = {'mov_20110830_152007': ['24 h/100 mM suc', 70, 'k', '(i) '], 'mov_20110830_192926': ['10 h/100 mM suc', 45, 'k', '(ii) '], 'mov_20110901_182709' :['24 h/500 mM suc', 20, 'k', '(iii) '], 'mov_20110113_180524': ['500 mM suc + 2.5% MC', -1, 'k', '(iv) ']}
MOVIES = {'mov_20110830_192926': ['10 h/100 mM suc', 70, 'k', '(i) '],
    'mov_20110830_152007': ['24 h/100 mM suc', 45, 'k', '(ii) '],
    'mov_20110901_182709' :['24 h/500 mM suc', 20, 'k', '(iii) '],
    'mov_20110113_180524': ['24 h/500 mM suc + 2.5% MC', -1, 'k', '(iv) ']}
matplotlib.rc('axes', linewidth=LINEWIDTH)
def oneplot(moviedict, toplotdict, figw, figh, figdpi, fontsz, border, ylabel, ylim, time,
ymin, lw):
    """Moviedict is the above dictionary of movies, toplotdict is a dictionary produced by
    toplot(), and other values are what's specified as global variables.

    Draws every movie's trace (offset vertically per moviedict) on a single
    figure and labels each with its '(i) condition' text.
    (Python 2 only: uses dict.iteritems().)
    """
    print(toplotdict.keys())
    fontv = mpl.font_manager.FontProperties()
    # Uncomment line below to set the font to verdana; the default matplotlib font is very
    # similar (just slightly narrower).
    fontv = mpl.font_manager.FontProperties(fname='/usr/share/matplotlib/mpl-data/fonts/ttf/arial.ttf')
    fontv.set_size(fontsz)
    fig1 = plt.figure(figsize=(figw, figh), dpi=figdpi, facecolor='w', edgecolor='k')
    #Plots data on one graph with parameters specified in the moviedict directory.
    for k, v in moviedict.iteritems():
        print(k)
        cond1, offset, color, inum = v
        xvals = toplotdict[k][0]
        # Shift each trace up by its per-movie offset so traces don't overlap.
        data = toplotdict[k][1] + offset
        condition = cond1
        plt.plot(xvals, data, color, linewidth=lw, label=condition)
        print(condition)
        #if k == 'mov_20110113_180524':
            #plt.text(0.5, offset+7, inum+condition, horizontalalignment='left',
            #fontproperties=fontv)
        #else:
            #plt.text(0.5, offset+9, inum+condition, horizontalalignment='left',
            #fontproperties=fontv)
        # This one movie gets its label slightly lower than the rest.
        if k == 'mov_20110113_180524':
            plt.text(0.05, offset+7, inum+condition, horizontalalignment='left',
            fontproperties=fontv)
        else:
            plt.text(0.05, offset+9, inum+condition, horizontalalignment='left',
            fontproperties=fontv)
    ax = plt.gca()
    ## Plots legend.
    #legend = plt.legend()
    ### Manipulates order of the legend entries.
    ##handles, labels = ax.get_legend_handles_labels()
    ##handles2 = handles[0], handles[2], handles[1], handles[3]
    ##labels2 = labels[0], labels[2], labels[1], labels[3]
    ##legend = ax.legend(handles2, labels2, bbox_to_anchor=(0, 0, 1, 1), transform=plt.gcf().transFigure)
    ### Changes legend font to fontsz.
    #ltext = legend.get_texts()
    #plt.setp(ltext, fontsize=fontsz)
    ### Removes border around the legend.
    #legend.draw_frame(False)
    #Uncomment lines below to display without top and right borders.
    if border == 'no':
        for loc, spine in ax.spines.iteritems():
            if loc in ['left','bottom']:
                pass
            elif loc in ['right','top']:
                spine.set_color('none') # don't draw spine
            else:
                raise ValueError('unknown spine location: %s'%loc)
        #Uncomment lines below to display ticks only where there are borders.
        ax.xaxis.set_ticks_position('bottom')
        ax.yaxis.set_ticks_position('left')
    ## Removes tick labels and ticks from yaxis.
    ax.axes.yaxis.set_major_locator(matplotlib.ticker.NullLocator())
    # Specifies axis labels and axis tick label sizes.
    plt.xlabel(XLABEL, fontproperties=fontv, labelpad=4)
    plt.ylabel(ylabel, fontproperties=fontv, labelpad=4)
    plt.xticks(fontproperties=fontv)
    plt.yticks(fontproperties=fontv)
    # Specifies axis limits.
    plt.axis( [0, time, ymin, ylim])
    # Adjusts the space between the plot and the edges of the figure; (0,0) is the lower
    #lefthand corner of the figure.
    fig1.subplots_adjust(top=0.95)
    fig1.subplots_adjust(left=0.15)
    #fig1.subplots_adjust(right=0.95)
    fig1.subplots_adjust(bottom=0.15)
def gentoplot(time):
    """Generates a dictionary where the keys are movie names and the values are the raw trace for
    plotting. Time specifies the length of time in seconds of the plots shown.

    NOTE: os.chdir() is called per movie folder, so the process working
    directory changes as a side effect of this function.
    """
    toplot = {}
    # Generates a list of movie paths in the data folder.
    files = dftf.batch_s('.')
    # Generates dft traces and plots for each roi in each movie.
    for file in files:
        os.chdir(file)
        print(os.path.basename(file))
        for col in COLS:
            # Only process folders that have been analysed (contain 'params').
            if os.path.exists('params') == True:
                rawtracedata = dftf.TraceData(fname=RESULTS_FILE, paramsfile=PARAMS_FILE,
                corrparamsfile=CORRPARAMS_FILE, colname=col)
                td = rawtracedata.Processrawtrace(DFTSIZE, HZ_BOUND1, HZ_BOUND2)
                moviename = os.path.basename(os.path.abspath('.'))
                # Selects the area of the raw trace to plot.
                frames = time * td['fps']
                #print(frames)
                plottime = td['seltrace'][:frames]/6
                #print(len(plottime))
                # Mean-subtract the trace and build a seconds-based x axis.
                ms = plottime-np.mean(plottime)
                xsec = np.linspace(0, len(plottime)/td['fps'], len(plottime))
                #print(xsec)
                condition = td['condition']
                toplot[moviename] = [xsec, ms, condition]
                print(np.max(ms), np.min(ms))
    return(toplot)
def gentoplot_dft(xlimhz):
    """Like gentoplot(), but collects each movie's normalised DFT trace,
    truncated to frequencies up to xlimhz, keyed by movie name.

    NOTE: os.chdir() is called per movie folder (working-directory side
    effect), same as gentoplot().
    """
    toplot = {}
    # Generates a list of movie paths in the data folder.
    files = dftf.batch_s('.')
    # Generates dft traces and plots for each roi in each movie.
    for file in files:
        os.chdir(file)
        print(os.path.basename(file))
        for col in COLS:
            if os.path.exists('params') == True:
                rawtracedata = dftf.TraceData(fname=RESULTS_FILE, paramsfile=PARAMS_FILE,
                corrparamsfile=CORRPARAMS_FILE, colname=col)
                td = rawtracedata.Processrawtrace(DFTSIZE, HZ_BOUND1, HZ_BOUND2)
                condition = td['condition']
                m = td['peakf']
                # Frequency axis runs 0 .. fps/2 (Nyquist) over half the DFT size.
                xpoints = np.linspace(0, td['fps']/2, td['dftsize']/2)
                # Keep only the fraction of the trace below xlimhz.
                prop = xlimhz/(td['fps']/2)
                tracelen = np.rint(prop*len(td['dftnormtrunctrace']))
                toplot[td['moviename']] = [xpoints[:tracelen],
                td['dftnormtrunctrace'][:tracelen], condition]
    return(toplot)
# Top-level driver: runs whichever pipeline TYPE selects, saves the figure(s)
# under ../plots, and records which movies were plotted.
if TYPE == 'dft':
    toplot = gentoplot_dft(XLIMHZ)
    #oneplot(MOVIES, toplot, FIGW, FIGH, FIGDPI, FONTSIZE, BORDER, YLABEL, YLIM, TIME)
    # NOTE(review): DFT_MOVIES is only defined in a commented-out line above,
    # and this call passes 11 of oneplot's 12 positional args -- this branch
    # will fail if TYPE is ever set to 'dft'; confirm before enabling.
    oneplot(DFT_MOVIES, toplot, FIGW, FIGH, FIGDPI, FONTSIZE, BORDER, YLABEL, YLIM, XLIMHZ, YMIN)
    # Saves the figures in plots/plots.
    plotfolder = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath('.'))), 'plots')
    makenewdir(plotfolder)
    figname = os.path.join(plotfolder, PLOTNAME)
    plt.savefig(figname, dpi=FIGDPI)
    # Saves a file showing the movies I used for the plot.
    fname = os.path.join(plotfolder, 'movies_used_for_dfttraces.txt')
    with open(fname, 'w') as f:
        for k, v in MOVIES.iteritems():
            f.write(k + ' ' + v[0] + '\n')
if TYPE == 'raw':
    toplot = gentoplot(TIME)
    oneplot(MOVIES, toplot, FIGW, FIGH, FIGDPI, FONTSIZE, BORDER, YLABEL, YLIM, TIME, YMIN,
    LINEWIDTH)
    # Saves the figures in plots/plots.
    plotfolder = os.path.join(os.path.dirname(os.path.abspath('../')), 'plots')
    makenewdir(plotfolder)
    figname = os.path.join(plotfolder, PLOTNAME)
    plt.savefig(figname+'.svg', dpi=FIGDPI)
    plt.savefig(figname+'.png', dpi=FIGDPI)
    # Saves a file showing the movies I used for the plot and a pickle file with all the variables.
    fname = os.path.join(plotfolder, 'movies_used_for_rawtraces.txt')
    with open(fname, 'w') as f:
        for k, v in MOVIES.iteritems():
            f.write(k + ' ' + v[0] + '\n')
    # Dump every plotting parameter so the figure can be reproduced later.
    # (Opened in text mode 'w' -- fine on Python 2, where this script runs.)
    picklename = os.path.join(plotfolder, 'picklefile')
    with open(picklename, 'w') as h:
        d = {}
        d['MOVIES'] = MOVIES
        d['FONTSIZE'] = FONTSIZE
        d['FIGW'] = FIGW
        d['FIGH'] = FIGH
        d['FIGDPI'] = FIGDPI
        d['YAXISTICKS'] = YAXISTICKS
        d['TIME'] = TIME
        d['XLIMHZ'] = XLIMHZ
        d['PLOTNAME'] = PLOTNAME
        d['YLABEL'] = YLABEL
        d['XLABEL'] = XLABEL
        d['YMIN'] = YMIN
        d['YLIM'] = YLIM
        print(d)
        picklefile = pickle.Pickler(h)
        picklefile.dump(d)
|
[
"acvmanzo@gmail.com"
] |
acvmanzo@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.