blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
13edc751748509838dd764bca9d907474eba6783 | Python | gbernal/protolab_sound_recognition | /sound_classification/classification_service.py | UTF-8 | 9,658 | 2.6875 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | """
Main file for online classification service
"""
__author__ = 'lgeorge'
from collections import namedtuple
from sklearn import preprocessing
import glob
import sklearn
import sklearn.base
import sklearn.feature_extraction
import sklearn.svm
import pandas as pd
from sklearn.cross_validation import StratifiedKFold
from sklearn_pandas import DataFrameMapper
import numpy as np
from sound_classification.generate_database_humavips import generate_aldebaran_dataset
from sound_processing.segmentaxis import segment_axis
from sound_processing.features_extraction import get_features
from sound_classification.confidence_scaling_based_on_confusion import get_threshold_cum_precision
import sound_processing.io_sound
# Result record for one classified window of audio: timestamps in seconds,
# the predicted class label, the raw classifier confidence, and the
# calibrated score (-1 when calibration is disabled).
ClassificationResult = namedtuple("ClassificationResult",
                                  ["timestamp_start", "timestamp_end", "class_predicted", "confidence", "score"])
def get_confidence_prediction(clf, val):
    """
    Return the confidence of a classifier's prediction for one sample.

    :param clf: fitted classifier with probability estimates enabled
    :param val: sample to classify
    :return: the highest class probability, a float between 0 and 1
    """
    probabilities = clf.predict_proba(val)
    return np.max(probabilities)
class SoundClassificationException(Exception):
    """Domain-specific error raised by the sound classification service."""
    pass
class SoundClassification(object):
    """
    the service object allowed to learn a specific dataset (default aldebaran sounds)
    and classify a new file
    """
    def __init__(self, wav_file_list=None, clf=None, confidence_threshold=0.2, window_block_learning=None,
                 calibrate_score=True):
        """
        :param wav_file_list: , each files should be named # TODO : replace that with a list of namedTuple (file, class) for example ?
        :param clf: default is SVC with rbf kernel
        :param confidence_threshold: minimum confidence available to callers for filtering predictions
        :param window_block_learning: window length (seconds) used when building the training dataset
        :param calibrate_score: when True, per-class score calibration runs after learning
        :return:
        """
        if wav_file_list is None:
            wav_file_list = glob.glob('/mnt/protolab_innov/data/sounds/dataset/*.wav')
        if clf is None:
            # TODO : try with linearSVC .. and one vs all
            clf = sklearn.svm.SVC(kernel='rbf', probability=True, verbose=False)
        print("CLF is %s" % clf)
        # Maps the 'features' dict column of the dataset DataFrame to a sklearn matrix.
        self.to_sklearn_features = DataFrameMapper([('features', sklearn.feature_extraction.DictVectorizer())])
        self.scaler = None  # init during learn
        self.wav_file_list = wav_file_list
        self.nfft = 1024
        self.fs = 48000.  # for now we force it .. TODO
        self.clf = clf
        self.confidence_threshold = confidence_threshold
        self.window_block_learning = window_block_learning
        self.calibrate_score = calibrate_score
        # initialized during learn :
        self.score_normalized_coefs = {}  # coefs to convert confidence into "normalized" score
        self.min_expected_cum_precision = 1.0
        self.confidence_thresholds = {}  # threshold of score for each classifier in order to have `min_expected_cum_precision` for this class
        self.df = None

    def learn(self):
        """Build the training dataset, fit the scaler and classifier,
        then optionally run per-class score calibration."""
        # TODO: do not rely on a helper function; make this understandable on its own (translated)
        self.df = generate_aldebaran_dataset(self.wav_file_list, nfft=self.nfft,
                                             window_block=self.window_block_learning)
        self._learning_data_X = self.to_sklearn_features.fit_transform(self.df)
        self._learning_data_Y = self.df.expected_class
        # normalization
        self.scaler = preprocessing.StandardScaler().fit(self._learning_data_X)
        self._learning_data_X_scaled = self.scaler.transform(self._learning_data_X)
        self.clf.fit(self._learning_data_X_scaled, self._learning_data_Y)
        if self.calibrate_score:
            self._learn_calibration()
            print("confidence threshold are %s" % self.confidence_thresholds)
            print("confidence coefficients are %s" % self.score_normalized_coefs)

    def _learn_calibration(self):
        """
        calibrate score/threshold in order to provide a high precision to the user
        Warning calibration is done on same dataset that the learning
        we use using a 3-fold scheme
        :return:
        """
        # computing threshold scores
        n_folds = 3
        stratified_fold = StratifiedKFold(self.df.expected_class,
                                          n_folds)  # we use only 3 fold.. as we have only 16 values on some data
        expected = []
        predicted = []
        filenames = []
        fold_num = 0
        for train_set, test_set in stratified_fold:
            train_files = self.df.iloc[train_set].full_filename
            cloned_clf = sklearn.base.clone(self.clf)
            # we build a clone classifier service.. to do the learning on a fold..
            new_classifier_service = self.__class__(wav_file_list=train_files.tolist(), clf=cloned_clf, calibrate_score=False,
                                                    window_block_learning=self.window_block_learning)
            new_classifier_service.learn()
            for index in test_set:
                val = self.df.iloc[index]
                try:
                    prediction = new_classifier_service.processed_wav(val.full_filename)
                    expected.extend([val.expected_class] * len(prediction))
                    predicted.extend(prediction)
                    # we append the num of fold to filename to have easy difference after that.
                    filenames.extend(['_'.join([val.file_name, '_fold%s' % fold_num])] * len(prediction))
                except SoundClassificationException as e:
                    print("Exception {} detected on {}".format(e, val.full_filename))
            fold_num += 1
        predicted_class = [x.class_predicted for x in predicted]
        predicted_confidence = [x.confidence for x in predicted]
        df = pd.DataFrame(zip(expected, predicted_class, predicted_confidence), columns=['class_expected', 'class_predicted', 'confidence'])
        for class_name in set(self.df.expected_class):
            self.confidence_thresholds[class_name] = get_threshold_cum_precision(df,
                                                                                 true_positive_class=class_name,
                                                                                 min_expected_cum_precision=self.min_expected_cum_precision)
        # computing coeficient to `normalized` based on threshold scores per class
        # NOTE(review): .iteritems() is Python 2 only, consistent with the rest of this file.
        self.score_normalized_coefs = {predicted_class: 1. if val == 0 else 1.0 / float(val) for predicted_class, val in
                                       self.confidence_thresholds.iteritems()}

    def post_processed_score(self, confidence=None, class_predicted=None):
        """Scale a raw confidence by the calibrated per-class coefficient.
        Returns -1 when no calibration data is available for the class."""
        score = -1  # -1 => not used
        if self.score_normalized_coefs:  # dict is not empty
            if class_predicted in self.score_normalized_coefs:
                score = confidence * self.score_normalized_coefs[class_predicted]
        return score

    def processed_signal(self, data=None, fs=48000., window_block=1.0):
        """
        :param data: one-dimensional numpy array of audio samples
        :param fs: sampling rate in Hz
        :param window_block: duration of window block to use, default : 1.0 second, if None, the full signal is used as
            one big window
        :return: list of ClassificationResult namedtuple
        """
        assert (np.ndarray == type(data))
        assert (len(data.shape) == 1)  # we only support one channel for now
        assert (data.size != 0)
        res = []
        if window_block is None:
            block_size = data.size
        else:
            block_size = min(window_block * fs, data.size)
        overlap = int(block_size) >> 1  # int(block_size / 2)
        for num, signal in enumerate(segment_axis(data, block_size, overlap=overlap, end='cut')):
            preprocessed_features = get_features(signal, nfft=self.nfft, scaler=self.scaler)
            confidence = get_confidence_prediction(self.clf, preprocessed_features)
            # if confidence > self.confidence_threshold:
            class_predicted = self.clf.predict(preprocessed_features)[
                0]  # [0] : as asked by Alex we return only class in string not an np.array
            # Windows advance by (block_size - overlap) samples each step.
            timestamp_start = num * (block_size - overlap) / float(fs)
            # print("timestamp_start is %s" % timestamp_start)
            timestamp_end = timestamp_start + block_size / float(fs)
            score = self.post_processed_score(confidence=confidence, class_predicted=class_predicted)
            new_result = ClassificationResult(timestamp_start, timestamp_end, class_predicted, confidence, score)
            res.append(new_result)
        return res

    def processed_wav(self, filename, window_block=1.0, ignore_fs=False):
        """Load a wav file and classify it window by window.

        :raise SoundClassificationException: when the file's sample rate differs
            from self.fs and ignore_fs is False.
        """
        data, fs = sound_processing.io_sound.load_sound(filename)
        if not ignore_fs and fs != self.fs:
            raise (SoundClassificationException('fs (%s) != self.fs (%s)' % (fs, self.fs)))
        if len(data.shape) > 1:
            # Keep only the first channel of multi-channel audio.
            data = data[:, 0]
        return self.processed_signal(data=data, fs=fs, window_block=window_block)
def main():
    """
    Just a short demo how to use the SoundClassification class

    Trains on the default dataset, classifies one sample file,
    prints the timing and results, and returns the result list.
    """
    import time
    sound_classification_obj = SoundClassification()
    sound_classification_obj.learn()
    # test_file = "/mnt/protolab_innov/data/sounds/test_segment/2015_06_12-17h33m05s546ms_PepperAlex.wav"
    test_file = "test_data/bell_test.wav"
    start_time = time.time()
    res = sound_classification_obj.processed_wav(test_file)
    duration = time.time() - start_time
    print("duration of processing is {}".format(duration))
    print(res)
    return res


if __name__ == "__main__":
    main()
| true |
69d158afb35eaee67a3a5f9dc107071e2b6cddb0 | Python | ahmed1salama/os-database | /multisocketserver.py | UTF-8 | 268 | 2.53125 | 3 | [] | no_license | import zmq
# Simple ZeroMQ REQ client demo: connect to a local REP endpoint on
# port 5559, send one request and block until the reply arrives.
port1 = "5559"
context = zmq.Context()
print ("Connecting to server...")
# REQ sockets enforce a strict send/recv alternation.
socket = context.socket(zmq.REQ)
socket.connect ("tcp://localhost:%s" % port1)
print ("Sending request ")
socket.send_string ("Hello")
# Get the reply.
message = socket.recv()  # NOTE(review): blocks forever if no server is listening
| true |
d40094afb1c03b208d5f43b366a06674e2a022f3 | Python | padraigryan/setup | /scripts/podcastrip.py | UTF-8 | 690 | 2.625 | 3 | [] | no_license | #!/home/prya/usr/bin/python
# Download the latest "This American Life" episode into a dated folder.
# Python 2 script (urllib2, print statements).
import urllib2
import datetime
import os
import re

# Create new folder named after today's date.
dir_name = "/home/prya/Podcasts/" + str(datetime.date.today());
print dir_name
html = ""
try:
    # Reuse a previously cached index page if the folder already exists.
    os.stat(dir_name)
    f = open(dir_name + "/index.html", "r")
    html = f.read()
except:
    # NOTE(review): bare except also hides unrelated I/O errors; presumably
    # only the "folder missing" case is intended here.
    print "Getting the webpage"
    os.mkdir(dir_name)
    webpage = urllib2.urlopen('http://www.thisamericanlife.org');
    html = webpage.read()
    f = open(dir_name + "/index.html", "w")
    f.write(html)

# Pull the first episode-download link out of the page markup.
start_tag = '<li class="download"><a href="'
end_tag = '" download="'
s = re.search(start_tag+'(.+?)'+end_tag, html)
file_link = s.group(1)
# Fetch the audio file into the dated folder.
os.system('wget -P ' + dir_name + ' ' + file_link)
| true |
11e15eb33cdb34eb79f2a641f0a13f774f50c050 | Python | sparamona/SierpinskiTriangleSVG | /Sierpinski-triangle.py | UTF-8 | 1,771 | 2.9375 | 3 | [] | no_license | ##
## Sierpinski triangle SVG builder
## Jonathan Sheena, February 2018
##
import svgwrite
### PARAMETERS
maxlevel=6 # how many levels of recursion
page=[1100,850] # page size
line_stroke_width=1 # line width
squish = 1 # use 1 for triangles
### don't change this
levelheight = page[1]/(2**maxlevel)
# Function for line drawing
def draw(dwg,current_group,p,w,h,level):
    """Draw one level's segments for the cell at origin *p* with size (w, h):
    a horizontal mid-segment plus three short vertical ticks.  Colors
    distinguish the middle stem (blue) from the outer ticks (pink).
    NOTE(review): *current_group* is accepted but not used in this body.
    """
    dwg.add(dwg.line(start=[p[0]+w/2-w/4,p[1]+h/2],end=[p[0]+w/4+w/2,p[1]+h/2],stroke='red',stroke_width=1))
    midstroke='blue'
    outerstroke='pink'
    dwg.add(dwg.line(start=[p[0]+w/2,p[1]+h/2],
                     end=[p[0]+w/2,p[1]+h],stroke=midstroke,stroke_width=1))
    dwg.add(dwg.line(start=[p[0]+w/2-w/4,p[1]+h/2],
                     end=[p[0]+w/2-w/4,p[1]+h/2+levelheight],stroke=outerstroke,stroke_width=1))
    dwg.add(dwg.line(start=[p[0]+w/2+w/4,p[1]+h/2],
                     end=[p[0]+w/2+w/4,p[1]+h/2+levelheight],stroke=outerstroke,stroke_width=1))
def d(dwg,sq,level):
    """Recursively subdivide square *sq* = [origin, [w, h]] down to
    maxlevel, drawing each level's segments via draw()."""
    if (level==maxlevel+1):
        return
    p=sq[0]
    w=sq[1][0]
    h=sq[1][1]
    #dwg.add(dwg.rect(insert=p,size=[w,h],stroke='black',stroke_width=.5,fill="none"))
    # First draw the main line
    draw(dwg,current_group,p,w,h,level)
    #1 up
    d(dwg,[[p[0]+w/4,p[1]],[w/2,h/2*squish]],level+1)
    #2 below
    d(dwg,[[p[0],p[1]+h/2],[w/2,h/2]],level+1)
    d(dwg,[[p[0]+w/2,p[1]+h/2],[w/2,h/2]],level+1)
#Main program
name="triangles"
# set up the drawing
dwg = svgwrite.Drawing(filename="triangles.svg", debug=True, size=(page[0],page[1]))
# full page border
dwg.add(dwg.rect(insert=[0,0],size=page,stroke='black',stroke_width=1,fill="none"))
# Group the recursion draws into (stroke settings are inherited).
current_group = dwg.add(dwg.g(id=name, stroke='red', stroke_width=3, fill='none', fill_opacity=0 ))
sq = [[0,0],page]
# start running: recurse from level 1 over the full page
d(dwg,sq,1)
#dwg.add(dwg.text('Test', insert=(0, 10), fill='red'))
dwg.save()
| true |
576d50a309f7e6bd0f3e6d1f30b83f2820babd40 | Python | wzpfish/artifact-card | /card_crawler.py | UTF-8 | 1,641 | 2.734375 | 3 | [] | no_license | # coding: utf-8
import requests
import logging
from util import retry_session
from model import Card
import db
def get_crawl_url(setid):
    """Resolve the CDN URL of an Artifact card-set JSON blob.

    :param setid: card set id, "00" or "01".
    :return: full CDN URL string, or None when the lookup request fails.
    """
    assert setid in ["00", "01"]
    url = f"https://playartifact.com/cardset/{setid}/"
    r = retry_session().get(url)
    if r.status_code != requests.codes.ok:
        logging.error(f"failed to get url {url}")
        return
    result = r.json()
    # The endpoint returns a CDN root plus a relative path to the JSON file.
    crawl_url = result["cdn_root"] + result["url"]
    return crawl_url
def extract_card_color(item):
    """Return the first color whose "is_<color>" key is present in *item*.

    Colors are probed in the fixed order blue, red, black, green;
    None is returned when no color key is found.
    """
    for candidate in ("blue", "red", "black", "green"):
        if f"is_{candidate}" in item:
            return candidate
    return None
def crawl():
    """Download card set "01" and convert each entry to a Card model.

    :return: list of Card instances, or None when the HTTP fetch fails.
    """
    url = get_crawl_url("01")
    print(url)
    r = retry_session().get(url)
    if r.status_code != requests.codes.ok:
        logging.error(f"failed to get url {url}")
        return
    result = r.json()
    card_list = result["card_set"]["card_list"]
    cards = []
    for item in card_list:
        # Only the Simplified-Chinese name is kept; price is filled in later.
        card = Card(card_id=item.get("card_id"),
                    card_type=item.get("card_type"),
                    card_name=item.get("card_name", {}).get("schinese"),
                    rarity=item.get("rarity"),
                    color=extract_card_color(item),
                    item_def=item.get("item_def"),
                    price=None
                    )
        cards.append(card)
    return cards
def run():
    """Crawl all cards and persist them through the db module.

    NOTE(review): crawl() returns None on HTTP failure, which would make
    len(cards) raise TypeError here — confirm that is acceptable.
    """
    cards = crawl()
    logging.info(f"crawl cards successfully, count={len(cards)}")
    db.save_cards(cards)


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO,
                        format="[%(levelname)s] [%(asctime)s] [%(module)s] %(message)s")
    run()
| true |
d8926aa8bbfece59c29cedce56dd2395903641bc | Python | secrecy27/MachineLearning | /PycharmProjects/DeepLearning/MachineLearning/SMS_SpamFiltration_bySklearn.py | UTF-8 | 1,307 | 2.9375 | 3 | [] | no_license | import pickle
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
# Line prefixes that tag each SMS in the SMSSpamCollection corpus.
spam_header= "spam\t"
no_spam_header="ham\t"
documents=[]
labels=[]
with open("SMSSpamCollection", encoding="UTF-8") as file_handle:
    for line in file_handle:
        # Each line starts with a spam/ham tag, so branch on that prefix.
        if line.startswith(spam_header):
            labels.append(1)
            # Append the message body (tag stripped) to documents.
            documents.append(line[len(spam_header):])
        elif line.startswith(no_spam_header):
            labels.append(0)
            # Append the message body (tag stripped) to documents.
            documents.append(line[len(no_spam_header):])

vectorizer=CountVectorizer()
term_counts=vectorizer.fit_transform(documents)  # count word occurrences
vocabulary=vectorizer.get_feature_names()

# From word counts to word-frequency features:
# with use_idf disabled, TfidfTransformer produces plain term frequencies.
tf_transformer=TfidfTransformer(use_idf=False).fit(term_counts)
features=tf_transformer.transform(term_counts)

# Persist (vocabulary, features, labels) with pickle.
with open("processed.pickle","wb") as file_handle:
    pickle.dump((vocabulary,features,labels),file_handle)
fb66ee7a485d05850ba8dab1583a85264ba928f4 | Python | jiachen0212/numpy_deeplearning | /conv2d.py | UTF-8 | 1,294 | 2.734375 | 3 | [] | no_license | # https://blog.csdn.net/xo19882011/article/details/79306641 the csdn url ...
#coding=utf-8
# https://www.tensorflow.org/api_docs/python/tf/nn/conv2d
import tensorflow as tf
import numpy as np
def conv2d(
        input,
        filter,
        strides,
        padding=None
):
    """2-D convolution ('VALID' padding) in NHWC layout, NumPy only.

    :param input: array of shape (batch, height, width, in_channels)
    :param filter: array of shape (fh, fw, in_channels, out_channels)
    :param strides: 4-sequence; strides[1] and strides[2] are the spatial steps
    :param padding: unused; behaviour is always 'VALID'
    :return: array of shape (batch, out_h, out_w, out_channels)
    """
    batch, in_h, in_w = input.shape[0], input.shape[1], input.shape[2]
    f_h, f_w, out_ch = filter.shape[0], filter.shape[1], filter.shape[3]
    s_h, s_w = strides[1], strides[2]
    out_h = (in_h - f_h) // s_h + 1
    out_w = (in_w - f_w) // s_w + 1
    output = np.zeros([batch, out_h, out_w, out_ch])
    for b in range(batch):
        for i in range(out_h):
            for j in range(out_w):
                # Accumulate the filter taps over the receptive field.
                acc = np.zeros(out_ch)
                for di in range(f_h):
                    for dj in range(f_w):
                        pixel = input[b, s_h * i + di, s_w * j + dj, :]
                        acc = acc + np.dot(pixel, filter[di, dj, :, :])
                output[b, i, j] = acc
    return output
# Demo: random input/filter, compare the NumPy conv2d against tf.nn.conv2d.
input_t = np.random.randint(10,size=(3,5,5,3))
filter_t = np.random.randint(10,size=(2,2,3,2))
strides_t = [1,1,1,1]
print('numpy conv2d:')
res = conv2d(input_t,filter_t,strides_t)
print(res)
print(res.shape)
print('tensorflow conv2d')
# TensorFlow 1.x graph-mode reference implementation (VALID padding).
a = tf.Variable(input_t,dtype=tf.float32)
b = tf.Variable(filter_t,dtype=tf.float32)
op = tf.nn.conv2d(a,b,
                  strides=strides_t,
                  padding='VALID')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    res = sess.run(op)
    print(res)
    print(res.shape)
| true |
cbb5ff0c99f5f23cab5cad7f31d92f58d95e9315 | Python | 981377660LMT/algorithm-study | /11_动态规划/dp分类/区间dp/dfs/回文/中心扩展法求回文子串/2472. 不重叠回文子字符串的最大数目/2472. 不重叠回文子字符串的最大数目.py | UTF-8 | 2,151 | 4 | 4 | [] | no_license | # 给你一个字符串 s 和一个 正 整数 k 。
# 从字符串 s 中选出一组满足下述条件且 不重叠 的子字符串:
# !每个子字符串的长度 至少 为 k 。
# !每个子字符串是一个 回文串 。
# !返回最优方案中能选择的子字符串的 最大 数目。
class Solution:
    def maxPalindromes2(self, s: str, k: int) -> int:
        """
        Greedy + Manacher, O(n).
        Only palindromic substrings of length k or k+1 matter: if a
        palindrome of length k + 2i exists, trimming both ends yields one of
        length k, and shorter picks leave more room for later ones.  With
        Manacher, whether s[i:i+k] or s[i:i+k+1] is a palindrome is an O(1)
        query.  (Implementation lives in the referenced file.)
        """
        # !js-algorithm\17_模式匹配\马拉车拉马\2472. 不重叠回文子字符串的最大数目.py
        ...

    def maxPalindromes1(self, s: str, k: int) -> int:
        """O(n^2) DP: center expansion + prefix DP over non-overlapping picks."""
        n = len(s)
        # pal[l][r] is True when s[l:r+1] is a palindrome of length >= k.
        pal = [[False] * n for _ in range(n)]

        def mark(lo: int, hi: int) -> None:
            # Expand around the center (lo, hi) and flag qualifying palindromes.
            while lo >= 0 and hi < n and s[lo] == s[hi]:
                if hi - lo + 1 >= k:
                    pal[lo][hi] = True
                lo, hi = lo - 1, hi + 1

        for center in range(n):
            mark(center, center)       # odd-length palindromes
            mark(center, center + 1)   # even-length palindromes

        # best[i]: max number of disjoint qualifying palindromes inside s[:i].
        best = [0] * (n + 1)
        for i in range(1, n + 1):
            best[i] = best[i - 1]  # skip character i-1
            for start in range(i - k + 1):
                if pal[start][i - 1]:
                    best[i] = max(best[i], best[start] + 1)
        return best[-1]
# Demo calls ("abaccdbbd"/k=3 yields 2; "i"*2000 with k=1 yields 2000).
print(Solution().maxPalindromes1(s="abaccdbbd", k=3))
print(Solution().maxPalindromes1(s="iqqibcecvrbxxj", k=1))
print(Solution().maxPalindromes1(s="i" * 2000, k=1))
| true |
594b0c39f130f5e924805022818d0f684a206620 | Python | Patrick-Erath/OpenCV | /Tutorials/Tutorial_5/Codes/feature_matching2.py | UTF-8 | 1,652 | 2.8125 | 3 | [] | no_license | import numpy as np
import cv2
import matplotlib.pyplot as plt
# SIFT feature matching + homography stitching of two overlapping photos.
img_left = cv2.imread("../S1.jpg")
img_right = cv2.imread("../S2.jpg")
# OpenCV loads BGR; convert to RGB for matplotlib display.
img_left = cv2.cvtColor(img_left, cv2.COLOR_BGR2RGB)
img_right = cv2.cvtColor(img_right, cv2.COLOR_BGR2RGB)

# Create SIFT object and find descriptors
sift = cv2.xfeatures2d.SIFT_create()
keypoints_left, descriptors_left = sift.detectAndCompute(img_left, None)
keypoints_right, descriptors_right = sift.detectAndCompute(img_right, None)

# Create a BF Matcher object to find matches
bf = cv2.BFMatcher()
# Match descriptors
matches = bf.match(descriptors_right, descriptors_left)
# Sort matches by descriptor distance (best first).
matches = sorted(matches, key = lambda x:x.distance)
# Draw the ten best matches side by side.
imgmatch = cv2.drawMatches(img_right, keypoints_right, img_left, keypoints_left, matches[:10], None, flags=2)

# Arrange matching keypoints in two separate index lists.
GoodMatches = []
for i, m in enumerate(matches):
    if m.distance < 1000:
        GoodMatches.append((m.trainIdx, m.queryIdx))

# Get the pixel coordinates of the keypoints that are good matches.
mpr = np.float32([ keypoints_right[i].pt for (__, i) in GoodMatches])
mpl = np.float32([ keypoints_left[i].pt for (i, __) in GoodMatches])

# Find homography (RANSAC, 4px reprojection threshold) and warp accordingly.
H, __ = cv2.findHomography(mpr, mpl, cv2.RANSAC, 4)
print(H)
wimg = cv2.warpPerspective(img_right, H, (img_right.shape[1]+img_left.shape[1], img_right.shape[0]))
# Paste the left image over the warped right image to form the panorama.
wimg[:,:img_left.shape[1],:] = img_left

plt.figure(figsize=(8,3))
plt.subplot(121)
plt.imshow(imgmatch)
plt.title("Matches keypoints"), plt.xticks([]), plt.yticks([])
plt.subplot(122)
plt.imshow(wimg)
plt.title("Panoramic image"), plt.xticks([]), plt.yticks([])
plt.show()
857b136d67756f3c1c891d050a388cb7ab5ca4aa | Python | loinly/TextStego | /textlab/word2vec/vector.py | UTF-8 | 4,644 | 2.703125 | 3 | [] | no_license | #! python3
# -*- coding:utf-8 -*-
import os
import time
import warnings
import config
import logging
from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence, PathLineSentences
from pretreatment.pretreatment import PreDeal
warnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')
class MySentences(object):
    """
    Stream sentences (one per line, whitespace-tokenized) from every file in
    a directory.  Files are read lazily, line by line, so arbitrarily large
    corpora never have to fit in memory.
    """

    def __init__(self, dirname):
        """:param dirname: directory whose files are iterated (flat, no recursion)."""
        self.dirname = dirname

    def __iter__(self):
        for fname in os.listdir(self.dirname):
            # BUG FIX: the original `open(...)` handle was never closed,
            # leaking a file descriptor per corpus file; use a context manager.
            with open(os.path.join(self.dirname, fname)) as handle:
                for line in handle:
                    yield line.split()
class Seg(object):
    """
    Word-segment every file found under a directory tree.
    (Docstring translated from Chinese: "segment all files under the path".)
    """
    def __init__(self):
        logging.basicConfig(
            format='%(asctime)s : %(levelname)s : %(message)s',
            level=logging.INFO)
        self.logger = logging.getLogger("Segment")

    def segtext(self, dirname, savepath):
        """Segment all files in each sub-directory of *dirname*.

        Each sub-directory's files are concatenated into one output file
        <savepath>/<subdir>.txt, one space-joined segmented line per line.
        """
        for path in os.listdir(dirname):
            outfile = os.path.join(savepath, path + '.txt')
            filepath = os.path.join(dirname, path)
            if os.path.isdir(filepath):
                self.logger.info('seg all files under "path"')
                fout = open(outfile, 'w+', encoding='utf-8')
                for name in os.listdir(filepath):
                    filename = os.path.join(filepath, name)
                    for line in open(filename, 'r', encoding='utf-8'):
                        # PreDeal.seg tokenizes the line; join tokens with spaces.
                        _line = PreDeal.seg(line)
                        sentence = ' '.join(_line)
                        fout.write(sentence)
                        fout.write('\n')
                fout.close()
        self.logger.info(' all files are segmented!')
class WV(object):
    """
    word2vec helper class: training, incremental training, similar-word
    queries and Word Mover's Distance.
    (Docstrings/comments translated from Chinese.)
    """
    def __init__(self):
        logging.basicConfig(
            format='%(asctime)s : %(levelname)s : %(message)s',
            level=logging.INFO)
        pass

    # Train on all files under `corpus` (pre-segmented, space-separated words)
    # and save the resulting model to `modelpath`.
    @staticmethod
    def train(corpus, modelpath):
        if not os.path.isdir(corpus):
            raise ValueError('input is should be a path')
        sentences = PathLineSentences(corpus)
        model = Word2Vec(iter=3)
        model.build_vocab(sentences)
        model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)
        model.save(modelpath)

    # Incremental training: feed one additional segmented text file `corpus`
    # into a previously saved model.
    @staticmethod
    def moretrain(models, corpus):
        sentences = LineSentence(corpus)
        model = Word2Vec.load(models)
        model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)

    @staticmethod
    def similarwords(keyword, modelpath=config.modelpath, tops=5):
        # Return the `tops` most similar keywords (default 5) as
        # [word, similarity] pairs, or None if the word is out of vocabulary.
        start = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
        print("start execute Word2vec, get similar keywords! Time:" + start +">>>>>>>>>>>>>>>>>>>>>")
        try:
            model = Word2Vec.load(modelpath)
            words = model.wv.most_similar(keyword, topn=tops)
        except KeyError:
            print("word '%s' not in vocabulary" % keyword)
            return None
        end = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
        if not words:
            return None
        # res = [[item[0], item[1]] for item in words]  # similar keywords with their similarities
        res = []
        for word in words:
            res.append([word[0], word[1]])
            print(word[0], "\t", word[1])
        print("get similar keywords end!................... Time:" + end + ">>>>>>>>>>>>>>>>>>>>>")
        return res

    # WMD (Word Mover's Distance) between two raw sentences.
    @staticmethod
    def wmd(model, sent1, sent2):
        sent1 = PreDeal.seg(sent1)
        sent2 = PreDeal.seg(sent2)
        # model = Word2Vec.load(model)
        # Normalize the word vectors first; without this line the computed
        # distances can be extremely large.
        model.init_sims(replace=True)
        distance = model.wv.wmdistance(sent1, sent2)
        return distance
if __name__ == '__main__':
    dirname1 = r'F:\LabData\NetBigData\test\word2vec'
    savename1 = r'F:\LabData\NetBigData\test\out'
    # 1. Segment the raw corpus.
    s = Seg()
    s.segtext(dirname=dirname1, savepath=savename1)
    # 2. Train word2vec on the segmented output.
    wv = WV()
    wv.train(corpus=savename1, modelpath='./m.bin')
    # wv.train(r'F:\LabData\NetBigData\test\word2vec\x1.txt', './m.bin')
    # wv.moretrain('./m.bin', r'F:\LabData\NetBigData\test\word2vec\x2.txt')
    # keys = "推动"
    # simikeys = WV.similarwords(keys)
| true |
b2059e492b55a089e9589cba079f856cc0c65fac | Python | fannarl/traveler | /prof_undirb/card.py | UTF-8 | 973 | 3.1875 | 3 | [] | no_license | class Card(object):
def __init__(self, __rank = 0, __suit = ''):
if type(__rank) is int and __rank > 0 and __rank < 14:
if __rank == 1:
self.rank = 'A'
elif __rank == 11:
self.rank = 'J'
elif __rank == 12:
self.rank = 'Q'
elif __rank == 13:
self.rank = 'K'
else:
self.rank = __rank
elif type(__rank) is str and len(__rank) == 'AJQKajqk':
self.rank = __rank.upper()
else:
self.rank = 0
if type(__suit) is str and len(__suit) == 1 and __suit in 'HSDChsdc':
self.suit = __suit.upper()
else:
self.suit = ''
def __str__(self):
if self.is_blank():
return 'blk'
else:
return '{0:>3}{1}'.format(self.rank, self.suit)
def is_blank(self):
return self.rank == 0 or self.suit == ''
| true |
0979746cd3f61d58d7888cc663e78335dddde97e | Python | naviddianati/FEC | /src/disambiguation/core/utils.py | UTF-8 | 22,701 | 2.546875 | 3 | [] | no_license | '''
This module loads commonly used packages and modules.
'''
import json
import pprint
import cPickle
import datetime
import glob
import igraph
import multiprocessing
import os
import pickle
import random
import re
import sys
import time
import numpy as np
import pandas as pd
from .. import config
import states
import math
from ast import literal_eval
import filters
import resource
# list of all alphanumeric characters
abcd = 'abcdefghijklmnopqrstuvwxz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
random.seed()
# list of juink pieces often found in names. These must be
# removed before the name can be parsed.
name_junk = ['Ms', 'Miss', 'Mrs', 'Mr', 'Master', 'Rev' , 'Fr' , 'Dr' , 'MD', 'Atty' , 'Prof' \
, 'Hon' , 'Pres', 'Gov' , 'Coach', 'Ofc' , 'Msgr' , 'Sr' , 'Br' , 'Supt', 'Rep' \
, 'Sen' , 'Amb' , 'Treas', 'Sec' , 'Pvt' , 'Cpl' , 'Sgt' , 'Adm' , 'Maj' , 'Capt' \
, 'Cmdr' , 'Lt' , 'Col' , 'Gen', 'esq', 'esquire', 'jr', 'jnr', 'sr', 'snr', \
'ii', 'iii', 'iv', 'And', 'The', 'Is', 'Are', 'To', 'But', 'Over', 'At', 'Honorable', 'Judge' ]
# Regex that matches junck pieces in a name.
name_regex = re.compile('|'.join([r'(\b%s\b)'.encode('ASCII') % s.upper() for s in name_junk]))
# list of juink pieces often found in employers.
employer_junk = set(['AND', 'WITH', 'CO'])
# Regex that matches junck pieces in a name.
employer_regex = re.compile('|'.join([r'(\b%s\b)'.encode('ASCII') % s.upper() for s in employer_junk]))
def match_token_permuted(employer1, employer2, verbose=False):
    '''
    Decide whether two employer strings share enough long tokens to be
    considered the same employer regardless of token order
    (e.g. "ALVARARDO & GERKEN" vs "ALVARADO GERKAN & BENNETT").
    Junk words and punctuation are stripped first; only tokens longer than
    three characters are compared.
    Returns True when at least two such tokens are shared, or when exactly
    one token of six or more characters is shared; False otherwise.
    '''
    def tokenize(raw):
        # Drop junk words, turn punctuation into spaces, keep long tokens only.
        cleaned = employer_regex.sub('', raw)
        cleaned = strip_string(re.sub(r'\.|\,|-|\&|\\|\/', ' ', cleaned))
        return {tok for tok in cleaned.split(' ') if len(tok) > 3}

    tokens1 = tokenize(employer1)
    tokens2 = tokenize(employer2)
    if verbose:
        print(tokens1)
        print(tokens2)
    shared = tokens1 & tokens2
    if len(shared) >= 2:
        return True
    if len(shared) == 1 and len(next(iter(shared))) >= 6:
        return True
    return False
def strip_string(s):
    '''
    Collapse every run of whitespace in *s* to a single space and trim
    leading/trailing whitespace.
    @param s: a string.
    '''
    collapsed = re.sub(r'\s+', ' ', s)
    return collapsed.strip()
def get_index(mylist, x):
    '''
    Return the indices of every occurrence of C{x} in C{mylist}.
    '''
    matches = []
    for position, element in enumerate(mylist):
        if element == x:
            matches.append(position)
    return matches
def splitname(name):
    '''
    Parse a name and return a three-tuple:
    C{(lastname, middlename, firstname)}.
    When parsing, we first remove all junk as defined
    by L{name_junk} and L{name_regex}.
    '''
    s = name
    # Strip honorifics/suffixes, then turn periods and digits into spaces.
    s1 = name_regex.sub('', s)
    s1 = re.sub(r'\.', ' ', s1)
    s1 = re.sub(r'[0-9]', ' ', s1)
    firstname, middlename, lastname = '', '', ''
    # `tree` records which decision branch was taken (debug aid only;
    # it is never returned or printed).
    tree = ''
    # If ',' exists, split based on that. Everything before
    # is last name.
    if s1.find(',') > 0:
        tree += '1'
        # Last name is everything left of the FIRST comma
        lastname, s_right = re.findall(r'([^\,]*),(.*)', s1)[0]
        # In case there are more commas:
        s_right = re.sub(r'\,', ' ', s_right)
        s_right = strip_string(s_right)
        tokens = s_right.split(' ')
        lengths = [len(s) for s in tokens]
        length_max = max(lengths)
        if len(lengths) == 1:
            tree += '1'
            # Single token right of the comma: that is the first name.
            firstname = tokens[0]
            return lastname, middlename, firstname
        else:
            # multiple tokens on the right
            tree += '0'
            # Positions of single-letter tokens (candidate initials).
            indices_1 = get_index(lengths, 1)
            if len(indices_1) == 0:
                tree += '2'
                # Multiple tokens, all more than one letter
                # First token is first name, next is middle name
                firstname = tokens[0]
                tokens.remove(firstname)
                middlename = ' '.join(tokens)
                return lastname, middlename, firstname
            elif len(indices_1) == 1:
                tree += '1'
                # Only one single letter token.
                middlename = tokens[indices_1[0]]
                tokens.remove(middlename)
                firstname = ' '.join(tokens)
                # firstname = ' '.join(tokens)
                return lastname, middlename, firstname
            else:
                tree += '0'
                # multiple single-letter tokens
                # First one is middlename,
                middlename = tokens[indices_1[0]]
                # If the first single letter token is not the
                # first RHS token, first name is all tokens uptp
                # the middle initial
                if indices_1[0] >= 1:
                    tree += '1'
                    firstname = ' '.join(tokens[0:indices_1[0]])
                else:
                    tree += '0'
                    # The first RHS token is single letter.
                    # What's the first name?
                    if length_max == 1:
                        tree += '1'
                        # If there are no multi-letter tokens,
                        # pick the second single letter one
                        # as first name
                        firstname = tokens[indices_1[1]]
                    else:
                        tree += '0'
                        # pick the first multi-letter token
                        # as first name
                        firstname = tokens[lengths.index(length_max)]
                # firstname = ' '.join(tokens)
                return lastname, middlename, firstname
    else:
        # String doesn't contain comma
        # I examined a large number of records with NAME not
        # containing a comma. None were human names. So it doesn't
        # really matter how you parse those.
        tokens = s1.split(' ')
        lastname = tokens[0]
        firstname = ' '.join(tokens[1:])
        return lastname, middlename, firstname
def permute_inplace(X, Y):
    '''
    Permute the list C{X} in place according to C{Y}.  C{Y} maps
    C{{c_index: t_index}}, meaning the value currently at C{X[c_index]}
    must end up at C{X[t_index]}.  C{Y} is consumed (emptied) in the
    process; swap passes repeat until every entry is a fixed point.
    '''
    while Y:
        # Keys whose payload already sits in its final slot this pass.
        settled = []
        for src in Y:
            dst = Y[src]
            if src == dst:
                settled.append(src)
                continue
            # Swap the payloads, and keep the bookkeeping map consistent.
            X[dst], X[src] = X[src], X[dst]
            Y[dst], Y[src] = Y[src], Y[dst]
        for key in settled:
            del Y[key]
def bad_identifier(identifier, type='employer'):
    '''
    Decide if the affiliation identifier is a "bad" or "low-information"
    identifier (e.g. "SELF EMPLOYED", "N/A", "RETIRED") that should not be
    used as linking evidence.

    @param identifier: employer or occupation string from the record.
    @param type: either 'employer' or 'occupation'; selects the term list.
    @return: True if the identifier is empty or matches a junk term.
    @raise Exception: if C{type} is neither 'employer' nor 'occupation'.
    '''
    if identifier == '':
        return True
    if type == 'employer':
        # BUG FIX: the original pattern contained "docto\br"; that \b sits
        # between two word characters and can never match, so "doctor" was
        # silently never flagged — corrected to "doctor\b".  The original
        # raw string's backslash-newline artifacts also produced stray
        # whitespace-only alternatives, dropped here.
        terms = [r'\bNA\b', r'N\.A', r'employed', r'self', r'N\/A',
                 r'information request', r'retired', r'teacher\b',
                 r'scientist\b', r'applicable', r'not employed', r'none',
                 r'homemaker', r'requested', r'executive', r'educator\b',
                 r'attorney\b', r'physician', r'real estate', r'student\b',
                 r'unemployed', r'professor\b', r'refused', r'doctor\b',
                 r'housewife', r'at home', r'president', r'best effort',
                 r'consultant\b', r'email sent', r'letter sent',
                 r'software engineer', r'CEO', r'founder', r'lawyer\b',
                 r'instructor\b', r'chairman\b']
    elif type == 'occupation':
        # "retited" is kept on purpose: it matches a common typo of "retired".
        terms = [r'unknown', r'requested', r'retired', r'none', r'retire',
                 r'retited', r'ret\b', r'declined', r'N.A\b', r'refused',
                 r'NA\b', r'employed', r'self']
    else:
        print('invalid identifier type')
        raise Exception("Identifier type must be either 'employer' or 'occupation'")
    regex = '|'.join(terms)
    if re.search(regex, identifier, flags=re.IGNORECASE):
        return True
    else:
        return False
def loadAffiliationNetwork(state, affiliation, percent=5, poststage1=False):
    '''
    Loads the saved output of AffiliatoinAnalyzer from file: the affiliation network.
    It also adds a new attribute to the graph instance that contains a dictionary from
    affiliation identifier strings to the index of their corresponding vertex in the graph object.
    TODO: allow filtering based on value of an edge (or vertex) parameter

    @param state: state label substituted into the config file templates.
    @param affiliation: 'employer' or 'occupation'.
    @param percent: keep only the top C{percent} percent of edges by significance.
    @param poststage1: select the post-stage-1 file templates when True.
    @return: igraph.Graph with an added C{dict_string_2_ind} attribute,
        or None when the GML file is missing.
    NOTE(review): this function uses Python 2 syntax (print statements,
    `except Exception, e`), like the rest of this module.
    '''
    def prune(G, field='significance', percent=5):
        '''
        Remove all but the top X percent of the edges with respect to the value of their field.
        '''
        deathrow = []
        n = len(G.es)
        # Index of the value separating the bottom (100-percent)% of edges.
        threshold_index = n - n * percent / 100
        threshold_value = sorted(G.es[field])[threshold_index]
        for e in G.es:
            if e[field] < threshold_value:
                deathrow.append(e.index)
        G.delete_edges(deathrow)
        return G
    try:
        # Pick the GML filename from the config templates.
        if affiliation == 'employer':
            if poststage1:
                filename = config.affiliation_poststage1_employer_file_template % state
            else:
                filename = config.affiliation_employer_file_template % state
        elif affiliation == 'occupation':
            if poststage1:
                filename = config.affiliation_poststage1_occupation_file_template % state
            else:
                filename = config.affiliation_occupation_file_template % state
        else:
            raise Exception("Unable to load affiliation graphs. Affiliation must be 'occupation' or 'employer'")
        # filename = f = data_path + label + affiliation + '_graph.gml'
        print filename
        G = igraph.Graph.Read_GML(filename)
        try:
            G = prune(G, field='significance', percent=percent)
        except Exception, e:
            print e
            print "Error pruning the affiliation graph. Reloading the full graph."
            G = igraph.Graph.Read_GML(filename)
        # Map each vertex's label (the affiliation string) to its index.
        dict_string_2_ind = {v['label']:v.index for v in G.vs}
        G.dict_string_2_ind = dict_string_2_ind
    except IOError:
        print "ERROR: Affiliation Network data not found."
        G = None
    # Not really necessary any more. I construct a {string: index} dictionary from the loaded Graph myself.
    # metadata = json.load(open(data_path + label + '-' + affiliation + '-metadata.json'))
    return G
def Log(message, msg_type="Error"):
    '''
    Append a timestamped message to the log file configured in config.

    @param message: text to record.
    @param msg_type: category label for the entry, e.g. "Error" or "Warning".
    '''
    log_path = config.log_filename
    timestamp = time.strftime("%c")
    entry = config.log_message_template % (timestamp, msg_type, message)
    with open(log_path, 'a') as log_file:
        log_file.write(entry)
def partition_list_of_graphs(mylist, num_partitions):
    '''
    Partition a list of graphs into subsets so that the total node count of
    each subset is roughly equal (greedy: always hand the next-largest graph
    to the currently smallest subset).

    Note: C{mylist} is consumed (emptied) in the process.

    @param mylist: list of igraph.Graph instances (anything with C{vcount()}).
    @param num_partitions: desired number of partitions.
    @return: list where each element is a list of graphs.
    '''
    mylist.sort(key=lambda graph: graph.vcount())
    # Each entry is [graphs-in-subset, total-node-count-of-subset].
    partitions = [[[], 0] for _ in range(num_partitions)]
    while mylist:
        graph = mylist.pop()
        partitions.sort(key=lambda entry: entry[1])
        partitions[0][0].append(graph)
        partitions[0][1] += graph.vcount()
    # print "Sizes of partitions: ", [subset[1] for subset in A]
    return [entry[0] for entry in partitions]
def prune_dict(mydict, condition_fcn, filename=''):
    '''
    conditionally remove items from dict without
    copying it in memory:
    write dict to file, clear the dict, then
    read back the data from file and insert into
    fresh dict based on condition.

    NOTE(review): the caller's dict is cleared in place, but the surviving
    items end up in a NEW dict (the local rebinding below) -- callers must
    use the return value.
    @param mydict: dictionary with tuple(int,int) keys and int values
    @param condition_fcn: condition function applied to values. Values
    for which condition_fcn(value) is True will be kept.
    @param filename: name of tmp file to use.
    '''
    if not filename:
        filename = config.dict_paths['tmp_path'] + "tmp-%d.txt" % random.randint(0, 100000)
    # Spill surviving entries to disk, one "key~~~value" record per line.
    with open(filename, 'w') as f:
        for key, value in mydict.iteritems():
            if condition_fcn(value):
                f.write("%s~~~%s\n" % (str(key), str(value)))
    mydict.clear()
    mydict = {}
    # Read the survivors back into a fresh dict.
    with open(filename) as f:
        for line in f:
            l = line.strip()
            key, value = l.split("~~~")
            # Parse the key string into a tuple of two ints
            key = literal_eval(key)
            value = int(value)
            mydict[key] = value
    os.remove(filename)
    return mydict
def partition_integer(N, n):
    '''
    Partition an integer N into n roughly equal integers so that
    N = n_1 + n_2 + ... + n_n, returned as a numpy int array.

    @param N: the integer to be partitioned
    @param n: the number of partitions.
    '''
    # Evenly spaced (rounded) boundaries; consecutive differences are the parts.
    boundaries = np.round(np.arange(n + 1) * (float(N) / n))
    return (boundaries[1:] - boundaries[:-1]).astype(int)
def find_all_in_list(regex, str_list):
    '''Count every match of C{regex} across the strings in C{str_list}.

    @return: dict mapping each matched substring to its total frequency.'''
    match_counts = {}
    for text in str_list:
        for match in re.findall(regex, text):
            match_counts[match] = match_counts.get(match, 0) + 1
    return match_counts
def get_next_batch_id():
    # NOTE(review): this early return makes everything below unreachable --
    # batch ids are currently drawn at random instead of from the persistent
    # counter file. Confirm whether the file-based scheme should be restored.
    return str(np.random.randint(0, 10000000))
    # (Unreachable) read the current batch counter, increment it on disk,
    # and return the pre-increment value.
    with open(config.src_path + '../../config/batches.list') as f:
        s = f.read()
    try:
        i = int(s)
    except:
        print "ERROR: bad batch id found in file: " , s
        raise
    with open(config.src_path + '../../config/batches.list', 'w') as f:
        f.write(str(i + 1))
    return(str(i))
def load_normalized_attributes(state):
    '''
    Load and return the pickled normalized-attributes dict for C{state}.
    '''
    path = config.normalized_attributes_file_template % state
    with open(path) as pickle_file:
        return cPickle.load(pickle_file)
def load_feature_vectors(state, tokenizer_class_name='Tokenizer'):
    '''
    Load and return the pickled feature vectors for C{state} and tokenizer class.
    '''
    path = config.vectors_file_template % (state, tokenizer_class_name)
    with open(path) as pickle_file:
        return cPickle.load(pickle_file)
def load_tokendata(state, tokenizer_class_name='Tokenizer'):
    '''
    Load and return the pickled tokendata for C{state} and tokenizer class.
    '''
    path = config.tokendata_file_template % (state, tokenizer_class_name)
    with open(path) as pickle_file:
        return cPickle.load(pickle_file)
def load_hashes(state, tokenizer_class_name='Tokenizer'):
    '''
    Load and return the pickled hashes dict for C{state} and tokenizer class.
    '''
    path = config.hashes_file_template % (state, tokenizer_class_name)
    with open(path) as pickle_file:
        return cPickle.load(pickle_file)
def jaccard_similarity(set1, set2):
    '''
    Return the Jaccard similarity |A & B| / |A | B| of two sets.

    Two empty sets are defined to be identical, so their similarity is 1.0
    (the previous implementation raised ZeroDivisionError in that case).
    '''
    union_size = len(set1.union(set2))
    if union_size == 0:
        return 1.0
    return 1. * len(set1.intersection(set2)) / union_size
def chunks_replace(l, n):
'''
split a list into precisely n contiguous chunks of roughly equal size.
As a chunk is extracted, delete that chunk from l. This is useful when
working with very large lists where due to memory concerns, we want to
avoid keeping duplicates of the chunks in memory.
@param l: list to be split.
@return: list of contiguous chunks extracted from l.
'''
N = len(l)
size = float(N) / n
n_removed = 0
list_chunks = []
for i in range(n):
chunk = l[int(i * size) - n_removed : int((i + 1) * size) - n_removed]
list_chunks.append(chunk)
del l[int(i * size) - n_removed : int((i + 1) * size) - n_removed]
n_removed += len(chunk)
return list_chunks
def chunks(l, n):
    '''
    Split list C{l} into exactly n contiguous chunks of roughly equal size.
    @param l: list to be split.
    @return: list of the n contiguous chunks (some may be empty).
    '''
    size = float(len(l)) / n
    result = []
    for i in range(n):
        result.append(l[int(i * size):int((i + 1) * size)])
    return result
def chunks_gen(l, n):
    '''
    Generator splitting list C{l} into n contiguous chunks of roughly equal
    size, yielding one non-empty chunk at a time.
    @param l: list to be split.
    @param n: number of chunks
    '''
    size = float(len(l)) / n
    for i in range(n):
        piece = l[int(i * size):int((i + 1) * size)]
        if piece:
            yield piece
def chunks_size(l, size):
    '''
    Divide list C{l} into consecutive chunks of length C{size}
    (the last chunk may be shorter).
    @return: a list of chunks (each one a list).
    '''
    pieces = []
    index = 0
    while index * size < len(l):
        pieces.append(l[int(index * size):int((index + 1) * size)])
        index += 1
    return pieces
def chunks_size_gen(l, size):
    '''
    Generator that divides list C{l} into consecutive chunks of length
    C{size} (the last chunk may be shorter), one chunk (a list) at a time.
    '''
    # The previous version also built an unused `list_chunks` accumulator;
    # removed as dead code.
    i = 0
    while i * size < len(l):
        yield l[int(i * size):int((i + 1) * size)]
        i += 1
def chunkit_padded(list_input, i, num_chunks, overlap=0):
    '''
    Return the i-th of C{num_chunks} roughly equal contiguous sublists of
    C{list_input}, extended on the right by up to C{overlap} extra elements
    (clamped at the end of the list), so neighbouring chunks overlap.
    '''
    n = len(list_input)
    size = float(n) / num_chunks
    start = int(math.ceil(i * size))
    end = min(int(math.ceil((i + 1) * size)) + overlap, n)
    return list_input[start:end]
def covariance(list_of_strs):
    '''
    Return an upper-triangular matrix whose (i, j) entry (i <= j) counts the
    positions at which strings i and j agree. All strings are assumed to be
    at least as long as the first one.
    '''
    count = len(list_of_strs)
    width = len(list_of_strs[0])
    matrix = np.zeros([count, count])
    for row in range(count):
        for col in range(row, count):
            matrix[row, col] = sum(
                list_of_strs[row][k] == list_of_strs[col][k] for k in range(width))
    return matrix
def Hamming_distance(s1, s2):
    '''Return the number of positions at which s1 and s2 differ
    (compared over the shorter of the two lengths).'''
    distance = 0
    for c1, c2 in zip(s1, s2):
        if c1 != c2:
            distance += 1
    return distance
def random_uniform_hyperspherical(n):
    '''
    Return a unit vector drawn uniformly from the surface of the n-sphere:
    sample each coordinate from a standard normal, then normalise.
    '''
    vec = np.zeros([n, ])
    for index in range(n):
        vec[index] = random.gauss(0, 1)
    return vec / np.linalg.norm(vec)
def shuffle_list_of_str(list_of_strs):
    ''' Apply one common random permutation to every string in the list, in
    place. All strings are assumed to have equal length.'''
    n = len(list_of_strs[0])
    # list(...) so random.shuffle also works under Python 3, where range()
    # returns an immutable lazy sequence.
    perm = list(range(n))
    random.shuffle(perm)
    for j in range(len(list_of_strs)):
        list_of_strs[j] = ''.join([list_of_strs[j][perm[i]] for i in range(n)])
def argsort_list_of_dicts(seq, orderby):
    ''' argsort for a sequence of dicts (or dict subclasses): return the
    indices that sort C{seq} by the value stored under key C{orderby}.'''
    if not orderby:
        raise Exception("Must specify key to order dicts by.")
    # http://stackoverflow.com/questions/3071415/efficient-method-to-calculate-the-rank-vector-of-a-list-in-python
    return sorted(range(len(seq)), key=lambda index: seq[index][orderby])
def chunk_dict(dictionary, num_chunks):
    '''
    Split the dictionary into num_chunks roughly equal sub-dictionaries
    (round-robin over iteration order) and return them in a list.
    '''
    list_dicts = [{} for _ in range(num_chunks)]
    # .items() instead of the Python-2-only .iteritems() so this runs on
    # both Python 2 and Python 3.
    for counter, (key, value) in enumerate(dictionary.items()):
        list_dicts[counter % num_chunks][key] = value
    return list_dicts
def argsort(seq):
    '''
    Generic argsort: return the indices that would sort C{seq}.
    U{Source: <http://stackoverflow.com/questions/3071415/efficient-method-to-calculate-the-rank-vector-of-a-list-in-python>}
    '''
    return sorted(range(len(seq)), key=lambda index: seq[index])
def shuffle_str(s):
    '''
    Return a random permutation of the characters of C{s}
    (convert to list, shuffle in place, join back).
    '''
    chars = list(s)
    random.shuffle(chars)
    return ''.join(chars)
def sparse_vector(vec):
    '''
    Convert a dense 1-d vector into its sparse {index: value} representation,
    keeping only the nonzero components.
    '''
    return {index: value for index, value in enumerate(vec) if value != 0}
def vec_norm(vec):
    '''
    Return the Euclidean (2-)norm of a sparse {index: value} vector.
    '''
    return np.sqrt(sum(component ** 2 for component in vec.values()))
def inner_product(vec_short, vec):
    '''
    Return the inner product of two sparse {index: value} vectors.
    It is assumed that the first argument is the shorter of the two, so the
    cost depends only on the number of nonzero elements, not on the
    dimension.

    Indices absent from C{vec} are treated as zero components (the previous
    version raised KeyError for them, contradicting the sparse-vector
    convention). An unused loop counter was also removed.
    '''
    total = 0
    for ind in vec_short:
        total += vec_short[ind] * vec.get(ind, 0)
    return total
def generate_rand_list_of_vectors(N, dim):
    '''
    Generate a random set of N sparse binary input vectors ({index: 1} dicts)
    of dimension dim. For testing purposes. Vectors are produced in "clusters":
    within a cluster each vector reuses ~30% of the previous vector's indices,
    plus a few fresh random indices.

    NOTE(review): indentation was reconstructed from a whitespace-stripped
    source -- confirm the nesting of the two inner loops against the original.
    '''
    list_of_vectors = []
    i = 0
    cluster_size = 0
    vec_previous = []
    while (i < N):
        vec = {}
        # Start a new cluster of random size when the current one is exhausted.
        if cluster_size == 0:
            cluster_size = round(abs(random.gauss(0, 1) * 5)) + 1
        else:
            cluster_size -= 1
        # generate a random sparse vector
        if cluster_size != 0:
            # Reuse ~30% of the previous vector's coordinates within a cluster.
            for j in vec_previous:
                if random.random() < 0.3:
                    vec[j] = 1
        # Sprinkle in a few fresh random coordinates.
        for j in range(25):
            if random.random() < 0.05:
                vec[random.randint(0, dim - 1)] = 1
        # Only keep (and count) non-empty vectors.
        if vec:
            list_of_vectors.append(vec)
            vec_previous = vec
            i += 1
    return list_of_vectors
def print_resource_usage(msg):
    # Python 2 print statement: emit msg followed by this process's resource
    # usage (CPU times, max RSS, ...) for coarse profiling.
    print msg, resource.getrusage(resource.RUSAGE_SELF)
def get_random_string(length):
    '''
    Return a random string of the given length, drawn with replacement from
    the module-level alphabet C{abcd} (defined elsewhere in this module).
    '''
    return ''.join(random.choice(abcd) for _ in range(length))
| true |
f06efaa14b870a6b2c6a2fad2584d93ea07c7bd0 | Python | Pasarus/ess | /src/ess/reflectometry/orso.py | UTF-8 | 10,753 | 2.671875 | 3 | [
"BSD-3-Clause"
] | permissive | # SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2021 Scipp contributors (https://github.com/scipp)
# flake8: noqa: E501
"""
This is the implementation of the ORSO header information.
"""
# author: Andrew R. McCluskey (arm61)
import yaml
import socket
import datetime
import pathlib
import getpass
from ess import __version__ as VERSION
OSRO_VERSION = 0.1
def noop(self, *args, **kw):
    # Intentional no-op used to disable a yaml.Emitter method below.
    pass
# Monkey-patch: suppress yaml's "!!python/object:..." type tags in dumped
# output so the header objects serialise as plain, ORSO-style YAML.
yaml.emitter.Emitter.process_tag = noop
def _repr(class_to_represent):
    """
    Shared representation helper for all Header sub-classes: dump the
    object's attributes as ORSO-compatible YAML, preserving attribute
    insertion order (sort_keys=False).

    Args:
        class_to_represent (:py:class:`object`): The instance to be represented.

    Returns:
        (:py:attr:`str`): A YAML string representation.
    """
    return yaml.dump(class_to_represent, sort_keys=False)
class Header:
    """
    Base class supplying the YAML ``__repr__`` shared by all ORSO header items.
    """
    def __repr__(self):
        """
        Serialise this object's attributes as an ORSO-compatible YAML string.
        """
        return _repr(self)
class Orso:
    """
    The class for the Orso header object: the top-level container serialised
    as the YAML header of a reduced reflectivity data file.

    Args:
        creator (:py:class:`orso.Creator`): Information about the creation of the reduction.
        data_source (:py:class:`orso.DataSource`): Details of the data being reduced.
        reduction (:py:class:`orso.Reduction`): Information about the reduction that is performed.
        columns (:py:attr:`list` of :py:class:`orso.Column`): A list of the different columns present in the data.
    """
    def __init__(self, creator, data_source, reduction, columns):
        self.creator = creator
        self.data_source = data_source
        self.reduction = reduction
        self.columns = columns

    def __repr__(self):
        """
        Unlike the other header classes, prepend the mandatory ORSO top line
        before the YAML dump of this object.
        """
        return f'# ORSO reflectivity data file | {OSRO_VERSION:.1f} standard | YAML encoding | https://reflectometry.org\n' + _repr(
            self)
class Creator(Header):
    """
    Who performed the reduction, where and when.

    Args:
        name (str, optional): person who performed the reduction (may include
            an email address). Defaults to the current OS user name.
        affiliation (str, optional): affiliation of that person; only stored
            when given.
        time (str, optional): reduction time, "%Y-%m-%dT%H:%M:%S" format.
            Defaults to the current time.
        system (str, optional): machine the reduction ran on. Defaults to the
            local host name.
    """
    def __init__(self, name=None, affiliation=None, time=None, system=None):
        self.name = getpass.getuser() if name is None else name
        if affiliation is not None:
            self.affiliation = affiliation
        if time is None:
            time = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
        self.time = time
        self.system = socket.gethostname() if system is None else system
class Sample(Header):
    """
    Description of the measured sample.

    Args:
        name (str, optional): identifiable description for the sample.
            Defaults to 'Sample'.
    """
    def __init__(self, name='Sample'):
        # Human-readable identifier for the sample.
        self.name = name
class ValueScalar(Header):
    """
    A single value with a unit.

    Args:
        magnitude (float): the value.
        unit (str, optional): the unit; the Angstrom glyph '\xC5' is
            normalised to 'angstrom'. Defaults to 'dimensionless'.
    """
    def __init__(self, magnitude, unit='dimensionless'):
        self.magnitude = magnitude
        self.unit = 'angstrom' if unit == '\xC5' else unit
class ValueRange(Header):
    """
    A range with an upper and lower limit and a unit.

    Args:
        min (float): the minimum value.
        max (float): the maximum value.
        unit (str, optional): the unit; the Angstrom glyph '\xC5' is
            normalised to 'angstrom'. Defaults to 'dimensionless'.
    """
    def __init__(self, min, max, unit='dimensionless'):
        self.min = min
        self.max = max
        self.unit = 'angstrom' if unit == '\xC5' else unit
class Measurement(Header):
    """
    Details of how the measurement was performed.

    Args:
        scheme (str, optional): measurement scheme (e.g. 'energy-dispersive').
        omega (ValueScalar or ValueRange, optional): incident angle value or range.
        wavelength (ValueScalar or ValueRange, optional): measured wavelength
            value or range.
        polarisation (str, optional): polarisation present, typically '+',
            '-' or a combination.
    """
    def __init__(self, scheme=None, omega=None, wavelength=None, polarisation=None):
        # Store only the fields that were supplied, so absent ones are
        # omitted from the serialised YAML header entirely.
        for attr, value in (('scheme', scheme), ('omega', omega),
                            ('wavelength', wavelength), ('polarisation', polarisation)):
            if value is not None:
                setattr(self, attr, value)
class Experiment(Header):
    """
    Experimental details.

    Args:
        instrument (str, optional): name of the instrument.
        probe (str, optional): name of the probing radiation.
        sample (orso.Sample, optional): description of the sample.
    """
    def __init__(self, instrument=None, probe=None, sample=None):
        # Only supplied fields are stored, so absent ones are left out of
        # the serialised header.
        for attr, value in (('instrument', instrument), ('probe', probe),
                            ('sample', sample)):
            if value is not None:
                setattr(self, attr, value)
class DataSource(Header):
    """
    Where the data being reduced came from and who owns it.

    Args:
        owner (str, optional): name (and affiliation/email) of the data owner.
        facility (str, optional): facility where the data was generated.
        experiment_id (str, optional): experiment identifier (e.g. proposal ID).
        experiment_date (str, optional): date or date range of the experiment,
            "%Y-%m-%d" format. Defaults to today's date.
        title (str, optional): a name for the data source.
        experiment (orso.Experiment, optional): experimental setup details.
        measurement (orso.Measurement, optional): measurement scheme details.
    """
    def __init__(self,
                 owner=None,
                 facility=None,
                 experiment_id=None,
                 experiment_date=None,
                 title=None,
                 experiment=None,
                 measurement=None):
        # Optional fields are stored only when supplied, so they are omitted
        # from the serialised header otherwise.
        if owner is not None:
            self.owner = owner
        if facility is not None:
            self.facility = facility
        if experiment_id is not None:
            self.experiment_id = experiment_id
        # The experiment date is always present; default to today.
        if experiment_date is None:
            experiment_date = datetime.datetime.now().strftime("%Y-%m-%d")
        self.experiment_date = experiment_date
        for attr, value in (('title', title), ('experiment', experiment),
                            ('measurement', measurement)):
            if value is not None:
                setattr(self, attr, value)
class File(Header):
    """
    A reference to a data file together with its creation timestamp.

    Attributes:
        creation (str): date and time the file was created,
            "%Y-%m-%dT%H:%M:%S" format.

    Args:
        file (str): the file name/path.

    Raises:
        FileNotFoundError: if the file does not exist.
    """
    def __init__(self, file):
        self.file = file
        path = pathlib.Path(file)
        if not path.exists():
            raise FileNotFoundError(f'The file {file} could not be found.')
        # st_ctime: metadata-change time on Unix, creation time on Windows.
        self.creation = datetime.datetime.fromtimestamp(
            path.stat().st_ctime).strftime("%Y-%m-%dT%H:%M:%S")
class Files(Header):
    """
    The input files of a reduction: experimental data files plus optional
    associated reference files.

    Args:
        data_files (list of orso.File): the experimental data files.
        reference_files (list of orso.File, optional): reference files;
            only stored when given.
    """
    def __init__(self, data_files, reference_files=None):
        self.data_files = data_files
        if reference_files is not None:
            self.reference_files = reference_files
class Reduction(Header):
    """
    Details of the reduction process.

    Args:
        script (str, optional): file name/path of the reduction script or
            notebook.
        input_files (orso.Files, optional): the input files for the reduction.
        comment (str, optional): an additional comment on the reduction.
    """
    def __init__(self, script=None, input_files=None, comment=None):
        # Always record the software (ess package) version that performed
        # the reduction.
        self.software = f'ess-{VERSION}'
        for attr, value in (('script', script), ('input_files', input_files),
                            ('comment', comment)):
            if value is not None:
                setattr(self, attr, value)
class Column:
    """
    Metadata describing one data column.

    Note: unlike the other header items this class does not inherit Header,
    so it keeps the default ``__repr__``.

    Args:
        quantity (str): name of the column.
        unit (str, optional): the unit. Defaults to 'dimensionless'.
        comment (str, optional): additional note on the column (e.g. the
            definition of an uncertainty column); only stored when given.
    """
    def __init__(self, quantity, unit='dimensionless', comment=None):
        self.quantity = quantity
        self.unit = unit
        if comment is not None:
            self.comment = comment
| true |
0d3f9c01db844af9d94595bfb2575688b4373333 | Python | ignaciosticco/metropolis | /ising/src/implementacion/Graficos/pick.py | UTF-8 | 2,259 | 2.5625 | 3 | [] | no_license | # Sacado de https://scipy.github.io/old-wiki/pages/Cookbook/Matplotlib/LaTeX_Examples.html
import pylab
import numpy as np
import matplotlib.pyplot as plt
import math
import scipy
from scipy import stats
# Figure geometry: a small golden-ratio figure for publication output.
golden_mean = (math.sqrt(5)-1.0)/2.0 # Aesthetic ratio
fig_width = 3+3/8 # width in inches
# NOTE(review): under Python 2, 3+3/8 is integer division and evaluates to 3,
# not 3.375 -- confirm which width is intended.
fig_height = fig_width*golden_mean # height in inches
fig_size = [fig_width,fig_height]
# Matplotlib rc parameters for small, 300-dpi EPS/PS output with LaTeX text.
params = {'backend': 'ps',
          'axes.titlesize': 8,
          'axes.labelsize': 9,
          'axes.linewidth': 0.5,
          'axes.grid': True,
          'axes.labelweight': 'normal',
          'font.family': 'serif',
          'font.size': 8.0,
          'font.weight': 'normal',
          'text.color': 'black',
          'xtick.labelsize': 8,
          'ytick.labelsize': 8,
          'text.usetex': True,
          'legend.fontsize': 8,
          'figure.dpi': 300,
          'figure.figsize': fig_size,
          'savefig.dpi': 300,
          }
pylab.rcParams.update(params)
#################### DATA ##############################
# Commented-out earlier three-argument variant of fzeta.
##def fzeta(a,b,c):
##    f=a/(b*c)
##    return f
def fzeta(a, b):
    """Return the ratio a / b (scaling function used for the data collapse)."""
    result = a / b
    return result
comp_ns=np.zeros(14)
proba=np.zeros(14)
#print (comp_ns)
# Load the first column of the percolation data file.
data = np.genfromtxt('pick_site_equi.txt')
x = data[:,0]
#print (len(ns))
#print (len(nsc))
# NOTE(review): `go` (plotly.graph_objs) and `py` (plotly) are never imported
# in this script, so the two lines below raise NameError as written --
# confirm the missing plotly imports.
pick = [go.Histogram(x=x)]
py.iplot(pick, filename='basic histogram')
#################### PLOT specification ###############################
# The triple-quoted block below is dead plotting code kept as an unused
# string literal.
"""plt.plot(logs,logproba,'ko',markersize=2,label='k',zorder=3)
pylab.ylabel('log(p-pc)')
pylab.xlabel('log(s)')
#pylab.ylim(2, 18)
pylab.xlim(1.5, 3)
#pylab.show()
coef_fit=np.polyfit(logs,logproba,1)
plt.hold(True)
x=np.linspace(1.5,3,100)
y=coef_fit[0]*x+coef_fit[1]
plt.plot(x,y)
print (coef_fit[0])
def rsquared(x, y):
    Return R^2 where x and y are array-like.
    slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(x, y)
    return r_value**2
print (rsquared(logs,logproba))
pylab.savefig('fig_5.eps', format='eps', bbox_inches='tight')
for j in range(1,16):
    plt.plot(zeta[j],f[j],'ko',markersize=1,label='k',zorder=3)
pylab.ylabel('f(z)')
pylab.xlabel('z')
#pylab.ylim(2, 18)
pylab.xlim(-5, 5)
#pylab.show()
pylab.savefig('fig_5.eps', format='eps', bbox_inches='tight')"""
# NOTE(review): the stray opening triple-quote below is unterminated at end
# of file and makes the script a syntax error as scraped -- confirm whether
# the original file had more content after this point.
"""
1cd7976228a3c3748995bd356b709483c06eb7f2 | Python | Angus-McLean/dynamic-connect-4 | /searchAlgo.py | UTF-8 | 1,752 | 2.609375 | 3 | [] | no_license | ## search-algo.py
import dynamicConnect4
import heuristic
from hashlib import sha1
import numpy as np
import time
import re
# Transposition table: (depth, state-hash) -> (score, best successor state).
DB_TRANSP = {}
# State store: sha1 hex digest -> board state (numpy array).
DB_STATE = {}

def put(state):
    """Store a board state under its SHA-1 content digest and return the
    digest string used as its key."""
    digest = sha1(state.tobytes()).hexdigest()
    DB_STATE[digest] = state
    return digest
def get(strHash):
    # Look up a previously put() board state by its SHA-1 digest key.
    return DB_STATE[strHash]
def executeTurn(state, player, depth):
    """Choose player's move from state as a (score, successor-state) pair:
    take an immediately winning move if one exists, otherwise fall back to
    the depth-limited alpha-beta search."""
    arrSuccessors = dynamicConnect4.generateSuccessorsForPlayer(player, state)
    actionValues = [heuristic.scoreMaxConnected(player, st) for st in arrSuccessors]
    # A max-connected score of 4 or more means a move completes a line of
    # four: play it immediately without searching.
    if max(actionValues) >= 4:
        return (max(actionValues), arrSuccessors[actionValues.index(max(actionValues))])
    return alphaBetaHeuristic(put(state), player, depth)
def alphaBetaHeuristic(stateStr, player, depth, alpha=-np.inf, beta=np.inf, heuristic=heuristic.fuzzyHeuristic):
    """Depth-limited alpha-beta search over hashed board states.

    player is +1 (maximiser) or -1 (minimiser); stateStr is a key previously
    returned by put(). Results are memoised per (depth, state-hash) in
    DB_TRANSP. Returns a (score, best successor state) pair.
    """
    # Optional instrumentation: only counts node visits if some caller has
    # attached a `count` attribute to this function beforehand.
    if hasattr(alphaBetaHeuristic, 'count'): alphaBetaHeuristic.count = alphaBetaHeuristic.count+1
    if (depth, stateStr) in DB_TRANSP: return DB_TRANSP[(depth, stateStr)]
    state = get(stateStr)
    # Evaluate from the opponent's perspective, then re-sign for this player.
    score = heuristic(player*-1, state)*player*-1
    # Cut off at depth 0 or when the position is already decided
    # (the heuristic saturates at |score| >= 1 for terminal positions).
    if depth==0 or abs(score)>=1:
        return (score, state)
    actionValues = []
    arrSuccessors = dynamicConnect4.generateSuccessorsForPlayer(player, state)
    for st in arrSuccessors:
        v, _ = alphaBetaHeuristic(put(st), player*-1, depth-1, alpha, beta)
        actionValues.append(v)
        # Standard alpha-beta window update and cutoff.
        if player>0:
            alpha = max(alpha, v)
        else :
            beta = min(beta, v)
        if beta <= alpha:
            break
    bestScore = max(actionValues) if (player > 0) else min(actionValues)
    DB_TRANSP[(depth, stateStr)] = (bestScore, arrSuccessors[actionValues.index(bestScore)])
    return DB_TRANSP[(depth, stateStr)]

print('done!')
5227a3ed33ba1835ad7047633a6032c4d12e7ded | Python | pkcsecurity/internal | /users/josh/carrier-etl/utils.py | UTF-8 | 134 | 2.640625 | 3 | [] | no_license | import re
def permissive_numeric_parse(s):
    """Strip every character except digits, '.' and '-' from s and parse the
    remainder as a float (e.g. '$1,234.50' -> 1234.5).

    Returns None when no number can be extracted.
    """
    try:
        return float(re.sub('[^0-9.-]', '', s))
    except (TypeError, ValueError):
        # Narrowed from a bare `except:` -- only parse failures (empty or
        # malformed remainder, non-string input) yield None; anything else
        # propagates.
        return None
| true |
17b0db922bae79f123fe42501db6f4a6cc024ef2 | Python | washingtoncandeia/PyCrashCourse | /06_Dicionarios/aliensmod.py | UTF-8 | 549 | 3.875 | 4 | [] | no_license | #!/bin/env python
# p. 159
# Build an empty fleet and fill it with 30 identical green aliens.
aliens = []
for alien in range(30):
    new_alien = {'color': 'green', 'points': 5, 'speed': 'slow'}
    aliens.append(new_alien)

# Upgrade the first three aliens. Every alien starts green, so the yellow
# branch can only trigger for an alien that was already yellow on entry.
for alien in aliens[0:3]:
    if alien['color'] == 'green':
        alien['color'] = 'yellow'
        alien['speed'] = 'medium'
        alien['points'] = 10
    elif alien['color'] == 'yellow':
        alien['color'] = 'red'
        alien['speed'] = 'fast'
        alien['points'] = 15

# Show the first ten aliens, then an ellipsis for the rest.
for alien in aliens[0:10]:
    print(alien)
print('...')
4009580e54cfb44a243dcbf39582d395951b99b6 | Python | yh4api/EE-RE-QC | /CHEntityExtraction/parseHownet.py | UTF-8 | 658 | 2.625 | 3 | [] | no_license | # -*- coding:utf-8 -*-
import re, os, sys
import xml.etree.ElementTree as ET
# Characters considered "special" in HowNet definitions, and the regex that
# strips them (note: the regex omits some characters from `special`).
special = "#*?!@%&$+(){}[]"
special_exp = r'[#!@%&\+\$\*\?\(\)\{\}\[\]]'
# Parse the HowNet concept XML and write one "word; token#NN#token ..." record
# per concept to a.out (Python 2: .encode("utf-8") yields byte strings).
tree = ET.parse("concept.xml")
root = tree.getroot()
fout = open("a.out", "w")
for concepts in root.findall("c"):
    defi = []
    word = concepts.get("w").encode("utf-8")
    definition = concepts.get("d").encode("utf-8")
    defs = definition.split(",")
    for d in defs:
        # Remove special characters, keep only the value of "key=value"
        # components, then split bilingual "chinese|english" alternatives.
        dplus = re.sub(special_exp, "", d)
        if "=" in dplus:
            dplus = dplus.partition("=")[2]
        tmp = dplus.split("|")
        for t in tmp:
            defi.append(t+"#NN#"+t)
    fout.write(word+"; ")
    fout.write(" ".join(defi))
    fout.write("\n\n")
fout.close()
b8f3afa54a5448dd06032c7011c0faf8f791c837 | Python | keshavg220/Pythoncodes | /json3.py | UTF-8 | 386 | 2.984375 | 3 | [] | no_license | import json
import urllib.request, urllib.parse, urllib.error
# Prompt for a JSON endpoint URL, fetch and parse it, then sum the 'count'
# fields of the entries under the top-level 'comments' key.
url = input("Enter..")
print('Retrieved :',url)
html = urllib.request.urlopen(url)
info = json.loads(html.read())
print('User count:', len(info))
sp = info['comments']
# t = tuple()
count = 0
for item in sp:
    count = count + item['count']
    # x = ('Count:', item['count'])
    # t = t + x
# l = list(t)
print(count)
a1cf741cf3942e39e2024f445ed37603b3c78d1b | Python | NightZpy/QuimicaDidactica | /src/sce_winner.py | UTF-8 | 1,417 | 2.6875 | 3 | [] | no_license | '''
Created on 29/06/2012
@author: nightzpy
'''
from scene import Scene
from config import PNG_EXT
import config
from buttom import Buttom
from graphics import load_image, resize
import pygame
class Sce_Winner(Scene):
    '''
    "Winner" scene: shows the scene background, all common buttons, and a
    continue button that switches to the configured next scene when pressed.
    '''
    def __init__(self, director, background_name, continue_scene):
        '''
        @param director: the scene director driving updates and transitions.
        @param background_name: background image name passed to Scene.
        @param continue_scene: scene to switch to when "continue" is pressed.
        '''
        Scene.__init__(self, director, background_name)
        # Make every shared button visible on this screen.
        # (Python 2: dict.itervalues().)
        for buttom in self.common_buttoms.itervalues():
            buttom.is_visible = True
        self.continue_scene = continue_scene
        # Continue button centred horizontally, 150 px below the vertical centre.
        self.next_btn = Buttom((config.width/2, (config.height/2 + 150)), config.b_size, 'continue_pressed'+PNG_EXT, 'continue_release'+PNG_EXT, True)

    def on_update(self):
        # Advance scene state; once the continue button has been pressed,
        # request the transition to the configured next scene.
        self.time = self.director.time
        self.update()
        self.next_btn.updater()
        if self.next_btn.is_pressed: self.go_scene = self.continue_scene

    def on_event(self, event):
        # Forward the event to the base scene, then update the continue
        # button's hover/pressed state from the current mouse position.
        self.event(event)
        if event.type == pygame.MOUSEMOTION:
            mouse_pos = pygame.mouse.get_pos()
            self.next_btn.mouse_over(mouse_pos)
        if event.type == pygame.MOUSEBUTTONUP:
            mouse_pos = pygame.mouse.get_pos()
            self.next_btn.pressed(mouse_pos)

    def on_draw(self, screen):
        # Draw the base scene first, then the continue button on top.
        self.draw(screen)
        self.next_btn.draw(screen)
9d66ffa4d4461c33ffd307c15bf7b41e12bfb497 | Python | Satily/leetcode_python_solution | /solutions/solution312.py | UTF-8 | 613 | 2.6875 | 3 | [
"MIT"
] | permissive | class Solution:
def maxCoins(self, nums: 'List[int]') -> 'int':
ln = len(nums)
dp = [[0] * (ln + 1) for _ in range(ln + 1)]
for l in range(1, ln + 1):
for i in range(ln + 1 - l):
s = 1
if i - 1 != -1:
s *= nums[i - 1]
if i + l != ln:
s *= nums[i + l]
for k in range(l):
dp[i][l] = max(dp[i][l], dp[i][k] + dp[i + k + 1][l - k - 1] + s * nums[i + k])
return dp[0][ln]
if __name__ == "__main__":
    # Smoke test with the classic example; expected output: 167.
    print(Solution().maxCoins([3, 1, 5, 8]))
93c095f5bec2d2161e423c815d0de3df27c9c6f3 | Python | row-yanbing/LeetCode | /[202]快乐数.py | UTF-8 | 1,335 | 4.03125 | 4 | [] | no_license | # 编写一个算法来判断一个数 n 是不是快乐数。
#
# 「快乐数」定义为:
#
#
# 对于一个正整数,每一次将该数替换为它每个位置上的数字的平方和。
# 然后重复这个过程直到这个数变为 1,也可能是 无限循环 但始终变不到 1。
# 如果 可以变为 1,那么这个数就是快乐数。
#
#
# 如果 n 是快乐数就返回 true ;不是,则返回 false 。
#
#
#
# 示例 1:
#
#
# 输入:19
# 输出:true
# 解释:
# 12 + 92 = 82
# 82 + 22 = 68
# 62 + 82 = 100
# 12 + 02 + 02 = 1
#
#
# 示例 2:
#
#
# 输入:n = 2
# 输出:false
#
#
#
#
# 提示:
#
#
# 1 <= n <= 231 - 1
#
# Related Topics 哈希表 数学 双指针
# 👍 642 👎 0
# leetcode submit region begin(Prohibit modification and deletion)
class Solution:
    """Happy-number check using Floyd's tortoise-and-hare cycle detection."""

    def isHappy(self, n: int) -> bool:
        """Return True iff iterating the digit-square-sum map from n reaches 1.

        The slow pointer advances one step per iteration and the fast pointer
        two, so they must meet either at 1 (happy) or inside the cycle.
        """
        slow, fast = n, self.next(n)
        while slow != fast:
            slow = self.next(slow)
            fast = self.next(self.next(fast))
        return slow == 1

    def next(self, n):
        """Return the sum of the squares of the decimal digits of n."""
        return sum(int(digit) ** 2 for digit in str(n))
# leetcode submit region end(Prohibit modification and deletion)
# Quick manual check: 19 is happy (True), 2 is not (False).
solution = Solution()
print(solution.isHappy(19))
print(solution.isHappy(2))
| true |
17aeb90cbe1359cd023ad9b92a46aea4c0d7d837 | Python | cowboybebophan/LeetCode | /Solutions/160. Intersection of Two Linked Lists.py | UTF-8 | 1,049 | 3.828125 | 4 | [] | no_license | """
https://leetcode.com/problems/intersection-of-two-linked-lists/discuss/49798/Concise-python-code-with-comments
# the idea is if you switch head, the possible difference between length would be countered.
# On the second traversal, they either hit or miss.
# if they meet, pa or pb would be the node we are looking for,
# if they didn't meet, they will hit the end at the same iteration, pa == pb == None, return either one of them is the same.
"""
# Two pointers
class Solution(object):
    """Find the node at which two singly linked lists intersect, or None."""

    def getIntersectionNode(self, headA, headB):
        """Two-pointer walk: each pointer traverses its own list and then the
        other's, so both cover the same total length. On the second pass they
        either meet at the intersection node or both reach None together.
        """
        if not headA or not headB:
            return None
        walker_a, walker_b = headA, headB
        while walker_a != walker_b:
            # When a pointer falls off the end, restart it on the other head.
            walker_a = walker_a.next if walker_a else headB
            walker_b = walker_b.next if walker_b else headA
        # Either the shared node, or None when the lists never intersect.
        return walker_a
| true |
07e043c1e1bc45399dc551a8dc39cd0f3c39f042 | Python | Grolandman/TPK5120_Assignment3 | /productionLine.py | UTF-8 | 6,330 | 3.15625 | 3 | [] | no_license | # Assignment 3
# Table of Contents
# 1) Imported modules
# 2) Main
# 1) Imported modules
# -------------------
import sys
import math
import random
import statistics
import numpy as np
from matplotlib import pyplot as plt
# 1.2) Sides functions
def frange(start, stop, step):
    """Yield floats from start (inclusive) up to stop (exclusive) in
    increments of step.

    Each value is computed as start + k * step rather than by repeated
    addition, avoiding accumulated floating-point error over many steps.
    """
    k = 0
    value = start
    while value < stop:
        yield value
        k += 1
        value = start + k * step
# 2) Main
# -------
# Simulation parameters. Machine processing time = normal(mu, sigma)
# + uniform(lowerBound, upperBound); travel time = distance / speed.
seed = 92732            # RNG seed for reproducible runs
mu = 40                 # mean of the normal processing-time component
sigma = 2               # std-dev of the normal processing-time component
lowerBound = 2          # lower bound of the uniform processing-time component
upperBound = 4          # upper bound of the uniform processing-time component
speed = 50.0            # conveyor speed (distance units per time unit)
d1 = 300.0              # distance from line entry to machine 1
d2 = 250.0              # distance from machine 1 to machine 2
d3 = 180.0              # distance from machine 2 to the line exit
numberOfRuns = 1000     # default replication count (overridden in the main loop)
# 3) Parts
# --------
class Part:
    """One part flowing through the two-machine production line.

    Timestamps: t0 line entry, t1 arrival at machine 1, t2 end of machine-1
    processing, t3 arrival at machine 2, t4 end of machine-2 processing,
    t5 line exit. All except t0 are 0 until drawSchedule() is called.
    """

    def __init__(self, t0):
        self.t0 = t0  # start of the process
        self.t1 = 0   # reach machine 1
        self.t2 = 0   # end of processing on machine 1
        self.t3 = 0   # reach machine 2
        self.t4 = 0   # end of processing on machine 2
        self.t5 = 0   # reach the out-line

    def drawSchedule(self):
        """Sample the two machines' processing times and fill t1..t5.

        Uses the module globals d1/d2/d3, speed, mu/sigma and
        lowerBound/upperBound. For each machine the normal component is drawn
        before the uniform one (order matters for seeded reproducibility).
        """
        # Travel from line entry to machine 1.
        self.t1 = d1 / speed + self.t0
        # Machine 1: normal + uniform processing time.
        machine1_time = random.normalvariate(mu, sigma) + random.uniform(lowerBound, upperBound)
        self.t2 = self.t1 + machine1_time
        # Travel to machine 2.
        self.t3 = self.t2 + d2 / speed
        # Machine 2: normal + uniform processing time.
        machine2_time = random.normalvariate(mu, sigma) + random.uniform(lowerBound, upperBound)
        self.t4 = self.t3 + machine2_time
        # Travel to the line exit.
        self.t5 = self.t4 + d3 / speed

    def printPart(self):
        """Print all six timestamps on a single line."""
        print("t0=" + str(self.t0) + " t1=" + str(self.t1) + " t2=" + str(self.t2)
              + " t3=" + str(self.t3) + " t4=" + str(self.t4) + " t5=" + str(self.t5))
class Machine:
    """Busy-interval log for one machine of the line."""

    def __init__(self, machineN):
        self.machineN = machineN  # machine number: 1 or 2
        self.table = []           # list of [busy_start, busy_end] intervals

    def addToTable(self, part):
        """Record the interval during which this machine processed part
        (t1..t2 for machine 1, t3..t4 for machine 2)."""
        if self.machineN == 1:
            self.table.append([part.t1, part.t2])
        if self.machineN == 2:
            self.table.append([part.t3, part.t4])

    def useTime(self):
        """Return the total busy time accumulated in the table."""
        total = 0
        for start, end in self.table:
            total = total + (end - start)
        return total
class System:
    """The production line as a whole: its machines, parts and schedule helpers."""

    def __init__(self):
        self.machines = []
        self.parts = []
        self.tOP = 0  # time of process (kept for compatibility; not updated here)

    def partInConflict(self):
        """Return 1 if any part arrives at a machine while another part of
        this system is still being processed there (overlapping service
        windows on machine 1 or machine 2), else 0."""
        for i, part_i in enumerate(self.parts):
            for j, part_j in enumerate(self.parts):
                if i == j:
                    continue
                arrives_during_m1 = part_j.t1 < part_i.t1 < part_j.t2
                arrives_during_m2 = part_j.t3 < part_i.t3 < part_j.t4
                if arrives_during_m1 or arrives_during_m2:
                    return 1
        return 0

    def addMachine(self, machine):
        self.machines.append(machine)

    def addPart(self, part):
        self.parts.append(part)

    def generateParts(self, nbOfParts, dT):
        """Create nbOfParts parts entering the line dT time units apart."""
        entry_time = 0
        for _ in range(nbOfParts):
            self.addPart(Part(entry_time))
            entry_time = entry_time + dT

    def dispParts(self):
        """Print every part's schedule (printPart prints and returns None,
        which is printed too, matching the original output)."""
        for index, part in enumerate(self.parts):
            print("Part n°" + str(index) + ":")
            print(part.printPart())

    def drawAllSchedule(self):
        """Draw every part's schedule and log its busy intervals on both machines."""
        for part in self.parts:
            part.drawSchedule()
            self.machines[0].addToTable(part)
            self.machines[1].addToTable(part)

    def machineUseTimes(self):
        """Return each machine's utilisation: total busy time divided by the
        exit time of the last part."""
        makespan = self.parts[-1].t5
        return [machine.useTime() / makespan for machine in self.machines]
# ---- Monte-Carlo sweep over the inter-arrival spacing dT ----
random.seed(seed)
timeOfRun=14400 #s
abscisse=[]     # x values: tested dT spacings
ordonnee1=[]    # conflict rate (%)
ordonnee2=[]    # mean part flow time
ordonnee3=[]    # utilisation of machine A
ordonnee4=[]    # utilisation of machine B
for dT in frange(50, 60,0.25):
    numOfParts=int(round(timeOfRun/dT))
    # s, s2 and L are leftovers of an earlier manual statistics pass
    # (see the commented-out code at the bottom of this file).
    s = 0
    s2 = 0
    L = []
    numberOfRuns=500
    #creating parts
    lesT5=[]
    conflicts=[]
    useTimes=[]
    for i in range(numberOfRuns):
        machineA=Machine(1)
        machineB=Machine(2)
        controller=System()
        controller.addMachine(machineA)
        controller.addMachine(machineB)
        controller.generateParts(numOfParts,dT)
        controller.drawAllSchedule()
        #print(len(controller.parts))
        #print("Etat du conflit : "+str(controller.partInConflict()))
        conflicts.append(controller.partInConflict())
        # flow time of each part = exit time minus introduction time
        for part in controller.parts:
            lesT5.append(part.t5-part.t0)
        #print (lesT5)
        # for i in range(len(controller.machines)):
        #     print("La machine n°"+str(i)+" a un useTime de "+str(controller.machineUseTimes()[i]))
        useTimes.append(controller.machineUseTimes())
    # Per-dT summary (French): mean machine utilisation, conflict rate, mean/std flow time.
    for i in range(len(controller.machines)):
        print("La machine n°"+str(i)+" a un useTime moyen de "+str(np.mean(useTimes,axis=0)[i]))
    print("Le taux de conflit est de "+str(np.mean(conflicts)*100)+"%")
    print("Une part met en moyenne "+str(np.mean(lesT5))+"s à être produite, avec un écart type de "+str(np.std(lesT5)))
    abscisse.append(dT)
    ordonnee1.append(np.mean(conflicts)*100)
    ordonnee2.append(np.mean(lesT5))
    ordonnee3.append(np.mean(useTimes,axis=0)[0])
    ordonnee4.append(np.mean(useTimes,axis=0)[1])
# Four stacked subplots of the collected series against dT.
plt.figure(1, figsize=(9, 15))
plt.title('A tale of 2 subplots')
plt.subplot(411)
plt.plot(abscisse, ordonnee1)
plt.ylabel('Conflict rate (%)')
plt.subplot(412)
plt.scatter(abscisse, ordonnee2)
plt.ylabel('Mean time for a part (s)')
plt.subplot(413)
plt.plot(abscisse, ordonnee3)
plt.ylabel('UseTime machine A')
plt.subplot(414)
plt.plot(abscisse, ordonnee4)
plt.ylabel('Use time Machine B')
plt.xlabel('Time space between every part')
plt.show()
# part1=Part(0)
# part2=Part(1)
# part1.printPart()
# part2.printPart()
# controller=System()
# controller.addPart(part1)
#process
# for run in range(0, numberOfRuns):
# # time to reach machine 1
# L.append(t5)
# s += t5
# s2 += t5*t5
# mean = s/numberOfRuns
# m2 = s2/numberOfRuns
# variance = m2 - mean*mean
# standardDeviation = math.sqrt(variance)
# print("mean\t" + str(mean))
# print("standardDeviation\t" + str(standardDeviation))
# print("With statistics module")
# print("mean\t" + str(statistics.mean(L)))
# print("standardDeviation\t" + str(statistics.stdev(L)))
# controller.dispParts()
# print(len(controller.parts))
# print("Etat du conflit : "+str(controller.partInConflict())) | true |
2a301b01ba55f0cd3cfa98a52d26d3d077e055b6 | Python | alina-molnar/Mean_or_median | /mean_or_median.py | UTF-8 | 1,097 | 3.765625 | 4 | [] | no_license | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
# Create weekdays' list.
weekdays = ["Mo", "Tu", "We", "Th", "Fr", "Sa", "Su"]
# Create cash amount list.
amount = [10, 10, 5, 20, 15, 15, 60]
# Zip the two lists in a dataframe.
cash_df = pd.DataFrame(zip(weekdays, amount), columns=["weekday", "amount"])
# Create list of unique amounts to be used as ticks on x axis.
# NOTE(review): `unique_amount` is never used below; the ticks actually come from `x_lab`.
unique_amount = cash_df["amount"].unique()
x_lab = range(0, 65, 5)
# # Plot amount of cash each day.
# graph = sns.lineplot(x="weekday", y="amount", data=cash_df, marker="o")
# graph.set(title="Amount each day", xlabel="Weekday", ylabel="Amount")
# plt.show()
# Plot distribution of amount, mark mean and median.
graph = sns.kdeplot(x="amount", data=cash_df)
# Vertical reference lines at the sample median (solid) and mean (dashed).
graph.axvline(x=cash_df["amount"].median(), color="#2ca02c", linestyle="solid", label="median")
graph.axvline(x=cash_df["amount"].mean(), color="#ff7f0e", linestyle="dashed", label="mean")
graph.set(title="Distribution mean and median", xlabel="Amount")
graph.set_xticks(x_lab)
graph.set_xlim([0, 65])
graph.legend(fontsize=25)
plt.show()
| true |
b290183581917b3a8aeb021f0123d84266216ef4 | Python | camillevuillaume/SB1602E_driver | /SB1602E_driver.py | UTF-8 | 4,663 | 2.890625 | 3 | [
"MIT"
] | permissive | """
This is a Python driver for I2C text LCD with a command set compatible with the SB1602E.
It was tested with the following 8x2 LCD from switch-science:
http://www.switch-science.com/catalog/1516/
The software is based on the SB1602E C library by Ted Okano.
http://mbed.org/users/okano/code/TextLCD_SB1602E/file/694061176edf/TextLCD_SB1602E.h
Copyright (c) 2014 Camille Vuillaume
Released under the MIT License
"""
import smbus
from time import *
# I2C slave address of the SB1602E-compatible controller.
ADDRESS = 0x3E
#original datasheet
Comm_FunctionSet_Normal = 0x38
Comm_FunctionSet_Extended = 0x39
Comm_InternalOscFrequency = 0x14
Comm_ContrastSet = 0x70
Comm_PwrIconContrast = 0x5C
Comm_FollowerCtrl = 0x60
Comm_DisplayOnOff = 0x0C
Comm_ClearDisplay = 0x01
Comm_EntryModeSet = 0x04
Comm_ReturnHome = 0x02
Comm_SetDDRAMAddress = 0x80
# DDRAM start address of each display line (line 0 -> 0x00, line 1 -> 0x40).
DDRAMAddress_Ofst = [0x00, 0x40]
#SB1602E setting values
default_Contrast = 0x35
# Register selectors for lcd_command / lcd_data writes.
COMMAND = 0x00
DATA = 0x40
#Model-dependent
MaxCharsInALine = 0x08
NrLines = 2
class lcd:
    """Driver for an SB1602E-compatible I2C character LCD (2 lines x 8 chars).

    All traffic goes through smbus: `lcd_command` writes to the command
    register, `lcd_data` to the data (DDRAM) register.
    """
    # Cursor position per display line.  Kept as a class-level default for
    # backward compatibility, but re-initialised per instance in __init__ so
    # two lcd objects do not share cursor state.
    curs = [0, 0]
    def __init__(self):
        """Open the I2C bus and run the controller initialisation sequence."""
        self.bus = smbus.SMBus(1)
        self.curs = [0, 0]
        # Initialisation commands and delays follow the original sequence.
        sleep(0.04)
        self.lcd_command(Comm_FunctionSet_Normal)
        sleep(30e-6)
        self.lcd_command(Comm_ReturnHome)
        sleep(30e-6)
        self.lcd_command(Comm_FunctionSet_Extended)
        sleep(30e-6)
        self.lcd_command(Comm_InternalOscFrequency)
        sleep(30e-6)
        self.lcd_command(Comm_ContrastSet | ( default_Contrast & 0xF))
        sleep(30e-6)
        self.lcd_command(Comm_PwrIconContrast | ((default_Contrast >> 4) & 0x3))
        sleep(30e-6)
        self.lcd_command(Comm_FollowerCtrl | 0x0A)
        sleep(0.2)
        self.lcd_command(Comm_DisplayOnOff)
        sleep(30e-6)
        self.lcd_command(Comm_ClearDisplay)
        sleep(30e-6)
        self.lcd_command(Comm_EntryModeSet)
        sleep(0.0011)
    def clear_rest_of_line(self, line):
        """Blank out `line` from the current cursor position to its end.

        BUGFIX: the original definition was missing `self`, referenced the
        undefined names `curs`/`putcxy`, and passed the string ' ' where a
        byte value is expected (putc passes ord(c) to putcxy).
        """
        for col in range(self.curs[line], MaxCharsInALine):
            self.putcxy(ord(' '), col, line)
    def clear(self):
        """Clear the whole display and reset both line cursors."""
        self.lcd_command( Comm_ClearDisplay );
        sleep(2e-3);
        self.curs[0] = 0
        self.curs[1] = 0
    def putcxy(self, c, x, y):
        """Write character code `c` at column `x` of line `y` (ignored if out of range)."""
        if (x < MaxCharsInALine) and (y < NrLines):
            self.lcd_command( (Comm_SetDDRAMAddress | DDRAMAddress_Ofst[ y ]) + x)
            self.lcd_data(c)
    def putc(self, line, c):
        """Write one character code `c` at the current position of `line`.

        Returns the number of character cells left on that line (0 after a
        newline; None if the line was already full).
        BUGFIX: `c` arrives as an ord() code, so the newline test must compare
        against ord('\\n')/ord('\\r'); the original compared an int to the
        string literals, which was always False.
        """
        if c in (ord('\n'), ord('\r')):
            # Newline: blank the rest of the line and mark it full.
            self.clear_rest_of_line(line)
            self.curs[line] = MaxCharsInALine
            return 0
        elif self.curs[line] < MaxCharsInALine:
            self.putcxy(c, self.curs[line], line)
            self.curs[line] += 1
            return MaxCharsInALine - self.curs[line]
    def puts_line(self, line, str):
        """Write string at the current position of the given line.

        Returns the number of characters left in the line.
        Renamed from `puts`: the second `puts` definition below shadowed this
        one, so it was unreachable under its original name.
        """
        CharsLeft = 0
        for c in list(str):
            CharsLeft = self.putc(line, ord(c))
            if CharsLeft <= 0:
                break
        return CharsLeft
    def puts(self, str):
        """Clear the LCD and write `str`, wrapping onto the second line."""
        self.clear()
        line = 0
        for c in list(str):
            CharsLeft = self.putc(line, ord(c))
            if CharsLeft <= 0:
                line += 1
                if line >= NrLines:
                    break
    def puts_scroll(self, str1, str2):
        """Endlessly scroll both strings left-to-right, then right-to-left."""
        i = 0
        j = 0
        incr_i = 1
        incr_j = 1
        while 1:
            self.clear()
            for c in list(str1)[i: i+MaxCharsInALine]:
                self.putc(0, ord(c))
            for c in list(str2)[j: j+MaxCharsInALine]:
                self.putc(1, ord(c))
            i += incr_i
            j += incr_j
            # Reverse direction at either end; pause briefly at the extremes.
            if i == 0 and j == 0:
                incr_i = 1
                incr_j = 1
                sleep(0.5)
            elif i+MaxCharsInALine == len(str1) and j+MaxCharsInALine == len(str2):
                incr_i = -1
                incr_j = -1
                sleep(0.5)
            elif i == 0 or i+MaxCharsInALine == len(str1):
                incr_i = 0
            elif j == 0 or j+MaxCharsInALine == len(str2):
                incr_j = 0
            sleep(0.5)
    def set_contrast(self, contrast):
        """Set the LCD contrast (low nibble and two icon/power bits).

        BUGFIX: the original lacked `self` and misspelled its parameter
        (`constrast`) while the body read the undefined name `contrast`.
        """
        self.lcd_command(Comm_FunctionSet_Extended )
        self.lcd_command(Comm_ContrastSet | (contrast & 0x0f))
        self.lcd_command(Comm_PwrIconContrast | ((contrast>>4) & 0x03))
        self.lcd_command(Comm_FunctionSet_Normal)
    def lcd_command(self, command):
        """Low level: write `command` to the command register."""
        self.bus.write_byte_data(ADDRESS, COMMAND, command)
        sleep(0.0001)
    def lcd_data(self, data):
        """Low level: write one byte `data` to the data register."""
        self.bus.write_byte_data(ADDRESS, DATA, data)
        sleep(0.0001)
| true |
1d3a95bd95e10aed57661559250c71cafa34bfdb | Python | VolodymyrBodnar/PyBoost_group1 | /cards.py | UTF-8 | 910 | 3.28125 | 3 | [] | no_license |
class Card:
    """A playing card identified by a suit string and a rank string."""
    class SUIT:
        diamond = "diamonds"
        hearts = "hearts"
        spades = "spades"
        clubs = "clubs"
    class RANK:
        ace = "ace"
        jack = "jack"
        queen = "queen"
        king = "king"
        n_2 = "2"
        n_3 = "3"
        n_4 = "4"
        n_5 = "5"
        n_6 = "6"
        n_7 = "7"
        n_8 = "8"
        n_9 = "9"
        n_10 = "10"
    SUITS = (SUIT.diamond, SUIT.hearts, SUIT.spades, SUIT.clubs)
    RANKS = (
        RANK.ace, RANK.jack, RANK.queen, RANK.king,
        RANK.n_2, RANK.n_3, RANK.n_4, RANK.n_5, RANK.n_6,
        RANK.n_7, RANK.n_8, RANK.n_9, RANK.n_10,
    )
    def __init__(self, suit, rank):
        """Validate and store the suit/rank pair; raise ValueError on bad input."""
        if suit not in self.SUITS:
            raise ValueError('Incorrect rank or suit.')
        if rank not in self.RANKS:
            raise ValueError('Incorrect rank or suit.')
        self.suit = suit
        self.rank = rank
    def __add__(self, other):
        # Placeholder: card arithmetic is not implemented yet.
        pass
| true |
8180980580a4d09cfe7b5fcfbb8d21943a1412d3 | Python | Mrd278/Codeforces_Java | /Forgotten Language.py | UTF-8 | 900 | 2.875 | 3 | [] | no_license | for i in range(int(input())):
    # One test case: n dictionary words, then k sentence lines.
    n, k = map(int, input().split())
    f_l_d = list(map(str, input().split()))
    sentence = []
    d = dict()
    # Pre-fill every answer with "NO" (redundant: the loop below assigns every key again).
    for i in range(n):
        d[i]="NO"
    # Collect the tokens of all k sentence lines into one flat list.
    # NOTE(review): each sentence line appears to carry a leading word count (the
    # commented-out variant below strips it with lst[1:]); numeric tokens cannot
    # collide with dictionary words, so keeping them does not change the test.
    for m in range(k):
        sentence += list(map(str, input().split()))
    # Mark each dictionary word YES if it occurs anywhere in the sentences.
    for j in range(n):
        if f_l_d[j] in sentence:
            d[j] = "YES"
        else:
            d[j] = "NO"
    for l in range(n):
        print(d[l], end = " ")
    print()
'''
t=int(input())
while t:
t=t-1
m,k=list(map(int,input().split(" ")))
arr = list(map(str,input().split(" ")))
d=dict()
for i in arr:
d[i]="NO"
while k:
k=k-1
lst = list(map(str,input().split(" ")))
lst=lst[1:]
for i in lst:
if d.get(i)!=None:
d[i]="YES"
for i in arr:
print(d[i],end=" ")
print()
'''
| true |
385cec2219b906a17554b0e96f947ab844463b2e | Python | nkullman/ChileanReserveStudy | /ChileanReserve_Frontier4_DataScrubbing.py | UTF-8 | 2,470 | 3.5 | 4 | [] | no_license |
# coding: utf-8
# # Restoration of Chilean eucalyptus plantation
# ## Data wrangling exercise using Python/pandas
# Goal is to take existing solution information and convert to Tableau-friendly format
#
# Steps to get here:
#
# 1. Model created by T. Walter, edits by N. Kullman
# 2. Frontier corner points (ideal solutions) solved individually using standalone CPLEX runs
# 3. Interior frontier pts found using Alpha Delta Program (calls CPLEX for optimization)
# 4. Dominated solutions removed
# 5. Solution files underwent one round of scrubbing through Java program built specifically for this task.
#
# What's left (why we're here): Need all stands under a single column, rather than each having their own. And I want to learn data manipulation with Python/pandas.
#
# Commence data manipulation...
# In[334]:
import pandas
import numpy
import pylab
from pandas import *
from pylab import *
# In[335]:
# Read in and save solution data to a dataframe.
reserveSolns = read_csv('ADP_20150414_192212/FrontierSolutions_All_SolnYearID.csv')
# In[336]:
# Strip leading spaces from column names.
reserveSolns = reserveSolns.rename(columns=lambda x: x.strip())
# In[337]:
# Stands are 'pivoted' across the table (one column per stand).
# Here we unpivot them into 'Stand'/'Prescription' columns via melt + merge.
# Merge's left is the original dataset, minus the stands columns.
left = reserveSolns.ix[:, :"RI"]  # NOTE(review): .ix is deprecated in modern pandas; .loc[:, :"RI"] should be equivalent here
# Merge's right is an unpivoted version of just the stands columns.
# BUGFIX: the module does `import pandas` (no `as pd`), so the original call
# `pd.melt(...)` raised NameError; call melt on the bound `pandas` module.
right = pandas.melt(reserveSolns,
                    id_vars=['Solution Index', 'Year'],
                    # unpivot on the stands (first of which is XF502B)
                    value_vars=reserveSolns.columns.tolist()[reserveSolns.columns.tolist().index("XF502B"):],
                    var_name='Stand',
                    value_name='Prescription')
# Merge to create our desired dataset.
meltedReserveSolns = merge(left, right,
                           on=["Solution Index", "Year"],
                           how='outer')
# In[338]:
# Convert prescription column values to int.
meltedReserveSolns["Prescription"] = meltedReserveSolns["Prescription"].apply(int)
# In[339]:
# Export to CSV for data viz with Tableau.
meltedReserveSolns.to_csv('ReserveSolutions_Frontier4_AllPts.csv')
# We've finished what we set out to accomplish: learn some Python/pandas and clean a dataset for further analysis through visualization.
| true |
331fa5ccd0e8da5eff60574c984a749558858ef3 | Python | hopefulp/sandbox | /pyvasp/VAS_OSZICAR/OSZICAR_MD_Plot/Plot_OSZICAR_MD.py | UTF-8 | 2,905 | 2.890625 | 3 | [] | no_license | #!/usr/bin/python
#-*- coding:utf-8 -*-
import pylab as p
try:
input = raw_input
except NameError:
pass
info="T : is the current temperature.\n\
E : is the total free energy (including the kinetic energy of the ions and the energy of the Nosé thermostat).\n\
F : is the total free energy (at this point the energy of the reference atom has been subtracted).\n\
E0 : is the energy for sigma-> 0 .\n\
EK : is the kinetic energy.\n\
SP : is the potential energy of the Nosé thermostat.\n\
SK : is the corresponding kinetic energy.\n\n\
more infos : https://cms.mpi.univie.ac.at/vasp/vasp/stdout_OSZICAR_file.html\n"
ff=" as a function of MD Steps"
OSZ_Labels={ "Steps" : [0," Molecular Dynamic Steps "," (time) "],
"T" : [1," Temperature "," (K) ","Temperature"+ff],
"E" : [2," Free Energy "," (eV) ","Free Energy"+ff],
"F" : [3," Free Energy "," (eV) ","Free Energy"+ff],
"E0" : [4," Energy$_{\sigma=0}$ "," (eV) ","Energy $\sigma=0$"+ff ],
"EK" : [5," Kinetic Energy "," (eV) ","Kinetic Energy"+ff],
"SP" : [6," Potential Energy "," (eV) ","Potential Energy"+ff],
"SK" : [7," Corresponding $EK$ "," (eV) ","Corresponding $EK$"+ff]
}
# Molecular Dynamic OSZICAR Reading
def OSZICAR_READ():
    """Prompt for an OSZICAR file name and parse its MD lines.

    Every line containing "T=" holds alternating labels and values; taking
    every second token ([::2]) keeps the step index plus the numeric values.
    Returns a 2-D float array (one row per MD step), or an empty array when
    no MD lines are found.
    """
    path = input(" OSZICAR File Name ? > ")
    with open(path, "r") as handle:
        lines = handle.readlines()
    rows = []
    for line in lines:
        if "T=" in line:
            rows.append(p.array(line.split()[::2], dtype=float))
    if rows:
        return p.array(rows)
    return p.array([])
##
#Plot Data
def PLOT_DATA(arr,Xplot,Yplot):
    """Plot the `Yplot` column of the OSZICAR data against the `Xplot` column.

    arr   : 2-D array returned by OSZICAR_READ (one row per MD step).
    Xplot : OSZ_Labels key selecting the x column (normally "Steps").
    Yplot : OSZ_Labels key selecting the y column.
    Also prints the mean of the selected quantity.
    (The original bound OSZ_Labels[Yplot][1] to an unused local `lb`; removed.)
    """
    x=arr[:,OSZ_Labels[Xplot][0]]
    y=arr[:,OSZ_Labels[Yplot][0]]
    print("\n\tMean {} = {} {}\n".format(OSZ_Labels[Yplot][1],p.mean(y),OSZ_Labels[Yplot][2]))
    p.rcParams["font.family"] = "serif"
    p.figure(figsize=(10,6))
    p.rcParams.update({"font.size": 14})
    p.xlabel(OSZ_Labels[Xplot][1]+OSZ_Labels[Xplot][2])
    p.ylabel(OSZ_Labels[Yplot][1]+OSZ_Labels[Yplot][2])
    # Index 3 (the plot title) only exists for the Y quantities, not "Steps".
    p.title(OSZ_Labels[Yplot][3])
    p.plot(x,y,"r")
    p.show()
##
# Interactive driver: read the file once, then plot quantities until the user quits.
OSZ=OSZICAR_READ()
print(info)
PLOT=True
while PLOT:
    #Xplot=input("Xplot (Steps, T, E, F, E0, EK, SP or SK ) > ")
    Xplot="Steps"
    Yplot=input("Yplot (T, E, F, E0, EK, SP or SK ) > ")
    if (Xplot in OSZ_Labels) and (Yplot in OSZ_Labels):
        PLOT_DATA(OSZ,Xplot,Yplot)
    else:
        print("\n Error in Xplot or Yplot Label!!..\n")
    # NOTE: PLOT is reset every iteration (this line is outside the else),
    # then re-enabled below only if the user answers "yes".
    PLOT=False
    Todo=input("Plot Other Quantities (yes/no)? > ")
    if Todo=="yes":
        PLOT=True
        continue
    else:
        PLOT=False
print("\n Exit!..\n")
| true |
c8d43189c4dbeda6f57786a787922414eab598c9 | Python | aravindsriraj/machine-learning-python-datacamp | /Machine Learning Scientist with Python Track/16. Introduction to Deep Learning with Keras/ch4_exercises.py | UTF-8 | 5,759 | 3.140625 | 3 | [
"MIT"
] | permissive | # Exercise_1
# NOTE: the snippets below are DataCamp exercise answers pasted verbatim.
# They are not a runnable module: the '----' separator lines are not valid
# Python, and names such as `model`, `X_test`, `Sequential`, `text`,
# `vocab_size`, `axs` etc. come from each exercise's environment.
# Import keras backend
import keras.backend as K
# Input tensor from the 1st layer of the model
inp = model.layers[0].input
# Output tensor from the 1st layer of the model
out = model.layers[0].output
# Define a function from inputs to outputs
inp_to_out = K.function([inp], [out])
# Print the results of passing X_test through the 1st layer
print(inp_to_out([X_test]))
--------------------------------------------------
# Exercise_2
for i in range(0, 21):
    # Train model for 1 epoch
    h = model.fit(X_train, y_train, batch_size=16, epochs=1,verbose=0)
    if i%4==0:
        # Get the output of the first layer
        layer_output = inp_to_out([X_test])[0]
        # Evaluate model accuracy for this epoch
        test_accuracy = model.evaluate(X_test, y_test)[1]
        # Plot 1st vs 2nd neuron output
        plot()
--------------------------------------------------
# Exercise_3
# Start with a sequential model
autoencoder = Sequential()
# Add a dense layer with the original image as input
autoencoder.add(Dense(32, input_shape=(784, ), activation="relu"))
# Add an output layer with as many nodes as the image
autoencoder.add(Dense(784, activation="sigmoid"))
# Compile your model
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
# Take a look at your model structure
autoencoder.summary()
--------------------------------------------------
# Exercise_4
#1
# Build your encoder
encoder = Sequential()
encoder.add(autoencoder.layers[0])
# Encode the images and show the encodings
preds = encoder.predict(X_test_noise)
show_encodings(preds)
#2
# Build your encoder
encoder = Sequential()
encoder.add(autoencoder.layers[0])
# Encode the images and show the encodings
preds = encoder.predict(X_test_noise)
show_encodings(preds)
# Predict on the noisy images with your autoencoder
decoded_imgs = autoencoder.predict(X_test_noise)
# Plot noisy vs decoded images
compare_plot(X_test_noise, decoded_imgs)
--------------------------------------------------
# Exercise_5
# Import the Conv2D and Flatten layers and instantiate model
from keras.layers import Conv2D,Flatten
model = Sequential()
# Add a convolutional layer of 32 filters of size 3x3
model.add(Conv2D(filters= 32, input_shape=(28, 28, 1), kernel_size=3, activation='relu'))
# Add a convolutional layer of 16 filters of size 3x3
model.add(Conv2D(filters=16 , kernel_size=3, activation='relu'))
# Flatten the previous layer output
model.add(Flatten())
# Add as many outputs as classes with softmax activation
model.add(Dense(10, activation='softmax'))
--------------------------------------------------
# Exercise_6
# Obtain a reference to the outputs of the first layer
layer_output = model.layers[0].output
# Build a model using the model input and the first layer output
first_layer_model = Model(inputs = model.input, outputs = layer_output)
# Use this model to predict on X_test
activations = first_layer_model.predict(X_test)
# Plot the first digit of X_test for the 15th filter
axs[0].matshow(activations[0,:,:,14], cmap = 'viridis')
# Do the same but for the 18th filter now
axs[1].matshow(activations[0,:,:,17], cmap = 'viridis')
plt.show()
--------------------------------------------------
# Exercise_7
# Import image and preprocess_input
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input
# Load the image with the right target size for your model
img = image.load_img(img_path, target_size=(224, 224))
# Turn it into an array
img_array = image.img_to_array(img)
# Expand the dimensions of the image
img_expanded = np.expand_dims(img_array, axis = 0)
# Pre-process the img in the same way original images were
img_ready = preprocess_input(img_expanded)
--------------------------------------------------
# Exercise_8
# Instantiate a ResNet50 model with 'imagenet' weights
model = ResNet50(weights='imagenet')
# Predict with ResNet50 on your already processed img
preds = model.predict(img_ready)
# Decode the first 3 predictions
print('Predicted:', decode_predictions(preds, top=3)[0])
--------------------------------------------------
# Exercise_9
# Split text into an array of words
words = text.split()
# Make lines of 4 words each, moving one word at a time
lines = []
for i in range(4, len(words)):
    lines.append(' '.join(words[i-4:i]))
# Instantiate a Tokenizer, then fit it on the lines
tokenizer = Tokenizer()
tokenizer.fit_on_texts(lines)
# Turn lines into a sequence of numbers
sequences = tokenizer.texts_to_sequences(lines)
print("Lines: \n {} \n Sequences: \n {}".format(lines[:5],sequences[:5]))
--------------------------------------------------
# Exercise_10
# Import the Embedding, LSTM and Dense layer
from keras.layers import Embedding, LSTM, Dense
model = Sequential()
# Add an Embedding layer with the right parameters
model.add(Embedding(input_dim=vocab_size, output_dim=8, input_length=3))
# Add a 32 unit LSTM layer
model.add(LSTM(32))
# Add a hidden Dense layer of 32 units and an output layer of vocab_size with softmax
model.add(Dense(32, activation='relu'))
model.add(Dense(vocab_size, activation='softmax'))
model.summary()
--------------------------------------------------
# Exercise_11
def predict_text(test_text):
    if len(test_text.split())!=3:
        print('Text input should be 3 words!')
        return False
    # Turn the test_text into a sequence of numbers
    test_seq = tokenizer.texts_to_sequences([test_text])
    test_seq = np.array(test_seq)
    # Get the model's next word prediction by passing in test_seq
    pred = model.predict(test_seq).argmax(axis = 1)[0]
    # Return the word associated to the predicted index
    return tokenizer.index_word[pred]
--------------------------------------------------
| true |
f6c0d7907d5bc8639ff10f7445b7e922215981c4 | Python | rigidus/compilers_labs | /compiler/syntaxer.py | UTF-8 | 14,623 | 2.515625 | 3 | [] | no_license | import json
from ply import yacc
from lexer import tokens
from models import Node
from utils import NodeDrawer,JSONEncoder
start = 'programm'
def p_programm(p):
'''
programm : main_class class_list
'''
p[0] = Node('programm', children=[p[1], p[2]])
def p_empty(p):
'''
empty :
'''
def p_empty_class_list(p):
'''
class_list : empty
'''
p[0] = Node('class_list')
def p_class_list(p):
'''
class_list : class_list class
'''
children = p[1].children[:] if p[1] else []
children.append(p[2])
p[0] = Node('class_list', children=children)
def p_main_class(p):
'''
main_class : CLASS IDENTIFIER LEFT_BRACE main_method RIGHT_BRACE
'''
id_node = Node('identifier', children=[p[2]])
p[0] = Node('main_class', children=[id_node, p[4]])
def p_main_method(p):
'''
main_method : PUBLIC STATIC VOID MAIN LEFT_PARENTHESIS STRING LEFT_BRACKET RIGHT_BRACKET IDENTIFIER RIGHT_PARENTHESIS LEFT_BRACE statements_list RIGHT_BRACE
'''
dim_node = Node('dim', children=[1])
type_node = Node('type', children=[dim_node, p[6]])
id_node = Node('identifier', children=[p[9]])
arg_node = Node('arg', children=[type_node, id_node])
p[0] = Node('main_method', children=[arg_node, p[12]])
def p_class(p):
'''
class : CLASS IDENTIFIER extends LEFT_BRACE declaration_list RIGHT_BRACE
'''
id_node = Node('identifier', children=[p[2]])
p[0] = Node('class', children=[id_node, p[3], p[5]])
def p_empty_extends(p):
'''
extends : empty
'''
p[0] = Node('extends')
def p_extends(p):
'''
extends : EXTENDS IDENTIFIER
'''
id_node = Node('identifier', children=[p[2]])
p[0] = Node('extends', id_node)
def p_empty_declaration_list(p):
"""
declaration_list : empty
"""
def p_declaration_list(p):
"""
declaration_list : declaration_list field
| declaration_list method
"""
children = p[1].children[:] if p[1] else []
children.append(p[2])
p[0] = Node('declaration_list', children=children)
def p_field(p):
"""
field : single_or_array_var SEMICOLON
"""
p[0] = Node('field', children=[p[1]])
def p_int_single_or_array_var(p):
'''
single_or_array_var : INT identifier_or_brackets
'''
type_node = Node('type', children=[p[2][0], p[1]])
p[0] = Node('variable', children=[type_node, p[2][1]])
def p_boolean_single_or_array_var(p):
'''
single_or_array_var : BOOLEAN IDENTIFIER
'''
dim_node = Node('dim', children=[0])
type_node = Node('type', children=[dim_node, p[1]])
id_node = Node('identifier', children=[p[2]])
p[0] = Node('variable', children=[type_node, id_node])
def p_single_or_array_var_ref(p):
'''
single_or_array_var : IDENTIFIER IDENTIFIER
'''
dim_node = Node('dim', children=[0])
type_id_node = Node('identifier', children=[p[1]])
type_node = Node('type', children=[dim_node, type_id_node])
id_node = Node('identifier', children=[p[2]])
p[0] = Node('variable', children=[type_node, id_node])
# --- Methods, parameter lists and simple declaration statements ---
def p_method(p):
    """
    method : PUBLIC return_type_and_name LEFT_PARENTHESIS params_list RIGHT_PARENTHESIS LEFT_BRACE statements_list RETURN expression SEMICOLON RIGHT_BRACE
    """
    return_node = Node('return', children=[p[9]])
    children = [p[4], p[7], return_node]
    # p[2] is the [type_node, id_node] pair from return_type_and_name.
    children.extend(p[2])
    p[0] = Node('method', children=children)
def p_int_return_type_and_name(p):
    '''
    return_type_and_name : INT identifier_or_brackets
    '''
    type_node = Node('type', children=[p[2][0], p[1]])
    id_node = p[2][1]
    p[0] = [type_node, id_node]
def p_boolean_return_type_and_name(p):
    '''
    return_type_and_name : BOOLEAN IDENTIFIER
    '''
    dim_node = Node('dim', children=[0])
    type_node = Node('type', children=[dim_node, p[1]])
    id_node = Node('identifier', children=[p[2]])
    p[0] = [type_node, id_node]
def p_id_return_type_and_name(p):
    '''
    return_type_and_name : IDENTIFIER IDENTIFIER
    '''
    dim_node = Node('dim', children=[0])
    type_id_node = Node('identifier', children=[p[1]])
    type_node = Node('type', children=[dim_node, type_id_node])
    id_node = Node('identifier', children=[p[2]])
    p[0] = [type_node, id_node]
def p_empty_params_list(p):
    """
    params_list : empty
    """
    p[0] = Node('args')
def p_params_list(p):
    """
    params_list : args_list
    """
    p[0] = p[1]
def p_single_args_list(p):
    """
    args_list : arg
    """
    p[0] = Node('args', children=[p[1]])
def p_args_list(p):
    """
    args_list : args_list COMMA arg
    """
    children = p[1].children[:] if p[1] else []
    children.append(p[3])
    p[0] = Node('args', children=children)
def p_arg(p):
    '''
    arg : single_or_array_var
    '''
    p[0] = Node('arg', children=[p[1]])
def p_empty_statements_list(p):
    '''
    statements_list : empty
    '''
    p[0] = Node('statements')
def p_statements_list(p):
    '''
    statements_list : statements_list statement
    '''
    children = p[1].children[:] if p[1] else []
    children.append(p[2])
    p[0] = Node('statements', children=children)
def p_bool_statement(p):
    '''
    statement : BOOLEAN IDENTIFIER SEMICOLON
    '''
    dim_node = Node('dim', children=[0])
    type_node = Node('type', children=[dim_node, p[1]])
    id_node = Node('identifier', children=[p[2]])
    var_node = Node('variable', children=[type_node, id_node])
    p[0] = Node('statement', children = [var_node])
def p_int_statement(p):
    '''
    statement : INT identifier_or_brackets SEMICOLON
    '''
    type_node = Node('type', children=[p[2][0], p[1]])
    var_node = Node('variable', children=[type_node, p[2][1]])
    p[0] = Node('statement', children = [var_node])
def p_identifier_or_brackets_id(p):
    '''
    identifier_or_brackets : IDENTIFIER
    '''
    # Scalar form: dimension 0.
    dim_node = Node('dim', children=[0])
    id_node = Node('identifier', children=[p[1]])
    p[0] = [dim_node, id_node]
def p_identifier_or_brackets_br(p):
    '''
    identifier_or_brackets : LEFT_BRACKET RIGHT_BRACKET IDENTIFIER
    '''
    # Array form ("[] name"): dimension 1.
    dim_node = Node('dim', children=[1])
    id_node = Node('identifier', children=[p[3]])
    p[0] = [dim_node, id_node]
# --- Statements starting with an identifier, plus control-flow statements ---
def p_complex_statement(p):
    '''
    statement : IDENTIFIER identifier_or_assignment SEMICOLON
    '''
    # p[2] is a tag tuple: ('var', id_node) for a declaration, or
    # ('assign', index_or_None, right_part_node) for an assignment.
    st_type = p[2]
    if st_type[0] == 'var':
        dim_node = Node('dim', children=[0])
        type_id_node = Node('identifier', children=[p[1]])
        type_node = Node('type', children=[dim_node, type_id_node])
        id_node = st_type[1]
        var_node = Node('variable', children=[type_node, id_node])
        statement_node = Node('statement', children=[var_node])
    elif st_type[0] == 'assign':
        id_node = Node('identifier', children=[p[1]])
        left_part_children = [id_node]
        if st_type[1]:
            left_part_children.append(st_type[1])
        left_part_node = Node('left_part', children=left_part_children)
        right_part_node = st_type[2]
        assignment_node = Node('assignment', children=[left_part_node, right_part_node])
        statement_node = Node('statement', children=[assignment_node])
    p[0] = statement_node
def p_identifier_or_assignment_id(p):
    '''
    identifier_or_assignment : IDENTIFIER
    '''
    id_node = Node('identifier', children=[p[1]])
    p[0] = ('var', id_node)
def p_identifier_or_assignment_assign(p):
    '''
    identifier_or_assignment : ASSIGNMENT expression
    '''
    # NOTE(review): the node label 'rignt_part' is a typo for 'right_part';
    # left as-is because downstream consumers may match the literal string.
    right_part_node = Node('rignt_part', children=[p[2]])
    p[0] = ('assign', None, right_part_node)
def p_identifier_or_assignment_array_element_assign(p):
    '''
    identifier_or_assignment : LEFT_BRACKET expression RIGHT_BRACKET ASSIGNMENT expression
    '''
    index_node = Node('index', children=[p[2]])
    right_part_node = Node('rignt_part', children=[p[5]])
    p[0] = ('assign',index_node, right_part_node)
def p_if_statement(p):
    '''
    statement : IF LEFT_PARENTHESIS expression RIGHT_PARENTHESIS statement else_statement
    '''
    condition_node = Node('condition', children=[p[3]])
    children = [condition_node, p[5]]
    # else_statement yields None when absent (see p_empty_else_statement).
    if p[6]:
        children.append(p[6])
    if_node = Node('if', children=children)
    p[0] = Node('statement', children=[if_node])
def p_empty_else_statement(p):
    '''
    else_statement : empty
    '''
def p_else_statement(p):
    '''
    else_statement : ELSE statement
    '''
    p[0] = Node('else', children=[p[2]])
def p_while_statement(p):
    '''
    statement : WHILE LEFT_PARENTHESIS expression RIGHT_PARENTHESIS statement
    '''
    while_node = Node('while', children=[p[3], p[5]])
    p[0] = Node('statement', children=[while_node])
def p_print_statement(p):
    '''
    statement : SYSTEM POINT OUT POINT PRINTLN LEFT_PARENTHESIS expression RIGHT_PARENTHESIS SEMICOLON
    '''
    print_node = Node('print', children=[p[7]])
    p[0] = Node('statement', children=[print_node])
def p_block_statement(p):
    '''
    statement : LEFT_BRACE statements_list RIGHT_BRACE
    '''
    block_node = Node('block', children=[p[2]])
    p[0] = Node('statement', children=[block_node])
def p_expression(p):
'''
expression : array_element_expression
| field_expression
| call_method_expression
| binary_expression
| parenthesis_expression
| unary_expression
| new_expression
| identifier_expression
| integer_literal_expression
| boolean_expression
| this_expression
| null_expression
'''
p[0] = Node('expression', children=[p[1]])
def p_array_element_expression(p):
'''
array_element_expression : expression LEFT_BRACKET expression RIGHT_BRACKET
'''
p[0] = Node('array_element', children=[p[1], p[3]])
# --- Expression grammar rules (PLY / yacc) ----------------------------------
# NOTE: in PLY the docstring of every p_* function IS the grammar production;
# those strings are read at runtime and must not be reworded.
def p_field_expression(p):
    '''
    field_expression : expression POINT IDENTIFIER
    '''
    # obj.field --> field(object, identifier)
    id_node = Node('identifier', children=[p[3]])
    p[0] = Node('field', children=[p[1], id_node])
def p_call_method_expression(p):
    '''
    call_method_expression : expression POINT IDENTIFIER LEFT_PARENTHESIS expression_list RIGHT_PARENTHESIS
    '''
    # obj.method(args) --> method_call(object, identifier, expressions)
    id_node = Node('identifier', children=[p[3]])
    p[0] = Node('method_call', children=[p[1], id_node, p[5]])
def p_empty_expression_list(p):
    '''
    expression_list : empty
    '''
    # An empty argument list intentionally leaves p[0] as None.
    #p[0] = Node('expressions')
def p_nonempty_expression_list(p):
    '''
    expression_list : nonempty_expression_list
    '''
    p[0] = p[1]
def p_single_expression_list(p):
    '''
    nonempty_expression_list : expression
    '''
    p[0] = Node('expressions', children=[p[1]])
def p_expression_list_head(p):
    '''
    nonempty_expression_list : nonempty_expression_list COMMA expression
    '''
    # Rebuild the 'expressions' node with the extra expression appended; the
    # child list is copied so the previous node is not mutated.
    children=p[1].children[:]
    children.append(p[3])
    p[0] = Node('expressions', children=children)
def p_binary_expression(p):
    '''
    binary_expression : expression OR expression
                      | expression AND expression
                      | expression EQUAL expression
                      | expression NOT_EQUAL expression
                      | expression GREATER expression
                      | expression LESS expression
                      | expression PLUS expression
                      | expression MINUS expression
                      | expression TIMES expression
                      | expression DIVIDE expression
    '''
    # The operator token itself (p[2]) is stored as the 'operation' child.
    operand1_node = Node('operand1', children=[p[1]])
    operand2_node = Node('operand2', children=[p[3]])
    operation_node = Node('operation', children=[p[2]])
    p[0] = Node('binary_expression', children=[operand1_node, operation_node, operand2_node])
def p_parenthesis_expression(p):
    '''
    parenthesis_expression : LEFT_PARENTHESIS expression RIGHT_PARENTHESIS
    '''
    p[0] = Node('parenthesis_expression', children=[p[2]])
def p_unary_expression(p):
    '''
    unary_expression : MINUS expression %prec UMINUS
                     | NOT expression
    '''
    # %prec UMINUS gives unary minus the higher precedence declared in the
    # precedence table below, instead of binary MINUS's precedence.
    operation_node = Node('operation', children=[p[1]])
    operand_node = Node('operand', children=[p[2]])
    p[0] = Node('unary_expression', children=[operation_node, operand_node])
def p_new_array_expression(p):
    '''
    new_expression : NEW INT LEFT_BRACKET expression RIGHT_BRACKET
    '''
    # new int[size]
    type_node = Node('type', children=[p[2]])
    p[0] = Node('new_array_expression', children=[type_node, p[4]])
def p_new_identifier_expression(p):
    '''
    new_expression : NEW IDENTIFIER LEFT_PARENTHESIS RIGHT_PARENTHESIS
    '''
    # new ClassName()
    id_node = Node('identifier', children=[p[2]])
    p[0] = Node('new_expression', children=[id_node])
def p_identifier_expression(p):
    '''
    identifier_expression : IDENTIFIER
    '''
    p[0] = Node('identifier', children=[p[1]])
def p_integer_literal_expression(p):
    '''
    integer_literal_expression : INTEGER_LITERAL
    '''
    p[0] = Node('integer', children=[p[1]])
def p_boolean_expression(p):
    '''
    boolean_expression : TRUE
                       | FALSE
    '''
    p[0] = Node('boolean', children=[p[1]])
def p_this_expression(p):
    '''
    this_expression : THIS
    '''
    p[0] = Node('this')
def p_null_expression(p):
    '''
    null_expression : NULL
    '''
    p[0] = Node('null')
def p_error(p):
    # PLY error hook: called with the offending token (or None at EOF).
    # NOTE: Python 2 print statement -- this module is Python 2 code.
    print "Syntax error in input! %s" % p
# Operator precedence table for yacc.  Per the PLY convention, entries are
# listed from lowest to highest precedence.  UMINUS is a fictitious token
# referenced via %prec to give unary minus a precedence above binary MINUS.
precedence = (
    ('left', 'OR', 'AND'),
    ('nonassoc', 'LESS', 'GREATER', 'EQUAL', 'NOT_EQUAL'),
    ('left', 'PLUS', 'MINUS'),
    ('left', 'TIMES', 'DIVIDE'),
    ('right', 'UMINUS', 'NOT'),
    ('right', 'POINT'),
)
def parse(filename):
    # Build the parser tables, parse the whole file, dump the AST as JSON
    # and draw it, then return the AST root.
    parser = yacc.yacc()
    with open(filename) as fin:
        result = parser.parse(fin.read())
    print json.dumps(result, cls=JSONEncoder)
    drawer = NodeDrawer()
    drawer.draw(result)
    return result
if __name__ == '__main__':
    # Standalone debug run: parse 'test.java' with yacc debug logging
    # written to parselog.txt.
    import logging
    logging.basicConfig(
        level = logging.DEBUG,
        filename = "parselog.txt",
        filemode = "w",
        format = "%(filename)10s:%(lineno)4d:%(message)s"
    )
    log = logging.getLogger()
    parser = yacc.yacc(debug=True)
    with open('test.java') as fin:
        result = parser.parse(fin.read(), debug=log)
    print json.dumps(result, cls=JSONEncoder)
    drawer = NodeDrawer()
    drawer.draw(result)
| true |
c5ea51faba44cfdf761bdf2a08346a3274612a95 | Python | bhklab/DA-Detection | /CNN/RUNME.py | UTF-8 | 1,486 | 2.75 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 16 09:05:04 2019
@author: welchm
"""
#Example script for using DAClassification.py module for
# classification of Dental artifact presence in single H&N CT image.
import DAClassification
import os
import sys
#path to image
image_path = 'Q:/RTxomics/sorted/NRRD/0435496/20110117_/Reconstructions/2 Neck 2.0 CE.nrrd'
#Path to CNN file assumed to be in same directory as DAClassification.py
net_path = os.path.abspath(os.path.dirname(sys.argv[0]))
net_name = 'testCheckpoint.pth.tar'
#Load and transform image.
# 1) Image loading achieved using SimpleITK.
# 2) Image resampled to isotropic voxel size 1x1x1
# 3) Resampled image resized to 256x256x256 array while retaining aspect ratio
# 4) Resampled and resized image numpy array formated to Pytorch tensor
image = DAClassification.LoadImage(image_path)
#Load CNN for DA Classification.
#Uses five 3D convolutional layers to achieve a final machine generated
# features of size 8x8x8
net = DAClassification.LoadNet(net_path, net_name)
#Apply net to image for DA classification prediction
#Returns:
# 1) predicted_label = class associated maximum value returned by the net
# 2) softmax_prob = array of probabilities
# a) softmax_prob[0] = probability of DA- image (ie. no DA)
# b) softmax_prob[1] = probability of DA+ image (ie. DA present))
predicted_label, softmax_prob = DAClassification.GetPredictions(image, net) | true |
bca4095846845f28be295fec2fbf99dc5401e1c9 | Python | guillaume-philippon/slam-v2 | /slam/slam_network/models.py | UTF-8 | 19,561 | 2.75 | 3 | [] | no_license | """
This module provides models for networks. There are two models:
- Network: represents an IPv4 or IPv6 network
- Address: represents an IPv4 or IPv6 address
As we use django models.Model, pylint fails to find the objects method, so we
must disable the pylint check E1101 (no-member).
"""
# We also disable C0103 because pylint does not recognize "ip" as valid
# snake_case naming.
# pylint: disable=E1101, C0103
import ipaddress
from django.db import models
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.db.utils import IntegrityError
from slam_core.utils import error_message, name_validator
from slam_domain.models import DomainEntry, Domain
from slam_network.exceptions import NetworkFull
class Network(models.Model):
    """
    Network represents an IPv4 or IPv6 network.
    - name: human readable name of the network
    - ip: the network address (e.g. 192.168.0.0)
    - prefix: the network prefix length (e.g. 24)
    - description: a short description of the network
    - gateway: the IP of the network gateway
    - dns_master: the IP of the DNS master for reverse resolution (used to push data in production)
    - dhcp: the IP of the DHCP server (used to push data in production)
    - radius: the IP of the freeradius server (used to push data in production)
    - vlan: the VLAN id of the network
    - contact: a contact email for the network
    """
    name = models.CharField(max_length=50, unique=True, validators=[name_validator])
    ip = models.GenericIPAddressField(unique=True)
    prefix = models.IntegerField(default=24)
    description = models.CharField(max_length=150, default='')
    gateway = models.GenericIPAddressField(blank=True, null=True)
    dns_master = models.GenericIPAddressField(blank=True, null=True)
    dhcp = models.GenericIPAddressField(blank=True, null=True)
    radius = models.GenericIPAddressField(blank=True, null=True)
    vlan = models.IntegerField(default=1)
    contact = models.EmailField(blank=True, null=True)

    def show(self, key=False, short=False):
        """
        Return a dict representation of the network. Three output flavours:
        - standard: everything, plus a short view of every attached address
        - short: basic fields plus usage counters
        - key: only the primary key
        :param short: if True, return the short output
        :param key: if True, return the key output (overrides short)
        :return: dict
        """
        if key:
            return {'name': self.name}
        # Usage counters are common to the short and standard outputs.
        used_addresses = len(self.addresses())
        total_addresses = ipaddress.ip_network('{}/{}'.format(self.ip,
                                                              self.prefix)).num_addresses
        if short:
            return {
                'name': self.name,
                'address': self.ip,
                'prefix': self.prefix,
                'version': ipaddress.ip_address(self.ip).version,
                'description': self.description,
                'used_addresses': used_addresses,
                'total': total_addresses
            }
        return {
            'name': self.name,
            'address': self.ip,
            'prefix': self.prefix,
            'version': ipaddress.ip_address(self.ip).version,
            'description': self.description,
            'gateway': self.gateway,
            'dns_master': self.dns_master,
            'dhcp': self.dhcp,
            'radius': self.radius,
            'vlan': self.vlan,
            'contact': self.contact,
            'used_addresses': used_addresses,
            'total': total_addresses,
            'addresses': [address.show(short=True) for address in self.addresses()]
        }

    def is_include(self, ip):
        """
        Check whether an IP address belongs to this network.
        :param ip: IP address (string)
        :return: True if ip has the same version and is inside the network
        """
        address = ipaddress.ip_address(ip)
        network = ipaddress.ip_network('{}/{}'.format(self.ip, self.prefix))
        return address.version == network.version and address in network

    def addresses(self):
        """
        Return all Address objects attached to this network.
        :return: QuerySet of Address
        """
        return self.address_set.all()

    def get_free_ip(self):
        """
        Return the first host address of the network not already allocated.
        :raise NetworkFull: when every host address is in use
        :return: ipaddress.IPv4Address or ipaddress.IPv6Address
        """
        network = ipaddress.ip_network('{}/{}'.format(self.ip, self.prefix))
        # A set gives O(1) membership tests instead of scanning a list for
        # every candidate host.
        used = {ipaddress.ip_address(address.ip) for address in self.addresses()}
        for candidate in network.hosts():
            if candidate not in used:
                return candidate
        raise NetworkFull()

    def version(self):
        """
        Return the IP version (4 or 6) of the network.
        """
        return ipaddress.ip_network('{}/{}'.format(self.ip, self.prefix)).version

    @staticmethod
    def create(name, address, prefix, description='A short description', gateway=None,
               dns_master=None, dhcp=None, radius=None, vlan=1, contact=None):
        # pylint: disable=R0913
        """
        Create a new network.
        :param name: human readable name of the network
        :param address: IPv4 or IPv6 network address
        :param prefix: network prefix length
        :param description: a short description of the network
        :param gateway: IP of the network gateway
        :param dns_master: IP of the DNS master
        :param dhcp: IP of the DHCP server
        :param radius: IP of the freeradius server
        :param vlan: VLAN id of the network
        :param contact: a contact email for the network
        :return: status dict
        """
        try:
            network = Network(name=name, ip=address, prefix=prefix, description=description,
                              gateway=gateway, dns_master=dns_master, dhcp=dhcp, vlan=vlan,
                              contact=contact, radius=radius)
            # Validate once, inside the try block, so a ValidationError is
            # converted into an error dict instead of escaping to the caller
            # (the previous code called full_clean() a second time,
            # unprotected, after the except clause).
            network.full_clean()
        except (IntegrityError, ValidationError) as err:  # already exists / invalid
            return error_message('network', name, err)
        network.save()
        return {
            'network': network.name,
            'status': 'done'
        }

    @staticmethod
    def update(name, description=None, gateway=None, dns_master=None, dhcp=None, vlan=None,
               contact=None, radius=None):
        # pylint: disable=R0913
        """
        Update an existing network. Only non-None parameters are applied.
        :param name: human readable name of the network
        :param description: a short description of the network
        :param gateway: IP of the gateway
        :param dns_master: IP of the DNS master
        :param dhcp: IP of the DHCP server
        :param vlan: VLAN id
        :param contact: a contact email for the network
        :param radius: IP of the freeradius server
        :return: status dict
        """
        try:
            network = Network.objects.get(name=name)
        except ObjectDoesNotExist as err:
            return error_message('network', name, err)
        updates = {
            'description': description,
            'gateway': gateway,
            'dns_master': dns_master,
            'dhcp': dhcp,
            'radius': radius,
            'vlan': vlan,
            'contact': contact,
        }
        for field, value in updates.items():
            if value is not None:
                setattr(network, field, value)
        try:
            network.full_clean()
        except ValidationError as err:
            return error_message('network', name, err)
        network.save()
        return {
            'network': name,
            'status': 'done'
        }

    @staticmethod
    def remove(name):
        """
        Delete a network. As `delete` is already used by models.Model, we
        must use another name for this method.
        :param name: name of the network to delete
        :return: status dict
        """
        try:
            network = Network.objects.get(name=name)
            network.delete()
        except (ObjectDoesNotExist, IntegrityError) as err:
            return error_message('network', name, err)
        return {
            'network': name,
            'status': 'done'
        }

    @staticmethod
    def get(name):
        """
        Return all information about a network.
        :param name: name of the network
        :return: standard dict representation, or an error dict
        """
        try:
            network = Network.objects.get(name=name)
        except ObjectDoesNotExist as err:
            return error_message('network', name, err)
        return network.show()

    @staticmethod
    def search(filters=None):
        """
        Return the short representation of every network matching the filters.
        :param filters: dict of Django field lookups, or None for all networks
        :return: list of dicts
        """
        if filters is None:
            networks = Network.objects.all()
        else:
            networks = Network.objects.filter(**filters)
        return [network.show(short=True) for network in networks]
class Address(models.Model):
    """
    Address represents a specific IPv4 or IPv6 address on a network.
    - ip: the address itself
    - ns_entries: all NS entries pointing at this address (A, PTR, CNAME, ...)
    - creation_date: automatic timestamp set when the row is created
    - network: the Network this address belongs to (deletion protected)
    """
    ip = models.GenericIPAddressField(unique=True)
    ns_entries = models.ManyToManyField(DomainEntry)
    creation_date = models.DateTimeField(auto_now_add=True, null=True)
    network = models.ForeignKey(Network, on_delete=models.PROTECT)

    def show(self, key=False, short=True):
        """
        Return a dict representation of the address.
        :param key: if True, return only the primary key (overrides short)
        :param short: if True (default), return the short output
        :return: dict
        """
        if key:
            return {'ip': self.ip}
        if short:
            return {
                'ip': self.ip,
                'ns_entries': [entry.show(key=True) for entry in self.ns_entries.all()],
                'creation_date': self.creation_date,
                'network': self.network.show(key=True)
            }
        return {
            'ip': self.ip,
            'ns_entries': [entry.show(short=True) for entry in self.ns_entries.all()],
            'creation_date': self.creation_date,
        }

    def version(self):
        """
        Return the IP version (4 or 6) of the address.
        """
        return ipaddress.ip_address(self.ip).version

    @staticmethod
    def create(ip, network, ns_entry=None):
        """
        Create a new address, optionally with its A and PTR records.
        :param ip: the IP address
        :param network: the name of the network the address belongs to
        :param ns_entry: optional dict with 'name' and 'domain' keys for the
            DNS record to attach
        :return: status dict
        """
        try:
            try:
                network_address = Network.objects.get(name=network)
            except ObjectDoesNotExist as err:
                return error_message('address', ip, err)
            if not network_address.is_include(ip):
                # Bug fix: the error message used network_address.address,
                # but the Network model field is named `ip`.
                return error_message('address', ip, 'Address {} not in Network {}/{}'.format(
                    ip, network_address.ip, network_address.prefix))
            address = Address(ip=ip, network=network_address)
            address.full_clean()
        except (IntegrityError, ValueError, ValidationError) as err:
            return error_message('address', ip, err)
        address.save()
        if ns_entry is not None:
            try:
                domain = Domain.objects.get(name=ns_entry['domain'])
            except ObjectDoesNotExist as err:
                return error_message('address', ip, err)
            try:
                entry = DomainEntry.objects.get(name=ns_entry['name'], domain=domain, type='A')
            except ObjectDoesNotExist:
                # The A record does not exist yet: create it, then fetch it.
                result = DomainEntry.create(name=ns_entry['name'], domain=ns_entry['domain'])
                if result['status'] != 'done':
                    return error_message('address', ip, result['message'])
                entry = DomainEntry.objects.get(name=ns_entry['name'], domain=domain, type='A')
            try:
                entry_ptr = DomainEntry.objects.get(name=ns_entry['name'], domain=domain,
                                                    type='PTR')
            except ObjectDoesNotExist:
                # Same for the PTR record used for reverse resolution.
                result = DomainEntry.create(name=ns_entry['name'], domain=ns_entry['domain'],
                                            ns_type='PTR')
                if result['status'] != 'done':
                    return result
                entry_ptr = DomainEntry.objects.get(name=ns_entry['name'], domain=domain,
                                                    type='PTR')
            address.ns_entries.add(entry)
            address.ns_entries.add(entry_ptr)
        return {
            'address': address.ip,
            'status': 'done'
        }

    @staticmethod
    def include(ip, network, ns_entry, ns_type='A'):
        """
        Attach an existing NS entry to an address.
        :param ip: IP address
        :param network: network name (sanity check that ip belongs to it)
        :param ns_entry: fully qualified entry name (name.domain)
        :param ns_type: NS entry type (default 'A')
        :return: status dict
        """
        # Split the FQDN into the record name and its domain.
        ns, domain_name = ns_entry.split('.', 1)
        try:
            network_entry = Network.objects.get(name=network)
            if not network_entry.is_include(ip):
                # Bug fix: `network_entry.address` -> `network_entry.ip`.
                return error_message('entry', ns_entry, 'Address {} not in Network {}/{}'.format(
                    ip, network_entry.ip, network_entry.prefix))
            address_entry = Address.objects.get(ip=ip)
            domain_entry = Domain.objects.get(name=domain_name)
            ns_entry_obj = DomainEntry.objects.get(name=ns, domain=domain_entry, type=ns_type)
            # A PTR record must resolve to a single address.
            if ns_type == 'PTR' and len(ns_entry_obj.address_set.all()) != 0:
                return error_message('entry', ip, 'PTR record is used')
        except ObjectDoesNotExist as err:
            return error_message('entry', ns_entry, err)
        address_entry.ns_entries.add(ns_entry_obj)
        return {
            'entry': ns_entry,
            'status': 'done'
        }

    @staticmethod
    def exclude(ip, network, ns_entry, ns_type='A'):
        """
        Detach a NS entry from an address.
        :param ip: IP address
        :param network: network name (unused, kept for API symmetry)
        :param ns_entry: fully qualified entry name (name.domain)
        :param ns_type: NS entry type (default 'A')
        :return: status dict
        """
        ns, domain_name = ns_entry.split('.', 1)
        try:
            address_entry = Address.objects.get(ip=ip)
            domain = Domain.objects.get(name=domain_name)
            entry = DomainEntry.objects.get(name=ns, domain=domain, type=ns_type)
        except ObjectDoesNotExist as err:
            return error_message('entry', ns_entry, err)
        address_entry.ns_entries.remove(entry)
        return {
            'entry': ns_entry,
            'status': 'done'
        }

    @staticmethod
    def remove(ip, network, ns_entry=True):
        """
        Delete an address.
        :param ip: the IP address to delete
        :param network: the network name (used as a sanity check)
        :param ns_entry: if True, also delete orphan PTR and A records (default True)
        :return: status dict
        """
        try:
            try:
                network_address = Network.objects.get(name=network)
            except ObjectDoesNotExist:
                network_address = None
            if network_address is not None and not network_address.is_include(ip):
                # Bug fix: `network_address.address` -> `network_address.ip`.
                return error_message('address', ip, 'Address {} not in Network {}/{}'.format(
                    ip, network_address.ip, network_address.prefix))
            address = Address.objects.get(ip=ip)
            try:
                entry_ptr = address.ns_entries.get(type='PTR')
            except ObjectDoesNotExist:
                entry_ptr = None
            try:
                entry_a = address.ns_entries.get(type='A')
            except ObjectDoesNotExist:
                entry_a = None
            address.delete()
        except (ObjectDoesNotExist, IntegrityError) as err:
            return error_message('address', ip, err)
        if ns_entry:
            for entry in (entry_ptr, entry_a):
                if entry is None:
                    continue
                try:
                    if len(entry.address_set.all()) == 0:
                        # Only delete the record when no other address uses it
                        # (i.e. it is now an orphan entry).
                        entry.delete()
                except (IntegrityError, ObjectDoesNotExist) as err:
                    return error_message('address', ip, err)
        return {
            'address': ip,
            'status': 'done'
        }

    @staticmethod
    def get(ip, network):
        """
        Return all information about an address.
        :param ip: IP address
        :param network: network name (accepted for API symmetry; the lookup
            result was never used by the original implementation)
        :return: dict representation, or an error dict
        """
        try:
            address = Address.objects.get(ip=ip)
        except ObjectDoesNotExist as err:
            return error_message('address', ip, err)
        return address.show()

    @staticmethod
    def search(filters=None):
        """
        Return the short representation of every address matching the filters.
        :param filters: dict of Django field lookups, or None for all addresses
        :return: list of dicts
        """
        if filters is None:
            addresses = Address.objects.all()
        else:
            addresses = Address.objects.filter(**filters)
        return [address.show(short=True) for address in addresses]

    @staticmethod
    def match_network(ip):
        """
        Return the Network object containing the given IP, or None.
        :param ip: IP address
        :return: Network or None
        """
        for network in Network.objects.all():
            if network.is_include(ip):
                return network
        return None
| true |
b7334f5b3bccab12826fd15116442c1db01a40cc | Python | mcmxl22/Python-doodles | /Security/numli.py | UTF-8 | 474 | 3.890625 | 4 | [] | no_license | #!/usr/bin/env python3
"""
numli Version 1.3
Python 3.7
"""
class Menu:
    """
    Create a numbered text menu.
    Example: Menu.list_choices(['Cat', 'Dog'])
    Output:
    1 Cat
    2 Dog
    """
    # Bug fix: the original methods had no `self` parameter and no
    # @staticmethod decorator, so they only worked when called on the class
    # itself and broke on instances. @staticmethod keeps `Menu.add_numbers(...)`
    # working and additionally makes `Menu().add_numbers(...)` valid.
    @staticmethod
    def add_numbers(num):
        """Print each option in *num* prefixed with a 1-based number."""
        for index, value in enumerate(num, 1):
            print(index, value)

    @staticmethod
    def list_choices(options_list, **kwargs):
        """Display a numbered list of choices for the user."""
        return Menu.add_numbers(options_list)
| true |
e15cd5717f52b4c92a4dec79fb9843b02efa3f57 | Python | HAS-Tools-Fall2020/homework-shwetanarkhede | /Submissions/Narkhede_HW3.py | UTF-8 | 4,108 | 3.65625 | 4 | [] | no_license | # Forecasting Assignment 3
# Name: Shweta Narkhede
# Last Edited: Sept 14th, 2020
# %%
# Importing the modules
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %%
# ** MODIFY **
# Set the file name and path to where you have stored the data
filename = 'streamflow_week3.txt'
filepath = os.path.join('/Users/owner/Documents/GitHub/homework-shwetanarkhede/data/', filename)
print(os.getcwd())
print(filepath)
# %%
# DON'T change this part -- this creates the lists you
# should use for the rest of the assignment
# no need to worry about how this is being done now we will cover
# this in later sections.
#Read the data into a pandas dataframe
data=pd.read_table(filepath, sep = '\t', skiprows=30,
names=['agency_cd', 'site_no', 'datetime', 'flow', 'code']
)
# Expand the dates to year month day
data[["year", "month", "day"]] =data["datetime"].str.split("-", expand=True)
data['year'] = data['year'].astype(int)
data['month'] = data['month'].astype(int)
data['day'] = data['day'].astype(int)
#make lists of the data
flow = data.flow.values.tolist()
date = data.datetime.values.tolist()
year = data.year.values.tolist()
month = data.month.values.tolist()
day = data.day.values.tolist()
# Getting rid of the pandas dataframe since we wont be using it this week
del(data)
# %%
# Calculating some basic properites
print(min(flow))
print(max(flow))
print(np.mean(flow))
print(np.std(flow))
#%% September Flows in 2020
ilist = [] # Making and empty list to store index values of interest
# Loop over the length of the flow list
# and adding the index value to the ilist
# if it meets the specified crieteria
for i in range(len(flow)):
if month[i] == 9 and year[i]== 2020 : # by changing year, data for specific year can be pieced out
ilist.append(i)
# Shows how many times crieteria was met
#print(len(ilist))
# Grabs the data of interest
sept_flows_20 = [flow[j] for j in ilist]
sept_day_20 = [day[j] for j in ilist]
# Plotting data for visual analysis
plt.plot(sept_flows_20)
plt.title('September 2020')
#plt.xlabel('Days')
plt.ylabel('Daily streamflow (cfs)')
#%% Pulling out data that had values greater than prediction in 2020
ilist2 =[]
for i in range(len(flow)):
if flow [i] > 62 and month[i] == 9 and year[i]==2020:
ilist2.append(i)
# Shows how many times crieteria was met
print("Number of times flow was greater than prediction in Sept 2020 =",len(ilist2))
sept_20_flows_g = [flow[j] for j in ilist2]
# Exceedence
print( "Percent times daily streamflow exceeded than prediction in 2020 =",len(sept_20_flows_g)/len(sept_flows_20)*100)
# Calcualting mean percentage exceedence in flow
percent_incr = []
for i in range(0,len(sept_20_flows_g)):
perc = (sept_20_flows_g[i]-62)/62*100
percent_incr.append(perc)
print("Mean percent exceedence (2020) = ",np.mean(percent_incr))
#%% September flows from 1989 - 2020
ilist3 = [] # Making and empty list to store index values of interest
for i in range(len(flow)):
if month[i] == 9 and year[i]<= 2000: # for the data before 2000 add year[i]< 2000
ilist3.append(i)
# Grabs the data of interest
sept_flows = [flow[j] for j in ilist3]
sept_day = [day[j] for j in ilist3]
# Plotting data for visual analysis
#plt.plot(sept_day,sept_flows)
#%% Pulling out data that had values greater than prediction from 1989 - 2020
ilist4 =[]
for i in range(len(flow)):
if flow [i] > 62 and month[i] == 9 and year[i]<= 2000: # for the data before 2000 add year[i]<=2000
ilist4.append(i)
# Shows how many times crieteria was met
print("Number of times flow was greater than prediction =",len(ilist4))
sept_flows_g = [flow[j] for j in ilist4]
# Exceedence
print( "Percent times daily streamflow exceeded than prediction =",len(sept_flows_g)/len(sept_flows)*100)
# Calcualting mean percentage exceedence in flow
percent_incr = []
for i in range(0,len(sept_flows_g)):
perc = (sept_flows_g[i]-62)/62*100
percent_incr.append(perc)
print("Mean percent exceedence = ",np.mean(percent_incr))
| true |
925cb104bcd48e007b53a236ac3ae636804500bc | Python | wmc24/3D-Potential-Fields | /sim/geometry.py | UTF-8 | 3,090 | 3.15625 | 3 | [] | no_license | import numpy as np
class Pose2D(object):
def __init__(self, pos, epsilon=10.0):
assert(pos.size==2)
self.pos = pos
self.A = np.array([[1, 0], [0, 1/epsilon]])
self.angle = 0
self.R = np.eye(2, dtype=np.float32)
def get_direction(self):
return self.R[:,0]
def move_using_holonomic_point(self, velocity, max_speed, max_angular_speed, dt):
pose_vel = np.matmul(self.A, np.matmul(self.R.transpose(), velocity))
linear_vel = pose_vel[0] * self.R[:,0]
if np.linalg.norm(linear_vel) > max_speed:
linear_vel *= max_speed/np.linalg.norm(max_speed)
self.pos += dt * linear_vel
angular_vel = pose_vel[1]
if abs(angular_vel) > max_angular_speed:
angular_vel = np.sign(angular_vel) * max_angular_speed
self.angle += angular_vel * dt
while (self.angle < -np.pi): self.angle += 2*np.pi
while (self.angle > np.pi): self.angle -= 2*np.pi
self.update_R()
def update_R(self):
self.R = np.array([
[np.cos(self.angle), -np.sin(self.angle)],
[np.sin(self.angle), np.cos(self.angle)]
])
def get_vector(self):
return np.array([self.pos[0], self.pos[1], self.angle])
class Pose3D(object):
    """A 3D pose (position + orientation matrix) steered through a holonomic point."""
    def __init__(self, pos, epsilon=10.0):
        assert(pos.size==3)
        self.pos = pos
        # Maps body-frame point velocity to a forward speed plus two
        # angular-rate components.
        self.A = np.array([
            [1, 0, 0],
            [0, 0, -1/epsilon],
            [0, 1/epsilon, 0]
        ])
        self.R = np.eye(3, dtype=np.float32)

    def get_direction(self):
        """Unit heading vector (first column of the rotation matrix)."""
        return self.R[:,0]

    def move_using_holonomic_point(self, velocity, max_speed, max_angular_speed, dt):
        """Integrate the pose for ``dt`` given a desired point velocity.

        Linear speed and angular rate are saturated at ``max_speed`` and
        ``max_angular_speed`` respectively.
        """
        pose_vel = np.matmul(self.A, np.matmul(self.R.transpose(), velocity))
        linear_vel = pose_vel[0] * self.R[:,0]
        speed = np.linalg.norm(linear_vel)
        if speed > max_speed:
            # Rescale to the speed limit.  (Bug fix: the previous code divided
            # by norm(max_speed) -- the norm of a scalar -- so the velocity was
            # never actually clamped.)
            linear_vel *= max_speed / speed
        self.pos += dt * linear_vel
        # World-frame angular velocity; rotate about it using Rodrigues'
        # rotation formula R' = (I + S sin(t) + S^2 (1 - cos(t))) R.
        omega = np.matmul(self.R, np.array([0, pose_vel[1], pose_vel[2]]))
        dtheta = np.linalg.norm(omega)
        if dtheta==0: return
        n = omega / dtheta
        S = np.array([
            [0, -n[2], n[1]],
            [n[2], 0, -n[0]],
            [-n[1], n[0], 0]
        ])
        if abs(dtheta) > max_angular_speed:
            dtheta = max_angular_speed * np.sign(dtheta)
        dtheta *= dt
        self.R = np.matmul(np.eye(3) + S*np.sin(dtheta) + np.matmul(S,S)*(1 - np.cos(dtheta)), self.R)

    def get_vector(self):
        """Pose as a flat vector [x, y, z, heading_x, heading_y, heading_z]."""
        return np.concatenate([self.pos, self.R[:,0]])
class Goal:
    """A navigation target: a position, an approach direction, and a radius
    within which the goal counts as reached."""

    def __init__(self, pos, direction, close_dist=20):
        self.pos = pos
        self.direction = direction
        self.close_dist = close_dist

    def reached(self, pose):
        """Return True when *pose* lies strictly within ``close_dist`` of the goal."""
        offset = self.pos - pose.pos
        return np.linalg.norm(offset) < self.close_dist
| true |
9714a71bbba98fe690e2096a21d1da0df6143bd0 | Python | CERT-Polska/mwdb_iocextract | /src/model.py | UTF-8 | 12,624 | 2.703125 | 3 | [] | no_license | import re
from base64 import b64encode
from enum import Enum
from typing import List, Optional, Tuple, Union, cast
from urllib.parse import urlparse
from Cryptodome.PublicKey import RSA # type: ignore
from malduck import base64, rsa # type: ignore
from pymisp import MISPAttribute, MISPObject # type: ignore
from .errors import IocExtractError
PUBKEY_PEM_TEMPLATE = (
"-----BEGIN PUBLIC KEY-----\n{}\n-----END PUBLIC KEY-----"
)
def is_ipv4(possible_ip: str):
    """Cheap heuristic: does the string look like a dotted-quad IPv4 address?

    Octet values are not range-checked (e.g. 999.999.999.999 also matches);
    callers only need to tell IP-looking strings apart from host names.
    """
    dotted_quad = "^[0-9]{1,3}[.][0-9]{1,3}[.][0-9]{1,3}[.][0-9]{1,3}$"
    return re.match(dotted_quad, possible_ip)
class LocationType(Enum):
    """Type of malicious URL. Not all URLs in malware have the
    same role, and often it's necessary to treat them differently."""
    # The string value is also used verbatim in MISP tags of the form
    # "mwdb:location_type:<value>" (see NetworkLocation.to_misp).
    # C&C server, usually administrated by criminals. Malware connects to
    # it (usually with a custom protocol) to get new commands and updates.
    CNC = "cnc"
    # Download url. Used to download more malware samples. Sometimes just a
    # hacked legitimate website.
    DOWNLOAD_URL = "download_url"
    # Malware panel. HTTP service used by criminals to manage the botnet.
    PANEL = "panel"
    # Peer. IP/port of infected machine of a legitimate computer user.
    PEER = "peer"
    # Other kind of URL found in the malware.
    OTHER = "other"
class RsaKey:
    """An RSA public key (optionally with private exponent) used by malware."""

    def __init__(self, n: int, e: int, d: Optional[int] = None) -> None:
        """Build a key directly from its modulus/exponent components."""
        self.n = n
        self.e = e
        self.d = d

    @classmethod
    def parse_pem(cls, pem: str) -> "RsaKey":
        """Build a key from a PEM blob ("-----BEGIN PUBLIC KEY" header)."""
        parsed = RSA.import_key(pem)
        return cls(parsed.n, parsed.e)

    @classmethod
    def parse_base64(cls, b64: Union[str, bytes]) -> "RsaKey":
        """Build a key from a raw base64 blob (seen in danabot, for example)."""
        raw = base64.decode(b64)
        pem_key = rsa.import_key(raw)
        return cls.parse_pem(pem_key)  # type: ignore

    def to_misp(self) -> MISPObject:
        """Serialise the key as a MISP crypto-material object."""
        obj = MISPObject("crypto-material", standalone=False)
        obj.add_attribute("type", "RSA")
        obj.add_attribute("origin", "malware-extraction")
        obj.add_attribute("modulus", hex(self.n)[2:])
        obj.add_attribute("e", self.e)
        if self.d is not None:
            obj.add_attribute("d", self.d)
        return obj

    def prettyprint(self) -> str:
        """Human readable one-liner for debugging."""
        if self.d:
            return f"RsaKey n={self.n} e={self.e} d={self.d}"
        return f"RsaKey n={self.n} e={self.e}"
class EcdsaCurve:
    """An ECDSA public key point (curve tag plus x/y coordinates) used by malware."""

    def __init__(self, t: str, x: int, y: int) -> None:
        self.t = t
        self.x = x
        self.y = y

    def to_misp(self) -> MISPObject:
        """Serialise the curve as a MISP crypto-material object."""
        obj = MISPObject("crypto-material", standalone=False)
        obj.add_attribute("type", "ECDSA")
        # Translate internal curve tags to MISP-friendly curve names.
        curve_names = {"ecdsa_pub_p384": "NIST P-384"}
        obj.add_attribute("ecdsa-type", curve_names.get(self.t, self.t))
        obj.add_attribute("x", self.x)
        obj.add_attribute("y", self.y)
        return obj

    def prettyprint(self) -> str:
        """Human readable one-liner for debugging."""
        return f"EcdsaCurve t={self.t} x={self.x} y={self.y}"
class NetworkLocation:
    """Represents a network location. Can be a domain, ip with a port, etc."""
    def __init__(
        self, url: str, location_type: LocationType = LocationType.CNC
    ) -> None:
        # urlparse() only fills the netloc when a scheme is present; for bare
        # "host[:port][/path]" strings, retry with a placeholder "unknown://"
        # scheme (stripped again by the .scheme and .pretty_url properties).
        self.url = urlparse(url)
        if self.url.hostname is None:
            self.url = urlparse("unknown://" + url)
        self.location_type = location_type
    @property
    def ip(self) -> Optional[str]:
        # The hostname is reported as an IP only for dotted-quad strings.
        if self.url.hostname and is_ipv4(self.url.hostname):
            return self.url.hostname
        return None
    @property
    def domain(self) -> Optional[str]:
        # Complement of .ip: a hostname that does not look like an IPv4.
        if self.url.hostname and not is_ipv4(self.url.hostname):
            return self.url.hostname
        return None
    @property
    def port(self) -> Optional[int]:
        return self.url.port
    @property
    def path(self) -> str:
        return self.url.path
    @property
    def query(self) -> str:
        return self.url.query
    @property
    def scheme(self) -> Optional[str]:
        scheme = self.url.scheme
        # `unknown://` scheme is a placeholder used for URLs with a missing scheme
        # that we unfortunately have to support.
        if scheme == "unknown":
            return None
        return scheme
    @property
    def pretty_url(self) -> str:
        # Original URL text, with the internal "unknown://" placeholder removed.
        url = self.url.geturl()
        if url.startswith("unknown://"):
            return url[len("unknown://") :]
        return url
    def to_misp(self) -> MISPObject:
        # A location with any URL-ish component becomes a MISP "url" object;
        # a bare host/IP becomes a "domain-ip" object.
        if any((self.scheme, self.path, self.query, self.url.fragment)):
            misp_object_type = "url"
        else:
            misp_object_type = "domain-ip"
        obj = MISPObject(misp_object_type, standalone=False)
        # url-specific attributes
        if self.scheme:
            url = cast(
                MISPAttribute, obj.add_attribute("url", self.pretty_url)
            )
            url.add_tag(f"mwdb:location_type:{self.location_type.value}")
        if self.path:
            obj.add_attribute("resource_path", self.path)
        if self.url.fragment:
            obj.add_attribute("fragment", self.url.fragment)
        if self.query:
            obj.add_attribute("query_string", self.query)
        # generic attributes that apply to both url and domain-ip
        if self.ip:
            ip = cast(MISPAttribute, obj.add_attribute("ip", self.ip))
            ip.add_tag(f"mwdb:location_type:{self.location_type.value}")
        if self.domain:
            domain = cast(
                MISPAttribute, obj.add_attribute("domain", self.domain)
            )
            domain.add_tag(f"mwdb:location_type:{self.location_type.value}")
        if self.port:
            obj.add_attribute("port", self.port)
        return obj
    def prettyprint(self) -> str:
        """Pretty print for debugging"""
        return "NetLoc " + self.pretty_url
class IocCollection:
    """Represents a collection of parsed IoCs.

    Plain accumulator: ``add_*`` methods append, ``try_add_*`` methods
    additionally swallow IocExtractError for best-effort parsing, and
    ``to_misp``/``prettyprint`` render the collection.
    """
    def __init__(self) -> None:
        """Creates an empty IocCollection instance"""
        self.rsa_keys: List[RsaKey] = []
        self.ecdsa_curves: List[EcdsaCurve] = []
        self.keys: List[Tuple[str, str]] = []  # (keytype, hexencoded key)
        self.passwords: List[str] = []
        self.network_locations: List[NetworkLocation] = []
        self.mutexes: List[str] = []
        self.dropped_filenames: List[str] = []
        self.emails_to: List[str] = []
        self.emails_from: List[str] = []
        self.ransom_messages: List[str] = []
        self.campaign_ids: List[str] = []
    def add_rsa_key(self, rsakey: RsaKey) -> None:
        """Add an already-parsed RSA key."""
        self.rsa_keys.append(rsakey)
    def add_ecdsa_curve(self, ecdsa_curve: EcdsaCurve) -> None:
        """Add an ECDSA curve."""
        self.ecdsa_curves.append(ecdsa_curve)
    def add_key(self, key_type: str, xor_key: str) -> None:
        """Add a hex encoded other raw key - for example, xor key"""
        self.keys.append((key_type, xor_key))
    def try_add_rsa_from_pem(self, pem: str) -> None:
        """Parse and add a PEM-encoded RSA key; unparsable input is ignored."""
        try:
            if pem:
                self.add_rsa_key(RsaKey.parse_pem(pem))
        except IocExtractError:
            pass
    def try_add_rsa_from_asn1_bytes(self, blob: bytes) -> None:
        """Wrap raw ASN.1 key bytes in a PEM envelope and add; ignored on failure."""
        pem = PUBKEY_PEM_TEMPLATE.format(b64encode(blob).decode())
        try:
            self.add_rsa_key(RsaKey.parse_pem(pem))
        except IocExtractError:
            pass
    def try_add_rsa_from_base64(self, pem: str) -> None:
        """Parse and add a base64-encoded RSA key; ignored on failure."""
        try:
            self.add_rsa_key(RsaKey.parse_base64(pem))
        except IocExtractError:
            pass
    def add_network_location(self, netloc: NetworkLocation) -> None:
        """Add an already-constructed network location."""
        self.network_locations.append(netloc)
    def add_host_port(
        self, host: str, port: Union[str, int], schema: str = "unknown"
    ) -> None:
        """Add a host/port pair as a URL-style network location.

        NOTE(review): a non-numeric string ``port`` makes int() raise
        ValueError, which propagates to the caller - confirm intended.
        """
        port_val = int(port) if isinstance(port, str) else port
        # try_add_url already swallows IocExtractError internally, so the
        # extra try/except the original wrapped around this call was dead.
        self.try_add_url(f"{schema}://{host}:{port_val}")
    def try_add_url(
        self, url: str, location_type: LocationType = LocationType.CNC
    ) -> None:
        """Add a URL as a network location; blank or unparsable URLs are ignored."""
        if not url.strip():
            return
        try:
            self.network_locations.append(
                NetworkLocation(url, location_type=location_type)
            )
        except IocExtractError:
            pass
    def add_password(self, password: str) -> None:
        """Add a credential password."""
        self.passwords.append(password)
    def add_drop_filename(self, filename: str) -> None:
        """Add the name of a dropped file."""
        self.dropped_filenames.append(filename)
    def add_mutex(self, mutex: str) -> None:
        """Add a mutex name."""
        self.mutexes.append(mutex)
    def add_email_to(self, email: str) -> None:
        """Add a recipient e-mail address."""
        self.emails_to.append(email)
    def add_email_from(self, email: str) -> None:
        """Add a sender e-mail address."""
        self.emails_from.append(email)
    def add_ransom_message(self, ransom_message: str) -> None:
        """Add a ransom note text."""
        self.ransom_messages.append(ransom_message)
    def add_campaign_id(self, campaign_id: str) -> None:
        """Add a campaign identifier."""
        self.campaign_ids.append(campaign_id)
    def to_misp(self) -> List[MISPObject]:
        """MISP JSON output"""
        to_return = []
        for rsa_key in self.rsa_keys:
            to_return.append(rsa_key.to_misp())
        for ecdsa_curve in self.ecdsa_curves:
            to_return.append(ecdsa_curve.to_misp())
        for k in self.keys:
            crypto_obj = MISPObject("crypto-material", standalone=False)
            crypto_obj.add_attribute("type", k[0])
            crypto_obj.add_attribute("generic-symmetric-key", k[1])
            to_return.append(crypto_obj)
        for password in self.passwords:
            credential_obj = MISPObject("credential", standalone=False)
            credential_obj.add_attribute("password", password)
            to_return.append(credential_obj)
        for mutex in self.mutexes:
            mutex_obj = MISPObject("mutex", standalone=False)
            mutex_obj.add_attribute("name", mutex)
            to_return.append(mutex_obj)
        for netloc in self.network_locations:
            to_return.append(netloc.to_misp())
        # TODO self.dropped_filenames
        # TODO(review): ransom_messages and campaign_ids are likewise not
        # exported to MISP, although prettyprint() does show them.
        for email in self.emails_to:
            obj = MISPObject("email", standalone=False)
            obj.add_attribute("to", email)
            to_return.append(obj)
        for email in self.emails_from:
            obj = MISPObject("email", standalone=False)
            obj.add_attribute("from", email)
            to_return.append(obj)
        # filter out objects without any attributes
        to_return = list(filter(lambda x: bool(x.attributes), to_return))
        return to_return
    def prettyprint(self) -> str:
        """Pretty print for debugging"""
        result = []
        for rsa_key in self.rsa_keys:
            result.append(rsa_key.prettyprint())
        for ecdsa_curve in self.ecdsa_curves:
            result.append(ecdsa_curve.prettyprint())
        for key_type, key_data in self.keys:
            result.append(f"Key {key_type}:{key_data}")
        for password in self.passwords:
            result.append("Password " + password)
        for netloc in self.network_locations:
            result.append(netloc.prettyprint())
        for mutex in self.mutexes:
            result.append("Mutex " + mutex)
        for drop_filename in self.dropped_filenames:
            result.append("Drop " + drop_filename)
        for email in self.emails_to:
            result.append("EmailTo " + email)
        for email in self.emails_from:
            result.append("EmailFrom " + email)
        for ransom_message in self.ransom_messages:
            result.append("RansomMessage: " + ransom_message)
        for campaign_id in self.campaign_ids:
            result.append("CampaignId: " + campaign_id)
        return "\n".join(result)
    def __bool__(self) -> bool:
        # Fix: ecdsa_curves was missing from this list, so a collection
        # holding only ECDSA curves evaluated as falsy.
        return any(
            [
                self.rsa_keys,
                self.ecdsa_curves,
                self.keys,
                self.passwords,
                self.network_locations,
                self.mutexes,
                self.dropped_filenames,
                self.emails_to,
                self.emails_from,
                self.ransom_messages,
                self.campaign_ids,
            ]
        )
| true |
dbb53b30100d415ffe70c6f0a45518de6c4c8e64 | Python | lzlmike/ECS10 | /HW3/primes.py | UTF-8 | 440 | 3.703125 | 4 | [] | no_license | def prime(k):
    # Trial division: reject even k outright, then test odd-range divisors.
    # BUG(review): prime(2) returns False here (2 % 2 == 0); main() below
    # compensates by printing 2 unconditionally. prime(1) would return True.
    if k%2==0:
        return False
    # Divisors 3 .. k//2+1 are checked (sqrt(k) would already be sufficient).
    for i in range(3,k//2+2):
        if k%i==0:
            return False
    return True
def main():
    """Prompt for an integer >= 2 and print every prime up to it."""
    # Keep prompting until the user types a numeric string that is >= 2.
    num = str(input("Please enter an integer >= 2: "))
    while (not num.isdigit()) or int(num) < 2:
        num = str(input("Please enter an integer >= 2: "))
    upper = int(num)
    # 2 is printed explicitly because prime() rejects every even number,
    # including 2 itself.
    for candidate in range(2, upper + 1):
        if candidate == 2:
            print(2)
        elif prime(candidate):
            print(candidate)
main()
| true |
b368daa9b144bc9f0b4104cc34c5399e10616711 | Python | Lizimoo/tensorflow_test | /testNN_Classification_mnist.py | UTF-8 | 2,418 | 2.6875 | 3 | [] | no_license | import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
# MNIST handwritten-digit data; labels are one-hot encoded. Downloads into
# ./MNIST_data on first use.
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
def add_layer(inputs, in_size, out_size, activation_function=None, n_layer=1):
    """Build one fully connected layer: inputs @ W + b, optionally activated.

    Parameters: inputs (2-D tensor), in_size/out_size (layer widths),
    activation_function (callable, or None for a linear layer),
    n_layer (layer index; kept for interface compatibility, unused here).
    """
    Weights = tf.Variable(tf.random_normal([in_size, out_size]), name='W')
    # bias is initialised to a small non-zero value (0.1), not zero.
    bias = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='bias')
    Wx_plus_b = tf.add(tf.matmul(inputs, Weights), bias, name='res')
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs
# def compute_accuracy(v_xs, v_ys):
# global y
# y_pre = sess.run(y, feed_dict={xs: v_xs})
# correct_prediction = tf.equal(tf.arg_max(y_pre, 1), tf.arg_max(v_ys, 1))
# accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys})
# return result
# Inputs: flattened 28x28 images and one-hot labels.
xs = tf.placeholder(tf.float32, [None, 784])
ys = tf.placeholder(tf.float32, [None, 10])
# Parameters of the final softmax layer (applied after the dense layer below).
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
# y = tf.nn.softmax(tf.matmul(xs, W2) + b2)
# Hidden layer: 784 ReLU units.
dense = tf.layers.dense(inputs=xs, units=784, activation=tf.nn.relu)
pre_y = tf.nn.softmax(tf.matmul(dense, W) + b)
# l1 = add_layer(xs, 784, 2000, activation_function=tf.nn.softmax)
# prediction = add_layer(xs, 784, 10, activation_function=tf.nn.softmax)
# NOTE(review): log(softmax) is numerically unstable; consider
# tf.nn.softmax_cross_entropy_with_logits on the raw logits instead.
cross_entropy = -tf.reduce_sum(ys*tf.log(pre_y))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy) # learning rate must not be too high
correct_prediction = tf.equal(tf.argmax(pre_y, 1), tf.argmax(ys, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# NOTE(review): tf.initialize_all_variables() is deprecated in favour of
# tf.global_variables_initializer().
init = tf.initialize_all_variables()
with tf.Session() as sess:
    sess.run(init)
    # Mini-batch SGD: 20000 steps of 50 images; report test accuracy
    # every 200 steps.
    for i in range(20000):
        batch_xs, batch_ys = mnist.train.next_batch(50)
        sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys})
        if i % 200 == 0:
            print(sess.run(accuracy, feed_dict={xs: mnist.test.images, ys: mnist.test.labels}))
| true |
07171ea6791203bf412b96087d85bcd916854e31 | Python | kidusasfaw/addiscoder_2016 | /labs/server_files_without_solutions/lab2/wellSpaced/wellSpaced.py | UTF-8 | 265 | 3.28125 | 3 | [] | no_license | ### students should implement wellSpaced
def wellSpaced(n):
    """Student exercise stub: compute and return the answer for input n."""
    # student should implement this function and return the correct value
    # Fix: the distributed stub had no statement in the body, which is an
    # IndentationError; `pass` makes the template actually runnable.
    pass
###########################################
# INPUT OUTPUT CODE. DO NOT EDIT CODE BELOW.
# Python 2 I/O (raw_input / print statement): read n, print the result.
n = int(raw_input())
print wellSpaced(n)
| true |
7e734cd39ef029ee8d61ad886fa3f0a36935adcd | Python | JakeColtman/TennisSimulator | /tests/engine/game.py | UTF-8 | 958 | 2.859375 | 3 | [] | no_license | from unittest import TestCase
import unittest
from TennisSimulator.engine.settings import Settings
from TennisSimulator.engine.game import Game
from TennisSimulator.engine.player import Player
from TennisSimulator.engine.point import Point
class GameTest(TestCase):
    """Exercises Game scoring through the engine's immutable add_point API."""
    def setUp(self):
        # Fresh player fixtures for every test method.
        self.player_one = Player("player_one")
        self.player_two = Player("player_two")
    def test_simple_server_win(self):
        # With max_points=2 the first player to take two points wins.
        match_settings = Settings(max_points=2)
        game = Game(self.player_one, self.player_two, [], match_settings)
        self.assertEqual(False, game.is_won())
        opening_point = Point(self.player_one)
        game = game.add_point(opening_point)
        self.assertEqual([opening_point], game.points)
        self.assertEqual(False, game.is_won())
        game = game.add_point(Point(self.player_one))
        self.assertEqual(True, game.is_won())
        self.assertEqual(self.player_one, game.winner())
if __name__ == '__main__':
unittest.main() | true |
545e576074ef1e3ac37fd51ace1a95c8e9419dde | Python | C-Ashcroft/packer-lab | /Clustering Analysis (Electrophysiology) .py | UTF-8 | 10,552 | 2.84375 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[1]:
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
sys.path
from ams_paq_utilities import *
from ams_utilities import *
import scipy
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'notebook')
import pyabf
import paq2py
import numpy as np
import pandas as pd
import seaborn as sns
import scipy.interpolate
import itertools
from copy import deepcopy
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.cluster import AgglomerativeClustering
from sklearn import metrics
import scipy.stats as ss
from itertools import product
from sklearn.cluster import AffinityPropagation
from itertools import cycle
# In[2]:
# Import data from spreadsheet
df = GS_to_df('https://docs.google.com/spreadsheets/d/1ziOx80em0ZhmMmSjKePYbOq3K6sHcOapfHjy9S4oDbk/edit#gid=1828339698')
df
# In[3]:
# Eliminate rows containing redundant, uninformative or incomplete (NaN) data
df = df.drop([1,33,34], axis=0)
df = df.replace([np.inf, -np.inf], np.nan)
dattab = df.dropna(axis=1, how='any')
celllabels = dattab.iloc[0,1:]
dattab = dattab.drop([0], axis=0)
metriclabels = dattab.iloc[:,0]
# Convert to float array and standardise data ((x - mean)/std)
dattab = str_flt(dattab.iloc[:,1:])
dattab -= np.mean(dattab)
dattab /= np.std(dattab)
# Transpose array so variables arranged column-wise
data = dattab.T
data.columns = metriclabels
data.index.names = ["Cell Number"]
allcelldata = pd.DataFrame(data)
allcelldata
# In[4]:
# SANITY CHECK - do we observe expected correlations between different properties?
# Generate a heat map
plt.figure(figsize=(20,12))
plt.title('Correlation of Electrophysiological Properties', fontsize=20, y = 1.03);
cor = data.corr()
sns.heatmap(cor, annot=False, cmap=plt.cm.seismic)
plt.show()
# In[5]:
# Compute correlations between properties
def corrank(X):
    """Print every pairwise column correlation of X, sorted descending.

    Fixes: X.corr() was computed twice; the needless list() around
    itertools.combinations is gone; the local no longer shadows the
    module-level `df`.
    """
    corr = X.corr()
    pairs = pd.DataFrame(
        [[(i, j), corr.loc[i, j]] for i, j in itertools.combinations(corr, 2)],
        columns=['pairs', 'corr'],
    )
    print(pairs.sort_values(by='corr', ascending=False))
corrank(data)
# In[6]:
# PRINCIPAL COMPONENT ANALYSIS
# The following analysis will attempt to identify clusters of related cell based on a lower dimensional data-set established via PCA
# In[7]:
# Perform PCA
pca = PCA(n_components = 10).fit(data)
X_pca = pca.transform(data)
PCA_components = pd.DataFrame(X_pca)
print(data.shape, X_pca.shape)
# Can determine % variance explained to extract appropraite number of components
plt.subplots()
features = range(pca.n_components_)
plt.bar(features, pca.explained_variance_ratio_, color='red')
plt.xlabel('PCA dimensions')
plt.ylabel('Variance %')
plt.title('PCA: % Explained Variance')
# Eigenvalues greater than one explain more variance than the original variables
print(pca.explained_variance_)
# In[8]:
# Create a scree plot to establish components for which eigenvalue > 1
plt.subplots()
plt.scatter(features, pca.explained_variance_, color = 'blue')
plt.plot(features, pca.explained_variance_, color = 'blue')
plt.title('Scree Plot')
plt.xlabel('Dimensions')
plt.ylabel('Eigenvalue')
plt.grid()
plt.show()
# In[9]:
# Cumulative % variance explained by extracted components
plt.figure()
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.axvline(x=1, color='r', linestyle='--')
plt.xlabel('Number of Dimensions')
plt.ylabel('Variance (%)') #for each component
plt.title('Cumulative Explained Variance')
plt.show()
# In[10]:
# Re run PCA on appropraite number of components
pca = PCA(n_components=2)
pca.fit_transform(allcelldata)
# Plot weightings of original metrics on extracted components
plt.figure(figsize=(20,5))
plt.title('PCA: Metric Weights', fontsize=20, y = 1.03);
PCAweights = pd.DataFrame(pca.components_,columns=allcelldata.columns,index = ['PC1','PC2'])
sns.heatmap(PCAweights, annot = True, cmap=plt.cm.seismic)
# In[11]:
# Create plot of PC space (components 1/2)
P1 = PCA_components[0]
P2 = PCA_components[1]
labels = celllabels
fig, ax = plt.subplots(figsize=(10,10))
plt.scatter(P1,P2, color='blue')
ax.set_xlabel('Principal Component 1', fontsize = 15)
ax.set_ylabel('Principal Component 2', fontsize = 15)
ax.set_title('Two Component PCA Space (All Data)', fontsize = 20)
ax.grid()
for i,type in enumerate(labels):
x = P1[i]
y = P2[i]
plt.text(x+0.03, y+0.03, type, fontsize=10)
plt.show()
# In[12]:
# Assess appropriate number of clusters by identifying 'elbow' in KMeans Inertia plot
ks = range(1, 10)
inertias = []
for k in ks:
model = KMeans(n_clusters=k)
model.fit(PCA_components.iloc[:,0:1])
inertias.append(model.inertia_)
fig, ax = plt.subplots()
plt.plot(ks, inertias, '-o', color='black')
plt.xlabel('Number of Clusters, K')
plt.ylabel('Sum of Squared Distance')
plt.title('Elbow Plot')
plt.xticks(ks)
plt.show()
# In[13]:
P1 = np.array(X_pca[:,0])
P2 = np.array(X_pca[:,1])
X = np.column_stack((P1,P2))
# In[15]:
# Clustering method 1: K-Means + Silhouette analysis
# Sihouette analysis enables you to compare results of K-means for different number of clusters
# Highest silhouette value indicates most variance explained
range_n_clusters = [2,3,4,5,6,7,8,9,10]
for n_clusters in range_n_clusters:
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(10, 4)
ax1.set_xlim([-0.1, 1])
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
ith_cluster_silhouette_values = sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.nipy_spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
y_lower = y_upper + 10
ax1.set_title("Silhouette pot")
ax1.set_xlabel("Silhouette Cofficient Values")
ax1.set_ylabel("Cluster label")
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([])
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors, edgecolor='k')
centers = clusterer.cluster_centers_
ax2.scatter(centers[:, 0], centers[:, 1], marker='o',
c="white", alpha=1, s=200, edgecolor='k')
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1,
s=50, edgecolor='k')
ax2.set_title("Clustered data")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering: "
"n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
# In[ ]:
# Clustering Method 2: Ward's method
# Hierarchical clustering (bottom-up, agglomerative clustering)
import scipy.cluster.hierarchy as shc
plt.figure(figsize=(11, 10))
plt.title("Clustering Dendrogram (Ward's Method)")
plt.xlabel("Cell")
# plt.axhline(y=10, color='r', linestyle='--')
plt
dend = shc.dendrogram(shc.linkage(X, method='ward'))
# In[ ]:
# Calinski Harabasz Score = the ratio between the wthin cluster dispersion and between cluster dispersion
# Alternative means of assessing most appopriate cutoff in Ward's dendrogram
CHS = pd.DataFrame()
for k in range(2,10):
cluster = AgglomerativeClustering(n_clusters=k, affinity='euclidean', linkage='ward')
cluster.fit_predict(X)
df = pd.DataFrame([metrics.calinski_harabasz_score(data,cluster.labels_)],[k])
CHS = CHS.append(df)
CHS.columns = ['CHS Value']
CHS.index.name = 'Cluster'
CHS
# In[ ]:
# Plot clustered data in principal component space
plt.figure(figsize=(10,10))
cluster = AgglomerativeClustering(n_clusters =3, affinity = 'euclidean', linkage = 'ward')
cluster.fit_predict(X)
plt.scatter(X[:,0],X[:,1], c=cluster.labels_, cmap='rainbow')
plt.xlabel('Feature Space for 1st Feature')
plt.ylabel('Feature Space for 2nd Feature')
plt.title('Clustering: Wards Method (n=3)')
P1 = X[:,0]
P2 = X[:,1]
for i,type in enumerate(labels):
x = P1[i]
y = P2[i]
plt.text(x+0.03, y+0.03, type, fontsize=8)
plt.show()
# In[ ]:
# Identify cells in each cluster
df = pd.DataFrame(cluster.labels_, labels)
df.columns = ['Cluster']
df.index.name = 'Cell'
Group1 = df.loc[df['Cluster'] == 0]
Group2 = df.loc[df['Cluster'] == 1]
Group3 = df.loc[df['Cluster'] == 2]
a = pd.DataFrame(Group1.index)
b = pd.DataFrame(Group2.index)
c = pd.DataFrame(Group3.index)
clusters = pd.concat([a,b,c], ignore_index=True, axis=1)
clusters.columns = ['Cluster 1', 'Cluster 2', 'Cluster 3']
clusters
# In[ ]:
# Clustering Method 3: Affinity Propagation
af = AffinityPropagation(preference = None).fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters_ = len(cluster_centers_indices)
print('Estimated number of clusters: %d' % n_clusters_)
plt.close('all')
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
class_members = labels == k
cluster_center = X[cluster_centers_indices[k]]
plt.plot(X[class_members, 0], X[class_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
for x in X[class_members]:
plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.xlabel('Feature Space for 1st feature')
plt.ylabel('Feature Space for 2nd feature')
plt.show()
| true |
8d76a634769713fa4cc54c034746f469d2006ef0 | Python | Index197511/AtCoder_with_Python | /Kyou11.py | UTF-8 | 98 | 2.625 | 3 | [] | no_license | import itertools
n,m=map(int,input().split())
# NOTE(review): `s` is never defined in this file, so the next line raises
# NameError at runtime, and the computed list is discarded anyway. This
# looks like an unfinished AtCoder snippet.
list(itertools.combinations_with_replacement(s,n))
| true |
5043301f0827bfac0f68fc54467890477aaa1880 | Python | TharlesClaysson/Python-Basico | /segundoModulo/PYTHON - DODO/0.3_list-f-c-retorno-s-parametros(TERMINADO)/1.py | UTF-8 | 179 | 3.75 | 4 | [
"MIT"
] | permissive | def Mult2():
    # Prompt (Portuguese): "Enter a number to be multiplied by 2".
    N = int(input('Informe um número para ser multiplicado por 2: '))
    return N * 2
def main():
    """Print the doubled value computed by Mult2()."""
    dobro = Mult2()
    print('O resultado da multiplicaçao: ', dobro)
main()
| true |
128e70a0fa0d6e4cb610b78b366348d16b7fa629 | Python | rtuita23/tbay | /main.py | UTF-8 | 435 | 2.578125 | 3 | [] | no_license | from tbay import User, Item, Bid, session, datetime
def main():
    """Demo: rename the first user and insert one Item row via the shared session."""
    # Update an existing User row.
    first_user = session.query(User).first()
    first_user.username = 'solange'
    session.commit()
    # Insert a new Item row.
    auction_item = Item()
    auction_item.name = 'ball'
    auction_item.description = 'Basketball'
    # NOTE(review): this assigns the function object itself, not a value;
    # confirm whether datetime.utcnow() (called) was intended.
    auction_item.start_time = datetime.utcnow
    session.add(auction_item)
    session.commit()
if __name__ == '__main__':
main() | true |
016c294e1a84aa8c4a77279b170d6b4b74d4a350 | Python | koushikreddyvayalpati/BUILDING-BLOCK-CHAIN-AND-CRYPTOCURRENCY | /bitcoin_node_5003.py | UTF-8 | 6,718 | 3.15625 | 3 | [
"MIT"
] | permissive | #Libraries required
import json
import requests
from flask import Flask, jsonify, request
import datetime
from urllib.parse import urlparse
import hashlib
from uuid import uuid4
##########################
## creating the BLOCKCHAIN ###
##########################
class BLOCKCHAIN:
    """Minimal proof-of-work blockchain: blocks, pending transactions, peers."""
    def __init__(self):
        self.chain = []
        self.transactions = []  # pending transactions, sealed into the next block
        # creating the first and it is called genesis block
        self.createBlock(proof = 1, previousHashValue = '0')
        self.nodes = set()  # peer addresses as "host:port" strings
    def createBlock(self, proof, previousHashValue):
        """Seal the pending transactions into a new block, append and return it."""
        block = {'index' : len(self.chain) + 1,
                 'timeStamp' : str(datetime.datetime.now()),
                 'proof' : proof,
                 'previousHashValue' : previousHashValue,
                 'transactions' : self.transactions}
        self.transactions = []
        self.chain.append(block)
        return block
    def GetPreviousBlock(self):
        """Return the most recently appended block."""
        return self.chain[-1]
    def proof_Of_Work(self, previousProof):
        """Find the smallest proof whose combined hash starts with '0000'."""
        newProof = 1
        while True:
            hashOperation = hashlib.sha256(str(newProof**2 - previousProof**2).encode()).hexdigest()
            if hashOperation[:4] == '0000':
                return newProof
            newProof += 1
    def hash(self, block):
        """SHA-256 of the block's canonical (key-sorted) JSON encoding."""
        encodedBlock = json.dumps(block, sort_keys = True).encode()
        return hashlib.sha256(encodedBlock).hexdigest()
    def validateChain(self, chain):
        """Check hash links and proof-of-work for every consecutive block pair."""
        if not chain:
            # Fix: an empty chain used to raise IndexError on chain[0];
            # treat it as invalid instead.
            return False
        previousBlock = chain[0]
        for block in chain[1:]:
            # Each block must reference the hash of its predecessor ...
            if block['previousHashValue'] != self.hash(previousBlock):
                return False
            # ... and its proof must satisfy the difficulty target.
            hashOperation = hashlib.sha256(str(block['proof']**2 - previousBlock['proof']**2).encode()).hexdigest()
            if hashOperation[:4] != '0000':
                return False
            previousBlock = block
        return True
    def addTransaction(self, sender, receiver, amount):
        """Queue a transaction; return the index of the block that will hold it."""
        self.transactions.append({'sender':sender,
                                  'receiver': receiver,
                                  'amount': amount})
        return self.GetPreviousBlock()['index'] + 1
    def addNode(self, address):
        """Register a peer by the network-location ("host:port") part of its URL."""
        self.nodes.add(urlparse(address).netloc)
    def replaceChain(self):
        """Consensus: adopt the longest valid chain among all registered peers.

        Returns True when this node's chain was replaced.
        """
        longestChain = None
        max_length = len(self.chain)
        for node in self.nodes:
            response = requests.get(f'http://{node}/getChain')
            if response.status_code == 200:
                data = response.json()  # fix: parse the body once, not twice
                length = data['length']
                chain = data['chain']
                if length > max_length and self.validateChain(chain):
                    max_length = length
                    longestChain = chain
        if longestChain:
            self.chain = longestChain
            return True
        return False
# second step Mining Our BLOCKCHAIN
#Creating a Web App
app = Flask(__name__)
# Unique address identifying this node (used as the mining-reward sender).
node_address = str(uuid4()).replace('-','')
# NOTE(review): this rebinds the class name to the singleton instance, so
# the BLOCKCHAIN class itself is unreachable after this line.
BLOCKCHAIN = BLOCKCHAIN()
# Mine one block: solve proof-of-work, chain it, then queue a reward
# transaction (which lands in the NEXT block, matching the original flow).
@app.route('/mine_block', methods = ['GET'])
def mine_block():
    tail = BLOCKCHAIN.GetPreviousBlock()
    proof = BLOCKCHAIN.proof_Of_Work(tail['proof'])
    block = BLOCKCHAIN.createBlock(proof, BLOCKCHAIN.hash(tail))
    BLOCKCHAIN.addTransaction(sender = node_address, receiver = 'Jayanth', amount = 1)
    response = {
        'message':'Congratulations, you just mined a block!',
        'index': block['index'],
        'timeStamp': block['timeStamp'],
        'proof': block['proof'],
        'previousHashValue': block['previousHashValue'],
        'transactions' : block['transactions']
    }
    return jsonify(response), 200
# Return this node's full chain together with its length.
@app.route('/getChain', methods = ['GET'])
def getChain():
    payload = {
        'chain': BLOCKCHAIN.chain,
        'length': len(BLOCKCHAIN.chain)
    }
    return jsonify(payload), 200
#Checking if the BLOCKCHAIN is valid
@app.route('/isValid', methods = ['GET'])
def isValid():
    """Report whether this node's chain passes validation."""
    # Renamed local: the original bound `isValid` over the view function name.
    chain_ok = BLOCKCHAIN.validateChain(BLOCKCHAIN.chain)
    if chain_ok:
        res = {
            'message': 'The Block chain has not been tamperd and every thing is valid'
        }
    else:
        res = {'message' : 'Harshith, unAuthorized some body tried to modify the chain'}
    return jsonify(res), 200
#adding a new transaction to a BLOCKCHAIN
@app.route('/addTransaction', methods = ['POST'])
def addTransaction():
    """Queue a posted transaction; respond 400 when a field is missing."""
    # Renamed from `json`: the original shadowed the imported json module.
    payload = request.get_json()
    transactionKeys = ['sender', 'receiver', 'amount']
    if not all(key in payload for key in transactionKeys):
        return 'Some elements of the transaction are missing', 400
    index = BLOCKCHAIN.addTransaction(payload['sender'], payload['receiver'], payload['amount'])
    response = {'message': f'This transaction will be added to Block {index}'}
    return jsonify(response), 201
## Decentralising our BLOCKCHAIN######
######################################
@app.route('/connectNode', methods = ['POST'])
def connectNode():
    """Register the posted peer node addresses with this node."""
    # Renamed from `json`: the original shadowed the imported json module.
    payload = request.get_json()
    nodes = payload.get('nodes')
    if nodes is None:
        return "No Node", 400
    for node in nodes:
        BLOCKCHAIN.addNode(node)
    response = {
        'message' : 'The nodes are connected proceed to transcation. The Bitcoin blockchain now contains the following nodes:',
        'total_nodes' : list(BLOCKCHAIN.nodes)
    }
    return jsonify(response), 201
# Consensus endpoint: adopt the longest valid chain known to any peer.
@app.route('/replaceChain', methods = ['GET'])
def replaceChain():
    if BLOCKCHAIN.replaceChain():
        response = {
            'message': 'The node had different length chains, so the node was replaced by the longest one.',
            'new_chain': BLOCKCHAIN.chain
        }
    else:
        response = {
            'message' : 'All good, the chain is the largest one.',
            'actual_chain' : BLOCKCHAIN.chain
        }
    return jsonify(response), 200
#Running the server Local machine
# Listens on all interfaces; this node's port is 5003 (see filename).
app.run(host = '0.0.0.0', port = 5003)
| true |
054ccb9cfc51021457b8573dcd65f007d678373a | Python | TiMusBhardwaj/pythonExample | /python-and-mongo-master/Tree/check-given-binary-tree-symmetric-structure-not.py | UTF-8 | 1,189 | 4.25 | 4 | [] | no_license |
class Node:
    """A binary-tree node holding a key and optional left/right children."""
    def __init__(self, key):
        self.data = key
        self.left = self.right = None
    def __repr__(self):
        # Show just the stored key when printing nodes.
        return str(self.data)
#Check left and right subtree for symmetry
def is_symmetric(node_a, node_b):
    """Return True when the two subtrees are structural mirror images.

    Fix: the original expression returned None (not False) when exactly
    one side was empty; this version always returns a real bool.
    """
    # Two empty subtrees mirror each other.
    if node_a is None and node_b is None:
        return True
    # Exactly one empty subtree: the shapes differ.
    if node_a is None or node_b is None:
        return False
    # Both non-empty: each side must mirror the other's opposite side.
    return is_symmetric(node_a.left, node_b.right) and is_symmetric(node_a.right, node_b.left)
def is_sysmmetric(root):
    """Return True when the whole tree is symmetric about its root."""
    if root is None:
        return True
    return is_symmetric(root.left, root.right)
#Driver program to check symmetry
if __name__ == '__main__':
    # Mirror-shaped tree: 2's left child (4) mirrors 3's right child (5),
    # so the first check prints "Tree is Symmetric".
    root = Node(1)
    root.left = Node(2)
    root.right = Node(3)
    root.left.left = Node(4)
    root.right.right= Node(5)
    if is_sysmmetric(root):
        print("Tree is Symmetric")
    else:
        print("Tree is not Symmetric")
    # Adding a lone grandchild under 5 breaks the mirror structure.
    root.right.right.left= Node(6)
    if is_sysmmetric(root):
        print("Tree is Symmetric")
    else:
        print("Tree is not Symmetric")
| true |
7248af6c9061925424f973be49eeb6eb2377947a | Python | AjayKarki/DWIT_Training | /Week1/Assignments/Assignment4.py | UTF-8 | 170 | 3.6875 | 4 | [] | no_license | number = int(input("Enter a number"))
# Collect every divisor of `number` (read above) by trial division 1..number.
list_of_divisors =[]
for n in range(1, number+1):
    if number % n == 0:
        list_of_divisors.append(n)
print(list_of_divisors)
d3fa29d4adc2ef7fa39c70329d212a937f0eeac0 | Python | bernaba123/alx-higher_level_programming | /0x00-python-hello_world/5-print_string.py | UTF-8 | 121 | 3.03125 | 3 | [] | no_license | #!/usr/bin/python3
str = "Holberton School"
print("{}{}{}".format(str, str, str))
str = str[0:9]
print("{}".format(str))
| true |
b5dbd42ec95301d22c3e1fbfb85f0458319cf41f | Python | phoenixtype/ECE610-Simulation-project | /Question4a.py | UTF-8 | 4,444 | 3.5625 | 4 | [] | no_license | # Copyright 2014 Dr. Greg M. Bernstein
""" Simulation model for an M/Ek/1 queue based on SimPy. Estimates queue sizes and
wait times and compares to theory. Vary number of packets sent, inter-arrival
and service time via parameters in the main portion of the script.
Our model is based on processes for packet generation and consumption, along
with a SimPy Store resource to model the FIFO output queue of a packet
switching port.
This code uses global (module) level variables and hence is not very extensible, nor
an example of good OO design.
"""
import random
import simpy
import numpy as np
from scipy.stats import gamma
# Number of service stages for the Erlang-k model; k = 1 makes the service
# exponential (M/M/1). NOTE(review): for k > 1 packet_consumer still draws a
# single exponential with rate k/SERVICE rather than summing k stages.
k = 1
class Packet(object):
    """Plain record for a packet travelling through a switch output queue.

    time: arrival time at the output queue; id: packet identifier;
    src/dst: source and destination labels (unused by the simulation).
    """
    def __init__(self, time, id, src="a", dst="z"):
        self.time, self.id = time, id
        self.src, self.dst = src, dst
def packet_generator(numPackets, env, out_pipe):
    """Create numPackets packets with exponentially distributed inter-arrival
    times and push them into the output queue.

    Parameters
    ----------
    numPackets : int
        number of packets to send.
    env : simpy.Environment
        The simulation environment.
    out_pipe : simpy.Store
        the output queue model object.
    """
    global queue_size
    for seq in range(numPackets):
        # Hold for the next exponential inter-arrival gap.
        yield env.timeout(random.expovariate(1 / ARRIVAL))
        pkt = Packet(env.now, seq)
        # Queue length is sampled at arrival instants; this estimator is
        # only valid for Poisson arrivals (PASTA).
        queue_size += len(out_pipe.items)
        yield out_pipe.put(pkt)
def packet_consumer(env, in_pipe):
    """Drain packets from the queue, modelling exponentially distributed
    transmission times, and accumulate the waiting-time statistics.

    Parameters
    ----------
    env : simpy.Environment
        the simulation environment.
    in_pipe : simpy.Store
        the FIFO model object where packets are kept.
    """
    global queue_wait, total_wait
    while True:
        pkt = yield in_pipe.get()            # block until a packet is queued
        queue_wait += env.now - pkt.time     # time spent waiting in the queue
        yield env.timeout(random.expovariate(k / (SERVICE)))
        total_wait += env.now - pkt.time     # queueing plus service time
if __name__ == '__main__':
    # The number of packets to be sent over the life of the simulation.
    NUM_PACKETS = 10000
    # The mean inter-arrival time
    ARRIVAL = 0.25
    # The mean service time
    SERVICE = 0.2
    # To compute the average queue waiting time
    queue_wait = 0
    # To compute the average total waiting time
    total_wait = 0
    # To compute the average queue size.
    queue_size = 0
    # Setup and start the simulation
    # The simulation environment.
    env = simpy.Environment()
    # The switch output port object based on the SimPy Store class
    pipe = simpy.Store(env)
    # Turns our generator functions into SimPy Processes
    env.process(packet_generator(NUM_PACKETS, env, pipe))
    env.process(packet_consumer(env, pipe))
    print('A simple M/Ek/1 queueing simulation')
    env.run()
    print("Ending simulation time: {}".format(env.now))
    # Formulas from Kleinrock, "Queueing Systems, Volume I: Theory", 1975.
    # (M/M/1 expressions; they require utilization rho = lambda/mu < 1.)
    mu = 1.0 / SERVICE
    l = 1.0 / ARRIVAL
    rho = l / mu
    W = rho / mu / (1 - rho)  # average wait in the queue
    T = 1 / mu / (1 - rho)  # average total system time.
    nq_bar = rho / (1.0 - rho) - rho  # The average number waiting in the queue
    print("Theory: avg queue wait (M/M/1) for comparison {}, avg total time {}, avg queue size {}".format(W, T, nq_bar))
    print('Sim Average queue wait = {}'.format(queue_wait / NUM_PACKETS))
    print('Sim Average total wait = {}'.format(total_wait / NUM_PACKETS))
    print('Sim Average queue size = {}'.format(queue_size / float(NUM_PACKETS)))
| true |
06bae2aba436b3e97737abaeaf9507b5145bbce4 | Python | vaishnav-197/DSA-Algo | /leetcode/leetcode_test_inrange.py | UTF-8 | 277 | 2.96875 | 3 | [] | no_license | def in_Range(nums):
    # Seed a presence map for every position 1..len(nums).
    d={}
    for i in range(1,len(nums)+1):
        d[i]=0
    # Mark the values that actually occur in nums.
    for i in nums:
        d[i]=1
    # Values still unmarked never appeared
    # (LeetCode 448, "Find All Numbers Disappeared in an Array").
    res=[]
    for i in d:
        if d[i]==0:
            res.append(i)
    return res
# Demo call: evaluates to [5, 6] for this input; the result is discarded.
in_Range([4,3,2,7,8,2,3,1])
| true |
f68a7d4c76c5b3426cf69931c29482041dcfe53b | Python | foob26uk/coding-questions | /ch1/ch114.py | UTF-8 | 613 | 4 | 4 | [] | no_license | import random
#1.14
# to find the majority element in an array, just keep discarding two distinct elements and we will be left with the majority element at the end
# majority element occurs more than half the time
def findMajorityElement(arr):
    """Return the majority element of arr (the value occurring > len(arr)/2
    times), using Boyer-Moore voting.

    Fixes over the original pair-discarding version: O(n) instead of the
    O(n^2) caused by repeated list.pop(0)/pop(i), and the caller's list is
    no longer destructively emptied. Returns None for an empty input
    (the original raised IndexError). The result is only guaranteed
    meaningful when a true majority exists, same as before.
    """
    candidate = None
    count = 0
    for value in arr:
        if count == 0:
            candidate = value
            count = 1
        elif value == candidate:
            count += 1
        else:
            count -= 1
    return candidate
# Python 2 driver: range() returns a list here, so `x + y` concatenates.
# 5 occurs 11 times out of 20 elements (once in range(10) plus ten more),
# so it is a strict majority.
x = range(10)
y = [5] * 10
x = x + y
random.shuffle(x)
print x
print findMajorityElement(x)
x = range(11)
y = [9] * 10
x = x + y
random.shuffle(x)
print x
print findMajorityElement(x)
| true |
dea326a2dd60adeb8a4f298808ef92150bad4744 | Python | yuezaixz/PythonStudy | /leetcode/math/evalRPN.py | UTF-8 | 953 | 3.234375 | 3 | [] | no_license | class Solution:
# @param tokens, a list of string
# @return an integer
    def evalRPN(self, tokens):
        """Evaluate a Reverse Polish Notation expression given as a list of
        token strings; returns -1 on empty input or operand underflow.

        NOTE(review): this file is Python 2 (see the print statement below).
        The expression ``-((-one)/two) if one*two < 0 else one/two`` emulates
        integer division truncated toward zero on top of Python 2's
        floor-dividing ``/``.  Under Python 3 ``/`` is float division, so the
        result type changes -- confirm the target interpreter before reuse.
        """
        if not tokens : return -1
        operator = ["+","-","*","/"]
        operatorStack = []
        for operStr in tokens:
            if operStr in operator :
                # Binary operator: needs two operands on the stack.
                if len(operatorStack) < 2 : return -1
                result = None
                two = operatorStack.pop()
                one = operatorStack.pop()
                if operStr == '+': result = one + two
                if operStr == '*': result = one * two
                if operStr == '-': result = one - two
                if operStr == '/': result = -((-one)/two) if one*two < 0 else one/two
                operatorStack.append(result)
            else:
                # Operand token (int() also accepts negatives like "-11").
                operatorStack.append(int(operStr))
        return operatorStack[0]
if __name__ == '__main__':
print Solution().evalRPN(["10","6","9","3","+","-11","*","/","*","17","+","5","+"]) | true |
95a9f873ad4244f4c96ed37f67a787f849ed219c | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_67/11.py | UTF-8 | 1,304 | 2.84375 | 3 | [] | no_license | from unionfind import *
class Rect:
    """Axis-aligned rectangle built from a [x1, y1, x2, y2] corner list."""
    def __init__(self,l):
        # Only the first four entries are read; extras are ignored.
        self.x1 = l[0]
        self.y1 = l[1]
        self.x2 = l[2]
        self.y2 = l[3]
    def colliderect(self,r2):
        """True when the rectangles overlap or sit within one unit of each
        other along both axes (adjacency counts as a collision)."""
        # Disjoint only if one rectangle lies strictly more than one unit
        # beyond the other on some axis.
        return not (self.y2 < r2.y1 - 1 or
                    r2.y2 < self.y1 - 1 or
                    self.x2 < r2.x1 - 1 or
                    r2.x2 < self.x1 - 1)
def xl(l):
    # Convenience: lazy index range over a sequence (Python 2 xrange).
    return xrange(len(l))
def newr(l):
    # Build a Rect from a [x1, y1, x2, y2] list (alternative kept below).
    return Rect(l)
    #return Rect(l[0],l[1],l[2]-l[0]+2,l[3]-l[1]+2)
debug=False
#debug=True
# Python 2 contest script: reads the number of test cases, then for each
# case the number of rectangles and one "x1 y1 x2 y2" line per rectangle.
for case in range(input()):
    print "Case #"+str(case+1)+":",
    R=input()
    uf=UnionFind()
    rects=[]
    for i in xrange(R):
        newrect=newr(map(int,raw_input().split()))
        rects.append(newrect)
    #uf.insert_objects(rects)
    # Union-find over rectangle *indices*; merge touching/overlapping rects.
    uf.insert_objects(range(R))
    for i in xrange(R):
        for j in xrange(R):
            if rects[i].colliderect(rects[j]):
                uf.union(i,j)
    if debug:
        print rects
        print uf.allsets()
    times=[]
    # Per connected component: the worst Manhattan span from any rect's
    # lower-left corner to the component's upper-right bounding corner.
    for l in uf.allsets().values():
        if l:
            rl=[rects[i] for i in l]
            maxx=max(r.x2 for r in rl)
            maxy=max(r.y2 for r in rl)
            if debug: print maxx,maxy
            times.append(max(maxx-r.x1+maxy-r.y1 for r in rl))
    print max(times)+1
| true |
cfc5d64924216cb7c874d444222721b7126fe467 | Python | suvarnak/PythonSkillDevelopmentLab | /code/tkinter_demo3.py | UTF-8 | 695 | 3.40625 | 3 | [] | no_license | # to create a simple window
import tkinter
# Simple two-field login window demo.
window = tkinter.Tk()
window.title("My first app")
window.geometry('600x400')
username_label = tkinter.Label(master=window,foreground="red",background="yellow",text="User Name")
#grid is a geometry manager organizes widgets in a table-like structure in the parent widget.
username_label.grid(column=0,row=0)
password_label = tkinter.Label(master=window,foreground="red",background="yellow",text="Password")
password_label.grid(column=0,row=1)
username_entry = tkinter.Entry(master=window,bd=3)
username_entry.grid(column=1,row=0)
# Bug fix: this second Entry used to be assigned back to `username_entry`,
# silently clobbering the reference to the username field.
password_entry = tkinter.Entry(master=window)
password_entry.grid(column=1,row=1)
window.mainloop() | true |
d9632e39016ef67885100fe6416be980eb1ffbea | Python | Haoran1227/LSTM-Neural-Network | /Layers/Initializers.py | UTF-8 | 1,201 | 3.15625 | 3 | [] | no_license | import numpy as np
class Constant:
    """Weight initializer that fills every entry with one fixed value."""
    def __init__(self, constant = 0.1):
        # Fill value; 0.1 is the conventional default.
        self.constant = constant
    def initialize(self, weights_shape, fan_in, fan_out):
        # fan_in / fan_out are accepted but unused so that every initializer
        # in this module shares the same call signature.
        return np.zeros(weights_shape) + self.constant
class UniformRandom:
    """Weight initializer drawing i.i.d. samples from U[0, 1)."""
    def initialize(self, weights_shape, fan_in, fan_out):
        # fan_in / fan_out are ignored; kept for a uniform initializer API.
        return np.random.uniform(0, 1, weights_shape)
class Xavier:
    """Xavier/Glorot normal initializer.

    Draws from N(0, std**2) with std = sqrt(2 / (fan_in + fan_out)), which
    keeps the activation variance roughly constant across layers
    (Glorot & Bengio, 2010).  fan_in / fan_out are the number of input and
    output units of the layer being initialized.
    """
    def initialize(self, weights_shape, fan_in, fan_out):
        # Fix over the original: np.random.normal takes the *standard
        # deviation*, but the local variable was misleadingly named
        # "variance".  Same formula, honest name.
        std = np.sqrt(2 / (fan_in + fan_out))
        return np.random.normal(0.0, std, weights_shape)
class He:
    """He (Kaiming) normal initializer for ReLU networks.

    Draws from N(0, std**2) with std = sqrt(2 / fan_in)
    (He et al., 2015).
    """
    def initialize(self, weights_shape, fan_in, fan_out):
        # Fix over the original: the value passed to np.random.normal is the
        # standard deviation, so name it `std` (was "variance").  fan_out is
        # unused but kept for the shared initializer signature.
        std = np.sqrt(2 / fan_in)
        return np.random.normal(0.0, std, weights_shape)
| true |
03993e80c586f7d92d6dac858a351c0ea2e14cf2 | Python | Rohin-Shreshth/water-quality | /main.py | UTF-8 | 490 | 3.421875 | 3 | [
"MIT"
] | permissive | '''
-preprocess data
-train-test split our data
-build our model
-test our model
'''
import pandas as pd
from sklearn import preprocessing
# Read data
df = pd.read_csv('./datasets/water_potability.csv')
# print('data')
# Preprocess the data
df.dropna(inplace=True)
# Normalize data (convert all numbers to [0,1] range)
x = df.values #returns a numpy array
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
# NOTE(review): rebuilding the DataFrame this way drops the original column
# names (columns become 0..n-1); consider
# pd.DataFrame(x_scaled, columns=df.columns) if the labels matter downstream.
df = pd.DataFrame(x_scaled)
print(df)
| true |
79099a159afe949fe52a38c380382fe35135ae83 | Python | EduardoZortea18/PythonExercises | /ex003.py | UTF-8 | 146 | 3.90625 | 4 | [] | no_license | n1 = int(input('Type a number:' ))
n2 = int(input('Type another number: '))
# NOTE(review): "sum" shadows the built-in sum(); consider renaming to "total".
sum = n1 + n2
print('sum between {} and {} is {}!'.format(n1,n2,sum)) | true |
27caa39d7cf1b1f3881e5394792611ffd71ed980 | Python | benyl/Python | /homework/homework4_gen_key.py | UTF-8 | 2,895 | 3.875 | 4 | [] | no_license | ###############################################
# COMS3101.003 - Python - Homework 4
#
################################################
# Part 2 - Object Serialization and Command Line Arguments
#
# (a) Write a module 'gen_key', that contains a class Key with base
# class dict. When Key instances are initialized they should
# randomly map each of the 26 English letters [A-Z] and whitespace
# to a unique ciphertext number between 0 and 26.
# (b) Write a module 'encode', that contains a function encode, which
# takes as parameters a plaintext message and a key instance and
# encodes the message in the following simple unary representation.
# The message is represented as a list and each element of the list
# represents a letter.
# (c) Write a module 'decode', that contains a function decode, which
# takes as parameters an encoded message object, of the type described
# above, and a Key object, and returns a plaintext string.
# Write a main function that unplickes a message object and a Key
# object from a file, uses decode to decode the message and prints
# the plaintext.
################################################
# Part 3 - Exceptions
#
# (a) When encoding a message, the plaintext may contain symbols other
# than 26 letters or space. In the 'encode' module, create an
# exception class EncodingException and raise an exception on this
# type in the encode function when an input symbol cannot be encoded.
# Modify the main method to handle this exception.
# (b) The object to be decoded is not a valid encrypted message.
# This can be case for a number of reasons.
# 1. The message object is not a list at all.
# 2. The outer list contains a non-list element.
# 3. There are too many elements in one of the inner lists.
# Implement this behavior and provide examples for objects causing
# each type of exception.
# module gen_key
import sys, random, pickle
class Key(dict):
    """Random substitution-cipher key.

    Maps each of the 26 uppercase letters plus the space character to a
    distinct ciphertext number in 0..26, chosen at random when the key is
    constructed.
    """
    def __init__(self):
        # Pool of plaintext symbols still awaiting a ciphertext number;
        # same order and same random.choice/remove scheme as before, so a
        # seeded RNG reproduces the identical mapping.
        pool = [' '] + [chr(code) for code in range(ord('A'), ord('Z') + 1)]
        for number in range(27):
            symbol = random.choice(pool)
            self[symbol] = number
            pool.remove(symbol)
def main():
    # Python 2 script: "print" statements and text-mode pickling below.
    # Pickles a fresh Key to the path given on the command line, or to a
    # default filename when no path is provided.
    if len(sys.argv) != 2:
        filename = 'pickled_Key.pickle' # default filename
        print "no path provided, pickle to file:", filename
    else:
        filename = sys.argv[1]
        print "pickle to file:", filename
    # generate key and pickle to file
    k = Key()
    with open(filename,'w') as f:
        pickle.dump(k, f)
    print k
if __name__ == "__main__": # Default "main method" idiom.
    # Banner (Python 2 print statements), then generate and pickle a key.
    print '#--------------------------------------'
    print '# Part 2 - Object Serialization and Command Line Arguments'
    print '#--------------------------------------'
    print '\n# gen_key : \n'
    main()
| true |
7568bb9d87bb406a77363c67e0e2a30223ff804d | Python | shubhamupasani11/Automatic-Process-Logger | /script2.py | UTF-8 | 508 | 2.953125 | 3 | [] | no_license |
from sys import *
def Function(value):
    """Echo *value* to stdout with an identifying prefix.

    ``value`` must be a string (it is concatenated into the message).
    """
    # Fix: corrected the typo "paramter" -> "parameter" in the output.
    print("Inside the function parameter: "+value)
def main():
    """Entry point: validate the command line, answer the -u/-h help flags,
    otherwise forward the single argument to Function().

    Relies on the module-level ``from sys import *`` for ``argv`` and
    ``exit``.  Fixes: corrected the typos "argumets" -> "arguments" and
    "Srcipt" -> "Script" in the user-facing messages.
    """
    if (len(argv)!=2):
        print("Insufficient arguments to the Script")
        exit()
    # NOTE(review): the un-dashed "U" / "H" alternatives next to "-u" / "-h"
    # look unintentional -- confirm the intended flag spellings.
    if (argv[1]=="-u") or (argv[1]=="U"):
        print("Use the Script as Name.py Parameters")
        exit()
    if(argv[1]=="-h") or (argv[1]=="H"):
        print("This is demo automation script")
        exit()
    Function(argv[1])
if __name__=="__main__":
main() | true |
8294e1373cb62422ebd120ac02679a31fdba3272 | Python | nirajmahajan/Digit-Recognition | /models/CNN/train.py | UTF-8 | 2,804 | 3.015625 | 3 | [] | no_license | from helpers import *
parser = argparse.ArgumentParser(description='Image Detection')
parser.add_argument('-use_trained_model', action = 'store_true')
# Create and print the training dataset
train_dataset = dsets.MNIST(root='../../utils/data', train=True, download=True, transform=composed)
# print("Downloaded the training dataset:\n ", train_dataset)
# Create and print the validating dataset
validation_dataset = dsets.MNIST(root='../../utils/data', train=False, download=True, transform=composed)
# print("Downloaded the validating dataset:\n ", validation_dataset)
# Create Dataloader objects
trainloader = DataLoader(dataset = train_dataset, batch_size = 100)
validationloader = DataLoader(dataset = validation_dataset, batch_size = 5000)
args = parser.parse_args()
if(not args.use_trained_model):
    # Training branch: build, train and pickle a fresh CNN.
    # Define a criterion function
    criterion = nn.CrossEntropyLoss()
    # Define model parameters and create a model
    # The Neural Network will have a single hidden layer with 100 neurons
    # NOTE(review): in_dim/out_dim/Hidden are never used by the CNN below.
    in_dim = 784
    out_dim = 10
    Hidden = 100
    model = CNN(out_1=16, out_2=32)
    # Define an optimizer
    optimizer = torch.optim.SGD(model.parameters(), lr = 0.1)
    # Define analysis function: returns (correct, incorrect, accuracy) on the
    # validation set.
    def analyse():
        correct=0
        N_test=len(validation_dataset)
        #perform a prediction on the validation data
        for x_test, y_test in validationloader:
            model.eval()
            z = model(x_test)
            _, yhat = torch.max(z.data, 1)
            correct += (yhat == y_test).sum().item()
        accuracy = correct / N_test
        return (correct, N_test-correct, accuracy)
    # train the model now (NOTE(review): comment said 100 epochs, value is 10)
    epochs = 10
    for epoch in range(epochs):
        print('Ran {} epochs till now'.format(epoch), flush = True)
        (C, I, A) = analyse()
        print("Accuracy =", A, flush=True)
        for (x,y) in trainloader:
            model.train()
            optimizer.zero_grad()
            z = model(x)
            loss = criterion(z, y)
            loss.backward()
            optimizer.step()
    with open('model/trained_model.pkl', 'wb') as handle:
        pickle.dump(model, handle, protocol=pickle.HIGHEST_PROTOCOL)
else:
    # Evaluation branch: load the pickled model and report validation stats.
    # Define analysis function (duplicated verbatim from the branch above --
    # NOTE(review): could be hoisted before the if/else).
    def analyse():
        correct=0
        N_test=len(validation_dataset)
        #perform a prediction on the validation data
        for x_test, y_test in validationloader:
            model.eval()
            z = model(x_test)
            _, yhat = torch.max(z.data, 1)
            correct += (yhat == y_test).sum().item()
        accuracy = correct / N_test
        return (correct, N_test-correct, accuracy)
    if(not os.path.isfile('model/trained_model.pkl')):
        print('Train the model first')
        os._exit(1)
    with open('model/trained_model.pkl', 'rb') as f:
        model = pickle.load(f)
    # Count the classified and miss classified data using the validation set
    (C, I, A) = analyse()
    print("Analysis:")
    print("Correctly classified data count =", C)
    print("Incorrectly classified data count =", I)
    print("Accuracy =", A)
plt.show() | true |
533cb35826fc64db4356e6bcffe52a26aff0f046 | Python | archnemesis/homeserver | /homeserver/homeprotocol/messages/intercom_channel_accept.py | UTF-8 | 698 | 2.609375 | 3 | [] | no_license |
import struct
from .message import Message
class IntercomChannelAcceptMessage(Message):
    """Wire message accepting an intercom channel.

    Carries a remote address and a remote port, serialized little-endian
    as ``<IH`` (4 + 2 = 6 bytes); presumably the address is a packed IPv4
    value -- confirm against the peer protocol.
    """
    MESSAGE_ID = 4
    MESSAGE_SIZE = 6
    STRUCT_FORMAT = "<IH"

    def __init__(self, remote_addr=0, remote_port=0):
        self.remote_addr = remote_addr
        self.remote_port = remote_port

    @classmethod
    def unpack(cls, data):
        # Decode both fields straight into a fresh message instance.
        addr, port = struct.unpack(cls.STRUCT_FORMAT, data)
        msg = cls()
        msg.remote_addr = addr
        msg.remote_port = port
        return msg

    def pack(self):
        # Serialize the fields in declaration order.
        return struct.pack(self.STRUCT_FORMAT, self.remote_addr, self.remote_port)
| true |
5736352e75baa1135242681e383bffe88d50305e | Python | JuanDiegoCamposNeira/Pygame | /Pygame.pathfinding/path_finding.py | UTF-8 | 650 | 3.140625 | 3 | [] | no_license | import pygame
screen_width = 800
# Square window: the same width is used for both dimensions.
screen = pygame.display.set_mode((screen_width, screen_width))
# RGB color constants.
white = (255, 255, 255)
# NOTE(review): (0, 255, 0) is green, not red -- likely meant (255, 0, 0).
red = (0, 255, 0)
grey = (128, 128, 128)
class Spot:
    """One cell of the pathfinding grid."""
    def __init__(self, row, col, width, height):
        self.row = row
        self.col = col
        # Fix: removed the dead store ``self.width = row * width`` that was
        # unconditionally overwritten below.  NOTE(review): it (and
        # ``self.height = col * width``) look like they were meant to be the
        # pixel coordinates ``self.x`` / ``self.y`` -- confirm intent before
        # relying on ``height``.  The ``height`` *parameter* is never stored.
        self.height = col * width
        self.color = white
        self.neightbors = []  # (sic) spelling kept: the attribute name is public
        self.width = width
    # Eod
# Eoc
def main():
    """Run the event loop until the user closes the window."""
    running = True
    while running:
        # Drain the event queue; a QUIT event ends the loop after this pass.
        for pending in pygame.event.get():
            if pending.type == pygame.QUIT:
                running = False
        # Eoi
# Eof
# Eow
# Eom
# Script entry: start the event loop immediately when the file runs.
main()
| true |
5e993700d640be60551e7a0a76a4aed48d089647 | Python | itsolutionscorp/AutoStyle-Clustering | /all_data/exercism_data/python/allergies/1ce5c0b6746d4a25a301c752069ff020.py | UTF-8 | 444 | 3.09375 | 3 | [] | no_license | ALLERGY_LIST = [
'eggs',
'peanuts',
'shellfish',
'strawberries',
'tomatoes',
'chocolate',
'pollen',
'cats'
]
class Allergies(object):
def __init__(self,score):
#self.score=score
self.list = self.calc_list(score)
def is_allergic_to(self,item):
#print self.list
return item in self.list
#@classmethod
def calc_list(self,score):
return [ALLERGY_LIST[i] for i in range(8) if score & 1 << i]
| true |
238caaa682dc595015a0540be8f815d06cc2d72d | Python | niranjancrr/Practice | /LongestCommonSubstring.py | UTF-8 | 961 | 3.5625 | 4 | [] | no_license | def LongestCommonSubstring(string1,string2):
    # Dynamic-programming table: one extra row/column of zeros as a border.
    cols = len(string1)+1
    rows = len(string2)+1
    matrix = [[0 for j in range(cols)] for i in range(rows)]
    maximum = 0
    maxrow = 0
    maxcol=0
    # matrix[row][col] = length of the common suffix of string1[:col] and
    # string2[:row]; remember where the longest one ends.
    for row in range(1,rows):
        for col in range(1,cols):
            if string1[col-1] == string2[row-1]:
                matrix[row][col] = matrix[row-1][col-1] + 1
                if matrix[row][col] > maximum:
                    maxrow = row
                    maxcol = col
                    maximum = matrix[row][col]
    #Generate Common Substring from the matrix
    seq = ''
    if maximum == 0:
        return seq
    else:
        # Walk the matching diagonal backwards, collecting the characters,
        # then reverse to restore left-to-right order.
        j = maxcol
        i = maxrow
        while matrix[i][j] > 0:
            if string1[j-1] == string2[i-1]:
                seq += string1[j-1]
            i-=1
            j-=1
        return(seq[::-1])
sequence = LongestCommonSubstring('niranjan','raamanujan')
print(sequence) | true |
3fa096f139b1498b3f4e823fcb996b98f6a1aa0d | Python | JIghtuse/python-playground | /crash_course/conditions/conditional_cases.py | UTF-8 | 908 | 4.3125 | 4 | [] | no_license | book = 'terror'
# "Predict then check" exercise: each pair prints the prediction, then the
# actual result of the comparison.  String equality tests:
print("Is book == 'terror'? I predict True.")
print(book == 'terror')
print("\nIs book == 'misery'? I predict False.")
print(book == 'misery')
# Numeric equality and ordering tests:
number = 19
print("\nIs number == 19? I predict True.")
print(number == 19)
print("\nIs number == 15? I predict False.")
print(number == 15)
print("\nIs number < 15? I predict False.")
print(number < 15)
print("\nIs number > 15? I predict True.")
print(number > 15)
print("\nIs number >= 19? I predict True.")
print(number >= 19)
print("\nIs number <= 18? I predict False.")
print(number <= 18)
# Case-normalized comparisons via str.title():
movie = 'fountain'
print("\nIs movie == 'Fountain'? I predict True.")
print(movie.title() == 'Fountain')
print("\nIs movie != 'Fountain'? I predict False.")
print(movie.title() != 'Fountain')
print("\nIs movie == 'Pi'? I predict False.")
print(movie.title() == 'Pi')
print("\nIs movie != 'Pi'? I predict True.")
print(movie.title() != 'Pi')
| true |
45079b8714e8bd42453074abd29f33d2e6c7cded | Python | StrawyTony/KaggleKKBox | /Codes/train_predict_churn.py | UTF-8 | 3,789 | 2.703125 | 3 | [] | no_license |
import gc;
import pandas as pd
import numpy
import math
import matplotlib.pyplot as plt
import xgboost as xgb
import lightgbm as lgb
import sklearn
from sklearn import *
from catboost import CatBoostClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import log_loss
from sklearn.preprocessing import LabelEncoder
gc.enable()
# Pre-filtered feature tables produced by an earlier pipeline step.
train = pd.read_csv('train_input_filtered.csv')
test = pd.read_csv('test_input_filtered.csv')
# get total columns (features): everything except the target and the user id
cols = [c for c in train.columns if c not in ['is_churn','msno']]
print(cols)
def xgb_score(preds, dtrain):
    """Custom XGBoost eval function: (metric name, log-loss of *preds*
    against the labels stored in the DMatrix)."""
    y_true = dtrain.get_label()
    return 'log_loss', metrics.log_loss(y_true, preds)
# parameters for xgb
xgb_params = {
    'eta': 0.05,
    'max_depth': 7,
    'objective': 'binary:logistic',
    'eval_metric': 'logloss',
    'seed': 3228,
    'silent': True,
    'tree_method': 'exact'
}
# parameters for lgb
lgb_params = {
    'learning_rate': 0.05,
    'application': 'binary',
    'max_depth': 7,
    'num_leaves': 256,
    'verbosity': -1,
    'metric': 'binary_logloss',
    'num_boost_round': 600,
    'early_stopping_rounds': 50
}
#train = train.sample(100000)
# splitting training dataset to train and test evaluate the models
x1, x2, y1, y2 = model_selection.train_test_split(train[cols], train['is_churn'], test_size=0.2, random_state=3228)
# catboost
print('cat training')
model = CatBoostClassifier(iterations=500, learning_rate=0.05, depth=7, l2_leaf_reg=5, loss_function='Logloss', eval_metric='Logloss')
# training dataset
model = model.fit(x1, y1,eval_set=(x2,y2),logging_level='Silent')
# actual test dataset
cat_pred = model.predict_proba(test[cols])[:,1]
# training-test dataset
cat_valid = model.predict_proba(x2)[:,1]
print('cat valid log loss = {}'.format(log_loss(y2,cat_valid)))
# xgb
print('xgb training')
watchlist = [(xgb.DMatrix(x1, y1), 'train'), (xgb.DMatrix(x2, y2), 'valid')]
# training dataset
model = xgb.train(xgb_params, xgb.DMatrix(x1, y1), 500, watchlist, feval=xgb_score, maximize=False, verbose_eval=100, early_stopping_rounds=50)
# actual test dataset
xgb_pred = model.predict(xgb.DMatrix(test[cols]), ntree_limit=model.best_ntree_limit)
# training-test dataset
xgb_valid = model.predict(xgb.DMatrix(x2))
print('xgb valid log loss = {}'.format(log_loss(y2,xgb_valid)))
# lgbm
print('lgb training')
d_train = lgb.Dataset(x1, label=y1)
d_valid = lgb.Dataset(x2, label=y2)
watchlist = [d_train, d_valid]
# training dataset
model = lgb.train(lgb_params, train_set=d_train, valid_sets=watchlist, verbose_eval=100)
# feature importance of lgb
ax = lgb.plot_importance(model)
plt.tight_layout()
plt.savefig('feature_importance_graph.png')
# actual test dataset
lgb_pred = model.predict(test[cols])
# training-test dataset
lgb_valid = model.predict(x2)
print('lgb valid log loss = {}'.format(log_loss(y2,lgb_valid)))
# averaging
merged_pred = (cat_pred + xgb_pred + lgb_pred) / 3
# NOTE(review): the first groupby below *replaces* `test` with a per-msno
# aggregate; the later `test['is_churn'] = xgb_pred...` assignments then pair
# row-level prediction arrays with the already-aggregated frame.  Confirm the
# row counts still line up -- this looks like a latent length-mismatch bug.
test['is_churn'] = cat_pred.clip(0.+1e-15, 1-1e-15)
test = pd.DataFrame({'is_churn' : test.groupby(['msno'])['is_churn'].mean()}).reset_index()
test[['msno','is_churn']].to_csv('cat.csv', index=False)
test['is_churn'] = xgb_pred.clip(0.+1e-15, 1-1e-15)
test = pd.DataFrame({'is_churn' : test.groupby(['msno'])['is_churn'].mean()}).reset_index()
test[['msno','is_churn']].to_csv('xgb.csv', index=False)
test['is_churn'] = lgb_pred.clip(0.+1e-15, 1-1e-15)
test = pd.DataFrame({'is_churn' : test.groupby(['msno'])['is_churn'].mean()}).reset_index()
test[['msno','is_churn']].to_csv('lgb.csv', index=False)
test['is_churn'] = merged_pred.clip(0.+1e-15, 1-1e-15)
test = pd.DataFrame({'is_churn' : test.groupby(['msno'])['is_churn'].mean()}).reset_index()
test[['msno','is_churn']].to_csv('ensemble.csv', index=False)
| true |
68a771963487477cbadc6a6835858e8638068067 | Python | openmm/openmm | /wrappers/python/openmm/app/internal/charmm/_charmmfile.py | UTF-8 | 5,779 | 2.96875 | 3 | [] | no_license | """
Provides a class for reading CHARMM-style files. The key component to these
files is that the ! character is a comment character and everything after ! is
ignored.
This file is part of the OpenMM molecular simulation toolkit originating from
Simbios, the NIH National Center for Physics-Based Simulation of Biological
Structures at Stanford, funded under the NIH Roadmap for Medical Research,
grant U54 GM072970. See https://simtk.org. This code was originally part of
the ParmEd program and was ported for use with OpenMM.
Copyright (c) 2014 the Authors
Author: Jason M. Swails
Contributors:
Date: April 18, 2014
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import
from openmm.app.internal.charmm.exceptions import CharmmFileError
import sys
if sys.version_info < (3, 0):
from codecs import open
class CharmmFile(object):
"""
A CHARMM file that recognizes the "!" character as a 'comment' token. It
can be iterated over and generally treated like a file object, but only
spits out strings that have been truncated at its first comment character.
There is currently no way to recognize a ! as a _non_ comment character,
since allowing an escape character does not seem to be common practice and
would likely introduce negative performance implications.
"""
def __init__(self, fname, mode='r'):
if mode not in ('r', 'w'):
raise ValueError('Cannot open CharmmFile with mode "%s"' % mode)
if mode == 'r':
self.status = 'OLD'
else:
self.status = 'NEW'
try:
self._handle = open(fname, mode, encoding='utf-8')
except IOError as e:
raise CharmmFileError(str(e))
self.closed = False
self.line_number = 0
def write(self, *args, **kwargs):
return self._handle.write(*args, **kwargs)
def __iter__(self):
# Iterate over the file
for line in self._handle:
try:
idx = line.index('!')
end = '\n'
except ValueError:
# There is no comment...
idx = None
end = ''
yield line[:idx] + end
def readline(self):
self.line_number += 1
line = self._handle.readline()
try:
idx = line.index('!')
end = '\n'
except ValueError:
idx = None
end = ''
return line[:idx] + end
def readlines(self):
return [line for line in self]
def read(self):
return ''.join(self.readlines())
def close(self):
self._handle.close()
self.closed = True
def rewind(self):
""" Return to the beginning of the file """
self._handle.seek(0)
def __del__(self):
try:
self.closed or self._handle.close()
except AttributeError:
# It didn't make it out of the constructor
pass
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
class CharmmStreamFile(object):
"""
The stream file is broken down into sections of commands delimited by the
strings:
read <section> <options>
....
....
end
This object provides iterators over those sections and a file-like API for
dealing with the text.
"""
def __init__(self, fname):
self.lines = CharmmFile(fname, 'r').readlines()
self.line_number = 0
def __iter__(self):
return iter(self.lines)
def rewind(self):
""" Return to the beginning of the file """
self.line_number = 0
def next_section(self):
"""
Fast-forwards the file to the next CHARMM command section
Returns: (str, list)
- The first string is the line defining the section that's being
returned
- The list is a list of all lines contained in the section
excluding the "read <blah>" and "end" lines.
Notes:
The line pointer will be set to the line defining the
"""
lines = []
while self.line_number < len(self.lines):
line = self.lines[self.line_number].strip()
if line[:4].lower() == 'read':
title = line.strip()
self.line_number += 1
line = self.lines[self.line_number]
while line and not line.strip().lower().startswith('end'):
lines.append(line)
self.line_number += 1
line = self.lines[self.line_number]
return title, lines
self.line_number += 1
# No sections left
return None, None
def __del__(self):
pass
| true |
48de70401203dacfd13899a626c0672ca2fa794e | Python | hvaara/CompetitiveProgramming | /ProjectEuler/0007/solution.py | UTF-8 | 430 | 3.234375 | 3 | [
"MIT"
] | permissive | def sieve(m):
    # Sieve of Eratosthenes over 0..int(sqrt(m))+1 only -- despite the
    # parameter name, primality is computed up to sqrt(m), not m.
    # NOTE(review): "l" (lowercase L) is an easily-misread variable name.
    l = [True]*(int(m**.5)+2)
    l[0], l[1] = False, False
    for i in range(2, len(l)):
        if not l[i]:
            continue
        # Mark every multiple of i as composite.
        for j in range(i*2, len(l), i):
            l[j] = False
    return l
def solution(n):
    """Return the n-th prime (1-indexed), scanning the sieve() primality
    table built for sqrt(100000000000); -1 if n-th prime lies beyond it."""
    bound = 100000000000
    seen = 0
    for candidate, is_prime in enumerate(sieve(bound)):
        if is_prime:
            seen += 1
            if seen == n:
                return candidate
    return -1
print(solution(10001))
| true |
2c9674033f71d79dcc867cf0bb5f49e004378520 | Python | LyceeClosMaire/projet-final-assistant-jdr | /Code_Savre/Lancé de dé.py | UTF-8 | 1,787 | 4.0625 | 4 | [] | no_license | from random import * #importation de l'aléatoire
# Ask how many dice to roll, and how many faces each die has.
lancer = int(input("combien de lancer voulez vous ?:")) # number of rolls requested
k =int(input("dé de combien ?")) # die type (number of faces)
de=randint(1,k) # pre-roll one die in [1, k] for the single-roll stat check below
def lancer_des(nb_lancers, nb_faces): # roll several dice and total them
    """Roll ``nb_lancers`` dice of ``nb_faces`` faces and return the total.

    Returns 0 when ``nb_lancers`` is 0.  Fix over the original: the loop
    variable used to be named ``lancer``, shadowing the module-level
    ``lancer`` read from user input.
    """
    resultats = [] # the individual rolls
    for _ in range(nb_lancers):
        resultats.append(randint(1,nb_faces)) # one random roll per die
    return(sum(resultats)) # total of all rolls
# Multiple-roll case (typically a damage roll) _________________________________
if lancer > 1 : # more than one roll requested
    print(lancer_des(lancer,k)) # roll them all and print the total
# Single-roll case (a stat check for the character sheet) _____________________
if lancer == 1 : # exactly one roll requested
    stat = int(input("Entrer votre statistique")) # the character's stat value
    if stat >= de : # the pre-rolled die is within the stat
        print ("réussite") # success
        print(de,"/",stat) # show roll / stat
    elif stat < de : # the roll exceeded the stat
        print ("échec") # failure
print(de,"/",stat) # afficher le résultat du dé sur la statistique
| true |
84bcffe320fa70f5f7a0408782462d23d256c3f1 | Python | priyatransbit/leveragedFundStrategy | /Quotes.py | UTF-8 | 799 | 2.671875 | 3 | [
"WTFPL"
] | permissive | # -*- coding: utf-8 -*-
'''
Created on 2014年12月3日
@author: cheng.li
'''
class MinutesQuote:
    """A single intra-day OHLCV bar for one instrument.

    NOTE(review): the parameter name ``quteDuration`` (sic) is public API and
    is therefore kept; the *stored* attribute is spelled ``quoteDuration``.
    """
    def __init__(self,
                 name,
                 quteDuration,
                 date,
                 time,
                 openPrice,
                 highPrice,
                 lowPrice,
                 closePrice,
                 volume,
                 amount):
        # Instrument identity and bar interval.
        (self.name, self.quoteDuration) = (name, quteDuration)
        # Timestamp of the bar.
        (self.date, self.time) = (date, time)
        # OHLC prices.
        (self.open, self.high, self.low, self.close) = (
            openPrice, highPrice, lowPrice, closePrice)
        # Traded size and turnover.
        (self.volume, self.amount) = (volume, amount)
import unittest
class TestQuotes(unittest.TestCase):
def testMinutesQuote(self):
pass
| true |
2050d1cd6181e010fc4518c087a1a63270567cdb | Python | publiccoding/prog_ln | /mypractice/project/multilevelInheritance.py | UTF-8 | 252 | 2.609375 | 3 | [] | no_license | class A:
    def __init__(self):
        # Inherited (unoverridden) by B and C below, so C() prints this line.
        print("i am in A")
class B(A):
##    def __init__(self):
##        print("i am in B")
    # No own __init__: falls back to A's (multilevel inheritance demo).
    pass
class C(B):
##    def __init__(self):
##        print("i am in C")
    # Resolution order C -> B -> A: instantiating C runs A.__init__.
    pass
c = C()
| true |
51dde975867101661a6253e5bb454fda1059cafa | Python | AronsonDan/Python | /Introspection/Introspector.py | UTF-8 | 280 | 3.4375 | 3 | [] | no_license | import inspect
def dump(obj):
print("Type")
print("====")
print(type(obj))
print()
print("Documentation")
print("=============")
print(inspect.getdoc(obj))
print()
print("Methods")
print("=======")
# TODO
print()
i = 10
dump(i) | true |
370b934cf5134506ea22474f237c40fc01a54cea | Python | ll-O-ll/algos | /python/topologicalsort.py | UTF-8 | 1,085 | 3.0625 | 3 | [] | no_license | def topologicalSort(jobs, deps):
# using DFS algo... already defined
permanent = [0]*len(jobs)
visited = [0]*len(jobs)
answer = []
boo = True
for n in range(len(permanent)):
if permanent[n] == 0:
boo = visit(permanent, visited, n, jobs, deps, answer, boo)
if boo == False:
return []
return answer
def visit(permanent, visited, node, jobs, deps, answer, boo):
if permanent[node] == 1:
return
if visited[node] == 1:
#stop
return False
visited[node] = 1
for edge in deps:
if edge[0] == jobs[node]:
m = jobs.index(edge[1])
boo = visit(permanent, visited, m, jobs, deps, answer, boo)
if boo == False:
return boo
visited[node] = 0
permanent[node] = 1
#prepend
answer.insert(0,jobs[node])
if __name__ == '__main__':
jobs = [1, 2, 3, 4, 5, 6, 7, 8]
deps = [[3, 1], [8, 1], [8, 7], [5, 7], [5, 2], [1, 4], [1, 6], [1, 2], [7, 6]]
print(topologicalSort(jobs,deps)) | true |
11f44f15a050f684d62d908dd034f062dc54a40f | Python | Phy9/PathfindingAlgorithm | /constants.py | UTF-8 | 802 | 2.671875 | 3 | [] | no_license | """CONSTANTS TO BE USED. All constants can be changed here"""
from enum import Enum, auto
class Game:
FPS = 60
class Window:
WINSIZE_X: int = 820
WINSIZE_Y: int = 820
BOARD_WIDTH: int = 120
BOARD_HEIGHT: int = 120
BORDER_X: int = 10
BORDER_Y: int = 10
PIXEL_X: int = (WINSIZE_X - 2*BORDER_X) // BOARD_WIDTH
PIXEL_Y: int = (WINSIZE_Y - 2*BORDER_Y) // BOARD_HEIGHT
GAP_X: int = 1
GAP_Y: int = 1
class NodeState(Enum):
EMPTY: dict = {"color": (240, 240, 240)}
HALF: dict = {"color": (120, 120, 240)}
DONE: dict = {"color": (120, 120, 0)}
FULL: dict = {"color": (60, 60, 60)}
START: dict = {"color": (0, 240, 0)}
GOAL: dict = {"color": (240, 0, 0)}
class PathCost:
DIAGONAL: int = 14
STRAIGHT: int = 10
Missing = object() | true |
e2aaf715a2234dd341419d51311256b5af1482e5 | Python | ProfessorX737/EMS | /src/Event.py | UTF-8 | 5,147 | 2.765625 | 3 | [] | no_license | import datetime
from src.Period import *
from src.User import *
from src.exceptions.VenueCapacityException import *
from src.exceptions.OverlappingBookingException import *
from src.Period import *
from src.exceptions.InvalidEventDateException import *
import abc
class Event():
def __init__(self,eventId,startDateTime,endDateTime,name,descr,venue,convener,capacity,deregEnd,fee,earlybirdEnd):
self.__id = eventId
self.__period = Period(startDateTime,endDateTime,eventId)
self.__venue = venue
self.__convener = convener
self.__capacity = capacity
self.__deregEnd = deregEnd
self.__isCancelled = False
self.__attendees = {}
self.__name = name # String
self.__descr = descr # String
self.__fee = fee
self.__earlybirdEnd = earlybirdEnd
def getName(self):
return self.__name
def getDescription(self):
return self.__descr
def getStartDateTime(self):
return self.__period.getStartDateTime()
def getEndDateTime(self):
return self.__period.getEndDateTime()
def getPeriodId(self):
return self.__period.getId()
def getPeriod(self):
return self.__period
def setName(self, name):
self.__name = name
def setDescription(self, descr):
self.__descr = descr
def setStartDateTime(self, startDateTime):
if startDateTime <= datetime.datetime.now():
raise InvalidEventDateException('startDateTime','startdatetime > current date time')
self.__period.setStartDateTime(startDateTime)
def setEndDateTime(self, endDateTime):
self.__period.setEndDateTime(endDateTime)
def addAttendee(self, user):
if not self.isFull():
self.__attendees[user.get_id()] = user
def setAttendees(self, attendeeList):
for attendee in attendeeList:
self.__attendees[attendee.get_id()] = attendee
def removeAttendee(self,userID):
if userID in self.__attendees:
del self.__attendees[userID]
def hasAttendee(self,userID):
if userID in self.__attendees:
return True
return False
def isCancelled(self):
return self.__isCancelled
def isOpen(self):
if self.getEndDateTime() < datetime.datetime.now() or self.__isCancelled:
return False
else:
return True
def isPastDeregEnd(self):
if self.getDeregEnd() < datetime.datetime.now():
return True
return False
def isFull(self):
if len(self.__attendees.values()) >= self.getCapacity():
return True
return False
    def getId(self):
        """Return the event's unique id."""
        return self.__id
    def getConvener(self):
        """Return the convener object of the event."""
        return self.__convener
    def getConvenerName(self):
        """Return the convener's display name (delegates to the convener object)."""
        return self.__convener.getName()
    def getConvenerId(self):
        """Return the convener's user id (delegates to the convener object)."""
        return self.__convener.get_id()
    def getVenue(self):
        """Return the venue object the event is booked at."""
        return self.__venue
    def getVenueId(self):
        """Return the id of the booked venue."""
        return self.__venue.getId()
    def getVenueName(self):
        """Return the name of the booked venue."""
        return self.__venue.getName()
    def getCapacity(self):
        """Return the event's attendee capacity."""
        return self.__capacity
    def getDeregEnd(self):
        """Return the deregistration deadline (datetime)."""
        return self.__deregEnd
    def getAttendees(self):
        """Return the registered attendee objects.

        NOTE(review): on Python 3 this is a live dict view, not a list —
        confirm callers do not mutate the registry while iterating it.
        """
        return self.__attendees.values()
def getNumAttendees(self):
return len(self.__attendees.values())
    def getEarlyBirdEnd(self):
        """Return the early-bird registration deadline (datetime)."""
        return self.__earlybirdEnd
def getCost(self):
if datetime.datetime.now() > self.__earlybirdEnd:
return self.__fee
else:
return 0.5*self.__fee
    def getFee(self):
        """Return the full (non-discounted) event fee."""
        return self.__fee
def setCapacity(self,capacity):
if capacity > self.__venue.getMaxCapacity():
raise VenueCapacityException('Capacity','New event capacity > Venue capacity')
else:
self.__capacity = capacity
    def setDeregEnd(self,deregEnd):
        """Set the deregistration deadline.

        Raises:
            InvalidEventDateException: if *deregEnd* is not strictly in the future.
        """
        if deregEnd <= datetime.datetime.now():
            raise InvalidEventDateException('Deregister period','Deregister period > current date time')
        self.__deregEnd = deregEnd
def setVenue(self, venue):
self.__venue.deletePeriod(self.__period.getId())
if venue.overlaps(self.__period):
raise OverlappingBookingException('Event', 'Overlapping booking time with previously booked event at this venue')
venue.addPeriod(self.__period)
self.__venue = venue
    def setConvener(self, convenerName):
        """Replace the event's convener.

        NOTE(review): the parameter name suggests a string, but getConvenerName()
        calls .getName() on the stored value — callers presumably pass a convener
        object; confirm against call sites.
        """
        self.__convener = convenerName
    def setEarlyBirdEnd(self, earlybirdEnd):
        """Set the early-bird registration deadline.

        Raises:
            InvalidEventDateException: if *earlybirdEnd* is not strictly in the future.
        """
        if earlybirdEnd <= datetime.datetime.now():
            raise InvalidEventDateException('Early bird registration','Earlybird registration > current date time')
        self.__earlybirdEnd = earlybirdEnd
    def setFee(self,fee):
        """Set the full event fee."""
        self.__fee = fee
    def cancelEvent(self):
        """Mark the event as cancelled; isOpen() reports False afterwards."""
        self.__isCancelled = True
def getStatus(self):
if self.isOpen():
return "Open"
elif self.isCancelled():
return "Cancelled"
else:
return "Closed"
    @abc.abstractmethod
    def getClassName(self):
        """Return the concrete subclass's type name; must be overridden."""
        pass
| true |
5c038d976ffc220348401a01e31cfbab1b55b9ed | Python | watertap-org/watertap | /watertap/core/util/model_diagnostics/tests/test_ipopt_initialization.py | UTF-8 | 4,011 | 2.515625 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | import pytest
from pyomo.environ import Block, Var, SolverFactory
from idaes.core.solvers import get_solver
from watertap.core.util.model_diagnostics.ipopt_initialization import (
generate_initialization_perturbation,
print_initialization_perturbation,
assert_no_initialization_perturbation,
)
class TestPerturbationHelper:
    """Unit tests for the IPOPT initialization-perturbation helpers.

    The fixture block ``b`` is constructed so that, with the helpers' default
    ``bound_push``/``bound_frac`` settings, ``x`` and ``y`` start outside their
    pushed bounds (and are flagged), ``z`` is fixed (ignored) and ``w`` sits
    safely inside its bounds (not flagged).
    """
    @pytest.fixture(scope="class")
    def b(self):
        """Pyomo block whose variable initial values straddle their bounds."""
        b = Block(concrete=True)
        # x: initial value 1e-7 hugs the tiny lower bound 1e-8.
        b.x = Var(bounds=(1e-8, None), initialize=1e-7)
        # y: initial value 1e3 lies above the upper bound 1e2.
        b.y = Var(bounds=(1e1, 1e2), initialize=1e3)
        # z: would violate its bounds, but is fixed and so must be skipped.
        b.z = Var(bounds=(0.0, 1e-8), initialize=1e-20)
        b.z.fix()
        # w: comfortably inside its bounds; should never be reported.
        b.w = Var(bounds=(None, 1), initialize=0.5)
        return b
    @pytest.mark.unit
    def test_generate_initialization_perturbation(self, b):
        # Defaults: only x and y are reported, with their perturbed values.
        r = list(generate_initialization_perturbation(b))
        assert r[0][0].name == "x"
        assert r[1][0].name == "y"
        assert r[0][1] == 1e-7
        assert r[1][1] == 1e3
        assert r[0][2] == 1e-2
        assert r[1][2] == 99.100000989
        # No bound relaxation shifts the perturbed targets slightly.
        r = list(generate_initialization_perturbation(b, bound_relax_factor=0.0))
        assert r[0][0].name == "x"
        assert r[1][0].name == "y"
        assert r[0][2] == 1.000001e-2
        assert r[1][2] == 99.1
        r = list(
            generate_initialization_perturbation(
                b, bound_relax_factor=0.0, bound_frac=1e-3
            )
        )
        assert r[0][0].name == "x"
        assert r[1][0].name == "y"
        assert r[0][2] == 1.000001e-2
        assert r[1][2] == 99.91
        r = list(
            generate_initialization_perturbation(
                b, bound_relax_factor=0.0, bound_frac=1e-3, bound_push=1e-3
            )
        )
        assert r[0][0].name == "x"
        assert r[1][0].name == "y"
        assert r[0][2] == 1.00001e-3
        assert r[1][2] == 99.91
        # With a tiny bound_push, x is no longer perturbed; only y remains.
        r = list(
            generate_initialization_perturbation(
                b, bound_relax_factor=0.0, bound_frac=1e-3, bound_push=1e-10
            )
        )
        assert r[0][0].name == "y"
        assert r[0][2] == 100.0 - 1e-8
        r = list(generate_initialization_perturbation(b, bound_push=1e-6))
        assert r[0][0].name == "x"
        assert r[1][0].name == "y"
        assert r[0][2] == 1.0e-6
        assert r[1][2] == 99.999900999999
    @pytest.mark.unit
    def test_print_initialization_perturbation(self, b, capsys):
        # Captured stdout must list exactly the two perturbed variables.
        print_initialization_perturbation(b, 1e-2, 1e-2, 1e-8, True)
        captured = capsys.readouterr()
        assert (
            captured.out
            == """IPOPT will move scaled initial value for variable x from 1.000000e-07 to 1.000000e-02
IPOPT will move scaled initial value for variable y from 1.000000e+03 to 9.910000e+01
"""
        )
    @pytest.mark.unit
    def test_assert_no_initialization_perturbation1(self, b):
        # Default settings perturb x, so the assertion helper must raise.
        with pytest.raises(
            ValueError,
            match="IPOPT will move scaled initial value for variable x from 1.000000e-07 to 1.000000e-02",
        ):
            assert_no_initialization_perturbation(b)
    @pytest.mark.unit
    def test_assert_no_initialization_perturbation2(self, b):
        # A tiny bound_push via optarg plus a feasible y makes the check pass.
        optarg = {"bound_push": 1e-10}
        b.y.value = 5e1
        assert_no_initialization_perturbation(b, optarg=optarg)
    @pytest.mark.unit
    def test_assert_no_initialization_perturbation3(self, b):
        # Same as above, but the option is supplied through a solver object.
        solver = get_solver()
        solver.options["bound_push"] = 1e-10
        b.y.value = 5e1
        assert_no_initialization_perturbation(b, solver=solver)
    @pytest.mark.unit
    def test_assert_no_initialization_perturbation4(self, b):
        # Passing both a solver and optarg is rejected.
        with pytest.raises(ValueError, match="Supply a solver or optarg, not both"):
            assert_no_initialization_perturbation(b, solver=b, optarg=b)
    @pytest.mark.unit
    def test_assert_no_initialization_perturbation5(self, b):
        # Only IPOPT-style solvers are supported.
        with pytest.raises(ValueError, match="Solver cbc is not supported"):
            assert_no_initialization_perturbation(b, solver=SolverFactory("cbc"))
| true |
028f05a83e454dc3e2dd0f845cc7590b12067a1b | Python | LookAtMe-Genius-Cameraman/T_System | /t_system/online_stream/__init__.py | UTF-8 | 18,094 | 2.578125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
.. module:: online_stream
:platform: Unix
:synopsis: the top-level submodule of T_System that contains the classes related to T_System's online stream broadcast feature.
.. moduleauthor:: Cem Baybars GÜÇLÜ <cem.baybars@gmail.com>
"""
import os # Miscellaneous operating system interfaces
import uuid # The random id generator
import subprocess # Subprocess managements
import json
import signal
from tinydb import Query # TinyDB is a lightweight document oriented database
from t_system.db_fetching import DBFetcher
from t_system import T_SYSTEM_PATH, dot_t_system_dir
from t_system import log_manager
logger = log_manager.get_logger(__name__, "DEBUG")
class OnlineStreamer:
    """Broadcast the camera/microphone feed live to configured streaming platforms.

    Keeps a list of :class:`StreamWebSite` records (persisted with TinyDB) and,
    for every site flagged ``to_be_used``, pipes the camera's H.264 output to
    the site's RTMP ingest server through an ``ffmpeg`` subprocess.
    """

    def __init__(self, camera, hearer):
        """Initialize the streamer.

        Args:
            camera:     Camera object from PiCamera.
            hearer:     Hearer object.
        """
        self.folder = f'{dot_t_system_dir}/streaming'
        self.__check_folders()
        self.websites_table = DBFetcher(self.folder, "db", "websites").fetch()
        self.websites = []
        self.stream_pipes = []
        self.streamer_config_file = f'{T_SYSTEM_PATH}/online_stream/config.json'
        self.__set_websites()
        # First run: seed the website list from the bundled config file.
        if not self.websites:
            self.__create_websites()
        self.camera = camera
        self.hearer = hearer

    def __prepare_stream(self):
        """Spawn one ffmpeg subprocess per website that is marked to be used."""
        self.stream_pipes = []
        # Video arrives on stdin (`-i -`) from the camera; audio is captured
        # from ALSA device hw:1,0 and both are muxed into an FLV/RTMP stream.
        common_stream_cmd = "ffmpeg -f h264 -r 25 -i - -itsoffset 5.5 -fflags nobuffer -f alsa -ac 1 -i hw:1,0 -vcodec copy -acodec aac -ac 1 -ar 8000 -ab 32k -map 0:0 -map 1:0 -strict experimental -f flv"
        for website in self.websites:
            if website.to_be_used:
                stream_cmd = f'{common_stream_cmd} {website.server}{website.active_stream_id["key"]}'
                # setsid puts ffmpeg in its own process group so stop_live can
                # terminate the whole group with killpg.
                self.stream_pipes.append(subprocess.Popen(stream_cmd, shell=True, stdin=subprocess.PIPE, preexec_fn=os.setsid))

    def go_live(self):
        """Start the live stream on every prepared ffmpeg pipe."""
        self.__prepare_stream()
        # NOTE(review): start_recording is called once per pipe; PiCamera may
        # reject a second start_recording call — confirm multi-site behaviour.
        for stream_pipe in self.stream_pipes:
            self.camera.start_recording(stream_pipe.stdin, format='h264', bitrate=2000000)

    def stop_live(self):
        """Stop recording and terminate every ffmpeg process group."""
        self.camera.stop_recording()
        for stream_pipe in self.stream_pipes:
            os.killpg(os.getpgid(stream_pipe.pid), signal.SIGTERM)

    @staticmethod
    def is_stream_available():
        """Return True when a network connection is available for streaming."""
        from t_system import network_connector
        return network_connector.is_network_online()

    def get_websites(self, w_ids=None):
        """Return websites matching *w_ids*, or every website when w_ids is None.

        Args:
            w_ids (list):  ID list of the websites.
        """
        if w_ids:
            websites = []
            for w_id in w_ids:
                for website in self.websites:
                    if website.id == w_id:
                        websites.append(website)
            return websites
        return self.websites

    def set_website_usage_stat(self, w_id, to_be_used):
        """Mark the website *w_id* as used / not used on live streams.

        Returns:
            bool:  True when a website with *w_id* exists, False otherwise.
        """
        for website in self.websites:
            if w_id == website.id:
                website.set_usage_stat(to_be_used)
                return True
        return False

    def activate_website_stream(self, w_id, stream_acc_name):
        """Select the given account's stream key for website *w_id*.

        Returns:
            bool:  True when a website with *w_id* exists, False otherwise.
        """
        for website in self.websites:
            if w_id == website.id:
                website.activate_stream_key(stream_acc_name)
                return True
        return False

    def set_website_stream(self, w_id, stream_id):
        """Add or update a stream identity on website *w_id*.

        Args:
            w_id (str):        ID of the website.
            stream_id (dict):  Identity information of the website's stream
                               ("account_name" and "key" entries).
        """
        for website in self.websites:
            if w_id == website.id:
                website.upsert_stream_key(stream_id["account_name"], stream_id["key"])
                return True
        return False

    def remove_website_stream(self, w_id, stream_acc_name):
        """Remove the given account's stream identity from website *w_id*."""
        for website in self.websites:
            if w_id == website.id:
                website.remove_stream_key(stream_acc_name)
                return True
        return False

    def refresh_websites(self):
        """Reload the website objects from the database after runtime changes."""
        self.websites.clear()
        self.__set_websites()

    def add_website(self, name, url, server, force_insert=False):
        """Add a website to `config.json`, updating it when *force_insert* is set.

        Args:
            name:                 Name of the WebSite. youtube, facebook etc.
            url:                  Website's page URL.
            server:               Website's Live stream server RTMP URL.
            force_insert (bool):  Overwrite url/server when the name already exists.
        """
        is_website_exist = False
        # FIX: open with 'r+' — the file is read with json.load and then
        # rewritten in place below; the previous read-only open made
        # json.dump raise io.UnsupportedOperation.
        with open(self.streamer_config_file, 'r+') as conf_file:
            config = json.load(conf_file)
            for website_conf in config["available_websites"]:
                if website_conf["name"] == name:
                    is_website_exist = True
                    if force_insert:
                        website_conf["url"] = url
                        website_conf["server"] = server
                        for website in self.websites:
                            if website.name == name:
                                website.update_self(url, server)
                                break
                    break
            if not is_website_exist:
                config["available_websites"].append({"name": name, "url": url, "server": server})
            conf_file.seek(0)  # rewind so the dump overwrites from the start
            json.dump(config, conf_file, indent=4)
            conf_file.truncate()  # remove any remaining part of the old content
        self.refresh_websites()
        return True

    def remove_websites(self, w_ids):
        """Remove the given websites from `config.json` and from the database.

        Args:
            w_ids (list):  ID list of the WebSites. youtube, facebook etc.
        """
        result = False
        # FIX: open with 'r+' — the config is rewritten in place below;
        # the previous read-only open made json.dump raise.
        with open(self.streamer_config_file, 'r+') as conf_file:
            config = json.load(conf_file)
            for website_conf in config["available_websites"]:
                for website_id in w_ids:
                    for website in self.websites:
                        if website_id == website.id:
                            if website_conf["name"] == website.name:
                                website.delete_self()
                                self.websites.remove(website)
                                config["available_websites"].remove(website_conf)  # drop the config entry
                                conf_file.seek(0)  # rewind so the dump overwrites from the start
                                json.dump(config, conf_file, indent=4)
                                conf_file.truncate()  # remove any remaining part
                                result = True
                                break
        if result:
            self.refresh_websites()
        return result

    def show_websites(self, w_ids=None):
        """Print a table of the websites matching *w_ids* (all when None)."""
        from tabulate import tabulate
        websites = []
        for website in self.get_websites(w_ids):
            websites.append([website.id, website.name, website.url, website.server])
        print(tabulate(websites, headers=["ID", "Name", "URL", "Server"]))

    def show_stream_ids(self, w_ids=None):
        """Print a table of stream IDs of the websites matching *w_ids* (all when None)."""
        from tabulate import tabulate
        stream_ids = []
        for website in self.get_websites(w_ids):
            website_id = website.id
            website_name = website.name
            for stream_id in website.stream_ids:
                stream_ids.append([website_id, website_name, stream_id["account_name"], stream_id["key"]])
                # Blank the website columns after the first row of each site so
                # repeated values are not re-printed in the table.
                website_id = ""
                website_name = ""
        print(tabulate(stream_ids, headers=["Website ID", "Website Name", "Account Name", "Key"]))

    def __create_websites(self):
        """Create website objects from the bundled config.json file."""
        with open(self.streamer_config_file) as conf_file:
            available_websites = json.load(conf_file)["available_websites"]
            for website in available_websites:
                self.websites.append(StreamWebSite(website["name"], website["url"], website["server"]))

    def __set_websites(self):
        """Instantiate a website object for every record in the database."""
        for website in self.websites_table.all():
            self.websites.append(StreamWebSite(website["name"], website["url"], website["server"], website["to_be_used"], website["stream_ids"], website["active_stream_id"], website["id"]))

    def __check_folders(self):
        """Create the streaming folder if it does not exist yet."""
        if not os.path.exists(self.folder):
            os.mkdir(self.folder)
class StreamWebSite:
    """A streaming platform (website) record: URL, RTMP server and stream keys.

    Instances persist themselves to the TinyDB "websites" table on creation and
    on every mutation; stream keys are additionally mirrored into `.key` files
    under the streaming folder via :func:`__set_key_files`.
    """
    def __init__(self, name, url, server, to_be_used=False, stream_ids=None, active_stream_id=None, id=None):
        """Initialize the website record and upsert it into the database.

        Args:
            name:                 Name of the WebSite. youtube, facebook etc.
            url:                  Website's page URL.
            server:               Website's live-stream server RTMP URL.
            to_be_used (bool):    Whether the website takes part in live streams.
            stream_ids:           List of stream-key dicts of the website.
            active_stream_id:     Stream-key dict used in the current stream.
            id:                   Unique ID of the website (generated when None).
        """
        self.id = id
        if not id:
            self.id = str(uuid.uuid1())
        self.stream_ids = stream_ids
        if not stream_ids:
            self.stream_ids = []
        self.active_stream_id = active_stream_id
        if not active_stream_id:
            self.active_stream_id = {}
        self.name = name
        self.url = url
        self.server = server
        self.to_be_used = to_be_used
        # Folder layout: <dot_t_system>/streaming/{keys,websites/<name>}
        self.streaming_folder = f'{dot_t_system_dir}/streaming'
        self.keys_folder = f'{self.streaming_folder}/keys'
        self.parent_folder = f'{self.streaming_folder}/websites'
        self.folder = f'{self.parent_folder}/{self.name}'
        self.key_file = f'{self.keys_folder}/{self.name}.key'
        self.__check_folders()
        self.table = DBFetcher(self.streaming_folder, "db", "websites").fetch()
        self.__db_upsert()
    def set_usage_stat(self, to_be_used):
        """Mark the website as to-be-used (or not) on live streams and persist.

        Args:
            to_be_used (bool):    Usage status of the website on live streams.
        """
        self.to_be_used = to_be_used
        self.__db_upsert(force_insert=True)
    def activate_stream_key(self, account_name):
        """Make the given account's key the active one for the current stream.

        Deactivates any previously active key, writes the key into the
        website's `.key` file and persists the change.

        Args:
            account_name (str):   Name of the website's account.

        Returns:
            bool:  True when *account_name* was found, False otherwise.
        """
        for stream_id in self.stream_ids:
            if stream_id["is_active"]:
                stream_id["is_active"] = False
                break
        for stream_id in self.stream_ids:
            if stream_id["account_name"] == account_name:
                with open(self.key_file, "w+") as key_file:
                    key_file.write(stream_id["key"])
                    key_file.close()
                stream_id["is_active"] = True
                self.active_stream_id = stream_id
                self.__db_upsert(force_insert=True)
                return True
        return False
    def upsert_stream_key(self, account_name, key):
        """Insert a new stream key, or update the key of an existing account.

        Args:
            account_name (str):   Name of the website's account.
            key (str):            Hexadecimal live stream key of the account.
        """
        is_update = False
        for stream_id in self.stream_ids:
            if account_name == stream_id["account_name"]:
                stream_id["key"] = key
                is_update = True
                break
        if not is_update:
            self.stream_ids.append({"account_name": account_name, "key": key, "key_file": f'{self.folder}/{account_name}.key', "is_active": False})
        self.__db_upsert(force_insert=True)
        # Mirror the (new or changed) key into this account's .key file.
        self.__set_key_files(account_name)
        return True
    def remove_stream_key(self, account_name):
        """Remove an account's stream key from the record, DB and disk.

        Args:
            account_name (str):   Name of the website's account.

        Returns:
            bool:  True when *account_name* was found, False otherwise.
        """
        for stream_id in self.stream_ids:
            if account_name == stream_id["account_name"]:
                self.stream_ids.remove(stream_id) # for removing object from list
                self.__db_upsert(force_insert=True)
                os.remove(stream_id["key_file"])
                return True
        return False
    def update_self(self, url, server):
        """Update the website's URL and/or RTMP server (falsy values are kept)."""
        if url:
            self.url = url
        if server:
            self.server = server
        self.__db_upsert(force_insert=True)
    def delete_self(self):
        """Remove this website's record from the database."""
        self.table.remove((Query().id == self.id))
    def __db_upsert(self, force_insert=False):
        """Insert (or, with *force_insert*, update) the record in the database.

        Args:
            force_insert (bool):  Overwrite an existing record with the same id.

        Returns:
            str:  "Already Exist" when skipped, empty string otherwise.
        """
        if self.table.search((Query().id == self.id)):
            if force_insert:
                self.table.update({'id': self.id, 'name': self.name, 'url': self.url, 'server': self.server, 'to_be_used': self.to_be_used, 'stream_ids': self.stream_ids, 'active_stream_id': self.active_stream_id}, Query().id == self.id)
            else:
                return "Already Exist"
        else:
            self.table.insert({
                'id': self.id,
                'name': self.name,
                'url': self.url,
                'server': self.server,
                'to_be_used': self.to_be_used,
                'stream_ids': self.stream_ids,
                'active_stream_id': self.active_stream_id
            })  # insert the given data
        return ""
    def __set_key_files(self, account_name=None):
        """Write `.key` files holding stream keys for live-stream entry.

        Args:
            account_name (str):   Restrict writing to this account; when None,
                                  every stored key is (re)written.

        Returns:
            bool:  True when at least one matching key was found/written.
        """
        result = False
        stream_ids = []
        if account_name:
            for stream_id in self.stream_ids:
                if account_name == stream_id["account_name"]:
                    stream_ids.append(stream_id)
                    result = True
                    break
        else:
            stream_ids = self.stream_ids
        for stream_id in stream_ids:
            result = True
            with open(stream_id["key_file"], "w+") as key_file:
                key_file.write(stream_id["key"])
                key_file.close()
        return result
    def __check_folders(self):
        """Create the websites/keys/per-site folders if not created before."""
        if not os.path.exists(self.parent_folder):
            os.mkdir(self.parent_folder)
        if not os.path.exists(self.keys_folder):
            os.mkdir(self.keys_folder)
        if not os.path.exists(self.folder):
            os.mkdir(self.folder)
| true |
99e00c658b32a59c1226665ec0b7855a920b6208 | Python | meharbhatia/MIDAS_KG_Construction | /working_code/rest_code/RuleBased_TripletsExtraction.py | UTF-8 | 7,217 | 2.671875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import unicodedata
import nltk
import re
from nltk.tokenize import word_tokenize
from nltk.tag import pos_tag
from nltk import sent_tokenize
import csv
# Sample input sentence used when running this module ad hoc; the commented
# lines below are alternative test sentences.
ex = 'BYD debuted its E-SEED GT concept car and Song Pro SUV alongside its all-new e-series models at the Shanghai International Automobile Industry Exhibition.'
# ex = "John was very fast"
# ex = "Running is the favourite hobby of John."
# ex = "John ran away with his sister."
# ex = "John eats Apple, Orange and Coconut."
# ex = "The company also showcased its latest Dynasty series of vehicles, which were recently unveiled at the company’s spring product launch in Beijing."
# ex = "A total of 23 new car models were exhibited at the event, held at Shanghai’s National Convention and Exhibition Center, fully demonstrating the BYD New Architecture (BNA) design, the 3rd generation of Dual Mode technology, plus the e-platform framework."
# ex = "John is doing excercise."
# ex = "John is a good boy."
# ex = "Father of John taught him dancing."
def getTriplets(ex):
    """Extract (subject, relation, object) triplets from sentence *ex*.

    Rule-based pipeline over NLTK POS tags:
      1. merge consecutive NN tokens into a single noun phrase;
      2. pair nouns across verbs (VB*), conjunctions (CC) and prepositions (IN);
      3. attach adjectives (JJ) as ("quality", noun) triplets;
      4. attach cardinal numbers (CD) as ("number", noun) triplets.

    The passes are order-dependent and share the merged token list ``ml``.

    Returns:
        list[tuple]: (n1, relation, n2) string triplets.
    """
    # print(ex)
    def preprocess(sent):
        # Tokenize and POS-tag the raw sentence.
        sent = nltk.word_tokenize(sent)
        sent = nltk.pos_tag(sent)
        return sent
    sent = preprocess(ex)
    # print("NORMAL POS TAGGING")
    # print(sent)
    # print("NER DEFAULT")
    # print(nltk.ne_chunk(sent))
    triplets = []
    nn = ()
    ml = []
    nstr = ""
    k=0
    # All the consecutive NN pos tags are merged in this loop
    while k<len(sent):
        if ("NN" in sent[k][1]):
            nstr = nstr + sent[k][0] + " "
        else:
            if (len(nstr)>0):
                nstr = nstr.strip()
                nn = (nstr,) + ("NN",)
                ml.append(nn)
                nstr = ""
                ml.append(sent[k])
            else:
                ml.append(sent[k])
        k+=1
    # print("NER MODIFIED")
    # print(nltk.ne_chunk(ml))
    ignore_verbs = ["is","was","were","will","shall","must","should","would","can","could","may","might"] #verbs which are often modal verbs
    entities = []
    k=0
    # Here, all nouns NN are catched by their verb VB to form a triplet
    while k<len(ml):
        if ("NN" in ml[k][1] or "VBG" in ml[k][1]): # VBG are verbs acting as nouns
            entities.append(ml[k]) # unless you encounter a VB or CC or IN tag, keep a stack of all nouns
        elif ("VB" in ml[k][1]): # verb found
            ismodal = False
            for x in ignore_verbs:
                if (ml[k][0] == x):
                    ismodal = True
                    break
            k2 = k # remember the verb
            k+=1
            while k < len(ml): #find the noun coming after the verb
                if ("NN" in ml[k][1] or "VBG" in ml[k][1]):
                    break
                if (ismodal and "VB" in ml[k][1]):
                    # A modal was followed by the real verb: use that verb instead.
                    k2 = k
                    ismodal = False
                k+=1
            if (k < len(ml)): # if there exists a noun after the verb
                if(len(entities) > 0): # if there exists a noun before the verb (in the stack)
                    n1 = entities[-1][0]
                    # entities = entities[:-1] # remove that noun from the stack
                    if (k2+1 < len(ml) and "IN" in ml[k2+1][1]):
                        r = ml[k2][0] + " " + ml[k2+1][0]
                    else:
                        r = ml[k2][0]
                    n2 = ml[k][0]
                    triplets.append((n1,)+(r,)+(n2,))
        elif ("CC" in ml[k][1]): #conjuction like AND OR found
            if (len(triplets)>0): # if there already exists a triplet before
                while k < len(ml):
                    if ("NN" in ml[k][1] or "VBG" in ml[k][1]): #find the NN coming just after CC
                        break
                    k+=1
                if (k<len(ml)): # if there exists such a NN
                    n1 = triplets[-1][0] # extract node 1 from last triplet
                    r = triplets[-1][1] # extract relation from last triplet
                    n2 = ml[k][0] # select this NN you just got
                    triplets.append((n1,)+(r,)+(n2,))
            elif (len(entities)>0): #list of nouns (@maher not completed yet)
                while (k<len(ml)):
                    if ("NN" in ml[k][1] or "VBG" in ml[k][1]):
                        break # final entry in the list found
                    k+=1
                # if (k<len(ml)):
        elif ("IN" in ml[k][1]): # a preposition found
            if (len(triplets)>0):
                k2 = k
                while k < len(ml): # find a noun NN after the preposition
                    if ("NN" in ml[k][1] or "VBG" in ml[k][1]):
                        entities.append(ml[k]) # put the noun in entities stack
                        break
                    k+=1
                if (k<len(ml)): #if at least one noun is found
                    if(ml[k2][0] == "of" or ml[k2][0] == "alongside"): #these two prepositions are more often associated with object rather than subject (of last triplet)
                        n1 = triplets[-1][2] # node 2 of last triplet
                        r = ml[k2][0]
                        n2 = n2 = ml[k][0]
                    else:
                        n1 = triplets[0][0] # node 1 of first triplet
                        r = triplets[0][1]+" "+ml[k2][0] # relation of first triplet + preposition of this
                        n2 = ml[k][0]
                    triplets.append((n1,)+(r,)+(n2,))
            elif (len(entities) > 0):
                k2 = k
                while k < len(ml): # find a noun NN after the preposition
                    if ("NN" in ml[k][1] or "VBG" in ml[k][1]):
                        entities.append(ml[k]) # put the noun in entities stack
                        break
                    k+=1
                if (k<len(ml)):
                    n1 = entities[-2][0]
                    r = ml[k2][0]
                    n2 = entities[-1][0]
                    triplets.append((n1,)+(r,)+(n2,))
        k+=1
    k=0
    entities = []
    # here we select the adjectives
    while k<len(ml):
        if ("JJ" in ml[k][1]):
            n1 = ml[k][0]
            r = "quality"
            k2 = k
            while k<len(ml): #find the NN coming just next to JJ
                if ("NN" in ml[k][1]):
                    break
                k+=1
            if (k<len(ml) and k==k2+1): # if NN found
                n2 = ml[k][0]
                triplets.append((n1,)+(r,)+(n2,))
            elif (len(entities)>0): # if no NN found after JJ and stack is not empty
                n2 = entities[-1][0] # assume that the adjective is associated with last NN in stack
                entities = []
                triplets.append((n1,)+(r,)+(n2,))
        elif "NN" in ml[k][1]:
            entities.append(ml[k]) # stack of nouns NN
        k+=1
    k=0
    entities = []
    # here we select the cardinal numbers
    while k<len(ml):
        if ("CD" in ml[k][1]):
            n1 = ml[k][0]
            r = "number"
            while k<len(ml): #find the NN coming just next to CD
                if ("NN" in ml[k][1]):
                    break
                k+=1
            if (k<len(ml)): # if NN found
                n2 = ml[k][0]
                triplets.append((n1,)+(r,)+(n2,))
            elif (len(entities)>0): # if no NN found after CD and stack is not empty
                n2 = entities[-1][0] # assume that the number is associated with last NN in stack
                entities = []
                triplets.append((n1,)+(r,)+(n2,))
        elif "NN" in ml[k][1]:
            entities.append(ml[k]) # stack of nouns NN
        k+=1
    return triplets
# Batch driver: read the contest CSV, extract triplets from every article and
# dump them (with industry/index columns) into tempcsv.csv.
output = [['industry', 'index', 's1', 'r', 's2']]
#change path
with open('/home/ritwik/Downloads/icdm_contest_data.csv', 'r') as csvFile:
    reader = csv.reader(csvFile)
    next(reader) #so that first line is ignored
    k=0
    tlen = 300
    for row in reader:
        article = row[1] #picking article
        triplets = []
        for x in sent_tokenize(article):
            ml = getTriplets(x) #getting triplets for each sentence
            triplets+=ml
        #at this point triplets variable contains all the triples from the article
        tl = []
        for x in triplets:
            # One output row per triplet: industry (row[2]), index (row[0]),
            # then the triplet's subject/relation/object.
            ttl = []
            ttl.append(row[2])
            ttl.append(row[0])
            ttl.append(x[0])
            ttl.append(x[1])
            ttl.append(x[2])
            tl.append(ttl)
        output+=tl
        k+=1
        print(str(k)+" / "+str(tlen))
        # if k>2:
        #     break
    # Write comma-separated output; strip commas and normalise curly quotes so
    # the field values do not break the CSV format.
    # NOTE(review): `file` shadows the Python 2 builtin of the same name.
    file = open('tempcsv.csv','w')
    for x in output:
        for y in x:
            file.write(y.replace(',','').replace('‘','\'').replace('’','\'').replace('“','\'').replace('”','\'')+', ')
        file.write("\n")
    file.close()
    csvFile.close()
| true |
9c9c45e27dfc64247944656d928bfe3b107dd83f | Python | 0xhughes/credential_db | /MD5colSHA1s_to_SHA1col.py | UTF-8 | 1,299 | 2.78125 | 3 | [
"MIT"
] | permissive | import sqlite3
import sys
import os
def menu():
sane = 1
while sane == 1:
print "[ - ] Please enter absolute path to cred. DB: "
in_path = raw_input()
if os.path.exists(in_path):
pass
sane = 0
else:
os.system('cls' if os.name == 'nt' else 'clear')
print "[ - ] Invalid path, try again."
return(in_path)
def main(in_path):
in_path = in_path
try:
db_conn = sqlite3.connect(in_path)
except sqlite3.OperationalError:
print "[ - ] SQLite connection error to database, check path, exiting."
sys.exit(1)
curs = db_conn.cursor()
rows = curs.execute("SELECT * FROM main WHERE length(srcMD5) = 40;")
cnt = 0
print "[ - ] Running..."
for row in rows:
curs = db_conn.cursor()
pri_Index = str(row[0])
bad_md5 = str(row[3])
db_conn.execute('UPDATE main SET srcSHA1 = srcMD5 WHERE pri_Index = "'+pri_Index+'";')
db_conn.execute('UPDATE main SET srcMD5 = NULL WHERE pri_Index = "'+pri_Index+'";')
cnt+=1
print "[ + ] "+str(cnt)+" rows updated, press enter to commit, or CTRL+C to cancel."
end = raw_input()
print "[ - ] Committing changes..."
db_conn.commit()
print "[ - ] Done."
db_conn.close()
# Entry point: prompt for the DB path, run the migration; CTRL+C at any prompt
# exits cleanly without committing.
try:
    main(menu())
except KeyboardInterrupt:
    print "[ - ] Caught keyboard interrupt, closing"
    sys.exit(0)
| true |
defe89cbd987fca04d2ae9677c9d814731768733 | Python | hrgunn/MLH | /MVC Intro(V:C)/rpg_MVC_example/s_controller.py | UTF-8 | 1,105 | 2.609375 | 3 | [] | no_license | from practice_view import View
class Phone_Book:
def __init__(self):
self.view = View()
self.model = Model()
def return_or_new(self):
answer = self.view.return_or_new()
if answer == "Yes":
info = self.view.welcome_back()
check = self.model.return_check(info[0],info[1])
counter = 0
while counter < 3 and check == False:
info = self.view.welcome_back()
check = self.model.return_check(info[0],info[1])
counter += 1
if check == True:
self.search_db()
else:
self.gather_info()
else:
self.gather_info()
def gather_info(self):
info = self.view.gatherer()
check = self.model.check_username(info[0])
while check != None:
info = self.view.gatherer2()
check = self.model.check_username(info[0])
else:
self.contact_info(info[0],info[1])
def contact_info(self,username,password):
contact_info = self.view.contact_info()
self.model.save_info(contact_info[0],contact_info[1],contact_info[2],username,password)
self.view.info_saved()
self.search_db()
def search_db(self):
self.view.logged_on()
new = Phone_Book()
new.return_or_new() | true |
2279faf8e9048f22b53d9af441070708a003899a | Python | caozhang1996/CS231N_Two_Layer_Neural_Network | /classifier/neural_net.py | UTF-8 | 6,800 | 3.078125 | 3 | [] | no_license | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 4 20:03:11 2019
@author: caozhang
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
class TwoLayerNet(object):
"""
"""
def __init__(self, input_size, hidden_size, output_size, weight_scale=0.0001):
"""
Inputs:
- input_size: The dimension D of the input data.
- hidden_size: The number of neurons H in the hidden layer.
- output_size: The number of classes C.
"""
self.params = {}
self.params['w1'] = weight_scale * np.random.randn(input_size, hidden_size)
self.params['b1'] = np.zeros(hidden_size)
self.params['w2'] = weight_scale * np.random.randn(hidden_size, output_size)
self.params['b2'] = np.zeros(output_size)
def loss(self, X, Y=None, reg=0.0):
"""
Compute the loss and gradients for a two layer fully connected neural
network.
Inputs:
- X: Input data of shape (N, D). Each X[i] is a training sample.
- Y: Vector of training labels. y[i] is the label for X[i], and each y[i] is
an integer in the range 0 <= y[i] < C. This parameter is optional; if it
is not passed then we only return scores, and if it is passed then we
instead return the loss and gradients.
- reg: Regularization strength.
Returns:
If y is None, return a matrix scores of shape (N, C) where scores[i, c] is
the score for class c on input X[i].
If y is not None, instead return a tuple of:
- loss: Loss (data loss and regularization loss) for this batch of training
samples.
- grads: Dictionary mapping parameter names to gradients of those parameters
with respect to the loss function; has the same keys as self.params.
"""
w1, b1 = self.params['w1'], self.params['b1']
w2, b2 = self.params['w2'], self.params['b2']
N, D = X.shape
scores = None
hidden_out = np.maximum(0, X.dot(w1) + b1) # relu activation function
scores = hidden_out.dot(w2) + b2 # 输出层不需要激活函数
if Y is None:
return scores
# compute loss
loss = None
scores -= np.max(scores, axis=1, keepdims=True)
exp_scores = np.exp(scores)
sum_exp_scores = np.sum(exp_scores, axis=1, keepdims=True)
probs = exp_scores / sum_exp_scores
data_loss = np.sum(-np.log(probs[range(N), Y])) / N
reg_loss = 0.5 * reg * (np.sum(w1 * w1) + np.sum(w2 * w2))
loss = data_loss + reg_loss
# backward pass: compute gradients
grads = {}
dscores = probs.copy() # 计算在得分上的梯度
dscores[np.arange(N), Y] -= 1
dscores /= N
dw2 = np.dot(hidden_out.T, dscores)
db2 = np.sum(dscores, axis=0)
dhidden = np.dot(dscores, w2.T) # 对隐藏层变量求梯度
dhidden[hidden_out <= 0] = 0
dw1 = np.dot(X.T, dhidden)
db1 = np.sum(hidden_out, axis=0)
dw2 += reg * w2
dw1 += reg * w1
grads['w1'] = dw1
grads['b1'] = db1
grads['w2'] = dw2
grads['b2'] = db2
return loss, grads
def train(self, X, Y, X_val, Y_val,
lr_rate=1e-3, reg_strength=1e-5, lr_rate_decay=0.95,
num_iters=1000, batch_size=200, verbose=False):
"""
Inputs:
- X: A numpy array of shape (N, D) containing training data; there are N
training samples each of dimension D.
- y: A numpy array of shape (N,) containing training labels; y[i] = c
means that X[i] has launtitled2bel 0 <= c < C for C classes.
- lr_rate: (float) learning rate for optimization.
- reg: (float) regularization strength.
- num_iters: (integer) number of steps to take when optimizing
- batch_size: (integer) number of training examples to use at each step.
- verbose: (boolean) If true, print progress during optimization.
"""
num_train = X.shape[0]
iterations_per_epoch = max(num_train / batch_size, 1)
loss_history = []
train_acc_history = []
val_acc_history = []
for it in range(num_iters):
X_batch = None
Y_batch = None
index = np.random.choice(num_train, batch_size, replace=True)
X_batch = X[index, :]
Y_batch = Y[index]
loss, grads = self.loss(X_batch, Y_batch, reg=reg_strength)
loss_history.append(loss)
self.params['w2'] -= lr_rate * grads['w2']
self.params['b2'] -= lr_rate * grads['b2']
self.params['w1'] -= lr_rate * grads['w1']
self.params['b1'] -= lr_rate * grads['b1']
if verbose == True and it % 50 == 0:
print ('iteration: %d/%d, loss: %f' % (it, num_iters, loss))
# Every epoch, check train and val accuracy and decay learning rate.
if it % iterations_per_epoch == 0:
# check accuracy
train_acc = np.mean(self.predict(X_batch) == Y_batch)
val_acc = np.mean(self.predict(X_val) == Y_val)
train_acc_history.append(train_acc)
val_acc_history.append(val_acc)
lr_rate *= lr_rate_decay
return {'loss history': loss_history,
'train acc history': train_acc_history,
'val acc history': val_acc_history
}
def predict(self, X):
"""
Use the trained weights of this two-layer network to predict labels for
data points. For each data point we predict scores for each of the C
classes, and assign each data point to the class with the highest score.
Inputs:
- X: A numpy array of shape (N, D) giving N D-dimensional data points to
classify.
Returns:
- y_pred: A numpy array of shape (N,) giving predicted labels for each of
the elements of X. For all i, y_pred[i] = c means that X[i] is predicted
to have class c, where 0 <= c < C.
"""
y_pred = None
hidden_out = np.maximum(0, X.dot(self.params['w1']) + self.params['b1'])
scores = hidden_out.dot(self.params['w2']) + self.params['b2']
y_pred = np.argmax(scores, axis=1)
return y_pred | true |
84674e7be3204cdfa46412f649d6408e4d66d1c5 | Python | dawidsielski/Python-learning | /sites with exercises/python101.readthedocs.io/2.Matplotlib/2.4 Quadratic functions.py | UTF-8 | 402 | 3.65625 | 4 | [] | no_license | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import pylab
import numpy as np


def main():
    """Read quadratic coefficients and plot y = a*x**2 + b*x + c on [-10, 10)."""
    a = int(input("Enter a coefficient: "))
    b = int(input("Enter b coefficient: "))
    c = int(input("Enter c coefficient: "))
    x_values = np.arange(-10, 10, 0.1)
    # Vectorised evaluation replaces the original element-by-element loop.
    y_values = a * x_values ** 2 + b * x_values + c
    pylab.plot(x_values, y_values)
    pylab.grid(True)
    pylab.show()


if __name__ == "__main__":
    # Guard so importing the module does not block on input()/show().
    main()
70211a3adde67aab18accdf5b07c3b3cb3707ea1 | Python | mrlooi/labelme_scripts | /compare_dir_basenames.py | UTF-8 | 584 | 3 | 3 | [] | no_license | import argparse
import argparse
import glob
import os.path as osp


def list_basenames(directory):
    """Return the extension-less base names of every entry directly in *directory*.

    Uses os.path.splitext, so names without a dot are kept intact (the original
    `f[:f.rfind('.')]` silently dropped the last character of dot-less names).
    """
    return [osp.splitext(osp.basename(p))[0]
            for p in glob.glob(osp.join(directory, "*"))]


def main():
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-d1", "--dir1", required = True,
        help = "Directory 1")
    ap.add_argument("-d2", "--dir2", required = True,
        help = "Directory 2")
    args = vars(ap.parse_args())

    d1_files = list_basenames(args["dir1"])
    # A set makes each membership test below O(1) instead of O(len(d2)).
    d2_files = set(list_basenames(args["dir2"]))
    files_not_in_d2 = [f for f in d1_files if f not in d2_files]
    print(files_not_in_d2)


if __name__ == "__main__":
    main()
| true |
a9b0a04638d55ccc04738c7c62a14c791d645d4a | Python | RocheleauLab/Cell-Segmentation-Using-Deep-Learning | /utils/.ipynb_checkpoints/feature_extraction-checkpoint.py | UTF-8 | 1,842 | 2.921875 | 3 | [] | no_license | import numpy as np
def extract_anisotropy_features (Parameters, image, mask=None):
    """Given an image and a mask, return a dict of anisotropy features.

    *image* is an (h, w, channels) array with 2 channels (Para, Perp) or
    3 channels (Open, Para, Perp).  Per-channel means are computed over the
    non-zero pixels only, then the anisotropy average is derived — first
    uncorrected, then overwritten with the G-factor-corrected value using
    Parameters.kA / kB / kC.  *mask* is accepted for API compatibility but
    currently unused.
    """
    kA, kB, kC = Parameters.kA, Parameters.kB, Parameters.kC
    _, _, n_channels = image.shape
    if n_channels == 2:
        channel_names = ["Para", "Perp"]
    elif n_channels == 3:
        channel_names = ["Open", "Para", "Perp"]
    features = {}
    for idx, channel_name in enumerate(channel_names):
        plane = image[:, :, idx]
        # Mean over the segmented (non-zero) pixels only.
        features[channel_name] = np.sum(plane) / np.count_nonzero(plane)
    para_value = features['Para']
    perp_value = features['Perp']
    # Uncorrected anisotropy (overwritten below by the corrected value).
    features['AniAvg'] = (para_value - perp_value) / (para_value + 2 * perp_value)
    # Corrected intensities share one denominator.
    denom = kA * kB + kB * kB - kA * kC - kC * kC
    ix = ((kA + kB) * perp_value - (kA + kC) * para_value) / denom
    iy = (kB * para_value - kC * perp_value) / denom
    features['Ix'] = ix
    features['Iy'] = iy
    features['AniAvg'] = (ix - iy) / (ix + 2 * iy)
    return features
def extract_intensity_features(Parameters, image, mask=None):
    """Given an image and a mask, return a dict with the intensity feature.

    The feature is the mean of channel 0 over its non-zero pixels.
    *Parameters* and *mask* are accepted for API compatibility but unused.
    """
    intensity_plane = image[:, :, 0]
    mean_intensity = np.sum(intensity_plane) / np.count_nonzero(intensity_plane)
    return {"Intensity": mean_intensity}
def extract_area(Parameters, image, mask = None):
    """Return the segmented (non-zero) pixel count and its fraction of the image.

    *Parameters* and *mask* are accepted for API compatibility but unused.
    """
    nonzero_count = np.count_nonzero(image)
    return {
        'Segmented_Area': nonzero_count,
        'Segmented_Percentage': nonzero_count / image.size,
    }
| true |
30970808a8b23851b262e4378fc0763aa8ff3f4f | Python | phyothiriswe3/4CE | /streaming.py | UTF-8 | 6,495 | 2.515625 | 3 | [] | no_license | import io
import picamera
import socket, traceback
import serial
from time import sleep
import RPi.GPIO as GPIO
import time
import logging
import socketserver
import _thread
from threading import Condition
from http import server
# Servo setup: two PWM outputs on physical pins 7 and 11 (BOARD numbering).
# 50 Hz is the standard hobby-servo frame rate.
GPIO.setmode(GPIO.BOARD)
GPIO.setup(7,GPIO.OUT)
GPIO.setup(11,GPIO.OUT)
p=GPIO.PWM(7,50)
p1=GPIO.PWM(11,50)
# Initial duty cycles 8 / 7.5 — presumably the servos' neutral positions;
# TODO confirm against the hardware.
p.start(8)
p1.start(7.5)
# Static HTML page served at /index.html; it embeds the MJPEG stream endpoint.
PAGE="""\
<html>
<head>
<title>Raspberry Pi - Surveillance Camera</title>
</head>
<body>
<center><img src="stream.mjpg" width="640" height="480"></center>
</body>
</html>
"""
def streaming():
    """Serve the Pi camera as an MJPEG HTTP stream on port 8000.

    Blocks forever (serve_forever); intended to run in its own thread.
    """
    class StreamingOutput(object):
        # picamera "file-like" sink: collects JPEG data and publishes each
        # complete frame to waiting HTTP clients via a Condition.
        def __init__(self):
            self.frame = None
            self.buffer = io.BytesIO()
            self.condition = Condition()
        def write(self, buf):
            # 0xFFD8 is the JPEG start-of-image marker, i.e. a new frame begins.
            if buf.startswith(b'\xff\xd8'):
                # New frame, copy the existing buffer's content and notify all
                # clients it's available
                self.buffer.truncate()
                with self.condition:
                    self.frame = self.buffer.getvalue()
                    self.condition.notify_all()
                self.buffer.seek(0)
            return self.buffer.write(buf)
    class StreamingHandler(server.BaseHTTPRequestHandler):
        # Routes: / -> redirect, /index.html -> PAGE, /stream.mjpg -> MJPEG.
        def do_GET(self):
            if self.path == '/':
                self.send_response(301)
                self.send_header('Location', '/index.html')
                self.end_headers()
            elif self.path == '/index.html':
                content = PAGE.encode('utf-8')
                self.send_response(200)
                self.send_header('Content-Type', 'text/html')
                self.send_header('Content-Length', len(content))
                self.end_headers()
                self.wfile.write(content)
            elif self.path == '/stream.mjpg':
                self.send_response(200)
                self.send_header('Age', 0)
                self.send_header('Cache-Control', 'no-cache, private')
                self.send_header('Pragma', 'no-cache')
                self.send_header('Content-Type', 'multipart/x-mixed-replace; boundary=FRAME')
                self.end_headers()
                try:
                    # Push one JPEG part per captured frame until the client
                    # disconnects (which raises and ends the loop).
                    while True:
                        with output.condition:
                            output.condition.wait()
                            frame = output.frame
                        self.wfile.write(b'--FRAME\r\n')
                        self.send_header('Content-Type', 'image/jpeg')
                        self.send_header('Content-Length', len(frame))
                        self.end_headers()
                        self.wfile.write(frame)
                        self.wfile.write(b'\r\n')
                except Exception as e:
                    logging.warning(
                        'Removed streaming client %s: %s',
                        self.client_address, str(e))
            else:
                self.send_error(404)
                self.end_headers()
    class StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):
        allow_reuse_address = True
        daemon_threads = True
    with picamera.PiCamera(resolution='640x480', framerate=24) as camera:
        output = StreamingOutput()
        #Uncomment the next line to change your Pi's Camera rotation (in degrees)
        #camera.rotation = 90
        camera.start_recording(output, format='mjpeg')
        try:
            # Listen on all interfaces, port 8000.
            address = ('', 8000)
            # NOTE(review): this local name shadows the imported `http.server`
            # module alias `server` used above — confusing but harmless here.
            server = StreamingServer(address, StreamingHandler)
            server.serve_forever()
        finally:
            camera.stop_recording()
def servo():
    """Receive one UDP orientation packet and move both servos accordingly.

    Binds to 192.168.43.65:5555, reads a single comma-separated message of
    nine fields (presumably two sensor triplets plus ids — confirm against
    the sending app), maps the two z-values onto PWM duty cycles via the
    threshold tables below, and applies them to the global PWM channels
    `p` and `p1`.  Handles exactly one message per call.
    """
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        s.bind(('192.168.43.65', 5555))
        print ("Listening for broadcasts...")
        time.sleep(0.2)
        message, address = s.recvfrom(8192)
        a =message.decode("utf-8")
        no1,no2,x1,y1,z1,no3,x2,y2,z2= a.split(",")
        print(a)
        print(no1)
        print(no2)
        print(x1)
        print(y1)
        print(z1)
        print(no3)
        print(x2)
        print(y2)
        print(z2)#whole message signal
        # Only the two z components drive the servos.
        a1=float(z1)
        b1=float(z2)
        s.close();
        # Map z1 onto the first servo's duty cycle.  NOTE(review): the values
        # are not monotonic near the bottom (7 then 7.5), so the branch ORDER
        # matters — do not reorder.
        if(a1>6): #uppper limit
            a=12.3
        elif(a1>5):
            a=11.9
        elif(a1>4):
            a=11.2
        elif(a1>3):
            a=10.8
        elif(a1>2):
            a=10.5
        elif(a1>1):
            a=10.2
        elif(a1>0):
            a=9 #middle limit
        elif(a1>-1):
            a=8
        elif(a1>-2):
            a=7
        elif(a1>-3):
            a=7.5
        else:
            a=6.9
        time.sleep(0.2)
        # Map z2 onto the second servo's duty cycle.  The commented values
        # (e.g. #b=180#12.5) appear to be the target angle in degrees next to
        # the duty cycle actually used.
        if(b1>13):
            #b=180#12.5
            b=12.5
        elif(b1>10):
            #b=170#12.3
            b=12.3
        elif(b1>8):
            #b=160#11.7
            b=11.7
        elif(b1>7):
            #b=150#11.1
            b=11.1
        elif(b1>5):
            #b=140#10.5
            b=10.5
        elif(b1>4):
            #b=130#9.9
            b=9.9
        elif(b1>3):
            #b=120#9.3
            b=9.3
        elif(b1>2):
            #b=110#8.7
            b=8.7
        elif(b1>1):
            #b=100#8.1
            b=8.1
        elif(b1>0):
            #b=90 #7.5
            b=7.5
        elif(b1>-5):
            #b=80#7.3
            b=7.3
        elif(b1>-7):
            #b=60#6.7
            b=6.7
        elif(b1>-9):
            #b=50#6.0
            b=6.0
        elif(b1>-14):
            #b=40#5.3
            b=5.3
        elif(b1>-17):
            #b=30#4.6
            b=4.6
        elif(b1>-20):
            #b=20#3.9
            b=3.9
        elif(b1>-21):
            #b=10#3.2
            b=3.2
        else:
            #b=0#2.5
            b=2.5
        # Apply the new positions with settling pauses.
        p1.ChangeDutyCycle(b)
        time.sleep(0.4)
        p.ChangeDutyCycle(a)
        time.sleep(0.4)
    except (KeyboardInterrupt, SystemExit):
        raise
    except:
        traceback.print_exc()
if __name__ == "__main__":
    # Start the MJPEG server once in a background thread, then keep handling
    # servo packets in the main thread.  The original looped forever spawning
    # new threads of both functions, called start_new_thread without the
    # required args tuple (TypeError), and used a Python-2 print statement
    # that made this Python-3 file unparseable.
    try:
        _thread.start_new_thread(streaming, ())
    except Exception:
        print("Error: unable to start thread")
    while True:
        servo()
f5c9c2fefac01087d068061ff66d2990e21e8e47 | Python | informatik-mannheim/PyTorchMedical-Workshop | /basic_exercises/codewars/numpy/Array Initialisierungen/simple_testcase.py | UTF-8 | 924 | 3.140625 | 3 | [] | no_license | import numpy as np
#from template import *
#from head import *
def check(user_arr, sol_list, function_name):
    """Report via the kata framework whether *user_arr* equals np.array(*sol_list*),
    naming *function_name* in the failure message."""
    expected = np.array(sol_list)
    failure_message = ("Error at {fct}\nuser result:\n{usr}\nsolution:\n{sol}\n"
                       .format(usr=user_arr, sol=expected, fct=function_name))
    test.expect(np.array_equal(user_arr, expected), failure_message)
# Exercise each kata solution on a 2x2 shape.  The init_array_* functions
# come from the user's submitted template (not defined in this file).
test_shape = (2, 2)
user_arr = init_array_from_range_and_put_it_into_shape(4, test_shape)
sol_arr = [[0, 1], [2, 3]]
check(user_arr, sol_arr, "init_array_from_range_and_put_it_into_shape")
user_arr = init_array_with_given_value(5.5, test_shape)
sol_arr = [[5.5, 5.5], [5.5, 5.5]]
check(user_arr, sol_arr, "init_array_with_given_value")
user_arr = init_array_with_ones(test_shape)
sol_arr = [[1, 1], [1, 1]]
check(user_arr, sol_arr, "init_array_with_ones")
user_arr = init_array_with_zeros(test_shape)
sol_arr = [[0, 0], [0, 0]]
check(user_arr, sol_arr, "init_array_with_zeros")
| true |
f4ebbca69b3973bac4f70f2547bbf3df7936c8d3 | Python | KhushbuAgarwal/Snomed2Vec | /src/embedding_learning/metapath2vec_pattern_path_gen_snomed.py | UTF-8 | 11,606 | 2.890625 | 3 | [] | no_license | import sys
import os
import random
from collections import Counter
class MetaPathGenerator:
    """SNOMED adaptation of the metapath2vec path generator.

    1) Captures disease-drug interactions through direct paths (indirect
       paths via procedures/anatomy are currently disabled in init_graph).
    2) Generates random walk instances by sampling disease-drug-disease
       pathways.

    metapath2vec only supports four node types 'v', 'a', 'i', 'f'; SNOMED
    concepts are mapped as:
        v -> disorder
        a -> anatomy
        i -> chemicals and drugs
        f -> procedure
    """

    def __init__(self, snomed_dir):
        # Adjacency lists keyed by source concept id.  Only the
        # disorder<->drug lists are populated by init_graph today; the others
        # are kept (empty) for API compatibility with older callers.
        self.disorder_drugslist = dict()
        self.disorder_anatlist = dict()
        self.disorder_proclist = dict()
        self.drugs_disorderlist = dict()
        self.drugs_anatlist = dict()
        self.drugs_proclist = dict()
        self.anat_disorderlist = dict()
        self.anat_drugslist = dict()
        self.anat_proclist = dict()
        self.proc_disorderlist = dict()
        self.proc_drugslist = dict()
        self.proc_anatlist = dict()
        print("Getting concept types from", snomed_dir)
        self.disorders, self.drugs, self.anatomy, self.procedures = self.load_node_list(snomed_dir)
        # Internal set copies give O(1) membership tests while scanning the
        # (large) relations file; the public list attributes are unchanged.
        self._disorder_set = set(self.disorders)
        self._drug_set = set(self.drugs)
        self._anatomy_set = set(self.anatomy)
        self._procedure_set = set(self.procedures)
        print("Number of disorders, drugs, antomy, procedures",
              len(self.disorders), len(self.drugs), len(self.anatomy),
              len(self.procedures))
        self.init_graph(snomed_dir)
        print("Done initing graph and node types")
        print("Adj list size :", len(self.disorder_drugslist),
              len(self.drugs_disorderlist))

    def load_node_list(self, snomed_dir):
        """Load the four SNOMED concept-id lists (disorders, drugs, anatomy,
        procedures) from the per-semantic-group files."""
        snomed_graph_dir = snomed_dir + "/Unique_SNOMEDCT_concepts_per_semantic_group/"
        disorders = self.parse_node_list_file(snomed_graph_dir + "/mrconso_snomed_diso_unique.txt")
        drugs = self.parse_node_list_file(snomed_graph_dir + "/mrconso_snomed_chem_unique.txt")
        anatomy = self.parse_node_list_file(snomed_graph_dir + "/mrconso_snomed_anat_unique.txt")
        procedures = self.parse_node_list_file(snomed_graph_dir + "/mrconso_snomed_proc_unique.txt")
        return disorders, drugs, anatomy, procedures

    def parse_node_list_file(self, filename):
        """Return the SNOMED concept ids (column 1) of a '|'-separated file,
        skipping the header line."""
        concept_list = []
        with open(filename, "r") as f:
            f.readline()  # skip header
            for line in f:
                arr = line.strip().split('|')
                concept_list.append(arr[1])  # use SNOMED concept ids
        return concept_list

    def init_graph(self, snomed_dir):
        """Populate the adjacency lists from SNOMEDCT_relations.txt.

        Only disorder- and drug-sourced edges are recorded at the moment;
        anatomy/procedure sources are intentionally skipped (as in the
        original, where those branches were commented out).
        """
        filename = snomed_dir + "/SNOMEDCT_relations.txt"
        print("Reading graph from...", filename)
        with open(filename, "r") as f:
            f.readline()  # skip header
            for line in f:
                arr = line.strip().split('|')
                src = arr[1]
                dest = arr[3]
                if src in self._disorder_set:
                    self.add_disorder(src, dest)
                elif src in self._drug_set:
                    self.add_drugs(src, dest)

    def add_disorder(self, src, dest):
        """Record a disorder -> drug edge; other destination types are ignored."""
        if dest in self._drug_set:
            self.disorder_drugslist.setdefault(src, []).append(dest)

    def add_drugs(self, src, dest):
        """Record a drug -> disorder edge; other destination types are ignored."""
        if dest in self._disorder_set:
            self.drugs_disorderlist.setdefault(src, []).append(dest)

    def add_procedure(self, src, dest):
        """Record a procedure -> {disorder|drug|anatomy} edge.
        Currently never called by init_graph; kept for API compatibility."""
        if dest in self._disorder_set:
            self.proc_disorderlist.setdefault(src, []).append(dest)
        elif dest in self._drug_set:
            self.proc_drugslist.setdefault(src, []).append(dest)
        elif dest in self._anatomy_set:
            self.proc_anatlist.setdefault(src, []).append(dest)

    def add_anatomy(self, src, dest):
        """Record an anatomy -> {disorder|drug|procedure} edge.
        Currently never called by init_graph; kept for API compatibility."""
        if dest in self._disorder_set:
            self.anat_disorderlist.setdefault(src, []).append(dest)
        elif dest in self._drug_set:
            self.anat_drugslist.setdefault(src, []).append(dest)
        elif dest in self._procedure_set:
            self.anat_proclist.setdefault(src, []).append(dest)

    def generate_paths(self, outfilename, numwalks, walklength):
        """Write disorder->drug->disorder random walks ('v'/'i' prefixes)
        to *outfilename*."""
        self.generate_random_concept1_to_concept2_paths(outfilename,
                                                        self.disorder_drugslist,
                                                        self.drugs_disorderlist,
                                                        'v', 'i', numwalks,
                                                        walklength)

    def generate_random_concept1_to_concept2_paths(self, outfilename,
                                                   c1_to_c2,
                                                   c2_to_c1,
                                                   c1_prefix_letter, c2_prefix_letter,
                                                   numwalks, walklength):
        """Sample `numwalks` alternating c1->c2->c1 walks from every source
        concept and write each sufficiently long walk as one space-separated
        line of prefixed concept ids."""
        with open(outfilename, "w") as outfile:
            for start in list(c1_to_c2.keys()):
                for _ in range(numwalks):
                    # BUG fix: each walk now restarts at its source concept.
                    # The original labelled every walk with the source id but
                    # kept walking from wherever the previous walk ended.
                    c1 = start
                    outline = c1_prefix_letter + start
                    for _ in range(walklength):
                        neighbors = c1_to_c2.get(c1)
                        if not neighbors:
                            break  # stuck: no outgoing edge from c1
                        c2 = random.choice(neighbors)
                        back = c2_to_c1.get(c2)
                        if not back:
                            continue  # dead-end hop: retry another c2 from the same c1
                        outline = outline + " " + c2_prefix_letter + c2
                        c1 = random.choice(back)
                        outline = outline + " " + c1_prefix_letter + c1
                    # Keep only walks that reached (roughly) the requested length.
                    if len(outline.split(" ")) >= walklength:
                        outfile.write(outline + "\n")
                    else:
                        print("Could not complete walk, ignoring ", outline)
                outfile.flush()
#python py4genMetaPaths.py 1000 100 net_aminer output.aminer.w1000.l100.txt
#python py4genMetaPaths.py 1000 100 net_dbis output.dbis.w1000.l100.txt
def main():
    """Build the metapath generator and write the random-walk corpus.

    Reads the module-level numwalks/walklength/dirpath/outfilename globals
    assigned in the __main__ guard below.
    """
    mpg = MetaPathGenerator(dirpath)
    mpg.generate_paths(outfilename, numwalks, walklength)


if __name__ == "__main__":
    # usage: python py4genMetaPaths.py <numwalks> <walklength> <snomed_dir> <outfile>
    # e.g.   python py4genMetaPaths.py 1000 100 net_aminer output.aminer.w1000.l100.txt
    # The original parsed sys.argv at import time (crashing on import) and
    # carried two dead `dirpath = ...` assignments that were always overwritten.
    numwalks = int(sys.argv[1])
    walklength = int(sys.argv[2])
    dirpath = sys.argv[3]
    outfilename = sys.argv[4]
    main()
| true |
44ec074ee90efefb5cae2f56b4dcf210dee1c573 | Python | dudugang/WiRE-LES2 | /rotate.py | UTF-8 | 421 | 2.6875 | 3 | [
"MIT"
] | permissive | import os
import numpy as np
import pandas as pd
import sys
def rotate_layout(theta_deg):
    """Rotate the Horns Rev turbine layout by *theta_deg* degrees about the
    first turbine and write the result to turb_loc.dat.

    Reads HornsRev.dat (CSV with 'x' and 'y' columns) from the working
    directory.
    """
    theta = np.radians(np.double(theta_deg))
    layout = pd.read_csv("HornsRev.dat")
    x0, y0 = layout["x"][0], layout["y"][0]
    dx = layout["x"] - x0
    dy = layout["y"] - y0
    # Standard counter-clockwise 2-D rotation about (x0, y0).
    layout["x"] = x0 + dx * np.cos(theta) - dy * np.sin(theta)
    layout["y"] = y0 + dy * np.cos(theta) + dx * np.sin(theta)
    layout.to_csv("turb_loc.dat", index=False)


if __name__ == "__main__":
    # Guarded so importing the module no longer reads sys.argv / the CSV
    # as a side effect (the original executed everything at import time).
    rotate_layout(sys.argv[1])
| true |
038714249b401d90509ed442f36a6ac53630584b | Python | Gruce15/TheImperialGod | /TheImperialGod/cogs/economy/shop.py | UTF-8 | 5,838 | 2.71875 | 3 | [
"CC0-1.0"
] | permissive | """
HUGE THANKS TO BotDotBot's code and BotDotCom for writing some of the logic.
Be sure to check that file here: https://github.com/BobDotCom/BobDotBot/blob/main/cogs/economy.py
Follow him: https://github.com/BotDotCom
Make sure you do that, cause its been taken inspiration from by using his system!
not 100% but like 60%
"""
import discord
from discord.ext import commands
from discord.ext.commands import cooldown, BucketType
import aiosqlite
import asyncio
class Shop(commands.Cog):
    """Shop commands: an SQLite-backed item shop for the economy system."""

    # Single source of truth for the economy database location.  The original
    # `enable` and `edit` opened "economy.db" while every other command used
    # "./data/economy.db", so they silently operated on a different database.
    DB_PATH = "./data/economy.db"

    def __init__(self, client):
        self.client = client

    @commands.Cog.listener()
    async def on_ready(self):
        print("Shop commands are ready")
        async with aiosqlite.connect(self.DB_PATH) as connection:
            async with connection.cursor() as cursor:
                # `id INTEGER PRIMARY KEY` aliases SQLite's rowid, so ids are
                # auto-assigned on insert.  The original plain `INTEGER`
                # column stayed NULL, which broke the id lookup in `add`.
                await cursor.execute("CREATE TABLE IF NOT EXISTS shop (id INTEGER PRIMARY KEY, name TEXT, price INTEGER, available BOOL)")
            await connection.commit()

    @commands.group(invoke_without_command = True)
    @cooldown(1, 20, BucketType.user)
    async def shop(self, ctx):
        """Show every item currently available in the shop."""
        async with aiosqlite.connect(self.DB_PATH) as connection:
            async with connection.cursor() as cursor:
                await cursor.execute("SELECT * FROM shop WHERE available = ?", (True,))
                items = await cursor.fetchall()
                em = discord.Embed(title = "Shop", color = ctx.author.color)
                for item in items:
                    # Row layout: (id, name, price, available).  The original
                    # displayed item[3] (the availability flag) as the ID.
                    em.add_field(name = f"{item[1]}", value = f"Cost: {item[2]}\nID: {item[0]}")
                await ctx.channel.send(embed = em)

    @shop.error
    async def shop_error(self, ctx, error):
        """Friendly cooldown message for the `shop` group."""
        if isinstance(error, commands.CommandOnCooldown):
            em = discord.Embed(title=f"<:fail:761292267360485378> Slow it down C'mon", color=ctx.author.color)
            em.add_field(name=f"Reason:", value="You can always see the shop idiot!")
            em.add_field(name="Try again in:", value="{:.2f} seconds".format(error.retry_after))
            em.set_thumbnail(url=ctx.author.avatar_url)
            await ctx.send(embed=em)

    @shop.command()
    @commands.is_owner()
    async def add(self, ctx, name, price : int):
        """Owner only: add a new item, asking interactively for availability."""
        def check(m):
            return m.author == ctx.author and m.channel == ctx.channel
        try:
            await ctx.send("Type the availibility for this item! (True or False)")
            msg = await self.client.wait_for('message', timeout=15.0, check=check)
        except asyncio.TimeoutError:
            await ctx.send('You didn\'t answer in time, please be quicker next time!')
            return
        answer = msg.content.lower()
        if answer not in ("true", "false"):
            # The original fell through here and read a stale/NULL id.
            await ctx.send("Please answer with True or False, item was not created!")
            return
        async with aiosqlite.connect(self.DB_PATH) as connection:
            async with connection.cursor() as cursor:
                await cursor.execute("INSERT INTO shop (name, price, available) VALUES (?, ?, ?)", (name, price, answer == "true",))
                # lastrowid is the freshly assigned PRIMARY KEY id.
                number = cursor.lastrowid
                await cursor.execute(f'ALTER TABLE users ADD COLUMN item{number} INTEGER;')
            await connection.commit()
        await ctx.send(f"{ctx.author.mention}, item was created!\nName: {name} | Price {price} | ID = {number}")

    @shop.command()
    @commands.is_owner()
    async def remove(self, ctx, item_id):
        """Owner only: mark an available item as unavailable (soft delete)."""
        item_id = int(item_id)
        async with aiosqlite.connect(self.DB_PATH) as connection:
            async with connection.cursor() as cursor:
                await cursor.execute("SELECT * FROM shop WHERE id = ? AND available = ?", (item_id, True,))
                rows = await cursor.fetchone()
                if not rows:
                    await ctx.send("That item doesnt exist")
                    return
                await cursor.execute("UPDATE shop SET available = ? WHERE id = ?", (False, item_id,))
            await connection.commit()
        await ctx.send(f"Successfully removed item `{item_id}` from the shop")

    @shop.command()
    @commands.is_owner()
    async def enable(self, ctx, item_id):
        """Owner only: re-enable a previously removed item."""
        item_id = int(item_id)
        async with aiosqlite.connect(self.DB_PATH) as connection:
            async with connection.cursor() as cursor:
                await cursor.execute("SELECT * FROM shop WHERE id = ? AND available = ?", (item_id, False,))
                rows = await cursor.fetchone()
                if not rows:
                    # The original fell through and ran the UPDATE anyway.
                    await ctx.send("That item doesnt exist or is enabled")
                    return
                await cursor.execute("UPDATE shop SET available = ? WHERE id = ?", (True, item_id,))
            await connection.commit()
        await ctx.send(f"Successfully enabled item `{item_id}` in the shop")

    @shop.command()
    @commands.is_owner()
    async def edit(self, ctx, item_id, price):
        """Owner only: change an item's price."""
        item_id = int(item_id)
        price = int(price)
        async with aiosqlite.connect(self.DB_PATH) as connection:
            async with connection.cursor() as cursor:
                await cursor.execute("UPDATE shop SET price = ? WHERE id = ?", (price, item_id,))
            await connection.commit()
        await ctx.send(f"Successfully changed price of item `{item_id}` to `{price}`")
def setup(client):
    """discord.py extension entry point: register the Shop cog.

    (The original line had a stray "| true" token fused onto it, which
    raised a NameError when the extension loaded.)
    """
    client.add_cog(Shop(client))
0a257bf083d7a2a802a6f630b25cbcc18a3b7681 | Python | vilau/fft | /app_bot.py | UTF-8 | 607 | 3.1875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import json
import requests
import time
def main():
    """Interactively collect bot parameters, then repeatedly post the message
    to the campus API until a request fails."""
    print("Hello bot ...")
    bot = input()
    print("In what building do you want to stay?")
    building = input()
    print("What's the message?")
    message = input()
    print("And finally.. How much time between messages? (in sec)")
    # Parse once up front so a bad value fails before the first request
    # (the original re-converted the string on every iteration).
    delay = float(input())
    while True:
        r = requests.post('https://asint-project-227919.appspot.com/API/bot/sendMessage',
                          json={'name': bot, 'building': building, 'message': message})
        if r.status_code == 200:
            print(r.text)
        else:
            # Stop on the first failed delivery.
            print(r.status_code, r.reason, r.text)
            break
        time.sleep(delay)


if __name__ == "__main__":
    # Guarded so importing the module does not block on input().  The
    # original also had a stray "| true" token fused onto its last line.
    main()
0de0b16faf68eb533fed63321042acbc7ba613ad | Python | codeaudit/Optimus | /tests/tests.py | UTF-8 | 3,039 | 2.71875 | 3 | [
"Apache-2.0"
] | permissive | import airbrake
import optimus as op
from pyspark.sql.types import StringType, IntegerType, StructType, StructField
from pyspark.sql.functions import col
import pyspark
import sys
logger = airbrake.getLogger()
def assert_spark_df(df):
    # Sanity helper: fail unless *df* is a Spark DataFrame.
    assert (isinstance(df, pyspark.sql.dataframe.DataFrame))
def create_df(spark_session):
    """Build a small (city, country, population) test DataFrame.

    The values are deliberately dirty (trailing spaces, '@', '~') so the
    cleaning transformers under test have something to fix.  Exits the
    process on failure so downstream tests don't run against a bad fixture.
    """
    try:
        # Building a simple dataframe:
        schema = StructType([
            StructField("city", StringType(), True),
            StructField("country", StringType(), True),
            StructField("population", IntegerType(), True)])
        countries = ['Colombia ', 'US@A', 'Brazil', 'Spain']
        cities = ['Bogotá', 'New York', ' São Paulo ', '~Madrid']
        population = [37800000, 19795791, 12341418, 6489162]
        # Dataframe:
        df = spark_session.createDataFrame(list(zip(cities, countries, population)), schema=schema)
        assert_spark_df(df)
        return df
    except RuntimeError:
        logger.exception('Could not create dataframe.')
        sys.exit(1)
def test_transformer(spark_session):
    """Smoke test: a DataFrameTransformer can be built and exposes a Spark DataFrame."""
    try:
        transformer = op.DataFrameTransformer(create_df(spark_session))
        assert isinstance(transformer.get_data_frame, pyspark.sql.dataframe.DataFrame)
    except RuntimeError:
        logger.exception('Could not create transformer.')
        sys.exit(1)
def test_trim_col(spark_session):
    """trim_col("*") runs over all columns and still yields a Spark DataFrame."""
    try:
        transformer = op.DataFrameTransformer(create_df(spark_session))
        transformer.trim_col("*")
        assert_spark_df(transformer.get_data_frame)
    except RuntimeError:
        logger.exception('Could not run trim_col().')
        sys.exit(1)
def test_drop_col(spark_session):
    """drop_col removes a column and still yields a Spark DataFrame."""
    try:
        transformer = op.DataFrameTransformer(create_df(spark_session))
        transformer.drop_col("country")
        assert_spark_df(transformer.get_data_frame)
    except RuntimeError:
        logger.exception('Could not run drop_col().')
        sys.exit(1)
def test_keep_col(spark_session):
    """keep_col keeps only the listed columns and still yields a Spark DataFrame."""
    try:
        transformer = op.DataFrameTransformer(create_df(spark_session))
        transformer.keep_col(['city', 'population'])
        assert_spark_df(transformer.get_data_frame)
    except RuntimeError:
        logger.exception('Could not run keep_col().')
        sys.exit(1)
def test_replace_col(spark_session):
    """replace_col substitutes values in a column and still yields a Spark DataFrame.

    NOTE(review): 'Tokyo' does not occur in the fixture, so this only checks
    that the call succeeds — confirm if a positive-match case was intended.
    """
    try:
        transformer = op.DataFrameTransformer(create_df(spark_session))
        transformer.replace_col(search='Tokyo', change_to='Maracaibo', columns='city')
        assert_spark_df(transformer.get_data_frame)
    except RuntimeError:
        logger.exception('Could not run replace_col().')
        sys.exit(1)
def test_delete_row(spark_session):
    """delete_row filters rows by a column predicate and still yields a Spark DataFrame."""
    try:
        transformer = op.DataFrameTransformer(create_df(spark_session))
        # Keep rows with 6.5M < population <= 30M.
        func = lambda pop: (pop > 6500000) & (pop <= 30000000)
        transformer.delete_row(func(col('population')))
        assert_spark_df(transformer.get_data_frame)
    except RuntimeError:
        logger.exception('Could not run delete_row().')
        sys.exit(1)
| true |
4700c9efecedbade17c15ca0d20bf82ce59bfffa | Python | mjenrungrot/musescore-dataset-generator | /zipData.py | UTF-8 | 1,213 | 2.734375 | 3 | [
"MIT"
] | permissive | # pylint: disable=invalid-name
"""
zipData is a Python script that combines generated data into a single zip file.
"""
import zipfile
import glob
import os

OUTPUT_FILENAME = 'generated_dataset.zip'
FOLDER_TO_ZIP = ['annot_audio/**', 'annot_sheet/**', 'pdf/**', 'midi/**']


def collect_dataset_files():
    """Return every existing file under FOLDER_TO_ZIP whose base name matches
    an annotated audio file (a ``_beats`` suffix counts as the same piece)."""
    # Base names (no extension) of the annotated audio files define which
    # generated artefacts belong to the dataset.
    names = {os.path.splitext(os.path.basename(p))[0]
             for p in glob.glob('annot_audio/**', recursive=True)}
    selected = []
    for folder in FOLDER_TO_ZIP:
        for candidate in glob.glob(folder, recursive=True):
            base = os.path.splitext(os.path.basename(candidate))[0]
            base = base.replace('_beats', '')
            if os.path.isfile(candidate) and base in names:
                selected.append(candidate)
    return selected


def main():
    try:
        # The with-statement guarantees the archive is closed even on failure
        # (the original used try/finally with an explicit close()).
        with zipfile.ZipFile(OUTPUT_FILENAME, mode='w') as zf:
            for file_path in collect_dataset_files():
                zf.write(file_path, file_path, compress_type=zipfile.ZIP_DEFLATED)
    except FileNotFoundError:
        print("An error occurred")


if __name__ == "__main__":
    # Guarded so importing the module no longer scans the filesystem and
    # writes the archive as a side effect.
    main()
| true |
3262ba909cb6723e7d9b2c718368c11561724aa9 | Python | Darker/RPH | /spam/word.py | UTF-8 | 1,013 | 3.640625 | 4 | [] | no_license | from bcolors import bcolors as ANSI
class Word(object):
    '''Describes a word found in emails, tracking how often it appeared in
    spam and in ham messages.'''
    def __init__(self, word, is_spam=None):
        # Total number of e-mails this word was seen in.
        self.occurences = 0
        # How many of those were spam / ham respectively.
        self.spam = 0
        self.ham = 0
        self.word = word
        if is_spam is not None:
            self.increment(is_spam)
    def increment(self, is_spam):
        '''Record one more sighting of the word, as spam or as ham.'''
        self.occurences += 1
        if is_spam:
            self.spam += 1
        else:
            self.ham += 1
    # Method names inspired by the Bayes formula.
    # See more here:
    # https://en.wikipedia.org/wiki/Naive_Bayes_spam_filtering#Computing_the_probability_that_a_message_containing_a_given_word_is_spam
    @property
    def PrWS(self):
        '''P(W|S): fraction of this word's occurrences that were spam.
        NOTE(review): raises ZeroDivisionError when occurences == 0.'''
        return self.spam/self.occurences
    @property
    def PrWH(self):
        '''P(W|H): fraction of this word's occurrences that were ham.'''
        return self.ham/self.occurences
    def printme(self):
        # Colourised "word (total | spam | ham)" line.  (The original line had
        # a stray "| true" token fused onto it, which raised a NameError.)
        print(self.word + " ("+str(self.occurences)+" | "+ANSI.RED+str(self.spam)+ANSI.RESET+" | "+ANSI.GREEN+str(self.ham)+ANSI.RESET+")")
72a90d77759e3c2c76c1fefc93ee5e63c5f648d7 | Python | yangjiao2/Python | /structure/class_struct/bag.py | UTF-8 | 3,211 | 4.03125 | 4 | [] | no_license | # Yang Jiao, Lab 6
# Anita Marie Gilbert, Lab 6
# We certify that we worked cooperatively on this programming
# assignment, according to the rules for pair programming
class Bag:
    """A multiset: stores values with multiplicities, like collections.Counter."""
    def __init__(self, parameter):
        '''Takes one parameter, an iterable of values that initialize the bag.
        Bag(['d','a','b','d','c','b','d']) constructs a bag with one 'a',
        two 'b', one 'c', and three 'd'.'''
        self.para = parameter  # kept for backward compatibility with old code
        self.dict = dict()     # value -> count (always >= 1)
        for item in parameter:
            self.dict[item] = self.dict.get(item, 0) + 1
    def __repr__(self):
        '''Return a string which, passed to eval, constructs an equal Bag.
        Reflects the *current* contents (the original echoed the constructor
        argument and went stale after add/remove).  Order is arbitrary.'''
        return 'Bag(' + repr(list(self)) + ')'
    def __str__(self):
        '''Compact form, e.g. Bag(a[1], c[1], b[2], d[3]); order is arbitrary.'''
        if not self.dict:
            # The original sliced 'Bag('[:-2] and produced 'Ba)' when empty.
            return 'Bag()'
        result = 'Bag('
        for k in self.dict:
            result += str(k) + '[' + str(self.dict[k]) + '], '
        return result[:-2] + ')'
    def __len__(self):
        '''Total number of values in the Bag, counting multiplicity.'''
        return sum(self.dict.values())
    def __contains__(self, arg):
        '''Whether the argument occurs at least once in the Bag.'''
        return arg in self.dict
    def count(self, arg):
        '''Number of times the argument is in the Bag; 0 if absent
        (the original raised KeyError for absent values).'''
        return self.dict.get(arg, 0)
    def add(self, item):
        '''Add one occurrence of the argument to the Bag.'''
        self.dict[item] = self.dict.get(item, 0) + 1
    def remove(self, item):
        '''Remove one occurrence of the argument; raise ValueError if absent.
        (The original silently decremented — it could go negative, created
        missing keys with count -1, and never deleted exhausted keys.)'''
        if item not in self.dict:
            raise ValueError('cannot remove ' + repr(item) + ': not in Bag')
        self.dict[item] -= 1
        if self.dict[item] == 0:
            del self.dict[item]
    def __iter__(self):
        '''Yield every value in the Bag, each repeated count times — len(self)
        values in total.  (The original yielded the single string k*c per key,
        which was neither one-value-at-a-time nor meaningful for non-strings.)'''
        def __gen(d):
            for k in d:
                for _ in range(d[k]):
                    yield k
        return __gen(self.dict)
| true |
445e01091b0d3bec7fb5104beee0b93586e3673a | Python | RedCrow9564/SketchingMethodsInDataAnalysis-Final-Project | /ComparedAlgorithms/base_least_square_solver.py | UTF-8 | 1,001 | 2.90625 | 3 | [
"MIT"
] | permissive | from Infrastructure.utils import ColumnVector, Matrix
class BaseSolver(object):
    """Abstract base for least-squares solvers: stores the regression problem
    and exposes `fit` (subclass responsibility) plus residual computation."""

    def __init__(self, data_features: Matrix, output_samples: ColumnVector, n_alphas: int,
                 cross_validation_folds: int):
        # Keep the design matrix, targets and hyper-parameters; the concrete
        # model and coefficients are filled in by the subclass's `fit`.
        self._data_features = data_features
        self._output_samples = output_samples
        self._cross_validation_folds = cross_validation_folds
        self._n_alphas = n_alphas
        self._model = None
        self._fitted_coefficients: ColumnVector = None

    def fit(self) -> ColumnVector:
        """Fit the regression model. Concrete solvers MUST override this."""
        raise NotImplementedError("Any subclass MUST implement this method!")

    def calc_residuals(self) -> ColumnVector:
        """
        A method for calculating the estimation errors of the fitted model,
        i.e. y - X @ beta. It can NOT be invoked before the 'fit' method.

        Returns:
            A column vector of the estimator's residuals.
        """
        predictions = self._data_features.dot(self._fitted_coefficients)
        return self._output_samples - predictions
| true |
11274018d05ef30890289f3e996bb2106a1a45c2 | Python | apotato369550/python-malware-scanner | /antivirus.py | UTF-8 | 4,484 | 2.671875 | 3 | [
"MIT"
] | permissive | import Tkinter as tk
from Tkinter import *
import ttk
import tkFileDialog
import webbrowser
import hashlib
import os
import config
import updater
## Functions
def openReadme():
    # Show the project readme (path from config) in the default browser.
    webbrowser.open(config.readMe)
def openDictionary():
    # Open the configured hash-dictionary file in the default browser,
    # creating it first if it does not exist.
    if not os.path.exists(config.dictionaryList):
        print("file does not exist")
        # BUG FIX: the original created the literal path "dictionary.txt" and
        # leaked the file handle; create the configured file instead, so the
        # webbrowser call below always has a file to open, and close it.
        with open(config.dictionaryList, "w+"):
            pass
    webbrowser.open(config.dictionaryList)
def browseDictionary():
    # Ask the user for a dictionary file and mirror the choice into the
    # config and the settings-tab entry box.
    chosen = tkFileDialog.askopenfilename()
    config.dictionaryFile = chosen
    dictionaryEntry.delete(0, END)
    dictionaryEntry.insert(0, config.dictionaryFile)
## Antivirus functions
def scanFileHash():
    # Compare the MD5 hash of the selected file (config.hash) against every
    # hash in the dictionary file and report Infected/Safe in the UI.
    if not os.path.exists(config.dictionaryList):
        # BUG FIX: the original created the literal path "dictionary.txt"
        # (not the configured file) and leaked the handle; create the
        # configured file so the scan below can open it.
        with open(config.dictionaryList, "w+"):
            pass
    infected = False
    # The with-statement closes the file; the original also called close()
    # redundantly inside the with-block.
    with open(config.dictionaryList, "r") as f:
        for line in f:
            if line.strip() == config.hash:
                infected = True
                break
    if infected:
        result.config(text="Infected!")
    else:
        result.config(text="Safe!")
def getFileHash():
    # Compute the MD5 hash of the currently selected file and show it in the UI.
    try:
        # Read in binary mode: correct for arbitrary file contents, and
        # hashlib requires bytes on Python 3 (on Python 2 'rb' is equivalent
        # for hashing purposes).
        with open(config.directory, "rb") as fileToHash:
            contents = fileToHash.read()
        config.hash = hashlib.md5(contents).hexdigest()
        hashOutput.config(text="MD5 Hash: " + config.hash)
    except (IOError, OSError, TypeError, ValueError):
        # Narrowed from a bare except: only treat I/O problems and a bad /
        # unset path as "invalid file" instead of swallowing every error.
        hashOutput.config(text="Invalid file selected")
def browseFile():
    # Ask the user for a file to scan and mirror the choice into the config
    # and the antivirus-tab entry box.
    chosen = tkFileDialog.askopenfilename()
    config.directory = chosen
    directory.delete(0, END)
    directory.insert(0, chosen)
##### window, notebook, and frames
# Build the main window and a two-tab notebook (Antivirus / Settings).
myWidth = 375  # NOTE(review): myWidth/myHeight are never used; the geometry string below is hard-coded
myHeight = 250
root = tk.Tk()
root.geometry("375x200")
root.title("Supr Simple AV")
root.iconbitmap("icon.ico")
notebook = ttk.Notebook(root)
notebook.pack()
antivirusFrame = Frame(notebook)
settingsFrame = Frame(notebook)
antivirusFrame.pack(fill="both", expand=1)
settingsFrame.pack(fill="both", expand=1)
##### creating widgets #####
# buttons
fileSelector = Button(antivirusFrame, text="Browse file", command=browseFile)
fileHasher = Button(antivirusFrame, text="Get MD5 Hash", command=getFileHash)
hashScanner = Button(antivirusFrame, text="Scan Hash", command=scanFileHash)
dictionarySelector = Button(settingsFrame, text="Browse File", command=browseDictionary)
enterDictionary = Button(settingsFrame, text="Add dictionary", command=updater.enterDictionary)
# NOTE(review): the next two assignments shadow the functions openDictionary /
# openReadme defined above. The Button commands capture the functions first,
# so this works, but the functions are unreachable by name afterwards.
openDictionary = Button(settingsFrame, text="Open Dictionary File", command=openDictionary)
openReadme = Button(settingsFrame, text="Open Readme File", command=openReadme)
enterHash = Button(settingsFrame, text="Enter Hash", command=lambda : updater.enterHash(hashEntry.get()))
# Labels
title = Label(antivirusFrame, text="Supr Simpl AV")
hashOutput = Label(antivirusFrame, text="Hash goes here...")
result = Label(antivirusFrame, text="Results go here...")
selectorInstructions = Label(settingsFrame, text="Select an .html/.txt file:")
hexInstructions = Label(settingsFrame, text="Enter a hash:")
settingsResult = Label(settingsFrame, text="")
# Entries
directory = Entry(antivirusFrame, borderwidth=3, width=40)
dictionaryEntry = Entry(settingsFrame, borderwidth=3, width=40)
hashEntry = Entry(settingsFrame, borderwidth=3, width=40)
##### grids ######
# Buttons
fileSelector.grid(row=1, column=1, padx=3, pady=5)
fileHasher.grid(row=3, column=1, padx=3, pady=5)
hashScanner.grid(row=5, column=1, padx=3, pady=5)
dictionarySelector.grid(row=1, column=1, padx=3, pady=3)
enterDictionary.grid(row=2, column=0, padx=3, pady=3)
openDictionary.grid(row = 2, column = 1, padx = 3, pady = 3)
openReadme.grid(row = 3, column = 1, padx = 3, pady = 3)
enterHash.grid(row=5, column=0, padx=3, pady=3)
# Labels
title.grid(row=0, column=0, padx=3, pady=5)
result.grid(row=5, column=0, padx=3, pady=5)
hexInstructions.grid(row=3, column=0, padx=3, pady=3)
selectorInstructions.grid(row=0, column=0, padx=3, pady=3)
settingsResult.grid(row=5, column=1, padx=3, pady=3)
# Entries
directory.grid(row=1, column=0, padx=3, pady=5)
hashOutput.grid(row=3, column=0, padx=3, pady=5)  # NOTE(review): hashOutput is a Label despite the "Entries" heading
dictionaryEntry.grid(row=1, column=0, padx=3, pady=3)
hashEntry.grid(row=4, column=0, padx=3, pady=3)
# adding frames to notebook
notebook.add(antivirusFrame, text="Antivirus")
notebook.add(settingsFrame, text="Settings")
root.mainloop()
| true |
2a824f6bc1e7e7055f94251edb460f0ec9bcc6c0 | Python | green-fox-academy/andrasnyarai | /week-02/d01/parametric_average.py | UTF-8 | 557 | 4.34375 | 4 | [] | no_license | # Write a program that asks for a number.
# It would ask this many times to enter an integer,
# if all the integers are entered, it should print the sum and average of these
# integers like:
#
# Sum: 22, Average: 4.4
# BUG FIX: the original hard-coded six prompts and never asked for the count,
# contradicting the exercise description above; it also used integer division
# for the average and omitted the comma in the output format.
count = int(input("How many numbers? "))
total = 0
for _ in range(count):
    total += int(input("Feed the machine a number: "))
# Guard against count == 0 to avoid a ZeroDivisionError.
average = float(total) / count if count else 0.0
print("Sum: " + str(total) + ", Average: " + str(average))
46522ac5fc962df4d2fae82a58f6992e9c53b12a | Python | quantumjim/NoiseEstimationFromSyndromes | /customEM.py | UTF-8 | 29,414 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Implements the EM algorithm and Variants for error rates estimation from syndromes in concatenated quantum codes.
Also provides methods for simulations of this algorithm on our cluster, using some parallelization
"""
import CustomBP as bp
import QueueParallelization
import numpy as np
import math
import json
import pickle
import copy
import datetime
import sys
import os
import multiprocessing as mp
"""
Standard Expectation Maximization Algorithm for Error Rates estimation (for EM compare [Koller, Probabilistic Graphical Models:Principles and Techniques])
"""
def NormalizeRows(a):
    """
    Divide each row of `a` by its sum so every row sums to one.
    None is passed through unchanged.
    """
    if a is None:
        return None
    return a / a.sum(axis=1)[:, np.newaxis]
def EM(Code,SyndromeData,Iterations,InitQubitRates = None,QubitPriorStatistics = None,InitMeasurementRates = None, MeasurementPriorStatistics = None):
    """
    Perform EM on given ConcatQCode with given Syndrome Data.

    Returns the estimated rates of all iterations, and updates the rates in the
    ConcatQCode object that is passed, such that after calling this method it has the final rates.
    Can specify a Dirichlet Prior to perform MAP Estimation, by passing to PriorStatistics a vector of "Pseudocounts" for each parameter.
    PriorStatistics = np.zeros((Code.n_physical_Qubits,4)) corresponds to maximum likelihood estimation for a code without measurement errors
    e.g. PriorStatistics = [[2,1,1,1]] would correspond to a dirichlet prior for a single qubit that is biased towards the identity error.

    Parameters:
        Code: ConcatQCode whose error rates are (re)estimated in place.
        SyndromeData: iterable of syndrome vectors, one per measurement shot.
        Iterations: number of EM iterations to run.
        InitQubitRates / InitMeasurementRates: optional initial rates; None keeps
            the rates currently set on the code.
        QubitPriorStatistics / MeasurementPriorStatistics: optional Dirichlet
            pseudocounts for MAP estimation (zeros -> plain maximum likelihood).

    Returns:
        (QubitRates, MeasurementRates): lists of the flattened rate estimates of
        every iteration, including the initial rates as the first entry.
    """
    if QubitPriorStatistics is None:
        QubitPriorStatistics = np.zeros((Code.n_physical_Qubits,4))
    if Code.HasMeasurementErrors:
        if MeasurementPriorStatistics is None:
            MeasurementPriorStatistics = np.zeros((Code.n_total_checks,2))
    QubitRates = [] #The Predicted Qubit Error Rates over the Iterations
    MeasurementRates = [] # The Predicted Measurement Error Rates over the Iterations
    Code.SetErrorRates(InitQubitRates,InitMeasurementRates) #If one of them is None then we just keep the corresponding rates that are currently set on the code.
    qrate,measrate = Code.GetRates(flat=True)
    QubitRates += [qrate]
    MeasurementRates += [measrate]
    SyndromeCounts = GetSyndromeCounts(SyndromeData)
    for iteration in range(Iterations):
        #Expectation Step
        SyndromeMarginals = CalculateMarginals(SyndromeCounts, Code)
        QubitSufficientStatistics = QubitPriorStatistics
        MeasurementSufficientStatistics = MeasurementPriorStatistics
        # Accumulate expected error counts: each syndrome's marginals weighted
        # by how often that syndrome occurred. Note the non-in-place '+' keeps
        # the prior arrays untouched.
        for s in SyndromeCounts.keys():
            QubitSufficientStatistics = QubitSufficientStatistics + SyndromeMarginals[s][0]*SyndromeCounts[s]
            if Code.HasMeasurementErrors:
                MeasurementSufficientStatistics = MeasurementSufficientStatistics + SyndromeMarginals[s][1]*SyndromeCounts[s]
        #Normalize
        QubitSufficientStatistics = NormalizeRows(QubitSufficientStatistics)
        if Code.HasMeasurementErrors:
            MeasurementSufficientStatistics = NormalizeRows(MeasurementSufficientStatistics)
        if np.any(np.isnan(QubitSufficientStatistics)):
            """
            Chose convention that we keep old error rates if nan is encountered
            This can happen for very low / 0 error rates
            """
            QubitSufficientStatistics = Code.ErrorRates
            print("Encountered Nan")
        if Code.HasMeasurementErrors:
            if np.any(np.isnan(MeasurementSufficientStatistics)):
                """
                Chose convention that we keep old error rates if nan is encountered
                This can happen for very low / 0 error rates
                """
                MeasurementSufficientStatistics = Code.GetRates(flat=True)[1] #The old measurement error rates in appropriate shape
                print("Encountered Nan")
        #Maximization step
        Code.SetErrorRates(QubitSufficientStatistics,MeasurementSufficientStatistics)
        qrate,measrate = Code.GetRates(flat=True)
        QubitRates += [qrate]
        MeasurementRates += [measrate]
    return QubitRates,MeasurementRates
def GetSyndromeCounts(SyndromeData):
    """
    Count the number of occurrences of each syndrome in the data set.

    Returns a Counter (a dict subclass) mapping tuple(syndrome) -> count,
    which is backward compatible with the plain dict the callers expect.
    """
    # Local import: the file does not import collections at the top level.
    from collections import Counter
    return Counter(tuple(syndrome) for syndrome in SyndromeData)
def CalculateMarginals(SyndromeCounts,Code):
    """
    Run belief propagation once per distinct syndrome and collect the leaf
    marginals, split into qubit-error and measurement-error parts.
    Returns {syndrome_tuple: (QubitMarginals, MeasurementMarginals)}; the
    measurement part is an empty list when the code has no measurement errors.
    """
    SyndromeMarginals = {}
    for syndrome in SyndromeCounts:
        Code.RunBP(np.array(syndrome))
        Marginals = Code.LeafMarginals()
        qubit_part = np.array(Marginals[:Code.n_physical_Qubits])
        if Code.HasMeasurementErrors:
            meas_part = np.array(Marginals[Code.n_physical_Qubits:])
        else:
            meas_part = []
        SyndromeMarginals[syndrome] = (qubit_part, meas_part)
    return SyndromeMarginals
#%%
"""
Hard Assignment EM (compare [Koller, Probabilistic Graphical Models:Principles and Techniques])
"""
def HardAssignmentEM(Code,SyndromeData,Iterations,InitQubitRates = None,QubitPriorStatistics = None,InitMeasurementRates = None, MeasurementPriorStatistics = None):
    """
    Hard-assignment ("Viterbi") variant of EM: instead of summing expected
    counts from marginals, each syndrome contributes the counts of its single
    most likely error configuration (computed with max-sum).

    Note that this must have the same signature as EM for compatibility with test methods.
    Can specify a Dirichlet Prior to perform MAP Estimation, by passing to PriorStatistics a vector of "Pseudocounts" for each parameter.
    PriorStatistics = np.zeros((Code.n_physical_Qubits,4)) corresponds to maximum likelihood estimation
    e.g. PriorStatistics = [[2,1,1,1]] would correspond to a Dirichlet prior for a single qubit that is biased towards the identity error.

    Returns:
        (QubitRates, MeasurementRates): lists of the flattened rate estimates of
        every iteration, including the initial rates as the first entry.
    """
    n_qubits = Code.n_physical_Qubits
    n_check = Code.n_total_checks
    if QubitPriorStatistics is None:
        QubitPriorStatistics = np.zeros((Code.n_physical_Qubits,4))
    if Code.HasMeasurementErrors:
        if MeasurementPriorStatistics is None:
            MeasurementPriorStatistics = np.zeros((Code.n_total_checks,2))
    Code.SetErrorRates(InitQubitRates,InitMeasurementRates) #If one of them is None then we just keep the corresponding rates that are currently set on the code.
    QubitRates = [] #The Predicted Qubit Error Rates over the Iterations
    MeasurementRates = [] # The Predicted Measurement Error Rates over the Iterations
    qrate,measrate = Code.GetRates(flat=True)
    QubitRates += [qrate]
    MeasurementRates += [measrate]
    SyndromeCounts = GetSyndromeCounts(SyndromeData)
    n_data = SyndromeData.shape[0]
    for iteration in range(Iterations):
        SyndromeMAPS = CalculateMAPS(SyndromeCounts, Code)
        # BUG FIX: start each iteration from a float *copy* of the prior
        # counts. The original aliased the prior arrays and mutated them in
        # place (+= and /= below), so the prior was corrupted and the already
        # normalized statistics of one iteration leaked into the next.
        QubitSufficientStatistics = np.array(QubitPriorStatistics, dtype=float)
        if MeasurementPriorStatistics is None:
            MeasurementSufficientStatistics = None
        else:
            MeasurementSufficientStatistics = np.array(MeasurementPriorStatistics, dtype=float)
        for s in SyndromeCounts.keys():
            e = SyndromeMAPS[s]
            # Indices below n_qubits are qubit errors; the rest are
            # measurement-error bits (only present with measurement errors).
            for i in range(e.size):
                if i<n_qubits:
                    QubitSufficientStatistics[i,e[i]]+=SyndromeCounts[s]
                else:
                    MeasurementSufficientStatistics[i-n_qubits,e[i]]+=SyndromeCounts[s]
        #Normalize
        QubitSufficientStatistics /= n_data
        if Code.HasMeasurementErrors:
            MeasurementSufficientStatistics /= n_data
        if np.any(np.isnan(QubitSufficientStatistics)):
            # Convention: keep the old error rates if NaN is encountered
            # (can happen for very low / zero error rates).
            QubitSufficientStatistics = Code.ErrorRates
            print("Encountered Nan")
        if Code.HasMeasurementErrors:
            if np.any(np.isnan(MeasurementSufficientStatistics)):
                # Same convention for the measurement error rates.
                MeasurementSufficientStatistics = Code.GetRates(flat=True)[1] #The old measurement error rates in appropriate shape
                print("Encountered Nan")
        #Maximization step
        Code.SetErrorRates(QubitSufficientStatistics,MeasurementSufficientStatistics)
        qrate,measrate = Code.GetRates(flat=True)
        QubitRates += [qrate]
        MeasurementRates += [measrate]
    return QubitRates,MeasurementRates
def CalculateMAPS(SyndromeCounts,Code):
    """
    Run max-sum once per distinct syndrome and return the most likely error
    configuration for each: {syndrome_tuple: MAP error configuration}.
    """
    MAPConfigurations = {}
    for syndrome in SyndromeCounts:
        Code.RunMaxSum(np.array(syndrome))
        MAPConfigurations[syndrome] = Code.MAPConfig()
    return MAPConfigurations
#%% Testing of EM via Decoding
class TestDecoderEMPrior:
    """
    Different possibilities of choosing a Dirichlet prior for MAP estimation via EM.
    Different Prior Types are possible:
    "None": Perform Simple Maximum Likelihood Estimation
    "AroundInit": will choose a Prior around the initial Guess Error Rates using a total number of Pseudocounts n_prior_counts (per qubit / measurement bit)
    "Uniform": will choose a uniform Prior using a total number of Pseudocounts n_prior_counts (per qubit / measurement bit)
    """
    def __init__(self,Type,n_prior_counts):
        self.Type = Type
        self.n_prior_counts=n_prior_counts

    def ToDict(self):
        # Serializable summary of the prior configuration.
        return {"Type": self.Type, "n_prior_counts": self.n_prior_counts}

    def GetPrior(self,InitQubitErrorRates,n_qubits,InitMeasurementErrorRates = None,n_meas=None):
        """
        Build the Dirichlet pseudocount arrays for MAP estimation.

        InitQubitErrorRates -> error rates for qubits, shape (n_qubits, 4)
            (only read for "AroundInit").
        InitMeasurementErrorRates -> measurement error rates, flattened shape.
        Returns (QubitPriorValues, MeasurementPriorValues); both are None for
        Type "None", and the measurement part is None when no measurement
        error rates were supplied.
        Raises ValueError for an unknown prior type.
        """
        if InitMeasurementErrorRates is not None:
            n_meas = len(InitMeasurementErrorRates)
        QubitPriorValues = None
        MeasurementPriorValues = None
        if self.Type == "None":
            # Plain maximum likelihood: no pseudocounts at all.
            pass
        elif self.Type == "AroundInit":
            QubitPriorValues = InitQubitErrorRates*self.n_prior_counts
            # BUG FIX: the original tested `MeasurementPriorValues is not None`
            # right after setting it to None, so the measurement prior was
            # never built for "AroundInit". Test the input rates instead.
            if InitMeasurementErrorRates is not None:
                MeasurementPriorValues = InitMeasurementErrorRates * self.n_prior_counts
        elif self.Type == "Uniform":
            # Spread n_prior_counts evenly over the outcomes so the total
            # pseudocount per qubit / measurement bit is n_prior_counts.
            QubitPriorValues = np.ones((n_qubits,4))*self.n_prior_counts / 4
            if InitMeasurementErrorRates is not None:
                MeasurementPriorValues = np.ones((n_meas,2))*self.n_prior_counts / 2
        else:
            raise ValueError("Not a valid prior type")
        return QubitPriorValues,MeasurementPriorValues
class TestDecoderParameters:
    """
    A struct that holds all the parameters used in Decoder + EM simulations.
    """
    def __init__(self,n_tries = 100, n_Iterations = 1,DecodeAtIterations = None,n_concatenations = 1,p_mean = 0.1/3, Pseudocounts = 20,n_Estimation_data = 10**3,n_Test_Data = 10**4,Printperiod = math.inf, Faithful = False,FixRealRates = True,UseHardAssignments = False, PriorType = "AroundInit", n_prior_counts = 20,p_mean_measurement=0.1):
        """
        Using p_mean_measurement = None corresponds to no measurement error nodes. p_mean_measurement = 0 still creates them, which slows down computation.
        """
        self.n_tries = n_tries
        self.n_Iterations = n_Iterations
        # Default: estimate the logical error rate only before any EM step
        # and after the final iteration.
        if DecodeAtIterations is None:
            DecodeAtIterations = [0, n_Iterations]
        self.DecodeAtIterations = DecodeAtIterations
        self.n_concatenations = n_concatenations
        self.p_mean = p_mean
        self.p_mean_measurement = p_mean_measurement
        # Pseudocounts of the Dirichlet that draws the *real* model's random
        # error rates - NOT a prior used for MAP estimation.
        self.Pseudocounts = Pseudocounts
        self.n_Test_Data = n_Test_Data
        self.n_Estimation_data = n_Estimation_data
        # Faithful: decode with the actual rates (perfect-knowledge baseline).
        self.Faithful = Faithful
        # Whether the actual rates are fixed and the initialization is random
        # (True) or the other way around (False).
        self.FixRealRates = FixRealRates
        self.UseHardAssignments = UseHardAssignments
        self.Printperiod = Printperiod
        # Prior configuration for MAP estimation during EM.
        self.EMPrior = TestDecoderEMPrior(Type=PriorType, n_prior_counts=n_prior_counts)

    def ToDict(self):
        # Serializable summary of every simulation parameter.
        return {
            "n_tries": self.n_tries,
            "n_Iterations": self.n_Iterations,
            "DecodeAtIterations": self.DecodeAtIterations,
            "n_concatenations": self.n_concatenations,
            "p_mean": self.p_mean,
            "p_mean_measurement": self.p_mean_measurement,
            "Pseudocounts": self.Pseudocounts,
            "n_Test_Data": self.n_Test_Data,
            "n_Estimation_Data": self.n_Estimation_data,
            "Printperiod": self.Printperiod,
            "Faithful": self.Faithful,
            "FixRealRates": self.FixRealRates,
            "UseHardAssignments": self.UseHardAssignments,
            "EMPriorInformation": self.EMPrior.ToDict(),
        }

    def ToJson(self):
        return json.dumps(self.ToDict())

    def Copy(self):
        # Shallow copy, matching the original behavior.
        return copy.copy(self)
def Decode(Model,Syndromes,Printperiod = math.inf):
    """
    Maximum likelihood decoder: for every syndrome, run the upward BP pass
    and pick the logical error with the largest top-level marginal.
    Returns an int array with one decoded logical error per syndrome.
    """
    LogErrs = np.zeros(Syndromes.shape[0], dtype=int)
    for idx, syndrome in enumerate(Syndromes):
        Model.BPUpwardsPass(syndrome)
        LogErrs[idx] = np.argmax(Model.TopMarginal())
        # Periodic progress report (disabled by the default Printperiod=inf).
        if (idx + 1) % Printperiod == 0:
            print(str(idx + 1) + " Errors Decoded")
            sys.stdout.flush()
    return LogErrs
def TestDecoderParallel(n_test,GuessModel,RealModel,Printperiod = math.inf):
    """
    Tests the Decoder (that assumes rates from GuessModel) on random errors generated from RealModel.
    Parallelized by dividing the test set into chunks and decoding them in parallel using _TestDecoderChunk.
    Returns the fraction of correctly decoded syndromes (0 if n_test <= 0).
    """
    if n_test > 0:
        print("Decoding")
        n_concat = GuessModel.n_concatenations
        # Only the (picklable) flat rate arrays are shipped to the worker
        # processes; each worker rebuilds its own code models from them.
        GuessRates,MeasurementGuessRates = GuessModel.GetRates(flat = True)
        RealRates,MeasurementRealRates = RealModel.GetRates(flat=True)
        n_processes = mp.cpu_count()
        print("CPU count: ", n_processes)
        chunk_sizes = _DistributeWork(n_test, n_processes)
        print("Chunksizes: ", chunk_sizes)
        with mp.Pool(n_processes) as pool:
            proc = [pool.apply_async(_TestDecoderChunk,args=(n, GuessRates, RealRates,MeasurementGuessRates,MeasurementRealRates, n_concat,Printperiod)) for n in chunk_sizes]
            res = [p.get() for p in proc]
        # Combine the per-chunk success rates into an average weighted by
        # chunk size.
        TotalSuccessrate = 0
        for n,s in zip(chunk_sizes,res):
            TotalSuccessrate += n*s
        TotalSuccessrate /= n_test
        return TotalSuccessrate
    else:
        return 0
def _TestDecoderChunk(n_test,GuessRates,RealRates,MeasurementGuessRates,MeasurementRealRates,n_concatenations,Printperiod = math.inf):
    """
    Helper for TestDecoderParallel. Decodes one chunk of the data. Multiple instances of this are called in parallel.
    Rebuilds the guess/real code models locally from the (picklable) rate
    arrays, samples n_test fresh syndromes from the real model, decodes them
    with the guess model, and returns the success rate of this chunk.
    """
    if n_test > 0:
        BaseCode = bp.Create5QubitPerfectCode()
        GuessModel = bp.ConcatQCode(BaseCode,n_concatenations,GuessRates,MeasurementGuessRates)
        RealModel = bp.ConcatQCode(BaseCode, n_concatenations,RealRates,MeasurementRealRates)
        Syndromes,LogErrs,PhysErrs = RealModel.GenerateData(n_test)
        DecodedErrs = Decode(GuessModel,Syndromes,Printperiod)
        # Fraction of syndromes whose decoded logical error matches the truth.
        SucessRate = np.count_nonzero(np.equal(DecodedErrs,LogErrs)) / n_test
        return SucessRate
    else:
        return 0
#Distribute the number of simulations as evenly as possible between processors
def _DistributeWork(n_work,n_processes):
chunksize = math.floor(n_work / n_processes)
remainder = n_work - chunksize*n_processes
Chunksizes = [chunksize]*n_processes
for i in range(remainder):
Chunksizes[i] += 1
return Chunksizes
def EMWithDecoding(GuessModel,RealModel,Syndromes,Parameters):
    """
    Performs EM on the given syndrome set to estimate the error rates of the code.
    Estimates the logical success rate of the decoder at the given iterations of EM. (Using independently generated Syndromes)
    Note that Parameters.DecodeAtIterations should be in ascending order and include 0 as the first entry if you want to estimate the initial error rates.
    Returns the estimated error rates of each iteration and the estimated logical success rates of the given iterations.
    """
    #Get the prior
    qrates,mrates = GuessModel.GetRates(flat=True)
    PriorRates,MeasurementPriorRates = Parameters.EMPrior.GetPrior(qrates,GuessModel.n_physical_Qubits,mrates,GuessModel.n_total_checks)
    #Convert to format that tells you how many additional iterations to do in each step before decoding instead of the absolute number of the iteration
    DecodeAtIterations = Parameters.DecodeAtIterations
    Iterations = Parameters.n_Iterations
    DecodeForIterations = [DecodeAtIterations[0]] + [DecodeAtIterations[i] - DecodeAtIterations[i-1] for i in range(1,len(DecodeAtIterations))]
    # Select the EM variant (soft marginals vs. hard max-sum assignments).
    if Parameters.UseHardAssignments == False:
        EMFunction = EM
    else:
        EMFunction = HardAssignmentEM
    prates,mrates = GuessModel.GetRates(flat=True)
    EstimatedPhysicalRates = [prates]
    EstimatedMeasurementRates = [mrates]
    EstimatedSuccessRates = []
    # print(DecodeForIterations)
    for it in DecodeForIterations:
        # Run `it` further EM iterations, then estimate the decoder's success
        # rate at this checkpoint on independently generated test data.
        EstRates,MeasEstRates = EMFunction(GuessModel,Syndromes,it,QubitPriorStatistics=PriorRates,MeasurementPriorStatistics=MeasurementPriorRates)
        #print(EstRates)
        EstimatedPhysicalRates += EstRates[1:] #Exclude first entry since its identical to last entry of previous estimation round
        EstimatedMeasurementRates += MeasEstRates[1:]
        SuccessRate = TestDecoderParallel(Parameters.n_Test_Data,GuessModel,RealModel,Printperiod = Parameters.Printperiod)
        EstimatedSuccessRates += [SuccessRate]
    # Finish any remaining EM iterations after the last decoding checkpoint.
    it = Iterations - DecodeAtIterations[-1]
    EstRates,MeasEstRates = EMFunction(GuessModel,Syndromes,it,QubitPriorStatistics=PriorRates,MeasurementPriorStatistics=MeasurementPriorRates)
    EstimatedPhysicalRates += EstRates[1:]
    EstimatedMeasurementRates += MeasEstRates[1:]
    return np.array(EstimatedPhysicalRates),np.array(EstimatedMeasurementRates),np.array(EstimatedSuccessRates)
#%%
"""
Simulations of Decoder + EM
"""
#Note: My p_mean must be 1/3 of the p given in [Poulin] because of different conventions for the depolarizing channel
def TestDecoderEstimation(Parameters):
    """
    Sets up actual and guess error rates and performs EM + Decoding. Returns the Estimated Rates (of each Iteration) and the estimated Logical Error Rates (only for Iterations in Parameters.DecodeAtIterations).
    Parameters should be an Object of TestDecoderParameters.

    Returns:
        (FailureRates, RealPhysicalRates, RealMeasRates, EstimatedRates, EstimatedMeasRates)
    """
    RealModel,GuessModel = Setup5QubitModels(n_concat=Parameters.n_concatenations,p_mean=Parameters.p_mean,p_mean_measurement=Parameters.p_mean_measurement,Pseudocounts=Parameters.Pseudocounts,FixRealRates = Parameters.FixRealRates)
    #Parameters.EMPrior.SetPriorValues(GuessModel.ErrorRates) #Set the prior Values
    if Parameters.Faithful == False:
        # Estimation data: syndromes sampled from the true model for EM.
        Syndromes,Errs,PhysErrs = RealModel.GenerateData(Parameters.n_Estimation_data)
    if Parameters.Faithful == False:
        #InitSucessRate = TestDecoderParallel(Parameters.n_Test_Data,GuessModel,RealModel,Printperiod = Parameters.Printperiod)
        EstimatedRates,EstimatedMeasRates,SuccessRates = EMWithDecoding(GuessModel,RealModel,Syndromes,Parameters)
    else:
        # Faithful mode: decode with the true rates to benchmark a decoder
        # with perfect knowledge of the physical error rates; no EM is run.
        SuccessRates = np.array([TestDecoderParallel(Parameters.n_Test_Data,RealModel,RealModel)])
        EstimatedRates = []
        EstimatedMeasRates = []
    if Parameters.n_Test_Data > 0:
        print("Initial Error Rate: ", bp.FormatScientific(1- SuccessRates[0]))
        if Parameters.Faithful == False:
            print("Error Rate after Estimation: ", bp.FormatScientific(1 - SuccessRates[-1]))
    # RealPhysicalRates = RealModel.ErrorRates
    # RealMeasRates = RealModel.MeasurementErrorRates
    RealPhysicalRates,RealMeasRates = RealModel.GetRates(flat=True)
    FailureRates = 1 - SuccessRates
    return FailureRates,RealPhysicalRates,RealMeasRates,EstimatedRates,EstimatedMeasRates
def _TestDecoderEstimationChunk(n,Parameters):
    """
    Deprecated, for parallelization via chunks.
    Runs n independent estimations sequentially and returns their results.
    """
    Results = []
    for run_index in range(n):
        print("Starting Estimation: ", run_index)
        sys.stdout.flush()
        Results.append(TestDecoderEstimation(Parameters))
    return Results
class ErrorRatesEstimationTask():
    """
    A picklable work item for queue-based parallelization.
    Parameters: an instance of TestDecoderParameters.
    Calling the task runs one full estimation and returns its result tuple.
    """
    def __init__(self,Parameters):
        self.Parameters = Parameters

    def __call__(self):
        print("Starting Estimation")
        sys.stdout.flush()
        # Re-seed NumPy in the worker process: forked children inherit the
        # parent's RNG state and would otherwise produce identical samples.
        np.random.seed(int.from_bytes(os.urandom(4), byteorder='little'))
        outcome = TestDecoderEstimation(self.Parameters)
        sys.stdout.flush()
        return outcome
def TestDecoderEstimationMultiple(Parameters,SaveFolder,name):
    """
    This is the best method to actually call for simulations.
    Runs multiple simulations of Decoding + EM (using different initialization / actual rates in each one) and saves the data for each. (This is how the box plots in paper are created)
    Depending on the Value of Parameters.n_Test_Data either the simulations are run sequentially but the decoding is parallelized or the simulations are run in parallel.
    Results are pickled to SaveFolder/name.pkl; parameters and a summary are written to SaveFolder/name.info.
    Raises ValueError if the target file exists or the folder is not writable.
    """
    #Containers for Logical Error Rates, and Real and Estimated rates of Qubit and Measurement errors
    LogicalErrorRates = []
    RealPhysicalRates = []
    RealMeasurementRates = []
    EstimatedPhysicalRates = []
    EstimatedMeasurementRates = []
    print("SaveFolder: ", SaveFolder, "Name : ", name)
    # Fail early before running hours of simulation: refuse to overwrite
    # existing data and verify the folder is writable.
    if os.path.isfile(SaveFolder+"/"+name+".pkl"):
        raise ValueError("Existing Data would be overwritten by this Simulation")
    try:
        f = open(SaveFolder+"/"+name+".pkl","wb")
        f.close()
    except:
        # NOTE(review): bare except also swallows e.g. KeyboardInterrupt;
        # consider narrowing to OSError.
        raise ValueError("SaveFolder could not be opened")
    print("Parameters: ")
    print(Parameters.ToDict())
    if Parameters.n_Test_Data > 0:
        """
        Parallelization is over the Test Data Set in this case each time the decoder is tested, no parallelization during EM
        """
        print("Simulations with Decoding")
        for i in range(Parameters.n_tries):
            print()
            print("Estimation Nr: ", i)
            LogicalErr,RealPhysRates,RealMeasRates,EstRates,EstMeasRates = TestDecoderEstimation(Parameters)
            LogicalErrorRates += [LogicalErr]
            RealPhysicalRates += [RealPhysRates]
            RealMeasurementRates += [RealMeasRates]
            EstimatedPhysicalRates += [EstRates]
            EstimatedMeasurementRates+=[EstMeasRates]
    else:
        """
        Parallelize over the Simulations
        """
        print("Simulations without Decoding")
        n_processes = mp.cpu_count()
        print("n_cpu: ", n_processes)
        TaskQueue = mp.JoinableQueue()
        ResultQueue = mp.Queue()
        SimulationWorkers = [QueueParallelization.QueueWorker(TaskQueue,ResultQueue,Print=True) for i in range(n_processes)]
        for i in range(Parameters.n_tries):
            TaskQueue.put(ErrorRatesEstimationTask(Parameters))
        for i in range(n_processes): #Put termination signals for the processes
            TaskQueue.put(None)
        for worker in SimulationWorkers:
            worker.start()
        TaskQueue.join()
        Results = [ResultQueue.get() for i in range(Parameters.n_tries)] #It is important that this is done before joining the processes, see:https://stackoverflow.com/questions/26025486/python-processes-not-joining
        print("TaskQueue joined")
        #Close all workers
        for worker in SimulationWorkers:
            worker.join()
        print("Workers closed")
        # Results = list(itertools.chain.from_iterable(res)) #Concatenate the results
        LogicalErrorRates = [r[0] for r in Results]
        RealPhysicalRates = [r[1] for r in Results]
        RealMeasurementRates = [r[2] for r in Results]
        EstimatedPhysicalRates = [r[3] for r in Results]
        EstimatedMeasurementRates = [r[4] for r in Results]
    LogicalErrorRates = np.array(LogicalErrorRates)
    RealPhysicalRates = np.array(RealPhysicalRates)
    EstimatedPhysicalRates = np.array(EstimatedPhysicalRates)
    RealMeasurementRates = np.array(RealMeasurementRates)
    EstimatedMeasurementRates = np.array(EstimatedMeasurementRates)
    # Summary statistics over all tries: absolute and relative change of the
    # logical error rate between the first and last decoding checkpoint.
    Gains = LogicalErrorRates[:,-1] - LogicalErrorRates[:,0]
    AverageGain = np.sum(Gains) / Gains.size
    StdDevGain = np.std(Gains,dtype = np.float64 ,ddof = 1)
    #RelativeImprovements = 1 - FinalRates / InitialRates
    RelativeImprovements = (LogicalErrorRates[:,0] - LogicalErrorRates[:,-1]) / LogicalErrorRates[:,0]
    AverageRelativeImprovement = np.sum(RelativeImprovements) / RelativeImprovements.size
    print("Improvement: ", AverageGain , " +- ", StdDevGain)
    print("Average Relative Improvement: ", AverageRelativeImprovement)
    InitialLogicalErrorRates = LogicalErrorRates[:,0]
    FinalLogicalErrorRates = LogicalErrorRates[:,-1] #Initial and FinalLogicalErrorRates are just for backwards compatibility with old plotting scripts, in principle one can jsut use the LogicalErrorRates array
    Data = {"LogicalErrorRates" : LogicalErrorRates,"InitialLogicalErrorRates":InitialLogicalErrorRates,"FinalLogicalErrorRates":FinalLogicalErrorRates,"Parameters":Parameters.ToDict(),"RelativeImprovements":RelativeImprovements, "RealPhysicalRates": RealPhysicalRates, "EstimatedPhysicalRates": EstimatedPhysicalRates, "RealMeasurementErrorRates":RealMeasurementRates, "EstimatedMeasurementErrorRates": EstimatedMeasurementRates}
    with open(SaveFolder+"/"+name+".pkl","wb") as out:
        pickle.dump(Data,out)
    with open(SaveFolder+"/"+name+".info","w") as out:
        json.dump(Parameters.ToDict(),out)
        out.write("\n")
        out.write("# Average Relative Improvement: " + str(AverageRelativeImprovement))
        out.write("\n"+"#"+str(datetime.datetime.now()))
def Setup5QubitModels(n_concat,p_mean,p_mean_measurement,Pseudocounts = 20,logdomain = True,FixRealRates = False):
    """
    Generate two concatenated 5-qubit code models: one with rates drawn from a
    Dirichlet around p_mean, one with fixed rates of exactly p_mean.
    If FixRealRates is True the roles are swapped, i.e. the real model gets the
    fixed rates and the guess is drawn from the Dirichlet.
    p_mean_measurement = None disables measurement error nodes entirely.
    Returns (RealModel, GuessModel).
    """
    BaseCode = bp.Create5QubitPerfectCode()
    n_physical_qubits = BaseCode.n_qubits ** n_concat
    n_total_checks = n_physical_qubits - 1  # assumes a single encoded qubit
    RandomRates = GetRandomRates(p_mean, Pseudocounts, n_physical_qubits, cardinality=4)
    FixedRates = np.tile(np.array([1 - 3 * p_mean, p_mean, p_mean, p_mean]), (n_physical_qubits, 1))
    if p_mean_measurement is not None:
        RandomMeasRates = GetRandomRates(p_mean_measurement, Pseudocounts, n_total_checks, cardinality=2)
        # Note: the fixed measurement rates are a flat python list, as the
        # ConcatQCode constructor expects.
        FixedMeasRates = [1 - p_mean_measurement, p_mean_measurement] * n_total_checks
    else:
        RandomMeasRates = None
        FixedMeasRates = None
    RandomModel = bp.ConcatQCode(BaseCode, n_concat, RandomRates, RandomMeasRates, logdomain)
    FixedModel = bp.ConcatQCode(BaseCode, n_concat, FixedRates, FixedMeasRates, logdomain)
    if FixRealRates:
        return FixedModel, RandomModel
    return RandomModel, FixedModel
def GetRandomRates(p_mean,PseudoSamples,n,cardinality):
    """
    Draw n error-rate vectors from a Dirichlet whose mean is
    (1 - (cardinality-1)*p_mean, p_mean, ..., p_mean), with concentration
    controlled by PseudoSamples. Returns an array of shape (n, cardinality);
    every row sums to one.
    """
    n_error_outcomes = cardinality - 1
    alpha = [PseudoSamples * (1 - n_error_outcomes * p_mean)]
    alpha.extend([PseudoSamples * p_mean] * n_error_outcomes)
    return np.random.dirichlet(alpha, n)