blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
0b732ca9a6a612c6f91f0b86b89a5b96b4035923 | Python | adnanshahz2018/Interview-Coding-Problems | /spiralprint.py | UTF-8 | 1,387 | 3.4375 | 3 | [] | no_license | # COMPLETED
# Spiral-order print of a 2D matrix (clockwise from the top-left corner).
# Elements are printed separated by " , ", and the marker 'line' is printed
# at the start of each lap around the current (shrinking) boundary.
# Sample inputs kept from the original author:
# A = [[1,2,3],
# [4,5,6],
# [7,8,9]]
# A = [ [ 1, 2, 3, 4 ],
# [ 5, 6, 7, 8 ],
# [ 9, 10, 11, 12 ],
# [ 13, 14, 15, 16 ],
# [17, 18, 19, 20],
# [21, 22, 23, 24] ]
A = [[1],[2], [3], [4], [5], [106]]
rowend = row = len(A) # the end boundary for traversal in a row
colend = col = len(A[0]) # the end boundary for traversal in a column
print(row)
print(col)
colstart = rowstart = 0
count = 0  # elements printed so far; the while loop stops once all row*col are out
# comments are 4 by 4 matrix
while (count < row*col):
    print('line')
    # top edge, left -> right
    # NOTE(review): A[colstart][i] only works because rowstart == colstart at
    # this point of every lap; A[rowstart][i] would be the clearer index.
    for i in range(colstart,colend): #iter-1, 1 2 3 4 #2-iter 6, 7
        print(A[colstart][i], end = " , ")
        count +=1
    if (count >= row*col): break
    rowstart +=1 # 1 , 2
    colend -= 1 # 0 , 2
    # right edge, top -> bottom
    for i in range(rowstart,rowend): #iter-1 8 12 16 #2-iter 11, 15
        print(A[i][colend], end=" , ")
        count +=1
    if (count >= row*col): break
    rowend -=1 # 3
    # bottom edge, right -> left
    for i in range(colend-1,colstart-1, -1): #iter-1 15 , 14, 13
        print(A[rowend][i], end=" , ")
        count +=1
    if (count >= row*col): break
    # rowend -= 1 # 3
    # left edge, bottom -> top
    for i in range(rowend-1,rowstart-1, -1): #iter-1 9, 5
        print(A[i][colstart], end=" , ")
        count +=1
    if (count >= row*col): break
    colstart +=1 # 1
| true |
3e95fc623d9e70a283ad3f67411fba0f606a481e | Python | Jackleila/Words-frequency | /nltkTest.py | UTF-8 | 620 | 3.421875 | 3 | [] | no_license | import nltk
import matplotlib.pyplot as ptl
from nltk.corpus import stopwords
# Interactive script: asks for a stopword language and a text file, then
# prints and plots the word frequencies of the file with stopwords removed.
#Language selection
language = input("Language: ")
#Opening and reading file
with open(input("Enter Filename: "), 'r') as myfile:
    data=myfile.read()
#Tokenizing (whitespace split only — no punctuation handling)
tokens = [t for t in data.split()]
#Removing stop words
clean_tokens = tokens[:]
sr = stopwords.words(language)
# NOTE(review): clean_tokens.remove() drops only the FIRST occurrence per hit,
# but since every occurrence of a stopword in tokens triggers one remove(),
# all occurrences end up removed. A set for `sr` would make the lookup O(1).
for token in tokens:
    if token in sr:
        clean_tokens.remove(token)
#Getting the frequency
freq = nltk.FreqDist(clean_tokens)
for key,val in freq.items():
    print (str(key) + ':' + str(val))
#Plotting the 20 most common tokens
freq.plot(20, cumulative=False)
ptl.show()
| true |
1bab41541c740c2e6d80b982cc9b102f01d13d69 | Python | FredrikM97/Medical-ROI | /src/roi.py | UTF-8 | 4,140 | 2.90625 | 3 | [
"Apache-2.0"
] | permissive | """
Transformation of ROI with the help of RoiAlign.
"""
from typing import List, Tuple, Union
import numpy as np
import torch
from roi_align import RoIAlign
from torchvision.ops._utils import convert_boxes_to_roi_format
from src.files.preprocess import tensor2numpy
class RoiTransform:
    """Apply a RoIAlign transform to change the shape of images, and convert boundary boxes to the ROI format expected by RoIAlign."""
    def __init__(self, output_shape:Tuple[int,int,int]=None, boundary_boxes:Union[List[Tuple[int,int,int,int,int,int]]]=None, batch_size=6,**args):
        """ Init RoiTransform object.
        Parameters
        ----------
        output_shape : Tuple[int,int,int]
            Spatial size of each extracted ROI, forwarded to RoIAlign. (Default value = None)
        boundary_boxes : Union[List[Tuple[int,int,int,int,int,int]]]
            Either a list of boxes applied to every sample, or a dict mapping a
            target label to its list of boxes. (Default value = None)
        batch_size :
            Stored but not used by this class. (Default value = 6)
        **args :
            Ignored extra keyword arguments.
        Returns
        -------
        """
        if not boundary_boxes: raise ValueError("bounding_boxes list can not be empty!")
        self.batch_size = batch_size
        self.roi = RoIAlign(output_shape,spatial_scale=1.0,sampling_ratio=-1)
        # NOTE(review): in the list branch, convert_boxes_to_roi_format returns a
        # Tensor, so self.boundary_boxes is no longer a `list` afterwards — the
        # `isinstance(self.boundary_boxes, list)` checks in __call__/__str__ below
        # can then never be True for this branch. Verify against actual usage.
        if isinstance(boundary_boxes, list):
            self.boundary_boxes = convert_boxes_to_roi_format([torch.stack([torch.Tensor(x) for x in boundary_boxes])])
            self.num_bbox = len(boundary_boxes)
        elif isinstance(boundary_boxes, dict):
            self.boundary_boxes = {key:convert_boxes_to_roi_format([torch.stack([torch.Tensor(x) for x in value])]) for key,value in boundary_boxes.items()}
            self.num_bbox = sum(map(len, self.boundary_boxes.values()))
        else:
            raise ValueError("boundary_boxes needs to be of type list or dict")
    def __call__(self, x:'tensor', y) -> 'Tuple[tensor, tensor]':
        """Expect to take an y of integer type and if boundary_boxes are a dict then the key should be a numeric value.
        Parameters
        ----------
        x : torch.Tensor
            Input value. Expect shape (B,C,D,H,W)
        y : Tensor
            Target value
        Returns
        -------
        Tuple of (aligned ROI crops, per-ROI targets). The target tensor is
        expanded so every extracted ROI carries the label of its source sample.
        """
        # NOTE(review): see __init__ — when initialized from a list this attribute
        # is a Tensor, so this branch appears unreachable as written; the check
        # likely should be `isinstance(self.boundary_boxes, torch.Tensor)`.
        if isinstance(self.boundary_boxes, list):
            image_rois = self.roi.forward(x,torch.cat(x.shape[0]*[self.boundary_boxes.to(x.device)]))#.detach()
            y = self.num_bbox*y
        elif isinstance(self.boundary_boxes, dict):
            # Pick the per-label boxes for each target and repeat each label once per box.
            image_rois = self.roi.forward(x,torch.cat([self.boundary_boxes[one_target].to(x.device) for one_target in tensor2numpy(y)]))
            y = torch.from_numpy(np.concatenate([len(self.boundary_boxes[one_target])*[one_target] for one_target in tensor2numpy(y)])).to(x.device)
        else:
            raise ValueError("boundary_boxes needs to be of type list or dict")
        return image_rois, y
    def __str__(self) -> str:
        """Human-readable summary of the configured boundary boxes."""
        return (
            f"\n\n***Defined ROI-Transformer:***\n"
            f"Number of BBoxes: {self.num_bbox}\n"
            f"BBox Count: {len(self.boundary_boxes) if isinstance(self.boundary_boxes, list) else ', '.join([f'{x}:{y}' for x,y in zip(self.boundary_boxes.keys(),map(len, self.boundary_boxes.values()))])}"
        )
def roi_align(image, boxes:list, output_shape:Tuple=(40,40,40), displayed:bool=False) -> 'torch.Tensor':
    """Create aligned image rois for the neural network
    Args:
      image: numpy array of shape (D,H,W); converted here to a (1,1,D,H,W) float CUDA tensor
      boxes(list): List of features (z0,y0,z1,y1,x0,x1). Shape is expected based on the input of ROIAlign
      output_shape(Tuple, optional): spatial size of each extracted ROI (Default value = (40,40,40))
      displayed(bool, optional): when True, plot each ROI slice-by-slice (Default value = False)
    Returns:
      torch.Tensor of aligned ROI crops, one per box.
    Raises:
      NameError: NOTE(review) — `plot` is not imported in this module, so
      displayed=True will fail unless `plot` is provided elsewhere. Confirm.
    """
    image_tensor = torch.from_numpy(image).unsqueeze(0).unsqueeze(0).float().cuda()
    box_tensor = [torch.stack([torch.tensor(x) for x in boxes]).cuda()]
    roialign = RoIAlign(output_shape,spatial_scale=1.0,sampling_ratio=-1)
    image_rois = roialign.forward(image_tensor,box_tensor)
    # None branched syntax
    if displayed:
        [plot.display_3D(x[0],step=1) for x in tensor2numpy(image_rois)]
return image_rois | true |
ef8830924b7969b19ee44bee356df159cd893d2f | Python | gulatiaditya30/Thesis | /testScripts/ftModelGenerator.py | UTF-8 | 1,776 | 2.703125 | 3 | [] | no_license | import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from keras import Sequential
from keras.layers import Dense
# NOTE(review): confusion_matrix is imported three times in this script
# (here, the next line, and again mid-script); one import suffices.
from sklearn.metrics import confusion_matrix
from sklearn.metrics import confusion_matrix
# Train a small dense binary classifier on 200 feature columns of a local CSV
# (column 0 is the label) and report a confusion matrix plus accuracy.
dataset = pd.read_csv("C:/Users/gulat/Desktop/thesis/gitThesis/trainingData/ftData/columnwise/MaxValueSynchedBalanced.csv")
print(dataset.head(2))
print(len(dataset.head(1)))
#print (dataset.describe(include='all'))
x= dataset.iloc[:,1:201]
y= dataset.iloc[:,0]
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
print("=====================")
classifier = Sequential()
#First Hidden Layer
classifier.add(Dense(100, activation='relu', kernel_initializer='random_normal', input_dim=200))
#Second Hidden Layer
classifier.add(Dense(50, activation='relu', kernel_initializer='random_normal'))
#Output Layer
classifier.add(Dense(1, activation='sigmoid', kernel_initializer='random_normal'))
#Compiling the neural network
classifier.compile(optimizer ='adam',loss='binary_crossentropy', metrics =['accuracy'])
#Fitting the data to the training dataset
classifier.fit(X_train,y_train, batch_size=10, epochs=100)
eval_model=classifier.evaluate(X_train, y_train)
eval_model  # NOTE(review): bare expression — has no effect outside a REPL/notebook
y_pred=classifier.predict(X_test)
y_pred =(y_pred>0.5)
#print("========================="+str(y_pred)+"==========================")
from sklearn.metrics import confusion_matrix
testevaluation = confusion_matrix(y_test, y_pred)
print(testevaluation)
a= testevaluation[0][0]
b = testevaluation[0][1]
c = testevaluation[1][0]
d = testevaluation[1][1]
# accuracy = (TN + TP) / total
print("final accuracy" + str((a+d)/(a+b+c+d)) )
print("===================================================")
print(y_test)
| true |
2c24e6bcb78da35c18619249fb438ae6f5d8a287 | Python | jimmy-academia/Deeper-Learnings | /codestosort/ComputerVision/yolov1/module/yololoss.py | UTF-8 | 5,685 | 2.515625 | 3 | [
"MIT"
] | permissive |
#encoding:utf-8
#
#created by xiongzihua 2017.12.26
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class myloss(nn.Module):
    """YOLO-v1 style detection loss.

    Prediction / target grids are laid out as (batch, S, S, B*5 + classes),
    each box being [x, y, w, h, confidence]. The slicing below assumes a last
    dimension of 26, i.e. 2 boxes * 5 + 16 classes.
    NOTE(review): the original docstring mentioned 30 (20 classes) — confirm
    the class count against the model config.

    Modernized from the original: deprecated ``size_average=False`` replaced
    with ``reduction='sum'``, deprecated ``torch.cuda.ByteTensor`` masks
    replaced with boolean tensors, and all temporaries are created on the
    input's device, so the loss also works on CPU.
    """
    def __init__(self):
        super(myloss, self).__init__()
        self.S = 7          # grid size
        self.B = 2          # boxes per cell
        self.l_coord = 5    # weight of the localization loss
        self.l_noobj = 0.5  # weight of the no-object confidence loss

    def compute_iou(self, box1, box2):
        '''Compute the intersection over union of two set of boxes, each box is [x1,y1,x2,y2].
        Args:
          box1: (tensor) bounding boxes, sized [N,4].
          box2: (tensor) bounding boxes, sized [M,4].
        Return:
          (tensor) iou, sized [N,M].
        '''
        N = box1.size(0)
        M = box2.size(0)
        # Upper-left corner of every pairwise intersection: [N,M,2]
        lt = torch.max(
            box1[:, :2].unsqueeze(1).expand(N, M, 2),
            box2[:, :2].unsqueeze(0).expand(N, M, 2),
        )
        # Lower-right corner of every pairwise intersection: [N,M,2]
        rb = torch.min(
            box1[:, 2:].unsqueeze(1).expand(N, M, 2),
            box2[:, 2:].unsqueeze(0).expand(N, M, 2),
        )
        wh = rb - lt
        wh[wh < 0] = 0  # non-overlapping pairs contribute zero area
        inter = wh[:, :, 0] * wh[:, :, 1]  # [N,M]
        area1 = (box1[:, 2] - box1[:, 0]) * (box1[:, 3] - box1[:, 1])
        area2 = (box2[:, 2] - box2[:, 0]) * (box2[:, 3] - box2[:, 1])
        area1 = area1.unsqueeze(1).expand_as(inter)
        area2 = area2.unsqueeze(0).expand_as(inter)
        return inter / (area1 + area2 - inter)

    def forward(self, pred_tensor, target_tensor):
        '''
        pred_tensor: (tensor) size(batchsize, S, S, B*5+16) [x,y,w,h,c]
        target_tensor: (tensor) size(batchsize, S, S, 26)
        Returns a scalar loss averaged over the batch.
        '''
        N = pred_tensor.size()[0]
        device = pred_tensor.device
        # Cells with / without an object (first box confidence in the target).
        coo_mask = target_tensor[:, :, :, 4] > 0
        noo_mask = target_tensor[:, :, :, 4] == 0
        coo_mask = coo_mask.unsqueeze(-1).expand_as(target_tensor)
        noo_mask = noo_mask.unsqueeze(-1).expand_as(target_tensor)

        coo_pred = pred_tensor[coo_mask].view(-1, 26)
        box_pred = coo_pred[:, :10].contiguous().view(-1, 5)   # two [x,y,w,h,c] rows per cell
        class_pred = coo_pred[:, 10:]

        coo_target = target_tensor[coo_mask].view(-1, 26)
        box_target = coo_target[:, :10].contiguous().view(-1, 5)
        class_target = coo_target[:, 10:]

        # --- confidence loss for cells that contain no object ---
        noo_pred = pred_tensor[noo_mask].view(-1, 26)
        noo_target = target_tensor[noo_mask].view(-1, 26)
        # only the two confidence entries (indices 4 and 9) take part
        noo_pred_mask = torch.zeros(noo_pred.size(), dtype=torch.bool, device=device)
        noo_pred_mask[:, 4] = True
        noo_pred_mask[:, 9] = True
        noo_pred_c = noo_pred[noo_pred_mask]
        noo_target_c = noo_target[noo_pred_mask]
        nooobj_loss = F.mse_loss(noo_pred_c, noo_target_c, reduction='sum')

        # --- losses for cells that contain an object ---
        coo_response_mask = torch.zeros(box_target.size(), dtype=torch.bool, device=device)
        coo_not_response_mask = torch.zeros(box_target.size(), dtype=torch.bool, device=device)
        box_target_iou = torch.zeros(box_target.size(), device=device)
        for i in range(0, box_target.size()[0], 2):  # choose the best-IOU box of each pair
            box1 = box_pred[i:i + 2]
            box1_xyxy = torch.zeros_like(box1)
            # Convert (center, size) boxes to corner form.
            # NOTE(review): the /14. divisor is kept from the original — confirm
            # it matches the grid resolution (self.S is 7 here).
            box1_xyxy[:, :2] = box1[:, :2] / 14. - 0.5 * box1[:, 2:4]
            box1_xyxy[:, 2:4] = box1[:, :2] / 14. + 0.5 * box1[:, 2:4]
            box2 = box_target[i].view(-1, 5)
            box2_xyxy = torch.zeros_like(box2)
            box2_xyxy[:, :2] = box2[:, :2] / 14. - 0.5 * box2[:, 2:4]
            box2_xyxy[:, 2:4] = box2[:, :2] / 14. + 0.5 * box2[:, 2:4]
            iou = self.compute_iou(box1_xyxy[:, :4], box2_xyxy[:, :4])  # [2,1]
            max_iou, max_index = iou.max(0)
            max_index = int(max_index)
            coo_response_mask[i + max_index] = True
            coo_not_response_mask[i + 1 - max_index] = True
            # we want the confidence score to equal the IOU between the
            # predicted box and the ground truth (treated as a constant target)
            box_target_iou[i + max_index, 4] = float(max_iou)

        # 1. loss for the responsible (best-IOU) predicted boxes
        box_pred_response = box_pred[coo_response_mask].view(-1, 5)
        box_target_response_iou = box_target_iou[coo_response_mask].view(-1, 5)
        box_target_response = box_target[coo_response_mask].view(-1, 5)
        contain_loss = F.mse_loss(box_pred_response[:, 4], box_target_response_iou[:, 4], reduction='sum')
        loc_loss = (
            F.mse_loss(box_pred_response[:, :2], box_target_response[:, :2], reduction='sum')
            + F.mse_loss(torch.sqrt(box_pred_response[:, 2:4]), torch.sqrt(box_target_response[:, 2:4]), reduction='sum')
        )

        # 2. confidence loss for the non-responsible boxes (target confidence 0)
        box_pred_not_response = box_pred[coo_not_response_mask].view(-1, 5)
        box_target_not_response = box_target[coo_not_response_mask].view(-1, 5)
        box_target_not_response[:, 4] = 0
        not_contain_loss = F.mse_loss(box_pred_not_response[:, 4], box_target_not_response[:, 4], reduction='sum')

        # 3. classification loss
        class_loss = F.mse_loss(class_pred, class_target, reduction='sum')

        return (self.l_coord * loc_loss + 1 * contain_loss + 0.5 * not_contain_loss
                + self.l_noobj * nooobj_loss + class_loss) / N
| true |
56b34ca49f5befe04b844cee265b3fc9ad053b87 | Python | KTH-EXPECA/ExperimentRecorder | /tests/test_experiment.py | UTF-8 | 7,043 | 2.78125 | 3 | [
"Apache-2.0"
] | permissive | # Copyright (c) 2021 KTH Royal Institute of Technology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import Any, NamedTuple, Type
import tables
from twisted.trial import unittest
from exprec import ExperimentWriter
from exprec.experiment import ExperimentElementError
class TestVariable(NamedTuple):
    """A (name, type, value) triple describing one experiment variable used by the tests below."""
    name: str
    type: Type
    value: Any
class TestExperiments(unittest.TestCase):
    """Integration tests for ExperimentWriter backed by a real HDF5 file.

    Each test gets a fresh experiment written to /tmp/h5test.h5 (created in
    setUp, flushed/closed/deleted in tearDown), so tests must not assume any
    state beyond what setUp creates.
    """
    h5file_path = Path('/tmp/h5test.h5')
    exp_id = 'TestExperiment'
    exp_title = 'Test Experiment'
    # variable types the writer supports
    exp_vars_valid = [
        TestVariable('a', int, 3),
        TestVariable('b', float, 3.9),
        TestVariable('c', bool, False)
    ]
    # variable types the writer must reject
    exp_vars_invalid = [
        TestVariable('d', str, 'Foo'),
        TestVariable('e', bytes, b'Bar'),
        TestVariable('f', object, object())
    ]
    sub_exp_id = 'SubExperiment'
    sub_exp_title = 'Sub Experiment'
    def setUp(self) -> None:
        """Create a fresh experiment file before every test."""
        self.assertFalse(self.h5file_path.exists())
        # open an experiment, check that the file exists too
        self.experiment = ExperimentWriter.create(
            file_path=self.h5file_path,
            exp_id=self.exp_id,
            exp_title=self.exp_title,
            variables={v.name: v.type for v in self.exp_vars_valid}
        )
        self.assertTrue(self.h5file_path.exists())
    def tearDown(self) -> None:
        """Flush, close and remove the experiment file after every test."""
        # flush, close and delete the file
        self.experiment.flush()
        self.experiment.close()
        self.assertFalse(self.experiment.h5file.isopen)
        self.h5file_path.unlink(missing_ok=False)
    def test_experiment_creation(self):
        """The top-level experiment group exists with the right type and title."""
        # check that the experiment was created correctly
        file = self.experiment.h5file
        # check that the experiment group was created correctly, has the
        # right type, and the correct title
        try:
            g = file.get_node(file.root, self.exp_id)
            self.assertIsInstance(g, tables.Group)
            self.assertEqual(g._v_attrs.TITLE, self.exp_title)
        except tables.NoSuchNodeError:
            self.fail('Could not find experiment group in HDF5 File.')
    def test_sub_experiment_creation(self):
        """Sub-experiments are created under the parent group, are retrievable, and cannot be recreated."""
        # with sub_experiments we can use the context manager mode
        def make_sub_exp():
            return self.experiment.make_sub_experiment(
                sub_exp_id=self.sub_exp_id,
                variables={v.name: v.type for v in self.exp_vars_valid},
                sub_exp_title=self.sub_exp_title
            )
        with make_sub_exp() as sub_exp:
            # file should be the same
            self.assertEqual(self.experiment.h5file, sub_exp.h5file)
            file = sub_exp.h5file
            # check that the experiment group was created correctly, has the
            # right type, and the correct title
            try:
                g = file.get_node(self.experiment._group, self.sub_exp_id)
                self.assertIsInstance(g, tables.Group)
                self.assertEqual(g._v_attrs.TITLE, self.sub_exp_title)
            except tables.NoSuchNodeError:
                self.fail('Could not find sub-experiment group in HDF5 File.')
            # check that we can get the same experiment from the top level
            # experiment by looking it up
            with self.experiment.get_sub_experiment(self.sub_exp_id) as \
                    same_sub_exp:
                self.assertEqual(same_sub_exp, sub_exp)
        # trying to recreate a sub-experiment should fail
        self.assertRaises(ExperimentElementError, make_sub_exp)
    def test_write_variables(self):
        """Recorded values land in the right table; type mismatches and unregistered names are rejected."""
        # check that values are actually stored correctly
        file = self.experiment.h5file
        for var in self.exp_vars_valid:
            self.experiment.record_variable(var.name, var.value, 0.0)
            # can't store mismatching types
            # boolean columns seem to coerce everything to
            # True/False, so they don't raise any TypeErrors
            if var.type != bool:
                self.assertRaises(TypeError,
                                  self.experiment.record_variable,
                                  var.name, object(), 1)
            # check the value
            tbl = file.get_node(self.experiment._group, var.name)
            self.assertIsInstance(tbl, tables.Table)
            tbl.flush()  # needed since we're gonna read it
            vals = [row['value'] for row in tbl.iterrows()]
            self.assertEqual(var.value, vals[0])
        # can't record variables which weren't registered
        for var in self.exp_vars_invalid:
            self.assertRaises(
                ExperimentElementError,
                lambda: self.experiment.record_variable(var.name, var.value, 0)
            )
    def test_dont_overwrite_file(self):
        """Creating an experiment on an existing path raises FileExistsError."""
        # trying to create an experiment on an already existing path should fail
        with self.assertRaises(FileExistsError):
            ExperimentWriter.create(
                file_path=self.h5file_path,
                exp_id=self.exp_id,
                exp_title=self.exp_title,
                variables={}
            )
    def test_invalid_variables(self):
        """Unsupported variable types abort creation and clean up the file."""
        # trying to create an experiment with invalid variable types should fail
        # and the file should be cleaned up
        fpath = Path('/tmp/should_not_exist.h5')
        with self.assertRaises(ExperimentElementError):
            ExperimentWriter.create(
                file_path=fpath,
                exp_id='invalid',
                exp_title='invalid variable types',
                variables={var.name: var.type for var in self.exp_vars_invalid}
            )
        self.assertFalse(fpath.exists())
    def test_register_sub_exp_with_name_conflict(self):
        """Sub-experiment ids may not collide with variable names or with each other."""
        # trying to register an experiment with the same name as an existing
        # variable should fail
        for var in self.exp_vars_valid:
            self.assertRaises(ExperimentElementError,
                              self.experiment.make_sub_experiment,
                              sub_exp_id=var.name,
                              variables={})
        # registering a valid sub-experiment twice should fail the second time
        self.experiment.make_sub_experiment(sub_exp_id=self.sub_exp_id,
                                            variables={})
        self.assertRaises(ExperimentElementError,
                          self.experiment.make_sub_experiment,
                          sub_exp_id=self.sub_exp_id,
                          variables={})
| true |
9459042466ad2b47960b32204dbc024d7edf5013 | Python | p2slugs/recipebox | /tester.py | UTF-8 | 3,052 | 4.125 | 4 | [] | no_license | import json
# Exercise script: build dictionaries of family members, round-trip them
# through a list, a dict, and JSON, then sort them into parents /
# grandparents / me and print each stage to the console.
#Make a Dictionary of you, your siblings, parents, and grandparents. Have at least 4 attributes per person.
fm1 = { "name":"Linda", "age":18, "food":"cheese"}
fm2 = { "name":"Christina", "age":44, "food":"seafood", "parent":True}
fm3 = { "name":"Henry", "age":48, "food":"beef", "parent":True}
fm4 = { "name":"Yu", "age":74, "food":"pork", "grandparent":True}
fm5 = { "name":"Zhong", "age":83, "food":"cookies", "grandparent":True}
#Turn the dictionaries into a list.
list_of_family_members = [fm1, fm2, fm3, fm4, fm5]
print("List Of Family Members")
print(type(list_of_family_members))
print(list_of_family_members)
print()
for family_member in list_of_family_members:
    print(family_member['name'] + ", " + str(family_member['age']) + ", " + family_member['food'])
print()
#Turn the list into a dictionary called 'family'
family = {'family_members': list_of_family_members}
#Print members to run/console
print("Dictionary Of Family Members")
print(type(family))
print(family)
family_members = family['family_members']
print()
for family_member in family_members:
    print(family_member['name'] + ", " + str(family_member['age']) + ", " + family_member['food'])
print()
#Turn the dictionary into a JSON file
json_family_members = json.dumps(family)
#Print the JSON file to the run/console window, inserting a character or emoji in between each item in the JSON.
print("JSON Family Members")
print(type(json_family_members))
print(json_family_members)
x = json.loads(json_family_members)
family_members = x['family_members']
print()
for family_member in family_members:
    print(family_member['name'] + ", " + str(family_member['age']) + ", " + family_member['food'])
    print('👍🏻')
print()
#Return JSON file to a dictionary with key "family"- my key is family_members
print("Return JSON File To A Dictionary - Key is family_members")
print(json_family_members)
#Using program logic, make new dictionaries from "family" key/values, creating "parents" and "grandparents" key with logically corresponding lists as values.
dictfamilymembers = json.loads(json_family_members)
listfamilymembers = dictfamilymembers['family_members']
parents = []
grandparents = []
me = []
# NOTE: "parent" is checked first, so a member with BOTH keys would be
# classified as a parent; grandparents here carry only "grandparent".
for member in listfamilymembers:
    if "parent" in member:
        parents.append(member)
    elif "grandparent" in member:
        grandparents.append(member)
    else:
        me.append(member)
#Print members to run/console in fashion that highlights
print()
print("Organized Members")
print()
print("Parents")
print(parents)
print()
for member in parents:
    print(member['name'] + ", " + str(member['age']) + ", " + member['food'])
print()
print("Grandparents")
print(grandparents)
print()
for member in grandparents:
    print(member['name'] + ", " + str(member['age']) + ", " + member['food'])
print()
print("Me")
print(me)
print()
for member in me:
    print(member['name'] + ", " + str(member['age']) + ", " + member['food'])
print()
#Place code in text form in journal as well as run/console output.
| true |
7489a431a1cd61822619b603e512f7aed1a363f7 | Python | atul7cloudyuga/stanfordkarel | /stanfordkarel/karel.py | UTF-8 | 15,609 | 3.03125 | 3 | [
"MIT"
] | permissive | """
This file defines the Karel class, which provides the actual
implementation of all functions described in the Karel Reference
Guide.
All instances of a Karel object store a reference to the world
in which they exist. Each Karel object exists on a given
(avenue, street) intersection and holds a certain number of beepers
in its beeper bag.
Original Author: Nicholas Bowman
Credits: Kylie Jue, Tyler Yep
License: MIT
Version: 1.0.0
Email: nbowman@stanford.edu
Date of Creation: 10/1/2019
Last Modified: 3/31/2020
"""
from .karel_ascii import compare_output, karel_ascii
from .karel_definitions import COLOR_MAP, INFINITY, Direction, KarelException
from .karel_world import KarelWorld
# Maps each direction to the direction 90 degrees counterclockwise,
# i.e. the result of a single left turn. Applying it twice yields the
# opposite direction.
NEXT_DIRECTION_MAP = {
    Direction.NORTH: Direction.WEST,
    Direction.WEST: Direction.SOUTH,
    Direction.SOUTH: Direction.EAST,
    Direction.EAST: Direction.NORTH,
}
# Inverse mapping: the direction 90 degrees clockwise (a right turn).
NEXT_DIRECTION_MAP_RIGHT = {v: k for k, v in NEXT_DIRECTION_MAP.items()}
# This map associates directions with the delta that Karel
# undergoes if it were to move one step in that direction
# delta is in terms of (avenue, street)
DIRECTION_DELTA_MAP = {
    Direction.NORTH: (0, 1),
    Direction.EAST: (1, 0),
    Direction.SOUTH: (0, -1),
    Direction.WEST: (-1, 0),
}
class Karel:
    def __init__(self, world_file=None):
        """
        This functions instantiates a new Karel instance and sets its
        location and current number of beepers to be the default starting
        values as indicated by the given world object.

        Parameters:
            world_file - Forwarded to KarelWorld to construct the world
                that Karel exists in (None loads KarelWorld's default).
        Returns: None
        """
        self.world = KarelWorld(world_file)
        self._avenue, self._street = self.world.karel_starting_location
        self._direction = self.world.karel_starting_direction
        self._num_beepers = self.world.karel_starting_beeper_count
    def __repr__(self):
        # ASCII rendering of the world with Karel drawn at its current corner.
        return karel_ascii(self.world, self.street, self.avenue)
    def __eq__(self, other):
        # Full-state equality: world, position, direction and beeper count.
        # NOTE: defining __eq__ without __hash__ makes Karel unhashable.
        return self.__dict__ == other.__dict__
    @property
    def avenue(self):
        """
        This property can be used to access Karel's current avenue location.

        Parameters: None
        Returns:
            avenue (int) - The current avenue Karel is standing on.
        """
        return self._avenue
    @avenue.setter
    def avenue(self, val):
        """
        This property can be used to set Karel's current avenue location.

        Parameters:
            val (int) - The new avenue that Karel will be standing on.
        Returns: None
        """
        self._avenue = val
    @property
    def street(self):
        """
        This property can be used to access Karel's current street location.

        Parameters: None
        Returns:
            street (int) - The current street Karel is standing on.
        """
        return self._street
    @street.setter
    def street(self, val):
        """
        This property can be used to set Karel's current street location.

        Parameters:
            val (int) - The new street that Karel will be standing on.
        Returns: None
        """
        self._street = val
    @property
    def direction(self):
        """
        This property can be used to access Karel's current direction.

        Parameters: None
        Returns:
            direction (Direction[Enum]) - The current direction Karel is facing.
        """
        return self._direction
    @direction.setter
    def direction(self, val):
        """
        This property can be used to set Karel's current direction.

        Parameters:
            val (Direction[Enum]) - The new direction that Karel will be facing.
        Returns: None
        """
        self._direction = val
    @property
    def num_beepers(self):
        """
        This property can be used to access Karel's current number of beepers.

        Parameters: None
        Returns:
            num_beepers (int) - The current number of beepers Karel has.
        """
        return self._num_beepers
    @num_beepers.setter
    def num_beepers(self, val):
        """
        This property can be used to set Karel's current number of beepers.

        Parameters:
            val (int) - The new number of beepers that Karel will have.
        Returns: None
        """
        self._num_beepers = val
def compare_with(self, other, two_columns=True):
"""
Options:
two_columns: bool (default=True)
"""
if self == other:
return True
if not two_columns:
print("\n\nStudent output:\n{}".format(self))
print("\nExpected output:\n{}".format(other))
return False
print(compare_output(self, other))
return False
    def reset_state(self):
        """
        This function is used to reset Karel's location and direction to the original
        starting parameters as indicated by the world that Karel lives in.

        Note: only Karel's own state (position, direction, beeper count) is
        reset here — beepers and paint already placed in the world are not
        touched by this method.

        Parameters: None
        Returns: None
        """
        self._avenue, self._street = self.world.karel_starting_location
        self._direction = self.world.karel_starting_direction
        self._num_beepers = self.world.karel_starting_beeper_count
def move(self):
"""
This function moves Karel forward one space in the direction that it is
currently facing. If Karel's front is not clear (blocked by wall or boundary
of world) then a KarelException will be raised).
Parameters: None
Returns: None
"""
if not self.front_is_clear():
raise KarelException(
self._avenue,
self._street,
self._direction,
"Karel attempted to move, but its front was blocked.",
)
delta_avenue, delta_street = DIRECTION_DELTA_MAP[self._direction]
self._avenue += delta_avenue
self._street += delta_street
    def turn_left(self):
        """
        This function turns Karel 90 degrees counterclockwise.

        Parameters: None
        Returns: None
        """
        # NEXT_DIRECTION_MAP encodes a single left (counterclockwise) turn.
        self._direction = NEXT_DIRECTION_MAP[self._direction]
def put_beeper(self):
"""
This function places a beeper on the corner that Karel is currently standing
on and decreases Karel's beeper count by 1. If Karel has no more beepers in its
beeper bag, then this function raises a KarelException.
Parameters: None
Returns: None
"""
if self._num_beepers == 0:
raise KarelException(
self._avenue,
self._street,
self._direction,
"Karel attempted to put a beeper, but it had none left in its bag.",
)
if self._num_beepers != INFINITY:
self._num_beepers -= 1
self.world.add_beeper(self._avenue, self._street)
def pick_beeper(self):
"""
This function removes a beeper from the corner that Karel is currently
standing on and increases Karel's beeper count by 1. If there are no beepers
on Karel's current corner, then this function raises a KarelException.
Parameters: None
Returns: None
"""
if not self.beepers_present():
raise KarelException(
self._avenue,
self._street,
self._direction,
"Karel attempted to pick up a beeper, "
"but there were none on the current corner.",
)
if self._num_beepers != INFINITY:
self._num_beepers += 1
self.world.remove_beeper(self._avenue, self._street)
    def front_is_clear(self):
        """
        This function returns a boolean indicating whether or not there is a wall
        in front of Karel.

        Parameters: None
        Returns:
            is_clear (Bool) - True if there is no wall in front of Karel
                              False otherwise
        """
        return self.direction_is_clear(self._direction)
    def direction_is_clear(self, direction):
        """
        This is a helper function that returns a boolean indicating whether
        or not there is a barrier in the specified direction of Karel.

        Parameters:
            direction (Direction[Enum]) - The direction in which to check for a barrier
        Returns:
            is_clear (Bool) - True if there is no barrier in the specified direction
                              False otherwise
        """
        delta_avenue, delta_street = DIRECTION_DELTA_MAP[direction]
        next_avenue = self._avenue + delta_avenue
        next_street = self._street + delta_street
        # front is not clear if we are about to go out of bounds
        if not self.world.in_bounds(next_avenue, next_street):
            return False
        # front is not clear if wall exists in same direction we're currently facing
        if self.world.wall_exists(self._avenue, self._street, direction):
            return False
        # must also check for alternate possible representation of wall:
        # the same wall may be stored on the adjacent corner facing back at us.
        # Applying NEXT_DIRECTION_MAP twice yields the 180-degree opposite.
        opposite_direction = NEXT_DIRECTION_MAP[NEXT_DIRECTION_MAP[direction]]
        if self.world.wall_exists(next_avenue, next_street, opposite_direction):
            return False
        # If all previous conditions checked out, then the front is clear
        return True
    def front_is_blocked(self):
        """
        This function returns a boolean indicating whether there is a wall
        in front of Karel.

        Parameters: None
        Returns:
            is_blocked (Bool) - True if there is a wall in front of Karel
                                False otherwise
        """
        return not self.front_is_clear()
    def left_is_clear(self):
        """
        This function returns a boolean indicating whether or not there is a wall
        to the left of Karel.

        Parameters: None
        Returns:
            is_clear (Bool) - True if there is no wall to the left of Karel
                              False otherwise
        """
        # NEXT_DIRECTION_MAP rotates Karel's heading counterclockwise (left).
        return self.direction_is_clear(NEXT_DIRECTION_MAP[self._direction])
    def left_is_blocked(self):
        """
        This function returns a boolean indicating whether there is a wall
        to the left of Karel.

        Parameters: None
        Returns:
            is_blocked (Bool) - True if there is a wall to the left of Karel
                                False otherwise
        """
        return not self.left_is_clear()
    def right_is_clear(self):
        """
        This function returns a boolean indicating whether or not there is a wall
        to the right of Karel.

        Parameters: None
        Returns:
            is_clear (Bool) - True if there is no wall to the right of Karel
                              False otherwise
        """
        # NEXT_DIRECTION_MAP_RIGHT rotates Karel's heading clockwise (right).
        return self.direction_is_clear(NEXT_DIRECTION_MAP_RIGHT[self._direction])
    def right_is_blocked(self):
        """
        This function returns a boolean indicating whether there is a wall
        to the right of Karel.

        Parameters: None
        Returns:
            is_blocked (Bool) - True if there is a wall to the right of Karel
                                False otherwise
        """
        return not self.right_is_clear()
    def beepers_present(self):
        """
        This function returns a boolean indicating whether or not there is
        a beeper on Karel's current corner.

        Parameters: None
        Returns:
            beepers_on_corner (Bool) - True if there's at least one beeper
                                       on Karel's current corner, False otherwise
        """
        return self.world.beepers[(self.avenue, self.street)] != 0
    def no_beepers_present(self):
        """Return True when Karel's current corner has no beepers."""
        return not self.beepers_present()
    def beepers_in_bag(self):
        """
        This function returns a boolean indicating whether or not there is
        at least one beeper in Karel's beeper bag.

        Parameters: None
        Returns:
            beepers_in_bag (Bool) - True if there is at least one beeper in Karel's bag
                                    False otherwise
        """
        # Can't check > 0 because INFINITY beepers is -1
        return self._num_beepers != 0
    def no_beepers_in_bag(self):
        """Return True when Karel's beeper bag is empty."""
        # Only 0 beepers in bag indicates empty bag – negative represents INFINITY
        return self._num_beepers == 0
def facing_north(self):
"""
This function returns a boolean indicating whether or not Karel is currently
facing North.
Parameters: None
Returns:
facing_north (Bool) - True if Karel is currently facing North
False otherwise
"""
return self.direction == Direction.NORTH
def not_facing_north(self):
return not self.facing_north()
def facing_east(self):
"""
This function returns a boolean indicating whether or not Karel is currently
facing East.
Parameters: None
Returns:
facing_east (Bool) - True if Karel is currently facing East
False otherwise
"""
return self.direction == Direction.EAST
    def not_facing_east(self):
        """Return True if Karel is not currently facing East."""
        return not self.facing_east()
def facing_west(self):
"""
This function returns a boolean indicating whether or not Karel is currently
facing West.
Parameters: None
Returns:
facing_west (Bool) - True if Karel is currently facing West
False otherwise
"""
return self.direction == Direction.WEST
    def not_facing_west(self):
        """Return True if Karel is not currently facing West."""
        return not self.facing_west()
def facing_south(self):
"""
This function returns a boolean indicating whether or not Karel is currently
facing South.
Parameters: None
Returns:
facing_south (Bool) - True if Karel is currently facing South
False otherwise
"""
return self.direction == Direction.SOUTH
    def not_facing_south(self):
        """Return True if Karel is not currently facing South."""
        return not self.facing_south()
def paint_corner(self, color):
"""
This function makes Karel paint its current corner the indicated color.
This function will raise a KarelException if the indicated color is not one
of the valid predefined colors. For this list of colors, check the
kareldefinitions.py file.
Parameters:
color (str) - The color string specifying which color to paint the corner
Returns: None
"""
if color is not None and color not in COLOR_MAP:
raise KarelException(
self._avenue,
self._street,
self._direction,
"Karel attempted to paint the corner with color {}, "
"which is not valid.".format(color),
)
self.world.paint_corner(self.avenue, self.street, color)
def corner_color_is(self, color):
"""
This function returns a boolean indicating whether or not Karel's current
corner is the specified color.
Parameters:
color (str) - Color string representing the color to
check the current corner for
Returns:
is_color (Bool) - True if Karel's current corner is the specified color
False otherwise
"""
return self.world.corner_color(self.avenue, self.street) == color
| true |
755806277505a0ddd237c3205d11be972b5f1fd0 | Python | alexeyvkuznetsov/Latin_Text_Preprocessing_Python | /2/BasicNLP.py | UTF-8 | 3,189 | 2.984375 | 3 | [] | no_license | import nltk
from cltk.tokenize.sentence import TokenizeSentence
from cltk.tokenize.word import WordTokenizer
from collections import Counter
from IPython.display import Image
from cltk.stop.latin import STOPS_LIST
# See http://docs.cltk.org/en/latest/latin.html#sentence-tokenization
# Source text: the preface of Cato the Elder's "De Agri Cultura".
cato_agri_praef = "Est interdum praestare mercaturis rem quaerere, nisi tam periculosum sit, et item foenerari, si tam honestum. Maiores nostri sic habuerunt et ita in legibus posiverunt: furem dupli condemnari, foeneratorem quadrupli. Quanto peiorem civem existimarint foeneratorem quam furem, hinc licet existimare. Et virum bonum quom laudabant, ita laudabant: bonum agricolam bonumque colonum; amplissime laudari existimabatur qui ita laudabatur. Mercatorem autem strenuum studiosumque rei quaerendae existimo, verum, ut supra dixi, periculosum et calamitosum. At ex agricolis et viri fortissimi et milites strenuissimi gignuntur, maximeque pius quaestus stabilissimusque consequitur minimeque invidiosus, minimeque male cogitantes sunt qui in eo studio occupati sunt. Nunc, ut ad rem redeam, quod promisi institutum principium hoc erit."
# Lower-case copy used for word tokenization so counts are case-insensitive.
cato_agri_praef_lowered = cato_agri_praef.lower()
# create a tokenizer instance of the TokenizeSentence Class
latin_sentence_tokenizer = TokenizeSentence('latin')
#tokenize the text into sentence tokens
cato_sentence_tokens = latin_sentence_tokenizer.tokenize_sentences(cato_agri_praef)
# tokenize the text (or specific sentences) into specific words
latin_word_tokenizer = WordTokenizer('latin')
cato_word_tokens = latin_word_tokenizer.tokenize(cato_agri_praef_lowered)
# Drop bare punctuation tokens before counting words.
cato_word_tokens_WO_punt = [token for token in cato_word_tokens if token not in ['.', ',', ':', ';']]
#print the tokens and the number of tokens
num_of_sentences = len(cato_sentence_tokens)
num_of_words = len(cato_word_tokens_WO_punt)
#print("There are " + str(num_of_sentences) + " sentences in the text")
#print("There are " + str(num_of_words) + " words in the text")
# for sentence in cato_sentence_tokens:
#     print(sentence)
#     print()
#print(cato_word_tokens_WO_punt)
#You can actually make the words unique by using a set
cato_word_tokens_WO_punt_unique = set(cato_word_tokens_WO_punt)
num_of_unique_words = len(cato_word_tokens_WO_punt_unique)
print("There are " + str(num_of_unique_words) + " unique words in the text")
print(cato_word_tokens_WO_punt_unique)
#lets alphabetize all the words in the list
alphabetized_list = []
for word in cato_word_tokens_WO_punt_unique:
    alphabetized_list.append(word)
alphabetized_list.sort()
#print the new list
print(alphabetized_list)
#Count the amount of times that words occur
#The counter is actually a dictionary, so you can look up
#The frequency of specific words
cato_word_counts_counter = Counter(cato_word_tokens_WO_punt)
print(cato_word_counts_counter)
#todo go back and figure out how to show charts and graphs
# NOTE(review): Image(...) only displays inside IPython/Jupyter; as a plain
# script this expression's result is discarded.
Image('images/tableau_bubble.png')
#these are all the stopwords included in the stops list for latin
print(STOPS_LIST)
# Remove Latin stopwords, then recount frequencies on the filtered tokens.
cato_no_stops = [w for w in cato_word_tokens_WO_punt if not w in STOPS_LIST]
print(cato_no_stops)
cato_no_stops_counter = Counter(cato_no_stops)
print(cato_no_stops_counter)
| true |
8bdc88eac09f773c27844ba64c54f2064ce187d0 | Python | Aasthaengg/IBMdataset | /Python_codes/p02678/s623249206.py | UTF-8 | 750 | 2.875 | 3 | [] | no_license | from collections import deque
# Read graph: N rooms and M bidirectional corridors (1-based in the input).
N, M = map(int, input().split())
route = [[] for _ in range(N)]
for _ in range(M):
    A, B = map(int, input().split())
    route[A-1].append(B-1)
    route[B-1].append(A-1)
# ans[v] will hold the BFS-tree parent of room v (0-based).
ans = [-1]*(N)
def bfs():
    """Breadth-first search from room 0, recording each room's parent in ans."""
    tmp = 0
    prv = 0
    visited = [False]*N
    kouho = deque()
    # Seed the queue with room 0's neighbours, parented to room 0.
    for room in route[0]:
        kouho.append([room,0])
    while True:
        visited[tmp] = True
        ans[tmp] = prv
        # Duplicate entries may be enqueued; they are skipped on dequeue below.
        for room in route[tmp]:
            if not visited[room]:
                kouho.append([room,tmp])
        if len(kouho) == 0:
            return
        # Pop until an unvisited room surfaces (or the queue runs dry).
        while True:
            tmp_ = kouho.popleft()
            tmp = tmp_[0]
            prv = tmp_[1]
            if not visited[tmp]:
                break
            if len(kouho) == 0:
                return
bfs()
# NOTE(review): "Yes" is printed unconditionally - assumes the graph is
# connected, which the problem statement presumably guarantees.
print("Yes")
for n in range(1,N):
    print(ans[n]+1)
| true |
8602d3352338e5c060206213c0cdc9f0654fa320 | Python | RonElhar/IsraeliMediaTendency | /NewsSpider/news_scraper.py | UTF-8 | 3,785 | 2.890625 | 3 | [] | no_license | import re
from datetime import date
from bs4 import BeautifulSoup
from abc import abstractmethod
class NewsScraperBS(BeautifulSoup):
    """BeautifulSoup-based base class for per-domain news page scrapers.

    Subclasses (e.g. YnetScraper) override find_links and
    is_relevant_article for their specific site layout.
    """
    def __init__(self, html_page, domain_name, base_url, **kwargs):
        super().__init__(html_page, features='html.parser')
        self.domain_name = domain_name
        self.base_url = base_url
    @abstractmethod
    def find_links(self):
        # NOTE(review): find_links_ynet is not defined on this class or on
        # YnetScraper (which overrides find_links instead), so this fallback
        # would raise AttributeError if ever reached - confirm it is dead code.
        if self.domain_name == 'ynet':
            return self.find_links_ynet()
    @abstractmethod
    def is_relevant_article(self, from_date, until_date):
        # NOTE(review): is_relevant_ynet is likewise undefined in the visible
        # hierarchy; subclasses are expected to override this method.
        return self.is_relevant_ynet(from_date, until_date)
class YnetScraper(NewsScraperBS):
    """Scraper for ynet.co.il pages: link harvesting and article filtering."""
    def __init__(self, html_page, base_url, **kwargs):
        super(YnetScraper, self).__init__(html_page, "ynet", base_url, **kwargs)
    def find_links(self):
        """Return a set of article/home links found on a News/Central page."""
        meta_category = self.find('meta', attrs={'name': re.compile("^vr:category")})
        if meta_category is None:
            return []
        category = meta_category.get("content")
        # Only index pages of these two categories are harvested for links.
        if category != 'News' and category != 'Central':
            return []
        links = set()
        link_tags = self.findAll('a', attrs={'href': re.compile("^/articles|/home|^http://www.ynet")})
        for link_tag in link_tags:
            link = link_tag.get('href')
            if 'tags' in link:
                continue
            # Relative links are made absolute; stray quotes are stripped.
            if link.startswith('/'):
                link = str.replace('https://ynet.co.il' + link, '\'', '')
            links.add(link)
        return links
    def is_relevant_article(self, from_date, until_date):
        """Return True for an og:type=article page published inside the range."""
        meta_type = self.find('meta', attrs={'property': re.compile("^og:type")})
        if meta_type is None or meta_type.get('content') != 'article':
            return False
        meta_published = self.find('meta', attrs={'property': re.compile("^og:published_time")})
        if meta_published is None:
            return False
        if not self.is_published_between(meta_published, from_date, until_date):
            return False
        return True
    def is_published_between(self, meta_published, from_date, until_date):
        """Parse the 'HH:MM , DD.MM.YY' meta content and range-check the date."""
        # NOTE(review): the two-digit year is passed to date() as-is, so
        # '19' becomes year 19, not 2019 - the comparison still works if the
        # from/until dates use the same convention; confirm with callers.
        date_str = str.split(str.split(meta_published.get("content"), ',')[1], '.')
        published = date(int(date_str[2]), int(date_str[1]), int(date_str[0]))
        return from_date <= published <= until_date
    def insert_before(self, successor):
        # Deliberate no-op override of the BeautifulSoup tree-editing method.
        pass
    def insert_after(self, successor):
        # Deliberate no-op override of the BeautifulSoup tree-editing method.
        pass
class NewsScraperGenerator:
    """Factory that builds the scraper matching a given news domain."""
    def __init__(self, html_page, domain_name, base_url, **kwargs):
        self.html_page = html_page
        self.domain_name = domain_name
        self.base_url = base_url
        self.kwargs = kwargs
    def generate(self):
        """Return a concrete scraper for domain_name; only 'ynet' is supported
        (returns None for any other domain)."""
        if self.domain_name == 'ynet':
            return YnetScraper(self.html_page, self.base_url, **self.kwargs)
# print(is_published_after_ynet(date(94, 8, 24)))
# print(is_published_after_ynet(date(94, 8, 22)))
# <meta property="og:type" content="article">
# <meta property="og:title" content="בהשבעת הכנסת: מאבטחי נתניהו דחפו את ריבלין ואת חיות">
# <meta property="og:description" content="מאבטחי היחידה לאבטחת אישים, הממונים על אבטחת רה"מ, דחפו שוב ושוב את נשיא המדינה ואת נשיאת העליון עד שהשניים סירבו להמשיך לצעוד עם נתניהו ואדלשטיין. ל"ידיעות אחרונות" נודע כי בעקבות זאת נערך בשב"כ בירור. שב"כ: "נתחקר מול בית הנשיא". בית הנשיא: אין לנו טענות לשב"כ">
# <meta property="og:published_time" content="07:30 , 03.05.19">
| true |
d9ce3ed16e54040b2fbdba7aaf23ccd694c1f30f | Python | luismedinaeng/holbertonschool-higher_level_programming | /0x0B-python-input_output/7-save_to_json_file.py | UTF-8 | 173 | 2.78125 | 3 | [] | no_license | #!/usr/bin/python3
def save_to_json_file(my_obj, filename):
    """Serialize my_obj as JSON and write it to filename (UTF-8).

    Args:
        my_obj: any JSON-serializable Python object.
        filename: path of the file to create/overwrite.
    """
    import json
    with open(filename, mode="w", encoding="utf-8") as out_file:
        json.dump(my_obj, out_file)
| true |
53c60eb1da673eb8203d89f66e52034b9de921f7 | Python | LRegan666/Athene_Leetcode | /Subsets_II.py | UTF-8 | 776 | 3.359375 | 3 | [] | no_license | class Solution:
def subsetsWithDup(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
if not nums:
return [[]]
tmp, res =[], []
for k in range(len(nums)+1):
self.search_subset(nums, k, tmp, res)
return res
    def search_subset(self, nums, k, tmp, res):
        """Recursively append every distinct k-element subset to res.

        nums: remaining candidates; tmp: the partial subset built so far;
        res: the shared output list (mutated in place).
        """
        if len(tmp) == k:
            # Sort before the membership test so permutations compare equal.
            tmp.sort()
            if tmp not in res:
                res.append(tmp)
            return
        if not nums:
            return
        # Branch on each remaining element; slicing keeps selections ordered.
        for i in range(len(nums)):
            self.search_subset(nums[i+1:], k, tmp+[nums[i]], res)
if __name__ == '__main__':
    # Demo run: print every distinct subset of the sample list.
    sample = [1, 2, 3, 4, 5, 6, 7, 8, 10, 0]
    solver = Solution()
    print(solver.subsetsWithDup(sample))
| true |
aa46e9bb0686991316cdc9e0be1023626af70262 | Python | itgsod-Isak-Johansson/RomerskaSiffror | /test/romanize_test.py | UTF-8 | 1,896 | 3.5625 | 4 | [] | no_license | #encoding: utf-8
import random
from nose.tools import *
import sys
sys.path.append('..')
from romanizer import romanize
def test_romanize_takes_a_number_as_argument():
    # Calling romanize() without an argument must raise TypeError.
    with assert_raises(TypeError):
        romanize()
def test_romanize_number_can_not_be_negative():
    with assert_raises(ValueError) as e:
        romanize(random.randint(1, 1000) * -1)
    # Python 3 exceptions have no `.message` attribute; str() of the
    # exception yields its message on both Python 2 and 3.
    assert_equal(str(e.exception), 'can not romanize negative numbers')
def test_romanize_number_can_not_be_zero():
    with assert_raises(ValueError) as e:
        romanize(0)
    # Python 3 exceptions have no `.message` attribute; str() of the
    # exception yields its message on both Python 2 and 3.
    assert_equal(str(e.exception), 'can not romanize zero')
def test_romanize_returns_string():
    # Whatever valid number goes in, a str must come out.
    value = romanize(random.randint(1, 1000))
    assert_is_instance(value, str)
def test_romanize_should_encode_single_digit_numbers():
    # Expected numerals for every value 1-9.
    expected = {1: 'I', 2: 'II', 3: 'III', 4: 'IV', 5: 'V',
                6: 'VI', 7: 'VII', 8: 'VIII', 9: 'IX'}
    for number, numeral in expected.items():
        assert_equal(romanize(number), numeral)
def test_romanize_should_encode_double_digit_numbers():
    # Representative two-digit values covering X, XL, L, XC forms.
    expected = {10: 'X', 12: 'XII', 20: 'XX', 36: 'XXXVI',
                44: 'XLIV', 87: 'LXXXVII', 92: 'XCII'}
    for number, numeral in expected.items():
        assert_equal(romanize(number), numeral)
def test_romanize_should_encode_triple_digit_numbers():
    # Representative three-digit values covering C, D, CM forms.
    expected = {100: 'C', 666: 'DCLXVI', 747: 'DCCXLVII', 999: 'CMXCIX'}
    for number, numeral in expected.items():
        assert_equal(romanize(number), numeral)
def test_romanize_should_encode_four_digit_numbers():
    # Representative four-digit values covering M, CM, MM forms.
    expected = {1000: 'M', 1066: 'MLXVI', 1492: 'MCDXCII',
                1978: 'MCMLXXVIII', 2063: 'MMLXIII'}
    for number, numeral in expected.items():
        assert_equal(romanize(number), numeral)
| true |
ee6cb546897f8ea9acb350935d275ee08bd8ce5a | Python | pints-team/pints | /pints/tests/test_nested_rejection_sampler.py | UTF-8 | 3,141 | 2.90625 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python3
#
# Tests nested rejection sampler.
#
# This file is part of PINTS (https://github.com/pints-team/pints/) which is
# released under the BSD 3-clause license. See accompanying LICENSE.md for
# copyright notice and full license details.
#
import unittest
import numpy as np
import pints
import pints.toy
class TestNestedRejectionSampler(unittest.TestCase):
    """
    Unit (not functional!) tests for :class:`NestedRejectionSampler`.
    """
    @classmethod
    def setUpClass(cls):
        """ Prepare for the test. """
        # Create toy model
        model = pints.toy.LogisticModel()
        cls.real_parameters = [0.015, 500]
        times = np.linspace(0, 1000, 1000)
        values = model.simulate(cls.real_parameters, times)
        # Add noise
        # Seeded so every test run sees identical synthetic data.
        np.random.seed(1)
        cls.noise = 10
        values += np.random.normal(0, cls.noise, values.shape)
        cls.real_parameters.append(cls.noise)
        # Create an object with links to the model and time series
        problem = pints.SingleOutputProblem(model, times, values)
        # Create a uniform prior over both the parameters and the new noise
        # variable
        cls.log_prior = pints.UniformLogPrior(
            [0.01, 400],
            [0.02, 600]
        )
        # Create a log-likelihood
        cls.log_likelihood = pints.GaussianKnownSigmaLogLikelihood(
            problem, cls.noise)
    def test_construction_errors(self):
        # Tests if invalid constructor calls are picked up.
        # First arg must be a log likelihood
        self.assertRaisesRegex(
            ValueError, 'must extend pints.LogPrior',
            pints.NestedRejectionSampler, self.log_likelihood)
    def test_hyper_params(self):
        # Tests the hyper parameter interface is working.
        sampler = pints.NestedRejectionSampler(self.log_prior)
        self.assertEqual(sampler.n_hyper_parameters(), 1)
        sampler.set_hyper_parameters([220])
    def test_getters_and_setters(self):
        # Tests various get() and set() methods.
        sampler = pints.NestedRejectionSampler(self.log_prior)
        # Active points
        # x is deliberately different from the current value, so the
        # set/get round-trip below proves the setter took effect.
        x = sampler.n_active_points() + 1
        self.assertNotEqual(sampler.n_active_points(), x)
        sampler.set_n_active_points(x)
        self.assertEqual(sampler.n_active_points(), x)
        self.assertRaisesRegex(
            ValueError, 'greater than 5', sampler.set_n_active_points, 5)
        self.assertEqual(sampler.name(), 'Nested rejection sampler')
        self.assertTrue(not sampler.needs_initial_phase())
    def test_ask(self):
        # Tests ask.
        # Single-point ask: the proposal must have finite log-likelihood.
        sampler = pints.NestedRejectionSampler(self.log_prior)
        pts = sampler.ask(1)
        self.assertTrue(np.isfinite(self.log_likelihood(pts)))
        # test multiple points being asked and tell'd
        sampler = pints.NestedRejectionSampler(self.log_prior)
        pts = sampler.ask(50)
        self.assertEqual(len(pts), 50)
        fx = [self.log_likelihood(pt) for pt in pts]
        proposed = sampler.tell(fx)
        self.assertTrue(len(proposed) > 1)
# Allow running this test module directly with `python`.
if __name__ == '__main__':
    unittest.main()
| true |
94cd33a4e9b13f15d7999e0fc2325eaf987c061c | Python | Gangezilla/notes-py | /notes/current_note_gui.py | UTF-8 | 888 | 2.953125 | 3 | [] | no_license | import tkinter as tk
class CurrentNoteGUI:
    """Tkinter widget group that displays one note and lets the user save it.

    NOTE(review): selected_note appears to be a dict with at least a
    "Content" key, and save_note a callback persisting a note dict -
    confirm against the callers.
    """
    def __init__(self, frame, selected_note, save_note):
        print('making current note', selected_note)
        scroll = tk.Scrollbar(frame)
        text = tk.Text(frame)
        text.insert('end', selected_note["Content"])
        # NOTE(review): the button is created without an explicit parent, so
        # it attaches to the default root window rather than `frame` -
        # confirm that is intended.
        button = tk.Button(text="Save", command=lambda: self.save(selected_note, save_note, text))
        scroll.pack(side='right', fill='y')
        text.pack(side='left', fill='y')
        button.pack(side='bottom')
        # Wire the scrollbar and the text widget to each other.
        scroll.config(command=text.yview)
        text.config(yscrollcommand=scroll.set)
        print(text.get(1.0, 'end'))
    def save(self, selected_note, save_note, text):
        """Copy the note, replace its content with the editor text, persist it."""
        new_note = dict(selected_note)
        new_note["Content"] = text.get(1.0, 'end')
        save_note(new_note)
save_note(new_note)
# def update_selected_note(self, selected_note):
# def delete(self, selected_note):
| true |
5932cc723100e840e0dd88242432293bf5f52320 | Python | joechung99/Computer-Programming-and-Engineering-Application | /project1/0551287hw1.py | UTF-8 | 1,445 | 3.046875 | 3 | [] | no_license | def readfile():
    # Parse 0551287IN.txt into point, bar and area records. Each line is
    # split on '=' and ',' so line[0] is the record label.
    # NOTE(review): the file handle is never closed - consider `with open(...)`.
    import re
    f = open('0551287IN.txt','r')
    node=list()
    bar=list()
    area=list()
    for line in f.readlines():
        line = line.strip()
        line=re.split('=|,',line)
        # "points=N" gives the node count.
        if line[0].find('points')!=-1:
            nodenum=int(line[1])
            continue
        # "pK=x,y" lines are node coordinates.
        elif line[0].find('p')!=-1 and line[0]!='points':
            node.append(line)
            continue
        # "bars=N" gives the bar count.
        elif line[0].find('bars')!=-1:
            barsnum=int(line[1])
            continue
        # "barK=i,j" lines connect two nodes.
        elif line[0].find('bar')!=-1 and line[0]!='bars':
            bar.append(line)
            continue
        # "areaK=A" lines give each bar's cross-section area.
        elif line[0].find('area')!=-1:
            area.append(line)
            continue
    return nodenum,node,barsnum,bar,area
def cal(nodenum, node, barsnum, bar, area):
    """Compute each bar's length and axial stiffness E*A/L.

    Args:
        nodenum: number of nodes (unused here, kept for the call signature).
        node: node records like ['pK', 'x', 'y'] (string coordinates).
        barsnum: number of bars.
        bar: bar records like ['barK', 'i', 'j'] with 1-based node indices.
        area: area records like ['areaK', 'A'].
    Returns:
        list of [length, E*A/length] pairs, one per bar, with E = 1000.
    """
    import math
    E = 1000
    results = []
    for idx in range(barsnum):
        head = int(bar[idx][1]) - 1
        tail = int(bar[idx][2]) - 1
        dx = int(node[head][1]) - int(node[tail][1])
        dy = int(node[head][2]) - int(node[tail][2])
        length = math.sqrt(dx ** 2 + dy ** 2)
        results.append([length, E * int(area[idx][1]) / length])
    return results
def output(barlen, barsnum):
    """Write each bar's length and stiffness to 0551287OUT.txt.

    Args:
        barlen: list of [length, E*A/L] pairs as produced by cal().
        barsnum: number of bars to report.
    """
    # `with` guarantees the file is flushed and closed even on error;
    # the original left the handle open and relied on GC to flush it.
    with open('0551287OUT.txt', 'w') as f:
        for i in range(barsnum):
            # %d truncates the float values to integers, as before.
            f.write('bar%d的長度=%d E*A/L=%d\n' %(i+1,barlen[i][0],barlen[i][1]))
    print('output end')
def main():
    """Read the input file, compute bar properties, and write the report."""
    nodenum, node, barsnum, bar, area = readfile()
    results = cal(nodenum, node, barsnum, bar, area)
    output(results, barsnum)
if __name__ == '__main__':
main() | true |
fd36d5d6269c4c51f51b94ce0405580d52815e70 | Python | AdamZhouSE/pythonHomework | /Code/CodeRecords/2464/60619/252822.py | UTF-8 | 405 | 3.609375 | 4 | [] | no_license | target = int(input())
num = input().split(",")
numbers = [int(i) for i in num]
lengths = []
for i in range(len(numbers)-1):
current = numbers[i]
le = 1
for j in range(i+1, len(numbers)):
current += numbers[j]
le += 1
if current >= target:
lengths.append(le)
break
if len(lengths) == 0:
print(0)
else:
lengths.sort()
print(lengths[0]) | true |
da020f278b718d5ab1940672febc85b53a4ba9e1 | Python | bss233/CS126-SI-Practice | /Week 8/Palindrome with a loop.py | UTF-8 | 299 | 3.625 | 4 | [] | no_license | def palindrome(word):
    """Return True when `word` reads the same forwards and backwards,
    ignoring case, spaces, commas and apostrophes."""
    # Normalize: lower-case and strip the ignored characters.
    word = word.lower().replace(' ', '').replace(',', '').replace("'", '')
    # Compare characters pairwise from both ends toward the middle.
    forward = 0
    backward = -1
    for count in range(len(word)):
        if word[forward] != word[backward]:
            return False
        forward += 1
        backward -= 1
    return True
| true |
e7f611db0296a05d900903e7a5652c9acaaecc20 | Python | Claudio5/ML_project | /project1/src/cross_validation.py | UTF-8 | 1,933 | 2.671875 | 3 | [] | no_license | import numpy as np
from proj1_helpers import *
from implementations import *
from utils import *
def cross_validation(optim_method, loss_function, tx, y, indexes_te, indexes_tr,
                     k_fold, isBuildPoly = False, args_optim = (), args_loss = ()):
    """Cross validation of the training set for any optimization method and for
    any value of k_fold.

    optim_method(y, x, *args_optim) must return (weights, training error);
    loss_function(y, x, w, *args_loss) returns the test error (an MSE, given
    the rmse = sqrt(2*mse) conversion below). indexes_te/indexes_tr hold one
    index array per fold. Returns (mse_tr, mse_te, rmse_tr, rmse_te, accuracy),
    each averaged over the folds.
    """
    err_tr_list = []
    err_te_list = []
    accuracy_list = []
    for i in range(k_fold):
        # Fancy indexing copies, so the y_te mutation below cannot touch y.
        x_te = tx[indexes_te[i]]
        y_te = y[indexes_te[i]]
        x_tr = tx[(indexes_tr[i]).astype(int)]
        y_tr = y[(indexes_tr[i]).astype(int)]
        if not isBuildPoly:
            x_tr, x_te = standardize(x_tr, 0, x_te, True)
        else:
            # Does not take into account the column containing only ones to avoid a std of 0
            # It happens when we try to add polynomial features
            x_tr[:,1:], x_te[:,1:] = standardize(x_tr[:,1:], 0, x_te[:,1:], True)
        # Get the final value of w
        w, err_tr = optim_method(y_tr, x_tr, *args_optim)
        # Loss function corresponding to the method
        err_te = loss_function(y_te, x_te, w, *args_loss)
        y_predicted = predict_labels(w, x_te)
        # When doing logistic regression put again the testing preditions to -1
        y_te[y_te == 0] = -1
        # Compute the accuracy by checking how many values are corrected with
        # testing labels
        accuracy_list.append(np.sum(np.equal(y_predicted, y_te)/len(y_te)))
        err_tr_list.append(err_tr)
        err_te_list.append(err_te)
    # Compute the final statistics taking the mean of the computed means
    mse_tr_mean = np.mean(err_tr_list)
    mse_te_mean = np.mean(err_te_list)
    rmse_tr_mean = np.sqrt(2*mse_tr_mean)
    rmse_te_mean = np.sqrt(2*mse_te_mean)
    accuracy_mean = np.mean(accuracy_list)
    return mse_tr_mean, mse_te_mean, rmse_tr_mean, rmse_te_mean, accuracy_mean
| true |
d8bbd3858a251a34a29eb19dd89de512a369fcd7 | Python | wenwen252/Auto-testing | /class_04/04字典.py | UTF-8 | 1,045 | 4.1875 | 4 | [] | no_license | """
============================
-*- coding:utf-8 -*-
Author :稳稳的幸福
E_mail :1107924184@qq.com
Time :2019/12/30 21:13
File :04字典.py
============================
"""
"""
字典:每一个元素都是由一个键值对(key:value)组成
字典的定义:使用花括号来表示
字典中的数据规范:
key:不能重复,只能是不可以变类型的数据(数值,字符串,元组),建议key使用字符串
value:可以是任意类型的数据
字典定义的两种方式:
第一种:
user_info = {"name": "稳稳的幸福", "age": 14, "sex": "女"}
第二种:
user_info = dict(name="稳稳的幸福",age=14,sex="女")
第三种:
data=[("name","稳稳的幸福"),("age",18),("sex","女")]
user_info=dict(data)
li1=[('aa', 11), ('cc', 11), ('bb', 22)]
dict1=dict(li1)
"""
# Demo: look up a value by key and print it.
user_info = {"name": "稳稳的幸福", "age": 14, "sex": "女"}
name = user_info["name"]
print(name)  # was print(age): `age` is undefined there and raised NameError
#
# # When a key repeats, the last key/value pair wins:
# dic = {"a": 11, "a": 111, "a": 1111}
# print(dic)
| true |
454b2bfa12dfec0ffcd1a2080f6921b0745326c3 | Python | lee000000/leetcodePractice | /83.py | UTF-8 | 1,052 | 3.890625 | 4 | [] | no_license | '''
83. Remove Duplicates from Sorted List
Given a sorted linked list, delete all duplicates such that each element appear only once.
For example,
Given 1->1->2, return 1->2.
Given 1->1->2->3->3, return 1->2->3.
'''
from ListNode import *
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    """Remove duplicate values from a sorted singly linked list (LeetCode 83)."""
    def deleteDuplicates(self, head):
        """
        :type head: ListNode
        :rtype: ListNode - the same list with consecutive duplicates removed.
        """
        node = head
        while node is not None and node.next is not None:
            if node.next.val == node.val:
                # Splice out the duplicate neighbour; stay put in case
                # more copies of the same value follow.
                node.next = node.next.next
            else:
                node = node.next
        return head
return head
def test():
    """Build a linked list with duplicates, dedupe it, and print the result."""
    values = [1, 1, 2, 2, 3, 4, 4, 5, 6]
    linked = List_to_Link(values)
    solver = Solution()
    deduped = Link_to_List(solver.deleteDuplicates(linked.head))
    deduped.print_list()
# Run the demo only when this file is executed directly, not on import.
if __name__ == "__main__":
    test()
| true |
b73b4078a1730e3123de76722dcaa44eb51e6720 | Python | houshengandt/My-Solutions-For-Show-Me-The-Code | /0000/0000.py | UTF-8 | 416 | 2.984375 | 3 | [] | no_license | from PIL import Image, ImageDraw, ImageFont
def add_num(filename, text='9', size=40, color='red'):
    """Stamp `text` near the bottom-right corner of an image and save a copy.

    Args:
        filename: path of the source image; the copy is saved as "new"+filename.
        text: string to draw (default '9').
        size: font size in points for arial.ttf.
        color: fill color for the text.
    """
    img = Image.open(filename)
    print(img.size)
    # NOTE(review): arial.ttf must be resolvable by PIL on this system.
    font = ImageFont.truetype("arial.ttf", size)
    draw = ImageDraw.Draw(img)
    width, height = img.size
    draw.text((width - 40, height - 190), text, fill=color, font=font)
    img.save("new" + filename)
# Demo: stamp the default text onto 1.jpg when run as a script.
if __name__ == "__main__":
    test = add_num("1.jpg")
| true |
0e70aec42f3f17d049fd079d297aab1febccdd2c | Python | davidyuqiwei/davidyu_stock | /scripts/backup/combine_all_csv.py | UTF-8 | 1,618 | 2.5625 | 3 | [
"MIT"
] | permissive | # coding: utf-8
## this script combine all the csv files in the folder
from package_path_define.path_define import *
from package_downloaddata.download_data_v1 import save_dir1
import pandas as pd
from package_functions.combine_allCsv_inFolder import combine_csv_in_folder
# Target folder: <main_path>\data_to_sql_owner (Windows-style path join).
path_stock_owner_liutong=r'\\'.join([main_path,"data_to_sql_owner"])
#os.system('ls')
#df1=combine_csv_in_folder(path_stock_owner_liutong)
#print(df1.head())
'''
file_name1="data_combine.csv"
file_name=r'\\'.join([path_stock_owner_liutong,file_name1])
file_name2="data_combine_tr.csv"
file_name_tr=r'\\'.join([path_stock_owner_liutong,file_name2])
os.system('iconv -f utf_8_sig -t utf-8 '+file_name +' >'+file_name_tr)
'''
# Load the combined CSV, keep rows 1..29 rounded to 2 decimals, re-export.
# NOTE(review): iloc[1:30,] drops the first data row - confirm intentional.
df1=pd.read_csv(path_stock_owner_liutong+'\\data_combine.csv')
df1=df1.round(2).iloc[1:30,]
df1.to_csv(path_stock_owner_liutong+'\\data_combine.csv_tr.csv',encoding="utf-8",index=False)
#def combine_csv_in_folder(folder,save_name="data_combine.csv"):
# files=os.listdir(folder)
# data_file='.//'.join([folder,files[0]])
# df1=pd.read_csv(data_file)
# for i in range(1,len(files)):
# #for i in range(1,20):
# data_file='.//'.join([folder,files[i]])
# df2=pd.read_csv(data_file)
# df1=pd.concat([df1,df2])
# df1.to_csv('.//'.join([folder,save_name]),index=False)
# return df1
#
#path_stock_owner_liutong=r'\\'.join([main_path,"data_to_sql_owner"])
#df1=combine_csv_in_folder(path_stock_owner_liutong)
# In[16]:
## this script combine all the csv files in the folder
#import pandas as pd
#import os
# In[7]:
#print df1.head()
#file_name1="stock_owner_liutong_combine.csv"
| true |
f0d0e6298f56b6c49f7869b5c41745f648fa456a | Python | w8s/python-asana | /asana/resources/gen/project_statuses.py | UTF-8 | 2,554 | 2.875 | 3 | [
"MIT"
] | permissive |
class _ProjectStatuses:
    """Asana client wrapper for project status updates.

    A project status is a progress update - text plus a color code
    ("red", "yellow" or "green") - sent to all followers of a project.
    Statuses can be created and deleted, but not modified.
    """
    def __init__(self, client=None):
        self.client = client
    def create_in_project(self, project, params={}, **options):
        """Create a status update on the project and return its full record.

        Parameters
        ----------
        project : {Gid} The project on which to create a status update.
        [data] : {Object} Data for the request
          - text : {String} The text of the project status update.
          - color : {String} One of `"red"`, `"yellow"`, or `"green"`.
        """
        return self.client.post("/projects/%s/project_statuses" % (project), params, **options)
    def find_by_project(self, project, params={}, **options):
        """Return the compact status records for all updates on the project.

        Parameters
        ----------
        project : {Gid} The project to find status updates for.
        [params] : {Object} Parameters for the request
        """
        return self.client.get_collection("/projects/%s/project_statuses" % (project), params, **options)
    def find_by_id(self, project_status, params={}, **options):
        """Return the complete record for a single status update.

        Parameters
        ----------
        project-status : {Gid} The project status update to get.
        [params] : {Object} Parameters for the request
        """
        return self.client.get("/project_statuses/%s" % (project_status), params, **options)
    def delete(self, project_status, params={}, **options):
        """Delete an existing status update and return an empty data record.

        Parameters
        ----------
        project-status : {Gid} The project status update to delete.
        """
        return self.client.delete("/project_statuses/%s" % (project_status), params, **options)
| true |
402c2a4d60e6c4783ee6f588c269201da0e48df8 | Python | zhuolikevin/Algorithm-Practices-Python | /Indeed/validPythonCode.py | UTF-8 | 2,522 | 3.75 | 4 | [] | no_license | # Given a list of strings. Each string represents a line of python code
# return the line number of first invalid line. If no invalid line, return -1
# rules for validation:
# 1. No indentation in the first line
# 2. There must be more indentations in the next line of control statements(if, else, for, etc)
# 3. If a line is not a control statement, it should have the same indentation as the line above or any control statement above.
def isValidPython(codes):
    """Return the 1-based number of the first invalid line, or -1 if none.

    Validation rules (see the header comments of this file):
      1. the first code line must not be indented;
      2. the line after a control statement must be indented deeper;
      3. any other line must match the indentation of the previous line
         or of some enclosing control statement.
    """
    if not codes:
        return -1
    first = 0
    while first < len(codes) and isWhiteSpace(codes[first]):
        first += 1
    if first == len(codes):
        return -1                       # only blank lines: nothing to flag
    if getIndentation(codes[first]) != 0:
        return first + 1                # rule 1 violated
    expect_deeper = isControlStatement(codes[0])
    indent_stack = [0]                  # indentation levels currently open
    for line_no in range(1, len(codes)):
        if isWhiteSpace(codes[line_no]):
            continue
        indent = getIndentation(codes[line_no])
        if expect_deeper:
            if indent <= indent_stack[-1]:
                return line_no + 1      # rule 2: control needs deeper body
            indent_stack.append(indent)
        else:
            if indent > indent_stack[-1]:
                return line_no + 1      # rule 3: unexpected extra indent
            # Dedent: unwind to a previously opened level.
            while indent_stack and indent_stack[-1] != indent:
                indent_stack.pop()
            if not indent_stack:
                return line_no + 1      # dedented to a level never opened
        expect_deeper = isControlStatement(codes[line_no])
    return -1
def isWhiteSpace(line):
    """Return True when the line contains nothing but whitespace."""
    return line.strip() == ''
def getIndentation(line):
    """Return the number of leading space characters in the line.

    Only ' ' counts; a tab (or any other character) ends the run.
    """
    return len(line) - len(line.lstrip(' '))
def isControlStatement(line):
    """Return True when the line contains a block-introducing keyword."""
    control_words = {'if', 'else:', 'while', 'for', 'def', 'class'}
    return any(word in control_words for word in line.strip().split(' '))
# Demo fixture: a snippet of this very validator's loop, line by line.
codes = ['for i in range(1, len(codes)):',
         '  if isWhiteSpace(codes[i]):',
         '    continue',
         '',
         '  indentation = getIndentation(codes[i])',
         '  if controlFlag:',
         '    if indentation <= stack[-1]:',
         '      return i+1',
         '    else:',
         '      stack.append(indentation)',
         '  else:',
         '    if indentation > stack[-1]:',
         '      return i+1',
         '    else:',
         '      while stack and stack[-1] != indentation:',
         '        stack.pop()',
         '      if not stack:',
         '        return i+1',
         '    controlFlag = isControlStatement(codes[i])]']
# Print the first invalid line number for the fixture (-1 when all valid).
print isValidPython(codes)
| true |
c8f403bb85c7a0f7fe6fb049462819f44a53fc0f | Python | BiggyZable/proman-sprint-1 | /main.py | UTF-8 | 3,260 | 2.65625 | 3 | [] | no_license | from flask import Flask, render_template, url_for, request
from util import json_response
import data_handler
# Single Flask application object shared by all route handlers below.
app = Flask(__name__)
@app.route("/")
def index():
    """
    Serve the one-pager template which shows all the boards and cards.
    """
    return render_template('index.html')
@app.route("/get-boards")
@json_response
def get_boards():
    """
    Return all the boards as JSON (via the json_response decorator).
    """
    return data_handler.get_boards()
@app.route("/get-cards/<int:board_id>")
@json_response
def get_cards_for_board(board_id: int):
    """
    Return all cards that belong to one board, as JSON.
    :param board_id: id of the parent board
    """
    return data_handler.get_cards_for_board(board_id)
@app.route("/add-board", methods=["POST"])
@json_response
def add_board():
    """Create a new board named after the POSTed JSON payload.

    NOTE(review): the payload is treated as the board title itself
    (a plain string), not a wrapping object - confirm with the client JS.
    """
    if request.method == "POST":
        board_title = request.get_json()
        data_handler.add_board(board_title)
        return f'ez itt a {board_title}'
@app.route("/rename-board", methods=["POST"])
@json_response
def rename_board():
    """Rename a board; expects JSON with old_board_title and new_board_title."""
    if request.method == "POST":
        old_board_title = request.get_json()['old_board_title']
        new_board_title = request.get_json()['new_board_title']
        data_handler.rename_board(old_board_title, new_board_title)
        return f'ez itt az uj {new_board_title} a {old_board_title} helyett'
@app.route("/get-statuses", methods=["GET"])
@json_response
def get_statuses():
    """Return all statuses (board columns) as JSON."""
    if request.method == "GET":
        return data_handler.get_statuses()
@app.route("/add-status", methods=["POST"])
@json_response
def add_status():
    """Create a new status and link it to a board.

    Expects JSON with status_name and board_name.
    """
    if request.method == "POST":
        status_board_dict = request.get_json()
        status_name = status_board_dict['status_name']
        board_name = status_board_dict['board_name']
        data_handler.add_status(status_name)
        data_handler.add_status_link(status_name, board_name)
        return f'A new status was added: {status_name} to the board {board_name}'
@app.route("/rename-column", methods=["POST"])
@json_response
def rename_column():
    """Rename a column on a board.

    Expects JSON with old_column_title, new_column_title and board_title.
    """
    if request.method == "POST":
        column_title_dict = request.get_json()
        old_column_title = column_title_dict['old_column_title']
        new_column_title = column_title_dict['new_column_title']
        board_title = column_title_dict['board_title']
        data_handler.rename_column(old_column_title, new_column_title, board_title)
        return f'The name of the column {old_column_title} from board: {board_title} was renamed to {new_column_title}'
@app.route('/add-card', methods=['POST'])
@json_response
def add_new_card():
    """Create a new card on a board.

    Expects JSON with card_title, board_id and status_name.
    """
    if request.method == "POST":
        card_dict = request.get_json()
        card_title = card_dict['card_title']
        board_id = card_dict['board_id']
        status_name = card_dict['status_name']
        data_handler.add_new_card(card_title, board_id, status_name)
        return f'Added card: {card_title} to board {board_id}'
@app.route('/show-cards', methods=['GET'])
@json_response
def show_cards():
    """Return all cards as JSON."""
    if request.method == "GET":
        return data_handler.get_cards()
def main():
    """Start the Flask development server (debug mode)."""
    app.run(debug=True)
# Serving the favicon
with app.app_context():
    # Map /favicon.ico to the bundled static favicon file.
    app.add_url_rule('/favicon.ico', redirect_to=url_for('static', filename='favicon/favicon.ico'))
# Start the dev server only when this module is executed directly.
if __name__ == '__main__':
    main()
| true |
7f8e3635bf7d57fd1e0a8f684fe337075bd0e53d | Python | vihervirveli/portfolio | /AI_and_Python/Python_ImageClassificationFaceRecognition/model_best_so_far.py | UTF-8 | 9,275 | 2.84375 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[1]:
"""
@vihervirveli project for AI and IoT.
Purpose of the project:
• Make a CNN that will determine that the pictures used in an age determining program
1. are big enough
2. have one (1) face in them
3. in addition to a face, the picture also contains face landmarks
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
import os
import numpy as np
import matplotlib.pyplot as plt
AUTOTUNE = tf.data.experimental.AUTOTUNE
import IPython.display as display
from PIL import Image
import pathlib
# Root folder holding the train/ and test/ directories (hard-coded Windows path).
kasvods_dir = "D://kasvokuvat//kasvods//for_use//"
print("testi")
#the directory where we keep our test and train directories. They both contain directories keep and delete.
data_dir = pathlib.Path(kasvods_dir)
print(data_dir)
#how many pictures do we have for our uses
image_count = len(list(data_dir.glob('*/*/*.jpg'))) #+/*?
image_count
#what our class names are (keep and delete)
CLASS_NAMES = np.array([item.name for item in data_dir.glob('*/*')])
CLASS_NAMES
#here we can see a few example pictures from train deletes
deletes = list(data_dir.glob('train//delete//*'))
for image_path in deletes[:3]:
    display.display(Image.open(str(image_path)))
#let's create a dataset of the file paths
list_ds = tf.data.Dataset.list_files(str(data_dir/'*/*/*'))
for f in list_ds.take(10):
    print(f.numpy())
# NOTE(review): the bare `image_count` / `CLASS_NAMES` expressions above are
# notebook-style output cells; they are no-ops when run as a plain script.
BATCH_SIZE = 32
IMG_HEIGHT = 150
IMG_WIDTH = 150
STEPS_PER_EPOCH = np.ceil(image_count/BATCH_SIZE)
def get_label(file_path):
    # Returns a boolean vector (a one-hot match of the file's class
    # directory against CLASS_NAMES).
    # convert the path to a list of path components
    parts = tf.strings.split(file_path, os.path.sep)
    # The second to last is the class-directory
    return parts[-2] == CLASS_NAMES
def decode_img(img):
    # Turn a raw JPEG byte string into a float32 image tensor of the target size.
    # convert the compressed string to a 3D uint8 tensor
    img = tf.image.decode_jpeg(img, channels=3)
    # Use `convert_image_dtype` to convert to floats in the [0,1] range.
    img = tf.image.convert_image_dtype(img, tf.float32)
    # resize the image to the desired size.
    return tf.image.resize(img, [IMG_WIDTH, IMG_HEIGHT])
def process_path(file_path):
    """
    Returns: a pair of img: image and label:label so that our model can know which is the correct answer when learning
    and testing
    """
    # Used as the mapping function for tf.data.Dataset.map below.
    label = get_label(file_path)
    # load the raw data from the file as a string
    img = tf.io.read_file(file_path)
    img = decode_img(img)
    return img, label
# Build the labelled dataset in parallel and sanity-check two samples.
labeled_ds = list_ds.map(process_path, num_parallel_calls=AUTOTUNE)
for image, label in labeled_ds.take(2):
    print("Image shape: ", image.numpy().shape)
    print("Label: ", label.numpy())
def show_batch(image_batch, label_batch):
    # Display a 5x5 grid of sample images titled with their class names.
    plt.figure(figsize=(10,10))
    for n in range(25):
        ax = plt.subplot(5,5,n+1)
        plt.imshow(image_batch[n])
        plt.title(CLASS_NAMES[label_batch[n]==1][0].title())
        plt.axis('off')
def prepare_for_training(ds, cache=True, shuffle_buffer_size=1000):
    # Standard input pipeline: cache -> shuffle -> repeat -> batch -> prefetch.
    # This is a small dataset, only load it once, and keep it in memory.
    # use `.cache(filename)` to cache preprocessing work for datasets that don't
    # fit in memory.
    if cache:
        if isinstance(cache, str):
            ds = ds.cache(cache)
        else:
            ds = ds.cache()
    ds = ds.shuffle(buffer_size=shuffle_buffer_size)
    # Repeat forever
    ds = ds.repeat()
    ds = ds.batch(BATCH_SIZE)
    # `prefetch` lets the dataset fetch batches in the background while the model
    # is training.
    ds = ds.prefetch(buffer_size=AUTOTUNE)
    return ds
train_ds = prepare_for_training(labeled_ds)
image_batch, label_batch = next(iter(train_ds))
show_batch(image_batch.numpy(), label_batch.numpy())
# Rebuild the directory tree paths and count images per class.
PATH = os.path.dirname(kasvods_dir)
train_dir = os.path.join(PATH, 'train')
test_dir = os.path.join(PATH, 'test')
train_keep_dir = os.path.join(train_dir, 'keep')
train_delete_dir = os.path.join(train_dir, 'delete')
test_keep_dir = os.path.join(test_dir, 'keep')
test_delete_dir = os.path.join(test_dir, 'delete')
num_keeps_tr = len(os.listdir(train_keep_dir))
num_deletes_tr = len(os.listdir(train_delete_dir))
num_keep_test = len(os.listdir(test_keep_dir))
num_delete_test = len(os.listdir(test_delete_dir))
total_train = num_keeps_tr + num_deletes_tr
total_test = num_keep_test + num_delete_test
print('total training keep images:', num_keeps_tr)
print('total training delete images:', num_deletes_tr)
print('total validation keep images:', num_keep_test)
print('total validation delete images:', num_delete_test)
print("--")
print("Total training images:", total_train)
print("Total validation images:", total_test)
# Hyper-parameters (inline comments record accuracies from earlier experiments).
batch_size = 50 #55=> 86%, 60 => 88%, 40 => 88%, 50 => 91%, 128 => 85-86%
epochs = 10 #20, 15
IMG_HEIGHT = 350
IMG_WIDTH = 350
train_image_generator = ImageDataGenerator(rescale=1./255,horizontal_flip=True) # Generator for our training data
test_image_generator = ImageDataGenerator(rescale=1./255) # Generator for our validation data
train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,
                                                           directory=train_dir,
                                                           shuffle=True,
                                                           target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                           class_mode='binary')
test_data_gen = test_image_generator.flow_from_directory(batch_size=batch_size,
                                                         directory=test_dir,
                                                         target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                         class_mode='binary')
sample_training_images, _ = next(train_data_gen)
# This function will plot images in the form of a grid with 1 row and 5 columns where images are placed in each column.
def plotImages(images_arr):
    # Show up to five images side by side in a single row.
    fig, axes = plt.subplots(1, 5, figsize=(20,20))
    axes = axes.flatten()
    for img, ax in zip( images_arr, axes):
        ax.imshow(img)
        ax.axis('off')
    plt.tight_layout()
    plt.show()
plotImages(sample_training_images[:5])
#our actual model and a summary of it
model = Sequential([
    Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH ,3)),
    MaxPooling2D(),
    Dropout(0.1),
    Conv2D(32, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Conv2D(64, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Dropout(0.1),
    Flatten(),
    Dense(512, activation='relu'),
    Dense(1, activation='sigmoid')
])
adam = Adam(learning_rate=0.001) #0.000001 =>69% default 0.001
model.compile(optimizer=adam,
              loss='binary_crossentropy',
              metrics=['accuracy'])
model.summary()
# NOTE(review): this callback is created but never passed to the fit call
# below, so early stopping is not actually in effect.
earlystop_callback = EarlyStopping(monitor='val_accuracy', min_delta=0.0001,
                                   patience=1)
#and now we run the model to teach it and see how well it does
# NOTE(review): fit_generator is deprecated in newer TF 2.x; Model.fit accepts
# generators directly.
history = model.fit_generator(
    train_data_gen,
    steps_per_epoch=total_train // batch_size,
    epochs=epochs,
    validation_data = test_data_gen,
    validation_steps=total_test // batch_size
)
"""
After running the model, we can see here the accuracy level and how well we reduced loss. If the lines are close to each other,
our model is pretty good. If the lines are far apart, a few things could be off: if the loss line keeps zigzagging,
it might mean we have too few testing pictures. If the validation accuracy doesn't meet training accuracy, problem could be
overfitting => the model is adapting too well to the training data, and doesn't generalize well. In that case we must regularize
our model better, by using dropouts or other methods.
"""
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Test Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Testing Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Testing Loss')
plt.legend(loc='upper right')
plt.title('Training and Testing Loss')
plt.show()
# NOTE(review): get_ipython() only exists when running under IPython/Jupyter.
get_ipython().system('pip install -q pyyaml h5py')
model.save("D://kasvokuvat//kasvods//model.h5")
# NOTE(review): the block below is a bare string literal — a no-op at runtime —
# kept as a scratch copy of the model with stronger Dropout layers.
# (The first line is Finnish: "with dropout, if you still need it, copy from here".)
"""
dropoutin kera, jos tarviit sitä vielä, kopioi tästä
model = Sequential([
    Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH ,3)),
    MaxPooling2D(),
    Dropout(0.2),
    Conv2D(32, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Conv2D(64, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Dropout(0.2),
    Flatten(),
    Dense(512, activation='relu'),
    Dense(1, activation='sigmoid')
])
"""
| true |
8a73b2e296a260dcf1269aa377e6b856307c2c63 | Python | kmcrayton7/python_coding_challenges | /programmr/strings/capitalize_me.py | UTF-8 | 212 | 4 | 4 | [] | no_license | # Write a program which capitalizes the first letter of a given string.
# NOTE: Python 2 script (print statement / raw_input).
print "Please enter a sentence using all lowercase letters."
sentence = raw_input('> ')
# capitalize() upper-cases the first character and lower-cases the rest.
sentence = sentence.capitalize()
print sentence
| true |
c1406bfb955b08e09466b9af8f50673b405b9c8c | Python | eswanson611/scripts | /archivesspace/asDeleteOrphanLocations.py | UTF-8 | 1,449 | 2.515625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
import os, requests, json, sys, logging, ConfigParser, urllib2, pandas
# NOTE: Python 2 script (print statements, ConfigParser/urllib2 imports).
config = ConfigParser.ConfigParser()
config.read('local_settings.cfg')
# Logging configuration
logging.basicConfig(filename=config.get('Logging', 'filename'),format=config.get('Logging', 'format', 1), datefmt=config.get('Logging', 'datefmt', 1), level=config.get('Logging', 'level', 0))
# Sets logging of requests to WARNING to avoid unnecessary info
logging.getLogger("requests").setLevel(logging.WARNING)
# Connection settings for the ArchivesSpace instance.
dictionary = {'baseURL': config.get('ArchivesSpace', 'baseURL'), 'repository':config.get('ArchivesSpace', 'repository'), 'user': config.get('ArchivesSpace', 'user'), 'password': config.get('ArchivesSpace', 'password')}
locationURL = '{baseURL}/locations/'.format(**dictionary)
# authenticates the session
auth = requests.post('{baseURL}/users/{user}/login?password={password}&expiring=false'.format(**dictionary)).json()
session = auth["session"]
headers = {'X-ArchivesSpace-Session':session}
# locations.csv is expected to hold one location id per row, with no header.
locations = pandas.read_csv('locations.csv', header=None)
locs = locations.values.T[0].tolist()
print 'Getting a list of top containers'
# Fetch each location, then delete it and log the deletion.
for loc in locs:
    print '/locations/' + str(loc) + ' will be deleted'
    location = (requests.get(locationURL + str(loc), headers=headers)).json()
    deleted = requests.delete(locationURL + str(loc), headers=headers, data=json.dumps(location))
    logging.info('/locations/' + str(loc) + ' was deleted')
| true |
b25465e5fb9c7d58869adb86287d167e6ac49bc8 | Python | nintex00/bfun | /perceptron_OR.py | UTF-8 | 1,083 | 3.140625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 3 20:47:56 2016
@author: Brad
"""
import numpy as np
import matplotlib.pyplot as plt
# Truth-table inputs for the OR gate (one row per sample).
input = np.matrix('0 0; 0 1; 1 0; 1 1')  # NOTE(review): `input` shadows the builtin.
numIn = 4
desired_out = np.matrix('0; 1; 1; 1')
bias = -1
coeff = 0.7 # learning rate
# Random initial weights in (-2, 0]: [bias weight, w1, w2].
weights = -1*2*np.random.rand(3,1)
iterations = 1000
rms_err= np.zeros(iterations)
for i in range(iterations):
    out = np.zeros(4)
    for j in range(numIn):
        # Weighted sum of bias and the two inputs, then sigmoid activation.
        y = bias*weights[0][0] + input[j,0]*weights[1][0] + input[j,1]*weights[2][0]
        out[j] = 1 / (1+np.exp(-y))
        delta = desired_out[j] - out[j] # delta rule, levenberg-marquardt could be used
        weights[0][0] = weights[0][0] + coeff*bias*delta
        weights[1][0] = weights[1][0] + coeff*input[j,0]*delta
        weights[2][0] = weights[2][0] + coeff*input[j,1]*delta
    # Root-mean-square error over the four samples for this iteration.
    sum_rms = 0
    for k in range(numIn):
        sum_rms = np.power((out[k] - desired_out[k]),2) + sum_rms
    rms_err[i] = np.sqrt(sum_rms / len(out))
print(out)
print(weights)
plt.figure  # NOTE(review): missing parentheses — this line is a no-op; plt.figure() was probably intended.
plt.plot(range(iterations),rms_err)
plt.title("RMSE vs. Iterations")
plt.show() | true |
27a9e101cd4a7f253db5f5c89fb3068918340ead | Python | DilyanTsenkov/SoftUni-Software-Engineering | /Python Fundamentals/03 Lists Basics/Exercises/07_Easter_Gifts.py | UTF-8 | 870 | 2.953125 | 3 | [] | no_license | gifts_names = input().split(" ")
command = input()
# Process commands until "No Money" is entered.
while command != "No Money":
    command_list = command.split(" ")
    if command_list[0] == "OutOfStock":
        # Mark every occurrence of the gift with the "None" placeholder.
        if command_list[1] in gifts_names:
            for i in range(len(gifts_names)):
                if gifts_names[i] == command_list[1]:
                    gifts_names[i] = "None"
    elif command_list[0] == "Required" and int(command_list[2]) > 0 and int(command_list[2]) <= int(
            len(gifts_names)) - 1:
        # Replace the gift at the given index (only non-zero, in-range indices).
        gifts_names[int(command_list[2])] = command_list[1]
    elif command_list[0] == "JustInCase":
        # Replace the last gift in the list.
        gifts_names[int(len(gifts_names)) - 1] = command_list[1]
    command = input()
# Drop all "None" placeholders before printing the remaining gifts.
for n in range(len(gifts_names)):
    if "None" in gifts_names:
        gifts_names.remove("None")
gifts_names_print = " ".join(gifts_names)
print(gifts_names_print)
| true |
bcddd2dc3a8c7cf035536ff1248f0e35913cc880 | Python | INfoUpgraders/rblxpy | /rblxpy/__init__.py | UTF-8 | 372 | 2.71875 | 3 | [
"MIT"
] | permissive | import urllib.request, json
class Users:
    """Thin wrapper around the Roblox get-by-username web API."""
    def __init__(self, username):
        # Username to look up via the web API.
        self.username = username
    def get_user(self):
        # Performs a live HTTP GET and returns the decoded JSON payload as a dict.
        with urllib.request.urlopen(f"http://api.roblox.com/users/get-by-username?username={self.username}") as url:
            data = json.loads(url.read().decode())
            return data
# NOTE(review): this line performs a network request at import time; consider
# guarding it with `if __name__ == '__main__':`.
print(Users.get_user(Users("INfoUpgradersYT")))
| true |
3e6a36efb6a4efb1c03791376a5aa331008782e6 | Python | joewledger/ProjectEuler | /Problems/Euler20/Euler20.py | UTF-8 | 391 | 3.40625 | 3 | [] | no_license | #Project Euler Problem 20
#Description: Find the sum of the digits in the number 100!
import os
import sys
# Optional first CLI argument: directory to run from.
if(len(sys.argv) > 1):
    os.chdir(sys.argv[1])
sys.path.append("../../Utils")
sys.path.append("Utils")
import operator
import integer_utils
# NOTE: Python 2 code — bare `reduce` and `xrange` (functools.reduce / range in Python 3).
factorial = reduce(operator.mul, [x for x in xrange(1,101)],1)
print(sum([x for x in integer_utils.convert_int_to_digit_list(factorial)]))
| true |
ee40939b51d168f08137b3e6b4abf062e15cc0e0 | Python | k-data/Streamlit-Titanic-Machine-Learning-from-Disaster | /streamlit/python/basic_ml.py | UTF-8 | 4,200 | 3.15625 | 3 | [] | no_license | """ basic_ml.py """
import pandas as pd
import seaborn as sns; sns.set(font='DejaVu Sans')
import matplotlib.pyplot as plt
import streamlit as st
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
class Trial_ML():
    """Fit several baseline classifiers and visualise their accuracies in Streamlit."""
    def rfc(self, X_train, y_train):
        # Fit a random-forest classifier with a fixed seed for reproducibility.
        rfc = RandomForestClassifier(random_state=0)
        rfc.fit(X_train, y_train)
        # print('RandomForestClassifier')
        # print('accuracy of train set: {}'.format(rfc.score(X_train, y_train)))
        # print('accuracy of test set: {}'.format(rfc.score(X_test, y_test)))
        return rfc
    def xgb(self, X_train, y_train):
        # Fit an XGBoost classifier (label encoder disabled to silence warnings).
        xgb = XGBClassifier(random_state=0, use_label_encoder =False)
        xgb.fit(X_train, y_train)
        # print('XGBClassifier')
        # print('accuracy of train set: {}'.format(xgb.score(X_train, y_train)))
        # print('accuracy of test set: {}'.format(xgb.score(X_test, y_test)))
        return xgb
    def lgb(self, X_train, y_train):
        # Fit a LightGBM classifier.
        lgb = LGBMClassifier(random_state=0)
        lgb.fit(X_train, y_train)
        # print('LGBMClassifier')
        # print('accuracy of train set: {}'.format(lgb.score(X_train, y_train)))
        # print('accuracy of test set: {}'.format(lgb.score(X_test, y_test)))
        return lgb
    def lr(self, X_train, y_train):
        # Fit a logistic-regression classifier.
        lr = LogisticRegression(random_state=0)
        lr.fit(X_train, y_train)
        # print('LogisticRegression')
        # print('accuracy of train set: {}'.format(lr.score(X_train, y_train)))
        # print('accuracy of test set: {}'.format(lr.score(X_test, y_test)))
        return lr
    def svc(self, X_train, y_train):
        # Fit a support-vector classifier.
        svc = SVC(random_state=0)
        svc.fit(X_train, y_train)
        # print('SVC')
        # print('accuracy of train set: {}'.format(svc.score(X_train, y_train)))
        # print('accuracy of test set: {}'.format(svc.score(X_test, y_test)))
        return svc
    def base_ml(self, X_train, X_test, y_train, y_test):
        # Train all five models, then plot train/test accuracies side by side.
        rfc = self.rfc(X_train, y_train)
        xgb = self.xgb(X_train, y_train)
        lgb = self.lgb(X_train, y_train)
        lr = self.lr(X_train, y_train)
        svc = self.svc(X_train, y_train)
        st.success('Finishied Machine learning')
        dic = {'train': [rfc.score(X_train, y_train), xgb.score(X_train, y_train), \
            lgb.score(X_train, y_train), lr.score(X_train, y_train), svc.score(X_train, y_train)],
            'test': [rfc.score(X_test, y_test), xgb.score(X_test, y_test), \
            lgb.score(X_test, y_test), lr.score(X_test, y_test), svc.score(X_test, y_test)]}
        index=['rfc', 'xgb', 'lgb', 'lr', 'svc']
        df_cls = pd.DataFrame(dic, index=index)
        fig, axes = plt.subplots(1, 2, figsize=(12, 5))
        sns.barplot(data=df_cls, x=index, y='train', ax = axes[0])
        axes[0].set_ylim([0.7, 1])
        axes[0].set_xlabel('Classifier')
        axes[0].set_ylabel('Accuracy')
        axes[0].set_title('Train')
        sns.barplot(data=df_cls, x=index, y='test', ax = axes[1])
        axes[1].set_ylim([0.7, 1])
        axes[1].set_xlabel('Classifier')
        axes[1].set_ylabel('Accuracy')
        axes[1].set_title('Test')
        fig.tight_layout()
        st.pyplot(fig)
        return rfc, xgb, lgb, lr, svc
    def output_file(self, output, *arg):
        # NOTE(review): `sa`, `rfc`, `xgb`, `lgb`, `lr` and `svc` are not defined
        # in this module, so calling this method raises NameError unless they are
        # injected as globals elsewhere; `*arg` is also unused.
        test_feature = sa.test_feature
        test_raw = sa.test_raw
        pred_1 = {
            'rfc': rfc.predict(test_feature),
            'xgb': xgb.predict(test_feature),
            'lgb': lgb.predict(test_feature),
            'lr': lr.predict(test_feature),
            'svc': svc.predict(test_feature)
        }
        # Write one submission CSV per model.
        for key, value in pred_1.items():
            pd.concat(
                [
                    pd.DataFrame(test_raw['PassengerId']).reset_index(drop=True),
                    pd.DataFrame(value, columns=['Survived'])
                ],
                axis=1
            ).to_csv('../output/submittion_{0}_{1}.csv'.format(key, output), index=False)
| true |
89bde37cca3554a7881dbd5a102cafe7140ef125 | Python | J3rryCodes/AI_python | /XOR_problem_solving_ai/XOR_nn.py | UTF-8 | 2,162 | 3.390625 | 3 | [
"Unlicense"
] | permissive | #XOR problem
import numpy as np #matrix math
#input values (XOR truth table, one row per sample)
X = np.array([[0,0],
             [1,0],
             [0,1],
             [1,1]])
#output values (expected XOR of each input row)
y = np.array([[0],
             [1],
             [1],
             [0]])
class nuralnetwork:
    # Hyper-parameters and randomly initialised weights/biases in (-1, 1).
    # NOTE(review): `learnig_rate` (sic) is declared but never used in train();
    # the weight updates below apply the raw gradients directly.
    # NOTE(review): these are class attributes mutated in place with `+=`,
    # so all instances share (and update) the same arrays.
    no_epoches = 100000
    learnig_rate = 0.001
    ih_weights = 2 * np.random.random((2,3)) - 1 #weights b/w input layer and hidden layer [2x3]
    ho_weights = 2 * np.random.random((3,1)) - 1 #weights b/w hidden layer and output layer [3x2]
    i_bais = 2 * np.random.random((1,3)) - 1 #bias for input layer [1x3]
    h_bias = 2 * np.random.random((1,1)) - 1 #bias for hidden layer [1x1]
    def sigmoid(self,a):
        return 1/(1+np.exp(-a)) #activation function
    def desigmoid(self,a):
        return a*(1-a) #derivative of activation function
    def feedforword(self,x):
        # Forward pass: input -> hidden -> output, sigmoid at each layer.
        input_layer = x
        hidden_layer = self.sigmoid(input_layer.dot(self.ih_weights) + self.i_bais) # hidden layer = input layer x inputTOhidden weights [1x3]
        output_layer = self.sigmoid(hidden_layer.dot(self.ho_weights) + self.h_bias) # output layer = hidden layer x hiddenTOoutput weights [1x1]
        return output_layer
    def train(self,X,y):
        # Online (per-sample) training: cycle through the samples for
        # no_epoches iterations, back-propagating the error each time.
        for i in range(self.no_epoches):
            r = i%len(X)
            #feed forword
            input_layer = np.array([X[r]])
            hidden_layer = self.sigmoid(input_layer.dot(self.ih_weights) + self.i_bais) # hidden layer = input layer x inputTOhidden weights [1x3]
            output_layer = self.sigmoid(hidden_layer.dot(self.ho_weights) + self.h_bias) # output layer = hidden layer x hiddenTOoutput weights [1x1]
            #back propogation
            output_error = y[r] - output_layer #[1x1]
            delta_output = output_error * self.desigmoid(output_layer) #[1x1]
            hidden_error = delta_output.dot(self.ho_weights.T) #[1x3]
            delta_hidden = hidden_error*(self.desigmoid(hidden_layer)) #[1x3]
            self.ho_weights += hidden_layer.T.dot(delta_output) #[3x1]
            self.ih_weights += input_layer.T.dot(delta_hidden) #[2x3]
            self.h_bias += delta_output
            self.i_bais += delta_hidden
        # Report the error of the last training sample.
        print('Final error'+str(abs(output_error)))
nn = nuralnetwork()
nn.train(X,y)
# Threshold the network output at 0.5 to report a binary XOR prediction.
for x in X:
    if(nn.feedforword(x) > 0.5):
        print(f'[{x[0]}] : [{x[1]}] --> 1')
    else:
        print(f'[{x[0]}] : [{x[1]}] --> 0')
| true |
cf67c3593024181f627691cf7f9f5c9bf2eef46c | Python | rapchen/LeetCode_Python | /contests/20201206/5617. 设计 Goal 解析器.py | UTF-8 | 300 | 2.953125 | 3 | [] | no_license | """
@Difficulty : E
@Status : AC
@Time : 2020/12/6 10:26
@Author : Chen Runwen
"""
class Solution:
    def interpret(self, command: str) -> str:
        """Expand Goal-parser tokens: 'G' -> 'G', '()' -> 'o', '(al)' -> 'al'."""
        # The two bracketed tokens never overlap, so the replacements can be
        # applied one after the other in either order.
        expanded = command.replace('(al)', 'al')
        return expanded.replace('()', 'o')
if __name__ == '__main__':
    # Bug fix: the original called interpret() with no argument, but `command`
    # is a required positional parameter, so this raised TypeError at runtime.
    # Demonstrate with a sample command instead (prints "Goal").
    print(Solution().interpret("G()(al)"))
| true |
beaffbf8c4250428fdee3a7f6d76689f03dc4a69 | Python | dmontealegre/pa_graphs | /PA_graph.py | UTF-8 | 1,209 | 3.359375 | 3 | [] | no_license | import networkx as nx
import matplotlib.pyplot as plt
import random
import copy
import numpy as np
import pylab
import math
import pickle
# The following function creates a graph that creates a graph that follows the preferential attachment model.
# networkx library comes with a different implementation of the Barabasi Albert model.
# Here we implement the one presented in Random Graphs (by Alan Frieze).
def pa_graph(m, n):
    """Return a preferential-attachment multigraph on n vertices, m edges per new vertex.

    Follows the construction in Random Graphs (Frieze): vertex 1 starts with m
    self-loops, and each later vertex attaches m edges to targets chosen with
    probability proportional to their current degree.
    """
    G = nx.MultiGraph()
    G.add_node(1)
    for x in range(0, m):
        G.add_edge(1, 1)
    # so far we created G_m^(1)
    # `targets` holds each vertex once per incident edge endpoint, so a uniform
    # choice from it is a degree-proportional selection.
    targets = [1] * 2 * m
    vertex = 2
    # we start adding the vertices starting from vertex two and working our way up.
    while vertex < n + 1:
        G.add_node(vertex)
        # create a shallow copy of the targets. We will be using this one to randomly choose the neighbors of x.
        # (The snapshot keeps the new vertex's own edges from influencing its
        # remaining choices within this step.)
        targets2 = copy.copy(targets)
        for x in range(0, m):
            chosen = random.choice(targets2)
            G.add_edge(vertex, chosen)
            # we update our targets list so we can use it for the next vertex.
            targets.append(chosen)
            targets.append(vertex)
        vertex += 1
    return G
| true |
1371edb870e847a98875de6f05cfcf58934c4f54 | Python | Luispapiernik/Guane-Inter-FastAPI | /app/models/dog.py | UTF-8 | 680 | 2.828125 | 3 | [
"MIT"
] | permissive | import datetime
from typing import Optional
from pydantic import BaseModel, HttpUrl
class BaseDog(BaseModel):
    # Fields shared by every dog representation (input and output models).
    name: str
    birth_date: Optional[datetime.datetime]
    picture: Optional[HttpUrl]
    is_adopted: bool
    id_user: Optional[str]  # id of the associated user (assumed; confirm against the user model)
# This class exists for consistency in model naming.
class DogIn(BaseDog):
    """
    Representation of the model received from the user when registering
    a new dog in the database.
    """
    pass
class DogOut(BaseDog):
    """
    Representation of the model sent to the user when a dog is registered
    or queried.
    """
    ID: str
    created_date: datetime.datetime
| true |
6cbe44b60613b93715a72adea0697d8f5719250a | Python | yatish0492/PythonTutorial | /pandas/0_Introduction.py | UTF-8 | 550 | 2.8125 | 3 | [] | no_license | '''
What is Pandas?
    It is a library of Python that provides functions for doing data analytics.
What is Data Munging/Wrangling?
    It is the process of cleaning messy data. For example, if some of the data is missing, we can fill the gaps with 0 or
    any other value so that we can easily process the data with analytics; this is one example of data munging/wrangling.
How to install Pandas?
    Anaconda comes with the pandas library included. We can also install pandas manually (for example with pip install)
    if you don't want to install Anaconda.
''' | true |
02a53441dac8c4d1d202e5dee750a94f3ed5553d | Python | RickyL-2000/ZJUI-lib | /PHYS212/unit5_hw2.py | UTF-8 | 299 | 3.125 | 3 | [] | no_license | import math
epsilon = 8.85e-12  # vacuum permittivity (F/m)
pi = 3.1415  # NOTE(review): math is imported; math.pi would be more precise.
# Q1
g = 2.3
h = 7
# NOTE(review): the expression hard-codes 7 and 2.3 instead of using h and g.
deltaV = (-3/2*7*7) - (-3/2*2.3*2.3)
print(deltaV)
# Q3
lam = 2.8e-6
# Ex = lam/(2*pi*epsilon*x)
c = 3
d = 7
# Integrates the Ex expression above between x=3 and x=7.
V3 = lam/(2*pi*epsilon) * (math.log(7)-math.log(3))
print(V3)
# Q4
sigm = 1.1e-6
V4 = sigm/(2*epsilon)*(0.2-1.9)
print(V4)
| true |
8be5eb81da94c5b68cc57ab922d8a6cf364f2007 | Python | sakurasakura1996/Leetcode | /二叉树/problem95_不同的搜索二叉树II.py | UTF-8 | 3,957 | 3.890625 | 4 | [] | no_license | """"
95. 不同的二叉搜索树 II
给定一个整数 n,生成所有由 1 ... n 为节点所组成的 二叉搜索树 。
示例:
输入:3
输出:
[
[1,null,3,2],
[3,2,null,1],
[3,1,null,null,2],
[2,1,3],
[1,null,2,null,3]
]
解释:
以上的输出对应以下 5 种不同结构的二叉搜索树:
1 3 3 2 1
\ / / / \ \
3 2 1 1 3 2
/ / \ \
2 1 2 3
提示:
0 <= n <= 8
problem96和这题是同一个题目,只不过它只需要输出一个不同结构二叉搜索树的个数,这个题目是要输出所有可能结果,递归是比较好思考的方法
"""
from typing import List
from functools import lru_cache
class TreeNode:
    """A binary-tree node holding a value and optional left/right children."""

    def __init__(self, val=0, left=None, right=None):
        # Store the value and child links exactly as given.
        self.val = val
        self.right = right
        self.left = left
class Solution:
    # NOTE(review): this class defines generateTrees twice; the second definition
    # below shadows (replaces) this recursive one at class-creation time, so only
    # the DP version is actually callable.
    # Unlike problem 96 (which only counts the trees), here we must build them.
    def generateTrees(self, n: int) -> List[TreeNode]:
        # Recursive approach: for every root i in [start, end], combine every
        # possible left subtree (start..i-1) with every right subtree (i+1..end).
        if n == 0:
            return []
        def helper(start, end):
            if start > end:
                return [None]
            ans = []
            for i in range(start, end+1):
                leftTrees = helper(start,i-1)
                rightTrees = helper(i+1,end)
                for l in leftTrees:
                    for r in rightTrees:
                        root = TreeNode(i)
                        root.left = l
                        root.right = r
                        ans.append(root)
            return ans
        ans = helper(1,n)
        return ans
    def generateTrees(self, n:int):
        # It turns out this problem can also be solved with dynamic programming.
        """
        Plain, unoptimised dynamic programming:
        1. dp[i][j] stores every possible tree built from the numbers i..j
           (dp is a three-dimensional structure).
        2. Initial values: if i == j there is a single one-node tree with value i;
           i > j cells are initialised to [None]; other cells start as empty lists
           (draw the matrix to see why).
        3. Transition: dp[i][j] is the union, over every root r in i..j, of the
           combinations of left subtrees dp[i][r-1] and right subtrees dp[r+1][j]
           — e.g. dp[1][3] uses roots 1, 2 and 3.
        4. Loop order: bottom-up over i, since dp[1][3] needs dp[2][3], and
           dp[2][4] needs dp[3][4] and dp[2][3].
        5. Watch the boundary cases when indexing.
        """
        if n == 0:
            return None
        # Initialise the dp table.
        dp = []
        for i in range(0, n+1):
            dp.append([])
            for j in range(0, n+1):
                if i == j:
                    dp[i].append([TreeNode(i)])
                elif i < j:
                    dp[i].append([])
                else:
                    dp[i].append([None])
        dp[0][0] = [None]
        for i in range(n - 1, 0, -1): # iterate bottom-up
            for j in range(i + 1, n + 1):
                for r in range(i, j + 1): # try every value in i..j as the root
                    left = r + 1 if r < j else r # clamp so we never index past the table
                    for x in dp[i][r - 1]: # combine all left/right subtree pairs
                        for y in dp[left][j]:
                            node = TreeNode(r)
                            node.left = x
                            node.right = y
                            if r == j:
                                node.right = None
                            dp[i][j].append(node) # record this combination in dp[i][j]
        return dp[1][n]
solu = Solution()
ans = solu.generateTrees(3)
# NOTE(review): prints the list of root TreeNode objects (default object reprs),
# not a serialised view of the trees.
print(ans)
| true |
75e492a97355d5dea483a044ed68a0c54b57ae9d | Python | jeremy-codes/rosalind | /solutions/bioinformatics_stronghold/020-revp.py | UTF-8 | 734 | 2.84375 | 3 | [] | no_license | """Solution for Bioinformatics Stronghold Problem ID: REVP
Problem Title: Locating Restriction Sites
Link: http://rosalind.info/problems/revp
"""
import rosalindutils.dna_functions as dnaf
from rosalindutils.fasta_parser import FastaParser
input_path = "data/rosalind_revp.txt"
seq_objs = FastaParser(input_path).parse_fasta()
fwd_seq = seq_objs[0]["sequence"]
seq_l = len(fwd_seq)  # NOTE(review): unused
# For every start position, test window lengths 4..12 for reverse palindromes
# (substrings equal to their own reverse complement) and print "<pos> <len>".
for fi in range(0, len(fwd_seq)):
    for pdrome_l in range(4, 13):
        fwd_subseq = fwd_seq[fi: fi + pdrome_l]
        if len(fwd_subseq) == pdrome_l:  # skip windows that run past the end
            rev_subseq = dnaf.dna_reverse_complement(fwd_subseq)
            if fwd_subseq == rev_subseq:
                pos = fi + 1  # Rosalind positions are 1-based
                print(str(pos) + " " + str(pdrome_l))
| true |
601227337d264d59a927830d6c8ae6032fa20156 | Python | huyson1810/VS_speech_processing | /test/test_2.py | UTF-8 | 251 | 2.796875 | 3 | [] | no_license | from tkinter import *
from PIL import ImageTk
# Full-window blue canvas displaying a GIF anchored near the top-left corner.
canvas = Canvas(width=1000, height=800, bg='blue')
canvas.pack(expand=YES, fill=BOTH)
# Keep a module-level reference to the image so it is not garbage-collected.
image = ImageTk.PhotoImage(file="../virtual_ass.gif")
canvas.create_image(10, 10, image=image, anchor=NW)
mainloop() | true |
0b97b270abc818bf7d2defc031369da7d2ecd11d | Python | ruthierutho/karaoke_homework | /codeclan_caraoke/tests/room_test.py | UTF-8 | 3,894 | 3.140625 | 3 | [] | no_license | import unittest
from classes.room import *
from classes.song import *
from classes.guest import *
class TestRoom(unittest.TestCase):
    """Unit tests for the karaoke Room class and its interactions with Song/Guest."""
    def setUp(self):
        # Fresh fixtures before every test: nine songs, five guests, four rooms.
        self.song1 = Song("I'm a Slave 4 u", "Britney Spears")
        self.song2 = Song("Toxic", "Britney Spears")
        self.song3 = Song("...Baby One More Time", "Britney Spears")
        self.song4 = Song("Dirrty", "Christina Aguilera")
        self.song5 = Song("Genie in a Bottle", "Christina Aguilera")
        self.song6 = Song("Beautiful", "Christina Aguilera")
        self.song7 = Song("Like a Prayer", "Madonna")
        self.song8 = Song("Hung Up", "Madonna")
        self.song9 = Song("Material Girl", "Madonna")
        self.guest1 = Guest("Joanne", "Like a Prayer", 47)
        self.guest2 = Guest("Jane", "...Baby One More Time", 19)
        self.guest3 = Guest("Julie", "Dirrty", 35)
        self.guest4 = Guest("Jackie", "Toxic", 45)
        self.guest5 = Guest("Jamie", "Hung Up", 86)
        # room4 starts at its 2-guest capacity, for the "room full" tests.
        self.room1 = Room("Britney Room", 12, [self.song1, self.song2, self.song3], [], 90)
        self.room2 = Room("Christina Room", 6, [self.song4, self.song5, self.song6], [], 50)
        self.room3 = Room("Madonna Room", 10, [self.song7, self.song8, self.song9], [], 35)
        self.room4 = Room("Other Room", 2, [], [self.guest4, self.guest5], 15)
    def test_room_has_name(self):
        self.assertEqual("Britney Room", self.room1.name)
    def test_room_capacity(self):
        self.assertEqual(10, self.room3.capacity)
    def test_room_has_song(self):
        self.assertEqual("Material Girl", self.room3.songs[2].title)
    def test_add_song_to_room(self):
        self.room4.add_song_to_room(self.room4, self.song8)
        self.assertEqual("Hung Up", self.room4.songs[0].title)
        # self.assertEqual(1, len(self.room4.songs))
    def test_how_many_songs_in_room(self):
        self.assertEqual(3, len(self.room1.songs))
    def test_find_song_in_room_by_title(self):
        self.assertEqual(self.room1.songs[1].title, self.room1.find_song_in_room_by_title("Toxic"))
    def test_if_song_is_in_room__True(self):
        self.assertEqual(True, self.room2.song_in_room(self.song5))
    def test_if_song_is_in_room__False(self):
        self.assertEqual(False, self.room2.song_in_room(self.song7))
    def test_add_guest_to_room(self):
        self.room1.add_guest_to_room(self.guest2)
        self.assertEqual("Jane", self.room1.guests[0].name)
    def test_remove_guest_from_room(self):
        self.room4.remove_guest_from_room(self.guest4)
        self.assertEqual(1, len(self.room4.guests))
    def test_check_capacity_before_entry_full(self):
        self.assertEqual("Room is full sorry!", self.room4.check_capacity_before_entry(self.guest1))
    def test_check_capacity_before_entry_not_full(self):
        self.room2.check_capacity_before_entry(self.guest2)
        self.assertEqual(1, len(self.room2.guests))
    def test_charge_guest_entry_fee(self):
        self.room1.charge_guest_entry_fee(self.guest1, 5)
        self.assertEqual(42, self.guest1.purse)
    def test_add_fee_to_room_till(self):
        self.room1.add_fee_to_room_till(5)
        self.assertEqual(95, self.room1.till)
    def test_full_guest_check_in_process(self):
        # End-to-end check-in: capacity check, fee collection, till update, song reaction.
        # check capacity before entry
        self.room1.check_capacity_before_entry(self.guest4)
        self.assertEqual(1, len(self.room1.guests))
        # charge entry fee from guest
        self.room1.charge_guest_entry_fee(self.guest4, 5)
        self.assertEqual(40, self.guest4.purse)
        # add fee to room till
        self.room1.add_fee_to_room_till(5)
        self.assertEqual(95, self.room1.till)
        # guest can check for fave song
        self.guest4.guest_react_to_fave_song(self.room1)
        self.assertEqual("Yassss put it on! I know allll the words!!", self.guest4.guest_react_to_fave_song(self.room1))
| true |
eca7afd8799ccdb53e79aabd253dd5da7e8e9f44 | Python | briandleahy/globaloptimize | /util/heap.py | UTF-8 | 3,750 | 4.15625 | 4 | [] | no_license | from collections import deque
class Heap(object):
    """
    A data structure which efficiently keeps the min value at the top.

    Both adding an object to the heap and popping the minimum object
    from the heap take O(log(N)) operations.  Each node holds one value
    plus two child sub-heaps; new values are routed into the smaller
    child so the tree stays balanced.

    Methods
    -------
    create_from_iterable: iterable -> Heap
        Build a heap from any iterable (an empty iterable gives an
        empty heap).
    add_to_heap
        Add a single value to the heap.
    pop_min:
        Remove and return the minimum element from the heap.

    Raises
    ------
    EmptyHeapError
        Raised when pop_min() is called on an empty heap.

    See Also
    --------
    heapsort
        Uses a Heap structure to sort items in O(N log(N)) time.
    """

    def __init__(self, value=None):
        """
        Parameters
        ----------
        value : comparison-sortable object or None, optional
            The first value to add to the heap. If None, the heap is
            initialized empty. If not None, `value` must be comparable
            to the other objects in the heap with >, <, >=, <=, and ==.
        """
        self.value = value
        # Number of values stored in this node's subtree (0 when empty).
        self.num_in_heap = 0 if value is None else 1
        self.left_child = None
        self.right_child = None

    @classmethod
    def create_from_iterable(cls, iterable):
        """Build a Heap containing every item of `iterable`.

        Bug fix: an empty iterable is now valid and produces an empty
        heap, instead of raising StopIteration from next() as the
        previous implementation did.
        """
        heap = cls()
        for item in iterable:
            heap.add_to_heap(item)
        return heap

    def add_to_heap(self, value):
        """
        Parameters
        ----------
        value : comparison-sortable object
            `value` must be comparable to the other objects in the heap
            with >, <, >=, <=, and ==.
        """
        if self.value is None:
            # Empty node: the new value simply becomes this node's value.
            self.value = value
        elif value < self.value:
            # New minimum: keep it here and push the old root value down.
            self._bubble_down(self.value)
            self.value = value
        else:
            self._bubble_down(value)
        self.num_in_heap += 1

    def pop_min(self):
        """Remove and return the smallest value; raise EmptyHeapError if empty."""
        if self.num_in_heap == 0:
            raise EmptyHeapError
        return self._pop_min()

    def _pop_min(self):
        # The root always holds the minimum; refill it from the children.
        out = self.value
        self.num_in_heap -= 1
        self.value = self._bubble_up()
        return out

    def _bubble_down(self, value):  # needs a better name
        # Route `value` into a child, preferring the smaller subtree so
        # the two children stay balanced in size.
        if self.left_child is None:
            self.left_child = self.__class__(value)
        elif self.right_child is None:
            self.right_child = self.__class__(value)
        elif self.left_child.num_in_heap < self.right_child.num_in_heap:
            self.left_child.add_to_heap(value)
        else:
            self.right_child.add_to_heap(value)

    def _bubble_up(self):
        # Pull the smaller of the two child roots up to replace this
        # node's value.  5 options: both children None, one None (x2),
        # or neither None.
        if self.left_child is None and self.right_child is None:
            out = None
        elif self.left_child is None:
            out = self._bubble_up_right()
        elif self.right_child is None:
            out = self._bubble_up_left()
        elif self.left_child.value < self.right_child.value:
            out = self._bubble_up_left()
        else:
            out = self._bubble_up_right()
        return out

    def _bubble_up_left(self):
        out = self.left_child.pop_min()
        if len(self.left_child) == 0:
            # Drop emptied children so traversals stay short.
            self.left_child = None
        return out

    def _bubble_up_right(self):
        out = self.right_child.pop_min()
        if len(self.right_child) == 0:
            self.right_child = None
        return out

    def __len__(self):
        return self.num_in_heap
class EmptyHeapError(Exception):
    """Raised by Heap.pop_min() when the heap contains no elements."""
    pass
def heapsort(x):
    """
    Return the elements of iterable `x` as a sorted list.

    Builds a Heap (O(N log N)) and pops the minimum N times. An empty
    iterable yields an empty list; the previous version raised
    StopIteration on empty input because it pulled the first element
    with a bare next().
    """
    heap = Heap()
    for item in x:
        heap.add_to_heap(item)
    out = deque()
    while len(heap) > 0:
        out.append(heap.pop_min())
    return list(out)
| true |
eb42a64b2dd667e7b840237591c79dacfc507ddb | Python | JeterG/Post-Programming-Practice | /CodingBat/Python/List_2/has22.py | UTF-8 | 240 | 3.4375 | 3 | [] | no_license | #Given an array of ints, return True if the array contains a 2 next to a 2 somewhere
def has22(nums):
    """Return True when the list contains a 2 directly next to another 2."""
    return any(first == 2 and second == 2
               for first, second in zip(nums, nums[1:]))
37b44c85595b4ec7642bae59f055e3df2becf1cf | Python | Keine-Ahnung/secWebAw | /webapp/tralala/function_helper.py | UTF-8 | 5,906 | 2.75 | 3 | [] | no_license | import random
import smtplib
import string
import db_handler
import security_helper
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
def send_mail_basic(to, subject, text_mail_body, html_mail_body=None):
    """
    Send a (multipart) mail through the project's GMX account.

    Parameters
    ----------
    to : str
        Recipient address.
    subject : str
        Mail subject line.
    text_mail_body : str
        Plain-text body (always attached).
    html_mail_body : str or None, optional
        HTML alternative body, attached in addition when given.

    Returns
    -------
    bool
        True on success; on failure the exception is printed and re-raised.
    """
    # NOTE(review): credentials are hard-coded in source control -- they
    # should live in configuration/environment variables instead.
    sender = "verification_tralala@gmx.de"
    password = "sichwebapp_2017"

    msg = MIMEMultipart()
    msg["From"] = sender
    msg["To"] = to
    msg["Subject"] = subject
    msg.attach(MIMEText(text_mail_body, 'plain'))
    if html_mail_body is not None:
        msg.attach(MIMEText(html_mail_body, 'html'))
    try:
        # NOTE(review): literal IP presumably belongs to GMX's SMTP host --
        # confirm; using the hostname would survive IP changes.
        server = smtplib.SMTP("212.227.17.190", 587)
        server.starttls()
        server.login(sender, password)
        server.sendmail(sender, to, msg.as_string())
        server.quit()
        return True
    except Exception as e:
        print(str(e))
        raise
def send_verification_mail(to, confirm_url):
    """
    Send the account-confirmation mail (registration) to `to`.

    The mail contains `confirm_url` both as an HTML link and as a
    plain-text fallback. Returns whatever send_mail_basic returns
    (True on success; exceptions propagate).
    """
    subject = 'Bestaetige deinen Account bei Tralala!'
    html = """\
<html>
<head></head>
<body>
<p> Hallo """ + to + """\ </br>
Benutze den folgenden Link, um deinen Account zu bestaetigen. </br>
<a href=\"""" + confirm_url + """\"> Bestaetigung </a> </br> </br>
Viel Freude, </br>
dein Tralala- Team
</p>
</body>
</html>
"""
    text = "Hallo " + to + "\nanbei der Link zur Bestaetigung deines " \
           "Accounts\nkopiere diesen in deinen Browser, " \
           "um deinen Account zu bestätigen\n\n" + confirm_url
    success = send_mail_basic(to=to, subject=subject, text_mail_body=text,
                              html_mail_body=html)
    return success
def send_reset_mail(to, uid, token, url, app=None):
    """
    Send the password-reset mail containing the reset link to a user.

    Parameters
    ----------
    to : str
        Recipient mail address.
    uid : int
        User id embedded in the reset link.
    token : str
        One-time reset token embedded in the reset link.
    url : str
        Path of the reset endpoint (appended to the local host URL).
    app : Flask application or None, optional
        When given, used for debug logging. Made optional because
        reset_password() in this module calls this function with only
        four arguments, which previously raised TypeError.

    Returns
    -------
    bool
        True when the mail was sent, False when sending failed.
    """
    mail_body_plain = "Wir haben dein Passwortanfrage erhalten.\n" \
                      "Bitte besuche den untenstehenden Link, um dein " \
                      "Passwort zurückzusetzen\n\n" + \
                      "http://localhost:5000" + str(url) + \
                      "?token=" + str(token) + "&uid=" + str(uid)
    try:
        send_mail_basic(to, "Tralala - Passwort zurücksetzen",
                        text_mail_body=mail_body_plain,
                        html_mail_body=None)
        if app is not None:
            app.logger.debug("Passwort Reset Mail gesendet an "
                             + to + " mit Token " + token)
        return True
    except Exception:
        # Delivery failed (SMTP error etc.): report failure to the caller.
        return False
def reset_password(mysql, mail: str, url: str):
    """
    Start a password reset for the account registered under `mail`.

    Looks the user up, stores a fresh reset token and mails the reset
    link. Returns True when the mail was reported as sent, False when
    the address is unknown or sending failed.
    """
    success, data = db_handler.check_for_existence(mysql=mysql, email=mail)
    if success != 1:
        return False
    else:
        token = generate_token(99)
        # NOTE(review): this calls set_reset_token on the `mysql` object and
        # passes `mysql` again as an argument, while every sibling helper
        # goes through db_handler (e.g. db_handler.get_reset_token) --
        # verify this should not be db_handler.set_reset_token(...).
        mysql.set_reset_token(mysql, token, data["uid"])
        # NOTE(review): send_reset_mail is declared with a fifth parameter
        # `app`; this call passes only four arguments and raises TypeError
        # unless `app` has a default value.
        mail_sended = send_reset_mail(data["email"], data["uid"], token, url)
        return mail_sended
def confirm_password_reset(user_email, confirm_url):
    """
    Mail a confirmation link for a password change to `user_email`.

    Fix: the original body referenced an undefined name `to` instead of
    the `user_email` parameter, raising NameError on every call.
    Returns whatever send_mail_basic returns.
    """
    html = """\
<html>
<head></head>
<body>
<p> Hallo, </br>
Benutze den folgenden Link, um das Zurücksetzen deines Passworts
zu bestaetigen. </br>
<a href=\"""" + confirm_url + """\"> Bestaetigung </a> </br> </br>
Viel Freude, </br>
dein Tralala- Team
</p>
</body>
</html>
"""
    text = "Hallo " + user_email + "\nanbei der Link zur Bestaetigung der " \
           "Änderung deines Passworts " \
           "\nkopiere diesen in deinen Browser, " \
           "um die Änderung bestätigen\n\n" + confirm_url
    return send_mail_basic(user_email, subject="Passwort Bestaetigung",
                           html_mail_body=html, text_mail_body=text)
def generate_token(length):
    """
    Generate a random verification token of `length` characters drawn
    from lowercase letters and digits.

    Security fix: tokens are used for account verification and password
    reset, so they must be unpredictable. The previous implementation
    used the `random` module (a deterministic PRNG); `secrets` provides
    a CSPRNG suitable for security tokens.
    """
    import secrets  # local import keeps this fix self-contained
    alphabet = string.ascii_lowercase + string.digits
    return ''.join(secrets.choice(alphabet) for _ in range(length))
def compare_reset_token(mysql, userid: int, token: str):
    """Return True when `token` equals the reset token stored for `userid`."""
    stored_token = db_handler.get_reset_token(mysql=mysql, userid=userid)
    return token == stored_token
def check_params(t, param):
    """
    Validate the format of a request parameter.

    Parameters
    ----------
    t : str
        Parameter kind: "email", "id", "text", "password" or "token".
    param
        The value to validate (e.g. password strength for "password",
        non-negative integer for "id").

    Returns
    -------
    bool or None
        True when `param` is acceptable for kind `t`, False when it is
        not; None for an unknown kind (fall-through, like the original).
    """
    if t == "email":
        if not param or not security_helper.check_mail(param):
            return False
        return True
    if t == "id":
        if not param:
            return False
        try:
            numeric = int(param)
        except:  # noqa: E722 -- any conversion failure means an invalid id
            return False
        return numeric >= 0
    if t == "text":
        if not param:
            return False
        return param != ""
    if t == "password":
        if not param or param == "":
            return False
        return bool(security_helper.check_password_strength(param)[0])
    if t == "token":
        if not param or param == "":
            return False
        return param.isalnum()
    # Unknown kind: implicitly return None, mirroring the original.
| true |
4685a14fa7ea4489ba2d21826e3af52d922eddb0 | Python | aqknutsen/AlecAlexMarchMadnessMania | /GetIndividualInfo.py | UTF-8 | 2,828 | 2.875 | 3 | [] | no_license | from urllib.request import urlopen
import sqlite3
from bs4 import BeautifulSoup
class GetIndividualStats:
    """
    Scrape ESPN men's college basketball roster pages (seasons 2002-2018)
    and update the PlayerStatistics table in Database.db with position,
    height, weight and class per player, year and team.
    """

    def __init__(self):
        pass

    def get_stats(self):
        """Collect the roster page of every team and write the data to SQLite."""
        url = 'http://www.espn.com/mens-college-basketball/teams'
        player_links = []
        team_name = []
        try:
            response = urlopen(url)
            soup = BeautifulSoup(response.read(), 'html.parser')
            for link in soup.find_all('a'):
                if link.get_text() == 'Stats':
                    # Team id is everything after the fixed-length URL prefix.
                    id = link['href']
                    id = id[24:]
                    team_link = 'http://www.espn.com/mens-college-basketball/team/roster/_/id/' + str(id)
                    player_links.append(team_link)
                if link['href'][0:49] == 'http://www.espn.com/mens-college-basketball/team/':
                    team_name.append(link.get_text())
            index = 0
            conn = sqlite3.connect('Database.db')
            c = conn.cursor()
            for link in player_links:
                for year in range(2002,2019):
                    response = urlopen(link + '/year/' + str(year))
                    soup = BeautifulSoup(response.read(), 'html.parser')
                    table = soup.find_all('table')[0]
                    rows = table.findChildren('tr')
                    for i in range(0,len(rows)):
                        # Skip the header rows of the roster table.
                        if rows[i]['class'][0] == 'stathead' or rows[i]['class'][0] == 'colhead':
                            continue
                        entries = rows[i].findAll('td')
                        data_vec = []
                        j=0
                        name = ""
                        for entry in entries:
                            # Cells 0 and 6 are skipped, cell 1 is the player
                            # name, the rest become the UPDATE values.
                            if j == 0 or j == 6:
                                j = j + 1
                            elif j == 1:
                                name = entry.get_text()
                                j = j + 1
                            else:
                                data_vec.append(entry.get_text())
                                j = j + 1
                        print(name)
                        print(year)
                        data_vec.append(year)
                        data_vec.append(team_name[index])
                        data_vec.append(name)
                        print(data_vec)
                        temp_tuple = tuple(data_vec)
                        print('executing')
                        c.execute('UPDATE PlayerStatistics SET Position = ?, Height = ?, Weight = ?, Class = ? WHERE Year=? and Team=? and Name=?', temp_tuple)
                        conn.commit()
                index = index + 1
            conn.close()
        except sqlite3.Error:
            # NOTE(review): `conn` is unbound here if the error happened
            # before sqlite3.connect(), and `output_exc` is not defined on
            # this class -- both lines can raise a second exception.
            conn.close()
            self.output_exc()
# Script entry point: scraping and database updates start immediately
# when this module is executed (or imported).
Instance = GetIndividualStats()
Instance.get_stats()
| true |
06612951cf0f1886c222f71f48d13145c3b56b60 | Python | lllttzz/my_code | /项目上位机/Myline.py | UTF-8 | 2,126 | 2.78125 | 3 | [] | no_license | from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import argparse
#绘制曲线主函数
def Mymain(x,y,im,am,pos,dir):
    """
    Plot RGB intensity curves for one row/column of an image, or a
    scatter plot of every pixel value.

    Parameters
    ----------
    x, y : int
        Image dimensions as returned by PIL's Image.size.
    im : sequence
        The three PIL channel bands (R, G, B).
    am : np.matrix
        Full pixel data, one RGB triple per row.
    pos : int
        Row (dir == 1) or column (dir == 0) index to plot.
    dir : int
        1 = horizontal cut, 0 = vertical cut, 2 = whole-image scatter.
    """
    plt.clf()
    n = ['rt','gt','bt']
    for i in range(3):
        n[i] = im[i].getdata()  # fetch the channel's raw pixel sequence
        n[i] = np.matrix(n[i])  # convert to a matrix
        # NOTE(review): PIL's size is (width, height) while pixel data is
        # row-major; confirm the (x, y) reshape order for non-square images.
        n[i] = np.reshape(n[i],(x,y))
    a = range(x)
    a = np.matrix(a)
    b = range(x*y)
    b = np.matrix(b)
    # Horizontal cut: channel intensities along row `pos`.
    if dir == 1:
        plt.xlim(0, x)
        plt.ylim(0, y)
        plt.plot(a[0,:].T, n[0][pos,:].T,'-',c='r')
        plt.plot(a[0,:].T, n[1][pos,:].T,'-',c='g')
        plt.plot(a[0,:].T, n[2][pos,:].T,'-',c='b')
        plt.title('line_regression_horizontal')
    # Vertical cut: channel intensities along column `pos`.
    if dir == 0:
        plt.xlim(0, x)
        plt.ylim(0, y)
        plt.plot(a[0,:].T, n[0][:,pos],'-',c='r')
        plt.plot(a[0,:].T, n[1][:,pos],'-',c='g')
        plt.plot(a[0,:].T, n[2][:,pos],'-',c='b')
        plt.title('line_regression_vertical')
    # Whole image: scatter every pixel's R/G/B value.
    # NOTE(review): this `elif` chains to the `if dir == 0` above, not to
    # `if dir == 1` -- confirm the branching is intended.
    elif dir == 2:
        plt.xlim(0, x*y+5)
        plt.ylim(0, y+5)
        plt.scatter(b[0,:],am[:,0],c='r',alpha=0.1)
        plt.scatter(b[0,:],am[:,1],c='g',alpha=0.1)
        plt.scatter(b[0,:],am[:,2],c='b',alpha=0.1)
        plt.title('scatter diagram')
    plt.xlabel("picture_position")
    plt.ylabel("RGB_value")
    plt.ion()
    plt.show()
def Pic(pos,dir):
    """Load 'lena.bmp', split it into channel bands and delegate to Mymain."""
    am = Image.open("lena.bmp").convert()
    x,y = am.size
    im = am.split()
    am = am.getdata()
    am = np.matrix(am)
    Mymain(x,y,im,am,pos,dir)
#作为模块导入使用,则不执行以下语句
# if __name__ == '__main__':
# parser = argparse.ArgumentParser() #命令行解析函数
#
# parser.add_argument('picture')
# parser.add_argument('x',type=int,choices=range(512))
# parser.add_argument('y',type=int,choices=[0, 1, 2])
# args = parser.parse_args() #解析上述函数
# pos,dir=args.x,args.y #命令行自定参数赋值给pos和dir
# IMG = args.picture
#
# am = Image.open(IMG)
# im = am.split()
# am = am.getdata()
# am = np.matrix(am)
# Mymain(im,am,pos,dir)
| true |
e2ca120ff6ab891b2413a2f78332752fac464b48 | Python | emapco/Python-Code-Challenges | /merge_csv_files.py | UTF-8 | 1,765 | 3.203125 | 3 | [] | no_license | import csv
import pandas as pd
import numpy as np
# merges multiple CSV files into one utilizing pandas library
def merge_csv_files(input_files, output_file_path):
    """
    Merge multiple CSV files (sharing an index column) into one CSV.

    Rows from all files are stacked; columns missing in a file are left
    as missing values. Columns that are integral in any input stay
    integers (nullable Int64) instead of being up-cast to float by the
    missing values.

    Fixes: DataFrame.append was removed in pandas 2.0 (replaced with
    pd.concat) and np.NaN was removed in numpy 2.0 (replaced with np.nan).

    Parameters
    ----------
    input_files : list[str]
        Paths of the CSV files to merge; column 0 is used as the index.
    output_file_path : str
        Path the merged CSV is written to. Nothing is written when
        `input_files` is empty.
    """
    input_dfs = [pd.read_csv(file, index_col=0) for file in input_files]
    if not input_dfs:
        return
    # Stack all frames, keeping each frame's index values.
    output_df = pd.concat(input_dfs).replace(np.nan, pd.NA)
    # Restore a nullable integer dtype for every column that was an
    # integer column in any of the inputs (missing values would have
    # forced them to float/object otherwise).
    for df in input_dfs:
        for column in df.columns:
            if pd.api.types.is_integer_dtype(df[column]):
                output_df[column] = output_df[column].astype('Int64')
    output_df.to_csv(output_file_path)
def solution(csv_list, output_path):
    """
    Merge multiple CSV files into one using only the csv module.

    The output header is the union of all input headers, ordered by
    first appearance across the files; a row missing a field gets an
    empty value for it.
    """
    # Collect every field name, keeping the order of first appearance.
    fieldnames = []
    seen = set()
    for path in csv_list:
        with open(path, 'r') as source:
            for field in csv.DictReader(source).fieldnames:
                if field not in seen:
                    seen.add(field)
                    fieldnames.append(field)
    # Stream every row of every file into the output; DictWriter fills
    # fields absent from a row with its empty-string default.
    with open(output_path, 'w', newline='') as target:
        writer = csv.DictWriter(target, fieldnames=fieldnames)
        writer.writeheader()
        for path in csv_list:
            with open(path, 'r') as source:
                for row in csv.DictReader(source):
                    writer.writerow(row)
| true |
03dae043060cfea719488a6d899c2afc81298cdd | Python | GraceDurham/coding_challenges_coding_bat | /pos_neg.py | UTF-8 | 393 | 3.90625 | 4 | [] | no_license |
# Given 2 int values, return True
# if one is negative and one is positive.
# Except if the parameter "negative" is True, then return True only if both are negative.
def pos_neg(a, b, negative):
    """
    When `negative` is True: both a and b must be negative.
    Otherwise: one value must be negative and the other positive
    (zero counts as neither).
    """
    if negative:
        return a < 0 and b < 0
    return (a < 0 < b) or (b < 0 < a)
# Demo calls; each prints True.
print(pos_neg(1, -1, False))
print(pos_neg(-1, 1, False))
print(pos_neg(-4, -5, True))
a2ae95721b14e5232ca7ab17b15a47451343257f | Python | arielmiki/playit-lite | /model.py | UTF-8 | 985 | 2.921875 | 3 | [] | no_license | import enum
import pickle
class MouseKeyboardEvent:
    """
    Serializable record of a single mouse or keyboard event.

    NOTE(security): (de)serialization uses pickle -- never call decode()
    on data received from an untrusted peer, since unpickling can
    execute arbitrary code.
    """

    class Type(enum.Enum):
        # Kind of input event this record describes.
        MOUSE_ON_MOVE = 0
        MOUSE_ON_SCROLL = 1
        MOUSE_ON_CLICK = 2
        KEYBOARD_ON_PRESSED = 3
        KEYBOARD_ON_RELEASED = 4

    @staticmethod
    def decode(byte):
        """Reconstruct an event from bytes produced by encode()."""
        return pickle.loads(byte)

    # Fix: the original declared a second, static ``encode(obj)`` here that
    # was immediately shadowed by the instance method below (dead code).
    # ``MouseKeyboardEvent.encode(event)`` still works, because calling the
    # instance method through the class passes the event as ``self``.
    def encode(self):
        """Serialize this event to bytes with pickle."""
        return pickle.dumps(self, pickle.HIGHEST_PROTOCOL)

    def __init__(self, type: Type, x=0, y=0, button=None, pressed=False,
                 dx=0, dy=0, key=None):
        self.type = type          # one of MouseKeyboardEvent.Type
        self.x = x                # pointer position
        self.y = y
        self.button = button      # mouse button involved, if any
        self.pressed = pressed    # True for press, False for release
        self.dx = dx              # scroll deltas
        self.dy = dy
        self.key = key            # keyboard key involved, if any

    def __repr__(self):
        # Fix: the original format string had a trailing '>' without a
        # matching '<' at the start.
        return "<{0}: x={1}, y={2}, button={3}, pressed={4}, dx={5}, dy={6}, key={7}>".format(
            self.type, self.x, self.y, self.button, self.pressed,
            self.dx, self.dy, self.key)
| true |
0cfc56359c3f89e737cbc643c7d9d224fc41c9bd | Python | rafiparvez/urlshortener | /url_shortener_proj/url_shortener_app/schema.py | UTF-8 | 1,147 | 2.546875 | 3 | [] | no_license | import graphene
from graphene_django.types import DjangoObjectType, ObjectType
from .models import UrlModel
"""
Class to manage GraphQL schema
"""
# Create a GraphQL type for the url model
class UrlType(DjangoObjectType):
    """GraphQL type exposing the fields of the UrlModel Django model."""
    class Meta:
        model = UrlModel
class QueryType(graphene.ObjectType):
    """Root query type of the URL-shortener GraphQL schema."""
    # All stored urls.
    urls = graphene.List(UrlType)

    # NOTE(review): no `url` field is declared on this type, so this
    # resolver is never wired up by graphene as written -- confirm whether
    # a single-url field declaration is missing.
    def resolve_url(self, info, **kwargs):
        url = kwargs.get('url')
        if url is not None:
            return UrlModel.objects.get(pk=url)
        return None

    def resolve_urls(self, info, **kwargs):
        # Resolver for the `urls` field: every UrlModel row.
        return UrlModel.objects.all()
# Create mutations for url
class CreateUrl(graphene.Mutation):
    """Mutation that stores a new url and returns it with a status flag."""
    url = graphene.Field(UrlType)
    status = graphene.Boolean()

    class Arguments:
        url = graphene.String()

    @staticmethod
    def mutate(root, info, url):
        # Persist the url; status is always True (no validation is done here).
        status = True
        url_instance = UrlModel(url=url)
        url_instance.save()
        return CreateUrl(status=status, url=url_instance)
# Class to hold graphql mutations
class Mutation(graphene.ObjectType):
    """Root mutation type: groups every GraphQL mutation of the app."""
    create_url = CreateUrl.Field()
# Module-level schema object wired into the GraphQL endpoint.
schema = graphene.Schema(query=QueryType, mutation=Mutation)
cf6abde98117d67308616864e043a34778efd5aa | Python | Mariamawatt/TP_Ateliers_Prog | /AP3/exercice3.py | UTF-8 | 543 | 3.421875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 17 15:49:41 2020
@author: Mariama
"""
def separer(L_non_triee: list) -> list:
    """
    Stable partition of a list of numbers: all negatives first, then all
    zeros, then all positives, each group keeping its original order.
    """
    negatifs = [valeur for valeur in L_non_triee if valeur < 0]
    nuls = [valeur for valeur in L_non_triee if valeur == 0]
    positifs = [valeur for valeur in L_non_triee if valeur > 0]
    return negatifs + nuls + positifs
# Demo call; prints [-1, -2, 0, 0, 1, 3, 5].
print (separer ([1, -1, 0, 3, -2, 5, 0]))
671a52ee2ee5714f55c9418415d3eb0e66bd7d3b | Python | ashutosh117/rosalind_problems | /stronghold/gc_content.py | UTF-8 | 768 | 2.96875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 6 13:48:25 2020
@author: t1
"""
def readFile(file_path):
    """Read a text file and return its lines, surrounding whitespace stripped."""
    with open(file_path, 'r') as handle:
        return [line.strip() for line in handle]
def gc_content(dna_seq):
    """Percentage of 'C'/'G' characters in `dna_seq` (uppercase only)."""
    gc_count = dna_seq.count('C') + dna_seq.count('G')
    return gc_count / len(dna_seq) * 100
# --- Script body: report the FASTA record with the highest GC content ---
file_path = 'test_data/gc_content_test.txt'
dna_list = readFile(file_path)
# Parse FASTA-style input: a line containing '>' starts a new record;
# all following lines are concatenated as that record's sequence.
dna_dist = {}
label = ''
for line in dna_list:
    if '>' in line:
        label = line.replace('>','')
        dna_dist[label] = ''
    else:
        dna_dist[label] += line
# GC percentage per record, then the record with the maximum.
res_dict = {key:gc_content(value) for key,value in dna_dist.items()}
max_key = max(res_dict,key = res_dict.get)
max_gc = res_dict[max_key]
print(f'{max_key}\n{max_gc:.6f}')
| true |
b501766cde2f305ee39e1f7644619353cb7192dd | Python | heliosantos/clipboard_text_processor | /clipboard_text_processor/format_http_request.py | UTF-8 | 890 | 2.859375 | 3 | [] | no_license | import re
from .decorators import use_clipboard
@use_clipboard
def format_http_request(raw):
output = []
lines = [l.strip('\r\n') for l in raw.split('\n')]
output.append(lines.pop(0))
headers = []
headersDict = {}
while h := lines.pop(0):
headers.append(h)
if m := re.search(r'([^:]+):\s*(.*)', h, re.IGNORECASE):
headersDict[m.group(1).lower()] = m.group(2)
headers = sorted(headers, key=lambda x: x.lower())
output.extend(headers)
output.append('')
contentType = headersDict.get('content-type', '')
for l in lines:
if 'application/x-www-form-urlencoded' in contentType:
entries = l.split('&')
output.extend(sorted(entries, key=lambda x: x.lower()))
else:
output.append(l)
return '\r\n'.join(output)
if __name__ == '__main__':
format_http_request()
| true |
b7f8ee9316bec41e7711610b3be3135df4b05b4d | Python | muhammadmisbah/SPSSexe-installed-pckg | /IBM/SPSS/Statistics/21/Samples/Make Significant Values Bold And Red.py | UTF-8 | 6,385 | 2.75 | 3 | [] | no_license | #/***********************************************************************
# * IBM Confidential
# *
# * OCO Source Materials
# *
# * IBM SPSS Products: Statistics Common
# *
# * (C) Copyright IBM Corp. 1989, 2011
# *
# * The source code for this program is not published or otherwise divested of its trade secrets,
# * irrespective of what has been deposited with the U.S. Copyright Office.
# ************************************************************************/
import SpssClient
""" This example finds a column labelled "Significance" in selected
pivot tables, and changes the color of values less than 0.05
in that column to red. It can be applied to all pivot tables
by changing
"""
### TODO: This part will migrate into its own module SpssClientAux.py
### and will be imported from Lib/site-packages.
###
### Please scroll to the end to see how this is applied.
class OutputItemTypeEnum(object):
itemType = {}
#itemType[0] = SpssClient.OutputItemType.UNKNOWN
itemType[1] = SpssClient.OutputItemType.CHART
itemType[2] = SpssClient.OutputItemType.HEAD
itemType[3] = SpssClient.OutputItemType.LOG
itemType[4] = SpssClient.OutputItemType.NOTE
itemType[5] = SpssClient.OutputItemType.PIVOT
itemType[6] = SpssClient.OutputItemType.ROOT
itemType[7] = SpssClient.OutputItemType.TEXT
itemType[8] = SpssClient.OutputItemType.WARNING
itemType[9] = SpssClient.OutputItemType.TITLE
itemType[11] = SpssClient.OutputItemType.PAGETITLE
itemType[13] = SpssClient.OutputItemType.TREEMODEL
itemType[14] = SpssClient.OutputItemType.GENERIC
#itemType["UNKNOWN"] = itemType[0]
itemType["CHART"] = itemType[1]
itemType["HEAD"] = itemType[2]
itemType["LOG"] = itemType[3]
itemType["NOTE"] = itemType[4]
itemType["PIVOT"] = itemType[5]
itemType["ROOT"] = itemType[6]
itemType["TEXT"] = itemType[7]
itemType["WARNING"] = itemType[8]
itemType["TITLE"] = itemType[9]
itemType["PAGETITLE"] = itemType[11]
itemType["TREEMODEL"] = itemType[13]
itemType["GENERIC"] = itemType[14]
def __getitem__(self, key):
return self.itemType[key]
def get(self, key, default=None):
return self.itemType.get(key, default)
class SpssItems(object):
OutputItemType = OutputItemTypeEnum()
def __init__(self, outputDoc=None, selected=False):
if outputDoc:
self.outputDoc = outputDoc
else:
self.outputDoc = SpssClient.GetDesignatedOutputDoc()
self.items = self.outputDoc.GetOutputItems()
self.__itemType = []
self.__selected = selected
def __iter__(self):
if self.__selected:
for index in xrange(self.items.Size()):
item = self.items.GetItemAt(index)
if item.IsSelected():
yield item
else:
for index in xrange(self.items.Size()):
item = self.items.GetItemAt(index)
yield item
class SpssItemsTyped(SpssItems):
def __init__(self, itemType=[], outputDoc=None, selected=False):
super(SpssItemsTyped, self).__init__(outputDoc=outputDoc, selected=selected)
if isinstance(itemType, (list, tuple)):
itemTypes = itemType
else:
itemTypes = [itemType]
itemTypes = [SpssItems.OutputItemType.get(it, None) for it in itemTypes]
itemTypes = [it for it in itemTypes if it]
self.__itemType = itemTypes
def __iter__(self):
for item in super(SpssItemsTyped, self).__iter__():
if item.GetType() in self.__itemType:
typedItem = item.GetSpecificType()
yield typedItem
class PivotTables(SpssItemsTyped):
""" Convenience Class, equivalent to:
SpssItemsTyped("PIVOT")
"""
def __init__(self, outputDoc=None, selected=False):
super(PivotTables, self).__init__(itemType="PIVOT", outputDoc=outputDoc, selected=selected)
class WarningTables(SpssItemsTyped):
""" Convenience Class, equivalent to:
SpssItemsTyped("WARNING")
"""
def __init__(self, outputDoc=None, selected=False):
super(WarningTables, self).__init__(itemType="WARNING", outputDoc=outputDoc, selected=selected)
class TextStyleEnum(object):
TextStyle = {}
TextStyle['Regular'] = SpssClient.SpssTextStyleTypes.SpssTSRegular
TextStyle['Bold'] = SpssClient.SpssTextStyleTypes.SpssTSBold
TextStyle['Italic'] = SpssClient.SpssTextStyleTypes.SpssTSItalic
TextStyle['BoldItalic'] = SpssClient.SpssTextStyleTypes.SpssTSBoldItalic
TextStyle[0] = TextStyle['Regular']
TextStyle[1] = TextStyle['Bold']
TextStyle[2] = TextStyle['Italic']
TextStyle[3] = TextStyle['BoldItalic']
def __getitem__(self, key):
return self.TextStyle[key]
def get(self, key, default=None):
return self.TextStyle.get(key, default)
def RGB(r,g,b):
"""Assumes red, green, blue values are integers in range(256)"""
return r + 256*g + 65536*b
def FindColumn(pivotTable, label):
"""Really FindFirstColumn with the given label"""
try:
colLabels = pivotTable.ColumnLabelArray()
rows = colLabels.GetNumRows()
cols = colLabels.GetNumColumns()
for i in xrange(rows):
for j in xrange(cols):
if colLabels.GetValueAt(i, j) == label:
return j
except:
return None
return None
### TODO: do other useful things
try:
SpssClient.StartClient()
TS = TextStyleEnum()
# change selected=False to apply to all pivot tables
for pivotTable in PivotTables(selected=True):
sigColumn = FindColumn(pivotTable, "Sig.")
if sigColumn is not None:
data = pivotTable.DataCellArray()
rows = data.GetNumRows()
for i in xrange(rows):
try:
cellValue = data.GetValueAt(i,sigColumn)
value = float(cellValue)
if value >= 0.0 and value <= 0.05:
data.SetTextColorAt(i,sigColumn, RGB(255,0,0))
data.SetTextStyleAt(i,sigColumn, TS["Bold"])
except Exception, e:
#print e
pass
except Exception, e:
print e
finally:
SpssClient.StopClient()
| true |
5107ba5385db212a00762934e87840232b82800a | Python | kubapok/AI_2017 | /coordinates-recognition/main.py | UTF-8 | 7,682 | 2.640625 | 3 | [] | no_license | from ImageToArrays import ImageToArrays
import math
import os
import copy
import PIL.Image
import numpy as np
import random
import sys
import pdb
import time
# Print numpy arrays in full.
# NOTE(review): threshold=np.nan is rejected by modern numpy (TypeError);
# np.inf is the supported way to disable truncation -- confirm the numpy
# version this targets.
np.set_printoptions(threshold=np.nan)
start_time = time.time()  # for elapsed-time reporting during training
impath = 'python-image-recognition/images/numbers'  # training image folder
def getDigitImgs(digit):
    """Return the paths of all training images whose file name starts with `digit`."""
    names = [name for name in os.listdir('python-image-recognition/images/numbers')
             if name[0] == str(digit)]
    return [impath + '/' + name for name in names]
def getDigitNorms(ssdigit):
    """
    Load each image path in `ssdigit` and binarize it into an 8x8 matrix:
    1 where the first channel value is <= 120 (dark), 0 otherwise.
    """
    xx = list()
    for im in ssdigit:
        a = PIL.Image.open(im)
        a = np.asarray(a)
        b = [[0] * 8 for i in range(8) ]
        for j in range(len(a)):
            for k in range(len(a[0])):
                # Threshold on the first (red) channel only.
                b[j][k] = 0 if a[j][k][0] > 120 else 1
        c = copy.deepcopy(b)
        xx.append(c)
    return xx
# Load and binarize the training images for every digit 0-9.
trainingSet = [getDigitNorms(getDigitImgs(digit)) for digit in range(10)]
s0, s1, s2, s3, s4, s5, s6, s7, s8, s9 = trainingSet
# ----------------------------------------------------------------------------------------------------
# Flatten each 8x8 sample into a float vector and append the bias input 1.0.
for sdd in trainingSet:
    for i in range(len(sdd)):
        flat = [pixel for row in sdd[i] for pixel in row]
        sdd[i] = np.append(np.asarray(flat, dtype=np.float32), [1.0])

# Training inputs: every sample but the last, of every digit but the last.
u = []
for digit_samples in trainingSet[:-1]:
    u.extend(digit_samples[:-1])
learning_now = sys.argv[1]
print(learning_now)
# One-hot target over 11 groups of 25 samples: the group of the digit
# being trained is all ones; every other group (including the spare
# 11th group) stays zero. Replaces the ten-branch if/elif chain.
if learning_now in [str(d) for d in range(10)]:
    digit = int(learning_now)
    z = [0] * (25 * 11)
    z[25 * digit: 25 * (digit + 1)] = [1] * 25
else:
    assert False
z = np.asarray(z, dtype=np.float32)
kmax = len(s0[0])   # length of one flattened sample incl. bias (8*8 + 1)
pmax = len(u)       # number of training samples
# Hidden-layer weights: (kmax - 1) neurons with kmax inputs each.
wold = [ [0] * kmax for k in range(kmax -1) ]
wnew = [ [1] * kmax for k in range(kmax -1) ]
wold = np.array(wold, dtype = np.float32)
wnew = np.array(wnew, dtype = np.float32)
# Random initialisation in [0, 1) (overwrites the 0/1 placeholders above).
for i in range(len(wold)):
    for j in range(len(wold[0])):
        wold[i][j] = random.random()
        wnew[i][j] = random.random()
# Output-layer weights, also randomly initialised.
snew = [1] * kmax
sold = [0] * kmax
snew = np.asarray(snew, dtype = np.float32)
sold = np.asarray(sold, dtype = np.float32)
for i in range(len(sold)):
    sold[i] = random.random()
    snew[i] = random.random()
c = float(sys.argv[2])  # learning rate; values tried: 3.0 1.5 5
eps = 0.0001  # convergence threshold for weight changes; was 0.000001
beta = 0.1  # sigmoid slope; was 3.0
################################################################################
def f(u):
    """Logistic sigmoid with slope `beta` (module global)."""
    return 1 / (1 + math.exp(-beta * u))
def fp(u):
    """Derivative of the sigmoid: beta * f(u) * (1 - f(u))."""
    fu_value = f(u)
    return beta * fu_value * (1 - fu_value)
def is_greater_than_eps(wnew, wold, snew, sold):
    """True while any weight still changed by at least `eps` (not yet converged)."""
    if any(abs(new - old) >= eps
           for new_row, old_row in zip(wnew, wold)
           for new, old in zip(new_row, old_row)):
        return True
    return any(abs(new - old) >= eps for new, old in zip(snew, sold))
################################################################################
def printU(u):
    """
    Forward-pass the input vector `u` through the current network
    (wnew, snew) and print the raw output-layer activation sum.
    """
    x = [None] * kmax
    for i in range(len(wnew)):
        ksum = 0
        for k in range(len(wnew[i])):
            ksum += wnew[i][k] * u[k]
        x[i] = f( ksum )
    # NOTE(review): this relies on the leftover loop variable
    # (k == kmax - 1) to address the hidden-layer bias slot -- fragile;
    # confirm intended.
    x[k] = 1
    ksum = 0
    for k in range(len(snew)):
        ksum += snew[k] * x[k]
    print(ksum)
# Hidden-layer activations per sample (last element is the bias input).
x = [ [0] * kmax for p in range(pmax)]
x = np.asarray(x, dtype = np.float32)
# Network output per sample.
y = [0] * pmax
y = np.asarray(y, dtype = np.float32)
# Gradient of the error w.r.t. the output-layer weights.
ES = [0] * kmax
ES = np.asarray(ES, dtype = np.float32)
# Gradient of the error w.r.t. the hidden-layer weights.
EW = [ [0] * kmax for k in range(kmax -1) ]
EW = np.asarray(EW, dtype = np.float32)
max_iterations = 500
# Main training loop: batch gradient descent on a one-hidden-layer network.
for iteration in range(max_iterations):
    # Progress probe: network response on the last sample of every digit.
    printU(s0[-1])
    printU(s1[-1])
    printU(s2[-1])
    printU(s3[-1])
    printU(s4[-1])
    printU(s5[-1])
    printU(s6[-1])
    printU(s7[-1])
    printU(s8[-1])
    printU(s9[-1])
    print('-' * 50)
    # Snapshot both weight layers to ./vectors/ every iteration.
    file_vec = open('./vectors/' + sys.argv[1] + '-' + sys.argv[2] + '-' +str(iteration) , 'wb')
    np.save(file_vec, wnew)
    file_vec.close()
    file_vec = open('./vectors/s' + sys.argv[1] + '-' + sys.argv[2] + '-' +str(iteration) , 'wb')
    np.save(file_vec, snew)
    file_vec.close()
    print('-' * 50)
    print('iteration' + str(iteration) + '/' + str(max_iterations))
    time_now = time.time()
    time_delta = int(time_now - start_time )
    print("elapsed time: ", time_delta // 60, time_delta % 60 )
    # Stop once no weight changed by at least eps (converged).
    if not is_greater_than_eps(wnew,wold, snew,sold):
        break
    # Forward pass: hidden activations x and outputs y for every sample.
    for p in range(len(x)):
        for i in range(len(wold)):
            x[p][i] = f( sum(np.multiply(wold[i], u[p])))
        x[p][-1] = 1.0  # bias input of the output layer
    for p in range(len(y)):
        y[p] = f( sum (np.multiply(sold, x[p])))
    # pochES -- gradient of the error w.r.t. the output weights s
    # ("poch" = pochodna, Polish for "derivative").
    for i in range(len(x[0])):
        suma = 0
        for p in range(len(y)):
            ksum = sum (np.multiply(sold, x[p]))
            suma += (y[p] - z[p] ) * fp( ksum ) * x[p][i]
        ES[i] = suma
    # pochEW -- gradient of the error w.r.t. the hidden weights w.
    print('a')
    pmax = len(x)
    for i in range(len(EW)):
        for j in range(len(EW[0])):
            suma = 0
            for p in range(pmax):
                suma += (y[p] - z[p] ) * fp( sum( np.multiply(sold, x[p]) ) ) * sold[i] * fp( sum( np.multiply(wold[i], u[p])) ) * u[p][j]
            EW[i][j] = suma
    print('b')
    # New weight values (gradient step with learning rate c).
    for i in range(len(snew)):
        snew[i] = sold[i] -c*ES[i]
    # NOTE(review): wold is copied from wnew BEFORE wnew is updated below,
    # although the gradients were computed at wold -- so wnew drifts away
    # from the point the gradient was taken at. Verify this is intended.
    wold = copy.deepcopy(wnew)
    sold = copy.deepcopy(snew)
    for i in range(len(wnew)):
        for j in range(len(wnew[i])):
            wnew[i][j] = wnew[i][j] - c*EW[i][j]
#----------------------------------------------------------------------------------------------------
print('KONIEC')  # Polish for "END" -- training finished
# Evaluate the trained network on every sample of every digit and print
# the raw output activation for each.
for digit in trainingSet:
    for u in digit:
        x = [None] * kmax
        for i in range(len(wnew)):
            ksum = 0
            for k in range(len(wnew[i])):
                ksum += wnew[i][k] * u[k]
            x[i] = f( ksum )
        # NOTE(review): as in printU, this relies on the leftover loop
        # variable k to address the bias slot.
        x[k] = 1
        ksum = 0
        for k in range(len(snew)):
            ksum += snew[k] * x[k]
        print(ksum)
    print('-' * 50)
| true |
598630d749939c244d249bc8438219d8eb99c541 | Python | IC-H/16_Always_On | /combine_4th.py | UTF-8 | 13,631 | 2.671875 | 3 | [] | no_license | import cv2
import os
import numpy as np
import math
import scipy as sp
import matplotlib.pyplot as plt
def imTrim(img, points):
    """
    Crop `img` to the axis-aligned rectangle spanned by the two points in
    `points`, saving the crop to 'a2.png' as a side effect.

    NOTE(review): the crop indexes img[x1:x2, y1:y2], i.e. the first
    point coordinate selects rows -- confirm the (row, col) vs (x, y)
    convention of the callers.
    """
    p1 = points[0]
    p2 = points[1]
    # Order the coordinates so x1 <= x2 and y1 <= y2.
    if p1[0] > p2[0]:
        x1 = p2[0]
        x2 = p1[0]
    else:
        x1 = p1[0]
        x2 = p2[0]
    if p1[1] > p2[1]:
        y1 = p2[1]
        y2 = p1[1]
    else:
        y1 = p1[1]
        y2 = p2[1]
    img_trim = img[x1:x2, y1:y2]
    cv2.imwrite('a2.png',img_trim)
    return img_trim
def handFilter(img, up, down):
    """
    Skin-color mask: convert `img` (BGR) to YCrCb and keep the pixels
    whose value lies in the closed range [down, up] via cv2.inRange.

    Parameters
    ----------
    img : numpy array, BGR image.
    up, down : array-like
        Upper / lower YCrCb bounds of the skin-color range.

    Returns
    -------
    Single-channel binary mask from cv2.inRange.
    """
    # Fix: removed the unused 5x5 erode/dilate kernel the original built
    # on every call but never applied.
    YCrCb = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)
    hand_filter = cv2.inRange(YCrCb, down, up)
    return hand_filter
def findEndPoints(con, img, theta):
    """
    Scan the contours `con` for a hand (classified via the logistic-
    regression parameters `theta`) and collect candidate fingertip points.

    Returns a list whose first element is True when a hand was found,
    followed by (x, y) fingertip coordinates. Draws debug markers on
    `img` as a side effect.
    """
    px = 0
    py = 0
    count = 0
    coor = [ False ]
    n_c = len(con)
    for i in range(n_c):
        cnt = con[i]
        momen = cv2.moments(cnt)
        if momen['m00'] >100: # if area of closed contour is smaller than #, it may not be hand
            # Logistic-regression hand classifier on the moment features.
            # NOTE(review): ext_feat is not defined in this file -- verify
            # where it is imported from.
            fea = ext_feat(momen)
            h = np.dot(fea, theta[1]) + theta[0][0]
            h = 1./(np.ones([1, 1]) + np.exp(-h))
            if h == 1.:
                coor[0] = True
                cx = int(momen['m10']/momen['m00']) # x-coordinate of center of mass
                cy = int(momen['m01']/momen['m00']) # y-coordinate of center of mass
                check = cv2.isContourConvex(cnt) # check whether it is convex or not
                # point out center of mass by blue circle
                cv2.circle(img, (cx, cy), 5, (255, 0, 0), -1)
                if not check:
                    hull = cv2.convexHull(cnt)
                    cv2.drawContours(img, [hull], 0, (0, 255, 0), 3)
                    phd = cv2.convexHull(cnt, returnPoints = False)
                    for j in range(len(phd)):
                        const = phd[j]
                        # current point of end-point
                        crx = cnt[const][0][0][0]
                        cry = cnt[const][0][0][1]
                        absv = float(((crx - cx)**2 + (cry - cy)**2 )**(0.5))
                        # If the distance of the current point (from the center
                        # of mass) is inside a fixed band, treat it as an
                        # end-point candidate.
                        if absv > 95 and absv < 140:
                            if (px - crx)**2 + (py - cry)**2 < 1600:
                                # Close to the running cluster: merge by averaging.
                                count += 1
                                px = int(( px*(count - 1) + crx)/count)
                                py = int(( py*(count - 1) + cry )/count)
                            else:
                                # Far from the running cluster: flush the
                                # finished cluster (if any) and start a new one.
                                if count == 0:
                                    px = crx
                                    py = cry
                                    count = 1
                                elif count == 1:
                                    cv2.circle(img, (px, py), 5, (0, 0, 255), -1)
                                    coor.append((px, py))
                                    px = crx
                                    py = cry
                                else:
                                    cv2.circle(img, (px, py), 5, (0, 0, 255), -1)
                                    coor.append((px, py))
                                    px = crx
                                    py = cry
                                    count = 1
                    # Flush the last open cluster.
                    if (count != 0):
                        cv2.circle(img, (px, py), 5, (0, 0, 255), -1)
                        coor.append((px, py))
    return coor
def nom(v):
    """Return `v` as a numpy array scaled to unit Euclidean length."""
    arr = np.array(v)
    length = math.sqrt(np.sum(np.square(arr)))
    return arr / length
def make_vector(p):
    """Unit direction vectors between consecutive points of the closed polygon `p`."""
    count = len(p)
    # The modulo wraps the last point back to the first.
    return [nom(p[(i + 1) % count] - p[i]) for i in range(count)]
def update_point(p, img):
    """
    Thin the point list `p`: keep a point only when its distance measure
    from the previously kept point exceeds the threshold; draws each
    kept point on `img` and returns the kept points.

    NOTE(review): `distance` is not defined in this file -- it must be
    supplied elsewhere; given the threshold 3000 it presumably returns a
    squared distance. Verify.
    """
    dist = 3000
    n = len(p)
    # Fix: the original named this list `np`, shadowing the numpy module
    # inside this function.
    kept = []
    pp = 0
    for i in range(n):
        if i == n-1:
            # Last point: compare the wrap-around pair (p[0], p[pp]).
            if distance(p[0], p[pp]) > dist:
                cv2.circle(img, (p[pp][0], p[pp][1]), 2, (255, 0, 0), -1)
                kept.append( p[pp] )
        else:
            if distance(p[i + 1], p[pp] ) > dist:
                pp = i
                cv2.circle(img, (p[pp][0], p[pp][1]), 2, (255, 0, 0), -1)
                kept.append( p[pp] )
    return kept
def make_vector_debug(p, img):
    """
    Same as make_vector, but also draws each point of `p` onto `img`
    for visual debugging.
    """
    n = len(p)
    nv = []
    # NOTE(review): `pp` is assigned but never used here -- leftover
    # from update_point.
    pp = 0
    for i in range(n):
        if i == n-1:
            # Wrap-around edge from the last point back to the first.
            v = p[0] - p[i]
            cv2.circle(img, (p[i][0], p[i][1]), 1, (255, 0, 0), -1)
            nv.append( nom(v) )
        else:
            v = p[i+1] - p[i]
            cv2.circle(img, (p[i][0], p[i][1]), 1, (255, 0, 0), -1)
            nv.append( nom(v) )
    return nv
def cal_deg(v1, v2):
    """Angle between 2-D vectors *v1* and *v2* in degrees, clamped to 0..180."""
    norm1 = math.sqrt(v1[0] ** 2 + v1[1] ** 2)
    norm2 = math.sqrt(v2[0] ** 2 + v2[1] ** 2)
    cosine = (v1[0] * v2[0] + v1[1] * v2[1]) / (norm1 * norm2)
    # Guard against floating-point drift outside acos's domain.
    if cosine >= 1.0:
        return 0
    if cosine <= -1:
        return 180
    return math.acos(cosine) * 180 / math.pi
def make_deg(v):
    """Turning angle at every vertex: the angle between each vector in *v*
    and its predecessor (index 0 wraps around to the last vector)."""
    angles = []
    for idx in range(len(v)):
        # cal_deg is symmetric, so argument order does not matter.
        angles.append(cal_deg(v[idx - 1], v[idx]))
    return angles
def find_end2(con, img, m00, theta):
    """Locate sharp end-points (fingertip candidates) on hand-like contours.

    con   -- contours from cv2.findContours
    img   -- frame annotated in place (red circles on end-points)
    m00   -- minimum contour area worth classifying
    theta -- [bias, weights] pair from the trained logistic classifier

    Returns a list whose first element is a bool ("a hand was detected"),
    followed by the detected end-point coordinates.
    """
    #img2 = img.copy()
    n_c = len(con)
    coor = [False]
    for i in range(n_c):
        cnt = con[i]
        momen = cv2.moments(cnt)
        if momen['m00'] >m00: # if area of closed contour is smaller than #, it may not be hand
            fea = ext_feat(momen)
            # Logistic score: sigmoid(features . weights + bias).
            h = np.dot(fea, theta[1]) + theta[0][0]
            h = 1./(np.ones([1, 1]) + np.exp(-h))
            p = []
            n = len(cnt)
            c_d = []
            # h == 1.0 means the sigmoid saturated: treated as "certainly a hand".
            if h == 1.0:
                coor[0] = True
                for j in range(n):
                    p.append(cnt[j][0])
                n_p = update_point(p, img)
                #cv2.namedWindow('debug2', cv2.WINDOW_NORMAL)
                #cv2.imshow('debug2', img)
                nv = make_vector(n_p)
                deg = make_deg(nv)
                for j in range(len(nv)):
                    # A sharp turn (> 120 degrees) marks an end-point candidate.
                    if deg[j] > 120:
                        c_d.append(j)
                        cv2.circle(img, (n_p[j][0], n_p[j][1]), 5, (0, 0, 255), -1)
                        coor.append( n_p[j] )
    return coor
def handle_video():
    """Live demo loop (Python 2): detect a hand per frame, collect two
    fingertip anchor points, and show the image region between them.
    Press ESC to quit."""
    os.system('sudo modprobe bcm2835-v4l2')
    # Capture Video
    cap = cv2.VideoCapture(0)
    cap.set(3, 360) #set width of cap size
    cap.set(4, 240) #set height of cap size
    num_c = 0
    point_c = [(0, 0), (0, 0)]  # the two captured anchor points
    countFull = 0               # how many anchor points were captured so far
    CaptureFlag = False
    PS = 0                      # previous frame's end-point count
    pv = []
    ''' Main Function'''
    theta = test_learn()
    while True:
        ret, frame = cap.read() # 2592 * 1944
        frame2 = frame.copy() # copy frame
        fr = frame.copy()
        '''Color Filter for finding hands '''
        hand_filter_down = np.array([0, 133, 77]) #YCrCb Filter low Limit
        hand_filter_up = np.array([255, 173, 127]) #YCrCb Filter Up Limit
        hand_filter = handFilter(frame, hand_filter_up, hand_filter_down)
        hand = hand_filter.copy()
        con, hir = cv2.findContours(hand, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        coor = findEndPoints(con, frame2, theta)
        coor2 = find_end2(con, fr, 5000, theta)
        print coor2
        countForCoor = len(coor2) - 1
        # Exactly one fingertip seen now, none on the previous frame:
        # record it as an anchor (the second must be far from the first).
        if (countForCoor == 1 ) & (PS == 0):
            if ( point_c[0][0] == 0 ) and ( point_c[0][1] == 0 ) :
                point_c[0] = coor2[1]
                countFull += 1
            elif ( point_c[1][0] == 0 ) and ( point_c[1][1] == 0 ):
                if distance(point_c[0], coor2[1]) > 30000:
                    point_c[1] = coor2[1]
                    countFull += 1
            else:
                pass
        print countFull
        PS = countForCoor
        # Both anchors recorded and no hand in the frame: trigger the capture.
        if ( countFull == 2 ) & (not coor2[0]):
            CaptureFlag = True
        if CaptureFlag:
            C_img = imTrim(frame, point_c)
            cv2.imshow('Capture',C_img)
            CaptureFlag = False
            point_c = [(0, 0), (0, 0)]
            countFull = 0
            #break
        if not ret:
            print 'Not Found Devices'
            break
        ''' show frames '''
        cv2.imshow('Frame', frame)
        cv2.imshow('Color Filter', hand_filter)
        cv2.drawContours(frame, con, -1, (0, 255, 0), 1)
        cv2.imshow('contour',frame)
        #cv2.imshow('End Points 1',frame2)
        cv2.imshow('End Points 2', fr)
        if cv2.waitKey(1) & 0xff == 27:
            break
    cap.release()
    cv2.destroyAllWindows()
def makeString(img, contour, mon):
    """Show *contour*, wait for a one-key label from the user, and return a
    tab-separated training line (label + moment features, newline-terminated).

    Returns the integer 27 when the user presses ESC (caller aborts).
    Note: 'nu12' is intentionally absent from the feature columns, matching
    the format consumed by ext_feat / test_learn.
    """
    height, width = img.shape[0], img.shape[1]
    canvas = np.zeros((height, width, 3), np.uint8)
    size_of_img = height * width
    cv2.drawContours(canvas, contour, -1, (0, 255, 0), 1)
    cv2.namedWindow('contour', cv2.WINDOW_NORMAL)
    cv2.imshow('contour', canvas)
    Flag_Hand = cv2.waitKey(0)
    if Flag_Hand & 0xff == 27:
        return 27
    cv2.destroyWindow('contour')
    fields = [chr(Flag_Hand), str(mon['m00']/size_of_img)]
    for key in ('m10', 'm01', 'm20', 'm11', 'm02', 'm30', 'm21', 'm12',
                'm03', 'mu20', 'mu11', 'mu02', 'mu30', 'mu21', 'mu12',
                'mu03', 'nu20', 'nu11', 'nu02', 'nu30', 'nu21', 'nu03'):
        fields.append(str(mon[key]))
    return '\t'.join(fields) + '\n'
def make_data():
    """Interactive training-data collector: show the camera feed; pressing
    's' labels every large contour in the current frame via makeString and
    appends the lines to data_test.tsv; ESC quits."""
    os.system('sudo modprobe bcm2835-v4l2')
    # Capture Video
    cap = cv2.VideoCapture(0)
    cap.set(3, 360) #set width of cap size
    cap.set(4, 240) #set height of cap size
    #data = sp.genfromtxt('data_test.tsv', delimiter = '\t')
    #print len(data)
    while True:
        ret, frame = cap.read()
        Ycc = cv2.cvtColor(frame, cv2.COLOR_BGR2YCR_CB)
        hand_filter_down = np.array([0, 133, 77]) #YCrCb Filter low Limit
        hand_filter_up = np.array([255, 173, 127]) #YCrCb Filter Up Limit
        hand_filter = handFilter(frame, hand_filter_up, hand_filter_down)
        hand = hand_filter.copy()
        #cv2.drawContours(b_img, con, -1, (0, 255, 0), 1)
        cv2.namedWindow('origin', cv2.WINDOW_NORMAL)
        #cv2.namedWindow('contour', cv2.WINDOW_NORMAL)
        cv2.imshow('origin', frame)
        #cv2.imshow('contour', b_img)
        ki = cv2.waitKey(1) & 0xff
        if ki == 27:
            break
        elif ki == ord('s'):
            # Label every sufficiently large contour in this frame.
            con, hir = cv2.findContours(hand_filter, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            n_c = len(con)
            f_d = open('data_test.tsv', 'a+')
            for i in range(n_c):
                cnt = con[i]
                mon = cv2.moments(cnt)
                if mon['m00'] > 1000:
                    s = makeString(frame, cnt, mon)
                    # makeString returns 27 when the user pressed ESC.
                    if s == 27:
                        break
                    f_d.write(s)
            f_d.close()
            cv2.destroyAllWindows()
        cv2.namedWindow('Color Filter', cv2.WINDOW_NORMAL)
        cv2.imshow('Color Filter', hand)
    cap.release()
    cv2.destroyAllWindows()
def distance(p0, p1):
    """Squared Euclidean distance between points *p0* and *p1*."""
    delta = np.array(p0) - np.array(p1)
    return np.sum(delta ** 2)
def nn_classify(training_set, training_labels, new_ex):
    """1-nearest-neighbour classification: return the label of the training
    example closest (squared Euclidean distance) to *new_ex*.

    Fix: the original called `training_labels(nearest)` instead of indexing
    `training_labels[nearest]`, which raised TypeError on every call.
    """
    dists = np.array([distance(t, new_ex) for t in training_set])
    nearest = dists.argmin()
    return training_labels[nearest]
def t_linear_regression(x, y, r, l):
    """Logistic-regression training by batch gradient descent.

    x -- (m, n) feature matrix
    y -- m integer labels; bit 1 of each label is used as the binary target
    r -- number of gradient-descent iterations
    l -- regularisation factor

    Returns [b, theta]: a per-sample bias column and the weight vector.

    NOTE(review): `theta - a*d + l/m*theta` ADDS the regularisation term
    instead of subtracting it, and `b` is an (m, 1) per-sample column rather
    than a scalar bias.  Both look suspicious, but the trained thresholds
    used downstream (h == 1.0 checks) depend on them -- verify before changing.
    """
    x = np.array(x)
    y = np.array([y], np.uint8)
    y = y.T
    #y1 = y & 0b001
    y2 = (y & 0b010)/2
    #y3 = (y & 0b100)/4
    #y2 = np.array(y2, np.float64)
    x_s = x.shape
    m = x_s[0]
    n = x_s[1]
    theta = np.zeros([n, 1])
    b = np.zeros([m, 1])
    a = 0.01  # learning rate
    h = np.dot(x, theta) + b
    h = 1./(np.ones([m, 1]) + np.exp(-h))
    for i in range(r):
        # Gradient of the logistic loss w.r.t. theta.
        d = 1./m*np.dot(x.T, (h - y2))
        b = b - a*(h-y2)
        theta = theta - a*d + l/m*theta
        h = np.dot(x, theta) + b
        h = 1./(np.ones([m, 1]) + np.exp(-h))
    return [b, theta]
def ext_feat(mom):
    """Flatten an OpenCV moments dict into a 1x23 feature row.

    Note: 'nu12' is deliberately absent, matching the training-data
    layout written by makeString.
    """
    keys = ('m00', 'm10', 'm01', 'm20', 'm11', 'm02', 'm30', 'm21', 'm12',
            'm03', 'mu20', 'mu11', 'mu02', 'mu30', 'mu21', 'mu12', 'mu03',
            'nu20', 'nu11', 'nu02', 'nu30', 'nu21', 'nu03')
    return np.array([[mom[k] for k in keys]])
def test_learn():
    """Train the logistic hand classifier from data_test.tsv.

    The final row is held out and scored once (result unused); returns
    theta = [b, weights] from t_linear_regression.
    """
    raw_data = sp.genfromtxt('data_test.tsv', delimiter = '\t')
    print len(raw_data)
    d_s = raw_data.shape
    m = d_s[0]
    n = d_s[1]
    # Column 0 holds the key-press label; bit 1 is the "hand" bit.
    hf = raw_data[:m-1, 0]
    hf = np.array(hf, np.uint8)
    hf2 = (hf & 0b10)/2
    t_d = raw_data[:m-1, 1:]
    e_d = np.array([raw_data[m-1, 1:]])
    #print e_d.shape
    theta = t_linear_regression(t_d, hf, 1000, 107)
    h = np.dot(e_d, theta[1]) + theta[0][0]
    #print h.shape
    h = 1./(np.ones([1, 1]) + np.exp(-h))
    return theta
    #print h
    #print raw_data[m-1, 0]
def test_tt():
    """Interactive accuracy check on the still image a5.jpg: each contour's
    classification is shown and the user confirms with 'y' (correct) or any
    other key (wrong); ESC aborts.  Prints the accuracy ratio."""
    theta = test_learn()
    count = 0.
    fcount = 0.
    m00 = 3000
    img = cv2.imread('a5.jpg')
    frame = img.copy()
    frame2 = img.copy()
    frame3 = img.copy()
    hand_filter_down = np.array([0, 133, 77]) #YCrCb Filter low Limit
    hand_filter_up = np.array([255, 173, 127]) #YCrCb Filter Up Limit
    hand_filter = handFilter(img, hand_filter_up, hand_filter_down)
    hand = hand_filter.copy()
    hand2 = hand.copy()
    con, hir = cv2.findContours(hand, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    n_c = len(con)
    for i in range(n_c):
        shp = img.shape
        bg = np.zeros((shp[0], shp[1], 3), np.uint8)
        mon = cv2.moments(con[i])
        fea = ext_feat(mon)
        # Logistic score; a saturated sigmoid (== 1.0) counts as "hand".
        h = np.dot(fea, theta[1]) + theta[0][0]
        h = 1./(np.ones([1, 1]) + np.exp(-h))
        print h
        if h == 1.0:
            cv2.drawContours(bg, con[i], -1, (0, 255, 0), 1)
            cv2.namedWindow('hand?', cv2.WINDOW_NORMAL)
            cv2.imshow('hand?', bg)
        else:
            cv2.drawContours(bg, con[i], -1, (0, 0, 255), 1)
            cv2.namedWindow('not hand?', cv2.WINDOW_NORMAL)
            cv2.imshow('not hand?', bg)
        key = cv2.waitKey(0)
        cv2.destroyAllWindows()
        if key == ord('y'):
            count += 1.
        elif key == 27:
            break
        else:
            fcount += 1
            pass
    print count/(fcount + count)
    key = cv2.waitKey(0)
    cv2.destroyAllWindows()
def test_and_update():
    """Same interactive evaluation as test_tt, but on a1.jpg and without the
    final waitKey; prints the accuracy ratio."""
    theta = test_learn()
    count = 0.
    fcount = 0.
    m00 = 3000
    img = cv2.imread('a1.jpg')
    frame = img.copy()
    frame2 = img.copy()
    frame3 = img.copy()
    hand_filter_down = np.array([0, 133, 77]) #YCrCb Filter low Limit
    hand_filter_up = np.array([255, 173, 127]) #YCrCb Filter Up Limit
    hand_filter = handFilter(img, hand_filter_up, hand_filter_down)
    hand = hand_filter.copy()
    hand2 = hand.copy()
    con, hir = cv2.findContours(hand, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    #coor = findEndPoints(con, frame2)
    #coor2 = find_end2(con, frame, m00)
    n_c = len(con)
    for i in range(n_c):
        shp = img.shape
        bg = np.zeros((shp[0], shp[1], 3), np.uint8)
        mon = cv2.moments(con[i])
        fea = ext_feat(mon)
        h = np.dot(fea, theta[1]) + theta[0][0]
        h = 1./(np.ones([1, 1]) + np.exp(-h))
        print h
        if h == 1.0:
            cv2.drawContours(bg, con[i], -1, (0, 255, 0), 1)
            cv2.namedWindow('hand?', cv2.WINDOW_NORMAL)
            cv2.imshow('hand?', bg)
        else:
            cv2.drawContours(bg, con[i], -1, (0, 0, 255), 1)
            cv2.namedWindow('not hand?', cv2.WINDOW_NORMAL)
            cv2.imshow('not hand?', bg)
        key = cv2.waitKey(0)
        cv2.destroyAllWindows()
        if key == ord('y'):
            count += 1.
        elif key == 27:
            break
        else:
            fcount += 1
            pass
    print count/(count + fcount)
def Neral_Network():
    """Placeholder for a future neural-network classifier (not implemented)."""
    pass
if __name__ == '__main__':
    # Run the live demo by default; the commented drivers collect training
    # data or evaluate the classifier on still images.
    #make_data()
    handle_video()
    #test_learn()
    #test_tt()
    #test_and_update()
| true |
58aeff945165905bb2005a1ed8988fa12b775e89 | Python | CZ-NIC/deckard | /pydnstest/mock_client.py | UTF-8 | 4,628 | 2.703125 | 3 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | """Module takes care of sending and recieving DNS messages as a mock client"""
import errno
import socket
import struct
import time
from typing import Optional, Tuple, Union
import dns.message
import dns.inet
SOCKET_OPERATION_TIMEOUT = 5
RECEIVE_MESSAGE_SIZE = 2**16-1
THROTTLE_BY = 0.1
def handle_socket_timeout(sock: socket.socket, deadline: float):
    """Set *sock*'s timeout to the time remaining until *deadline*.

    *deadline* is always a time.monotonic() value.  Raises RuntimeError
    when the deadline has already passed.
    """
    time_left = deadline - time.monotonic()
    if time_left > 0:
        sock.settimeout(time_left)
    else:
        raise RuntimeError("Server took too long to respond")
def recv_n_bytes_from_tcp(stream: socket.socket, n: int, deadline: float) -> bytes:
    """Read exactly *n* bytes from *stream*, honouring the monotonic *deadline*.

    Raises OSError if the peer closes the connection before *n* bytes arrive.
    """
    buf = b""
    while len(buf) < n:
        handle_socket_timeout(stream, deadline)
        part = stream.recv(n - len(buf))
        # An empty read means the socket was closed by the peer.
        if not part:
            raise OSError()
        buf += part
    return buf
def recvfrom_blob(sock: socket.socket,
                  timeout: int = SOCKET_OPERATION_TIMEOUT) -> Tuple[bytes, str]:
    """
    Receive one DNS message from a TCP or UDP socket.

    Returns (payload bytes, sender address).  Raises RuntimeError when the
    overall *timeout* elapses, and retries transparently on ENOBUFS.
    """
    # deadline is always time.monotonic
    deadline = time.monotonic() + timeout
    while True:
        try:
            if sock.type & socket.SOCK_DGRAM:
                handle_socket_timeout(sock, deadline)
                data, addr = sock.recvfrom(RECEIVE_MESSAGE_SIZE)
            elif sock.type & socket.SOCK_STREAM:
                # First 2 bytes of TCP packet are the size of the message
                # See https://tools.ietf.org/html/rfc1035#section-4.2.2
                data = recv_n_bytes_from_tcp(sock, 2, deadline)
                msg_len = struct.unpack_from("!H", data)[0]
                data = recv_n_bytes_from_tcp(sock, msg_len, deadline)
                addr = sock.getpeername()[0]
            else:
                raise NotImplementedError("[recvfrom_blob]: unknown socket type '%i'" % sock.type)
            return data, addr
        except socket.timeout as ex:
            raise RuntimeError("Server took too long to respond") from ex
        except OSError as ex:
            # ENOBUFS: kernel buffers exhausted; back off briefly and retry.
            if ex.errno == errno.ENOBUFS:
                time.sleep(0.1)
            else:
                raise
def recvfrom_msg(sock: socket.socket,
                 timeout: int = SOCKET_OPERATION_TIMEOUT) -> Tuple[dns.message.Message, str]:
    """Receive one DNS message from *sock* and return it parsed, with the
    sender address."""
    wire, sender = recvfrom_blob(sock, timeout=timeout)
    return dns.message.from_wire(wire, one_rr_per_rrset=True), sender
def sendto_msg(sock: socket.socket, message: bytes, addr: Optional[str] = None) -> None:
    """Send a DNS message: UDP as-is, TCP with the RFC 1035 2-byte length prefix.

    ECONNREFUSED is deliberately swallowed; every other OSError propagates.
    """
    try:
        if sock.type & socket.SOCK_DGRAM:
            if addr is None:
                sock.send(message)
            else:
                sock.sendto(message, addr)
        elif sock.type & socket.SOCK_STREAM:
            # https://tools.ietf.org/html/rfc1035#section-4.2.2
            sock.sendall(struct.pack("!H", len(message)) + message)
        else:
            raise NotImplementedError("[sendto_msg]: unknown socket type '%i'" % sock.type)
    except OSError as ex:
        # Reference: http://lkml.iu.edu/hypermail/linux/kernel/0002.3/0709.html
        if ex.errno != errno.ECONNREFUSED:
            raise
def setup_socket(address: str,
                 port: int,
                 tcp: bool = False,
                 src_address: str = None) -> socket.socket:
    """Create a client socket connected to (*address*, *port*).

    TCP sockets get TCP_NODELAY.  *src_address* optionally pins the local
    source address (the source port stays random).
    """
    family = dns.inet.af_for_address(address)
    kind = socket.SOCK_STREAM if tcp else socket.SOCK_DGRAM
    sock = socket.socket(family, kind)
    if tcp:
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
    if src_address is not None:
        sock.bind((src_address, 0))  # random source port
    sock.settimeout(SOCKET_OPERATION_TIMEOUT)
    sock.connect((address, port))
    return sock
def send_query(sock: socket.socket, query: Union[dns.message.Message, bytes]) -> None:
    """Serialise *query* if necessary and send it, retrying while the kernel
    reports ENOBUFS."""
    wire = query if isinstance(query, bytes) else query.to_wire()
    while True:
        try:
            sendto_msg(sock, wire)
            return
        except OSError as ex:
            if ex.errno != errno.ENOBUFS:
                raise
            # ENOBUFS: throttle and retry the send.
            time.sleep(0.1)
def get_answer(sock: socket.socket, timeout: int = SOCKET_OPERATION_TIMEOUT) -> bytes:
    """Compatibility shim: receive one DNS message and return only its raw bytes."""
    blob, _sender = recvfrom_blob(sock, timeout=timeout)
    return blob
def get_dns_message(sock: socket.socket,
                    timeout: int = SOCKET_OPERATION_TIMEOUT) -> dns.message.Message:
    """Receive one DNS message from *sock* and return it parsed."""
    wire = get_answer(sock, timeout=timeout)
    return dns.message.from_wire(wire)
| true |
da47bd6c0e68aa8506a8b903d8ddc74e04a8ddec | Python | MeztliVal/Python_Codes | /PRINCIPIANTES/manejo de archivos csv y xml/modif_arch.py | UTF-8 | 699 | 3.0625 | 3 | [] | no_license | #modificando archivos
# Import our own helper library for file handling.
import lib20
print("Rellenando archivos...")
nombre = input("Teclea el nombre del archivo con extension .txt al que deseas agregar un registro:")
resp = 's'
while resp == 'S' or resp == 's':
    print("Ingresa los siguientes datos:")
    ncontrol = input('Teclea el No.de Control:') + '-'
    paterno = input('Teclea el Apellido Paterno:') + '-'
    materno = input('Teclea el Apellido Materno:') + '-'
    nombre_alumno = input('Teclea el Nombre(s):')
    # Build one dash-separated student record and append it via the helper.
    agrega = ncontrol + paterno + materno + nombre_alumno
    lib20.modifica_archivo(nombre, agrega)
    resp = input('Deseas añadir mas registros al archivo?[S/n]')
1a262354c8be5a2ca4eb1570da679bf3cd4b507e | Python | yuriivs/geek_python_less02_dz | /lesson02_dz02.py | UTF-8 | 767 | 4.4375 | 4 | [] | no_license | # Для списка реализовать обмен значений соседних элементов, т.е.
# Task: swap the values of neighbouring elements -- indices 0 and 1, 2 and 3,
# and so on.  With an odd number of elements, the last one stays in place.
# The list must be filled via input().
# Example: mylist = [2, 8, 43, 15, 907, 33, "inter", 32, "bayer"]
# Fix: the original did list(input(...)), which split the raw string into
# single characters (commas included), and then reverse()d the WHOLE list
# instead of swapping adjacent pairs.
mylist = [item.strip() for item in input("Введите любое количество чисел через запятую >>>").split(",")]
for i in range(0, len(mylist) - 1, 2):
    mylist[i], mylist[i + 1] = mylist[i + 1], mylist[i]
print("Я поменял местами соседние элементы:", mylist)
| true |
a61c572018cfaec675195f3b9f56b2ad933f46f0 | Python | veervohra03/Processing | /Python/sineZipper/sineZipper.pyde | UTF-8 | 1,131 | 3.515625 | 4 | [] | no_license | # Veer Vohra
# Sine Zipper
# Python Ver 1.0
t = 0  # animation phase, advanced a little every frame
x = []  # x-coordinates of the vertical bars, filled in setup()
objs = 30  # number of bars per row
beg = 0  # left margin that horizontally centres the bars (set in setup())
def setup():
    # Processing entry point: size the canvas and precompute the bar x-positions.
    global beg
    size(1000, 800)
    smooth()
    inter = 20  # horizontal spacing between neighbouring bars
    beg = (width-(inter*objs))/2 - 5
    temp = 20
    for i in range(objs):
        x.append(temp)
        temp += inter
def draw():
    # Called once per frame: clear, centre the pattern, render one variant,
    # and advance the animation phase.
    global t, x, beg
    background(0)
    stroke(220)
    strokeWeight(5)
    translate(beg, height/2)
    op1()
    #op2()
    #op3()
    t += 0.05
def op1():
    """Zipper variant 1: two mirrored rows of sine-driven bars (100 px amplitude)."""
    for idx, bar_x in enumerate(x):
        line(bar_x, -50, bar_x, sin(t + idx * 100) * 100 - 50)
    for idx, bar_x in enumerate(x):
        line(bar_x, 50, bar_x, -(sin(t + idx * 100) * 100) + 50)
def op2():
    """Zipper variant 2: like op1 but with double (200 px) amplitude."""
    for idx, bar_x in enumerate(x):
        line(bar_x, -50, bar_x, sin(t + idx * 100) * 200 - 50)
    for idx, bar_x in enumerate(x):
        line(bar_x, 50, bar_x, -(sin(t + idx * 100) * 200) + 50)
def op3():
    """Zipper variant 3: half-speed phase and a wider (120 px) centre gap."""
    for idx, bar_x in enumerate(x):
        line(bar_x, -60, bar_x, sin((t + idx * 100) / 2) * 100 - 60)
    for idx, bar_x in enumerate(x):
        line(bar_x, 60, bar_x, -(sin((t + idx * 100) / 2) * 100) + 60)
| true |
ca31b6e81740b1b8c32e8a8270385f2ca79d0788 | Python | thailore/RandomCodes | /CodeAcademy/machine-learning/titanic_survival/TitanicSurvival_RandomForest.py | UTF-8 | 2,288 | 3.875 | 4 | [] | no_license | import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# Load the passenger data
passengers = pd.read_csv("passengers.csv")
print(passengers.info()) # print range index and types of features
# print(passengers.describe())
print(passengers)

# Update sex column to numerical
passengers['Sex'] = passengers['Sex'].map({'female': 1, 'male': 0})

# Fill the nan values in the age column with the mean age
meanAge = np.nanmean(passengers['Age'].values)
passengers['Age'].fillna(value=meanAge, inplace=True)

# Create Age_Class interaction feature
passengers['Age_Class'] = passengers['Age'] * passengers['Pclass']

# Create a first class column
passengers['FirstClass'] = passengers['Pclass'].apply(lambda x: 1 if x == 1 else 0)

# Create a second class column
passengers['SecondClass'] = passengers['Pclass'].apply(lambda x: 1 if x == 2 else 0)

# Create is-alone column (no siblings/spouses aboard)
passengers['Alone'] = passengers['SibSp'].apply(lambda x: 1 if x < 1 else 0)

# Select the desired features
features = passengers[['Age', 'Sex', 'FirstClass', 'SecondClass', 'Alone']]
survival = passengers['Survived']

# Perform train, test, split
training_data, test_data, training_labels, test_labels = train_test_split(
    features, survival, test_size=0.2, random_state=10)

# Scale the feature data so it has mean = 0 and standard deviation = 1.
# Fix: fit_transform/transform RETURN the scaled arrays; the originals were
# discarded, so the model trained on unscaled data while the sample
# passengers below were scaled -- an inconsistent pipeline.
scaler = StandardScaler()
training_data = scaler.fit_transform(training_data)
test_data = scaler.transform(test_data)

# Create and train the model
model = RandomForestClassifier(n_estimators=2000).fit(training_data, training_labels)

# Score the model on the train data
print("Train data score: ", model.score(training_data, training_labels))

# Score the model on the test data
print("Test data score: ", model.score(test_data, test_labels))

# Sample passenger features, in the SAME order as `features`:
# [Age, Sex, FirstClass, SecondClass, Alone].
# Fix: the originals were laid out [Sex, Age, ...], mismatching the
# training feature order.
Jack = np.array([20.0, 0.0, 0.0, 0.0, 1.0])
Rose = np.array([17.0, 1.0, 1.0, 0.0, 0.0])
You = np.array([26.0, 0.0, 0.0, 0.0, 0.0])

# Combine passenger arrays
sample_passengers = np.array([Jack, Rose, You])

# Scale the sample passenger features
sample_passengers = scaler.transform(sample_passengers)

# Make survival predictions!
print("Sample passengers prediciton: ", model.predict(sample_passengers))
| true |
86eee1bfdeda0258ff9db13bd7af0b662a29003a | Python | chwgcc/hogwarts_chw | /python_practice/game/game_round_more.py | UTF-8 | 711 | 3.734375 | 4 | [] | no_license | # -*- coding: utf-8 -*-
# @Time : 2020/10/23 17:12
# @Author : chw
# @File : game_round_more.py
# fight() implements the battle logic of the game.
def fight():
    """Simulate rounds of simultaneous attacks until one side's HP reaches zero."""
    my_hp = 1000
    my_power = 200
    enemy_hp = 1000
    enemy_power = 199
    # Exchange blows round after round until somebody is defeated.
    while my_hp > 0 and enemy_hp > 0:
        my_hp = my_hp - enemy_power
        enemy_hp = enemy_hp - my_power
    # As before, the player loses ties (both dropping to 0 the same round).
    if my_hp <= 0:
        print("我输了")
    else:
        print("我赢了")
# Call the fight function to run one battle.
fight()
| true |
96a4abc4ba336875e8a78f492dfe7b47be83e8cd | Python | Edwinroman30/Python_practices | /To_Practices/Intermediate_Practices/Exercise03.py | UTF-8 | 241 | 3.578125 | 4 | [] | no_license | #Studen: Edwin Alberto Roman Seberino.
#Enrollment: 2020-10233
"""
3. Hacer un programa que genere las tablas de multiplicar de los números múltiplos de 5 que hay entre 1 y 500.
"""
# Idiomatic rewrite: range(5, 501, 5) visits exactly the multiples of 5 in
# 1..500, instead of looping over every number while re-assigning the loop
# variable.  Output is unchanged: 5, 10, ..., 500, one per line.
for multiple in range(5, 501, 5):
    print(multiple)
| true |
589e4f2b44ab4b38396412da0f73dfa131d09412 | Python | meunierd/romexpander | /romexpander.py | UTF-8 | 4,711 | 2.96875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""romexpander.py
Usage:
romexpander.py [options] [INPUT]
romexpander.py [-h | --help]
romexpander.py [-v | --version]
Arguments:
INPUT optional input ROM file.
Options:
-v, --version show version info.
-h, --help show this message.
-o, --output target specify an output file.
-t, --txt cfg specify a config file.
"""
import hashlib
import md5
from binascii import unhexlify

from docopt import docopt
def get_md5(source):
    """Return the hex MD5 digest of the already-open file object *source*.

    Reads in 8196-byte chunks so large ROMs never sit fully in memory.
    Uses hashlib instead of the long-deprecated `md5` module (hashlib is
    available on both Python 2.5+ and Python 3).
    """
    digest = hashlib.md5()
    while True:
        d = source.read(8196)
        if not d:
            break
        digest.update(d)
    return digest.hexdigest()
def hex_to_bstr(d):
    """Return the bytestring for plain-text hex *d*, left-padding odd lengths."""
    padded = "0" + d if len(d) % 2 else d
    return unhexlify(padded)
def load_line(s):
    """Strip *s* and split it on tab characters, returning the token list."""
    return s.strip().split("\t")
def load_script(txt="ROM Expander Pro.txt"):
    """Parse the tab-delimited expansion script *txt* into a dict with keys:
    file, source, target, old_size, new_size, optional MD5, header_size,
    header (bytes), ops (COPY/FILL commands) and patches (REPLACE data).

    NOTE(review): hex fields are parsed with eval("0x" + text), which will
    execute arbitrary expressions from the script file -- only load trusted
    scripts (int(text, 16) would be the safe equivalent).
    """
    script = {}
    script["file"] = txt
    with open(script["file"]) as script_file:
        script_lines = script_file.readlines()
    # Load the `NAME` line from script.
    l = load_line(script_lines.pop(0))
    assert 'NAME' == l.pop(0)
    script["source"], script["target"] = l
    assert script["target"] != script["source"]
    # Load the `SIZE` and optional `MD5`
    l = load_line(script_lines.pop(0))
    script["old_size"] = eval("0x" + l[1])
    script["new_size"] = eval("0x" + l[2])
    if l.index(l[-1]) > 2:
        script["MD5"] = l[3].lower()
    # Load the replacement `HEADER`.
    l = load_line(script_lines.pop(0))
    assert 'HEADER' == l.pop(0)
    script["header_size"] = eval("0x" + l.pop(0))
    assert script["header_size"] > len(l)
    # Sanitize and concatenate the header data.
    new_header = "".join(["0" * (2 - len(x)) + x for x in l])
    # Cast to character data and pad with 0x00 to header_size
    new_header = hex_to_bstr(new_header)
    script["header"] = new_header + "\x00" * (script["header_size"] - len(l))
    script["ops"] = []
    while script_lines:
        script["ops"].append(load_line(script_lines.pop(0)))
    script["patches"] = []
    # NOTE(review): removing from script["ops"] while iterating over it can
    # skip entries when two REPLACE lines are adjacent -- verify scripts
    # never rely on that.
    for op in script["ops"]:
        if op[0] == "REPLACE":
            script["patches"].append(op[1:])
            script["ops"].remove(op)
    return script
def expand_rom(script):
    """Execute a parsed expansion *script* (Python 2): verify the optional
    MD5, write the new header, run the COPY/FILL op list, then apply the
    REPLACE patches to the target file."""
    # Check the source file MD5.
    if "MD5" in script:
        with open(script["source"], "rb") as s_file:
            # Don't digest the header.
            s_file.read(script["header_size"])
            assert script["MD5"] == get_md5(s_file)
            print "MD5... match!"
    print "Expanding..."
    with open(script["source"], "rb") as s, open(script["target"], "wb") as t:
        # Both helpers close over `end_ptr`, which the op loop below rebinds
        # before every COPY/FILL call (it marks where the next op starts).
        def copy(s_offset, t_offset):
            source_ptr = script["header_size"] + s_offset
            write_ptr = script["header_size"] + t_offset
            s.seek(source_ptr)
            t.seek(write_ptr)
            t.write(s.read(end_ptr - write_ptr))

        def fill(destination, value):
            write_ptr = script["header_size"] + destination
            t.seek(write_ptr)
            t.write(value * (end_ptr - write_ptr))
        # Write Header
        t.write(script["header"])
        while script["ops"]:
            op = script["ops"].pop(0)
            cmd = op.pop(0)
            # The current op extends up to the next op's target offset, or to
            # the end of the expanded ROM for the final op.
            if not script["ops"]:
                end_ptr = script["header_size"] + script["new_size"]
            else:
                end_ptr = eval("0x" + script["ops"][0][1]) + \
                    script["header_size"]
            if cmd == "COPY":
                copy(eval("0x" + op[1]),  # Source
                     eval("0x" + op[0]))  # Target
            elif cmd == "FILL":
                fill(eval("0x" + op[0]),  # Destination
                     hex_to_bstr(op[1]))  # Value
            else:
                raise Exception
        # REPLACE
        for patch in script["patches"]:
            offset = eval("0x" + patch.pop(0))
            data = "".join(["0" * (2 - len(x)) + x for x in patch])
            t.seek(offset + script['header_size'])
            t.write(hex_to_bstr(data))
    print "Wrote %s successfully." % (script["target"])
def run(**kwargs):
    """Build the expansion script from parsed CLI options and execute it."""
    txt = kwargs['--txt']
    script = load_script(txt) if txt else load_script()
    if kwargs['--output']:
        script['target'] = kwargs['--output']
    if kwargs['INPUT']:
        script['source'] = kwargs['INPUT']
    expand_rom(script)
if __name__ == "__main__":
    # Parse the CLI with docopt (usage string is the module docstring).
    arguments = docopt(__doc__, version='romexpander 0.4')
    run(**arguments)
| true |
23042dbd9b34104bce8baa62b299fcb1d7cdc675 | Python | eyalho/Cyber_APT_Reports_NER | /creating_data/A1_convert_pdf_to_txt.py | UTF-8 | 1,015 | 2.640625 | 3 | [] | no_license | from pathlib import Path
import pdftotext
from config import GIT_1_SOURCE_DIR, GIT_1_TXT_DIR
def pdftotext_converter(source_pdf_dir, dst_txt_dir):
source_pdf_dir = Path(source_pdf_dir)
dst_txt_dir = Path(dst_txt_dir)
print(f"pdf_dir : {source_pdf_dir}")
print(f"dst_txt_dir : {dst_txt_dir}")
bad_counter = 0
for i, pdf_path in enumerate(source_pdf_dir.rglob("*pdf")):
rel_pdf_path = pdf_path.relative_to(source_pdf_dir)
dst_path = dst_txt_dir / f"{rel_pdf_path}.txt"
dst_path.parent.mkdir(exist_ok=True, parents=True)
# Load your PDF
try:
with open(pdf_path, "rb") as f:
pdf = pdftotext.PDF(f)
with open(dst_path, 'w') as f:
f.write("\n\n".join(pdf))
print("converted", i - bad_counter, dst_path)
except Exception as e:
print(e, i, dst_path)
bad_counter += 1
if __name__ == "__main__":
pdftotext_converter(GIT_1_SOURCE_DIR, GIT_1_TXT_DIR)
| true |
4c8c78e4e649bc0f6673c6fdeaf0ce8cfd2684d1 | Python | mehedi-shafi/word-scrabble-bot | /pathfinder.py | UTF-8 | 1,364 | 2.90625 | 3 | [] | no_license | def backTrack(target, word, node, graph, path=[]):
if len(word) == 0:
formedWord = ''
for _ in path:
formedWord += _.character
if formedWord == target:
return path
return False
adjacencyList = graph[node]
adjacencyCharacterList = [x.character for x in adjacencyList]
if word[0] in adjacencyCharacterList:
indexes = [x for x in range(len(adjacencyCharacterList)) if adjacencyCharacterList[x] == word[0]]
for i in indexes:
if adjacencyList[i] not in path:
newPath = path.copy()
newPath.append(adjacencyList[i])
return backTrack(target, word[1:], adjacencyList[i], graph, newPath)
return False
def validateWord(board, word):
    """Check whether *word* can be traced on *board*.

    Tries every board cell holding the word's first letter as a starting
    point.  Returns the node path when traceable, otherwise False.
    """
    for root in board.getInstances(word[0]):
        trail = backTrack(word, word[1:], root, board.adjacencyList, [root])
        if trail is not False:
            return trail
    return False
if __name__ == '__main__':
    import pickle
    # NOTE: pickle.load executes code embedded in the file; only open
    # trusted board files.
    with open('sample_board.dat', 'rb') as file:
        board = pickle.load(file)
    # dfs([], board.adjacencyList, board.board[1][0])
    res = validateWord(board, "ROSEY")
    print(res)
35b6a0f73cd38f05450012d3f55d775554e0dc5a | Python | wilsonwang371/pyalgotrade | /pyalgotrade/fsm.py | UTF-8 | 3,259 | 3.109375 | 3 | [
"Apache-2.0"
] | permissive | #state machine
import enum
import inspect
import sys
import pyalgotrade.logger
logger = pyalgotrade.logger.getLogger(__name__)
def state(state_enum, is_initial_state=False):
    """Decorator factory tagging a handler with its state-machine state.

    The decorated callable gains a __state__ attribute, plus
    __initial_state__ when it marks the machine's starting state.
    """
    def decorate(handler):
        assert callable(handler)
        assert isinstance(state_enum, enum.Enum)
        handler.__state__ = state_enum
        if is_initial_state:
            handler.__initial_state__ = True
        return handler
    return decorate
class StateMachine(object):
    ''' Introspection-driven state machine.

    Subclasses mark handler methods with the @state decorator; __init__
    collects them via inspect and run() dispatches to the handler of the
    current state.  A handler returns the enum value of the next state.
    '''
    def __init__(self):
        # Maps state enum -> bound handler method.
        self.__states = {}
        self.__current_state = None
        self.__last_state = None
        initial_set = False
        methods = inspect.getmembers(self.__class__,
                                     predicate=lambda x: (inspect.isfunction(x) or
                                                          inspect.ismethod(x)))
        for i in methods:
            # i is a (name, function) pair; only @state-decorated ones count.
            if hasattr(i[1], '__state__'):
                self.__register_state(i[1].__state__, getattr(self, i[0]))
                if hasattr(i[1], '__initial_state__'):
                    if initial_set:
                        raise Exception('you can only have one initial state')
                    initial_set = True
                    self.__set_initial_state(i[1].__state__)
        if not initial_set:
            raise Exception('no initial state defined')
    def __register_state(self, name, function):
        # Record *function* as the handler for state *name*; names are unique.
        logger.debug('Registering state [%s]' % name)
        if name in self.__states:
            raise Exception("Duplicate state %s" % name)
        self.__states[name] = function
    def __set_initial_state(self, name):
        # Make an already-registered state the starting state.
        assert name in self.__states
        logger.debug('Initial state [%s]' % name)
        self.__current_state = name
    @property
    def current_state(self):
        return self.__current_state
    @current_state.setter
    def current_state(self, new_state):
        # Force-set the state; it must be one of the registered states.
        assert new_state in self.__states
        logger.info('Setting state from '
                    '[{}] to [{}]'.format(self.__current_state, new_state))
        self.__current_state = new_state
    @property
    def last_state(self):
        return self.__last_state
    def run(self, *args, **kwargs):
        # Invoke the current state's handler once and follow its transition.
        assert self.__current_state is not None
        new_state = self.__states[self.__current_state](*args, **kwargs)
        self.__last_state = self.__current_state
        if new_state != self.__current_state:
            logger.debug('Switch state [%s] -> [%s]' % (self.__current_state,
                                                        new_state))
            assert new_state in self.__states
            self.__current_state = new_state
    def run_forever(self, *args, **kwargs):
        # Repeatedly run() until interrupted by the caller.
        while True:
            self.run(*args, **kwargs)
class StrategyFSM(StateMachine):
    ''' State machine used by the strategy runner.

    Each state handler should take two arguments: the first one is
    "bars" and the second one is "states".
    '''
    def __init__(self, barfeed, states):
        super(StrategyFSM, self).__init__()
        self.__barfeed = barfeed
        self.__states = states
    @property
    def barfeed(self):
        # The bar feed driving this strategy.
        return self.__barfeed
    @property
    def state(self):
        # NOTE(review): returns the shared *states* object (plural member);
        # the singular property name is kept for API compatibility.
        return self.__states
| true |
f7b6df67cc6eb1afc80ff0e4ef28bfad82c6100c | Python | ojasagg/Time-Table-Solver | /GA.py | UTF-8 | 5,217 | 2.765625 | 3 | [] | no_license | from collections import OrderedDict
import random
import heapq
import time
best_ans=[]  # best chromosome found so far: [halls, profs, days, slots]
best_ans_val=0  # fitness score of best_ans
#Print answer
def output():
 """Pretty-print best_ans as a weekly timetable (Mon-Fri x 8 slots) and the
 elapsed wall-clock time.  Each cell shows Subject_Professor_Hall."""
 subject=[0]*M
 arr=[]
 row=[0,0,0,0,0,0,0,0]
 for j in range(5):
  arr.append(row[:])
 for j in range(0,5):
  for k in range(0,8):
   arr[j][k]=[]
 # Two chromosome slots j and j+M/2 belong to the same course number.
 for j in range(int(M/2)):
  subject[j]=j+1
  subject[j+int(M/2)]=j+1
 # Bucket every scheduled lecture into its (day, slot) cell.
 for j in range(M):
  h=best_ans[0][j]
  p=best_ans[1][j]
  d=best_ans[2][j]
  s=best_ans[3][j]
  newlist=[]
  newlist.append(subject[j])
  newlist.append(p)
  newlist.append(h)
  arr[d-1][s-1].append(newlist)
 print( "\nDay \t1st \t2nd \t3rd \t4th \t5th \t6th \t7th \t8th\n")
 dcount=1
 daydict= {}
 daydict[1]="Mon"
 daydict[2]="Tue"
 daydict[3]="Wed"
 daydict[4]="Thu"
 daydict[5]="Fri"
 len_arr=len(arr)
 for row in range(len_arr):
  # A day may need several printed lines when a cell holds multiple lectures.
  maxc=0
  len_row=len(arr[row])
  for period in range(len_row):
   l=len(arr[row][period])
   maxc=max(l,maxc)
  flag=0
  m=1
  while m<maxc+1:
   line=" "
   for period in range(len_row):
    flag1=len(arr[row][period])>=m
    if(flag1):
     #line+=str(period[m-1])+" \t"
     line+="S"+str(arr[row][period][m-1][0])+"_P"+str(arr[row][period][m-1][1])
     line+="_H"+str(arr[row][period][m-1][2])+" \t"
    else:
     line+=" \t"
   flag2=(flag==0)
   if flag2:
    print(str(daydict[dcount])+" "+line)
   else:
    print(" "+line)
   flag=1
   m+=1
  print("\n\n")
  dcount+=1
 print "Time taken= "+str(time.time()-strt_tme)+"sec"
#Stopping criteria
def stop_criteria(population,M):
 """If any chromosome is clash-free, print the timetable and exit the
 program; otherwise return 0 so the GA loop continues.

 NOTE(review): `population[i][3][j]==population[i][2][k]` compares a slot
 against a day (possibly meant [3][k]); kept as-is because fitness() uses
 the same comparison.
 """
 ret=0
 for i in population:
  c=0
  for j in range(M):
   for k in range(M):
    if population[i][2][j]==population[i][2][k] and population[i][3][j]==population[i][2][k]:
     if population[i][1][j]==population[i][1][k] or population[i][0][j]==population[i][0][k]:
      c=1
  if c==0:
   output()
   exit()
 return 0
#Fitness function
def fitness(pop, entry, M):
    """Score a population against the timetable constraints.

    NOTE(review): despite taking *entry*, the score is summed over the whole
    population, so every chromosome receives the same value; kept as-is to
    preserve behaviour.  Fix: the second loop originally read the module-level
    `population` instead of the `pop` parameter -- they are the same object at
    every call site, so this consistency fix does not change behaviour.
    """
    score = 0
    for i in pop:
        for j in range(int(M/2)):
            # constraint 1: reward the two lectures of a course on different days
            if pop[i][2][j] != pop[i][2][j+int(M/2)]:
                score += 100
            # constraint 2: reward a gap of at least 2 days (one direction only)
            if pop[i][2][j] - pop[i][2][j+int(M/2)] >= 2:
                score += 100
            # constraint 3: reward identical halls for the two lectures
            if pop[i][0][j] == pop[i][0][j+int(M/2)]:
                score += 100
    c4 = 0
    c5 = 0
    for i in pop:
        for j in range(M):
            for k in range(M):
                # NOTE(review): `pop[i][3][j] == pop[i][2][k]` compares a slot
                # with a day (possibly meant pop[i][3][k]); left unchanged to
                # match stop_criteria().
                if pop[i][2][j] == pop[i][2][k] and pop[i][3][j] == pop[i][2][k]:
                    # constraint 4: same-professor clash
                    if pop[i][1][j] == pop[i][1][k]:
                        c4 = 1
                    # constraint 5: same-hall clash
                    if pop[i][0][j] == pop[i][0][k]:
                        c5 = 1
    score -= c4*100
    score -= c5*100
    return score
#Initial declarations
M=int(input("Enter the number of courses"))
N=int(input("Enter the number of lecture halls"))
P=int(input("Enter the number of professors"))
strt_tme=time.time()
M*=2#For restricting atleast 2 classes of each course a week.
D=5
S=8
top_k=15#out of total population, top 15 parents would be selected.
chromosome=[[],[],[],[]]
#Assigning professors for subjects, same professor may take multiple subject, but multiple professor should not take same subject.
profs=[0]*M
for i in range(int(M/2)):
 profs[i]=random.randint(1,P)
 profs[i+int(M/2)]=profs[i]
#Randomly initialize population
population={}
for i in range(50):
 chromo=[]
 for l in chromosome:
  chromo.append(l[:])
 for m in range(M):
  chromo[0].append(random.randint(1,N))
 for m in range(M):
  chromo[1].append(profs[m])
 for m in range(M):
  chromo[2].append(random.randint(1,5))
 for m in range(M):
  chromo[3].append(random.randint(1,8))
 population[str(chromo)]=chromo
#Assigning first random chromosome as best answer, it can be updated futher
best_ans=chromo
#Evaluating population
value=[]#priority queue to store population in increasing order of fitness value
for i in population:
 val=fitness(population,i,M)
 value.append([val,i])
 if value[0][0]>best_ans_val:
  best_ans=population[i]
heapq._heapify_max(value)
iterations=0
# Main GA loop: select parents, crossover, re-evaluate, repeat until a
# clash-free timetable is found or 1000 iterations pass.
while(not stop_criteria(population,M) and iterations<1000):
 print("Iteration "+str(iterations+1)+" going on")
 #Selecting parents
 new_population={}
 for i in range(top_k):
  new_population[value[i][1]]=population[value[i][1]]
 population=new_population.copy()
 new_population.clear()
 #Crossover to produce children
 new_population=population.copy()
 for i in population:
  for j in population:
   p1=[]
   p2=[]
   c1=[]
   c2=[]
   for l in population[i]:
    p1.append(l[:])
   for l in population[j]:
    p2.append(l[:])
   # One-point crossover on each of the four gene rows.
   one_point=random.randint(0,M)
   for k in range(4):
    c1.append(p1[k][:one_point]+p2[k][one_point:])
    c2.append(p2[k][:one_point]+p1[k][one_point:])
   new_population[str(c1)]=c1
   new_population[str(c2)]=c2
 population.clear()
 population=new_population.copy()
 new_population.clear()
 #Evaluating population
 del value[:]
 for i in population:
  val=fitness(population,i,M)
  value.append([val,i])
  if value[0][0]>best_ans_val:
   best_ans=population[i]
 heapq._heapify_max(value)
 iterations+=1
print "This might not be the best answer, but the most preferable one."
output()
#contraints
#different days for 2 classes of a subject
#difference of 2 day between 2 classes of a subject
#same hall for a subject
#same day, same slot , not same prof
#same day, same slot , not same hall
| true |
ee24e575c5a9a8df45411807b64a6c3b911a3f7e | Python | foldvaridominic/taboos | /aux.py | UTF-8 | 10,495 | 2.65625 | 3 | [] | no_license | import logging
import random
from collections import Counter, defaultdict
from functools import reduce
from itertools import combinations
import networkx as nx
# Configure the root logger: INFO level with a timestamped console handler.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
ch.setLevel(logging.INFO)
logger.addHandler(ch)
from constants import NUCLEOTIDE_CHARACTERS, ALPHABET_LENGTH
from tabooset import TabooSet as TS
from utils import *
from plot import DrawCube
# Default alphabet: the set of nucleotide characters (e.g. for DNA words).
nucleotides = set(NUCLEOTIDE_CHARACTERS)
# Shared accumulator used by rec_func below: one allowed-letter set per position.
allowed = []
def rec_func(length, letters=nucleotides):
    """Recursively enumerate one random non-empty subset of `letters` per subset
    size for each of `length` positions, yielding the shared module-level
    `allowed` list once per complete assignment.

    Callers must consume the yielded list immediately: it is mutated in place
    as the recursion unwinds.
    """
    global allowed
    # FIX: random.sample() requires a sequence, not a set, since Python 3.11;
    # sorting also makes the draw reproducible under a fixed seed.
    pool = sorted(letters)
    for r in range(1, len(letters) + 1):
        allowed.append(set(random.sample(pool, r)))
        if length > 1:
            yield from rec_func(length - 1, letters)
        else:
            yield allowed
        # pop the choice for this position before trying the next subset size
        allowed = allowed[:-1]
def gen_hamming_graph(taboos, length, letters=nucleotides):
    """Build the Hamming graph on all words of `length` over `letters`, remove
    the taboo words, and report/return the sizes of the connected components."""
    g = nx.Graph()
    words = list(get_self_product(letters, length))
    g.add_nodes_from(words)
    g.add_edges_from(pair for pair in combinations(words, 2)
                     if hamming_distance_1_for_strings(pair))
    g.remove_nodes_from(taboos)
    blocks = [len(component)
              for component in nx.algorithms.components.connected_components(g)]
    print(f"Size of connected components: {blocks}")
    print(f"Number of connected components: {len(blocks)}")
    return blocks
def create_projections(taboos, length, letters=nucleotides):
    """Classify every axis-aligned cross-section of the taboo set.

    For each fixed position and each letter at that position, project the
    matching taboos onto the remaining coordinates and build the
    (length-1)-dimensional Hamming graph without them, labelling the result:
      "C"  - the projected graph stays connected
      "DC" - disconnected, but restoring some single projected taboo reconnects it
      "MC" - disconnected and no single projected taboo restores connectivity
    The per-(position, letter) labels are finally collapsed into a nested
    multiset signature (frozenset of Counter items) so equivalent structures
    compare equal regardless of ordering.
    """
    cst = []
    alphabet = len(letters)
    for fixed in range(length):
        # coordinates kept by the projection (everything except `fixed`)
        projection = [i for i in range(length) if i != fixed]
        for a in letters:
            projected_taboos = [tuple(t[i] for i in projection)
                    for t in taboos if t[fixed] == a]
            dummy_graph = nx.Graph()
            dummy_nodes = list(get_self_product(letters, length-1))
            dummy_graph.add_nodes_from(dummy_nodes)
            dummy_edges = combinations(dummy_nodes, 2)
            dummy_edges = [e for e in dummy_edges if hamming_distance_1_for_strings(e)]
            dummy_graph.add_edges_from(dummy_edges)
            dummy_graph.remove_nodes_from(projected_taboos)
            # for each projected taboo: the edges it would contribute if restored
            taboo_edges = [[e for e in direct_product([list(dummy_graph.nodes), [n]])
                    if hamming_distance_1_for_strings(e)]
                    for n in projected_taboos]
            cc = nx.algorithms.components.number_connected_components(dummy_graph)
            if cc > 1:
                # `minimal` stays True only if no single restored taboo reconnects
                minimal = True
                for te in taboo_edges:
                    dg = dummy_graph.copy()
                    dg.add_edges_from(te)
                    if nx.algorithms.components.number_connected_components(dg) > 1:
                        minimal = False
                        break
                if minimal:
                    cst.append("MC")
                else:
                    cst.append("DC")
            else:
                cst.append("C")
    # group labels per fixed position (runs of `alphabet` entries), then take the
    # multiset of those per-position multisets
    cst = frozenset(Counter([frozenset(Counter([cst[k+i] for i in range(alphabet)]).items()) for k in range(0, len(cst), alphabet)]).items())
    return cst
def enum_graphs(length, alphabet=None):
    """Enumerate random taboo-block structures and tally which ones disconnect
    the Hamming graph, grouped by component-size signature and by the
    cross-section signature from create_projections.

    Relies on rec_func yielding the shared module-level `allowed` list, so each
    yield must be processed before advancing the generator.
    """
    # integer alphabet if a size is given, otherwise the nucleotide characters
    letters = set(range(alphabet)) if alphabet else nucleotides
    all_structures = defaultdict(list)
    all_projections = defaultdict(list)
    for allowed in rec_func(length, letters):
        # complement of the allowed letters at each position
        not_allowed = [letters-pos for pos in allowed]
        # taboo words: exactly one position uses a forbidden letter
        taboos = []
        for idx in range(length):
            taboos += [s for s in direct_product(
                allowed[:idx] + [not_allowed[idx]] + allowed[idx+1:])]
        structure = tuple([len(a) for a in allowed])
        print(f"Taboo blocks: {structure}")
        blocks = gen_hamming_graph(taboos, length, letters)
        if len(blocks) > 1:
            # only disconnected cases are recorded
            block = frozenset(Counter(blocks).items())
            cst = create_projections(taboos, length, letters)
            all_structures[block].append(structure)
            all_projections[cst].append(structure)
    print(f"Number of different block structures: {len(all_structures)}")
    print(f"All different block structures")
    for block, count in all_structures.items():
        print(f"{block}: {count}")
    print(f"Number of different proj structures: {len(all_projections)}")
    print(f"All different proj structures")
    for cst, count in all_projections.items():
        print(f"{cst}: {count}")
def sigma_minus(number, alphabet_length=4):
    """Complement of *number* with respect to the alphabet size."""
    return alphabet_length - number

def flip_at(iterator, indices):
    """Return a copy of *iterator* with each position listed in *indices*
    replaced by its sigma_minus complement (repeated indices flip again)."""
    flipped = list(iterator)
    for position in indices:
        flipped[position] = sigma_minus(flipped[position])
    return flipped
def enum_quotient_graph_length(dimensions, alphabet_length=4):
    """Count quotient-graph nodes per "flip level".

    For each flip = 0..len(dimensions), sums the products of `dimensions`
    with every size-`flip` subset of positions complemented
    (d -> alphabet_length - d). Returns one total per flip level.
    """
    length = len(dimensions)
    node_levels = []
    for flip in range(length + 1):
        level = 0
        for indices in combinations(range(length), flip):
            # FIX: the complement now honours `alphabet_length`; the old code
            # delegated to flip_at(), which always used its own default of 4,
            # so the parameter was silently ignored.
            flipped = [alphabet_length - d if i in indices else d
                       for i, d in enumerate(dimensions)]
            level += math.prod(flipped)
        node_levels.append(level)
    return node_levels
def increase_dimension(characters, increase=4):
    """Build the taboo words for `characters` and rebuild the Hamming graph at
    `increase` successively larger word lengths, logging component sizes via
    gen_hamming_graph's prints.

    NOTE(review): `letters` is not defined in this module — presumably it comes
    from the `utils` star import; confirm, otherwise this raises NameError.
    """
    not_allowed = [letters-pos for pos in characters]
    taboos = []
    for idx, _ in enumerate(characters):
        taboos += [to_string(s) for s in direct_product(characters[:idx] + [not_allowed[idx]] + characters[idx+1:])]
    structure = tuple([len(c) for c in characters])
    logger.info("Taboo blocks: %s", structure)
    for jdx in range(increase):
        # NOTE(review): `idx` is the leaked value from the loop above (the last
        # position index), so lengths start at len(characters); `blocks` is
        # computed only for gen_hamming_graph's side-effect prints.
        blocks = gen_hamming_graph(taboos, idx+1+jdx)
    return
def enum_quotient_graph(characters, skip=1):
    """Enumerate quotient-graph node groups for `characters`, skipping the flip
    level equal to `skip`. Each group is (running index, character-set list)
    with the positions in `indices` replaced by their complements.

    NOTE(review): `letters`/`not_allowed` rely on a module-level `letters`
    that is not defined here — presumably from the `utils` star import.
    """
    not_allowed = [letters-pos for pos in characters]
    length = len(characters)
    node_groups = []
    index_groups = []
    jdx = 0
    for idx in range(length+1):
        if idx == skip:
            continue
        # NOTE(review): these per-level lists are appended but then discarded by
        # the overwrite below — dead code kept for reference.
        index_groups.append([])
        for indices in combinations(range(length),idx):
            jdx += 1
            current_characters = [c for c in characters]
            for i in indices:
                current_characters[i] = not_allowed[i]
            node_groups.append((jdx,current_characters))
    # hard-coded grouping: group 1 alone, everything else together
    index_groups = [(1,), tuple(range(2,jdx+1))]
    return node_groups, index_groups
def extend_left_and_right(node_groups):
    """Return two variants of `node_groups`: each group's character list padded
    with the module-level `letters` set on the left and on the right."""
    padded_left = []
    padded_right = []
    for index, characters in node_groups:
        padded_left.append((index, [letters] + characters))
        padded_right.append((index, characters + [letters]))
    return padded_left, padded_right
def overlaps(node_groups, index_groups):
    """Combine left- and right-extended node groups: for every pair whose
    position-wise character-set intersections are all non-empty, emit a new
    group keyed by the pair of source indices.

    NOTE(review): `new_index_groups` is still a TODO and is returned empty;
    `index_groups` is currently unused.
    """
    left, right = extend_left_and_right(node_groups)
    new_node_groups = []
    for p1,p2 in direct_product([left, right]):
        # position-wise intersection of the two extended character lists
        ints = [s1.intersection(s2) for s1, s2 in zip(p1[1],p2[1])]
        if all(ints):
            new_node_groups.append(((p1[0], p2[0]), ints))
    indices = [n[0] for n in new_node_groups]
    new_index_groups = [] #TODO
    return new_node_groups, new_index_groups
def inspect_dimension_increment_in_quotient_graph(characters, increase=1):
    """Log the quotient-graph node groups for `characters`, then apply
    `overlaps` `increase` times, logging the groups after every step."""
    node_groups, index_groups = enum_quotient_graph(characters)
    for entry in node_groups:
        logger.info(entry)
    for _ in range(increase):
        node_groups, index_groups = overlaps(node_groups, index_groups)
        for entry in node_groups:
            logger.info(entry)
class TabooTree:
    """Exhaustively searches taboo sets over the Hamming graph on words of a
    given length/alphabet, recording which removals disconnect the graph and
    classifying their cross-sections. The whole search runs from __init__."""
    def __init__(self, length, alphabet):
        # full Hamming graph: words as nodes, edges between Hamming-distance-1 pairs
        nodes = list(get_self_product(range(alphabet), length))
        graph = nx.Graph()
        graph.add_nodes_from(nodes)
        edges = combinations(nodes, 2)
        edges = [e for e in edges if hamming_distance_1_for_strings(e)]
        graph.add_edges_from(edges)
        #logger.info("Start: %s", list(graph.edges))
        self.graph = graph
        # first search level: every singleton taboo set
        current_branch = [{n,} for n in nodes]
        self.current_branch = current_branch
        self.graph_nodes = nodes
        self.num_states = alphabet**length
        self.draw_cube = DrawCube(alphabet, length)
        self.n = length
        self.alphabet = range(alphabet)
        self.check_connected()
    def create_new_branch(self, length, skip):
        """Yield all size-`length` candidate taboo sets that do not contain any
        already-disconnecting set from `skip` (supersets are pruned)."""
        for c in combinations(self.graph_nodes, length):
            if not any(s <= set(c) for s in skip):
                yield set(c)
    def check_connected(self):
        """Grow taboo sets level by level; for each set, temporarily delete its
        nodes, count components, and tally disconnecting sets by component-size
        and cross-section signature."""
        new_branch = self.current_branch
        disconnected = []
        for i in range(self.num_states-1):
            # node in the TabooTree is a collection of nodes to be removed from the Hamming-graph
            component_sizes = defaultdict(int)
            cross_section_types = defaultdict(int)
            count = 0
            for idx, remove_nodes in enumerate(new_branch, 1):
                # remember the incident edges so the deletion can be undone
                remove_edges = list(flatten([list(self.graph.edges(n)) for n in remove_nodes]))
                #logger.info("%s Remove nodes: %s", idx, remove_nodes)
                #logger.info("%s Remove edges: %s", idx, remove_edges)
                self.graph.remove_edges_from(remove_edges)
                self.graph.remove_nodes_from(remove_nodes)
                #logger.info("%s Remaining edges: %s", idx, list(self.graph.edges))
                cc = nx.algorithms.components.number_connected_components(self.graph)
                components = list(nx.algorithms.components.connected_components(self.graph))
                # restore: add_edges_from re-adds the removed nodes implicitly
                self.graph.add_edges_from(remove_edges)
                if cc > 1:
                    disconnected.append(remove_nodes)
                    component_size = frozenset([len(c) for c in components])
                    #logger.info("Taboo count: %s | idx: %s | components: %s", i+1, idx, component_size)
                    component_sizes[component_size] += 1
                    count += 1
                    #if self.num_states == 16:
                    #cst = self.draw_cube.create_fig_with_projections(remove_nodes, f'{i+1}_{count}')
                    # resize and regroup
                    #cst = frozenset(Counter([frozenset((cst[k],cst[k+1])) for k in range(0, len(cst), 2)]).items())
                    #cross_section_types[cst] += 1
                    cst = create_projections(remove_nodes, self.n, self.alphabet)
                    cross_section_types[cst] += 1
            logger.info("Taboo count %s finished: %s", i+1, count)
            logger.info(component_sizes.items())
            for cst, count in cross_section_types.items():
                logger.info("%s: %s", cst, count)
            # next level: one-larger taboo sets, pruning supersets of known hits
            new_branch = self.create_new_branch(i+2, disconnected)
        #logger.info("End: %s", list(self.graph.edges))
| true |
f022a76ad25582ab2e9fa434477acb618d8c1411 | Python | acnar/CombinedLIFX_Alexa_Python | /lifx_manager.py | UTF-8 | 8,070 | 2.515625 | 3 | [] | no_license | import configparser
from copy import deepcopy
from lifxlan.lifxlan import *
from time import time
""""
Class for managing LIFX devices.
"""
class LIFXManager:
    """Manages a group of LIFX lights: fades them to a configured scene
    (LightsDown) and restores their previously saved state (LightsRestore)."""

    # light_state values
    LIGHTS_DOWN = 0
    LIGHTS_RESTORED = 1
    LIGHTS_CHANGED = 2

    def __init__(self):
        config = configparser.ConfigParser()
        config.read("config")
        self.configs = config["LIFX_CONFIGS"]
        self.config = config["LIFX"]
        self.saved_groups = dict()
        self.lan = LifxLAN()
        # cached discovery
        self.ReadCache()
        # perform first new discovery
        self.lan.get_groups()
        self.ProcessConfigs()
        self.control_group = self.config["control_group"]
        self.active_config_num = self.config["active_config_num"]
        if int(self.active_config_num) >= len(self.configs):
            # override invalid config
            # FIX: was assigned to a misspelled attribute (activeConfigNum),
            # leaving the invalid value in active_config_num.
            self.active_config_num = "0"
        if self.config["lights_on_at_start"] == "1":
            self.lights_start_on = True
            self.lan.set_group(self.control_group, (0, 0, 13107, 3000), 65535, 0)
        else:
            self.lights_start_on = False
        # Store current state as saved state after the first full discovery.
        self.save_pending = True
        self.light_state = LIFXManager.LIGHTS_RESTORED
        self.previous_state = LIFXManager.LIGHTS_RESTORED
        self.last_print = ""
        if len(self.configs) == 0:
            print("Error, no configs found\n")
            exit(1)

    def ReadCache(self):
        """Load the cached discovery results (groups of Devices) from 'cache'."""
        groups = dict()
        group = []
        name = ""
        with open("cache") as cache:
            for line in cache:
                if "Group" in line:
                    # flush the previous group before starting a new one
                    if name != "" and len(group) > 0:
                        groups[name] = group
                    name = line.split(",")[1].strip()
                    # FIX: start a fresh list; previously all devices from every
                    # group accumulated into one shared list.
                    group = []
                elif "Device" in line:
                    (device, mac_addr, label, hue, saturation, brightness, kelvin, power_level) = line.split(",")
                    light = Device(0, 0, 0, 0, 0)
                    # FIX: the device label was set to the GROUP name, breaking
                    # the per-device lookups in LightsRestore/LightsDown.
                    light.label = label.strip()
                    light.color = (int(hue.strip()), int(saturation.strip()), int(brightness.strip()), int(kelvin.strip()))
                    light.power_level = int(power_level.strip())
                    light.mac_addr = mac_addr.strip()
                    light.discovered = False
                    light.discovery_time = time()
                    group.append(light)
        # FIX: flush the final group, which was previously dropped.
        if name != "" and len(group) > 0:
            groups[name] = group
        self.lan.groups = groups

    def WriteCache(self):
        """Persist the current group/device snapshot to the 'cache' file."""
        with open("cache", "w") as cache:
            for group, lights in self.lan.groups.items():
                cache.write("Group, %s\n" % group)
                for light in lights:
                    cache.write("Device, %s, %s, %i, %i, %i, %i, %i\n" % (
                        light.mac_addr, light.label, light.color[0], light.color[1],
                        light.color[2], light.color[3], light.power_level))

    def ProcessConfigs(self):
        """Parse raw config strings into
        (name, (h, s, b, k), power_level, fade_time, restore_time) tuples."""
        new_configs = dict()
        for key, value in self.configs.items():
            if "," in value:
                (name, hue, saturation, brightness, kelvin, power_level, fade_time, restore_time) = value.split(",")
                new_configs[key] = (name, (int(hue), int(saturation), int(brightness), int(kelvin)),
                                    int(power_level), int(fade_time), int(restore_time))
            else:
                new_configs[key] = value
        self.configs = new_configs

    def ListGroups(self):
        """Print the known group names, but only when the list changed."""
        thisprint = ""
        for group in self.lan.groups:
            thisprint += "%s\n" % str(group)
        # FIX: the attribute set in __init__ is last_print; `lastprint` raised
        # AttributeError on the first call.
        if thisprint != self.last_print:
            print(thisprint)
            self.last_print = thisprint

    def LightsRestore(self):
        """Fade every device in the control group back to its saved color/power."""
        saved_state = self.saved_groups[self.control_group]
        # FIX: active_config_num is a string; comparing against int 0 was
        # always True, so config "0" never took the zero restore time.
        if self.active_config_num != "0":
            restore_time = self.configs[self.active_config_num][-1]
        else:
            restore_time = 0
        group = self.lan.groups[self.control_group]
        for device in group:
            (saved_color, saved_power) = saved_state[device.label]
            if saved_power != 0:
                # Turn on the power first
                device.set_power(saved_power, restore_time)
            device.set_hsbk(saved_color, restore_time)
            if saved_power == 0:
                # FIX: turn the power OFF last — the condition was inverted
                # (!= 0), so lights that should stay off were never powered down.
                device.set_power(saved_power, restore_time)
        self.light_state = LIFXManager.LIGHTS_RESTORED

    def LightsDown(self):
        """Fade the control group to the active config's scene, saving the
        current state first (unless a manual change is being preserved).
        Returns False when discovery is incomplete, otherwise None."""
        save = True
        (name, color, power_level, fade_time, restore_time) = self.configs[self.active_config_num]
        if not self.lan.discovery_done(self.control_group):
            return False
        state = self.GetGroupState()
        saved_state = self.saved_groups[self.control_group]
        # FIX: configs hold a tuple; the brightness clamp below mutates index 2,
        # which raised TypeError on a tuple.
        color = list(color)
        for device in self.lan.groups[self.control_group]:
            if self.light_state == LIFXManager.LIGHTS_CHANGED:
                # FIX: `prev_light_state` and bare LIGHTS_RESTORED did not
                # exist (AttributeError/NameError on this path).
                if self.previous_state != LIFXManager.LIGHTS_RESTORED:
                    compare_state = saved_state[device.label]
                    save = False
                else:
                    compare_state = state[device.label]
                print(compare_state)
                if compare_state[1] != 0 or power_level != 0:
                    if compare_state[1] >= power_level:
                        if compare_state[0][2] < color[2]:
                            # never raise brightness above what the light shows
                            color[2] = compare_state[0][2]
            if power_level != 0:
                # Turn on the power first
                device.set_power(power_level, fade_time)
            device.set_hsbk(color, fade_time)
            if power_level == 0:
                # Turn off power last
                device.set_power(power_level, fade_time)
        if save:
            print("save")
            self.SaveGroup(fade_time, state)
        self.previous_state = self.light_state
        self.light_state = LIFXManager.LIGHTS_DOWN

    def GetGroupState(self):
        """Snapshot {label: (color, power)} for every device in the control group."""
        state = dict()
        for device in self.lan.groups[self.control_group]:
            # deepcopy so later device mutations don't alter the snapshot
            state[deepcopy(device.label)] = (deepcopy(device.color), deepcopy(device.power_level))
        return state

    def SaveGroup(self, delay=0, state=None):
        """Remember the group's current state for LightsRestore. An existing
        entry is only overwritten once its hold time has expired."""
        save = False
        if not state:
            state = self.GetGroupState()
        if self.control_group in self.saved_groups:
            saved_state = self.saved_groups[self.control_group]
            if "time" in saved_state:
                if saved_state["time"] < time():
                    save = True
            else:
                save = True  # legacy entry without a hold time
        else:
            save = True
        if save:
            # FIX: always stamp the hold time; previously the first save stored
            # no "time" key, which blocked every subsequent save forever.
            state["time"] = time() + delay
            self.saved_groups[self.control_group] = state

    def Discover(self):
        """Re-run LAN discovery (best effort); once the control group is fully
        discovered, take the initial state snapshot if one is still pending."""
        try:
            self.lan.get_groups()
        except Exception:
            # best-effort: discovery failures are simply retried next call
            pass
        if self.save_pending:
            if self.lan.discovery_done(self.control_group):
                self.SaveGroup()
                self.save_pending = False
2164b54d75a8ae11f9dc293a76602a5a7f9d5b2b | Python | slee17/NLP | /sentimentAnalysis/sentimentAnalysis.py | UTF-8 | 8,224 | 2.796875 | 3 | [] | no_license | from nltk.twitter import Streamer, TweetWriter, credsfromfile
from nltk.twitter.common import json2csv
from nltk.corpus import stopwords, opinion_lexicon
from sklearn.dummy import DummyClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.cross_validation import KFold
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import f1_score
from sklearn.pipeline import Pipeline
from sklearn import metrics, cross_validation
import pandas as pd
import numpy as np
import csv
import json
def find_matching_tweets(num_tweets=100, fname="matching_tweets.csv", shownum=50):
    """Given the number of tweets to retrieve, queries that number of tweets with
    the keyword "Trump" and saves the tweet id and text as a csv file "fname". Prints
    out the shownum amount of tweets using panda. Does not remove retweets."""
    oauth = credsfromfile()
    # create and register a streamer
    client = Streamer(**oauth)
    writer = TweetWriter(limit=num_tweets)
    client.register(writer)
    # get the name of the newly-created json file
    input_file = writer.timestamped_file()
    client.filter(track="trump") # case-insensitive
    client.sample()  # NOTE(review): filter() above already streams; confirm this second stream is intended
    with open(input_file) as fp:
        # these two fields for now
        json2csv(fp, fname, ['id', 'text', ])
    # pretty print using pandas
    tweets = pd.read_csv(fname, encoding="utf8")
    return tweets.head(shownum)
def parse(filename, delimiter='\t'):
    """Read a delimited file of labelled tweets and return one dict per row.

    The first line is treated as the header (csv.DictReader); each returned
    dict maps column name -> cell value. Label counts are tallied for optional
    dataset analysis.
    """
    json_objects = []
    count_positive = 0
    count_negative = 0
    count_neutral = 0
    with open(filename) as csvfile:
        reader = csv.DictReader(csvfile, delimiter=delimiter)
        for row in reader:
            # FIX: the old json.dumps/json.loads round-trip was a no-op —
            # DictReader rows are already plain dicts of strings.
            record = dict(row)
            label = record['label']
            if label == 'positive':
                count_positive += 1
            elif label == 'negative':
                count_negative += 1
            else:
                count_neutral += 1
            json_objects.append(record)
    # Uncomment to inspect the class balance of the data set:
    # print("Total number of entries: %d" % len(json_objects))
    # print("Positive: %d, %f%%" % (count_positive, count_positive/len(json_objects)*100))
    # print("Negative: %d, %f%%" % (count_negative, count_negative/len(json_objects)*100))
    # print("Neutral: %d, %f%%" % (count_neutral, count_neutral/len(json_objects)*100))
    return json_objects
def format_json(json_objects):
    """Split a list of tweet dicts into two parallel numpy arrays:
    texts (each wrapped in a single-element list) and labels."""
    texts = [[record['text']] for record in json_objects]
    labels = [record['label'] for record in json_objects]
    return np.asarray(texts), np.asarray(labels)
def run_baseline_systems(strategy, training_file, test_file):
    """Score a sklearn DummyClassifier baseline.

    Returns (mean 10-fold cross-validation score on the training data,
    score on the held-out test set).
    """
    # generate training and test data
    training_json_objects = parse(training_file, delimiter='\t')
    training_texts, training_labels = format_json(training_json_objects)
    test_json_objects = parse(test_file, delimiter=',')
    test_texts, test_labels = format_json(test_json_objects)

    clf = DummyClassifier(strategy=strategy)
    # FIX: the CV call referenced undefined names `texts`/`labels` (NameError);
    # it must run on the training data.
    f1_scores = cross_validation.cross_val_score(clf, training_texts, training_labels, cv=10)
    # NOTE(review): without a scoring= argument this is the default (accuracy),
    # not F1, despite the variable names — confirm intent.
    f1_score = sum(f1_scores) / len(f1_scores)
    # calculate the score on the test set
    clf.fit(training_texts, training_labels)
    test_score = clf.score(test_texts, test_labels)
    return f1_score, test_score
def run_NB(training_file, test_file):
    """Train a multinomial Naive Bayes sentiment classifier over an
    opinion-lexicon bag of words. Returns (mean 10-fold CV score on the
    training data, accuracy on the test set)."""
    train_records = parse(training_file, delimiter='\t')
    train_texts, train_labels = format_json(train_records)
    test_records = parse(test_file, delimiter=',')
    test_texts, test_labels = format_json(test_records)

    # flatten the single-element rows produced by format_json
    train_texts = [row[0] for row in train_texts]
    test_texts = [row[0] for row in test_texts]

    vectorizer = CountVectorizer(analyzer="word", stop_words='english',
                                 vocabulary=list(set(opinion_lexicon.words())))
    train_counts = vectorizer.transform(train_texts)
    classifier = MultinomialNB()

    # mean score over 10 folds of the training data
    folds = KFold(n=len(train_texts), n_folds=10)
    fold_scores = cross_validation.cross_val_score(classifier, train_counts, train_labels, cv=folds)
    cv_score = sum(fold_scores) / len(fold_scores)

    # fit on the full training set, then measure accuracy on the held-out tweets
    classifier.fit(train_counts, train_labels)
    predictions = classifier.predict(vectorizer.transform(test_texts))
    correct = sum(1 for predicted, actual in zip(predictions, test_labels)
                  if predicted == actual)
    return cv_score, correct / len(predictions)
def advanced_classifier(training_file, test_file):
    """Naive Bayes over an opinion-lexicon bag of words, plus two rule-based
    overrides applied to the test predictions (hyperlink -> neutral, positive
    hashtag -> positive). Returns (mean 10-fold CV score, test accuracy)."""
    # generate training and text data
    training_json_objects = parse(training_file, delimiter = '\t')
    training_texts, training_labels = format_json(training_json_objects)
    test_json_objects = parse(test_file, delimiter = ',')
    test_texts, test_labels = format_json(test_json_objects)
    # normalize tweets (lowercase, strip mentions, mark URLs as 'hyperlink', ...)
    training_texts = parse_text(training_texts)
    test_texts = parse_text(test_texts)
    count_vectorizer = CountVectorizer(analyzer="word", stop_words='english', vocabulary=list(set(opinion_lexicon.words())))
    counts = count_vectorizer.transform(training_texts)
    classifier = MultinomialNB()
    # calculate the 10-fold f1 score
    # NOTE(review): no scoring= argument, so this is the default score (accuracy)
    k_fold = KFold(n=len(training_texts), n_folds=10)
    scores = cross_validation.cross_val_score(classifier, counts, training_labels, cv=k_fold) # scoring=f1_scorer
    f1_score = sum(scores)/len(scores)
    # calculate the score on the test set
    classifier.fit(counts, training_labels)
    test_counts = count_vectorizer.transform(test_texts)
    predictions = classifier.predict(test_counts)
    # sideline features: rule-based overrides of the NB predictions
    for i in range(len(predictions)):
        if includes_hyperlink(test_texts[i]):
            predictions[i] = 'neutral'
        if includes_positive_hashtag(test_texts[i]):
            predictions[i] = 'positive'
    # calculate the score on the test set
    correct_predictions = 0
    for i in range(len(predictions)):
        if predictions[i] == test_labels[i]:
            correct_predictions += 1
    test_score = correct_predictions/len(predictions)
    return f1_score, test_score
def parse_text(data):
    """Normalize tweets: lowercase, strip periods, drop stopwords / @mentions /
    'rt', replace URL-containing words with 'hyperlink', and split a trailing
    '!' into its own token.

    Returns exactly one output string per input row, keeping the result aligned
    with the labels array.
    """
    stops = set(stopwords.words('english'))
    parsed_tweets = []
    for element in data:
        text = element[0].lower().replace('.', '')
        parsed_tweet = []
        for word in text.split():
            # FIX: the old code compared the WHOLE tweet against the stopword
            # set (and could silently drop rows, misaligning texts and labels);
            # stopwords must be filtered word by word.
            if word in stops:
                continue
            if word.startswith('@') or word == 'rt':
                continue
            if 'http' in word:
                parsed_tweet.append('hyperlink')
            elif word.endswith('!'):
                parsed_tweet.append(word[:-1])
                parsed_tweet.append('!')
            else:
                parsed_tweet.append(word)
        parsed_tweets.append(' '.join(parsed_tweet))
    return parsed_tweets
def includes_hyperlink(tweet):
    """True when the normalized tweet contains the 'hyperlink' placeholder
    token inserted by parse_text for URLs."""
    return "hyperlink" in tweet
def includes_positive_hashtag(tweet):
    """True when the tweet contains any of the known positive-sentiment
    hashtags (expects lowercased input)."""
    hashtags = ['#trump2016', '#makeamericagreatagain', '#maga', '#alwaystrump', '#onlytrump']
    return any(hashtag in tweet for hashtag in hashtags)
if __name__ == '__main__':
    # Run the full pipeline: train on the labelled set, evaluate on collected tweets.
    print (advanced_classifier('./data/training_full.csv', './data/matching_tweets_utf.csv'))
815752b58dc12026c1694f77fa88e7c3b7c009e6 | Python | CeliaGM5/Incu2020 | /API/APIs/send_message.py | UTF-8 | 3,326 | 2.546875 | 3 | [] | no_license | from flask import Flask, request
import requests
import json
import pymongo
############## Bot details ##############
bot_name = 'extra_lessons@webex.bot'
roomId = "Y2lzY29zcGFyazovL3VzL1JPT00vZTQ4MjhlOTAtNzliZS0xMWVhLWE1YjctYWRiMmUxMDFiOWRi"
# NOTE(review): secret committed in source — move to an env var or config file.
token = 'M2FiNGM1NzItMGZhZi00OGUxLWFjMjItNzMxMDIyNzE3ZDU2NTE0YmE3YzEtNGRm_PF84_consumer'
header = {"content-type": "application/json; charset=utf-8",
          "authorization": "Bearer " + token}

############## MONGODB CONNECTIVITY ##############
url = "mongodb://svetlana:123456789@localhost:27017/Private_Lessons"
# FIX: the previous `with pymongo.MongoClient(url) as client:` closed the
# client as soon as the module finished loading, so every later
# `collection.find(...)` in the request handlers failed. The client must stay
# open for the lifetime of the app.
client = pymongo.MongoClient(url)
db = client.Private_Lessons
collection = db.Teachers_Information

############## Flask Application ##############
app = Flask(__name__)
@app.route("/", methods=["GET", "POST"])
def sendMessage():
    """Webex webhook endpoint: reply to the triggering message with teacher
    information pulled from MongoDB. Always returns a 200 response."""
    webhook = request.json
    url = 'https://api.ciscospark.com/v1/messages'
    msg = {"roomId": webhook["data"]["roomId"]}
    sender = webhook["data"]["personEmail"]
    subjects = ["english", "spanish", "maths", "chemistry", "physics"]
    output = ''
    message = getMessage()
    # Ignore the bot's own messages to avoid reply loops.
    if (sender != bot_name):
        if (message.lower() == "hello" or message.lower() == "hi"):
            msg["markdown"] = "Welcome to your personal private class teacher advisor! \n List of available options: \n" \
                              "- Available subjects: Subjects. \n" \
                              "- Available teachers for one particular subject: the name of the subject, for example: english. \n" \
                              "- Filter by type of classes: individual or collective."
        elif (message == "Subjects"):
            msg["markdown"] = "The offered subjects are: \n" \
                              "- English \n" \
                              "- Spanish \n" \
                              "- Maths. \n" \
                              "- Chemistry \n" \
                              "- Physics."
        elif (message.lower() in subjects):
            # list every teacher for the requested subject
            message = message.lower()
            cursor = collection.find({'Subject': message})
            for result in cursor:
                output = output + "- " + result['Name'] + ": " + result['Phone_number'] + ", " + result['Type'] + " classes, " + result['Price'] + " per student per hour \n"
            msg["markdown"] = output
        elif (message.lower() == "individual" or message.lower() == "collective"):
            # list every teacher offering the requested class type
            message = message.lower()
            output = "The teachers for " + message + " classes are: \n"
            cursor = collection.find({'Type': message})
            for result in cursor:
                output = output + "- " + result['Name'] + ": " + result['Phone_number'] + ", " + result['Subject'] + ", " + result['Price'] + " per student per hour \n"
            msg["markdown"] = output
        elif (message == "help"):
            output = "The possible commands are: \n" \
                     "- \"Subjects\" \n" \
                     "- the names of the subjects like \"spanish\", \"maths\", \"physics\", etc. \n" \
                     "- the type of the classes: \"individual\" or \"collective\" \n " \
                     ""
            msg["markdown"] = output
        else:
            msg["markdown"] = "Sorry! I didn't recognize that command. Type **help** to see the list of available commands."
        # FIX: only post when we actually built a reply; previously this ran
        # for the bot's own messages too, sending a payload with no markdown.
        requests.post(url, data=json.dumps(msg), headers=header, verify=True)
    # FIX: a Flask view must return a response; returning None made every
    # incoming webhook a 500 error.
    return ""
def getMessage():
    """Fetch the text of the Webex message that triggered the current webhook."""
    webhook = request.json
    message_url = 'https://api.ciscospark.com/v1/messages/' + webhook["data"]["id"]
    response = requests.get(message_url, headers=header, verify=True)
    return response.json()['text']
# NOTE(review): debug=True enables the interactive debugger — disable in production.
app.run(debug = True)
683b0b610b7fd8011d20565f2c60e1a7d4f460ad | Python | vksychev/PythonPlayground | /PG/file.py | UTF-8 | 863 | 3.21875 | 3 | [] | no_license | import os
import tempfile
class File:
    """A file wrapper supporting overwrite, concatenation with `+`,
    and repeated line-by-line iteration."""

    def __init__(self, path):
        # "a+" creates the file if missing; seek(0) so existing content is read
        self.path = path
        with open(path, "a+") as f:
            f.seek(0)
            self.file_lines = f.readlines()
        self.current = 0

    def write(self, string):
        """Overwrite the file with *string*.
        NOTE(review): the cached file_lines are not refreshed — confirm intent."""
        with open(self.path, "w") as f:
            f.write(string)

    def __add__(self, other):
        """Concatenate two files into a shared temp file and return it wrapped.
        NOTE(review): every sum reuses the same storage.data path, so chained
        additions overwrite each other's backing file."""
        storage_path = os.path.join(tempfile.gettempdir(), 'storage.data')
        with open(storage_path, "w") as fsum:
            fsum.writelines(self.file_lines + other.file_lines)
        return File(storage_path)

    def __iter__(self):
        # FIX: restart the cursor so the object can be iterated more than once;
        # previously a second pass yielded nothing.
        self.current = 0
        return self

    def __next__(self):
        if self.current >= len(self.file_lines):
            raise StopIteration
        self.current += 1
        return self.file_lines[self.current - 1]

    def __str__(self):
        return '{}'.format(self.path)
| true |
e5c0caf48350840d1b5b73276ec63db873a7bc35 | Python | eniche-akim/ChessAI | /play_chess.py | UTF-8 | 1,158 | 2.671875 | 3 | [] | no_license |
from __future__ import print_function
import os
import chess
import time
import chess.svg
import traceback
import base64
from state import State
import torch
from train_model import ConvNetwork
class Valuator:
    """Wraps the trained ConvNetwork to score chess positions and pick moves."""

    def __init__(self, board=None):
        # Load the weights on CPU regardless of the device they were saved from.
        vals = torch.load("Data/value.pth", map_location=lambda storage, loc: storage)
        self.model = ConvNetwork()
        self.model.load_state_dict(vals)

    def __call__(self, s):
        """Return the scalar network evaluation of state *s*."""
        brd = torch.tensor(s.serialize()).unsqueeze(0)  # add a batch dimension
        output = self.model(brd.float())
        return float(output.data[0][0])

    def get_best_move(self, s, v):
        """Evaluate every move from state *s* with valuator *v* and return
        (winning percentage, best move)."""
        rated_moves = []
        for move in s.edges():
            s.board.push(move)
            rated_moves.append((v(s), move))
            s.board.pop()
        # FIX: sort on the score only — chess.Move objects are not orderable,
        # so the previous tuple sort raised TypeError whenever scores tied.
        sorted_moves = sorted(rated_moves, key=lambda rated: rated[0],
                              reverse=s.board.turn)
        best_value, best_move = sorted_moves[0]
        return self.get_pourcentage(best_value), best_move

    def get_pourcentage(self, valeur):
        """Map an evaluation in [-1, 1] to a winning percentage in [0, 100]
        (both branches are the same linear map 50 - 50*valeur)."""
        if valeur < 0:
            pourcentage = 50 + (-valeur * 100 / 2)
        elif valeur > 0:
            pourcentage = (1 - valeur) * 100 / 2
        else:
            pourcentage = 50
        return pourcentage
| true |
f5732184f087edf933a6ff319228c350237db4f4 | Python | Deepakdk7/Playerset3 | /41.py | UTF-8 | 146 | 2.890625 | 3 | [] | no_license | ax=list(map(int,input().split()))
a=ax[0]
b=ax[1]
for i in range(0,a):
if (b**i)==a:
print('yes')
break
else:
print('no')
| true |
33475ef16b8d31a99ad6563df763386b16e72a89 | Python | szazyczny/MIS3640 | /Session05/turtle-demo.py | UTF-8 | 3,256 | 4.25 | 4 | [] | no_license | #TURTLE MODULE
# import turtle
# jack = turtle.Turtle() #importing module and using class called turtle
# jack.fd(100) #call a method, this means forward 100 pixels, draw a horizontal line
# jack.lt(90) #lt means left turn 90 degrees
# jack.fd(100)
# jack.lt(90)
# jack.fd(100)
# jack.lt(90)
# jack.fd(100) #to draw a square just add more fd lt, 4 repeating tasks
#EXERCISE 2,1 CREATE A FUNCTION to create square
# import turtle
# def square(t):
# for i in range(4):
# t.fd(100) #call a method, this means forward 100 pixels, draw a horizontal line
# t.lt(90) #lt means left turn 90 degrees
# jack = turtle.Turtle() #passing jack through the function
# square(jack) #function call
# turtle.mainloop()
#SIMPLE REPETITION, full loop
# for i in range(4):
# print('Hello!') #prints hello 4 times
# for i in range(4):
# jack.fd(100)
# jack.lt(90)
#EXERCISE 2.2, add length parameter (abstraction)
# import turtle
# def square(t, length):
# for i in range(4):
# t.fd(length)
# t.lt(90)
# jack = turtle.Turtle() #passing jack through the function
# square(jack, 200) #function call
# turtle.mainloop()
#EXERCISE 2.3
# import turtle
# def polygon (t, length, n):
# for i in range(n):
# t.fd(length)
# t.lt(360/n)
# jack = turtle.Turtle() #passing jack through the function
# polygon(jack, 100, 3) #function call
# turtle.mainloop()
# #EXERCISE 2.4
# import turtle
# import math
# def polygon (t, length, n):
# for i in range(n):
# t.fd(length)
# t.lt(360/n)
# def circle (t, r):
# circumference = 2 * math.pi * r
# n = 50
# length = circumference / n
# polygon(t, length, n)
# jack = turtle.Turtle()
# circle(jack, 150)
# turtle.mainloop()
#EXERCISE 2.3 CONT
# import turtle
# import math
# def polygon (t, length, n):
# for i in range(n):
# t.fd(length)
# t.lt(360/n)
# def circle (t, r):
# circumference = 2 * math.pi * r
# n = int(circumference / 3) + 1
# length = circumference / n
# polygon(t, length, n)
# jack = turtle.Turtle()
#polygon(t = jack, n = 50, length = 70) #keyword argument
# circle(jack, 150)
# turtle.mainloop()
#REFACTORING ARC
# import turtle
# import math
# def arc(t, r, angle):
# arc_length = 2 * math.pi * r * angle / 360
# n = int(arc_length / 3) + 1
# step_length = arc_length / n
# step_angle = angle / n
# for i in range(n):
# t.fd(step_length)
# t.lt(step_angle)
# jack = turtle.Turtle()
# arc(jack, 100, 180)
# turtle.mainloop()
import turtle
import math
def polyline(t, n, length, angle):
    """Draw n segments of the given length with turtle t, turning
    `angle` degrees to the left after each one."""
    for _ in range(n):
        t.fd(length)
        t.lt(angle)
def polygon(t, n, length):
    """Draw a regular n-gon with sides of the given length using turtle t."""
    exterior_angle = 360.0 / n
    polyline(t, n, length, exterior_angle)
def arc(t, r, angle):
    """Approximate an arc of radius r spanning `angle` degrees with short
    straight segments (roughly one per 3 pixels of arc length)."""
    arc_length = 2 * math.pi * r * angle / 360
    n = int(arc_length / 3) + 1
    polyline(t, n, arc_length / n, float(angle) / n)
def circle(t, r):
    """Draw a full circle of radius r as a 360-degree arc."""
    arc(t, r, 360)
# Demo: draw a 100-pixel-radius circle, then hand control to the Tk event loop.
jack = turtle.Turtle()
circle(jack, 100)
turtle.mainloop()
| true |
8f85e62bbe93cfbbb64527373f5bb6fa214bb979 | Python | JuliaYu2002/HunterDE | /Comp Sci 127 hw/attendanceGraph_jy.py | UTF-8 | 561 | 3.390625 | 3 | [] | no_license | #Name: Julia Yu
#Date: October 18, 2019
#Email: julia.yu83@myhunter.cuny.edu
#This program plots attendance on a graph from a specified file and saves it to another
import pandas as pd
import matplotlib.pyplot as plt
# Prompt for the CSV to read and the image file to write.
inFile = input("Enter name of input file: ")
outFile = input("Enter name of output file: ")
# Expected columns: Date, Present, Enrolled — TODO confirm against the data file.
attend = pd.read_csv(inFile)
# Parse dates (stringified first so integer-coded dates parse too).
attend["Date"] = pd.to_datetime(attend["Date"].apply(str))
# Attendance rate as a percentage.
attend["% Attending"] = (attend["Present"] / attend["Enrolled"]) * 100
attend.plot(x = "Date", y = "% Attending")
# Grab the current figure and save it instead of displaying it.
graph = plt.gcf()
graph.savefig(outFile)
# plt.show()
8fe81d7d9431106a801486d2f8dfc44150d804b0 | Python | athikrishnarao/Python_Anaconda_code | /Pycharm_Program/Program/Test.py | UTF-8 | 597 | 3.796875 | 4 | [] | no_license | """a=int(input("enter number"))
if a>1:
for x in range(2,a):
if(a%x)==0:
print("not prime")
break
else:
print("Prime")
else:
print("not prime")"""
class airport:
    # Minimal interactive check-in flow; all data comes from stdin prompts.
    def checkin(self):
        """Prompt for passenger details, echo them, then chain to boardingpass()."""
        name = input("What is Your Name : ")
        flight_name = input("Flight Name : ")
        boarding_time = int(input("Boarding Time :" ))
        print(name,flight_name,boarding_time)
        self.boardingpass()
    def boardingpass(self):
        """Prompt for the ticket number.

        NOTE(review): ticket_no is read but never stored or used.
        """
        ticket_no = int(input("Enter the Flight Ticket No : "))
# Driver: run a single check-in at import time.
athi = airport()
athi.checkin()
#athi.boardingpass()
abb98dae65e7ed2dfe9bf7d43c6dec01e54b6b37 | Python | Uabanur/OddJobs | /Python/MachineLearning/Project2/LinearRegression_AttributeResiduals.py | UTF-8 | 2,573 | 2.625 | 3 | [] | no_license | import matplotlib.pyplot as plt
from scipy.io import loadmat
import sklearn.linear_model as lm
from sklearn import cross_validation
from toolbox_02450 import feature_selector_lr, bmplot
import csv
import numpy as np
count = 214
# labels = ['RI', 'Na', 'Mg', 'Al', 'Si', 'K', 'Ca', 'Ba', 'Fe', 'Type']
labels = ['Na', 'Mg', 'Al', 'Si', 'K', 'Ca', 'Ba', 'Fe']
table = np.mat(np.empty((count, 11)))
addCombinations = False;
transformMean = True;
transformStd = True;
# Load xls sheet with data
with open('glass_data.csv') as csvfile:
reader = csv.reader(csvfile)
# Populate X
for i, row in enumerate(reader):
table[i, :] = np.array(row)
# Transform data
if transformMean:
table -= table.mean(axis=0)
if transformStd:
table /= table.std(axis=0)
# X: All the glass contents
X = table[:,2:-1]
# y: the refractive index
y = table[:,1]
N, M = X.shape
if addCombinations:
# Initialize combination data
combinations = np.zeros((N, M**2))
labelcombinations = [None]*M**2
for j in range(M):
for i in range(M):
combinations[:,i + j*M] = np.multiply(X[:,i], X[:,j]).reshape(1,-1)
labelcombinations[i + j*M] = labels[j] + " | " + labels[i]
# Add all combinations of attributes
X = np.hstack((X,combinations))
labels = np.hstack((labels,labelcombinations))
Error_nofeatures = np.square(y-y.mean()).sum()/y.shape[0]
selected_features, features_record, loss_record = feature_selector_lr(X, y, 10)
model = lm.LinearRegression(fit_intercept=True).fit(X[:,selected_features], y)
equation = "y = ";
for i in range(len(model.coef_[0])):
if(model.coef_[0][i] < 0):
equation += " - "
elif (i>0):
equation += " + "
equation += "{0:.2e} * {1}".format(abs(model.coef_[0][i]), labels[selected_features[i]])
print("Final model:")
print(equation)
Rsquared = 1-loss_record/Error_nofeatures
data = X[:,selected_features]
m = lm.LinearRegression(fit_intercept=True).fit(data, y)
y_est= m.predict(data)
residual=y-y_est
tabnr = len(selected_features)
plt.figure(0, figsize=(10, 2.5))
plt.title('Residual error vs. Attributes for features selected')
plt.subplots_adjust(left=0.1, bottom=0.25, right=0.95, top=0.95, wspace=0.2, hspace=0.3)
for i in range(0,data.shape[1]):
plt.subplot(1,tabnr,i+1)
plt.plot(data[:,i],residual,'.')
plt.xlabel(labels[selected_features[i]], fontsize = 15)
plt.grid(True)
# plt.ylabel('residual error')
plt.text(-37, 0.6, 'Residual error', fontsize=15, rotation=90)
plt.savefig('reg_residuals.pdf')
# file = open(filename+'.txt',"w")
# file.write(filename + "\n" + equation)
# file.close()
plt.show()
| true |
6159880ff07097192abb0dd39bbe243ea9d7b1e5 | Python | bennytzang/python-traning | /multimatrix.py | UTF-8 | 835 | 3.359375 | 3 | [] | no_license | import numpy as np
def matrixMul(A, B):
    """Multiply matrix A (m x k) by matrix B (k x n); return the m x n
    product as a list of lists.

    Bug fix: the inner loop previously assigned with
    `res[i][j] = A[i][k] * B[k][j]`, overwriting the cell each pass and
    keeping only the LAST term of the dot product; it must accumulate
    with `+=`.
    """
    res = [[0] * len(B[0]) for i in range(len(A))]
    for i in range(len(A)):
        for j in range(len(B[0])):
            for k in range(len(B)):
                # Accumulate the dot product of row i of A and column j of B.
                res[i][j] += A[i][k] * B[k][j]
    return res
def matrixMul2(A, B):
    """Return the matrix product of A and B via explicit row/column dot products."""
    columns = list(zip(*B))
    product = []
    for row in A:
        product.append([sum(x * y for x, y in zip(row, col)) for col in columns])
    return product
# Build two 100x100 matrices from simple linear formulas of the indices.
a= np.zeros(shape=(100,100))
b= np.zeros(shape=(100,100))
for i in range (100):
    for j in range (100):
        a[i,j] = 12.2* (i+1) - 3.8* (j+1)
        b[i,j] = 65.1+ 3.3* (i+1) - 20.2 * (j+1)
print(a)
print('---------')
print(b)
#a = [[1,2], [3,4], [5,6], [7,8]]
#b = [[1,2,3,4], [5,6,7,8]]
#print (matrixMul(a,b))
#print (matrixMul(b,a))
#print ("-"*90)
#print (matrixMul2(a,b))
# Pure-Python multiply of the two NumPy arrays (rows iterate as 1-D
# arrays, so zip-based dot products still work — just slowly).
print (matrixMul2(b,a))
#print ("-"*90)
#print(map(list,dot(a,b)))
#print(map(list,dot(b,a)))
| true |
d49b13782c55b12c125d4098b74b7c2905ccf5e0 | Python | timedcy/ndnlp-penne-19336a258d30 | /examples/rnnlm2.py | UTF-8 | 1,362 | 2.9375 | 3 | [
"MIT"
] | permissive | """
Another implementation of a deep recurrent language model. This one
stacks RNNs by computing the entire output sequence of one RNN before
feeding to the next RNN up.
"""
import sys, time
sys.path.append("..")
from penne import *
from penne import lm
from penne import recurrent
import numpy
# NOTE: this file is Python 2 (xrange, print statement) and depends on
# the `penne` toolkit plus its lm/recurrent helpers.
hidden_dims = 100
depth = 1
#data = lm.read_data("../data/inferno.txt")
data = lm.read_data("../data/ptb.train.txt")[:420]
vocab = lm.make_vocab(data)
numberizer = lm.Numberizer(vocab)
# First LSTM maps word ids to the hidden size; the negative input size
# presumably flags a one-hot/embedding input to penne — TODO confirm.
layers = [recurrent.LSTM(hidden_dims, -len(vocab), hidden_dims)]
for i in xrange(depth-1):
    layers.append(recurrent.LSTM(hidden_dims, hidden_dims, hidden_dims))
output_layer = make_layer(hidden_dims, len(vocab), f=logsoftmax)
trainer = SGD(learning_rate=0.01)
for epoch in xrange(1):
    epoch_loss = 0.
    n = 0
    for iteration, words in enumerate(data):
        # Prepend <s>; inputs are all tokens but the last, targets all but the first.
        nums = [numberizer.numberize(word) for word in ["<s>"]+words]
        xs = nums[:-1]
        # Run the full sequence through each stacked RNN in turn.
        for layer in layers:
            xs = layer.transduce(xs)
        # Compute all the output layers at once
        o = output_layer(stack(xs))
        w = stack([one_hot(len(vocab), num) for num in nums[1:]])
        # Cross-entropy: negative sum of gold log-probabilities.
        loss = -einsum("ij,ij->", w, o)
        sent_loss = trainer.receive(loss)
        epoch_loss += sent_loss
        n += len(words)
    print "epoch=%s ppl=%s" % (epoch, numpy.exp(epoch_loss/n))
| true |
45d2f17079bf8fb6c40e023e84d6f1cfa262d6c6 | Python | malavika545/python | /assignment-2/listproduct.py | UTF-8 | 267 | 3.5625 | 4 | [] | no_license | '''13. Compute given Num_tuple = (5, 6,8 ,3,9,1) to get desired output
Output: Out_list = [5, 30, 240, 720, 6480, 6480]
'''
# Build the running (cumulative) products of the tuple entries:
# (5, 6, 8, ...) -> [5, 30, 240, ...]
num_tuple = (5, 6, 8, 3, 9, 1)
out_list = []
running_product = 1
for value in num_tuple:
    running_product *= value
    out_list.append(running_product)
print("out_list: ",out_list)
59a608e0d7ba5fba9841711121336292506c95f2 | Python | mkomod/cv_docs | /python-examples/src/06_drawing_with_mouse.py | UTF-8 | 454 | 2.84375 | 3 | [] | no_license | import numpy as np
import cv2 as cv
# Near-white 512x512 BGR canvas.
img = np.full((512, 512, 3), 251.0)
cv.namedWindow('image')
def draw_circle(event, x, y, flags, params):
    ''' Callback function that draws a circle '''
    # On left double-click, draw a blue circle (radius 100, 1px outline)
    # centred on the click position.
    if event == cv.EVENT_LBUTTONDBLCLK:
        cv.circle(img, (x, y), 100, (255, 0, 0), 1)
cv.setMouseCallback('image', draw_circle)
# Redraw loop: refresh the window every 20 ms until Esc is pressed.
while(True):
    cv.imshow('image', img)
    if cv.waitKey(20) & 0xFF == 27: # esc key
        break
cv.destroyAllWindows()
eeb4e16044ee4a310daeb8775cf2560e16c6fbc1 | Python | NicholasAKovacs/SkillsWorkshop2018 | /Week01/Problem03/nkruyer_03.py | UTF-8 | 796 | 3.640625 | 4 | [
"BSD-3-Clause"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 11 21:19:54 2018
@author: nkruyer3
"""
#Week 1 Assignment: Problem 3 - Nick Kruyer
#Correct anser 6857
n = 600851475143
#define function to determine if a number is prime
#if test = 1, number is not prime. If test = 0, number is prime
def prime(x):
    """Primality flag used by the factor search.

    Returns 0 if x is prime, 1 if x is composite — note the INVERTED
    convention kept from the original code.  Values below 2 return 0,
    matching the original behaviour.

    Improvement: trial division only needs to run up to sqrt(x) — any
    composite has a divisor in that range — so results are unchanged
    but the search is far faster for large x.
    """
    if x < 2:
        return 0
    for i in range(2, int(x ** 0.5) + 1):
        if x % i == 0:
            return 1
    return 0
def factors(y):
    """Collect the divisors of y and return them sorted.

    Scans upward from 2, recording each divisor together with its
    cofactor, and stops as soon as the scan reaches a value that was
    already recorded (i.e. it has crossed sqrt(y)).  A perfect-square
    root is recorded twice, as in the original.
    """
    found = [1, y]
    for candidate in range(2, y):
        if candidate in found:
            break
        if y % candidate == 0:
            found.append(candidate)
            found.append(y // candidate)
    return sorted(found)
# Walk the sorted factor list from the largest end (negative indexing)
# and print the first one that is prime — i.e. n's largest prime factor.
facts = factors(n);
for i in range(1,len(facts)):
    test = prime(facts[-i])
    if test == 0:
        print(facts[-i])
        break
| true |
27f22a839fd9798d76db8c4f6354fa81f46ef2cf | Python | bennettyardley/martingale-sim | /start.py | UTF-8 | 1,948 | 3.53125 | 4 | [] | no_license | '''
start = float(input("USD: $"))
start = start * 0.0058
x = start
total = 0
win = .495
lose = .505
print("\n" + str(start) + "\n")
print("BET\t\tLOSS\t\tCHANCE")
print(str(start) + "\t\t" + str(1) + "\t\t50.5%")
for i in range(16):
x = x * 2
ud = x * 171.40
total = total + x
chance = .505 ** (i+2)
percent = chance * 100
print(str(round(ud,3)) + "\t\t" + str(i+2) + "\t\t" + str(round(percent,4)) + "%")
usd = total * 171.40
print("\n\nTotal: $" + str(usd) + "\n\nTotal: ETH" + str(total))
import random
def roll():
dice = random.randint(1,1000)
if dice <= 505:
return False
elif dice > 505:
return True
bal = 80
firstBet = 0.00058000
bet = firstBet
previousWagerAmount = firstBet
previousWager = 'win'
wager_count = 10000
currentWager = 1
while currentWager <= wager_count:
if previousWager == 'win':
if roll():
bal += bet
else:
bal -= bet
previousWager = 'loss'
previousWagerAmount = bet
if bal < 0:
print("LOSS")
elif previousWager == 'loss':
if roll():
bet = previousWagerAmount * 2
bal += bet
bet = firstBet
previousWager = 'win'
else:
bet = previousWagerAmount * 2
bal -= bet
if bal < 0:
print("LOSS")
previousWager = 'loss'
previousWagerAmount = bet
if bal < 0:
print("LOSS")
currentWager += 1
print(bal)
'''
# Scratch calculations for martingale bet sizing.  Only the LAST
# assignment to `check` takes effect; the earlier ones are kept as a
# record of other win-chance scenarios (49.5%, 51%, 75%).
check = 0.00000071
check = 0.025598871353022815 #49.5
check = 0.04790433229852053 #51
check = 157.6878280774863 #75
balance = 53684.7242557345
# NOTE(review): `sum` shadows the builtin of the same name.
sum = 0
losses = 20.0
# Double the bet 20 times (martingale) and total the amount wagered.
for i in range(20):
    check = check * 2
    print(check)
    sum += check
print(sum)
# Starting stake so the balance survives `losses` doublings at a 1.32x
# growth factor — TODO confirm the 1.3200 constant's origin.
x = balance / 1.3200 ** (losses + 1)
print(x)
| true |
2e60fd869db490a7e443bf618e1b18cfeef6da91 | Python | maryraven/cfg | /main.py | UTF-8 | 670 | 3 | 3 | [] | no_license | from __future__ import division # Python 2 users only
import nltk, re, pprint
from nltk import word_tokenize
# Read one arithmetic sentence per line, tokenised on whitespace.
with open('input_file.txt') as f:
    raw = f.readlines()
# NOTE(review): `input` shadows the builtin of the same name.
input = [l.strip().split() for l in raw]
print(input)
# http://www.nltk.org/book/ch08.html
# Grammar for spelled-out arithmetic, e.g. "two plus three times four".
calc_grammar = nltk.CFG.fromstring("""
S -> N OPP
OPP -> OP N | OPP OP N
N -> 'zero' | 'one'| 'two' | 'three' | 'four' | 'five' | 'six' | 'seven' | 'eight' | 'nine'
OP -> 'plus' | 'minus' | 'times'| 'divide'
""")
parser = nltk.ChartParser(calc_grammar)
# Print every parse tree for every sentence.
for sentence in input:
    for tree in parser.parse(sentence):
        print(tree)
| true |
15c175639412294c3168f879f3186b438a8234c8 | Python | FLNacif/URIProblems | /src/1116.py | UTF-8 | 207 | 3.828125 | 4 | [] | no_license | quantidade = int(input())
# URI 1116: for each of `quantidade` test cases, read two integers and
# print x/y to one decimal place, or an error message when y is zero.
for i in range(quantidade):
    x,y = input().split(" ")
    x = int(x)
    y = int(y)
    if y == 0:
        # Message required verbatim by the judge ("impossible division").
        print("divisao impossivel")
    else:
        print("%.1f"%(x/y))
2856e1eb0b9003d7c91d0040ca1d06d3b8eae323 | Python | shasank27/Tic-Tac-Toe | /main.py | UTF-8 | 2,947 | 3.5 | 4 | [] | no_license | lis = ['1', '2', '3', '4', '5', '6', '7', '8', '9']
def line():
    """Print an empty spacer row of the board (cell separators only)."""
    print("     |     |")
def inline(lis, ind):
    """Print one board row: the three cells starting at index `ind`."""
    print("  {}  |  {}  |  {}".format(lis[ind], lis[ind + 1], lis[ind + 2]))
def stline():
    """Print the horizontal divider between board rows."""
    print("---------------")
def printmat():
    """Render the 3x3 board from the global `lis` (no divider after the last row)."""
    for i in range(0, 9, 3):
        line()
        inline(lis, i)
        if i != 6:
            stline()
# Show the initial numbered board.
printmat()
def check():
    """Scan every winning line (rows, columns, both diagonals).

    Uses the globals `lis` (the board) and `a` (player 1's mark).
    Prints the winner and returns 1 if any player has three in a row,
    otherwise returns 0.  Unmarked cells hold distinct digits '1'-'9',
    so three equal cells always belong to one player.

    Fix: the second diagonal previously printed "PLAYER n WON" while
    every other line printed "PLAYER n WINS"; the messages are now
    consistent, and the eight near-identical branches are collapsed
    into one loop over the winning index triples.
    """
    win_lines = (
        (0, 1, 2), (3, 4, 5), (6, 7, 8),   # rows
        (0, 3, 6), (1, 4, 7), (2, 5, 8),   # columns
        (0, 4, 8), (2, 4, 6),              # diagonals
    )
    for i, j, k in win_lines:
        if lis[i] == lis[j] == lis[k]:
            if lis[i] == a:
                print("PLAYER 1 WINS")
            else:
                print("PLAYER 2 WINS")
            return 1
    return 0
# --- Interactive game loop ---------------------------------------------
# Player 1 picks a mark; player 2 gets the other one.
a = input("Player 1 choose your mark 'X' or '0' : ")
b = None
if a == 'X':
    b = '0'
else:
    b = 'X'
print("Mark for Player1 is {}".format(a))
print("Mark for Player2 is {}".format(b))
print("Rules : First Player1 will choose a number which will be replaced ")
print("by his/her mark then Player2 will choose a number which will be ")
print("replaced by his/her mark and it'll continue subsequently in the later stage of the game")
# `whi` counts completed turns; even turns belong to player 1.  On any
# invalid input it is decremented so the same player retries.
whi = 0
while whi < 9:
    if whi % 2 == 0:
        pos = int(input("Player1 choose your position (1-9) : ")) - 1
        if pos<0:
            print("Enter between 1-9.")
            whi -= 1
        elif pos>=9:
            print("Enter between 1-9.")
            whi -= 1
        else:
            # Cell already holding the opponent's mark is taken.
            if (lis[pos]) == b:
                print("Enter positions that are not used")
                whi -= 1
            else:
                lis[pos] = a
    else:
        pos = int(input("Player2 choose your position (1-9) : ")) - 1
        if pos < 0:
            print("Enter between 1-9.")
            whi -= 1
        elif pos >= 9:
            print("Enter between 1-9.")
            whi -= 1
        else:
            if (lis[pos]) == a:
                print("Enter positions that are not used")
                whi -= 1
            else:
                lis[pos] = b
    # Redraw and test for a winner after every turn.
    printmat()
    check1 = check()
    if check1 == 1:
        break
    whi += 1
# All nine cells filled without a winner: draw.
if check1 == 0 and whi == 9:
    print("No one wins")
| true |
036408ba33973c449c5c62f96f8cd59e05374c90 | Python | Patton97/Patton97.github.io | /Research/Blockly/base.py | UTF-8 | 5,157 | 2.828125 | 3 | [] | no_license | from microbit import *
from random import randint
import neopixel
import music
I2caddr = 0x10
isRunning = True
isCrashed = False
isComplete = False
# --------------------------------------------------------------------------------
# UTILITY FUNCTIONS --------------------------------------------------------------
# --------------------------------------------------------------------------------
def waitFor(secs):
    """Block for the given number of seconds (micro:bit sleep is in ms)."""
    sleep(secs * 1000)
def wait():
    """Block for one second."""
    waitFor(1)
def isJourneyComplete():
    """True when the logical position equals the destination.

    Reads the globals `currentPos` and `myDestination` (both [x, y]),
    which are expected to be defined by the appended user code.
    """
    global currentPos
    xPos = currentPos[0]
    yPos = currentPos[1]
    global myDestination
    xDest = myDestination[0]
    yDest = myDestination[1]
    return xPos == xDest and yPos == yDest
def isValidPosition(xPos, yPos):
    """True when (xPos, yPos) is inside the global `level` grid and the
    cell is non-zero (zero cells are off-limits)."""
    if xPos < 0 or yPos < 0 or xPos >= len(level[0]) or yPos >= len(level):
        return False
    return not level[yPos][xPos] == 0
def isSafe_Ahead():
    """True when the cell one step along `currentDir` is valid."""
    global currentPos
    global currentDir
    xTgt = currentPos[0] + currentDir[0]
    yTgt = currentPos[1] + currentDir[1]
    return isValidPosition(xTgt, yTgt)
def isSafe_Left():
    """True when the cell to the robot's left is valid.

    The left-hand neighbour is the current direction rotated 90°
    (sign convention depends on the grid's y-axis orientation —
    TODO confirm against isSafe_Right / turnLogical).
    """
    global currentPos
    xTgt = currentPos[0]
    yTgt = currentPos[1]
    global currentDir
    if currentDir[0] == 0:
        xTgt += currentDir[1]
    else:
        yTgt -= currentDir[0]
    return isValidPosition(xTgt, yTgt)
def isSafe_Right():
    """True when the cell to the robot's right is valid (mirror of isSafe_Left)."""
    global currentPos
    xTgt = currentPos[0]
    yTgt = currentPos[1]
    global currentDir
    if currentDir[0] == 0:
        xTgt -= currentDir[1]
    else:
        yTgt += currentDir[0]
    return isValidPosition(xTgt, yTgt)
def updateStatus():
    """Refresh the crash/complete flags and show the matching face/LED colour."""
    global isRunning
    global isComplete
    global isCrashed
    global currentPos
    # update flags
    if isJourneyComplete():
        isComplete = True
    if not isValidPosition(currentPos[0], currentPos[1]):
        isCrashed = True
    # update display
    if isRunning:
        display.show(Image.HAPPY)
        rgb_all(125, 75, 0) # orange
        return
    if isComplete:
        display.show(Image.YES)
        rgb_all(0, 75, 0) # green
        return
    if not isRunning and not isComplete:
        display.show(Image.NO)
        rgb_all(125, 0, 0) # red
# --------------------------------------------------------------------------------
# MOTOR SETUP --------------------------------------------------------------------
# --------------------------------------------------------------------------------
def motor(directionL, speedL, directionR, speedR):
    """Drive both wheel motors over I2C.

    directionL/directionR: 0 = forward, 1 = back (per the comments at
    the movement/turning sections); speeds are 0-255.  The 5-byte
    payload layout (register 0x00, then dir/speed pairs) matches the
    motor controller at I2caddr — TODO confirm against its datasheet.
    """
    buf = bytearray(5)
    buf[0] = 0x00
    buf[1] = directionL
    buf[2] = speedL
    buf[3] = directionR
    buf[4] = speedR
    i2c.write(I2caddr, buf)
# --------------------------------------------------------------------------------
# MOVEMENT -----------------------------------------------------------------------
# --------------------------------------------------------------------------------
# direction: 0 = forward, 1 = back
MOVESPEED = 35 # possible speed range: 0 - 255
def movePhysical(direction):
    """Run both motors one step in `direction` (0 = forward, 1 = back),
    then stop; each phase lasts one second."""
    global MOVESPEED
    motor(direction, MOVESPEED, direction, MOVESPEED)
    wait()
    motor(0,0,0,0)
    wait()
def moveLogical(direction):
    """Advance the tracked grid position one cell along `currentDir`.

    NOTE(review): `direction` is ignored, so moveBackward() still
    advances the logical position forward — likely desyncs the model
    when reversing; confirm intent before changing.
    """
    global currentPos
    global currentDir
    currentPos[0] += currentDir[0]
    currentPos[1] += currentDir[1]
def move(direction):
    """Move one step physically and logically, then refresh status.

    After a crash only the status display is refreshed; no further
    movement is performed.
    """
    global isCrashed
    if isCrashed:
        updateStatus()
        return
    movePhysical(direction)
    moveLogical(direction)
    updateStatus()
def moveForward():
    """Move one cell forward."""
    move(0)
def moveBackward():
    """Move one cell backward (see moveLogical note)."""
    move(1)
# --------------------------------------------------------------------------------
# TURNING ------------------------------------------------------------------------
# --------------------------------------------------------------------------------
# direction: 0 = left, 1 = right
TURNSPEED = 25 # possible speed range: 0 - 255
def turnPhysical(direction):
    """Spin in place for one second (0 = left, 1 = right) by running the
    wheels in opposite senses, then stop."""
    global TURNSPEED
    if bool(direction):
        motor(0, TURNSPEED, 1, TURNSPEED)
    else:
        motor(1, TURNSPEED, 0, TURNSPEED)
    wait()
    motor(0,0,0,0)
    wait()
def turnLogical(direction):
    """Rotate the tracked heading `currentDir` by 90° (0 = left, 1 = right).

    Which sign convention corresponds to which visual rotation depends
    on the grid's y-axis orientation — TODO confirm.
    """
    global currentDir
    if bool(direction):
        currentDir = [-currentDir[1], currentDir[0]]
    else:
        currentDir = [ currentDir[1], -currentDir[0]]
def turn(direction):
    """Turn 90° physically and logically; no-op after a crash."""
    global isCrashed
    if isCrashed:
        return
    turnPhysical(direction)
    turnLogical(direction)
def turnLeft():
    """Turn 90° to the left."""
    turn(0)
def turnRight():
    """Turn 90° to the right."""
    turn(1)
# --------------------------------------------------------------------------------
# NEOPIXEL SETUP -----------------------------------------------------------------
# --------------------------------------------------------------------------------
# Neopixel strip is on pin15 with a length of 4 pixels
np = neopixel.NeoPixel(pin15, 4)
def rgb(pixel_id, red, green, blue):
    """Set one pixel of the neopixel strip to (red, green, blue) and
    push the change; out-of-range indices are silently ignored.

    Bug fix: the guard used `pixel_id > len(np)`, which let
    pixel_id == len(np) through and then raised IndexError on the
    4-pixel strip.  Valid indices are 0 .. len(np)-1, so anything
    >= len(np) must be rejected.
    """
    if pixel_id >= len(np) or pixel_id < 0:
        return
    np[pixel_id] = (red, green, blue)
    np.show()
def rgb_all(red,green,blue):
    """Blank the whole strip, then light every pixel with (red, green, blue)."""
    # first pass: clear every pixel
    for pixel_id in range(len(np)):
        rgb(pixel_id, 0, 0, 0)
    # second pass: apply the requested colour
    for pixel_id in range(len(np)):
        rgb(pixel_id, red, green, blue)
# --------------------------------------------------------------------------------
# MAIN CODE ----------------------------------------------------------------------
# --------------------------------------------------------------------------------
# The user's generated code will be appended below
| true |
b09ee3267fc5b14eedfb18a4f61c5794a08443d9 | Python | wangjs/Lintcode | /(407)加一.py | UTF-8 | 852 | 4.21875 | 4 | [] | no_license | '''
给定一个非负数,表示一个数字数组,在该数的基础上+1,返回一个新的数组。该数字按照大小进行排列,最大的数在列表的最前面。
思路:将数字数组转换成整数,然后求得加一的值,将这个值再转换成数字数组,最大的数在数组的前面(采用倒序一下就可以了)
'''
class Solution:
    # @param {int[]} digits a number represented as an array of digits
    # @return {int[]} the result
    def plusOne(self, digits):
        """Add one to the number represented by `digits` (most
        significant digit first) and return the resulting digit list.

        Bug fix: the digit-extraction loop used `num /= 10`, which is
        FLOAT division in Python 3, so `num` drifted through floats and
        the loop never terminated cleanly; integer division (`//=`) is
        required.
        """
        result = []
        num = 0
        # Collapse the digit array into a single integer.
        for d in digits:
            num = num * 10 + d
        num += 1
        # Peel digits back off, least significant first, then reverse.
        while num != 0:
            result.append(num % 10)
            num //= 10
        result.reverse()
        return result
| true |
3095dfa9dbc93fc3b855f6bf2077c2a700060312 | Python | enaut/snake | /game.py | UTF-8 | 5,712 | 3.34375 | 3 | [] | no_license | from tkinter import *
from time import sleep
class Spiel():
    """
    This class makes programming a pixel game with Python easy.
    The file was created for lessons at the Waldorf school
    Uhlandshoehe (Stuttgart).
    The source code is published under the terms of the GPL v3
    or later.
    """
    # NOTE(review): these are CLASS attributes, so `blocks`/`texts` are
    # shared across instances; __init__ rebinds steps/keylisteners per
    # instance but not blocks/texts.
    border = 0
    blocks =[]
    texts = []
    steps = []
    keylisteners = []
    debug = True
    def __init__(self, size=20, pixelsize=20, debug=True, speed=200):
        """
        The game is the base class; creating it opens the window and
        initialises the game world.
        Optional parameters:
        * size: the size of the playing field (cells per side)
        * pixelsize: the size of each pixel in screen units
        * debug: show the cell value as a number on each pixel
        * speed: the game speed (delay between steps, in ms)
        """
        self.size = size
        self.pixelsize = pixelsize
        self.speed = speed
        self.debug = debug
        self.game = Tk()
        # Flat level array of size*size cells, all initially 0 (empty).
        self.level = [0]*size*size
        self.levelindex = list(range(size*size))
        self.steps = []
        self.keylisteners = []
        self.canvas = Canvas(self.game,
                             width = size*pixelsize,
                             height = size*pixelsize)
        self.canvas.pack()
    def addStep(self, function):
        """ Add a function that is executed on every game step. """
        self.steps.append(function)
    def addKeylistener(self, function):
        """Add a function that is executed on a key press."""
        self.keylisteners.append(function)
    def createlevel(self):
        """ Initialise the level and its pixel rectangles (plus the
        per-cell debug labels when debug is on).
        """
        for i in self.levelindex:
            x,y = self.number2coord(i)
            block = self.canvas.create_rectangle(x,
                            y,
                            x+self.pixelsize-self.border,
                            y+self.pixelsize-self.border,
                            fill="white")
            self.blocks.append(block)
        if self.debug:
            for i in self.levelindex:
                x,y = self.number2coord(i)
                tex = self.canvas.create_text(x+self.pixelsize/2 - 3,
                                y+self.pixelsize/2,
                                fill="white",
                                text=str(self.level[i]),
                                font=("Courier 11 bold"))
                self.texts.append(tex)
    def create(self):
        """
        Create and draw the game (level rectangles plus key bindings).
        """
        self.createlevel()
        self.registerkeys()
    def start(self):
        """
        Start the game: schedule the first animation step and enter
        the Tk main loop (blocks until the window closes).
        """
        self.game.after(10, self.animate)
        self.game.mainloop()
    def react(self, event):
        #print("Reacting to key press: ", event.keysym)
        "React to a key press by notifying every registered listener."
        for f in self.keylisteners:
            f(event.keysym)
    def registerkeys(self):
        """ Register all keys the game reacts to (arrows, WASD, Esc). """
        self.game.bind('<Escape>', self.react)
        self.game.bind('<Up>', self.react)
        self.game.bind('<Down>', self.react)
        self.game.bind('<Left>', self.react)
        self.game.bind('<Right>', self.react)
        self.game.bind('a', self.react)
        self.game.bind('w', self.react)
        self.game.bind('s', self.react)
        self.game.bind('d', self.react)
    def draw(self):
        """ Draw the playing field: colour each cell by its level value
        (>0 blue, -1 red, -2 purple, <=-3 green, 0 white). """
        for i in self.levelindex:
            if self.level[i] > 0:
                self.canvas.itemconfig(self.blocks[i],
                                       fill = 'Blue',
                                       width=0)
            elif self.level[i] == -1:
                self.canvas.itemconfig(self.blocks[i],
                                       fill = 'red',
                                       width=0)
            elif self.level[i] == -2:
                self.canvas.itemconfig(self.blocks[i],
                                       fill = 'purple',
                                       width=0)
            elif self.level[i] <= -3:
                self.canvas.itemconfig(self.blocks[i],
                                       fill = 'green',
                                       width=0)
            else:
                self.canvas.itemconfig(self.blocks[i],
                                       fill = 'white',
                                       width=0)
        if self.debug:
            for i in self.levelindex:
                self.canvas.itemconfig(self.texts[i], text=str(self.level[i])if self.level[i] else "")
    def animate(self):
        """ Perform one animation step: run all step callbacks, redraw,
        and reschedule itself after `speed` ms. """
        self.game.update()
        for f in self.steps:
            f()
        # NOTE(review): the bare except silently hides any draw() error.
        try:
            self.draw()
        except:
            pass
        self.game.after(self.speed, self.animate)
    def number2coord(self, num):
        """ Convert a flat cell index into an x,y pixel coordinate. """
        x = (num%self.size) * self.pixelsize
        y = int(num/self.size) * self.pixelsize
        return x,y
    def coord2number(self,x,y):
        """ Convert an x,y cell coordinate into a flat index. """
        # NOTE(review): message typo ("coordiante") kept — runtime string.
        if x>self.size-1 or y>self.size-1:
            raise Exception("Not a coordiante numbers too big!")
        else:
            return self.size*y + x
    def exit(self):
        """Quit the game and destroy the window."""
        print("beende das Spiel.")
        self.game.destroy()
| true |
b75202401a8c3043571201997b15e414b7b5905d | Python | ryanmcf10/game-engine | /env/tools/pathfinder.py | UTF-8 | 2,683 | 3.078125 | 3 | [] | no_license | import env.tools.grid as grid
import heapq
def heuristic(a, b):
    """Manhattan distance between grid points a and b."""
    ax, ay = a
    bx, by = b
    return abs(ax - bx) + abs(ay - by)
def a_star_search(graph, start, goal):
    """Run A* over `graph` from `start` toward `goal`.

    Returns (came_from, cost_so_far): predecessor links and the best
    known path cost for every node expanded before the goal was popped.
    """
    frontier = PriorityQueue()
    frontier.put(start, 0)
    came_from = {start: None}
    cost_so_far = {start: 0}
    while not frontier.empty():
        current = frontier.get()
        if current == goal:
            break
        for neighbor in graph.neighbors(current):
            candidate = cost_so_far[current] + graph.cost(current, neighbor)
            if neighbor not in cost_so_far or candidate < cost_so_far[neighbor]:
                cost_so_far[neighbor] = candidate
                # Priority = path cost so far + admissible estimate to goal.
                frontier.put(neighbor, candidate + heuristic(goal, neighbor))
                came_from[neighbor] = current
    return came_from, cost_so_far
def reconstruct_path(came_from, start, goal):
    """Follow predecessor links from goal back to start and return the
    path in start-to-goal order."""
    path = []
    node = goal
    while node != start:
        path.append(node)
        node = came_from[node]
    path.append(start)
    return path[::-1]
class WeightedGraph:
    # Grid-backed graph for A*: cells with value 0 are walkable, non-zero
    # cells are blockers AND carry the movement cost used by cost().
    #
    # NOTE(review): _build_blockers/_build_weights key by (row, col) but
    # in_bounds/neighbors treat ids as (x, y) against width/height — the
    # two conventions only coincide on square grids; confirm which axis
    # order callers use.
    def __init__(self, grid):
        # `grid` must expose num_cols, num_rows, and grid_data (a 2-D
        # array of cells with a `.value` attribute).
        self.width = grid.num_cols
        self.height = grid.num_rows
        self.data = grid.grid_data
        self.blockers = self._build_blockers()
        self.weights = self._build_weights()
    def _build_blockers(self):
        # Collect every non-zero cell as an impassable position.
        # NOTE(review): a list makes `passable` O(n); a set would be O(1).
        blockers = []
        for row in range(len(self.data)):
            for col in range(len(self.data[row])):
                if self.data[row][col].value != 0:
                    blockers.append((row, col))
        return blockers
    def _build_weights(self):
        # Map every position to its cell value (used as step cost).
        weights = {}
        for row in range(len(self.data)):
            for col in range(len(self.data[row])):
                weights[(row,col)] = self.data[row][col].value
        return weights
    def in_bounds(self, id):
        # True when the id lies inside the width x height grid.
        (x,y) = id
        return 0 <= x < self.width and 0 <= y < self.height
    def passable(self, id):
        # True when the position is not a blocker.
        return id not in self.blockers
    def neighbors(self, id):
        # 4-connected neighbours; the parity-based reverse keeps paths
        # from zig-zagging (standard Red Blob Games trick).
        (x,y) = id
        results = [(x+1,y),(x,y-1),(x-1,y),(x,y+1)]
        if (x + y) % 2 == 0:
            results.reverse()
        results = filter(self.in_bounds, results)
        results = filter(self.passable, results)
        return results
    def cost(self, from_node, to_node):
        # Cost of stepping onto to_node; defaults to 1 when unknown.
        return self.weights.get(to_node, 1)
class PriorityQueue:
    """Minimal min-priority queue backed by a binary heap."""
    def __init__(self):
        self.elements = []
    def empty(self):
        """Return whether nothing is queued."""
        return not self.elements
    def put(self, item, priority):
        """Queue `item`; lower priorities pop first."""
        heapq.heappush(self.elements, (priority, item))
    def get(self):
        """Pop and return the lowest-priority item."""
        (_, item) = heapq.heappop(self.elements)
        return item
| true |
fc91e2cb52310675d1d250eb3aca14bda40fc29e | Python | zongmingshu/Malicious-URL-Detection | /cnn2.py | UTF-8 | 4,441 | 2.65625 | 3 | [] | no_license | import torch.nn as nn
import torch
import numpy as np
from common import get_batch,get_train_datas,get_data,ont_hot
class CNN(nn.Module):
    # Binary URL classifier: two conv/pool stages followed by a 3-layer MLP.
    # The dense input 32*49*23 implies a (N, 1, 200, 97) one-hot input
    # (200-4 -> /2 -> /2 = 49; 97-4 -> /2 -> /2 = 23), matching the
    # commented shape below — TODO confirm against ont_hot()'s output.
    def __init__(self):
        super(CNN,self).__init__()
        # Stage 1: 1 -> 64 channels, 5x5 valid conv, 2x2 max-pool.
        self.conv1 = nn.Sequential(
            nn.Conv2d(
                in_channels=1,
                out_channels=64,
                kernel_size = 5,
                stride=1,
                padding = 0
            ),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        # Stage 2: 64 -> 32 channels, 5x5 "same" conv (padding 2), 2x2 pool.
        self.conv2 = nn.Sequential(
            nn.Conv2d(
                in_channels=64,
                out_channels=32,
                kernel_size = 5,
                stride=1,
                padding = 2
            ),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        # Classifier head: flattened features -> 2 logits (good/bad).
        self.dense = torch.nn.Sequential(
            torch.nn.Linear(32*49*23, 4096),
            torch.nn.ReLU(),
            torch.nn.Dropout(0.5),
            torch.nn.Linear(4096, 4096),
            torch.nn.ReLU(),
            torch.nn.Dropout(0.5),
            torch.nn.Linear(4096, 2)
        )
    def forward(self,x):
        # Convolutions, then flatten per sample, then the dense head.
        x = self.conv1(x)
        x = self.conv2(x)
        res = x.view(x.size(0), -1)
        x = self.dense(res)
        return x
def train():
    """Train the CNN on the good/bad query files and save the weights
    to parameter.pkl (state_dict)."""
    cnn = CNN()
    LR=0.0001
    # NOTE(review): `epoch` doubles as the iteration count and the loop
    # variable below — confusing but harmless.
    epoch = 20000
    # x = torch.from_numpy(np.random.random((32,1,200,97)))
    optimizer = torch.optim.Adam(cnn.parameters(), LR)
    loss_func = nn.CrossEntropyLoss()
    # output = cnn(x.float())
    # Label 0 = malicious, 1 = benign (see common.get_data).
    bdata, blabel = get_data("data/badqueries.txt", 0, 23333)
    gdata, glabel = get_data("data/goodqueries.txt", 1, 88888)
    # datas = bdata+gdata
    labels = blabel + glabel
    data = bdata + gdata
    # print(labels)
    for epoch in range(epoch):
        batch, target = get_batch(data, labels, 64)
        # print(torch.Tensor(target).shape)
        input = ont_hot(batch)
        # print(input.shape)
        output = cnn(input)
        print(output)
        # compute the loss
        # output.view(x.size(0), -1)
        # print(torch.flatten(output, start_dim=0, end_dim=-1))
        # value = torch.flatten(output, start_dim=0, end_dim=-1)
        value = output
        target = torch.Tensor(target).long()
        print(target)
        loss = loss_func(value, target)
        print(loss)
        # zero the gradients
        optimizer.zero_grad()
        # backpropagate
        loss.backward()
        # update the parameters
        optimizer.step()
        if epoch % 5 == 0:
            # Quick accuracy check on a fresh batch of 32.
            batch, target = get_batch(data, labels, 32)
            # print(torch.Tensor(target).shape)
            input = ont_hot(batch)
            test_output = cnn(input)
            # squeeze removes size-1 dims, e.g. [64, 1, 28, 28] -> [64, 28, 28]
            pre_y = torch.max(test_output, 1)[1].data.squeeze()
            # accuracy = correct predictions / total
            c=0
            for i in range(len(pre_y)):
                if pre_y[i]==target[i]:
                    c = c+1
            accuracy = c/ float(len(target))
            print("epoch:", epoch, "| train loss:%.4f" % loss.data, "|test accuracy:%.4f" % accuracy)
    torch.save(cnn.state_dict(), 'parameter.pkl')
    # print(output.shape)
def test():
    """Load the saved weights and report batch accuracy over the data,
    then classify one example URL."""
    cnn = CNN()
    test_epoch = 200
    # x = torch.from_numpy(np.random.random((32,1,200,97)))
    cnn.load_state_dict(torch.load('parameter.pkl'))
    # output = cnn(x.float())
    bdata, blabel = get_data("data/badqueries.txt", 0, 23333)
    gdata, glabel = get_data("data/goodqueries.txt", 1, 88888)
    # datas = bdata+gdata
    labels = blabel + glabel
    data = bdata + gdata
    # print(labels)
    for epoch in range(test_epoch):
        batch, target = get_batch(data, labels, 32)
        # print(torch.Tensor(target).shape)
        input = ont_hot(batch)
        test_output = cnn(input)
        # argmax over the two logits gives the predicted class.
        pre_y = torch.max(test_output, 1)[1].data.squeeze()
        c = 0
        for i in range(len(pre_y)):
            if pre_y[i] == target[i]:
                c = c + 1
        accuracy = c / float(len(target))
        print("epoch:", epoch, "|", "|test accuracy:%.4f" % accuracy)
    # To test a single URL:
    url=["/example/test"]
    input = ont_hot(url)
    test_output = cnn(input)
    # test_output holds the class scores
    pre_y = torch.max(test_output, 1)[1].data.squeeze()
    # pre_y is the predicted label, 1 or 0
if __name__=="__main__":
train()
| true |
af7a8b060d27c7f69d0c45c8ffb2fcdf2d5ac6e4 | Python | bongbong3/Study | /Algorithm/Algorithm for Everyone/trainint01/Intro/Intro/Palindrome.py | UTF-8 | 775 | 4.40625 | 4 | [] | no_license | '''
Created on 2018. 3. 11.
@author: kfx20
'''
# 주어진 문장이 회문인지 찾기(큐와 스택 이용)
# 문자열 s
# 회문이면 true, 아니면 false
def palindrome(s):
    """Return True if the letters of s read the same forwards and
    backwards (case-insensitive; non-letter characters are ignored).

    Uses a queue (FIFO) and a stack (LIFO): letters leave the queue in
    original order and the stack in reverse order, so comparing the two
    streams detects any mismatch.

    Improvement: list.pop(0) is O(n), making the original comparison
    loop O(n**2); collections.deque gives O(1) popleft with identical
    results.
    """
    from collections import deque
    # Define the queue and the stack.
    qu = deque()
    st = []
    # Step 1: push each alphabetic character (lower-cased) onto both.
    for x in s:
        if x.isalpha():
            qu.append(x.lower())
            st.append(x.lower())
    # Step 2: dequeue from the front and pop from the back, comparing
    # as we go; any mismatch means it is not a palindrome.
    while qu:
        if qu.popleft() != st.pop():
            return False
    return True
# Demo runs — expected output: True, True, False.
print(palindrome("Wow"))
print(palindrome("Madam, I'm Adam."))
print(palindrome("Madam, I am Adam."))
def dividing_line(symbol, count):
    """Print `count` numbered lines, each starting with `symbol`."""
    for index in range(count):
        print(symbol, index)
dividing_line('@', 10)
f21fea993f22f296770415c12bf417b7899e0d58 | Python | EKI-INDRADI/eki-latihan-python | /latihan_python_basic/32_argument_list.py | UTF-8 | 522 | 4.0625 | 4 | [] | no_license | # Belajar Argument List
#*list_angkat maksud dari * <<< adalah bisa menambahkan angka lebih dari 1
# def jumlahkan( x , *list_angka): <<<< jika ingin menambahkan parameter lain maka * hrs di tambahkan di yang paling belakang
# argument list (*) <<< hanya bisa 1 tidak bisa def jumlahkan( x , *list_angka, *list_angka):
def jumlahkan(*list_angka):
    """Sum any number of arguments and print the total."""
    total = sum(list_angka)
    print(f"Total {list_angka} = {total}")
jumlahkan(10, 10, 10, 10, 10, 10) #<<< example call