blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
e463121c5da4de503d90faa72a80afd4510c80e0 | Python | quicksloth/source-code-recommendation-server | /src/Models/DTO/Client/CodeDTO.py | UTF-8 | 700 | 2.671875 | 3 | [
"Apache-2.0"
] | permissive | import os
import requests
from flask import json
class CodeDTO(object):
    """Data-transfer object bundling a code snippet, its relevance score
    and the originating source link for delivery to the client."""

    def __init__(self, code=None, score=None, source_link=None):
        # Falsy inputs (None, '', 0) fall back to neutral defaults.
        self.code = code or ''
        self.score = score or 0.0
        self.source_link = source_link or ''

    @staticmethod
    def from_crawler_code(crawler_result, crawler_code):
        """Build a CodeDTO from a crawler result (link) and a scored code hit."""
        return CodeDTO(
            code=crawler_code.code,
            score=crawler_code.score,
            source_link=crawler_result.source_link,
        )
# def serialize:
# TODO serialize this object to string
| true |
0db612336b54f81e31c7eb55ce5a7c704bf6ea60 | Python | juancsosap/pythontraining | /training/c18_pandas/e01-reading-data.py | UTF-8 | 1,165 | 2.75 | 3 | [] | no_license | import pandas as pd
import os
#import xlrd
# Demo script: the different ways pandas can read delimited text files.
# Make the data/ folder resolvable regardless of the caller's cwd.
# NOTE(review): slicing on '/' assumes POSIX-style paths in __file__.
basedir = __file__[:__file__.rfind('/')+1]
if basedir != '': os.chdir(basedir)
os.chdir('..')
# Reading tab-separated data with sensible defaults.
url = 'data/chiporders.data' #'http://bit.ly/chiporders'
data = pd.read_table(url) # Deprecated -- modern pandas prefers pd.read_csv(url, sep='\t')
print(data.head(), end='\n\n')
# Reading pipe-delimited data with the default comma separator (mis-parses).
url = 'data/movieusers.data' #'http://bit.ly/movieusers'
data = pd.read_csv(url)
print(data.head(), end='\n\n')
# Same file, this time telling pandas the real separator symbol.
data = pd.read_csv(url, sep='|')
print(data.head(), end='\n\n')
# Treat the first row as data rather than as a header.
data = pd.read_csv(url, sep='|', header=None)
print(data.head(), end='\n\n')
# Supply the column names explicitly since the file has no header row.
user_cols = ['user_id', 'age', 'gender', 'occupation', 'zip_code']
data = pd.read_csv(url, sep='|', header=None, names=user_cols)
print(data.head(), end='\n\n')
# Plain CSV file: defaults work as-is.
url = 'data/uforeports.csv' #'http://bit.ly/uforeports'
data = pd.read_csv(url)
print(data.head(), end='\n\n')
#wb = xlrd.open_workbook(path)
#data = pd.read_excel(wb)
| true |
8ce8ddee6e06e07438ea71211f1b9b8604b8663e | Python | a2975667/circle_2017 | /week_8-machine_learing/simple_lr_2.py | UTF-8 | 261 | 2.609375 | 3 | [] | no_license | import pandas as pd
import matplotlib.pyplot as plt
# Load the ISL Advertising dataset; column 0 is the row index.
data = pd.read_csv('http://www-bcf.usc.edu/~gareth/ISL/Advertising.csv', index_col=0)
# NOTE(review): DataFrame.ix was deprecated and later removed from pandas;
# .iloc is the positional equivalent if this is ever modernized.
X = data.ix[:,:1] #[['TV']]
y = data.ix[:,3:4]
# Plot the distribution of the Sales column.
y_list = y['Sales'].values.tolist()
plt.hist(y_list)
plt.show()
| true |
acdaf0e447d23adc30b6763a1287b7e9e87c880d | Python | AnkyBistt/Python | /Python classes/class without function.py | UTF-8 | 524 | 3.640625 | 4 | [] | no_license | class Student:
    # Class-level defaults; they are shadowed by the instance attributes
    # assigned in __init__ and by the same-named module-level variables below.
    studentName = ""
    studentAddress = ""
    def __init__(self, studentName, studentAddress):  # constructor: stores and echoes the values
        """Store the given name/address on the instance and print them."""
        print("Halo YOu are in class")
        self.studentName = studentName
        self.studentAddress = studentAddress
        print(self.studentName, self.studentAddress)
# Interactive driver: prompt for the two fields, then let the constructor echo them.
studentName =input("Enter ur name: ")
studentAddress = input("Enter ur address: ")
obj = Student(studentName, studentAddress)
| true |
b36d4fb82b0807dfdb008eb0ee22e67f8efd0887 | Python | maheshbingi/ReadALoud-NoSQL | /mongodb/populate_mongo.py | UTF-8 | 2,662 | 2.84375 | 3 | [] | no_license | import os, sys
import csv, time
from pymongo import MongoClient
from random import randint
RECORD_COUNT = 10
file = "E:/Semester II/CMPE226/Project 2/data/Books.csv"
path = "E:/Semester II/CMPE226/Project 2/data/test"
connection = MongoClient("mongodb://localhost:27017")
genreList = ["Autobiography","Adventure","Classics","Comic","Crime Fiction","Fantasy", "Fiction", "Horror", "History", "Poetry", "Politics", "Travel", "Vampires"]
langList = ["English (US)","French","Spanish","Hindi","Japanese","English (UK)"]
filenames = []
book_record={}
def populateBooksMongoMeta(book_record):
    """Insert one metadata document into readaloud.books_metadata.

    Uses the module-level `connection`. NOTE(review): Collection.insert() is
    deprecated in modern PyMongo (insert_one is the current API) -- confirm
    the driver version before changing.
    """
    db = connection.readaloud.books_metadata
    db.insert(book_record)
def populateBooksMongoContent(book_record):
    """Insert one book-content document into readaloud.books_content.

    Uses the module-level `connection`; same deprecated insert() API as above.
    """
    db = connection.readaloud.books_content
    db.insert(book_record)
def readCSV():
failureCounter = 0
counter = 1
failCount = 0
metaWriteTime =0
data = open(file)
input_file = csv.DictReader(data,delimiter=";",quotechar='"')
for row in input_file:
if counter > RECORD_COUNT:
break
#print row
genreIndex = randint(0,12)
langIndex = randint(0,5)
try:
book_record={'_id':counter,'ISBN':row["ISBN"], 'title': row["Book-Title"] , 'author' : row["Book-Author"] , 'year_of_publication': row["Year-Of-Publication"] , 'publisher': row["Publisher"], 'genre':genreList[genreIndex],'language':langList[langIndex]}
startTime = int(round(time.time() * 1000))
populateBooksMongoMeta(book_record)
metaWriteTime += (int(round(time.time() * 1000)) - startTime)
counter +=1
except :
failCount +=1
#print "Row Ignored"
print "Time for metadata :" + str(metaWriteTime)
def populateContent():
failureCounter = 0
counter = 0
failCount = 0
contentWriteTime=0
data = open(file)
input_file = csv.DictReader(data,delimiter=";",quotechar='"')
for row in input_file:
if counter > RECORD_COUNT :
break
filename = path + "/"+ filenames[randint(0,1300)]
try:
input_file = open(filename,'r')
counter +=1
startTime = int(round(time.time() * 1000))
book_record={"ISBN":row["ISBN"],"content":input_file.read()}
populateBooksMongoContent(book_record)
contentWriteTime = contentWriteTime + (int(round(time.time() * 1000)) - startTime)
except:
failCount +=1
print "Time for content :" + str(contentWriteTime)
dirs = os.listdir(path)
for f in dirs:
filenames.append(f)
print "******************Mongo*********************"
print "Total Records :" + str(RECORD_COUNT)
#readCSV()
populateComments()
populateContent()
print "************************************************"
connection.close()
| true |
827ab7deb45a53778bcb9b5b1c87ab4bea4d5399 | Python | laigen-unam/tf-properties-summarizer | /summarizer/transforming.py | UTF-8 | 5,714 | 2.71875 | 3 | [] | no_license | # -*- coding: UTF-8 -*-
import re
from optparse import OptionParser
import os
import sys
from time import time
__author__ = 'CMendezC'
#Modified by Blanchet | Regular expression for SSA tag identification
# Objective: Transforming BIOLemmatized files:
# 1) Transformed files
# 2) Text files to extract aspects
# Parameters:
# 1) --inputPath Path to read input files.
# 2) --transformedPath Path to place output files.
# 3) --textPath Path to place output files.
# 4) --crf Let POS tag instead of substituting it by term or freq tag
# Output:
# 1) transformed files
# 2) text files
# Execution:
# python transforming.py --inputPath C:\Users\cmendezc\Documents\GENOMICAS\AUTOMATIC_SUMMARIZATION_TFS\corpus\aspectClassificationDatasets\term --transformedPath C:\Users\cmendezc\Documents\GENOMICAS\AUTOMATIC_SUMMARIZATION_TFS\corpus\aspectClassificationDatasets\transformed --textPath C:\Users\cmendezc\Documents\GENOMICAS\AUTOMATIC_SUMMARIZATION_TFS\corpus\aspectClassificationDatasets\textToExtractAspects
###########################################################
# MAIN PROGRAM #
###########################################################
if __name__ == "__main__":
    # Parameter definition: input dir, two output dirs, term dictionary files.
    parser = OptionParser()
    parser.add_option("-i", "--inputPath", dest="inputPath",
                      help="Path to read input files", metavar="PATH")
    parser.add_option("-o", "--transformedPath", dest="transformedPath",
                      help="Path to place transformed files", metavar="PATH")
    parser.add_option("--textPath", dest="textPath",
                      help="Path to place text files", metavar="PATH")
    parser.add_option("--crf", default=False,
                      action="store_true", dest="crf",
                      help="Let POS tag instead of substituting it by term or freq tag?")
    parser.add_option("--termPath", dest="termPath",
                      help="Path to read term files", metavar="PATH")
    parser.add_option("--termFiles", dest="termFiles",
                      help="JSON file with terms files and tags", metavar="PATH")
    (options, args) = parser.parse_args()
    if len(args) > 0:
        parser.error("None parameters indicated.")
        sys.exit(1)
    # Printing parameter values
    print('-------------------------------- PARAMETERS --------------------------------')
    print("Path to read input files: " + str(options.inputPath))
    print("Path to place transformed files: " + str(options.transformedPath))
    print("Path to place text files: " + str(options.textPath))
    print("Let POS tag instead of substituting it by term or freq tag? " + str(options.crf))
    # Collect the tag names ("son" keys) from the JSON-like term file.
    # NOTE(review): tags_d is never used afterwards, the same regex is run
    # twice per line, and tagset is collected but not read below -- presumably
    # leftovers from an earlier version; confirm before removing.
    with open(os.path.join(options.termPath, options.termFiles)) as dicts:
        tags_d = {}
        dicts = dicts.readlines()
        tagset = []
        for line in dicts:
            if re.search("\"(?P<son>\w+)\":\s\"(?P<father>\w+)\"", line):
                s = re.search("\"(\w+)\":\s\"(\w+)\"", line).group(1)
                tagset.append(s)
    filesPreprocessed = 0
    t0 = time()
    print("Transforming files...")
    # Walk directory to read files
    for path, dirs, files in os.walk(options.inputPath):
        # For each file in dir
        for file in files:
            print("   Transforming file..." + str(file))
            # Input lines are tab-separated token records, e.g.:
            #   TrpR	NN	TrpR	NN	PennPOS
            # with blank lines separating sentences.
            listLine1 = []
            listLine2 = []
            text = ''
            lemma = ''
            pos = ''
            textTransformed = ''
            textText = ''
            with open(os.path.join(path, file), "r", encoding="utf-8", errors="replace") as iFile:
                # Two outputs per input file: plain text and word|lemma|pos triples.
                with open(os.path.join(options.textPath, file.replace('term.txt', 'txt')), "w", encoding="utf-8") as textFile:
                    with open(os.path.join(options.transformedPath, file.replace('term.txt', 'tra.txt')), "w", encoding="utf-8") as transformedFile:
                        for line in iFile:
                            if line == '\n':
                                # Sentence boundary: flush accumulated buffers.
                                textFile.write(textText + '\n')
                                transformedFile.write(textTransformed + '\n')
                                textTransformed = ''
                                textText = ''
                            else:
                                line = line.strip('\n')
                                listLine1 = line.split('\t')
                                text = listLine1[0]
                                # Replacing a strange space character
                                text = text.replace(' ', '-')
                                listLine2 = listLine1[2].split(' ')
                                if len(listLine2) < 3:
                                    continue
                                lemma = listLine2[0]
                                # Replacing a strange space character
                                lemma = lemma.replace(' ', '-')
                                if listLine2[2] == "TermTag":
                                    # Term entries carry their POS in the lemma field.
                                    pos = listLine2[1]
                                    #print('Line ' + str(line.encode(encoding='UTF-8', errors='replace')))
                                else:
                                    pos = listLine1[1]
                                textText = textText + text + ' '
                                textTransformed = textTransformed + text + '|' + lemma + '|' + pos + ' '
            filesPreprocessed += 1
    # Print the number of processed files and elapsed time.
    print()
    print("Files preprocessed: " + str(filesPreprocessed))
    print("In: %fs" % (time() - t0))
| true |
caadf07ae6ec57e2d873cc3aad976ddf3a13c142 | Python | macukadam/TwitterApiWebApp | /Twaster/TweetUtils/tests.py | UTF-8 | 1,082 | 2.671875 | 3 | [] | no_license | from django.test import TestCase
from datetime import datetime
# Parse Twitter's created_at timestamp format into an aware datetime.
d = datetime.strptime('Thu Apr 23 13:38:19 +0000 2009','%a %b %d %H:%M:%S %z %Y')
print(d.strftime('%Y-%m-%d'))  # date part, e.g. 2009-04-23
print(d.strftime('%H:%M:%S'))  # time part, e.g. 13:38:19
# def newlocs():
# global tm
# global flag
# for i in range(tm):
# location_predicter(41,28.97,10)
# location_predicter(41,28.97,10)
# location_predicter(21,28.97,10)
# if tm > 7:
# flag = True
# elif tm < 1:
# flag = False
# if(flag):
# tm += 1
# else:
# tm -=1
# print(tm)
# time.sleep(10)
# time1 = timeit.default_timer()
# time2 = time1 + 90
# while timeit.default_timer() < time2:
# newlocs()
# import datetime
# import math
# lmbda = 0.34
# pf = 0.35
# n0 = 1
# def p_occur(t):
# return 1-pf**(n0*(1-math.exp(-lmbda*(t +1)))/(1-math.exp(-lmbda)))
# print(p_occur(10))
# import threading
# def printit():
# threading.Timer(5.0, printit).start()
# print("Hello, World!")
# printit()
#json basis
#print(json.dumps(status, indent=4, sort_keys=True)) | true |
a62b28925e909240aa99e0f7708dda1c79e54a90 | Python | yjyoo3312/MC_GAN | /Model1/MyTransform.py | UTF-8 | 2,934 | 3.34375 | 3 | [] | no_license | import numpy as np
import torch
import random
from PIL import Image
class Rescale(object):
    """Rescale the image and its segmentation mask in a sample to a given size.

    Args:
        output_size (tuple or int): Desired output size. If a tuple, output is
            resized to exactly (h, w). If an int, the smaller image edge is
            matched to it, keeping the aspect ratio the same.
        interpolation: PIL resampling filter used for the image; the mask is
            always resized with NEAREST so label values are never interpolated.
    """

    def __init__(self, output_size, interpolation=Image.BILINEAR):
        assert isinstance(output_size, (int, tuple))
        self.output_size = output_size
        # BUG FIX: the constructor previously hard-coded Image.BILINEAR here,
        # silently ignoring the `interpolation` argument.
        self.interpolation = interpolation

    def __call__(self, sample):
        image, seg = sample['image'], sample['seg']
        w, h = image.size  # PIL reports size as (width, height)
        if isinstance(self.output_size, int):
            # Match the smaller edge to output_size, preserving aspect ratio.
            if h > w:
                new_h, new_w = self.output_size * h / w, self.output_size
            else:
                new_h, new_w = self.output_size, self.output_size * w / h
        else:
            new_h, new_w = self.output_size
        new_h, new_w = int(new_h), int(new_w)
        # BUG FIX: PIL's Image.resize expects (width, height); the original
        # passed (new_h, new_w), which swapped the axes for non-square sizes.
        img = image.resize((new_w, new_h), self.interpolation)
        seg_img = seg.resize((new_w, new_h), Image.NEAREST)
        return {'image': img, 'seg': seg_img}
class RandomCrop(object):
    """Randomly crop the image and its segmentation mask to ``output_size``.

    Args:
        output_size (tuple or int): Desired (h, w) output size; an int yields
            a square crop. Image and mask are cropped at the same location.
    """

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            self.output_size = (output_size, output_size)
        else:
            assert len(output_size) == 2
            self.output_size = output_size

    def __call__(self, sample):
        image, seg = sample['image'], sample['seg']
        width, height = image.size
        crop_h, crop_w = self.output_size
        # Already the requested size: return the sample unchanged.
        if (width, height) == (crop_w, crop_h):
            return {'image': image, 'seg': seg}
        # Draw one top-left corner and apply it to both image and mask so
        # they stay spatially aligned.
        left = random.randint(0, width - crop_w)
        top = random.randint(0, height - crop_h)
        box = (left, top, left + crop_w, top + crop_h)
        return {'image': image.crop(box), 'seg': seg.crop(box)}
class RandomHorizontalFlip(object):
    """Horizontally flip image and mask together with probability 0.5."""

    def __call__(self, sample):
        img, seg = sample['image'], sample['seg']
        # A single draw decides both, keeping image and mask aligned.
        flip = random.random() < 0.5
        if flip:
            img = img.transpose(Image.FLIP_LEFT_RIGHT)
            seg = seg.transpose(Image.FLIP_LEFT_RIGHT)
        return {'image': img, 'seg': seg}
class ToTensor(object):
    """Convert the numpy arrays in a sample to torch Tensors.

    The image is moved from H x W x C to the torch C x H x W layout; the
    segmentation mask keeps its H x W layout.
    """

    def __call__(self, sample):
        # numpy image: H x W x C  ->  torch image: C x H x W
        chw_image = sample['image'].transpose((2, 0, 1))
        return {'image': torch.from_numpy(chw_image),
                'seg': torch.from_numpy(sample['seg'])}
c1830ccafef50d637ace2ceb7133172486733dd9 | Python | farseer810/vicky-practice | /000.py | UTF-8 | 548 | 4.375 | 4 | [] | no_license | #-*- coding: utf-8 -*-
"""
给整数a, b,计算两数的和与积
输入:共一行,两个数字以空格隔开
输出:第一行输出a+b的和,第二行输出a*b
输入样例1:
1 2
输出样例1:
3
2
"""
if __name__ == "__main__":
    # Equivalent step-by-step reading style kept by the author for reference:
    """
    line = input() # 读一行字符串
    a, b = line.split(' ') # 以空格分离一行字符串
    a, b = int(a), int(b) # 转换成整数类型
    """
    a, b = input().split(' ') # read one input line and split it on the space
    a, b = int(a), int(b)     # convert both tokens to integers
    print(a + b)  # first output line: the sum
    print(a * b)  # second output line: the product
| true |
03dd86345be3f770bd9b03a8798209c7ada14ec1 | Python | ThinkRORBOT/pressureUi | /pressure_test.py | UTF-8 | 764 | 2.71875 | 3 | [] | no_license | import unittest
import receive_data
class MyTest(unittest.TestCase):
    """Tests for receive_data.leak_check: feed pre-recorded pressure traces
    and check the boolean leak verdict from test_list().

    NOTE(review): the expected True/False values encode the semantics of
    receive_data.leak_check(5, 3), which are not visible from this file.
    """
    def test_data_leak(self):
        # Trace whose pressure drifts back down after the plateau -> leak.
        data_1 = [0.1, 0.15, 0.14, 0.16, 0.29, 0.3, 4, 5, 6, 7, 8 , 8, 9.6, 13, 13.2, 13.3, 13.6, 13.2, 13.1, 13.0, 12.9, 12.8, 12.9, 13.0, 12.7, 12.6, 12.4, 12.6, 12.4, 12.3, 12.2, 12, 11.9, 12.1, 11.8, 11.7]
        test_1 = receive_data.leak_check(5, 3)
        self.assertEqual(test_1.test_list(data_1), True)
    def test_data_nonleak(self):
        # Trace whose pressure keeps rising/holding -> no leak.
        data_2 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 12.2, 12.4, 12.3, 12.2, 12.3, 12.5, 12.6, 12.5, 12.6, 12.7, 12.8, 12.5, 12.7, 12.8, 13, 13.1, 13.2, 13.3]
        test_2 = receive_data.leak_check(5, 3)
        self.assertEqual(test_2.test_list(data_2), False)
if __name__ == '__main__':
    unittest.main()  # discover and run the TestCase above when run as a script
| true |
428385368d7082122838d3f03dbf9714ee234f1e | Python | zimonitrome/simple-general-image-classifier-pytorch | /eval.py | UTF-8 | 5,585 | 2.609375 | 3 | [
"MIT"
] | permissive | import types
import argparse
from pathlib import Path
import inspect
import numpy as np
from tqdm import tqdm
import torch
from torch import nn
from torch.utils.data.dataloader import DataLoader
from torchvision import models, transforms
from torchvision.datasets import VisionDataset
from PIL import Image
from shutil import copy
def get_first_layer(net):
    """Descend into the first child of each module until a Conv2d is found.

    Returns the model's leading convolution (so e.g. its ``in_channels`` can
    be inspected or patched). Raises Exception if the first-child chain
    bottoms out without meeting a convolution.
    """
    while True:
        if isinstance(net, nn.Conv2d):
            return net
        try:
            # Keep walking down the first-child chain (Sequential, blocks, ...).
            net = list(net.children())[0]
        except Exception:  # was a bare except; Exception still covers the
            # IndexError/AttributeError cases without trapping KeyboardInterrupt
            raise Exception("Model doesn't start with a convolution. Try another model.")
def get_last_layer(net):
    """Descend into the last child of each module until a Linear is found.

    Returns the model's final fully-connected layer (so e.g. its
    ``out_features`` can be inspected or patched). Raises Exception if the
    last-child chain bottoms out without meeting a Linear.
    NOTE(review): the original error message says "convolution" although a
    Linear is searched for; kept the exception type, fixed the wording.
    """
    while True:
        if isinstance(net, nn.Linear):
            return net
        try:
            # Keep walking down the last-child chain (classifier heads, ...).
            net = list(net.children())[-1]
        except Exception:  # was a bare except; narrowed to Exception
            raise Exception("Model doesn't end with a linear layer. Try another model.")
def list_items(items):
    """Render *items* as a bulleted block, one ``\\n - item`` per entry, so it
    can be appended directly after a sentence in help text."""
    return ''.join('\n - ' + item for item in items)
def save_model(model, path):
    """Persist the model's state_dict to *path*, creating parent dirs as needed."""
    path.parent.mkdir(parents=True, exist_ok=True)
    torch.save(model.state_dict(), path)
class PathDataset(VisionDataset):
    """Dataset yielding (transformed_image, path_string) pairs for every
    ``*.png`` found recursively under ``root``.

    Relies on the VisionDataset base class to set ``self.root`` and
    ``self.transforms`` from the constructor arguments.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Materialize the file list once up front so __len__/__getitem__ are O(1).
        self.paths = list(Path(self.root).rglob("*.png"))
    def __getitem__(self, index: int):
        path = self.paths[index]
        # NOTE(review): Image.open loads lazily and the handle is never
        # explicitly closed; fine while transforms consume it immediately.
        sample = self.transforms(Image.open(path))
        return sample, str(path)
    def __len__(self) -> int:
        return len(self.paths)
if __name__ == "__main__":
    # Get every function in 'models' (torchvision model constructors).
    available_models = [m for m in dir(models) if isinstance(getattr(models, m), types.FunctionType)]
    available_models_str = list_items(available_models)
    # Get every transform class name, with its constructor signature, for help text.
    available_transforms = transforms.transforms.__all__
    available_transforms_with_args = [f"{t}{inspect.signature(getattr(transforms, t))}" for t in available_transforms]
    available_transforms_with_args_str = list_items(available_transforms_with_args)
    class CustomFormatter(argparse.RawTextHelpFormatter, argparse.ArgumentDefaultsHelpFormatter):
        pass
    parser = argparse.ArgumentParser(formatter_class=CustomFormatter)
    parser.add_argument('input_folder', help='Path to folder containing folder of classes.')
    parser.add_argument('model_checkpoint', help='Saved .pt file.')
    parser.add_argument('prediction_output_folder', help='Path to folder where predictions should be saved. Will be created.')
    parser.add_argument('-d', '--device', help='cuda or cpu', default="cuda")
    parser.add_argument('-ic', '--in_channels', help='Number of input channels. 3=rgb, 1=mono etc.', default=3)
    parser.add_argument('-m', '--model', help=f"Models to choose from. Available models:{available_models_str}", default="resnet18")
    parser.add_argument('-w', '--workers', help=f"How many threads to run for training dataloader.", default=0, type=int)
    parser.add_argument('-bs', '--batch_size', default=32, type=int)
    parser.add_argument('-t', '--transforms', help=("List of transforms (including parameters without spaces) to apply to each image.\n" +
                                                    "For example: --transforms \"gaussianBlur(3)\" \"CenterCrop(64)\".\n" +
                                                    f"Available:{available_transforms_with_args_str}"), nargs='+', default=None)
    args = parser.parse_args()
    device = torch.device(args.device)
    # Build constant transforms from the CLI strings.
    # NOTE(review): eval() on command-line input executes arbitrary code --
    # acceptable for a local tool, but never expose this to untrusted input.
    if args.transforms:
        arg_transforms = [eval("transforms."+t) for t in args.transforms]
    else:
        arg_transforms = []
    constant_transforms = transforms.Compose([
        *arg_transforms,
        transforms.ToTensor(),
        transforms.Normalize(.5, .225),
    ])
    print("General transforms:", list_items([str(t) for t in constant_transforms.transforms]))
    # Build dataloaders
    data_folder = Path(args.input_folder)
    # dataset = DatasetFolder(data_folder, transform=constant_transforms, loader=lambda p: Image.open(p), extensions=["png"])
    dataset = PathDataset(data_folder, transforms=constant_transforms)
    print(f"Images: {len(dataset)}")
    dataloader = DataLoader(dataset, args.batch_size, shuffle=False, pin_memory=(args.workers), num_workers=args.workers)
    # Open saved checkpoint (expects keys "classes" and "model").
    saved = torch.load(args.model_checkpoint)
    classes = saved["classes"]
    # Build model and patch its input/output layer sizes to the checkpoint.
    # NOTE(review): assigning in_channels/out_features only mutates the
    # attributes, not the weight tensors; load_state_dict below supplies the
    # actually-resized weights -- confirm checkpoint shapes match.
    model = getattr(models, args.model)()
    model.to(device)
    first_layer = get_first_layer(model)
    first_layer.in_channels = args.in_channels
    last_layer = get_last_layer(model)
    last_layer.out_features = len(classes)
    model.load_state_dict(saved["model"])
    print(f"Input channels: {first_layer.in_channels}")
    print(f"Output neurons: {last_layer.out_features}")
    path_predictions = {}
    dl_iter = tqdm(dataloader, desc="Predicting")
    for inputs, paths in dl_iter:
        inputs = inputs.to(device)
        # Inference only: no grad, mixed precision for speed.
        with torch.no_grad():
            with torch.cuda.amp.autocast():
                outputs = model(inputs)
                _, predictions = torch.max(outputs, 1)
        for path, pred in zip(paths, predictions):
            path_predictions[path] = pred
    # Copy each image into a subfolder named after its predicted class.
    output_root = Path(args.prediction_output_folder)
    path_preds_iter = tqdm(path_predictions.items(), desc="Copying files")
    for str_path, pred in path_preds_iter:
        output_path = Path(output_root) / classes[pred] / Path(str_path).name
        output_path.parent.mkdir(exist_ok=True, parents=True)
        copy(str_path, str(output_path))
| true |
6f185b8b5b9f451b9efbb4cf9fe263f230814b7f | Python | quaxsze/flask-file-system | /tests/test_backend_mixin.py | UTF-8 | 6,696 | 3.140625 | 3 | [
"MIT"
] | permissive | import hashlib
from datetime import datetime
class BackendTestCase:
    """Reusable pytest mixin exercising a file-storage backend.

    Concrete subclasses must provide ``self.backend``, ``self.hasher`` and
    implement the three raw-filesystem hooks (put_file/get_file/file_exists)
    so the same test battery runs against any storage implementation.
    Test methods receive the ``faker``/``utils``/``app`` pytest fixtures.
    """
    def b(self, content):
        # Normalize str input to UTF-8 bytes for binary comparisons.
        if isinstance(content, str):
            content = content.encode('utf-8')
        return content
    def put_file(self, filename, content):
        raise NotImplementedError('You must implement this method')
    def get_file(self, filename):
        raise NotImplementedError('You must implement this method')
    def file_exists(self, filename):
        raise NotImplementedError('You must implement this method')
    def assert_bin_equal(self, filename, expected):
        data = self.get_file(filename)
        assert data == self.b(expected)
    def assert_text_equal(self, filename, expected):
        data = self.get_file(filename)
        assert data == expected.encode('utf-8')
    def test_exists(self):
        self.put_file('file.test', 'test')
        assert self.backend.exists('file.test')
        assert not self.backend.exists('other.test')
    def test_open_read(self, faker):
        content = str(faker.sentence())
        self.put_file('file.test', content)
        with self.backend.open('file.test') as f:
            data = f.read()
            assert isinstance(data, str)
            assert data == content
    def test_open_read_binary(self, faker):
        content = bytes(faker.binary())
        self.put_file('file.test', content)
        with self.backend.open('file.test', 'rb') as f:
            data = f.read()
            assert isinstance(data, bytes)
            assert data == content
    def test_open_write_new_file(self, faker):
        filename = 'test.text'
        content = str(faker.sentence())
        with self.backend.open(filename, 'w') as f:
            f.write(content)
        self.assert_text_equal(filename, content)
    def test_open_write_new_file_with_prefix(self, faker):
        # Intermediate directories must be created implicitly.
        filename = 'some/new/dir/test.text'
        content = str(faker.sentence())
        with self.backend.open(filename, 'w') as f:
            f.write(content)
        self.assert_text_equal(filename, content)
    def test_open_write_new_binary_file(self, faker):
        filename = 'test.bin'
        content = bytes(faker.binary())
        with self.backend.open(filename, 'wb') as f:
            f.write(content)
        self.assert_bin_equal(filename, content)
    def test_open_write_existing_file(self, faker):
        # Writing must overwrite, not append to, existing content.
        filename = 'test.txt'
        content = str(faker.sentence())
        self.put_file(filename, str(faker.sentence()))
        with self.backend.open(filename, 'w') as f:
            f.write(content)
        self.assert_text_equal(filename, content)
    def test_read(self, faker):
        content = str(faker.sentence())
        self.put_file('file.test', content)
        assert self.backend.read('file.test') == content.encode('utf-8')
    def test_write_text(self, faker):
        content = str(faker.sentence())
        self.backend.write('test.txt', content)
        self.assert_text_equal('test.txt', content)
    def test_write_binary(self, faker):
        content = bytes(faker.binary())
        self.backend.write('test.bin', content)
        self.assert_bin_equal('test.bin', content)
    def test_write_file(self, faker, utils):
        # write() must also accept a file-like object.
        content = bytes(faker.binary())
        self.backend.write('test.bin', utils.file(content))
        self.assert_bin_equal('test.bin', content)
    def test_write_with_prefix(self, faker):
        content = str(faker.sentence())
        self.backend.write('some/path/to/test.txt', content)
        self.assert_text_equal('some/path/to/test.txt', content)
    def test_delete(self, faker):
        content = faker.sentence()
        self.put_file('file.test', content)
        self.backend.delete('file.test')
        assert not self.file_exists('file.test')
    def test_delete_directory(self, faker):
        # Deleting a directory removes it and everything beneath it.
        content = faker.sentence()
        self.put_file('test/file.01', content)
        self.put_file('test/file.02', content)
        self.backend.delete('test')
        assert not self.file_exists('test/file.01')
        assert not self.file_exists('test/file.02')
        assert not self.file_exists('test')
    def test_save_content(self, faker, utils):
        content = str(faker.sentence())
        storage = utils.filestorage('test.txt', content)
        self.backend.save(storage, 'test.txt')
        self.assert_text_equal('test.txt', content)
    def test_save_from_file(self, faker, utils):
        content = bytes(faker.binary())
        f = utils.file(content)
        self.backend.save(f, 'test.png')
        f.seek(0)
        self.assert_bin_equal('test.png', content)
    def test_save_with_filename(self, faker, utils):
        filename = 'somewhere/test.test'
        content = str(faker.sentence())
        storage = utils.filestorage('test.txt', content)
        self.backend.save(storage, filename)
        self.assert_text_equal(filename, content)
    def test_list_files(self, faker, utils):
        files = set(['first.test', 'second.test', 'some/path/to/third.test'])
        for f in files:
            content = str(faker.sentence())
            self.put_file(f, content)
        assert set(self.backend.list_files()) == files
    def test_metadata(self, app, faker):
        # Checksum format is "<algorithm>:<hexdigest>" using self.hasher.
        content = str(faker.sentence())
        hasher = getattr(hashlib, self.hasher)
        hashed = hasher(content.encode('utf8')).hexdigest()
        self.put_file('file.txt', content)
        metadata = self.backend.metadata('file.txt')
        assert metadata['checksum'] == '{0}:{1}'.format(self.hasher, hashed)
        assert metadata['size'] == len(content)
        assert metadata['mime'] == 'text/plain'
        assert isinstance(metadata['modified'], datetime)
    def test_metadata_unknown_mime(self, app, faker):
        # Unknown extensions may be reported as octet-stream or sniffed text.
        content = str(faker.sentence())
        self.put_file('file.whatever', content)
        metadata = self.backend.metadata('file.whatever')
        assert metadata['mime'] in ('application/octet-stream', 'text/plain')
    def test_copy(self, faker):
        content = faker.sentence()
        self.put_file('file.test', content)
        target = 'other/path/to/file.test2'
        self.backend.copy('file.test', target)
        assert self.file_exists('file.test')
        assert self.file_exists(target)
        self.assert_text_equal(target, content)
    def test_move(self, faker):
        # Unlike copy, move must remove the source.
        content = faker.sentence()
        self.put_file('file.test', content)
        target = 'other/path/to/file.test2'
        self.backend.move('file.test', target)
        assert not self.file_exists('file.test')
        assert self.file_exists(target)
        self.assert_text_equal(target, content)
| true |
8b8e69c691fc5ff193c27a674190a406256ddad9 | Python | deepaksabat/PythonPrograms | /even.py | UTF-8 | 113 | 3.515625 | 4 | [] | no_license | n=input("enter a number:")
# n is read via input() on the line above; note this is Python 2, where
# input() eval()s the typed text, so typed digits arrive as an int.
if n%2==0:
    # remainder 0 when divided by 2 -> even
    print n,"is a even number"
else:
    print n,"is a odd number"
| true |
95605463f21e5d1534d6faf669d9db3c1a5ae0b6 | Python | JT4life/DailyCodingChallenges | /sorted.py | UTF-8 | 203 | 3.65625 | 4 | [] | no_license | def filter_sort(items):
lst = []
for item in items:
if isinstance(item, str):
lst.append(item)
return lst.sort()
# Demo: filter the strings out of a mixed list and print the result.
items = [1,2,'a','c','a']
print(filter_sort(items))
179fbc24e7438286adab259903a26dc03b321cb7 | Python | tmlife485/useful_tools | /ParkMyCloudAPIExamples/PMC-override_list_of_instances.py | UTF-8 | 2,549 | 2.90625 | 3 | [
"MIT"
] | permissive | import os
import requests
# Read the PMC credentials and API app token from environment variables
# (None if unset -- authentication will then fail at request time).
pmc_username = os.environ.get('PMC_USERNAME')
pmc_password = os.environ.get('PMC_PASSWORD')
pmc_api = os.environ.get('PMC_API_TOKEN')
base_url = "https://console.parkmycloud.com"
# Names of the instances whose schedules should be overridden (snoozed).
override_these_instance_names = [
    "instance1",
    "instance2",
    "instance3"
]
# Authenticate to ParkMyCloud's API
def get_pmc_api_auth():
    """Log in to the ParkMyCloud API and return a session auth token.

    Uses the module-level credentials read from the environment.
    """
    credentials = {
        "username": pmc_username,
        "password": pmc_password,
        "app_id": pmc_api
    }
    json_headers = {
        "Content-Type": "application/json",
        "Accept": "application/json"
    }
    auth_response = requests.post(url=base_url + '/auth', json=credentials, headers=json_headers)
    return auth_response.json()['token']
# Get a list of resources (instances and databases) from ParkMyCloud
def get_pmc_resources(auth_token):
    """Fetch the simplified list of resources (instances and databases)
    visible to the authenticated account, as decoded JSON."""
    request_headers = {
        "Accept": "application/json",
        "X-Auth-Token": auth_token
    }
    response = requests.get(url=base_url + '/resources-simple', headers=request_headers)
    return response.json()
# Snooze a schedule, which temporarily overrides it
def pmc_snooze_schedule(auth_token, item_ids, hours):
    """Temporarily override ("snooze") the schedules of the given resource
    IDs for *hours* hours and return the decoded JSON response."""
    request_headers = {
        "Content-Type": "application/json",
        "Accept": "application/json",
        "X-Auth-Token": auth_token
    }
    payload = {
        "item_ids": item_ids,
        "snooze_period": hours,
        "timezone": "America/New_York"
    }
    response = requests.put(url=base_url + '/resources/snooze', headers=request_headers, json=payload)
    return response.json()
if __name__ == "__main__":
    # Python 2 script (print statements, dict.has_key below).
    # 1. Login to the API to get an auth token (POST to /auth)
    auth_token = get_pmc_api_auth()
    # 2. Get a list of all resources in your account (GET to /resources-simple)
    resources_json = get_pmc_resources(auth_token)
    # 3. Map the configured instance names to their PMC numeric IDs
    item_ids = []
    for item in resources_json['items']:
        if item['name'] in override_these_instance_names:
            print "Adding item to override list: "+str(item['name'])
            item_ids.append(int(item['id']))
    # 4. Use that list of instance IDs to snooze the schedules (PUT to /resources/snooze)
    hours = 2
    snooze_response = pmc_snooze_schedule(auth_token, item_ids, hours)
    if snooze_response.has_key('snooze_until'):
        print "Item schedules will override until "+str(snooze_response['snooze_until'])
    else:
        print "No items found for override"
| true |
b8e0333788aa3365bd6cb1b6240665aa80611795 | Python | whanke/MSc | /Demos/genim_word2vec.py | UTF-8 | 2,089 | 2.84375 | 3 | [] | no_license | """
https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/Corpora_and_Vector_Spaces.ipynb
"""
import logging
# logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
import os
import tempfile
# Temporary folder where the dictionary and corpus files are persisted.
TEMP_FOLDER = tempfile.gettempdir()
print('Folder "{}" will be used to save temporary dictionary and corpus.'.format(TEMP_FOLDER))
from gensim import corpora
# Tiny toy corpus (the classic Deerwester LSA example documents).
documents = ["Human machine interface for lab abc computer applications",
             "A survey of user opinion of computer system response time",
             "The EPS user interface management system",
             "System and human system engineering testing of EPS",
             "Relation of user perceived response time to error measurement",
             "The generation of random binary unordered trees",
             "The intersection graph of paths in trees",
             "Graph minors IV Widths of trees and well quasi ordering",
             "Graph minors A survey"]
# remove common words and tokenize
stoplist = set('for a of the and to in'.split())
texts = [[word for word in document.lower().split() if word not in stoplist]
         for document in documents]
# remove words that appear only once in the whole corpus
from collections import defaultdict
frequency = defaultdict(int)
for text in texts:
    for token in text:
        frequency[token] += 1
texts = [[token for token in text if frequency[token] > 1] for text in texts]
from pprint import pprint  # pretty-printer
pprint(texts)
# Build the token -> id mapping and persist it.
dictionary = corpora.Dictionary(texts)
dictionary.save(os.path.join(TEMP_FOLDER, 'deerwester.dict'))  # store the dictionary, for future reference
print(dictionary)
print(dictionary.token2id)
# Convert an unseen document to bag-of-words vector space.
new_doc = "Human computer interaction"
new_vec = dictionary.doc2bow(new_doc.lower().split())
print(new_vec)  # the word "interaction" does not appear in the dictionary and is ignored
# Vectorize the whole corpus and serialize it in Matrix Market format.
corpus = [dictionary.doc2bow(text) for text in texts]
corpora.MmCorpus.serialize(os.path.join(TEMP_FOLDER, 'deerwester.mm'), corpus)  # store to disk, for later use
for c in corpus:
    print(c)
| true |
e26068b03e14e29e6b6fd317058aac0cfbec3acb | Python | AbimaelSB/ZerinhoOuUmSocket | /ServidorUDP.py | UTF-8 | 3,220 | 2.78125 | 3 | [] | no_license | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 5 17:05:30 2018
@author: abimaelsb
"""
import socket
# Python 2 UDP server for a multiplayer "zero or one" (odds/evens) game.
# lista    : addresses of every client that has ever contacted the server
# players  : flat list alternating [address, guess, address, guess, ...]
# zero/um  : guesses grouped by value ("0" vs "1")
lista = []
players = []
zero = []
um = []
venc = "empate"
palp = "empate"
HOST = 'localhost'
PORT = 15000
n = 0
j = 0
aux = 0
S_udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
end = (HOST, PORT)
S_udp.bind(end)
print "Rodando Servidor! \n"
# Phase 1: accept connections until some client sends the desired player
# count (any integer message > 2), which becomes j.
while True:
    msg, cliente = S_udp.recvfrom(1024)
    if cliente not in lista:
        lista.append(cliente)
        aux += 1
    if msg == "Conectando":
        S_udp.sendto(str(aux), cliente)
    elif msg != "Conectando":
        if int(msg) > 2:
            j = int(msg)
            break
# Phase 2: collect one guess ("0" or "1") from each of the j players, then
# decide the round: the player whose guess is unique wins, otherwise a draw.
while True:
    if j > 2:
        if len(lista) >= j:
            # Tell the first j clients they are in; reject any extras.
            for i in range(len(lista)):
                if i < j:
                    S_udp.sendto("ok", lista[i])
                else:
                    S_udp.sendto("erro", lista[i])
    msg, cliente = S_udp.recvfrom(1024)
    if len(lista) < j:
        if cliente not in lista:
            lista.append(cliente)
    if msg == "Conectando":
        S_udp.sendto(str(aux), cliente)
    elif msg == "0" or msg == "1":
        n += 1
        players.append(cliente)
        players.append(msg)
        if n < j:
            S_udp.sendto("Aguardando Demais Jogadores", cliente)
        else:
            if n == j:
                # Every guess is in: bucket them by value.
                for i in range(0, len(players), +2):
                    if int(players[i+1]) == 0:
                        zero.append(players[i+1])
                    elif int(players[i+1]) == 1:
                        um.append(players[i+1])
                # Exactly one "0" (or one "1") means that player is unique -> wins.
                if len(zero) == 1:
                    for i in range(0, len(players), +2):
                        if players[i+1] == zero[0]:
                            venc = players[i]
                            palp = 0
                            S_udp.sendto("Você venceu", venc)
                        else:
                            S_udp.sendto("Você perdeu", players[i])
                elif len(um) == 1:
                    for i in range(0, len(players), +2):
                        if players[i+1] == um[0]:
                            venc = players[i]
                            palp = 1
                            S_udp.sendto("Você venceu", venc)
                        else:
                            S_udp.sendto("Você perdeu", players[i])
                else:
                    # No unique guess: broadcast the draw result ("empate").
                    for i in range(0, len(players), +2):
                        S_udp.sendto(venc, players[i])
                print "------------- Tabela de Palpites ----------------"
                for i in range(0, len(players), +2):
                    print "Jogador: ", players[i], "Palpite: ", players[i+1]
                print "Vencedor: ", venc, "Palpite: ", palp
                print
                break
    else:
        # Game size not yet configured; keep the early guessers waiting.
        for i in range(0, len(players),+2):
            S_udp.sendto("Aguardando Mais Jogadores", players[i])
S_udp.close()
| true |
b4aad71d0c53fc0e8feaec556b0bf46546348a93 | Python | mulberry11/python | /PythonSpider/spider/bs4WangYiYun.py | UTF-8 | 1,388 | 2.796875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 13 12:53:20 2018
@author: Administrator
"""
# Crawler that scrapes NetEase Cloud Music playlist listings
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import urllib.request
import urllib
# Fetch a web page and return its decoded body
def gethtml(url, headers=None):
    """Fetch *url* over HTTP and return the response body decoded as UTF-8.

    headers: optional dict of extra HTTP request headers.
    """
    # `headers=None` avoids the shared-mutable-default pitfall of the
    # original `headers={}` signature (backward compatible for callers).
    req = urllib.request.Request(url, headers=headers or {})
    # The context manager guarantees the connection is closed even if
    # read()/decode() raises, unlike the original explicit close().
    with urllib.request.urlopen(req) as response:
        return response.read().decode('utf-8')
# Parse the playlist-listing page and print each playlist's details
def parsehtmlMusicList(html):
    """Parse the NetEase playlist-discovery HTML and print, for each playlist:
    cover image URL, name + link, play count, and author + author page.
    """
    soup = BeautifulSoup(html, 'lxml')
    # The four selectors walk the same <li> entries, so the lists below are
    # expected to be index-aligned (one element per playlist).
    list_pic = soup.select('ul#m-pl-container li div img')
    list_nameUrl = soup.select('ul#m-pl-container li div a.msk')
    list_num = soup.select('div.bottom span.nb')
    list_author = soup.select('ul#m-pl-container li p a')
    n = 0
    length = len(list_pic)
    while n < length:
        print('歌单图片:'+list_pic[n]['src']+'\n\n')
        print('歌单名称:'+list_nameUrl[n]['title']+'\n\n歌单地址:'+list_nameUrl[n]['href']+'\n\n')
        print('歌单播放量:'+list_num[n].text+'\n\n')
        print('歌单作者:'+list_author[n]['title']+'\n\n作者主页:'+list_author[n]['href']+'\n\n\n')
        n += 1
# Fetch the playlist discovery page and render every playlist found on it.
# (The original reused the `url` variable to hold the fetched HTML, which
# made the call below look like it received a URL.)
list_url = 'http://music.163.com/discover/playlist'
html = gethtml(list_url, headers={
    'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)',
    'Host': 'music.163.com'
})
parsehtmlMusicList(html)
bc8c15ab8d4d220b60d9e423d24bdf453b0ebbf7 | Python | dwkang707/BOJ | /python3/(2953)BOJ.py | UTF-8 | 290 | 2.9375 | 3 | [] | no_license | # https://www.acmicpc.net/problem/2953
# https://www.acmicpc.net/problem/2953 -- cooking-contest winner.
# Each of the 5 contestants has 4 judge scores on one input line; print the
# 1-based index of the highest-scoring contestant and that total. The strict
# ">" comparison keeps the earlier contestant on ties, exactly like the
# original strict "<" check. (The original also shadowed the builtin `max`
# and kept every score row in memory; neither is needed.)
best_score = 0
winner = 0
for contestant in range(5):
    total = sum(map(int, input().split()))
    if total > best_score:
        best_score = total
        winner = contestant + 1
print(winner, best_score)
| true |
266b09ebfeed2997c2e19998e7169205708396b4 | Python | SSL-Roots/CON-SAI | /decision_making/scripts/plays/play_book.py | UTF-8 | 1,628 | 2.53125 | 3 | [
"MIT"
] | permissive |
from play_halt import PlayHalt
from play_outside import PlayOutside
from play_stop import PlayStop
from play_our_pre_kickoff import PlayOurPreKickoff
from play_our_kickoff_start import PlayOurKickoffStart
from play_our_pre_penalty import PlayOurPrePenalty
from play_our_penalty_start import PlayOurPenaltyStart
from play_force_start import PlayForceStart
from play_inplay import PlayInPlay
from play_indirect import PlayIndirect
from play_direct import PlayDirect
from play_their_pre_kickoff import PlayTheirPreKickoff
from play_their_kickoff_start import PlayTheirKickoffStart
from play_their_indirect import PlayTheirIndirect
from play_their_direct import PlayTheirDirect
from play_their_pre_penalty import PlayTheirPrePenalty
from play_their_penalty_start import PlayTheirPenaltyStart
from play_inplay_our_defence import PlayInPlayOurDefence
from play_inplay_their_defence import PlayInPlayTheirDefence
class PlayBook(object):
    # Static registry of every Play the decision layer can choose from.
    # The plays are instantiated once, at import time, in priority order.
    book = [
        PlayHalt(),
        PlayOutside(),
        PlayStop(),
        PlayOurPreKickoff(),
        PlayOurKickoffStart(),
        PlayOurPrePenalty(),
        PlayOurPenaltyStart(),
        PlayForceStart(),
        PlayInPlay(),
        PlayIndirect(),
        PlayDirect(),
        PlayTheirPreKickoff(),
        PlayTheirKickoffStart(),
        PlayTheirIndirect(),
        PlayTheirDirect(),
        PlayTheirPrePenalty(),
        PlayTheirPenaltyStart(),
        PlayInPlayOurDefence(),
        PlayInPlayTheirDefence(),
    ]
| true |
82300e4302cc6bc1ae405ee903bf079266cab04a | Python | ashish3x3/competitive-programming-python | /Hackerrank/Maths/sum_of_nC0_to_nCN.py | UTF-8 | 310 | 3.140625 | 3 | [] | no_license | # https://www.hackerrank.com/challenges/diwali-lights
'''
For n >= 1, derive the identity
nC0 + nC1 + nC2 + ... + nCn = 2^n
[Hint: Let a = b = 1 in the binomial theorem]
nCn = 1 and nC0 = 1.
nCr = nC(n - r)
'''
# Answer per test case: nC1 + ... + nCn = 2^N - 1, taken mod 100000.
T = int(raw_input())
for _ in xrange(T):
    N = int(raw_input())
    # Three-argument pow does modular exponentiation in O(log N) multiplies
    # instead of materialising the full 2**N big integer first; since
    # 2**N >= 1, (pow(2,N,M) - 1) % M == (2**N - 1) % M.
    print (pow(2, N, 100000) - 1) % 100000
| true |
1a1e27c03bd266b576df56789270186e66ac1205 | Python | yafeile/Simple_Study | /Simple_Python/standard/fnmatch/fnmatch_3.py | UTF-8 | 219 | 2.578125 | 3 | [] | no_license | import fnmatch
import os
import pprint
# Shell-style glob for sibling example scripts in the current directory.
pattern="fnmatch_*.py"
files=os.listdir(".")
print
print "Files:"
pprint.pprint(files)
print "-"*20
# fnmatch.filter keeps only the names matching the glob pattern.
print "Matches:"
pprint.pprint(fnmatch.filter(files,pattern))
| true |
d8480575855b89a2740da6ed5ec52a784848b168 | Python | Minecraftschurli/myWebsite | /libs/face_detection.py | UTF-8 | 2,013 | 2.796875 | 3 | [] | no_license | import platform
from cv2 import cv2
# BGR color triples (OpenCV channel order) used when drawing boxes.
COLOR = {'WHITE': [255, 255, 255], 'BLUE': [255, 0, 0], 'GREEN': [0, 255, 0], 'RED': [0, 0, 255], 'BLACK': [0, 0, 0]}
# Deployment-specific location of the model files (Raspberry Pi vs dev PC).
if platform.system() == 'Linux':
    directory = "/home/pi/webapp/libs"
else:
    directory = "C:/Users/georg/PycharmProjects/website/libs"
modelFile = directory + "/models/opencv_face_detector_uint8.pb"
configFile = directory + "/models/opencv_face_detector.pbtxt"
# TensorFlow face detector loaded once at import time and reused per frame.
nn = cv2.dnn.readNetFromTensorflow(modelFile, configFile)
# Minimum detection confidence for a box to be accepted.
conf_threshold = 0.7
def draw_box(image, x, y, w, h, color=None):
    """Draw corner brackets (not a full rectangle) around the box (x, y, w, h).

    Each corner gets two segments of length w/5 and h/5, drawn in-place on
    `image` with thickness 2. `color` is a BGR triple; defaults to white.
    """
    if color is None:
        color = COLOR['WHITE']
    # Top edge: left and right fifths.
    cv2.line(image, (x, y), (x + int(w / 5), y), color, 2)
    cv2.line(image, (x + int((w / 5) * 4), y), (x + w, y), color, 2)
    # Upper parts of the left and right edges.
    cv2.line(image, (x, y), (x, y + int(h / 5)), color, 2)
    cv2.line(image, (x + w, y), (x + w, y + int(h / 5)), color, 2)
    # Lower parts of the left and right edges, and the bottom edge fifths.
    cv2.line(image, (x, (y + int(h / 5 * 4))), (x, y + h), color, 2)
    cv2.line(image, (x, (y + h)), (x + int(w / 5), y + h), color, 2)
    cv2.line(image, (x + int((w / 5) * 4), y + h), (x + w, y + h), color, 2)
    cv2.line(image, (x + w, (y + int(h / 5 * 4))), (x + w, y + h), color, 2)
def detect_face_open_cv_dnn(frame):
    """Run the DNN face detector on a BGR frame.

    Returns (annotated_copy, bboxes): a copy of the frame with green corner
    brackets drawn on every accepted detection, and the list of boxes as
    [x1, y1, x2, y2] in pixel coordinates. The input frame is not modified.
    """
    global nn
    frame_opencv_dnn = frame.copy()
    frame_height = frame_opencv_dnn.shape[0]
    frame_width = frame_opencv_dnn.shape[1]
    # Mean subtraction values [104, 117, 123]; no resize/scale/crop applied
    # (size=None) -- presumably the caller feeds frames the net can handle.
    blob = cv2.dnn.blobFromImage(frame_opencv_dnn, 1.0, None, [104, 117, 123], False, False)
    nn.setInput(blob)
    detections = nn.forward()
    bboxes = []
    for i in range(detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence > conf_threshold:
            # Detector outputs are normalised [0, 1]; scale to pixels.
            x1 = int(detections[0, 0, i, 3] * frame_width)
            y1 = int(detections[0, 0, i, 4] * frame_height)
            x2 = int(detections[0, 0, i, 5] * frame_width)
            y2 = int(detections[0, 0, i, 6] * frame_height)
            bboxes.append([x1, y1, x2, y2])
            draw_box(frame_opencv_dnn, x1, y1, x2 - x1, y2 - y1, COLOR['GREEN'])
    return frame_opencv_dnn, bboxes
| true |
bfd61ea6cd9db1cb8f986623c4fb79b8380fa1db | Python | uosmandy/CP3_Krit-Nawaritloha | /Exercise4_Krit_N.py | UTF-8 | 285 | 2.96875 | 3 | [] | no_license | FE = 60.5
# Course scores (FE, Foundation English, is defined just above).
GB = 80.4
IC = 25.0
CP = 60.2
# Simple report-card printout of the four course scores.
print("---------------")
print("Score System")
print("---------------")
print("--Your Score--")
print("Foundation English :", FE)  # fixed typo: "Foudation" -> "Foundation"
print("General Business :", GB)
print("Introduction to Computer Systems :", IC)
print("Computer Programming :", CP)
c441386d3c69dad754b3e719c10ab536f9e5a8e4 | Python | ukonline/CodeExamples | /python/PythonOptimisation/chapter2/itertools-module.py | UTF-8 | 520 | 3.4375 | 3 | [] | no_license | # Computing a cartesian product itertools.product
# Auteur : Sébastien Combéfis
# Version : October 11, 2020
from itertools import product
import timeit
REPEATS = 100
def pairs_1(a, b):
    """Cartesian product of a and b, built with an explicit accumulator loop."""
    pairs = []
    for left in a:
        for right in b:
            pairs.append((left, right))
    return pairs
def pairs_2(a, b):
    """Cartesian product of a and b, delegated to itertools.product."""
    return [*product(a, b)]
def measure_time(name, params):
    """Print the mean per-call runtime, in milliseconds, of `name + params`.

    NOTE(review): the timeit setup imports `name` from __main__, so this
    helper only works when the file is executed as a script, not imported.
    """
    t = timeit.Timer(name + params, f'from __main__ import {name}')
    print(t.timeit(REPEATS) / REPEATS * 1000)
# Benchmark both implementations on a 1000 x 1000 cartesian product.
params = '(range(1000), range(1000))'
measure_time('pairs_1', params)
measure_time('pairs_2', params)
| true |
d7fe3b8968cf3e02733747eebd4c6abfe741478c | Python | Kcpf/DesignSoftware | /Bairro_mais_custoso.py | UTF-8 | 1,439 | 3.90625 | 4 | [] | no_license | """
Sua empresa possui filiais em diversas regiões da cidade e você precisa fazer uma análise simples dos gastos com infraestrutura em cada bairro. Os gastos com infraestrutura nos últimos 12 meses para cada bairro estão disponíveis em um dicionário como o apresentado a seguir (atenção, este é somente um exemplo):
{
'Bairro 1': [1234.45, 5123.32, 6134.35, 8567.98, 5472.28, 9715.38, 1380.15, 2569.42, 8459.24, 8351.25, 4082.19, 1750.16],
'Bairro 2': [236.62, 845.52, 475.72, 846.22, 735.34, 846.26, 48.97, 624.37, 375.46, 4568.76, 73.32, 475.74],
'Bairro 3': [51234.45, 5123.32, 61334.35, 8567.98, 5472.28, 9715.38, 1380.15, 2569.42, 8459.24, 82351.25, 4082.19, 1750.16],
}
Nesse dicionário, as chaves são os nomes dos bairros e os valores são listas com exatamente 12 números, representando o gasto com infraestrutura em cada mês para o respectivo bairro.
Faça uma função que recebe um dicionário de gastos com infraestrutura e devolve o nome do bairro com maior gasto com infraestrutura nos últimos 6 meses. Utilize a função desenvolvida no Exercício 166.
O nome da sua função deve ser bairro_mais_custoso.
"""
def total_do_semestre_por_bairro(dic):
    """Map each bairro name to the sum of its last six monthly expense values."""
    return {bairro: sum(gastos[-6:]) for bairro, gastos in dic.items()}
def bairro_mais_custoso(dic):
    """Return the bairro whose infrastructure spending over the last six
    months is highest.

    dic maps bairro name -> list of 12 monthly expense values.
    The original rebuilt a dict from the sorted items only to take the last
    key; this version keeps the same result (on ties, the last-inserted
    maximum wins, because the sort is stable) without the detour, and no
    longer depends on the sibling helper.
    """
    totals = {bairro: sum(gastos[-6:]) for bairro, gastos in dic.items()}
    # Ascending sort by total; the final entry is the most expensive bairro.
    ranking = sorted(totals.items(), key=lambda item: item[1])
    return ranking[-1][0]
c623d6a077576b1b3c008538535265086c90e6b0 | Python | webdev3211/News-Reader-App | /main.py | UTF-8 | 3,341 | 2.84375 | 3 | [
"MIT"
] | permissive |
from bs4 import BeautifulSoup
import requests
import nltk
from nltk import corpus
import re
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import os
import cv2
def exact_url(url):
    """Return `url` truncated just after the first ".html" occurrence.

    Strips trailing query strings/fragments from NYT article URLs.
    Bug fix: when ".html" is absent, str.find returns -1 and the original
    returned the garbage prefix url[:4]; now the URL is returned unchanged.
    """
    marker = url.find(".html")
    if marker == -1:
        return url
    return url[:marker + len(".html")]
url = "https://www.nytimes.com/2019/02/03/world/middleeast/pope-francis-uae-mideast-muslims.html"
news_url = exact_url(url)
source = requests.get(news_url).text
soup = BeautifulSoup(source, 'lxml')
# print(soup.prettify().encode('utf-8'))
# Concatenate the article paragraphs (class is NYT-specific and may change).
# NOTE(review): `str` shadows the builtin str() for the rest of the module.
str = ""
for para in soup.find_all('p', attrs = {'class':'css-1ygdjhk evys1bk0'}):
    str = str + para.text
# print(str)
# Keep only alphanumerics and periods so sentence tokenisation stays clean.
article_text = re.sub('[^A-Za-z0-9.]+', ' ', str)
# newstr = re.sub(r'\s+', ' ', str)
# print(article_text)
print()
sentence_list = nltk.sent_tokenize(article_text)
stopwords = nltk.corpus.stopwords.words('english')
# Word frequencies over the whole article, stopwords excluded.
word_frequencies = {}
for word in nltk.word_tokenize(article_text):
    if word not in stopwords:
        if word not in word_frequencies.keys():
            word_frequencies[word] = 1
        else:
            word_frequencies[word] += 1
print('executed1')
# Normalise frequencies to [0, 1] relative to the most common word.
maximum_frequncy = max(word_frequencies.values())
for word in word_frequencies.keys():
    word_frequencies[word] = (word_frequencies[word]/maximum_frequncy)
print('executed2')
# Score each sentence (shorter than 30 words) as the sum of its word weights.
sentence_scores = {}
for sent in sentence_list:
    for word in nltk.word_tokenize(sent.lower()):
        if word in word_frequencies.keys():
            if len(sent.split(' ')) < 30:
                if sent not in sentence_scores.keys():
                    sentence_scores[sent] = word_frequencies[word]
                else:
                    sentence_scores[sent] += word_frequencies[word]
print('executed3')
print()
print()
import heapq
from google_images_download import google_images_download
response = google_images_download.googleimagesdownload()
# The summary is the 10 highest-scoring sentences.
summary_sentences = heapq.nlargest(10, sentence_scores, key=sentence_scores.get)
print(summary_sentences)
def downloadimages(sent):
    """Download up to 4 Google-image results for `sent` into ./downloads/<sent>/.

    Uses the module-level `response` downloader; network I/O, may raise.
    """
    arguments = {"keywords": sent ,"limit":4,"print_urls":True}
    response.download(arguments)
# summary = ' '.join(summary_sentences)
# print(summary)
# Fetch illustrative images for every summary sentence; download failures
# are reported but do not stop the loop (best-effort).
for sent in summary_sentences:
    print(sent)
    try:
        downloadimages(sent)
    except:
        print("some error")
    print()
# One subfolder per summary sentence was created under ./downloads.
subfolders = [f.path for f in os.scandir("downloads") if f.is_dir() ]
def exact_name(name):
    """Return the folder's own name, stripping the leading "downloads" parent path."""
    # os.path.basename is separator-aware, unlike the original hard-coded
    # name[10:] slice (which assumed exactly "downloads/" as the prefix).
    return os.path.basename(name)
# For every downloaded image: resize to 1000px width (preserving aspect
# ratio), convert to RGB, stamp the sentence as a red caption, and save a
# flat copy into ./newfolder.
for i in subfolders:
    caption = exact_name(i)
    for f in os.listdir(i):
        img = Image.open(os.path.join(i, f))
        basewidth = 1000
        wpercent = (basewidth/float(img.size[0]))
        hsize = int((float(img.size[1])*float(wpercent)))
        img = img.resize((basewidth,hsize), Image.ANTIALIAS)
        if not img.mode == 'RGB':
            img = img.convert('RGB')
        # Captioning is best-effort: missing font (arial.ttf) just skips it.
        try:
            draw = ImageDraw.Draw(img)
            # font = ImageFont.truetype(<font-file>, <font-size>)
            font = ImageFont.truetype("arial.ttf", 15)
            # draw.text((x, y),"Sample Text",(r,g,b))
            draw.text((50,50), caption ,(255,0,0),font=font, align="center")
        except:
            pass
        # NOTE(review): "newfolder" must already exist or save() raises.
        img.save("newfolder/{}".format(f))
        # img.show()
print('done')
| true |
53832364ac44487765e854fe54cbbc4e0130c6bf | Python | Nittilina/uu-aspp2020-python-project | /render.py | UTF-8 | 1,470 | 3.078125 | 3 | [] | no_license | from models import ExcitedState, Transition
from terminaltables import SingleTable
from typing import List
def render_excited_states(states):
    """
    Accepts a list of excited states with associated data and prints it in the terminal as a table.
    """
    #titles = ["State", "E (eV)", "f", "Sym", "Orbitals", " Coeffs", "Transition\ntype", "Total\nconfiguration", "Comments "]
    titles = ["State", "E (eV)", "f", "Sym", "Orbitals", " Coeffs"]
    # One table row per state, in the same column order as `titles`.
    rows = [ es_to_row(state) for state in states ]
    table_data = [titles] + rows
    table = SingleTable(table_data)
    # Horizontal rules between rows; no vertical rules between columns.
    table.inner_row_border = True
    table.inner_column_border = False
    #table.justify_columns[6] = 'center'
    #table.justify_columns[7] = 'center'
    print(table.table)
# Builds the list of table cells for one ExcitedState, matching the column
# order used by render_excited_states.
def es_to_row(state: ExcitedState):
    orbitals, coeffs = render_transitions(state.transits)
    return [
        f'{state.mult}{state.index}',   # e.g. "S1" / "T2"
        state.energy,
        "%.4f" % state.osc_str,         # oscillator strength, 4 decimals
        state.sym,
        orbitals,
        coeffs,
    ]
# Builds the multi-line "Orbitals" and "Coeffs" cells for one state's
# transitions (one line per transition; positive coefficients get a leading
# space so the signs line up).
def render_transitions(transitions: List[Transition]):
    orbital_lines = []
    coeff_lines = []
    for t in transitions:
        orbital_lines.append(f'{t.from_orb} -> {t.to_orb}')
        sign_pad = " " if t.coeff > 0 else ""
        coeff_lines.append(f'{sign_pad}{"%.5f" % t.coeff}')
    return "\n".join(orbital_lines), "\n".join(coeff_lines)
6ba880850d6944e2504cc65b4405119f6d009218 | Python | elcomcot/CRUD-Application | /Assignment4/Readit.py | UTF-8 | 7,893 | 3.125 | 3 | [] | no_license | '''
Filename: Assignment 4.
Author: Tejveer Singh
Course Name: Programming Language Research Project
Course Number: CST8333
Lab Sec #: 351
Exercise Number: 3
Professors Name: Stanley Pieda.
'''
import dataBase
import threading
from tkinter import *
import tkinter.messagebox
# Shorthand alias for the DAO class; all DB calls below go through it.
d = dataBase.dataBaseClass
# 1-based id of the record currently shown in the form.
count = 1
try:
    # Preload the first record so the form can be populated at startup.
    ar = d.selctRowFromMysql(count)[0]
except:
    # Table not created yet -- the user must press "Start Application" first.
    print("Load Data First")
def buttonClick1():
    """Delete-button handler: remove the currently displayed record by id."""
    print("I am delete button")
    print(ar[0])
    d.deleteRowInDb(ar[0])
def updateClick1():
    """Update-button handler: write all 16 entry values back to the current
    record, then reload and redisplay it."""
    global ar,count
    d.updateRowInDb(ar[0],entry1.get(),entry2.get(),entry3.get(),entry4.get(),entry5.get(),entry6.get(),entry7.get(),entry8.get(),entry9.get(),entry10.get(),entry11.get(),entry12.get(),entry13.get(),entry14.get(),entry15.get(),entry16.get())
    ar = d.selctRowFromMysql(count)[0]
    insertionInEntryNull()
    insertionInEntry()
    print("Updated")
def searchIt():
    """Search-button handler: jump to the DB record whose id is typed into
    the search box, refresh the form with it and remember it as the current
    position; on any failure print a message and leave the form unchanged.
    """
    global ar, count
    try:
        # Bug fix: int() used to run *outside* the try block, so non-numeric
        # input crashed instead of reaching the error message below.
        value = int(entrySpecific.get())
        print(value)
        ar = dataBase.dataBaseClass.selctRowFromMysql(value)[0]
        count = value
        insertionInEntryNull()
        insertionInEntry()
    except Exception:
        print("Enter an integer please")
def buttonClick2():
    """Unused placeholder handler."""
    print("I am button3 button")
def buttonClick3():
    """Start-Application handler: create/load the DB table on a background
    thread so the Tk mainloop stays responsive, and tell the user to wait."""
    # global ar
    # dataBase.dataBaseClass.createTableWithDataInDataBase()# class method
    # ar = dataBase.dataBaseClass.selctRowFromMysql(2)[0]
    # insertionInEntryNull()
    # insertionInEntry()
    entrySpecific.delete(0, 'end')
    entrySpecific.insert(0, "Please Wait Loading...")
    try:
        t = threading.Thread(target=rundb)
        t.start()
    except:
        print("thread not running")
    MessageBoxForInfo()
def rundb():
    """Worker run on the background thread: build the table, then load and
    display the current record."""
    global ar,count
    dataBase.dataBaseClass.createTableWithDataInDataBase() # class method
    ar = dataBase.dataBaseClass.selctRowFromMysql(count)[0]
    insertionInEntryNull()
    insertionInEntry()
def blankIt():
    """BlankIt-button handler: clear every entry field."""
    insertionInEntryNull()
def previous():
    """Previous-button handler: step back one record (clamped at id 1)."""
    print("I am previous button")
    global count, ar
    # Clamp: setting count to 2 here makes the decrement below land on 1.
    if count <= 1:
        count = 2
    count = count - 1
    try:
        ar = dataBase.dataBaseClass.selctRowFromMysql(count)[0]
    except:
        insertionInEntryNull()
        print("entry does not exists")
    entrySpecific.delete(0, 'end')
    entrySpecific.insert(0, count)
    insertionInEntryNull()
    insertionInEntry()
def Next():
    """Next-button handler: step forward one record (no upper clamp)."""
    print("I am next button")
    global count, ar
    count = count + 1
    try:
        ar = dataBase.dataBaseClass.selctRowFromMysql(count)[0]
    except:
        insertionInEntryNull()
        print("entry does not exists")
    entrySpecific.delete(0,'end')
    entrySpecific.insert(0, count)
    insertionInEntryNull()
    insertionInEntry()
def InsertIt():
    """Insert-button handler: create a new record from the 16 entry fields."""
    d.insertTheNewData( entry1.get(), entry2.get(), entry3.get(), entry4.get(), entry5.get(), entry6.get(),
                       entry7.get(), entry8.get(), entry9.get(), entry10.get(), entry11.get(), entry12.get(),
                       entry13.get(), entry14.get(), entry15.get(), entry16.get())
    print("Inserted")
def MessageBoxForInfo():
    """Modal info box telling the user the initial data load takes a while."""
    tkinter.messagebox.showinfo("Important Information", "Please Wait 30 sec so that data can load into database")
root = Tk()# main window
# Layout: topFrame holds the label/entry grid, two middle frames hold the
# navigation and insert/search rows, bottomFrame holds the action buttons.
topFrame = Frame(root) #upper frame
topFrame.pack()
middleFrame = Frame(root)
middleFrame.pack()
middleFrame2 = Frame(root)
middleFrame2.pack()
bottomFrame = Frame(root)# lower Frame
bottomFrame.pack(side=BOTTOM)
#button Declaration
button1 =Button(bottomFrame ,text="Update", command=updateClick1)
button2 =Button(bottomFrame, text="Delete", command=buttonClick1)
button3 =Button(bottomFrame, text="Quit",command=quit)
button4 =Button(bottomFrame, text="Start Application",command=buttonClick3,bg="red")#on click will load data
button1.pack(side=LEFT)
button2.pack(side=LEFT)
button3.pack(side=LEFT)
button4.pack(side=LEFT)
#button for middle
buttonNext = Button(middleFrame, text="Next", command=Next)
# entrySpecific doubles as the search box and the current-record indicator.
entrySpecific = Entry(middleFrame)
buttonPrevios = Button(middleFrame, text="Previous", command=previous)
buttonNext.pack(side=RIGHT)
entrySpecific.pack(side=RIGHT)
buttonPrevios.pack(side=RIGHT)
#button for middle2
buttonInsert = Button(middleFrame2, text="Insert", command=InsertIt)
buttonInsert.pack(side=RIGHT)
buttonSearch = Button(middleFrame2, text="Search", command=searchIt)
buttonSearch.pack(side=RIGHT)
buttonBlankIt = Button(middleFrame2, text="BlankIt", command=blankIt)
buttonBlankIt.pack(side=RIGHT)
#label declaration
# One label per CSV column, gridded in column 0 of topFrame.
label1 = Label(topFrame, text="REF_DATE")
label1.grid(row = 0, column = 0)
label2 = Label(topFrame, text="GEO")
label2.grid(row=1, column=0)
label3 = Label(topFrame, text="DGUID")
label3.grid(row=2, column=0)
label4 = Label(topFrame, text="Food categories")
label4.grid(row=3, column=0)
label5 = Label(topFrame, text="Commodity")
label5.grid(row=4, column=0)
label6 = Label(topFrame, text="UOM")
label6.grid(row=5, column=0)
label7 = Label(topFrame, text="UOM_ID")
label7.grid(row=6, column=0)
label8 = Label(topFrame, text="SCALAR_FACTOR")
label8.grid(row=7, column=0)
label81 = Label(topFrame, text="SCALAR_ID")
label81.grid(row=8, column=0)
label9 = Label(topFrame, text="VECTOR")
label9.grid(row=9, column=0)
label10 = Label(topFrame, text="COORDINATE")
label10.grid(row=10, column=0)
label11= Label(topFrame, text="VALUE")
label11.grid(row=11, column=0)
label12 = Label(topFrame, text="STATUS")
label12.grid(row=12, column=0)
label13 = Label(topFrame, text="SYMBOL")
label13.grid(row=13, column=0)
label14= Label(topFrame, text="TERMINATED")
label14.grid(row=14, column=0)
label15= Label(topFrame, text="DECIMALS")
label15.grid(row=15, column=0)
#entry Declaration
# One entry per column, gridded in column 1 beside its label.
entry1 = Entry(topFrame)
entry1.grid(row=0, column=1)
entry2 = Entry(topFrame)
entry2.grid(row=1, column=1)
entry3 = Entry(topFrame)
entry3.grid(row=2, column=1)
entry4 = Entry(topFrame)
entry4.grid(row=3, column=1)
entry5 = Entry(topFrame)
entry5.grid(row=4, column=1)
entry6 = Entry(topFrame)
entry6.grid(row=5, column=1)
entry7 = Entry(topFrame)
entry7.grid(row=6, column=1)
entry8 = Entry(topFrame)
entry8.grid(row=7, column=1)
entry9 = Entry(topFrame)
entry9.grid(row=8, column=1)
entry10 = Entry(topFrame)
entry10.grid(row=9, column=1)
entry11 = Entry(topFrame)
entry11.grid(row=10, column=1)
entry12 = Entry(topFrame)
entry12.grid(row=11, column=1)
entry13 = Entry(topFrame)
entry13.grid(row=12, column=1)
entry14 = Entry(topFrame)
entry14.grid(row=13, column=1)
entry15 = Entry(topFrame)
entry15.grid(row=14, column=1)
entry16 = Entry(topFrame)
entry16.grid(row=15, column=1)
def insertionInEntry():
    """Populate all 16 entry fields from the current record `ar`.

    ar[0] is the row id and is not displayed; ar[1..16] map to entry1..16.
    """
    entry1.insert(0, ar[1])
    entry2.insert(0, ar[2])
    entry3.insert(0, ar[3])
    entry4.insert(0, ar[4])
    entry5.insert(0, ar[5])
    entry6.insert(0, ar[6])
    entry7.insert(0, ar[7])
    entry8.insert(0, ar[8])
    entry9.insert(0, ar[9])
    entry10.insert(0, ar[10])
    entry11.insert(0, ar[11])
    entry12.insert(0, ar[12])
    entry13.insert(0, ar[13])
    entry14.insert(0, ar[14])
    entry15.insert(0, ar[15])
    entry16.insert(0, ar[16])
def insertionInEntryNull():
    """Clear all 16 entry fields and reset the search box to the current id."""
    entry1.delete(0, 'end')
    entry2.delete(0, 'end')
    entry3.delete(0, 'end')
    entry4.delete(0, 'end')
    entry5.delete(0, 'end')
    entry6.delete(0, 'end')
    entry7.delete(0, 'end')
    entry8.delete(0, 'end')
    entry9.delete(0, 'end')
    entry10.delete(0, 'end')
    entry11.delete(0, 'end')
    entry12.delete(0, 'end')
    entry13.delete(0, 'end')
    entry14.delete(0, 'end')
    entry15.delete(0, 'end')
    entry16.delete(0, 'end')
    entrySpecific.delete(0,'end')
    entrySpecific.insert(0, count)
# First-run guidance, then best-effort initial display before the mainloop.
tkinter.messagebox.showinfo("Important Information", "If you are running the app for the first time make sure that the credential for database in data.py class are correct then start the application with start application button")
try:
    insertionInEntry()
except:
    # `ar` is undefined when the startup DB load failed above.
    print("Load data first")
root.mainloop()
| true |
fd77fc1057a189e6409cfacc7eb3099fc4d87c0c | Python | fvesp18/AirBnB_clone | /console.py | UTF-8 | 7,563 | 2.84375 | 3 | [] | no_license | #!/usr/bin/python3
# Displays prompt to take in user input
import cmd
import sys
from models.base_model import BaseModel
from models.__init__ import storage
from models.user import User
from models.place import Place
from models.city import City
from models.review import Review
from models.state import State
from models.amenity import Amenity
class HBNBCommand(cmd.Cmd):
    # Interactive console for the AirBnB-clone project: create, show, update,
    # destroy and count model instances backed by the `storage` engine.
    # (Comments only inside the commands: the cmd framework introspects
    # do_*/help_* docstrings, so adding docstrings could change `help` output.)
    # Creates prompt as (hbnb)
    intro = ''
    prompt = '(hbnb) '
    file = None
    # Whitelist of model classes the console commands may instantiate.
    classes = {
        'BaseModel': BaseModel,
        'User': User,
        'Place': Place,
        'City': City,
        'Review': Review,
        'State': State,
        'Amenity': Amenity,
    }
    # Define method of objects
    def do_quit(self, arg):
        'Quit command to exit the program'
        print('')
        # Returning True tells cmd.cmdloop to stop.
        return True
    def do_EOF(self, arg):
        'Exits shell upon End of File'
        print('')
        return True
    def do_create(self, arg):
        'Creates a new instance of BaseModel, saves it and prints the id'
        # NOTE(review): "arg is ''" relies on CPython string interning;
        # "arg == ''" (or "not arg") is the safe comparison. The same
        # pattern repeats in the commands below.
        if arg is '':
            print("** class name missing **")
        elif arg not in HBNBCommand.classes:
            print("** class doesn't exist **")
        else:
            new = HBNBCommand.classes[arg]()
            storage.save()
            print("{}".format(new.id))
    def do_show(self, arg):
        'Prints the string rep of an instance based on class name and id'
        args = arg.split(" ")
        obj_dict = storage.all()
        if arg is '':
            print("** class name missing **")
        elif args[0] not in HBNBCommand.classes:
            print("** class doesn't exist **")
        elif len(args) < 2:
            print("** instance id missing **")
        else:
            # Storage keys have the form "<ClassName>.<id>".
            key = args[0] + '.' + args[1]
            try:
                print(obj_dict[key])
            except:
                print("** no instance found **")
    def do_destroy(self, arg):
        'Delete an object based on class name and id'
        args = arg.split(" ")
        if arg is '':
            print("** class name missing **")
        elif args[0] not in HBNBCommand.classes:
            print("** class doesn't exist **")
        elif len(args) < 2:
            print("** instance id missing **")
        else:
            key = args[0] + '.' + args[1]
            obj_dict = storage.all()
            try:
                del(obj_dict[key])
                storage.save()
            except:
                print("** no instance found **")
    def do_all(self, arg):
        'Prints all string reps of all instaces, with or without class'
        obj_dict = storage.all()
        if not arg:
            for key, value in obj_dict.items():
                print("{}".format(obj_dict[key]))
        else:
            # With a class name, print only that class's instances.
            for key, value in obj_dict.items():
                skey = key.split(".")
                if skey[0] == arg:
                    print("{}".format(obj_dict[key]))
    def do_update(self, arg):
        # Usage: update <class> <id> <attribute> <value>
        obj_dict = storage.all()
        args = arg.split(" ")
        if arg is '':
            print("** class name missing **")
        elif args[0] not in HBNBCommand.classes:
            print("** class doesn't exist **")
        elif len(args) < 2:
            print("** instance id missing **")
        else:
            # NOTE(review): this prints "** no instance found **" once per
            # non-matching object, and the inner loop matches on the id part
            # only (skey[1] == args[1]) regardless of class.
            for key, value in obj_dict.items():
                skey = key.split(".")
                if skey[0] != args[0]:
                    print("** no instance found **")
                else:
                    if len(args) < 3:
                        print("** attribute name missing **")
                    elif len(args) < 4:
                        print("** value missing **")
                    else:
                        for key, value in obj_dict.items():
                            skey = key.split(".")
                            if skey[1] == args[1]:
                                val = args[3]
                                # Strip surrounding quotes from the value.
                                updater = {args[2]: val.replace('"', '')}
                                (obj_dict[key].__dict__).update(updater)
                                storage.save()
    def do_count(self, arg):
        # Counts the stored instances of the given class name.
        obj_dict = storage.all()
        count = 0
        args = arg.split(" ")
        _class = args[0]
        if arg:
            for key, val in obj_dict.items():
                skey = key.split(".")
                if _class == skey[0]:
                    count += 1
            print(count)
    def default(self, line):
        # Handles the "<Class>.<method>(...)" call syntax by translating it
        # into the plain "<method> <Class> ..." commands above.
        if "." not in line:
            return cmd.Cmd.default(self, line)
        syntax = line.split(".")
        _class = syntax[0]
        method = syntax[1]
        obj_dict = storage.all()
        if _class in HBNBCommand.classes:
            if method[0:5] == 'all()':
                HBNBCommand.do_all(self, _class)
            if method[0:8] == 'count()':
                HBNBCommand.do_count(self, _class)
            # Splitting on '"' isolates the quoted id argument, if any.
            arg_split = method.split('"')
            method_id = arg_split[0]
            if method_id[0:5] == 'show(':
                class_id = arg_split[1]
                arg = _class + " " + class_id
                HBNBCommand.do_show(self, arg)
            if method_id[0:8] == 'destroy(':
                class_id = arg_split[1]
                arg = _class + " " + class_id
                HBNBCommand.do_destroy(self, arg)
            if method_id[0:7] == 'update(':
                arg_split2 = method.split(",")
                class_id = arg_split2[0].split("(")[1].replace('"', "")
                print(class_id)
                att_name = arg_split2[1].replace('"', "")
                print(att_name)
                att_val = arg_split2[2].replace(")", "")
                print(att_val)
                arg = _class + " " + class_id + " " + att_name[1:] + att_val
                print(arg)
                HBNBCommand.do_update(self, arg)
    def emptyline(self):
        'Empties last command'
        # Override cmd's default behaviour of repeating the last command.
        pass
    # Ovewrites help message
    def help_help(self):
        'Help message for help'
        print("Prints messages with information of command")
    # NOTE(review): this help_create is shadowed by the duplicate definition
    # of the same name further below.
    def help_create(self):
        'Creates instance of object'
        print("For a new instance of an obj saves it and prints id")
    def help_quit(self):
        'Help message for quit'
        print('Exits the shell')
    def help_EOF(self):
        'Help message for EOF'
        print('Upon end of file, exits shell')
    def help_create(self):
        'Help message for create'
        print('Creates a new instance of BaseModel,\
               saves it (to the JSON file) and\
               prints the id. Ex: $ create BaseModel')
    def help_show(self):
        'Help message for show'
        print('Prints the string representation of an instance\
               based on the class name and id.\
               Ex: $ show BaseModel 1234-1234-1234.')
    def help_destroy(self):
        'Help message for destroy'
        print('Deletes an instance based on the class name\
               and id (save the change into the JSON file).\
               Ex: $ destroy BaseModel 1234-1234-1234.')
    def help_all(self):
        'Help message for all'
        print('Prints all string representation of all\
               instances based or not on the class name.\
               Ex: $ all BaseModel or $ all.')
    def help_update(self):
        'Help message for update'
        print('Updates an instance based on the class name and \
               id by adding or updating attribute (save the change \
               into the JSON file). Ex: $ update BaseModel \
               1234-1234-1234 email "aibnb@holbertonschool.com".')
    def help_count(self):
        'Help message for count'
        print('retrieve the number of instances of a class')
if __name__ == '__main__':
    # Start the interactive loop only when run as a script, not on import.
    HBNBCommand().cmdloop()
| true |
8fd7de96837c08c993fb2b8c6a1fa1894ba1e5ee | Python | keshav2/RKB | /print hello n.py | UTF-8 | 80 | 3.6875 | 4 | [] | no_license | n=int(input("Input:"))
# Print "Hello" once per requested repetition (n was read just above).
print("Output:")
for _ in range(n):
    print("Hello")
| true |
f7aab600ea36ff6a52b6f9b098eae24f1ffda22c | Python | Zopek/bladder | /show/split_pos_neg_sizes_periods.py | UTF-8 | 2,805 | 2.5625 | 3 | [] | no_license | import os
import csv
def main():
    """Join the per-accession tumour-stage labels onto the positive slice
    records and write the result as a new CSV with a numeric stage column.

    NOTE(review): opening csv files in 'rb'/'wb' is Python 2 style; under
    Python 3 these would need mode 'r' / 'w' with newline=''.
    """
    record_path = '/DB/rhome/qyzheng/Desktop/Link to renji_data/labels/bladder_tags_period.csv'
    record_path1 = '/DB/rhome/qyzheng/Desktop/qyzheng/source/renji_data/process/dwi_t2w_t2wfs/all_sizes.csv'
    save_path = '/DB/rhome/qyzheng/Desktop/qyzheng/source/renji_data/process/dwi_t2w_t2wfs/all_sizes_periods.csv'
    # accession -> stage label (e.g. 'Ta', 'T1'); duplicates are rejected.
    periods = {}
    with open(record_path, 'rb') as f:
        reader = csv.reader(f)
        for item in reader:
            assert item[0] not in periods.keys()
            periods[item[0]] = item[1]
    with open(record_path1, 'rb') as f1:
        reader1 = csv.reader(f1)
        with open(save_path, 'wb') as f:
            writer = csv.writer(f)
            for item in reader1:
                # The accession id is the first path component of column 0.
                accession = item[0].split('/')[0]
                # Keep only labelled, positive (item[2] == '1') slices.
                if accession in periods.keys() and item[2] == '1':
                    # Map the stage label to an ordinal code 0..6.
                    # NOTE(review): an unrecognised label leaves `period`
                    # holding the value from the previous row.
                    if periods[accession] == 'Ta':
                        period = 0
                    elif periods[accession] == 'Tis':
                        period = 1
                    elif periods[accession] == 'T1':
                        period = 2
                    elif periods[accession] == 'T2':
                        period = 3
                    elif periods[accession] == '>=T2':
                        period = 4
                    elif periods[accession] == 'T3':
                        period = 5
                    elif periods[accession] == 'T4':
                        period = 6
                    slices = (item[0], item[1], item[2], item[3], item[4],
                        item[5], item[6], item[7], item[8], period)
                    writer.writerow(slices)
    '''
    if item[2] == '1':
        accession = item[0].split('/')[0]
        assert accession in periods.keys()
        if periods[accession] == 'Ta':
            period = 0
        elif periods[accession] == 'Tis':
            period = 1
        elif periods[accession] == 'T1':
            period = 2
        elif periods[accession] == 'T2':
            period = 3
        elif periods[accession] == '>=T2':
            period = 4
        elif periods[accession] == 'T3':
            period = 5
        elif periods[accession] == 'T4':
            period = 6
        slices = (item[0], item[1], item[2], item[3], item[4],
            item[5], item[6], item[7], item[8], period)
        writer.writerow(slices)
    else:
        continue
    '''
if __name__ == "__main__":
main() | true |
d260b71e8dff0dae1c829d8affe1240feea0363d | Python | eduardonp1/Prolux | /Concurso1/B.py | UTF-8 | 454 | 3.28125 | 3 | [] | no_license | estaciones = []
# Read the number of people, then one station number per person, into the
# `estaciones` list (first entry is read before the loop).
cantidadPersonas = int(input(" "))
estacion = int(input(" "))
estaciones.append(estacion)
i = 0
while i < cantidadPersonas-1:
    estacion = input(" ")
    estacion = int(estacion)
    estaciones.append(estacion)
    i += 1
# NOTE(review): `comparador = estaciones` aliases the same list object, it
# does NOT copy it -- both names see every mutation below.
comparador = estaciones
# NOTE(review): `numero` is assigned but never used.
numero = 0
#Puerta trasera
# NOTE(review): popping from `estaciones` while iterating over it (and over
# its alias) skips elements and is almost certainly not the intended logic;
# the file is an unfinished contest solution.
for z in estaciones:
    for x in comparador:
        if z < x:
            estaciones.pop(0)
        elif z > x:
            pass
# Dos puertas (unfinished: the two-door case was never implemented)
c4762ca2c625714c0e64fbe6f2a77a393e4a971f | Python | shubh24/MfoPaper | /code/bat knapsack/bat.py | UTF-8 | 1,682 | 2.640625 | 3 | [] | no_license | import math
import numpy as np
import matplotlib.pyplot as plt
from pylab import plot, legend, subplot, grid, xlabel, ylabel, show, title
import random
def func(u):
    """Sphere objective: the sum of the squared components of `u`."""
    return sum(np.square(u))
def simplebounds(s,lb,ub):
    """Clamp each component of `s` into [lb[i], ub[i]] in place and return it.

    Bug fix: the original iterated over range(d - 1), silently leaving the
    last dimension unclamped in both the lower- and upper-bound passes.
    """
    d = np.shape(s)[0]
    for i in range(d):
        if s[i] < lb[i]:
            s[i] = lb[i]
        elif s[i] > ub[i]:
            s[i] = ub[i]
    return s
# Bat-algorithm parameters: [population size, iterations, loudness A,
# pulse-rate range].
para = np.array([25, 1000, 1, 1])
n = int(para[0])
num_iter = int(para[1])
A = para[2]
r_range = para[3]
# Frequency range, pulse-rate decay and problem dimensionality.
Qmin = 0
Qmax = 2
gamma = 1
d = 30
# Search-space bounds: [-100, 100] in every dimension.
lb = -100*np.ones(d)
ub = +100*np.ones(d)
Q = np.zeros((n, 1))
r = np.random.rand(n,1)
v = np.zeros((n, d))
Sol = np.zeros((n, d))
S = np.zeros((n, d))
fitness = np.zeros((n, 1))
#print np.shape(Sol)[1]
# Random initial population, uniform within the bounds.
for i in range(int(n)):
    Sol[i, :] = lb + np.multiply((ub-lb), (np.random.rand(1,d)))
    x = Sol[i,:]
    fitness[i] = func(x)
I = np.argmin(fitness)
best = Sol[I,:]
fmin = fitness[I]
# Main loop: move each bat towards the current best with a random frequency,
# occasionally do a local random walk, and accept improvements greedily.
for t in range(num_iter):
    for i in range(n):
        Q[i] = Qmin + (Qmax - Qmin) * random.random()
        v[i,:] = v[i,:] + (Sol[i,:] - best)*Q[i]
        S[i,:] = Sol[i,:] + v[i,:]
        S[i,:] = simplebounds(S[i,:], lb, ub)
        # NOTE(review): `random.random > r[i]` compares the *function object*
        # to a float (missing call parentheses) -- almost certainly meant
        # random.random() > r[i].
        if random.random > r[i]:
            # NOTE(review): np.random.normal(1, d) draws ONE sample with
            # mean 1 and std d; a d-dimensional step would need size=d.
            S[i,:] = best + 0.95*np.random.normal(1,d)
        fnew = func(S[i,:])
        if(fnew <= fitness[i] and random.random() < A):
            r[i] = r[i]*(1 - math.exp(-gamma*num_iter))
            Sol[i,:] = S[i,:]
            fitness[i] = fnew
            if(fnew <= fmin):
                best = S[i,:]
                fmin = fnew
print Sol
print fitness[np.argmin(fitness)]
'''x = Sol[:,0]
y = Sol[:,1]
plt.scatter(x,y,z)
plt.show()'''
| true |
d1300c4612705e7dfd572f33c84c62e829a5ddd7 | Python | arpithaupd/Automatic-Covid-19-Classification-and-Segmentation | /Files/model_resnet.py | UTF-8 | 4,034 | 2.734375 | 3 | [
"MIT"
] | permissive | import numpy as np
import os
import skimage.io as io
import skimage.transform as trans
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as keras
from keras.backend import int_shape
from keras.models import Model
from keras.layers import Conv2D, Conv3D, MaxPooling2D, MaxPooling3D, UpSampling2D, UpSampling3D, Add, BatchNormalization, Input, Activation, Lambda, Concatenate
def res_unet(filter_root, depth, n_class=1, input_size=(256, 256, 1), activation='relu', batch_norm=True, final_activation='sigmoid'):
    """
    Build UNet model with ResBlock.
    Args:
        filter_root (int): Number of filters to start with in first convolution.
        depth (int): How deep to go in UNet i.e. how many down and up sampling you want to do in the model.
            Filter root and image size should be multiple of 2^depth.
        n_class (int, optional): Number of channels in the output layer. Defaults to 1.
        input_size (tuple, optional): Input image size. A length-3 tuple selects the
            2D layers, a length-4 tuple the 3D layers. Defaults to (256, 256, 1).
        activation (str, optional): activation to use in each convolution. Defaults to 'relu'.
        batch_norm (bool, optional): To use Batch normalization or not. Defaults to True.
        final_activation (str, optional): activation for output layer. Defaults to 'sigmoid'.
    Returns:
        obj: keras model object, already compiled with Adam(lr=1e-4),
        binary cross-entropy loss and accuracy metric.
    """
    inputs = Input(input_size)
    x = inputs
    # Dictionary for long (skip) connections from the encoder to the decoder
    long_connection_store = {}
    # Select 2D vs 3D layer classes from the input rank.
    # NOTE(review): if len(input_size) is neither 3 nor 4, Conv/MaxPooling/
    # UpSampling are never bound and the code below raises NameError.
    if len(input_size) == 3:
        Conv = Conv2D
        MaxPooling = MaxPooling2D
        UpSampling = UpSampling2D
    elif len(input_size) == 4:
        Conv = Conv3D
        MaxPooling = MaxPooling3D
        UpSampling = UpSampling3D
    # Down sampling: at each level, a residual block (1x1 skip + two 3x3 convs)
    for i in range(depth):
        out_channel = 2**i * filter_root
        # Residual/Skip connection
        res = Conv(out_channel, kernel_size=1, padding='same', use_bias=False)(x)
        # First Conv Block with Conv, BN and activation
        conv1 = Conv(out_channel, kernel_size=3, padding='same')(x)
        if batch_norm:
            conv1 = BatchNormalization()(conv1)
        act1 = Activation(activation)(conv1)
        # Second Conv block with Conv and BN only
        conv2 = Conv(out_channel, kernel_size=3, padding='same')(act1)
        if batch_norm:
            conv2 = BatchNormalization()(conv2)
        resconnection = Add()([res, conv2])
        act2 = Activation(activation)(resconnection)
        # Max pooling (skipped at the bottleneck level)
        if i < depth - 1:
            long_connection_store[str(i)] = act2
            # NOTE(review): pool_size=(2,2) is also passed to MaxPooling3D for
            # 4-D input_size -- confirm this is the intended 3D pooling shape.
            x = MaxPooling(pool_size=(2,2),padding='same')(act2)
        else:
            x = act2
    # Upsampling: mirror the encoder, concatenating the stored skip connections
    for i in range(depth - 2, -1, -1):
        out_channel = 2**(i) * filter_root
        # long connection from down sampling path.
        long_connection = long_connection_store[str(i)]
        up1 = UpSampling()(x)
        up_conv1 = Conv(out_channel, 2, activation='relu', padding='same' )(up1)
        # Concatenate along the channel axis.
        up_conc = Concatenate(axis=-1 )([up_conv1, long_connection])
        # Convolutions
        up_conv2 = Conv(out_channel, 3, padding='same')(up_conc)
        if batch_norm:
            up_conv2 = BatchNormalization()(up_conv2)
        up_act1 = Activation(activation)(up_conv2)
        up_conv2 = Conv(out_channel, 3, padding='same')(up_act1)
        if batch_norm:
            up_conv2 = BatchNormalization()(up_conv2)
        # Residual/Skip connection
        res = Conv(out_channel, kernel_size=1, padding='same', use_bias=False)(up_conc)
        resconnection = Add()([res, up_conv2])
        x = Activation(activation)(resconnection)
    # Final 1x1 convolution maps to n_class output channels
    output = Conv(n_class, 1, padding='same', activation=final_activation)(x)
    model= Model(inputs, outputs=output, name='Res-UNet')
    model.compile(optimizer = Adam(lr = 1e-4), loss = 'binary_crossentropy', metrics = ['accuracy'])
    return model
| true |
b11621e6d4a5107361e6485149e88bf002a07864 | Python | franz6ko/intro-ai | /Clase 5/GradientDescent.py | UTF-8 | 1,899 | 3.03125 | 3 | [] | no_license | import numpy as np
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
class GradientDescent:
    """Linear regression fitted by batch gradient descent.

    Optionally expands 1-D input with polynomial features (standardized),
    and supports L2 (ridge) regularization via weight decay.

    Parameters
    ----------
    alpha : float
        Learning rate.
    n_epochs : int
        Number of gradient-descent iterations.
    poly : int or None
        Degree of the polynomial feature expansion; ``None`` disables it.
    lbd : float
        L2 regularization strength (lambda); 0 disables regularization.
    """

    def __init__(self, alpha, n_epochs, poly=None, lbd=0):
        self.alpha = alpha
        self.n_epochs = n_epochs
        self.model = None  # learned weight vector, set by fit()
        self.lbd = lbd
        # Build the feature expander up front so fit/predict agree on it.
        if poly is not None:
            self.poly = PolynomialFeatures(poly)
        else:
            self.poly = None

    def _prepare(self, x):
        """Apply the optional polynomial expansion and ensure x is 2-D."""
        if self.poly is not None:
            x = self.poly.fit_transform(x.reshape(-1, 1))
            x = StandardScaler(with_std=True).fit_transform(x)
        if x.ndim == 1:
            x = x.reshape(-1, 1)
        return x

    def fit(self, x, y):
        """Learn the weights with n_epochs steps of batch gradient descent."""
        x = self._prepare(x)
        n_samples, n_features = x.shape
        # 1 - Random w initialization (deliberately far from the optimum)
        w = np.random.random(n_features) * -100
        np.seterr(all='warn')  # surface numeric overflow as warnings, not errors
        # Weight-decay factor for L2 regularization (1.0 when lbd == 0).
        reg_factor = 1 - 2 * self.lbd * self.alpha
        for epoch in range(self.n_epochs):
            # 2/3 - Prediction and error
            e = y - x @ w
            # 4 - Gradient of the MSE loss, vectorized: -2/n * X^T e
            # (replaces the original per-sample Python loop; same result)
            g = -2 * (x.T @ e) / n_samples
            # 5 - Correction
            w = reg_factor * w - self.alpha * g
        self.model = w

    def predict(self, x):
        """Predict targets for x using the fitted weights."""
        x = self._prepare(x)
        return x @ self.model

    def fit_transform(self, x, y):
        """Fit on (x, y) and return the in-sample predictions as a row vector."""
        self.fit(x, y)
        return self.predict(x).reshape(1, -1)
| true |
09d0f12fbf4b8f62ebd03962ce789699a940eeea | Python | aman-aman/Python_tkInter | /gui4.py | UTF-8 | 684 | 2.75 | 3 | [] | no_license | from tkinter import *
root=Tk()
#topFrame=Frame(root)
#topFrame.pack()
#bottomFrame=Frame(root)
#bottomFrame.pack(side=BOTTOM)
button1=Button(root,text="button 1",fg="red")
button2=Button(root,text="button 2",fg="blue")
button3=Button(root,text="button 3",fg="green")
button4=Button(root,text="button 4",fg="purple")
#button1.pack(side=LEFT)
#button2.pack(side=LEFT)
#button3.pack(side=LEFT)
#button4.pack(side=BOTTOM)
button1.pack()
button2.pack()
button3.pack()
button4.pack()
button1.tkMessagebox(top,text="aman1")
button2.tkMessagebox(top,text="aman2")
button3.tkMessagebox(top,text="aman3")
button4.tkMessagebox(top,text="aman4")
root.mainloop()
| true |
3efd1f696fe681996e1e1f2fa9c4120b7d566845 | Python | tarekmehrez/recomendation_engine | /lib/tbont_text_engine/vector_space/lsi.py | UTF-8 | 3,806 | 2.921875 | 3 | [] | no_license | """Contains the LSIModel class."""
from collections import OrderedDict
from gensim import models, matutils
from corpus import Corpus
from tbont_text_engine.utils import io
class LSIModel(object):
"""Train LSIModel using gensim's API."""
def __init__(self):
"""Init LSIModel instance."""
self.model = None
self.corpus = Corpus()
self.token_vectors = []
self.articles_vectors = []
self.article_to_idx = OrderedDict()
def train_model(self, articles_as_tokens, num_topics=100):
"""
Train LSI Model.
params:
num_topics (int) [default=100]
"""
self.corpus.build(articles_as_tokens.values())
self._assign_article_ids(articles_as_tokens.keys())
self.model = models.LsiModel(corpus=self.corpus.tfidf_matrix,
num_topics=num_topics)
self._extract_matrices()
def _assign_article_ids(self, articles_ids):
"""
Assign article ids.
params:
articles_ids (list[str])
"""
indices = range(len(articles_ids))
self.article_to_idx = dict(zip(articles_ids, indices))
def update_model(self, articles_as_tokens):
"""
Add new documents to the LSI vector space.
params:
articles_as_tokens (dict{str: list[list(str)]}): article id, tokens
"""
print 'updating...'
print 'merging corpus...'
new_documents = self.corpus.merge(articles_as_tokens.values())
print 'updating ids...'
self._update_article_ids(articles_as_tokens.keys())
print 'adding new documents...'
self.model.add_documents(new_documents)
print 'extracting new matrix'
self._extract_matrices()
def _update_article_ids(self, articles_ids):
"""
Update article ids.
params:
articles_ids (list[str])
"""
max_so_far = max(self.article_to_idx.values())
new_max = max_so_far + len(articles_ids)
new_indices = range(max_so_far + 1, new_max + 1)
self.article_to_idx.update(dict(zip(new_indices, articles_ids)))
def _extract_matrices(self):
"""
Extract U, V matrices from LSI model.
U: num_topics * num_tokens
V: num_docs * num_topics
So basically U is the tokens aginst topics vector
V is the articles against topics vector
http://tinyurl.com/hzxm4ty
*** requires background on how LSI works ***
"""
self.token_vectors = self.model.projection.u
self.articles_vectors = matutils.corpus2dense(
self.model[self.corpus.tfidf_matrix],
len(self.model.projection.s)).T / self.model.projection.s
def get_article_vector(self, article_id):
"""
Get article vector.
params:
article_id (str)
returns:
numpy.ndarray: article vector
"""
article_idx = self.article_to_idx[article_id]
return self.articles_vectors[article_idx]
def get_token_vector(self, token):
"""
Get token vector.
params:
token (str)
return:
numpy.ndarray: token vector
"""
token_id = self.corpus.dictionary.token2id[token]
return self.token_vectors[token_id]
def load(self, file_path):
"""
Load entire LSIModel instance.
params:
file_path (str)
"""
content = io.read(file_path)
self.__dict__.update(content)
def save(self, file_path):
"""
Save entire LSIModel instance.
params:
file_path (str)
"""
content = self.__dict__
io.write(content, file_path)
| true |
7a0c7a47251f282cbb4e4ccf504f3a1e7d4046d5 | Python | yanita-d/Bioinformatics2020 | /Martin Georgiev/Homework/problem3.py | UTF-8 | 740 | 3.34375 | 3 | [] | no_license | from Bio import SeqIO
from collections import Counter
#reading fasta format file and returning the sequence
def readSeqFromFastaFile(filename):
inputFileData = SeqIO.read(filename, "fasta")
return inputFileData.seq
dnaSeq = readSeqFromFastaFile("data/fasta_seq_1.fa")
#first way
def myFrequencyTable(dnaSeq):
frequencyTable = {'A': 0, 'C': 0,'G': 0,'T': 0}
for nuc in dnaSeq:
frequencyTable[nuc] += 1
print(frequencyTable['A'])
#second way
def collectionsFrequencyTable(dnaSeq):
frequencyTable2 = Counter(dnaSeq)
print(frequencyTable2['A'])
#third way
def bioModuleFrequency(dnaSeq):
print(dnaSeq.count('A'))
myFrequencyTable(dnaSeq)
collectionsFrequencyTable(dnaSeq)
bioModuleFrequency(dnaSeq) | true |
8875d11d29889f5f3f2e6d49a49b709aaebc2fb5 | Python | Joscho2/PTS_DU3 | /gamewrapper.py | UTF-8 | 2,382 | 3.328125 | 3 | [] | no_license | import final
class GameWrapper(object):
"""Stará sa o správu hry. Oznamuje jednotlivým kvalifikáciam
posun na nový deň, spracováva postupujúce tími a posúva na nový deň
aj samotné majstrovstvá."""
def __init__(self, q_list, simulator, history):
self.q_list = q_list
self.qual_is_playing = True
self.simulator = simulator
self.history = history
super(GameWrapper, self).__init__()
def q_next(self):
"""Odsimulovanie ďalšieho dňa kvalifikácie
vráti False ak sa kvalifikácie skončili/ak sú škončené,
inak vráti True"""
held = False #Určí, či sa ešte
#konajú kvaifikačné
#zápasy.
#Pre každú kvalifikáciu
for l in self.q_list:
#Ak ešte má čo odohrať
if(l.has_next()):
held = True
#odohraj
l.next()
#Ak sa nič neodohralo..
if(not held):
print('Vstup do finále!', end = '\n')
return False
else:
return True
def next(self):
"""Vykonanie nasledujúceho dňa. Zahŕňa to kvalifikáciu
a samotné majstrovstvá"""
#Histórií povieme, že ideme do nasledujúceho dňa
self.history.next_day()
#Pýtame sa, či sa ešte hrá kvalifikácia
if(self.qual_is_playing):
self.qual_is_playing = self.q_next()
#Kvalifikacia sa skoncila, pripravime si majstrovstva.
#V tento deň sa neodohrá žiadny zápas
if(not self.qual_is_playing):
final_list = []
#Z každej kvalifikácie si vypýtame postupujúce tímy
for q in self.q_list:
for l in q.get_winners():
final_list.append(l)
self.world_cup = final.Final(final_list, self.simulator)
else:
#Hra sa finale
if(self.world_cup.has_next()):
self.world_cup.next()
else:
print('Majstrovstvá sa už skončili.', end ='\n')
def print_tables(self):
"""Vypísanie kvalifikačných tabuliek. Ak by sme chceli
veľmi pekné, usporiadané tabuľky, tak to stačí zmeniť len tu."""
for q in self.q_list:
q.print_table()
| true |
a8bec1d14a22ca001585d9d47f48d915853e281c | Python | jisoo-ho/Python_R_study | /20200420/20200420-2.py | UTF-8 | 1,890 | 3.375 | 3 | [] | no_license | # 2)아이콘 넣기
import sys
from PyQt5.QtWidgets import QApplication, QWidget
from PyQt5.QtGui import QIcon
class MyApp(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.setWindowTitle('Icon')
self.setWindowIcon(QIcon('web.png'), encoding='UTF8')
#파이썬과 가장 잘 맞는 이미지 파일 형식 : png(jpg는 고화질을 표현한다. gif는 표현 가능한 색이 216가지 밖에 안된다.(safe 컬러))
self.setGeometry(300,300,300,200)
self.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = MyApp()
sys.exit(app.exec_())
# =============================================================================
# setWindowIcon() 메서드는 어플리케이션 아이콘을 설정하도록 합니다.
# 이를 위해서 QIcon 객체를 생성하였습니다. QIcon()에 보여질 이미지('web.png')를 입력합니다.
# 이미지 파일을 다른 폴더에 따로 저장해 둔 경우에는 경로까지 함께 입력해주면 됩니다.
# 만들어진 QIcon 객체를 윈도우에 설정하겠다 라고 해서 setWindowIcon 이라는 함수가 필요한 것 이다.
# =============================================================================
# setGeometry(상단에서 떨어진 값, 좌측에서 떨어진 값, 창 가로값, 창 세로값) 메서드는 창의 위치와 크기를 설정합니다.
# 앞의 두 매개변수는 창의 x, y위치를 결정하고, 뒤의 두 매개변수는 각각 창의 너비와 높이를 결정합니다.
# 이 메서드는 창 띄우기 예제에서 사용했던 move()와 resize() 메서드를 하나로 합쳐놓은 것과 같습니다.
# ============================================================================= | true |
64b73bd1146c482c6df19ef8ab87be6fb391fd3e | Python | gabrielbaldao/robotica | /teste.py | UTF-8 | 703 | 3.125 | 3 | [] | no_license | import RPi.GPIO as gpio
import time
# Disable RPi.GPIO runtime warnings (e.g. "channel already in use")
gpio.setwarnings(False)
# Use physical board pin numbering
gpio.setmode(gpio.BOARD)
gpio.setup(17,gpio.OUT)
gpio.setup(18,gpio.OUT)
# One PWM object per pin (second argument is the frequency per the RPi.GPIO
# API); blue starts fully off (0% duty), red fully on (100% duty)
pwmBlue = gpio.PWM(17,100)
pwmBlue.start(0)
pwmRed = gpio.PWM(18,100)
pwmRed.start(100)
# Duty-cycle bookkeeping variables (not referenced elsewhere in this script)
dcBlue = 0
dcRed = 100
def mudaPWM(pwm1, pwm2, duty_on=100, duty_off=0, delay=2):
    """Switch the pair of PWM channels: drive pwm1 to duty_on and pwm2 to
    duty_off, then wait `delay` seconds.

    The defaults reproduce the original hard-coded behavior (100%/0%, 2 s);
    the keyword parameters generalize it without changing existing callers.
    """
    pwm1.ChangeDutyCycle(duty_on)
    pwm2.ChangeDutyCycle(duty_off)
    time.sleep(delay)
# Alternate the two channels forever: red on / blue off, then blue on / red off
# (each phase holds for 2 seconds inside mudaPWM).
while True:
    mudaPWM(pwmRed, pwmBlue)
    mudaPWM(pwmBlue,pwmRed)
# NOTE(review): everything below is unreachable -- the loop above never exits,
# so gpio.cleanup() never runs; consider wrapping the loop in
# try/except KeyboardInterrupt so cleanup happens on Ctrl-C.
gpio.cleanup()
exit() | true |
99888cbdeec893d781548f2c6993cd0a395f8b45 | Python | ChiPT318/PhamThucChi-Labs-C4E16 | /Web module/Session01/app.py | UTF-8 | 858 | 2.828125 | 3 | [] | no_license | from flask import Flask, render_template
# Flask application object; the route decorators below register views on it.
app = Flask(__name__)
@app.route('/')  # home page
def index():
    """Render the home page with a fixed list of demo posts."""
    first_post = {
        "title": "Tho con coc",
        "content": "nekrnk kenjk kejn kenrj kernvw nwin",
        "author": "Chi",
        "gender": 0,
    }
    second_post = {
        "title": "MamaMia",
        "content": "Ma ma mia, here we go again. My my, how can I resist ya",
        "author": "AbbA",
        "gender": 1,
    }
    third_post = {
        "title": "Khong biet lam tho",
        "content": "Chiu chiu",
        "author": "A Bo Co",
        "gender": 0,
    }
    posts = [first_post, second_post, third_post]
    return render_template("index.html", posts=posts)
@app.route("/hello")
def hello():
return("Hello C4E16")
@app.route("/sayhi/<name>/<age>")
def sayhi(name, age):
return("Hi " + name + "You are " +age)
if __name__ == '__main__':
    # Development server only; debug=True enables the reloader and debugger.
    app.run(debug=True)
| true |
fc7aad6891276f1f8f187dbd674e5f3bc5d5295d | Python | schuderer/bprl | /tests/gym_fin/test_pension_env.py | UTF-8 | 14,787 | 2.578125 | 3 | [
"MIT"
] | permissive | """Tests for the gym_fin.envs.pension_env module"""
# Stdlib imports
import logging
from math import floor
from unittest import mock
# Third-party imports
from gym.utils import seeding
import pytest
# Application level imports
from gym_fin.envs import pension_env
class MockEnv:
    """Minimal stand-in for the pension environment: a year counter plus
    registries for companies and humans, each fresh per instance."""

    def __init__(self):
        self.year = 0
        self.companies, self.humans = [], []
class MockClient:
    # Bare stand-in for pension_env.Client; the tests only use it for
    # object identity (e.g. membership checks in company.clients).
    pass
@pytest.fixture(autouse=True)
def seed():
    # Reseed the module-level RNG before every test for determinism.
    # TODO: ugly global -- refactor in code?
    pension_env.np_random, seed = seeding.np_random(0)
    return seed
@pytest.fixture(autouse=True)
def loglevel_debug():
    # Run every test at DEBUG so caplog assertions can see log messages.
    pension_env.logger.setLevel(logging.DEBUG)
@pytest.fixture
def env():
    # Fresh fake environment per test.
    return MockEnv()
@pytest.fixture
def env_company(env):
    # Environment with one real InsuranceCompany registered in it.
    company = pension_env.InsuranceCompany()
    env.companies.append(company)
    return env, company
@pytest.fixture
def env_company_client(env_company):
    # Environment with one company and one client forced to join it.
    env, company = env_company
    return env, company, pension_env.Client.maybe_make_client(env, force=True)
@pytest.fixture
def mockclient():
    return MockClient()
###############################################
# Seq
###############################################
def test_seq():
    # Seq subclasses hand out consecutive, strictly increasing ids.
    class Something(pension_env.Seq):
        pass
    assert Something.create_id() == Something.create_id() - 1
###############################################
# Client
###############################################
def test_client_init(env):
    _ = pension_env.Client(env)
def test_client_factory_empty_env(env):
    # Clients cannot be created when the env has no companies to join.
    with pytest.raises(pension_env.ClientError, match="no companies"):
        _ = pension_env.Client.maybe_make_client(env, force=True)
def test_client_factory_non_empty_env(mocker, env_company):
    mocker.patch("gym_fin.envs.pension_env.InsuranceCompany.create_membership")
    env, company = env_company
    client = pension_env.Client.maybe_make_client(env, force=True)
    company.create_membership.assert_called_once_with(client)
    assert client.pension_fund == company
def test_client_factory_no_client_because_bad_reputation(mocker, env_company):
    # Without force=True, a terrible reputation deters new clients.
    mocker.patch("gym_fin.envs.pension_env.InsuranceCompany.create_membership")
    env, company = env_company
    company.reputation = -9999999999
    client = pension_env.Client.maybe_make_client(env)
    assert client is None
def test_client_already_client(mocker, env_company_client):
    env, company, client = env_company_client
    with pytest.raises(pension_env.ClientError, match="already client"):
        client._become_client_of(company)
def test_client_give_or_take_positive(mocker, env_company_client):
    env, company, client = env_company_client
    initial_funds = client.funds
    success = client.give_or_take(123)
    assert success
    assert client.last_transaction == 123
    assert client.funds == initial_funds + 123
def test_client_give_or_take_negative(mocker, env_company_client):
    env, company, client = env_company_client
    initial_funds = client.funds
    success = client.give_or_take(-123)
    assert success
    assert client.last_transaction == -123
    assert client.funds == initial_funds - 123
def test_client_give_or_take_zero_funds_positive(mocker, env_company_client):
    env, company, client = env_company_client
    client.funds = 0
    success = client.give_or_take(1234)
    assert success
    assert client.last_transaction == 1234
    assert client.funds == 1234
def test_client_give_or_take_zero_funds_negative(mocker, env_company_client):
    # Taking from an empty account must fail and leave state untouched.
    env, company, client = env_company_client
    client.funds = 0
    success = client.give_or_take(-1234)
    assert not success
    assert client.last_transaction != -1234
    assert client.funds == 0
def test_client_live_one_year_age(mocker, env_company_client):
    env, company, client = env_company_client
    initial_age = client.age
    client.live_one_year()
    # NOTE(review): leftover from a removed capsys-based check:
    # out, err = capsys.readouterr()
    assert client.age == initial_age + 1
def test_client_live_one_year_earn_income(mocker, env_company_client):
    env, company, client = env_company_client
    initial_funds = client.funds
    client.live_one_year()
    assert (
        client.funds == initial_funds + client.income - client.living_expenses
    )
def test_client_live_one_year_die(mocker, env_company_client, caplog):
    # An absurdly high age guarantees death within one simulated year.
    env, company, client = env_company_client
    client.age = 9999999999
    client.live_one_year()
    assert not client.active
    assert "RIP" in caplog.text
def test_client_live_one_year_leave(mocker, env_company_client, caplog):
    # A miserable client leaves, which damages the company's reputation.
    env, company, client = env_company_client
    initial_company_reputation = company.reputation
    client.happiness = -9999999999
    client.live_one_year()
    assert not client.active
    assert "Goodbye" in caplog.text
    assert company.reputation < initial_company_reputation
def test_client_live_one_year_old_no_pension(mocker, env_company_client):
    env, company, client = env_company_client
    initial_happiness = client.happiness
    client.age = pension_env.Client.income_end_age + 1
    # We won't give client income
    client.live_one_year()
    assert client.happiness < initial_happiness
def test_client_live_one_year_old_pension(mocker, env_company_client):
    env, company, client = env_company_client
    initial_happiness = client.happiness
    client.age = pension_env.Client.income_end_age + 1
    # NOTE(review): leftover debug print
    print(client.funds, client.expectation)
    assert client.give_or_take(+client.expectation)
    client.live_one_year()
    assert client.happiness >= initial_happiness
def test_client_live_one_year_old_no_funds(mocker, env_company_client):
    env, company, client = env_company_client
    initial_happiness = client.happiness
    client.age = pension_env.Client.income_end_age + 1
    client.funds = 0
    client.live_one_year()
    assert client.happiness < initial_happiness
def test_client_live_one_year_young_no_income(mocker, env_company_client):
    env, company, client = env_company_client
    initial_happiness = client.happiness
    client.age = pension_env.Client.income_end_age - 1
    # Taking client's income away from them
    assert client.give_or_take(-client.income)
    client.live_one_year()
    assert client.happiness < initial_happiness
def test_client_live_one_year_young_more_income(mocker, env_company_client):
    env, company, client = env_company_client
    initial_happiness = client.happiness
    client.age = pension_env.Client.income_end_age - 1
    # Doubling client's income
    assert client.give_or_take(+client.income)
    client.live_one_year()
    assert client.happiness >= initial_happiness
def test_client_live_one_year_young_no_funds(mocker, env_company_client):
    env, company, client = env_company_client
    initial_happiness = client.happiness
    client.age = pension_env.Client.income_end_age - 1
    client.funds = 0
    client.live_one_year()
    assert client.happiness < initial_happiness
def test_client_live_one_year_young_no_funds_no_income(
    mocker, env_company_client
):
    env, company, client = env_company_client
    initial_happiness = client.happiness
    client.age = pension_env.Client.income_end_age - 1
    client.funds = -client.income
    client.live_one_year()
    assert client.happiness < initial_happiness
def test_client_live_one_year_recover_negative_happiness(
    mocker, env_company_client
):
    # Happiness climbs back toward zero year over year (but not past it).
    env, company, client = env_company_client
    client.age = pension_env.Client.income_end_age - 11
    client.happiness = -50
    for y in range(10):
        previous_happiness = client.happiness
        client.live_one_year()
        assert client.happiness > previous_happiness or client.happiness == 0
###############################################
# InsuranceCompany
###############################################
def test_insurance_company_init():
    _ = pension_env.InsuranceCompany()
def test_insurance_company_create_membership(mockclient, env_company):
    env, company = env_company
    company.create_membership(mockclient)
    assert mockclient in company.clients
def test_insurance_company_run_company_running_cost():
    # With investing disabled, one year costs exactly running_cost.
    company = pension_env.InsuranceCompany(investing=False)
    running_cost = pension_env.InsuranceCompany.running_cost
    initial_funds = company.funds
    company.run_company()
    assert company.funds == initial_funds - running_cost
def test_insurance_company_run_company_investing(env_company):
    # Refund the running cost each year so only investment returns remain.
    env, company = env_company
    running_cost = pension_env.InsuranceCompany.running_cost
    initial_funds = company.funds
    # It's very certain that there will be profits after 50 years. :)
    for y in range(50):
        company.run_company()
        company.funds += running_cost
    assert company.funds > initial_funds - 50 * running_cost
def test_insurance_company_run_company_reputation(env_company):
    # Reputation decays toward neutral from both directions.
    env, company = env_company
    company.reputation = 100
    initial_reputation = company.reputation
    company.run_company()
    assert company.reputation <= initial_reputation
    company.reputation = -100
    initial_reputation = company.reputation
    company.run_company()
    assert company.reputation > initial_reputation
def test_insurance_company_damage_reputation(env_company):
    env, company = env_company
    recovery = pension_env.InsuranceCompany.reputation_recovery
    initial_reputation = company.reputation
    company.damage_reputation(-100)
    company.run_company()
    assert company.reputation == initial_reputation - 100 + recovery
    company.reputation = 0
    initial_reputation = company.reputation
    # Positive "damage" is rejected.
    with pytest.raises(
        pension_env.InsuranceCompanyError, match="must be negative"
    ):
        company.damage_reputation(100)
def test_insurance_company_do_debit_premium(env_company_client):
    env, company, client = env_company_client
    initial_funds = company.funds
    success = company.do_debit_premium(1234, client)
    assert success
    assert company.funds == initial_funds + 1234
    client.funds = 0
    success = company.do_debit_premium(1234, client)
    assert not success
    client.funds = 10000
    # NOTE(review): this reassignment is unused below.
    initial_funds = company.funds
    with pytest.raises(
        pension_env.InsuranceCompanyError, match="must be positive"
    ):
        company.do_debit_premium(-1234, client)
def test_insurance_company_do_pay_out(env_company_client):
    env, company, client = env_company_client
    initial_funds = company.funds
    success = company.do_pay_out(1234, client)
    assert success
    assert company.funds == initial_funds - 1234
    company.funds = 0
    success = company.do_pay_out(1234, client)
    assert not success
    company.funds = 10000
    # NOTE(review): this reassignment is unused below.
    initial_funds = company.funds
    with pytest.raises(
        pension_env.InsuranceCompanyError, match="must be positive"
    ):
        company.do_pay_out(-1234, client)
###############################################
# PensionEnv
###############################################
def test_pension_env_init():
    assert pension_env.PensionEnv()
def test_pension_env_companies():
    # Companies only appear after the first reset().
    env = pension_env.PensionEnv()
    assert len(env.companies) == 0
    env.reset()
    assert len(env.companies) > 0
def test_pension_env_seed():
    # Identical seeds reproduce identical draws from the module RNG.
    env = pension_env.PensionEnv()
    env.seed(0)
    r1 = pension_env.np_random.choice(range(1000))
    env.seed(0)
    r2 = pension_env.np_random.choice(range(1000))
    assert r1 == r2
def test_pension_env_reset():
    # reset() after re-seeding must reproduce the same start observation.
    env = pension_env.PensionEnv()
    env.seed(0)  # PensionEnv start state is currently deterministic, but still
    s1 = env.reset()
    for _ in range(100):
        env.step(env.action_space.sample())
    env.seed(0)
    s2 = env.reset()
    assert (s1 == s2).all()
def test_pension_env_observation_shape():
    env = pension_env.PensionEnv()
    s = env.reset()
    assert s.shape[0] == env.observation_space.low.shape[0]
def test_pension_env_render():
    # Smoke test: render must not raise.
    env = pension_env.PensionEnv()
    env.render()
def test_pension_env_close():
    # Smoke test: close must not raise.
    env = pension_env.PensionEnv()
    env.close()
def test_pension_env_step_before_reset():
    env = pension_env.PensionEnv()
    with pytest.raises(pension_env.PensionEnvError, match="before reset"):
        env.step(env.action_space.sample())
@mock.patch.object(
    pension_env.InsuranceCompany, "do_debit_premium", return_value=True
)
def test_pension_env_step_debit_action(do_debit_premium):
    # Action 0 must trigger exactly one premium debit.
    env = pension_env.PensionEnv()
    env.reset()
    debit_action = 0
    env.step(debit_action)
    # do_debit_premium.assert_called_once()
    assert do_debit_premium.call_count == 1
@mock.patch.object(
    pension_env.InsuranceCompany, "do_pay_out", return_value=True
)
def test_pension_env_step_payout_action(do_pay_out):
    # Action 1 must trigger exactly one pay-out.
    env = pension_env.PensionEnv()
    env.reset()
    payout_action = 1
    env.step(payout_action)
    # do_pay_out.assert_called_once()
    assert do_pay_out.call_count == 1
# NOTE(review): superseded by the mocker.patch.object calls inside the test:
# @mock.patch.object(pension_env.Client, '_leave_cdf', return_value=1.0)
# @mock.patch.object(pension_env.Client, '_death_cdf', return_value=0.0)
# @mock.patch.object(pension_env.Client, '_new_cdf', return_value=0.0)
# @mock.patch.object(pension_env.Client, 'live_one_year')
# @mock.patch.object(pension_env.InsuranceCompany, 'run_company')
def test_pension_env_step_new_years(mocker):
    # _leave_cdf, _death_cdf, _new_cdf, live_one_year, run_company):
    """Given N Clients, env.year will advance with every Nth step(),
    InsuranceCompany.run_company will be called once per year and
    Client.live_one_year will be called once for each step.
    """
    # Patch Client object to never leave nor join nor die
    mocker.patch.object(pension_env.Client, "_leave_cdf", return_value=1.0)
    mocker.patch.object(pension_env.Client, "_death_cdf", return_value=0.0)
    mocker.patch.object(pension_env.Client, "_new_cdf", return_value=0.0)
    run_company = mocker.patch.object(
        pension_env.InsuranceCompany, "run_company"
    )
    env = pension_env.PensionEnv()
    env.reset()
    first_client = env.humans[0]
    second_client = pension_env.Client.maybe_make_client(env, force=True)
    env.humans.append(second_client)
    num_clients = len(env.humans) # There are 2 clients
    print("num_clients", num_clients)
    mocker.spy(first_client, "live_one_year")
    mocker.spy(second_client, "live_one_year")
    steps = 10
    y0 = env.year # probably 0
    debit_action = 0
    for _ in range(steps):
        env.step(debit_action)
    y5 = env.year
    expected_year_changes = floor(steps / num_clients)
    # 0 + floor(10 / 2) == 5
    assert y0 + expected_year_changes == y5
    assert run_company.call_count == y0 + expected_year_changes
    assert first_client.live_one_year.call_count == steps / num_clients
    assert second_client.live_one_year.call_count == steps / num_clients
| true |
db5c2b9b451c0d849a4e91247318728672b455e8 | Python | iamFIREcracker/project-euler | /python/52.py | UTF-8 | 440 | 3.578125 | 4 | [] | no_license | """Find the smallest positive integer, x, such that 2x, 3x, 4x, 5x, and 6x,
contain the same digits.
"""
def IntToSeq(n):
    """Return the decimal digits of non-negative n, least-significant first.

    Robustness fix: the original returned [] for n == 0; [0] is the sane
    digit list. The search below never passes 0, so existing behavior is
    unchanged for all current callers.
    """
    if n == 0:
        return [0]
    s = []
    while n:
        n, r = divmod(n, 10)
        s.append(r)
    return s
# Search by digit count d; the x-range upper bound (10**d)//6 guarantees 6x
# still has d digits. This file is Python 2 (xrange, print statement).
# NOTE(review): the `!= 6` checks require exactly six DISTINCT digits, which
# happens to hold for the known answer (142857) but would miss any candidate
# with repeated digits.
for d in range(1, 10):
    for x in xrange(10**(d - 1), (10**d)//6):
        s = set(IntToSeq(x))
        # Compare the digit set of each multiple 2x..6x against x's.
        for m in range(2, 7):
            ss = set(IntToSeq(x*m))
            if len(ss) != 6 or len(s&ss) != 6: break
        else: break
    else: continue
    break
print x
| true |
e482329d8ac5b58b8a02f18f13404e4130697eab | Python | sbowles22/CS550 | /HW2/C-to-F-Conversion.py | UTF-8 | 482 | 3.703125 | 4 | [] | no_license | import sys
# Command-line temperature converter.
# argv[1] selects the TARGET unit ('f' or 'c'); argv[2] is the value in the
# other unit (e.g. `script.py f 100` converts 100 degrees C to F).
try:
    if sys.argv[1].lower() == 'f':
        # Celsius -> Fahrenheit
        temp = float(sys.argv[2]) * (9/5) + 32.0
    elif sys.argv[1].lower() == 'c':
        # Fahrenheit -> Celsius
        temp = (float(sys.argv[2]) - 32.0) * (5/9)
    else:
        print('ERROR: Please input C or F as argument 1')
        quit()
    temp = round(temp)
    # f-string requires Python >= 3.6; prints e.g. "212°F"
    print(f'{temp}°{sys.argv[1].title()}')
except ValueError:
    # float() failed on argv[2]
    print('ERROR: Please input a number for argument 2')
except IndexError:
    # fewer than two command-line arguments were given
    print('ERROR: Please enter 2 arguments')
| true |
1e849da0076a8ea5f654af5bf45f7dac09ab3f5c | Python | dinobobo/thesis_plots | /BigCoilPair.py | UTF-8 | 4,671 | 2.828125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Model of big 6x6 winding coil on octagon chamber as built in 2016
"""
from __future__ import division
from numpy import linspace, array, polyfit, poly1d
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from quagmire.magnetics.wire import WireSquareTube
from quagmire.magnetics.coil_array import LoopArrayCoilPair
from quagmire.visual.utils import axisEqual3D
import plot_utilities
#------------------------------------------------------------------
# Standard modeling parameters for big coil as of January 2018
#------------------------------------------------------------------
nv = array([0,0,1]) # normal vector for coil oriented in the z direction
# Axial spacing of the coils: must be minimum 0.0354 m
#"Standard" value for initial 2016 plan was 0.0398 m for a 6x6 coil
# Distance changed to 0.0364 m in Sept 2016 after modeling recessed coil
# Increased to 0.037 m in Dec 2017 after coil mount reconstruction
r0i = array([0, 0, 0.037]) # Distance of coil surface from center plane (m)
# NOTE(review): the "+/- ... cm" tolerances below look like they should read
# "m" -- the values themselves (0.00352 etc.) appear to be metres. Confirm.
radial_coil_thickness = .00352 # +/- .00028 cm
axial_coil_thickness = .00355 # +/- .00028 cm
epoxy_thickness = .0003 # thickness of the layer of epoxy
dR = radial_coil_thickness+epoxy_thickness # radial winding pitch: wire plus epoxy layer
dZ = axial_coil_thickness # spacing of coils axially
# NOTE(review): 0.089 m is 8.9 cm, not 9 cm as the comment says -- confirm.
Ri = .089 # coil inner radius is 9 cm
NR = 6 # radial coil layers
NZ = 6 # axial coil layers
big_coil_wire = WireSquareTube(outer_side=.0032, inner_side=.0013, material="copper")
BigCoilPair = LoopArrayCoilPair(nv, r0i, Ri, dR, NR, dZ, NZ, big_coil_wire, "HH")
###############################################################################
if __name__ == "__main__":
    # Compute and plot axial/radial field profiles for the Helmholtz pair,
    # then report field drop-off 1 mm and 1 cm from center.
    IHH = 300 # Likely maximum operating current value?
    zrng = 0.05
    zvals = linspace(-zrng, zrng, 150)
    rvals = [ array([0.0,0,z]) for z in zvals]
    # B() returns T/A; [:,2] is Bz; x10000 converts Tesla -> Gauss, xIHH scales to operating current.
    HHvals = BigCoilPair.B(rvals)[:,2] * 10000 * IHH
    # Plot on-axis field using the 3D solution implemented in B_loop
    plt.figure(1)
    plt.clf()
    plt.plot(zvals*100, HHvals, "r.", lw = 2)
    #Polynomial fit to the axial Helmholtz data to get field curvature
    HHfitz = polyfit(zvals * 100, HHvals, 7)
    HHfitz_data = poly1d(HHfitz)(zvals * 100)
    plt.plot(zvals * 100, poly1d(HHfitz)(zvals * 100), "r-", label = "Axial Magnetic Field")
    # Constant term of the fit = field at the center, B0 (Gauss).
    B0 = HHfitz[-1]
    plt.xlabel("Z (cm)")
    plt.ylabel("Magnetic Field (G)")
    plt.text(-2, 0.4*B0, "$B_{0}$=%.1f $G$" % HHfitz[-1], fontsize = 12)
    # HHfitz[-3] is the quadratic coefficient, i.e. half the curvature B''.
    plt.text(-2, 0.2*B0, "$B_z''$=%.3f $G/cm^2$" % HHfitz[-3], fontsize = 12, color = 'r')
    # Electrical load at 340 K (presumably a hot-coil estimate -- confirm units).
    R340 = BigCoilPair.resistance(340)
    V = R340 * IHH
    P = R340 * IHH**2
    # plt.text(-3, 0.94*B0, "Coil Resistance: %.3f Ohm" % R340, fontsize=12 )
    # plt.text(-3, 0.96*B0, "Power Dissipated: %d W" % P, fontsize=12 )
    # plt.text(-3, 0.98*B0, "Voltage Drop: %.3f V" % V, fontsize=12 )
    # Plot Radial Field
    xrng = zrng
    xvals = linspace(-xrng, xrng, 150)
    rxvals = [ array([x,0,0.0]) for x in xvals]
    HHBx = BigCoilPair.B(rxvals)[:,2] * 10000 * IHH
    plt.plot(xvals*100, HHBx, "b.", lw= 2)
    HHfitr = polyfit(xvals * 100, HHBx, 7)
    HHfitr_data = poly1d(HHfitr)(xvals * 100)
    plt.plot(zvals * 100, poly1d(HHfitr)(xvals * 100), "b-", label = "Radial Magnetic Field")
    plt.text(-2, 0.9*B0, "$B_r''$=%.3f $G/cm^2$" % HHfitr[-3], fontsize = 12, color = 'b')
    plt.legend(loc='lower center')
    plt.show()
    #Calculate magnetic field drop at different locations
    HHval_max = BigCoilPair.B([array([0.0,0.0,0.0])])[:,2] * 10000 * IHH
    HHval_mm = BigCoilPair.B([array([0.0,0.0,0.001])])[:,2] * 10000 * IHH
    HHval_cm = BigCoilPair.B([array([0.0,0.0,0.01])])[:,2] * 10000 * IHH
    mm_dif = HHval_mm - HHval_max
    cm_dif = HHval_cm - HHval_max
    # Same drop-off calculation, but displaced radially (along x).
    HHBx_max = BigCoilPair.B([array([0.0,0.0,0.0])])[:,2] * 10000 * IHH
    HHBx_mm = BigCoilPair.B([array([0.001,0.0,0.0])])[:,2] * 10000 * IHH
    HHBx_cm = BigCoilPair.B([array([0.01,0.0,0.0])])[:,2] * 10000 * IHH
    mmx_dif = HHBx_mm - HHBx_max
    cmx_dif = HHBx_cm - HHBx_max
    # Data ploting
    plots = plot_utilities.plot_utilities(xlabel = "Z (cm)", ylabel = "Magnetic Field (G)", marker = '.')
    plots.label = "Axial Magnetic Field"
    plots.plot(xvals*100, HHvals)
    plots.color = 'b'
    plots.label = "Radial Magnetic Field"
    plots.plot(xvals*100, HHBx)
    plots.label = None
    plots.ls = '-'
    plots.plot(xvals*100, HHfitr_data)
    plots.color = 'r'
    plots.plot(xvals*100, HHfitz_data)
    plots.ax.legend(loc = 'lower center' )
    # NOTE(review): the `quality` kwarg was removed in Matplotlib 3.3 -- confirm
    # the pinned Matplotlib version before upgrading.
    plt.savefig("big_coil_field", quality = 100)
| true |
a16be770561e50839d2edf891b893ad76f5fd8c8 | Python | leovasc5/python | /aula28/aula28.py | UTF-8 | 189 | 3.109375 | 3 | [] | no_license | jogadores = ["Cristiano Ronaldo", "Messi", "Neymar", "De Bruyne"]
# Manual iteration demo: iter() builds an iterator over the list and each
# next() call pulls one element (first two players are printed).
itJogadores = iter(jogadores)
print(next(itJogadores))
print(next(itJogadores))
# for jogador in jogadores:
# print(jogador)
def singleNumber(nums):
    """Return the first value in `nums` that occurs exactly once.

    Replaces the original Python-2-only implementation (hand-built count
    dict iterated with `iteritems`, plus a redundant `set`) with
    collections.Counter.

    Args:
        nums: Iterable of hashable values.

    Returns:
        The first element whose count is 1, or None if every element repeats.
    """
    from collections import Counter
    counts = Counter(nums)
    # Scan in input order so ties resolve to the earliest singleton.
    for value in nums:
        if counts[value] == 1:
            return value
    return None

print(singleNumber([17,12,5,-6,12,4,17,-5,2,-3,2,4,5,16,-3,-4,15,15,-4,-5,-6]))
| true |
d11161a7acf8ab6019a482c074b170119ce51539 | Python | bir0bin/isp-exam | /itermagic/itermagic.py | UTF-8 | 983 | 3.28125 | 3 | [] | no_license | from Queue import Queue
def niter(s_iter, n=2):
    """Split one iterable into `n` independent iterators over the same items.

    The original hand-rolled queue-based version was Python 2 only
    (`Queue`, `xrange`) and, under PEP 479 (Python 3.7+), raised
    RuntimeError when the source was exhausted because StopIteration
    escaped a generator. `itertools.tee` implements exactly this
    buffering behaviour.

    Args:
        s_iter: Any iterable.
        n: Number of independent iterators to return.

    Returns:
        A tuple of `n` iterators; items consumed by one are buffered for
        the others.
    """
    from itertools import tee
    return tee(s_iter, n)
def merge_longer_iter(iters, fill):
    """Zip iterators together, padding exhausted ones with `fill`.

    The original hand-rolled loop (counting StopIterations per round)
    reimplements `itertools.zip_longest`: tuples are yielded until every
    iterator is exhausted, with `fill` substituted for finished ones.

    Args:
        iters: Sequence of iterators/iterables.
        fill: Value used for positions whose iterator is exhausted.

    Yields:
        Tuples of length len(iters); nothing if `iters` is empty.
    """
    from itertools import zip_longest
    yield from zip_longest(*iters, fillvalue=fill)
if __name__ == "__main__":
    # Demo: split a range into two iterators, advance the first by two,
    # then merge them back, padding the shorter with a sentinel value.
    # (Fixed the Python 2 `print x` statement for Python 3.)
    arr = range(10)
    it1, it2 = niter(arr)
    next(it1)
    next(it1)
    for x in merge_longer_iter((it1, it2), 1337):
        print(x)
| true |
e2530e0149a554d87708650b5f9e011abcfe4251 | Python | jinurajan/Datastructures | /LeetCode/contests/number_of_restricted_paths_from_first_to_last_node.py | UTF-8 | 1,609 | 3.09375 | 3 | [] | no_license | """
"""
from typing import List
from collections import defaultdict
from heapq import heappop, heappush
class Solution:
    def countRestrictedPaths(self, n: int, edges: List[List[int]]) -> int:
        """LeetCode 1786: count "restricted" paths from node 1 to node n.

        A restricted path is one along which the shortest distance to node
        n strictly decreases at every step; counts are taken mod 1e9+7.
        """
        # NOTE(review): for n == 1 the trivial path [1] arguably counts as
        # one path; returning 0 preserves the original behavior -- confirm.
        if not edges or n == 1:
            return 0
        # Undirected weighted adjacency map: adj_map[u][v] = edge weight.
        adj_map = defaultdict(dict)
        for x, y, weight in edges:
            adj_map[x][y] = weight
            adj_map[y][x] = weight
        # Memo for dfs(): node -> number of restricted paths node..n.
        dfs_mem = {}
        # (sic: "dijsktra") Lazy-deletion Dijkstra from node n, yielding every
        # node's shortest distance to the target.
        def dijsktra():
            minheap = [(0, n)]
            dist = [float("inf")] * (n + 1)
            dist[n] = 0
            while minheap:
                distance, node = heappop(minheap)
                # Skip stale heap entries superseded by a shorter distance.
                if distance != dist[node]:
                    continue
                for v, weight in adj_map[node].items():
                    if dist[v] > dist[node] + weight:
                        dist[v] = dist[node] + weight
                        heappush(minheap, (dist[v], v))
            return dist
        # Memoized DFS: only step to neighbors with strictly smaller dist,
        # which makes the traversal acyclic.
        def dfs(node):
            if node == n:
                return 1
            if node in dfs_mem:
                return dfs_mem[node]
            ans = 0
            for nei, weight in adj_map[node].items():
                if dist[node] > dist[nei]:
                    ans = (ans + dfs(nei)) % 1000000007
            dfs_mem[node] = ans
            return ans
        dist = dijsktra()
        return dfs(1)
# Ad-hoc check against the LeetCode sample graph (expected output: 3).
n = 5
edges = [[1,2,3],[1,3,3],[2,3,1],[1,4,2],[5,2,2],[3,5,1],[5,4,10]]
print(Solution().countRestrictedPaths(n, edges))
# n = 7
# edges = [[1,3,1],[4,1,2],[7,3,4],[2,5,3],[5,6,1],[6,7,2],[7,5,3],[2,6,4]]
# print(Solution().countRestrictedPaths(n, edges))
d9d6bd0e22adc46859d103139ad44a06e26343f1 | Python | ttocsneb/an_Adventure | /an_adventure/schemas/__init__.py | UTF-8 | 3,681 | 2.640625 | 3 | [] | no_license | from marshmallow import fields, Schema, post_load, pre_dump, post_dump, ValidationError
import adventurelib
from pymaybe import maybe
from . import objects
def getErrorString(errors):
    """Flatten a marshmallow-style error mapping into a single string.

    Each top-level entry is rendered as ``"<field>: <messages>"`` where the
    messages (a list of strings, or a nested dict whose values are used)
    are joined with ``", "``.  Top-level entries are concatenated with no
    separator between them.
    """
    def _fmt_messages(messages):
        # Nested dict: keep only the values; anything else: join its items.
        if isinstance(messages, dict):
            return ', '.join(f'{value}' for value in messages.values())
        return ', '.join(messages)

    pieces = [f'{field}: {_fmt_messages(msgs)}' for field, msgs in errors.items()]
    return ''.join(pieces)
class ItemSchema(Schema):
    """(De)serializes adventurelib Items.

    Fields:
        names: all names/aliases of the item (first is the primary name).
        attrs: arbitrary extra attributes set directly on the Item.
    """
    names = fields.List(fields.String(), required=True)
    # NOTE(review): `missing=dict()` shares one dict instance across loads
    # that omit 'attrs' -- confirm marshmallow copies it before mutation.
    attrs = fields.Dict(missing=dict())

    @post_load
    def createItem(self, data):
        """Build an adventurelib.Item from the validated payload."""
        item = adventurelib.Item(*data['names'])
        # Attach any extra attributes dynamically.
        for attr, value in data['attrs'].items():
            setattr(item, attr, value)
        return item
class ItemReference(fields.Field):
    """Serializes an Item as its name; deserializes to the plain name string."""

    default_error_messages = dict(
        dne="Item '{}' does not exist.", badobj="Invalid item object."
    )

    def _serialize(self, value: adventurelib.Item, attr, obj, **kwargs):
        """Render an Item (or item-like object) as its name string."""
        if hasattr(value, "name"):
            return str(value.name)
        # Fall back to treating the value as a mapping with a 'name' key;
        # pymaybe turns a failed lookup into None instead of raising.
        name = maybe(value)['name']
        if name is None:
            self.fail("badobj")
        return str(name)

    def _deserialize(self, value: str, attr, obj, **kwargs):
        """Keep the raw name; resolution against real Items happens elsewhere."""
        # def get_items():
        #     if callable(self._items):
        #         return self._items()
        #     return self._items
        # item = next((v for v in get_items() if v.name == value), None)
        # if item is None:
        #     self.fail("dne", name=value)
        return str(value)
class RoomReference(fields.Field):
    """Serializes a Room as its name; deserializes to the plain name string."""

    default_error_messages = dict(
        badobj="Invalid room object."
    )

    def _serialize(self, value: objects.Room, attr, obj, **kwargs):
        """Render a Room (or room-like object) as its name string."""
        if value is None:
            return None
        # Already a name -- pass it through unchanged.
        if isinstance(value, str):
            return value
        if hasattr(value, "name"):
            return str(value.name)
        # Fall back to a mapping lookup via pymaybe (None on failure).
        name = maybe(value)['name']
        if name is None:
            self.fail("badobj")
        return str(name)

    def _deserialize(self, value, attr, obj, **kwargs):
        """Keep the raw name; resolution against real Rooms happens elsewhere."""
        return str(value)
class PlayerSchema(Schema):
    """(De)serializes a Player as the list of item names it carries."""

    items = fields.List(ItemReference())

    @pre_dump
    def loadPlayer(self, data: objects.Player):
        """Players iterate over their items, so dump them as a plain list."""
        return dict(items=list(data))

    @post_load
    def createPlayer(self, data):
        """Rebuild a Player from the validated payload."""
        return objects.Player(**data)
class RoomSchema(Schema):
    """Full (de)serialization of a Room, including exits and item text."""

    name = fields.String(required=True)
    desc = fields.String(required=True)
    items = fields.List(ItemReference())
    item_desc = fields.Dict()
    attrs = fields.Dict()
    exits = fields.Dict()

    @post_dump
    def loadRoom(self, data):
        """After dumping, replace exit Room objects with their names."""
        def get_name(obj):
            # Exits may already be names (str) or full Room objects.
            if isinstance(obj, str):
                return obj
            if isinstance(obj, objects.Room):
                return obj.name
            raise ValidationError(f"type {type(obj)} is not a Room")
        data['exits'] = dict((k, get_name(v)) for k, v in data['exits'].items())
        return data

    @post_load
    def createRoom(self, data):
        """Rebuild a Room from the validated payload."""
        return objects.Room(**data)
class RoomSaveSchema(Schema):
    """
    A Schema for Rooms in the save file

    Save files only persist the mutable parts of a room (name, items,
    attrs); the static data (desc, exits, item_desc) is re-read from the
    live room definitions at load time.
    """
    name = fields.String(required=True)
    items = fields.List(ItemReference())
    attrs = fields.Dict()

    @post_load
    def createRoom(self, data: dict):
        """Merge saved state with the matching static room definition."""
        # Imported here, presumably to avoid a circular import at module
        # load time -- confirm before hoisting.
        from ..globalvars import rooms
        room = next((r for r in rooms if r.name == data['name']), None)
        if room is None:
            raise ValidationError(f"Could not find the room '{data['name']}'")
        # Live rooms store exits as Room objects; persist names only.
        exits = dict((k, v.name) for k, v in room.exits.items())
        return objects.Room(desc=room.desc, exits=exits, item_desc=room.item_desc, **data)
| true |
b8581021d89d28e44486d5cee0d5f31a015d76de | Python | PatrickRWells/keckcode | /keckcode/spectra/skysub.py | UTF-8 | 2,678 | 2.65625 | 3 | [
"MIT"
] | permissive | import scipy,special_functions
from scipy import ndimage,interpolate
# Slit heights (in pixels) above this threshold trigger the wide-slit
# gradient correction and the higher-order y-fit inside skysub().
WIDE = 100
def skysub(x,y,z,scale):
    """Fit a 2-D B-spline sky-background model to slit pixels.

    Args:
        x: Wavelength-direction coordinate of each pixel (1-D array).
        y: Slit (spatial) coordinate of each pixel (1-D array).
        z: Pixel values (counts); assumed Poisson-distributed below.
        scale: Knot spacing for the spline along x.

    Returns:
        The bisplrep tck representation of the background fit.

    NOTE(review): relies on old scipy top-level aliases (scipy.arange,
    scipy.sqrt, scipy.fabs, scipy.float64) removed in modern SciPy --
    confirm the pinned SciPy version before upgrading.
    """
    # Find sources by determining which pixels are slightly high
    height = int(y.max()-y.min())
    width = int(x.max()-x.min())
    midpt = y.mean()
    # Very wide slits need special attention. Here we fit a first order
    # correction to the slit and subtract it away before doing the high
    # pixel rejection (the problem is if there is a small gradient across
    # a wide slit, the top and bottom pixels may differ significantly,
    # but these pixels may be close in *wavelength* and so locally (on
    # the CCD) low pixels will be rejected in the smoothing
    if height>WIDE:
        zbak = z.copy()
        # argsort/argsort() pattern: sort by y, filter, then undo the sort.
        args = y.argsort()
        revargs = args.argsort()
        ymodel = ndimage.percentile_filter(z[args],30.,size=height)[revargs]
        fit = special_functions.lsqfit(ymodel,'polynomial',1)
        # Only subtract the linear trend if the relative slope is significant.
        if fit['coeff'][1]*float(ymodel.size)/fit['coeff'][0]<0.05:
            pass
        else:
            ymodel = special_functions.genfunc(scipy.arange(ymodel.size),0,fit)
            ymodel -= ymodel.mean()
            z -= ymodel
    # Filter locally (in wavelength space) high points
    args = x.argsort()
    revargs = args.argsort()
    smooth = ndimage.percentile_filter(z[args],30.,size=height)[revargs]
    diff = z-smooth
    # We assume poisson statistics....
    var = scipy.sqrt(scipy.fabs(smooth))
    sigma = diff/var
    # Smooth the significance map along the slit direction.
    args = y.argsort()
    revargs = args.argsort()
    t = ndimage.median_filter(sigma[args],9)
    t = ndimage.gaussian_filter(t,width)[revargs]
    # Source detection/rejection
    # Reject yvalues > 1. sigma, and weight remaining pixels
    w = (1.0-t)/abs(z)
    skycond = ((w>0.)&(z>0))#|((y<y.min()+2.)|(y>y.max()-2.))
    x = x[skycond]
    y = y[skycond]
    z = z[skycond]
    # Reject residual high pixels (and very low pixels too!)
    args = x.argsort()
    revargs = args.argsort()
    smooth = ndimage.median_filter(z[args],height/4.)[revargs]
    diff = z-smooth
    var = scipy.sqrt(smooth)
    cond = abs(diff)<4.*var
    x = x[cond]
    y = y[cond]
    z = z[cond]
    # Spline orders: cubic in x (wavelength), linear in y (slit).
    kx = 3
    ky = 1
    # If the slit is long, return to original data and increase the order
    # of the y-fit.
    if height>WIDE:
        z = zbak[skycond]
        z = z[cond].astype(scipy.float64)
        if height>WIDE*1.5:
            ky = 3
    # Inverse-value weighting (Poisson-like); requires strictly positive z.
    cond = z>0.
    x = x[cond]
    y = y[cond]
    z = z[cond]
    w = 1./z
    # Create knots...
    innertx = scipy.arange(x.min()+scale/2.,x.max()-scale/2.,scale)
    tx = scipy.zeros(innertx.size+kx*2+2)
    tx[0:kx+1] = x.min()
    tx[kx+1:innertx.size+kx+1] = innertx.copy()
    tx[innertx.size+kx+1:] = x.max()
    ty = scipy.zeros(ky*2+2)
    ty[0:ky+1] = y.min()
    ty[ky+1:] = y.max()
    # ...and fit.
    bgfit = interpolate.bisplrep(x,y,z,w,tx=tx,ty=ty,kx=kx,ky=ky,task=-1,nxest=tx.size,nyest=ty.size,s=0)
    return bgfit
| true |
2afac159a6dbc7ed21f1edb21d2986154fc4ceff | Python | coldmax88/PyGUI | /GUI/Generic/GViewBases.py | UTF-8 | 3,294 | 3.171875 | 3 | [
"MIT"
] | permissive | #
# Python GUI - View Base - Generic
#
from GUI.Properties import overridable_property
class ViewBase(object):
    """ViewBase is an abstract base class for user-defined views.
    It provides facilities for handling mouse and keyboard events
    and associating the view with one or more models, and default
    behaviour for responding to changes in the models."""

    models = overridable_property('models',
        "List of Models being observed. Do not modify directly.")

    model = overridable_property('model',
        "Convenience property for views which observe only one Model.")

    cursor = overridable_property('cursor',
        "The cursor to display over the view.")

    # _models [Model]

    # Class-level default; set_cursor replaces it per instance.
    _cursor = None

    def __init__(self):
        self._models = []

    def destroy(self):
        # Detach from every observed model; iterate over a copy because
        # remove_model mutates self._models.
        #print "GViewBase.destroy:", self ###
        for m in self._models[:]:
            #print "GViewBase.destroy: removing model", m ###
            self.remove_model(m)

    def setup_menus(self, m):
        # Hook for subclasses to enable/check menu commands; default no-op.
        pass

    #
    # Getting properties
    #

    def get_model(self):
        # First observed model, or None when not observing any.
        models = self._models
        if models:
            return self._models[0]
        else:
            return None

    def get_models(self):
        return self._models

    #
    # Setting properties
    #

    def set_model(self, new_model):
        # Replace the observed model set with exactly {new_model}; no-op if
        # new_model is already the sole observed model.
        models = self._models
        if not (len(models) == 1 and models[0] == new_model):
            for old_model in models[:]:
                self.remove_model(old_model)
            if new_model:
                self.add_model(new_model)

    #
    # Model association
    #

    def add_model(self, model):
        """Add the given Model to the set of models being observed."""
        if model not in self._models:
            self._models.append(model)
            # Register back-reference if the model supports it (duck typing).
            add_view = getattr(model, 'add_view', None)
            if add_view:
                add_view(self)
            self.model_added(model)

    def remove_model(self, model):
        """Remove the given Model from the set of models being observed."""
        if model in self._models:
            self._models.remove(model)
            remove_view = getattr(model, 'remove_view', None)
            if remove_view:
                remove_view(self)
            self.model_removed(model)

    def model_added(self, model):
        """Called after a model has been added to the view."""
        pass

    def model_removed(self, model):
        """Called after a model has been removed from the view."""
        pass

    #
    # Input event handling
    #

    def track_mouse(self):
        """Following a mouse_down event, returns an iterator which can be used
        to track the movements of the mouse until the mouse is released.
        Each call to the iterator's next() method returns a mouse_drag
        event, except for the last one, which returns a mouse_up event."""
        raise NotImplementedError

    # def targeted(self):
    #     """Called when the component becomes the target within its Window."""
    #     pass
    #
    # def untargeted(self):
    #     """Called when the component ceases to be the target within its Window."""
    #     pass

    #
    # Cursors
    #

    def get_cursor(self, x):
        # Property getter used by overridable_property; x is unused here.
        return self._cursor

    def set_cursor(self, x):
        self._cursor = x
        # _cursor_changed is expected to be provided by a platform subclass.
        self._cursor_changed()

    #
    # Callbacks
    #

    def model_changed(self, model, *args, **kwds):
        """Default method called by the attached Model's notify_views
        method. Default is to invalidate the whole view."""
        self.invalidate()

    def model_destroyed(self, model):
        """Called when an attached model is destroyed. Default is to
        destroy the window containing this view."""
        win = self.window
        if win:
            win.destroy()
| true |
3e1f77e21bfaddc199f7c7fa62035d37513ec94d | Python | 51616/CU_Makhos | /ThaiCheckers/preprocessing.py | UTF-8 | 1,306 | 2.921875 | 3 | [] | no_license | import numpy as np
def preprocess_state(board):
    """Reshape an 8x8 board array into an NCHW tensor of shape (1, 1, 8, 8)."""
    return board.reshape(1, 1, 8, 8)
def flatten_idx(position, size, needhalf=True):
    """Collapse a 2-D (row, col) position into a single flat index.

    With needhalf=True the column is halved first: a checkers board only
    uses every other square, so 8 columns map onto 4 playable ones.
    """
    row, col = position
    effective_col = col // 2 if needhalf else col
    return row * size + effective_col
def unflatten_idx(idx, size):
    """Inverse of the non-halving flatten: (idx // size, idx % size)."""
    return divmod(idx, size)
def preprocess_move(move):
    """One-hot encode a ((row, col), (row, col)) move as a (1, 1024) tensor."""
    start, end = move
    # Flatten both squares into [0, 32), then combine into one index in
    # [0, 1024) = 32 start squares x 32 end squares.
    combined = flatten_idx(
        (flatten_idx(start, 4), flatten_idx(end, 4)), 32, False)
    encoded = np.zeros((1, 32 * 32))
    encoded[0, combined] = 1
    return encoded
def move_to_index(move):
    """Map a ((row, col), (row, col)) move to its flat index in [0, 1024)."""
    start, end = move
    return flatten_idx(
        (flatten_idx(start, 4), flatten_idx(end, 4)), 32, False)
def index_to_move(idx):
    """Inverse of move_to_index: expand a flat index back to board squares.

    Playable squares sit on alternating columns: even rows map half-column
    c to column 2*c + 1, odd rows to column 2*c.
    """
    def expand(square):
        row, half_col = unflatten_idx(square, 4)
        col = half_col * 2 + (1 if row % 2 == 0 else 0)
        return (row, col)

    start_sq, end_sq = unflatten_idx(idx, 32)
    return (expand(start_sq), expand(end_sq))
def index_to_move_human(idx):
    """Convert a flat move index to 1-based rank numbers and file letters."""
    files = 'abcdefgh'
    (start_row, start_col), (end_row, end_col) = index_to_move(idx)
    return ((start_row + 1, files[start_col]), (end_row + 1, files[end_col]))
| true |
f1584206360d12a0467fb4148cb51ed140a2b353 | Python | kdbanman/sandbox | /primefac.py | UTF-8 | 766 | 3.359375 | 3 | [] | no_license | import primes
import sys
def primefac(n, prints=False, cumul=None):
    """Return the prime factorisation of `n` as a list, smallest factor first.

    Fixes two defects in the original: the shared mutable default argument
    (`cumul=[]` made repeated calls accumulate into the same list) and the
    Python-2-only `print p` / true-division recursion. The dependency on
    the external `primes` module is replaced by trial division (the first
    divisor found is necessarily prime).

    Args:
        n: Integer >= 1 to factor (returns [] for n == 1).
        prints: If True, print each factor as it is found.
        cumul: Accumulator used by the recursion; leave as None.
    """
    if cumul is None:
        cumul = []
    p = 2
    while p * p <= n:
        if n % p == 0:
            if prints:
                print(p)
            cumul.append(p)
            return primefac(n // p, prints, cumul)
        p += 1
    if n > 1:
        # Whatever remains above 1 is itself prime.
        if prints:
            print(n)
        cumul.append(n)
    return cumul
if __name__ == "__main__":
    # CLI: "primefac.py test" runs a tiny self-check; any other argument is
    # treated as an integer to factor (factors are printed as found).
    # (Fixed Python 2 print statements; narrowed the bare except so that
    # KeyboardInterrupt/SystemExit are not swallowed.)
    if sys.argv[1] == "test":
        try:
            two = primefac(2)
            two.remove(2)
            assert two == []
            six = primefac(6)
            six.remove(2)
            six.remove(3)
            assert six == []
        except Exception:
            print("shit's fucked")
    else:
        try:
            primefac(int(sys.argv[1]), True)
        except ValueError:
            print("you're a fucking dick")
| true |
de28b3893283d8862f623b2e8d99d994881f7b2b | Python | MilenaFilippova/programming_language_practice | /task1_big_area.py | UTF-8 | 1,126 | 3.03125 | 3 | [] | no_license | #На изображении (task1.png) найти объект с самой большой внутренней площадью(т.е. площадь без
# Task: in the image (task1.png) find the object with the largest internal
# area, i.e. the area excluding the perimeter pixels.
import matplotlib.pyplot as plt
import numpy as np
from skimage import filters
from skimage.filters import threshold_isodata, threshold_otsu
from skimage.measure import label, regionprops
from skimage import color
image=plt.imread("D:\\ИГУ\\3курс\\Компьютерное зрение\\task1.png")
# Collapse RGB(A) channels to grayscale by averaging over axis 2.
gray = np.average(image, 2)
thresh1 = threshold_otsu(gray)
thresh2 = threshold_isodata(gray)
# Boolean masks; adding them acts as a logical OR of the two thresholdings.
temp1 = gray > thresh1
temp2 = gray < thresh2
img = temp1 + temp2
# Label connected components and measure their region properties.
LB=label(img)
objs=regionprops(LB)
max_area_in=0
max_area_all=0
for obj in objs:
    print( str(obj.label) +" "+ str(obj.area))
    # "Internal" area approximated as total area minus perimeter length.
    area=obj.area-obj.perimeter
    if max_area_in<area:
        max_area_in=area
        max_area_all=obj.area
print("Max internal area= " + str(max_area_in) + " ; All area of this figure: "+ str(max_area_all))
plt.subplot(121)
plt.imshow(image)
plt.subplot(122)
plt.imshow(LB)
plt.colorbar()
plt.show()
| true |
4c418217888394a52f8920db5f2adf617fb5c775 | Python | jdidion/atropos | /atropos/util/__init__.py | UTF-8 | 22,369 | 3.0625 | 3 | [
"CC0-1.0",
"MIT",
"LicenseRef-scancode-public-domain"
] | permissive | """Widely useful utility methods.
"""
from collections import OrderedDict
from collections.abc import Iterable, Sequence
from datetime import datetime
import errno
import functools
import logging
import math
from numbers import Number
import time
from atropos import AtroposError
# TODO: the nucleotide table should be implemented as an alphabet.
class NotInAlphabetError(Exception):
    """Raised when a character is not part of an :class:`Alphabet`.

    The original called ``super().__init__()`` with no arguments, so
    ``str(exc)`` was empty; an informative message is now attached while
    the offending character stays available as ``.character``.
    """
    def __init__(self, character):
        super().__init__("Character {!r} is not in the alphabet".format(character))
        self.character = character
class Alphabet():
    """A set of valid characters plus a default substituted for invalid ones.

    Args:
        valid_characters: Iterable of characters considered valid.
        default_character: Character used in place of invalid ones; it is
            always made part of the valid set.
    """
    def __init__(self, valid_characters, default_character):
        if not isinstance(valid_characters, set):
            valid_characters = set(valid_characters)
        # set.add is idempotent, so the original membership guard was
        # redundant; the default must itself be resolvable.
        valid_characters.add(default_character)
        self.valid_characters = valid_characters
        self.default_character = default_character

    def __contains__(self, character):
        """True if `character` is valid in this alphabet."""
        return character in self.valid_characters

    def validate(self, character):
        """Raises NotInAlphabetError if the character is not in the alphabet.
        """
        if character not in self:
            raise NotInAlphabetError(character)

    def validate_string(self, string):
        """Raises NotInAlphabetError if any character in 'string' is not in the
        alphabet.
        """
        for character in string:
            self.validate(character)

    def resolve(self, character):
        """Returns 'character' if it's in the alphabet, otherwise the
        alphabet's default character.
        """
        if character in self.valid_characters:
            return character
        return self.default_character

    def resolve_string(self, string):
        """Returns a new string with any non-alphabet characters replaced
        with the default character.
        """
        return "".join(self.resolve(c) for c in string)
# Named, ready-made alphabets. 'iso' is a placeholder (None = no
# restriction); colorspace encodes colors as digits with no default char.
ALPHABETS = dict(
    dna=Alphabet('ACGT', 'N'),
    iso=None,
    colorspace=Alphabet('0123', None)
)
def build_iso_nucleotide_table():
    """Generate a dict mapping ISO nucleotide characters to their complements,
    in both upper and lower case.
    """
    # One direction of each complement pair; the loop fills in the reverse
    # direction and both lower-case variants.
    pairs = (
        ('A', 'T'), ('C', 'G'), ('R', 'Y'), ('S', 'S'), ('W', 'W'),
        ('K', 'M'), ('B', 'V'), ('D', 'H'), ('N', 'N'))
    table = {}
    for base, comp in pairs:
        table[base] = comp
        table[comp] = base
        table[base.lower()] = comp.lower()
        table[comp.lower()] = base.lower()
    return table
# Complement lookup for all IUPAC nucleotide codes, upper and lower case.
BASE_COMPLEMENTS = build_iso_nucleotide_table()

IUPAC_BASES = frozenset(('X',) + tuple(BASE_COMPLEMENTS.keys()))
"""Valid IUPAC bases, plus 'X'"""

GC_BASES = frozenset('CGRYSKMBDHVN')
"""IUPAC bases that include C or G."""

# Suffix multipliers for human-readable magnitudes (e.g. '5M' -> 5e6).
MAGNITUDE = dict(
    G=1E9,
    M=1E6,
    K=1E3
)

# Cached natural log of 2; used to convert entropy to bits.
LOG2 = math.log(2)
class RandomMatchProbability(object):
    """Class for computing random match probability for DNA sequences based on
    binomial expectation. Maintains a cache of factorials to speed computation.

    Args:
        init_size: Initial cache size.
    """
    def __init__(self, init_size=150):
        # cache maps (matches, size, match_prob) -> probability.
        self.cache = {}
        # factorials[i] holds i! once filled; max_n is the largest filled index.
        self.factorials = [1] * init_size
        self.max_n = 1
        self.cur_array_size = init_size

    def __call__(self, matches, size, match_prob=0.25, mismatch_prob=0.75):
        """Computes the random-match probability for a given sequence size and
        number of matches.

        Args:
            match_prob: Probability of two random bases matching.
            mismatch_prob: Probability of two random bases not matcing.

        Returns:
            The probability.
        """
        # First see if we have the result in the cache
        key = (matches, size, match_prob)
        prob = self.cache.get(key, None)
        # NOTE(review): truthiness check would miss a cached 0.0, but valid
        # inputs always yield a positive probability.
        if prob:
            return prob

        # When there are no mismatches, the probability is
        # just that of observing a specific sequence of the
        # given length by chance.
        if matches == size:
            prob = match_prob ** matches

        else:
            # Binomial tail: P(at least `matches` successes in `size` trials).
            nfac = self.factorial(size)
            prob = 0.0
            for i in range(matches, size+1):
                j = size - i
                # use integer division in the case that the numbers are too
                # large for floating point division
                try:
                    div = nfac / self.factorial(i) / self.factorial(j)
                except OverflowError:
                    div = nfac // self.factorial(i) // self.factorial(j)
                prob += (mismatch_prob ** j) * (match_prob ** i) * div

        self.cache[key] = prob
        return prob

    def factorial(self, num):
        """Returns `num`!.
        """
        if num > self.max_n:
            self._fill_upto(num)
        return self.factorials[num]

    def _fill_upto(self, num):
        # Extend the factorial table up to index `num`, computing each new
        # value from the previous one.
        # NOTE(review): cur_array_size is never updated after extending, so
        # repeated growth over-allocates; harmless but wasteful -- consider
        # updating it here.
        if num >= self.cur_array_size:
            extension_size = num - self.cur_array_size + 1
            self.factorials += [1] * extension_size
        idx = self.max_n
        next_i = idx + 1
        while idx < num:
            self.factorials[next_i] = next_i * self.factorials[idx]
            idx = next_i
            next_i += 1
        self.max_n = idx
class Mergeable(object):
    """Interface for objects that know how to combine themselves with
    another instance of the same kind.
    """
    def merge(self, other):
        """Combine `other` into `self` and return the merged result.

        Subclasses must override this; the base implementation is abstract.
        """
        raise NotImplementedError()
class Summarizable(object):
    """Interface for objects that can render themselves as a summary dict.
    """
    def summarize(self):
        """Build and return a dict summarizing this object.

        Subclasses must override this; the base implementation is abstract.
        """
        raise NotImplementedError()
class Const(Mergeable):
    """A :class:`Mergeable` that is a constant value. Merging simply checks
    that two values are identical.

    Args:
        value: The value to treat as a constant.
    """
    def __init__(self, value):
        self.value = value

    def merge(self, other):
        """Check that `self==other` and return `self`.

        Raises:
            ValueError: If the two values differ.
        """
        if self != other:
            raise ValueError("{} != {}".format(self, other))
        return self

    def __eq__(self, other):
        """Returns True if `self.value==other` (or `other.value` if `other` is
        a :class:`Const`).
        """
        if isinstance(other, Const):
            other = other.value
        return self.value == other

    def __hash__(self):
        # Defining __eq__ alone made Const unhashable (Python 3 sets
        # __hash__ = None); hash by the wrapped value so Const(x) and x
        # land in the same bucket, consistent with __eq__.
        return hash(self.value)

    def __repr__(self):
        return str(self.value)
class Timestamp(object):
    """Records datetime and clock time at object creation.
    """
    def __init__(self):
        # Wall-clock datetime and CPU (process) time are captured together
        # so differences between two Timestamps can report both.
        self.dtime = datetime.now()
        self.process_time = time.process_time()

    def timestamp(self):
        """Returns the unix timestamp.
        """
        return self.dtime.timestamp()

    def isoformat(self):
        """Returns the datetime in ISO format.
        """
        return self.dtime.isoformat()

    def __sub__(self, other, minval=0.01):
        """Subtract another timestamp from this one.

        Args:
            other: The other timestamp.
            minval: The minimum difference. (NOTE(review): extra parameters
                on __sub__ cannot be supplied via the `-` operator, so
                minval is effectively always the default.)

        Returns:
            A dict of {wallclock=<datetime_diff>, cpu=<clock_diff>}.
        """
        return dict(
            wallclock=max(minval, self.timestamp() - other.timestamp()),
            cpu=max(minval, self.process_time - other.process_time))
class Timing(Summarizable):
    """Context manager that maintains timing information using
    :class:`Timestamp`s. Maintains a start time on __enter__, and can be updated
    with the current time by the user. Does a final update on __exit__.
    """
    def __init__(self):
        self.start_time = None
        self.cur_time = None

    def __enter__(self):
        self.start_time = Timestamp()
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        # Capture the end time even if the managed block raised.
        self.update()

    def update(self):
        """Set :attr:`self.cur_time` to the current time.
        """
        self.cur_time = Timestamp()

    def summarize(self):
        """Returns a summary dict
        {start=<start_time>, wallclock=<datetime_diff>, cpu=<clock_diff>}.
        """
        # Lazily take a final reading if update() was never called.
        if not self.cur_time:
            self.update()
        assert self.start_time is not None
        summary = dict(start=self.start_time.isoformat())
        # Timestamp.__sub__ returns the wallclock/cpu delta dict.
        summary.update(self.cur_time - self.start_time)
        return summary
class CountingDict(dict, Mergeable, Summarizable):
    """A dict of counters: looking up a missing key yields 0 instead of
    raising.

    Args:
        keys: Optional iterable; each occurrence adds 1 to that key's count.
        sort_by: Whether the summary is sorted by key (0) or value (1).
        summary_type: 'dict' for an ordered-dict summary, anything else for
            a tuple of (key, value) pairs.
    """
    def __init__(self, keys=None, sort_by=0, summary_type='dict'):
        super().__init__()
        self.sort_by = sort_by
        self.summary_type = summary_type
        if keys:
            for key in keys:
                self.increment(key)

    def __getitem__(self, name):
        # Missing keys count as zero.
        return self.get(name, 0)

    def increment(self, key, inc=1):
        """Increment the count of `key` by `inc`.
        """
        self[key] = self[key] + inc

    def merge(self, other):
        """Add the counts of another CountingDict into this one."""
        if not isinstance(other, CountingDict):
            raise ValueError(
                "Cannot merge object of type {}".format(type(other)))
        for key, count in other.items():
            self[key] = self[key] + count
        return self

    def get_sorted_items(self):
        """Returns an iterable of (key, value) sorted according to this
        CountingDict's `sort_by` param.
        """
        idx = self.sort_by
        return sorted(self.items(), key=lambda item: item[idx])

    def summarize(self):
        """Returns the sorted items as an OrderedDict (or a tuple)."""
        items = self.get_sorted_items()
        if self.summary_type == 'dict':
            return ordered_dict(items)
        return tuple(items)
class Histogram(CountingDict):
    """CountingDict whose summary bundles the histogram together with
    descriptive statistics.
    """
    def summarize(self):
        """Returns {hist: <sorted counts>, summary: <summary stats>}."""
        return dict(
            hist=super().summarize(),
            summary=self.get_summary_stats())

    def get_summary_stats(self):
        """Returns dict with mean, stdev, median, and modes of the histogram.
        """
        values, counts = tuple(self.keys()), tuple(self.values())
        center = weighted_mean(values, counts)
        return dict(
            mean=center,
            stdev=weighted_stdev(values, counts, center),
            median=weighted_median(values, counts),
            modes=weighted_modes(values, counts))
class NestedDict(dict, Mergeable, Summarizable):
    """A dict that initalizes :class:`CountingDict`s for missing keys.

    Args:
        shape: The flattened shape: 'long' or 'wide'.
    """
    def __init__(self, shape="wide"):
        super().__init__()
        self.shape = shape

    def __getitem__(self, name):
        # Auto-vivify a CountingDict for first-time keys.
        if name not in self:
            self[name] = CountingDict()
        return self.get(name)

    def merge(self, other):
        """Merge another NestedDict into this one, merging shared children."""
        if not isinstance(other, NestedDict):
            raise ValueError(
                "Cannot merge object of type {}".format(type(other)))
        for key, value in other.items():
            if key in self:
                self[key].merge(value)
            else:
                self[key] = value
        return self

    def summarize(self):
        """Returns a flattened version of the nested dict.

        Returns:
            When `shape=='long'`, a list of (key1, key2, value) tuples.
            When `shape=='wide'`, a dict of
            {columns:keys2, rows: {key1, values}}, where `keys2` is the set
            of keys in the child dicts.
        """
        keys1 = sorted(self.keys())
        if self.shape == "long":
            # One (outer, inner, count) row per leaf value.
            return tuple(
                (key1, key2, value)
                for key1 in keys1
                for key2, value in self[key1].items())
        else:
            # Wide/table form: union of inner keys becomes the column set;
            # each outer key becomes a row padded with 0 for missing columns.
            keys2 = set()
            for child in self.values():
                keys2.update(child.keys())
            keys2 = tuple(sorted(keys2))
            return dict(
                columns=keys2,
                rows=ordered_dict(
                    (key1, tuple(self[key1].get(key2, 0) for key2 in keys2))
                    for key1 in keys1))
class MergingDict(OrderedDict, Mergeable):
    """An ordered dict whose merge() delegates to :func:`merge_dicts`.
    """
    def merge(self, other):
        """Fold the items of `other` into this dict and return self.
        """
        merge_dicts(self, other)
        return self
def merge_dicts(dest, src):
    """Merge corresponding items of `src` into `dest` in place.

    Keys missing from `dest` (or mapped to None there) are simply copied
    from `src`; keys present in both with non-None `src` values are
    combined via :func:`merge_values`.

    Args:
        dest: The dict to merge into.
        src: The dict to merge from.

    Raises:
        ValueError if a value is not one of the accepted types.
    """
    for key, incoming in src.items():
        current = dest.get(key, None)
        if current is None:
            dest[key] = incoming
        elif incoming is not None:
            dest[key] = merge_values(current, incoming)
def merge_values(v_dest, v_src):
    """Merge two values based on their types, as follows:

    - Mergeable: merging is done by the dest object's merge function.
    - dict: merge_dicts is called recursively.
    - Number: values are summed.
    - Iterable (non-string): First src and dest values are converted to tuples;
      they must be the same length. Then, corresponding values are handled as
      above. The value is returned as a list.
    - Otherwise: Treated as a Const (i.e. must be identical).

    Args:
        v_dest: The dest value.
        v_src: The src value.

    Returns:
        The merged value.
    """
    if isinstance(v_dest, Mergeable):
        v_dest = v_dest.merge(v_src)
    elif isinstance(v_dest, dict):
        assert isinstance(v_src, dict)
        merge_dicts(v_dest, v_src)
    elif isinstance(v_dest, str):
        # str is tested before Iterable on purpose: strings are iterable
        # but must be compared for equality, not merged element-wise.
        assert v_dest == v_src
    elif isinstance(v_dest, Number):
        v_dest += v_src
    elif isinstance(v_dest, Iterable):
        i_dest = tuple(v_dest)
        i_src = tuple(v_src)
        if len(i_dest) == 0:
            v_dest = i_src
        elif len(i_src) > 0:
            # Pairwise merge; note zip() silently truncates to the shorter
            # input rather than enforcing equal lengths.
            v_dest = [merge_values(d, s) for d, s in zip(i_dest, i_src)]
    else:
        assert v_dest == v_src
    return v_dest
def ordered_dict(iterable):
    """Create an OrderedDict from an iterable of (key, value) tuples.

    The OrderedDict constructor already consumes an iterable of pairs in
    order, so the original hand-written insertion loop is unnecessary.

    Args:
        iterable: Iterable of (key, value) pairs, in the desired order.

    Returns:
        A collections.OrderedDict preserving the input order.
    """
    return OrderedDict(iterable)
def complement(seq):
    """Returns the complement of nucleotide sequence `seq` (case preserved)."""
    return "".join(map(BASE_COMPLEMENTS.__getitem__, seq))
def reverse_complement(seq):
    """Returns the reverse complement of nucleotide sequence `seq`."""
    # Complementing is per-character, so complement-then-reverse equals
    # reverse-then-complement.
    return "".join(BASE_COMPLEMENTS[base] for base in seq)[::-1]
def sequence_complexity(seq):
    """Computes a simple measure of sequence complexity.

    This is the Shannon entropy (in bits) over the four canonical bases,
    computed case-insensitively.

    Args:
        seq: The sequence to measure.

    Returns:
        Complexity, as a value [0,2], where 0 = a homopolymer and
        2 = completely random.
    """
    upper = seq.upper()
    total = float(len(upper))
    entropy = 0
    for base in ('A', 'C', 'G', 'T'):
        occurrences = upper.count(base)
        if occurrences > 0:
            frac = occurrences / total
            entropy += frac * math.log(frac) / LOG2
    return -entropy
def qual2int(qual, base=33):
    """Convert a quality character to a phred-scale int.

    Args:
        qual: The quality character.
        base: The offset of the first quality value (Old Illumina = 64,
            new Illumina = 33).

    Returns:
        The integer quality.
    """
    code = ord(qual)
    return code - base
def quals2ints(quals, base=33):
    """Convert an iterable of quality characters to phred-scale ints.

    Args:
        quals: The qualities.
        base: The offset of the first quality value (Old Illumina = 64,
            new Illumina = 33).

    Returns:
        A generator of integer qualities. (The original docstring claimed
        a tuple was returned, but the code yields a generator expression.)
    """
    return (ord(q) - base for q in quals)
def qual2prob(qchar):
    """Converts a phred-scale quality char to its error probability."""
    phred = qual2int(qchar)
    return 10 ** (-phred / 10)
def enumerate_range(collection, start, end):
    """Generates an indexed series: (start,coll[0]), (start+1,coll[1]) ...

    At most (end-start) items are yielded; iteration also stops cleanly if
    the collection is exhausted first. The original implementation let
    StopIteration escape the generator, which raises RuntimeError under
    PEP 479 (Python 3.7+) when the collection is shorter than the range.

    Args:
        collection: The collection to enumerate.
        start: The starting index.
        end: The ending index (exclusive).

    Yields:
        (index, item) tuples.
    """
    # zip() stops at the shorter of the two inputs.
    yield from zip(range(start, end), collection)
def mean(values):
    """Computes the mean of a sequence of numeric values.

    Args:
        values: Sequence of numeric values.

    Returns:
        The mean (floating point).

    Raises:
        ValueError: If `values` is empty.
    """
    if len(values) == 0:
        # The original error message mistakenly said "mode".
        raise ValueError("Cannot determine the mean of an empty sequence")
    return sum(values) / len(values)
def weighted_mean(values, counts):
    """Computes the mean of a sequence of numeric values weighted by counts.

    Args:
        values: Sequence of numeric values.
        counts: Sequence of weights, same length as `values`.

    Returns:
        The weighted mean (floating point).

    Raises:
        ValueError: If `values` is empty or the lengths differ.
    """
    datalen = len(values)
    if datalen == 0:
        # The original error message had a typo ("mena").
        raise ValueError("Cannot determine the mean of an empty sequence")
    if datalen != len(counts):
        raise ValueError("'values' and 'counts' must be the same length")
    return sum(v * c for v, c in zip(values, counts)) / sum(counts)
def stdev(values, mu0=None):
    """Returns the population standard deviation of `values`.

    Args:
        values: Sequence of numeric values.
        mu0: Pre-computed mean; computed from `values` when None.

    Raises:
        ValueError: If `values` is empty.
    """
    size = len(values)
    if size == 0:
        raise ValueError("Cannot determine the stdev of an empty sequence")
    # A single value has zero spread by definition.
    if size == 1:
        return 0
    center = mean(values) if mu0 is None else mu0
    variance = sum((val - center) ** 2 for val in values) / size
    return math.sqrt(variance)
def weighted_stdev(values, counts, mu0=None):
    """Return the population standard deviation of *values* weighted by
    *counts*, around *mu0* (defaults to the weighted mean)."""
    size = len(values)
    if size == 0:
        raise ValueError("Cannot determine the stdev of an empty sequence")
    if size != len(counts):
        raise ValueError("'values' and 'counts' must be the same length")
    if size == 1:
        # A single observation has no spread.
        return 0
    center = weighted_mean(values, counts) if mu0 is None else mu0
    weighted_sq_sum = sum(
        ((val - center) ** 2) * count
        for val, count in zip(values, counts))
    return math.sqrt(weighted_sq_sum / sum(counts))
def median(values):
    """Return the median of *values*.

    Note: sorts *values* in place (a deliberate speed optimization), so the
    caller's list is mutated.

    Args:
        values: Mutable sequence of numeric values.

    Returns:
        The median (floating point for even-length input).
    """
    count = len(values)
    if count == 0:
        raise ValueError("Cannot determine the median of an empty sequence")
    values.sort()
    mid, odd = divmod(count, 2)
    if odd:
        return values[mid]
    return (values[mid - 1] + values[mid]) / 2
def weighted_median(values, counts):
    """Compute the median of `values` weighted by `counts`.

    Args:
        values: Sequence of unique values (assumed ascending -- the
            cumulative-sum walk relies on it; TODO confirm with callers).
        counts: Sequence of counts, where each count is the number of times the
            value at the corresponding position appears in the sample.

    Returns:
        The weighted median (float), or None when all counts are zero.

    Raises:
        ValueError: If *values* is empty or the lengths differ.
    """
    datalen = len(values)
    if datalen == 0:
        raise ValueError("Cannot determine the median of an empty sequence")
    if datalen != len(counts):
        raise ValueError("'values' and 'counts' must be the same length")
    # Running cumulative sum in O(n); the previous functools.reduce version
    # rebuilt the list on every step, which was O(n**2).
    counts_cumsum = []
    total = 0
    for count in counts:
        total += count
        counts_cumsum.append(total)
    if total == 0:
        return None
    # 1-based ranks of the two middle sample positions (equal when odd).
    mid1 = mid2 = (total // 2) + 1
    if total % 2 == 0:
        mid1 -= 1
    val1 = val2 = None
    for i, cumsum in enumerate(counts_cumsum):
        if val1 is None and mid1 <= cumsum:
            val1 = values[i]
        if mid2 <= cumsum:
            val2 = values[i]
            break
    return float(val1 + val2) / 2
def modes(values):
    """Return the sorted modal (i.e. most frequent) values of *values*."""
    if not values:
        raise ValueError("Cannot determine the mode of an empty sequence")
    if len(values) == 1:
        # A single value is trivially the mode.
        return values
    return _find_modes(CountingDict(values).items())
def weighted_modes(values, counts):
    """Return the sorted modal values, where counts[i] gives the frequency
    of values[i]."""
    if not values:
        raise ValueError("Cannot determine the mode of an empty sequence")
    if len(values) != len(counts):
        raise ValueError("'values' and 'counts' must be the same length")
    if len(values) == 1:
        # A single value is trivially the mode.
        return values
    return _find_modes(zip(values, counts))
def _find_modes(value_count_iter):
sorted_counts = sorted(value_count_iter, key=lambda x: x[1], reverse=True)
modal_values = [sorted_counts[0][0]]
mode_count = sorted_counts[0][1]
for value, count in sorted_counts[1:]:
if count == mode_count:
modal_values.append(value)
else:
break
modal_values.sort()
return modal_values
def truncate_string(string, max_len=100):
    """Return *string* cut down to at most *max_len* characters, replacing
    the tail with "..." when a cut was needed; None passes through."""
    if string is None:
        return None
    if len(string) <= max_len:
        return string
    return string[:max_len - 3] + '...'
def run_interruptible(func, *args, **kwargs):
    """Run a function, gracefully handling keyboard interrupts.

    Args:
        func: The function to execute.
        args, kwargs: Positional and keyword arguments to pass to `func`.

    Returns:
        A (unix-style) return code: 0 on success, 130 on keyboard
        interrupt, 1 for EPIPE / AtroposError / EOFError / any other error.

    Raises:
        IOError: Re-raised for any I/O error other than EPIPE.
    """
    retcode = 0
    try:
        func(*args, **kwargs)
    except KeyboardInterrupt:
        # 130 = 128 + SIGINT, the conventional shell exit code for Ctrl-C.
        logging.getLogger().error("Interrupted")
        retcode = 130
    except IOError as err:
        if err.errno == errno.EPIPE:
            # Downstream consumer closed the pipe (e.g. `| head`); exit
            # quietly with an error code instead of a traceback.
            retcode = 1
        else:
            raise
    except (AtroposError, EOFError):
        logging.getLogger().error("Atropos error", exc_info=True)
        retcode = 1
    except Exception: # pylint: disable=broad-except
        # Last-resort guard so the CLI always exits with a code, not a trace.
        logging.getLogger().error("Unknown error", exc_info=True)
        retcode = 1
    return retcode
| true |
e5b5417d6cbf579f2ab6bcc9ae9fbb3c217112dc | Python | gugugu625/fslnavdatatokml | /main.py | UTF-8 | 5,170 | 2.640625 | 3 | [] | no_license | import sqlite3
import math
import re
# Module-level connection to the local navdata SQLite database ('rom');
# the shared cursor is used by the top-level queries at the end of the file.
conn = sqlite3.connect('rom')
c = conn.cursor()
def getDegree(latA, lonA, latB, lonB):
    """Return the initial great-circle bearing, in degrees [0, 360),
    from point A to point B (inputs in decimal degrees)."""
    lat1, lon1, lat2, lon2 = map(math.radians, (latA, lonA, latB, lonB))
    delta_lon = lon2 - lon1
    y = math.sin(delta_lon) * math.cos(lat2)
    x = (math.cos(lat1) * math.sin(lat2)
         - math.sin(lat1) * math.cos(lat2) * math.cos(delta_lon))
    bearing = math.degrees(math.atan2(y, x))
    # atan2 yields (-180, 180]; normalize into [0, 360).
    return (bearing + 360) % 360
def getlatlonbyraddis(lat, lon, deg, dis):
    """Return [lat, lon] (decimal degrees) of the point *dis* meters away
    from (lat, lon) along the initial bearing *deg* (degrees)."""
    earth_radius = 6371393
    lat1 = math.radians(lat)
    lon1 = math.radians(lon)
    bearing = math.radians(deg)
    # Angular distance travelled along the great circle.
    angular = dis / earth_radius
    lat2 = math.asin(
        math.sin(lat1) * math.cos(angular)
        + math.cos(lat1) * math.sin(angular) * math.cos(bearing))
    lon2 = lon1 + math.atan2(
        math.sin(bearing) * math.sin(angular) * math.cos(lat1),
        math.cos(angular) - math.sin(lat1) * math.sin(lat2))
    return [math.degrees(lat2), math.degrees(lon2)]
def searchwaypoint(name, icao_code, sect_code, airport):
    """Look up a fix by ident, trying progressively broader tables.

    Search order: WAYPOINT restricted to *airport*'s region, then
    VHF_NAVAID, then WAYPOINT without the region restriction.

    Returns:
        The first matching row (latitude/longitude usable at indexes 12/13,
        as consumed by printpath()), or None when nothing matches.
        VHF_NAVAID rows are shifted so indexes 12/13 line up with WAYPOINT.
    """
    conn = sqlite3.connect('rom')
    try:
        c = conn.cursor()
        # Parameterized queries instead of string concatenation: avoids SQL
        # injection and quoting bugs in idents.
        resw = c.execute(
            "SELECT * FROM WAYPOINT WHERE WAYPOINT_IDENT=? AND SECT_CODE=?"
            " AND WAYPOINT_ICAO_CODE=? AND REGION_CODE=?",
            (name, sect_code, icao_code, airport))
        for roww in resw:
            return roww
        resw = c.execute(
            "SELECT * FROM VHF_NAVAID WHERE VOR_IDENT=? AND SECT_CODE=?"
            " AND VHF_ICAO_CODE=?",
            (name, sect_code, icao_code))
        for roww in resw:
            # Re-align lat/lon columns with the WAYPOINT layout.
            roww = list(roww)
            roww[13] = roww[12]
            roww[12] = roww[11]
            return roww
        resw = c.execute(
            "SELECT * FROM WAYPOINT WHERE WAYPOINT_IDENT=? AND SECT_CODE=?"
            " AND WAYPOINT_ICAO_CODE=?",
            (name, sect_code, icao_code))
        for roww in resw:
            return roww
        return None
    finally:
        # The original leaked the connection whenever a row was found
        # (close() only ran on the no-match path).
        conn.close()
def printpath(res):
    """Print a procedure's waypoints as "lon,lat,alt " coordinate triples,
    expanding RF (constant-radius arc) legs into one-degree steps.

    Relies on module globals: `airport` (region filter for lookups), plus
    math/re and searchwaypoint()/getlatlonbyraddis()/getDegree().
    """
    prewaypoint = ""
    h = 0
    for row in res:
        #print(row)
        # IF/TF legs: emit the named fix directly.
        if row[18]=="IF" or row[18]=="TF":
            ifp = searchwaypoint(row[10],row[11],row[12],airport)
            try:
                print(str(ifp[13])+","+str(ifp[12])+","+str(h)+" ", end='')
            except:
                # NOTE(review): bare except hides lookup failures (ifp may be
                # None); the raw row is printed instead for inspection.
                print(row)
        # RF legs: walk the arc around the centre fix one degree at a time.
        elif row[18]=="RF":
            prejw = searchwaypoint(prewaypoint[10],prewaypoint[11],prewaypoint[12],airport)
            cjw = searchwaypoint(row[36],row[38],row[39],airport)
            jw = searchwaypoint(row[10],row[11],row[12],airport)
            deg2 = getDegree(cjw[12],cjw[13],jw[12],jw[13])
            deg1 = getDegree(cjw[12],cjw[13],prejw[12],prejw[13])
            '''if(row[10]=="RK684"):
                print(prejw)
                print(cjw)
                print(jw)
                print(deg1)
                print(deg2)'''
            # "L" turns sweep the bearing downwards, otherwise upwards.
            # row[22] appears to be the arc radius in thousandths of a
            # nautical mile (converted to meters here) -- TODO confirm.
            if row[16]=="L":
                angle = math.floor(deg1)
                cnt = 0
                while cnt<=360:
                    if angle==math.ceil(deg2):
                        break
                    elif (math.ceil(deg2)==360 or math.ceil(deg2)==0) and angle==0:
                        break
                    rflatlon=getlatlonbyraddis(cjw[12],cjw[13],angle,(row[22]/1000)*1852)
                    print(str(rflatlon[1])+","+str(rflatlon[0])+","+str(h)+" ", end='')
                    angle = angle-1
                    if(angle<=0):
                        angle = 360+angle
                    cnt=cnt+1
            else:
                angle = math.ceil(deg1)
                cnt = 0
                while cnt<=360:
                    if angle==math.floor(deg2):
                        break
                    elif (math.floor(deg2)==360 or math.floor(deg2)==0) and angle==0:
                        break
                    rflatlon=getlatlonbyraddis(cjw[12],cjw[13],angle,(row[22]/1000)*1852)
                    print(str(rflatlon[1])+","+str(rflatlon[0])+","+str(h)+" ", end='')
                    angle = angle+1
                    if(angle==360):
                        angle = 0
                    cnt = cnt+1
            # Close the arc on the leg's terminating fix.
            ifp = searchwaypoint(row[10],row[11],row[12],airport)
            print(str(ifp[13])+","+str(ifp[12])+","+str(h)+" ", end='')
        prewaypoint = row
        # Rows whose field 15 matches .Y.M end the current coordinate block:
        # print blank separators and reset the altitude counter.
        if re.match("[\s\S]Y[\s\S]M",row[15]):
            print("")
            print("")
            print("")
            h=0
    # Emit the final waypoint of the procedure.
    ifp = searchwaypoint(prewaypoint[10],prewaypoint[11],prewaypoint[12],airport)
    print(str(ifp[13])+","+str(ifp[12])+","+str(h)+" ", end='')
# Airport (region) whose procedure is exported; the commented-out queries
# below are kept as examples of other procedure/route types.
airport="ZLYS"
#res = c.execute("SELECT * FROM AIRPORT_PROCEDURE WHERE ARPT_IDENT='"+airport+"' AND ROUTE_TYPE=5 AND PROC_IDENT='R15' ORDER BY SEQ_NR")# AND TRANSITION_IDENT='RW09'
#res = c.execute("SELECT * FROM AIRPORT_PROCEDURE WHERE ARPT_IDENT='"+airport+"' AND ROUTE_TYPE=5 AND PROC_IDENT='DOB6JA' ORDER BY SEQ_NR")
#res = c.execute("SELECT * FROM AIRPORT_PROCEDURE WHERE ARPT_IDENT='"+airport+"' AND ROUTE_TYPE='A' AND PROC_IDENT='R33' AND TRANSITION_IDENT='LXA' ORDER BY SEQ_NR")
# NOTE(review): string-built SQL; acceptable for local constants, but prefer
# parameterized queries if these idents ever come from user input.
res = c.execute("SELECT * FROM AIRPORT_PROCEDURE WHERE ARPT_IDENT='"+airport+"' AND ROUTE_TYPE='R' AND PROC_IDENT='R10';")
printpath(res)
conn.commit()
conn.close()
11bc86085f90e5af5b3038f990389ee3b5ca5f25 | Python | PyCQA/redbaron | /tests/test_redbaron.py | UTF-8 | 996 | 2.984375 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding:Utf-8 -*-
""" Main redbaron test module """
from redbaron import RedBaron, truncate
def test_other_name_assignment():
red = RedBaron("a = b")
assert red.assign is red[0]
def test_index():
red = RedBaron("a = [1, 2, 3]")
assert red[0].value.value[2].index_on_parent == 2
assert red[0].index_on_parent == 0
assert red[0].value.index_on_parent is None
def test_index_raw():
red = RedBaron("a = [1, 2, 3]")
assert red[0].value.value.node_list[2].index_on_parent_raw == 2
assert red[0].index_on_parent == 0
assert red[0].value.index_on_parent_raw is None
def test_regression_find_all_recursive():
red = RedBaron("a.b()")
assert red[0].value("name", recursive=False) == [red.name, red("name")[1]]
def test_truncate():
assert "1234" == truncate("1234", 2)
assert "12345" == truncate("12345", 4)
assert "1...6" == truncate("123456", 5)
assert "123456...0" == truncate("12345678901234567890", 10)
| true |
63cab8c6c0b8db5c884fe7c15f5c2a7b40797791 | Python | EricaHD/SemiSupervisedLearning | /archived/resnet.py | UTF-8 | 2,502 | 2.609375 | 3 | [] | no_license | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import models
from load import get_train_loader, get_test_loader
torch.manual_seed(1)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
train_loader = get_train_loader('/scratch/ehd255/ssl_data_96/supervised/train/', batch_size=32)
test_loader = get_test_loader('/scratch/ehd255/ssl_data_96/supervised/val/', batch_size=32)
class ResNet18(nn.Module):
def __init__(self):
super(ResNet18, self).__init__()
self.model = models.resnet18(pretrained=False)
num_ftrs = self.model.fc.in_features
self.model.fc = nn.Linear(num_ftrs, 1000)
def forward(self, x):
return self.model(x)
model = ResNet18()
model.to(device)
def train(num_epochs=10):
model.train()
optimizer = optim.Adam(model.parameters(), lr=0.01, weight_decay=0.00001)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min')
for epoch in range(num_epochs):
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model.forward(data)
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
if batch_idx % 100 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data),
len(train_loader.dataset),
100. * batch_idx / len(train_loader),
loss.item())
)
torch.save(model, '/scratch/jtb470/semires/res-{}.pth'.format(epoch))
train()
def test():
model.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model.forward(data)
test_loss += F.cross_entropy(output, target, reduction='sum').item()
pred = output.data.max(1, keepdim=True)[1]
correct += pred.eq(target.data.view_as(pred)).cpu().sum().item()
test_loss /= len(test_loader.dataset)
accuracy = 100. * correct / len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss,
correct,
len(test_loader.dataset),
accuracy)
)
test()
torch.save(model, '/scratch/jtb470/semires/res-final.pth')
| true |
3a434e79a0390362e7f78015fcc723aa77d29dfa | Python | shivanichauhan18/files_questions | /p.py | UTF-8 | 182 | 3.125 | 3 | [] | no_license | number_list=[[1,2.3],
[2,1,3],
[3,2,1]]
i=0
j=0
new_list=[]
sum=0
while i<len(number_list):
new_list.append(number_list[i][j])
sum=sum+new_list[i]
j=j+1
i=i+1
print sum | true |
96cdbd1ea9b66cb896e1c00bf8bed983c9c30bb4 | Python | standthis/ml | /practical/demo/10_Keras_CNN_MNIST.py | UTF-8 | 4,554 | 3.203125 | 3 | [] | no_license | #!/usr/bin/env python
# ---------------------------------------------------------------------------------------------------------------
# Training a shallow vs deep (Convolutional Layer) Neural Net to Classify Handwritten Digits Using Keras
import numpy
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.utils import np_utils
from keras import backend as K
K.set_image_dim_ordering('th')  # Theano ordering: images are (channels, rows, cols)
import matplotlib.pyplot as plt
# load (downloaded if needed) the MNIST dataset
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# Display 4 images as greyscale
plt.subplot(221)
plt.imshow(X_train[0], cmap=plt.get_cmap('gray'))
plt.subplot(222)
plt.imshow(X_train[1], cmap=plt.get_cmap('gray'))
plt.subplot(223)
plt.imshow(X_train[2], cmap=plt.get_cmap('gray'))
plt.subplot(224)
plt.imshow(X_train[3], cmap=plt.get_cmap('gray'))
# show the plot
# plt.show()
# fix random seed for reproducibility
seed = 42
numpy.random.seed(seed)
# Keras version of train_test_split on the mnist 28 x 28 = 784 dimensions
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# The neural nets in Keras act on the feature matrix slightly differently than the standard
# OpenCV and scikit-learn estimators.
# Reshape the feature matrix into a 4D matrix with dimensions n_samples x 1 x 28 x 28:
# NOTE(review): num_pixels is computed here but never used below.
num_pixels = X_train.shape[1] * X_train.shape[2]
X_train = X_train.reshape(X_train.shape[0], 1, 28, 28).astype('float32')
X_test = X_test.reshape(X_test.shape[0], 1, 28, 28).astype('float32')
# normalize inputs from 0-255 to 0-1
X_train = X_train / 255
X_test = X_test / 255
# One-hot encode the training labels.
# Transforms the vector of class integers into a binary matrix:
num_classes = 10
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
# The input layer expects images with the structure outline above [pixels][width][height].
# The first hidden layer is a convolutional layer called a Convolution2D with 32 5×5 feature maps and relu activation.
# Pooling layer that takes the max called MaxPooling2D. It is configured with a pool size of 2×2.
# The next layer is a regularization layer using Dropout.
# It is configured to randomly exclude 20% of neurons in the layer in order to reduce overfitting.
# Next convert the 2D matrix data to a Flattened vector for the output to be process fully connected layers.
# Another fully connected layer with 128 neurons and relu
# Finally, Softmax activation on output layer: probability values converted to 1 of 10 as the output prediction.
# define baseline model as above:
def baseline_model():
    """Build and compile the baseline CNN: one conv/pool block with dropout,
    then a dense classifier with a softmax head (one output per class)."""
    layers = [
        # 32 feature maps from 5x5 kernels over the 1x28x28 greyscale input.
        Conv2D(32, (5, 5), input_shape=(1, 28, 28), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        # Randomly drop 20% of activations to reduce overfitting.
        Dropout(0.2),
        Flatten(),
        Dense(128, activation='relu'),
        Dense(num_classes, activation='softmax'),
    ]
    model = Sequential(layers)
    # Log-loss with ADAM, as in the rest of this script.
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
# define the larger model, but not for potato pc. Beware of overfitting and bad parameters.
def larger_model():
    """Build and compile the deeper CNN variant: two conv/pool blocks and an
    extra dense layer before the softmax head."""
    layers = [
        Conv2D(30, (5, 5), input_shape=(1, 28, 28), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Conv2D(15, (3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        # Randomly drop 20% of activations to reduce overfitting.
        Dropout(0.2),
        Flatten(),
        Dense(128, activation='relu'),
        Dense(50, activation='relu'),
        Dense(num_classes, activation='softmax'),
    ]
    model = Sequential(layers)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
# Fit and evaluate the model over 10 epochs with updates every 200 images (batches).
# The test data is used as the validation dataset, to see the improvement as the model as it trains.
# A verbose value of 2 is used to reduce the output to one line for each training epoch.
# build the model
model = baseline_model()
# Fit the model: 10 epochs, batches of 200; the held-out test set doubles as
# validation data so progress prints once per epoch (verbose=2).
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=200, verbose=2)
# Final evaluation of the model
scores = model.evaluate(X_test, y_test, verbose=0)
# scores[1] is the accuracy metric; report the complementary error rate.
print("CNN Error: %.2f%%" % (100-scores[1]*100))
70cb25496ad6f4df9bcfee788eb59be62c405100 | Python | sarah/sorts | /quicksort/quicksort.py | UTF-8 | 1,406 | 3.796875 | 4 | [] | no_license | import unittest
def quicksort(A):
"""
API function that calls internal function _quicksort with initial args
"""
_quicksort(A,0,len(A)-1)
def _quicksort(A, start, last):
"""
:A array
:start Int
:last Int
"""
if (last - start) > 0:
pIndex = partition(A,start,last)
_quicksort(A,start, pIndex-1) # segment to the left of the pivot
_quicksort(A,pIndex+1, last) # segment to the right of the pivot
def partition(A, start, last):
"""
:A array
:start Int
:last Int
"""
pivot = last
divider = start
for i in range(start, last):
if(A[i] <= A[pivot]):
# swap, move the smaller value to the left of the 'divider' and move the divider
A[i], A[divider] = A[divider], A[i]
divider += 1
# put the value of the pivot in the divider position, completing the partition.
A[divider], A[pivot] = A[pivot], A[divider]
return divider
class QuickSortTest(unittest.TestCase):
def testSorts(self):
A = [5,4,10,9,8,7,15,1,2]
quicksort(A)
expected = [1,2,4,5,7,8,9,10,15]
self.assertEqual(A, expected)
def testSortsAlreadySortedList(self):
A = [1,2,4,5,7,8,9,10,15]
quicksort(A)
expected = [1,2,4,5,7,8,9,10,15]
self.assertEqual(A, expected)
if __name__== "__main__":
unittest.main()
| true |
40c23f62486fcb181f48e1452829453236da222a | Python | jedrekw-git/aftermarket-python | /pages/appraisal_list.py | UTF-8 | 1,801 | 2.546875 | 3 | [] | no_license | # coding=utf-8
from selenium.webdriver.common.by import By
from pages.base import BasePage
from utils.utils import *
from random import randint
from time import sleep
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from random import randint
class AppraisalListPage(BasePage):
_title = "Appraisal List"
_fourth_appraisal_domain_field = (By.XPATH, "//tr[12]/td[3]/div/span/label/span")
_fourth_appraisal_time_field = (By.XPATH, "//tr[12]/td[4]/div/span/span")
_fourth_appraisal_type_field = (By.XPATH, "//tr[12]/td[5]/div/span")
_fourth_appraisal_status_field = (By.XPATH, "//tr[12]/td[6]/div/span")
_first_appraisal_domain_field = (By.XPATH, "//td[3]/div/span/label/span")
_first_appraisal_time_field = (By.XPATH, "//td[4]/div/span/span")
_first_appraisal_type_field = (By.XPATH, "//td[5]/div/span")
_first_appraisal_status_field = (By.XPATH, "//td[6]/div/span")
_search_field = (By.NAME, "domain")
_search_button = (By.XPATH, "//div[3]/button")
def __init__(self, driver):
super(AppraisalListPage, self).__init__(driver, self._title)
def search_for_appraisal(self, domain_name):
self.clear_field_and_send_keys(domain_name, self._search_field)
self.click(self._search_button)
def get_fourth_appraisal_domain_time_type_and_status(self):
self.fourth_appraisal_domain = self.get_text(self._fourth_appraisal_domain_field)
self.fourth_appraisal_time = self.get_text(self._fourth_appraisal_time_field)
self.fourth_appraisal_type = self.get_text(self._fourth_appraisal_type_field)
self.fourth_appraisal_status = self.get_text(self._fourth_appraisal_status_field) | true |
e6a5ed2ffc5d2e6ab51b90806d35ff42c0567fb3 | Python | mj596/FermiTools | /modules/exceptions/exceptions.py | UTF-8 | 340 | 2.515625 | 3 | [] | no_license | class SourceNotFound( Exception ):
def __init__( self, _source_name ):
self.source_name = _source_name
class ModuleNotFound( Exception ):
def __init__( self, _module_name ):
self.module_name = _module_name
class RangeError( Exception ):
def __init__( self, _error_name ):
self.error_name = _error_name
| true |
1e568caaa04b4fb692e415b894c23d10ce358a21 | Python | SajjadDaneshmand/BahmanRbt | /src/main.py | UTF-8 | 4,734 | 2.546875 | 3 | [] | no_license | # internal
import settings
from data_catcher import Cars
# standard
import time
# selenium
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
from bs4 import BeautifulSoup
import pandas as pd
# catch all of table
def table_catcher(src, path):
code = []
number = []
name = []
price = []
soup = BeautifulSoup(src, 'html.parser')
table = soup.find('table')
header = table.thead.tr.text.strip().split('\n')
body = table.tbody
for row in body.find_all('tr'):
row_list = row.text.strip().split('\n')
code.append(row_list[0])
number.append(row_list[1])
name.append(row_list[2])
price.append(row_list[3])
frame = {
header[0]: code,
header[1]: number,
header[2]: name,
header[3]: price
}
dataframe = pd.DataFrame(frame)
dataframe.to_excel(path)
# get all number of pages
def number_of_page():
try:
number = driver.find_element_by_id('DataPager1_ctl00_TotalPagesLabel')
return int(number.text)
except:
return False
def wait_for_page_loading(src):
pars = BeautifulSoup(src, 'html.parser')
display = pars.find('div', attrs={'id': 'LoadingTem'})
style = display.get('style')
return style
driver = webdriver.Firefox()
driver.set_page_load_timeout(40)
driver.get(settings.URL)
car_class = Cars(settings.DATA_FILE)
data = car_class.reader()
for car in data:
"""Selecting all car type"""
for model in list(data.values()):
if len(model) != 0:
for cary in model:
"""Selecting all car model"""
for char in settings.persian_ascii_letters:
"""Inserting all character to input tag"""
select = WebDriverWait(driver, settings.delay).until(ec.presence_of_element_located((By.ID, 'drpCarType')))
selecting = Select(select)
selecting.select_by_visible_text(car)
time.sleep(1)
model_selector = WebDriverWait(driver, settings.delay).until(ec.presence_of_element_located((By.ID, 'drpCarModel')))
model_selecting = Select(model_selector)
model_selecting.select_by_visible_text(cary)
counter = 1
char_input = driver.find_element_by_id('txtPartName')
char_input.send_keys(char)
WebDriverWait(driver, settings.delay).until(ec.presence_of_element_located((By.ID, 'btnSearch'))).click()
while wait_for_page_loading(driver.page_source) != 'display: none':
time.sleep(0.5)
num_page = number_of_page()
counter += 1
if num_page > 1:
for page in range(num_page):
"""Scraping all pages"""
print(f'I\'m in page: {page + 1}')
WebDriverWait(driver, settings.delay).until(ec.presence_of_element_located((By.NAME, 'dtPager$ctl02$ctl00'))).click()
page_src = driver.page_source
soup = BeautifulSoup(page_src, 'html.parser')
btn_disable = int(soup.find(class_='btn disabled').text)
form = f'{settings.FILES_FOLDER}{car}-{cary}-({page + 1})-{char}.xlsx'
table_catcher(driver.page_source, form)
while True:
page_src = driver.page_source
soup = BeautifulSoup(page_src, 'html.parser')
btn_disable_ = int(soup.find(class_='btn disabled').text)
time.sleep(0.5)
if btn_disable != btn_disable_:
break
elif page == (num_page - 1):
driver.get(settings.URL)
break
else:
form = f'{settings.FILES_FOLDER}{car}-{cary}-(1)-{char}.xlsx'
table_catcher(driver.page_source, form)
# clearing the input tag
char_input = driver.find_element_by_id('txtPartName')
char_input.clear()
| true |
874c5c8bed5c0bd41df7d5e2e0e24c72682d4f69 | Python | paulineml/sdmxthon | /sdmxthon/parsers/status_message.py | UTF-8 | 3,650 | 2.75 | 3 | [
"Apache-2.0"
] | permissive | """Status messages file withholds some extra MessageTypes for specific
purposes """
from sdmxthon.model.base import LocalisedString, InternationalString
from sdmxthon.parsers.data_parser import DataParser
from sdmxthon.utils.xml_base import find_attr_value_
class StatusMessageType(DataParser):
    """StatusMessageType describes the structure of an error or warning
    message. A message contains the text of the message, as well as an
    optional language indicator and an optional _code. The _code attribute
    holds an optional _code identifying the underlying error that generated
    the message. This should be used if parallel language descriptions of
    the error are supplied, to distinguish which of the multiple error
    messages are for the same underlying error."""
    __hash__ = DataParser.__hash__
    subclass = None
    superclass = None
    def __init__(self, code=None, Text: InternationalString = None,
                 gds_collector_=None, **kwargs_):
        super(StatusMessageType, self).__init__(None)
        self.gds_collector_ = gds_collector_
        self._code = code
        self._text = Text
    @staticmethod
    def _factory(*args_, **kwargs_):
        """Factory Method of StatusMessageType"""
        return StatusMessageType(*args_, **kwargs_)
    @property
    def text(self):
        """Text of the Message.

        Returns None when no text is present, the bare localized content
        when exactly one localisation exists, the full items dict when
        there are several, or the raw value when it is not an
        InternationalString.
        """
        if self._text is None:
            return None
        elif isinstance(self._text, InternationalString):
            if len(self._text.items) == 0:
                return None
            elif len(self._text.items) == 1:
                # Single localisation: unwrap and return just its content.
                values_view = self._text.items.values()
                value_iterator = iter(values_view)
                first_value = next(value_iterator)
                return first_value['content']
            else:
                return self._text.items
        return self._text
    @text.setter
    def text(self, value):
        self._text = value
    @property
    def code(self):
        """Status code of the Message"""
        return self._code
    @code.setter
    def code(self, value):
        self._code = value
    def _build_attributes(self, node, attrs, already_processed):
        """Builds the attributes present in the XML element"""
        value = find_attr_value_('Code', node)
        if value is not None and 'Code' not in already_processed:
            already_processed.add('Code')
            self._code = value
    def _build_children(self, child_, node, nodeName_, fromsubclass_=False,
                        gds_collector_=None):
        """Builds the children of the XML element"""
        if nodeName_ == 'Text':
            obj_ = LocalisedString._factory()
            obj_._build(child_, gds_collector_=gds_collector_)
            # Lazily create the container so repeated <Text> elements
            # accumulate into a single InternationalString.
            if self._text is None:
                self._text = InternationalString()
            self._text.addLocalisedString(obj_)
# end class StatusMessageType
class CodedStatusMessageType(StatusMessageType):
    """CodedStatusMessageType describes the structure of an error or warning
    message which requires a _code."""
    __hash__ = DataParser.__hash__
    subclass = None
    superclass = StatusMessageType
    def __init__(self, code=None, Text=None, gds_collector_=None, **kwargs_):
        super(CodedStatusMessageType, self).__init__(code, Text,
                                                     gds_collector_, **kwargs_)
        # _name is used by the parser machinery to identify the element type.
        self._name = 'CodedStatusMessageType'
    @staticmethod
    def _factory(*args_, **kwargs_):
        """Factory Method of CodedStatusMessageType"""
        return CodedStatusMessageType(*args_, **kwargs_)
# end class CodedStatusMessageType
| true |
9519faacb3ecb70bd836a80d62f9e049282d07cc | Python | luishpmendes/zdt | /plotter_pareto.py | UTF-8 | 3,476 | 2.71875 | 3 | [] | no_license | import csv
import matplotlib.pyplot as plt
import os
import seaborn as sns
from plotter_definitions import *
dirname = os.path.dirname(__file__)
for zdt in zdts:
for version in versions:
min_ys = []
max_ys = []
for i in range(2):
min_ys.append(-1)
max_ys.append(-1)
for solver in solvers:
filename = os.path.join(dirname, "pareto/zdt" + str(zdt) + "_" + solver + "_" + version + ".txt")
if os.path.exists(filename):
with open(filename) as csv_file:
data = csv.reader(csv_file, delimiter=" ")
for row in data:
for i in range(2):
if min_ys[i] == -1 or min_ys[i] > float(row[i]):
min_ys[i] = float(row[i])
if max_ys[i] == -1 or max_ys[i] < float(row[i]):
max_ys[i] = float(row[i])
csv_file.close()
for i in range(2):
delta_y = max_ys[i] - min_ys[i]
min_ys[i] = min_ys[i] - round(0.025 * delta_y)
max_ys[i] = max_ys[i] + round(0.025 * delta_y)
fig, axs = plt.subplots(nrows = 2, ncols = 2, figsize = (5.0 * 2, 5.0 * 2), squeeze = False, num = 1, clear = True)
fig.set_size_inches(5.0 * 2, 5.0 * 2)
fig.suptitle("zdt" + str(zdt), fontsize = "xx-large")
for i in range(len(solvers)):
filename = os.path.join(dirname, "pareto/zdt" + str(zdt) + "_" + solvers[i] + "_" + version + ".txt")
if os.path.exists(filename):
ys = []
for j in range(2):
ys.append([])
with open(filename) as csv_file:
data = csv.reader(csv_file, delimiter = " ")
for row in data:
for j in range(2):
ys[j].append(float(row[j]))
csv_file.close()
for j in range(2):
axs[j][j].set_xlim(left = min_ys[j], right = max_ys[j])
axs[j][j].set_xlabel(xlabel = "$f_{" + str(j + 1) + "}$", fontsize = "x-large")
axs[j][j].set_yticks([])
axs[j][j].set_ylabel(ylabel = "Density", fontsize = "x-large")
sns.kdeplot(data = ys[j], ax = axs[j][j], color = colors[i], label = solver_labels[solvers[i]], marker = (i + 3, 2, 0), alpha = 0.80)
axs[j][j].legend(loc = "best")
for l in range(2):
if j != l:
axs[j][l].set_xlim(left = min_ys[l], right = max_ys[l])
axs[j][l].set_ylim(bottom = min_ys[j], top = max_ys[j])
axs[j][l].set_xlabel(xlabel = "$f_{" + str(l + 1) + "}$", fontsize = "x-large")
axs[j][l].set_ylabel(ylabel = "$f_{" + str(j + 1) + "}$", fontsize = "x-large")
axs[j][l].scatter(x = ys[l], y = ys[j], color = colors[i], label = solver_labels[solvers[i]], marker = (i + 3, 2, 0), alpha = 0.80)
axs[j][l].legend(loc = "best", fontsize = "large")
del ys
plt.subplots_adjust(wspace = 0.16 + 0.07 * 2, hspace = 0.16 + 0.07 * 2)
filename = os.path.join(dirname, "pareto/zdt" + str(zdt) + "_" + version + ".png")
plt.savefig(filename, format = "png")
| true |
dc2d2b090bef9e1567c07f8c08c6d3e10f8935ef | Python | deboramelinda94/KGConstructionFromTextbook | /constructKG_FromTOC/TableOfContent_EntityExtraction.py | UTF-8 | 2,719 | 2.90625 | 3 | [] | no_license | from nltk import pos_tag
from nltk.tokenize import word_tokenize
def createTermGlossary(fileName): #related to selected dataset (e.g. python syntax that may is listed in the TOC)
TermGlossary = []
f = open(fileName, "r")
content = f.readlines()
for item in content:
item = item.rstrip("\n")
TermGlossary.append(item)
f.close()
return TermGlossary
def checkPureTerm(posTag):
termType = {"NN", "NNP", "NNS", "JJ", "VBG", "VBN"}
canConcept = []
for i in range(len(posTag)):
#linguistic filtering
if (posTag[i][1] in termType or posTag[i][0] in pythonGlossary\
) and re.search('[a-zA-Z]', posTag[i][0]) and len(posTag[i][0]) > 1:
canConcept.append(posTag[i][0])
canConcept.append("")
finConcept = []
temp = ""
for item in canConcept:
if item != "":
temp = temp + item + " "
else:
finConcept.append(temp.rstrip())
temp = ""
return finConcept
def getNounAdj(token): #token --> array of tokenize
listConcept = []
determiner = ['the','a', 'an']
subConcept = []
determinerRemove = []
for word in token:
if word not in determiner:
determinerRemove.append(word)
token = determinerRemove
posTag = pos_tag(token)
if "and" in token:
indexConj = token.index("and")
if "," in token:
subTopic = checkPureTerm(posTag[indexConj+1:])
subConcept.extend(subTopic)
for i in range(indexConj):
if token[i] is not ",":
subConcept.append(token[i])
else:
subTopic = checkPureTerm(posTag[indexConj+1:])
subConcept.extend(subTopic)
#If the word at the left is Noun, directly become new topic
if "NN" in posTag[indexConj-1][1]:
#subTopic = ' '.join(token[:indexConj])
subTopic = checkPureTerm(posTag[:indexConj])
subConcept.extend(subTopic)
#If the word at the left is Verb, merge it with the Noun that located at the right
else:
for i in range(len(token)-1, indexConj+1,-1):
if "NN" in posTag[i][1]:
otherTopic = token[indexConj-1] + " " + posTag[i][0]
subConcept.append(otherTopic)
break
else:
subTopic = checkPureTerm(posTag)
subConcept.extend(subTopic)
while "" in subConcept:
subConcept.remove("")
return subConcept
| true |
e09b84fd7cce01ebed319ae4342540f919df1c0e | Python | sgammon/yapa-moments | /moments/driver.py | UTF-8 | 7,785 | 2.59375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
'''
yapa moments demo: ffmpeg driver
'''
# stdlib
import os
import sys
import shutil
import tempfile
import traceback
import subprocess
# local
from . import base
class FFmpeg(base.MomentBase):

    ''' Class that wraps and properly handles calls to FFmpeg, related
        to generating :py:class:`moments.api.Moment` videos. Things are
        meant to pass through this to :py:mod:`subprocess`. '''

    __args__ = None  # positional arguments for this ``FFmpeg`` run
    __input__ = None  # stdin input for target ``FFmpeg`` run
    __kwargs__ = None  # keyword arguments for this ``FFmpeg`` run
    __output__ = None  # stdout output for target ``FFmpeg`` run
    __target__ = None  # target subprocess containing ``FFmpeg``
    __moment__ = None  # moment job that we'll be working on this run
    __scratch__ = None  # scratch directory where temp files can be written
    __pending__ = False  # flag that indicates we are actively working

    def __init__(self, moment):

        ''' Initialize an FFmpeg instance, with arguments/config/options.
            Keyword arguments passed here override values from ``self.config``.

            :param moment: :py:class:`Moment` object to compile into a video.
            :returns: Nothing, as this is a constructor. '''

        self.__moment__, self.__args__, self.__kwargs__ = (
            moment,  # target moment
            [], {}  # args and kwargs
        )

    ## == Internals == ##
    def _spawn(self):

        ''' Spawn ``FFmpeg`` subprocess, reducing the current argument set
            into a valid string to be executed by :py:mod:`subprocess`.

            :returns: Target :py:mod:`subprocess.Popen` object. '''

        # generate string command
        command = self._make_command()

        # spawn lazily: reuse an existing subprocess if one is running
        if not self.__target__:
            self.logging.debug('Spawning FFmpeg with command: "%s".' % ' '.join(command))
            self.__target__ = subprocess.Popen(
                command,
                shell=False,
                bufsize=0,  # don't buffer from ffmpeg
                executable=self._ffmpeg_path
            )
            self.logging.debug('FFmpeg running under native driver at PID %s.' % self.__target__.pid)
        return self.__target__

    def _provision_scratchspace(self):

        ''' Provision a temporary directory to write midway input image
            files, after resizing/reformatting. This is called during
            context entry on the :py:class:`FFmpeg` driver.

            :returns: The string location of the new scratch directory,
            which is also stored at ``self.__scratch__``. '''

        # allocate a temp directory and return
        space = tempfile.mkdtemp()
        if self.moment.options.verbose:
            self.logging.debug('Provisioned scratchspace at location "%s".' % space)
        return setattr(self, '__scratch__', space) or self.__scratch__

    def _destroy_scratchspace(self):

        ''' Destroy temporary scratch space originally allocated at the
            beginning of the run to save midway image source files.

            :raises OSError: If an ``OSError`` is encountered with an
            error code other than ``2``.

            :returns: Nothing. '''

        space = self.__scratch__
        if self.moment.options.verbose:
            self.logging.debug('Destroyed scratchspace at location "%s".' % space)
        try:
            # BUGFIX: actually remove the temp directory — the ``rmtree``
            # call had been commented out, leaking one directory per run
            # even though this method logged a successful destroy.
            shutil.rmtree(space)
        except OSError as e:
            if e.errno != 2:  # code 2 == no such file or directory
                raise

    @property
    def _ffmpeg_path(self):

        ''' Calculates the default path to ``FFmpeg``, which is distributed along
            with this package by default.

            :returns: String path to ``FFmpeg`` binary. '''

        _default_path = os.path.abspath(
            os.path.join(
                os.path.dirname(os.path.dirname(__file__)),
                'resources',
                'ffmpeg'
            ))
        return _default_path

    def _make_command(self):

        ''' Reduces the current argument set into a valid command to be executed
            by :py:class:`subprocess.Popen`.

            :returns: Full argument list (binary path first) for ``Popen``. '''

        path = self._ffmpeg_path
        if self.moment.options.debug:
            self.logging.debug('Using FFmpeg at path: "%s".' % path)

        try:
            os.stat(path)  # make sure the bundled binary actually exists
        except OSError:
            if self.moment.options.debug:
                traceback.print_exception(*sys.exc_info())
            self.logging.critical('Failed to find FFmpeg. Exiting.')
            raise RuntimeError('Cannot find `FFmpeg` executable.')
        else:
            # NOTE(review): keyword args are rendered GNU-style (``--key=value``);
            # real ffmpeg options are single-dash (``-key value``) — confirm the
            # intended binary/option style before relying on kwargs here.
            return [path] + self.args + ([
                (("--%s" % k) + (("=%s" % v) if v is not None else "")) for k, v in self.kwargs.iteritems()
            ] if self.kwargs else [])

    ## == Command Flow == ##
    def _add_argument(self, *positional, **kwargs):

        ''' Add a positional (value-based) argument to the current argset.

            :param positional: ``str`` values to add, positionally. Defaults
            to ``None`` so ``kwargs`` may be passed independently.

            :param kwargs: ``dict`` representing keyword-mapped arguments
            to add. Can be specified in addition to (or in-place-of) ``positional`` arguments.

            :returns: ``self``, for easy chainability. '''

        # map args
        if positional:
            for argument in positional:
                self.__args__.append(argument)

        # map kwargs
        if kwargs:
            for keyword, argument in kwargs.iteritems():
                self.__kwargs__[keyword] = argument
        return self

    ## == Context Management == ##
    def __enter__(self):

        ''' Get ready to spawn the ``FFmpeg`` subprocess, by allocating
            scratch space and indicating that we're starting an exclusive
            session (at least in the scope of the current interpreter) for
            talking to ``FFmpeg``.

            :returns: ``self``, for use in an ``as`` binding as part of a
            ``with`` construct. '''

        if self.__pending__:
            raise RuntimeError("Cannot invoke ``FFmpeg`` concurrently.")

        self._provision_scratchspace()
        self.__pending__ = True  # indicate pending mode
        return self  # set self as final driver

    def __exit__(self, exc_type, exception, exc_info):

        ''' Handle clean exit for failure and success states after using
            the ``FFmpeg`` subprocess in-context.

            :param exc_type: Class (type) of context-disrupting exception.
            :param exception: Value (object) of context-disrupting exception.
            :param exc_info: ``sys.exc_info`` result for context-disrupting exception.
            :raises Exception: Anything that happens during in-context execution.
            :returns: ``True`` if no exception was encountered. ``False`` otherwise,
            which bubbles up *all* exceptions. '''

        if not self.__pending__:
            raise RuntimeError("Out-of-context ``__exit__`` invocation.")

        # BUGFIX: clear the pending flag so the driver can be re-entered for
        # a subsequent run — previously it stayed ``True`` forever, so any
        # later ``with`` block raised the concurrency error.
        self.__pending__ = False
        self._destroy_scratchspace()

        if exception:
            return False  # @TODO(sgammon): exception suppression? cleanup?
        return True

    def __call__(self, *args, **kwargs):

        ''' API touchpoint for executing the current set of queued/pending
            arguments via ``FFmpeg``.

            :param args: Positional arguments to also pass to ``FFmpeg``.
            :param kwargs: Keyword arguments to also pass to ``FFmpeg``.
            :returns: Return code of the underlying ``subprocess`` call. '''

        # map final arguments, if any
        if args or kwargs:
            self._add_argument(*args, **kwargs)

        # stdout and stderr output
        stdout, stderr = self.target.communicate(self.__input__ or None)
        return self.target.returncode

    ## == Property Mappings == ##
    args = property(lambda self: self.__args__)  # args sent to ``FFmpeg``
    kwargs = property(lambda self: self.__kwargs__)  # kwargs sent to ``FFmpeg``
    target = property(lambda self: self._spawn())  # spawn/grab process
    moment = property(lambda self: self.__moment__)  # subject moment
    output = property(lambda self: self.__output__)  # output location
    scratch = property(lambda self: self.__scratch__)  # scratchspace
| true |
f764d72a16198a2dacf1782ae4d7a9e76332d921 | Python | rongDang/Search_engine_spider | /main.py | UTF-8 | 3,690 | 3.28125 | 3 | [] | no_license | # -*- encoding:utf8 -*-
import os
import sys
from scrapy.cmdline import execute
# Launch the "douban" spider through scrapy's execute() helper — equivalent
# to typing "scrapy crawl douban" on the command line (used for test runs).
# sys.path.append(os.path.dirname(os.path.abspath(__file__)))
# scrapy crawl douban
execute(["scrapy", "crawl", "douban"])
# one = set()
# words = {"tokens":[{"token":"sadasd"}, {"token":"45"}, {"token":"45"}]}
# print(set([r["token"] for r in words["tokens"] if len(r["token"]) > 1]))
# 静态方法
"""
class Test(object):
@classmethod
def test(cls, one):
print("testst-"+one)
test = Test()
test.test(one="45455454")
Test.test(one="1111111111")
"""
# 抽象类
'''
from abc import ABCMeta, abstractmethod, ABC
class Foo(ABC):
@abstractmethod
def fun(self):
"""
你需要在子类中实现该方法, 子类才允许被实例化
:return
"""
@abstractmethod
def fun1(self):
print("11111111")
class Sub(Foo):
def fun(self):
print("子类实现父类抽象方法")
su = Sub()
su.fun()
'''
# MySQL插入
'''
import MySQLdb
conn = MySQLdb.connect(host='localhost', db='search_engine',
user='root', passwd='root', charset='utf8')
cur = conn.cursor()
cur.execute("""insert into csdn(title,date) values(%s,%s)""", ("test", "2018-08-04 18:00:13"))
conn.commit()
'''
# 规范注释
'''
def add(self, a: int, b: int) -> int:
"""
:param a: int 第一个操作数
:param b:
:return:
"""
'''
"""
import requests
from lxml import etree
header = {
"USER_AGENT": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
"(KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36"
}
body = requests.get("https://movie.douban.com/subject/1300267/", headers=header)
selector = etree.HTML(body.text)
fields = [s.strip().replace(':', '') for s in selector.xpath("//div[@id='info']/span[@class='pl']//text()")]
print(fields)
"""
# print(selector.xpath("//div[@id='info']//text()"))
# ['类型', '制片国家/地区', '语言', '上映日期', '片长', '又名', 'IMDb链接']
# 时间格式处理
"""
import datetime
time_str = "2018年08月04日 18:00:13"
print(datetime.datetime.strptime(time_str, "%Y年%m月%d日 %H:%M:%S"))
one = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print(one, "\n", type(one))
"""
# 正则匹配
'''
import re
li = ['片长', '又名', 'IMDb链接']
test = """
片长: 238分钟 / 234分钟
又名: 飘
IMDb链接: tt0031381"""
"""
(?:) 表示匹配括号内的正则表达式,但是这些正则表达式不能作为提取字符串使用
例如我的re为: (?:one):\d*,匹配字符串 "one:11 one:22"的结果为 11 22,
如果是(one):\d* 匹配字符串 "one:11 one:22" 结果是 ['one', '11', 'one', '22']
"""
values = [re.sub('\s+', ' ', s.strip()) for s in re.split('\s*(?:片长|又名|IMDb链接):\s*', test)][1:]
# ('\s*(?:%s):\s*' % '|'.join(fields), info)
print(values)
a = "1232: test,487"
print(re.sub(":.*", "", a))
s = """one:1212
one:11111
two:22222"""
print(re.split("(one|two):\s*", s))
'''
import redis, pickle
redis_cli = redis.StrictRedis()
# redis_cli.decr("movie_count")
print(redis_cli.get("movie_count"))
"""
def real_time_count(key, init):
if redis_cli.get(key):
count = pickle.loads(redis_cli.get(key))
count = count + 1
count = pickle.dumps(count)
redis_cli.set(key, count)
else:
count = pickle.dumps(init)
redis_cli.set(key, count)
"""
| true |
d5d757b73622d8a571f886a510678dd53d55536a | Python | LPLhock/huobi_swap | /matploat/ema_pic.py | UTF-8 | 3,427 | 2.59375 | 3 | [] | no_license | import pandas as pd
from api.huobi.huobi_request import HuobiRequest
import asyncio
import matplotlib as mpl
from matplotlib import pyplot as plt
# 图形参数控制
import pylab as pl
import numpy as np
from utils import fileutil
from datetime import datetime
import talib
from collections import deque
from utils import sigle_linear_regression_util
# Use the TkAgg backend so figures open in a desktop window.
mpl.use('TkAgg')
pd.set_option('expand_frame_repr', False)  # do not wrap the frame when there are many columns
pd.set_option('display.max_rows', 1000)  # maximum number of rows to display.
pd.set_option('display.float_format', lambda x:'%.2f' % x)  # avoid scientific notation; keep two decimals.
class MatPlot:

    @classmethod
    async def get_data(cls, symbol, period="15min", size=500):
        """Fetch kline data for ``symbol`` from the Huobi API and plot it.

        Builds a Date-indexed OHLCV DataFrame from the response and hands it
        to :meth:`show`.  Returns ``None`` (also on API error, after printing
        the error).
        """
        success, error = await request.get_klines(contract_type=symbol, period=period, size=size)
        if error:
            print(error)
            return None
        if success:
            data = success.get("data")
            # NOTE(review): ``columns`` is passed a dict here; pandas will use
            # its keys as the column order — confirm this matches the API's
            # field order.
            df = pd.DataFrame(data, columns={"id": 0, 'vol': 1, 'count': 2, 'open': 3, 'close': 4, 'low': 5,
                                             'high': 6, 'amount': 7})
            df = df[['id', 'open', 'high', 'low', 'close', 'vol']]
            df = df.rename(
                columns={"id": "Date", "open": "Open", "high": "High", "low": "Low", "close": "Close", "vol": "Volume"})
            # "id" holds a Unix timestamp in seconds; use it as the index.
            df["Date"] = pd.to_datetime(df["Date"], unit="s")
            df.set_index(["Date"], inplace=True)
            MatPlot.show(df)

    @classmethod
    def show(cls, df):
        """Plot close price plus fast (5) and slow (10) EMAs on one axes."""
        df['fast_ema'] = talib.EMA(df['Close'], timeperiod=5)
        df['slow_ema'] = talib.EMA(df['Close'], timeperiod=10)
        price_values = df["Close"]
        fast_ema_values = df['fast_ema']
        slow_ema_values = df['slow_ema']
        # Set up the figure (the original comment mentioned three stacked
        # subplots, but only a single axes is created here).
        fig, ax = plt.subplots(1, 1)
        # Configure matplotlib so CJK label text renders correctly.
        plt.rcParams['font.sans-serif'] = ['SimHei']
        plt.rcParams['axes.unicode_minus'] = False
        # Adjust subplot spacing; hspace is the gap in the height direction.
        plt.subplots_adjust(hspace=.1)
        # Y-axis label and title for the (single) subplot.
        ax.set_ylabel('Close price in ¥')
        ax.set_title('A_Stock %s MACD Indicator' % ("test"))
        price_values.plot(ax=ax, color='g', lw=1., legend=True, use_index=False)
        fast_ema_values.plot(ax=ax, color='r', lw=1., legend=True, use_index=False)
        slow_ema_values.plot(ax=ax, color='b', lw=1., legend=True, use_index=False)
        # plt.scatter([100], [9300], s=100, color='y',marker='^', alpha=0.5)
        # Pick a tick interval so x-axis labels do not overlap when the
        # series is long.  NOTE(review): ``scale`` is hard-coded to 100 even
        # though callers may request a different number of candles — confirm.
        scale = 100
        interval = scale // 20
        # Apply the interval to the x-axis ticks; convert the time series to
        # plain dates (otherwise 00:00:00 is shown) and rotate the labels
        # for readability.
        pl.xticks([i for i in range(1, scale + 1, interval)],
                  [datetime.strftime(i, format='%Y-%m-%d') for i in
                   pd.date_range(df.index[0], df.index[-1], freq='%dd' % (interval))],
                  rotation=45)
        plt.show()
if __name__ == "__main__":
    # NOTE(review): "xxxx"/"xxxx" are placeholder API credentials — replace
    # with real access/secret keys before running against the exchange.
    request = HuobiRequest("https://api.btcgateway.pro", "xxxx", "xxxx")
    s = "BSV_CQ"   # contract symbol
    p = "60min"    # kline period
    c = 300        # number of candles requested (passed as ``size``)
    loop = asyncio.get_event_loop()
    loop.run_until_complete(MatPlot.get_data(s, p, c))
    loop.close()
| true |
5fbace1b8d83491554e4ea5aa89e79b6e4dbb44c | Python | luckyparkwood/khal | /chris.python/projects/project.do_she_love_me.py | UTF-8 | 464 | 3.546875 | 4 | [] | no_license | import random
# Flip two coins: does she love you, does she hate you?
she_love_me = random.choice([True, False])
she_hate_me = random.choice([True, False])

# Dispatch on the (love, hate) combination instead of an if/elif chain.
verdicts = {
    (True, True): "She love you and she hate you bro. Sounds tricky.",
    (True, False): "She love you my dude, go get her!",
    (False, True): "She hate you bro :( ... better luck next time.",
    (False, False): "She don't love you or hate you my guy. Keep trying!",
}
print(verdicts[(she_love_me, she_hate_me)])
467f7f0539ef91e338a83c0dbb0e1279825fe6ce | Python | inksci/tcp-py | /client.py | UTF-8 | 310 | 2.53125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 28 22:40:41 2016
@author: zhanghc
"""
import socket
# Simple TCP echo-style client (Python 2 syntax): connect to the server,
# print its greeting, send three names, then signal 'exit' and close.
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect(('172.17.0.1',12001))
print s.recv(1024)
for data in ['zhang','liu','wang']:
    s.send(data)
    print s.recv(1024)
# 'exit' tells the server to end the session for this client.
s.send('exit')
s.close()
| true |
f06f4125754d05922755e9832ded74ce32192e76 | Python | JonKPowers/horses | /db_functions.py | UTF-8 | 6,463 | 2.78125 | 3 | [] | no_license | import pymysql.cursors
import re
import logging
class DbHandler:
    """Thin wrapper around a pymysql connection for the horses database.

    Handles connecting, optional database/table creation, ad-hoc queries,
    and bulk row insertion from pandas-like table data.

    NOTE(review): SQL throughout this class is built with str.format /
    f-strings rather than parameterized queries — safe only as long as the
    database, table, and column names come from trusted code, never from
    user input.
    """
    def __init__(self, db='horses_test', username='codelou', password='ABCabc123!', initialize_db=False):
        """Store connection settings; optionally connect and create the db."""
        self.db = db
        self.user = username
        self.password = password
        self.connection = None  # lazily created by connect_db()
        if initialize_db == True:
            self.connect_db()
            self.initialize_db()
    def connect_db(self):
        """Open the pymysql connection if one is not already open."""
        if not self.connection:
            self.connection = pymysql.connect(
                host='localhost',
                user=self.user,
                password=self.password
            )
    def close_db(self):
        """Close the connection if it exists (no-op otherwise)."""
        if self.connection:
            self.connection.close()
    def query_db(self, sql_query):
        """Run a SELECT-style query; return (rows, column_names)."""
        cursor = self.connection.cursor()
        self.__use_db(cursor)
        cursor.execute(sql_query)
        results = list(cursor)
        # cursor.description holds one 7-tuple per column; item[0] is the name.
        results_cols = [item[0] for item in cursor.description]
        return results, results_cols
    def update_db(self, sql_query):
        """Run a data-modifying query and commit it. Returns None."""
        cursor = self.connection.cursor()
        self.__use_db(cursor)
        print('Sending SQL update query')
        print(sql_query)
        cursor.execute(sql_query)
        self.connection.commit()
        print('Update query sent; change committed')
        return None
    def initialize_db(self):
        """Checks to see if db exists. If not, creates it."""
        cursor = self.connection.cursor()
        # execute() returns the number of matched schema rows (0 or 1).
        sql = f'SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME = "{self.db}"'
        db_exists = cursor.execute(sql)
        if db_exists:
            print(f'Database {self.db} already exists--skipping create step')
        elif db_exists == 0:
            self.__create_db(cursor)
            print(f'Created new database {self.db}')
        else:
            print(f'There was a problem checking whether {self.db} exists--unexpected db_exists value.')
    def initialize_table(self, table_name, dtypes, unique_key, foreign_key):
        """Checks to see if a table exists. If not, creates it."""
        cursor = self.connection.cursor()
        self.__use_db(cursor)
        sql = 'SELECT count(*) FROM information_schema.TABLES '
        sql += f'WHERE (TABLE_SCHEMA = "{self.db}") AND (TABLE_NAME = "{table_name}")'
        cursor.execute(sql)
        # Single-row, single-column result: the table count (0 or 1).
        table_exists = [item for item in cursor][0][0]
        if table_exists:
            logging.info("Table {} already exists--skipping creation step.".format(table_name))
        elif table_exists == 0:
            self.__create_table(cursor, table_name, dtypes, unique_key, foreign_key)
        else:
            print("There was a problem checking whether {} exists".format(table_name), end="")
            print("--unexpected table_exists value.")
        # TO DO
        # Check whether table looks like it has the right number of columns and column names
    def add_to_table(self, table_name, table_data, sql_col_names, file_name):
        """Insert the rows of table_data into table_name, tagged with file_name."""
        cursor = self.connection.cursor()
        self.__use_db(cursor)
        self.__insert_records(cursor, table_name, table_data, sql_col_names, file_name)
    def __create_db(self, cursor):
        # Create the configured database (name interpolated; see class note).
        sql = 'CREATE DATABASE {}'.format(self.db)
        cursor.execute(sql)
        self.connection.commit()
    def __use_db(self, cursor):
        # Select the configured database for subsequent statements.
        sql = 'USE {}'.format(self.db)
        cursor.execute(sql)
        self.connection.commit()
    def __create_table(self, cursor, table_name, dtypes, unique_key, foreign_key):
        """Build and run a CREATE TABLE statement from the given spec.

        dtypes maps column name -> SQL type; unique_key is an iterable of
        column names for a UNIQUE constraint; foreign_key is an iterable of
        (column, referenced_table_spec) pairs.
        """
        logging.info('Creating table {}'.format(table_name))
        sql = "CREATE TABLE {} (".format(table_name)
        sql += "id INT NOT NULL AUTO_INCREMENT, "
        sql += "source_file VARCHAR(255), "
        for column_name, column_dtype in dtypes.items():
            sql += "{} {}, ".format(column_name, column_dtype)
        sql += "PRIMARY KEY (id)"
        if unique_key:
            sql += ", UNIQUE ("
            for key in unique_key:
                sql += key + ', '
            sql = sql[:-2]     # Chop off last ', '
            sql += ")"
        if foreign_key:
            for constraint in foreign_key:
                sql += ", FOREIGN KEY("
                sql += constraint[0]
                sql += ") REFERENCES "
                sql += constraint[1]
        sql += ')'          # ... and balance parentheses before sending.
        logging.info('Creating table {}:\n\t{}'.format(table_name, sql))
        try:
            cursor.execute(sql)
        except pymysql.err.ProgrammingError:
            print('Error creating table {}'.format(table_name))
            logging.info('Error creating table{}:\n\t{}'.format(table_name, sql))
        self.connection.commit()
    def __insert_records(self, cursor, table_name, table_data, sql_col_names, file_name):
        """Insert each row of table_data, escaping values into a literal string.

        Duplicate-entry errors are silently skipped; other insert errors are
        logged.  See the class-level note about string-built SQL.
        """
        for i in range(len(table_data)):
            values_string = file_name + "', '"
            for item in table_data[i:i+1].values[0]:
                escaped_item = re.sub(r"(['\\])", r'\\\1', str(item))   # Escape textual backslashes and tick marks
                cleaned_item = re.sub(u"\uFFFD", "", escaped_item)      # Fix oddball <?> character
                values_string += cleaned_item.strip() + "', '"          # Strip of leading and trailing whitespace
            values_string = values_string[:-4]      # Chop off extra "', '"
            sql = "INSERT INTO {} ({}) VALUES ('{}')".format(table_name, "source_file, " + ", ".join(sql_col_names),
                                                             values_string)
            sql = re.sub(r"'(NULL|nan|None)'", "NULL", sql)     # NULL should be sent in SQL w/o quote marks
                                                                # nan and None should be stored as NULL
            # print('{} of {}: {}'.format(i+1,len(table_data), sql))
            logging.debug('{} of {}: {}'.format(i+1, len(table_data), sql))
            try:
                cursor.execute(sql)
            except (pymysql.err.ProgrammingError, pymysql.err.IntegrityError) as e:
                if not re.search(r'Duplicate entry', repr(e)):
                    logging.info('Error adding entry: \n\t{}'.format(e))
                    logging.info('\t{} of {}: {}'.format(i+1, len(table_data), sql))
            self.connection.commit()
        # -----------------TO DO-----------------------------------
        # Have it return something if the operation raises an error and move the file into a problem file folder.
| true |
50c2dcc71149914dfeec31b3a79745f275c15e49 | Python | gtcaps/OLC1_Proyecto1_201700312 | /AnalizadorLexicoHTML/AnalizadorLexico.py | UTF-8 | 13,445 | 2.984375 | 3 | [] | no_license | from AnalizadorLexicoHTML.Token import *
import os, re, pathlib
class AnalizadorLexicoHTML:
    """Hand-written lexical analyzer (DFA) for a small HTML subset.

    Scans input character by character, producing Token objects, lexical
    errors, comments, and string literals, and can emit HTML reports for
    tokens and errors.  States: 0 dispatch, 1 quoted string, 2-5 HTML
    comment (<!-- ... -->), 6 identifier/reserved word, 7 text after '>'.
    """
    def __init__(self):
        self.listaTokens = []      # recognized tokens
        self.listaErrores = []     # lexical error messages
        self.entradaLimpia = ""    # input rebuilt from accepted lexemes
        self.estado = 0            # current DFA state
        self.lexema = ""           # lexeme being accumulated
        self.linea = 1             # current line (1-based)
        self.columna = 1           # column of the current token
        self.comentarios = []      # collected <!-- --> comments
        self.cadenas = []          # collected quoted strings
        # NOTE(review): "bode" looks like a typo for "body" (which also
        # appears later in this list) — confirm before removing.
        self.palabrasReservadas = ["html","head","title","bode","h1","h2","h3","h4","h5","h6","p","br","img","src","id","class","a","href","ul","ol","li","style","table","thead","tbody","th","tr","td","caption","colgroup","col","tfoot","border","body","div","footer"]
    #END -----
    def __agregarToken(self, tipoToken):
        """Emit the current lexeme as a token and reset the DFA."""
        token = Token(tipoToken, self.lexema, self.linea, self.columna)
        self.entradaLimpia += self.lexema
        self.listaTokens.append(token)
        self.estado = 0
        self.lexema = ""
    #END -----
    def __agregarErrorLexico(self, mensaje):
        """Record a lexical error message and reset the DFA."""
        self.listaErrores.append(mensaje)
        self.estado = 0
        self.lexema = ""
    #END -----
    def analizarCadena(self, cadena):
        """Tokenize an input string; '#' is appended as end-of-input mark."""
        cadenaEntrada = cadena.strip() + "#"
        col = 0
        i = 0
        while i < len(cadenaEntrada):
            caracterActual = cadenaEntrada[i]
            if caracterActual == '\n':
                self.linea += 1
                col = 0
            # State 0: dispatch on the first character of a new lexeme.
            if self.estado == 0:
                if caracterActual == '"':
                    self.lexema += caracterActual
                    self.estado = 1
                    self.columna = col
                elif caracterActual == '<':
                    self.lexema += caracterActual
                    self.estado = 2
                elif caracterActual == '/':
                    self.lexema += caracterActual
                    self.columna = col
                    self.__agregarToken(TipoToken.DIAGONAL)
                elif caracterActual == '=':
                    self.lexema += caracterActual
                    self.columna = col
                    self.__agregarToken(TipoToken.IGUAL)
                elif caracterActual.isalpha():
                    self.lexema += caracterActual
                    self.estado = 6
                elif caracterActual == '>':
                    self.lexema += caracterActual
                    self.columna = col
                    self.estado = 7
                else:
                    if caracterActual == '#' and i == (len(cadenaEntrada) - 1):
                        print(">>>>>>>>>>>> Fin del Analisis Lexico <<<<<<<<<<<<<")
                    elif caracterActual in ('\n',' ', '\t'):
                        self.estado = 0
                        self.lexema = ""
                        self.entradaLimpia += caracterActual
                    else:
                        self.__agregarErrorLexico("El caracter {} no es reconocido dentro del lenguaje".format(caracterActual))
            # State 1: inside a double-quoted string.
            elif self.estado == 1:
                if caracterActual == '"':
                    self.lexema += caracterActual
                    self.cadenas.append(self.lexema)
                    self.__agregarToken(TipoToken.CADENA)
                else:
                    self.estado = 1
                    self.lexema += caracterActual
            # State 2: saw '<'; '!' begins a comment, otherwise emit MENOR.
            elif self.estado == 2:
                if caracterActual == '!':
                    self.lexema += caracterActual
                    self.estado = 3
                else:
                    self.__agregarToken(TipoToken.MENOR)
                    i -= 1
            # States 3-5: consume an HTML comment, looking for '-->'.
            elif self.estado == 3:
                if caracterActual == '-':
                    self.lexema += caracterActual
                    self.estado = 4
                else:
                    self.lexema += caracterActual
                    self.estado = 3
            elif self.estado == 4:
                if caracterActual == '-':
                    self.lexema += caracterActual
                    self.estado = 5
                else:
                    self.lexema += caracterActual
                    self.estado = 3
            elif self.estado == 5:
                if caracterActual == '>':
                    self.lexema += caracterActual
                    print("COMENTARIO =>\n{}".format(self.lexema))
                    self.comentarios.append(self.lexema)
                    self.entradaLimpia += self.lexema
                    self.lexema = ""
                    self.estado = 0
                else:
                    self.lexema += caracterActual
                    self.estado = 3
            # State 6: identifier or reserved word.
            elif self.estado == 6:
                if caracterActual.isalpha() or caracterActual.isdigit():
                    self.lexema += caracterActual
                    self.estado = 6
                else:
                    if self.lexema in self.palabrasReservadas:
                        self.__agregarToken(TipoToken.PALABRA_RESERVADA)
                    else:
                        self.__agregarToken(TipoToken.IDENTIFICADOR)
                    i -= 1
            # State 7: after '>', accumulate free text until '<' or EOF.
            elif self.estado == 7:
                if caracterActual == '<':
                    if len(self.lexema) == 1:
                        self.__agregarToken(TipoToken.MAYOR)
                        self.lexema += caracterActual
                        self.__agregarToken(TipoToken.MENOR)
                    else:
                        # Split ">text" into MAYOR + CADENA_HTML, then MENOR.
                        cadena_html = self.lexema.replace(">","")
                        self.lexema = self.lexema.replace(cadena_html,"")
                        self.__agregarToken(TipoToken.MAYOR)
                        self.lexema = cadena_html
                        self.__agregarToken(TipoToken.CADENA_HTML)
                        self.lexema = caracterActual
                        self.__agregarToken(TipoToken.MENOR)
                elif caracterActual == '#' and i == (len(cadenaEntrada) - 1):
                    self.__agregarToken(TipoToken.MAYOR)
                    i -= 1
                else:
                    self.lexema += caracterActual
                    self.estado = 7
            i+= 1
            col += 1
        # Drop tokens whose lexeme is just runs of whitespace.
        auxTkn = []
        for tkn in self.listaTokens:
            if re.search(r'([\t\n ][\t\ ])+', tkn.lexema):
                continue
            else:
                auxTkn.append(tkn)
        self.listaTokens = auxTkn
    #END -----
    def analizarArchivo(self, ruta):
        """Tokenize the contents of the file at ``ruta`` (if it exists)."""
        if os.path.isfile(ruta):
            archivo = open(ruta, "r")
            self.analizarCadena(archivo.read())
            archivo.close()
    #END -----
    def imprimirTokens(self):
        """Print every recognized token to stdout."""
        for token in self.listaTokens:
            print("=====================================================")
            print('TOKEN => {}   LEXEMA => {}'.format(token.getTipo(), token.lexema))
            print("=====================================================")
    #END -----
    def imprimirErrores(self):
        """Print every lexical error to stdout."""
        for error in self.listaErrores:
            print("\n\n;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;")
            print(error)
            print(";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;")
    #END -----
    def crearArchivoLimpio(self, nombre_archivo):
        """Write the cleaned input to the path found in a PATHW directive.

        Extracts a Windows-style path after "PATHW:" from the cleaned input,
        strips the drive letter and a leading "user\\" segment, creates the
        directory, and writes ``nombre_archivo`` there.
        """
        # patron = r'([a-zA-Z]:\\)*(\w+\\)+'
        patron = r'(PATHW)[: ]+([a-zA-Z]:\\)*(\w+\\)+'
        ruta = re.search(patron, self.entradaLimpia)
        ruta = ruta.group()
        ruta = re.sub(r'(PATHW)[: ]+',"",ruta)
        ruta = re.sub(r'[a-zA-Z]:\\','',ruta)
        ruta = ruta.replace("user\\","")
        pathlib.Path(ruta).mkdir(parents=True, exist_ok=True)
        file = open(".\\" + ruta + nombre_archivo, "w")
        file.write(self.entradaLimpia)
        file.close()
    #END -------
    def __verificarDirectorioReportes(self):
        """Create the reports/ directory if it does not exist yet."""
        if not os.path.isdir("reportes/"):
            os.mkdir("reportes/")
    #END -------
    def generarReporteErrores(self):
        """Write the lexical-error HTML report (and the token report), then open it."""
        self.__verificarDirectorioReportes()
        file = open("reportes/erroreshtml.html", "w")
        file.write("<!DOCTYPE html>\n<html>\n")
        file.write("<head>\n")
        file.write("	<meta charset=\"UTF-8\">\n")
        file.write("	<title>Reporte de Errores HTML</title>\n")
        file.write("	<style>")
        file.write("		*{margin:0; padding:0; box-sizing: border-box;}\n")
        file.write("		menu{background: rgb(27,38,68);text-align:center;padding:20px 0;}\n")
        file.write("		a{margin: 0 30px; text-decoration:none; font-size:20px; color:white;}\n")
        file.write("		a:hover{text-decoration: underline;}\n")
        file.write("		h1{text-align: center; margin: 30px 0;}\n")
        file.write("		table{border-collapse: collapse; margin: 0 auto; width: 40%;}\n")
        file.write("		td, th{border: 1px solid black; padding: 10px;}\n")
        file.write("		th{background: black; color: white}\n")
        file.write("	</style>\n")
        file.write("</head>\n")
        file.write("<body>\n")
        file.write("	<menu>\n")
        file.write("		<a href=\"tokenshtml.html\">Reporte Tokens</a>\n")
        file.write("		<a href=\"erroreshtml.html\">Reporte Errores</a>\n")
        file.write("	</menu>\n")
        file.write("	<h1>Reporte de Errores Lexicos HTML</h1>\n")
        file.write("	<table>\n")
        file.write("		<thead>\n")
        file.write("			<tr>\n")
        file.write("				<th>#</th>\n")
        file.write("				<th>Error</th>\n")
        file.write("			</tr>\n")
        file.write("		</thead>\n")
        file.write("		<tbody>")
        if len(self.listaErrores) != 0:
            i = 1
            for error in self.listaErrores:
                file.write("			<tr>")
                file.write("				<td>{}</td>".format(i))
                file.write("				<td>{}</td>".format(error))
                file.write("			</tr>")
                i += 1
        else:
            file.write("			<tr>")
            file.write("				<td>0</td>")
            file.write("				<td>El archivo no tiene errores lexico :D</td>")
            file.write("			</tr>")
        file.write("		</tbody>")
        file.write("	</table>\n")
        file.write("</body>\n")
        file.write("</html>")
        file.close()
        self.__generarReporteTokens()
        os.system("start ./reportes/erroreshtml.html")
    #END -------
    def __generarReporteTokens(self):
        """Write the recognized-token HTML report to reportes/tokenshtml.html."""
        self.__verificarDirectorioReportes()
        file = open("reportes/tokenshtml.html", "w")
        file.write("<!DOCTYPE html>\n<html>\n")
        file.write("<head>\n")
        file.write("	<meta charset=\"UTF-8\">\n")
        file.write("	<title>Reporte de Tokens HTML</title>\n")
        file.write("	<style>")
        file.write("		*{margin:0; padding:0; box-sizing: border-box;}\n")
        file.write("		menu{background: rgb(27,38,68);text-align:center;padding:20px 0;}\n")
        file.write("		a{margin: 0 30px; text-decoration:none; font-size:20px; color:white;}\n")
        file.write("		a:hover{text-decoration: underline;}\n")
        file.write("		h1{text-align: center; margin: 30px 0;}\n")
        file.write("		table{border-collapse: collapse; margin: 0 auto; width: 40%;}\n")
        file.write("		td, th{border: 1px solid black; padding: 10px;}\n")
        file.write("		th{background: black; color: white}\n")
        file.write("	</style>\n")
        file.write("</head>\n")
        file.write("<body>\n")
        file.write("	<menu>\n")
        file.write("		<a href=\"tokenshtml.html\">Reporte Tokens</a>\n")
        file.write("		<a href=\"erroreshtml.html\">Reporte Errores</a>\n")
        file.write("	</menu>\n")
        file.write("	<h1>Reporte de Tokens HTML</h1>\n")
        file.write("	<table>\n")
        file.write("		<thead>\n")
        file.write("			<tr>\n")
        file.write("				<th>#</th>\n")
        file.write("				<th>Token</th>\n")
        file.write("				<th>Lexema</th>\n")
        file.write("				<th>Fila</th>\n")
        file.write("				<th>Columna</th>\n")
        file.write("			</tr>\n")
        file.write("		</thead>\n")
        file.write("		<tbody>")
        if len(self.listaTokens) != 0:
            i = 1
            for token in self.listaTokens:
                file.write("			<tr>")
                file.write("				<td>{}</td>".format(i))
                file.write("				<td>{}</td>".format(token.getTipo()))
                file.write("				<td>{}</td>".format(token.lexema))
                file.write("				<td>{}</td>".format(token.linea))
                file.write("				<td>{}</td>".format(token.columna))
                file.write("			</tr>")
                i += 1
        else:
            file.write("			<tr>")
            file.write("				<td>0</td>")
            file.write("				<td>El archivo no tiene tokens :D</td>")
            file.write("			</tr>")
        file.write("		</tbody>")
        file.write("	</table>\n")
        file.write("</body>\n")
        file.write("</html>")
        file.close()
    #END -------
a6f4efdabc59c342aa041e7702db81d737ee2c41 | Python | taglio/reti-P2P | /directory_distribuita/menu_gnutella.py | UTF-8 | 5,500 | 2.625 | 3 | [] | no_license | import socket,sys,time
from gnutella import PEER
#imports needed for the GUI
from Tkinter import *
import thread_gnutella
# TODO: implement a proper login step that also starts the listening socket.
peer=PEER()
class Menu_Login:
    def __init__(self,master):
        """
        Create the master frame — the first (Login) window — with its
        button and status label.
        """
        frame = Frame(master, height=300, width=500)
        frame.pack_propagate(0)
        frame.pack()
        def My_Button_Click():
            """
            Handler run when the LOGIN button is pressed: attempt the peer
            login and report the result via the status label and the global
            ``Login`` flag.
            """
            global Login
            Login = False
            # Call the peer's login method; "ok" means success.
            self.StatusLogin["text"] = peer.login()
            if self.StatusLogin["text"] != "ok":
                Login = False
                self.StatusLogin["text"] = "Login non effettuato, riprova"
            else :
                Login = True
                self.StatusLogin["text"] = "Login effettuato, chiudere la finestra per passare al menu' principale"
        self.Login = Button(frame, height=5, width=20, text="LOGIN", command=My_Button_Click)
        self.Login.pack()
        self.Login.place(x=150, y=40)
        # Create a Label widget as a child of the frame to show login status.
        self.StatusLogin = Label(frame, text="...")
        self.StatusLogin.pack({"expand":"yes", "fill":"x"})
# Create an ordinary window for the login screen.
root = Tk()
menu_login = Menu_Login(root)
# Tkinter event loop; the program stays here until the window is closed.
root.mainloop()
class Menu:
    def __init__(self, master):
        """
        Create the second (main) window and all elements within it: an
        input box, a status label, and ADD/SEARCH/REMOVE/DOWNLOAD/LOGOUT
        buttons wired to the global ``peer``.  Built only if the global
        ``Login`` flag is True.
        """
        if Login == True:
            self.resultfiles=[]
            frame = Frame(master, height=700, width=500)
            frame.pack_propagate(0)
            frame.pack()
            def My_Button_Click_Add():
                # Register the file named in the input box with the peer.
                self.Status["text"] = peer.addfile(self.name.get())
                if self.Status["text"] == "ok":
                    self.Status["text"] = "file aggiunto con successo"
                else :
                    self.Status["text"] = "Errore nell'aggiunta del file"
            def My_Button_Click_Remove():
                # Remove the named file from the peer's shared list.
                self.Status["text"] = peer.rmfile(self.name.get())
                if self.Status["text"] == "ok":
                    self.Status["text"] = "file rimosso con successo"
                else :
                    self.Status["text"] = "Errore nella rimozione del file"
            def My_Button_Click_Logout():
                # Disconnect this peer from the network.
                self.Status["text"] = peer.logout()
                if self.Status["text"] == "ok":
                    self.Status["text"] = "logout effettuata con successo, si e' disconnessi"
                else :
                    self.Status["text"] = "logout non riuscito, riprova"
            def My_Button_Click_Search():
                # Query the network; show each hit as "index name ip:port".
                # Note: only the last result remains visible in the label.
                self.resultfiles = peer.find(self.name.get())
                for i in range(0,len(self.resultfiles)):
                    rs=self.resultfiles[i]
                    self.Status["text"] = "%d %s %s:%s" %(i,rs.filename.strip(" "),rs.ip,rs.porta)
            def My_Button_Click_Download():
                # NOTE(review): ``os``/``signal`` are not imported in this
                # module's visible imports and ``downloadFile``, ``fileScelto``
                # and ``stringa`` are undefined here — this branch raises
                # NameError at runtime; confirm/fix before use.
                child_pid = os.fork() # TODO: fix the child-process download handling
                if child_pid==0:
                    print "Sono il figlio che fa il downloadl"
                    signal.signal(signal.SIGINT, signal.SIG_DFL)
                    downloadFile(fileScelto,stringa)
                    os._exit(0) # Child process exit.
                self.Status["text"] = peer.download(self.name.get())
                if self.Status["text"] == "ok":
                    self.Status["text"] = "download effettuato con successo"
                else :
                    self.Status["text"] = "download non effettuata, riprova"
            self.name = StringVar()
            self.testo = Label(frame, height=3, text="SELEZIONA LA FUNZIONE DESIDERATA:", fg="red", font=("ubuntu-title",16))
            self.testo.pack()
            self.testo.grid(columnspan=3)
            self.MyInputBox = Entry(frame, textvariable=self.name)
            self.MyInputBox.pack()
            self.MyInputBox.grid(rowspan=4, column=1)
            self.aggiunta = Button(frame, height=3, width=20, text="ADD", command=My_Button_Click_Add)
            self.aggiunta.pack()
            self.aggiunta.grid(row=1, column=0)
            self.Status = Label(frame)
            self.Status.pack({"expand":"yes", "fill":"x"})
            self.Status.grid(row=2, column=2)
            self.rimozione = Button(frame, height=3, width=20, text="REMOVE",command=My_Button_Click_Remove)
            self.rimozione.pack()
            self.rimozione.grid(row=3, column=0)
            self.ricerca = Button(frame, height=3, width=20, text="SEARCH", command=My_Button_Click_Search)
            self.ricerca.pack()
            self.ricerca.grid(row=2, column=0)
            self.download = Button(frame, height=3, width=20, text="DOWNLOAD", command=My_Button_Click_Download)
            self.download.pack()
            self.download.grid(row=4, column=0)
            self.logout = Button(frame, height=3, width=20, text="LOGOUT", command=My_Button_Click_Logout)
            self.logout.pack()
            self.logout.grid(row=5, column=0)
# Second window: the main menu (built only if login succeeded).
root = Tk()
menu = Menu(root)
root.mainloop()
| true |
74decb788597750bbc4caa81336fcde8ebca2e82 | Python | chenhuiyeh/python-scripts | /consonantCount.py | UTF-8 | 506 | 3.40625 | 3 | [] | no_license | import re, pprint
message = 'It was a bright cold day in April, and the clocks were striking thirteen.'
# counts number of consonants in a string message
def consonantCount(message):
consonantRegex = re.compile(r'[^aeiouAEIOU\s,.]')
consonantList = consonantRegex.findall(message)
count = {}
for i in range(len(consonantList)):
count.setdefault(consonantList[i], 0)
count[consonantList[i]] = count[consonantList[i]] + 1
pprint.pprint(count)
consonantCount(message)
| true |
9e375a1135162c5ecb95614376aa158805f896a2 | Python | jadoona81/RL_Experiments | /Benchmark/testScript.py | UTF-8 | 2,640 | 2.84375 | 3 | [
"Apache-2.0"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 11 08:46:17 2021
@author: HG19230
"""
import sys
sys.path.append("..\..\DQNLibrary")
sys.path.append("..\..\DQNLibrary\MobilityPatterns")
import numpy as np
from gridAStar import AStarGridPathPlanning
from TSP_greedy import TSP_greedy
import math
import random
def manhattanData(xDim, yDim, block_width, street_width):
line_manhattan =0
column_manhattan =0
speed= 0
direction=0
line_count = math.ceil(yDim / block_width) #number of lines (#rows)
print(line_count)
line_manhattan = random.randrange(1, line_count+1) #random integer between 1 and line_count.
print(line_manhattan)
column_count = math.ceil(xDim / block_width) #number of lines (#columns)
print(column_count)
column_manhattan = random.randrange(1, column_count+1) #random integer between 1 and column_count.
print(column_manhattan)
for i in range (20):
#initialize location
k= random.random()
if(k < 0.5):
rand = random.random()
X= rand * xDim
Y = (line_manhattan)* block_width + 1
delta = street_width*rand - (street_width/2)
Y = Y + delta
else:
X = (column_manhattan)* block_width + 1
rand= random.random()
delta = street_width*rand - (street_width/2)
X = X + delta
rand = random.random()
Y = rand * yDim
print('created X and Y==========='+ str(X)+", "+ str(Y))
def euclideanDistance(a,b):
a= np.array(a)
b=np.array(b)
dist = np.linalg.norm(a-b)
return dist
def numHops(dist):
return dist%4
# dist= euclideanDistance((8,8), (16,16))
# print(dist)
# print(numHops(dist))
# print(dist/4)
# print(int(dist/4))
# print(int(dist/4) -1)
# print(dist/5.657)
# print(int(dist/5.657))
# aStar= AStarGridPathPlanning(4, 4)
# path = aStar.search((0,1), (3,3))
# for i in range(len(path)):
# print(path[i])
# print(path[3][0])
# print(len(path))
# total_timesteps= 20
# areaSize= np.power(48, 2) # keep area size as multiple of 4
# numTargets= 10
# coverageMajorSemiAxis= 3.5355339059327
# coverageMainorSemiAxis= 2.83 #2.53553390593275
# gridLowCorners= {(0,0), (4,0), (8,0), (12, 0), (0,4), (4,4), (8,4), (12,4), (0,8), (4,8), (8, 8), (12, 8), (0,12), (4,12), (8,12), (12,12)}
# tsp= TSP_greedy(2, 100, 16, 4, gridLowCorners, 4)
manhattanData(20, 20, 4, 4) | true |
01c4df3e339e17f5ae7e9f9814a9497e74f2b78f | Python | amfl/opencv-go | /game_tree.py | UTF-8 | 2,747 | 3.234375 | 3 | [] | no_license | from sgfmill import sgf
import numpy as np
class GameNode:
def __init__(self):
self.state = None
self.parent = None
self.sgf_node = None
def difference_from_parent(self):
try:
diff = self.state - self.parent.state
except AttributeError:
# There is no parent for this state!
return None
changed_coords = np.nonzero(diff)
# Convert coords into a list of moves
# Format: [((2, 1), 'w')]
moves = list(zip(changed_coords[0], changed_coords[1]))
m = ['.', 'b', 'w']
colors = [m[self.state[x[0], x[1]]] for x in moves]
sgf_friendly_moves = list(zip(colors, moves))
return sgf_friendly_moves
class GameTree:
def __init__(self, size):
# For quick access to all gamestates
self.state_map = {}
self.current_state = None
self.sgf_game = sgf.Sgf_game(size=size)
def update(self, state):
"""Update the gametree.
Returns:
bool: True if board state changed, False otherwise.
GameNode: The current GameNode
"""
# Convert state to something hashable
hashable = str(state)
# If this is the same as last time, no action required
if self.current_state == hashable:
return False, self.state_map[hashable]
# If we have not seen this state before..
if not (hashable in self.state_map):
print("Unique state!")
gn = GameNode()
gn.state = state
gn.parent = self.state_map.get(self.current_state, None)
sgf_node = None
if gn.parent:
diffs = gn.difference_from_parent()
if len(diffs) == 1 and gn.parent.sgf_node is not None:
# This could be a valid sgf move.
node = gn.parent.sgf_node.new_child()
try:
# Can throw an error if this move is invalid.
node.set_move(diffs[0][0], diffs[0][1])
sgf_node = node
except ValueError:
print("ERROR: Invalid move.")
print("Further moves on this branch not recordable.")
node.delete()
else:
# This is the first move!
print("made the first move.")
sgf_node = self.sgf_game.get_last_node()
gn.sgf_node = sgf_node
self.state_map[hashable] = gn
else:
print("Rollback!")
# Keep track of the current state
self.current_state = hashable
return True, self.state_map[hashable]
| true |
41582606c5fc2378a17ea1e134f0a1149ad0fd1c | Python | roman-89/advent_of_code | /2019/4.py | UTF-8 | 940 | 3.25 | 3 | [] | no_license |
def is_valid_password(i):
s = str(i)
adjacent = False
previous = int(s[0])
for c in s[1:]:
c = int(c)
if not adjacent and c == previous:
adjacent = True
if c < previous:
return False
previous = c
return adjacent
print(sum( is_valid_password(i) for i in range(108457, 562041)))
# second part
def is_valid_password(i):
s = str(i)
adjacent = None
previous = int(s[0])
pre_previous = None
for idx, c in enumerate(s[1:]):
c = int(c)
if c == previous:
if c != pre_previous:
if idx == 4:
adjacent = True
elif c != int(s[idx+2]):
adjacent = True
if c < previous:
return False
pre_previous = previous
previous = c
return bool(adjacent)
print(sum( is_valid_password(i) for i in range(108457, 562041)))
| true |
8949e70615a08b4721b69df85691db236ca58b38 | Python | nianweijie/webScrapyBase | /firstselenium.py | UTF-8 | 735 | 2.96875 | 3 | [] | no_license | from selenium import webdriver
import time
driver = webdriver.Firefox()
driver.get("http://www.santostang.com/2018/07/04/hello-world/")
# 因为评论在iframe中,所以要用.frame对iframe进行解析
driver.switch_to.frame(driver.find_element_by_css_selector("iframe[title='livere']"))
for x in range(1,4):
# 再通过find elements by css selector找到'div.reply-content'
comments = driver.find_elements_by_css_selector('div.reply-content')
for eachcomment in comments:
content = eachcomment.find_element_by_tag_name('p')
print(content.text)
try:
next_page=driver.find_element_by_css_selector('button.more-btn')
next_page.click()
time.sleep(5)
except:
pass | true |
e482f1c4d813dce16bdb4a36acbc53afe6c08623 | Python | liubrandon/pod_6 | /gregg/todoproject/todo/views.py | UTF-8 | 2,738 | 2.625 | 3 | [] | no_license | from django.shortcuts import render
from .models import *
from .forms import *
from django.http import HttpResponseRedirect
from django.urls import reverse
# todo list homepage
def todo(request):
if request.method == 'GET':
#tasks not completed
tasks_pending = Todo.objects.filter(completed=False).order_by('-task_id')
#tasks completed
tasks_completed = Todo.objects.filter(completed=True).order_by('-task_id')
form = TodoForm()
return render(request=request,
template_name = 'list.html',
context = {'tasks_pending':tasks_pending, 'tasks_completed':tasks_completed, 'form':form})
# when user submits form
if request.method == 'POST':
form=TodoForm(request.POST)
if form.is_valid():
task = form.cleaned_data['task']
# add new Todo object to QuerySet
Todo.objects.create(task=task)
# "redirect" to the todo homepage
return HttpResponseRedirect(reverse('todo'))
def task(request, task_id):
if request.method == 'GET':
# looking up a specific Todo object
todo = Todo.objects.get(pk=task_id)
# make a form, pre-populate char field with the name of the task
form = TodoForm(initial = {'task':todo.task})
return render(request = request,
template_name = 'detail.html',
context = {
'form':form,
'task_id': task_id
})
if request.method == 'POST':
if 'save' in request.POST:
form = TodoForm(request.POST)
if form.is_valid():
task=form.cleaned_data['task']
# update task attribute of the task of the correct task_id to match user input
Todo.objects.filter(pk=task_id).update(task=task)
elif 'delete' in request.POST:
Todo.objects.filter(pk=task_id).delete()
elif 'mark complete' in request.POST:
Todo.objects.filter(pk=task_id).update(completed=True)
return HttpResponseRedirect(reverse('todo'))
def notes(request):
if request.method == 'GET':
notes = Note.objects.all().order_by('note_id')
form = NotesForm()
return render(request=request,
template_name = 'notes.html',
context = {'notes':notes, 'form':form})
if request.method == 'POST':
form = NotesForm(request.POST)
if form.is_valid():
note = form.cleaned_data['notes']
Note.objects.create(note_text=note)
return HttpResponseRedirect(reverse('notes'))
| true |
877a50a49332216eacf25400b7eb97f66bc5761b | Python | ggradias/real-python-test | /tkinterexamp2.py | UTF-8 | 358 | 3.359375 | 3 | [] | no_license | from tkinter import *
# define the GUI application
window = Tk()
window.geometry("300x200")
button1 = Button(window, text="I'm at offset (50,60)")
button2 = Button(window, text="I'm at offset (0,0)")
button1.pack()
button2.pack()
button1.place(height=200, width=200, x=50, y=65)
button2.place(height=150, width=150, x=0, y=0)
window.mainloop()
| true |
cffd1209920369dbf899b5fbe2fa0558eb39f419 | Python | ImEagle/codemetrics | /tests/test_scm.py | UTF-8 | 4,514 | 2.65625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `codemetrics.scm`"""
import unittest
import datetime as dt
import textwrap
import typing
import unittest.mock as mock
import pandas as pd
import codemetrics.core as core
import codemetrics.scm as scm
import tests.utils as utils
class TestLogEntriesToDataFrame(unittest.TestCase):
"""Given a set of scm.LogEntries"""
def setUp(self):
utils.add_data_frame_equality_func(self)
self.log_entries = [
scm.LogEntry(revision='abc',
author='Agatha',
date=dt.datetime(2019, 1, 13),
path='dir/file.txt',
message='',
kind='file',
textmods=True,
propmods=False,
action='M'),
scm.LogEntry(revision='abd',
author='Agatha',
date=dt.datetime(2019, 2, 1),
path='dir/file.txt',
message='',
kind='file',
textmods=True,
propmods=False,
action='M'),
]
def test_dataframe_conversion(self):
"""Check conversion to DataFrame."""
actual = scm._to_dataframe(self.log_entries)
expected = utils.csvlog_to_dataframe(textwrap.dedent('''\
revision,author,date,path,message,kind,action
abc,Agatha,2019-01-13T00:00:00.000000Z,dir/file.txt,,file,M
abd,Agatha,2019-02-01T00:00:00.000000Z,dir/file.txt,,file,M'''))
self.assertEqual(expected, actual)
class ScmDownloadTestCase:
"""Test interface of download functions.
Common test case for all SCM download functions. Inherit from it *and*
from unittest.TestCase.
See also:
GitDownloadTestCase, SubversionDownloadTestCase
See https://stackoverflow.com/questions/1323455/ for design rationale.
"""
@mock.patch('codemetrics.internals.run', autospec=True,
return_value='dummy content')
def test_download_return_single_result(self, _):
"""Makes sure the download function returns a DownloadResult."""
actual = self.download(pd.DataFrame({'revision': ['abcd'],
'path': ['/some/file']}))
expected = scm.DownloadResult('abcd', '/some/file', 'dummy content')
self.assertEqual(expected, actual)
class GetLogTestCase:
"""Test interface to get_log functions.
Common test case for all SCM get_log functions. Inherit from it *and* from
unittest.TestCase.
see also:
GetGitLogTestCase, SubversionTestCase
"""
def setUp(self, get_log_func: typing.Callable, module) -> None:
"""Set up common to all log getting test cases.
Adds hanlding of equality test for pandas.DataFrame and patches the
functions get_now for a specific date and check_run_in_root.
Args:
get_log_func: function that will retrieve the log from SCM tool.
"""
utils.add_data_frame_equality_func(self)
self.get_log = get_log_func
self.module = module
self.now = dt.datetime(2018, 12, 6, 21, 0, tzinfo=dt.timezone.utc)
self.get_now_patcher = mock.patch('codemetrics.internals.get_now',
autospec=True, return_value=self.now)
self.get_now = self.get_now_patcher.start()
self.get_check_patcher = mock.patch('codemetrics.internals.check_run_in_root',
autospec=True)
self.check_run_in_root = self.get_check_patcher.start()
self.after = dt.datetime(2018, 12, 3, tzinfo=dt.timezone.utc)
def test_set_up_called(self):
"""Makes sure GetLogTestCase.setUp() is called."""
self.assertIsNotNone(self.get_log)
@mock.patch('codemetrics.internals.run', autospec=True)
def test_get_log_updates_default_download_func(self, _):
"""The SCM used to get the log updates the default download."""
self.get_log()
self.assertEqual(self.module.download, scm._default_download_func)
@mock.patch('codemetrics.internals.run', autospec=True)
def test_get_log_with_path(self, run_):
"""get_log takes path into account."""
_ = self.get_log(path='my-path', after=self.after)
self.assertIn('my-path', str(run_.call_args[0][0]))
| true |
00cb1e8b9912a546f949f43a51f93b8038456c14 | Python | kruthvik007/BlockChainCryptoTransaction | /blockchain_currency.py | UTF-8 | 6,490 | 3.03125 | 3 | [] | no_license | from hashlib import sha256
import json
import pprint
# function to hash the data sent (transaction)
def calculate_hash(previous_hash, data, nonce):
data = str(previous_hash) + str(data) + str(nonce)
data = data.encode()
hashing = sha256(data)
return hashing.hexdigest()
# class to create a block
class Block:
def __init__(self, transaction_data, previous_hash=''):
self.transaction_data = transaction_data
if previous_hash == 'I am the First Block':
self.previous_hash = '0' * 64
else:
self.previous_hash = previous_hash
self.nonce = 0
self.hash = calculate_hash(previous_hash, transaction_data, self.nonce)
# mining of the block based on difficulty by increasing nonce
def mine_block(self, difficulty):
difficultyCheck = "0" * difficulty
while self.hash[:difficulty] != difficultyCheck:
self.hash = calculate_hash(self.previous_hash, self.transaction_data, self.nonce)
self.nonce = self.nonce + 1
# function to get the first block data....just a string
def genesis_block():
basic_genesis_block = Block("I am the First Block")
return basic_genesis_block
# class to chain the blocks
class Blockchain:
def __init__(self):
self.chain = [genesis_block()]
self.difficulty = 3
self.pendingTransaction = []
self.reward = 10
# is used to get the last box in the chain that is used to get the previous hash
def get_last_block(self):
return self.chain[len(self.chain) - 1]
# mining of all pending transactions into 1 block
def mining_pending_transactions(self, minerRewardAddress):
# in reality not all of the pending transaction go into the block the miner gets to pick which one to mine
new_block = Block(self.pendingTransaction)
new_block.mine_block(self.difficulty)
new_block.previous_hash = self.get_last_block().hash
print("Previous Block's Hash: " + new_block.previous_hash)
testChain = []
for transaction_data in new_block.transaction_data:
temp = json.dumps(transaction_data.__dict__, indent=5, separators=(',', ': '))
testChain.append(temp)
pprint.pprint(testChain)
self.chain.append(new_block)
print("Block's Hash: " + new_block.hash)
print("Block added")
if minerRewardAddress == 'default':
self.reward = 0
else:
self.reward = 10
rewardTrans = Transaction("System", minerRewardAddress, self.reward)
self.pendingTransaction.append(rewardTrans)
self.pendingTransaction = []
# function to check if the blockchain is valid or not by checking current block's hash and previous block's hash
def isValid(self):
for x in range(1, len(self.chain)):
currentBlock = self.chain[x]
previous_hash = self.chain[x - 1].hash
if currentBlock.previous_hash != previous_hash:
return "The Chain is not valid!"
return "The Chain is valid and secure"
# function to add the transaction in pending transaction list
def create_transaction(self, transaction):
self.pendingTransaction.append(transaction)
# function to get balance of a person by checking all the transactions
def get_balance(self, walletAddress):
balance = 0
for block in self.chain:
if block.previous_hash == "":
# don't check the first block as it has no data
continue
for transaction in block.transaction_data:
if transaction.from_wallet == walletAddress:
balance -= transaction.amount
if transaction.to_wallet == walletAddress:
balance += transaction.amount
return balance
# class to create a transaction between 2 people for a specific amount
class Transaction:
def __init__(self, from_wallet, to_wallet, amount):
self.from_wallet = from_wallet
self.to_wallet = to_wallet
self.amount = amount
# function for printing the data
def __str__(self):
# str(self.__class__) + ": " +
return str(self.__dict__)
# main runner code
RPS_money = Blockchain()
while True:
print("Select an option:- ")
choice = int(input("1.Mine currency\n2.Check Balance\n3.Send money\n4.Check transactions\n5.Check if chain is "
"valid or not\n6.Exit\n"))
if choice == 1:
person = str(input("Who is mining?"))
RPS_money.mining_pending_transactions(person)
print("")
print("10 coins are added to", person)
print("")
elif choice == 2:
person = str(input("Whose balance is to be checked?"))
RPS_money.mining_pending_transactions("default")
print("")
print(person + " has " + str(RPS_money.get_balance(person)) + " Coins on their account")
print("")
elif choice == 3:
from_who = str(input("Who is sending money? "))
to_who = str(input("Who is receiving money? "))
money = -1
while money < 0:
money = float(input("How much money is to be transferred? "))
RPS_money.create_transaction(Transaction(from_who, to_who, money))
print("")
print("Money has been transferred\n")
elif choice == 4:
chain_list = RPS_money.chain
RPS_money.mining_pending_transactions("default")
print("")
print("Block 1 :- ")
count = 2
print(chain_list[0].transaction_data)
for i in chain_list[1:]:
print("Block ", count, ":- ")
if type(i.transaction_data) == list:
print("Previous hash:- ", i.previous_hash)
print("Current hash :- ", i.hash)
print(*i.transaction_data, sep='\n')
print("")
else:
print(i.transaction_data)
count = count + 1
print("")
elif choice == 5:
if RPS_money.isValid():
print("The chain is valid")
else:
print("The chain is invalid")
print("")
elif choice > 5:
print("Thank You....Visit again later")
exit()
else:
print("Invalid Option")
| true |
c95767d7e93bd7654829681e1431fdb84a3b3b4b | Python | gcallah/utils | /html_checker.py | UTF-8 | 5,665 | 2.671875 | 3 | [] | no_license | #!/usr/bin/python3
"""
Checks html syntax.
"""
from html.parser import HTMLParser
from html_content_spec import content_spec, _ANY_CONTENT, _NO_CONTENT
import re
import argparse
try:
from typing import List, Set, Dict # noqa F401
except ImportError:
print("WARNING: Typing module is not found.")
DEV_FEATURE_ON = False # type: bool
ARG_ERROR = 1 # type: int
PARSE_ERROR = 2 # type: int
MAX_LINE = 80 # type: int
EXCEPS = "_EXCEPTIONS"
tag_stack = [] # type: List[str]
line_no = 0 # type: int
saw_error = False # type: bool
tag_error = False # type: bool
tag_check = False # type: bool
void_tags = {"area", "base", "br", "col", "hr", "img", "input", "link",
"meta", "param"} # type: Set[str]
in_sig_tag = {"pre": False, "script": False, "a": False,
"style": False} # that's all for now!
def line_msg(): # type: () -> str
"""
A little func to regularize reporting line #s for errors.
"""
return " at line number " + str(line_no)
def is_tag_in_spec(tag): # (str) -> bool
"""
func to see if the tag is in content_spec
"""
if tag not in content_spec and tag not in content_spec[EXCEPS]:
print("WARNING: " + tag + " not found in content_spec")
# Not necessarily an error, more like a warning
# saw_error = True
return False
return True
def is_valid_content(tag, attrs): # type: (str, str) -> bool
"""
Checks if the given tag is valid or can be placed within the parent tag
"""
# print("IS_VALID_CONTENT ==========")
# print("TAG: " + tag)
# print("tag_stack: " + str(tag_stack))
# print("tag_stack len: " + str(len(tag_stack)))
# If we don't know about the tag, we will not do any checks
# Just inform the user
if not is_tag_in_spec(tag):
return True
if len(tag_stack) > 0 and tag not in content_spec[EXCEPS]:
do_while = True
parent_index = -1
parent_model = []
# Processes content models that are transparent
# Must get model from an older parent
while do_while or "transparent" in parent_model:
do_while = False
ptag = tag_stack[parent_index]
if (is_tag_in_spec(ptag) and ptag not in content_spec[EXCEPS]):
parent_model = content_spec[ptag]["content_model"]
parent_index -= 1
else:
# Parent tag not in spec or is part of exceptions:
return True
tag_categories = content_spec[tag]["categories"]
for model in parent_model:
for category in tag_categories:
# If parent expects no children tags, then tag is illegal
if model == _NO_CONTENT:
return False
if model == _ANY_CONTENT or model == tag or model == category:
return True
return False
return True
class OurHTMLParser(HTMLParser):
"""
Our descendant of base HTMLParser class: we override just the methods we
need to.
"""
def __init__(self): # type: () -> None
super(OurHTMLParser, self).__init__(convert_charrefs=False)
def handle_starttag(self, tag, attrs): # type: (str, object) -> None
"""
This is a callback function that is used by HTMLParser for start tags:
it is called!
"""
if tag in in_sig_tag:
in_sig_tag[tag] = True
if tag not in void_tags:
if DEV_FEATURE_ON:
if is_valid_content(tag, attrs) is False:
print("ERROR: illegal tag" + line_msg() + ". ")
tag_stack.append(tag)
def handle_endtag(self, tag): # type: (str) -> None
global saw_error # type :bool
if not tag_stack:
print("ERROR: unmatched close tag '" + tag + "'" + line_msg())
saw_error = True
elif tag not in void_tags:
open_tag = tag_stack.pop()
if tag != open_tag:
print("ERROR: " +
"Close tag '" + tag +
"' does not match open tag '" + open_tag +
"'" + line_msg())
saw_error = True
if tag in in_sig_tag:
in_sig_tag[tag] = False
def handle_data(self, data): # type: (str) -> None
"""
Here we can look for long lines or other such problems.
"""
global saw_error # type :bool
if(not in_sig_tag["pre"] and not in_sig_tag["a"] and not
in_sig_tag["script"]):
if len(data) > MAX_LINE:
print("WARNING: long line found" + line_msg())
print(data)
if re.search('\x09', data):
print("WARNING: tab character found" + line_msg() +
"; please uses spaces instead of tabs.")
if not in_sig_tag["script"] and re.search('[<>]', data):
print("ERROR: Use > or < instead of < or >" + line_msg())
saw_error = True
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("html_filename")
arg_parser.add_argument("-t", action="store_true")
arg_parser.add_argument("-d", action="store_true",
help="turns on dev features")
args = arg_parser.parse_args()
parser = OurHTMLParser()
file_nm = args.html_filename
tag_check = args.t
if args.d:
DEV_FEATURE_ON = True
file = open(file_nm, "r")
for line in file:
line_no += 1
parser.feed(line)
if saw_error:
exit(PARSE_ERROR)
else:
exit(0)
| true |
85d3c6ad4f09f78cb8790dbaaf1fcb73c7dedd41 | Python | JoaquinRodriguez2006/Roboliga_2021 | /Funciones/Avanzar_retroceder_girar.py | UTF-8 | 595 | 3.109375 | 3 | [] | no_license | from controller import Robot
timeStep = 32
max_velocity = 6.28
robot = Robot()
# Definimos las ruedas
wheel1 = robot.getDevice("wheel1 motor") # Create an object to control the left wheel
wheel2 = robot.getDevice("wheel2 motor") # Create an object to control the right wheel
# Definimos su movimiento infinito
wheel1.setPosition(float("inf"))
wheel2.setPosition(float("inf"))
# Para avanzar:
speed1 = max_velocity
speed2 = max_velocity
# Definimos las variables de avanzar
wheel1.setVelocity(speed1)
wheel2.setVelocity(speed2)
# Esto debe ir obligatoriamente al final | true |
b9efc213ff01fcb3fd3eb6e59720ec7e37dc01f7 | Python | csernazs/misc | /euler/p024.py | UTF-8 | 143 | 3.03125 | 3 | [] | no_license |
from itertools import permutations, islice
for i in islice(permutations("0123456789", 10), 999999, 1000000):
print "".join(map(str, i))
| true |
80df997c24ba14a2cea7f09faa48e9507d011c46 | Python | meryzu/faults | /src/MainFaults.py | UTF-8 | 5,109 | 2.578125 | 3 | [] | no_license | import pandas as pd
from Process import Prepare
from Scenaries import Scene
extention=5 #Number of time steps to be taken
df=pd.read_csv('../data/FallasJunioNew.csv') #read data from csv file
data=Prepare.prepare(df,16,20,24,28) #posicion de la falla en el array de fallas (failureCode)
#Incluir con data las dos tablas de training y testing
#train=data.sample(frac=0.8,random_state=200)
#test=data.drop(train.index)
z=Scene.scene(data,'4H','Time')#H: horas, min: minutos, 24h es x dia
# to combine X1
scenary_x1_1=Scene.scene1(z,extention,28)
scenary_x1_2=Scene.scene2(z,extention,16,28)
scenary_x1_3=Scene.scene2(z,extention,20,28)
scenary_x1_4=Scene.scene2(z,extention,24,28)
scenary_x1_5=Scene.scene3(z,extention,16,20,28)
scenary_x1_6=Scene.scene3(z,extention,16,24,28)
scenary_x1_7=Scene.scene3(z,extention,20,24,28)
scenary_x1_8=Scene.scene4(z,extention,16,20,24,28)
scenary_x1_1.to_csv('../data/datascenary_x1_1.csv',index=False)
scenary_x1_2.to_csv('../data/datascenary_x1_2.csv',index=False)
scenary_x1_3.to_csv('../data/datascenary_x1_3.csv',index=False)
scenary_x1_4.to_csv('../data/datascenary_x1_4.csv',index=False)
scenary_x1_5.to_csv('../data/datascenary_x1_5.csv',index=False)
scenary_x1_6.to_csv('../data/datascenary_x1_6.csv',index=False)
scenary_x1_7.to_csv('../data/datascenary_x1_7.csv',index=False)
scenary_x1_8.to_csv('../data/datascenary_x1_8.csv',index=False)
###### to combine X2
#data=Prepare.prepare(df,20,24,28,16) #posicion de la falla en el array de fallas (failureCode)
#Incluir con data las dos tablas de training y testing
#train=data.sample(frac=0.8,random_state=200)
#test=data.drop(train.index)
#z=Scene.scene(train,'1H','Time')#H: horas, min: minutos
# to combine x2
scenary_x2_1=Scene.scene1(z,extention,16)
scenary_x2_2=Scene.scene2(z,extention,20,16)
scenary_x2_3=Scene.scene2(z,extention,24,16)
scenary_x2_4=Scene.scene2(z,extention,28,16)
scenary_x2_5=Scene.scene3(z,extention,20,24,16)
scenary_x2_6=Scene.scene3(z,extention,24,28,16)
scenary_x2_7=Scene.scene3(z,extention,20,28,16)
scenary_x2_8=Scene.scene4(z,extention,20,24,28,16)
scenary_x2_1.to_csv('../data/datascenary_x2_1.csv',index=False)
scenary_x2_2.to_csv('../data/datascenary_x2_2.csv',index=False)
scenary_x2_3.to_csv('../data/datascenary_x2_3.csv',index=False)
scenary_x2_4.to_csv('../data/datascenary_x2_4.csv',index=False)
scenary_x2_5.to_csv('../data/datascenary_x2_5.csv',index=False)
scenary_x2_6.to_csv('../data/datascenary_x2_6.csv',index=False)
scenary_x2_7.to_csv('../data/datascenary_x2_7.csv',index=False)
scenary_x2_8.to_csv('../data/datascenary_x2_8.csv',index=False)
df=pd.read_csv('../data/FallasJunioNew.csv') #read data from csv file
data=Prepare.prepare(df,16,20,24,28) #posicion de la falla en el array de fallas (failureCode)
#Incluir con data las dos tablas de training y testing
###### to combine X3
data=Prepare.prepare(df,16,24,28,20) #posicion de la falla en el array de fallas (failureCode)
#Incluir con data las dos tablas de training y testing
#z=Scene.scene(train,'1H','Time')#H: horas, min: minutos
# to combine x3
scenary_x3_1=Scene.scene1(z,extention,20)
scenary_x3_2=Scene.scene2(z,extention,16,20)
scenary_x3_3=Scene.scene2(z,extention,24,20)
scenary_x3_4=Scene.scene2(z,extention,28,20)
scenary_x3_5=Scene.scene3(z,extention,16,24,20)
scenary_x3_6=Scene.scene3(z,extention,16,28,20)
scenary_x3_7=Scene.scene3(z,extention,24,28,20)
scenary_x3_8=Scene.scene4(z,extention,16,24,28,20)
scenary_x3_1.to_csv('../data/datascenary_x3_1.csv',index=False)
scenary_x3_2.to_csv('../data/datascenary_x3_2.csv',index=False)
scenary_x3_3.to_csv('../data/datascenary_x3_3.csv',index=False)
scenary_x3_4.to_csv('../data/datascenary_x3_4.csv',index=False)
scenary_x3_5.to_csv('../data/datascenary_x3_5.csv',index=False)
scenary_x3_6.to_csv('../data/datascenary_x3_6.csv',index=False)
scenary_x3_7.to_csv('../data/datascenary_x3_7.csv',index=False)
scenary_x3_8.to_csv('../data/datascenary_x3_8.csv',index=False)
###### to combine X4
#data=Prepare.prepare(df,16,24,28,20) #posicion de la falla en el array de fallas (failureCode)
#Incluir con data las dos tablas de training y testing
#z=Scene.scene(train,'1H','Time')#H: horas, min: minutos
# to combine x4
scenary_x4_1=Scene.scene1(z,extention,24)
scenary_x4_2=Scene.scene2(z,extention,16,24)
scenary_x4_3=Scene.scene2(z,extention,20,24)
scenary_x4_4=Scene.scene2(z,extention,28,24)
scenary_x4_5=Scene.scene3(z,extention,16,20,24)
scenary_x4_6=Scene.scene3(z,extention,16,28,24)
scenary_x4_7=Scene.scene3(z,extention,20,28,24)
scenary_x4_8=Scene.scene4(z,extention,16,20,28,24)
scenary_x4_1.to_csv('../data/datascenary_x4_1.csv',index=False)
scenary_x4_2.to_csv('../data/datascenary_x4_2.csv',index=False)
scenary_x4_3.to_csv('../data/datascenary_x4_3.csv',index=False)
scenary_x4_4.to_csv('../data/datascenary_x4_4.csv',index=False)
scenary_x4_5.to_csv('../data/datascenary_x4_5.csv',index=False)
scenary_x4_6.to_csv('../data/datascenary_x4_6.csv',index=False)
scenary_x4_7.to_csv('../data/datascenary_x4_7.csv',index=False)
scenary_x4_8.to_csv('../data/datascenary_x4_8.csv',index=False)
| true |
5df9207fc175340aeadc1ef747430b9a7a02e7cd | Python | learnitmyway/tictactoe_ml | /test_train_ai.py | UTF-8 | 811 | 2.875 | 3 | [] | no_license | from train_ai import update_ai
from ai import AI
from game import Game, get_winner, X, O, EMPTY
class TestTrainAI:
def test_update_ai(self):
game = Game()
previous_board = [
[X, X, EMPTY],
[O, O, EMPTY],
[EMPTY, EMPTY, EMPTY]
]
board = [
[X, X, X],
[O, O, EMPTY],
[EMPTY, EMPTY, EMPTY]
]
game.board = board
action = (0, 2)
last = {
X: {"board": board, "action": action},
O: {"board": previous_board, "action": (1, 2)}
}
winner = get_winner(board)
ai = AI()
update_ai(previous_board, game, winner, ai, action, last)
# FIXME: How can I assert ai.update_q was called twice with the correct arguments?
| true |
ad53dd3d441dff4168ba50bf1141c32a923fdb18 | Python | venugopalkadamba/Programming-Data-Structures-and-Algorithms-using-Python | /GeeksForGeeks_Placement_Course_Problems/Linked List/pairwise_swap.py | UTF-8 | 631 | 3.96875 | 4 | [] | no_license | class Node:
def __init__(self, data):
self.data = data
self.next = None
def pairwise_swap(head):
temp = head
while temp != None and temp.next!=None:
temp.data, temp.next.data = temp.next.data, temp.data
temp = temp.next.next
return head
def printLinkedList(head):
temp = head
while temp!=None:
print(temp.data, end=" ")
temp = temp.next
n1 = Node(10)
n2 = Node(20)
n3 = Node(30)
n4 = Node(40)
n5 = Node(50)
n1.next = n2
n2.next = n3
n3.next = n4
n4.next = n5
head = n1
printLinkedList(head)
head = pairwise_swap(head)
print()
printLinkedList(head) | true |
95a9e00b62d2e6bddf443b2abb6cfd7f32378d29 | Python | BlackDragonN001/BZCLauncher | /application/exceptions.py | UTF-8 | 576 | 2.625 | 3 | [
"MIT"
] | permissive | """
exceptions.py
Python source file defining launcher specific exception types.
This software is licensed under the MIT license. Refer to LICENSE.txt for more
information.
"""
class LauncherException(exception):
"""
An exception type representing the most generic type of launcher exceptions.
"""
pass
class NotImplementedException(LauncherException):
"""
An exception type thrown when the user attempts to invoke some functionality that
is currently not supported by the launcher software.
"""
pass | true |
5e8817ef4e3dfe0d50da0f37c7718d1675919fcb | Python | ChistEgor/botweather | /BotWeather.py | UTF-8 | 2,587 | 3.203125 | 3 | [] | no_license | import requests
import json
from time import sleep
telegram_token = '1146634987:AAHhhmXXUCWbnF3RPNM-rUBEaQV_s4tV2Xs'
telegram_link = 'https://api.telegram.org/bot' + telegram_token + '/'
open_weather_api = 'db71095002de213ae977f3d6cd10ed4f'
open_weather_link = 'https://api.openweathermap.org/data/2.5/weather'
def get_updates():
"""
Getting json object
"""
url = telegram_link + 'getUpdates'
response = requests.get(url)
return response.json()
def get_json_file():
"""
Creating a file in text format Jason which
helps to select the necessary dictionary keys for future use.
"""
dict_updates = get_updates()
with open('updates.json', 'w') as file:
json.dump(dict_updates, file, indent=2, ensure_ascii=False)
def get_message():
"""
Getting a dictionary consisting of chat_id and text
which were originally taken from the file updates.json.
"""
data: dict = get_updates()
chat_id = data['result'][-1]['message']['chat']['id']
message_text = data['result'][-1]['message']['text']
message = {'chat_id': chat_id, 'text': message_text}
return message
def get_update_id(): # Зачем я его получил если не с чем сравнить и обновить
"""
Here we get update_id, which will be constantly refreshed.
"""
data: dict = get_updates()
current_update_id = data['result'][-1]['update_id']
return current_update_id
def get_temp(city):
"""
City is a parameter('text': message_text)
that contains the last text entered in telegram chat by the user.
"""
response = requests.get(url=open_weather_link,
params={'q': city, 'appid': open_weather_api, 'units': 'metric'})
if response.status_code == 200:
response = json.loads(response.content)
temperature: int = response['main']['temp']
result = round(temperature)
return f'Текущая температура {result} °С' # Here you can change the language :)
return 'Введите корректное название города'
def send_message_from_bot(chat_id, text):
url = telegram_link + f'sendMessage?chat_id={chat_id}&text={text}'
requests.get(url)
def main():
# current_update_id = get_update_id()
while True:
answer = get_message()
chat_id = answer['chat_id']
text = answer['text']
send_message_from_bot(chat_id, get_temp(text))
# sleep(3)
if __name__ == '__main__':
main()
| true |
18437414e18a3ce0160d5a21c4543fd5352fb317 | Python | SnapCapCo/SnapCapCo.github.io | /cs121/model/createImages.py | UTF-8 | 1,113 | 2.640625 | 3 | [] | no_license | import csv
import numpy as np
#import pandas as pd
import cv2
w,h = 48,48
with open('fer2013.csv') as csvfile:
csv_reader = csv.reader(csvfile, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0:
print(row)
line_count +=1
else:
emotion = row[0]
label = "images/" + str(emotion)+ "/" + str(line_count)+ ".png"
pixels = map(int, row[1].split())
pixellist = list(pixels)
usage = row[2]
pixelsarray = np.asarray(pixellist)
image = pixelsarray.reshape(w, h)
stackedimage = np.dstack((image,) * 3)
#cv2.imwrite(label, stackedimage)
# if line_count == 1:
# #print(label)
# cv2.imwrite("exampleimage.png", stackedimage)
# print(image.shape)
# print(image)
if emotion == 1:
if not image.any():
print("empty image data", label)
cv2.imwrite(label, image)
line_count+=1
| true |
ffd84f194fd4cc4a7f57fa730c20385d03f7b95b | Python | manokel/SW-Capstone-Flux | /client4.py | UTF-8 | 681 | 2.890625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 24 19:12:34 2017
@author: summer
"""
import socket
import sys
def read(port):
s = socket.socket()
host = '192.168.0.14' #(IP address of PC (server))
s.connect((host,port))
try:
msg = s.recv(1024)
s.close()
except socket.error as msg:
sys.stderr.write('error %s' %msg[1])
s.close()
print('close')
sys.exit(2)
return msg
if __name__ == '__main__':
port = 12345
while True:
print('hey, checking TCP socket')
data = read(port)
print('i just read %s' % data)
print('port num is: %d' % port)
port = port + 1
| true |
be4f4132a7aa0279713bfdcd0ee51a5c3ac224cb | Python | gregoryvit/hack.moscow_terryfoldflaps_round_2 | /server/api/app/api/v1/rating.py | UTF-8 | 319 | 2.546875 | 3 | [] | no_license | import json
from flask import abort, request
from . import api
@api.route("/rating", methods=['POST'])
def rating():
data = request.data
data_dict = json.loads(data)
product_id = data_dict['product_id']
new_rating = data_dict['rating']
print(product_id)
print(new_rating)
return "OK"
| true |
4844679bab6d06f644e121385365c28871083903 | Python | goodboyycb/python_samples | /python_3_排序.py | UTF-8 | 379 | 3.875 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 30 15:54:40 2018
@author: goodboyycb
"""
##排序排序
l=[]##这是一个 列表
print("说明:每次输入一个数,输入三次,从小到大进行排序")
for i in range(3): ##range(3),包含0,1,2
x=int(input('integer:\n'))
l.append(x) # 使用列表的 添加因素。对就是添加因素。
l.sort()
print (l)
| true |
1e5a96a6761fdd00e95fc536d7ecb7e791a27d1e | Python | pronob1010/Codeforces_Solve | /cf_697_a.py | UTF-8 | 437 | 2.8125 | 3 | [] | no_license | a,b,c = list(map(int,input().split()))
p=a
r =a
i = 1
while True:
if (c == p) or ( c == r):
print("YES",i)
break
if r > c or p>c :
print("No")
break
p = a + i * b
r = a + i * b + 1
i+=1
# def find(p,q,c):
# if (c == p) or (c == r):
# return 1
# if r > c or p>c :
# return 0
# p = a + i * b
# r = a + i * b + 1
# find(p, q, c)
# find(p, q, c) | true |