blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
d95acff188a7d285fcc6f4ba1b098be0c7a9e7e8 | Python | michaelulin/pytorch-ergonomics | /ergonomics/serialization.py | UTF-8 | 3,714 | 3.046875 | 3 | [] | no_license | """
These functions are ergonomics improvements for pytorch.
By saving the source code along with the model definitions,
you can reuse your models easily in new modules without needing to redefine
everything from scratch.
"""
import torch
import tempfile
import os
import zipfile
import sys
import pickle
# Name of the directory inside the archive that holds the bundled
# network-definition sources; the pickled module path is rewritten to this.
DEFAULT_MOD_PATH = 'portable-pytorch'
def save_portable(obj: torch.nn.Module, mod_path: str, output_path: str, path_delimiter='/'):
    """
    Portably serializes a torch module to disk, along with the network definition module required to execute it.
    Args:
        obj: the torch module to save.
        mod_path: the pythonic import path to your network definition module (it's recommended that this is
        a separate module, that is self contained (IE does not use resources from other parts of your project).
        output_path: the output file path where you'd like to save your model to.
        path_delimiter: how your operating system delimits filepaths, the default assumes you're using a unix kernel.
    Returns:
        output_path, for convenience.
    Example:
        # save portable version of your model
        from src.some_project.pytorch_defs.net import Net
        net = Net(...)
        output = net.forward()
        ...
        torch.save_portable(net, "src.some_project.pytorch_defs", "/tmp/myModel.zip")
    """
    # mkstemp returns an *open* OS file descriptor: close it so it is not leaked.
    fd, model_temp = tempfile.mkstemp()
    os.close(fd)
    input_system_path = mod_path.replace('.', path_delimiter)
    _save_portable(obj, mod_path, model_temp)
    source_files = []
    for root, dirs, files in os.walk(input_system_path):
        for file in files:
            true_path = os.path.join(root, file)
            # NOTE(review): files from nested sub-directories are flattened into
            # DEFAULT_MOD_PATH, so same-named files in different sub-dirs would
            # collide -- confirm the definition module is a single flat directory.
            false_path = os.path.join(DEFAULT_MOD_PATH, file)
            source_files.append((true_path, false_path))
    # `archive` instead of `zip` so the builtin is not shadowed.
    with zipfile.ZipFile(output_path, "w") as archive:
        archive.write(model_temp, "model.t7")
        for true_path, false_path in source_files:
            archive.write(true_path, false_path)
    os.remove(model_temp)
    return output_path
def _save_portable(obj: torch.nn.Module, input_mod_path: str, model_save_path: str):
    """Pickle *obj* and rewrite its embedded module path to the portable name.

    The original import path that pickle embeds in the stream is replaced
    byte-wise with DEFAULT_MOD_PATH, so the model can later be unpickled
    against the bundled source tree instead of the original project layout.
    """
    payload = pickle.dumps(obj).replace(
        input_mod_path.encode('utf-8'),
        DEFAULT_MOD_PATH.encode('utf-8'),
    )
    with open(model_save_path, 'wb') as fh:
        fh.write(payload)
def load_portable(local_file_path, temp_location="/tmp", path_delimiter='/'):
    """
    Portably deserializes a portable serialized torch model saved with "save_portable".

    The archive bundles the network-definition source code, so you don't need
    the original network definition module importable: the sources are
    extracted to `temp_location` and that directory is put on sys.path before
    unpickling.

    Args:
        local_file_path: The local system file path to your portable model object.
        temp_location: a scratchspace location to use for the unzip/depickling process, if you're
        using a non debian based kernel (or wholly different operating system) you'll want to change this.
        path_delimiter: how your operating system delimits filepaths, the default assumes you're using a unix kernel.
    Example:
        # Load a portable model
        local_portable_model_path = "/tmp/myModel.zip"
        model = torch.load_portable(local_portable_model_path)
        output = model.forward(...)
    """
    # `archive` instead of `zip` so the builtin is not shadowed; the archive
    # can be closed before unpickling, so extraction is done in its own block.
    with zipfile.ZipFile(local_file_path) as archive:
        archive.extractall(temp_location)
    # Make the bundled DEFAULT_MOD_PATH package importable during unpickling.
    sys.path.insert(0, temp_location)
    model = _load_portable("{}{}model.t7".format(str(temp_location), path_delimiter))
    return model
def _load_portable(model_path):
    # Unpickle a model file produced by _save_portable.
    # NOTE: pickle.load executes arbitrary code -- only load trusted archives.
    with open(model_path, 'rb') as f:
        mod = pickle.load(f)
    return mod | true |
363cfa7a201cb68c71d706dfb62eac7a9c81da53 | Python | pgkavade/comp110-21f-workspace | /exercises/ex01/numeric_operators.py | UTF-8 | 686 | 4.03125 | 4 | [] | no_license | """practicing the numeric operators, type conversions, and string concatenation."""
__author__ = "730395347"
# Read the two operands as integers.
left_hand: int = int(input("Left-hand side: "))
right_hand: int = int(input("Right-hand side: "))
# Compute power, true division, floor division and remainder.
exponent: int = left_hand ** right_hand
divison: float = (left_hand / right_hand)  # NOTE(review): name is a typo for "division"; kept as-is
integer_division: int = (left_hand // right_hand)
remainder: int = (left_hand % right_hand)
# Report each operation as "<lhs> <op> <rhs> is <result>".
print(str(left_hand) + " ** " + str(right_hand) + " is " + str(exponent))
print(str(left_hand) + " / " + str(right_hand) + " is " + str(divison))
print(str(left_hand) + " // " + str(right_hand) + " is " + str(integer_division))
print(str(left_hand) + " % " + str(right_hand) + " is " + str(remainder)) | true |
c254ca6a0419a25c0d357b063dc7d70652927374 | Python | raunak-shr/intermediate-python-course | /dice_roller.py | UTF-8 | 711 | 3.953125 | 4 | [] | no_license | import random
def main():
    """Roll two six-sided dice, printing each roll and then their total."""
    rolls = 2
    total = 0
    for _ in range(rolls):
        value = random.randint(1, 6)
        total += value
        print(f'You rolled a {value}')
    print(f'You rolled a total of {total}')
# Run the demo only when executed directly (not on import).
if __name__== "__main__":
    main()
#EXPLANATION OF THE CODE
#dice_rolls-->no. of times dice will be rolled
#dice_sum-->variable for finally adding the outcome of every roll
#roll = 3
#dice_sum(0) = dice_sum(0) + roll(3) = 3
#print roll(3)
#go up
#roll = 4
#dice_sum(3) = dice_sum(3) + roll( now 4) = 7
#print roll(4)
#2 times done-->exit loop
#print dice_sum(now 7)
| true |
58e63955326486c3c84d6cae693aa022c3ee976a | Python | leducthanguet/PyContinual | /src/tools/prep_ner.py | UTF-8 | 8,816 | 2.578125 | 3 | [] | no_license | import nltk
import numpy as np
import json
from collections import defaultdict
import xml.etree.ElementTree as ET
import random
random.seed(1337)
np.random.seed(1337)
"""TODO: this file is not well-tested but just copied from another repository.
"""
# valid_split=150
def change_beginning_to_B(label):
    """Rewrite labels in place so every entity starts with a B- tag.

    Some datasets (e.g. wikigold) tag every entity token with I-; this
    converts the first token of each entity run to B-.  Tokens already
    tagged B- correctly start an entity and are left untouched (the
    original version tracked only un-tagged runs, so an I- following a
    B- was wrongly re-tagged, and it tested `'B' in lab` by substring).

    Returns:
        The (mutated) label list, for convenience.
    """
    inside_entity = False  # are we currently inside an entity run?
    for idx, lab in enumerate(label):
        if lab == 'O':
            inside_entity = False
        else:
            # First non-O token after an O (or at the start) begins an entity.
            if not inside_entity and lab.startswith('I-'):
                label[idx] = 'B-' + lab[2:]
            inside_entity = True
    return label
def parse_ner(filename):
    """Parse a CoNLL-style NER file into a list of sentence records.

    Each record is {"id": int, "tokens": [...], "labels": [...]}.  Sentences
    are separated by blank/whitespace-only lines or -DOCSTART markers; the
    token is the first whitespace-separated column and the label is the last.
    For wikigold files (which lack B- tags) labels are rewritten so every
    entity starts with B-.

    Args:
        filename: path of the annotated file to read.

    Returns:
        List of sentence records with sequential ids starting at 0.
    """
    corpus = []
    sentence, label = [], []
    idx = 0
    # `with` ensures the handle is closed (the original leaked it).
    with open(filename) as f:
        for line in f:
            # Sentence boundary: empty line, document marker, or whitespace-only.
            if len(line) == 0 or line.startswith('-DOCSTART') or line[0] == "\n" or len(line.split()) == 0:
                if len(sentence) > 0:
                    if 'wikigold' in filename:  # only wikigold lacks B- tags
                        label = change_beginning_to_B(label)
                    print('sentence: ', sentence)
                    print('label: ', label)
                    corpus.append({"id": idx, "tokens": sentence, "labels": label})
                    sentence, label = [], []
                    idx += 1
                continue
            splits = line.split()
            if len(splits) < 2:
                continue  # malformed line: token with no label
            sentence.append(splits[0])
            label.append(splits[-1])
    # Flush the last sentence when the file does not end with a blank line.
    if len(sentence) > 0:
        if 'wikigold' in filename:
            # Bug fix: the original skipped the B- rewrite for the final sentence.
            label = change_beginning_to_B(label)
        corpus.append({"id": idx, "tokens": sentence, "labels": label})
        idx += 1
        print('sentence: ', sentence)
        print('label: ', label)
    return corpus
def _write_json(path, corpus):
    # Serialize a corpus as a {id: record} JSON object (same format as before).
    with open(path, "w") as fw:
        json.dump({rec["id"]: rec for rec in corpus}, fw, sort_keys=True, indent=4)


def _prep_presplit(name, fn_read, fn_write, train_file, dev_file, test_file):
    # Datasets shipped with dedicated train/dev/test files: nothing to split.
    print('================= ' + name + ' =========================')
    _write_json(fn_write + "/train.json", parse_ner(fn_read + train_file))
    _write_json(fn_write + "/dev.json", parse_ner(fn_read + dev_file))
    _write_json(fn_write + "/test.json", parse_ner(fn_read + test_file))


def _prep_split(name, fn_read, fn_write, data_file, train_rate, valid_rate,
                shuffle=False, test_file=None):
    # Datasets shipped as a single annotated file: split sequentially by rate.
    # When test_file is given, the tail is unused and a dedicated test file is
    # parsed instead (matching the original per-dataset logic).
    print('================= ' + name + ' =========================')
    corpus = parse_ner(fn_read + data_file)
    if shuffle:
        random.Random(0).shuffle(corpus)  # fixed seed: deterministic, "more robust"
    n_train = int(len(corpus) * train_rate)
    n_dev = int(len(corpus) * (train_rate + valid_rate))
    _write_json(fn_write + "/train.json", corpus[:n_train])
    _write_json(fn_write + "/dev.json", corpus[n_train:n_dev])
    if test_file is None:
        _write_json(fn_write + "/test.json", corpus[n_dev:])
    else:
        _write_json(fn_write + "/test.json", parse_ner(fn_read + test_file))


_prep_presplit('wnut2017', './data/ner/wnut2017/', './dat/ner/wnut2017/',
               'wnut17train.conll', 'emerging.dev.conll', 'emerging.test.annotated')
_prep_split('wikigold', './data/ner/wikigold/', './dat/ner/wikigold/',
            'wikigold.conll.txt', 0.8, 0.1)
_prep_split('re3d', './data/ner/re3d/', './dat/ner/re3d/',
            're3d-train.conll', 0.9, 0.1, test_file='re3d-test.conll')
_prep_split('ritter', './data/ner/ritter/', './dat/ner/ritter/',
            'ner.txt', 0.8, 0.1)
_prep_split('gum', './data/ner/gum/', './dat/ner/gum/',
            'gum-train.conll', 0.9, 0.1, test_file='gum-test.conll')
_prep_split('btc', './data/ner/btc/', './dat/ner/btc/',
            'btc.txt', 0.8, 0.1, shuffle=True)
# NOTE: the original parsed ieer.txt twice back to back; once is sufficient.
_prep_split('ieer', './data/ner/ieer/', './dat/ner/ieer/',
            'ieer.txt', 0.8, 0.1, shuffle=True)
_prep_presplit('ontonote', './data/ner/ontonote/', './dat/ner/ontonote/',
               'onto.train.ner', 'onto.development.ner', 'onto.test.ner')
_prep_presplit('conll2003', './data/ner/conll2003/', './dat/ner/conll2003/',
               'train.txt', 'valid.txt', 'test.txt')
| true |
96cc957b1853675567d4222586ec0f884d4cdfc8 | Python | singhbhaskar/projectEuler | /6difference_sonatural_sosquarenatural.py | UTF-8 | 133 | 2.84375 | 3 | [] | no_license | def differnce(n):
    # Project Euler #6: (sum of 1..n)^2 minus the sum of squares of 1..n.
    # NOTE: Python 2 source (print statement below); "/" here is integer division.
    sumOfN = n*(n+1)/2;
    sumofSqreN = n*(n+1)*(2*n+1)/6
    return sumOfN**2 - sumofSqreN
print differnce(100)
| true |
91aefa4c96b586fa59e4ad9d0fb1cfc6bd766f38 | Python | jettero/gr-alt-moving-average | /python/wmoving.py | UTF-8 | 1,698 | 2.859375 | 3 | [] | no_license |
from gnuradio import gr
from operator import itemgetter
import numpy, cmath, os
class wmoving(gr.sync_block):
    """
    Exponentially-weighted moving average:
        avg = alpha * new + (1 - alpha) * avg
    """
    def __init__(self, alpha=0.5, samples=False):
        """
        Create the block.

        Args:
            alpha: the weight of new information (vs the weight of the average)
                avg = ( alpha * new ) + ( (1 - alpha) * avg )
            samples: if given, derive alpha from an equivalent sample count:
                alpha = 2 / (1 + samples)
                avg = ( alpha * new ) + ( (1 - alpha) * avg )
            If both alpha and samples are given as arguments, samples overrides
            whatever is set for alpha.
        """
        if samples:
            self.set_samples(samples)
        else:
            self.set_alpha(alpha)
        self._first = True  # seed the average with the first sample seen
        gr.sync_block.__init__(self, "wmoving_average", ["float32"], ["float32"])

    def set_alpha(self, alpha):
        # Promote to extended precision so long runs accumulate less error.
        # NOTE(review): numpy.float128 is not available on every platform
        # (e.g. Windows) -- confirm the deployment targets support it.
        self._alpha = numpy.float128(alpha)
        # Derive beta from the *promoted* alpha so both share precision
        # (the original used the raw argument here).
        self._beta = 1 - self._alpha

    def set_samples(self, samples):
        self.set_alpha(numpy.float128(2) / (1 + samples))

    def work(self, input_items, output_items):
        p = 0
        if self._first and len(input_items[0]):
            # Initialize the running average with the very first sample.
            self._avg = input_items[0][p]
            output_items[0][p] = self._avg
            p = 1
            self._first = False
        while p < len(input_items[0]):
            self._avg = self._alpha * input_items[0][p] + self._beta * self._avg
            output_items[0][p] = self._avg
            p = p + 1
        if os.getenv("DEBUG_WMA"):
            # os.write requires bytes on Python 3 (the original passed str).
            os.write(2, ("alpha=%f; avg=%f\n" % (self._alpha, self._avg)).encode())
        return p
| true |
b1563fe7986a3f494f000aa499ad75351e121ca9 | Python | omite773/watersensor | /i2c_bb_devices.py | UTF-8 | 2,619 | 2.78125 | 3 | [] | no_license | import pigpio
import math
from time import sleep
arduino_addr = 0x04  # 7-bit i2c address of the Arduino slave
SDA = 22  # GPIO used for bit-banged SDA
SCL = 27  # GPIO used for bit-banged SCL
pi = pigpio.pi()
#Close bus if already open
try:
    pi.bb_i2c_close(SDA)
    sleep(0.2)
except pigpio.error as e:
    # Log message is Swedish ("restarting bb i2c port"); runtime string left untouched.
    print(str(e) + " Startar om bb i2c port " + str(SDA))
#Open bus on GPIO pins, 300KHz
bus = pi.bb_i2c_open(SDA,SCL,300000)
arduino_handle = pi.i2c_open(1, arduino_addr)
def close_bus():
    # Tear down: close the bit-banged bus, then release the pigpio handle.
    pi.bb_i2c_close(SDA)
    pi.stop()
def temp_send(raw):
    # Forward a raw environment byte array to the Arduino (register 0x03)
    # over the hardware i2c handle.
    #Bit-banging array
    pi.i2c_write_i2c_block_data(arduino_handle, 0x03, raw)
    #(s, buf) = pi.bb_i2c_zip(SDA,[4, arduino_addr, 2, 7, 1, 0x03, 3, 0])
def recieve(addr, mode, count):
    """Read `count` bytes from register `mode` of i2c device `addr`.

    Performs a register-pointer write followed by a read on the bit-banged
    bus; raises ValueError when pigpio reports a negative status.
    (Name keeps the original spelling for caller compatibility.)
    """
    # Point the device at the target register, then read it back.
    status, _ = pi.bb_i2c_zip(SDA, [4, addr, 2, 7, 1, mode, 3, 0])
    status, payload = pi.bb_i2c_zip(SDA, [4, addr, 2, 6, count, 3, 0])
    if status < 0:
        # status should be non-negative when the transfer succeeded
        raise ValueError('i2c error returned s < 0 on recieve')
    return payload
########## Functions for reading Arduino ##########
def arduino_init():
    """Probe the Arduino over i2c; return 1 if it responds, 0 otherwise."""
    try:
        (tmp, tmp2, tmp3) = read_arduino()
    except pigpio.error:
        # Arduino not connected
        return 0
    return 1
def read_arduino():
    """Read (electrical conductivity, battery voltage, current) from the Arduino.

    Each value arrives as a 16-bit word, low byte first, from registers
    0x00, 0x01 and 0x02 respectively.
    """
    def word(register):
        # Combine the little-endian byte pair into one unsigned 16-bit value.
        raw = recieve(arduino_addr, register, 2)
        return (int(raw[1]) << 8) | int(raw[0])

    return (word(0x00), word(0x01), word(0x02))
#May not be used!
########## Setting environment via temperature sensor #########
def set_environment(temperature, humidity=50):
    """Encode a temperature/humidity pair into the sensor's 4-byte format.

    Temperature is clamped to at least -25.0 C; a humidity outside 0..100
    falls back to the 50% default.
    """
    # Clamp to the minimum enterable temperature.
    if temperature < -25.0:
        temperature = -25.0
    # Out-of-range humidity falls back to the default.
    if not 0 <= humidity <= 100.0:
        humidity = 50
    # Humidity LSB is worth 0.5, so shift the rounded percentage up one bit.
    hum_perc = int(round(humidity)) << 1
    # Split into fractional and integer parts; drop the fractional sign.
    frac, whole = math.modf(temperature)
    fractional = math.fabs(frac)
    # Integer part: +25 offset, shifted past the 9 fractional bits.
    temp_high = (int(whole) + 25) << 9
    # Fractional LSB is worth 1/512 degC, sent as an integer.
    temp_low = int(fractional / 0.001953125) & 0x1FF
    temp_conv = temp_high | temp_low
    # NOTE(review): `buf` is assembled but never sent or returned here --
    # the transmit step looks truncated; confirm against the caller.
    buf = [hum_perc, 0x00, (temp_conv >> 8) & 0xFF, temp_conv & 0xFF]
| true |
a7079c587f83123da30a5717f2c309ac975d06cf | Python | RichardW35/ethereum-SQL | /organize.py | UTF-8 | 1,509 | 2.75 | 3 | [] | no_license | def get_block_data(block, web3):
    """Fetch and return the raw web3 block record for *block* (number or hash)."""
    block_data = web3.eth.getBlock(block)
    return block_data
def order_table_foreverstrong(hashh, block, web3, balance=False):
    """Build a SQLite-friendly dict for one Ethereum transaction.

    Args:
        hashh: transaction hash to look up.
        block: unused; kept for interface compatibility.
        web3: connected web3 instance (used for lookup and hex encoding).
        balance: unused flag; kept for interface compatibility.

    Returns:
        Dict with keys from/to/value/txHash/nonce/blockNumber.  `value` is
        stringified (it can exceed SQLite's integer range) and the binary
        hash is hex-encoded under the key "txHash".
    """
    # open transaction data
    tx_data = web3.eth.getTransaction(hashh)
    # build a foreverstrong table, converting types to be SQLite-compatible
    # (the original also extracted unused addr_from/addr_to locals -- removed)
    foreverstrong_table = {}
    for key in ('from', 'to', 'value', 'hash', 'nonce', 'blockNumber'):
        if key == "hash":
            foreverstrong_table["txHash"] = web3.toHex(tx_data[key])
        elif key == "value":
            foreverstrong_table["value"] = str(tx_data[key])
        else:
            foreverstrong_table[key] = tx_data[key]
    return foreverstrong_table
def execute_sql(table_foreverstrong):
    """Insert one transaction record into blockchain.db, creating the schema
    (tables + indexes) the first time the database file is seen.

    Args:
        table_foreverstrong: dict produced by order_table_foreverstrong.
    """
    import os
    from sql_helper import create_database, update_database, create_index
    import sqlite3 as sq3

    db_name = 'blockchain.db'
    db_is_new = not os.path.exists(db_name)
    # connect to the database (or use :memory: to put it in RAM)
    conn = sq3.connect(db_name)
    try:
        cur = conn.cursor()
        if db_is_new:
            print('Creating a new DB.')
            create_database(cur)
            create_index(cur)
        # Single insert path (the original duplicated this call per branch).
        update_database(cur, table_foreverstrong)
        conn.commit()
    finally:
        # Always release the connection, even if the insert fails.
        conn.close()
| true |
8c0a1d7211982c5c8efd28d20dbe8fd53ff01996 | Python | kckr/SimpleTkinterGUI | /dbutil.py | UTF-8 | 3,395 | 2.546875 | 3 | [] | no_license | import pyodbc
class DbUtil:
    """Helper for reading assembly/component data from a SQL Server database.

    Each query opens a fresh pyodbc connection and always closes the cursor
    and connection afterwards, even on error.  All user-supplied values are
    bound with parameter markers instead of string concatenation to prevent
    SQL injection; database/table names come from the constructor.
    """

    def __init__(self, dbname, tablename1, tablename2):  # class constructor
        self.dbname = dbname          # database name interpolated into queries
        self.tablename1 = tablename1  # assembly/component table
        self.tablename2 = tablename2  # assembly description table
        self.conn = None              # live connection while a query runs

    def connect(self):  # initiates the connection
        """Open a pyodbc connection and return a cursor for it."""
        # connection string (credentials are masked placeholders)
        conn_str = """Driver={ODBC Driver 13 for SQL Server}; \
                        Server=*****;\
                        Database=""" + self.dbname + """; \
                        UID=****; \
                        PWD=****; \
                        """
        self.conn = pyodbc.connect(conn_str, autocommit=True)  # pyodbc connection
        return self.conn.cursor()  # cursor object

    def _fetchall(self, query, params):
        """Run one parameterized query; always close cursor and connection."""
        cursor = self.connect()
        try:
            return cursor.execute(query, params).fetchall()
        finally:
            cursor.close()
            self.conn.close()

    def readdata(self, pin):  # read function by accepting pin
        """Return (assembly no, component no, desc, qty) rows for an assembly name."""
        query = ("SELECT [ASSEMBLY_ITEM_NO], [COMPONENT_NO], [COMPONENT_DESC], [QUANTITY] "
                 "FROM [" + self.dbname + "].[dbo].[" + self.tablename1 + "] "
                 "WHERE [ASSEMBLY_ITEM_NAME] = ?")  # '?' marker: value is bound, not concatenated
        record = self._fetchall(query, (pin,))
        print(record)
        return record

    def readTable2(self, pin):
        """Return description rows for an assembly item number."""
        query = ("SELECT [ASSEMBLY_ITEM_DESC] "
                 "FROM [" + self.dbname + "].[dbo].[" + self.tablename2 + "] "
                 "WHERE [ASSEMBLY_ITEM_NO] = ?")
        return self._fetchall(query, (pin,))

    def read_component(self, pin):
        """Return (assembly no, component desc) rows for a component number."""
        query = ("SELECT [ASSEMBLY_ITEM_NO], [COMPONENT_DESC] "
                 "FROM [" + self.dbname + "].[dbo].[" + self.tablename1 + "] "
                 "WHERE [COMPONENT_NO] = ?")
        return self._fetchall(query, (pin,))

    def read_description(self, pin):
        # Identical query to readTable2 in the original; delegate to keep them in sync.
        return self.readTable2(pin)

    def assemblyitemsno(self):
        """Return every assembly item number.

        NOTE(review): the original hard-codes the XML database here instead of
        self.dbname -- preserved as-is; confirm whether that is intentional.
        """
        query = "SELECT [ASSEMBLY_ITEM_NO] FROM [XML].[dbo].[ASSEMBLY_ITEM]"
        return self._fetchall(query, ())
| true |
7b0e6fc178aaa34d6a62cb494a0a5bc5393939cd | Python | gaaferHajji2/AI | /DataMining/guide2datamining/ch2/file.py | UTF-8 | 212 | 3.046875 | 3 | [] | no_license | import csv;
reader=csv.reader(open("./BX-Book-Ratings.csv", 'r'));
for row in reader:
print "Row is: ", row[0].split(';');
#print "Type of row is: ", type(row).__name__;
#print "length of row is: ", len(row);
| true |
6c1b85d7fa20a0f98e17ab4131c18f283b162087 | Python | aid8/CS01 | /Introduction to Computing/Algorithms Needed/DIA_convpoint.py | UTF-8 | 476 | 3.5625 | 4 | [] | no_license | n = [23,0,0,42,36,29,0,0,25,1]
legit = 10
left = 0
right = 9
tSh = 0
print("Left:",left,", Right:", right," Legit:",legit)
print(*n)
while left < right:
if n[left] != 0:
left += 1
else:
legit -= 1
n[left] = n[right]
right -= 1
print("Copied: 1")
tSh += 1
print("Left:",left,", Right:", right," Legit:",legit)
print(*n)
if n[left] == 0:
legit -= 1
print("Legit:",legit,"\nSwap Count:",tSh) | true |
7872d9b98631115dd9f5e21c58403eab838e813d | Python | SuwaliArora/Demo-Project-Of-RESTFUL-API | /NewProject3/myapp.py | UTF-8 | 1,687 | 2.734375 | 3 | [] | no_license | #third party application
import requests
import json
URL = "http://127.0.0.1:8000/studentdata/"
def get_data(id=None):
    """GET one record (by id) or every record and print the JSON response.

    The optional filter is sent as a JSON request body, mirroring how the
    demo API server reads it.
    """
    payload = {} if id is None else {'id': id}
    response = requests.get(
        url=URL,
        headers={'content-Type': 'application/json'},
        data=json.dumps(payload),
    )
    print(response.json())
get_data(2)  # demo call: fetch the record with id 2
def post_data():
    """POST one hard-coded student record and print the API's JSON response."""
    payload = {'name': 'rohita', 'roll': 10, 'city': 'goa'}
    response = requests.post(
        url=URL,
        headers={'content-Type': 'application/json'},
        data=json.dumps(payload),  # serialize the python dict as a JSON body
    )
    print(response.json())
#post_data()
def update_data():
    """PUT updated fields for the record with id 2 and print the response."""
    payload = {'id': 2, 'name': 'ravisha', 'roll': 106, 'city': 'pune'}
    response = requests.put(
        url=URL,
        headers={'content-Type': 'application/json'},
        data=json.dumps(payload),  # serialize the python dict as a JSON body
    )
    print(response.json())
#update_data()
def delete_data():
    """DELETE the record with id 3 and print the API's JSON response."""
    payload = {'id': 3}
    response = requests.delete(
        url=URL,
        headers={'content-Type': 'application/json'},
        data=json.dumps(payload),  # serialize the python dict as a JSON body
    )
    print(response.json())
delete_data()  # demo call: delete the record with id 3
| true |
934e1fd8c5c647c6ee4fe9c3678768ef717caf7b | Python | VectorInstitute/vector_cv_tools | /vector_cv_tools/datasets/mvtec.py | UTF-8 | 8,326 | 2.609375 | 3 | [
"MIT"
] | permissive | import os
from glob import glob
from pathlib import Path
from torch.utils.data import Dataset
from ..utils import load_image_to_numpy, load_binary_mask_to_numpy
MVTec_OBJECTS = ('bottle', 'cable', 'capsule', 'carpet', 'grid', 'hazelnut',
'leather', 'metal_nut', 'pill', 'screw', 'tile', 'toothbrush',
'transistor', 'wood', 'zipper') #15 different objects
MVTec_CON_TYPES = (
"good", "bent", "bent_lead", "bent_wire", "broken", "broken_large",
"broken_small", "broken_teeth", "cable_swap", "color", "combined",
"contamination", "crack", "cut", "cut_inner_insulation", "cut_lead",
"cut_outer_insulation", "damaged_case", "defective", "fabric_border",
"fabric_interior", "faulty_imprint", "flip", "fold", "glue", "glue_strip",
"gray_stroke", "hole", "liquid", "manipulated_front", "metal_contamination",
"misplaced", "missing_cable", "missing_wire", "oil", "pill_type", "poke",
"poke_insulation", "print", "rough", "scratch", "scratch_head",
"scratch_neck", "split_teeth", "squeeze", "squeezed_teeth", "thread",
"thread_side", "thread_top") # 49 contamination types
def validate_arguments(split, obj_types, con_types):
    """Validate the split/object/contamination arguments for the MVTec dataset.

    Arguments:
        split (str): String indicating train or test split
        obj_types (list): object-type names to load
        con_types (list): contamination-type names to load

    Raises:
        ValueError: on an unknown split, object, or contamination name.
    """
    if split not in ("train", "test"):
        raise ValueError(f"Split {split} is not supported")
    for name in obj_types:
        if name not in MVTec_OBJECTS:
            raise ValueError(f"Invalid object {name}")
    for name in con_types:
        if name not in MVTec_CON_TYPES:
            raise ValueError(f"Invalid contamination type {name}")
def id_to_object(obj_id: int) -> str:
    """
    Arguments:
        obj_id (int): Object's numerical ID (index into MVTec_OBJECTS)
    Returns:
        String that corresponds to the object's numerical ID
    """
    return MVTec_OBJECTS[obj_id]
def object_to_id(obj_name: str) -> int:
    """
    Arguments:
        obj_name (str): The object name
    Returns:
        ID that corresponds to the the object's name
        (raises ValueError for an unknown name)
    """
    return MVTec_OBJECTS.index(obj_name)
def id_to_contaimination(con_id: int) -> str:
    """
    Arguments:
        con_id (int): Contamination's numerical ID (index into MVTec_CON_TYPES)
    Returns:
        String that corresponds to the contamination's numerical ID
    """
    # NOTE: function name misspells "contamination"; kept for caller compatibility.
    return MVTec_CON_TYPES[con_id]
def contamination_to_id(con_name: str) -> int:
    """
    Arguments:
        con_name (str): The contamination name
    Returns:
        ID that corresponds to the the contamination's name
        (raises ValueError for an unknown name)
    """
    return MVTec_CON_TYPES.index(con_name)
def get_valid_paths(root, folder_type, obj_types, contamination_types):
    """Collect image paths matching the requested objects and contaminations.

    Arguments:
        root (str): The root path of the MVTec dataset directory
        folder_type (str): This can be either train, test or ground_truth
        obj_types (list): object types to load
        contamination_types (list): contamination types to load

    Returns:
        Sorted list of matching .png image paths.
    """
    wanted = set(contamination_types)
    matches = []
    for obj in set(obj_types):
        pattern = os.path.join(root, f"{obj}/{folder_type}/*/*.png")
        for candidate in glob(pattern):
            # Keep only images whose contamination folder was requested.
            if get_label_from_path(candidate)[1] in wanted:
                matches.append(candidate)
    return sorted(matches)
def get_mask_path(path_to_img):
    """Derive the ground-truth mask path from an image path.

    e.g. .../object_type/folder_type/broken_large/000.suffix maps to
    .../object_type/ground_truth/broken_large/000_mask.suffix.
    The returned path is not guaranteed to exist on disk.

    Arguments:
        path_to_img (str): Path to the image
    """
    img = Path(path_to_img)
    parts = list(img.parts)
    # Rename the file "<stem>_mask<suffix>" and swap the split folder
    # (three levels up from the file) for "ground_truth".
    parts[-1] = img.stem + "_mask" + img.suffix
    parts[-3] = "ground_truth"
    return os.path.join(*parts)
def get_label_from_path(path_to_img):
    """Extract (object_name, contamination_type), lower-cased, from an image path.

    Expects the MVTec layout .../object_type/folder_type/contamination/file,
    e.g. -> (bottle, broken_large).

    Arguments:
        path_to_img (str): Path to the image
    """
    parts = Path(path_to_img).parts
    # object folder is 3 levels above the file, contamination folder is its parent
    return parts[-4].lower(), parts[-2].lower()
class MVTec(Dataset):
    """Map-style MVTec anomaly-detection dataset of images and, for the test
    split, their ground-truth defect masks."""

    def __init__(self,
                 root,
                 split="train",
                 obj_types=None,
                 con_types=None,
                 mask_only=False,
                 transforms=None):
        """
        Arguments:
            root (string): Root directory of the MVTec dataset
            split (string): Options are train and test
            obj_types (list, optional): List of strings containing the object
                types to load
            con_types (list, optional): List of strings containing the contaminations
                to load
            mask_only (bool, optional): If the split is test, this decides if
                we only load from images that have a corresponding mask(i.e
                containmation that is not of type "good")
            transforms (callable, optional): A callable object that takes in
                img and target as it's input and returns their transformed
                version

        NOTE:
            The train set for MVTec has no masks, so leave mask_only to be False and
                con_types to be None or ["good"] to load images in the train set
        """
        # None means "everything"; names are matched case-insensitively.
        obj_types = [x.lower() for x in obj_types
                    ] if obj_types is not None else MVTec_OBJECTS
        con_types = [x.lower() for x in con_types
                    ] if con_types is not None else MVTec_CON_TYPES

        # removing good type contaminations will ensure that only images with mask
        # will get loaded
        if mask_only:
            con_types = [x for x in con_types if x != "good"]

        validate_arguments(split, obj_types, con_types)
        paths = get_valid_paths(root, split, obj_types, con_types)

        if len(paths) == 0:
            raise ValueError(
                "No data points available for the specified combination")

        self.paths = paths
        self.load_mask = (split == "test")  # only the test split has masks
        self._transforms = transforms

    def __getitem__(self, index):
        """
        Arguments:
            index (int): Index

        Returns:
            tuple: Tuple (img, target). Where target is a dictionary containing
                the label names and label ids in the format:
                {
                    "label_names" : ("the type of object", "the type of contamination"),
                    "label_ids" : (int: the unique integer ID of the object,
                            int: the unique integer ID of the contamination )
                }

        NOTE:
            Normal/good images have contamination type and ID as "good". and 0,
                respectively.
            If the mask doesn't exist for the image, target['mask'] will be None
        """
        path_to_load, mask = self.paths[index], None
        img = load_image_to_numpy(path_to_load, mode="RGB")
        obj_type, con_type = get_label_from_path(path_to_load)
        mask_path = get_mask_path(path_to_load)

        # Masks only exist for defective test images; otherwise mask stays None.
        if self.load_mask and os.path.exists(mask_path):
            mask = load_binary_mask_to_numpy(mask_path)

        target = {}
        target["label_names"] = (obj_type, con_type)
        target["label_ids"] = (object_to_id(obj_type),
                               contamination_to_id(con_type))
        target["mask"] = mask

        if self._transforms is not None:
            img, target = self._transforms(img, target)

        return img, target

    def __len__(self):
        return len(self.paths)
| true |
3a14ed3928e44164bb09cb99742299f9194ebb50 | Python | khalillakhdhar/exercices_python_et_codes | /parite.py | UTF-8 | 131 | 3.53125 | 4 | [] | no_license | x=0
# Keep asking for integers until the user enters one >= 2000.
while x < 2000:
    x = int(input("donner un entier"))
    # Even numbers leave no remainder when divided by 2.
    parity = "paire" if x % 2 == 0 else "impaire"
    print(parity)
| true |
eccdd9474f74a8fcc1c146f62b639cdce349cb97 | Python | amenson1983/2-week | /_2_week/Homework_2_week/average_thick_rain.py | UTF-8 | 589 | 3.703125 | 4 | [] | no_license | years = int(input("Укажите количество лет: "))
tot_thick = 0
# Total number of months over the whole period (used for the average and
# the final report).
month_quant = years * 12
for _year in range(years):
    for _month in range(12):
        rain_thick = int(input("Введите толщину осадков для месяца"))
        tot_thick += rain_thick
average = tot_thick / month_quant
print("Средняя толщина осадков за период в месяц: ", average)
print("Общяя толщина осадков за период: ", tot_thick)
# FIX: month_quant already equals years*12; the old code printed
# month_quant*years, i.e. the month count multiplied by years a second time.
print("Общее количество месяцев: ", month_quant)
fda2fbb82c5ea8f98fab0bbd42470fb8c9d5cd9c | Python | machine-learning-quickstarts/ML-python-tensorflow-mnist-cpu-service | /service.py | UTF-8 | 2,944 | 2.796875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/python
# coding=utf-8
import os
import sys
import json
import numpy as np
import onnx
import onnxruntime as rt
import tensorflow as tf
import time
class MNIST(object):
    """Serves MNIST digit predictions from an ONNX model over the test split."""

    def __init__(self):
        """Load the MNIST test dataset (10000 images). Load onnx model and start inference session."""
        start = time.time()
        mnist = tf.keras.datasets.mnist
        (_, _), (x_test, self.y_test) = mnist.load_data()
        # Scale pixel values from [0, 255] into [0, 1] to match training.
        self.x_test = x_test / 255.0
        self.image_count = x_test.shape[0]
        end = time.time()
        print("Loading time: {0:f} secs".format(end - start))
        # Load the ONNX model and check the model is well formed
        if not os.path.exists("model.onnx"):
            sys.exit("There needs to be a model located at 'model.onnx'. Tests will fail if this is not the case.")
        self.model = onnx.load("model.onnx")
        onnx.checker.check_model(self.model)
        # Start inference session (severity 0 = most verbose ORT logging)
        rt.set_default_logger_severity(0)
        self.sess = rt.InferenceSession("model.onnx")
        self.input_name = self.sess.get_inputs()[0].name

    @staticmethod
    def prepare_x_test(image_in: np.ndarray) -> np.ndarray:
        """Format an MNIST image so that it can be used for inference in onnx runtime.

        :param image_in: 2-dim numpy array that will be converted into a 4-dim array
        :type image_in: np.ndarray
        :return: 4-dim float32 array of shape (1, H, W, 1): leading batch axis
            and trailing channel axis, each of size 1
        :rtype: np.ndarray
        """
        test_image = np.asarray(image_in, dtype='float32')
        # (H, W) -> (H, W, 1): add the trailing channel axis.
        test_image = np.expand_dims(test_image, axis=2)
        # (H, W, 1) -> (1, H, W, 1): add the leading batch axis.
        return np.expand_dims(test_image, axis=0)

    def format_payload(self, index: int, y_pred: np.ndarray) -> str:
        """Format prediction results into a JSON string to be returned to the user.

        FIX: the return annotation used to be ``json`` (the module object);
        the method actually returns the string produced by json.dumps().

        :param index: int 0-9999 indicating which test image was processed
        :param y_pred: 10-dim array containing probability distribution over the 10 classes
        :return: JSON string with structure {"label": int, "predicted": int}
        """
        payload = {"label": int(self.y_test[index])}
        predicted = int(np.argmax(y_pred))
        payload["predicted"] = predicted
        return json.dumps(payload)

    def run_inference(self, index: int) -> str:
        """Handle HTTP GET request.

        :param index: int 0-9999 selecting which test image the model classifies
        :return: JSON string containing the predicted label and true label
        """
        test_image = self.prepare_x_test(image_in=self.x_test[index, :, :])
        y_pred = self.sess.run(None, {self.input_name: test_image})[0]
        return self.format_payload(index=index, y_pred=y_pred)
| true |
c4c0e455c2ec4998d7db7e2df69188f71eecebf3 | Python | Naoya-abe/siliconvalley-python | /section12/lesson139.py | UTF-8 | 2,559 | 3.453125 | 3 | [] | no_license | """
SQLAlchemy
----------
One of Python's ORM libraries: a wrapper for accessing relational databases.
Code written against SQLAlchemy makes it easy to swap sqlite for MySQL, and
the database can be used object-orientedly, without hand-writing SQL.
"""
import sqlalchemy
import sqlalchemy.ext.declarative
import sqlalchemy.orm
# Create the engine (the "workspace"). echo=True makes SQLAlchemy log every
# SQL statement it executes — handy for learning/debugging.
# In-memory sqlite variant:
# engine = sqlalchemy.create_engine('sqlite:///:memory:', echo=True)
# File-backed sqlite variant:
# engine = sqlalchemy.create_engine('sqlite:///test_sqlite139.db', echo=True)
# MySQL variant (requires the pymysql driver). On errors, consult the
# official reference: https://docs.sqlalchemy.org/en/13/core/engines.html
engine = sqlalchemy.create_engine(
    'mysql+pymysql://root@localhost/test_myswl_database139', echo=True)
# Declarative base class that mapped classes (tables) inherit from.
Base = sqlalchemy.ext.declarative.declarative_base()
class Person(Base):  # Inheriting Base maps this class to a database table.
    # The class models a single person, but the table stores many person
    # rows — hence the plural table name 'persons'.
    __tablename__ = 'persons'
    id = sqlalchemy.Column(  # auto-incrementing integer primary key
        sqlalchemy.Integer, primary_key=True, autoincrement=True)
    name = sqlalchemy.Column(sqlalchemy.String(14))
# Create all mapped tables on the engine's database.
Base.metadata.create_all(engine)

# Open a session (unit of work) bound to the engine.
Session = sqlalchemy.orm.sessionmaker(bind=engine)
session = Session()

# INSERT: stage three rows and commit them in one transaction.
new_people = [Person(name='Mike'), Person(name='Naoya'), Person(name='Takuya')]
session.add_all(new_people)
session.commit()

# UPDATE: .first() returns the first match (or None). The returned object is
# live — mutate it and commit to write the change back to the database.
mike = session.query(Person).filter_by(name='Mike').first()
mike.name = 'Michel'
session.add(mike)
session.commit()

# DELETE one row.
takuya = session.query(Person).filter_by(name='Takuya').first()
session.delete(takuya)
session.commit()

# SELECT: print every remaining row.
for person in session.query(Person).all():
    print(person.id, person.name)
| true |
60de62633fb68981b8fd2e97b3aed98380177949 | Python | sreytouchmoch/html.1 | /pyton/py/array2D1.py | UTF-8 | 189 | 3 | 3 | [] | no_license | array2D=[
["A","B","c"],
["D","F","c"],
["A","A","F"],
["v","B","c"],
]
row=int(input())
for k in range(len(array2D[row])):
array2D[row][k]="*"
print(array2D)
| true |
8c903dd94de39a71430779e02fba848171fddc7f | Python | avlakshmy/programming-practice | /rangeSumBST.py | UTF-8 | 558 | 3.359375 | 3 | [] | no_license | def inorderTraversal(root):
if root == None:
return []
ans = []
ans.extend(inorderTraversal(root.left))
ans.append(root.val)
ans.extend(inorderTraversal(root.right))
return ans
def rangeSumBST(root, low, high):
    """Sum the BST values falling in the inclusive range [low, high].

    Relies on the in-order traversal of a BST being sorted: skip values
    below `low`, then accumulate until a value exceeds `high`.
    """
    values = inorderTraversal(root)
    total = 0
    pos = 0
    # Skip everything strictly below the range.
    while pos < len(values) and values[pos] < low:
        pos += 1
    # Accumulate while still inside the range.
    while pos < len(values) and values[pos] <= high:
        total += values[pos]
        pos += 1
    return total
| true |
ad32d63ec5ef563eb3669f7448dfa53d1faeed13 | Python | kate-melnykova/CanvasAPI-Learning-Analytics | /auth/model.py | UTF-8 | 466 | 2.6875 | 3 | [] | no_license | from auth.crypting import aes_encrypt, aes_decrypt
class UnauthorizedMethod(Exception):
    """Raised when an auth-related method is not available for a user type."""
    pass
class BaseUser:
    """Common base for site users; stores an encrypted url/identifier."""
    def __init__(self, url=''):
        # NOTE(review): presumably aes_encrypt returns the ciphertext of the
        # user's URL/identifier — confirm against auth.crypting.
        self.url = aes_encrypt(url)
    def is_authenticated(self):
        # The base class cannot answer; concrete subclasses override this.
        raise UnauthorizedMethod
class User(BaseUser):
    """An authenticated (logged-in) user."""
    def is_authenticated(self):
        return True
class AnonymousUser(BaseUser):
    """Stand-in user for requests without a session; never authenticated."""
    def __init__(self):
        # Anonymous visitors carry an empty URL/identifier.
        super().__init__('')
    def is_authenticated(self):
        return False
7be639760d26c23689fb9222b8b0240a61005119 | Python | vustkdgus/StudyRaspberry21 | /opencv_img03.py | UTF-8 | 375 | 2.578125 | 3 | [] | no_license | import cv2
import numpy as np
#
org = cv2.imread('./image/cat.jpg')
gray = cv2.cvtColor(org, cv2.COLOR_BGR2GRAY)
h, w, c = org.shape
cropped = gray[:int(h/2), :int(w/2)]
# cropped = org[:int(h/2), :int(w/2)]
cv2.imshow('Original', org) #cv2 새창 열림
cv2.imshow('Crop', cropped)
cv2.waitKey(0) # 창에서 키입력 대기
cv2.destroyAllWindows() # 메모리 해제
| true |
e44353ef68a3d4b7d366174a54ced40ee9350411 | Python | GeneMANIA/pipeline | /builder/extract_identifiers.py | UTF-8 | 8,802 | 2.53125 | 3 | [] | no_license |
'''
Create GENERIC_DB files:
* NODES.txt columns 'ID', 'NAME', 'GENE_DATA_ID', 'ORGANISM_ID'
* GENES.txt columns 'ID', 'SYMBOL', 'SYMBOL_TYPE', 'NAMING_SOURCE_ID',
'NODE_ID', 'ORGANISM_ID', 'DEFAULT_SELECTED'
* GENE_DATA.txt 'ID', 'DESCRIPTION', 'EXTERNAL_ID', 'LINKOUT_SOURCE_ID'
* GENE_NAMING_SOURCES.txt, columns 'ID', 'NAME', 'RANK', 'SHORT_NAME'
See GENERIC_DB.md
input:
* raw mappings file
* processed mappings file
* default genes
* organism id
'''
import argparse
import pandas as pd
from configobj import ConfigObj
def load_identifiers(identifiers_file):
    """Load the raw identifier mapping file.

    The file is tab-separated with no header: GMID, SYMBOL, SOURCE.
    Everything is read as strings (no NA coercion) and GMID becomes
    the index.
    """
    frame = pd.read_csv(identifiers_file, sep='\t', header=None,
                        names=['GMID', 'SYMBOL', 'SOURCE'],
                        dtype='str', na_filter=False, index_col=0)
    # The first named column doubles as the index; sanity-check the wiring.
    assert frame.index.name == 'GMID'
    return frame
def load_descriptions(descriptions_file):
    """Load the gene descriptions file.

    Tab-separated with the column names in the first row; the GMID
    column is used as the index, everything read as strings.
    """
    frame = pd.read_csv(descriptions_file, sep='\t', header=0,
                        dtype='str', na_filter=False, index_col=0)
    assert frame.index.name == 'GMID'
    return frame
def extract_nodes(identifiers, organism_id, output):
    """Write NODES.txt: one row per unique GMID.

    Columns (no header): ID (the GMID), NAME, GENE_DATA_ID, ORGANISM_ID.
    NAME and GENE_DATA_ID both repeat the GMID.
    """
    gmids = load_identifiers(identifiers).index.unique()
    nodes = pd.DataFrame(
        {'NAME': gmids, 'GENE_DATA_ID': gmids, 'ORGANISM_ID': organism_id},
        index=gmids)
    nodes.to_csv(output, sep='\t', header=False, index=True,
                 index_label='ID', columns=['NAME', 'GENE_DATA_ID', 'ORGANISM_ID'])
def extract_naming_sources(identifiers_file, naming_sources_file, organism_id):
    """Write GENE_NAMING_SOURCES.txt: ID, NAME, RANK, SHORT_NAME.

    The sources are the distinct SOURCE values from the identifiers file,
    always including 'Entrez Gene ID'. IDs are offset per organism so
    merged organisms do not collide.
    """
    required = ['Entrez Gene ID']
    identifiers = load_identifiers(identifiers_file)
    # Append the must-have sources, then de-duplicate in case they were
    # already present.
    names = pd.Series(list(identifiers['SOURCE'].unique()) + required).unique()
    sources = pd.DataFrame(names, columns=['NAME'])
    sources.sort_values(by=['NAME'], inplace=True)
    sources.reset_index(inplace=True, drop=True)
    # Per-organism ID offset (numbering starts at 1 within each organism)
    # so sources from different organisms stay distinct after merging.
    sources.index += (organism_id * 100) + 1
    sources.index.name = 'ID'
    # SHORT_NAME and RANK are likely legacy fields.
    sources['SHORT_NAME'] = ''
    sources['RANK'] = 0
    # 'Gene Name' outranks every other source.
    sources.loc[sources['NAME'] == 'Gene Name', 'RANK'] = 10
    sources.to_csv(naming_sources_file, sep='\t', header=False, index=True,
                   index_label='ID', columns=['NAME', 'RANK', 'SHORT_NAME'])
def extract_genes(identifiers_file, naming_sources_file, organism_cfg, genes_file):
    """Write GENES.txt: one row per (node, symbol, naming source).

    Reads the cleaned identifiers, the organism config (for the organism id
    and the default-gene list) and the previously generated naming-sources
    file, then joins each symbol to its naming-source ID.
    """
    identifiers = load_identifiers(identifiers_file)
    cfg = ConfigObj(organism_cfg, encoding='UTF8')
    default_genes = cfg['default_genes']
    organism_id = cfg['gm_organism_id']
    naming_sources = pd.read_csv(naming_sources_file, sep='\t', header=None,
                                 dtype='str', na_filter=False, index_col=0,
                                 names=['NAME', 'RANK', 'SHORT_NAME'])
    # load genes
    genes = identifiers.copy()
    genes.index.name = 'NODE_ID'
    genes.reset_index(inplace=True) # push into column
    genes.index.name = 'ID'
    genes['SYMBOL_TYPE'] = ''
    genes['ORGANISM_ID'] = organism_id
    genes['DEFAULT_SELECTED'] = 0
    # set source ids by joining
    sources_temp = naming_sources.copy()
    sources_temp.index.name='NAMING_SOURCE_ID'
    sources_temp.reset_index(inplace=True)
    sources_temp = sources_temp[['NAMING_SOURCE_ID', 'NAME']]
    # inner join: symbols whose SOURCE has no naming-source entry are dropped
    genes = pd.merge(genes, sources_temp, left_on='SOURCE', right_on='NAME', how='inner')
    # merge reset the index, so += 1 renumbers the gene IDs starting at 1
    genes.index += 1
    genes.drop(['SOURCE', 'NAME'], axis=1, inplace=True)
    # assign defaults genes (matched case-insensitively via upper-casing)
    genes['SYMBOL_UPPER'] = genes['SYMBOL'].str.upper()
    default_genes = [gene.upper() for gene in default_genes]
    genes.loc[genes['SYMBOL_UPPER'].isin(default_genes), 'DEFAULT_SELECTED'] = 1
    genes.drop(['SYMBOL_UPPER'], axis=1, inplace=True)
    # write out files. be explicit about column order so as not to mess things up
    genes.to_csv(genes_file, sep='\t', header=False, index=True,
                 index_label='ID', columns=['SYMBOL', 'SYMBOL_TYPE', 'NAMING_SOURCE_ID',
                                            'NODE_ID', 'ORGANISM_ID', 'DEFAULT_SELECTED'])
def extract_gene_data(identifiers_file, descriptions_file, gene_data_file):
    """Write GENE_DATA.txt: ID, DESCRIPTION, EXTERNAL_ID, LINKOUT_SOURCE_ID.

    Same ID space as NODES.txt (the unique GMIDs), merged with the
    'Definition' column of the descriptions file.
    """
    identifiers = load_identifiers(identifiers_file)
    descriptions = load_descriptions(descriptions_file)
    gene_data = pd.DataFrame(index=identifiers.index.unique())
    gene_data.index.name = 'ID'
    # rename() returns a fresh frame, so the source descriptions are untouched.
    defs = descriptions[['Definition']].rename(columns={'Definition': 'DESCRIPTION'})
    # inner merge on the GMID index: only nodes with a description survive
    gene_data = pd.merge(gene_data, defs, how='inner', left_index=True, right_index=True)
    # EXTERNAL_ID and LINKOUT_SOURCE_ID are likely legacy fields.
    gene_data['EXTERNAL_ID'] = ''
    gene_data['LINKOUT_SOURCE_ID'] = 0
    gene_data.to_csv(gene_data_file, sep='\t', header=False, index=True,
                     index_label='ID', columns=['DESCRIPTION', 'EXTERNAL_ID', 'LINKOUT_SOURCE_ID'])
def process(args):
    """Dispatch a parsed argparse namespace to the matching extractor.

    Mirrors the command-line dispatch in ``__main__`` so the module can be
    driven programmatically with the same namespace shape.

    FIXES: the old body tested ``args.type``, which the parser never sets
    (the subparser dest is ``subparser_name``), referenced the misspelled
    attribute ``args.outptu`` for gene_data, and called ``extract_genes``
    with its arguments in the wrong order.
    """
    if args.subparser_name == 'nodes':
        extract_nodes(args.identifiers, args.organism_id, args.output)
    elif args.subparser_name == 'genes':
        extract_genes(args.identifiers, args.naming_sources, args.organism_cfg, args.output)
    elif args.subparser_name == 'gene_data':
        extract_gene_data(args.identifiers, args.descriptions, args.output)
    elif args.subparser_name == 'naming_sources':
        extract_naming_sources(args.identifiers, args.output, args.organism_id)
if __name__ == '__main__':
    # setup subcommands for each output file
    parser = argparse.ArgumentParser(description='Create identifier files in generic_db format')
    # the chosen subcommand name lands in args.subparser_name
    subparsers = parser.add_subparsers(dest = 'subparser_name')
    # nodes
    parser_nodes = subparsers.add_parser('nodes')
    parser_nodes.add_argument("organism_id", type=int, help="organism id")
    parser_nodes.add_argument("identifiers", help="cleaned identifiers file")
    parser_nodes.add_argument("output", help="output file")
    # genes
    parser_genes = subparsers.add_parser('genes')
    parser_genes.add_argument("organism_cfg", type=str, help="organism config file")
    parser_genes.add_argument("identifiers", help="cleaned identifiers file")
    parser_genes.add_argument("naming_sources", help="naming sources file")
    parser_genes.add_argument("output", help="output file")
    # gene_data
    parser_gene_data = subparsers.add_parser('gene_data')
    parser_gene_data.add_argument("identifiers", help="cleaned identifiers file")
    parser_gene_data.add_argument("descriptions", help="descriptions file")
    parser_gene_data.add_argument("output", help="output file")
    # naming_sources
    parser_naming_sources = subparsers.add_parser('naming_sources')
    parser_naming_sources.add_argument("identifiers", help="cleaned identifiers file")
    parser_naming_sources.add_argument("output", help="output file")
    parser_naming_sources.add_argument("organism_id", type=int, help="organism identifier")
    # parse args and dispatch
    args = parser.parse_args()
    if args.subparser_name == 'nodes':
        extract_nodes(args.identifiers, args.organism_id, args.output)
    elif args.subparser_name == 'genes':
        extract_genes(args.identifiers, args.naming_sources, args.organism_cfg, args.output)
    elif args.subparser_name == 'gene_data':
        extract_gene_data(args.identifiers, args.descriptions, args.output)
    elif args.subparser_name == 'naming_sources':
        extract_naming_sources(args.identifiers, args.output, args.organism_id)
    else:
        # subparser_name is None when no subcommand was given
        raise Exception('unexpected command')
| true |
e738ee2e4420ba6b8a4208c190384a3e058a20fc | Python | kshm2483/ssafy_TIL | /Algori/AD/A/A9_1~n sum.py | UTF-8 | 121 | 2.921875 | 3 | [] | no_license | def DFS(i):
global sums
if i < 1: return
sums += i
DFS(i-1)
N = int(input())  # upper bound of the sum
sums = 0  # global accumulator updated by DFS
DFS(N)  # recursively adds N + (N-1) + ... + 1
print(sums)
19cd43187380bc3882e79ae7e8e41c5d48b27dcf | Python | ccollado7/Curso-Python-Pildoras-Informaticas | /32.Video 37/manejo_archivos.py | UTF-8 | 314 | 3.640625 | 4 | [] | no_license | #Importo el modulo
from io import open

# Write the sentence to archivo.txt. The context manager guarantees the
# file handle is closed even if write() raises; the original open()/close()
# pair leaked the handle on any error in between.
frase = "Estupendo dia para estudiar Python \n el miercoles"
with open("archivo.txt", "w") as archivo_texto:
    archivo_texto.write(frase)
| true |
a118ffca7989b71caae9399d7b5fb948ea048661 | Python | H-P-U/PythonUygulamalar | /Python Genarators/genarators.py | UTF-8 | 515 | 3.859375 | 4 | [] | no_license | """def cube():
result=[]
for i in range(5):
result.append(j**3)
return result
print(cube())"""
# Generators use less memory: values are produced lazily, one at a time,
# instead of materializing the whole list up front.
"""def cube():
for i in range(5):
yield i**3
iterator=cube()
for i in iterator:
print(i)"""
# Generator expression: cubes of 0..4, produced lazily one value at a time.
# (The eager list equivalent would be [i**3 for i in range(5)].)
generator = (i ** 3 for i in range(5))
print(generator)  # prints the generator object itself, not its values
for cube in generator:
    print(cube)
'''print(next(generator))
print(next(generator))
print(next(generator))
print(next(generator))'''
| true |
65fba29250169499756a7e44b267b8d20f72444e | Python | han201/EulerProjects | /EulerProject3_LargestPrime.py | UTF-8 | 156 | 2.75 | 3 | [] | no_license | #What is the largest prime factor of the number 600851475143 ?
from EulerProject_HanFunctions import largestprime
N = 600851475143
print largestprime(N)
| true |
19028abfacd8080f81198a465dab5c11f94bb6c3 | Python | shenfanyi/NLP | /w2v.py | UTF-8 | 2,179 | 3.265625 | 3 | [] | no_license | #!/usr/bin/env python2
# -*- coding: UTF-8 -*-
import numpy as np
import pandas as pd
## calculate the number of sentences which include each dictionary word
def idf(dic, text):
    """Document frequency: for every word in the dictionary index, count
    how many sentences (token lists) of `text` contain it.

    Returns a list of counts in the order of ``dic.index``.
    """
    doc_freqs = []
    for word in dic.index:
        doc_freqs.append(sum(1 for tokens in text if word in tokens))
    return doc_freqs
## calculate the number of each dictionary word in a sentence
def tf_sent(dic, sentence):
    """Term frequency: occurrences of each dictionary word within one
    tokenized sentence, in the order of ``dic.index``."""
    return [sum(1 for token in sentence if token == word)
            for word in dic.index]
## calculate the ifidf of each dictionary word in a sentence
def ifidf_sent(sentence, tf, idf):
    """Weight a sentence: element-wise log(idf) * tf as a numpy array.

    NOTE(review): `sentence` is unused, and the formula is log(df) rather
    than the classic log(N/df) — kept as-is to preserve behavior.
    """
    return np.log(idf) * tf
## turn a document to a vector, by the means of if-idf algorithm
def docu_to_vec(documents):
    """Turn each document into a tf-idf-style weight vector over the
    corpus vocabulary (stop-words removed, everything lower-cased)."""
    stoplist = set('for a of the and to in'.split())
    text = [[word for word in document.lower().split() if word not in stoplist]
            for document in documents]
    # Vocabulary = every distinct remaining token.
    # NOTE(review): set iteration order is not stable across runs, so the
    # component order of the output vectors can vary — confirm callers do
    # not rely on a fixed ordering.
    words_unique = {token for tokens in text for token in tokens}
    dic = pd.DataFrame(range(len(words_unique)), index=words_unique)
    idf_list = idf(dic, text)
    return [ifidf_sent(sentence, tf_sent(dic, sentence), idf_list)
            for sentence in text]
documents = ["Human machine interface for lab abc computer applications",
"A survey of user opinion of computer system response time",
"The EPS user interface management system",
"System and human system engineering testing of EPS",
"Relation of user perceived response time to error measurement",
"The generation of random binary unordered trees",
"The intersection graph of paths in trees",
"Graph minors IV Widths of trees and well quasi ordering",
"Graph minors A survey"]
print docu_to_vec(documents)
| true |
6294ccb0cffe9a503f8f60678b312b6f58b1ce7d | Python | Florence-TC/University_Admission_Procedure | /Topics/Iterators/Students/main.py | UTF-8 | 69 | 2.984375 | 3 | [] | no_license | for n, student in enumerate(student_list):
print(n + 1, student)
| true |
eabd4cbdd37c799101641760c674b561e4392e28 | Python | KDD-OpenSource/geox-young-academy | /day-3/Kalman-filter_Mark.py | UTF-8 | 1,494 | 3.171875 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 11 10:10:24 2017
@author: Mark
"""
import numpy as np
import matplotlib.pyplot as plt
#Define functions
def model(state_0, A, B):
    """One step of the linear state model: A*state plus Gaussian noise
    drawn with mean 0 and standard deviation B."""
    noise = np.random.normal(0, B)
    return A * state_0 + noise
# Initial state estimate drawn from N(0, 0.4); seeds the filter loop below.
state_null=np.random.normal(0,0.4)
def observation_function(state, R):
    """Observe the state through additive Gaussian noise with std R."""
    return state + np.random.normal(0, R)
def forecast(state_0, cov_0, A, B):
    """Kalman predict step for scalar dynamics: propagate the mean through
    A and the variance through A·cov·A plus process noise B."""
    predicted_state = A * state_0
    predicted_cov = A * cov_0 * A + B
    return predicted_state, predicted_cov
def analysis_formulas(state_1_hat, cov_1_hat, K, H, obs_0):
    """Kalman update step: correct the forecast with the observation.

    The innovation is H·state - obs; the gain K scales how much of it is
    subtracted from the state and how much variance is removed.
    """
    innovation = H * state_1_hat - obs_0
    state_1 = state_1_hat - K * innovation
    cov_1 = cov_1_hat - K * H * cov_1_hat
    return state_1, cov_1
def kalman_gain(cov_1_hat,H,R):
K = cov_1_hat*H*(R+H*cov_1_hat*H)**(-1)
return K
# Initialize filter parameters: dynamics A, observation H, process noise B,
# observation noise R, and the number of time steps lev.
A = 0.5
H = 1
B = 0.5
R = 0.1
lev = 100
# Synthetic "truth" trajectory and its noisy observations.
# NOTE(review): the generator uses hard-coded dynamics (0.4, 0.01), so the
# filter's A and B deliberately mismatch the data — confirm this is intended.
STATE_real = np.zeros(lev)
OBS_real = np.zeros(lev)
STATE_real[0] = np.random.normal(5,0.1)
OBS_real[0] = observation_function(STATE_real[0],R)
# NOTE(review): range(1, lev-1) stops at index lev-2, so the final array
# entry stays 0 — looks like an off-by-one; confirm before relying on it.
for i in range (1,lev-1):
    STATE_real[i] = model(STATE_real[i-1],0.4,0.01)
    OBS_real[i] = observation_function(STATE_real[i],R)
# Kalman filter: forecast, gain, analysis at every step.
STATE = np.zeros(lev)
COV = np.zeros(lev)
STATE[0] = state_null
COV[0] = B
for i in range (1,lev-1):
    (state_hat,cov_hat) = forecast(STATE[i-1],COV[i-1],A,B)
    K = kalman_gain(cov_hat,H,R)
    (STATE[i],COV[i]) = analysis_formulas(state_hat,cov_hat,K,H,OBS_real[i])
# Plot the filtered estimate against the synthetic truth.
plt.plot(STATE)
plt.plot(STATE_real)
| true |
6d13303815c54f7662a3d0cf764337eac843716f | Python | zhao750456695/cvpro | /爬虫/myscrapy/baidunews/test.py | UTF-8 | 375 | 2.53125 | 3 | [] | no_license | # -*- coding=utf-8 -*-
__author__ = 'zhaojie'
__date__ = '2018/4/4 8:41'
import re
# Extract the value of the "id" query parameter from every non-empty line.
with open('./new 5.txt', 'r') as f:
    data = f.readlines()
idlist = []
# PERF: compile the pattern once instead of on every iteration.
pat = re.compile('id=(.*?)&')
for line in data:
    if len(line) > 1:
        print('line', len(line))
        # NOTE(review): [0] raises IndexError when a line contains no
        # 'id=...&' — confirm the input file always does.
        res = pat.findall(line)[0]
        idlist.append(res)
print(idlist)
| true |
793db199be8db80c5fcd0839821df1a7afbcce88 | Python | heliumman/AdventOfCode | /2020/python/day17A.py | UTF-8 | 1,718 | 2.9375 | 3 | [] | no_license | import common
def main() :
dat = common.readFile('day17.dat')
pd, dims = parse_dat(dat)
i = 0
while i < 6:
pd, dims = new_pd(pd, dims)
i = i + 1
print(count_active(pd))
def parse_dat(dat):
    """Parse the puzzle grid into (pd, dims).

    pd maps (x, y, 0) -> '#'/'.' for every cell of the z=0 plane;
    dims is ((x0, x1), (y0, y1), (z0, z1)) half-open bounds.
    """
    pd = {}
    rows = 0
    for y, row in enumerate(dat):
        for x, cell in enumerate(row):
            pd[(x, y, 0)] = cell
        rows = y + 1
    # FIX: integer division — plain "/" produced a float under Python 3,
    # which later breaks range() in new_pd().
    width = len(pd) // rows
    return (pd, ((0, width), (0, rows), (0, 1)))
def check_neighbours(pd, x, y, z):
    """Apply the Conway-cubes rule to one cell.

    Counts active ('#') cells among the 26 neighbours (missing cells count
    as inactive): an inactive cell activates on exactly 3 active
    neighbours; an active cell survives on 2 or 3.
    """
    active = 0
    for dx in range(-1, 2):
        for dy in range(-1, 2):
            for dz in range(-1, 2):
                if (dx, dy, dz) == (0, 0, 0):
                    continue  # the cell itself is not a neighbour
                if pd.get((x + dx, y + dy, z + dz), '.') == '#':
                    active += 1
    if pd.get((x, y, z), '.') == '.':
        return '#' if active == 3 else '.'
    return '#' if active in [2, 3] else '.'
def new_pd(pd, dims):
    """Advance one cycle: grow every bound by 1 in each direction and
    recompute each cell in the grown box from its neighbours."""
    (x0, x1), (y0, y1), (z0, z1) = dims
    grown = ((x0 - 1, x1 + 1), (y0 - 1, y1 + 1), (z0 - 1, z1 + 1))
    nxt = {}
    for x in range(grown[0][0], grown[0][1]):
        for y in range(grown[1][0], grown[1][1]):
            for z in range(grown[2][0], grown[2][1]):
                nxt[(x, y, z)] = check_neighbours(pd, x, y, z)
    return (nxt, grown)
def count_active(pd):
    """Number of cells currently active ('#')."""
    return sum(1 for state in pd.values() if state == '#')
def print_z(pd, dims, z):
    # Debug helper: dump one z-slice of the grid, one row per line.
    # NOTE(review): Python 2 idioms — `print(...),` (trailing comma keeps
    # the row on one line) and the bare `print` (emits the newline) only
    # behave that way under the Python 2 print statement; under Python 3
    # the bare `print` is a no-op expression. Confirm the intended
    # interpreter before reusing this.
    for y in range(dims[1][0], dims[1][1]):
        for x in range(dims[0][0], dims[0][1]):
            print(pd[(x,y,z)]),
        print
if __name__ == '__main__':
main() | true |
99b4d819ad0bf76a21267c9795f5ad6efb5d587d | Python | yujinK/TIL | /Algorithm/LeetCode/swap_nodes_in_pairs.py | UTF-8 | 1,575 | 3.546875 | 4 | [] | no_license | class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
# # 내가 한 풀이
# def swapPairs(self, head: ListNode) -> ListNode:
# prev = None
# result = cur = ListNode(0)
# while head:
# if prev:
# cur.next = ListNode(head.val)
# cur = cur.next
# cur.next = ListNode(prev.val)
# cur = cur.next
# prev = None
# else:
# prev = ListNode(head.val)
# head = head.next
# if prev:
# cur.next = ListNode(prev.val)
# return result.next
# def swapPairs(self, head: ListNode) -> ListNode:
# root = prev = ListNode(None)
# prev.next = head
# while head and head.next:
# b = head.next
# head.next = b.next
# b.next = head
# prev.next = b
# head = head.next
# prev = prev.next.next
# return root.next
def swapPairs(self, head: ListNode) -> ListNode:
if head and head.next:
p = head.next
head.next = self.swapPairs(p.next)
p.next = head
return p
return head
# Demo: build 1 -> 2 -> 3 -> 4, swap in pairs, print 2 1 4 3.
solution = Solution()
head = ListNode(1)
tail = head
for value in (2, 3, 4):
    tail.next = ListNode(value)
    tail = tail.next
result = solution.swapPairs(head)
while result:
    print(result.val)
    result = result.next
50b5df882bab37f29bdaa4342ef52177bbb84e1c | Python | adrishg/Intersemestral_Python_Proteco2020 | /Clase2-Archivos_Regex/Regex_codigo2.py | UTF-8 | 234 | 3.375 | 3 | [] | no_license | import re
txt = """Tomás alias San Nicolas fue Capaz de ir con el Capataz
haciendolo andar de altas
"""
parrafo = txt.split()
# Print every word that contains 'a' or 'á' immediately followed by 's' or
# 'z' (e.g. "alias", "Capaz", "Capataz", "altas").
for palabra in parrafo:
    coincidencia = re.findall("(á|a)(s|z)", palabra)
    if coincidencia:
        print(palabra)
25baa45399a235b98a416d4553f82362d81597ea | Python | mandyshen/YelpSentiment | /NgramsAnalysis/evaluate/filterOutCommon.py | UTF-8 | 1,073 | 2.984375 | 3 | [] | no_license | arrayNegativeNgrams = []
arrayPositiveNgrams = []
arrayPositiveNgramsWithFreq = []
arrayNegativeNgramsWithFreq = []
with open("positiveNgrams", "r") as ins:
for line in ins:
lastIndex = line.rfind(':')
str1 = line[:lastIndex]
str1.strip()
arrayPositiveNgrams.append(str1)
arrayPositiveNgramsWithFreq.append(line)
with open("negativeNgrams", "r") as ins:
for line in ins:
lastIndex = line.rfind(':')
str1 = line[:lastIndex]
str1.strip()
arrayNegativeNgrams.append(str1)
arrayNegativeNgramsWithFreq.append(line)
f = open("positiveNgramsFiltered", "w")
for ngram in arrayPositiveNgrams:
if ngram not in arrayNegativeNgrams:
indexP = arrayPositiveNgrams.index(ngram)
f.write(arrayPositiveNgramsWithFreq[indexP])
f.close()
f = open("negativeNgramsFiltered", "w")
for ngram in arrayNegativeNgrams:
if ngram not in arrayPositiveNgrams:
indexP = arrayNegativeNgrams.index(ngram)
f.write(arrayNegativeNgramsWithFreq[indexP])
f.close() | true |
9330bfd62ac7d4a34615c6b9792093e92c59ae84 | Python | mmilenkoski/twitter_sentiment_classification | /train_LSTM_CNN.py | UTF-8 | 1,619 | 3 | 3 | [] | no_license | # Make sure the training is reproducible
from numpy.random import seed
seed(2)
from tensorflow import set_random_seed
set_random_seed(3)
import numpy as np
from preprocessing_and_loading_data.DataLoader import DataLoader
max_words=40
# Create DataLoader object to get the training, validation and testing data
dl = DataLoader(glove_dimension=200, max_words=40, full=True)
# Load the data, including the embedding matrix for our vocabulary
X_train, X_val, Y_train, Y_val = dl.get_train_test_split()
embedding_matrix = dl.get_embedding_matrix()
print(X_train.shape)
print(Y_train.shape)
# Define the parameters of the model
params = {
'LSTM_num_neurons': 150,
'LSTM_dropout': 0,
'LSTM_recurrent_dropout': 0,
'CNN_filters': 128,
'CNN_kernel_size': 5,
'CNN_activation': "relu",
'CNN_pool_size': 2,
'epochs': 5,
'batch_size': 1024,
'DENSE_activation': 'sigmoid',
'loss': 'binary_crossentropy',
'optimizer': 'RMSprop'
}
# Import the model
from models.LSTM_CNN_model import LSTM_CNN_model
# The model_name is used to create checkpoint files in the "models_checkpoints" folder
model_name = "LSTM_CNN"
lstm_cnn = LSTM_CNN_model(model_name)
# Build the model and print its summary
lstm_cnn.build_model(embedding_matrix, max_words, params)
print(lstm_cnn.model.summary())
# Train the model, and save the weights which give the best validation loss
lstm_cnn.train(X_train, Y_train, epochs=params["epochs"], batch_size=params["batch_size"], validation_data=(X_val, Y_val))
print('_________________________________')
print(model_name)
print('_________________________________')
| true |
475fbd5342bcf2e83f149cbec4fb8acf1b07a099 | Python | sebvstianrodrigo/CYPSebastianVL | /libro/problemas_resueltos/capitulo3/problema3_15.py | UTF-8 | 785 | 3.40625 | 3 | [] | no_license | CL = 0
CUENTA = 0
TIPO = str(input("Ingresa el tipo de llamada: "))
DUR = int(input("Ingresa la duracion de la llamada en minutos: "))
while(TIPO != 'X' and DUR != (-1)):
if TIPO=='I':
if DUR > 3:
COSTO = 7.59+(DUR-3)*3.03
else:
COSTO = 7.59
elif TIPO=='L':
CL = CL+1
if CL > 50:
COSTO = 0.60
else:
COSTO = 0
elif TIPO=='N':
if DUR > 3:
COSTO = 1.20+(DUR-3)*0.48
else:
COSTO = 1.20
CUENTA = CUENTA + COSTO
TIPO = str(input("Ingresa el tipo de llamada: "))
DUR = int(input("Ingresa la duracion de la llamada en minutos: "))
else:
print(f"El costo total de llamadas es ${CUENTA}")
print("Fin del programa")
| true |
834bf644903227457e47b3d876e49b6ac09df78b | Python | CNwangbin/shenshang1 | /shenshang/simulate.py | UTF-8 | 6,043 | 3.5625 | 4 | [
"BSD-3-Clause"
] | permissive | import numpy as np
from scipy.stats import rankdata
def permute_table(m, inplace=False, seed=None):
    """Independently shuffle every column (feature) of a sample-by-feature table.

    Shuffling each feature on its own destroys inter-feature structure while
    keeping every feature's marginal distribution intact, which makes the
    result a convenient null model for the original data.

    Parameters
    ----------
    m : 2-D numeric array
        sample-by-feature table, usually real data.
    inplace : bool, optional
        True to shuffle `m` itself, False (default) to shuffle a copy.
    seed : int or None (default)
        seed for the random generator.

    Returns
    -------
    2-D numeric array
        the permuted table (`m` itself when ``inplace`` is True).
    """
    table = m if inplace else np.copy(m)
    rng = np.random.default_rng(seed)
    for col in range(table.shape[1]):
        rng.shuffle(table[:, col])
    return table
def simulate_compositionality(m, inplace=False, seed=None):
    '''Unimplemented placeholder.

    TODO: simulate compositionality (closure/normalization effects) in the
    sample-by-feature table `m`. Currently does nothing and returns None.
    '''
def simulate_correlation(m, structure=((10, 0.1), (10, -0.1),
                                       (10, 0.2), (10, -0.2),
                                       (10, 0.3), (10, -0.3)),
                         inplace=False, seed=None):
    '''Simulate correlation structure (feature-wise) in the sample-by-feature table.

    It shuffles randomly selected features (on the column).

    Recommend to create a null table using `permute_table`
    first and then simulate correlation structure within it.

    Parameters
    ----------
    m : 2-D numeric array
        sample-by-feature table. Usually it is created from `permute_table`.
    structure : list-like of 2-item tuple
        correlation structure. 1st item is the number of pairs of
        features randomly chosen from the table; 2nd item is the
        correlation strength to simulate for those pairs passing to
        `correlate_xy`.

    Returns
    -------
    sample-by-feature table
        updated table with specified correlation structure.
    2-D array
        x-by-2 array. Each row is a pair of feature indices that are correlated.
        The row number is the sum of all 1st item in the tuples of `structure`.
    1-D array
        Each item is the target correlation strength specified in `structure`,
        in the correponding order of the rows in the above 2-D array.
    '''
    if not inplace:
        m = np.copy(m)
    rng = np.random.default_rng(seed)
    # each requested pair consumes two distinct feature columns
    select_sizes = [i * 2 for i, _ in structure]
    select = rng.choice(m.shape[1], sum(select_sizes), replace=False)
    # split the flat selection into one chunk per structure entry
    select_idx = np.split(select, np.cumsum(select_sizes)[:-1])
    for (_, strength), idx in zip(structure, select_idx):
        it = iter(idx)
        # zip(it, it) runs next() twice, pairing consecutive indices
        for i, j in zip(it, it):
            x, y = m[:, i], m[:, j]
            # inplace=True so the column of m itself is reordered
            correlate_xy(x, y, strength=strength, inplace=True)
    return m, select.reshape(-1, 2), np.concatenate([[i] * j for j, i in structure])
def correlate_xy(x, y, strength=1, inplace=True, seed=None):
    """Induce a rank (Spearman) correlation between `x` and `y` by partially
    sorting `y` against `x`.

    Assumes `x` and `y` start out uncorrelated. `x` is never modified; a
    randomly chosen fraction ``abs(strength)`` of `y`'s entries is reordered
    so its ranks follow (strength > 0) or oppose (strength < 0) the ranks
    of the corresponding `x` entries.

    ..notice:: y is modified in place by default.

    Parameters
    ----------
    x, y : 1-D numeric arrays of the same size.
        the abundance of the feature across all samples
    strength : float of [-1, 1]
        target correlation strength; the sign picks the direction.
    inplace : bool
        True (default) to reorder `y` itself, False to work on a copy.
    seed : int or None
        seed for the random generator choosing which entries to sort.

    Returns
    -------
    1-D numeric array
        the (partially) sorted y
    """
    if not inplace:
        y = np.copy(y)
    rng = np.random.default_rng(seed)
    n_sorted = round(x.size * abs(strength))
    chosen = np.sort(rng.choice(x.size, n_sorted, replace=False))
    subset = y[chosen]
    order = x[chosen].argsort()
    if strength < 0:
        order = order[::-1]
    # classic "sort b by a" assignment: place sorted values at the ranks of x
    subset[order] = subset[subset.argsort()]
    y[chosen] = subset
    return y
def correlate_xy_TODO1(x, y, noise, positive=False):
    '''Correlate x and y by sorting y according to x.  (UNFINISHED STUB)
    It sorts y but keeps x unchanged. The strength of correlation is
    determined by the random perturbance on the argsort:
    1. get argsort that matches the order of y according to x;
    2. add random noise to the argsort;
    3. use the perturbed argsort to sort y.
    ..notice:: y is sorted in place by default.
    Parameters
    ----------
    x, y : 1-D numeric arrays of the same size.
        the abundance of the feature across all samples
    noise :
    Returns
    -------
    1-D numeric array
        sorted y
    '''
    # FIXME: unfinished draft -- ``y_rank`` is never defined here (calling
    # this raises NameError; correlate_xy_TODO2 computes it via rankdata),
    # ``positive`` is unused, nothing is returned, and ``sx``/``sy`` are
    # computed but never applied to ``y``.
    sx = np.argsort(x)
    sy = np.argsort(y_rank + noise)
def correlate_xy_TODO2(x, y, noise, positive=False):
    '''Correlate x and y by sorting y according to x.  (UNFINISHED STUB)
    It sorts y but keeps x unchanged. The procedure is:
    1. add random noise to y;
    2. get argsort that matches the order of y according to x;
    3. strip away the random noise added to y to restore original values;
    4. return sorted and restored y
    ..notice:: y is sorted in place by default.
    Parameters
    ----------
    x, y : 1-D numeric arrays of the same size.
        the abundance of the feature across all samples
    positive : bool
        return the sorted y as positively or negatively correlated to x
    Returns
    -------
    1-D numeric array
        sorted y
    '''
    # FIXME: unfinished draft -- y_rank, sx and sy are computed but never
    # used, ``positive`` is ignored, and the function returns None instead
    # of the sorted y promised by the docstring.
    y_rank = rankdata(y)
    sx = np.argsort(x)
    sy = np.argsort(y + noise)
| true |
fb23c9d4fe506e81ad846ee1e52e5d40da56297b | Python | Vinaykumarujee/Face_Detection_Using_OpenCV | /detector.py | UTF-8 | 2,050 | 2.921875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on july 2019
@author: vinayujee
"""
# import necessary modules
import cv2
import sys
# importing the cascade classifier for face and eye
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
# check for input video
# if no input is given, default camera is choosen as input source
if len(sys.argv) == 1:
cap = 0
else:
cap = sys.argv[1]
# initialize input head, with source
video = cv2.VideoCapture(cap)
# Run an infinite loop, until user quit(press 'q')
while True:
# reading frame from the video source
_, frame =video.read()
# cinverting frame to Gray scale to pass on classifier
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# detect faces and return coordinates of rectangle
# This is the section, where you need to work
# To get more accurate result, you need to play with this parameters
faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=4)
# make a rectangle around face detected
for (x,y,w,h) in faces:
cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
# extract the rectangle, containing face
roi_gray = gray[y:y+h, x:x+w]
roi_color = frame[y:y+h, x:x+w]
# As eye without a face doen't make any sense
# so we search for eye, within the face only
# this reduces the computational load an also increases accuracy
# detect eyes and return coordinates of rectangle
eyes = eye_cascade.detectMultiScale(roi_gray)
# make a rectangle around face detected
for (ex,ey,ew,eh) in eyes:
cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
# show the processed frame
cv2.imshow('Output',frame)
# If 'q' pressed => Quit
key = cv2.waitKey(1)
if key == ord('q'):
# Release the video object
video.release()
# close all open windows
cv2.destroyAllWindows()
exit(0) | true |
8ff6d4fed9ffaf1b54bd1115e050972125e38ac6 | Python | GlenHaber/euler | /problem77.py | UTF-8 | 1,262 | 4.15625 | 4 | [] | no_license | """
Prime summations
It is possible to write ten as the sum of primes in exactly five different ways:
7+3
5+5
5+3+2
3+3+2+2
2+2+2+2+2
What is the first value which can be written as the sum of primes in over five thousand different ways?
"""
from time import time
from problem47 import prime_sieve

# Project Euler 77: first value expressible as a sum of primes in > 5000 ways.
primes = prime_sieve(1000)

# Fast method; if there are x ways to create j-i, then there are sum(x) over i ways to create j.
start = time()
ways = [0] * 101
ways[0] = 1  # one way to make 0: the empty sum
# Only primes <= 100 matter for a 0..100 table; larger primes yield an
# empty inner range, so iterating the first 100 primes is harmless.
for i in range(100):
    for j in range(primes[i], 101):
        ways[j] += ways[j - primes[i]]
# First entry exceeding 5000 ways; index() recovers its value (the answer).
big = [w for w in ways if w > 5000]
print(ways.index(big[0]))
print(time() - start)

start = time()
# BUGFIX: the maximum sum of five primes is exactly primes[-1] * 5, so the
# tally array needs primes[-1] * 5 + 1 slots; the original size raised
# IndexError once a = b = c = d = e = primes[-1] was reached.
values = [0] * (primes[-1] * 5 + 1)
# Brute force over non-decreasing sums of 2 to 5 primes
for ai, a in enumerate(primes):
    for bi, b in enumerate(primes[ai:], start=ai):
        values[a + b] += 1
        for ci, c in enumerate(primes[bi:], start=bi):
            values[a + b + c] += 1
            for di, d in enumerate(primes[ci:], start=ci):
                values[a + b + c + d] += 1
                for ei, e in enumerate(primes[di:], start=di):
                    values[a + b + c + d + e] += 1
for i, v in enumerate(values):
    if v >= 5000:
        print(i)
        break
print(time() - start)
| true |
6cc80531b9aa3a955932c89453fed38ce559a910 | Python | johnchen383/RocketGame | /game.py | UTF-8 | 1,127 | 4.21875 | 4 | [] | no_license | def showMenu():
print(" ---- Welcome to the Rocket Game ----- ")
print()
while True:
command = input("Press [s] to Start or [e] to exit ")
if (command == 's' or command == 'e' or command == 'S' or command == 'E'):
break
else:
print("Invalid input. Try again.")
return command
def isInteger(str):
    """Return True if *str* parses as a base-10 integer, else False.

    Note: the parameter name shadows the ``str`` builtin; it is kept for
    backward compatibility with keyword callers.
    """
    try:
        # Cleanup: the parsed value was bound to an unused local before.
        int(str)
    except ValueError:
        return False
    return True
def getInput(prompt):
    """Keep prompting until the user types an integer between 6 and 100
    (inclusive); return it as an int."""
    while True:
        raw = input(prompt)
        if isInteger(raw):
            value = int(raw)
            if 6 <= value <= 100:
                return value
        # Reached on a non-integer or an out-of-range integer.
        print("Invalid input.")
def newGame():
    """Show the menu; on 's' collect the dice size and rocket segment count.

    NOTE(review): this is a stub -- the collected values are only printed;
    no game logic follows yet.
    """
    command = showMenu()
    # Exit Game
    if (command == 'E' or command == 'e'):
        return
    # Starting Game!
    sideDice = getInput("How many sides do you want on your dice? Between 6 and 100, inclusive. ")
    numSegments = getInput("How many segments do you want your rockets to have? ")
    print(sideDice)
    print(numSegments)
# Script entry point: starts the menu/game flow immediately on execution
# (no ``if __name__ == "__main__"`` guard, so importing also runs it).
newGame()
| true |
f4546e96bce9f0db5220899412ef1d03a5e5bf12 | Python | koyoshizawa/gajumaru | /lib/scrape_rate.py | UTF-8 | 1,599 | 2.765625 | 3 | [] | no_license |
# -*- encoding:utf-8 -*-
__author__ = "SosukeMatsunaga <>"
__version__ = "0.0.1"
__date__ = "01 August 2018"
from decimal import Decimal
from datetime import datetime
import urllib.request
from bs4 import BeautifulSoup
from transaction_management.models import HistoricalRate
class ScrapeRate(object):
    """Scrape the current price for a product and persist it to the DB.

    Product code 1 is the Nikkei 225 mini future; no scraper exists for
    other codes yet, so their rate stays at ``Decimal(0)``.
    """
    def __init__(self, product_name=1):
        self.product_name = product_name
        self.rate = Decimal()
        # Nikkei MINI is the only product with a scraper so far.
        if self.product_name == 1:
            self.scrape_nikkei_mini_rate()
        else:
            pass
        # NOTE: the constructor persists immediately as a side effect.
        self.save()
    def scrape_nikkei_mini_rate(self):
        """
        Fetch the Nikkei 225 mini futures price from the configured site
        and store it on ``self.rate``.
        """
        url = 'https://moneybox.jp/futures/detail.php?t=f_nikkei'
        # Crawl the page.
        req = urllib.request.Request(url)
        response = urllib.request.urlopen(req)
        html = response.read()
        # Scrape the price out of the page structure.
        soup = BeautifulSoup(html, "html.parser")
        container = soup.find(id="cntns_l")
        price_block = container.find('div', class_='cpny')
        price_span = price_block.find('span')
        text = price_span.text
        # Prices are rendered with thousands separators, e.g. "21,500".
        self.rate = Decimal(text.replace(',', ''))
    def save(self):
        """
        Store the scraped rate in the DB, tagged with the product code.
        """
        # BUGFIX: product_name was hard-coded to 1 here, so rates for any
        # other product would have been recorded under the wrong product.
        d = HistoricalRate(product_name=self.product_name, date_time=datetime.now(), rate=self.rate)
        d.save()
    def get_rate(self):
        """
        Return the most recently scraped rate.
        :return: Decimal
        """
        return self.rate
| true |
5d1846c09f93e1ac251fdcddb5485e522b3704d9 | Python | Sriram-52/Python-Lab | /Exp-6/Prg2.py | UTF-8 | 467 | 4.0625 | 4 | [] | no_license | if __name__ == "__main__":
path = input("Enter path of the file: ")
fp = open(path, 'r')
count = {}
for line in fp:
for char in line:
if char in count.keys(): count[char] += 1
else: count[char] = 1
print(count)
print("Checking the type of file...")
ext = path.split("\\").pop().split(".").pop()
if ext == "py": print("It's a python file")
elif ext == "txt": print("It's a text file")
else: print("It's extension is ", ext) | true |
90e266ada39db84855496a11d30bbdfcb18f0753 | Python | kimotot/aizu | /itp2/itp2_9_d.py | UTF-8 | 379 | 3.5 | 4 | [] | no_license | n = int(input())
# Reads the second of two sorted integer sequences (n and A were read on
# the preceding lines) and prints the elements that appear in exactly one
# of them, i.e. the symmetric difference, in ascending order.
A = [int(x) for x in input().split()]
m = int(input())
B = [int(x) for x in input().split()]
# Merge-scan of the two (assumed sorted) sequences.
i = j = 0
while i < n and j < m:
    if A[i] == B[j]:
        # Common element: skip it in both sequences.
        i += 1
        j += 1
    elif A[i] < B[j]:
        print(A[i])
        i += 1
    else:
        print(B[j])
        j += 1
# Flush whatever remains of either sequence.
while i < n:
    print(A[i])
    i += 1
while j < m:
    print(B[j])
    j += 1
| true |
32f29c174c6800ef266d65167c40c2d9c824f098 | Python | ChristophReich1996/ECG_Classification | /scripts/lstm_example.py | UTF-8 | 300 | 2.515625 | 3 | [
"Python-2.0",
"MIT"
] | permissive | import torch
import torch.nn as nn
if __name__ == '__main__':
    # Shapes: (seq_len=5, batch=3, features=128); batch_first=False
    # matches this (seq, batch, feature) layout.
    # Cleanup: renamed ``input`` -> ``inputs`` (it shadowed the builtin).
    inputs = torch.rand(5, 3, 128)
    lstm = nn.LSTM(input_size=128, hidden_size=512, num_layers=2, bias=True, batch_first=False)
    # Report the number of trainable parameters (generator avoids a
    # throwaway list inside sum()).
    print(sum(p.numel() for p in lstm.parameters()))
    output, _ = lstm(inputs)
    print(output.shape)
| true |
a665478b79c20be91c34b072d305e7f4da375c59 | Python | afreeman100/TetrisDQN | /code/DQN.py | UTF-8 | 15,804 | 2.59375 | 3 | [] | no_license | import tetris
from tqdm import tqdm
from tetriminos import *
from networks import *
from replay import ReplayMemory, PriorityMemory
from array2gif import write_gif
def update_targets(tau):
    """Create ops that move each Target-scope variable a fraction *tau* of
    the way toward its matching Q_Net-scope variable (soft target update).

    With tau=1 the target network is copied outright.
    """
    q_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Q_Net')
    target_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Target')
    with tf.variable_scope('Update'):
        # Pairing relies on both scopes declaring variables in the same order.
        return [t.assign((q.value() * tau) + ((1 - tau) * t.value()))
                for t, q in zip(target_vars, q_vars)]
def perform_updates(assignments, sess):
    """Execute every pending assignment op in the given TF session."""
    for assignment_op in assignments:
        sess.run(assignment_op)
class Agent:
    """Deep Q-learning agent for the Tetris environment.

    Holds a value network (``Q_Net`` scope) and a slowly-tracking target
    network (``Target`` scope), an optional (prioritised) experience replay
    memory, and a linear epsilon-greedy exploration schedule.  Supports
    plain DQN and Double DQN target computation.
    """
    def __init__(self,
                 game_settings,
                 training_steps,
                 network_architecture=Convolutional,
                 hidden_nodes=np.array([250, 100]),
                 a=0.000005,
                 y=0.99,
                 e_start=1,
                 e_min=0.1,
                 e_rate=20,
                 tau=0.001,
                 replay_samples=32,
                 replay_size=100000,
                 use_replay=True,
                 use_priority=True,
                 use_double=True,
                 use_summaries=False,
                 save_model=False
                 ):
        """Build networks and (optionally pre-filled) replay memory.

        ``a`` is the learning rate, ``y`` the discount factor, ``tau`` the
        soft target-update rate; ``e_*`` define the epsilon schedule.
        """
        self.training_steps = training_steps
        self.a = a
        self.y = y
        # Decay epsilon linearly from e_start to e_min over first e_rate % of interactions
        self.e = e_start
        self.e_min = e_min
        self.e_step = (e_start - e_min) / (training_steps * e_rate / 100)
        # Game parameters
        self.rows = game_settings.rows
        self.columns = game_settings.columns
        self.tetriminos = game_settings.tetriminos
        self.end_at = game_settings.end_at
        # State = flattened board plus 7 extra entries (tetrimino encoding);
        # action = one of (4 rotations) x (columns) placements.
        self.input_size = self.rows * self.columns + 7
        self.output_size = self.columns * 4
        # Experience replay
        self.use_replay = use_replay
        self.use_priority = use_priority
        self.replay_samples = replay_samples
        memory = PriorityMemory if use_priority else ReplayMemory
        self.replay_memory = memory(size=replay_size, state_size=self.input_size, action_size=self.output_size)
        if use_replay:
            self.populate_memory(10000)
        tf.reset_default_graph()
        self.net = network_architecture(self.rows, self.columns, self.output_size, hidden_nodes, a, 'Q_Net', use_priority)
        self.target = network_architecture(self.rows, self.columns, self.output_size, hidden_nodes, a, 'Target', use_priority)
        self.init = tf.global_variables_initializer()
        # tau=1 copies Q_Net weights into Target outright at start of training.
        self.initial_targets = update_targets(1)
        self.save_model = save_model
        self.saver = tf.train.Saver()
        self.save_directory = './model/tetris.ckpt'
        self.tau = tau
        self.use_double = use_double
        self.updater = update_targets(tau)
        self.writer = None
        self.use_summaries = use_summaries
    def populate_memory(self, n):
        """Fill the replay memory with *n* random-policy transitions."""
        # Ensure replay memory has sufficient samples in it before training begins
        i = 0
        while i < n:
            game = tetris.Game(self.rows, self.columns, self.tetriminos, self.end_at)
            while not game.game_over:
                # Interact
                state = game.state()
                mask = validity_mask(game.next_tetrimino, self.columns)
                # (The comprehension's ``i`` has its own scope in Python 3,
                # so the outer transition counter is not clobbered.)
                action = np.random.choice([i for i in range(self.output_size) if mask[i] == 1])
                rotation, column = action_to_rc(action, self.columns)
                reward = game.place_tetrimino(rotation, column)
                state_new = game.state()
                mask_new = validity_mask(game.next_tetrimino, self.columns)
                # Save
                if self.use_priority:
                    self.replay_memory.save(state, action, reward, game.game_over, state_new, mask, mask_new, np.max(self.replay_memory.priorities))
                else:
                    self.replay_memory.save(state, action, reward, game.game_over, state_new, mask, mask_new)
                i += 1
    def learning_curve(self, training_intervals, interval_interactions):
        """Train the agent, pausing after each interval to measure play.

        Returns (interaction counts, mean scores, score std deviations),
        each of length ``training_intervals + 1`` (index 0 = untrained).
        """
        # Extra element for performance after 0 training
        scores_list = np.zeros(training_intervals + 1)
        devs_list = np.zeros(training_intervals + 1)
        ep_list = np.arange(training_intervals + 1) * interval_interactions
        with tf.Session() as sess:
            sess.run(self.init)
            # Ensure target network is initialised to same values as Q network
            perform_updates(self.initial_targets, sess)
            if self.use_summaries:
                self.writer = tf.summary.FileWriter('./graphs', sess.graph)
            for episode in tqdm(range(training_intervals)):
                current_interaction = 0
                while current_interaction < interval_interactions:
                    game = tetris.Game(self.rows, self.columns, self.tetriminos, self.end_at)
                    while not game.game_over and current_interaction < interval_interactions:
                        current_interaction += 1
                        # --------- Interact ---------
                        # Get current state and perform feed forward pass
                        state = game.state()
                        q_values = sess.run(self.net.q_out, feed_dict={self.net.inputs: [state]})
                        # Validity mask for the current tetrimino, then select best action
                        mask = validity_mask(game.next_tetrimino, self.columns)
                        q_masked = np.where(mask, q_values, -np.inf)
                        action = np.argmax(q_masked, 1)[0]
                        # Epsilon greedy action - ensuring that action is valid
                        if np.random.rand() < self.e:
                            valid_act_args = [i for i in range(self.output_size) if mask[i] == 1]
                            action = np.random.choice(valid_act_args)
                        # Decay e
                        self.e = max(self.e_min, self.e - self.e_step)
                        # convert action to a rotation and column which can be used to update the game
                        rotation, column = action_to_rc(action, self.columns)
                        reward = game.place_tetrimino(rotation, column)
                        # State: s'
                        state_new = game.state()
                        mask_new = validity_mask(game.next_tetrimino, self.columns)
                        # Save this experience
                        if self.use_priority:
                            self.replay_memory.save(state, action, reward, game.game_over, state_new, mask, mask_new, np.max(self.replay_memory.priorities))
                        else:
                            self.replay_memory.save(state, action, reward, game.game_over, state_new, mask, mask_new)
                        # --------- Train ---------
                        # If not using experience replay, perform single update for this experience
                        if not self.use_replay:
                            # Q values for s' according to target network
                            # NOTE(review): invalid actions are masked with
                            # -1000 here but with -np.inf above -- confirm
                            # the finite sentinel is intentional for targets.
                            q_new = sess.run(self.target.q_out, feed_dict={self.target.inputs: [state_new]})
                            q_new_masked = np.where(mask_new, q_new, -1000)
                            if self.use_double:
                                # 1) Q network to select best action
                                q = sess.run(self.net.q_out, feed_dict={self.net.inputs: [state_new]})
                                q = np.where(mask_new, q, -1000)
                                a_max = np.argmax(q, 1)
                                # 2) Use target network to determine value of action
                                q_max = q_new[0, a_max]
                            else:
                                # Use target net to select the action and its value
                                q_max = np.max(q_new_masked)
                            # Set target values
                            q_target = q_values
                            if game.game_over:
                                q_target[0, action] = reward
                            else:
                                q_target[0, action] = reward + self.y * q_max
                            # Train network
                            _ = sess.run(self.net.update_model, feed_dict={self.net.inputs: [state], self.net.target_q: q_target})
                            perform_updates(self.updater, sess)
                        else:
                            if self.use_priority:
                                self.priority_replay_train(sess)
                            else:
                                self.replay_train(sess)
                # Test current performance of agent and store so that learning curve can be plotted
                av_score, dev = self.test(sess, 100)
                scores_list[episode + 1] = av_score
                devs_list[episode + 1] = dev
            if self.save_model:
                self.saver.save(sess, self.save_directory)
                print('Saved!')
        return ep_list, scores_list, devs_list
    def replay_train(self, session):
        """One batch update from uniformly-sampled replay experiences."""
        # Retrieve batch of experiences
        states, actions, rewards, game_overs, new_states, masks, new_masks = self.replay_memory.retrieve(self.replay_samples)
        # Predicted Q values for s and s'
        q_values, summary = session.run([self.net.q_out, self.net.merged], feed_dict={self.net.inputs: states})
        q_new, summary2 = session.run([self.target.q_out, self.target.merged], feed_dict={self.target.inputs: new_states})
        q_new_masked = np.where(new_masks, q_new, -1000)
        if self.use_summaries:
            self.writer.add_summary(summary)
            self.writer.add_summary(summary2)
        if self.use_double:
            # 1) Q network to select best actions
            q = session.run(self.net.q_out, feed_dict={self.net.inputs: new_states})
            q = np.where(new_masks, q, -1000)
            a_max = np.argmax(q, 1)
            # 2) Use target network to determine value of actions
            q_new_max = np.array([q_new_masked[i, action] for i, action in enumerate(a_max)])
        else:
            # Use target net to select both actions and values
            q_new_max = np.max(q_new_masked, 1)
        # Set target Q values
        for i in range(self.replay_samples):
            if game_overs[i]:
                q_values[i, actions[i]] = rewards[i]
            else:
                q_values[i, actions[i]] = rewards[i] + self.y * q_new_max[i]
        # Batch train, then update target network
        _ = session.run([self.net.update_model], feed_dict={self.net.inputs: states, self.net.target_q: q_values})
        perform_updates(self.updater, session)
    def priority_replay_train(self, session):
        """One batch update from prioritised replay, refreshing priorities
        with the new absolute TD errors and weighting the loss by the
        sampled importance weights."""
        # Retrieve indices of experiences to use, and their current importance weights
        states, actions, rewards, game_overs, new_states, masks, new_masks, indices, weights = self.replay_memory.retrieve(self.replay_samples)
        # Predicted Q values for s and s'
        q_values, summary = session.run([self.net.q_out, self.net.merged], feed_dict={self.net.inputs: states})
        q_new, summary2 = session.run([self.target.q_out, self.target.merged], feed_dict={self.target.inputs: new_states})
        q_new_masked = np.where(new_masks, q_new, -1000)
        # Q(s,a) for each experience - used with max(Q(s',a')) to calculate TD error for PER
        td_errs = np.array([q_values[i, action] for i, action in enumerate(actions)])
        if self.use_summaries:
            self.writer.add_summary(summary)
            self.writer.add_summary(summary2)
        if self.use_double:
            # 1) Q network to select best actions
            q = session.run(self.net.q_out, feed_dict={self.net.inputs: new_states})
            q = np.where(new_masks, q, -1000)
            a_max = np.argmax(q, 1)
            # 2) Use target network to determine value of actions
            q_new_max = np.array([q_new_masked[i, action] for i, action in enumerate(a_max)])
        else:
            # Use target net to select both actions and values
            q_new_max = np.max(q_new_masked, 1)
        # Set target Q values
        for i in range(self.replay_samples):
            if game_overs[i]:
                q_values[i, actions[i]] = rewards[i]
            else:
                q_values[i, actions[i]] = rewards[i] + self.y * q_new_max[i]
            # Use target values to calculate TD error for REP
            td_errs[i] = np.abs(q_values[i, actions[i]] - td_errs[i])
        # Update replay priorities
        for i in range(self.replay_samples):
            self.replay_memory.priorities[indices[i]] = td_errs[i] + self.replay_memory.e
        weights = np.reshape(weights, [self.replay_samples, 1])
        # Batch train, then update target network
        _ = session.run([self.net.update_model],
                        feed_dict={self.net.inputs: states, self.net.target_q: q_values, self.net.weights: weights})
        perform_updates(self.updater, session)
    def load_and_play(self):
        """Restore the saved model and report its mean score over 100 games."""
        with tf.Session() as sess:
            sess.run(self.init)
            self.saver.restore(sess, self.save_directory)
            score, dev = self.test(sess, 100)
            print('Average score: ', score)
    def test(self, session, num_tests):
        """ Test performance of agent, returning mean score and standard deviation from given number of test games. """
        score_list = np.zeros(num_tests)
        for n in range(num_tests):
            game = tetris.Game(self.rows, self.columns, self.tetriminos, self.end_at)
            while not game.game_over:
                # Pass state to network - perform best action and add reward to log
                state = [game.state()]
                q_values = session.run(self.net.q_out, feed_dict={self.net.inputs: state})
                mask = validity_mask(game.next_tetrimino, self.columns)
                q_masked = np.where(mask, q_values, -1000)
                action = np.argmax(q_masked, 1)[0]
                rotation, column = action_to_rc(action, self.columns)
                score_list[n] += game.place_tetrimino(rotation, column)
        return np.mean(score_list), np.std(score_list)
    def load_and_gif(self):
        """Restore the saved model, play one greedy game and save it as a GIF."""
        with tf.Session() as sess:
            sess.run(self.init)
            self.saver.restore(sess, self.save_directory)
            frames = []
            def add_frame(s):
                # Render the flattened board portion of the state as one frame.
                frame = np.reshape(s[:self.rows * self.columns], (self.rows, self.columns))
                # Upscale so GIF is larger than 20x10 pixels
                frame = np.kron(frame, np.ones((20, 20)))[::-1]
                # Convert to RGB array
                frame = np.stack((frame, frame, frame)) * 100
                frames.append(frame)
            game = tetris.Game(self.rows, self.columns, self.tetriminos, self.end_at)
            while not game.game_over:
                state = game.state()
                add_frame(state)
                q_values = sess.run(self.net.q_out, feed_dict={self.net.inputs: [state]})
                mask = validity_mask(game.next_tetrimino, self.columns)
                q_masked = np.where(mask, q_values, -1000)
                action = np.argmax(q_masked, 1)[0]
                rotation, column = action_to_rc(action, self.columns)
                game.place_tetrimino(rotation, column)
            add_frame(game.state())
            write_gif(frames, 'gif/game.gif', fps=2)
            print('GIF saved')
| true |
0191cfb6c5a2b56364b96d586d6475e4d5a77c30 | Python | jramcast/ml_weather | /example9/classifier.py | UTF-8 | 2,999 | 3.046875 | 3 | [
"Apache-2.0"
] | permissive | """
The weather classifier module
"""
import time
import math
import numpy as np
from sklearn.svm import LinearSVC
from sklearn.model_selection import cross_val_score
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.externals import joblib
from preprocessing import (SentimentExtractor,
TempExtractor,
WindExtractor,
tokenize,
STOPWORDS)
"""
Classes to train
"""
CLASSES = [
'k1',
'k2',
'k3',
'k4',
'k5',
'k6',
'k7',
'k8',
'k9',
'k10',
'k11',
'k12',
'k13',
'k14',
'k15',
's2',
's3',
's4',
's5',
'w1',
'w2',
'w4',
]
# Loaded models will be stored here
MODELS = {}
sentiment_extractor = SentimentExtractor()
temp_extractor = TempExtractor()
wind_extractor = WindExtractor()
vectorizer = CountVectorizer(
min_df=10,
max_df=0.5,
ngram_range=(1, 3),
max_features=10000,
lowercase=True,
stop_words=STOPWORDS,
tokenizer=tokenize
)
def train(data):
    """Train one binary model per label in CLASSES and persist each one
    to ``models/<label>.pkl``, printing the wall-clock time per label."""
    tweets = [row['tweet'] for row in data]
    for label in CLASSES:
        print("--------------------------> Training " + label)
        t0 = time.time()
        targets = filter_class(data, label)
        model = train_class(tweets, targets)
        # Persist the fitted pipeline for later lazy loading.
        joblib.dump(model, 'models/{}.pkl'.format(label))
        print('Time elapsed:')
        print(time.time() - t0)
def load():
    """Populate the MODELS cache with the pickled model of every class."""
    print('Loading models...')
    MODELS.update({label: joblib.load('models/{}.pkl'.format(label))
                   for label in CLASSES})
    print('Models loaded')
def predict(data):
    """Run every per-class model on *data*, lazily loading models first.

    Returns a dict mapping each class name to its model's predictions.
    """
    if not MODELS:
        load()
    return {label: MODELS[label].predict(data) for label in CLASSES}
def train_class(x_train, y_train):
    """Build, cross-validate, fit and return the pipeline for one class.

    The pipeline concatenates the shared hand-crafted feature extractors
    with the bag-of-ngrams vectorizer and feeds a linear SVM.
    """
    feature_union = FeatureUnion([
        ('sentiment', sentiment_extractor),
        ('temp', temp_extractor),
        ('wind', wind_extractor),
        ('vect', vectorizer),
    ])
    model = Pipeline([
        ('union', feature_union),
        ('cls', LinearSVC(C=0.3, max_iter=300, loss='hinge')),
    ])
    # Report mean cross-validated accuracy before the final fit.
    scores = cross_val_score(model, x_train, y_train, scoring='accuracy')
    print('=== Accuracy ===')
    print(np.mean(scores))
    model.fit(x_train, y_train)
    return model
def filter_class(data, classname):
    """Return one label per row: the ceiling of the row's (string) value
    for *classname*, so any fractional annotation counts as present."""
    return [math.ceil(float(row[classname])) for row in data]
| true |
b148a319b4813089bcf433c3ffca78f5f33e6906 | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_155/1607.py | UTF-8 | 684 | 3.03125 | 3 | [] | no_license | var = raw_input("Enter something: ").split('\n')
text = ""
caseno = 0
for line in var[1:]:
if line != "":
caseno += 1
addpeople = 0
(maxshy, situ) = line.split(" ")
#print situ
maxshy = int(float(maxshy))
stand = 0
addall = 0
for j in range(0,maxshy+1):
#print("j " + str(j))
addnow = 0
if stand < j:
addnow = (j-stand)
#print addnow
stand += (addnow + (int(float(situ[j]))))
addall += addnow
newline = "Case #" + str(caseno) + ": " + str(addall)
text += "%s\n" % newline
print text
| true |
4b2ae2afca5125c99b1d74c8ed014dd79fcc321f | Python | msarker000/ML_hw1 | /DSEHW1.py | UTF-8 | 2,861 | 3.703125 | 4 | [] | no_license | import numpy as np
class DSELinearClassifier(object):
"""DSELinearClassifier classifier.
Parameters
------------
activation: string
values are ('Perceptron', 'Logistic', 'HyperTan').
initial_weight: vector
inital weight
random_state : int
Random number generator seed for random weight initialization.
eta : float
Learning rate (between 0.0 and 1.0)
max_epochs : int
Passes over the training dataset.
Attributes
-----------
w_ : 1d-array
Weights after fitting.
cost_ : list
Sum-of-squares cost function value in each epoch.
"""
def __init__(self, activation, initial_weight, random_state=42, eta=0.01, max_epochs=50):
self.eta = eta
self.max_epochs = max_epochs
self.random_state = random_state
self._w = initial_weight
self.activation = activation
def fit(self, X, Y):
""" Fit training data.
Parameters
----------
X : {array-like}, shape = [n_examples, n_features]
Training vectors, where n_examples
is the number of examples and
n_features is the number of features.
y : array-like, shape = [n_examples]
Target values.
Returns
-------
self : object
"""
self.cost_ = []
self._fit_errors = []
for i in range(self.max_epochs):
errors = 0
for x, y in zip(X, Y):
z = self.net_input(x)
yhat = 0
if self.activation == 'Perceptron':
yhat = self.activation_linear(z)
elif self.activation == 'Logistic':
yhat = self.activation_sigmoid(z)
elif self.activation == 'HyperTan':
yhat = self.activation_tanh(z)
errors = (y - yhat)
delta_w = self.eta * errors * x
self._w = self._w + delta_w
self._fit_errors.append(errors)
return self
def net_input(self, X):
"""Calculate net input"""
return np.dot(X, self._w.T)
def activation_linear(self, X):
"""Compute linear activation"""
return X
def activation_sigmoid(self, X):
"""logistic activation function"""
return 1 / (1 + np.exp(-X))
def activation_tanh(self, X):
"""Tanh activation function"""
return (np.exp(X) - np.exp(-X)) / (np.exp(X) + np.exp(-X))
def predict(self, X):
if self.activation == 'Perceptron':
return np.where(self.net_input(X) >= 0.0, 1, -1)
elif self.activation == 'Logistic':
return np.where(self.net_input(X) >= 0.0, 0, 1)
elif self.activation == 'HyperTan':
return np.where(self.net_input(X) >= 0.0, 1, -1) | true |
e3c144c944544048c64c4afe37b97706269c0d22 | Python | MFALHI/gremlinsdk-python | /exampleapp/reviews/reviews.py | UTF-8 | 2,037 | 2.671875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/python
from flask import Flask, request
import simplejson as json
import requests
import sys
from json2html import *
app = Flask(__name__)
reviews_resp="""
<blockquote>
<p>
An extremely entertaining and comic series by Herge, with expressive drawings!
</p> <small>Reviewer1 <cite>New York Times</cite></small>
</blockquote>
<blockquote>
<p>
Its well-researched plots straddle a variety of genres:
swashbuckling adventures with elements of fantasy, mysteries,
political thrillers, and science fiction.
</p> <small>Reviewer2 <cite>Barnes and Noble</cite></small>
</blockquote>
"""
@app.route('/reviews')
def bookReviews():
    """Serve the canned book-review HTML fragment."""
    # Cleanup: the ``global`` statement was redundant -- it is only needed
    # for assignment; reading a module-level name works without it.
    return reviews_resp
@app.route('/')
def index():
    """Render the front page: a small Bootstrap HTML shell that embeds the
    canned review fragment.  (The previous docstring described user
    buttons that do not exist in this app.)"""
    # %s below is substituted with the reviews_resp module-level fragment.
    top = """
    <html>
     <head>
     <meta charset="utf-8">
     <meta http-equiv="X-UA-Compatible" content="IE=edge">
     <meta name="viewport" content="width=device-width, initial-scale=1">
     <!-- Latest compiled and minified CSS -->
     <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css">
     <!-- Optional theme -->
     <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap-theme.min.css">
     <!-- Latest compiled and minified JavaScript -->
     <script src="https://ajax.googleapis.com/ajax/libs/jquery/2.1.4/jquery.min.js"></script>
     <!-- Latest compiled and minified JavaScript -->
     <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js"></script>
     </head>
     <title>Book reviews service</title>
     <body>
     <p><h2>Hello! This is the book reviews service. My content is</h2></p>
     <div>%s</div>
     </body>
    </html>
    """ % (reviews_resp)
    return top
if __name__ == '__main__':
# To run the server, type-in $ python server.py
if len(sys.argv) < 1:
print "usage: %s port" % (sys.argv[0])
sys.exit(-1)
p = int(sys.argv[1])
app.run(host='0.0.0.0', port=p, debug=False)
| true |
667e151f324f1528b71cdb91c6d6239248b3f834 | Python | EddiePulido/django_assignments | /django_intro/Random/apps/random_word/views.py | UTF-8 | 621 | 2.640625 | 3 | [] | no_license | from django.shortcuts import render, HttpResponse, redirect
from django.utils.crypto import get_random_string
def index(request):
    """Plain-text placeholder response for the app root."""
    body = "this is the equivalent of @app.route('/')!"
    return HttpResponse(body)
def random_word(request):
    """Render a fresh 14-character random word and make sure the session's
    visit counter exists (initialised to 1 on the first visit)."""
    context = {
        'word': get_random_string(14)
    }
    if 'count' not in request.session:
        request.session['count'] = 1
    return render(request, "random_word/index.html", context)
def generate(request):
    """Bump the session's visit counter and show a new random word."""
    # Robustness: default to 0 so hitting /generate/ before /random_word
    # (when 'count' is not in the session yet) no longer raises KeyError.
    request.session['count'] = request.session.get('count', 0) + 1
    return redirect('/random_word')
def reset(request):
    """Clear the session's visit counter and start over."""
    # Robustness: ``pop`` with a default makes reset idempotent -- no
    # KeyError when the counter was never set or was already cleared.
    request.session.pop('count', None)
    return redirect('/random_word')
f0a3ed337d6c4a98b6bc62339bf9cda47ce44f5c | Python | ChangxingJiang/LeetCode | /0401-0500/0471/0471_Python_1.py | UTF-8 | 1,108 | 3.0625 | 3 | [] | no_license | class Solution:
def encode(self, s: str) -> str:
size = len(s)
dp = [[""] * size for _ in range(size)]
for l in range(1, size + 1):
for i in range(size - l + 1):
j = i + l - 1
dp[i][j] = s[i:j + 1]
if l > 4:
# 寻找是否为某一段的重复
idx = (s[i:j + 1] + s[i:j + 1]).index(s[i:j + 1], 1)
if idx < l:
dp[i][j] = str(l // idx) + "[" + dp[i][i + idx - 1] + "]"
# 寻找是否有更优解
for k in range(i, j):
if len(dp[i][k]) + len(dp[k + 1][j]) < len(dp[i][j]):
dp[i][j] = dp[i][k] + dp[k + 1][j]
return dp[0][-1]
if __name__ == "__main__":
print(Solution().encode(s="aaa")) # "aaa"
print(Solution().encode(s="aaaaa")) # "5[a]"
print(Solution().encode(s="aaaaaaaaaa")) # "10[a]"
print(Solution().encode(s="aabcaabcd")) # "2[aabc]d"
print(Solution().encode(s="abbbabbbcabbbabbbc")) # "2[2[abbb]c]"
| true |
989ee8fd1ff45a3e0e6948ce56751850d6275d90 | Python | phuchonguyen/contextual-personalized-feeds-migiphu | /recsys/recommender/learning_to_rank_ensemble.py | UTF-8 | 7,221 | 2.6875 | 3 | [] | no_license | import collections
import pickle
import random
import re
from collections import Counter
import numpy as np
import pandas as pd
from scipy.sparse import dok_matrix
from sklearn.metrics.pairwise import cosine_similarity
from tqdm import tqdm
def save_obj(obj, name):
    """Pickle *obj* to ``<name>.pkl`` using the highest pickle protocol."""
    with open(name + '.pkl', 'wb') as handle:
        pickle.dump(obj, handle, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
    """Unpickle and return the object stored at ``<name>.pkl``."""
    with open(name + '.pkl', 'rb') as handle:
        return pickle.load(handle)
"""
Functions to suggest posts based on collaborative filtering
"""
def split_id(val):
    """Parse a stringified list of post ids into a list of id strings.

    Strips every character except digits, underscores and the separating
    commas, then splits on the commas.
    """
    cleaned = re.sub(r"[^_,\d]", "", val)
    return cleaned.split(",")
def split_rating(val, sep=","):
    """Parse a stringified list of ratings into a list of floats.

    With sep == " " only the brackets are stripped and the tokens are
    whitespace-separated; otherwise everything but digits, dots and commas
    is stripped and the tokens are comma-separated.
    """
    if sep == " ":
        tokens = re.sub(r"[\[\]]", "", val).split()
    else:
        tokens = re.sub(r"[^.,\d]", "", val).split(",")
    return [float(tok) for tok in tokens]
def find_most_similar_posts_collabfilter(comments, k=9):
    """For every post, find the k most similar posts via item-item
    collaborative filtering: cosine similarity between post columns of a
    binary user-by-post interaction matrix built from *comments*.

    Returns a DataFrame with columns post_id, most_similar (list of k
    post ids) and most_similar_rating (their similarity scores).
    """
    user_post = comments[['from_id', 'post_id']]
    # NOTE(review): drop_duplicates(inplace=True) on a column slice can
    # trigger pandas' SettingWithCopyWarning; consider .copy() first.
    user_post.drop_duplicates(inplace=True)
    unique_users = user_post.from_id.drop_duplicates().values
    unique_posts = user_post.post_id.drop_duplicates().values
    # Map users/posts to matrix row/column indices.
    users_map = dict(zip(unique_users, range(len(unique_users))))
    posts_map = dict(zip(unique_posts, range(len(unique_posts))))
    user_all_posts = user_post.groupby('from_id')['post_id'].apply(list).reset_index()
    # Sparse binary interaction matrix: 1 where the user commented on the post.
    item_matrix = dok_matrix((len(unique_users), len(unique_posts)), dtype=np.float32)
    for l in tqdm(range(user_all_posts.shape[0])):
        i = users_map[user_all_posts.iloc[l].from_id]
        posts = user_all_posts.iloc[l].post_id
        for post in posts:
            j = posts_map[post]
            item_matrix[i, j] = 1
    # Item-item similarity over the transposed (post x user) matrix.
    cosine_sim = cosine_similarity(item_matrix.transpose())
    similar_posts = []
    similar_rating = []
    for l in tqdm(range(cosine_sim.shape[0])):
        source_sim = cosine_sim[l, :]
        # Indices of the k largest similarities (unsorted); note a post's
        # neighbour set can include the post itself.
        sim_ids = np.argpartition(source_sim, -k)[-k:]
        sim = source_sim[sim_ids]
        sim_posts = [unique_posts[d] for d in sim_ids]
        similar_posts.append(sim_posts)
        similar_rating.append(sim)
    df = pd.DataFrame(data={'post_id': unique_posts,
                            'most_similar': similar_posts,
                            'most_similar_rating': similar_rating})
    return df
def format_similarity(df, sep=","):  # split string to post_id's and similarities
    """Convert the stringified similarity columns of *df* into real lists
    (ids via split_id, ratings via split_rating) and return the frame."""
    df['most_similar'] = df['most_similar'].apply(split_id)
    df['most_similar_rating'] = df['most_similar_rating'].apply(split_rating, sep=sep)
    return df
def create_dataset_for_learning_to_rank(n_users=300):
doc2vec = format_similarity(pd.read_csv("../../data/fb_news_posts_20K_doc2v.csv"))
tfidf = format_similarity(pd.read_csv("../../data/fb_news_posts_20K_tfidf.csv"), sep=" ")
cf = load_obj('ready_to_use_recommender_data/collab_filtering')
features = {'1': doc2vec, '2': tfidf, '3': cf}
users = [i for i in comment_dict.keys() if len(comment_dict[i]) > 5] # Select active users
users = random.sample(users, n_users)
final = collections.defaultdict(lambda: collections.defaultdict(int))
for user in tqdm(users):
for commented_post in comment_dict[user]:
# Add commented_post
final[(user, commented_post)]['commented'] = 2
# Add similar post
for f_index in features:
f = features[f_index]
f_similar = f.loc[f.post_id == commented_post]['most_similar'].values[0]
f_rating = f.loc[f.post_id == commented_post]['most_similar_rating'].values[0]
for similar, rating in zip(f_similar, f_rating):
final[(user, similar)][f_index] = rating
final[(user, similar)]['commented'] = int((similar) in comment_dict[user]) + 1
commented = [i['commented'] for i in final.values()]
qid = [i[0] for i in final]
f1 = [i['1'] for i in final.values()]
f2 = [i['2'] for i in final.values()]
f3 = [i['3'] for i in final.values()]
eid = [i[1] for i in final]
final_df = pd.DataFrame()
final_df['commented'] = commented
final_df['qid'] = qid
final_df['f1'] = f1
final_df['f2'] = f2
final_df['f3'] = f3
final_df['eid'] = eid
df = final_df[['f1', 'f2', 'f3']]
normalized_df = (df - df.min()) / (df.max() - df.min())
final_df['f1'] = normalized_df.f1
final_df['f2'] = normalized_df.f2
final_df['f3'] = normalized_df.f3
save_obj(final_df, 'final_df' + str(len(users)))
train, validate, test = np.split(final_df.sample(frac=1), [int(.6 * len(df)), int(.8 * len(df))])
write_to_file('LTR_files/train' + str(len(users)) + '.txt', train)
write_to_file('LTR_files/validate' + str(len(users)) + '.txt', validate)
write_to_file('LTR_files/test' + str(len(users)) + '.txt', test)
def write_to_file(name, df):
with open(name, 'w') as file:
for i, r in tqdm(df.iterrows()):
file.write(str(r['commented']) + ' ')
file.write('qid:' + str(r['qid']) + ' ')
file.write('1:' + str(r['f1']) + ' ')
file.write('2:' + str(r['f2']) + ' ')
file.write('3:' + str(r['f3']) + ' ')
file.write('#eid = ' + str(r['eid']) + '\n')
def test(test_path, score_path):
test = pd.read_csv(test_path, header=None, sep=' ')
score = pd.read_csv(score_path, header=None, sep='\t')
score['eid'] = test[7]
score.columns = ['uid', '1', 'score', 'eid']
best10 = score.sort_values(['uid', 'score'], ascending=False).groupby('uid').head(10)
test_eid = best10.groupby(by='uid').eid.apply(list)
test_eid.index = test_eid.index.astype(str)
test_score = best10.groupby(by='uid').score.apply(list)
test_score.index = test_score.index.astype(str)
counts = []
ndcg_scores = []
for i in tqdm(test_eid.index):
cnt = 0
ytrue = []
yscore = test_score[i]
for post in test_eid[i]:
if post in comment_dict[i]:
ytrue.append(1)
cnt += 1
else:
ytrue.append(0)
ndcg_scores.append(ndcg_score(ytrue, yscore))
counts.append(cnt)
print("Average recall: ", np.mean(counts))
print(Counter(counts))
print("Mean ndcg_score", np.nanmean(ndcg_scores))
def dcg_score(y_true, y_score, k=10, gains="exponential"):
order = np.argsort(y_score)[::-1]
y_true = np.take(y_true, order[:k])
if gains == "exponential":
gains = 2 ** y_true - 1
elif gains == "linear":
gains = y_true
else:
raise ValueError("Invalid gains option.")
# highest rank is 1 so +2 instead of +1
discounts = np.log2(np.arange(len(y_true)) + 2)
return np.sum(gains * 1.0 / discounts)
def ndcg_score(y_true, y_score, k=10, gains="exponential"):
best = dcg_score(y_true, y_true, k, gains)
actual = dcg_score(y_true, y_score, k, gains)
return actual * 1.0 / best
if __name__ == '__main__':
comment_dict = load_obj('ready_to_use_recommender_data/comment_dict')
create_dataset_for_learning_to_rank(n_users=10)
test('LTR_files/test5000.txt', 'LTR_files/score5000.txt')
| true |
989c4f147cc45dc7b5e476dbea63c52f39a979ee | Python | Erotemic/netharn | /netharn/layers/swish.py | UTF-8 | 2,008 | 3.0625 | 3 | [
"Apache-2.0"
] | permissive | """
References:
https://github.com/lukemelas/EfficientNet-PyTorch/blob/master/efficientnet_pytorch/utils.py
https://discuss.pytorch.org/t/implementation-of-swish-a-self-gated-activation-function/8813
https://arxiv.org/pdf/1710.05941.pdf
"""
import torch
from torch import nn
class _SwishFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, i):
result = i * torch.sigmoid(i)
ctx.save_for_backward(i)
return result
@staticmethod
def backward(ctx, grad_output):
i = ctx.saved_variables[0]
sigmoid_i = torch.sigmoid(i)
return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
class Swish(nn.Module):
"""
When beta=1 this is Sigmoid-weighted Linear Unit (SiL)
``x * torch.sigmoid(x)``
References:
https://arxiv.org/pdf/1710.05941.pdf
Example:
>>> from netharn.layers.swish import * # NOQA
>>> x = torch.linspace(-20, 20, 100, requires_grad=True)
>>> self = Swish()
>>> y = self(x)
>>> y.sum().backward()
>>> # xdoctest: +REQUIRES(--show)
>>> import kwplot
>>> kwplot.autompl()
>>> kwplot.multi_plot(xydata={'beta=1': (x.data, y.data)}, fnum=1, pnum=(1, 2, 1),
>>> ylabel='swish(x)', xlabel='x', title='activation')
>>> kwplot.multi_plot(xydata={'beta=1': (x.data, x.grad)}, fnum=1, pnum=(1, 2, 2),
>>> ylabel='𝛿swish(x) / 𝛿(x)', xlabel='x', title='gradient')
>>> kwplot.show_if_requested()
"""
def __init__(self, beta=1.0):
super(Swish, self).__init__()
self.beta = beta
def forward(self, x):
"""
Equivalent to ``x * torch.sigmoid(x)``
"""
if self.beta == 1:
return _SwishFunction.apply(x)
else:
return x * torch.sigmoid(x * self.beta)
def receptive_field_for(self, field):
return field
def output_shape_for(self, shape):
return shape
| true |
4c7f73839b4b2cba1e07f7b63cdb1397cf5cf11d | Python | rohan-sawhney/correspondence | /deps/cyamites/viewer/Camera.py | UTF-8 | 6,478 | 3.0625 | 3 | [
"MIT"
] | permissive | # A class encapsulating the basic functionality commonly used for an OpenGL view
# System imports
import numpy as np
from math import pi, sin, cos, tan
# OpenGL imports
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
# Use euclid for rotations
import euclid as eu
# Cyamites imports
from ..core.Utilities import normalize
class Camera(object):
def __init__(self, windowWidth, windowHeight):
# Window variables
self.updateDimensions(windowWidth, windowHeight)
# View variables
self.viewTarget = np.array((0.0,0.0,0.0))
self.cameraDir = eu.Vector3(0,0,1).normalize()
self.upDir = eu.Vector3(0,1,0).normalize()
self.cameraTrans = np.array((0.0,0.0,0.0))
self.fov = 65 # degrees
self.nearClipRat = 0.01
self.farClipRat = 100
self.zoomDist = 3.0 # Camera distance from target
self.minZoom = 0.01 # Cannot zoom closer than this
# Mouse drag variables
self.mouseDragging = False
self.mouseDragState = None
self.lastPos = None
self.shiftHeld = False
### OpenGL model and projection
def projMat(self):
# Build a projection matrix
fVal = 1.0 / tan(self.fov * (pi / 360.0))
farClip = self.farClipRat * self.zoomDist
nearClip = self.nearClipRat * self.zoomDist
projMat = np.eye(4)
projMat[0,0] = fVal / self.aspectRatio
projMat[1,1] = fVal
projMat[2,2] = (farClip + nearClip) / (nearClip - farClip)
projMat[2,3] = (2.0 * farClip * nearClip) / (nearClip - farClip)
projMat[3,2] = -1.0
projMat[3,3] = 0.0
return projMat.astype(np.float32)
def viewMat(self):
# First make sure we know all relevant positions and directions
E = self.viewTarget + np.array(self.cameraDir) * self.zoomDist
C = self.viewTarget
U = np.array(self.upDir)
# Rotation matrix to put the camera in the right direction
rotMat = np.zeros((4,4))
rotMat[0,0:3] = np.cross(self.upDir, self.cameraDir)
rotMat[1,0:3] = self.upDir
rotMat[2,0:3] = self.cameraDir
rotMat[3,3] = 1.0
# Translation matrix, which mostly just pushes it out to the -z Axis
# where the camera looks
# If we want to make the camera translate, should probably add it here
transMat = np.eye(4)
transMat[0,3] = 0.0 + self.cameraTrans[0]
transMat[1,3] = 0.0 + self.cameraTrans[1]
transMat[2,3] = -self.zoomDist + self.cameraTrans[2]
transMat[3,3] = 1.0
viewMat = np.dot(transMat, rotMat)
return viewMat.astype(np.float32)
def getPos(self):
return (self.viewTarget + np.array(self.cameraDir) * self.zoomDist).astype(np.float32)
def getUp(self):
return np.array(self.upDir).astype(np.float32)
def updateDimensions(self, windowWidth, windowHeight):
self.aspectRatio = float(windowWidth) / windowHeight
self.windowWidth = windowWidth
self.windowHeight = windowHeight
glViewport(0, 0, windowWidth, windowHeight);
### Mouse and keyboard callbacks to reposition
def processMouse(self, button, state, x, y, shiftHeld):
"""Proces mouse motion. Returns True if this event was end of a click-in-place"""
# print("ProcessMouse button = " + str(button) + " state = " + str(state))
# Scroll wheel for zoom
if button == 3 or button == 4:
if state == GLUT_UP:
return
elif button == 3:
self.zoomIn()
elif button == 4:
self.zoomOut()
# Left click activates dragging
elif button == GLUT_LEFT_BUTTON:
if state == GLUT_DOWN:
self.mouseDragging = True
self.lastPos = (x,y)
# Holding shift gives translation instead of rotation
if(shiftHeld):
self.mouseDragState = 'translate'
else:
self.mouseDragState = 'rotate'
else: # (state == GLUT_UP)
self.mouseDragging = False
self.mouseDragState = None
if (x,y) == self.lastPos:
self.lastPos = None
return True
self.lastPos = None
return False
def processMotion(self, x, y):
if self.mouseDragging:
# The vector representing this drag, scaled so the dimensions
# of the window correspond to 1.0
delX = (float(x) - self.lastPos[0]) / self.windowWidth
delY = (float(y) - self.lastPos[1]) / self.windowWidth
if(self.mouseDragState == 'rotate'):
# Scale the rotations relative to the screen size
delTheta = -2*pi * delX
delPhi = -pi * delY
# Rotate by theta around 'up' (rotating up is unneeded since it
# would do nothing)
oldCamDir = self.cameraDir.copy();
self.cameraDir = self.cameraDir.rotate_around(self.upDir, delTheta)
# # Rotate by phi around 'left'
leftDir = self.upDir.cross(oldCamDir)
self.cameraDir = self.cameraDir.rotate_around(leftDir, delPhi)
self.upDir = self.upDir.rotate_around(leftDir, delPhi)
elif(self.mouseDragState == 'translate'):
moveDist = self.zoomDist * 5.0
self.cameraTrans[0] += delX*moveDist
self.cameraTrans[1] -= delY*moveDist
self.lastPos = (x,y)
def processKey(self, key, x, y):
# print("ProcessKey key = " + str(key))
moveDist = self.zoomDist * 0.02
# Use 'r' and 'f' to zoom (OSX doesn't give mouse scroll events)
if key == 'r':
self.zoomIn()
elif key == 'f':
self.zoomOut()
# Use 'wasd' to translate view window
elif key == 'd':
self.cameraTrans[0] += moveDist
elif key == 'a':
self.cameraTrans[0] -= moveDist
elif key == 'w':
self.cameraTrans[1] += moveDist
elif key == 's':
self.cameraTrans[1] -= moveDist
def zoomIn(self):
self.zoomDist = max(self.minZoom, self.zoomDist * 0.9)
def zoomOut(self):
self.zoomDist = self.zoomDist * 1.1
| true |
de94fa336f0876b3fd4e27ab6f71c60c12851734 | Python | hanuschv/GeoPy | /Assignment_04_Hanuschik.py | UTF-8 | 6,638 | 3.34375 | 3 | [] | no_license | # ==================================================================================================== #
# Assignment_04 #
# (c) Vincent Hanuschik, 10/11/2019 #
# #
# ================================== LOAD REQUIRED LIBRARIES ========================================= #
import time
import os
import gdal
import numpy as np
# ======================================== SET TIME COUNT ============================================ #
starttime = time.strftime("%a, %d %b %Y %H:%M:%S" , time.localtime())
print("--------------------------------------------------------")
print("Starting process, time: " + starttime)
print("")
# =================================== DATA PATHS AND DIRECTORIES====================================== #
path = '/Users/Vince/Documents/Uni MSc/Msc 7 Geoprocessing with Python/Assignment04_Files/'
# sorted() sorts alphabetically in order to maintain clarity. file_list[0]=2000; file_list[1]=2005 etc.
file_list = [path + file for file in sorted(os.listdir(path))]
# ==================================================================================================== #
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Exercise I ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# ==================================================================================================== #
def CornerCoordinates(rasterpath):
'''
Gets corner coordinates of given raster (filepath). Uses GetGeoTransform to extract coordinate information.
Returns list with coordinates in [upperleft x, upperleft y, lowerright x and lowerright y] form.
:param rasterpath:
:return:
'''
raster = gdal.Open(rasterpath)
gt = raster.GetGeoTransform() # get geo transform data
ul_x = gt[0] # upper left x coordinate
ul_y = gt[3] # upper left y coordinate
lr_x = ul_x + (gt[1] * raster.RasterXSize) # upper left x coordinate + number of pixels * pixel size
lr_y = ul_y + (gt[5] * raster.RasterYSize) # upper left y coordinate + number of pixels * pixel size
coordinates = [ul_x , ul_y , lr_x , lr_y]
return coordinates
def overlapExtent(rasterPathlist):
'''
Finds the common extent/ overlap and returns geo coordinates from the extent.
Returns list with corner coordinates.
Uses GetGeoTransform to extract corner values of each raster.
Common extent is then calculated by maximum ul_x value,
minimum ul_y value, minimum lr_x value and maximum lr_y value.
Use list comprehensions to calculate respective coordinates for all rasters and
use the index to extract correct position coordinates[upperleft x, upperleft y, lowerright x and lowerright y].
:param rasterPathlist:
:return:
'''
ul_x_list = [CornerCoordinates(path)[0] for path in rasterPathlist]
ul_y_list = [CornerCoordinates(path)[1] for path in rasterPathlist]
lr_x_list = [CornerCoordinates(path)[2] for path in rasterPathlist]
lr_y_list = [CornerCoordinates(path)[3] for path in rasterPathlist]
overlap_extent = []
overlap_extent.append(max(ul_x_list))
overlap_extent.append(min(ul_y_list))
overlap_extent.append(min(lr_x_list))
overlap_extent.append(max(lr_y_list))
return overlap_extent
geo_overlap = overlapExtent(file_list)
# print(geo_overlap)
# ==================================================================================================== #
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Exercise I & II ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# ==================================================================================================== #
imglist = [gdal.Open(raster) for raster in file_list]
def subset(raster, subset, coordinates=True, multiband=False):
"""
Subsets a given raster to a set of coordinates [subset] into an array.
When subset is given as list of geographic coordinates, they will be transformed to array indices.
When subset is given as array indices, image will be subsetted. Indices must be in format [ulx, uly,lrx,lry]
Bool-Arguments don't have to be specified, when called. Defaults to function definition.
:param raster:
:param subset:
:param coordinates:
:param multiband:
:return array:
"""
if coordinates:
gt = raster.GetGeoTransform()
inv_gt = gdal.InvGeoTransform(gt)
app_gt_offset_upperleft = gdal.ApplyGeoTransform(inv_gt , geo_overlap[0] , geo_overlap[1])
app_gt_offset_lowerright = gdal.ApplyGeoTransform(inv_gt , geo_overlap[2] , geo_overlap[3])
off_ulx , off_uly = map(int , app_gt_offset_upperleft)
off_lrx , off_lry = map(int , app_gt_offset_lowerright)
rows = off_lry - off_uly
columns = off_lrx - off_ulx
# idx = [off_ulx, off_uly, columns, rows ]
array = raster.ReadAsArray(off_ulx , off_uly , columns , rows)
return array
else:
idx = subset
if gdal:
array = raster.ReadAsArray(idx[0], idx[1],
idx[2] - idx[0],
idx[3] - idx[1])
else:
if multiband:
array = raster[:, idx[0]:idx[2], idx[1]:idx[3]]
else:
array = raster[idx[0]:idx[2], idx[1]:idx[3]]
return array
# stack_sub = [subset(raster, geo_overlap) for raster in stacklist]
#applies subset function to all images in imglist [see above]. Returns list of respective arrays.
sublist = [subset(img, geo_overlap, coordinates=True) for img in imglist]
#small summary function to calculate statistics for each array.
def arr_summary(array, decimals = 2):
'''
calculates maximum, mean, minimum and standard deviation values for given array.
Decimals specified are used only for mean and std.
:param array:
:param decimals:
:return:
'''
max = ['maximum value' ,np.max(array)]
mean = ['mean value', round(np.mean(array), decimals)]
min = ['minimum value', np.min(array)]
std = ['standard deviation value' , round(np.std(array), decimals)]
return max, mean, min, std
summary = [arr_summary(arr, 2) for arr in sublist]
# [print(sum) for sum in summary]
# =============================== END TIME-COUNT AND PRINT TIME STATS ============================== #
print("")
endtime = time.strftime("%a, %d %b %Y %H:%M:%S" , time.localtime())
print("--------------------------------------------------------")
print("start: " + starttime)
print("end: " + endtime)
print("") | true |
41ca9aea83c35bf739f2ecd1d3a89a23009420cb | Python | Ashley95/Python-FruitMall | /shopping/utils/functions.py | UTF-8 | 204 | 2.75 | 3 | [] | no_license | import random
def get_ticket():
s = 'qwertyuiopasdfghjklzxcvbnm1234567890'
ticket = ''
for i in range(28):
ticket += random.choice(s)
ticket = 'TK_' + ticket
return ticket
| true |
a874bc2a235dc5e13549aeef15b34a241722bda0 | Python | maguoying/python_work | /4-11.py | UTF-8 | 687 | 3.90625 | 4 | [] | no_license | pizzas = ['New York','chicago','Pan','Thick','Cracker'];
message = 'The first three items in the list are:';
print(message);
print(pizzas[:3]);
print('Three items from the middle of the list are:');
print(pizzas[1:4]);
print('The last three items in the list are:');
print(pizzas[-3:]);
friend_pizzas = [];
pizzas.append('ice');
friend_pizzas.append('naiyou');
print('My favorite pizzas are:');
for pizza in pizzas:
print(pizza);
print("My friend's favorite pizzas are:");
for friend_pizza in friend_pizzas:
print(friend_pizza);
print('=========================================================');
my_foods = ['pizza','falafel','carrot cake'];
for my_food in my_foods:
print(my_food);
| true |
67443a8a61aa52c1dd90bfcf04cb43018828805a | Python | pranavdave893/Leetcode | /redundant_conections_2.py | UTF-8 | 1,304 | 3.296875 | 3 | [
"MIT"
] | permissive | class Solution:
def findRedundantDirectedConnection(self, edges):
def find(u): # union find
if p[u] != u:
p[u] = find(p[u])
return p[u]
def detect_cycle(edge): # check whether you can go from u to v (forms a cycle) along the parents
u, v = edge
while u != v and u in parents:
u = parents[u]
return u == v
candidates = [] # stores the two edges from the vertex where it has two parents
parents = {}
for u, v in edges:
if v not in parents:
parents[v] = u
else:
candidates.append((parents[v], v))
candidates.append((u, v))
if candidates: # case 2 & case 3 where one vertex has two parents
return candidates[0] if detect_cycle(candidates[0]) else candidates[1]
else: # case 1, we just perform a standard union find, same as redundant-connection
p = list(range(len(edges)+1))
for edge in edges:
u, v = map(find, edge)
if u == v:
return edge
p[u] = p[v]
abc = Solution()
abc.findRedundantDirectedConnection([[1,2], [2,3], [3,4], [4,1], [1,5]])
| true |
c1cdcf8fb82b7af04edbb9f5b4ecaa453fb05ffe | Python | Muyiyunzi/ML-pyCV-Notes | /W8/7.2-create_vocab.py | UTF-8 | 668 | 2.65625 | 3 | [] | no_license | # -*- coding:utf-8 -*-
from PIL import Image
import imtools
import pickle
import vocabulary
import sift
# 获取selected-fontimages文件下的图像文件名,并保存在list中
imlist = imtools.get_imlist('first500')
nbr_images = len(imlist)
featlist = [ imlist[i][:-3]+'sift' for i in range(nbr_images)]
for i in range(nbr_images):
sift.process_image(imlist[i],featlist[i])
voc = vocabulary.Vocabulary('ukbenchtest')
voc.train(featlist, 500, 10) #书中为1000,这里只取500个应该改成500吧
# 保存词汇
with open('vocabulary.pkl', 'wb') as f:
pickle.dump(voc, f)
print 'vocabulary is:', voc.name, voc.nbr_words
| true |
48c25f3106a1c76881078957bca974d0e29d9c77 | Python | atrinik/dwc | /maps/shattered_islands/scripts/strakewood_island/gandyld.py | UTF-8 | 4,444 | 3.0625 | 3 | [] | no_license | ## @file
## Quest for power crystal, given by Gandyld northwest
## of Quick Wolves guild.
from Atrinik import *
from QuestManager import QuestManager
activator = WhoIsActivator()
me = WhoAmI()
msg = WhatIsMessage().strip().lower()
player_info_name = "gandyld_mana_crystal"
player_info = activator.GetPlayerInfo(player_info_name)
crystal_arch = "power_crystal"
crystal_name = "Gandyld's Mana Crystal"
## Load the quests and QuestManagers.
def load_quests():
global qm, qm2
quests = [
{
"quest_name": "Gandyld's Quest",
"type": QUEST_TYPE_KILL,
"kills": 1,
"message": "Find and kill a purple worm in Old Outpost and then return to Gandyld, northwest of Quick Wolves guild.",
},
{
"quest_name": "Gandyld's Quest II",
"type": QUEST_TYPE_KILL,
"kills": 1,
"message": "Find and kill King Rhun at the end of Old Outpost and then return to Gandyld, northwest of Quick Wolves guild.",
}]
qm = QuestManager(activator, quests[0])
qm2 = QuestManager(activator, quests[1])
## Create the crystal.
def create_crystal():
crystal = activator.CreateObjectInside(crystal_arch, 1, 1)
crystal.name = crystal_name
# 1 gold
crystal.value = 10000
# Figure out how much capacity to give...
if qm2.completed():
crystal.maxsp = 200
elif qm.completed():
crystal.maxsp = 100
else:
crystal.maxsp = 50
# So that it will disappear if we drop it
crystal.f_startequip = True
activator.Write("{0} hands you a shining mana crystal.".format(me.name), COLOR_GREEN)
## Upgrade an existing crystal.
def upgrade_crystal(crystal, capacity):
me.SayTo(activator, "You have done it! Now allow me to boost your mana crystal...", 1)
activator.Write("{0} casts some strange magic...".format(me.name), COLOR_BLUE)
crystal.maxsp = capacity
if msg == "hello" or msg == "hi" or msg == "hey":
if not player_info:
activator.Write("\nThe old mage {0} mumbles something and slowly turns his attention to you.".format(me.name))
me.SayTo(activator, "\nWhat is it? Can't you see I'm ^busy^ here?")
else:
crystal = activator.CheckInventory(2, crystal_arch, crystal_name)
load_quests()
if not crystal:
me.SayTo(activator, "\nYou lost the mana crystal?! You're in luck, I have a spare one right here...")
create_crystal()
else:
me.SayTo(activator, "\nHello again, {0}.".format(activator.name))
# First quest
if not qm.completed():
if not qm.started():
me.SayTo(activator, "Would you be interested in ^boosting^ your mana crystal?", 1)
elif qm.finished():
upgrade_crystal(crystal, 100)
qm.complete()
else:
me.SayTo(activator, "Find and kill a purple worm in Old Outpost west of here.", 1)
# Second quest
elif not qm2.completed():
if not qm2.started():
me.SayTo(activator, "Would you be interested in ^boosting^ your mana crystal even further?", 1)
elif qm2.finished():
upgrade_crystal(crystal, 200)
qm2.complete()
else:
me.SayTo(activator, "Find and kill King Rhun at the end of Old Outpost, located west of here.", 1)
else:
me.SayTo(activator, "Sorry, I cannot upgrade your mana crystal any further.", 1)
# Accept one of the quests
elif msg == "boosting":
load_quests()
if not qm.completed():
if not qm.started():
me.SayTo(activator, "\nFind and kill a purple worm in Old Outpost west of here.")
qm.start()
elif not qm2.completed():
if not qm2.started():
me.SayTo(activator, "\nFind and kill King Rhun at the end of Old Outpost, located west of here.")
qm2.start()
elif not player_info:
if msg == "busy":
me.SayTo(activator, "\nYes, busy. I'm in the process of creating a very powerful ^mana crystal^.")
elif msg == "mana crystal":
me.SayTo(activator, "\nYou just won't don't want to give up, do you? Okay, I will ^tell^ you about mana crystals...")
elif msg == "tell":
me.SayTo(activator, "\nMana crystals are items sought after by mages. They allow you to ^store^ a certain amount of mana, you see.")
elif msg == "store":
me.SayTo(activator, "\nWhen a mage applies a mana crystal while he is full on mana, half of his mana will be transferred to the crystal. The mage can then apply the crystal at any time to get the mana back. Crystals have a maximum mana capacity, so mages are always after the ones that can hold the most.\nHmm, I seem to have a crystal I don't need right here. Do you ^want^ it?")
elif msg == "want":
activator.CreatePlayerInfo(player_info_name)
load_quests()
create_crystal()
| true |
fb7c48f8267248bb0340d83253cbcd1637138a78 | Python | usnistgov/swid-autotools | /lib/swidtag.py | UTF-8 | 13,696 | 2.671875 | 3 | [
"NIST-PD"
] | permissive | #!/usr/bin/env python
# This software was developed at the National Institute of Standards
# and Technology by employees of the Federal Government in the course
# of their official duties. Pursuant to title 17 Section 105 of the
# United States Code this software is not subject to copyright
# protection and is in the public domain. NIST assumes no
# responsibility whatsoever for its use by other parties, and makes
# no guarantees, expressed or implied, about its quality,
# reliability, or any other characteristic.
#
# We would appreciate acknowledgement if the software is used.
"""
This script creates an unsigned SWID tag for a distribution bundle or directory. The contents of a distribution directory can be described as a Primary SWID tag, if using this script as part of a dist-hook Automake rule. The contents of an individual file, such as an archive file (.tar.gz, .zip, self-unpacking .exe, etc.), can also be represented as an annotated SWID tag.
This script takes as input the path to the distribution directory or file, and several parameters for authoring metadata. The path to the output file is also required, in order to determine relative pathing of files.
Some XML ElementTree logistics in this script used this code as reference:
https://github.com/strongswan/swidGenerator/blob/master/swid_generator/generators/swid_generator.py
"""
__version__ = "0.8.0"
import os
import sys
import hashlib
import collections
import re
import xml.etree.ElementTree as ET
import logging
import uuid
_logger = logging.getLogger(os.path.basename(__file__))
rx_python_version_definition = re.compile(r"""^__version__(\s*)=(\s*)(?P<openquote>['"])(?P<version_value>.+)(?P=openquote).*$""")
def relative_path_to_directory(source_path, dest_directory):
    """
    Return the path of ".." ascensions needed to travel from the directory
    containing source_path up to dest_directory.

    Returns "." when source_path resides directly in dest_directory, ".."
    when it is one directory level below, "../.." when two levels below,
    and so on.  Both arguments are canonicalized with os.path.realpath
    before comparison.

    In the context of this code base, this is most likely to return "." or
    ".." for most of its use cases.

    :param source_path: Path to a file; its containing directory is the
        starting point of the ascent.
    :param dest_directory: Directory expected to be an ancestor of (or equal
        to) the directory containing source_path.
    :raises NotImplementedError: If dest_directory is not an ancestor of the
        directory containing source_path (e.g. it is a sibling subtree).
    """
    normed_source_path = os.path.realpath(source_path)
    normed_dest_directory = os.path.realpath(dest_directory)
    current_directory = os.path.dirname(normed_source_path)
    ascensions = []
    while True:
        if current_directory == normed_dest_directory:
            # Zero ascensions means the source file already lives directly
            # in the destination directory.
            return os.path.join(*ascensions) if ascensions else "."
        parent_directory = os.path.dirname(current_directory)
        if parent_directory == current_directory:
            # Reached the filesystem root without finding dest_directory.
            break
        ascensions.append("..")
        current_directory = parent_directory
    # Log the inputs before failing, to aid debugging of unsupported calls.
    _logger.info("source_path = %r.", source_path)
    _logger.info("dest_directory = %r.", dest_directory)
    raise NotImplementedError("dest_directory is not an ancestor of the directory containing source_path.")
def file_path_to_file_element(child_path):
    """
    Build a SWID File element describing the file at child_path.

    The element records the file's base name and byte size, plus SHA-256
    and SHA-512 digests of its full contents.  For Python source files
    (*.py), a "version" attribute is also recorded when a __version__
    assignment can be found by a line scan.

    :param child_path: Path to an existing regular file.
    :returns: An xml.etree.ElementTree.Element with tag "File".
    """
    child_name = os.path.basename(child_path)
    file_element = ET.Element("File")
    file_element_stat = os.stat(child_path)
    file_element.set("size", str(file_element_stat.st_size))
    sha256obj = hashlib.sha256()
    sha512obj = hashlib.sha512()
    with open(child_path, "rb") as child_fh:
        # Hash the whole file in 1 MiB chunks until EOF.  (A single read()
        # call would truncate the digest input for files larger than one
        # chunk, producing incorrect hashes.)
        while True:
            buf = child_fh.read(2**20)
            if not buf:
                break
            sha256obj.update(buf)
            sha512obj.update(buf)
    file_element.set("SHA256:hash", sha256obj.hexdigest().lower())
    file_element.set("SHA512:hash", sha512obj.hexdigest().lower())
    # Try adding version information for Python files.
    if child_name.endswith(".py"):
        # Use crude scan for __version__ instead of calling arbitrary scripts by opening a Python subprocess.
        with open(child_path, "r") as child_fh:
            for line in child_fh:
                cleaned_line = line.strip()
                maybe_match = rx_python_version_definition.search(cleaned_line)
                if maybe_match:
                    file_element.set("version", maybe_match.group("version_value"))
                    break
    file_element.set("name", child_name)
    return file_element
def directory_path_to_directory_element(directory_path):
    """
    Walk directory_path and build a nested tree of SWID Directory and File
    elements mirroring its contents.

    The top-level "swidtag" subdirectory (which holds the generated tags
    and would be self-referential) is excluded.  Children of every
    directory are recorded in sorted name order.

    :param directory_path: Path to the distribution directory, without a
        trailing slash.
    :returns: The root "Directory" Element, named after directory_path's
        basename.
    """
    # Convert distribution directory walk to Payload element.
    # Cache directory Element references by relative path.
    #   Key: Path relative to root of walk.
    #   Value: ET.Element.
    rel_dirpath_element = dict()
    # Prime with root directory.
    rel_dirpath_element[""] = ET.Element("Directory")
    rel_dirpath_element[""].set("name", os.path.basename(directory_path))
    # +1 accounts for the path separator following directory_path.
    directory_path_strlen = len(directory_path) + 1
    def _on_walk_error(e):
        """Since this should never happen, fail loudly."""
        raise(e)
    for (dirpath, dirnames, filenames) in os.walk(directory_path, onerror=_on_walk_error):
        # rel_dirpath: Relative path, no leading (c/o +1 above) or trailing slashes.
        rel_dirpath = dirpath[ directory_path_strlen : ].rstrip("/")
        # Ignore one self-referential directory: top-level swidtag/.
        if rel_dirpath == "":
            # Prune in place (slice assignment) so os.walk itself skips
            # descending into swidtag/.  Rebinding the local name instead
            # would leave os.walk's traversal unaffected.
            dirnames[:] = [ x for x in dirnames if x != "swidtag" ]
        elif rel_dirpath == "swidtag" or rel_dirpath.startswith("swidtag/"):
            # Defensive guard; unreachable now that swidtag/ is pruned above.
            continue
        e = rel_dirpath_element[rel_dirpath]
        # In-place sort guarantees remainder of walk will visit in sorted directory order.
        dirnames.sort()
        for childname in sorted(dirnames + filenames):
            childpath = os.path.join(dirpath, childname)
            rel_childpath = os.path.join(rel_dirpath, childname)
            if childname in dirnames:
                # Cache directory references for when the walk visits them.
                ce = ET.Element("Directory")
                rel_dirpath_element[rel_childpath] = ce
                ce.set("name", childname)
            else:
                ce = file_path_to_file_element(childpath)
            e.append(ce)
    return rel_dirpath_element[""]
def main():
    """Build and write a SWID (ISO 19770-2) tag document from `args`.

    Assembles the <SoftwareIdentity> root with its identity attributes,
    the <Entity> and optional <Link> children, then a file-system
    footprint container (<Evidence> or <Payload>) populated from every
    --distribution-path, and finally serializes the XML to
    args.out_swidtag.
    """
    # Build Payload element by walking distribution directory.
    swidtag = ET.Element("SoftwareIdentity")
    swidtag.set("xmlns", "http://standards.iso.org/iso/19770/-2/2015/schema.xsd")
    swidtag.set("xmlns:SHA256", "http://www.w3.org/2001/04/xmlenc#sha256")
    swidtag.set("xmlns:SHA512", "http://www.w3.org/2001/04/xmlenc#sha512")
    swidtag.set("xmlns:n8060", "http://csrc.nist.gov/ns/swid/2015-extensions/1.0")
    if args.corpus:
        swidtag.set("corpus", "true")
    if args.lang == "":
        raise ValueError("--lang parameter cannot be blank.")
    swidtag.set("xml:lang", args.lang)
    if args.name == "":
        raise ValueError("--name parameter cannot be blank.")
    swidtag.set("name", args.name)
    # tagId precedence: explicit --tag-id, then first line of --tag-id-file,
    # else a freshly generated random UUID.
    tag_id = None
    if not args.tag_id is None:
        tag_id = args.tag_id
    elif not args.tag_id_file is None:
        with open(args.tag_id_file, "r") as tag_id_fh:
            tag_id = tag_id_fh.readline().strip()
    if tag_id is None:
        # Assign default.
        tag_id = str(uuid.uuid4())
    swidtag.set("tagId", tag_id)
    # tagVersion precedence: explicit --tag-version, then the file contents
    # (the int() round-trip validates the text), else "1".
    tag_version = None
    if not args.tag_version is None:
        tag_version = str(args.tag_version)
    elif not args.tag_version_file is None:
        with open(args.tag_version_file, "r") as tag_version_fh:
            tag_version_str = tag_version_fh.read(8).strip()
            tag_version_int = int(tag_version_str)
            tag_version = str(tag_version_int)
    if tag_version is None:
        # Assign default.
        tag_version = "1"
    swidtag.set("tagVersion", tag_version)
    if args.version == "":
        raise ValueError("--version parameter cannot be blank.")
    swidtag.set("version", args.version)
    if args.version_scheme == "":
        raise ValueError("--version-scheme parameter cannot be blank.")
    swidtag.set("versionScheme", args.version_scheme)
    # Set up entities, possibly consolidating.
    # Key: (name, regid)
    # Value: Set
    entity_role_sets = collections.defaultdict(set)
    if args.aggregator_name or args.aggregator_regid:
        if args.aggregator_name and args.aggregator_regid:
            entity_role_sets[(args.aggregator_name, args.aggregator_regid)].add("aggregator")
        else:
            raise ValueError("If supplying an aggregator, the name and regid must both be supplied.")
    if args.softwarecreator_name == "":
        raise ValueError("--softwarecreator-name parameter cannot be blank.")
    if args.softwarecreator_regid == "":
        raise ValueError("--softwarecreator-regid parameter cannot be blank.")
    entity_role_sets[(args.softwarecreator_name, args.softwarecreator_regid)].add("softwareCreator")
    if args.tagcreator_name == "":
        raise ValueError("--tagcreator-name parameter cannot be blank.")
    if args.tagcreator_regid == "":
        raise ValueError("--tagcreator-regid parameter cannot be blank.")
    entity_role_sets[(args.tagcreator_name, args.tagcreator_regid)].add("tagCreator")
    # One <Entity> per unique (name, regid); an entity playing several roles
    # gets them space-joined in a single role attribute.
    for (name, regid) in sorted(entity_role_sets.keys()):
        e = ET.Element("Entity")
        e.set("name", name)
        e.set("regid", regid)
        e.set("role", " ".join(sorted(entity_role_sets[(name, regid)])))
        swidtag.append(e)
    if args.link_describedby:
        e = ET.Element("Link")
        e.set("href", args.link_describedby)
        e.set("rel", "describedby")
        swidtag.append(e)
    # Footprint container: --evidence and --payload are a required,
    # mutually exclusive pair in the argument parser below.
    if args.evidence:
        footprint_element = ET.Element("Evidence")
    elif args.payload:
        footprint_element = ET.Element("Payload")
    else:
        raise NotImplementedError("File system footprint container element not implemented. (Expecting --evidence or --payload.)")
    footprint_element.set("n8060:pathSeparator", os.path.sep)
    footprint_element.set("n8060:envVarPrefix", "$")
    footprint_element.set("n8060:envVarSuffix", "")
    swidtag.append(footprint_element)
    # Deduplicate, then process in deterministic (sorted) order.
    distribution_paths = set([x for x in args.distribution_path])
    for distribution_path in sorted(distribution_paths):
        if os.path.isdir(distribution_path):
            element_from_path = directory_path_to_directory_element(distribution_path)
            # Overwrite the root directory's name with the relative path from the swidtag.
            element_from_path.set(
                "name",
                relative_path_to_directory(args.out_swidtag, distribution_path)
            )
            footprint_element.append(element_from_path)
        elif os.path.isfile(distribution_path):
            # Induce containing directory.
            element_containing_path = ET.Element("Directory")
            element_containing_path.set(
                "name",
                relative_path_to_directory(
                    args.out_swidtag,
                    os.path.dirname(distribution_path)
                )
            )
            footprint_element.append(element_containing_path)
            element_from_path = file_path_to_file_element(distribution_path)
            element_containing_path.append(element_from_path)
        else:
            raise NotImplementedError("Distribution path is neither a file nor a directory: %r." % distribution_path)
    # ET.tostring: Python 2 wants a codec name ("UTF-8"), Python 3 wants
    # "unicode" so a str (not bytes) comes back for the text-mode write.
    with open(args.out_swidtag, "w") as out_fh:
        out_encoding = "UTF-8" if sys.version_info[0] < 3 else "unicode"
        out_fh.write(ET.tostring(swidtag, encoding=out_encoding))
if __name__ == "__main__":
    # Command-line interface; every attribute read by main() above is
    # declared here.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--debug", action="store_true")
    parser.add_argument("--corpus", action="store_true")
    parser.add_argument("--lang", required=True)
    parser.add_argument("--link-describedby", help="URL for documentation of this swidtag.")
    parser.add_argument("--name", required=True)
    parser.add_argument("--aggregator-name")
    parser.add_argument("--aggregator-regid")
    parser.add_argument("--softwarecreator-name", required=True)
    parser.add_argument("--softwarecreator-regid", required=True)
    parser.add_argument("--tagcreator-name", required=True)
    parser.add_argument("--tagcreator-regid", required=True)
    parser.add_argument("--version", required=True)
    parser.add_argument("--version-scheme", required=True)
    walk_source_group = parser.add_mutually_exclusive_group(required=True)
    walk_source_group.add_argument("--distribution-path", action="append", help="Path to existing file or directory to incorporate into the SWID tag. Can be given multiple times. Files will have a containing directory element induced to track relative pathing.")
    #TODO Left for future implementation.
    #walk_source_group.add_argument("--file-manifest")
    tree_group = parser.add_mutually_exclusive_group(required=True)
    tree_group.add_argument("--evidence", action="store_true", help="The element to use for an evidence tag.")
    tree_group.add_argument("--payload", action="store_true", help="The element to use for a corpus tag.")
    # If no member of this group is specified, a random UUID will be put in the output XML.
    tag_id_group = parser.add_mutually_exclusive_group()
    tag_id_group.add_argument("--tag-id", help="A string to use to declare the tagId attribute.")
    tag_id_group.add_argument("--tag-id-file", help="A file containing the tagId.")
    # If no member of this group is specified, a '1' will be put in the output XML.
    tag_version_group = parser.add_mutually_exclusive_group()
    tag_version_group.add_argument("--tag-version", type=int, help="An integer to use to declare the tag version.")
    tag_version_group.add_argument("--tag-version-file", help="A file containing the integer tag version.")
    parser.add_argument("out_swidtag")
    args = parser.parse_args()
    # NOTE(review): `logging` is not imported in this visible block —
    # presumably imported earlier in the module; confirm.
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)
    main()
| true |
3135f9cf5e4592f5e9ccce9f2f259f6aaed19108 | Python | AdamZhouSE/pythonHomework | /Code/CodeRecords/2119/60833/281948.py | UTF-8 | 545 | 3 | 3 | [] | no_license | x=list(input().split(','))
# NOTE(review): `x` is read as a comma-separated line by the preceding
# statement.  This appears to implement the "Self Crossing" test
# (LeetCode 335): does a path of these move lengths, turning the same way
# each time, ever intersect itself? — confirm against the problem source.
x=list(map(int,x))
# judge == 1 means "no crossing found yet".
judge=1
# Fewer than four moves can never cross.
if len(x)<=3:
    print(False)
else:
    for i in range(3,len(x)):
        # NOTE: `&` is the bitwise AND; both operands are already bools here.
        # Case: current edge crosses the edge three steps back.
        if((x[i]>=x[i-2])&(x[i-3]>=x[i-1])):
            print(True)
            judge=0
            break
        # Case: current edge meets the edge four steps back head-on.
        if((i>=4)&(x[i-1]==x[i-3])&(x[i]>=x[i-2]-x[i-4])):
            print(True)
            judge=0
            break
        # Case: current edge crosses the edge five steps back.
        if((i>=5)&(x[i-2]>=x[i-4])&(x[i-3]>=x[i-1])&(x[i-1]>=x[i-3]-x[i-5])&(x[i]>=x[i-2]-x[i-4])):
            print(True)
            judge=0
            break
    if judge:
        print(False)
9b36d3cd9d8779b6c3065bc85932a5d61d9962ff | Python | pwoble/ISM-4402 | /Woble_7-1.py | UTF-8 | 613 | 3.078125 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
# Build a tiny two-column grade table.
names = ['Bob', 'Jessica', 'Mary', 'John', 'Mel']
grades = [76,83,77,78,95]
GradeList = zip(names, grades)
df = pd.DataFrame(data = GradeList,
                  columns=['Names', 'Grades'])
# Jupyter cell magic surviving the notebook export; only works under IPython.
get_ipython().magic(u'matplotlib inline')
df.plot()
# In[11]:
df.plot()
# Annotation anchored at x=0 (axes fraction) / y=76 (data coordinates),
# with the label offset (70, -100) points from that anchor.
displayText = "Wow!"
xloc = 0
yloc = 76
xtext = 70
ytext = -100
# NOTE(review): `plt` is never imported in this export (only pandas is);
# as written this call raises NameError outside the original notebook.
plt.annotate(displayText,
             xy=(xloc, yloc),
             arrowprops=dict(facecolor='black',
                             shrink=0.05),
             xytext=(xtext,ytext),
             xycoords=('axes fraction', 'data'),
             textcoords='offset points')
# In[ ]:
| true |
ad460ac9219846698c392928a8ebe0da3ad81b50 | Python | Jeongeun-Choi/CodingTest | /python_algorithm/소수찾기.py | UTF-8 | 335 | 3.359375 | 3 | [] | no_license | n = int(input())
# Sieve of Eratosthenes over [0, n]; `n` was read on the preceding line.
# arr[i] is True while i is still a prime candidate (0 and 1 excluded).
arr = [False, False] + [True] * (n-1)
answer = []
for i in range(2, n+1):
    if arr[i]:
        # i survived every smaller prime, so it is prime.
        answer.append(i)
        # Strike out all further multiples of i.
        for j in range(2*i, n+1, i):
            arr[j] = False
# Print only the COUNT of primes up to n.
print(len(answer))
# Solve prime-finding problems with the Sieve of Eratosthenes.
# And remember: no even number except 2 is prime.
| true |
cc46f2e727fac712a56ea4e55c2a286bad4ebfb1 | Python | peanut996/Leetcode | /Python/1208.GetEqualSubstringsWithinBudget.py | UTF-8 | 1,910 | 3.625 | 4 | [] | no_license | """1208. 尽可能使字符串相等
给你两个长度相同的字符串,s 和 t。
将 s 中的第 i 个字符变到 t 中的第 i 个字符需要 |s[i] - t[i]| 的开销(开销可能为 0),也就是两个字符的 ASCII 码值的差的绝对值。
用于变更字符串的最大预算是 maxCost。在转化字符串时,总开销应当小于等于该预算,这也意味着字符串的转化可能是不完全的。
如果你可以将 s 的子字符串转化为它在 t 中对应的子字符串,则返回可以转化的最大长度。
如果 s 中没有子字符串可以转化成 t 中对应的子字符串,则返回 0。
示例 1:
输入:s = "abcd", t = "bcdf", cost = 3
输出:3
解释:s 中的 "abc" 可以变为 "bcd"。开销为 3,所以最大长度为 3。
示例 2:
输入:s = "abcd", t = "cdef", cost = 3
输出:1
解释:s 中的任一字符要想变成 t 中对应的字符,其开销都是 2。因此,最大长度为 1。
示例 3:
输入:s = "abcd", t = "acde", cost = 0
输出:1
解释:你无法作出任何改动,所以最大长度为 1。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/get-equal-substrings-within-budget
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
class Solution:
    """LeetCode 1208 — longest substring of ``s`` that can be converted
    into the matching substring of ``t`` with total ASCII-difference
    cost at most ``maxCost``."""

    def equalSubstring(self, s: str, t: str, maxCost: int) -> int:
        """Sliding-window scan; the window never shrinks, so its final
        width is the best width seen."""
        # Per-index conversion cost.
        diffs = [abs(ord(a) - ord(b)) for a, b in zip(s, t)]
        start = 0
        budget = 0
        best = 0
        for end in range(len(diffs)):
            budget += diffs[end]
            if budget > maxCost:
                # Over budget: slide the window right, dropping its left edge.
                budget -= diffs[start]
                start += 1
            # Track the widest affordable window.
            best = max(best, end - start + 1)
        return best
63962552f9fb356b1f78a403655d64f8e44494d6 | Python | adrienlagamelle/csSurvival | /build/lib/css/tests/test_database.py | UTF-8 | 2,664 | 2.78125 | 3 | [] | no_license | """Test for database implementation"""
import unittest
from css.app import app
from css.app.database import sqlalchemy
from css.app.database import user
from css.app.database import thread
from css.app.database import comment
class Database_TestCase(unittest.TestCase):
    """Integration tests for the user / thread / comment database helpers.

    Each test runs against a fresh SQLite database ('sqlite://', i.e. no
    file path — presumably in-memory), created in setUp and dropped in
    tearDown, so the autoincrement ids asserted below (1, 2, ...) restart
    for every test.
    """
    def setUp(self):
        # Point the Flask app at a throwaway SQLite DB and build the schema.
        app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
        app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
        sqlalchemy.create_all()
    def tearDown(self):
        # Drop everything so tests stay independent.
        sqlalchemy.drop_all()
    def test_user(self):
        """ Test User: creation, lookup by id and username, and password hashing. """
        a = user.new('daniel', 'djp468@mun.ca', 'admin')
        b = user.get(1)
        c = user.getFromUsername('daniel')
        d = user.new('notDaniel', 'me@danielpower.ca', '1234')
        all = user.getAll()
        # Both lookup paths must return the same record that new() created.
        self.assertEqual(a, b)
        self.assertEqual(b, c)
        self.assertNotEqual(a, d)
        # getAll preserves insertion order.
        self.assertEqual(all[0], a)
        self.assertEqual(all[1], d)
        self.assertEqual(a.id, 1)
        self.assertEqual(d.id, 2)
        self.assertEqual(a.username, 'daniel')
        self.assertEqual(a.email, 'djp468@mun.ca')
        # Passwords are verified through check_password, not stored verbatim.
        self.assertTrue(a.check_password('admin'))
        self.assertFalse(a.check_password('1234'))
    def test_thread(self):
        """ Test Thread: thread.new(title, body, user_id). """
        a = thread.new('This is the title', 'This is the body', 1)
        b = thread.new('This is another title', 'This is another body', 2)
        self.assertEqual(a.id, 1)
        self.assertEqual(b.id, 2)
        self.assertEqual(a.title, 'This is the title')
        self.assertEqual(b.title, 'This is another title')
        self.assertEqual(a.body, 'This is the body')
        self.assertEqual(b.body, 'This is another body')
        self.assertEqual(a.user_id, 1)
        self.assertEqual(b.user_id, 2)
    def test_comment(self):
        """ Test Comment: comment.new(body, user_id, thread_id). """
        a = comment.new('This is a comment', 1, 1)
        b = comment.new('This is another comment', 1, 2)
        c = comment.new('This comment is by a different user', 2, 1)
        d = comment.new('This is another comment by a different user', 2, 1)
        self.assertEqual(a.body, 'This is a comment')
        self.assertEqual(b.body, 'This is another comment')
        self.assertEqual(c.body, 'This comment is by a different user')
        self.assertEqual(d.body, 'This is another comment by a different user')
        self.assertEqual(a.user_id, 1)
        self.assertEqual(b.user_id, 1)
        self.assertEqual(c.user_id, 2)
        self.assertEqual(d.user_id, 2)
        self.assertEqual(a.thread_id, 1)
        self.assertEqual(b.thread_id, 2)
        self.assertEqual(c.thread_id, 1)
        self.assertEqual(d.thread_id, 1)
| true |
c40d3b0e04029f3b63c03062039d5498587f4297 | Python | KuroKousuii/Algorithms | /Dynamic Programming/Assembly line scheduling/Space optimized.py | UTF-8 | 726 | 3.5 | 4 | [] | no_license | # A space optimized solution for assembly
# line scheduling in Python3
def carAssembleTime(a, t, e, x):
    """Minimum time to push one car through a two-line assembly plant.

    Space-optimized dynamic program: only the best completion time per
    line is carried from station to station.

    a -- per-station processing times, one row per line
    t -- transfer times for switching lines before a station
    e -- entry times onto each line
    x -- exit times off each line
    """
    stations = len(a[0])
    # Cost of clearing station 0 on each line.
    line0 = e[0] + a[0][0]
    line1 = e[1] + a[1][0]
    for s in range(1, stations):
        # Cheapest way to finish station s on each line: stay on the
        # same line, or cross over from the other one.
        next0 = a[0][s] + min(line0, line1 + t[1][s])
        next1 = a[1][s] + min(line1, line0 + t[0][s])
        line0, line1 = next0, next1
    # Add the exit cost and take the better line.
    return min(line0 + x[0], line1 + x[1])
# Driver Code
# Sample instance; for these inputs the minimum makespan printed is 35.
a = [[4, 5, 3, 2], [2, 10, 1, 4]]
t = [[0, 7, 4, 5], [0, 9, 2, 8]]
e = [10, 12]
x = [18, 7]
print(carAssembleTime(a, t, e, x))
339f653834122418516b49d753b1796ac370a3c8 | Python | potter8/Pizza-Crust | /Pizza Crust.py | UTF-8 | 227 | 3.453125 | 3 | [] | no_license | import math
# r = pizza radius, c = crust width; the cheese disc has radius r - c.
r, c = map(int, input().split())
#(Area of cheese divided by area of pizza) * 100
cheeseArea = math.pi * ((r-c) ** 2)
pizzaArea = math.pi * (r ** 2)
# Percentage of the pizza covered by cheese (the pi factors cancel).
# NOTE(review): the name `crust` is misleading — it holds the CHEESE share.
crust = (cheeseArea / pizzaArea) * 100
print(crust)
| true |
ca13505f73b00c89a01f0e533e1dd4b0ddf049f8 | Python | markreynoso/mail_room_madness | /src/mail_room_madness.py | UTF-8 | 3,248 | 3.59375 | 4 | [
"MIT"
] | permissive | """Allows user to search database for donors.
Send thank you notes to non-profit donors.
And to run a donoation report of all donors.
"""
import sys
# Seed ledger: donor name -> list of individual donation amounts.
donor_data = {'Phil Collins': [25, 45, 76, 100],
              'Sven Sunguaard': [50, 1000, 76, 1400]}
def main():  # pragma: no cover
    """Entry point: start the interactive donor-management menu loop."""
    prompt_user()
def populate_dictionary(name, donation):  # pragma: no cover
    """Record *donation* under *name* in the module-level donor ledger.

    A first-time donor gets a fresh list; the (mutated) ledger is returned.
    """
    donor_data.setdefault(name, []).append(donation)
    return donor_data
def send_thank_you(full_name, donation_amount):
    """Build the personalized thank-you note for a donor.

    Returns the whole letter as one string; the donation amount is
    rendered verbatim after a dollar sign.
    """
    return (
        f"\nDear {full_name},\n"
        f" Thank you for your generous donation of ${donation_amount}. "
        "Your support is making a difference in our community.\n"
        "Sincerely,\nMark and Kavdi\n"
        "Directors of Good\n"
    )
def set_thank_you_amount(full_name): # pragma: no cover
    """Prompt for a donation amount, record it, and print the thank-you note.

    Re-prompts (recursively) until the input is numeric, then returns to
    the main menu via prompt_user().
    """
    donation_amount = input('\nPlease enter donation amount.\n')
    if donation_amount.isnumeric():
        # Ledger stores the int; the letter renders the raw input string.
        populate_dictionary(full_name, int(donation_amount))
        letter = send_thank_you(full_name, donation_amount)
        print(letter)
        prompt_user()
    else:
        print('\nPlease enter a number.\n')
        set_thank_you_amount(full_name)
def find_thank_you_donor(): # pragma: no cover
    """Ask for a donor name (existing or new), or list all donors.

    Typing 'list' prints every known donor and re-prompts; any other
    input is treated as a donor name and flows into the donation prompt.
    """
    full_name = input(('\nPlease do one of the following:\n'
                       '- Enter a donors name to input donation amount\n'
                       '- Enter a new donor name to create an account\n'
                       '- Type list to show all current donors.\n'))
    if full_name.lower() == 'list':
        for i in donor_data:
            print(i)
        find_thank_you_donor()
    else:
        set_thank_you_amount(full_name)
def create_report():
    """Return a tabulated giving summary for every donor.

    Columns: name, total given, number of gifts, and average gift size,
    computed from the module-level donor ledger.
    """
    from tabulate import tabulate
    rows = []
    for donor, gifts in donor_data.items():
        total = sum(gifts)
        count = len(gifts)
        rows.append([donor, total, count, total / count])
    return tabulate(rows, headers=['Name', 'Total Giving',
                                   '# Gifts', 'Avg Donation'])
def prompt_user(): # pragma: no cover
    """Show the main menu and dispatch on the user's choice.

    TY -> thank-you flow, CR -> print the donation report, Q -> exit;
    anything else re-prompts (recursively).
    """
    response = input(('\nWelcome to your donor management center.\n'
                      'What would you like to do?\n\nType:\n'
                      '- TY to send a thank you note to a donor\n'
                      '- CR to create a donation report\n- Q to exit.\n'))
    if response == 'TY':
        find_thank_you_donor()
    elif response == 'CR':
        report = create_report()
        print(report)
        prompt_user()
    elif response == 'Q':
        sys.exit()
    else:
        print('\nPlease type a valid input\n')
        prompt_user()
# Run the interactive menu when executed as a script.
if __name__ == '__main__':  # pragma: no cover
    main()
| true |
684dad4fa6a7e74a608f16b31e5a438f183a518f | Python | LeanderLXZ/nlp-n-gram-language-models-python | /bigram_optimized.py | UTF-8 | 1,185 | 2.953125 | 3 | [] | no_license | import re
from bigram import biGram
class biGramOptimized(biGram):
    """Bigram language identifier that scores unseen bigrams with a fixed
    small probability instead of add-k smoothing."""

    def __init__(self):
        # Delegate to the base model with zero smoothing.
        super(biGramOptimized, self).__init__(0)

    def predict(self, sentence):
        """Return 'EN', 'FR' or 'GR' — whichever model's bigram table gives
        the sentence the highest likelihood ('EN' wins all ties)."""
        best_prob = 0
        best_lang = 'EN'
        models = [(self.bigram_prob_en, 'EN'),
                  (self.bigram_prob_fr, 'FR'),
                  (self.bigram_prob_gr, 'GR')]
        for table, lang in models:
            likelihood = 1
            # Score every adjacent pair of tokens.
            for first, second in zip(sentence, sentence[1:]):
                pair = (first, second)
                if table.get(pair):
                    likelihood *= table[pair]
                else:
                    # Unseen (or zero-probability) bigram: tiny fixed penalty.
                    likelihood *= 0.0001
            if likelihood > best_prob:
                best_lang = lang
                best_prob = likelihood
        return best_lang
# Run the model's CLI (main comes from the biGram base class) when executed directly.
if __name__ == '__main__':
    biGramOptimized().main()
| true |
01f851012231b39550d50be470be7f0363ab8376 | Python | DAP-web/AirbnbProyect | /Segundo Avance/Formularios ABC/Calificaciones_BE.py | UTF-8 | 1,178 | 3.28125 | 3 | [] | no_license | from prettytable import PrettyTable
from DB_Calificaciones_BE import calificacionesDB
class calificionesBE:
    """Business layer for residence ratings (calificaciones).

    Wraps the calificacionesDB data-access object: one method prints the
    per-residence average-rating table, the other interactively records a
    new rating.  (All user-facing strings are intentionally in Spanish.)
    """
    def __init__(self):
        # Data-access object for the ratings table.
        self.calificiones=calificacionesDB()
    def getCalificaciones(self):
        """Print a PrettyTable of every residence and its average rating."""
        result = self.calificiones.obtenerCalificaciones()
        table = PrettyTable()
        table.field_names = ["Residencia","Calificación Promedio"]
        for residencia in result:
            table.add_row([
                residencia["IdResidencia"],
                residencia["Promedio"]
            ])
        print(table)
        # Reset the table so a later call starts empty.
        table.clear()
    def calificar(self):
        """Interactively record one rating (0 = worst ... 5 = best).

        Re-prompts until the rating is in range, then asks for the
        residence id and persists the rating through the DB layer.
        """
        print("\nAgregando una calificación...")
        calificacion = int(input("\nCalificación (Valor entre 0-Peor | 5-Mejor): "))
        while calificacion>5 or calificacion<0:
            print("La calificación es incorrecta. Favor ingresar un valor entre 0 y 5.")
            print("Siendo 5 el mejor y 0 lo peor.")
            calificacion = int(input("\nCalificación: "))
        residencia = input("\nResidencia ID: ")
        self.calificiones.agregarCalificacion(calificacion,residencia)
        print("\nSu calificación se ha guardado con éxito.\n")
a8de7768d6adbb65efd0b244bf1cee40689b69ac | Python | Evilnames/FactoryGame | /Factory/building.py | UTF-8 | 3,230 | 3.1875 | 3 | [] | no_license | class building:
def __init__(self):
#Filler Text
self.name = ""
self.outputGood = ""
#General Attributes
#Input Rules
##What goods do I need? This should be stored as a dictionaries of dictionaries
### IE inputGoods.append('oil' : {'inputGoodName':'cement', inputRequired' : 2, 'maxStorage':80, 'currentInputStorage':0})
self.inputGoods = {}
#Connected conveyor
self.connectedConveyor = []
#Output Rules
##How many goods can I produce at one time?
self.maxOutputProduction = 0
##How long does it take to produce one unit?
self.productionTime = 0
##How many goods can I hold before being backed up?
self.outputStorageCapcity = 0
#Location on Grid
self.coords = {'x':0,'y':0,'xSize':0,'ySize':0}
#State
self.ticksSinceLastOutput = 0
self.outputGoodsWaiting = 0
def checkProduction(self):
#Produce something
if(self.ticksSinceLastOutput >= self.productionTime):
if((self.outputGoodsWaiting + self.maxOutputProduction <= self.outputStorageCapcity) and self.doIHaveEnoughInputResources()):
self.outputGoodsWaiting += self.maxOutputProduction
self.reduceInputResources()
self.ticksSinceLastOutput = 0
#Checks to see if I have enough input resources.
#If I have no input requirements I am the start of the chain for
#production
def doIHaveEnoughInputResources(self):
if(len(self.inputGoods) == 0):
return 1
else:
allowProduction = 1
#Loop through our input goods and see if we have all the required inputs
for i, item in self.inputGoods.items():
if(item['inputRequired'] > item['currentInputStorage']):
allowProduction = 0
return allowProduction
#Reduces the pool of resources sitting in this factory
def reduceInputResources(self):
if(len(self.inputGoods) > 0):
for i,item in self.inputGoods.items():
self.inputGoods[i]['currentInputStorage'] -= item['inputRequired']
#Test's if this factory can accept an incoming good.
#Used to test from convoyor belts
def howMuchOfGoodCanITake(self, good):
howMuchCanIAccept = 0
for i, item in self.inputGoods.items():
if(item['inputGood'] == good):
howMuchCanIAccept = item['maxStorage'] - item['currentInputStorage']
return howMuchCanIAccept
#Accepts an input of a good
def acceptIncomingGood(self, good, quantity):
for i,item in self.inputGoods.items():
if(item['inputGood'] == good):
self.inputGoods[i]['currentInputStorage'] += quantity
def getOutputQuantity(self):
return self.outputGoodsWaiting
def removeOutgoingGood(self, quantity):
self.outputGoodsWaiting -= quantity
#Called each time
def tick(self):
self.checkProduction()
#Increase last production time
self.ticksSinceLastOutput += 1
| true |
a66e36fe98502bcebc0d25c5dc11264d6c439b6f | Python | Mikesteinberg/Updated_Cr_NeuralNetwork | /Clash_Royale_Neural_Network-master/Generator_Test.py | UTF-8 | 1,852 | 2.9375 | 3 | [] | no_license | from numpy.random import seed
# Fix numpy's RNG so any shuffling/batching is reproducible across runs.
seed(30)
from sklearn.utils import shuffle
# Importing the dataset
def Deck_Generator():
    """Endlessly yield (deck, winner) tuples parsed from the ladder file.

    Each line holds comma-separated numeric fields; the last three fields
    (the final 6 characters) describe the winner and the rest the deck.
    The file is re-read from the top via seek(0) so training can consume
    an arbitrary number of samples.
    """
    # NOTE(review): the original open() call contained an unterminated
    # string literal ('/home/mike/"Clash Royale Neural Network"/...txt,"r")
    # — a SyntaxError.  The embedded double quotes look like shell
    # escaping; path reconstructed below, confirm against the real tree.
    with open('/home/mike/Clash Royale Neural Network/'
              'Clash_Royale_Neural_Network-master/new_match_file_ladder.txt',
              "r") as deck_file:
        while True:
            i = 0
            # 3195150 presumably equals the sample count of the match file
            # — TODO confirm.
            while i < 3195150:
                for line in deck_file:
                    # Excludes the last three values and their commas.
                    line2 = line[-6:]
                    line = line[0:-7]
                    line_array = line.split(",")
                    line_array_two = line2.split(",")
                    deck_array = []
                    for num in line_array:
                        try:
                            deck_array.append(int(num))
                        except ValueError:
                            # Non-integer field: fall back to float.
                            deck_array.append(float(num))
                    deck_array_tuple = tuple(deck_array)
                    winner_array = []
                    for num in line_array_two:
                        winner_array.append(int(num))
                    winner_array_tuple = tuple(winner_array)
                    i += 1
                    yield (deck_array_tuple, winner_array_tuple)
                # File exhausted: restart from the top for the next pass.
                deck_file.seek(0)
                continue
            deck_file.close()
def Batch_Generator():
    """Yield endless (decks, winners) training batches of 32 samples.

    Each yielded element is a pair of numpy arrays assembled from 32
    consecutive samples of Deck_Generator().
    """
    # The module header only imports numpy.random.seed, so the `np` name
    # used below was undefined at runtime; import it here.
    import numpy as np
    my_deck = Deck_Generator()
    while True:
        deck_batch_array = []
        win_batch_array = []
        for _ in range(0, 32):
            sample = next(my_deck)
            deck_batch_array.append(sample[0])
            win_batch_array.append(sample[1])
        yield (np.array(tuple(deck_batch_array)), np.array(tuple(win_batch_array)))
# Ad-hoc smoke test: print the first parsed (deck, winner) sample.
my_deck = Deck_Generator()
print(next(my_deck))
60c5a5dc8e0c53d630667c25b9963a0e6646db1c | Python | wchming1987/Tumbler | /handler/Book.py | UTF-8 | 1,462 | 2.65625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/python
# -*- coding=utf-8 -*-
import json
from bson import json_util
from tornado_swagger import swagger
from handler.GenericApiHandler import GenericApiHandler
class BooksHandler(GenericApiHandler):
    """REST handler for the `books` collection.

    NOTE(review): written for Python 2 — get() uses `print` as a statement.
    """
    @swagger.operation(nickname='list')
    def get(self):
        """
        List every document in the `books` collection as a JSON array.

        @rtype: L{Book}
        """
        print('---------------------------------')
        # Fetch the book list.
        books = []
        collection = self.db['books']
        for item in collection.find():
            # Debug dump of each raw document.
            print json.dumps(item, default=json_util.default).decode("unicode_escape")
            books.append(item)
        # NOTE(review): the handler both write()s the JSON body and then
        # calls finish_request(books) — confirm GenericApiHandler expects
        # both, otherwise the response may be emitted twice.
        self.write(json.dumps(books, default=json_util.default).decode("unicode_escape"))
        print('---------------------------------')
        self.finish_request(books)
    @swagger.operation(nickname='create')
    def post(self):
        """
        Create a book item (not implemented yet — the sketch below is
        kept from the original author).

        @param body: create a item.
        @type body: L{Item}
        @in body: body
        @return 200: item is created.
        @raise 400: invalid input
        """
        # property1 = self.json_args.get('property1')
        # item = Item.item_from_dict(self.json_args)
        # items[property1] = item
        # Item.test_classmethod()
        # self.finish_request(item.format_http())
        pass
    def options(self):
        """
        I'm not visible in the swagger docs
        """
        self.finish_request("I'm invisible in the swagger docs")
| true |
8670c1b668830f4ca4cdce2f551495709ce3c18b | Python | Harshil-Patel24/Machine-Perception | /Assignment_2/assignment.py | UTF-8 | 2,270 | 2.90625 | 3 | [] | no_license | import cv2 as cv
import numpy as np
import os
import sys
from tools import *
from matplotlib import pyplot as plt
def main(argv):
    """Detect house numbers in every image of a directory.

    For each .jpg/.png: preprocess (CLAHE + blur + dilate), find connected
    components, extract candidate digit regions, classify them with a
    pre-trained SVM, and write the detected area image, bounding box and
    recognized number to /home/student/output/.
    NOTE(review): trainSVM, CLAHE, CCL, extractNumbers, detectNum and
    writeFile come from `tools` (star-imported); behavior assumed from
    their call sites here.
    """
    # Controller to allow for different input directories
    if len(argv) > 1:
        print('usage: python3 assignment.py <image-directory>')
        exit(0)
    elif len(argv) == 1:
        directory = argv[0]
    else:
        # Default marking directory when no argument is supplied.
        directory = '/home/student/test'
    # Train our SVM model
    svm = trainSVM()
    # Iterate through directory and find all images
    for ii, fname in enumerate(os.listdir(directory)):
        if fname.endswith('.jpg') or fname.endswith('.png'):
            img = cv.imread(directory + '/' + fname)
            # Fixed working size for the whole pipeline.
            img = cv.resize(img, (300, 330))
            # Some images have glare, so use CLAHE to reduce glare
            clahe = CLAHE(img, clipLimit=0.4, tileGridSize=(3,3))
            # Apply a gaussian blur to reduce noise in the images
            gauss = cv.GaussianBlur(clahe, (5, 7), 0)
            close = cv.dilate(gauss, (5, 5))
            # Find the connected components
            stats, thresh = CCL(close)
            # Find the predicted regions for "numbers"
            detections, det_area, bounding = extractNumbers(stats, thresh, img)
            # Detect images if there are detections
            if(len(detections) != 0):
                result = detectNum(detections, svm)
                # This just takes the number from the file name
                imName = fname[2:-4]
                det_area_name = 'DetectedArea' + imName + '.jpg'
                bounding_box_name = 'BoundingBox' + imName + '.txt'
                house_name = 'House' + imName + '.txt'
                bounding_box = 'X: ' + str(bounding[0]) + '\nY: ' + str(bounding[1]) +\
                    '\nW: ' + str(bounding[2]) + '\nH: ' + str(bounding[3])
                # Write results to file
                cv.imwrite('/home/student/output/' + det_area_name, det_area)
                writeFile('/home/student/output/' + bounding_box_name, bounding_box)
                writeFile('/home/student/output/' + house_name, result)
            else:
                print('No numbers detected')
# Forward the command-line arguments (sans program name) to main().
if __name__ == '__main__':
    main(sys.argv[1:])
| true |
85db0ddab1f19bb9b14eadad269eb1aba49f11ab | Python | benbendaisy/CommunicationCodes | /python_module/examples/804_Unique_Morse_Code_Words.py | UTF-8 | 2,191 | 4.1875 | 4 | [] | no_license | import string
from collections import Counter
from typing import List
class Solution:
    """LeetCode 804 — Unique Morse Code Words.

    Every lowercase letter maps to a fixed Morse code; a word's
    "transformation" is the concatenation of its letters' codes.
    uniqueMorseRepresentations returns how many distinct transformations
    the given words produce.
    """

    def uniqueMorseRepresentations(self, words: List[str]) -> int:
        codes = [".-","-...","-.-.","-..",".","..-.","--.","....","..",
                 ".---","-.-",".-..","--","-.","---",".--.","--.-",".-.",
                 "...","-","..-","...-",".--","-..-","-.--","--.."]
        # Letter -> Morse code; 'a' lines up with codes[0].
        encode = {chr(ord('a') + offset): code for offset, code in enumerate(codes)}
        # A set keeps only the distinct concatenated transformations.
        seen = {"".join(encode[letter] for letter in word) for word in words}
        return len(seen)
549de621bddc7b5371520db81c28590f626c3b1e | Python | githubli97/leetcode-python | /202011/20201127/q454.py | UTF-8 | 1,337 | 3.125 | 3 | [] | no_license | from typing import List
class Solution:
    """LeetCode 454 — count index quadruples (i, j, k, l) with
    A[i] + B[j] + C[k] + D[l] == 0."""

    def fourSumCount(self, A: List[int], B: List[int], C: List[int], D: List[int]) -> int:
        # Empty A means no quadruple can exist.
        if not A:
            return 0

        def tally(values):
            """Value -> occurrence count."""
            counts = {}
            for v in values:
                counts[v] = counts.get(v, 0) + 1
            return counts

        def pair_sums(left, right):
            """Sum -> number of (left, right) pairs producing it."""
            sums = {}
            for lv, lc in left.items():
                for rv, rc in right.items():
                    key = lv + rv
                    sums[key] = sums.get(key, 0) + lc * rc
            return sums

        ab = pair_sums(tally(A), tally(B))
        cd = pair_sums(tally(C), tally(D))
        # A quadruple sums to zero exactly when an A+B sum cancels a C+D sum.
        return sum(count * cd.get(-total, 0) for total, count in ab.items())
# Ad-hoc smoke test: this example has 6 zero-sum quadruples.
if __name__ == '__main__':
    print(Solution().fourSumCount([-1,-1],[-1,1], [-1,1],[1,-1]))
| true |
0d9f01bdabd15f91f4411f6ff0a62eff6682a683 | Python | caogtaa/OJCategory | /ProjectEuler/1_100/0025.py | UTF-8 | 113 | 2.953125 | 3 | [] | no_license | f1, f2 = 1, 1
# Project Euler 25: index of the first Fibonacci term with 1000 digits.
# F(1) = F(2) = 1 (re-stated here so the block stands alone).
f1, f2 = 1, 1
index = 2
target = 10 ** 999  # smallest integer with 1000 decimal digits
while f2 < target:
    f1, f2 = f2, f1 + f2
    index += 1
print(index)
# = 4782
1fd99440d9dcfe1af22b79a0d045a8ce9078b635 | Python | ZayneHuang/TCP007 | /data_process/create_protocol_dictionary.py | UTF-8 | 1,175 | 2.546875 | 3 | [] | no_license | import numpy as np
# Root holding the raw capture dumps; files are simply named 1..18.
filepath = '../data/pcaps/'
namelist = range(1, 19, 1)
protocol_list = []
protocol_dic = {}
# Occurrence count per protocol id; 40 caps the number of distinct protocols.
protocol_num = np.zeros(40, dtype=int)
for filename in namelist:
    fullpath = filepath + str(filename)
    with open(fullpath, 'rb') as f:
        lines = f.readlines()
        for line in lines:
            # NOTE(review): the file is opened in binary mode, so str(line)
            # yields the bytes repr ("b'...'"); field 5 therefore includes
            # that formatting — confirm this matches the dump layout.
            message = str(line).split(" ")
            message = list(filter(lambda x: x != ' ' and x != '', message))
            # First sighting of a protocol gets the next integer id.
            if protocol_dic.get(message[5]) is None:
                protocol_dic[message[5]] = len(protocol_dic)
            protocol_num[protocol_dic[message[5]]] += 1
# Pair each protocol with its count (ids were assigned in insertion order,
# which dict iteration preserves).
i = 0
for key in protocol_dic:
    protocol_list.append([str(key), protocol_num[i]])
    i += 1
# Most frequent protocol first.
protocol_list.sort(key=lambda x: x[1], reverse=True)
print(protocol_list)
# Rebuild the dictionary so ids now follow descending frequency.
protocol_dic = {}
for each_protocol in protocol_list:
    protocol_dic[each_protocol[0]] = len(protocol_dic)
print(protocol_dic)
np.save('../data/protocol_dictionary.npy', protocol_dic, allow_pickle=True)
# Human-readable "<protocol> <count>" listing, one per line.
with open('../data/statistics_sorted.txt', 'w') as ff:
    for each_protocol in protocol_list:
        data = str(each_protocol[0]) + ' ' + str(each_protocol[1]) + '\n'
        ff.write(data)
| true |
235ecfc077d293c90723000921b0c58bc201c6af | Python | ShadowMinerXIII/AI-iris-flower | /AI(iris).py | UTF-8 | 2,466 | 3.234375 | 3 | [] | no_license | from sklearn import datasets
import numpy as np
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import metrics
from mlxtend.plotting import plot_decision_regions
from sklearn.svm import SVC
# Bundled iris arrays for modelling, plus the seaborn copy for plotting.
data = datasets.load_iris()
iris = sns.load_dataset("iris")
# NOTE(review): hard-coded local Windows path; '\M' here is a literal
# backslash-M, not an escape sequence.
data1 = pd.read_csv('D:\Master work/AI/iris.csv')
# Keep only sepal length (column 0) and petal length (column 2).
features = data.data[:, [0, 2]]
targets = data.target
# Shuffled split: last 100 samples for testing, the rest for training.
indices = np.random.permutation(len(features))
features_train = features[indices[:-100]]
targets_train = targets[indices[:-100]]
features_test = features[indices[-100:]]
targets_test = targets[indices[-100:]]
# Standardize using training statistics only.
scaler = StandardScaler()
scaler.fit(features_train)
features_train = scaler.transform(features_train)
features_test = scaler.transform(features_test)
#Train classifier
svm = SVC(C=0.5, kernel='linear')
svm.fit(features_train, targets_train)
#Random pick a kvalue then setup the model
# Score k-NN on the held-out set for every k in 1..50.
k_range = list(range(1,51))
scores = []
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors = k)
    knn.fit(features_train, targets_train)
    y_pred = knn.predict(features_test)
    scores.append(metrics.accuracy_score(targets_test, y_pred))
#knn = KNeighborsClassifier(n_neighbors=k)
#knn.fit(features_train, targets_train)
# NOTE(review): `knn` here is whatever the loop left behind (k == 50),
# so the accuracy printed below describes k=50 only.
predictions = knn.predict(features_test)
numTesting = features_test.shape[0]
numCorrect = (targets_test == predictions).sum()
accuracy = float(numCorrect) / float(numTesting)
#plot decision
plot_decision_regions(features_train, targets_train, clf=svm, legend=2)
plt.xlabel('sepal length [CM]')
plt.ylabel('petal length [CM]')
plt.title('SVM on Iris')
plt.show()
#After graphing the features in a pair plot, it is clear that the relationship between pairs of features of a iris-setosa (in pink) is distinctly different from those of the other two species.
#There is some overlap in the pairwise relationships of the other two species, iris-versicolor (brown) and iris-virginica (green).
g = sns.pairplot(data1, hue='species', markers='+')
plt.show()
# Accuracy-vs-k curve from the loop above.
plt.plot(k_range, scores)
plt.xlabel('Value of k for KNN')
plt.ylabel('Accuracy Score')
plt.title('Accuracy Scores for Values of k of k-Nearest-Neighbors')
print("No. correct={0}, No. testing examples={1}, prediction accuracy={2} per cent".format(numCorrect, numTesting, round(accuracy*100, 2)))
plt.show()
| true |
4a6688f7d1a3ad64a45cd6c4ccf656fef75d176e | Python | amyq7526110/python100 | /74_lsR.py | UTF-8 | 343 | 2.984375 | 3 | [] | no_license | #!/usr/bin/env python3
import os
import sys
def list_files(path):
    """Recursively print each directory followed by its entry list (ls -R style).

    Non-directory paths produce no output; files are visited but only
    directories print anything.
    """
    if not os.path.isdir(path):
        return
    print(path + ':')
    entries = os.listdir(path)
    print(entries)
    for entry in entries:
        list_files(os.path.join(path, entry))
# CLI entry point: recursively list the directory passed as the first argument.
if __name__ == '__main__':
    list_files(sys.argv[1])
| true |
a992a761a980b76a0a833927a5a575cf6954e39f | Python | IvanIsCoding/OlympiadSolutions | /beecrowd/1065.py | UTF-8 | 299 | 3.046875 | 3 | [] | no_license | # Ivan Carvalho
# Solution to https://www.beecrowd.com.br/judge/problems/view/1065
# -*- coding: utf-8 -*-
"""
Escreva a sua solução aqui
Code your solution here
Escriba su solución aquí
"""
array = []
for i in range(5):
array.append(int(input()) % 2)
print("%d valores pares" % (5 - sum(array)))
| true |
5ee9dd3f6c1067451c6c152a3995d38ca39a805a | Python | Koushika-BL/python | /dictionary is empty or not.py | UTF-8 | 85 | 2.609375 | 3 | [] | no_license | dict1={}
# An empty dict is falsy, so plain truthiness tests for emptiness.
if dict1:
    print("not empty")
else:
    print("dictionary is empty")
| true |
62eea2b5234acdcaba3a27a26c22bc72280c27a1 | Python | lucky-wn/ApiTest | /variables/script_variables.py | UTF-8 | 1,619 | 2.71875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# @Author : WangNing
# @Email : 3190193395@qq.com
# @File : script_variables.py
# @Software: PyCharm
# 生成脚本时,相关变量的存储
# 脚本预备:导入包
code_prepares = """#encoding=utf-8
import unittest, requests
from core.db_manager import *
import os, sys,json"""
# 脚本头部[需要连接数据库(有依赖数据)]:class, setup
code_head_with_db = '''
class %s(unittest.TestCase):
"""%s"""
def setUp(self):
self.dbd = DBManager()
self.base_url = "%s"
'''
# 脚本头部[不需要连接数据库]:class, setup
code_head = '''
class %s(unittest.TestCase):
"""%s"""
def setUp(self):
self.base_url = "%s"
'''
# 脚本结束
code_end = """
"""
# 脚本结束(带数据库连接)
code_end_with_db = """
def tearDown(self):
self.dbd.close_connect()
"""
# 结束
final_code = '''
if __name__ == '__main__':
unittest.main()
'''
# post方法
post_code = '''
def test_%s(self):
"""%s"""
%s
r = requests.post(self.base_url, data = json.dumps(payload))
result = r.json()
self.assertEqual(r.status_code, 200)
%s
'''
# get方法
get_code = '''\n
def test_%s(self):
"""%s"""
%s
r = requests.get(self.base_url + str(payload))
result = r.json()
self.assertEqual(r.status_code, 200)
%s
'''
# 校验
check_code = '''
check_point = %s
for key,value in check_point.items():
self.assertEqual(result[key], value, msg = u"字段【{}】: expection: {}, reality: {}".format(key, value, result[key]))
'''
| true |
48cae9311eb933c366c27f82f78607fbdb6cb625 | Python | kdh0514/Openstack | /Device/dummyInputImage.py | UTF-8 | 1,552 | 2.859375 | 3 | [] | no_license | import socket
import cv2
import numpy
import os
import time
import threading
# Image file path
# NOTE(review): `path` appears unused below (file names are built as
# ./in<N>.jpg inside send_data) — confirm it can be removed.
path = './in.jpg'
# IP , PORT
ip_add = '127.0.0.1'
port_num = 8989
# Connect a client socket to the server's IP and PORT (runs at import time).
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ip_add, port_num))
def send_data():
    """Repeatedly wait for ./in<N>.jpg files and stream them to the server.

    The operator types a file number; once the matching file exists it is
    JPEG-encoded and sent over the module-level socket as: a 16-byte
    left-justified length header, the image bytes, then the file number.
    The file is deleted after sending.  The loop never terminates, so the
    trailing sock.close() is unreachable.
    """
    fileNumber = input("Input file Number : ")
    fileName = r"./in" + str(fileNumber) + ".jpg"
    while True:
        # global fileName
        if os.path.isfile(fileName):
            # testText = test + str(n)
            time.sleep(1)
            # Load the image file from disk.
            img_cv = cv2.imread(fileName)
            print(img_cv)
            # Encode the image into a JPEG byte string for transmission.
            encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
            result, imgencode = cv2.imencode('.jpg', img_cv, encode_param)
            data = numpy.array(imgencode)
            stringData = data.tobytes()
            # Send the encoded image over the socket:
            # fixed-width length header first, then the payload and the number.
            sock.send(str.encode(str(len(stringData))).ljust(16))
            sock.send(stringData)
            sock.send(str(fileNumber).encode("utf-8"))
            # Delete the image file once it has been sent.
            os.remove(fileName)
        else:
            # File not there yet: ask for (possibly another) file number.
            fileNumber = input("Input file Number : ")
            fileName = r"./in" + str(fileNumber) + ".jpg"
    # Close the socket (unreachable: the loop above never exits).
    sock.close()
# Run the sender on a background thread.
send_thread = threading.Thread(target=send_data)
send_thread.start()
# Wait for the sender to finish instead of busy-spinning a CPU core with the
# previous `while True: pass` keep-alive loop (same blocking effect, no burn).
send_thread.join()
f87de134ebc418211ad899e7557d520596442fb1 | Python | bigsem89/post_to_postgresql | /flask_app/src/app.py | UTF-8 | 1,116 | 2.546875 | 3 | [] | no_license | from flask import Flask, render_template, request, redirect
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# Connection string for the dockerised PostgreSQL service (host "db").
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://dbuser:123@db/testdb'
# Disable the modification-tracking event system (avoids overhead/warning).
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
class Person(db.Model):
    """ORM model: a person identified by a unique display name."""
    # Auto-incrementing primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Full name ("first last"); required and unique (duplicate inserts fail).
    name = db.Column(db.String(100), nullable=False, unique=True)
    def __repr__(self):
        return 'Person %r' % self.id
# Create the tables at import time if they do not already exist.
db.create_all()
@app.route("/", methods=['POST', 'GET'])
def index():
    """Render the signup form; on POST, persist the submitted person.

    POST always redirects back to "/" (matching the original behaviour,
    whether or not the insert succeeded); GET renders the form template.
    """
    if request.method == "POST":
        name = request.form['fname'] + " " + request.form['lname']
        person = Person(name=name)
        try:
            db.session.add(person)
            db.session.commit()
        except Exception:
            # Narrowed from a bare `except:` (which also swallowed
            # SystemExit/KeyboardInterrupt).  Roll back so the session
            # remains usable after a failed commit (e.g. duplicate name).
            db.session.rollback()
        return redirect("/")
    else:
        return render_template("form.html")
@app.route("/data")
def data():
    """Render the list of all stored persons, ordered by primary key."""
    records = Person.query.order_by(Person.id).all()
    return render_template("data.html", persons=records)
if __name__ == "__main__":
    # NOTE(review): debug=True enables the Werkzeug debugger/reloader —
    # confirm this configuration is never deployed to production as-is.
    app.run(host='0.0.0.0', debug=True)
| true |
ea2eb0b89398b322bd5c24665906e787af648cd2 | Python | vishalonip/python | /kangaroo_seperate_input.py | UTF-8 | 193 | 3.109375 | 3 | [] | no_license | k1=int(input())
# k1 was read above.  Kangaroo 1 starts at k1 jumping v1 per step; kangaroo 2
# starts at k2 jumping v2.  They meet iff k1 + v1*t == k2 + v2*t for some
# integer t >= 0.
v1 = int(input())
k2 = int(input())
v2 = int(input())
result = "No"
if v1 == v2:
    # Equal speeds: the gap never changes, so they meet only if they
    # already start at the same position (t = 0).
    if k1 == k2:
        result = "Yes"
else:
    # Solve exactly: t = (k2 - k1) / (v1 - v2) must be a non-negative
    # integer.  This replaces the previous brute-force scan over
    # t in range(10000), which wrongly answered "No" for meetings at
    # t >= 10000.
    t, remainder = divmod(k2 - k1, v1 - v2)
    if remainder == 0 and t >= 0:
        result = "Yes"
print(result)
6c0cf0d6903915062dd8499ca4a0ee0305236264 | Python | jackone666/analyse | /models/Infor/IndividualInfor.py | UTF-8 | 1,154 | 2.953125 | 3 | [] | no_license | import math
import sys
# Module-level scratch counter for recursive tree walks.
count = 0
# NOTE(review): such a huge recursion limit can crash the interpreter with a
# C-stack overflow before Python raises RecursionError — confirm the intent.
sys.setrecursionlimit(100000000)
def nodeInformationentropy(node: str, tree):
    """Compute the information entropy of *node* within *tree*.

    *tree* is a pandas DataFrame used as an adjacency matrix:
    tree.at[i, j] == 1 means there is an edge from node i to node j.
    The entropy is taken over the relative subtree sizes of *node*'s direct
    children, with logarithm base len(tree.index) - 1.

    Returns 0 for leaf nodes.

    Fix over the original: the recursive size counter now returns its result
    instead of mutating the module-level ``global count``, removing hidden
    shared state between calls while producing identical values.
    """
    # Out-degree of every node, keyed by node label.
    out_degree = dict(zip(tree.index, tree.sum(axis=1)))

    def subtree_size(current):
        """Number of descendants of *current* (all nodes strictly below it)."""
        if out_degree.get(current) == 0:
            return 0
        total = 0
        for child in out_degree:
            if tree.at[current, child] == 1:
                total += 1 + subtree_size(child)
        return total

    # Leaf nodes carry no entropy.
    if out_degree.get(node) == 0:
        return 0
    descendants = subtree_size(node)
    result = 0
    for child in tree.index:
        if tree.at[node, child] == 1:
            # Share of the subtree rooted at this child (child itself included).
            share = (subtree_size(child) + 1) / descendants
            result -= share * math.log(share, len(out_degree) - 1)
    return result
| true |
1c8937f5d4273d451927eab5dd7bce3278e322d4 | Python | Mskty/Coattiva | /ui_test/funzioni.py | UTF-8 | 17,143 | 2.671875 | 3 | [] | no_license | import tkinter as tk
import matplotlib.pyplot as plt
import pandas as pd
import sklearn as skl
import numpy as np
import seaborn as sns
import time
import xgboost as xgb
from tkinter import filedialog
from joblib import dump, load
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, svm, model_selection
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.decomposition import PCA
from sklearn_features.transformers import DataFrameSelector
from pandas import DataFrame
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict, GridSearchCV, \
RandomizedSearchCV
from sklearn.neighbors import NearestNeighbors, KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, pairwise, precision_recall_curve
from sklearn import preprocessing
from sklearn import tree
from sklearn.neighbors import NearestNeighbors
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import Imputer, LabelBinarizer, LabelEncoder, OneHotEncoder
# ALL FUNCTION PARAMETERS ARE PASSED BY REFERENCE, SO SIDE EFFECTS OCCUR UNLESS AN EXPLICIT COPY IS MADE
def set_dataset_display_properties(width=400, columns=50):
    """Configure pandas terminal rendering: total width and max columns shown."""
    for option, value in (('display.width', width), ('display.max_columns', columns)):
        pd.set_option(option, value)
def load_raw_data(raw_data_path) -> pd.DataFrame:
    """Read the CSV at *raw_data_path* into a pandas DataFrame.

    Also (re)applies the default pandas display settings as a side effect.
    """
    set_dataset_display_properties()
    # TODO: check whether the path ends in ".csv" (split on '.') and append it if missing.
    frame = pd.read_csv(raw_data_path)
    # Shuffling is deliberately disabled: frame = frame.sample(frac=1)
    return frame
def save_dataset(data: pd.DataFrame):
    """Open a small Tk window whose button exports *data* to a user-chosen CSV."""
    window = tk.Tk()
    canvas = tk.Canvas(window, width=300, height=300, bg='lightsteelblue2', relief='raised')
    canvas.pack()

    def _export_csv():
        # Ask the user for a target path, then write the dataframe there.
        target = filedialog.asksaveasfilename(defaultextension='.csv')
        data.to_csv(target, index=None, header=True)

    export_button = tk.Button(text='Export CSV', command=_export_csv, bg='green', fg='white',
                              font=('helvetica', 12, 'bold'))
    canvas.create_window(150, 150, window=export_button)
    window.mainloop()
def show_data_info(data: pd.DataFrame):
    """Print an overview of *data*: shape, dtypes/info, statistics and head."""
    print("\n----------INIZIO DATASET---------- \n")
    print("Formato: \n")
    print(data.shape)
    print("\nInfo e tipi: \n")
    # DataFrame.info() writes to stdout itself and returns None; the original
    # wrapped it in print(), which emitted a spurious "None" line.
    data.info()
    print("\nDescrizione: \n")
    print(data.describe())
    print("\nPrime 5 righe: \n")
    print(data.head())
    print("\n----------FINE DATASET---------- \n")
def divide_features_target(data: pd.DataFrame):
    """Split *data* into a feature matrix X and target vector Y.

    The target is assumed to be the last column of the DataFrame; both
    results are returned as numpy arrays.
    """
    target_column = data.columns[len(data.columns) - 1]
    Y = data[target_column].to_numpy()
    X = data.drop(target_column, axis=1).to_numpy()
    return X, Y
def one_hot_encoder(features, data: pd.DataFrame) -> pd.DataFrame:
    """Return *data* with the given categorical *features* one-hot encoded.

    The selected columns are replaced by their 0/1 indicator (dummy)
    columns.  The input DataFrame is left unmodified.
    """
    dummies = pd.get_dummies(data[features].astype(str))
    # Bug fix: the original dropped the hard-coded Titanic columns
    # ["Sex", "Embarked", "Title"] (and mutated *data* in place via
    # inplace=True) instead of dropping the columns actually passed in
    # *features*.
    data = data.drop(columns=features)
    return data.join(dummies)
def discrete_label_encoder(features, data: pd.DataFrame) -> pd.DataFrame:
    """Return a copy of *data* with each column in *features* encoded as integer labels."""
    encoded = data.copy()
    encoder = LabelEncoder()
    for column in features:
        encoded[column] = encoder.fit_transform(encoded[column])
    return encoded
class CustomLabelBinarizer(BaseEstimator, TransformerMixin):
    """LabelBinarizer wrapper usable as a step inside sklearn Pipelines.

    NOTE(review): transform() builds and *re-fits* a fresh LabelBinarizer on
    every call, so the binarization is never learned from the training data
    alone — confirm this is acceptable for the intended pipelines.
    """
    def __init__(self, sparse_output=False):
        self.sparse_output = sparse_output
    def fit(self, X, y=None):
        # Stateless: nothing to learn here.
        return self
    def transform(self, X, y=None):
        enc = LabelBinarizer(sparse_output=self.sparse_output)
        return enc.fit_transform(X)
def transformation_pipeline(numeric_features, categorical_features, data: pd.DataFrame) -> pd.DataFrame:
    # Returns a copy of the dataframe, as a numpy matrix, transformed by the pipeline.
    # By default it uses StandardScaler; MinMaxScaler would be preferable if there were no outliers.
    # NOTE(review): only the numeric pipeline is applied below — cat_pipeline
    # and full_pipeline are constructed but never used (the full_pipeline call
    # is commented out), so `categorical_features` has no effect. Confirm intent.
    # TODO: save the imputer so the identical fit can be reused on the test set.
    num_pipeline = Pipeline([
        ('selector', DataFrameSelector(numeric_features)),
        ('imputer', Imputer(strategy="median")),
        ('std_scaler', skl.preprocessing.StandardScaler()),
    ])
    cat_pipeline = Pipeline([
        ('selector', DataFrameSelector(categorical_features)),
        ('label_binarizer', OneHotEncoder()),
    ])
    full_pipeline = FeatureUnion(transformer_list=[
        ("num_pipeline", num_pipeline),
        ("cat_pipeline", cat_pipeline),
    ])
    # data_transformed = full_pipeline.fit_transform(data)
    data_transformed = num_pipeline.fit_transform(data)
    return data_transformed
# VISUALIZATION
def bar_plot(x_data, y_data, title="", x_label="", y_label=""):
    """Create a new seaborn bar-plot figure; display it later with plt.show()."""
    plt.figure(figsize=(10, 5))
    sns.barplot(x_data, y_data)
    plt.title(title)
    plt.xlabel(x_label, fontsize=12)
    plt.ylabel(y_label, fontsize=12)
def correlation_heatmap(df):
    """Draw an annotated heatmap of the Pearson correlations between df's columns."""
    # correlation heatmap of dataset
    _, ax = plt.subplots(figsize=(10, 8))
    # Diverging palette so positive and negative correlations get distinct hues.
    colormap = sns.diverging_palette(220, 10, as_cmap=True)
    _ = sns.heatmap(
        df.corr(),
        cmap=colormap,
        square=True,
        cbar_kws={'shrink': .9},
        ax=ax,
        annot=True,
        linewidths=0.1, vmax=1.0, linecolor='white',
        annot_kws={'fontsize': 12}
    )
    plt.title('Pearson Correlation of Features', y=1.05, size=15)
def scatter_plot_3d(x_data, y_data, z_data, class_label, title="", x_label="", y_label="", z_label=""):
    """Create a new 3D scatter figure coloured by *class_label*; show with plt.show()."""
    figure = plt.figure(figsize=(10, 5))
    axes = figure.add_subplot(111, projection='3d')
    points = axes.scatter(x_data, y_data, z_data, c=class_label)
    plt.colorbar(points)
    plt.title(title)
    axes.set_xlabel(x_label)
    axes.set_ylabel(y_label)
    axes.set_zlabel(z_label)
def scatter_plot_2d(x_data, y_data, class_label, title="", x_label="", y_label=""):
    """Create a new 2D scatter figure coloured by *class_label*; show with plt.show()."""
    figure = plt.figure(figsize=(10, 5))
    axes = figure.add_subplot(111)
    points = axes.scatter(x_data, y_data, c=class_label)
    plt.colorbar(points)
    plt.title(title)
    axes.set_xlabel(x_label)
    axes.set_ylabel(y_label)
# TRAINING MODELS
def report(results, n_top=3):
    """Print the models ranked 1..n_top from a cross-validation `cv_results_` mapping."""
    for rank in range(1, n_top + 1):
        for candidate in np.flatnonzero(results['rank_test_score'] == rank):
            print("Model with rank: {0}".format(rank))
            print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
                results['mean_test_score'][candidate],
                results['std_test_score'][candidate]))
            print("Parameters: {0}".format(results['params'][candidate]))
            print("")
def grid_search(param_grid, classifier, X_train, y_train, fold=5):
    """Cross-validate *classifier* with an exhaustive grid search over *param_grid*.

    Uses the classifier's default scoring (accuracy), prints timing, the best
    parameters/estimator and a ranked report, then returns
    (best_params_, best_score_) where the score is the mean over all folds.
    """
    grid_search_cv = GridSearchCV(classifier, param_grid, cv=fold, iid=False)
    start = time.perf_counter()
    grid_search_cv.fit(X_train, y_train)
    print("Esecuzione terminata in: ", time.perf_counter() - start)
    print("Migliori parametri:", grid_search_cv.best_params_)
    print("Miglior modello:", grid_search_cv.best_estimator_)
    # Removed the unused local `cvres = grid_search_cv.cv_results_`.
    print("Risultati: \n")
    report(grid_search_cv.cv_results_)
    return grid_search_cv.best_params_, grid_search_cv.best_score_
def random_search(param_distribution, num_iter, classifier, X_train, y_train, fold=5):
    """Cross-validate *classifier* with a randomized search over *param_distribution*.

    Uses the classifier's default scoring (accuracy), prints timing, the best
    parameters/estimator and a ranked report, then returns
    (best_params_, best_score_) where the score is the mean over all folds.
    """
    random_search_cv = RandomizedSearchCV(classifier, param_distribution, n_iter=num_iter, cv=fold, iid=False)
    start = time.perf_counter()
    random_search_cv.fit(X_train, y_train)
    print("Esecuzione terminata in: ", time.perf_counter() - start)
    # Bug fix: the original concatenated a str with a dict/estimator
    # ("Migliori parametri:" + best_params_), which raises TypeError.
    # Use comma-separated print arguments, matching grid_search.
    print("Migliori parametri:", random_search_cv.best_params_)
    print("Miglior modello:", random_search_cv.best_estimator_)
    # Removed the unused local `cvres = random_search_cv.cv_results_`.
    print("Risultati: \n")
    report(random_search_cv.cv_results_)
    return random_search_cv.best_params_, random_search_cv.best_score_
def display_scores(scores):
    """Print a cross-validation score array with its mean and standard deviation."""
    for label, value in (("Scores:", scores),
                         ("Mean:", scores.mean()),
                         ("Standard deviation:", scores.std())):
        print(label, value)
def cross_validation(classifier, X_train, y_train, fold=5):
    """Cross-validate *classifier*, print the per-fold scores and return their mean.

    Uses the classifier's default scoring (accuracy).
    """
    fold_scores = cross_val_score(classifier, X_train, y_train, cv=fold)
    display_scores(fold_scores)
    return fold_scores.mean()
def rf_feat_importance(clf, df) -> pd.DataFrame:
    """Pair df's columns with clf.feature_importances_, most important first.

    *df* must be the already-preprocessed feature DataFrame and *clf* a
    fitted classifier exposing `feature_importances_`.
    """
    importance = pd.DataFrame({'cols': df.columns, 'imp': clf.feature_importances_})
    return importance.sort_values('imp', ascending=False)
# RESULTS VISUALIZATION
def plot_roc_curve(true_positive, false_positive, label=None):
    # Draw a ROC curve plus the diagonal "coin toss" reference line.
    # Intended usage: fpr, tpr, thresholds = roc_curve(y_train_true, y_proba_scores)
    # NOTE(review): the first argument is plotted on the x axis (labelled
    # 'False Positive Rate'), so despite its name `true_positive` is expected
    # to receive the FPR array (and `false_positive` the TPR) — confirm with
    # callers before renaming.
    plt.plot(true_positive, false_positive, linewidth=2, label=label)
    plt.plot([0, 1], [0, 1], 'k--', label="coin toss")
    plt.axis([0, 1, 0, 1])
    plt.legend(loc="lower right")
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):
    """Plot precision and recall as functions of the decision-function threshold.

    Expects: precisions, recalls, thresholds = precision_recall_curve(y_true, scores)
    """
    for series, fmt, name in ((precisions, "b-", "Precision"), (recalls, "g-", "Recall")):
        plt.plot(thresholds, series[:-1], fmt, label=name)
    plt.axis([0, 1, 0, 1])
    plt.xlabel("Threshold")
    plt.legend(loc="upper left")
def plot_precision_recall(precision, recall):
    """Plot the precision-recall curve (recall on the x axis, precision on the y axis)."""
    plt.plot(recall, precision, "b-", label="Precision")
    plt.axis([0, 1, 0, 1])
    plt.ylabel("Precision")
    plt.xlabel("Recall")
    plt.legend(loc="upper left")
# SAVING AND LOADING MODELS
def save_model(classifier, filename):
    """Persist *classifier* to '<filename>.joblib' via joblib."""
    target_path = filename + ".joblib"
    dump(classifier, target_path)
def load_model(filename):
    """Load and return the classifier stored at '<filename>.joblib'."""
    return load(filename + ".joblib")
"""
start_time = time.perf_counter()
width=400
columns=30
pd.set_option('display.width', width)
pd.set_option('display.max_columns', columns)
data=pd.read_csv("kc_house_data.csv")
print(data.shape)
#shuffle rows
data=data.sample(frac=1)
#tolgo id lat long
data=data.drop(columns=["id"])
#mantengo mese e anno di vendita
data['year'] = pd.DatetimeIndex(data['date']).year
data['month'] = pd.DatetimeIndex(data['date']).month
data=data.drop(columns=["date","lat","long"])
#set labels
luxury_value=750000
old=data["price"]
data["price"]=data["price"].apply(lambda x: 1 if x >= luxury_value else 0 )
data["zipcode"]=data["zipcode"].apply(lambda x: x-98000)
#plotting data
plot_data= data[data["price"]==1]
plot_data= plot_data.groupby("zipcode")["price"].count()
plot_data.sort_values()[-20:].plot(kind="bar")
plt.ylabel("number of luxury houses sold")
#divide feature columns from label column(price)
Y=data["price"].to_numpy()
#data=data.drop(columns=["price","sqft_lot","waterfront","yr_built","yr_renovated","zipcode","sqft_lot15","year","month"])
data=data.drop(columns="price")
X=data.to_numpy()
#normalizzo dati
min_max_scaler = preprocessing.StandardScaler()
X = min_max_scaler.fit_transform(X)
newdata=pd.DataFrame(X, columns=data.columns)
newdata.insert(0,"price",Y)
print(newdata.describe())
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.3)
#setting up classifiers
clf1=RandomForestClassifier(n_estimators=100)
clf2=svm.SVC(kernel="linear", gamma="scale")
clf3=svm.SVC(kernel="rbf", gamma="scale",probability=True)
clf4=tree.DecisionTreeClassifier()
clf5=MLPClassifier(hidden_layer_sizes=20, max_iter=1000)
clf6=KNeighborsClassifier(n_neighbors=5)
clf7=LogisticRegression(solver="lbfgs")
clf8= GaussianNB()
clf9=xgb.XGBClassifier(max_depth=10)
sizes=[0.9,0.7,0.5,0.3,0.1]
performance=list()
#provo tutti i modelli disponibili con cross validation, vedo l'accuratezza media per ogni modello
intro="mean clf"
for ntest in range(0,5):
timec = time.perf_counter()
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=sizes[ntest])
performance.append(cross_val_score(clf1, X_train, y_train, cv=10).mean())
print(intro,ntest,performance[ntest],time.perf_counter() - timec, "seconds")
timec = time.perf_counter()
print(intro,2,cross_val_score(clf2, X_train, y_train, cv=10).mean(),time.perf_counter() - timec, "seconds")
timec = time.perf_counter()
print(intro,3,cross_val_score(clf3, X_train, y_train, cv=10).mean(),time.perf_counter() - timec, "seconds")
timec = time.perf_counter()
print(intro,4,cross_val_score(clf4, X_train, y_train, cv=10).mean(),time.perf_counter() - timec, "seconds")
timec = time.perf_counter()
print(intro,5,cross_val_score(clf5, X_train, y_train, cv=10).mean(),time.perf_counter() - timec, "seconds")
timec = time.perf_counter()
print(intro,6,cross_val_score(clf6, X_train, y_train, cv=10).mean(),time.perf_counter() - timec, "seconds")
timec = time.perf_counter()
print(intro,7,cross_val_score(clf7, X_train, y_train, cv=10).mean(),time.perf_counter() - timec, "seconds")
timec = time.perf_counter()
print(intro,8,cross_val_score(clf8, X_train, y_train, cv=10).mean(),time.perf_counter() - timec, "seconds")
#timec = time.perf_counter()
#print(intro,9,cross_val_score(clf9, X_train, y_train, cv=10).mean(),time.perf_counter() - timec, "seconds")
timec = time.perf_counter()
clf1.fit(X_train,y_train)
predictions=clf1.predict(X_test)
#print(clf.feature_importances_)
print(confusion_matrix(y_test,predictions))
print(classification_report(y_test,predictions))
print(time.perf_counter() - timec, "seconds")
timec = time.perf_counter()
clf9.fit(X_train,y_train)
predictions=clf9.predict(X_test)
#print(clf.feature_importances_)
print(confusion_matrix(y_test,predictions))
print(classification_report(y_test,predictions))
print(time.perf_counter() - timec, "seconds")
#y_scores = cross_val_predict(clf3, X_train, y_train, cv=3, method="decision_function")
#print(confusion_matrix(y_train,y_scores))
#print(classification_report(y_train,y_scores))
#precisions, recalls, thresholds = precision_recall_curve(y_train, y_scores)
#print(y_scores)
def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):
plt.plot(thresholds, precisions[:-1], "b--", label="Precision")
plt.plot(thresholds, recalls[:-1], "g-", label="Recall")
plt.xlabel("Threshold")
plt.legend(loc="upper left")
plt.ylim([0, 1])
#plot_precision_recall_vs_threshold(precisions, recalls, thresholds)
#plt.show()
clf9.fit(X_train,y_train)
predictions=clf9.predict(X_test)
print(confusion_matrix(y_test,predictions))
print(classification_report(y_test,predictions))
predictions=clf9.predict(X_train)
print(confusion_matrix(y_train,predictions))
print(classification_report(y_train,predictions))
print("program terminated in:",time.perf_counter() - start_time, "seconds")
"""
| true |
0ffcadac74144fde57a6e442cb58cb412e192014 | Python | darraes/coding_questions | /v2/_data_structures_and_algorithms/segment_tree.py | UTF-8 | 2,598 | 3.078125 | 3 | [] | no_license | from typing import List
import math
class SegmentTree:
def __init__(self, arr: List[int], agg=lambda x, y: x + y):
x = math.ceil(math.log(len(arr), 2))
self.tree = [None] * (2 * (2 ** x - 1))
self.len = len(arr)
self.aggregator = agg
self._build(arr)
def _build(self, arr):
def build_helper(idx: int, s: int, e: int):
nonlocal arr
if s == e:
self.tree[idx] = arr[s]
return arr[s]
m = SegmentTree._mid(s, e)
self.tree[idx] = self.aggregator(
build_helper(2 * idx + 1, s, m), build_helper(2 * idx + 2, m + 1, e)
)
return self.tree[idx]
build_helper(0, 0, self.len - 1)
def segment(self, s: int, e: int):
def segment_helper(idx: int, qs: int, qe: int, cs: int, ce: int):
if cs >= qs and ce <= qe:
return self.tree[idx]
if ce < qs or cs > qe:
return 0
m = SegmentTree._mid(cs, ce)
return self.aggregator(
segment_helper(2 * idx + 1, qs, qe, cs, m),
segment_helper(2 * idx + 2, qs, qe, m + 1, ce),
)
return segment_helper(0, s, e, 0, self.len - 1)
def update(self, arr_i, diff):
def update_helper(idx, diff, i, cs, ce):
if i < cs or i > ce:
return
self.tree[idx] = self.aggregator(self.tree[idx], diff)
if cs != ce:
m = SegmentTree._mid(cs, ce)
update_helper(2 * idx + 1, diff, i, cs, m)
update_helper(2 * idx + 2, diff, i, m + 1, ce)
if not (0 <= arr_i < self.len):
raise "TODO Exception"
update_helper(0, diff, arr_i, 0, self.len - 1)
@staticmethod
def _mid(s: int, e: int) -> int:
return s + (e - s) // 2
#####################################################
import unittest
class TestFunctions(unittest.TestCase):
def test_mergesort(self):
stree = SegmentTree([1, 3, 5, 7, 9, 11])
self.assertEqual(15, stree.segment(1, 3))
self.assertEqual(24, stree.segment(1, 4))
self.assertEqual(35, stree.segment(1, 5))
self.assertEqual(12, stree.segment(2, 3))
stree.update(arr_i=2, diff=+1)
self.assertEqual(16, stree.segment(1, 3))
self.assertEqual(25, stree.segment(1, 4))
self.assertEqual(36, stree.segment(1, 5))
self.assertEqual(13, stree.segment(2, 3))
if __name__ == "__main__":
unittest.main(exit=False)
| true |
cce0fe9951c108eb6d71fb62438eb5fd97a3fe69 | Python | SunMyoungJun/emotion_extraction | /keyword_extraction.py | UTF-8 | 1,208 | 2.671875 | 3 | [] | no_license | import pandas as pd
from konlpy.tag import Kkma
from konlpy.tag import Twitter
from konlpy.tag import Okt
from nltk.tokenize.punkt import PunktSentenceTokenizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import normalize
import numpy as np
import time
# Module-level morphological analysers shared by the functions below.
kkma = Kkma()
okt = Okt()
def main():
    """Load the first article plus the stop-word list, extract its nouns and print them."""
    articles = pd.read_excel(
        'C:/SSAFY/semester2/특화PJT/ai/article.xlsx', engine='openpyxl')
    stopword_frame = pd.read_excel(
        'C:/SSAFY/semester2/특화PJT/ai/stop_words.xlsx', engine='openpyxl')
    stopwords = stopword_frame['형태'].tolist()
    first_article = articles['content'][0]
    print(get_nouns(get_sentences(first_article), stopwords))
def get_sentences(text):
    """Split *text* into sentences using the Kkma analyser."""
    return kkma.sentences(text)
def get_nouns(sentences, stopwords):
    """Extract nouns from each sentence, skipping stop words and 1-character nouns.

    Returns one space-joined noun string per non-empty sentence.
    """
    nouns = []
    for sentence in sentences:
        # Bug fix: the original used `sentence is not ''` — an identity
        # comparison, which is unreliable for emptiness (and a SyntaxWarning
        # on modern Python).  Truthiness is the correct test.
        if sentence:
            nouns.append(' '.join([noun for noun in okt.nouns(str(sentence))
                                   if noun not in stopwords and len(noun) > 1]))
    return nouns
# Run the noun-extraction demo when executed as a script.
if __name__ == '__main__':
    main()
| true |
8616d3457b5da1c1e694139c6ebcdb8d16d9ff56 | Python | JiaolongYu/Share | /compute_tests.py | UTF-8 | 690 | 2.78125 | 3 | [] | no_license | from unittest import TestCase
import compute_highest_affinity
class StandAloneTests(TestCase):
    """Stand-alone tests for compute_highest_affinity.highest_affinity."""

    def test_compute2(self):
        """A visit log engineered so two site pairs tie for highest affinity."""
        # Inlined the string values directly instead of the original's 16
        # single-letter alias variables (a = "a", ..., H = "H"), most of
        # which were unused.
        site_list = ["a", "a", "b", "c", "c", "c", "d", "d", "e", "e", "f", "f", "g"]
        user_list = ["B", "D", "C", "A", "B", "C", "B", "C", "C", "D", "A", "B", "A"]
        time_list = range(0, 13)
        computed_result = compute_highest_affinity.highest_affinity(site_list, user_list, time_list)
        # Either tying pair is a correct answer.  assertIn replaces the
        # original `if ...: assertTrue(True) else: assertTrue(False)`
        # anti-pattern and reports the computed value on failure.
        self.assertIn(computed_result, [("c", "d"), ("c", "f")])
4a94cdea4fc10e21e108baff44accd8f5b709c38 | Python | Yasmojam/DoYouHaveTheGuts2019 | /src/collectable.py | UTF-8 | 1,317 | 2.609375 | 3 | [
"MIT"
] | permissive | from server import ObjectUpdate
from time import time
from typing import Tuple
# A 2-D point as (x, y).  NOTE(review): appears unused in this chunk — confirm.
Vector = Tuple[float, float]
# Object `type` strings that identify collectables.
COLLECTABLE_TYPES = set(["AmmoPickup", "HealthPickup", "Snitch"])
class Collectable:
    """State tracked for a single collectable object reported by the server.

    Keeps the latest position plus a short rolling history (at most five
    entries) of recent positions and the wall-clock times they arrived.
    """

    def __init__(self, payload: ObjectUpdate) -> None:
        self.name = payload.name
        self.id = payload.id
        self.type = payload.type
        self.last_seen = time()
        self.position = (payload.x, payload.y)
        self.positions = [(payload.x, payload.y)]
        self.payload_times = [time()]

    def update(self, payload: ObjectUpdate) -> None:
        """Fold a fresh server update into the rolling history."""
        self.last_seen = time()
        latest = (payload.x, payload.y)
        self.position = latest
        # Keep only the four previous sightings plus the new one.
        self.positions = self.positions[-4:] + [latest]
        self.payload_times = self.payload_times[-4:] + [time()]

    def current_pos(self) -> Tuple[float, float]:
        """Most recently seen position."""
        return self.positions[-1]

    def previous_pos(self) -> Tuple[float, float]:
        """Second most recent position, or None after a single sighting."""
        if len(self.positions) > 1:
            return self.positions[-2]
        return None

    def current_pos_time(self) -> float:
        """Arrival time of the most recent sighting."""
        return self.payload_times[-1]

    def previous_pos_time(self) -> float:
        """Arrival time of the sighting before the latest one, or None."""
        if len(self.payload_times) > 1:
            return self.payload_times[-2]
        return None

    def time_since_last(self):
        """Seconds elapsed since this object was last seen."""
        return time() - self.last_seen
| true |
c371d5104897f43e779c78d8515cc528a528bb42 | Python | Heyu-cmd/distrbutied_Crawl | /socket_server.py | UTF-8 | 2,207 | 2.9375 | 3 | [] | no_license | import socket
import sys
import threading
import signal
class ServerSocket:
    """Minimal threaded TCP echo-style server.

    Each accepted connection is handled on its own daemon thread; the reply
    for the received text is produced by *callback*.
    """

    def __init__(self, callback, host='localhost', port=20011):
        """
        :param callback: function mapping the received text to the reply text
        :param host: Symbolic name meaning all available interface
        :param port: Arbitray non-privilaged port
        """
        self.threads = []
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.callback = callback
        # bind socket to local host and port
        try:
            self.s.bind((host, port))
        except socket.error as msg:
            # Bug fix: socket.error is OSError on Python 3 and cannot be
            # indexed (msg[0]/msg[1] raised TypeError); use the errno and
            # strerror attributes instead.
            print(" Bind failed. Error code : " + str(msg.errno) + " Message " + str(msg.strerror))
            sys.exit()
        # socket listening
        self.s.listen(10)
        print("socket now listening")

    def start_new_thread(self, function, args):
        """Run function(*args) on a new daemon thread and remember the thread."""
        t = threading.Thread(target=function, args=args)
        self.threads.append(t)
        # Daemon so the process can exit without waiting on handler threads.
        t.daemon = True
        t.start()

    def startlistening(self):
        """Accept connections forever, handling each on its own thread."""
        while True:
            # Blocks until a new client connects.
            conn, addr = self.s.accept()
            # Spawn a new thread to handle the request.
            self.start_new_thread(self.clientthread, (conn,))

    def clientthread(self, conn):
        """
        function for handling connection, this will be used to create thread
        :param conn: the connected client socket (read once, reply, close)
        :return:
        """
        data = conn.recv(1024).decode()
        print("data from client ------" + str(data))
        reply = self.callback(data)
        print('reply from server ------' + reply)
        conn.sendall(reply.encode())
        conn.close()

    def start(self):
        """Enter the accept loop (blocks forever)."""
        self.startlistening()

    def close(self):
        """Close the listening socket."""
        # self.s.shutdown(socket.SHUT_WR)
        self.s.close()
def msg_received(_data):
    """Default connection handler: acknowledge every incoming message."""
    return 'Ack'
def exit_signal_handler(signal, frame):
    # SIGINT handler that deliberately does nothing, so signal.pause() in the
    # main block can return and the process can shut down.
    # NOTE(review): the `signal` parameter shadows the imported module inside
    # this handler's scope.
    pass
if __name__ == '__main__':
    server = ServerSocket(msg_received)
    # NOTE: start() enters the accept loop and never returns, so the lines
    # below are only reached if the loop is interrupted.
    server.start()
    signal.signal(signal.SIGINT, exit_signal_handler)
    signal.pause()
    # Bug fix: the signal module has no close(); the intent was evidently to
    # close the server's listening socket before exiting.
    server.close()
    sys.exit(1)
| true |
0993777fe1de0442bb41f92a373d4a194778d47f | Python | python-diamond/Diamond | /src/collectors/scribe/scribe.py | UTF-8 | 2,069 | 2.546875 | 3 | [
"MIT"
] | permissive | # coding=utf-8
"""
Collect counters from scribe
#### Dependencies
* /usr/sbin/scribe_ctrl, distributed with scribe
"""
import subprocess
import string
import diamond.collector
class ScribeCollector(diamond.collector.Collector):
    """Collect counters from a local scribe daemon via the scribe_ctrl binary."""

    def get_default_config_help(self):
        """Describe this collector's configuration keys."""
        config_help = super(ScribeCollector, self).get_default_config_help()
        config_help.update({
            'scribe_ctrl_bin': 'Path to scribe_ctrl binary',
            'scribe_port': 'Scribe port',
        })
        return config_help

    def get_default_config(self):
        """Return the default configuration for this collector."""
        config = super(ScribeCollector, self).get_default_config()
        config.update({
            'path': 'scribe',
            'scribe_ctrl_bin': self.find_binary('/usr/sbin/scribe_ctrl'),
            'scribe_port': None,
        })
        return config

    def key_to_metric(self, key):
        """Replace all non-letter characters with underscores"""
        # Bug fix: string.letters exists only on Python 2 (and is locale
        # dependent); string.ascii_letters is available on both 2 and 3.
        return ''.join(c if c in string.ascii_letters else '_' for c in key)

    def get_scribe_ctrl_output(self):
        """Run `scribe_ctrl counters` and return its stdout ("" if it cannot run)."""
        cmd = [self.config['scribe_ctrl_bin'], 'counters']
        if self.config['scribe_port'] is not None:
            cmd.append(self.config['scribe_port'])
        self.log.debug("Running command %r", cmd)
        try:
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        except OSError:
            self.log.exception("Unable to run %r", cmd)
            return ""
        stdout, stderr = p.communicate()
        if p.wait() != 0:
            # Log the failure but still return whatever stdout was produced.
            self.log.warning("Command failed %r", cmd)
            self.log.warning(stderr)
        return stdout

    def get_scribe_stats(self):
        """Parse 'name: value' counter lines into (metric, int) pairs."""
        output = self.get_scribe_ctrl_output()
        data = {}
        for line in output.splitlines():
            key, val = line.rsplit(':', 1)
            metric = self.key_to_metric(key)
            data[metric] = int(val)
        return data.items()

    def collect(self):
        """Publish every counter reported by scribe_ctrl."""
        for stat, val in self.get_scribe_stats():
            self.publish(stat, val)
| true |
30c5d6404997f9668c281c8373c832643c2addbd | Python | rjmcf/Bi-onic | /game/line.py | UTF-8 | 3,431 | 3.296875 | 3 | [] | no_license | import pyxel
from plugins.enum import Enum
from plugins.geometry import Point, Size
from plugins.sprite import Sprite, Anchor
from typing import Any
# Possible states for the line
# Possible states for the line
class LineState(Enum):
    # NOTE: Enum comes from plugins.enum, not the stdlib enum module.
    STATE_NORMAL = 0  # between the low and high borders (see Line.update)
    STATE_HIGH = 1    # height has crossed above the high border
    STATE_LOW = 2     # height has dropped below the low border
# Represents the line drawn on the graph.
#TODO Unfinished: Allow changing of speed
#TODO Unfinished: Allow for jumps of more than 1 * width to not break the line.
class Line():
    """Model of the line drawn on the graph.

    Heights are stored as offsets from a "middle" value. Because screen y
    grows downward, a position *above* the middle is a *negative* height,
    which is why thresholds are negated in update().
    """

    def __init__(self, game_state : Any) -> None:
        self.line_state = LineState.STATE_NORMAL
        self.game_state = game_state
        # Record positions of segments as height above some "middle" value
        self.current_height : float = 0
        self.velocity : float = 0

    def set_display(self, line_display : Any) -> None:
        # Cache the display's thresholds so update() can classify the height.
        self.line_display = line_display
        self.low_border = line_display.low_border
        self.low_bound = line_display.low_bound
        self.high_border = line_display.high_border

    def reset(self) -> None:
        # Return to the middle value and clear the display's segment trail.
        self.current_height = 0
        self.line_display.reset()

    def update(self) -> None:
        # Integrate this frame's accumulated velocity, then consume it.
        self.current_height += self.velocity
        self.velocity = 0
        # Change state depending on current height
        # Remember y increases as you go down the screen
        if self.current_height < -self.high_border:
            self.line_state = LineState.STATE_HIGH
        elif self.current_height > -self.low_bound:
            # Past the hard lower bound: kill the player (state left as-is).
            self.game_state.kill_player()
        elif self.current_height > -self.low_border:
            self.line_state = LineState.STATE_LOW
        else:
            self.line_state = LineState.STATE_NORMAL
        self.line_display.set_current_height(self.current_height)

    # method called to affect the line's velocity, whether by player or environment
    def add_velocity(self, velocity_adjustment : float) -> None:
        self.velocity += velocity_adjustment
# Interface used by the controller to add velocity to the line
class LineInterface():
    """Write-only facade handed to the controller.

    Exposes nothing of the wrapped line except the ability to nudge its
    velocity.
    """

    def __init__(self, line : Any) -> None:
        self.line = line

    def add_velocity(self, velocity_adjustment : float) -> None:
        # Pure delegation to the wrapped Line.
        self.line.add_velocity(velocity_adjustment)
# Interface used to get the current state of the line
class LineStateInterface():
    """Read-only facade: exposes just the line's current state."""

    def __init__(self, line : Any) -> None:
        self.line = line

    def get_current_line_state(self) -> Any:
        # Pure delegation: report whatever state the wrapped line holds now.
        return self.line.line_state
# The visual representation of the Line
class LineDisplay():
    """Visual representation of the Line: a trail of recent height samples.

    Heights are offsets from the graph's middle (negative = above it), so
    thresholds are negated when compared against stored heights.
    """

    def __init__(self, length : float, low_border : float, high_border : float, low_bound : float, high_bound : float, color : int, width : int = 0) -> None:
        self.length = length          # maximum number of segments kept/drawn
        self.low_border = low_border
        self.high_border = high_border
        self.low_bound = low_bound
        self.high_bound = high_bound
        self.color = color
        self.width = width            # radius used when drawing a segment circle
        self.arrow_sprite = Sprite(Point(32,0), Size(7,8), 0, 0)
        # Record segments to be drawn as the heights they should be drawn at
        self.segments : list[float] = []

    def reset(self) -> None:
        # Drop the entire trail.
        self.segments = []

    def set_current_height(self, current_height : float) -> None:
        # Add to back
        self.segments.append(current_height)
        while len(self.segments) > self.length:
            # Remove from front
            self.segments.pop(0)

    def draw(self, start : Any) -> None:
        # NOTE(review): indentation was lost in extraction; the `else` below
        # is assumed to pair with the outer bounds check — confirm against
        # the original file.
        x = start.x
        # Draw backwards from the starting point
        for index in range(len(self.segments)-1, -1, -1):
            if self.segments[index] < -self.high_bound:
                # Off-screen above the graph: only the newest segment gets
                # the arrow marker; older off-screen samples draw nothing.
                if index == len(self.segments)-1:
                    self.arrow_sprite.draw(Point(x,start.y - self.high_bound), Anchor(Anchor.MIDDLE))
            else:
                pyxel.circ(x, start.y + self.segments[index], self.width, self.color)
            x -= 1
| true |
396aa57f0af05a031ad902d83eda6980a37b5d79 | Python | krito1124/python200817 | /score.py | UTF-8 | 447 | 3.390625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 17 14:58:30 2020
@author: USER
"""
def grade(score: int) -> str:
    """Map a score in [0, 100] to its letter-grade band.

    90+ -> "level A", 80+ -> "level B", 70+ -> "level C",
    60+ -> "level D", otherwise "level E".
    """
    if score >= 90:
        return "level A"
    if score >= 80:
        return "level B"
    if score >= 70:
        return "level C"
    if score >= 60:
        # Fix: was "level d" — inconsistent casing with the other bands.
        return "level D"
    return "level E"


def main() -> None:
    """Prompt for a score and print its grade (or an error for out-of-range)."""
    score = int(input("please enter your score:"))
    if 0 <= score <= 100:
        print(grade(score))
    else:
        # Fix: was the typo "thats's wrong".
        print("that's wrong")


if __name__ == "__main__":
    main()
e8070fa0f542db266948b6616badd6c03d11a300 | Python | StudyGroupPKU/fruit_team | /Scraping/Junho/project/old_n2_baidu_xinwen.py | UTF-8 | 1,391 | 2.578125 | 3 | [] | no_license | from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
import datetime
import numpy
from time import sleep
#random.seed(datetime.datetime.now())
def naver(duration=3, time=3):
    """Repeatedly scrape Baidu News hot headlines.

    Fetches http://news.baidu.com/, collects the anchor texts inside the
    "hotnews" div, prints them, and stores them in a dict keyed by the
    fetch timestamp. Note: the loop performs ``time + 1`` fetches (it only
    breaks once the counter exceeds ``time``), sleeping ``duration``
    seconds between fetches.
    """
    snapshots = dict()
    iteration = 1
    while True:
        page = urlopen("http://news.baidu.com/")
        soup = BeautifulSoup(page, "html.parser")
        # All headline links inside the hot-news section.
        anchors = soup.find("div", {"class": "hotnews"}).findAll("a", target="_blank")
        print(str(iteration) + "th rotation!")
        stamp = datetime.datetime.now()
        headlines = []
        for anchor in anchors:
            headlines.append(anchor.get_text())
        print(headlines)
        snapshots[stamp] = headlines
        if iteration > time:
            break
        iteration = iteration + 1
        sleep(duration)
    return snapshots
def main():
    """Scrape two-plus rounds of headlines and print the collected dict."""
    results = naver(3, 2)
    print(results)


if __name__ == "__main__":
    main()
| true |
094829ee588b6db15beccbc890fcd0cffae738ab | Python | ingunnjv/IT3708 | /Project_1/src/test.py | UTF-8 | 445 | 2.921875 | 3 | [] | no_license | import numpy as np
from timeit import default_timer as timer
def numpy_read(N, A):
    """Timing workload: perform N single-element reads from a 2-D numpy array."""
    for _ in range(N):
        _value = A[55, 55]
def list_read(N, B):
    """Timing workload: perform N single-element reads from a nested list.

    Fix: the original body indexed the module-global numpy array ``A``
    (``A[55][55]``) instead of the ``B`` parameter, so the "list" benchmark
    was actually re-timing numpy access.
    """
    for _ in range(N):
        _value = B[55][55]
# Benchmark setup: one million reads against a numpy array vs. a nested list.
N = 1000000
A = np.zeros((1000, 1000))
# Fix: build 1000 *independent* rows. ``[[0] * 1000] * 1000`` aliases a single
# row object 1000 times — harmless for this read-only benchmark, but a classic
# pitfall the moment any row is mutated.
B = [[0] * 1000 for _ in range(1000)]

start = timer()
numpy_read(N, A)
end = timer()
print("numpy_read time: %f" % (end-start))

start = timer()
list_read(N, B)
end = timer()
print("list_read time: %f" % (end-start))
4adfdf24d2e97385a21ebbc77b4f608b7c4b0c41 | Python | zerotk/zops.anatomy | /zops/anatomy/layers/_tests/test_tree.py | UTF-8 | 3,632 | 3.03125 | 3 | [] | no_license | import pytest
from zops.anatomy.assertions import assert_file_contents
from zops.anatomy.layers.tree import AnatomyFile, AnatomyTree, merge_dict
import os
def test_anatomy_file(datadir):
    """AnatomyFile.apply writes the file's contents under the target dir."""
    # NOTE(review): the triple-quoted literals' internal indentation was lost
    # in extraction; they are kept flush-left here, which is self-consistent
    # if assert_file_contents/AnatomyFile normalize leading whitespace — confirm.
    # Prepare
    f = AnatomyFile(
        "gitignore",
        """
a
b
""",
    )

    # Execute
    f.apply(datadir, variables={})

    # Check
    assert_file_contents(
        datadir + "/gitignore",
        """
a
b
""",
    )
def test_anatomy_file_executable(datadir):
    """executable=True makes the written file executable (os.X_OK)."""
    # Prepare
    f = AnatomyFile(
        "gitignore",
        """
a
b
""",
        executable=True,
    )

    # Execute
    f.apply(datadir, variables={})

    # Check: contents are written as usual...
    assert_file_contents(
        datadir + "/gitignore",
        """
a
b
""",
    )
    # ...and the execute bit is set.
    assert os.access(datadir + "/gitignore", os.X_OK)
def test_anatomy_file_with_filenames_using_variables(datadir):
    """The target filename itself is a template expanded from `variables`.

    Fix: the filename-template literal was corrupted to "{(unknown)}" (an
    extraction artifact). Restored to the Jinja-style "{{ filename }}" that
    the ``variables={"filename": "alpha.txt"}`` call and the asserted
    ``alpha.txt`` path imply.
    """
    f = AnatomyFile("{{ filename }}", "This is alpha.")
    f.apply(datadir, variables={"filename": "alpha.txt"})
    assert_file_contents(
        datadir + "/alpha.txt",
        """
This is alpha.
""",
    )
def test_anatomy_file_replace_filename_with_variable(datadir):
    """An explicit `filename=` argument to apply() overrides the file's name."""
    f = AnatomyFile("alpha.txt", "This is alpha.")
    f.apply(datadir, variables={}, filename="zulu.txt")
    # The original name must not be used...
    assert not os.path.isfile(datadir + "/alpha.txt")
    # ...only the override.
    assert_file_contents(
        datadir + "/zulu.txt",
        """
This is alpha.
""",
    )
def test_anatomy_tree(datadir):
    """AnatomyTree renders file templates against variables added to the tree."""
    # Prepare: a template that loops over gitignore.blocks...
    tree = AnatomyTree()
    tree.create_file(
        ".gitignore", "line 1\n{% for i in gitignore.blocks %}{{ i }}{% endfor %}\n"
    )
    # ...and the variable that feeds the loop.
    tree.add_variables(dict(gitignore=dict(blocks=["line 2"])), left_join=False)

    # Execute
    tree.apply(datadir)

    # Check
    assert_file_contents(
        datadir + "/.gitignore",
        """
line 1
line 2
""",
    )
def test_anatomy_tree_with_variables(datadir):
    """apply() raises RuntimeError for undefined template variables."""
    # Prepare
    tree = AnatomyTree()
    tree.create_file("alpha.txt", "This is {{ name }}.")

    # Without defined variables
    with pytest.raises(RuntimeError):
        tree.apply(datadir)

    # With defined variables
    tree.add_variables({"name": "ALPHA"}, left_join=False)
    tree.apply(datadir)
    assert_file_contents(
        datadir + "/alpha.txt",
        """
This is ALPHA.
""",
    )
def test_merge_dict():
    """Behavior of merge_dict: left_join strictness, deep merge, list concat."""
    # Error if the right dict has keys that doesn't exist on the left dict...
    # ... in the first level
    with pytest.raises(RuntimeError):
        merge_dict(dict(a=1), dict(z=9), left_join=True)

    # ... in the second level
    with pytest.raises(RuntimeError):
        merge_dict(dict(a=dict(a=1)), dict(a=dict(z=9)), left_join=True)

    # ... in the third level we ignore this differences.
    assert merge_dict(dict(a=dict(a=dict(a=1))), dict(a=dict(a=dict(z=9)))) == dict(
        a=dict(a=dict(a=1, z=9))
    )

    # With left_join=False we ignore keys on the right dict that doesn't exist on the left dict.
    assert merge_dict(dict(a=1), dict(b=2), left_join=False) == dict(a=1, b=2)

    # Scalars: right side wins.
    assert merge_dict({"a": 1}, {"a": 2}) == {"a": 2}
    # Lists: concatenated (left + right), even when nested.
    assert merge_dict({"a": [1]}, {"a": [2]}) == {"a": [1, 2]}
    assert merge_dict({"a": {"aa": [1]}}, {"a": {"aa": [2]}}) == {"a": {"aa": [1, 2]}}
    assert merge_dict({"a": {"aa": [1]}}, {"a": {"aa": [2]}}) == {"a": {"aa": [1, 2]}}
    # Nested scalars: right side still wins.
    assert merge_dict(
        {"PROJECT": {"code_name": "alpha"}}, {"PROJECT": {"code_name": "zulu"}}
    ) == {"PROJECT": {"code_name": "zulu"}}
| true |