blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
4df40b1c4c502a81fff75481c25ef334b23d53aa | Python | tusharkmandal/General | /split.py | UTF-8 | 239 | 2.796875 | 3 | [] | no_license | def email_func(valstr):
if '@' in valstr:
for mail in valstr.split(" "):
if '@' in mail:
return mail
else:
return False
email_func("this is first abhinav@cloudxlab.com and this is second sandeep@cloudxlab.com")
| true |
52a566ccd01ad45059feed63a2c9706f7c5c8eb0 | Python | robinandeer/pyNoise | /src/playwmsa.py | UTF-8 | 2,544 | 2.890625 | 3 | [] | no_license | #! /usr/bin/env python
import sys
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio import AlignIO
def findIndels(seq):
    """Return True if the sequence contains at least one indel character ('-').

    The original trailing ``return pos`` was unreachable and referenced an
    undefined name (a latent NameError); it has been removed.
    """
    # Membership test expresses "find() != -1" directly.
    return '-' in seq
def checkAas(seq, rows):
    """Return True if at least half of the first *rows* residues are unique.

    A residue is "unique" when it occurs exactly once in *seq*.  The original
    only tested the threshold at the top of each loop iteration, so a column
    whose unique count first reached rows/2 on the final residue was wrongly
    reported as False; the threshold is now also checked after counting.
    """
    uniques = 0
    for i in range(rows):
        if seq.count(seq[i]) == 1:
            uniques += 1
        # uniques only grows, so checking after each increment is equivalent
        # to the original early-exit plus a final check.
        if uniques >= rows / 2:
            return True
    return False
def buildSeqCols(alignment, rows, aas):
    """Transpose the first *rows* sequences of a multiple alignment to columns.

    Each element of the returned list is one alignment column (a tuple of
    residues).  *aas* (sequence length) is unused but retained for interface
    compatibility with existing callers.
    """
    seq_rows = [list(alignment[i].seq) for i in range(rows)]
    # Materialise the transpose as a list: callers index into and pop() from
    # the result, which a bare zip() iterator (Python 3) does not support.
    return list(zip(*seq_rows))
def main():
    """Noise-reduce a PHYLIP multiple alignment and write the result as FASTA.

    Reads ``../data/outfile.phy`` (produced by convert2phy.py), removes every
    alignment column that contains an indel or is mostly unique residues, and
    writes the surviving sequences to ``../data/infile.fa``.
    """
    # Parse the PHYLIP alignment; 'with' guarantees the handle is closed.
    path = '../data/outfile.phy'
    with open(path, 'r') as handle:
        alignment = AlignIO.read(handle, "phylip")

    # Number of sequences and residues per sequence.
    # NOTE(review): uses alignment[1]; presumably every row has the same
    # length — confirm for single-sequence inputs.
    rows = len(alignment)
    aas = len(alignment[1].seq)

    # Transpose to columns; force a list so columns can be pop()ed below
    # (robust whether buildSeqCols returns a list or a lazy zip iterator).
    multSeqCols = list(buildSeqCols(alignment, rows, aas))

    # Noise reduction: walk backwards so pop() does not shift later indices.
    for i in range(aas - 1, -1, -1):
        if findIndels(str(multSeqCols[i])) or checkAas(multSeqCols[i], rows):
            multSeqCols.pop(i)

    # Transpose back to one row per sequence (list, since it is indexed below).
    multSeq = list(zip(*multSeqCols))

    # Build the output string in FASTA format.
    consensusStr = ''
    for i in range(rows):
        consensusStr += '>' + alignment[i].id + '\n' + ''.join(multSeq[i]) + '\n'

    # Write the filtered alignment.
    with open('../data/infile.fa', 'w') as handleOut:
        handleOut.write(consensusStr)
## tempalignment = []
## for c in range(aas):
## tempCol = ''
## for r in range(rows):
##
## tempCol += alignment[r].seq[c]
##
## if not findIndels(tempCol):
## if not checkAas(tempCol, rows):
## for i in range(rows):
## tempalignment += transpose(tempCol)
## print tempCol
## else:
## print 'Removed'
# Run only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| true |
6ef96ec2b89200849c4bbab0fa58c7c9f93443ac | Python | MO105/DreamTeam | /Data_mining/Subcellular_location/Subcellular location mining.py | UTF-8 | 5,117 | 3.078125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 12 17:44:36 2020
@author: sheri
"""
#Import packages
import csv
import pandas as pd
import requests
import re
import numpy as np
#-----------------------------------------------------------------------------#
# Read the kinase list csv file into a dataframe (column 0 is the index).
df = pd.read_csv("clean_human_kinase.csv", index_col=0)
# UniProt accession numbers and gene names, one entry per kinase.
identifier = list(df.uniprot_number)
geneName = list(df.gene_name)

# QuickGO REST URL fragments.  Two URLs are built per kinase because the
# cellular-component results can span two pages (limit=100 per page).
url1 = 'https://www.ebi.ac.uk/QuickGO/services/annotation/search?includeFields=goName&geneProductId='
url2 = '&aspect=cellular_component&limit=100&page=1'
url3 = '&aspect=cellular_component&limit=100&page=2'

quickGODataList = []   # raw QuickGO response text, page 1, one entry per kinase
quickGODataList2 = []  # raw QuickGO response text, page 2, one entry per kinase
errorList = []         # identifiers whose lookup failed

# Download both result pages for every kinase identifier.
for i in identifier:
    try:
        url4 = url1 + i + url2
        url5 = url1 + i + url3
        quickGODataList.append(requests.get(url4).text)
        quickGODataList2.append(requests.get(url5).text)
    except Exception:
        # Narrowed from a bare 'except:', which also swallowed
        # KeyboardInterrupt/SystemExit and hid programming errors.
        errorList.append(i)

# Extract the cellular-component names ("goName" fields) from the raw JSON
# text of each response.
regex1 = re.compile(r'"goName":"[A-Za-z]*\,*\s*\-*[a-z]*\,*\:*\-*\s*[A-Za-z]*\<*\-*\s*[A-Za-z]*\,*\s*[A-Za-z]*\-*\,*\s*[A-Za-z]*"')
kinaseInfoList = []
kinaseInfoList2 = []
for value in quickGODataList:
    kinaseInfoList.append(regex1.findall(value))
for value in quickGODataList2:
    kinaseInfoList2.append(regex1.findall(value))

# Strip the JSON punctuation from each match list, leaving comma-separated
# location names per kinase.
splitList = []
splitList2 = []
for i in kinaseInfoList:
    i = str(i)
    splitList.append(
        i.replace('"', '').replace('goName', '').replace("[]", "")
         .replace("]", "").replace("[", "").replace("u':", "")
         .replace("'", "").replace(":", ""))
for j in kinaseInfoList2:
    j = str(j)
    splitList2.append(
        j.replace('"', '').replace('goName', '').replace("[]", "")
         .replace("]", "").replace("[", "").replace("u':", "")
         .replace("'", "").replace(":", ""))

# Assemble a dataframe: one row per kinase, with both result pages.
kinaseDict = {'Gene Name': geneName, 'Uniprot Number': identifier,
              'Subcellular Location1': splitList,
              "Subcellular Location2": splitList2}
df = pd.DataFrame(kinaseDict)

# Replace missing values, then merge both pages into a single
# comma-separated column and drop the per-page columns.
df = df.replace(np.nan, 0)
df['Subcellular Location'] = df['Subcellular Location1'].astype(str) + ',' + \
    df['Subcellular Location2'].astype(str)
del df['Subcellular Location1']
del df['Subcellular Location2']

# Explode the comma-separated locations so each one becomes its own row.
new_df = (df.set_index(['Gene Name', 'Uniprot Number'])
          .stack()
          .str.split(',', expand=True)
          .stack()
          .unstack(-2)
          .reset_index(-1, drop=True)
          .reset_index()
          )

# Tidy the location strings: trim whitespace and title-case each word.
new_df['Subcellular Location'] = new_df['Subcellular Location'].str.strip()
new_df['Subcellular Location'] = new_df['Subcellular Location'].str.title()

# Drop duplicate rows and rows whose location is empty.
new_df2 = new_df.drop_duplicates()
final_df = new_df2[new_df2['Subcellular Location'] != '']
# NOTE(review): reset_index() returns a new frame; its result is discarded
# here, exactly as in the original code.
final_df.reset_index()
# Save the dataframe to a csv file.
final_df.to_csv('Subcellular_location.csv')
d57dd2d2e36e279ece77b2c0e36587188107245f | Python | dhaval1212/Optical-Character-Recognition | /loadingModel.py | UTF-8 | 934 | 2.96875 | 3 | [] | no_license | import numpy as np
import tensorflow as tf
from keras.preprocessing import image
import cv2
import matplotlib.pyplot as plt
# The 26 output classes, index-aligned with the model's softmax output.
CATEGORIES = ["A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z"]
# Load the test character as grayscale and resize to the model's 28x28 input.
test_image = cv2.imread('myTestImages/s65.jpg',cv2.IMREAD_GRAYSCALE)
test_image = cv2.resize(test_image, (28,28))
# Disabled pixel-inversion experiment, kept as in the original.
"""for i in range(len(test_image)):
    for j in range(len(test_image[0])):
        test_image[i][j] = 255 - test_image[i][j] """
#plt.imshow(test_image)
# Add batch and channel axes: (28, 28) -> (1, 28, 28, 1).
test_image = np.expand_dims(test_image, axis = 0)
test_image = np.expand_dims(test_image, axis = 3)
#print(test_image.shape)
#loading saved model
new_model = tf.keras.models.load_model('Thresholded_Blurred_Trained')
# Run inference and report the highest-probability class.
predictions = new_model.predict(np.array(test_image))
print("Predicted character is :",end = " ")
print(CATEGORIES[np.argmax(predictions[0])])
# Display the (preprocessed) input image for visual confirmation.
img = cv2.resize(test_image[0], (28,28))
plt.imshow(img)
ca0870ad779aeb3ee5e743b90f27134125c83d72 | Python | usnistgov/mosaic | /mosaic/trajio/binTrajIO.py | UTF-8 | 8,195 | 2.796875 | 3 | [
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | # -*- coding: utf-8 -*-
"""
Binary file implementation of metaTrajIO. Read raw binary files with specified record sizes
:Created: 4/22/2013
:Author: Arvind Balijepalli <arvind.balijepalli@nist.gov>
:License: See LICENSE.TXT
:ChangeLog:
.. line-block::
9/13/15 AB Updated logging to use mosaicLogFormat class
3/28/15 AB Updated file read code to match new metaTrajIO API.
1/27/15 AB Memory map files on read.
1/26/15 AB Refactored code to read interleaved binary data.
7/27/14 AB Update interface to specify python PythonStructCode instead of
RecordSize. This will allow any binary file to be decoded
The AmplifierScale and AmplifierOffset are set to 1 and 0
respectively if PythonStructCode is an integer or short.
4/22/13 AB Initial version
"""
import struct
import mosaic.trajio.metaTrajIO as metaTrajIO
import mosaic.utilities.mosaicLogging as mlog
from mosaic.utilities.util import eval_
import numpy as np
__all__ = ["binTrajIO", "InvalidDataColumnError"]
class InvalidDataColumnError(Exception):
	"""Error type for an invalid data column (exported in __all__; not raised in this module)."""
	pass
class binTrajIO(metaTrajIO.metaTrajIO):
"""
Read a file that contains interleaved binary data, ordered by column. Only a single
column that holds ionic current data is read. The current in pA
is returned after scaling by the amplifier scale factor (``AmplifierScale``) and
removing any offsets (``AmplifierOffset``) if provided.
:Usage and Assumptions:
Binary data is interleaved by column. For three columns (*a*, *b*, and *c*) and *N* rows,
binary data is assumed to be of the form:
[ a_1, b_1, c_1, a_2, b_2, c_2, ... ... ..., a_N, b_N, c_N ]
The column layout is specified with the ``ColumnTypes`` parameter, which accepts a list of tuples.
For the example above, if column **a** is the ionic current in a 64-bit floating point format,
column **b** is the ionic current representation in 16-bit integer format and column **c** is
an index in 16-bit integer format, the ``ColumnTypes`` paramter is a list with three
tuples, one for each column, as shown below:
[('curr_pA', 'float64'), ('AD_V', 'int16'), ('index', 'int16')]
The first element of each tuple is an arbitrary text label and the second element is
a valid `Numpy type <http://docs.scipy.org/doc/numpy/user/basics.types.html>`_.
Finally, the ``IonicCurrentColumn`` parameter holds the name (text label defined above) of the
column that holds the ionic current time-series. Note that if an integer column is selected,
the ``AmplifierScale`` and ``AmplifierOffset`` parameters can be used to convert the voltage from
the A/D to a current.
Assuming that we use a floating point representation of the ionic current, and
a sampling rate of 50 kHz, a settings section that will read the binary file format
defined above is:
.. code-block:: javascript
"binTrajIO": {
"AmplifierScale" : "1",
"AmplifierOffset" : "0",
"SamplingFrequency" : "50000",
"ColumnTypes" : "[('curr_pA', 'float64'), ('AD_V', 'int16'), ('index', 'int16')]",
"IonicCurrentColumn" : "curr_pA",
"dcOffset": "0.0",
"filter": "*.bin",
"start": "0.0",
"HeaderOffset": 0
}
:Settings Examples:
Read 16-bit signed integers (big endian) with a 512 byte header offset. Set the amplifier scale to 400 pA, sampling rate to 200 kHz.
.. code-block:: javascript
"binTrajIO": {
"AmplifierOffset": "0.0",
"SamplingFrequency": 200000,
"AmplifierScale": "400./2**16",
"ColumnTypes": "[('curr_pA', '>i2')]",
"dcOffset": 0.0,
"filter": "*.dat",
"start": 0.0,
"HeaderOffset": 512,
"IonicCurrentColumn": "curr_pA"
}
Read a two-column file: 64-bit floating point and 64-bit integers, and no header offset. Set the amplifier scale to 1 and sampling rate to 200 kHz.
.. code-block:: javascript
"binTrajIO": {
"AmplifierOffset": "0.0",
"SamplingFrequency": 200000,
"AmplifierScale": "1.0",
"ColumnTypes" : "[('curr_pA', 'float64'), ('AD_V', 'int64')]",
"dcOffset": 0.0,
"filter": "*.bin",
"start": 0.0,
"HeaderOffset": 0,
"IonicCurrentColumn": "curr_pA"
}
:Parameters:
In addition to :class:`~mosaic.metaTrajIO.metaTrajIO` args,
- `AmplifierScale` : Full scale of amplifier (pA/2^nbits) that varies with the gain (default: 1.0).
- `AmplifierOffset` : Current offset in the recorded data in pA (default: 0.0).
- `SamplingFrequency` : Sampling rate of data in the file in Hz.
- `HeaderOffset` : Ignore first *n* bytes of the file for header (default: 0 bytes).
- `ColumnTypes` : A list of tuples with column names and types (see `Numpy types <http://docs.scipy.org/doc/numpy/user/basics.types.html>`_). Note only integer and floating point numbers are supported.
- `IonicCurrentColumn` : Column name that holds ionic current data.
:Returns:
None
:Errors:
None
"""
def _init(self, **kwargs):
if not hasattr(self, 'SamplingFrequency'):
raise metaTrajIO.InsufficientArgumentsError("{0} requires the sampling rate in Hz to be defined.".format(type(self).__name__))
if not hasattr(self, 'ColumnTypes'):
raise metaTrajIO.InsufficientArgumentsError("{0} requires the column types to be defined.".format(type(self).__name__))
else:
if type(self.ColumnTypes) is str or type(self.ColumnTypes) is str:
self.ColumnTypes=eval(str(self.ColumnTypes))
if not hasattr(self, 'IonicCurrentColumn'):
raise metaTrajIO.InsufficientArgumentsError("{0} requires the ionic current column to be defined.".format(type(self).__name__))
if not hasattr(self, 'HeaderOffset'):
self.HeaderOffset=0
else:
self.HeaderOffset=int(eval(str(self.HeaderOffset)))
try:
self.IonicCurrentType=dict(self.ColumnTypes)[self.IonicCurrentColumn]
except KeyError as err:
self.IonicCurrentColumn=self.ColumnTypes[0][0]
self.IonicCurrentType=self.ColumnTypes[0][1]
logging.warning("WARNING: IonicCurrentColumn {0} not found. Defaulting to {1}.".format(err, self.IonicCurrentColumn))
if not hasattr(self, 'AmplifierScale'):
self.AmplifierScale=1.0
else:
self.AmplifierScale=eval(str(self.AmplifierScale))
if not hasattr(self, 'AmplifierOffset'):
self.AmplifierOffset=0.0
else:
self.AmplifierOffset=float(eval(str(self.AmplifierOffset)))
# additional meta data
self.fileFormat='bin'
# set the sampling frequency in Hz.
if not hasattr(self, 'Fs'):
self.Fs=self.SamplingFrequency
else:
self.Fs=int(eval(str(self.Fs)))
self.binLogger=mlog.mosaicLogging().getLogger(name=__name__)
	def readdata(self, fname):
		"""
		Return raw (unscaled) data from a single data file.

		:Parameters:
			- `fname` : filename to read
		:Returns:
			- An array object that holds raw (unscaled) data from `fname`
		:Errors:
			None
		"""
		# Delegate to the memory-mapped reader; scaling happens in scaleData.
		return self.readBinaryFile(fname)
def _formatsettings(self):
"""
Log settings strings
"""
self.binLogger.info( '\t\tAmplifier scale = {0} pA'.format(self.AmplifierScale) )
self.binLogger.info( '\t\tAmplifier offset = {0} pA'.format(self.AmplifierOffset) )
self.binLogger.info( '\t\tHeader offset = {0} bytes'.format(self.HeaderOffset) )
self.binLogger.info( '\t\tData type = \'{0}\''.format(self.IonicCurrentType) )
	def readBinaryFile(self, fname):
		# Memory-map the file (no full read into RAM) using the user-declared
		# record layout, skipping HeaderOffset bytes, and return only the
		# ionic-current column.
		return np.memmap(fname, dtype=self.ColumnTypes, mode='r', offset=self.HeaderOffset)[self.IonicCurrentColumn]
def scaleData(self, data):
"""
See :func:`mosaic.metaTrajIO.metaTrajIO.scaleData`.
"""
return np.array(data*self.AmplifierScale-self.AmplifierOffset, dtype=np.float64)
if __name__ == '__main__':
	# Manual smoke test: stream a local binary trace in 10k-sample chunks.
	# NOTE(review): resource_path is imported but unused here.
	from mosaic.utilities.resource_path import resource_path
	import os

	b=binTrajIO(
			fnames=['data/SingleChan-0001_1.bin'],
			dcOffset=0,
			start=0,
			ColumnTypes=[('curr', 'float64')],
			IonicCurrentColumn='curr',
			HeaderOffset=0,
			SamplingFrequency=500000
		)

	for i in range(100):
		d=b.popdata(10000)
		print(len(d), d[0], d[-1], np.mean(d), os.path.basename(b.LastFileProcessed), b.ElapsedTimeSeconds)
| true |
34d92e937d32dfc948cc634f19be3bb06c7a3874 | Python | srikanth000/iLID | /preprocessing/test/test_resampling.py | UTF-8 | 1,153 | 2.625 | 3 | [
"MIT"
] | permissive | from audio import resample
import numpy as np
import unittest
import math
import scipy.io.wavfile as wav
class ResamplingTest(unittest.TestCase):
    def test_downsampling(self):
        """Downsampling must return the requested target sample rate."""
        samplerate = 22100
        # Random clip length in seconds with a fractional part, e.g. 3.17.
        length = np.random.randint(0, 5) + round(np.random.random(), 2)
        # np.random.randint requires an integer size; samplerate * length is
        # a float, which raises TypeError on modern NumPy.
        num_samples = int(round(samplerate * length))
        signal = np.random.randint(0, 256, num_samples)

        target_samplerate = 16000
        target_num_samples = math.ceil(target_samplerate * length)

        resampled_signal, resampled_samplerate = resample.downsample(signal, samplerate, target_samplerate)

        self.assertEqual(resampled_samplerate, target_samplerate)
        # Length assertion kept disabled as in the original; the resampled
        # length may differ by a sample due to rounding.
        #print "%d == %d?" %(target_num_samples, len(resampled_signal))
        #self.assertEqual(len(resampled_signal), target_num_samples)

    def test_upsampling(self):
        """downsample() must reject a target rate above the source rate."""
        samplerate = 8000
        length = np.random.randint(0, 5) + round(np.random.random(), 3)
        num_samples = int(round(samplerate * length))
        signal = np.random.randint(0, 256, num_samples)

        target_samplerate = 16000

        self.assertRaises(ValueError, resample.downsample, signal, samplerate, target_samplerate)
| true |
ea157d24f746a6746b62bf42ed47c7aad66324b4 | Python | calizzim/technical-interview-practice | /odd-even-jump/test.py | UTF-8 | 294 | 3 | 3 | [] | no_license | import bisect
class NumI:
    """A (value, original-index) pair ordered by value, e.g. for use with bisect."""
    def __init__(self,val,index):
        self.val = val
        self.index = index
    def __lt__(self, other):
        # Only __lt__ is defined; that is the single comparison bisect needs.
        return self.val < other.val
    def __str__(self):
        return str([self.val,self.index])
# Demonstration: a slice is a shallow copy, so mutating the source list
# afterwards does not change the slice ([1, 2] is printed both times).
l = [1,2,3,4,5]
m = l[0:2]
print(m)
l[0] = 2
print(m)
8a6667901a27a4d566a143cf7e70a1a8d0bbfe0a | Python | Teodorneishan/SoftUniLatest | /fishing_boat.py | UTF-8 | 649 | 3.890625 | 4 | [] | no_license | budget = int(input("Enter budget:"))
season = input("Season:")
fishermen = int(input("Fishermen:"))

# Base boat rent per season.
if season == "Spring":
    price = 3000
elif season == "Summer":
    price = 4200
elif season == "Autumn":
    price = 4200
elif season == "Winter":
    price = 2600

# Group-size discount: 10% up to 6 people, 15% for 7-11, 25% for 12 or more.
if fishermen <= 6:
    price = price * 0.9
elif 6 < fishermen <= 11:
    price = price * 0.85
elif fishermen >= 12:
    # Fixed: was 'fishermen > 12', which silently gave a group of exactly
    # 12 fishermen no discount at all.
    price = price * 0.75

# Extra 5% discount for an even-sized group, except in autumn.
if fishermen % 2 == 0:
    if season == "Spring" or season == "Summer" or season == "Winter":
        price = price * 0.95

if budget >= price:
    print(f"Yes! You have {budget-price:.2f}leva left.")
else:
    print(f"Not enough money! You need {price-budget:.2f}leva.")
a55fb8e55f4bcedb57707ca12a19e3ddbe4ad4cd | Python | moussaifi/Web-Developpement-App-Muimui | /code/src/model/person_detect.py | UTF-8 | 2,841 | 2.546875 | 3 | [] | no_license | from imutils.object_detection import non_max_suppression
import numpy as np
import imutils
import cv2
# Initialise a HOG descriptor with OpenCV's pre-trained pedestrian SVM.
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
# Parameters for the edge-based foreground mask in get_foreground().
BLUR = 21                     # Gaussian blur kernel size for mask smoothing
CANNY_THRESH_1 = 10           # Canny lower hysteresis threshold
CANNY_THRESH_2 = 200          # Canny upper hysteresis threshold
MASK_DILATE_ITER = 10         # mask dilation iterations
MASK_ERODE_ITER = 10          # mask erosion iterations
MASK_COLOR = (1.0,1.0,1.0) # In BGR format
def HogDescriptor(image):
    """Run the HOG pedestrian detector on *image*.

    Returns the (possibly resized) image and an array of non-max-suppressed
    bounding boxes in [x1, y1, x2, y2] corner form.
    """
    # Cap the working width at 400 px to bound detection cost.
    image = imutils.resize(image, width=min(400, image.shape[1]))
    # NOTE(review): 'orig' is assigned but never used.
    orig = image.copy()
    (rects, weights) = hog.detectMultiScale(image, winStride=(4, 4),
        padding=(8, 8), scale=1.05)
    # Convert (x, y, w, h) boxes to corner form for non_max_suppression.
    rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
    pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)
    return image, pick
def crop(im, r, c, height, width):
    """Return the height x width sub-image whose top-left corner is row r, column c."""
    return im[r:r + height, c:c + width]
def im_squared(im, col=(255, 255, 255)):
    """Pad *im* on its short axis with colour *col* (BGR) so it becomes square.

    The default is now a tuple (the original used a mutable list default).
    """
    v, h = im.shape[0], im.shape[1]
    diff = abs(h - v)
    pad = int(diff / 2)
    # Pad with 'pad' on one side and 'diff - pad' on the other so the result
    # is exactly square even for odd differences (the original padded 'pad'
    # on both sides, leaving the output one pixel short in that case).
    if v > h:
        return cv2.copyMakeBorder(im, 0, 0, pad, diff - pad,
                                  cv2.BORDER_CONSTANT, value=col)
    else:
        return cv2.copyMakeBorder(im, pad, diff - pad, 0, 0,
                                  cv2.BORDER_CONSTANT, value=col)
def get_foreground(img):
    """Mask *img* to its largest convex contour, blending the rest to MASK_COLOR."""
    # Edge map: grayscale -> Canny -> dilate/erode to close small gaps.
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, CANNY_THRESH_1, CANNY_THRESH_2)
    edges = cv2.dilate(edges, None)
    edges = cv2.erode(edges, None)
    # Collect (contour, convexity, area) for every contour found.
    # NOTE(review): cv2.findContours returns 2 or 3 values depending on the
    # OpenCV version; this unpacking assumes a 2-value API — confirm.
    contour_info = []
    contours, _ = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    for c in contours:
        contour_info.append((
            c,
            cv2.isContourConvex(c),
            cv2.contourArea(c),
        ))
    # Largest-area contour is taken as the subject.
    contour_info = sorted(contour_info, key=lambda c: c[2], reverse=True)
    max_contour = contour_info[0]
    # Fill the contour, then dilate/erode/blur to soften the mask edges.
    mask = np.zeros(edges.shape)
    cv2.fillConvexPoly(mask, max_contour[0], (255));
    mask = cv2.dilate(mask, None, iterations=MASK_DILATE_ITER)
    mask = cv2.erode(mask, None, iterations=MASK_ERODE_ITER)
    mask = cv2.GaussianBlur(mask, (BLUR, BLUR), 0)
    # Alpha-blend: foreground keeps the image, background becomes MASK_COLOR.
    mask_stack = np.dstack([mask]*3)
    mask_stack = mask_stack.astype('float32') / 255.0
    img = img.astype('float32') / 255.0
    masked = (mask_stack * img) + ((1-mask_stack) * MASK_COLOR)
    masked = (masked * 255).astype('uint8')
    return masked
def get_person(img):
    """Detect the largest person in *img* and return it foreground-masked and squared.

    Falls back to returning the input image unchanged when no usable
    detection exists or any step of the pipeline fails.
    """
    imr, bbs = HogDescriptor(img)
    try:
        # Corner form -> (x, y, w, h), sorted largest area first.
        bbs = [[bb[0], bb[1], bb[2] - bb[0], bb[3] - bb[1]]
               for bb in bbs]
        bbs = sorted(bbs, key=lambda x: x[2] * x[3],
                     reverse=True)
        bb = bbs[0]
        # Ignore detections smaller than 200 px in both dimensions.
        if max(bb[2], bb[3]) < 200:
            return img
        imc = crop(imr, bb[1], bb[0], bb[3], bb[2])
        masked = get_foreground(imc)
        ims = im_squared(masked)
        return ims
    except Exception:
        # Narrowed from a bare 'except:' (which also swallowed
        # KeyboardInterrupt/SystemExit); the best-effort fallback is kept.
        return img
| true |
20d9621706d65387abcd8232330cc731893f68a4 | Python | eianlee1124/daily-practice | /programmers/튜플.py | UTF-8 | 318 | 2.953125 | 3 | [] | no_license | import re
from collections import Counter
def solution(s):
    """Recover the original tuple: elements ordered by descending frequency.

    An element appearing in k of the nested sets was inserted k-th from the
    end, so sorting by occurrence count (descending) restores the order.
    """
    counts = Counter(re.findall(r'\d+', s))
    # most_common() sorts by count descending with stable tie order, exactly
    # matching the original sorted(..., key=count, reverse=True).
    return [int(num) for num, _ in counts.most_common()]


if __name__ == "__main__":
    print(solution("{{2},{2,1},{2,1,3},{2,1,3,4}}"))
    print(solution("{{20,111},{111}}"))
cbdfed5b9035488755ef106318849538d5794f0e | Python | YuriiPaziuk/leetcode | /string/242. Valid Anagram.py | UTF-8 | 1,680 | 3.984375 | 4 | [] | no_license | """
Given two strings s and t, write a function to determine if t is an anagram of s.
For example,
s = "anagram", t = "nagaram", return true.
s = "rat", t = "car", return false.
Note:
You may assume the string contains only lowercase alphabets.
Follow up:
What if the inputs contain unicode characters? How would you adapt your solution to such case?
"""
class Solution:
    """Alternative implementations of the valid-anagram check.

    Each method returns True when *t* is an anagram of *s*.  Inputs are
    assumed to be lowercase ASCII (per the problem statement).  Runtimes in
    the comments come from the original submissions.
    """

    def isAnagram1(self, s, t):
        """Sort both strings and compare.  (112 ms)"""
        return sorted(s) == sorted(t)

    def isAnagram2(self, s, t):
        """Compare hand-built character-count dicts.  (99 ms)"""
        d1, d2 = {}, {}
        for char in s:
            d1[char] = d1.get(char, 0) + 1
        for char in t:
            d2[char] = d2.get(char, 0) + 1
        return d1 == d2

    def isAnagram3(self, s, t):
        """Compare fixed 26-slot count arrays.  (106 ms)"""
        d1, d2 = [0] * 26, [0] * 26
        for char in s:
            d1[ord(char) - ord('a')] += 1
        for char in t:
            d2[ord(char) - ord('a')] += 1
        return d1 == d2

    def isAnagram4(self, s, t):
        """Compare collections.Counter multisets.  (69 ms)

        Renamed from a duplicate 'isAnagram3' definition, which silently
        shadowed the count-array version above.
        """
        from collections import Counter
        return Counter(s) == Counter(t)

    def isAnagram5(self, s, t):
        """Compare per-letter counts, most frequent English letters first.  (62 ms)

        Fixed: the letter table was uppercase, so str.count never matched the
        lowercase inputs and the method returned True for ANY pair of strings.
        """
        letters = "eariotnslcudpmhgbfywkvxzjq"
        for letter in letters:
            if s.count(letter) != t.count(letter): return False
        return True

    def isAnagram6(self, s, t):
        """Compare counts only for letters actually present.  (52 ms)"""
        letters = set(s + t)
        for letter in letters:
            if s.count(letter) != t.count(letter): return False
        return True


def main():
    pass


if __name__ == '__main__':
    main()
| true |
01b84260b156c789cb2f9f86ea72dd382dc6e291 | Python | leoray317/CFNN | /hw3-mainprogram.py | UTF-8 | 3,653 | 3.03125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 19 12:00:51 2019
@author: steve
"""
#import the modules
import numpy as np
import math
import os
import matplotlib.pyplot as plt
#define the functions
# f2 generates the target output y_j = cos(p[j,0]) * sin(p[j,1]) per input row.
def f2(p):
    """Return a 1-D array with cos(x) * sin(y) for each row (x, y) of *p*.

    The original pre-allocated a fixed 500-slot list, so it only worked for
    exactly 500 rows; this version handles any number of rows.
    """
    return np.array([math.cos(row[0]) * math.sin(row[1]) for row in p])
# np_R2: squared Pearson correlation between prediction and ground truth.
def np_R2(output, output_pred):
    """Return the coefficient of determination (r^2) of two arrays."""
    # Flatten both inputs, correlate, then square the off-diagonal entry.
    flat_true = output.reshape(np.size(output))
    flat_pred = output_pred.reshape(np.size(output_pred))
    corr = np.corrcoef(flat_true, flat_pred, False)[1, 0]
    return np.square(corr)
# np_RMSE: root-mean-square error between prediction and ground truth.
def np_RMSE(output, output_pred):
    """Return the RMSE between two equal-length sequences."""
    total = 0
    # Accumulate squared differences element by element (indexing
    # output_pred preserves the original's behaviour on length mismatch).
    for idx, val in enumerate(output):
        total = total + np.square(val - output_pred[idx])
    return np.sqrt(total / len(output))
# Generate input data (500 points uniform in [-2, 2]^2) and the target output.
data_in= np.random.rand(500,2)*4-2
data_real_out =f2(data_in)
# Index splits: 300 training / 100 validation / 100 testing samples.
train_index=np.asarray(range(0,300))
val_index=np.asarray(range(300,400))
test_index=np.asarray(range(400,500))
#split data into train,validate and testing
train_in= data_in[train_index,:]
train_out= data_real_out[train_index]
val_in= data_in[val_index,:]
val_out= data_real_out[val_index]
test_in= data_in[test_index,:]
test_out= data_real_out[test_index]
# Reshape the targets to column vectors, as expected by the CFNN routines.
train_out=np.reshape(train_out,(-1,1));val_out=np.reshape(val_out,(-1,1));test_out=np.reshape(test_out,(-1,1))
#Please reset your directory to the location of the CFNN.py folder
os.chdir('C:/Users/user/Desktop')
# CFNN_network provides newCPN (training), evalGaussCPN and evalTriCPN
# (inference with Gaussian / triangular basis functions).
from CFNN import CFNN_network as CFNN
# Network hyper-parameters; nseed is one seed node per ~50 training samples.
delta=0.1;beta=0.5;alpha=1;
Wi,Wp=CFNN.newCPN(train_in,train_out,delta=delta,nseed=math.ceil(len(train_in)/50),beta=beta,alpha1=alpha)
# Evaluate on all three splits; uncomment the Gauss lines (and comment the
# triangular ones) to switch basis functions.
#Yh_train=CFNN.evalGaussCPN(Wi,Wp,delta=delta,P=train_in) #Gauss function
Yh_train=CFNN.evalTriCPN(Wi,Wp,delta=delta,P=train_in) #triangulate function
#Yh_val=CFNN.evalGaussCPN(Wi,Wp,delta=delta,P=val_in) #Gauss function
Yh_val=CFNN.evalTriCPN(Wi,Wp,delta=delta,P=val_in) #triangulate function
#Yh_test=CFNN.evalGaussCPN(Wi,Wp,delta=delta,P=test_in) #Gauss function
Yh_test=CFNN.evalTriCPN(Wi,Wp,delta=delta,P=test_in) #triangulate function
# Per-sample residuals for each split.
train_error= train_out-Yh_train
val_error= val_out-Yh_val
test_error= test_out-Yh_test
# Plot targets vs predictions (top) and residuals (bottom) for each split.
#training
plt.subplot(2,1,1)
plt.plot(train_out)
plt.plot(Yh_train,'ro')
plt.ylabel('training')
plt.subplot(2,1,2)
plt.plot(train_error)
plt.ylabel("training-error")
plt.show()
#validation
plt.subplot(2,1,1)
plt.plot(val_out)
plt.plot(Yh_val,'yo')
plt.ylabel('validation')
plt.subplot(2,1,2)
plt.plot(val_error)
plt.ylabel("validation-error")
plt.show()
#testing
plt.subplot(2,1,1)
plt.plot(test_out)
plt.plot(Yh_test,'bo')
plt.ylabel('testing')
plt.subplot(2,1,2)
plt.plot(test_error)
plt.ylabel("testing-error")
plt.show()
# Summary metrics: coefficient of determination and RMSE per split.
train_R2=np_R2(train_out,Yh_train)
val_R2=np_R2(val_out,Yh_val)
test_R2=np_R2(test_out,Yh_test)
train_RMSE=np_RMSE(train_out,Yh_train)
val_RMSE=np_RMSE(val_out,Yh_val)
test_RMSE=np_RMSE(test_out,Yh_test)
6d4b415399063b9984a210295d40e8d0a192a90f | Python | mindrobots/100_python_exercises | /38.py | UTF-8 | 169 | 2.890625 | 3 | [] | no_license | # mental exercise
# below line of code throws an error because a line is missing before it
# points 1
'''
math.sqrt(9)
'''
# should be
'''
import math
math.sqrt(9)
'''
| true |
da1efaa67f95062218f85c573d09ba8c86b5c3a3 | Python | two-first-names/advent-of-code-2020 | /day3/part2.py | UTF-8 | 774 | 3.578125 | 4 | [] | no_license | #!/usr/bin/env python3
import math
def main():
    """Advent of Code 2020 day 3, part 2: multiply tree counts over five slopes."""
    # Load the map: one stripped line per row; columns repeat horizontally.
    with open('input') as f:
        grid = [list(line.strip()) for line in f]

    height = len(grid)
    width = len(grid[0])

    def get_trees_for_slope(right, down):
        # Count '#' cells hit when stepping (right, down) from the top-left,
        # wrapping horizontally.
        col = 0
        trees = 0
        for row in range(0, height, down):
            if grid[row][col] == '#':
                trees += 1
            col = (col + right) % width
        return trees

    slopes = [
        (1, 1),
        (3, 1),
        (5, 1),
        (7, 1),
        (1, 2)
    ]

    slope_trees = [get_trees_for_slope(r, d) for r, d in slopes]
    print(slope_trees)
    print(math.prod(slope_trees))


if __name__ == '__main__':
    main()
7c96d14c2bb04038ff8ee53b01041faa4724f05f | Python | Curso-de-Python/Clase14 | /ejercicio2.py | UTF-8 | 553 | 3.875 | 4 | [] | no_license | '''
-----------------------------
EJERCICIO N°2
Variables de clase
-----------------------------
'''
class ClaseEjemplo:
    # Class variable: shared by all instances, counts how many were created.
    contador = 0
    def __init__(self, val = 1):
        # Name-mangled "private" attribute (stored as _ClaseEjemplo__primera).
        self.__primera = val
        ClaseEjemplo.contador += 1

objetoEjemplo1 = ClaseEjemplo()
objetoEjemplo2 = ClaseEjemplo(2)
objetoEjemplo3 = ClaseEjemplo(4)

# Every instance sees the same shared counter (3 after three instantiations).
print(objetoEjemplo1.__dict__, objetoEjemplo1.contador)
print(objetoEjemplo2.__dict__, objetoEjemplo2.contador)
print(objetoEjemplo3.__dict__, objetoEjemplo3.contador)

# And what if we turn "contador" into a private variable?
7be714dbead64ccb15afaa6a8eaf7177bf403436 | Python | cduck/qutrits | /cirq/schedules/schedulers_test.py | UTF-8 | 10,378 | 2.796875 | 3 | [
"Apache-2.0"
] | permissive | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import cast
import pytest
import cirq
class _TestDevice(cirq.Device):
"""A device for testing that only supports H and CZ gates on 10 qubits.
The H gate take 20 nanos, and the CZ gates take 40 nanos.
This device has 10 QubitLine qubits in a line, with x values ranging from
0 to 9 (inclusive).
"""
def __init__(self):
self.qubits = [cirq.LineQubit(x) for x in range(10)]
def duration_of(self, operation: cirq.Operation) -> cirq.Duration:
if isinstance(operation, cirq.GateOperation):
g = operation.gate
if isinstance(g, cirq.HGate):
return cirq.Duration(nanos=20)
if isinstance(g, cirq.Rot11Gate):
return cirq.Duration(nanos=40)
raise ValueError('Unsupported operation: {!r}'.format(operation))
def validate_gate(self, gate: cirq.Gate):
if not isinstance(gate, (cirq.HGate, cirq.Rot11Gate)):
raise ValueError('Unsupported gate type {!r}'.format(gate))
def validate_operation(self, operation: cirq.Operation):
if not isinstance(operation, cirq.GateOperation):
raise ValueError('Unsupported operation: {!r}'.format(operation))
self.validate_gate(operation.gate)
for q in operation.qubits:
if not isinstance(q, cirq.LineQubit):
raise ValueError('Unsupported qubit type: {!r}'.format(q))
if q not in self.qubits:
raise ValueError('Qubit not on device: {!r}'.format(q))
if len(operation.qubits) == 2:
p, q = operation.qubits
if not cast(cirq.LineQubit, p).is_adjacent(cast(cirq.LineQubit, q)):
raise ValueError(
'Non-local interaction: {!r}.'.format(operation))
def validate_scheduled_operation(
self,
schedule: cirq.Schedule,
scheduled_operation: cirq.ScheduledOperation):
op = scheduled_operation.operation
self.validate_operation(op)
if (isinstance(op, cirq.GateOperation) and
isinstance(op.gate, cirq.Rot11Gate)):
for other in schedule.operations_happening_at_same_time_as(
scheduled_operation):
if self.check_if_cz_adjacent(op, other.operation):
raise ValueError('Adjacent CZ operations: {} vs {}'.format(
scheduled_operation, other))
def check_if_cz_adjacent(self,
cz_op: cirq.GateOperation,
other_op: cirq.Operation):
if (isinstance(other_op, cirq.GateOperation) and
isinstance(other_op.gate, cirq.HGate)):
return False
return any(cast(cirq.LineQubit, q).is_adjacent(cast(cirq.LineQubit, p))
for q in cz_op.qubits
for p in other_op.qubits)
    def validate_circuit(self, circuit):
        # Circuit-level validation is intentionally unsupported on this
        # test device; only schedules are validated.
        raise NotImplementedError()
def validate_schedule(self, schedule):
for scheduled_operation in schedule.scheduled_operations:
self.validate_scheduled_operation(schedule, scheduled_operation)
class NotImplementedOperation(cirq.Operation):
    """An Operation stub whose members all raise, used to exercise error paths."""
    def with_qubits(self, *new_qubits) -> 'NotImplementedOperation':
        raise NotImplementedError()
    @property
    def qubits(self):
        raise NotImplementedError()
def test_the_test_device():
    # End-to-end checks of _TestDevice's own validation behavior.
    device = _TestDevice()
    # Gate validation: H is supported, X is not.
    device.validate_gate(cirq.H)
    with pytest.raises(ValueError):
        device.validate_gate(cirq.X)
    # Operation validation: H on a line qubit passes; a stub operation
    # implementing nothing is rejected.
    device.validate_operation(cirq.H(cirq.LineQubit(0)))
    with pytest.raises(ValueError):
        device.validate_operation(NotImplementedOperation())
    # Schedule validation: empty schedule and a valid H + CZ schedule pass.
    device.validate_schedule(cirq.Schedule(device, []))
    device.validate_schedule(
        cirq.Schedule(device, [
            cirq.ScheduledOperation.op_at_on(
                cirq.H(cirq.LineQubit(0)),
                cirq.Timestamp(),
                device),
            cirq.ScheduledOperation.op_at_on(
                cirq.CZ(cirq.LineQubit(0), cirq.LineQubit(1)),
                cirq.Timestamp(),
                device)
        ]))
    # Rejected schedules: unknown operation type...
    with pytest.raises(ValueError):
        device.validate_schedule(
            cirq.Schedule(device, [cirq.ScheduledOperation.op_at_on(
                NotImplementedOperation(),
                cirq.Timestamp(),
                device)]))
    # ...unsupported gate (X)...
    with pytest.raises(ValueError):
        device.validate_schedule(
            cirq.Schedule(device, [cirq.ScheduledOperation.op_at_on(
                cirq.X(cirq.LineQubit(0)),
                cirq.Timestamp(),
                device)]))
    # ...wrong qubit type (NamedQubit instead of LineQubit)...
    with pytest.raises(ValueError):
        device.validate_schedule(
            cirq.Schedule(device, [cirq.ScheduledOperation.op_at_on(
                cirq.H(cirq.NamedQubit('q')),
                cirq.Timestamp(),
                device)]))
    # ...qubit index outside the device's 0..9 range...
    with pytest.raises(ValueError):
        device.validate_schedule(
            cirq.Schedule(device, [cirq.ScheduledOperation.op_at_on(
                cirq.H(cirq.LineQubit(100)),
                cirq.Timestamp(),
                device)]))
    # ...and a CZ between non-adjacent qubits.
    with pytest.raises(ValueError):
        device.validate_schedule(
            cirq.Schedule(device, [cirq.ScheduledOperation.op_at_on(
                cirq.CZ(cirq.LineQubit(1), cirq.LineQubit(3)),
                cirq.Timestamp(),
                device)]))
def test_moment_by_moment_schedule_no_moments():
    """An empty circuit produces an empty schedule."""
    schedule = cirq.moment_by_moment_schedule(_TestDevice(), cirq.Circuit([]))
    assert len(schedule.scheduled_operations) == 0
def test_moment_by_moment_schedule_empty_moment():
    """A circuit with one empty moment schedules no operations."""
    circuit = cirq.Circuit([cirq.Moment()])
    schedule = cirq.moment_by_moment_schedule(_TestDevice(), circuit)
    assert len(schedule.scheduled_operations) == 0
def test_moment_by_moment_schedule_moment_of_single_qubit_ops():
    """A moment of H on every qubit schedules all of them at t=0."""
    device = _TestDevice()
    circuit = cirq.Circuit([cirq.Moment(cirq.H(q) for q in device.qubits)])
    schedule = cirq.moment_by_moment_schedule(device, circuit)
    start = cirq.Timestamp()
    expected = {cirq.ScheduledOperation.op_at_on(cirq.H(q), start, device)
                for q in device.qubits}
    assert set(schedule.scheduled_operations) == expected
def test_moment_by_moment_schedule_moment_of_two_qubit_ops():
    """Non-adjacent CZs sharing one moment all start at t=0."""
    device = _TestDevice()
    qubits = device.qubits
    czs = [cirq.CZ(qubits[i], qubits[i + 1]) for i in range(0, 9, 3)]
    circuit = cirq.Circuit([cirq.Moment(czs)])
    schedule = cirq.moment_by_moment_schedule(device, circuit)
    start = cirq.Timestamp()
    expected = {cirq.ScheduledOperation.op_at_on(op, start, device)
                for op in czs}
    assert set(schedule.scheduled_operations) == expected
def test_moment_by_moment_schedule_two_moments():
    # An H moment followed by a CZ moment: the CZs start only after the
    # 20 ns H moment finishes.
    device = _TestDevice()
    qubits = device.qubits
    circuit = cirq.Circuit([cirq.Moment(cirq.H(q) for q in qubits),
                            cirq.Moment((cirq.CZ(qubits[i], qubits[i + 1])
                                         for i in range(0, 9, 3)))])
    schedule = cirq.moment_by_moment_schedule(device, circuit)
    zero_ns = cirq.Timestamp()
    twenty_ns = cirq.Timestamp(nanos=20)
    expected_one_qubit = set(
        cirq.ScheduledOperation.op_at_on(cirq.H(q), zero_ns, device)
        for q in qubits)
    expected_two_qubit = set(
        cirq.ScheduledOperation.op_at_on(
            cirq.CZ(qubits[i], qubits[i + 1]), twenty_ns,
            device) for i in range(0, 9, 3))
    expected = expected_one_qubit.union(expected_two_qubit)
    assert set(schedule.scheduled_operations) == expected
def test_moment_by_moment_schedule_max_duration():
    # The second moment starts after the LONGEST operation of the first
    # moment (the 40 ns CZ), not after the 20 ns H.
    device = _TestDevice()
    qubits = device.qubits
    circuit = cirq.Circuit([
        cirq.Moment([cirq.H(qubits[0]), cirq.CZ(qubits[1], qubits[2])]),
        cirq.Moment([cirq.H(qubits[0])])])
    schedule = cirq.moment_by_moment_schedule(device, circuit)
    zero_ns = cirq.Timestamp()
    fourty_ns = cirq.Timestamp(nanos=40)
    assert set(schedule.scheduled_operations) == {
        cirq.ScheduledOperation.op_at_on(cirq.H(qubits[0]), zero_ns, device),
        cirq.ScheduledOperation.op_at_on(
            cirq.CZ(qubits[1], qubits[2]), zero_ns, device),
        cirq.ScheduledOperation.op_at_on(cirq.H(qubits[0]), fourty_ns, device),
    }
def test_moment_by_moment_schedule_empty_moment_ignored():
    """An empty moment between two H moments adds no delay."""
    device = _TestDevice()
    q0 = device.qubits[0]
    circuit = cirq.Circuit([cirq.Moment([cirq.H(q0)]),
                            cirq.Moment([]),
                            cirq.Moment([cirq.H(q0)])])
    schedule = cirq.moment_by_moment_schedule(device, circuit)
    expected = {
        cirq.ScheduledOperation.op_at_on(cirq.H(q0), cirq.Timestamp(), device),
        cirq.ScheduledOperation.op_at_on(cirq.H(q0),
                                         cirq.Timestamp(nanos=20), device),
    }
    assert set(schedule.scheduled_operations) == expected
def test_moment_by_moment_schedule_validate_operation_fails():
    """Scheduling rejects gates the device does not support."""
    device = _TestDevice()
    circuit = cirq.Circuit()
    circuit.append(cirq.CNOT(device.qubits[0], device.qubits[1]))
    with pytest.raises(ValueError, match="CNOT"):
        _ = cirq.moment_by_moment_schedule(device, circuit)
def test_moment_by_moment_schedule_device_validation_fails():
    """Scheduling rejects simultaneous CZs on adjacent qubit pairs."""
    device = _TestDevice()
    q = device.qubits
    moment = cirq.Moment([cirq.CZ(q[0], q[1]), cirq.CZ(q[2], q[3])])
    with pytest.raises(ValueError, match="Adjacent CZ"):
        _ = cirq.moment_by_moment_schedule(device, cirq.Circuit([moment]))
| true |
e17d4e47c81e9ca70b247a62f4af1235842e9a15 | Python | mattijn/pynotebook | /2015/2015-12-24 Time series forecast numpy lstsq svd.py | UTF-8 | 5,068 | 2.625 | 3 | [] | no_license |
# coding: utf-8
# In[1]:
import matplotlib.pyplot as plt
get_ipython().magic(u'matplotlib inline')
# In[2]:
import numpy as np
# training data
X1=np.array([[-0.31994,-0.32648,-0.33264,-0.33844],[-0.32648,-0.33264,-0.33844,-0.34393],[-0.33264,-0.33844,-0.34393,-0.34913],[-0.33844,-0.34393,-0.34913,-0.35406],[-0.34393,-0.34913,-.35406,-0.35873],[-0.34913,-0.35406,-0.35873,-0.36318],[-0.35406,-0.35873,-0.36318,-0.36741],[-0.35873,-0.36318,-0.36741,-0.37144],[-0.36318,-0.36741,-0.37144,-0.37529],[-0.36741,-.37144,-0.37529,-0.37896],[-0.37144,-0.37529,-0.37896,-0.38069],[-0.37529,-0.37896,-0.38069,-0.38214],[-0.37896,-0.38069,-0.38214,-0.38349],[-0.38069,-0.38214,-0.38349,-0.38475],[-.38214,-0.38349,-0.38475,-0.38593],[-0.38349,-0.38475,-0.38593,-0.38887]])
X2=np.array([[-0.39265,-0.3929,-0.39326,-0.39361],[-0.3929,-0.39326,-0.39361,-0.3931],[-0.39326,-0.39361,-0.3931,-0.39265],[-0.39361,-0.3931,-0.39265,-0.39226],[-0.3931,-0.39265,-0.39226,-0.39193],[-0.39265,-0.39226,-0.39193,-0.39165],[-0.39226,-0.39193,-0.39165,-0.39143],[-0.39193,-0.39165,-0.39143,-0.39127],[-0.39165,-0.39143,-0.39127,-0.39116],[-0.39143,-0.39127,-0.39116,-0.39051],[-0.39127,-0.39116,-0.39051,-0.3893],[-0.39116,-0.39051,-0.3893,-0.39163],[-0.39051,-0.3893,-0.39163,-0.39407],[-0.3893,-0.39163,-0.39407,-0.39662],[-0.39163,-0.39407,-0.39662,-0.39929],[-0.39407,-0.39662,-0.39929,-0.4021]])
# target values
y1=np.array([-0.34393,-0.34913,-0.35406,-0.35873,-0.36318,-0.36741,-0.37144,-0.37529,-0.37896,-0.38069,-0.38214,-0.38349,-0.38475,-0.38593,-0.38887,-0.39184])
y2=np.array([-0.3931,-0.39265,-0.39226,-0.39193,-0.39165,-0.39143,-0.39127,-0.39116,-0.39051,-0.3893,-0.39163,-0.39407,-0.39662,-0.39929,-0.4021,-0.40506])
half = len(y1)/2 # or y2 as they have the same length
# In[ ]:
# def slow_lstsq(L, b):
# return np.array([np.linalg.lstsq(L[k], b[k])[0]
# for k in range(L.shape[0])])
# In[3]:
def stacked_lstsq(L, b, rcond=1e-10):
    """
    Solve L x = b in the least-squares sense for a whole stack of systems,
    truncating singular values below rcond times the largest one.
    L is an array of shape (..., M, N) and b of shape (..., M).
    Returns x of shape (..., N)
    """
    u, s, v = np.linalg.svd(L, full_matrices=False)
    # Build the truncated reciprocal of the singular values: anything below
    # the per-system cutoff rcond * s_max contributes zero.
    cutoff = rcond * s.max(axis=-1, keepdims=True)
    keep = s >= cutoff
    s_inv = np.zeros_like(s)
    s_inv[keep] = 1 / s[keep]
    # x = V^H @ diag(s_inv) @ U^H @ b, batched over leading dimensions.
    projected = np.einsum('...ji,...j->...i', u, b.conj())
    x = np.einsum('...ji,...j->...i', v, s_inv * projected)
    return np.conj(x, x)
# In[ ]:
from sklearn.linear_model import LinearRegression
# train the 1st half, predict the 2nd half
regressor = LinearRegression()
regressor.fit(X2[:half], y2[:half])
pred = regressor.predict(X2[half:])
r_2 = regressor.score(X2[half:],y2[half:])
# print the prediction and r^2 (Python 2 print statements)
print 'pred:',pred
print 'r^2:',r_2
# In[ ]:
# set inner variables used to center data and get intercept
# NOTE(review): modern scikit-learn's LinearRegression.fit returns the
# estimator itself, not a tuple; this unpacking only works against an old
# or patched fit implementation - confirm the intended sklearn version.
fit, X_mean, y_mean, X_std = regressor.fit(X2[:half], y2[:half])
intercept = y_mean - np.dot(X_mean, regressor.coef_)
# apply prediction manually: y_hat = X @ coef + intercept
npdot = np.dot(X2[half:],regressor.coef_)
prediction = npdot + intercept
# In[ ]:
print 'y_mean:', y_mean, y_mean.shape
print 'X_mean:', X_mean, X_mean.shape
print 'coef_:', regressor.coef_, regressor.coef_.shape
print 'npdot:', npdot, npdot.shape
print 'intercept:', intercept, intercept.shape
print 'predict:', prediction, prediction.shape
print 'predict:', prediction, prediction.shape
# In[4]:
# stack X1 & X2 and y1 & y2 into batched arrays of shape (2, ...)
y_stack = np.vstack((y1[None],y2[None]))
X_stack = np.vstack((X1[None],X2[None]))
print 'y1 shape:',y1.shape, 'X1 shape:',X1.shape
print 'y_stack shape:',y_stack.shape, 'X_stack:',X_stack.shape
# In[5]:
# center X_stack (first half only is used for fitting)
X_stack_mean = np.average(X_stack[:,:half],axis=1)
X_stack_std = np.ones(X_stack[:,:half].shape[0::2])
X_stack_center = X_stack[:,:half] - X_stack_mean[:,None,:]
#X_stack -= X_stack_mean[:,None,:]
# center y_stack
y_stack_mean = np.average(y_stack[:,:half],axis=1)
y_stack_center = y_stack[:,:half] - y_stack_mean[:,None]
#y_stack -= y_stack_mean[:,None]
# In[6]:
y_stack_center
# In[ ]:
# get coefficients by applying batched linear regression on the stack
coef_stack = stacked_lstsq(X_stack_center, y_stack_center)
print 'coef_stack:',coef_stack
# In[ ]:
# calculate the intercept (X_stack_std is all ones, so this is a no-op scale)
coef_stack = coef_stack / X_stack_std
intercept_stack = y_stack_mean - np.einsum('ij,ij->i',X_stack_mean,coef_stack)
print 'intercept_stack:',intercept_stack
# In[ ]:
# apply prediction on the second half using einsum
einsum_stack = np.einsum('ijx,ix->ij',X_stack[:,half:],coef_stack)#X_stack[:,:half]
print 'einsum:',einsum_stack
print 'npdot:',npdot
prediction_stack = einsum_stack + intercept_stack[None].T
print 'prediction_stack:',prediction_stack
y_stack_true = y_stack[:,half:]
print 'y_stack_true:',y_stack_true
# In[ ]:
#The coefficient R^2 is defined as (1 - u/v), where u is the residual
#sum of squares ((y_true - y_pred) ** 2).sum() and v is the total
#sum of squares ((y_true - y_true.mean()) ** 2).sum().
u = ((y_stack_true - prediction_stack) ** 2).sum(axis=-1)
v = ((y_stack_true - y_stack_true.mean(axis=-1)[None].T) ** 2).sum(axis=-1)
r_2_stack = 1 - u/v
# In[ ]:
| true |
a20b372481485a8cb55071b89bfcc9d338ff12c0 | Python | spectraldoy/MusicTransformerTensorFlow | /transformerutil6.py | UTF-8 | 26,678 | 3.046875 | 3 | [
"Apache-2.0"
] | permissive | import mido
import tensorflow as tf
import numpy as np
import random
"""
Implementation of a converter of MIDI files to and from the event-based
vocabulary representation of MIDI files according to Oore et al., 2018
Also some heloer fuctions to be able to use the transformer model properly
Possible MIDI events being considered:
128 note_on events
128 note_off events #includes handling pedal_on and pedal_off events
125 time_shift events #time_shift = 1: 8 ms
32 velocity events
Total number of midi events = 413
The indices of the vocab corresponding to the events will be,
v[ 0] = '<pad>'
v[ 0..128] = note_on
v[129..256] = note_off
v[257..381] = time_shift
v[382..413] = velocity
v[414..415] = '<start>', '<end>'
A list of tokens will be generated from the midi file, and the indices of these
need to be passed into the Embedding
"""
"""MIDI TOKENIZER"""
note_on_events = 128
note_off_events = note_on_events
time_shift_events = 125 #time_shift = time_shift_events corresponds to 1 second
velocity_events = 32
LTH = 1000 #maximum number of milliseconds handled by a single time_shift run
DIV = 1000 // time_shift_events #each time_shift unit is DIV (= 8) milliseconds
#total midi events: note events + time shifts + velocities + pad + start + end
total_midi_events = note_on_events + note_off_events + time_shift_events + velocity_events + 1 + 2
#create the vocabulary list to use when pedal is considered to be holding the
#note, instead of introducing pedal events -- nn might be able to learn easier
note_on_vocab = [f"note_on_{i}" for i in range(note_on_events)]
note_off_vocab = [f"note_off_{i}" for i in range(note_off_events)]
time_shift_vocab = [f"time_shift_{i}" for i in range(time_shift_events)]
velocity_vocab = [f"set_velocity_{i}" for i in range(velocity_events)]
#create vocab of tokens: <pad> first so real events start at index 1
vocab = ['<pad>'] + note_on_vocab + note_off_vocab + time_shift_vocab + velocity_vocab + ['<start>', '<end>']
vocab_size = len(vocab)
#special token indices
pad_token = vocab.index("<pad>")
start_token = vocab.index("<start>")
end_token = vocab.index("<end>")
def Midiparser(fname=None, mid=None):
    """Convert a MIDI file into the event-vocabulary representation.

    Exactly one of ``fname`` (path to load) or ``mid`` (an already loaded
    mido.MidiFile) must be given.

    Returns:
        (index_list, event_list): an int32 numpy array of vocab indices and
        the parallel list of event-name strings.
    """
    assert (fname == None) ^ (mid == None) == True, "Define only one of mid (a loaded midi file) or fname (the path from which to load a midi file)"
    if fname is not None:
        mid = mido.MidiFile(fname)
    #conversion state
    delta_time = 0 #accumulated delta time across skipped messages
    event_list = [] #event-name strings from the vocab
    index_list = [] #vocab index of each event in event_list
    pedal_events = {} #note -> count of note_offs deferred while pedal is held
    pedal_flag = False #True while the sustain pedal is down
    #create the event list as a list of elements of the vocab
    for track in mid.tracks:
        for msg in track:
            delta_time += msg.time
            if msg.is_meta:
                continue
            #emit the accumulated time before the next sounding event
            t = msg.type
            if t == "note_on" or (t == "note_off" and not pedal_flag):
                time_to_events(delta_time, event_list=event_list, index_list=index_list)
                delta_time = 0
            if t == "note_on":
                #get the note
                note = msg.note
                vel = velocity_to_bin(msg.velocity)
                #append the set velocity and note events; 1 is added to deal with the pad token
                event_list.append(vocab[note_on_events + note_off_events + time_shift_events + vel + 1])
                event_list.append(vocab[note + 1])
                index_list.append(note_on_events + note_off_events + time_shift_events + vel + 1)
                index_list.append(note + 1)
            elif t == "note_off" and not pedal_flag:
                #get the note
                note = msg.note
                #append the note off event
                event_list.append(vocab[note_on_events + note + 1])
                index_list.append(note_on_events + note + 1)
            elif t == "note_off" and pedal_flag:
                #defer the note_off until the pedal flush below
                note = msg.note
                if note not in pedal_events:
                    pedal_events[note] = 0
                pedal_events[note] += 1
            elif msg.type == "control_change":
                if msg.control == 64:
                    if msg.value >= 64:
                        #pedal on
                        pedal_flag = True
                    elif msg.value <= 63:
                        #pedal off
                        pedal_flag = False
        #NOTE(review): deferred note_offs are flushed at the end of each
        #track, not at the moment the pedal is released - confirm intended.
        for note in pedal_events:
            for i in range(pedal_events[note]):
                #add the time events
                time_to_events(delta_time, event_list=event_list, index_list=index_list)
                delta_time = 0
                #repeatedly create and append note off events
                event_list.append(vocab[note_on_events + note + 1])
                index_list.append(note_on_events + note + 1)
        #restart the pedal events dict for the next track
        pedal_events = {}
    #return the lists of events
    return np.array(index_list, dtype=np.int32), event_list
def Listparser(index_list=None, event_list=None, fname="test", tempo=512820):
    """Reconstruct a mido MIDI file from an event/index sequence.

    Exactly one of ``index_list`` or ``event_list`` must be given; the
    vocabulary's pad token (index 0) is skipped during reconstruction.

    Args:
        index_list: sequence of vocab indices (ints or scalar tf tensors).
        event_list: sequence of event-name strings.
        fname: track name written into the meta track.
        tempo: microseconds per beat (assumed constant for the whole file).

    Returns:
        A mido.MidiFile with one meta track and one piano track.
    """
    assert (event_list == None) ^ (index_list == None) == True, "Input either the event_list or index_list but not both"
    #convert event_list to index_list
    if event_list is not None:
        assert type(event_list[0]) == str, "All events in event_list must be str"
        index_list = events_to_indices(event_list)
    #set up the midi file and tracks to be added to it
    mid = mido.MidiFile() #ticks_per_beat should be 480
    meta_track = mido.MidiTrack()
    track = mido.MidiTrack()
    # set up the config track
    meta_track.append(mido.MetaMessage("track_name").copy(name=fname))
    meta_track.append(mido.MetaMessage("smpte_offset")) # open track
    # time_signature
    time_sig = mido.MetaMessage("time_signature") # assumes time sig is 4/4
    time_sig = time_sig.copy(numerator=4, denominator=4)
    meta_track.append(time_sig)
    # key signature
    key_sig = mido.MetaMessage("key_signature") # assumes key sig is C
    meta_track.append(key_sig)
    # tempo
    set_tempo = mido.MetaMessage("set_tempo") # assume tempo is constant
    set_tempo = set_tempo.copy(tempo=tempo)
    meta_track.append(set_tempo)
    # end of track
    end = mido.MetaMessage("end_of_track")
    end = end.copy(time=0) # time is delta time
    meta_track.append(end) # check if this is the isolated problem
    # set up the piano track
    program = mido.Message("program_change") # 0 is piano
    track.append(program)
    # control
    cc = mido.Message("control_change")
    track.append(cc) # looks like that's done
    #initialize the time and velocity accumulators
    delta_time = 0
    vel = 0
    #iterate over the events in event list to reconstruct the midi file
    for idx in index_list:
        if tf.is_tensor(idx):
            idx = idx.numpy().item()
        if idx == 0: #if it is the pad token, continue
            continue
        idx = idx - 1 #subtracting 1 to deal with the pad token
        if 0 <= idx < note_on_events + note_off_events:
            if 0 <= idx < note_on_events:
                #note on event: uses the most recent set_velocity
                note = idx
                t = "note_on"
                v = vel
            else:
                #note off event
                note = idx - note_on_events
                t = "note_off"
                v = 127
            #set up the message
            msg = mido.Message(t)
            msg = msg.copy(note=note, velocity=v, time=delta_time)
            #reinitialize delta_time and velocity
            delta_time = 0
            vel = 0
            #insert message into track
            track.append(msg)
        elif note_on_events + note_off_events <= idx < note_on_events + note_off_events + time_shift_events:
            #time shift event
            cut_time = idx - (note_on_events + note_off_events - 1) # from 1 to time_shift_events
            delta_time += cut_time * DIV #div is used to turn the time from bins to milliseconds
        elif note_on_events + note_off_events + time_shift_events <= idx < total_midi_events - 3: #subtract start and end tokens
            #velocity event
            vel = bin_to_velocity(idx - (note_on_events + note_off_events + time_shift_events))
    #end the track
    end2 = mido.MetaMessage("end_of_track").copy(time=0)
    track.append(end2)
    #create and return the midi file
    mid.tracks.append(meta_track)
    mid.tracks.append(track)
    return mid
def events_to_indices(event_list, vocab=vocab):
    """Map a list of event-name strings to a rank-1 tensor of vocab indices.

    Args:
        event_list: iterable of event names, all of which must be in ``vocab``.
        vocab: lookup list of event names; defaults to the module vocabulary.

    Returns:
        A tf int tensor holding the index of each event in ``vocab``.

    Raises:
        ValueError: if an event is not present in ``vocab``.
    """
    # Comprehension instead of the original append loop; same order, same values.
    return tf.convert_to_tensor([vocab.index(event) for event in event_list])
def indices_to_events(index_list, vocab=vocab):
    """Map vocab indices back to their event-name strings.

    Args:
        index_list: iterable of integer indices into ``vocab``.
        vocab: lookup list of event names; defaults to the module vocabulary.

    Returns:
        list of event-name strings, one per index, in order.
    """
    # Comprehension instead of the original append loop; same order, same values.
    return [vocab[idx] for idx in index_list]
def velocity_to_bin(velocity, step=4):
    """Quantize a MIDI velocity (0-127) into one of 128/step bins.

    The bin index is velocity // step, so each bin covers ``step``
    consecutive velocities; dynamics resolution is only mildly reduced
    while the event vocabulary stays small.
    """
    assert (128 % step == 0), "128 possible midi velocities must be divisible into the number of bins"
    assert 0 <= velocity <= 127, f"velocity must be between 0 and 127, not {velocity}"
    return velocity // step
def bin_to_velocity(_bin, step=4):
    """Expand a velocity bin index back to a concrete MIDI velocity.

    Args:
        _bin: bin index produced by ``velocity_to_bin``.
        step: bin width; must match the value used when binning.

    Returns:
        int MIDI velocity equal to ``_bin * step`` (0-127).
    """
    velocity = _bin * step
    # The original assert message embedded a raw line continuation, so the
    # rendered text contained stray indentation whitespace; use a clean
    # single-line message instead.
    assert 0 <= velocity <= 127, (
        f"bin * step must be between 0 and 127 to be a midi velocity, not {velocity}"
    )
    return int(velocity)
def time_to_events(delta_time, event_list=None, index_list=None):
    """Append time_shift events covering ``delta_time`` to the given lists.

    ``delta_time`` (milliseconds) is split by ``time_cutter`` into chunks of
    at most LTH ms; each chunk value i (1-based, in DIV-ms units) maps to the
    vocab index ``note_on_events + note_off_events + i`` (the +1 offset for
    the pad token is already folded in because chunk values start at 1).

    Since msg.time is the time since the previous message, callers emit
    these time shifts before appending the next note events.

    Args:
        delta_time: accumulated time in milliseconds since the last event.
        event_list: optional list of event-name strings, extended in place.
        index_list: optional list of vocab indices, extended in place.
    """
    for chunk in time_cutter(delta_time):
        idx = note_on_events + note_off_events + chunk
        if event_list is not None:
            event_list.append(vocab[idx])
        if index_list is not None:
            index_list.append(idx)
    # (the original ended with a stray `pass`, removed here)
def time_cutter(time, lth=LTH, div=DIV):
    """Split a duration in milliseconds into time-shift chunk values.

    At the default mido resolution (480 ticks per beat, tempo 480000 us/beat,
    i.e. 125 bpm) one tick is one millisecond, so ``time`` is both ticks and
    ms. The result is (time // lth) full chunks of lth/div units, plus one
    final chunk for the rounded remainder (omitted when it rounds to 0).

    Args:
        time: non-negative duration in milliseconds.
        lth: maximum milliseconds representable by one time_shift run.
        div: milliseconds per time_shift unit; must divide ``lth``.

    Returns:
        list of ints, each in the range 1..lth//div.
    """
    assert (lth % div == 0), "lth must be divisible by div"
    # Hoisted out of the loop: the full-chunk value is loop-invariant.
    full_chunk = real_round(lth / div)
    time_shifts = [full_chunk] * (time // lth)
    last_term = real_round((time % lth) / div)
    if last_term > 0:
        time_shifts.append(last_term)
    return time_shifts
def check_note_pairs(fname=None, mid=None, return_notes=False):
    """Check that every note_on in a MIDI file is matched by a note_off.

    Exactly one of ``fname`` or ``mid`` must be given.

    Args:
        fname: path to a MIDI file to load.
        mid: an already-loaded mido.MidiFile.
        return_notes: when True, return the per-note on/off balance dict
            instead of the boolean result.

    Returns:
        True when every note's on/off events balance to zero, else False;
        or the balance dict when ``return_notes`` is set.
    """
    assert (fname == None) ^ (mid == None) == True, "Define only one of mid (a loaded midi file) or fname (the path from which to load a midi file)"
    if fname is not None:
        mid = mido.MidiFile(fname)
    notes = {}
    for track in mid.tracks:
        for msg in track:
            if msg.is_meta or (msg.type != "note_on" and msg.type != "note_off"):
                continue
            note = msg.note
            if note not in notes:
                notes[note] = 0
            if msg.type == "note_on":
                notes[note] += 1
            else:
                notes[note] -= 1
    # BUG FIX: the original loop read `notes[note]` while iterating over an
    # unused variable `i`, so it repeatedly checked only the last note seen.
    flag = all(balance == 0 for balance in notes.values())
    if return_notes:
        return notes
    return flag
def real_round(a):
    """Round a float to the nearest int, halves rounding toward +infinity.

    (Avoids Python's banker's rounding in the built-in round().)
    """
    whole, frac = divmod(a, 1)  # floor and fractional part
    bump = 1 if frac >= 0.5 else 0
    return int(whole) + bump
"""TRANSFORMER UTIL"""
MAX_LENGTH = 2048
#stuff to make the data augmentation easier
noe = note_on_events
nfe = note_off_events
ne = noe + nfe #note events
tse = time_shift_events
def skew(t: tf.Tensor):
    """
    Implements skewing procedure outlined in Huang et. al 2018 to reshape the
    dot(Q, RelativePositionEmbeddings) matrix into the correct shape for which
    Tij = compatibility of ith query in Q with relative position (j - i)
    this implementation accounts for tensors of rank n
    Algorithm:
    1. Pad T
    2. Reshape
    3. Slice
    Assumes T is of shape (..., L, L)
    """
    # pad T: one extra column on the left of the last axis
    # NOTE(review): tf.rank(t) returns a scalar tensor; iterating over
    # range(tf.rank(t) - 1) relies on eager execution - confirm.
    middle_dims = [[0, 0] for _ in range(tf.rank(t) - 1)] # allows padding to be generalized to rank n
    padded = tf.pad(t, [*middle_dims, [1, 0]])
    # reshape: the pad column shifts each row by its index
    srel = tf.reshape(padded, (*padded.shape[:-2], t.shape[-1] + 1, t.shape[-2]))
    # final touches
    srel = tf.reshape(srel, (-1, *srel.shape[-2:])) # flatten prior dims
    srel = srel[:, 1:] # slice off the first (padding) row
    return tf.reshape(srel, t.shape) # prior shape
def data_cutter(data, lth=MAX_LENGTH):
    """Cut long index sequences into fixed-length training sequences.

    Each input sequence is sliced into pieces of length ``lth``; a trailing
    remainder shorter than ``lth`` is padded with the pad token. Every output
    sequence is wrapped with the start and end tokens, giving final length
    ``lth + 2``.

    Args:
        data: one sequence or a list of sequences of vocab indices.
        lth: target slice length before adding start/end tokens.

    Returns:
        A tensor of shape (num_slices, lth + 2).
    """
    #make sure data is iterable
    if type(data) != list:
        data = [data]
    #initialize the cut data list and seqs to pad to add later to the cut data
    cdata = []
    seqs_to_pad = []
    for seq in data:
        #find the highest multiple of lth less than len(seq) so that until
        #this point, the data can be cut into even multiples of lth
        seq_len = len(seq)
        if lth > seq_len:
            seqs_to_pad.append(seq)
            continue
        mult = seq_len // lth
        #iterate over parts of the sequence of length lth and add them to cdata
        for i in range(0, lth * (mult), lth):
            _slice = seq[i:i + lth]
            cdata.append(_slice)
        #take the last <lth elements of the sequence and add to seqs_to_pad
        idx = mult * lth
        final_elems = seq[idx:]
        seqs_to_pad.append(final_elems) if final_elems.size > 0 else None
    #add the start and end tokens
    for i, vec in enumerate(cdata):
        # assume vec is of rank 1
        cdata[i] = tf.pad(tf.pad(vec, [[1, 0]], constant_values=start_token), \
                          [[0, 1]], constant_values=end_token)
    for i, vec in enumerate(seqs_to_pad):
        seqs_to_pad[i] = tf.pad(tf.pad(vec, [[1, 0]], constant_values=start_token), \
                                [[0, 1]], constant_values=end_token)
    #pad the short sequences up to lth + 2 and concatenate everything
    if seqs_to_pad:
        padded_data = tf.keras.preprocessing.sequence.pad_sequences(seqs_to_pad, maxlen=lth + 2,
                                                                    padding='post',value=pad_token)
        final_data = tf.concat([tf.expand_dims(cd, 0) for cd in cdata] + \
                               [tf.expand_dims(pd, 0) for pd in padded_data], 0)
    else:
        final_data = tf.concat([tf.expand_dims(cd, 0) for cd in cdata], 0)
    return final_data
def start_end_separator(data, lth=MAX_LENGTH):
    """Keep only the first and last ``lth`` tokens of each index list.

    Sequences longer than ``lth`` contribute two output sequences (their
    prefix and suffix); shorter ones are padded with the pad token. All
    outputs are wrapped with start and end tokens (final length lth + 2).

    Input index lists are assumed to be numpy arrays.

    Args:
        data: one sequence or a list of sequences of vocab indices.
        lth: number of leading/trailing tokens to keep.

    Returns:
        A tensor of shape (num_sequences, lth + 2).
    """
    if type(data) != list:
        data = [data]
    sep_data = []
    seqs_to_pad = []
    # separate the data and append to correct lists
    for arr in data:
        if len(arr) == lth:
            sep_data.append(arr)
        elif len(arr) < lth:
            seqs_to_pad.append(arr)
        else:
            first = arr[:lth]
            last = arr[-lth:]
            sep_data.append(first)
            sep_data.append(last)
    # add start and end tokens
    for i, vec in enumerate(sep_data):
        # assume vec is of rank 1
        sep_data[i] = tf.pad(tf.pad(vec, [[1, 0]], constant_values=start_token), \
                             [[0, 1]], constant_values=end_token)
    for i, vec in enumerate(seqs_to_pad):
        seqs_to_pad[i] = tf.pad(tf.pad(vec, [[1, 0]], constant_values=start_token), \
                                [[0, 1]], constant_values=end_token)
    # pad seqs to pad
    padded_data = tf.keras.preprocessing.sequence.pad_sequences(seqs_to_pad, maxlen=lth + 2,
                                                                padding='post', value=pad_token)
    # concatenate
    return tf.concat([tf.expand_dims(sd, 0) for sd in sep_data] + \
                     [tf.expand_dims(pd, 0) for pd in padded_data], 0)
def stretch_time(seq, time_stretch):
    """Stretch all time_shift events in an index list by ``time_stretch``.

    Consecutive time shifts are accumulated into milliseconds, multiplied by
    the stretch factor, rounded, and re-encoded via ``time_to_events``; all
    other event indices pass through unchanged.

    NOTE(review): time shifts at the very end of ``seq`` are accumulated in
    ``delta_time`` but never flushed before returning - confirm that trailing
    silence is intentionally dropped.

    Args:
        seq: sequence of vocab indices (numpy scalars: ``.item()`` is used).
        time_stretch: positive stretch coefficient; 1 is a no-op.

    Returns:
        np.ndarray of int32 indices (returned as-is when time_stretch == 1).
    """
    # initialize time_shifted sequence to return
    time_shifted_seq = []
    delta_time = 0
    # fast path: no stretching needed
    if time_stretch == 1:
        if type(seq) == np.ndarray:
            return seq
        else:
            return np.array(seq)
    for idx in seq:
        idx = idx.item()
        #if idx is a time_shift
        if ne < idx <= ne + tse:
            time = idx - (ne - 1) # get the index in the vocab
            delta_time += real_round(time * DIV * time_stretch) #accumulate stretched times
        else:
            time_to_events(delta_time, index_list=time_shifted_seq) #add the accumulated stretched times to the list
            delta_time = 0 #reinitialize delta time
            time_shifted_seq.append(idx) #add other indices back
    return np.array(time_shifted_seq, dtype=np.int32) #np ndarray instead of tf tensor to save
def aug(data, note_shifts=np.arange(-2, 3), time_stretches=[1, 1.05, 1.1],
        sample_random_time=False, sample_size=None):
    """Augment index-list data by pitch shifting and time stretching.

    Each sequence is transposed by every shift in ``note_shifts`` (semitones),
    and each transposed sequence is stretched by every factor in
    ``time_stretches`` plus the reciprocal of each factor (or, when
    ``sample_random_time`` is set, by ``sample_size`` randomly sampled
    factors per sequence).

    Args:
        data: one sequence or a list of sequences of vocab indices.
        note_shifts: list/ndarray of integer semitone shifts.
        time_stretches: list of positive stretch coefficients.
        sample_random_time: when True, sample stretch factors at random.
        sample_size: number of random factors per sequence; must be given
            iff ``sample_random_time`` is True.

    Returns:
        list of augmented sequences as int32 numpy arrays.
    """
    assert type(note_shifts) == list or type(note_shifts) == np.ndarray, \
        "note_shifts must be a list of integers(number of semitones) to shift pitch by"
    assert type(time_stretches) == list, "time_stretches must be a list of coefficients"
    assert (sample_random_time == True) ^ (sample_size is None), "Define none or both of sample_random_time and sample_size"
    assert (sample_size is None) or type(sample_size) == int, "sample_size must be an int"
    #make sure data is in a list
    if type(data) != list:
        data = [data]
    #preprocess the time stretches: ensure 1 is present, add reciprocals
    if 1 not in time_stretches:
        time_stretches.append(1)
    ts = []
    for t in time_stretches:
        ts.append(t)
        ts.append(1/t) if t != 1 else None
    ts.sort() #make it ascending
    predicted_len = len(data) * len(note_shifts) * sample_size if sample_random_time else len(data) * len(note_shifts) * len(ts)
    print(f'Predicted number of augmented data samples: {predicted_len}')
    #iterate over the sequences in the data to shift each one of them
    note_shifted_data = [] #initialize the set of note_shifted sequences
    count = 0
    for seq in data:
        #data will be shifted by each shift in note_shifts
        for shift in note_shifts:
            _shift = shift.item() #assume shift is a numpy ndarray
            #initialize the note shifted sequence as a list
            note_shifted_seq = []
            if _shift == 0:
                note_shifted_seq = seq
            else:
                #iterate over each elem of seq, shift it and append to note_shifted seq
                for idx in seq:
                    _idx = idx + _shift #shift the index
                    #only shift when the result stays within the same event
                    #class (note_on stays note_on, note_off stays note_off);
                    #otherwise keep the original index
                    if (0 < idx <= noe and 0 < _idx <= noe) or (noe < idx <= ne and noe < _idx <= ne):
                        note_shifted_seq.append(_idx)
                    else:
                        note_shifted_seq.append(idx)
            #note_shifted_seq = tf.convert_to_tensor(note_shifted_seq) #convert to tensor
            note_shifted_data.append(np.array(note_shifted_seq, dtype=np.int32))
            count += 1
            if not sample_random_time:
                print(f'Augmented data sample {count} created')
            else:
                print(f'Note shifted sample {count} created')
    #now iterate over the note shifted data to stretch it in time
    time_shifted_data = [] #initialize the set of time_stretched sequences
    if sample_random_time: count = 0
    for seq in note_shifted_data:
        # data will be stretched in time by each time_stretch
        # or by random time stretch if sample_random_time
        if sample_random_time:
            time_stretches_ = random.sample(ts, sample_size)
            for _ in time_stretches_:
                time_shifted_seq = stretch_time(seq, _)
                time_shifted_data.append(time_shifted_seq)
                count += 1
                print(f"Augmented data sample {count} created")
            continue
        for time_stretch in ts:
            time_shifted_seq = stretch_time(seq, time_stretch)
            time_shifted_data.append(time_shifted_seq)
            if time_stretch != 1:
                count += 1
                print(f"Augmented data sample {count} created")
    #output the data
    return time_shifted_data
"""TEST MODEL ACCURACY"""
def generate_scale(note=60, delta_times=[500], velocities=list(np.arange(9, 24)),
                   mode='ionian', octaves=1):
    """Generate an index list that plays a scale up and back down.

    Args:
        note: MIDI pitch of the tonic (default 60 = middle C).
        delta_times: cycle of millisecond durations between on and off.
        velocities: cycle of velocities (values above velocity_events are
            treated as raw MIDI velocities and binned).
        mode: one of ionian/dorian/phrygian/lydian/mixolydian/aeolian/
            locrian/major/harmonic/melodic.
        octaves: number of octaves to ascend before descending.

    Returns:
        np.ndarray of int32 vocab indices encoding the scale.
    """
    note = note + 1  # shift for the pad token at vocab index 0
    iter_times = iter([time_cutter(dt) for dt in delta_times])
    # Normalize velocities to bin indices WITHOUT mutating the caller's list:
    # the original wrote back into `velocities`, permanently altering the
    # mutable default argument across calls.
    velocities = [velocity_to_bin(v) if v > velocity_events else v
                  for v in velocities]
    iter_vel = iter(velocities)
    modes = ['ionian', 'dorian', 'phrygian', 'lydian', 'mixolydian', 'aeolian',
             'locrian', 'major', 'harmonic', 'melodic']
    # Each row: semitone offsets ascending one octave and back down.
    mode_steps = np.array([[0, 2, 4, 5, 7, 9, 11, 12, 11, 9, 7, 5, 4, 2, 0],
                           [0, 2, 3, 5, 7, 9, 10, 12, 10, 9, 7, 5, 3, 2, 0],
                           [0, 1, 3, 5, 7, 8, 10, 12, 10, 8, 7, 5, 3, 1, 0],
                           [0, 2, 4, 6, 7, 9, 11, 12, 11, 9, 7, 6, 4, 2, 0],
                           [0, 2, 4, 5, 7, 9, 10, 12, 10, 9, 7, 5, 4, 2, 0],
                           [0, 2, 3, 5, 7, 8, 10, 12, 10, 8, 7, 5, 3, 2, 0],
                           [0, 1, 3, 5, 6, 8, 10, 12, 10, 8, 6, 5, 3, 1, 0],
                           [0, 2, 4, 5, 7, 9, 11, 12, 11, 9, 7, 5, 4, 2, 0],
                           [0, 2, 3, 5, 7, 8, 11, 12, 11, 8, 7, 5, 3, 2, 0],
                           [0, 2, 3, 5, 7, 9, 11, 12, 10, 8, 7, 5, 3, 2, 0]])
    mode_steps = mode_steps[modes.index(mode)]
    # Extend the one-octave pattern to the requested number of octaves.
    middle = mode_steps.max() + 12 * (octaves - 1)
    ascend_ = mode_steps[:len(mode_steps) // 2]
    ascend = ascend_[:]
    descend_ = mode_steps[1 + len(mode_steps) // 2:] + 12 * (octaves - 1)
    descend = descend_[:]
    for i in range(octaves - 1):
        ascend_ = ascend_ + 12
        ascend = np.concatenate((ascend, ascend_))
        descend_ = descend_ - 12
        descend = np.concatenate((descend, descend_))
    mode_steps = np.concatenate((ascend, np.expand_dims(middle, 0), descend))
    scale_ons = np.add(note, mode_steps)
    # BUG FIX: the original line ended with a stray backslash that glued the
    # next statement onto it and made this function a SyntaxError.
    scale_offs = np.add(scale_ons, note_on_events)
    idx_list = []
    for x, y in zip(scale_ons, scale_offs):
        # get velocity (cycling through the list)
        try:
            velocity = next(iter_vel)
        except StopIteration:
            iter_vel = iter(velocities)
            velocity = next(iter_vel)
        velocity = vocab.index(f"set_velocity_{velocity}")
        # get delta time (cycling through the list)
        try:
            delta_time = next(iter_times)
        except StopIteration:
            iter_times = iter([time_cutter(dt) for dt in delta_times])
            delta_time = next(iter_times)
        # append set_velocity, note_on, the time shifts, then note_off
        idx_list.append(velocity)
        idx_list.append(x)
        for time in delta_time:
            idx_list.append(vocab.index(f"time_shift_{time - 1}"))
        idx_list.append(y)
    return np.array(idx_list, dtype=np.int32)
| true |
098841b46ae64b4fdf7167e794a022fd41da2a58 | Python | iyozh/PythonFinalTask | /rss_reader/rss_reader/converter.py | UTF-8 | 3,399 | 2.90625 | 3 | [] | no_license | import hashlib
import io
import logging
import os
import sys
from pathlib import Path
from jinja2 import Template
from xhtml2pdf import pisa
# Package root: two directory levels above this file; used to locate the image cache.
ROOT_DIR = Path(__file__).resolve().parent.parent
class Converter:
    """Renders a list of news items to HTML or PDF and writes the result to disk."""

    def __init__(self, directory, file_name, logger, cache_path):
        """Remember the output location, logger and template/image paths.

        directory/file_name: where the converted file is written.
        cache_path: directory (under ROOT_DIR) whose "images" subfolder
        caches downloaded article images.
        """
        cached_images = ROOT_DIR / cache_path / "images"
        cached_images.mkdir(exist_ok=True)
        self.img_storage = cached_images
        self.dir = Path(directory).absolute()
        self.full_path_to_file = self.dir / file_name
        self.logger = logger
        self.template = Path(__file__).resolve().parent / "templates" / "news.html"

    def prepare_storage(self):
        """Create the output directory and an empty output file; exit on permission errors."""
        self.logger.info("Preparing storage for your data...")
        try:
            self.dir.mkdir(exist_ok=True)
            self.full_path_to_file.touch(exist_ok=True)
        except PermissionError:
            logging.error(
                "Conversion cannot be performed. Permission denied for this directory"
            )
            sys.exit()

    def convert_to_html(self, news_list):
        """Render *news_list* through the HTML template and save it as UTF-8 bytes."""
        self.logger.info("Converting news to HTML...")
        self.prepare_storage()
        self.process_news_list_with_images(news_list)
        rendered = self.generate_html_template(news_list)
        self.write_to_file(rendered.encode("UTF-8"))

    def convert_to_pdf(self, news_list):
        """Render *news_list* to HTML markup, convert it to PDF and save the bytes."""
        self.logger.info("Converting news to PDF...")
        self.prepare_storage()
        self.process_news_list_with_images(news_list)
        rendered = self.generate_html_template(news_list)
        pdf_buffer = io.BytesIO()
        pisa.pisaDocument(rendered, pdf_buffer)
        self.write_to_file(pdf_buffer.getvalue())

    def generate_html_template(self, news_list):
        """Load the Jinja2 template from disk and render it with *news_list*."""
        with open(self.template, "r") as fp:
            markup = Template(fp.read())
        return markup.render(news_list=news_list)

    def write_to_file(self, content):
        """Write raw bytes to the output file; exit on permission errors."""
        try:
            with open(self.full_path_to_file, "wb") as fp:
                fp.write(content)
        except PermissionError:
            logging.error(
                "Conversion cannot be performed. Permission denied for this directory"
            )
            sys.exit()
        self.logger.info("News has been successfully converted")

    def process_news_list_with_images(self, news_list):
        """Swap each item's remote image URL for a resolved local path when a
        cached copy exists.

        Cached files are named after the md5 hex digest of the original URL;
        items without an "Image" key are left untouched.
        """
        for item in news_list:
            try:
                wanted = hashlib.md5(item.get("Image").encode()).hexdigest()
            except AttributeError:
                # item.get("Image") returned None - nothing to replace
                continue
            for cached in os.listdir(self.img_storage):
                if cached.split(".")[0] == wanted:
                    item["Image"] = (self.img_storage / cached).resolve()
                    break
| true |
e9694759e4e5e546238b3f33049ee80800299dba | Python | kateroskostas/apdd | /read_my_solutions.py | UTF-8 | 3,601 | 3.375 | 3 | [] | no_license | from networkx import Graph
# Each solution file has two whitespace-separated columns: the exam id and
# the code of the period that exam was scheduled in.
def read_solution(path):
    """Read an exam-timetabling solution file.

    Blank lines are skipped; every other line must hold two integers
    (exam id, period). Returns a dict mapping exam -> period.

    Fix: the original opened the file and never closed it; a context
    manager now guarantees the handle is released.
    """
    solution = dict()
    with open(path, "r") as file:
        for line in file:
            # drop surrounding whitespace before parsing
            line = line.strip()
            # skip empty lines
            if line == "":
                continue
            line_contains = line.split()
            exam = int(line_contains[0])
            period = int(line_contains[1])
            solution[exam] = period
    return solution
# Scoring a solution: count how many periods it uses and verify validity -
# no two exams that share a student (an edge in the conflict graph) may be
# scheduled in the same period.
def evaluate(graph: Graph, solution):
    """Return (number of used periods, is_valid).

    Prints a diagnostic (in Greek) and returns (0, False) on the first
    violation found.
    """
    used_periods = set()
    for exam in graph:
        # every exam node must be assigned in the solution
        if exam not in solution:
            print(f"Η εξέταση: {exam} δεν υπάρχει")
            return 0, False
        assigned_period = solution[exam]
        # negative period codes are invalid
        if assigned_period < 0:
            print(f"Η εξέταση: {exam} ανατέθεικε σε αρνητική περίοδο")
            return 0, False
        used_periods.add(assigned_period)
    # conflicting exams (joined by an edge) must sit in different periods
    for exam_a, exam_b in graph.edges():
        if solution[exam_a] == solution[exam_b]:
            print(
                f"Η εξέταση: {exam_a} και η εξέταση: {exam_b} είναι στην ίδια περίοδο"
            )
            return 0, False
    return len(used_periods), True
| true |
b274cd7473b828724952c37257a5a1e0ab313dd4 | Python | ati-ozgur/course-python | /2022/examples-in-class-2022-11-18/altair_example1.py | UTF-8 | 368 | 2.578125 | 3 | [
"Apache-2.0"
] | permissive | # to be able to use following code
# I need to install the packages using pip
# pip install altair
# pip install altair_viewer
import pandas as pd
import altair as alt
df = pd.read_csv('seattle-weather.csv')
chart = alt.Chart(df)
alt.Chart(df).mark_circle().encode(
x=alt.X('temperature', bin=True),
y=alt.Y('wind', bin=True),
size='count()'
).show()
| true |
318cfd9ad440f88e4803efdff99c347ae5594cb8 | Python | iWonder118/atcoder | /python/ABC044/B.py | UTF-8 | 198 | 3.359375 | 3 | [] | no_license | import collections
# ABC044 B: answer "Yes" iff every character of the input occurs an even
# number of times.
w = list(input())
w_count = collections.Counter(w)
flag = all(occurrences % 2 == 0 for occurrences in w_count.values())
if flag:
    print("Yes")
else:
    print("No")
| true |
9c946c11043a66cb35b0af4c3ecc751e07b89b1a | Python | erenes/vng-api-common | /vng_api_common/scopes.py | UTF-8 | 1,461 | 3.15625 | 3 | [] | no_license | from typing import List
OPERATOR_OR = "OR"
OPERATOR_AND = "AND"

# every Scope instance ever created registers itself here
SCOPE_REGISTRY = set()


class Scope:
    """A named authorization scope.

    Scopes combine with ``|`` (any child suffices) or ``&`` (all children
    required) into composite scopes, which keep their children and operator.
    """

    def __init__(self, label: str, description: str = None):
        self.label = label
        self.description = description

        # combined scopes
        self.children = []
        self.operator = None

        # add to registry
        SCOPE_REGISTRY.add(self)

    def __repr__(self) -> str:
        if self.children:
            # fix: the original format string was missing the closing '>'
            return "<%s: %r>" % (self.operator, self.children)
        cls_name = self.__class__.__name__
        return "<%s: label=%r>" % (cls_name, self.label)

    def __str__(self):
        return f"({self.label})" if self.children else self.label

    def __or__(self, other):
        new = type(self)(label=f"{self.label} | {other.label}")
        new.children = [self, other]
        new.operator = OPERATOR_OR
        return new

    def __and__(self, other):
        # counterpart of __or__: is_contained_in already handled
        # OPERATOR_AND, but there was no way to construct such a scope
        new = type(self)(label=f"{self.label} & {other.label}")
        new.children = [self, other]
        new.operator = OPERATOR_AND
        return new

    def is_contained_in(self, scope_set: List[str]) -> bool:
        """
        Test if the flat ``scope_set`` encapsulates this scope.

        A leaf scope is contained when its label appears in ``scope_set``;
        an OR scope when any child is contained, an AND scope when all are.
        """
        if not self.children:
            return self.label in scope_set

        children_contained = (
            child.is_contained_in(scope_set) for child in self.children
        )
        if self.operator == OPERATOR_OR:
            return any(children_contained)
        elif self.operator == OPERATOR_AND:
            return all(children_contained)
        else:
            raise ValueError(f"Unknown operator '{self.operator}'")
| true |
37bc6911d2241d0d7352262bfd2dd1e276551740 | Python | wwendler/collection | /r4game/abminimax.py | UTF-8 | 2,805 | 3.046875 | 3 | [
"ISC"
] | permissive | # abminimax.py
# An ai using minimax and alpha beta pruning
import r4server as r4
import random
def opponent(player):
    """Return the id of the other player (1 <-> 2)."""
    return 1 + (player % 2)
# Sentinel evaluation scores for decided positions (win / loss)
eval_max = 9999
eval_min = -9999
def eval(player, board):
    """Static board score for `player`: the sum of r4.findLength() over the
    player's pieces minus the same sum for the opponent. Positive favours
    `player`.

    NOTE(review): shadows the built-in eval(); name kept for callers.
    """
    opp = opponent(player)
    height = len(board)
    width = len(board[0])
    score = 0
    for y in range(height):
        for x in range(width):
            cell = board[y][x]
            if cell == player:
                score += r4.findLength(x, y, board)
            elif cell == opp:
                score -= r4.findLength(x, y, board)
    return score
def eval2(player, board):
    # One-move lookahead heuristic: +1 for every column where `player` can
    # complete a line of 4 immediately, -1 for every column where dropping a
    # piece lets the opponent complete 4 on the cell at y+1.
    # The board is modified in place with r4.setLoc and restored before
    # returning; the set/unset order below must not be changed.
    count = 0
    opp = opponent(player)
    for x in range(len(board[0])):
        # next free cell in this column; skip full/invalid columns
        y = r4.findMinRow(x, board)
        if not r4.isValid(x, y, board):
            continue
        r4.setLoc(x, y, player, board)
        length = r4.findLength(x, y, board)
        if length >= 4:
            count += 1
        elif r4.isValid(x, y+1, board):
            # simulate the opponent playing on the adjacent cell (y+1)
            # NOTE(review): assumes y+1 is where the next piece would land -
            # confirm the board orientation against r4server.
            r4.setLoc(x, y+1, opp, board)
            length = r4.findLength(x, y+1, board)
            if length >= 4:
                count -= 1
            r4.setLoc(x, y+1, 0, board)
        r4.setLoc(x, y, 0, board)
    return count
# player is always the min player...
def abminimax(player, board, recur, alpha, beta):
    # Negamax-style alpha-beta search over `recur` plies.
    # Returns (score, best_move); note the `-beta` on the returns, which
    # flips the score to the parent caller's perspective. best_move is -1
    # when the position is terminal or the depth limit was reached.
    opp = opponent(player)
    winner = r4.findWinner(board)
    xlen = len(board[0])
    ylen = len(board)
    if winner == player:
        return eval_max, -1
    elif winner == opp:
        return eval_min, -1
    if recur == 0:
        return eval2(player, board), -1
    best_move = -1
    valid_moves = []
    moves = range(xlen)
    # shuffled so equal-scoring moves are picked at random
    # NOTE(review): random.shuffle needs a mutable sequence - py2 range()
    # returns a list; under py3 this would need list(range(xlen)).
    random.shuffle(moves)
    for x in moves:
        y = r4.findMinRow(x, board)
        if r4.isValid(x, y, board):
            valid_moves.append(x)
        else:
            continue
        # play the move, recurse with the window negated/swapped, undo it
        r4.setLoc(x, y, player, board)
        score, bluh = abminimax(opp, board, recur - 1,
            -beta, -alpha)
        r4.setLoc(x, y, 0, board)
        if score < beta:
            beta = score
            best_move = x
        if beta <= alpha:
            #print "ab pruning, returning %s" % str((beta, recur, best_move))
            return -beta, best_move
    # if there are no valid moves
    if len(valid_moves) == 0:
        beta = min(beta, 0)
    # if you are going to lose...
    elif beta == eval_max:
        # this is a random move, because moves was shuffled
        best_move = valid_moves[0]
    return -beta, best_move
def move(player, board):
    # Entry point used by the game driver: run a 6-ply alpha-beta search and
    # return the chosen column, reporting the expected outcome on stdout
    # (Python 2 print statements).
    num_recur = 6
    score, best_move = abminimax(player, board,
        num_recur, eval_min, eval_max)
    if score == eval_min:
        print "expected result: my loss. moving to %i" % best_move
    elif score == eval_max:
        print "you are doomed. moving to %i" % best_move
    else:
        print "expected score: %i, num_recur: %i, best move: %i" % (score,
            num_recur, best_move)
    return best_move
| true |
697f2e0104ab28d98435e1dc82dcef022c7313f5 | Python | Hodaya1234/Project | /data_set.py | UTF-8 | 1,977 | 2.84375 | 3 | [] | no_license | import torch.utils.data as data_utils
import torch
class DataSet(data_utils.Dataset):
    """Torch dataset wrapping paired feature/label tensors.

    x: tensor of shape (n_samples, n_features); y: tensor of n_samples labels.
    """
    def __init__(self, x, y):
        super(DataSet, self).__init__()
        self.all_x = x
        self.all_y = y
        self.n_data = x.shape[0]  # number of samples

    def __getitem__(self, index):
        """Return the (features, label) pair at *index*."""
        return self.all_x[index], self.all_y[index]

    def __len__(self):
        return len(self.all_y)

    def change_device(self, device):
        """Return a copy of this dataset with both tensors moved to *device*."""
        return DataSet(self.all_x.to(device), self.all_y.to(device))

    def normalize(self, mean_x=None, std_x=None, simple=False):
        """Return a normalized copy of this dataset.

        simple=True: subtract 1 from every feature value.
        Otherwise: (x - mean) / std per feature column; mean/std default to
        this dataset's own column statistics.
        """
        if simple:
            return DataSet(self.all_x - 1, self.all_y)
        if mean_x is None:
            mean_x = torch.mean(self.all_x, dim=0)
        if std_x is None:
            std_x = torch.std(self.all_x, dim=0)
        # Broadcasting replaces the original per-row repeat(): identical
        # result without materialising (n_data, n_features) temporaries.
        return DataSet((self.all_x - mean_x) / std_x, self.all_y)

    def calc_mean_std(self):
        """Return per-feature (mean, std) across all samples."""
        return torch.mean(self.all_x, dim=0), torch.std(self.all_x, dim=0)
def data_to_cuda(data_sets, device, cv=True):
    """Move every dataset object onto *device*.

    cv=True: data_sets is a list of lists (cross-validation folds);
    cv=False: data_sets is a flat list. The same structure is returned with
    each dataset replaced by its moved copy.
    """
    if cv:
        return [[fold_set.change_device(device) for fold_set in set_type]
                for set_type in data_sets]
    return [one_set.change_device(device) for one_set in data_sets]
def normalize_datasets(data_sets, cv=True):
    """Normalize every dataset, preserving the flat or nested list structure.

    cv=True expects a list of lists (cross-validation folds).
    """
    if cv:
        return [[fold_set.normalize() for fold_set in set_type]
                for set_type in data_sets]
    return [one_set.normalize() for one_set in data_sets]
| true |
8f6fe777e3bcfcee80c2725ac2dc33c9655dc3b3 | Python | senlau/Library_Management_System | /src/tables/Member.py | UTF-8 | 615 | 2.515625 | 3 | [
"Apache-2.0"
] | permissive | from sqlalchemy import Column, Integer, String, DateTime
from DbConfig import Base
class Member(Base):
    """SQLAlchemy declarative model for one row of the ``members`` table."""
    __tablename__ = 'members'
    # 32-character string primary key
    # NOTE(review): id format not shown here - presumably a uuid hex string;
    # confirm against the code that creates members.
    id = Column(String(32), unique=True, nullable=False, primary_key=True)
    name = Column(String(32))
    email = Column(String(32), nullable=True)
    # NOTE(review): the 128-char width suggests a password *hash* is stored,
    # but nothing here enforces hashing - confirm at the call sites.
    password = Column(String(128), nullable=False)
    # Disabled audit columns (data creation / update dates):
    # create_time = Column(DateTime, comment="数据创建日期")
    # update_time = Column(DateTime, comment="数据更新日期")
    def set_name(self, name):
        """Assign the member's display name."""
        self.name = name
    def set_email(self, email):
        """Assign the member's e-mail address."""
        self.email = email
    def set_password(self, password):
        """Assign the stored password value."""
        self.password = password
| true |
03057d7fc7e66e83402030c8352c48ee4c8ebefc | Python | honeywang991/test01 | /python9/class_0809_object/test.py | UTF-8 | 108 | 3.34375 | 3 | [] | no_license |
def add(*args):
    """Print the sum of all positional arguments (0 when called with none)."""
    # use the built-in sum(); the original shadowed it with a local variable
    total = sum(args)
    print(total)

# demo call: prints 55
add(1,2,3,4,5,6,7,8,9,10)
712f404a9e58d03cc703ab4ecaa558548e843346 | Python | Mampfzwerg/Praktikum | /V.206/latex-template/content/dampfdruck.py | UTF-8 | 783 | 2.890625 | 3 | [] | no_license | import numpy as np
from uncertainties import ufloat
import matplotlib.pyplot as plt
# Load the six measurement columns; only c and d are used below.
a, b, c, d, e, f = np.genfromtxt('mess1.txt', unpack=True)
pa = (d + 1)  # pressure offset by 1 (axis label below suggests units of bar)
T2 = c + 273.15  # temperature converted from Celsius to Kelvin
#print(np.log(pa))
#print(1/T2)
# Linear fit of ln(p) against 1/T (Clausius-Clapeyron form); cov=True also
# returns the covariance matrix, whose diagonal gives the parameter variances.
params, covariance_matrix = np.polyfit(1/T2, np.log(pa), deg=1, cov=True)
errors = np.sqrt(np.diag(covariance_matrix))
print('a = {:.3f} ± {:.4f}'.format(params[0], errors[0]))
print('b = {:.3f} ± {:.4f}'.format(params[1], errors[1]))
def gerade (x, m, b):
    """Straight line m*x + b - the model function for the fitted parameters."""
    return b + m * x
# Evaluation grid spanning the measured 1/T range for the fitted line
z = np.linspace(np.min(1/T2), np.max(1/T2))
plt.plot(1/T2, np.log(pa), 'rx', label='Messdaten')
plt.plot(z, gerade (z, *params), 'b-', label='Ausgleichsgerade')
plt.xlabel(r'$1/T_2 \: / \: 1/K$')
plt.ylabel(r'$ln(p_a/1 bar)$')
plt.legend(loc='best')
plt.tight_layout()
plt.savefig('plot2.pdf')
223e7ece52aa4df8cd51b029d05cb5338cfc60fd | Python | shambhand/pythontraining | /material/code/oop/inheritance/demo_progs/inheritance2.py | UTF-8 | 358 | 3.3125 | 3 | [] | no_license | # Replace demo
import sys
class BaseClass:
    """Demo base class whose method() is intended to be overridden."""
    def method (self):
        """Placeholder implementation: report which class handled the call."""
        message = "BaseClass:This is a Base-place holder method"
        print (message)
class DerivedClass (BaseClass):
    """Overrides method() to demonstrate replacing the base behaviour."""
    def method (self):
        """Replacement implementation: report the derived class handled the call."""
        message = "DerivedClass:This is a Derived-place holder method"
        print (message)
def main ():
    """Call method() on both classes to show overriding, then exit cleanly."""
    base = BaseClass ()
    base.method ()
    derived = DerivedClass ()
    derived.method ()
    sys.exit (0)
main () | true |
7d6a69aa570daaff9a852929dcdbe9209166e9ef | Python | ahatherly/PythonPlatformer | /PlatformGame/Levels.py | UTF-8 | 1,137 | 3.03125 | 3 | [] | no_license | from Enemies import Enemy
class Levels:
    """Tile map for one game level, loaded from a text file.

    File format, one record per line:
      * lines shorter than 2 characters are ignored
      * lines starting with '!' are comments
      * lines starting with a digit define an enemy, one character per field:
        <code><leftExtent><rightExtent><speed><spriteType>
      * every other line is a row of level tiles (trailing newline included)
    """
    level_width = 0
    level_height = 0

    start_level_x_offset = -160
    level_x_offset = -160

    def __init__(self):
        self.levelTiles = []

    def loadLevel(self, filename, enemies):
        """Parse *filename* into tile rows; digit lines populate *enemies*
        (a dict mapping code -> Enemy).

        Fix: the file is now opened with a context manager - the original
        leaked the handle.
        """
        with open(filename, "r") as file:
            for line in file:
                if len(line) < 2:
                    # Empty
                    pass
                elif line[0] == "!":
                    # Comment line
                    pass
                elif line[0].isdigit():
                    # Enemy definition line
                    code = line[0]
                    leftExtent = int(line[1])
                    rightExtent = int(line[2])
                    speed = int(line[3])
                    enemySpriteType = line[4]
                    enemies[code] = Enemy(code, leftExtent, rightExtent, speed, enemySpriteType)
                else:
                    self.levelTiles.append(line)
                    # track the widest row (length includes the newline)
                    if len(line) > self.level_width:
                        self.level_width = len(line)
        self.level_height = len(self.levelTiles)
        self.level_x_offset = self.start_level_x_offset

    def getTile(self, x, y):
        """Return the tile character at (x, y); ' ' for out-of-range or newline."""
        if y > len(self.levelTiles)-1:
            return " "
        line = self.levelTiles[y]
        if x > len(line)-1:
            return " "
        code = line[x]
        if code == "\n":
            return " "
        return code

    def getWidth(self):
        """Widest stored row length (includes the trailing newline character)."""
        return self.level_width

    def getHeight(self):
        """Number of tile rows loaded."""
        return self.level_height
1cdc70974807f1f468057448b90153bf53c07ebe | Python | annateuerle/LAI_thesis | /h5util.py | UTF-8 | 1,894 | 2.921875 | 3 | [] | no_license | """
Store/Access data in hdf5 file using some compression
Otherwise we have files which takes many gigabytes..
Saves hdf5 dataset in 'groupname/xxxx'
"""
import numpy as np
import logging
import h5py
import datetime
from settings import conf
# Module-level logger: DEBUG level, echoed to stderr via a StreamHandler.
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
log.addHandler(logging.StreamHandler())
def set_dataset(hdf, path, data):
    """Write *data* at *path* in the open h5py file *hdf*, replacing any
    existing dataset. Stored gzip-compressed at level 7; returns the dataset.
    """
    if path in hdf.keys():
        # h5py cannot overwrite a dataset in place: drop the old one first
        del hdf[path]
    return hdf.create_dataset(
        path, data=data, compression="gzip", compression_opts=7
    )
def save(name, data, attrs=None):
    """Save *data* in the HDF5 store under '<groupname>/<name>'.

    attrs: optional dict of attributes copied onto the created dataset.
    Fix: the original used a mutable default argument (`attrs={}`); the
    default is now created per call.
    """
    if attrs is None:
        attrs = {}
    groupname = conf['groupname']
    storage_name = conf['hdf5storage']
    with h5py.File(storage_name, "a") as data_file:
        path = f'{groupname}/{name}'
        ds = set_dataset(data_file, path, data)
        for k, v in attrs.items():
            ds.attrs[k] = v
def load_dataset(ds_name):
    """Read '<groupname>/<ds_name>' from the HDF5 store as a numpy array."""
    group = conf['groupname']
    store = conf['hdf5storage']
    with h5py.File(store, "r") as hdf:
        return np.array(hdf[f'{group}/{ds_name}'])
def load_attrs(ds_name, keys):
    """Return {key: attribute value} for *keys* of dataset '<groupname>/<ds_name>'."""
    group = conf['groupname']
    store = conf['hdf5storage']
    with h5py.File(store, "r") as hdf:
        ds = hdf[f'{group}/{ds_name}']
        return {k: ds.attrs[k] for k in keys}
def load_timestamps(ds_name='timestamps'):
    """Load a dataset of POSIX timestamps and convert each to a datetime.date."""
    return [datetime.date.fromtimestamp(t) for t in load_dataset(ds_name)]
def load_cru_grid():
    """Load the CRU grid dataset stored under '<groupname>/grid'."""
    return load_dataset('grid')
def _printname(name):
    # Callback for h5py's visit(): log each object path. Returning None
    # (implicitly) keeps the traversal going.
    log.debug(name)
def print_paths():
    """Log every object path in the HDF5 store via h5py's visit()."""
    store = conf['hdf5storage']
    # NOTE(review): opened in append mode ("a") even though this only
    # traverses - confirm whether read mode was intended.
    with h5py.File(store, "a") as hdf:
        hdf.visit(_printname)
| true |
479c31513e1c93a70c2450769ae2bf56034997b8 | Python | spgeise/Restaurant-Selector | /Files/Zipcodelist.py | UTF-8 | 353 | 2.5625 | 3 | [] | no_license | from csv import reader
# Parallel lookup lists filled by openconverstionfile():
# zipcodes[i] pairs with latlist[i] / longlist[i]
zipcodes = []
latlist = []
longlist = []
def openconverstionfile():
    """Load the zip-code/latitude/longitude CSV into the module-level lists.

    (Function name kept as-is - "converstion" - for caller compatibility.)
    Fixes: the path was the literal 'Files\\Zipcodelist.txt', which only
    resolves on Windows; blank CSV rows used to raise IndexError.
    """
    import os  # local import keeps this fix self-contained
    file_name = os.path.join('Files', 'Zipcodelist.txt')
    with open(file_name) as zipdata:
        zipfile = reader(zipdata)
        for row in zipfile:
            if not row:
                continue  # csv yields [] for blank lines; skip them
            zipcodes.append(row[0])
            latlist.append(row[1])
            longlist.append(row[2])
b82fad0633c3ff0bb60b2af08bccfdf60631fe45 | Python | naive9527/luffycity-1 | /luffycity后端/utils/response.py | UTF-8 | 596 | 2.671875 | 3 | [] | no_license | """
响应的数据格式
"""
class BaseResponse(object):
    """Standard API response envelope.

    Serialised shape: {"code": 1000, "data": None, "error": None}
    """
    def __init__(self):
        self.code = 1000   # 1000 is the default/success status code
        self.data = None
        self.error = None

    @property
    def dict(self):
        # Plain-dict view of the instance, used when building the Response
        return self.__dict__
class TokenResponse(BaseResponse):
    """Response envelope that additionally carries an auth token.

    Serialised shape: {"code": 1000, "data": None, "error": None, "token": None}
    """
    def __init__(self):
        super(TokenResponse, self).__init__()
        self.token = None
| true |
b7bee159fb1a91c7ca15b3e3a1a26959b4df830f | Python | simsekonur/Python-exercises | /iteration/factorial.py | UTF-8 | 253 | 3.890625 | 4 | [] | no_license | print ("*****************")
# Banner (Python 2 script: raw_input below)
print ("Factorial Computing Program")
print ("Please enter a number...")
print ("*****************")
number = int (raw_input ("Enter a number :"))
# multiply `result` by every integer counting down from `number` to 1;
# for inputs <= 0 the loop never runs and 1 is printed
result = 1
while number > 0:
    result *= number
    number -= 1
print (result)
| true |
a6c839a6223a508d2da07609a52236a1f77e244d | Python | mrzzy/Portfolio-I | /practicals/code/traffic_light.py | UTF-8 | 1,522 | 3.3125 | 3 | [
"MIT"
] | permissive | #
# traffic_light.py
# Portfolio I - Lab 1-2
# Similates a Traffic light system with the raspberry pi
#
import lcddriver
import time
from datetime import datetime, timedelta
from gpiozero import LED
# Pinout constants
# TODO: fill this up to work
# NOTE(review): all three pins are placeholders (0); the program cannot
# drive real hardware until they are set to the wired GPIO pin numbers.
PIN_RED_LED = 0
PIN_AMBER_LED = 0
PIN_GREEN_LED = 0
# Displays the given traffic light signal by lighting up the LED specifed by led_pin
# and writes the given message to the lcd display for the given duration seconds
def display_signal(led_pin, message, duration):
    """Light the LED on *led_pin* and show *message* on the LCD for
    *duration* seconds, then turn the LED off and clear the display.

    The time spent driving the LED/LCD is subtracted from the wait so each
    phase lasts close to *duration* overall.
    """
    begin = datetime.now()  # record starting time
    # Light up the LED
    led = LED(led_pin)
    led.on()
    # Write message on the LCD
    lcd = lcddriver.lcd()
    lcd.lcd_display_string(message)
    # Sleep for the remaining time.
    # BUG FIX: the original computed `duration - (begin - now)` (sign
    # inverted, so setup time was *added* to the wait) and then passed the
    # timedelta straight to time.sleep(), which requires a float and raises
    # TypeError.
    elapsed = datetime.now() - begin
    remaining = timedelta(seconds=duration) - elapsed
    time.sleep(max(remaining.total_seconds(), 0.0))
    # Cleanup: turn off LED and clear display
    led.off()
    lcd.lcd_clear()
# Cycle the signal forever: green 10 s, amber 1 s, red 10 s.
try:
    while True:
        # Light up green traffic light signal
        # NOTE(review): the LCD shows "DO NOT WALK" in all three phases,
        # including green - confirm whether green should display "WALK".
        display_signal(PIN_GREEN_LED, "DO NOT WALK", 10)
        # Light up amber traffic light signal
        display_signal(PIN_AMBER_LED, "DO NOT WALK", 1)
        # Light up red traffic light signal
        display_signal(PIN_RED_LED, "DO NOT WALK", 10)
except KeyboardInterrupt:
    # cleanup: on Ctrl-C force every LED off and blank the display
    LED(PIN_RED_LED).off()
    LED(PIN_AMBER_LED).off()
    LED(PIN_GREEN_LED).off()
    lcddriver.lcd().lcd_clear()
| true |
aeadec61e79c71e19bb325ee4e5996ae776d7f0f | Python | bigdata2016/bigwork2016 | /week7/wen/ex7.2/euler_tour.py | UTF-8 | 794 | 3.046875 | 3 | [] | no_license | #!/usr/bin/python
from mrjob.job import MRJob
from mrjob.step import MRStep
import re
import sys
import time
WORD_RE = re.compile(r"[\w']+")  # NOTE(review): defined but unused in this module
class MR_euler_tour(MRJob):
    """MRJob that checks the Euler-tour degree condition on an edge list:
    every node appearing in the input must have even degree."""
    # map: emit each whitespace-separated node token with a count of 1
    # (one emission per edge endpoint)
    def mapper(self, key, line):
        for elem in line.split():
            yield elem, 1
    # reduce: total the occurrences (= degree) of each node. If any node has
    # odd degree, print the reason and abort the whole job via sys.exit();
    # otherwise emit (node, degree).
    def reducer(self, key, values):
        v = [v for v in values]
        if sum(v) % 2 != 0:
            print ("Node%s has odd degree of %d" % (key, sum(v)))
            sys.exit()
        yield key, sum(v)
        del v[:]  # free the buffered values once the result has been consumed
## Run the MRJob
if __name__ == '__main__':
    MR_euler_tour.run()
# Reached only if no reducer called sys.exit(), i.e. every node had even
# degree (the Eulerian-circuit condition). Python 2 print statement.
print "The graph has Euler Tour"
| true |
a418ecb7a50141d8fa026750ea34aca2d9d21eae | Python | sih2020admin/NM402_Sambhav | /pickl.py | UTF-8 | 161 | 2.609375 | 3 | [] | no_license | import pickle
# Demonstrate a pickle round trip: serialise a small dict, then restore it.
d = {1: "hi", 2: "there"}
msg = pickle.dumps(d)
# msg = bytes(f"{len(msg):<{HEADERSIZE}}", 'utf-8')+msg
print(msg)
print()
print(pickle.loads(msg))
| true |
0f20f55b45f553c1b0ed1a4d9822a70cb63d5b6a | Python | aanand01762/Self-Practice | /python/minimum_swap_2.py | UTF-8 | 914 | 3.875 | 4 | [] | no_license | # https://www.hackerrank.com/challenges/minimum-swaps-2/problem
def minimumSwaps(arr):
    """Return the minimum number of swaps needed to sort *arr*, a
    permutation of 1..n, into ascending order. Sorts *arr* in place.
    """
    n = len(arr)
    # position[v-1] == current index of value v within arr
    position = [0] * n
    for idx, value in enumerate(arr):
        position[value - 1] = idx
    swaps = 0
    for idx in range(n):
        target = idx + 1              # the value that belongs at this slot
        if arr[idx] == target:
            continue
        src = position[idx]           # where `target` currently lives
        # move the occupant of this slot out to `src`, put `target` here
        arr[src] = arr[idx]
        arr[idx] = target
        # keep the position table in sync with both moves
        position[arr[src] - 1] = src
        position[idx] = idx
        swaps += 1
    return swaps
| true |
58279633bc7ffe639f3f476bfc85b6b027927ddc | Python | elliottwarren/ClearFO_paper1 | /scripts/mod_obs_stats_plot.py | UTF-8 | 14,327 | 2.921875 | 3 | [] | no_license | """
Script to do all the stats to the FO output. Correlations first...
Created by Elliott Thur 27th Oct 2016
"""
import matplotlib.pyplot as plt
from matplotlib.dates import date2num
from matplotlib.dates import DateFormatter
import numpy as np
import datetime as dt
from scipy.stats import spearmanr
import ellUtils as eu
from forward_operator import FOUtils as FO
from forward_operator import FOconstants as FOcon
def dateList_to_datetime(dayList):
    """Convert a list of 'YYYYMMDD' strings into datetime.datetime objects."""
    return [dt.datetime(int(s[0:4]), int(s[4:6]), int(s[6:8])) for s in dayList]
def stats_dic(site_bsc, mbe_limit_max, mbe_limit_step):
    """
    Build the empty statistics container, keyed by short ceilometer site id.

    Layout:
    statistics[site]['r'] = [...]
    statistics[site]['p'] = [...]
    statistics[site]['MBE'] = {'0-500': [], '500-1000': [], ...}
    statistics[site]['time'] = [...]

    Fix: iterates the dict directly instead of py2-only ``.iterkeys()``
    (identical behaviour, portable to Python 3).
    """
    # lower edges of each MBE height band (the last arange value is the cap)
    lower_edges = np.arange(0, mbe_limit_max + mbe_limit_step, mbe_limit_step)[:-1]
    # '<lower>-<upper>' label per band, matching the ranges
    band_labels = np.array(
        ['%d-%d' % (low, low + mbe_limit_step) for low in lower_edges]
    )
    statistics = {}
    for site in site_bsc:
        # short site id: the last '_'-separated token of the full site name
        site_id = site.split('_')[-1]
        statistics[site_id] = {
            'r': [],
            'p': [],
            'time': [],
            'MBE': {label: [] for label in band_labels},
        }
    return statistics
def unique_pairs(obs_idx, diff):
    """
    Find the index range that excludes duplicate pairings at the array ends,
    keeping the pair with the smallest height difference.

    :param obs_idx: nearest obs gate index per model level (may repeat at the ends)
    :param diff: height difference for each pair (same length as obs_idx)
    :return: unique_pairs_range (positions into obs_idx / mod_idx)

    At this point, the two arrays are like:
    obs_idx = [0, 0, 0, 1, 3, 5, .... 769, 769, 769]
    mod_idx = [0, 1, 2, 3, 4, 4, .... 67, 68, 69 ]
    By finding the unique pairs index array for obs_idx, the same array can be used
    on the mod_idx, as they are already paired up and of equal lengths.
    """

    # 1. remove start duplicates
    # -------------------------------
    duplicates = np.where(obs_idx == obs_idx[0])[0]  # find duplicates
    if len(duplicates) > 1:
        lowest_diff = np.argmin(abs(diff[duplicates]))  # smallest height difference
        pairs_idx_start = duplicates[lowest_diff]  # start pairing at this point
    else:
        pairs_idx_start = 0

    # 2. remove end duplicates
    # -------------------------------
    duplicates = np.where(obs_idx == obs_idx[-1])[0]  # find duplicates
    if len(duplicates) > 1:
        lowest_diff = np.argmin(abs(diff[duplicates]))  # smallest height difference
        pairs_idx_end = duplicates[lowest_diff]  # end pairing at this point
    else:
        # BUG FIX: this used to be len(obs_idx), making the inclusive range
        # below run one past the last element and raising IndexError at the
        # caller whenever the final obs index was unique.
        pairs_idx_end = len(obs_idx) - 1

    # inclusive range extracting the unique pairs
    unique_pairs_range = np.arange(pairs_idx_start, pairs_idx_end + 1)

    return unique_pairs_range
def plot_correlations(savedir, model_type, statistics, corr_max_height):
    """
    Plot the Spearman correlation time series (one line per site) and save
    a PNG under <savedir>/correlations/.

    :param savedir: base figure directory
    :param model_type: e.g. 'UKV'; used in the filename
    :param statistics: dict from stats_dic(), with 'r' and 'time' filled in
    :param corr_max_height: pairing height cap [m]; used in the filename
    :return: fig
    """
    fig = plt.figure(figsize=(8, 3.5))
    ax = plt.subplot2grid((1, 1), (0, 0))
    # one correlation trace per site (py2: .iteritems())
    for site, data in statistics.iteritems():
        plt.plot_date(data['time'], data['r'], label=site, linewidth=1, fmt='-')
    # plot reference line to show where profile lies
    #ax.plot_date([statistics['RGS']['time'][24], statistics['RGS']['time'][24]], [-1, 1], color='black', ls='--', fmt='--')
    #ax.plot_date([statistics['RGS']['time'][49], statistics['RGS']['time'][49]], [-1, 1], color='black', ls='--', fmt='--')
    # prettify
    # NOTE(review): `data` below is the loop variable leaking out of the for
    # loop above, i.e. the axis limits come from whichever site iterated last.
    # fig.suptitle(data['time'][0].strftime("%Y%m%d") + '-' + data['time'][-1].strftime("%Y%m%d"), fontsize=12)
    ax.set_xlim([data['time'][0], data['time'][-1]])
    ax.set_ylim([0.2, 1])
    ax.set_xlabel('Time [DD/ HH:MM]')
    ax.set_ylabel(r'$Spearman \/\/\rho \/\/correlation$')
    ax.xaxis.set_major_formatter(DateFormatter('%d/ %H:%M'))
    ax.legend(loc='best', fontsize=8)
    plt.savefig(savedir +'correlations/' +
                model_type + '_SpearCorrTs_' +
                data['time'][0].strftime("%Y%m%d") + '-' + data['time'][-1].strftime("%Y%m%d")+'_'+
                str(corr_max_height) + 'm.png') # filename
    return fig
def plot_mbe(savedir, model_type, statistics, height_groups_order, site_bsc_colours):
    """
    Plot the mean bias error (MBE) statistics: one subplot per height band,
    one coloured line per site; saved under <savedir>/mbe/.

    :param savedir: base figure directory
    :param model_type: e.g. 'UKV'; used in the filename
    :param statistics: dict from stats_dic(), with 'MBE' and 'time' filled in
    :param height_groups_order: band labels matching statistics[site]['MBE'] keys
    :param site_bsc_colours: site -> matplotlib colour mapping
    :return: fig
    """
    fig, ax = plt.subplots(4, 1, figsize=(10, 5))
    # plot mbe data (py2: .iteritems())
    for p, hg in zip(ax, height_groups_order):
        for site, data in statistics.iteritems():
            p.plot_date(date2num(data['time']),
                        data['MBE'][hg],
                        label=site, linewidth=1, color=site_bsc_colours[site], fmt='-')
        # prettify - plot specific and done once the data is plotted
        # NOTE(review): `data` is the last-iterated site's entry here.
        p.set_xlim([data['time'][0], data['time'][-1]])
        eu.add_at(p, hg + ' m', loc=4)
        #p.set_ylim([-1.8, 1.8])
        #p.yaxis.set_ticks(np.arange(1.5, -2.5, -1))
    # reference lines (zero-bias line on every subplot)
    for p in np.arange(len(ax)):
        time = statistics['RGS']['time']
        ax[p].plot_date(date2num([statistics['RGS']['time'][0], statistics['RGS']['time'][-1]]), [0, 0],
                        color='black', ls='--', fmt='-')
        # turn off the x axis labels so only the bottom one plots them
        # NOTE(review): `p < len(ax)` is always true, so this also clears the
        # bottom subplot's labels before the formatter is applied below.
        if p < len(ax):
            ax[p].set_xticklabels([])
    # prettify - overall
    ax[0].legend(fontsize=8, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
    # setup a figure wide axis for labels
    ax0 = eu.fig_majorAxis(fig)
    # NOTE(review): the first set_ylabel is immediately overwritten by the
    # LaTeX version on the next line.
    ax0.set_ylabel('Difference (log10(model) - log10(obs))')
    ax0.set_ylabel(r'$Difference \/\mathrm{(log_{10}(\beta_m) - log_{10}(\beta_o))}$')
    ax0.set_xlabel('Time [HH:MM]')
    ax[-1].xaxis.set_major_formatter(DateFormatter('%H:%M'))
    plt.setp(ax[-1].get_xticklabels())
    plt.tight_layout(h_pad=0.2)  # need to modify the padding to improve the plot
    fig.subplots_adjust(right=0.8)
    plt.savefig(savedir + 'mbe/' +
                model_type + '_meanBiasError_sameSites.png')  # filename
    return fig
def main():
    """Pair UKV forward-operator backscatter with ceilometer observations for
    each day in `daystrList`, accumulate Spearman-r and/or height-banded MBE
    statistics, and plot them.

    Side effects: reads model/ceilometer files under `datadir`, writes
    figures under `savedir`. Python 2 module (.iteritems(), print statement).
    """
    # ==============================================================================
    # Setup
    # ==============================================================================
    # which modelled data to read in
    model_type = 'UKV'
    res = FOcon.model_resolution[model_type]
    # directories
    maindir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/clearFO/'
    datadir = maindir + 'data/'
    savedir = maindir + 'figures/' + model_type + '/highPmCase/'
    # data
    ceilDatadir = datadir + 'L1/'
    modDatadir = datadir + model_type + '/'
    rhDatadir = datadir + 'L1/'
    aerDatadir = datadir + 'LAQN/'
    # instruments and other settings
    site_bsc = FOcon.site_bsc
    site_rh = FOcon.site_rh
    site_aer = FOcon.site_aer
    site_bsc_colours = FOcon.site_bsc_colours
    # day start and end of the MAIN DAYS, inclusively(forecast day + 1)
    # dayStart = dt.datetime(2016, 05, 04)
    # dayEnd = dt.datetime(2016, 05, 06)
    daystrList = ['20160119']
    days_iterate = dateList_to_datetime(daystrList)
    # statistics to run (toggles for the two analysis branches below)
    stats_corr = False
    stats_mbe = True
    # mbe ranges
    mbe_limit_step = 500
    mbe_limit_max = 2000
    # correlation max height
    corr_max_height = 2000
    # calculate the height groups and matching strings
    mbe_height_limits = np.arange(0, mbe_limit_max + mbe_limit_step, mbe_limit_step)
    height_groups_order = np.array([str(i) + '-' + str(i + mbe_limit_step) for i in mbe_height_limits[:-1]])
    # set up statistics dictionary
    statistics = stats_dic(site_bsc, mbe_limit_max, mbe_limit_step)
    # ==============================================================================
    # Read data
    # ==============================================================================
    # 1. Read Ceilometer metadata
    # ----------------------------
    ceil_metadata = FO.read_ceil_metadata(datadir, loc_filename='CeilsCSVclearFO.csv')
    # # datetime range to iterate over
    # days_iterate = eu.date_range(dayStart, dayEnd, 1, 'days')
    for day in days_iterate:
        # 1. Read UKV forecast in
        # -----------------------
        # extract MURK aerosol and calculate RH for each of the sites in the ceil metadata
        # (can be different locations to sites_bsc)
        # reads all london model data, extracts site data, stores in single dictionary
        mod_data = FO.mod_site_extract_calc(day, ceil_metadata, modDatadir, model_type, res, 910)
        # 2. Read ceilometer backscatter
        # --------------------------------
        bsc_obs = FO.read_ceil_obs(day, site_bsc, ceilDatadir, mod_data)
        # ==============================================================================
        # Process modelled data
        # ==============================================================================
        # requires model data to be at ceilometer location!
        for site, bsc_site_obs in bsc_obs.iteritems():
            # short site id that matches the model id
            site_id = site.split('_')[-1]
            # get the nearest ceilometer height gate to each model level
            # obs_idx = ALL nearest gate idx
            # mod_idx = idx of the model height that each obs_idx are paired to
            a = np.array([eu.nearest(bsc_site_obs['height'], i)for i in mod_data[site_id]['level_height']])
            values = a[:, 0]
            obs_idx = np.array(a[:, 1], dtype=int)
            diff = a[:, 2]
            mod_idx = np.arange(len(mod_data[site_id]['level_height']))  # mod_idx should be paired with obs_idx spots.
            # Trim off the ends of obs_idx, as UKV and obs z0 and zmax are different, leading to the same gate matching multiple ukvs
            # assumes no duplicates in the middle of the arrays, just at the end
            # At this point, variables are like:
            # obs_idx = [0, 0, 0, 1, 3, 5, .... 769, 769, 769]
            # mod_idx = [0, 1, 2, 3, 4, 4, .... 67, 68, 69 ]
            unique_pairs_range = unique_pairs(obs_idx, diff)
            # ALL unique pairs
            # Use these to plot correlations for all possible pairs, regardless of height
            obs_unique_pairs = obs_idx[unique_pairs_range]
            mod_unique_pairs = mod_idx[unique_pairs_range]
            values_unique_pairs = values[unique_pairs_range]
            diff_unique_pairs = diff[unique_pairs_range]
            # ~~~~~~~~~~~~~~~~~~~~ #
            # Remove pairs where obs is above 2000 m.
            # hc = height cut
            hc_unique_pairs_range = np.where(values_unique_pairs <= corr_max_height)[0]
            # trim off unique pairs that are above the maximum height
            obs_hc_unique_pairs = obs_unique_pairs[hc_unique_pairs_range]
            mod_hc_unique_pairs = mod_unique_pairs[hc_unique_pairs_range]
            pairs_hc_unique_values = values_unique_pairs[hc_unique_pairs_range]
            pairs_hc_unique_diff = diff_unique_pairs[hc_unique_pairs_range]
            # statistics, per model timestep
            for t in np.arange(len(mod_data[site_id]['time'])):
                # extract out all unique pairs below the upper height limit
                obs_x = bsc_site_obs['backscatter'][t, obs_hc_unique_pairs]
                mod_y = mod_data[site_id]['backscatter'][t, mod_hc_unique_pairs]
                # store time
                statistics[site_id]['time'] += [mod_data[site_id]['time'][t]]
                if stats_corr == True:
                    # correlate and store
                    # if number of remaining pairs is too low, set r and p to nan
                    try:
                        r, p = spearmanr(np.log10(obs_x), np.log10(mod_y), nan_policy='omit')
                    except:
                        r = np.nan
                        p = np.nan
                    statistics[site_id]['r'] += [r]
                    statistics[site_id]['p'] += [p]
                if stats_mbe == True:
                    # arrays of idx for each group
                    # list type to retain plotting order
                    height_groups_idx = [np.where((pairs_hc_unique_values >= i) &
                                                  (pairs_hc_unique_values < i + mbe_limit_step))[0]
                                         for i in mbe_height_limits[:-1]]
                    # calculate mbe for each height group
                    for i in np.arange(len(height_groups_idx)):
                        # further divide the data based on the current height group (hg)
                        obs_x_hg = obs_x[height_groups_idx[i]]
                        mod_y_hg = mod_y[height_groups_idx[i]]
                        statistics[site_id]['MBE'][height_groups_order[i]] += \
                            [np.nanmean(np.log10(mod_y_hg) - np.log10(obs_x_hg))]
    # ==============================================================================
    # Plotting
    # ==============================================================================
    # After all day's stats are done
    if stats_corr == True:
        fig = plot_correlations(savedir, model_type, statistics, corr_max_height)
    if stats_mbe == True:
        fig = plot_mbe(savedir, model_type, statistics, height_groups_order, site_bsc_colours)
    print 'END PROGRAM'
    plt.close('all')
    return
if __name__ == '__main__':
    main()
51ec7b207a29d9ce2d1466c1833f2e9a4113fccb | Python | wangyu190810/python-skill | /thread_queue/sample_thread_fetch_url_lock.py | UTF-8 | 1,329 | 3.078125 | 3 | [] | no_license | # -*-coding:utf-8-*-
import threading
import urllib2
class FetchUrls(threading.Thread):
    """Worker thread that downloads a list of URLs and appends each response
    body to a file object shared with other threads.

    The output file is shared by every worker, so writes are serialised
    with ``lock``.
    """
    def __init__(self,urls,output,lock):
        threading.Thread.__init__(self)
        self.urls=urls        # URLs to fetch; consumed destructively via pop()
        self.output = output  # shared writable file object
        #self.name = None
        self.lock = lock      # serialises writes to the shared output file
    def run(self):
        """Task executed in the thread: fetch every URL and write the bodies.

        (Original docstring was Chinese: "the task that runs in the thread;
        work placed here is executed when the thread starts".)
        """
        while self.urls:
            url = self.urls.pop()
            req = urllib2.Request(url)
            try:
                d = urllib2.urlopen(req)
            except urllib2.URLError as e:
                print("URL %s failed:%s"%(url,e.reason))
                # BUG FIX: on failure 'd' is undefined (first iteration) or a
                # stale response from a previous URL -- skip the write.
                continue
            self.lock.acquire()
            print("lock acquire")
            try:
                self.output.write(d.read())
                #self.name = threading.currentThread()
                print('write done by %s' % self.name)
                print('URL %s fetched by %s'%(url,self.name))
            finally:
                # BUG FIX: release in a finally block so a failed read()/write()
                # cannot leave the lock held forever and deadlock the peers.
                self.lock.release()
def main():
    """Launch two downloader threads that share one output file and a lock,
    then wait for both to finish."""
    lock = threading.Lock()
    batches = [["https://baidu.com", "http://360.com"],
               ["http://hao123.com", "http://22too.com"]]
    sink = open("output.txt", "w+")
    workers = [FetchUrls(batch, sink, lock) for batch in batches]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    sink.close()
if __name__ == "__main__":
    main()
| true |
421ef55a8e87f6fc48ceaa65c7fa012baabde5c6 | Python | RobotNo42/old_coed | /project/python_fullstack/day10/grep.py | UTF-8 | 230 | 2.515625 | 3 | [] | no_license | import os
def search():
    """Coroutine: repeatedly receive a directory path via send() and print
    every file found beneath it as "<dirpath>/<filename>"."""
    while True:
        root = yield
        for dirpath, _dirnames, filenames in os.walk(root):
            for filename in filenames:
                print("%s/%s" % (dirpath, filename))
# Prime the coroutine: the first next() advances it to the first `yield`
# so that send() below can deliver a directory path.
g = search()
next(g)
g.send('d:/python') | true |
a12c25be22177d78e1bec0b264eb5281ea1653f0 | Python | Azhar1256/Parenthesis-Balancing-using-Stack | /Your program will determine whether the open brackets (the square brackets, curly braces and the parentheses) are closed in the correct order by using linked list based stack.py | UTF-8 | 2,138 | 3.671875 | 4 | [
"MIT"
] | permissive | Task02
class Node:
    """One cell of the linked-list stack: a stored character plus a link down."""
    def __init__(self, value):
        self.value = value  # the character held by this cell
        self.ref = None     # the node beneath this one (None at the bottom)
class Stack:
    """Linked-list stack of open brackets used to validate an expression.

    `head` and `s` start as class attributes, but push()/pop() always rebind
    them through `self`, so each instance carries its own state; still, use a
    fresh Stack per expression.
    """
    head=None  # top node of the stack
    s=0        # number of elements currently on the stack
    def push(self,data):
        """Place `data` on top of the stack."""
        self.s+=1
        if self.head==None:
            self.head=Node(data)
        else:
            n = Node(data)
            n.ref=self.head
            self.head=n
    def peek(self):
        """Return (without removing) the top element."""
        return self.head.value
    def pop(self):
        """Remove the top element; prints 'Underflow' on an empty stack."""
        if self.s==0:
            print("Underflow")
            return
        self.s-=1
        temp=self.head
        self.head=self.head.ref
        temp.value=None
        temp.ref=None
    def findout(self,word):
        """Scan `word` and report whether its brackets []{}() are balanced.

        Prints a verdict; on failure also prints the 1-based position of the
        offending bracket and stops at the first error.
        """
        positionArray=[]               # 1-based positions of brackets still open
        khulo = ["[","{","("]          # opening brackets
        bondho_koro = ["]","}",")"]    # matching closing brackets
        for i in range(len(word)):
            if word[i] in khulo:
                self.push(word[i])
                positionArray+=[i+1]
            elif word[i] in bondho_koro:
                loc = bondho_koro.index(word[i])
                if ((self.s > 0) and (khulo[loc] == self.peek())):
                    self.pop()
                    # BUG FIX: the original never removed matched positions, so
                    # positionArray[self.s-1] could point at an already-closed
                    # bracket (e.g. "(()[" reported position 2 instead of 4).
                    # Keep the position stack in lock-step with the brackets.
                    positionArray.pop()
                elif (self.s > 0) and (khulo[loc] != self.peek()):
                    print("This expression is NOT correct.")
                    print('Error at character #',positionArray[self.s-1],".'" ,self.peek(),"'-is not closed",sep='')
                    return
                else:
                    print("This expression is NOT correct.")
                    print('Error at character #',i+1,".'" ,word[i],"'-is not opened",sep='')
                    return
        if self.s == 0:
            print("This expression is correct.")
            return
        else:
            print("This expression is NOT correct.")
            print('Error at character #',positionArray[self.s-1],".'" ,self.peek(),"'-is not closed",sep='')
            return
# Smoke tests: each expression gets a fresh Stack so leftover state from a
# previous (possibly aborted) scan cannot leak into the next check. The
# en-dash characters inside the strings are ordinary text and are ignored.
st = Stack()
a=st.findout('1+2*(3/4)')
print()
st = Stack()
a=st.findout('1+2*[3*3+{4–5(6(7/8/9)+10)–11+(12*8)]+14')
print()
st = Stack()
a=st.findout('1+2*[3*3+{4–5(6(7/8/9)+10)}–11+(12*8)/{13+13}]+14')
print()
st = Stack()
a=st.findout('1+2]*[3*3+{4–5(6(7/8/9)+10)–11+(12*8)]+14')
| true |
c92c863a699d5a75df9d4e4e1a6f5b5c06575ed5 | Python | ArinMangal12/Python-Random-Number-Guess-game | /Game.py | UTF-8 | 740 | 3.9375 | 4 | [] | no_license | import random
# --> random number guess game with storing high score every time you break high score
# Pick the secret once; the player loops until they guess it exactly.
randNo = random.randint(0, 100)
userGuess = None
guesses = 0
while userGuess != randNo:
    userGuess = int(input("Enter your number: \n"))
    guesses += 1
    if userGuess == randNo:
        print("Yes, You guessed right!!")
    elif userGuess > randNo:
        print("you guessed it wrong, Guess something small")
    else:
        print('you guessed it wrong, Guess something big')
print(f"all the guesses are {guesses}")
# "High score" here means the FEWEST guesses recorded so far.
# NOTE(review): open() raises FileNotFoundError if hiscore.txt does not exist
# yet -- the file must be seeded by hand before the first run.
with open("hiscore.txt", "r") as f:
    hiscore = int(f.read())
    if (guesses < hiscore):
        print("hurray you just broke the high score!!")
        # Re-opening the same file for writing inside the read context is safe
        # here because the read above has already completed.
        with open("hiscore.txt", "w") as f:
            f.write(str(guesses))
| true |
465640f030ea732e937feb34f1e11c0dd110c2bf | Python | awick1/apcsp | /files/advScrabbleCalc.py | UTF-8 | 2,156 | 4.625 | 5 | [] | no_license | #values sets a point value to each letter of the alphabet
# Scrabble tile values; upper- and lower-case letters are listed separately so
# lookups need no case conversion. Any non-letter character is absent and
# would raise KeyError when looked up.
values = {"a": 1, "b": 3, "c": 3, "d": 2, "e": 1, "f": 4, "g": 2, "h": 4, "i": 1,
          "j": 8,"k": 5, "l": 1, "m": 3, "n": 1, "o": 1, "p": 3, "q": 10, "r": 1,
          "s": 1, "t": 1, "u": 1, "v": 4, "w": 4, "x": 8, "y": 4, "z": 10,
          "A": 1, "B": 3, "C": 3, "D": 2, "E": 1, "F": 4, "G": 2, "H": 4, "I": 1,
          "J": 8,"K": 5, "L": 1, "M": 3, "N": 1, "O": 1, "P": 3, "Q": 10, "R": 1,
          "S": 1, "T": 1, "U": 1, "V": 4, "W": 4, "X": 8, "Y": 4, "Z": 10}
def score(): #score is a function that will run given one parameter (the word you want to find the score of)
    # NOTE(review): despite the comment above, score() takes no parameter --
    # it reads the word interactively (Python 2 raw_input) instead.
    word = str(raw_input("What is your scrabble word? ")) #this asks for the user to input their word and sets it as a string to the variable word
    total = 0 #total initializes total to 0, this is important because it will reset the value everytime you call the function.
    for letter in word: #for loop will find take each letter in your parameter (word) and get that value
        total += values[letter] #this is inside the loop and will take the value of each letter and add it to the total
    print total #this returns the total so that you can print it
    another = str(raw_input("Do you have another word? Y or N: ")) #will allow you to continue running the program or stop the program
    while another=="Y" or another=="y": #if user enters y then they will be allowed to calculate the total for another word
        score()
        # NOTE(review): each extra word recurses instead of looping; 'another'
        # never changes in this frame, so after the recursive call returns the
        # while condition is still true and score() is invoked again.
    else: #if a user enters n then it will tell them "thanks for playing" and stop the program
        end()
def end():
    # Farewell message; reached when the player answers anything but Y/y.
    print "Thank you for playing!"
score() #calling the function allows for the program to run on start | true |
d75703baa066e82dcd57f7f17e05590bdc7c7abb | Python | yongil1222/Python_Study | /PyGame/Ex1.Draw.py | UTF-8 | 940 | 3 | 3 | [] | no_license | import pygame
pygame.init()
BLACK = (0,0,0)
WHITE = (255,255,255)
BLUE = (0,0,255)
GREEN = (0,255,0)
RED = (255,0,0)
size = [400,300]
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Game Title")
done = False
clock = pygame.time.Clock()
while not done:
clock.tick(10)
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
screen.fill(WHITE)
pygame.draw.polygon(screen, GREEN, [[30,150], [125,100], [220,150]], 5)
pygame.draw.polygon(screen, GREEN, [[30,150], [125,100], [220,150]], 0)
pygame.draw.lines(screen, RED, False, [[50,150], [50,250], [200,250], [200,150]], 5)
pygame.draw.rect(screen, BLACK, [75, 175, 75, 50], 5)
pygame.draw.rect(screen, BLUE, [75, 175, 75, 50], 0)
pygame.draw.line(screen, BLACK, [112,175], [112,225], 5)
pygame.draw.line(screen, BLACK, [75,200], [150,200], 5)
pygame.display.flip()
pygame.quit()
| true |
df38c3ac794e7edd46658c3f62cd1e4a8eaabbd1 | Python | XinchaoGou/MyLeetCode | /299. Bulls and Cows.py | UTF-8 | 503 | 2.96875 | 3 | [
"MIT"
] | permissive | class Solution:
def getHint(self, secret: str, guess: str) -> str:
res =""
cnt_A = 0
cnt_B = 0
array = [0] * 10
for i in range(len(secret)):
s = int(secret[i])
g = int(guess[i])
if s == g:
cnt_A += 1
else:
array[s] += 1
array[g] -= 1
cnt_B = len(secret) - cnt_A - sum(list(filter(lambda x: x > 0, array)))
return str(cnt_A)+"A" + str(cnt_B) +"B" | true |
ea6e2c997261137d85416efb606d6121159b9fca | Python | AnshulP10/Machine-Learning | /logisticRegression.py | UTF-8 | 1,750 | 2.78125 | 3 | [] | no_license | # Load libraries
import numpy as np
import pandas
from sklearn import model_selection
# Load dataset
url = "https://raw.githubusercontent.com/jbrownlee/Datasets/master/iris.csv"
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
dataset = pandas.read_csv(url, names=names)
# Split-out validation dataset
array=dataset.values
X = array[:,0:4]  # the four numeric feature columns
Y = array[:,4]    # the class column
# NOTE(review): Y holds the iris class labels as read from the CSV; the
# log-loss code below assumes numeric 0/1 labels -- confirm an encoding
# step is intended before running.
validation_size = 0.20
seed = 7
X_train, X_validation, Y_train, Y_validation = model_selection.train_test_split(X, Y, test_size=validation_size, random_state=seed)
# Initialise weights
numFeat=X_train.shape[1]
w=np.zeros((1, numFeat))  # weight row vector, one entry per feature
b=0  # bias term
i=0  # iteration counter placeholder
# Def
def sigmoid(num):
    """Logistic function: squash a scalar or ndarray into the interval (0, 1)."""
    decay = np.exp(-num)
    return 1 / (1 + decay)
def costFun(w, b, X, Y):
    """Binary cross-entropy (log-loss) of the logistic model on (X, Y).

    w: (1, n_features) weight row vector; b: scalar bias.
    X: (n_examples, n_features); Y: 0/1 label vector broadcastable against
    the (1, n_examples) prediction row.
    """
    numEx=X.shape[0]
    new=sigmoid(np.dot(w, X.T)+b)  # predicted probabilities, shape (1, numEx)
    # BUG FIX: the original divided by an undefined name `m` (NameError) and
    # negated the first log term -- i.e. it computed
    # (1/m)*sum(Y*log(p)) - (1/m)*sum((1-Y)*log(1-p)) instead of the standard
    # loss -(1/m)*sum(Y*log(p) + (1-Y)*log(1-p)).
    cost = (-1 / numEx) * np.sum(Y.T * np.log(new) + (1 - Y.T) * np.log(1 - new))
    return cost
def findGrad(w, b, X, Y):
    """Gradients of the log-loss with respect to w and b.

    Returns {'dw': (n_features, 1) array, 'db': scalar}, averaged over the
    numEx training examples.
    """
    numEx=X.shape[0]
    new=sigmoid(np.dot(w, X.T)+b)  # predictions, shape (1, numEx)
    # BUG FIX: the original divided by an undefined name `m` (NameError);
    # the averaging denominator is the number of examples, numEx.
    dw = (1/numEx)*(np.dot(X.T, (new-Y.T).T))
    db = (1/numEx)*(np.sum(new-Y.T))
    grads={'dw':dw, 'db':db}
    return grads
def update(w, b, X, Y, lr):
    """Take one gradient-descent step of size lr; return the new parameters
    together with the gradients that produced them."""
    grads = findGrad(w, b, X, Y)
    dw, db = grads['dw'], grads['db']
    return {'w': w - lr * dw, 'b': b - lr * db, 'dw': dw, 'db': db}
def findWeight(w, b, X, Y, lr):
    """Run gradient descent until every gradient component is tiny.

    Prints the current cost every 100 iterations and returns {'w': w, 'b': b}.
    """
    MOE=0.00001  # margin of error: convergence threshold on the gradients
    # BUG FIX: the original tested `dw > MOE` before dw/db were ever assigned
    # (NameError) and incremented the module-level `i` without declaring it
    # global (UnboundLocalError). Seed the gradients from an initial
    # evaluation -- which the original computed but never unpacked -- and
    # keep the step counter local.
    grads=findGrad(w, b, X, Y)
    dw = grads['dw']
    db = grads['db']
    i = 0
    # `dw` is an array, so reduce it with np.any(); compare magnitudes since
    # gradients can be negative.
    while np.any(np.abs(dw) > MOE) or abs(db) > MOE:
        i=i+1
        if i%100 == 0:
            # Report progress on the data actually being fitted (the original
            # reached for the globals X_train/Y_train here).
            cost=costFun(w, b, X, Y)
            print(cost)
        prop=update(w, b, X, Y, lr)
        w=prop['w']
        b=prop['b']
        dw=prop['dw']
        db=prop['db']
    finalWeights={'w':w, 'b':b}
    return finalWeights
# Fit on the training split with learning rate 0.01, then unpack the result.
finalWeights=findWeight(w, b, X_train, Y_train, 0.01)
finW=finalWeights['w']
finB=finalWeights['b']
| true |
37476718745d56881560607f19dbb4adc74304df | Python | ravishankarramakrishnan/SDS_ML_PY_R | /Part 1 - Data Preprocessing/DataPreprocessing_Template.py | UTF-8 | 1,336 | 3.234375 | 3 | [] | no_license | # Data Preprocessing
# Importing the Libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the Dataset
dataset = pd.read_csv('Data.csv')
X = dataset.iloc[:,:-1].values  # features: every column except the last
Y = dataset.iloc[:,3].values    # target: the fourth column
np.set_printoptions(threshold = np.nan) # If you cant see full array list
# Taking care of missing data
# NOTE(review): sklearn.preprocessing.Imputer and sklearn.cross_validation
# below are legacy import paths (modern scikit-learn ships
# sklearn.impute.SimpleImputer and sklearn.model_selection instead) --
# confirm the pinned scikit-learn version before running.
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values = 'NaN',strategy = 'mean',axis = 0)
imputer = imputer.fit(X[:,1:3])         # fit on columns 1-2 only...
X[:,1:3] = imputer.transform(X[:,1:3])  # ...then fill their NaNs with column means
# Encoding Categorical Data
# Encoding Independent Variable
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X = LabelEncoder()
X[:,0] = labelencoder_X.fit_transform(X[:,0])
onehotencoder = OneHotEncoder(categorical_features = [0])
X = onehotencoder.fit_transform(X).toarray()
# Encoding Dependent Variable
labelencoder_Y = LabelEncoder()
Y = labelencoder_Y.fit_transform(Y)
# Splitting the Dataset into Training set & Test Set
from sklearn.cross_validation import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size = 0.2, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)  # fit the scaler on the training split only...
X_test = sc_X.transform(X_test)        # ...and reuse it on test to avoid leakage
| true |
163908515a383418b3a93d5be38d297e0751081d | Python | ewjoachim/colorsnip | /colorsnip.py | UTF-8 | 2,613 | 2.8125 | 3 | [
"MIT"
] | permissive | """
Colorsnip is provided under the MIT License:
Copyright (c) 2018, Joachim Jablon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Usage: colorsnip [{filename,-}, ...]
Reads all provided files, syntax highlight them and copy that
to your MacOS clipboard
"""
import platform
import sys
import pygments
import pygments.lexers
import pygments.formatters
def main():
    """CLI entry point: highlight the file named by the first argument."""
    colorsnip(sys.argv[1])
def copy(text, html, rtf):
    """Put the snippet on the clipboard via the platform-specific helper."""
    on_macos = platform.system() == "Darwin"
    if on_macos:
        copy_richxerox(text, html, rtf)
    else:
        copy_klembox(text, html)
def copy_richxerox(text, html, rtf):
    # macOS path: richxerox (third-party) places plain-text, HTML and RTF
    # flavours on the pasteboard in one call.
    import richxerox
    richxerox.copy(text, html=html, rtf=rtf)
def copy_klembox(text, html):
    """Copy text + HTML flavours to the clipboard on non-macOS platforms."""
    # BUG FIX: the module was imported as 'klembox' but used as 'klembord',
    # so this function raised NameError on every call; 'klembord' is the
    # actual package name.
    import klembord
    klembord.set_with_rich_text(text, html)
def colorsnip(filename):
    """Read `filename` (or stdin when "-"), syntax-highlight it with Pygments
    using the monokai style, and place text/HTML/RTF flavours on the
    clipboard via copy()."""
    if filename == "-":
        contents = sys.stdin.read()
        # stdin has no usable name, so let Pygments inspect the content too.
        lexer = pygments.lexers.guess_lexer_for_filename(
            filename, contents)
    else:
        with open(filename) as fh:
            contents = fh.read()
        lexer = pygments.lexers.get_lexer_for_filename(filename)
    html_formatter = pygments.formatters.get_formatter_by_name(
        "html",
        style="monokai")
    # NOTE(review): fontsize=60 presumably follows Pygments' RTF convention
    # of half-points (i.e. 30 pt) -- confirm against the RtfFormatter docs.
    rtf_formatter = pygments.formatters.get_formatter_by_name(
        "rtf",
        style="monokai",
        fontface="Menlo",
        fontsize=60)
    html = pygments.highlight(
        code=contents,
        lexer=lexer,
        formatter=html_formatter)
    rtf = pygments.highlight(
        code=contents,
        lexer=lexer,
        formatter=rtf_formatter)
    copy(text=contents, html=html, rtf=rtf)
# Run only when executed as a script (not when imported as a module).
if __name__ == '__main__':
    main()
| true |
03693c54385ee9a8730adcc9e0f8a11b347d537a | Python | abhi55555/dsPrograms | /remove_char.py | UTF-8 | 271 | 4.09375 | 4 | [] | no_license |
def removeChar(string, character):
    """Print `string` with every occurrence of `character` removed, and also
    return the cleaned string so callers can use the result.

    The original rebuilt the string via repeated list.remove() calls
    (quadratic); str.replace() removes all occurrences in one linear pass.
    """
    cleaned = string.replace(character, '')
    print(cleaned)
    return cleaned
# Demo: strip every 'd' from the sample sentence (prints "I am a isco ancer").
s = "I am a disco dancer"
removeChar(s, 'd')
| true |
0fd036a740dbf39dfcd21caac237d48512785044 | Python | RidaATariq/ITMD_413 | /Assignment-9/02_Panda_Dataframe/main.py | UTF-8 | 2,327 | 3.515625 | 4 | [] | no_license | import numpy as np
import pandas as pd
import numpy as np  # needed by sections 9+ below
# 1. load hard-coded data into a dataframe
df = pd.DataFrame([
    ['Jan', 58, 42, 74, 22, 2.95],
    ['Feb', 61, 45, 78, 26, 3.02],
    ['Mar', 65, 48, 84, 25, 2.34],
    ['Apr', 67, 50, 92, 28, 1.02],
    ['May', 71, 53, 98, 35, 0.48],
    ['Jun', 75, 56, 107, 41, 0.11],
    ['Jul', 77, 58, 105, 44, 0.0],
    ['Aug', 77, 59, 102, 43, 0.03],
    ['Sep', 77, 57, 103, 40, 0.17],
    ['Oct', 73, 54, 96, 34, 0.81],
    ['Nov', 64, 48, 84, 30, 1.7],
    ['Dec', 58, 42, 73, 21, 2.56]],
    index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
    columns=['month', 'avg_high', 'avg_low', 'record_high', 'record_low', 'avg_precipitation'])
print(df)
# 2. Read text file into a dataframe (replaces the hard-coded frame above)
filename = 'Fremont_weather.txt'
df = pd.read_csv(filename)
print(df)
# 3. print first 5 or 3 rows of df
print(df.head())
print(df.tail(3))
# 4. get data types index, columns, values
print(df.dtypes)
print(df.index)
print(df.columns)
print(df.values)
# 5 statistical summary of each column
print(df.describe())
# 6 sort records by any column
print(df.sort_values('record_high', ascending=False))
# 7 slicing records
print(df.avg_low)  # index with single column (attribute access)
print(df['avg_low'])
print(df['avg_low'])
print(df[2:4])  # index with single column, rows 2 to 3
print(df[['avg_low', 'avg_high']])
print(df.loc[:, ['avg_low', 'avg_high']])  # multiple columns
print(df.loc[9, ['avg_precipitation']])
print(df.iloc[3:5, [0, 3]])  # index location can receive range or list of indices
# 8. Filtering
print(df[df.avg_precipitation > 1.0])  # filter on column values
print(df[df['month'].isin(['Jun', 'Jul', 'Aug'])])
# 9. Assignment -- very similar to slicing
df.loc[9, ['avg_precipitation']] = 101.3
print(df.iloc[9:11])
df.loc[9, ['avg_precipitation']] = np.nan
print(df.iloc[9:11])
df.loc[:, 'avg_low'] = np.array([5] * len(df))
print(df.head())
df['avg_day'] = (df.avg_low + df.avg_high) / 2
print(df.head())
# 10. renaming columns
# comment everything except for section 2
df.rename(columns={'avg_precipitation': 'avg_rain'}, inplace=True)  # rename 1 column
print(df.head())
df.columns = ['month', 'av_hi', 'av_lo', 'rec_hi', 'rec_lo', 'av_rain']
print(df.head())
# 11. iterate a df
# BUG FIX: the columns were renamed to 'av_hi' etc. just above, so the
# original lookup row['avg_high'] raised KeyError; use the new name.
for index, row in df.iterrows():
    print(index, row['month'], row['av_hi'])
# 12. write to csv file
df.to_csv('foo.csv')
aea2517c68ca2f6e49ccd8ff54547a0a9900fc5e | Python | cry999/AtCoder | /beginner/101/B.py | UTF-8 | 248 | 3.671875 | 4 | [] | no_license | def digit_sums(N: int) -> bool:
s = 0
temp = N
while temp > 0:
s += temp % 10
temp //= 10
return N % s == 0
if __name__ == "__main__":
    # Read one integer from stdin and report whether it is divisible by the
    # sum of its own digits.
    N = int(input())
    yes = digit_sums(N)
    print('Yes' if yes else 'No')
| true |
40a5063cd6f6f8a8e45c60bb6ae67b3b75205b61 | Python | sydbermas/AutoMeasure | /Frame/frameObject.py | UTF-8 | 5,531 | 2.828125 | 3 | [] | no_license | import cv2
import numpy as np
class Frame_Object:
    """Motion-target detector: diffs each frame against a reference frame and
    returns the centroids of the largest changed regions, annotating the
    frame in place.

    NOTE(review): `last_frame` is captured from the FIRST frame only -- the
    update at the end of targets() is commented out -- so all motion is
    measured against that initial reference frame.
    """
    # ------------------------------
    # User Instructions
    # ------------------------------
    # ------------------------------
    # User Variables
    # ------------------------------
    # blur (must be positive and odd)
    gaussian_blur = 15
    # threshold
    threshold = 15
    # dilation
    dilation_value = 6
    dilation_iterations = 2
    dilation_kernel = np.ones((dilation_value, dilation_value), np.uint8)
    # contour size
    contour_min_area = 10 # percent of frame area
    contour_max_area = 80 # percent of frame area
    # target select
    targets_max = 4 # max targets returned
    target_on_contour = True # else use box size
    target_return_box = False # True = return (x,y,bx,by,bw,bh), else check target_return_size
    target_return_size = False # True = return (x,y,percent_frame_size), else just (x,y)
    # display contour
    contour_draw = True
    contour_line = 1 # border width
    contour_point = 4 # centroid point radius
    contour_pline = -1 # centroid point line width
    contour_color = (0, 255, 255) # BGR color
    # display contour box
    contour_box_draw = True
    contour_box_line = 1 # border width
    contour_box_point = 4 # centroid point radius
    contour_box_pline = -1 # centroid point line width
    contour_box_color = (0, 255, 0) # BGR color
    # display targets
    targets_draw = True
    targets_point = 4 # centroid radius
    targets_pline = -1 # border width
    targets_color = (0, 0, 255) # BGR color
    # ------------------------------
    # System Variables
    # ------------------------------
    last_frame = None
    # ------------------------------
    # Functions
    # ------------------------------
    def targets(self, frame):
        """Detect motion targets in `frame` (assumed BGR, per the conversion
        below), draw the configured overlays onto it in place, and return the
        selected targets.

        Returns [] for the very first frame (it only seeds the reference);
        afterwards the tuple shape depends on target_return_box/size.
        """
        # frame dimensions
        # NOTE(review): np.shape returns (rows, cols, channels), so `width`
        # here is really the pixel height and vice versa; `area` is unaffected.
        width, height, depth = np.shape(frame)
        area = width * height
        # grayscale
        frame2 = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # blur
        frame2 = cv2.GaussianBlur(frame2, (self.gaussian_blur, self.gaussian_blur), 0)
        # initialize compare frame
        if self.last_frame is None:
            self.last_frame = frame2
            return []
        # delta
        frame3 = cv2.absdiff(self.last_frame, frame2)
        # threshold
        frame3 = cv2.threshold(frame3, self.threshold, 255, 0)[1]
        # dilation
        frame3 = cv2.dilate(frame3, self.dilation_kernel, iterations=self.dilation_iterations)
        # get contours
        contours, hierarchy = cv2.findContours(frame3, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        # targets
        targets = []
        for c in contours:
            # basic contour data
            ca = cv2.contourArea(c)
            bx, by, bw, bh = cv2.boundingRect(c)
            ba = bw * bh
            # target on contour
            if self.target_on_contour:
                p = 100 * ca / area
                if (p >= self.contour_min_area) and (p <= self.contour_max_area):
                    M = cv2.moments(c) # ;print( M )
                    tx = int(M['m10'] / M['m00'])
                    ty = int(M['m01'] / M['m00'])
                    targets.append((p, tx, ty, bx, by, bw, bh, c))
            # target on contour box
            else:
                p = 100 * ba / area
                if (p >= self.contour_min_area) and (p <= self.contour_max_area):
                    tx = bx + int(bw / 2)
                    ty = by + int(bh / 2)
                    targets.append((p, tx, ty, bx, by, bw, bh, c))
        # select targets (tuples sort by their first field p, so this keeps
        # the targets_max largest regions)
        targets.sort()
        targets.reverse()
        targets = targets[:self.targets_max]
        # add contours to frame
        if self.contour_draw:
            for size, x, y, bx, by, bw, bh, c in targets:
                cv2.drawContours(frame, [c], 0, self.contour_color, self.contour_line)
                cv2.circle(frame, (x, y), self.contour_point, self.contour_color, self.contour_pline)
        # add contour boxes to frame
        if self.contour_box_draw:
            for size, x, y, bx, by, bw, bh, c in targets:
                cv2.rectangle(frame, (bx, by), (bx + bw, by + bh), self.contour_box_color, self.contour_box_line)
                cv2.circle(frame, (bx + int(bw / 2), by + int(bh / 2)), self.contour_box_point, self.contour_box_color,
                           self.contour_box_pline)
        # add targets to frame
        if self.targets_draw:
            for size, x, y, bx, by, bw, bh, c in targets:
                cv2.circle(frame, (x, y), self.targets_point, self.targets_color, self.targets_pline)
        # # reset last frame
        # self.last_frame = frame2
        # return target x,y
        if self.target_return_box:
            return [(x, y, bx, by, bw, bh) for (size, x, y, bx, by, bw, bh, c) in targets]
        elif self.target_return_size:
            return [(x, y, size) for (size, x, y, bx, by, bw, bh, c) in targets]
        else:
            return [(x, y) for (size, x, y, bx, by, bw, bh, c) in targets]
    def frame_add_crosshairs(self, frame, x, y, r=20, lc=(0, 0, 255), cc=(0, 0, 255), lw=1, cw=1):
        """Draw crosshair lines (colour lc, width lw) and a circle of radius r
        (colour cc, width cw) centred on (x, y), modifying `frame` in place."""
        x = int(round(x, 0))
        y = int(round(y, 0))
        r = int(round(r, 0))
        cv2.line(frame, (x, y - r * 2), (x, y + r * 2), lc, lw)
        cv2.line(frame, (x - r * 2, y), (x + r * 2, y), lc, lw)
        cv2.circle(frame, (x, y), r, cc, cw)
    # ------------------------------
    # Frame Angles and Distance
    # ------------------------------
| true |
b1124e5ca8e0a68929a5afadca9adab02357e109 | Python | michael-grotelueschen/amicus | /code/model.py | UTF-8 | 4,326 | 2.75 | 3 | [] | no_license | import pandas as pd
import numpy as np
import cPickle
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.metrics import accuracy_score, \
precision_score, \
recall_score, \
classification_report, \
confusion_matrix
def get_train_set_and_test_set_dataframes():
    """Return a train set dataframe and a test set dataframe
    from the feature matrix.

    Reads feature_matrix.csv plus two sidecar files that list (one docket
    number per line) which cases belong to each split.
    """
    df_transcripts = pd.read_csv('feature_matrix.csv')
    dockets = df_transcripts['docket'].tolist()
    ok_cases_train_set = []
    with open('ok_cases_train_set') as f:
        for line in f:
            case = line.replace('\n', '')
            ok_cases_train_set.append(case)
    ok_cases_test_set = []
    with open('ok_cases_test_set') as f:
        for line in f:
            case = line.replace('\n', '')
            ok_cases_test_set.append(case)
    # Boolean row masks aligned with df_transcripts' row order.
    train_set_mask = [True if d in ok_cases_train_set else False for d in dockets]
    test_set_mask = [True if d in ok_cases_test_set else False for d in dockets]
    df_train_set = df_transcripts[train_set_mask]
    df_test_set = df_transcripts[test_set_mask]
    return df_train_set, df_test_set
def exlore_models():
    """This is a placeholder function explore modeling.

    (The name keeps the original 'exlore' spelling so callers are
    unaffected.) The body prepares train/test arrays; the commented lines
    are kept as a record of earlier logistic-regression and random-forest
    threshold/cross-validation experiments.
    """
    df_train_set, df_test_set = get_train_set_and_test_set_dataframes()
    y_true_train = df_train_set['decision'].values
    x_train = df_train_set.drop(['docket', 'decision'], axis=1).values
    y_true_test = df_test_set['decision'].values
    x_test = df_test_set.drop(['docket', 'decision'], axis=1).values
    #lr_model = LogisticRegression()
    #lr_model.fit(x_train, y_true_train)
    #probs = lr_model.predict_proba(x_test)[:, 1]
    #threshold = 0.7
    #y_pred = probs > threshold
    #print classification_report(y_true_test, y_pred)
    #print accuracy_score(y_true_test, y_pred)
    #print confusion_matrix(y_true_test, y_pred)
    #scores = cross_val_score(lr_model, x, y_true, scoring='f1', cv=10)
    #print scores
    #print np.mean(scores)
    #rf_model = RandomForestClassifier()
    #rf_model.fit(x_train, y_true_train)
    #probs = rf_model.predict_proba(x_test)[:, 1]
    #threshold = 0.5
    #y_pred = probs > threshold
    #print classification_report(y_true_test, y_pred)
    #print accuracy_score(y_true_test, y_pred)
    #print confusion_matrix(y_true_test, y_pred)
    #scores = cross_val_score(rf_model, x, y_true, scoring='f1', cv=10)
    #print scores
    #print np.mean(scores)
def get_predictions_and_actual_outcomes():
    """Get predictions for a particular model and the actual outcomes.

    Trains a logistic regression on the train split, thresholds the
    positive-class probability at 0.65 (positive maps to 'petitioner'
    below), and returns one line per test case:
    "<docket>:<predicted side>:<actual side>".
    """
    df_train_set, df_test_set = get_train_set_and_test_set_dataframes()
    y_true_train = df_train_set['decision'].values
    x_train = df_train_set.drop(['docket', 'decision'], axis=1).values
    y_true_test = df_test_set['decision'].values
    x_test = df_test_set.drop(['docket', 'decision'], axis=1).values
    lr_model = LogisticRegression()
    lr_model.fit(x_train, y_true_train)
    probs = lr_model.predict_proba(x_test)[:, 1]
    threshold = 0.65
    y_pred = probs > threshold
    predictions_and_actual_outcomes = []
    for docket, prediction in zip(df_test_set['docket'].tolist(), y_pred):
        if prediction == True:
            predicted_winning_side = 'petitioner'
        else:
            predicted_winning_side = 'respondent'
        actual_outcome = df_test_set[df_test_set['docket'] == docket]['decision'].values[0]
        if actual_outcome == True:
            actual_winning_side = 'petitioner'
        else:
            actual_winning_side = 'respondent'
        # debug output (Python 2 print statement)
        print actual_winning_side
        predictions_and_actual_outcomes.append(docket + \
            ':' + predicted_winning_side + \
            ':' + actual_winning_side)
    return '\n'.join(predictions_and_actual_outcomes)
if __name__ == "__main__":
predictions_and_actual_outcomes = get_predictions_and_actual_outcomes()
with open('predictions_and_actual_outcomes', 'w') as f:
f.write(predictions_and_actual_outcomes) | true |
0bc206d7170f32f0ad1ebd4ec8da3dd073725692 | Python | LTUC/amman-python-401d4 | /class-02/demo/factorial_recursion/factorial_recursion/factorial.py | UTF-8 | 246 | 3.859375 | 4 | [] | no_license | def fact(n):
if n==1:
return 1
return n * fact(n-1)
# Alternative solution using while loop
# def fact(n):
# result = 1
# temp = n
# while temp>1:
# result *= temp
# temp -= 1
# return result
| true |
c55f940692a75341e68f233e4b64d68662070c3b | Python | drumminhands/drumminhands_projector | /drumminhands_projector.py | UTF-8 | 6,875 | 2.90625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# created by chris@drumminhands.com
# see instructions at http://www.drumminhands.com/2016/09/02/raspberry-pi-photo-booth-projector/
# and the photo booth instruction at http://www.drumminhands.com/2014/06/15/raspberry-pi-photo-booth/
# slide show based on https://github.com/bradmontgomery/pgSlideShow
import argparse
import os
import stat
import sys
import time
import pygame
from pygame.locals import QUIT, KEYDOWN, K_ESCAPE
import config_projector # this is the projector config file config_projector.py
# global variables which will change during the program (do not change here)
transform_x = config_projector.monitor_w # how many pixels to scale the image width
transfrom_y = config_projector.monitor_h # how many pixels to scale the image height
offset_x = 0 # how far right of the top-left pixel to display the image
offset_y = 0 # how far down from the top-left pixel to display the image
current = 0 # which file in the list to show
num_files = 0 # how many total pics to show
file_list = [] # a list of all images being shown
def mount_pics():
    # Mount the photo booth's shared CIFS folder using credentials from
    # config_projector; relies on passwordless sudo for `mount`.
    try:
        # mount the drive
        cmd = "sudo mount -t cifs "+config_projector.server_mount_path+" "+config_projector.client_mount_path+" -o user="+config_projector.user_name+",pass="+config_projector.user_password
        os.system(cmd)
    except Exception, e:
        tb = sys.exc_info()[2]
        # NOTE(review): `traceback` is never imported in this file, so this
        # handler itself raises NameError if the try block ever throws.
        traceback.print_exception(e.__class__, e, tb)
        print "Error mounting shared drive"
def walktree(top, callback):
    """recursively descend the directory tree rooted at top, calling the
    callback function for each regular file. Taken from the module-stat
    example at: http://docs.python.org/lib/module-stat.html
    """
    # NOTE(review): despite the docstring, this visits only the top level --
    # directories fall into the `else` branch and are skipped, not recursed.
    for f in sorted(os.listdir(top)):
        pathname = os.path.join(top, f)
        mode = os.stat(pathname)[stat.ST_MODE]
        if stat.S_ISREG(mode):
            # It's a file, call the callback function
            callback(pathname)
        else:
            # Unknown file type, print a message
            print 'Skipping %s' % pathname
def addtolist(file, extensions=['.jpg','.jpeg']):
    """Add a file to a global list of image files."""
    # NOTE(review): `extensions` is a mutable default argument and `file`
    # shadows the builtin; both happen to be harmless here but are worth
    # cleaning up.
    global file_list
    filename, ext = os.path.splitext(file)
    e = ext.lower()
    # Only add common image types to the list.
    if e in extensions:
        if (filename[-3:]!='-sm'): # don't use the thumbnails
            print 'Adding to list: ', file
            file_list.append(file)
            if config_projector.use_prime:
                # insert the prime/title slide every prime_freq photos
                if ((len(file_list) % (config_projector.prime_freq +1)) == 0):
                    # show prime slide at regular intervals
                    file_list.append(config_projector.prime_slide) # start with the prime slide
        else:
            print 'Skipping: ', file, ' (thumbnail image)'
    else:
        print 'Skipping: ', file, ' (NOT a supported image)'
def input(events):
    """A function to handle keyboard/mouse/device input events. """
    # NOTE(review): this shadows the builtin input(); renaming would be safer,
    # but callers inside this file use this name.
    for event in events: # Hit the ESC key to quit the slideshow.
        if (event.type == QUIT or
            (event.type == KEYDOWN and event.key == K_ESCAPE)):
            pygame.quit()
def set_demensions(img_w, img_h):
    # set variables to properly display the image on screen
    # (name keeps the original misspelling; callers use `set_demensions`)
    # connect to global vars
    global transform_y, transform_x, offset_y, offset_x
    # based on output screen resolution, calculate how to display
    # NOTE(review): the module initialises a differently spelled global
    # `transfrom_y`, while this function assigns `transform_y`; the two
    # names never meet, so main() scales with the stale initial value.
    ratio_h = (config_projector.monitor_w * img_h) / img_w
    if (ratio_h < config_projector.monitor_h):
        #Use horizontal black bars
        transform_y = ratio_h
        transform_x = config_projector.monitor_w
        offset_y = (config_projector.monitor_h - ratio_h) / 2
        offset_x = 0
    elif (ratio_h > config_projector.monitor_h):
        #Use vertical black bars
        transform_x = (config_projector.monitor_h * img_w) / img_h
        transform_y = config_projector.monitor_h
        offset_x = (config_projector.monitor_w - transform_x) / 2
        offset_y = 0
    else:
        #No need for black bars as photo ratio equals screen ratio
        transform_x = config_projector.monitor_w
        transform_y = config_projector.monitor_h
        offset_y = offset_x = 0
    # uncomment these lines to troubleshoot screen ratios
    #print str(img_w) + " x " + str(img_h)
    #print "ratio_h: "+ str(ratio_h)
    #print "transform_x: "+ str(transform_x)
    #print "transform_y: "+ str(transform_y)
    #print "offset_y: "+ str(offset_y)
    #print "offset_x: "+ str(offset_x)
def find_pics():
    # Rebuild the global slide list from the shared pictures folder; exits
    # the program if nothing (beyond the optional prime slide) is found.
    global current, num_files, file_list
    file_list = [] # clear list
    if config_projector.use_prime:
        file_list.append(config_projector.prime_slide) # start with the prime slide
    walktree(config_projector.pics_folder, addtolist) # this may take a while...
    if len(file_list) <= 1: # note one is the prime slide, if used
        print "Sorry. No images found. Exiting."
        sys.exit(1)
    current = 0
    num_files = len(file_list)
def main():
global file_list, current, num_files
pygame.init()
find_pics() # check for available images to display
# Test for image support
if not pygame.image.get_extended():
print "Your Pygame isn't built with extended image support."
print "It's likely this isn't going to work."
sys.exit(1)
modes = pygame.display.list_modes()
pygame.display.set_mode(max(modes))
screen = pygame.display.get_surface()
pygame.display.set_caption(config_projector.title)
pygame.display.toggle_fullscreen()
pygame.mouse.set_visible(False) #hide the mouse cursor
while(True):
try:
img = pygame.image.load(file_list[current])
img = img.convert()
# clear the screen
screen.fill( (0,0,0) )
# set pixel dimensions based on image
set_demensions(img.get_width(), img.get_height())
# rescale the image to fit the current display
img = pygame.transform.scale(img, (transform_x,transfrom_y))
screen.blit(img,(offset_x,offset_y))
pygame.display.flip()
input(pygame.event.get())
time.sleep(config_projector.waittime)
except pygame.error as err:
print "Failed to display %s: %s" % (file_list[current], err)
# When we get to the end, re-start at the beginning and check for new files
current = current + 1;
if (current == num_files):
print '----------------------- Restart slideshow -----------------------'
find_pics() # check for available images to display
# Startup sequence: wait for the photo-booth Pi to boot, mount its share,
# then loop forever inside main().
print 'Projector running'
print 'Waiting a bit to make sure the photo booth has time to boot.'
time.sleep(300) # wait a bit until the other RPi is connected
mount_pics() # mount the drive on startup of program
# run the main program
main()
| true |
755145ae900e435133a603f5e592e596f24cccd0 | Python | aligerami/assignment2 | /chapter11-5.py | UTF-8 | 1,245 | 3.640625 | 4 | [] | no_license | def add_matrix(a, b):
result=len(a)*[0]
result= [[0 for x in range(len(a))] for y in range(len(a[1]))]
for i in range (0,len(a)):
for j in range (0,len(a[i])):
result[i][j]= a[i][j]+b[i][j]
return result
# Quick self-test: add two fixed 3x3 matrices and print the result.
m1= [[1.0, 2.0, 3.0],
    [4.0 ,5.0, 6.0],
    [11.0, 8.0 ,11.0]]
m2= [[0.0 ,2.0, 4.0],
    [ 1.0, 4.5, 2.2],
    [1.1 ,4.3,5.2]]
print(add_matrix(m1,m2))
def main():
    """Prompt for two 3x3 matrices (comma-separated rows) and print their sum.

    The original repeated the same read/parse statement six times and
    shadowed the builtin `list`; a small helper removes the duplication while
    producing the exact same prompts.
    """
    def read_matrix(label):
        # Collect three rows like "1,2,3" for the matrix named by `label`.
        rows = []
        for ordinal in ("first", "second", "third"):
            raw = input("please enter the %s row of %s matrix like this 1,2,3 :" % (ordinal, label))
            rows.append([float(token) for token in raw.split(",")])
        return rows
    t1 = read_matrix("first")
    t2 = read_matrix("second")
    print(add_matrix(t1, t2))
main()
681fb2a6a2281cbaf09e29cbd2472065a9630606 | Python | markljwong/chatbot | /src/python/doc_classification.py | UTF-8 | 558 | 3.015625 | 3 | [] | no_license | # Doesn't work. Kept for reference
import nltk
from nltk.corpus import movie_reviews
all_words = nltk.FreqDist(w.lower() for w in movie_reviews.words())
word_features = list(all_words.keys())[:2000]
def document_features(document):
for word in word_features:
features['contains(%s)' % word] = (word in document_words)
return features
featuresets = [(document_features(d), c) for (d,c) in word_features]
classifier = nltk.NaiveBayesClassifier.train(featuresets)
classifier.classify(document_features(d))
classifier.show_most_informative_features(5)
| true |
1ffeaa3c45e98105b323788f81779c6336577011 | Python | dcg/LispInterpreter | /test/LISP/TestPrinter.py | UTF-8 | 1,523 | 3 | 3 | [] | no_license | '''
Created on 30.03.2012
@author: dominik
'''
import unittest
from LISP.LispClasses import *
from LISP.Printer import printLisp
class Test(unittest.TestCase):
def testPrint(self):
self.assertTrue(printLisp(new(LispInteger,5)) == "5",printLisp(new(LispInteger,5)))
self.assertTrue(printLisp(new(LispSymbol,"asd")) == "asd",printLisp(new(LispSymbol,"asd")))
self.assertTrue(printLisp(new(LispNull)) == "()",printLisp(new(LispNull)))
self.assertTrue(printLisp(new(LispTrue)) == "TRUE",printLisp(new(LispTrue)))
self.assertTrue(printLisp(new(LispFalse)) == "FALSE",printLisp(new(LispFalse)))
liste = new(LispCons,new(LispInteger,5))
self.assertTrue(printLisp(liste)=="(5)",printLisp(liste))
liste = new(LispCons,new(LispInteger,5),new(LispCons,new(LispInteger,6)))
self.assertTrue(printLisp(liste)=="(5 6)",printLisp(liste))
liste = new(LispCons,new(LispInteger,5),new(LispCons,new(LispTrue)))
self.assertTrue(printLisp(liste)=="(5 TRUE)",printLisp(liste))
l5 = new(LispInteger,5)
l6 = new(LispInteger,6)
liste = new(LispCons,l5,new(LispCons,l6))
subListe = new(LispCons,l5,new(LispCons,l6))
liste.rest.rest=new(LispCons,subListe,new(LispCons,l5))
self.assertTrue(printLisp(liste)=="(5 6 (5 6) 5)",printLisp(liste))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testPrint']
unittest.main() | true |
ddb29a9508a460f003398978b76643ec904154e2 | Python | TheSagab/Foundation-of-Programming-1 | /Assignment3.py | UTF-8 | 9,293 | 3.203125 | 3 | [] | no_license | # ********************* Program Buku Penilaian *********************
# ********************* Nama : Anindito Bhagawanta *********************
# ********************* NPM : 1606879230 *********************
# ********************* Kelas : DDP 1 - B *********************
# ********************* Asisten : Arie Joseph *********************
Database = {}
def ASCIIArt():
print(" _______ _ _____ _ ")
print("|__ __| | / ____| | | ")
print(" | | | |__ ___| (___ __ _ __ _ __ _| |__ ")
print(" | | | '_ \ / _ \\\\___ \ / _` |/ _` |/ _` | '_ \ ")
print(" | | | | | | __/____) | (_| | (_| | (_| | |_) |")
print(" |_| |_| |_|\___|_____/ \__,_|\__, |\__,_|_.__/ ")
print(" __/ | ")
print(" |___/ ")
def Add(Mahasiswa, Kelas): # Menambah mahasiswa ke Database
if Mahasiswa in Database: # Jika sudah terdapat di Database
print("Mahasiswa {} sudah ada di dalam buku.".format(Mahasiswa))
else:
DafKelas = ["A", "B", "C", "D"]
if Kelas.upper() not in DafKelas: # Daftar kelas yang valid adalah A, B, C dan D
print("Kelas yang valid adalah A, B, C dan D!")
else:
# Memasukkan mahasiswa ke Database dan assign data kelas,
Database[Mahasiswa] = [Kelas.upper(), [0, 0], [0, 0, 0], [0, 0]]
# quiz, tugas, dan ujian ke Database (default nilai 0)
print(Mahasiswa + " berhasil ditambahkan kedalam buku.")
def Update(Mahasiswa, Berkas, Nilai): # Mengupdate berkas nilai mahasiswa
Nilainya = Nilai.split(",")
if Berkas == "Quiz": # Mengupdate sesuai berkas yang dinilai
# Index 1, 2, 3 masing-masing Quiz, Tugas dan Ujian
for i in range(len(Nilainya)):
# Jika nilai kosong (""), maka dilewati dan dianggap tetap 0
if Nilainya[i] == "":
continue # Melakukan iterasi sebanyak i kali, dengan i = banyak nilai yang diinput
else: # Setiap iterasi menggantikan nilai pada index tersebut dengan nilai
Database[Mahasiswa][1][i] = int(Nilainya[i]) # yang diinginkan
elif Berkas == "Tugas":
for i in range(len(Nilainya)):
if Nilainya[i] == "":
continue
else:
Database[Mahasiswa][2][i] = int(Nilainya[i])
elif Berkas == "Ujian":
for i in range(len(Nilainya)):
if Nilainya[i] == "":
continue
else:
Database[Mahasiswa][3][i] = int(Nilainya[i])
print("Menambah nilai {} pada mahasiswa {}.".format(Berkas, Mahasiswa))
def Average(Kelas, Berkas, Ke): # Menghitung Rata-rata berkas tertentu dari suatu kelas
Jumlah = 0 # Menginisiasi Jumlah dan Count di 0
count = 0 # Jumlah bertambah pada saat mengakses Database dan mencari nilai
for i in Database: # di kelas dan berkas tertentu
# count bertambah ketika berhasil menambahkan Jumlah, dan count
for j in range(len(Database)):
if Database[i][0] == Kelas: # menunjukkan jumlah mahasiswa di kelas tersebut
if Berkas == "Quiz": # RataRata dihitung dengan hasil bagi Jumlah dan count
# i adalah Mahasiswa, dan j adalah banyak Mahasiswa di
# Database
Jumlah += Database[i][1][int(Ke) - 1]
count += 1 # round() digunakan untuk membulatkan hingga 2 angka dibelakang koma
elif Berkas == "Tugas":
Jumlah += Database[i][2][int(Ke) - 1]
count += 1
elif Berkas == "Ujian":
Jumlah += Database[i][3][int(Ke) - 1]
count += 1
RataRata = CekInt(round((Jumlah / count), 2))
print("Rata-rata nilai {} {} pada kelas {} adalah {}.".format(Berkas,
Ke, Kelas, str(RataRata).replace(".", ",")))
def Summary(Mahasiswa): # Mencetak rekap nilai suatu mahasiswa
# Menghitung rata-rata nilai quiz, tugas dan ujian dan nilai akhirnya
AverageQuiz = CekInt(round((sum(Database[Mahasiswa][1]) / 2), 2))
# dan kemudian mencetaknya
AverageTugas = CekInt(round((sum(Database[Mahasiswa][2]) / 3), 2))
AverageUjian = CekInt(round((sum(Database[Mahasiswa][3]) / 2), 2))
NilaiAkhir = CekInt(
round((0.2 * AverageTugas + 0.3 * AverageQuiz + 0.5 * AverageUjian), 2))
print("Nama : " + Mahasiswa)
print("Kelas : " + Database[Mahasiswa][0])
# .replace(".", ",") digunakan untuk mengubah titik menjadi koma
print("Quiz : {}".format(str(AverageQuiz).replace(".", ",")))
print("Tugas : {}".format(str(AverageTugas).replace(".", ",")))
print("Ujian : {}".format(str(AverageUjian).replace(".", ",")))
print("Nilai Akhir : {}".format(str(NilaiAkhir).replace(".", ",")))
def Search(Berkas, Ke, Nilai): # Mencari berkas nilai mahasiswa dengan nilai tertentu
# Nilainya[0] adalah batas bawah dan Nilainya[1] adalah batas atas
Nilainya = Nilai.split("-")
# Jika Nilainya[0] <= Nilai <= Nilainya[1] maka akan mencetak mahasiswa
# yang memiliki nilai tersebut pada berkas tertentu
for i in Database:
if Berkas == "Quiz": # i adalah Mahasiswa
if Database[i][1][int(Ke) - 1] >= int(Nilainya[0]) and Database[i][1][int(Ke) - 1] <= int(Nilainya[1]):
print(i)
elif Berkas == "Tugas":
if Database[i][2][int(Ke) - 1] >= int(Nilainya[0]) and Database[i][1][int(Ke) - 1] <= int(Nilainya[1]):
print(i)
elif Berkas == "Ujian":
if Database[i][3][int(Ke) - 1] >= int(Nilainya[0]) and Database[i][1][int(Ke) - 1] <= int(Nilainya[1]):
print(i)
def CekInt(x): # Mengecek apakah bilangan bulat
if x.is_integer(): # Jika tidak ada angka dibelakang koma, maka
return int(x) # mengembalikan bilangan bulat
else:
return x
def Main(): # Penggunaan .lower().captialize dan .upper() agar tidak case-sensitive
while True: # Program berjalan sampai tak hingga
try:
# Cek apakah input berkas sesuai
BerkasValid = ["Quiz", "Tugas", "Ujian"]
# Memisahkan menjadi list [Command, Parameter Command]
masukkan = input().split()
if masukkan[0].upper() == "ADD":
# Memisahkan menjadi list [Mahasiswa, Kelas]
Parameter = masukkan[1].split(";")
Add(Parameter[0].lower().capitalize(),
Parameter[1]) # Mahasiswa, Kelas
elif masukkan[0].upper() == "UPDATE":
# Memisahkan menjadi list [Mahasiswa, Berkas, Nilai (masih
# belum terpisah koma)]
Parameter = masukkan[1].split(";")
if Parameter[1] not in BerkasValid:
print("Input berkas tidak valid!")
else:
Update(Parameter[0].lower().capitalize(), Parameter[
1], Parameter[2]) # Mahasiswa, Berkas, Nilai
elif masukkan[0].upper() == "AVERAGE":
# Memisahkan menjadi list [Kelas, Berkas, Berkas keberapa]
Parameter = masukkan[1].split(";")
if Parameter[1] not in BerkasValid:
print("Input berkas tidak valid!")
else:
# Kelas, Berkas, Ke
Average(Parameter[0], Parameter[1], Parameter[2])
elif masukkan[0].upper() == "SUMMARY":
Summary(masukkan[1].lower().capitalize()) # Mahasiswa
elif masukkan[0].upper() == "SEARCH":
# Memisahkan menjadi list [Berkas, Berkas keberapa, Nilai
# (masih belum terpisah strip)]
Parameter = masukkan[1].split(";")
if Parameter[0] not in BerkasValid:
print("Input berkas tidak valid!")
else:
# Berkas, Ke, Nilai
Search(Parameter[0], Parameter[1], Parameter[2])
else:
print("Input tidak valid!")
except IndexError: # Jika memasukkan nilai diluar jangkauan
print("Input tidak valid / diluar jangkauan!")
except KeyError: # Jika memanggil suatu perintah tetapi belum ada mahasiswa tersebut
print("Mahasiswa belum terdaftar di buku!")
except EOFError: # Jika mencapai End-of-file
break
except ValueError: # Jika nilai terdapat huruf
print("Nilai mengandung karakter non angka!")
except KeyboardInterrupt: # Jika menekan Ctrl-C
print("Ctrl-C ditekan, menghentikan program.")
break
# print(Database) # Untuk debugging
ASCIIArt()
Main() # Eksekusi Program
| true |
fdc94432476fa6394b2d61810a48a6e26eac441b | Python | JakeGads/Python-tests | /Prime(F) | UTF-8 | 427 | 4.21875 | 4 | [] | no_license | #!/usr/bin/env python
prime_numbers = 0
def is_prime_number(x):
if x >= 2:
for y in range(2, x):
if not (x % y):
return False
else:
return False
return True
for i in range(int(raw_input("How many numbers you wish to check: "))):
if is_prime_number(i):
prime_numbers += 1
print
i
print
"We found " + str(prime_numbers) + " prime numbers." | true |
d2fe049361a38ad19cc3bec5d52fa967ed7727d2 | Python | hllj/drfi-webserver | /config/config_loader.py | UTF-8 | 373 | 2.78125 | 3 | [] | no_license | import yaml
class ConfigLoader:
def __init__(self, path):
self.path = path
def load(self, extension="yaml"):
if extension == "yaml":
return self.load_yaml()
else:
raise NotImplementedError()
def load_yaml(self):
artifacts = yaml.load(open(self.path, "r"), Loader=yaml.Loader)
return artifacts
| true |
871dd99e3971fb87d25cc60df94fe3f527ab1848 | Python | Ayushchauhan009/Spiral-Star-using-python | /star.py | UTF-8 | 112 | 3.578125 | 4 | [] | no_license | import turtle
n=60
pen=turtle.Turtle()
for i in range(n):
pen.forward(i*12)
pen.right(144)
turtle.done() | true |
5010c64902b3512bdca1c9dc13d81c1d8435f8b2 | Python | RomanHal/python | /lab1/zad2.py | UTF-8 | 158 | 3.265625 | 3 | [] | no_license | #!/usr/bin/env python3
print("Podaj imie nazwisko i rok urodzenia")
imie,nazwisko,rok_urodzenia = input().split(',')
print (imie, nazwisko, rok_urodzenia)
| true |
e4d8552f3926fd660db8110bc187786eed5fa331 | Python | lauradang/pdf-table-parser | /pdfs/lib/python3.7/site-packages/test3.py | UTF-8 | 484 | 3.9375 | 4 | [] | no_license | #This is the "test3.py" module and it provides one function called print_list()
#which print the lists that may or may not include nested lists.
def print_list(list_name,indent=false,level=0):
for each_item in list_name:
if isinstance(each_item,list):
print_list(each_item,true,level+1)
else:
if indent:
for num in range(level):
print("\t",end=' ')
print(each_item)
| true |
37aca1c355bd99828722db26813cb25c64e5ae41 | Python | natal20-meet/meetyl1201819 | /lab3.py | UTF-8 | 634 | 3.515625 | 4 | [] | no_license | import turtle
#turtle.right(45)
#turtle.forward(60)
#turtle.left(150)
#turtle.forward(60)
angle = 144
length = 100
def draw_star(angle,length):
for i in range(5):
turtle.left(angle)
turtle.forward(length)
turtle.hideturtle()
#draw_star(angle,length)
angle_2 = 90
length_2 = 50
angle_3 = 55
length_3 = 50
angle_4 = 120
def draw_shape(angle,length):
turtle.begin_fill()
for i in range(4):
turtle.forward(length_2)
turtle.left(angle_2)
for i in range(1):
turtle.right(angle_3)
turtle.forward(length_3)
turtle.left(angle_4)
turtle.forward(length_3)
turtle.end_fill()
draw_shape(angle,length)
turtle.mainloop()
| true |
d22f727393665589dbbffacf0d8cad8f77bb4757 | Python | orenovadia/euler | /solved/e211.py | UTF-8 | 2,085 | 2.84375 | 3 | [] | no_license | '''
Created on Mar 9, 2015
@author: oovadia
'''
from time import time as thistime
from math import log,sqrt
from eulertools import primes3,Dn,primeFactors
from itertools import groupby
def calcNum(l,prm):
s=1
for i,pows in enumerate(l):
s*= ( prm[i]**pows )
s%=500500507
return s
def advance2Powers(n):
n+=1
t = int( log(n,2))+1
return 2**t-1
def sigmakOfN(k,n,prm,l):
s=1
for i,pow in enumerate(l):
p = prm[i]
s *= (p**(pow+1) -1)/( p -1 )
return s
def sigmakOfN2(k,n):
prm = primeFactors(n)
counts =[ len(list(group)) for key, group in groupby(prm)]
prm = list(set(prm))
s=1
for i,pow in enumerate(counts):
p = prm[i]
s *= (p**( (pow+1)*k )-1)/( p**k -1 )
return s
def is_square(apositiveint):
x = apositiveint // 2
seen = set([x])
while x * x != apositiveint:
x = (x + (apositiveint // x)) // 2
if x in seen: return False
seen.add(x)
return True
def is_square2(apositiveint):
tmp = long(apositiveint**0.5)
if tmp**2==apositiveint: return True
if (tmp+1)**2==apositiveint: return True
if (tmp-1)**2==apositiveint: return True
return False
def run(n):
l = [1]*(n+1)
for i in xrange(2,n+1):
for j in xrange(i,n,i):
l[j] += i**2
print 'creating dict:'
s=1
l[0]=0
l[1]=1
d = {}
for num, x in enumerate(l):
d[x]=d.get(x,0)+num
print l[:10]
maxSig = max(l)
print 'maxSig',maxSig
i=2
i2=4
while i2<=maxSig:
s+= d.get(i2,0)
i+=1
i2=i**2
print s
def runOld(n):
s=1
for i in xrange(2,n):
s2 = sigmakOfN2(2,i)
if is_square(s2):
t = primeFactors(i)
print i,s2,s2**0.5,t,sum(t)
s+=i
print s
def main(x):
st = thistime()
run(x)
print 'time: ',thistime()-st,'secs'
if __name__ == '__main__':
main(64*1000000) | true |
e9d565c20dc295ffc6a3e2c780aa040b5c24d512 | Python | linqcan/odser2014 | /scripts/configmanager.py | UTF-8 | 753 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env python
"""
This module handles exposes a method for retrieving
configuration settings from 'config.json'.
"""
import json
CONFIG_FILE = "../config.json"
JSON_OBJ = None
def get(config_type, config_attr):
"""
Returns the value of attribute 'config_attr' for
configuration type 'config_type'.
"""
return_type = None
return_value = None
try:
return_type = JSON_OBJ[config_type]
except KeyError:
print "No such config type '%s'" % config_type
raise
try:
return_value = return_type[config_attr]
except KeyError:
print "No such attribute '%s' for config type '%s'" % (config_attr, config_type)
raise
return return_value
file_handle = open(CONFIG_FILE, "r")
JSON_OBJ = json.load(file_handle)
| true |
83573daa98e8905c289c3057b3616a0b69eda1ce | Python | pomowunk/MarlinGcodeDocumentation | /octoprint_marlingcodedocumentation/updater.py | UTF-8 | 3,534 | 2.625 | 3 | [] | no_license | import importlib
import json
import os
class DocumentationUpdater(object):
"""Manage updating the documentation from all parsers"""
JS_PREFIX = "window.AllGcodes = "
PARSERS = {}
SOURCES = set()
PARSERS_IMPORTS = [
'octoprint_marlingcodedocumentation.parser',
]
@classmethod
def register_parser(cls, parser):
cls.PARSERS[parser.ID] = parser
cls.SOURCES.add(parser.SOURCE)
return parser
@classmethod
def import_parsers(cls):
for module_name in cls.PARSERS_IMPORTS:
try:
importlib.import_module(module_name)
except Exception as e:
print(f"Could not load {module_name}: {e}")
raise
def update_documentation(self, directories=None, js_path=None):
if js_path is None:
js_path = os.path.join(
os.path.dirname(__file__), "static", "js", "all_codes.js")
codes_list = []
ids_to_update = set()
if not self.PARSERS:
raise Exception(f"No parsers have been registered")
for _id, parser in self.PARSERS.items():
if directories is None:
directory = None
else:
if _id not in directories:
continue
directory = directories[_id]
gcodes = parser().load_and_parse_all_codes(directory)
self.attach_id_to_docs(gcodes)
codes_list.append(gcodes)
ids_to_update.add(parser.ID)
if not codes_list:
raise Exception("No sources set to be updated")
if set(self.PARSERS) - ids_to_update:
all_codes = self.load_existing_codes(ids_to_update, js_path)
else:
all_codes = {}
self.merge_codes(all_codes, codes_list)
self.sort_codes(all_codes)
self.save_codes_to_js(all_codes, js_path)
def attach_id_to_docs(self, codes):
for code in list(codes):
codes[code] = [
dict(value, **{
"id": f"{value['source']}.{code}[{index}]"
})
for index, value in enumerate(codes[code])
]
def load_existing_codes(self, ids_to_update, js_path):
with open(js_path) as f:
prefix = f.read(len(self.JS_PREFIX))
if prefix != self.JS_PREFIX:
raise Exception(
f"Prefix in JS file ('{prefix}') didn't match expected "
f"prefix ('{self.JS_PREFIX}')")
all_codes = json.load(f)
sources_to_update = [
self.PARSERS[_id].SOURCE
for _id in ids_to_update
]
for code, values in list(all_codes.items()):
all_codes[code] = [
value
for value in values
if value["source"] not in sources_to_update
]
return all_codes
def merge_codes(self, all_codes, codes_list):
for codes in codes_list:
for code, values in codes.items():
all_codes.setdefault(code, []).extend(values)
def sort_codes(self, all_codes):
for code, values in list(all_codes.items()):
all_codes[code] = sorted(values, key=lambda value: value["source"])
def save_codes_to_js(self, all_codes, js_path):
with open(js_path, "w") as f:
f.write(self.JS_PREFIX)
json.dump(all_codes, f, indent=2, sort_keys=True)
DocumentationUpdater.import_parsers()
| true |
d0c241ba76725460fc2b01b824b4e3ea61ee70fb | Python | tiagoportelanelo/flying-dog-beers | /app.py | UTF-8 | 4,437 | 2.828125 | 3 | [] | no_license | # Import Supporting Libraries
import pandas as pd
# Import Dash Visualization Libraries
import dash_core_components as dcc
import dash_html_components as html
import dash_table as dt
import dash.dependencies
from dash.dependencies import Input, Output, State
import plotly.graph_objects as go
def generate_table(dataframe, max_rows=10):
return html.Table(
# Header
[html.Tr([html.Th(col) for col in dataframe.columns])] +
# Body
[html.Tr([
html.Td(dataframe.iloc[i][col]) for col in dataframe.columns
]) for i in range(min(len(dataframe), max_rows))]
)
# Load datasets
train = pd.read_csv('https://raw.githubusercontent.com/tiagoportelanelo/nelotest/master/train.csv', index_col ='PassengerId' )
df_describe = train.describe().copy()
df_describe.insert(0, 'Stat', train.describe().index.to_list())
df_aux = train.copy()
df_aux['Sex'] = df_aux.Sex.map({'female':0, 'male':1})
df_corr = df_aux.corr().copy()
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
app.layout = html.Div([
html.H1('Title'),
html.H2('DataFrame Overview'),
html.H4('Primeiras entradas do DataSet'),
dt.DataTable(
id='table_head',
columns=[{"name": i, "id": i} for i in train.head().columns],
data=train.head().to_dict('records'),
),
html.H4('Descricao Estatistica'),
dt.DataTable(
id='table_describe',
columns=[{"name": i, "id": i} for i in df_describe.columns],
data=df_describe.to_dict('records'),
),
html.H4('Matriz de Correlacao'),
dcc.Graph(
id='crr-matrix',
figure = go.Figure(data=go.Heatmap(
z=[df_corr.Survived,
df_corr.Pclass,
df_corr.Sex,
df_corr.Age,
df_corr.SibSp,
df_corr.Parch,
df_corr.Fare,
],
x =['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare'],
y =['Survived', 'Pclass' ,'Sex','Age', 'SibSp', 'Parch', 'Fare']
))
),
html.H2('Analise Dos Atributos'),
html.Div(id='selected-indexes'),
dcc.Dropdown(
id='atributes-list',
options=[
{'label': i, 'value': i} for i in train.columns
],
value = 'Survived'
),
dcc.Graph(id='var-plot'),
], style={'width': '60%'})
@app.callback(Output('var-plot', 'figure'),
[Input('atributes-list', 'value')])
def update_figure(value):
if value == 'Survived':
train.Survived.value_counts(normalize=True)
figure={
'data': [
{'x': list(train.Survived.value_counts(normalize=True).index), 'y': train.Survived.value_counts(normalize=True).to_list(), 'type': 'bar', 'name': 'SF'}
],
}
return figure
if value == 'Sex':
train.Survived.value_counts(normalize=True)
figure={
'data': [
{'x': list(train.Sex.value_counts(normalize=True).index), 'y': train.Sex.value_counts(normalize=True).to_list(), 'type': 'bar', 'name': 'SF'}
],
}
return figure
if value == 'Pclass':
train.Survived.value_counts(normalize=True)
figure={
'data': [
{'x': list(train.Pclass.value_counts(normalize=True).index), 'y': train.Pclass.value_counts(normalize=True).to_list(), 'type': 'bar', 'name': 'SF'}
],
}
return figure
if value == 'Age':
trace = go.Histogram(x=train["Age"], opacity=0.7, name="Male",
xbins={"size": 5}, customdata=train["Age"], )
layout = go.Layout(title="Age Distribution", xaxis={"title": "Age (years)", "showgrid": False},
yaxis={"title": "Count", "showgrid": False}, )
figure = {"data": [trace], "layout": layout}
return figure
if __name__ == '__main__':
app.run_server(debug=True)
| true |
eb94c394bdf12ca9525e84c76404fa62853c9700 | Python | iindyk/my_GAN | /graphing/noise_vis.py | UTF-8 | 1,734 | 2.609375 | 3 | [] | no_license | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
noise_norms = [3., 13.]
im_dir = '/home/iindyk/PycharmProjects/my_GAN/images/for_graphs/'
(x_train_all, y_train_all), (x_test_all, y_test_all) = tf.keras.datasets.mnist.load_data()
x_train_all, x_test_all = x_train_all/255., x_test_all/255.
x_train_all, x_test_all = x_train_all-np.mean(x_train_all), x_test_all-np.mean(x_test_all)
for i in range(len(y_train_all)):
if y_train_all[i] == 0:
break
im0 = x_train_all[i, :, :]
noise1 = np.random.uniform(low=-1., high=1., size=(28, 28))
noise1 = (noise1/np.linalg.norm(noise1))*(noise_norms[0])
im1 = x_train_all[i]+noise1
noise2 = np.random.uniform(low=-1., high=1., size=(28, 28))
noise2 = (noise2/np.linalg.norm(noise2))*(noise_norms[1])
im2 = x_train_all[i]+noise2
def show_images(images, cols=1, titles=None):
assert ((titles is None) or (len(images) == len(titles)))
n_images = len(images)
if titles is None: titles = ['Image (%d)' % i for i in range(1, n_images + 1)]
fig = plt.figure()
for n, (image, title) in enumerate(zip(images, titles)):
a = fig.add_subplot(cols, np.ceil(n_images / float(cols)), n + 1)
plt.imshow(image, cmap='gray_r')
# Turn off tick labels
a.set_yticklabels([])
a.set_xticklabels([])
plt.gca().axes.get_yaxis().set_visible(False)
plt.gca().axes.get_xaxis().set_visible(False)
# border width
a.spines['top'].set_linewidth(2)
a.spines['right'].set_linewidth(2)
a.spines['bottom'].set_linewidth(2)
a.spines['left'].set_linewidth(2)
fig.set_size_inches(np.array(fig.get_size_inches()) * n_images)
plt.show()
show_images([im0, im1, im2])
| true |
12e0ca2872ed11436056a9e54869969399c48085 | Python | bkganguneni/DASHING_CAR | /PYTH_CARGAME/game.py | UTF-8 | 4,673 | 3.046875 | 3 | [] | no_license | # cd OneDrive\Desktop\PYTH_CARGAME
import pygame
import time
import random
pygame.init()
display_width = 800
display_height = 600
gray = (120, 120, 120)
black = (0, 0, 0)
red = (255, 0, 0)
bright_red = (255, 40, 20)
green = (125, 235, 52)
bright_green = (132, 255, 0)
blue = (20, 118, 255)
bright_blue = (20, 185, 255)
gamedisplays = pygame.display.set_mode((display_width,display_height))
pygame.display.set_caption("CAR GAME")
clock = pygame.time.Clock()
carimage = pygame.image.load('car1.png')
backgroundpic = pygame.image.load('grass.jpg')
yellow_strip = pygame.image.load('yellow.png')
strip = pygame.image.load('white line.png')
intro_background = pygame.image.load('intro.jpg')
car_width = 125
def obstacle(obs_startx,obs_starty,obs):
if obs==0:
obs_pic = pygame.image.load('car1.png')
elif obs==1:
obs_pic = pygame.image.load('car1.png')
elif obs==2:
obs_pic = pygame.image.load('car1.png')
elif obs==3:
obs_pic = pygame.image.load('car1.png')
elif obs==4:
obs_pic = pygame.image.load('car1.png')
elif obs==5:
obs_pic = pygame.image.load('car1.png')
gamedisplays.blit(obs_pic,(obs_startx,obs_starty))
def score_system(passed,score):
font = pygame.font.SysFont(None,25)
text = font.render("Passed :"+str(passed),True,black)
score = font.render("Score :"+str(score),True,red)
gamedisplays.blit(text,(0,50))
gamedisplays.blit(score,(0,30))
def text_objects(text,font):
textsurface = font.render(text,True,black)
return textsurface,textsurface.get_rect()
def message_display(text):
largetext = pygame.font.Font("freesansbold.ttf",80)
textsurf,testrect = text_objects(text,largetext)
testrect.center = ((display_width/2),(display_height/2))
gamedisplays.blit(textsurf,testrect)
pygame.display.update()
time.sleep(3)
game_loop()
def crash():
message_display("YOU CRASHED")
def background():
gamedisplays.blit(backgroundpic,(0,0))
gamedisplays.blit(backgroundpic,(0,200))
gamedisplays.blit(backgroundpic,(0,400))
gamedisplays.blit(backgroundpic,(700,0))
gamedisplays.blit(backgroundpic,(700,200))
gamedisplays.blit(backgroundpic,(700,400))
gamedisplays.blit(yellow_strip,(350,0))
gamedisplays.blit(yellow_strip,(350,140))
gamedisplays.blit(yellow_strip,(350,280))
gamedisplays.blit(yellow_strip,(350,420))
gamedisplays.blit(yellow_strip,(350,560))
gamedisplays.blit(yellow_strip,(350,700))
gamedisplays.blit(strip,(80,0))
gamedisplays.blit(strip,(80,400))
gamedisplays.blit(strip,(80,800))
gamedisplays.blit(strip,(620,0))
gamedisplays.blit(strip,(620,400))
gamedisplays.blit(strip,(620,800))
def car(x,y):
gamedisplays.blit(carimage,(x,y))
def game_loop():
x = (display_width*0.45)
y = (display_height*0.8 )
x_change = 0
obstacle_speed = 9
obs = 0
y_change = 0
obs_startx = random.randrange(200,(display_width)-200)
obs_starty = -750
obs_width = 60
obs_height = 115
passed = 0
level = 0
score = 0
bumped=False
while not bumped:
for event in pygame.event.get():
if event.type==pygame.QUIT:
pygame.quit()
quit()
if event.type==pygame.KEYDOWN:
if event.key==pygame.K_LEFT:
x_change = -5
if event.key==pygame.K_RIGHT:
x_change = 5
if event.key==pygame.K_a:
obstacle_speed += 2
if event.key == pygame.K_b:
obstacle_speed -= 2
if event.type==pygame.KEYUP:
if event.key==pygame.K_LEFT or event.key==pygame.K_RIGHT:
x_change = 0
x+=x_change
gamedisplays.fill(gray)
background()
obs_starty-=(obstacle_speed/4)
obstacle(obs_startx,obs_starty,obs)
obs_starty+=obstacle_speed
car(x,y)
score_system(passed,score)
#Dimensions are not that equal due to images
if x > 700 - car_width or x<90:
crash()
if obs_starty>display_height:
obs_starty = 0 - obs_height
obs_startx = random.randrange(100,(display_width-210))
obs = random.randrange(0,6)
passed = passed + 1
score = passed*10
if int(passed)%10==0:
level = level+1
obstacle_speed = obstacle_speed + 2
largetext = pygame.font.Font("freesansbold.ttf",80)
textsurf,testrect = text_objects("Level :"+str(level),largetext)
testrect.center = ((display_width/2),(display_height/2))
gamedisplays.blit(textsurf,testrect)
pygame.display.update()
time.sleep(3)
if y < obs_starty + obs_height:#couldnt identify the bug
if x > obs_startx and x < obs_startx + obs_width or x + car_width > obs_startx and x + car_width < obs_startx + obs_width:
crash()
pygame.display.update()
clock.tick(60)
game_loop()
pygame.quit()
quit()
| true |
24b4e27cdb6f4aec95a42c922b214629f575e0da | Python | yfchenggithub/PYTHON | /paramiko_exec_command.py | UTF-8 | 716 | 2.640625 | 3 | [] | no_license | #!/usr/bin/python
import paramiko
import sys
import os
import string
def usage():
print('usage: %s netstat -pan | grep -w 80' % sys.argv[0])
#判断是否输入加入命令行
if len(sys.argv) < 2:
usage()
sys.exit(1)
#命令弄成字符串形式
input_cmd = ' '.join(sys.argv[1:])
#A high-level representation of a session with an SSH server
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
#连接服务器 用户名密码
ssh.connect('172.171.48.72', 22, 'yfcheng','yfcheng')
#执行命令
stdin, stdout, stderr = ssh.exec_command(input_cmd)
for line in stdout.readlines():
print(line)
for line in stderr.readlines():
print(line)
#关闭连接
ssh.close()
| true |
2b9d0cf189d0b0ff7e34f9dade9b920bd20d4cb4 | Python | amanshu-cloud/hackerrank | /maze.py | UTF-8 | 887 | 3.484375 | 3 | [] | no_license | #issafe function to check if we have reached the edge cases
def issafe(r,c,n,maze):
if r<0 or r>=n:
return False
if c<0 or c>=n:
return False
if maze[r][c]:
return True
return False
#solcemaze function
def solvemaze(maze,i,j,soln,n):
if i==n-1 and j==n-1:
soln[i][j]==1
return True
if issafe(i,j,n,maze):
soln[i][j]=1
if solvemaze(maze,i+1,j,soln,n):
return True
if solvemaze(maze,i,j+1,soln,n):
return True
soln[i][j]=0
return False
n = int(input("enter n"))
maze = []
soln = []
for i in range(n):
maze.append(list(map(int,input().split())))
for i in range(n):
j = [0]*n
soln.append(j)
if solvemaze(maze,0,0,soln,n):
for i in range(n):
for j in range(n):
print(soln[i][j],end = "")
else:
print("-1") | true |
5fc04cbadcf4b27ee368c238da35f13a69c3aebe | Python | efatmae/Does-BERT-pay-attention-to-cyberbullying- | /Does-BERT-pay-attention-to-cyberbullying-/Model_Training/Pytorch/pretrained_models_helpers.py | UTF-8 | 5,768 | 2.8125 | 3 | [] | no_license | import torch
import numpy as np
from torch.utils.data import TensorDataset, random_split
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from sklearn.model_selection import train_test_split
def data_tokenization(sentences,labels,tokenizer, maxlen):
# Tokenize all of the sentences and map the tokens to thier word IDs.
# sentences" a list of sentences
# labels: a list of labels
input_ids = []
attention_masks = []
# For every sentence...
for sent in sentences:
# `encode_plus` will:
# (1) Tokenize the sentence.
# (2) Prepend the `[CLS]` token to the start.
# (3) Append the `[SEP]` token to the end.
# (4) Map tokens to their IDs.
# (5) Pad or truncate the sentence to `max_length`
# (6) Create attention masks for [PAD] tokens.
encoded_dict = tokenizer.encode_plus(
sent, # Sentence to encode.
add_special_tokens=True, # Add '[CLS]' and '[SEP]'
max_length=maxlen, # Pad & truncate all sentences.
pad_to_max_length=True,
return_attention_mask=True, # Construct attn. masks.
return_tensors='pt',
truncation='longest_first' # Return pytorch tensors.
)
# Add the encoded sentence to the list.
input_ids.append(encoded_dict['input_ids'])
# And its attention mask (simply differentiates padding from non-padding).
attention_masks.append(encoded_dict['attention_mask'])
# Convert the lists into tensors.
input_ids = torch.cat(input_ids, dim=0)
attention_masks = torch.cat(attention_masks, dim=0)
labels = torch.tensor(labels)
return input_ids, attention_masks, labels
def split_data_into_stratified_train_and_valid(input_ids, attention_masks, labels, batch_size=32, test_size=0.3):
# Combine the training inputs into a TensorDataset.
dataset = TensorDataset(input_ids, attention_masks, labels)
train_idx, valid_idx = train_test_split(
np.arange(len(labels)),
test_size=test_size,
shuffle=True,
stratify=labels)
# Create the DataLoaders for our training and validation sets.
# We'll take training samples in random order.
train_dataloader = DataLoader(
dataset, # The training samples.
sampler=RandomSampler(train_idx), # Select batches randomly
batch_size=batch_size # Trains with this batch size.
)
# For validation the order doesn't matter, so we'll just read them sequentially.
validation_dataloader = DataLoader(
dataset, # The validation samples.
sampler=SequentialSampler(valid_idx), # Pull out batches sequentially.
batch_size=batch_size # Evaluate with this batch size.
)
return train_dataloader, validation_dataloader
def train(model, scheduler, optimizer, train_dataloader, device):
print('Training...')
model.train()
for step, batch in enumerate(train_dataloader):
b_input_ids = batch[0].to(device)
b_input_mask = batch[1].to(device)
b_labels = batch[2].to(device)
model.zero_grad()
loss, logits,_, _ = model(b_input_ids,
token_type_ids=None,
attention_mask=b_input_mask,
labels=b_labels)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
scheduler.step()
return loss, logits
def validate(model, validation_dataloader, device):
    """Evaluate *model* over *validation_dataloader* without gradients.

    Mirrors train(): batches are (input_ids, attention_mask, labels) and the
    model call returns at least (loss, logits, ...).  Returns the loss and
    logits of the LAST batch only.
    """
    model.eval()
    for batch in validation_dataloader:
        input_ids, input_mask, labels = (t.to(device) for t in batch[:3])
        # No gradient bookkeeping is needed during evaluation.
        with torch.no_grad():
            loss, logits, _, _ = model(input_ids,
                                       token_type_ids=None,
                                       attention_mask=input_mask,
                                       labels=labels)
    return loss, logits
def create_test_set_data_loader(input_ids, attention_masks, labels, batch_size=32):
    """Wrap the test tensors in a sequentially-ordered DataLoader.

    Order is preserved so predictions can be matched back to samples.
    """
    dataset = TensorDataset(input_ids, attention_masks, labels)
    return DataLoader(dataset,
                      sampler=SequentialSampler(dataset),
                      batch_size=batch_size)
def test_model_performance(model, test_data_loader, device):
model.eval()
# Tracking variables
predictions, true_labels = [], []
# Predict
for batch in test_data_loader:
batch = tuple(t.to(device) for t in batch) # Add batch to GPU
# Unpack the inputs from our dataloader
b_input_ids, b_input_mask, b_labels = batch
# Telling the model not to compute or store gradients, saving memory and
# speeding up prediction
with torch.no_grad():
outputs = model(b_input_ids, token_type_ids=None,
attention_mask=b_input_mask) # Forward pass, calculate logit predictions
logits = outputs[0]
# Move logits and labels to CPU
logits = logits.detach().cpu().numpy()
label_ids = b_labels.to('cpu').numpy()
# Store predictions and true labels
predictions.append(logits)
true_labels.append(label_ids)
# Combine the results across all batches.
flat_predictions = np.concatenate(predictions, axis=0)
# For each sample, pick the label (0 or 1) with the higher score.
flat_predictions = np.argmax(flat_predictions, axis=1).flatten()
# Combine the correct labels for each batch into a single list.
flat_true_labels = np.concatenate(true_labels, axis=0)
return flat_predictions, flat_true_labels | true |
cd11a3fc9ec64e0b31c6ba46ec9a841e069ae671 | Python | kraudust/byu_classes | /robotic_vision/hw5_object_tracking/kalman_tracking_hw5.py | UTF-8 | 12,912 | 2.578125 | 3 | [] | no_license | import cv2
import numpy as np
from copy import deepcopy
from pdb import set_trace as pause
from scipy.stats import mode
class klt_kalman():
    """KLT (Lucas-Kanade) point tracker smoothed by a Kalman filter.

    One good feature is picked inside a region of interest of the first
    frame and tracked frame-to-frame with pyramidal LK optical flow; the
    tracked point feeds a 4-state (x, y, vx, vy) constant-velocity Kalman
    filter whose corrected estimate is what gets drawn.
    """
    def __init__(self, video_path):
        """video_path: path of the video file (or camera index) to open."""
        # Open Camera or Video
        self.cap = cv2.VideoCapture(video_path)
        # Check if camera opened successfully
        if (self.cap.isOpened()== False):
            print("Error opening video stream or file")
        # params for ShiTomasi corner detection
        self.feature_params = dict( maxCorners = 1,
                                    qualityLevel = 0.3,
                                    minDistance = 7,
                                    blockSize = 7 )
        # params for lucase kanade optical flow
        self.lk_params = dict( winSize = (15, 15),
                               maxLevel = 2,
                               criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
        self.p1 = None # p0 passed through optical flow
        ret, self.frame_old = self.cap.read()
        # NOTE: the interactive ROI selection below is immediately overridden
        # by hard-coded coordinates (tuned for mv2_001.avi).
        self.cx, self.cy, self.w, self.h = cv2.selectROI("Image", self.frame_old, False, False)
        self.cx, self.cy, self.w, self.h = (255, 92, 26, 18)
        # print(self.cx, self.cy, self.w, self.h)
        self.frame_old = cv2.cvtColor(self.frame_old, cv2.COLOR_BGR2GRAY)
        # Crop the (half-width, half-height) region of interest from the
        # first gray frame.
        self.roi = self.frame_old[int(self.cy):int(self.cy + self.h/2), int(self.cx):int(self.cx + self.w/2.)]
        self.p0 = cv2.goodFeaturesToTrack(self.roi, mask = None, **self.feature_params)
        # Shift the feature from ROI coordinates back to full-frame coordinates.
        self.p0[:,0,0] = self.p0[:,0,0] + self.cx
        self.p0[:,0,1] = self.p0[:,0,1] + self.cy
        self.p0 = np.float32(self.p0)
        # initialize Kalman Filter
        self.Ts = 1./30. # timestep
        self.kalman = cv2.KalmanFilter(4,2) # kalman filter object
        self.kalman.transitionMatrix = np.array([ [1., 0., self.Ts, 0.],
                                                  [0., 1., 0., self.Ts],
                                                  [0., 0., 1., 0.],
                                                  [0., 0., 0., 1.]], np.float32) # state transition matrix (discrete)
        self.kalman.measurementMatrix = np.array([[1., 0., 0., 0.], [0., 1., 0., 0.]], np.float32)
        # Process noise shaped like a piecewise-constant-acceleration model.
        self.kalman.processNoiseCov = 1e+2 * np.array([[self.Ts**3/3., 0., self.Ts**2/2., 0.],
                                                       [0., self.Ts**3/3., 0., self.Ts**2/2.],
                                                       [self.Ts**2/2., 0., self.Ts, 0.],
                                                       [0., self.Ts**2/2., 0., self.Ts]], np.float32)
        self.kalman.measurementNoiseCov = 1e-5 * np.eye(2, dtype=np.float32)
        # Start the filter at the detected feature with zero velocity.
        self.kalman.statePost = np.array([[self.p0[0][0][0]],
                                          [self.p0[0][0][1]],
                                          [0.],
                                          [0.]], dtype=np.float32)
        self.kalman.errorCovPost = 0.1 * np.eye(4, dtype=np.float32)
    def run_tracking(self):
        """Track until the video ends or 'q' is pressed, drawing the box
        around the Kalman-corrected point on every frame."""
        while(self.cap.isOpened()):
            # Capture Frame
            ret, frame = self.cap.read()
            self.frame_new = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # Calculate optical flow
            self.p1, st, err = cv2.calcOpticalFlowPyrLK(self.frame_old, self.frame_new, self.p0, None, **self.lk_params)
            # Update with Kalman Filter: predict, then correct with the
            # optical-flow measurement.
            p_predict = self.kalman.predict()
            p_correct = self.kalman.correct(np.array([[self.p1[0,0,0]], [self.p1[0,0,1]]], np.float32))
            if ret == True:
                # Draw a fixed-size tracking box centred on the estimate.
                pt1 = (int(p_correct[0]-self.w/2), int(p_correct[1]-self.h/2))
                pt2 = (int(p_correct[0]+self.w/2), int(p_correct[1]+self.h/2))
                new_im = cv2.rectangle(frame, pt1, pt2, (0,0,255), thickness=2)
                cv2.imshow('Original',new_im)
                # Press Q on keyboard to exit
                if cv2.waitKey(25) & 0xFF == ord('q'):
                    break
            # Break the loop
            else:
                break
            # Save the current image and point as the previous image and point
            self.frame_old = deepcopy(self.frame_new)
            self.p0 = np.reshape(p_correct[0:2,:],(-1,1,2))
class mean_cam_kalman():
    """MeanShift/CamShift colour-histogram tracker with a Kalman smoother.

    A hue histogram of the selected window is back-projected onto each new
    frame and MeanShift or CamShift relocates the window.  A 4-state
    (x, y, vx, vy) Kalman filter is updated with the window position; note
    the filtered estimate is computed but the RAW window is what gets drawn.
    """
    def __init__(self,video_path, tracker_type):
        """video_path: video file path (or camera index);
        tracker_type: 'mean' for MeanShift or 'cam' for CamShift."""
        self.tracker_type = tracker_type
        # Initialize Camera
        self.cap = cv2.VideoCapture(video_path)
        # Check if camera opened successfully
        if (self.cap.isOpened()== False):
            print("Error opening video stream or file")
        ret,frame = self.cap.read()
        # Get window to track
        # NOTE: the interactive selection is immediately overridden by
        # hard-coded coordinates (tuned for mv2_001.avi).
        self.track_window = cv2.selectROI("Image", frame, False, False)
        self.track_window = (257, 96, 18, 11)
        # print(self.track_window)
        cx, cy, w, h = self.track_window
        # Set up region of interest for tracking
        roi = frame[cy:cy+h, cx:cx+w]
        hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
        # Keep only hues within +/-20 of the most common hue in the ROI
        # (with minimum saturation 50) when building the histogram.
        most = np.float32(mode(hsv_roi[:,:,0], axis=None))[0][0]
        mask = cv2.inRange(hsv_roi, np.array((most-20., 50.,0.)), np.array((most+20.,255.,255.)))
        self.roi_hist = cv2.calcHist([hsv_roi],[0],mask,[180],[0,180])
        cv2.normalize(self.roi_hist, self.roi_hist, 0, 255, cv2.NORM_MINMAX) # should it be 255?
        # Stop MeanShift/CamShift after 100 iterations or a 5-pixel move.
        self.term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 100, 5)
        # initialize Kalman Filter
        self.Ts = 1./30. # timestep
        self.kalman = cv2.KalmanFilter(4,2) # kalman filter object
        self.kalman.transitionMatrix = np.array([ [1., 0., self.Ts, 0.],
                                                  [0., 1., 0., self.Ts],
                                                  [0., 0., 1., 0.],
                                                  [0., 0., 0., 1.]], np.float32) # state transition matrix (discrete)
        self.kalman.measurementMatrix = np.array([[1., 0., 0., 0.], [0., 1., 0., 0.]], np.float32)
        # Process noise shaped like a piecewise-constant-acceleration model.
        self.kalman.processNoiseCov = 1e1 * np.array([[self.Ts**3/3., 0., self.Ts**2/2., 0.],
                                                      [0., self.Ts**3/3., 0., self.Ts**2/2.],
                                                      [self.Ts**2/2., 0., self.Ts, 0.],
                                                      [0., self.Ts**2/2., 0., self.Ts]], np.float32)
        self.kalman.measurementNoiseCov = 1e-3 * np.eye(2, dtype=np.float32)
        # Start the filter at the window's top-left corner with zero velocity.
        self.kalman.statePost = np.array([[cx],
                                          [cy],
                                          [0.],
                                          [0.]], dtype=np.float32)
        self.kalman.errorCovPost = 0.1 * np.eye(4, dtype=np.float32)
    def run_tracking(self):
        """Track until the video ends or 'q' is pressed."""
        while(self.cap.isOpened()):
            ret, frame = self.cap.read()
            if ret == True:
                hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
                dst = cv2.calcBackProject([hsv], [0], self.roi_hist, [0, 180], 1)
                # apply meanshift or camshift to get the new location
                if self.tracker_type == 'mean':
                    ret, self.track_window = cv2.meanShift(dst, self.track_window, self.term_crit)
                elif self.tracker_type == 'cam':
                    ret, self.track_window = cv2.CamShift(dst, self.track_window, self.term_crit)
                # Update with Kalman Filter (estimate computed but not drawn).
                p_predict = self.kalman.predict()
                p_correct = self.kalman.correct(np.array([[self.track_window[0]],[self.track_window[1]]], np.float32))
                # Draw track window and display it
                cx, cy, w, h = self.track_window
                new_im = cv2.rectangle(frame, (cx,cy), (cx+w, cy+h), (0,0,255), thickness=2)
                cv2.imshow('Image', new_im)
                if cv2.waitKey(25) & 0xFF == ord('q'):
                    break
            else:
                break
class back_sub_kalman():
    """Frame-differencing (background subtraction) motion tracker.

    Moving regions are found by thresholding the absolute difference of
    consecutive gray (blurred) frames, cleaned with erosion/dilation, and
    located with OpenCV's SimpleBlobDetector.  Despite the class name, the
    Kalman-filter code was only sketched here and is kept commented out.
    """
    def __init__(self, video_path):
        """video_path: path of the video file (or camera index) to open."""
        # Initialize Camera
        self.cap = cv2.VideoCapture(video_path)
        # Check if camera opened successfully
        if (self.cap.isOpened()== False):
            print("Error opening video stream or file")
        ret,self.frame_old = self.cap.read()
        self.blur = 5  # Gaussian blur kernel size used to suppress noise
        self.frame_old = cv2.cvtColor(self.frame_old, cv2.COLOR_BGR2GRAY)
        # Elliptical kernel used by the dilation step in run_tracking().
        self.kernel_d = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(13,13))
        # initialize kernel for erroding function
        self.kernel_e = np.ones((15,15),np.uint8)
        self.frame_old = cv2.GaussianBlur(self.frame_old, (self.blur, self.blur), 0)
        self.area = 10
        # NOTE: an unfinished Kalman-filter initialisation (mirroring the
        # other trackers in this file) was left commented out here.
    def run_tracking(self):
        """Detect and display moving blobs until the video ends or 'q'."""
        # Setup SimpleBlobDetector parameters.
        params = cv2.SimpleBlobDetector_Params()
        # Filter by Area.
        params.filterByArea = True
        params.minArea = 100
        # Filter by Circularity
        params.filterByCircularity = True
        params.minCircularity = 0.
        # Filter by Convexity
        params.filterByConvexity = True
        params.minConvexity = 0.0
        # Filter by Inertia
        params.filterByInertia = False
        params.minInertiaRatio = 0.00
        # Set up the detector with default parameters.
        detector = cv2.SimpleBlobDetector_create(params)
        while(self.cap.isOpened()):
            ret, frame = self.cap.read()
            if ret == True:
                self.frame_new = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                # Threshold the frame difference, then erode/dilate to
                # drop speckle noise and merge nearby motion regions.
                diff = cv2.absdiff(self.frame_new, self.frame_old)
                thresh_diff = cv2.threshold(diff, 10, 255, cv2.THRESH_BINARY)[1]
                new_im = cv2.erode(thresh_diff, None, iterations = 1)
                new_im = cv2.dilate(new_im, self.kernel_d, iterations = 3)
                # Detect blobs on the INVERTED mask (the detector looks for
                # dark blobs on a light background by default).
                keypoints = detector.detect(cv2.bitwise_not(new_im))
                im_with_keypoints = cv2.drawKeypoints(frame, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
                cv2.imshow('diff', new_im)
                cv2.imshow('orig', im_with_keypoints)
                # Press Q on keyboard to exit
                if cv2.waitKey(25) & 0xFF == ord('q'):
                    break
            # Break the loop
            else:
                break
            self.frame_old = deepcopy(self.frame_new)
if __name__ == "__main__":
    # Hard-coded input video; set video_location = 0 to use the webcam.
    video_location = '/home/kraudust/git/personal_git/byu_classes/robotic_vision/hw5_object_tracking/mv2_001.avi'
    # video_location = 0
    # Pick exactly one of the three trackers below:
    # KLT Kalman Tracker
    # kt = klt_kalman(video_location)
    # Mean or Cam Shift Kalman Tracker
    # kt = mean_cam_kalman(video_location, 'mean')
    # Background Subtraction Kalman Tracker
    kt = back_sub_kalman(video_location)
    kt.run_tracking()
    # Release the capture and close all OpenCV windows on exit.
    kt.cap.release()
    cv2.destroyAllWindows()
| true |
2d42ef18ac38791f9ea9c8fffbfabbc72039b966 | Python | TangYizhao/Python_202101 | /http_project.py | UTF-8 | 2,703 | 2.96875 | 3 | [] | no_license | import re
from socket import *
from select import select
class HTTPServer:
    """Minimal select()-based static-file HTTP server.

    Serves files from the ``html`` directory over non-blocking sockets,
    multiplexing the listening socket and all client connections with a
    single select() loop.
    """
    def __init__(self, host = "0.0.0.0",port = 8000, html=None):
        """host/port: bind address; html: root directory of the static pages."""
        self.host = host
        self.port = port
        self.html = html
        self.creat_socket()
        self.bind()
        # Read/write/exception descriptor lists handed to select().
        self.rlist = []
        self.wlist = []
        self.xlist = []
    def creat_socket(self):
        """Create the listening TCP socket in non-blocking mode."""
        self.sockfd = socket()
        self.sockfd.setblocking(False)
    def bind(self):
        """Bind the listening socket to (host, port)."""
        self.address = (self.host,self.port)
        self.sockfd.bind(self.address)
    def start(self):
        """Accept clients and dispatch ready connections forever."""
        self.sockfd.listen(3)
        print("Listen the port:",self.port)
        self.rlist.append(self.sockfd)
        while True:
            rl, wl, xl = select(self.rlist, self.wlist, self.xlist)
            for r in rl:
                if r is self.sockfd:
                    # The listening socket is readable: a client is waiting.
                    connfd,addr = r.accept()
                    print("Connect from:", addr)
                    connfd.setblocking(False)
                    self.rlist.append(connfd)
                else:
                    # An existing client sent data: handle its request.
                    self.handle(r)
    def handle(self, connfd):
        """Parse the request line of *connfd* and serve the requested path."""
        request = connfd.recv(1024).decode()
        # METHOD whitespace PATH; the capture group is the request path.
        pattern = r"[A-Z]+\s+(/\S*)"
        try :
            info = re.match(pattern,request).group(1)
        except:
            # Malformed/empty request (or the client hung up): drop it.
            self.rlist.remove(connfd)
            connfd.close()
            return
        else:
            self.get_html(connfd,info)
    def get_html(self, connfd, info):
        """Send the file mapped to *info*, or a 404 page if unreadable.

        NOTE(review): every response is labelled text/html regardless of
        the actual file type — confirm whether that is intended.
        """
        if info =="/":
            filename = self.html + "/index.html"
        else:
            filename = self.html + info
        try:
            f = open(filename,"rb")
        except:
            response_headers = "HTTP/1.1 404 NOT FOUND\r\n"
            response_headers += "Content-Type:text/html\r\n"
            response_headers += "\r\n"
            response_content = "<h1>Sorry...PAGE NOT FOUND</h1>"
            response = (response_headers + response_content).encode()
        else:
            response_content = f.read()
            response_headers = "HTTP/1.1 200 OK\r\n"
            response_headers += "Content-Type:text/html\r\n"
            response_headers += "Content-Length:%d\r\n"%len(response_content)
            response_headers += "\r\n"
            response = response_headers.encode() + response_content
            f.close()
        # Send headers + body in one call.
        connfd.send(response)
if __name__ == '__main__':
    """
    通过HTTPServer类快速搭建服务
    展示网页
    """
    # (The note above says: quickly stand up a service with the HTTPServer
    # class and display web pages.)
    host = "0.0.0.0"
    port = 8000
    dir = "./static"
    # Instantiate the server object.
    httpd = HTTPServer(host = host,port = port,html = dir)
    # Call the method that starts serving (blocks forever).
    httpd.start()
| true |
f923b6cdb3ca660c24593eb7686a4dd1620fe380 | Python | rudik32/gitTest | /python/lab1_v6.py | UTF-8 | 1,649 | 3.9375 | 4 | [] | no_license | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Variant VI (assignment text, translated from Russian):
# Wind speed is given on the interval [0; 30] m/s.  With a step of 5,
# convert this speed to km/h.
# 1. Print the speed values in m/s and km/h using a while loop
# 2. Define a list of wind speeds in m/s and build the matching km/h list
#    with a for loop over the list elements.  Print the result as a table
#    using printf-style formatting
# 3. Use for with a generated list of indices
# 4. Use the compact (one-line) list-comprehension syntax
# 5. From the two lists, using a simultaneous pass over both, build a nested
#    list of matching value pairs.  Print the result as a table
#
# NOTE: this script targets Python 2 — the `print('...')%(...)` line below
# only works where print is a statement; under Python 3 it would try to
# apply % to print()'s None return value.
#1
c = 0
while(c<=30):
    print(c*3.6)  # 1 m/s == 3.6 km/h
    c+=5
#2
c1=[0,5,10,15,20,25,30]
c1_list=[i*3.6 for i in c1]
for c1,c1_list in zip(c1,c1_list):  # NOTE: shadows the two lists with scalars
    print('_________')
    print('|%-3i|%-3i|')%(c1,c1_list)  # Python 2 only (see note above)
    print('_________')
#3-4
c2_list =[i*3.6 for i in range(0,35,5)]
print(c2_list)
#5
c2=[0,5,10,15,20,25,30]
table = [[c2,c2_list] for c2,c2_list in zip(c2,c2_list)]
for j in range(len(table)):
    print(table[j])
c355b5d839f01719dee308f817174e181877b645 | Python | Marcfeitosa/listadeexercicios | /ex108.py | UTF-8 | 879 | 4.125 | 4 | [] | no_license | """
Desafio 108
Adapte o código do desafio 107, criando uma função adicional chamada moeda() que consiga mostrar os valores como um valor monetário formatado.
O programa vai usar o módulo assim:
from aula22 import moeda
p = float(input('Digite o preço: R$ '))
print(f'A metade de {moeda.moeda(p)} é {moeda.moeda(moeda.metade(p))}')
print(f'O dobro de {moeda.moeda(p)} é {moeda.moeda(moeda.dobro(p))}')
print(f'Aumentando 10%, temos {moeda.aumentar(p, 10)}')
print(f'Reduzindo 13%, temos {moeda.dimiuir(p, 13)}')
"""
from aula22 import moeda  # course module providing moeda()/metade()/dobro()/aumentar()/diminuir()

# Read a price and show it transformed by each helper, with every value
# formatted as currency by moeda.moeda() (per the challenge statement above).
p = float(input('Digite o preço: R$ '))
print(f'A metade de {moeda.moeda(p)} é {moeda.moeda(moeda.metade(p))}')
print(f'O dobro de {moeda.moeda(p)} é {moeda.moeda(moeda.dobro(p))}')
print(f'Aumentando 10%, temos {moeda.moeda(moeda.aumentar(p, 10))}')
print(f'Reduzindo 13%, temos {moeda.moeda(moeda.diminuir(p, 13))}')
| true |
2338ec896846388060e07def2c30433f41aa9636 | Python | solbol/python | /Lesson 5/Lesson 5.3.py | UTF-8 | 693 | 3.5625 | 4 | [] | no_license | with open('user_file.txt', 'w') as f:
    # Seed the file with five "<surname> оклад <salary>" records, one per line.
    f.write('Смирнов оклад 20000\n')
    f.write('Васильев оклад 30000\n')
    f.write('Иванов оклад 15000\n')
    f.write('Петров оклад 18000\n')
    f.write('Сидоров оклад 25000')
with open('user_file.txt') as f:
    salary_sum = 0
    salary_cnt = 0
    for line in f:
        user_info = line.split()  # [surname, 'оклад', salary]
        if int(user_info[2]) <= 20000:
            # Report employees earning at most 20 000 and include them
            # in the average.
            # NOTE(review): indentation reconstructed — the accumulation is
            # assumed to be inside the `if`; verify against the original.
            print(f'Сотрудник {user_info[0]} имеет оклад меньше 20 тыс. руб.')
            salary_sum += int(user_info[2])
            salary_cnt += 1
    # Average over the reported (low-paid) employees only.
    print(f'Средняя зарплата {salary_sum / salary_cnt} руб.')
7baa0c9f5dd09383010aaff73fd904c9a68e4443 | Python | Raniac/NEURO-LEARN | /env/lib/python3.6/site-packages/dipy/reconst/benchmarks/bench_peaks.py | UTF-8 | 895 | 2.796875 | 3 | [
"Apache-2.0"
] | permissive | """ Benchmarks for peak finding
Run all benchmarks with::
import dipy.reconst as dire
dire.bench()
With Pytest, Run this benchmark with:
pytest -svv -c bench.ini /path/to/bench_peaks.py
"""
import numpy as np
from dipy.reconst.recspeed import local_maxima
from dipy.data import get_sphere
from dipy.core.sphere import unique_edges
from numpy.testing import measure
def bench_local_maxima():
    """Benchmark ``local_maxima`` on a real sphere ODF and a few-peak ODF.

    BUGFIX: the first ``measure`` call referenced ``odf`` and ``edges``
    before either was defined (``edges`` was never computed at all), so the
    benchmark raised NameError.  The two missing setup lines are restored,
    matching the upstream dipy benchmark.
    """
    repeat = 10000
    sphere = get_sphere('symmetric724')
    vertices, faces = sphere.vertices, sphere.faces
    # Build the edge list once; local_maxima walks neighbours along edges.
    edges = unique_edges(faces)
    # A dense random ODF exercises the "many candidate peaks" path.
    odf = abs(np.random.random(len(vertices)))
    print('Timing peak finding')
    timed0 = measure("local_maxima(odf, edges)", repeat)
    print('Actual sphere: %0.2f' % timed0)
    # Create an artificial odf with a few peaks
    odf = np.zeros(len(vertices))
    odf[1] = 1.
    odf[143] = 143.
    odf[505] = 505.
    timed1 = measure("local_maxima(odf, edges)", repeat)
    print('Few-peak sphere: %0.2f' % timed1)
| true |
bbaaf8d1e5a91fcec498e5df385dd7dc1af21776 | Python | M-RaquelCS/BreveHistoria | /quizz2.py | UTF-8 | 6,304 | 3.578125 | 4 | [] | no_license | import pygame
def musica_tema():
    """Play the quiz theme tune (musica_tema.mp3) via the pygame mixer."""
    pygame.mixer.init()
    pygame.mixer.music.load("musica_tema.mp3")
    pygame.mixer.music.play()
def musica_acertou():
    """Play the correct-answer jingle (musica_acertou.mp3)."""
    pygame.mixer.init()
    pygame.mixer.music.load('musica_acertou.mp3')
    pygame.mixer.music.play()
def musica_errou():
    """Play the wrong-answer jingle (musica_erro.mp3)."""
    pygame.mixer.init()
    pygame.mixer.music.load('musica_erro.mp3')
    pygame.mixer.music.play()
def musicawinner():
    """Play the end-of-game jingle for 9-10 correct answers (fimjogo9q.mp3)."""
    # Initialise the PyGame mixer.
    pygame.mixer.init()
    # Load and start the jingle.
    pygame.mixer.music.load('fimjogo9q.mp3')
    pygame.mixer.music.play()
def musicawinner2():
    """Play the end-of-game jingle for 6-8 correct answers (fimjogo8q.mp3)."""
    pygame.mixer.init()
    pygame.mixer.music.load('fimjogo8q.mp3')
    pygame.mixer.music.play()
def musicawinner3():
    """Play the end-of-game jingle for 4-5 correct answers (fimjogo5q.mp3)."""
    pygame.mixer.init()
    pygame.mixer.music.load('fimjogo5q.mp3')
    pygame.mixer.music.play()
def musicawinner4():
    """Play the end-of-game jingle for 0-3 correct answers (fimjogo3p.mp3)."""
    pygame.mixer.init()
    pygame.mixer.music.load('fimjogo3p.mp3')
    pygame.mixer.music.play()
while True:  # main game loop: one full 10-question round per iteration
    acertos = 0  # correct-answer counter, reset at the start of every round
    erros = 0    # wrong-answer counter
    musica_tema()
    print("Bem vindo ao Breve História.")
    print("Bom jogo meu caro brasileiro.")
    nomeUsuario = input("Qual o seu nome? ")
    from random import sample  # sample() draws unique values, no repetition
    # Pick 10 distinct question numbers (files perguntas1.txt..perguntas24.txt).
    numeroPergunta = sample(range(1,25),10)
    for i in range(len(numeroPergunta)):  # walk the chosen question numbers
        num = str(numeroPergunta[i])  # question-file number as a string
        # Open the selected question file; line index 5 holds the answer key.
        arquivoPerguntas = open("perguntas{}.txt".format(num), "r")
        for i,elementos in enumerate(arquivoPerguntas.readlines()):
            if i != 5:
                print(elementos)  # show the question text and alternatives
            else:
                respostaPergunta = elementos  # stash the answer key, don't print it
        respostaUsuario = input("Sua resposta: ")
        # Only a/A/b/B/c/C/d/D count as valid answers.
        if respostaUsuario == "a" or respostaUsuario == "A" or respostaUsuario == "b" or respostaUsuario == "B" or respostaUsuario == "c" or respostaUsuario == "C" or respostaUsuario == "d" or respostaUsuario == "D":
            if respostaUsuario == respostaPergunta or respostaUsuario == respostaPergunta.lower():
                musica_acertou()
                acertos += 1
                print("Acertou Mizeravi.\n".title())
            elif respostaUsuario != respostaPergunta or respostaUsuario != respostaPergunta.lower():
                musica_errou()
                erros += 1
                print("Lamento.\n".title())
        else:
            # Invalid option typed: ask once more and grade that attempt.
            print("Opção digitada inválida.")
            respostaUsuario = input("Sua resposta: ")
            if respostaUsuario == respostaPergunta or respostaUsuario == respostaPergunta.lower():
                musica_acertou()
                acertos += 1
                print("Acertou Mizeravi\n".title())
            elif respostaUsuario != respostaPergunta or respostaUsuario != respostaPergunta.lower():
                musica_errou()
                erros += 1
                print("Lamento.\n".upper())
    print("Sua quantidade de acertos foi:", acertos)
    # Final feedback (and jingle) chosen by score band.
    if 9 <= acertos <= 10:
        musicawinner()
        print("Valeime, quem é Tom Hanks perto de você?")
    elif 6 <= acertos <=8:
        musicawinner2()
        print("Você até sabe história mas podia melhorar né não?")
    elif 4 <= acertos <= 5:
        musicawinner3()
        print("Amoreeeeeee, vamo assistir umas videos aulas hein?" + "\nVamo cuida vai pro Youtube, cuida anda")
    elif 0 <= acertos <= 3:
        musicawinner4()
        print("Queridx, te preserva mulher, vai estudar")
    # Append "<score> <name>" so the ranking pass below can split them on
    # the space between score and name.
    dadosUsuario = open("dadosUsuario.txt", "a")
    dadosUsuario.write(str(acertos)+ " " + nomeUsuario + "\n")
    dadosUsuario.close()
    # Re-read the whole score file to build the ranking.
    ranking = open("dadosUsuario.txt", "r")
    posição = 0
    numerosPosições = " "
    nomesJogadores = " "
    lista = []
    for x in ranking:
        # Locate the LAST space in the line: score precedes it, name follows.
        for p, a in enumerate(x):
            if a == " " :
                posição = p
        for y in range(0, posição):
            numerosPosições += x[y]
        for z in range(posição+1 , len(x)):
            nomesJogadores += x[z]
        lista.append((float(numerosPosições),nomesJogadores))
        # Reset the accumulators for the next line.
        posição = 0
        numerosPosições = " "
        nomesJogadores = " "
    # Sort the (score, name) tuples highest-score first.
    novalista = sorted(lista, reverse = True)
    for a in novalista:
        print(*a)
    escolhaUsuario = input("Deseja jogar novamente?" +"\nDigite sim ou não: ")
    if escolhaUsuario == "sim" or escolhaUsuario == "SIM" or escolhaUsuario == "não" or escolhaUsuario == "NÃO" or escolhaUsuario == "nao" or escolhaUsuario == "NAO":
        if escolhaUsuario == "sim" or escolhaUsuario == "SIM":
            continue
        elif escolhaUsuario == "não" or escolhaUsuario == "NÃO" or escolhaUsuario == "nao" or escolhaUsuario == "NAO":
            break
    else:
        # Invalid answer: ask once more before deciding to replay or quit.
        print("Opção digitada inválida.")
        escolhaUsuario = input("Deseja jogar novamente?" +"\nDigite sim ou não: ")
        if escolhaUsuario == "sim" or escolhaUsuario == "SIM":
            continue
        elif escolhaUsuario == "não" or escolhaUsuario == "NÃO" or escolhaUsuario == "nao" or escolhaUsuario == "NAO":
            break
46f57562173552d4c2aa5091c71aab4c043acab9 | Python | Saskia-vB/eng-57-oop | /monster_inc_university/course.py | UTF-8 | 700 | 3.484375 | 3 | [] | no_license |
class Course:
    """A taught module holding its name, start date and enrolled students.

    BUGFIXES vs. the original:
    * ``list_of_students=[]`` was a mutable default, so every Course created
      without an explicit list shared ONE student list; replaced with the
      ``None`` sentinel idiom.
    * a broken ``module_name`` method (it referenced an undefined global and
      was shadowed by the instance attribute anyway, so it could never run
      successfully) has been removed; the ``module_name`` attribute remains.
    """

    def __init__(self, module_name, start_date, list_of_students=None):
        """module_name: course title; start_date: when the course begins;
        list_of_students: optional initial enrolment (a fresh list if omitted)."""
        self.module_name = module_name
        self.list_of_students = [] if list_of_students is None else list_of_students
        self.start_date = start_date

    def get_module_name(self):
        """Return the course's module name."""
        return self.module_name

    def add_student(self, student):
        """Enrol *student* on this course and return a confirmation string."""
        self.list_of_students.append(student)
        return 'Student Added'

    def get_students(self):
        """Return the raw list of enrolled student objects."""
        return self.list_of_students

    def get_name(self):
        """Return the names of all enrolled students (via student.get_name())."""
        return [student.get_name() for student in self.list_of_students]
| true |
335ea8a03acdb794af5330239f9b0d2c64c9a596 | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_201/1220.py | UTF-8 | 879 | 3.140625 | 3 | [] | no_license | import numpy as np
from math import floor
def make_answer(_n):
    """Format the two stall halves left after placing into a gap of size _n.

    Splitting a gap of ``_n`` free stalls leaves ``_n//2`` on one side and
    ``_n//2 - 1`` (even ``_n``) or ``_n//2`` (odd ``_n``) on the other.

    BUGFIX: the original used ``/``, which is float division on Python 3 and
    produced e.g. "2.5 1.5"; floor division ``//`` gives the integer result
    on both Python 2 and 3.
    """
    if _n % 2 == 0:
        return ' '.join([str(_n // 2), str(_n // 2 - 1)])
    return ' '.join([str(_n // 2), str(_n // 2)])
def solve(n, k):
    """Answer one Bathroom Stalls case: gap sizes around the k-th arrival.

    After ``2**d - 1`` people have entered (d full "rounds"), the n stalls
    are split into ``2**d`` gaps whose sizes differ by at most one; the k-th
    person takes one of them, and which size they get depends on their order
    within round d.

    Improvements over the original:
    * ``k.bit_length() - 1`` replaces ``int(floor(np.log2(k)))`` — exact
      integer arithmetic, immune to float rounding for large k.
    * ``//`` replaces ``/`` so the arithmetic stays integral on Python 3.
    """
    if k == 1:
        return make_answer(n)
    # d such that 2**d <= k < 2**(d+1): the round in which person k enters.
    d = k.bit_length() - 1
    n_left = n - 2 ** d + 1          # stalls left to distribute in round d
    n_left_div = n_left // (2 ** d)  # base gap size in this round
    n_left_mod = n_left % 2 ** d     # how many gaps are one stall larger
    order = k - 2 ** d               # 0-based position of k within round d
    if order < n_left_mod:
        return make_answer(n_left_div + 1)
    return make_answer(n_left_div)
if __name__ == '__main__':
    # Read the Code Jam input file and emit one "Case #i: answer" per case.
    with open('C-small-2-attempt0.in', 'r') as f1:
        with open('output3.txt', 'w') as f2:
            lines = f1.readlines()
            num_test = int(lines[0])  # declared case count (unused: all lines are consumed)
            case = 1
            for line in lines[1:]:
                # Each case line: n stalls and k people.
                n, k = [int(x) for x in line.strip().split()]
                answer = solve(n, k)
                f2.write('Case #{}: {}\n'.format(case, answer))
                case += 1
| true |
a81a641c403208e37dfd40e088562800265cea81 | Python | Wesley-yang/curequests-1 | /tests/utils.py | UTF-8 | 266 | 2.578125 | 3 | [
"MIT"
] | permissive | import functools
import curio
def run_with_curio(f):
    """Decorator: run the wrapped coroutine function on a fresh curio kernel.

    curio wraps any exception raised inside the task in ``curio.TaskError``;
    the handler below re-raises the underlying exception instead, and
    ``from None`` suppresses the TaskError from the traceback chain so test
    failures read naturally.
    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        try:
            curio.run(f(*args, **kwargs))
        except curio.TaskError as ex:
            # Surface the task's real exception, hiding the wrapper.
            raise ex.__cause__ from None
    return wrapper
| true |
1d64162eea932c88ed655e81fc9aa0708f109c20 | Python | SusannaWull/malt | /malt/detectionevent.py | UTF-8 | 1,533 | 3.234375 | 3 | [
"Apache-2.0"
] | permissive |
import point
class DetectionEvent(object):
    """Persistent record of a single sound-detection event.

    Holds the node position (x, y) at the moment the event was registered,
    the recognition confidence, the sound intensity (usable to estimate the
    distance to the source) and a timestamp.  A standard deviation can be
    attached later with set_std() and read back with get_std().
    """

    def __init__(self, x, y, confidence, intensity, timestamp):
        self.x, self.y = x, y
        self.confidence = confidence
        self.intensity = intensity
        self.timestamp = timestamp
        self.std = None  # optional; populated later by set_std()

    def get_x(self):
        return self.x

    def get_y(self):
        return self.y

    def get_confidence(self):
        return self.confidence

    def get_intensity(self):
        return self.intensity

    def get_timestamp(self):
        return self.timestamp

    def get_pos(self):
        """Return the position as a [x, y] list."""
        return [self.x, self.y]

    def get_std(self):
        """Return the standard deviation; raise if it was never set."""
        if self.std is None:
            raise AttributeError("Standard deviation not set")
        return self.std

    def get_position(self):
        """Return the position wrapped in a point.Point."""
        return point.Point(self.x, self.y)

    def set_std(self, std):
        self.std = std

    def __repr__(self):
        return (f"DetectionEvent(x={self.x}"
                f", y={self.y}"
                f", confidence={self.confidence}"
                f", intensity={self.intensity})")

    def __str__(self):
        return f"X: {self.x}, Y: {self.y}"
| true |
fa14c0bf6fcccaf1c23f30e5ce1774a49871f447 | Python | harryvu141043/vuhuyhoaison-fundamental-C4E26 | /lab_2/calc/game.py | UTF-8 | 451 | 3.21875 | 3 | [] | no_license | import random
while True:
    # Pose a random addition whose shown result is off by -1, 0 or +1,
    # and ask the player to judge whether it is correct (y) or not (n).
    x=random.randint(1,10)
    y=random.randint(1,10)
    erorr=random.randint(-1,1)
    #s=f"{x}+{y}={r}""
    t=x+y          # the true sum
    k=x+y+erorr    # the displayed (possibly wrong) sum
    print(x,"+",y,"=",k)
    y=input("y/n:")  # NOTE: reuses/overwrites the operand variable y
    # Correct judgement keeps the game going; a wrong one ends it.
    if (t==k) and y=="y":
        print("yay")
    elif t==k and y=="n":
        print("no")
        break
    elif t!=k and y=="y":
        print("no")
        break
    elif t!=k and y=="n":
        print("yay")
| true |
fda40b629b034c233a2fff08a5e3ba47bb359b77 | Python | agnes-sharan/simpleRaft | /simpleRaft/boards/redis_board.py | UTF-8 | 1,659 | 3.03125 | 3 | [
"MIT"
] | permissive | import redis # importing packages, importing everything
# Redis is an open source (BSD licensed), in-memory data structure store, used as a database, cache and message broker. It supports data structures such as strings, hashes, lists, sets, sorted sets with range queries, bitmaps, hyperloglogs and geospatial indexes with radius queries. Redis has built-in replication, Lua scripting, LRU eviction, transactions and different levels of on-disk persistence, and provides high availability via Redis Sentinel and automatic partitioning with Redis Cluster
from board import Board # importing Board from board where board is the folder and Board is the class
class RedisBoard(Board):  # class, RedisBoard inherits from Board
    """This will create a message board that is backed by Redis.

    BUGFIX: ``_key()`` used to read ``self.key`` before it was ever
    assigned, so the very first call raised AttributeError; the attribute is
    now initialised in ``__init__`` and read defensively with ``getattr``.
    """

    def __init__(self, *args, **kwargs):
        """Creates the Redis connection; *args/**kwargs go to redis.Redis."""
        self.redis = redis.Redis(*args, **kwargs)
        self.key = None  # lazily filled in by _key()

    def set_owner(self, owner):
        """Record who owns this board; used to derive the queue key."""
        self.owner = owner

    def post_message(self, message):
        """This will append the message to the list."""
        pass  # not implemented yet

    def get_message(self):
        """This will pop a message off the list."""
        pass  # not implemented yet

    def _key(self):
        """Return (computing once) the Redis list key "<owner>-queue"."""
        if not getattr(self, "key", None):
            self.key = "%s-queue" % self.owner
        return self.key
| true |
91dc75de8de5a3abff6766e36c56a2d3a157c173 | Python | NeatNerdPrime/SecureTea-Project | /securetea/lib/web_deface/defacement_detector.py | UTF-8 | 4,053 | 2.53125 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
u"""ML Based Defacement detection module for SecureTea Web Deface Detection.
Project:
╔═╗┌─┐┌─┐┬ ┬┬─┐┌─┐╔╦╗┌─┐┌─┐
╚═╗├┤ │ │ │├┬┘├┤ ║ ├┤ ├─┤
╚═╝└─┘└─┘└─┘┴└─└─┘ ╩ └─┘┴ ┴
Author: Aman Singh <dun930n.m45732@gmail.com> , July 25 2021
Version: 1.4
Module: SecureTea
"""
import os
from securetea.lib.web_deface.deface_logger import DefaceLogger
from securetea.lib.web_deface.utils import *
from securetea.lib.web_deface.file_handler import *
from pathlib import Path
import os
import pandas as pd
import pickle
import html2text
import csv
from sklearn.model_selection import train_test_split
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import VarianceThreshold
from nltk.stem.snowball import SnowballStemmer
from nltk.corpus import stopwords
class DefaceDetect(object):
    """ML based defacement Detector.

    Loads a pre-trained (pickled) classifier and scores the HTML files of a
    directory: pages are converted to plain text, stemmed, stop-word
    filtered and TF-IDF vectorised, then each file gets a boolean
    defacement verdict.
    """
    def __init__(self, debug=False, path=None):
        """
        Initialize DefaceDetect

        debug (bool): Log on terminal or not
        path (str): Path of the directory to scan file for

        Raises:
            None

        Returns:
            None
        """
        # intialize logger
        self.logger = DefaceLogger(
            __name__,
            debug=debug
        )
        # Initialize path of directory to look for
        self._PATH = path
        # Scratch CSV that ml_based_scan() writes page text into
        # before vectorising.
        self._DATASET = str(Path(os.path.dirname(__file__)).parent) + "/web_deface/config/dataset.csv"

    def ml_based_scan(self, files_list):
        """
        Scan the files in the directory to detect any traces of
        Defacement attempts.

        Args:
            files_list (list): files in the directory to scan

        Returns:
            dict: file path -> True if the model flags the page as defaced
                  (empty dict if any file could not be read)
        """
        # NOTE: pickle.load is acceptable here only because the model file
        # ships with the package; never point this at untrusted data.
        filename = str(Path(os.path.dirname(__file__)).parent) + "/web_deface/config/finalized_model.sav"
        with open(filename, "rb") as f:
            loaded_model = pickle.load(f)
        # Preparing User Webpage Dataset for Prediction
        h = html2text.HTML2Text()
        h.ignore_links = True
        fields = ["status", "content"]
        with open(self._DATASET, 'w') as csvfile:
            csvwriter = csv.writer(csvfile)
            csvwriter.writerow(fields)
            for file in files_list:
                try:
                    code = open(file, 'r').read()
                    code = h.handle(code)  # strip HTML tags -> plain text
                    ' '.join(code.split('\n'))  # NOTE: result discarded (no-op)
                    row = ['true', code]
                    csvwriter.writerow(row)
                except Exception as e:
                    msg = "File path unknown: " + file
                    self.logger.log(
                        msg,
                        logtype="info"
                    )
                    # Abort the whole scan on the first unreadable file.
                    return {}
        df = pd.read_csv(
            self._DATASET,
            usecols = fields, nrows=5000
        )
        # Stem every word, then drop English stop words.
        stemmer = SnowballStemmer('english')
        df['content'] = df['content'].apply(
            lambda x: ' '.join(stemmer.stem(y) for y in x.split())
        )
        df['content'] = df['content'].apply(
            lambda x: ' '.join(word for word in x.split() if word not in (stopwords.words()))
        )
        df = df['content'].copy()
        df = df.str.replace('\d+', '', regex=True)  # strip digit runs
        # Vectorise with 1- to 3-gram TF-IDF features.
        tfidf = TfidfVectorizer(min_df=2,max_df=0.5, ngram_range=(1,3))
        features = tfidf.fit_transform(df)
        df = pd.DataFrame(
            features.todense(),
            columns=tfidf.get_feature_names()
        )
        # Re-align columns with the feature set the model was trained on;
        # features unseen at training time are dropped, missing ones get 0.
        df_model = pd.read_csv(str(Path(os.path.dirname(__file__)).parent) + "/web_deface/config/df.csv", index_col=0)
        df = df.reindex(labels=df_model.columns,axis=1)
        df['Target'] = '1'
        df = df.fillna(0)
        x = df.drop('Target',axis=1)
        pred = loaded_model.predict(x)
        # Map each scanned file to the model's boolean verdict.
        return { files_list[i] : pred[i]=='1' for i in range(len(pred))}
2ef017d6140720a9ed0d1dbb31a501b35d8cc959 | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_155/1625.py | UTF-8 | 551 | 2.859375 | 3 | [] | no_license | import string, sys
f = open("output.txt", "w+")
count = 0  # current input line number; line 0 holds the case count
N = 0      # number of test cases, parsed from the first line
for line in sys.stdin:
    if count > 0:
        if (count > N + 1):
            break  # all N cases consumed
        # Each case: max shyness m and a digit string l, where l[i] is the
        # number of audience members with shyness level i.
        m, l = [item for item in line.split()]
        res = 0    # friends that must be invited
        i = 0      # current shyness level
        total = 0  # people standing so far (including invited friends)
        for c in l:
            n = int(c)
            # If fewer than i people are standing, the n people at level i
            # would stay seated: invite just enough friends to reach i.
            if ( (i > total) & (n != 0) ):
                res += i - total
                total += i - total
            total += n
            i = i + 1
        f.write('Case #%s: %s\n' % (count,res))
    else:
        N = int(line)
    count = count + 1
| true |
3b2f5b3ffb14f317ee84db6ab503c85e4c9efeb7 | Python | KareemAbdella/A.-Zoning-Restrictions-Again | /problem.py | UTF-8 | 344 | 3.109375 | 3 | [] | no_license | x = input().split()
n = int(x[0])  # number of plots
h = int(x[1])  # default maximum house height
m = int(x[2])  # number of zoning restrictions
proff = int(0)  # total profit: sum of squared heights
arr = [0] * n
# Every house starts at the maximum allowed height h.
for i in range(n):
    arr[i] = h
# Each restriction (l, r, x) caps houses l..r (1-based) at height x.
for i in range(m):
    a = input().split()
    k = [int(s) for s in a]
    s = k[0] - 1  # convert 1-based l to a 0-based index
    while s < k[1]:
        arr[s] = min(arr[s], k[2])
        s += 1
# Profit of a house of height a is a**2.
for q in range(n):
    proff += arr[q] ** 2
print(proff)
| true |
5f6da81d3f708a914ad1511a992bcee7d0369fa8 | Python | thomasm1/app-tester | /dailytechMarsReader/PixelPet.py | UTF-8 | 1,654 | 2.984375 | 3 | [
"MIT"
] | permissive | from sense_hat import SenseHat
from time import sleep

sense = SenseHat()

# Assorted demo values from the tutorial; not used further below.
my_data = ('Here','is','my','data')
red = (255,0,0)
edinburgh = (55.9533, 3.1883)

smarties = ('red', 'orange', 'blue', 'green', 'yellow', 'pink', 'violet', 'brown')
print(smarties)
print(smarties[0])
for color in smarties:
    print(color)

r = (255, 0, 0)
'''
pet1 = [e,e,e,e,e,e,e,e,
        p,e,e,e,e,e,e,e,
        e,p,e,e,p,e,p,e,
        e,p,g,g,p,y,y,s,
        e,g,g,g,y,w,y,e,
        e,g,g,g,g,y,y,s,
        e,g,e,g,e,g,e,e,
        e,e,e,e,e,e,e,e
        ]
pet2 = [e,e,e,e,e,e,e,e,
        p,e,e,e,e,e,e,e,
        e,p,e,e,p,e,p,e,
        e,p,g,g,p,y,y,s,
        e,g,g,g,y,w,y,e,
        e,g,g,g,g,y,y,s,
        e,g,e,g,e,g,e,e,
        e,e,e,e,e,e,e,e
        ]
sense.set_pixels(pet1)
sense.set_pixels(pet2)
sense.clear()
'''
# Idle for a while.  NOTE(review): the source file's indentation was lost,
# so it is ambiguous whether the second sleep was inside the loop; one
# sleep per iteration plus a trailing sleep is assumed — confirm timing.
for i in range(33):
    sleep(0.5)
sleep(0.5)

# Poll the accelerometer and report a strong movement along the X axis.
while True:
    acc = sense.get_accelerometer_raw()
    # BUG FIX: the original line read "if acc['x'] > 2" with no colon,
    # which is a SyntaxError; print('done') is now the body of the if.
    # The loop still runs until interrupted, as in the original sketch.
    if acc['x'] > 2:
        print('done')
'''
#Traffic Lights
from sense_hat import SenseHat
from time import sleep
r = (255, 0, 0)
g = (0, 255, 0)
b = (0, 0, 255)
w = (255, 255, 255)
e = (0, 0, 0)
p1 = [
    e, e, e, e, e, e, e, e,
    b, e, e, e, e, e, e, e,
    e, b, e, e, b, e, b, e,
    e, b, r, r, b, w, w, e,
    e, r, r, r, w, g, w, w,
    e, r, r, r, e, r, e, e,
    e, e, e, e, e, e, e, e
    ]
p2 = [
    e, e, e, e, e, e, e, e,
    b, e, e, e, e, e, e, e,
    e, b, e, e, b, e, b, e,
    e, b, r, r, b, w, w, e,
    e, r, r, r, w, g, w, w,
    e, r, r, r, r, w, w, e,
    e, e, r, e, r, e, e, e,
    e, e, e, e, e, e, e, e
    ]
def walking():
    for i in range(10):
        sense.set_pixels(p1)
        sleep(0.5)
        sense.set_pixels(p2)
        sleep(0.5)
'''
| true |
8f5888cc19d8e50dcb3c9832f4e3ccfa2b4c968c | Python | mhmoslemi2338/corresponding-point-harris-method | /main.py | UTF-8 | 6,679 | 2.53125 | 3 | [
"MIT"
] | permissive | import timeit
# ---------------------------------------------------------------------------
# Harris corner detection + correspondence matching between im01.jpg/im02.jpg.
# Pipeline: gradients -> structure-tensor entries -> corner response R ->
# threshold -> non-maximum suppression -> per-corner descriptors -> mutual
# nearest-neighbour matching -> visualisation.  Intermediate images are
# written to 'result/' (assumed to exist — cv2.imwrite fails silently otherwise).
# ---------------------------------------------------------------------------
start = timeit.default_timer()  # wall-clock start for the runtime report below
import cv2
import matplotlib.pyplot as plt
import numpy as np
from my_func import my_gradian, my_NMS , my_feature_arr , my_show , my_distance , my_min_distance
############# calc gradient and Ixx Iyy Ixy for im01 and im02 ########
print("progres (1 of 2)" , end=" : ")
# Load both images in colour (for drawing) and in grayscale (for the maths).
im1_bgr=cv2.imread('im01.jpg',cv2.IMREAD_COLOR)
im1=cv2.cvtColor(im1_bgr,cv2.COLOR_BGR2GRAY)
im2_bgr=cv2.imread('im02.jpg',cv2.IMREAD_COLOR)
im2=cv2.cvtColor(im2_bgr,cv2.COLOR_BGR2GRAY)
# my_gradian returns the derivative products Ix^2, Iy^2, Ix*Iy plus a
# gradient image for display (see my_func).
[Ixx1,Iyy1,Ixy1,gradient1]=my_gradian(im1)
[Ixx2,Iyy2,Ixy2,gradient2]=my_gradian(im2)
####### show and save results ########
my_show(gradient1, 'gradient of im01', gradient2, 'gradient of im02')
cv2.imwrite('result/res01_grad.jpg',gradient1);
cv2.imwrite('result/res02_grad.jpg',gradient2);
# Gaussian window that smooths the derivative products into the
# structure-tensor entries Sxx, Syy, Sxy.
gausskernel=cv2.getGaussianKernel(ksize=31,sigma=3)
############### calc Sxx ####################
Sxx1=cv2.filter2D(Ixx1,ddepth=cv2.CV_64F,kernel=gausskernel)
Sxx2=cv2.filter2D(Ixx2,ddepth=cv2.CV_64F,kernel=gausskernel)
my_show(Sxx1, '$S_x^2$ for im01', Sxx2, '$S_x^2$ for im02')
############## calc Syy ##################
Syy1=cv2.filter2D(Iyy1,ddepth=cv2.CV_64F,kernel=gausskernel)
Syy2=cv2.filter2D(Iyy2,ddepth=cv2.CV_64F,kernel=gausskernel)
my_show(Syy1, '$S_y^2$ for im01', Syy2, '$S_y^2$ for im02')
############## calc Sxy ##################
Sxy1=cv2.filter2D(Ixy1,ddepth=cv2.CV_64F,kernel=gausskernel)
Sxy2=cv2.filter2D(Ixy2,ddepth=cv2.CV_64F,kernel=gausskernel)
my_show(Sxy1, '$S_{xy}$ for im01', Sxy2, '$S_{xy}$ for im02')
######## calc structure tensor " M " for im01 and im02 ########
# Block form [[Sxx, Sxy], [Sxy, Syy]]; M1/M2 are built for inspection only —
# the response below is computed directly from det and trace.
M1_up=np.concatenate((Sxx1, Sxy1), axis=1)
M1_down=np.concatenate((Sxy1,Syy1), axis=1)
M1=np.concatenate((M1_up,M1_down), axis=0)
M2_up=np.concatenate((Sxx2, Sxy2), axis=1)
M2_down=np.concatenate((Sxy2,Syy2), axis=1)
M2=np.concatenate((M2_up,M2_down), axis=0)
##### calc det and trace ########
# Per-pixel det(M) = Sxx*Syy - Sxy^2 and trace(M) = Sxx + Syy.
tmp1=np.multiply(Sxx1,Syy1)
tmp2=np.multiply(Sxy1,Sxy1)
det1=np.subtract(tmp1,tmp2)
trace1=np.add(Sxx1,Syy1)
tmp1=np.multiply(Sxx2,Syy2)
tmp2=np.multiply(Sxy2,Sxy2)
det2=np.subtract(tmp1,tmp2)
trace2=np.add(Sxx2,Syy2)
#### calc R and showing it ######
# NOTE(review): the textbook Harris response is det - k*trace**2 with
# k ~ 0.04-0.06; here R = det - k*trace with k = 30.  Confirm this variant
# is intentional — the threshold below is tuned to it.
k=30
R1=cv2.subtract(det1,k*trace1)
R1_show=np.absolute(R1)
R1_show=np.uint8(R1_show)
R2=cv2.subtract(det2,k*trace2)
R2_show=np.absolute(R2)
R2_show=np.uint8(R2_show)
my_show(R1_show, '$R_1$', R2_show, '$R_2$')
cv2.imwrite('result/res03_score.jpg',R1_show);
cv2.imwrite('result/res04_score.jpg',R2_show);
#### thresholding and showing result and save it ######
my_threshold=100
_,th1=cv2.threshold(R1,my_threshold,255,cv2.THRESH_BINARY)
_,th2=cv2.threshold(R2,my_threshold,255,cv2.THRESH_BINARY)
my_show(th1, 'threshold for im01', th2, 'threshold for im02')
cv2.imwrite('result/res03_thresh.jpg',th1);
cv2.imwrite('result/res04_thresh.jpg',th2);
################# non maximum suppresion ###############
# Keep only the strongest response inside each size x size window.
size=17
res1=my_NMS(size, th1)
res2=my_NMS(size, th2)
[m1,n1]=np.shape(res1)
[m2,n2]=np.shape(res2)
############# draw points in original images ##########
# Collect surviving corner pixels as [row, col, running_id] and mark them
# with small green circles (note cv2 takes (x, y) = (j, i)).
pnt1,t=[],0
tmp1=im1_bgr.copy()
for i in range(m1):
    for j in range(n1):
        if res1[i][j]==255 :
            t+=1
            pnt1.append([i,j,t])
            cv2.circle(tmp1,(j,i),radius=2,color=(0,255,0) ,thickness=2 )
pnt2,t=[],0
tmp2=im2_bgr.copy()
for i in range(m2):
    for j in range(n2):
        if res2[i][j]==255 :
            t+=1
            pnt2.append([i,j,t])
            cv2.circle(tmp2,(j,i),radius=2,color=(0,255,0) ,thickness=2 )
cv2.imwrite('result/res07_harris.jpg',tmp1);
cv2.imwrite('result/res08_harris.jpg',tmp2);
####### finding feature array for each point ##############
# n presumably sets the patch size used as a descriptor around each corner
# (implemented in my_func.my_feature_arr — confirm there).
n=20
feature1 = my_feature_arr(n, pnt1, im1_bgr, m1, n1)
feature2 = my_feature_arr(n, pnt2, im2_bgr, m2, n2)
l1=len(feature1)
l2=len(feature2)
####### find corresponding point in pic2 for pic1 ######
print ("done !")
print("progres (2 of 2) : (%) ")
dist=my_distance(feature1, feature2, l1, l2)
[cores1,cores2]=my_min_distance(dist, l1, l2)
####### find valid correspond point for each image #######
# Ratio test: accept a candidate only if row[2]/row[4] < 0.75 — presumably
# best vs. second-best descriptor distance; verify against my_min_distance.
thresh=0.75
correspond1=[] # [pnt1 , pnt2] : pnt1 is from pic1 correspond to pnt2 from pic2
for row in cores1:
    if np.divide(row[2],row[4]) < thresh :
        correspond1.append([row[0],row[1]])
correspond2=[] # [pnt1 , pnt2] : pnt1 is from pic2 correspond to pnt2 from pic1
for row in cores2:
    if (np.divide(row[2],row[4])) < thresh :
        correspond2.append([row[0],row[1]])
# Cross-check: keep only mutual matches (1->2 and 2->1 agree).
final_cor=[]
for row in correspond1:
    for row2 in correspond2:
        if( row[0]==row2[1] and row[1]==row2[0] ):
            final_cor.append(row) # [pic1 pic2]
# Flag matches that share the same target point in pic2 by appending -1.
for i,row in enumerate(final_cor):
    ch=row[1]
    for j,row2 in enumerate(final_cor):
        if (row2[1]==ch and i!=j):
            final_cor[i].append(-1)
############ draw correspond point in each image #########
tmp1=im1_bgr.copy()
tmp2=im2_bgr.copy()
for row in final_cor:
    p1=row[0] # pnt in pic 1
    p2=row[1] # pnt in pic 2
    (i1,j1)=pnt1[p1-1][0:2] # find cordinates
    (i2,j2)=pnt2[p2-1][0:2]
    cv2.circle(tmp1,(j1,i1),radius=2,color=(0,255,0) ,thickness=4 )
    cv2.circle(tmp2,(j2,i2),radius=2,color=(0,255,0) ,thickness=4 )
cv2.imwrite('result/res09_corres.jpg',tmp1);
cv2.imwrite('result/res10_corres.jpg',tmp2);
############## draw lines for correspond points ##########
# Draw every third match on a side-by-side canvas, alternating line colours
# so neighbouring lines stay distinguishable.
tmp3=im1_bgr.copy()
tmp4=im2_bgr.copy()
tmp5=cv2.hconcat([tmp3,tmp4])
tmp1=im1_bgr.copy()
tmp2=im2_bgr.copy()
for i,row in enumerate(final_cor):
    if i%3!=0:
        continue
    p1=row[0] # pnt in pic 1
    p2=row[1] # pnt in pic 2
    (i1,j1)=pnt1[p1-1][0:2] # find cordinates
    (i2,j2)=pnt2[p2-1][0:2]
    j2+=n1 # shift pic2 x-coordinate onto the concatenated canvas
    if i%2==0:
        cv2.line(tmp5,(j1,i1),(j2,i2),color=(0,0,255),thickness=2)
        cv2.circle(tmp5,(j1,i1),radius=2,color=(0,255,0) ,thickness=10 )
        cv2.circle(tmp5,(j2,i2),radius=2,color=(0,255,0) ,thickness=10 )
    else:
        cv2.line(tmp5,(j1,i1),(j2,i2),color=(255,0,0),thickness=2)
        cv2.circle(tmp5,(j1,i1),radius=2,color=(0,255,0) ,thickness=10 )
        cv2.circle(tmp5,(j2,i2),radius=2,color=(0,255,0) ,thickness=10 )
cv2.imwrite('result/res11.jpg',tmp5);
stop = timeit.default_timer()
print('\n Run-Time: ', (stop - start)/60)
| true |
b9918f573d6c71c3fed9fb876ea067e1fa909a99 | Python | Shreyas3010/Imbalanced-Classes | /SMOTEregression.py | UTF-8 | 8,575 | 2.625 | 3 | [] | no_license | import pandas as pd
import sys
import xgboost
from sklearn.ensemble import RandomForestRegressor
import collections
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import math
import random
def sortSecond(val):
    """Sort key: the element at position 1 (the distance field of a
    (index, distance, class) neighbour tuple)."""
    second_item = val[1]
    return second_item
# =============================================================================
# Earlier DataFrame-based neighbour store, kept for reference:
#row2=np.arange(5)
#kneighbors= pd.DataFrame(data=None,index=row2,columns = [ 'Index','Dist1','Class'])
# def settointial():
#     for i in range(5):
#         kneighbors['Class'][i]=-1
#         kneighbors['dist1'][i]=sys.float_info.max
#     return
# =============================================================================
# Module-level scratch list of (index, distance, class) tuples holding the
# current query point's nearest neighbours; callers clear it and then call
# knn1() to refill it.
kneighbors=[]
def knn1(data1,k):
    """Fill the module-level ``kneighbors`` list with the 5 nearest
    neighbours of row *k* of ``data1``.

    Distance is Euclidean over the feature columns, i.e. every column
    except the first ('index', added by reset_index) and the last
    ('Class').  Each entry is ``(original_index, distance, class_of_k)``
    and the list ends up sorted by ascending distance, so
    ``kneighbors[0]`` is the nearest neighbour.  Callers clear
    ``kneighbors`` before calling.

    BUG FIX: the original kept the first five rows encountered and then
    compared each new candidate against ``kneighbors[4]``, which after a
    replacement held the *most recently added* neighbour rather than the
    farthest one (the re-sort inside the loop was commented out), so the
    result was not guaranteed to be the true 5 nearest.  All distances
    are now collected and the 5 smallest kept.
    """
    data1_cols=data1.columns
    candidates = []
    for i1 in range(len(data1)):
        if i1!=k:
            sum1=0.0
            # Feature columns only: skip data1_cols[0] ('index') and the
            # final column ('Class'), exactly as the original loop did.
            for cl1 in range(len(data1_cols)-2):
                sum1=sum1+math.pow((data1[data1_cols[cl1+1]][k]-data1[data1_cols[cl1+1]][i1]),2)
            candidates.append((data1['index'][i1],math.sqrt(sum1),data1['Class'][k]))
    candidates.sort(key=sortSecond)
    kneighbors.extend(candidates[:5])
    return
# ---------------------------------------------------------------------------
# Data loading / preparation: read the production spreadsheet, clean it,
# derive a 3-way class label from Total_Production, and split train/test.
# ---------------------------------------------------------------------------
row1=np.arange(3)
# Summary table: one row per class (0 = low, 1 = high, 2 = middle).
results= pd.DataFrame(data=None,index=row1,columns = [ 'Class','Datasize','Training Datasize','After Sampling','Testing Datasize'])
a1=0
data= pd.read_excel (r'New_Data_BF-2_Raigad.xlsx')
data=data.drop(data.index[0])        # drop the first data row
data=data[data.Outlier!='Yes']       # discard rows flagged as outliers
data=data[data!='ppp']               # mask 'ppp' placeholders to NaN ...
data=data.dropna(axis=0)             # ... then drop those rows
# Default every row to class 2; rows below/above the production cut-offs
# are relabelled 0/1 below.
l1=[]
for i in np.arange(len(data)):
    l1.append(2)
data['Class']=l1
data.rename(columns={'Total.Production.(mt)':'Total_Production'}, inplace=True)
data.loc[data['Total_Production'] < 3200, 'Class'] = 0
data.loc[data['Total_Production'] > 4200, 'Class'] = 1
#data['F/C.Top.Pressure.(Kg/cm2)'] = data.F/C.Top.Pressure.(Kg/cm2).astype(float64)
#m=data.columns[data.isna().any()].tolist()
data= data.drop(['Outlier'],axis=1)
#data= data.drop(['index'],axis=1)
data= data.drop(['F/C.Top.Pressure.(Kg/cm2)'],axis=1)
# Features (everything incl. 'Class' and 'Date') vs. regression target.
X = data.loc[:, data.columns != 'Total_Production']
y = data.loc[:, data.columns == 'Total_Production']
datasize=collections.Counter(X['Class'])
print("data size",datasize)
results['Class'][a1]=a1
results['Class'][a1+1]=a1+1
results['Class'][a1+2]=a1+2
results['Datasize'][a1]=datasize[a1]
results['Datasize'][a1+1]=datasize[a1+1]
results['Datasize'][a1+2]=datasize[a1+2]
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size = 0.2,train_size=0.8, random_state = 0)
date_test= X_test.loc[:, X_test.columns == 'Date']   # keep dates for plotting
X_train= X_train.drop(['Date'],axis=1)
X_test= X_test.drop(['Date'],axis=1)
trainingdatasize=collections.Counter(X_train['Class'])
testingdatasize=collections.Counter(X_test['Class'])
print("training data size",trainingdatasize)
print("testing data size",testingdatasize)
results['Training Datasize'][a1]=trainingdatasize[a1]
results['Training Datasize'][a1+1]=trainingdatasize[a1+1]
results['Training Datasize'][a1+2]=trainingdatasize[a1+2]
results['Testing Datasize'][a1]=testingdatasize[a1]
results['Testing Datasize'][a1+1]=testingdatasize[a1+1]
results['Testing Datasize'][a1+2]=testingdatasize[a1+2]
# Single-row template frames reused to build each synthetic sample; the
# 1.1 / 1.2 fillers are placeholders that get overwritten.
X_train_cols=X_train.columns
y_train_cols=y_train.columns
row3=np.arange(1)
tmp_x_train_tuple=()
for list_num in range(len(X_train_cols)):
    tmp_x_train_tuple=tmp_x_train_tuple+(1.1,)
tmp_x_train_list=[tmp_x_train_tuple]
tmp_x_train= pd.DataFrame(data=tmp_x_train_list,index=row3,columns = X_train_cols)
tmp_y_train= pd.DataFrame(data=[1.2],index=row3,columns = y_train_cols)
# Class subsets; reset_index keeps the original X_train label in an
# 'index' column, which knn1 reports back.
x1_train=X_train[X_train.Class==1]
x0_train=X_train[X_train.Class==0]
x1_train=x1_train.reset_index()
x0_train=x0_train.reset_index()
# ---------------------------------------------------------------------------
# SMOTE-style oversampling: for every row of class 0 (then class 1), create
# one synthetic sample by random convex interpolation between the row and
# its nearest same-class neighbour (found via knn1), and append it to the
# training set.  random_val1 + random_val2 == 1, so dividing by their sum
# just normalises the weighted average.
# ---------------------------------------------------------------------------
for x0_train_rownum in range(len(x0_train)):
    kneighbors.clear()
    knn1(x0_train,x0_train_rownum)
    index_2=kneighbors[0][0]                    # X_train label of nearest class-0 neighbour
    random_val1=random.uniform(0,1)
    random_val2=1-random_val1
    index_1=x0_train['index'][x0_train_rownum]  # X_train label of the row itself
    for colnum in range(len(X_train_cols)-1):   # all feature columns; the last is 'Class'
        tmp_x_train[X_train_cols[colnum]][0]=((random_val2*X_train[X_train_cols[colnum]][index_1])+(random_val1*X_train[X_train_cols[colnum]][index_2]))/(random_val1+random_val2)
    tmp_x_train['Class'][0]=0
    X_train=X_train.append(tmp_x_train)
    tmp_y_train[y_train_cols[0]][0]=int(((random_val2*y_train[y_train_cols[0]][index_1])+(random_val1*y_train[y_train_cols[0]][index_2]))/(random_val1+random_val2))
    y_train=y_train.append(tmp_y_train)
# Same oversampling for class 1.
for x1_train_rownum in range(len(x1_train)):
    kneighbors.clear()
    knn1(x1_train,x1_train_rownum)
    index_2=kneighbors[0][0]
    random_val1=random.uniform(0,1)
    random_val2=1-random_val1
    index_1=x1_train['index'][x1_train_rownum]
    for colnum in range(len(X_train_cols)-1):
        tmp_x_train[X_train_cols[colnum]][0]=((random_val2*X_train[X_train_cols[colnum]][index_1])+(random_val1*X_train[X_train_cols[colnum]][index_2]))/(random_val1+random_val2)
    tmp_x_train['Class'][0]=1
    X_train=X_train.append(tmp_x_train)
    # BUG FIX: the class-1 branch was missing the outer parentheses and so
    # divided only the second term by (random_val1+random_val2);
    # parenthesised to match the class-0 branch above.
    tmp_y_train[y_train_cols[0]][0]=int(((random_val2*y_train[y_train_cols[0]][index_1])+(random_val1*y_train[y_train_cols[0]][index_2]))/(random_val1+random_val2))
    y_train=y_train.append(tmp_y_train)
# Class counts after oversampling (should be more balanced than before).
samplingdatasize=collections.Counter(X_train['Class'])
print("samplinging data size",samplingdatasize)
results['After Sampling'][a1]=samplingdatasize[a1]
results['After Sampling'][a1+1]=samplingdatasize[a1+1]
results['After Sampling'][a1+2]=samplingdatasize[a1+2]
#remove class field
X_train= X_train.drop(['Class'],axis=1)
X_test= X_test.drop(['Class'],axis=1)
#classifier
#clf = xgboost.XGBRegressor(n_estimators=100, learning_rate=0.08, gamma=0, subsample=0.75,colsample_bytree=1, max_depth=7)
#clf.fit(X_train,y_train)
# Active model: random forest regressor (the XGBoost variant above is the
# commented-out alternative).
clf=RandomForestRegressor(n_estimators=100,oob_score=True)
clf.fit(X_train,y_train.values.ravel())
y_pred=clf.predict(X_test)
# Per-sample relative absolute error: |pred - actual| / pred, plus its
# min / max / mean over the test set (used in the plots below).
x1=date_test['Date']
y1=y_test['Total_Production']
y_test_arr=np.array(y1)
num_test=len(y_pred)
y2=y_pred
# BUG FIX: np.float was deprecated and removed in NumPy 1.24; the builtin
# float produces the identical float64 array.
diff1=np.arange(num_test,dtype=float)
sum1=0
for i in range(num_test):
    diff1[i]=abs(y_pred[i]-y_test_arr[i])/y_pred[i]
    sum1=sum1+diff1[i]
min1=min(diff1)
max1=max(diff1)
print("min",min1)
print("max",max1)
avg1=sum1/num_test
print("avg",sum1/num_test)
maxstr=str(max1)
minstr=str(min1)
avgstr=str(avg1)
# Plot 1: relative prediction error per test date.
# NOTE(review): titles/filenames say XGB although the active model above is
# RandomForestRegressor (the rf savefig lines are commented out).
plt.figure(figsize=(20,10))
plt.suptitle('Min : '+minstr+'Max : '+maxstr+'Avg : '+avgstr, fontsize=14, fontweight='bold')
plt.plot(x1,diff1,marker='o', markerfacecolor='blue', markersize=7, color='skyblue', linewidth=0,label="Actual Data")
plt.title('Performance (XGB)')
plt.xticks(rotation=90)
plt.xlabel('Production')
plt.ylabel('(Predicted-Actual)/Predicted (%)')
plt.savefig('resultsxgbsmoteregressionnewdata.png')
#plt.savefig('resultsrfsmoteregressionnewdata.png')
plt.show()
#plot
# Plot 2: actual vs. predicted production over time.
plt.figure(figsize=(20,10))
plt.plot(x1,y1,marker='o', markerfacecolor='blue', markersize=7, color='skyblue', linewidth=0,label="Actual Data")
plt.plot(x1,y2,marker='o', markerfacecolor='forestgreen', markersize=7, color='lightgreen', linewidth=0,label="Predicted Data")
plt.xticks(rotation=90)
plt.xlabel('Date')
plt.ylabel('Production')
plt.title('Actual Data vs Predicted Data(XGB)')
plt.legend()
plt.savefig('xgbsmoteregressionnewdata.png')
#plt.savefig('rfsmoteregressionnewdata.png')
plt.show()
# plot feature importance
plt.figure(figsize=(20,10))
train_cols=X_train.columns.values
print(train_cols)
print(clf.feature_importances_)
plt.title('Feature importances (XGB)')
plt.bar(train_cols, clf.feature_importances_)
plt.xticks(rotation=90)
plt.ylabel('Feature Importance (%)')
plt.xlabel('Features')
plt.legend()
plt.savefig('xgbsmoteregressionfeatureimportancenewdata.png')
#plt.savefig('rfsmoteregressionfeatureimportancenewdata.png')
plt.show()
| true |
f515392bda8cddcbfa21cf465d5695945fd62cab | Python | handaeho/lab_dl | /ch05_Back_Propagation/ex01_Basic_Layer.py | UTF-8 | 5,586 | 4 | 4 | [] | no_license | """
Back Propagation(역전파)
Computational Graph(계산 그래프) : 복수개의 노드와 엣지로 계산 과정을 자료구조 형태의 그래프로 표현한 것.
f(x)= x^n일 때, f'(x) = df/dx = nx^n-1이다. 이 미분 계산을 그래프 자료구조 형태로 나타내면,
'x-> [미분] -> df/dx'와 같다.
이때, 출발점부터 종착점까지 순서대로 진행되는 것을 'Forward Propagation(순전파)',
반대로 종착점부터 출발점으로 진행되는 것을 'Back Propagation(역전파)'라고 한다.
전체 계산이 아무리 복잡하고 변수가 많아도 각 단계에서 노드가 하는 일은 '국소적 계산'이다.
이는 단순하지만 그 결과를 다음 노드에 전달해가며 전체를 구성하기 때문에 복잡한 계산을 할 수 있게 한다.
예를 들어, '100원짜리 사과 2개를 샀고 소비세 10%가 붙은 최종 가격을 구하는 계산'은
'사과 100원 -> [*2] -> [*1.1] -> 최종 금액'이 된다.
이때 '최종 금액을 구하는 계산'이 'Forward Propagation(순전파)'가 되는것이며,
만약 '사과 가격이 오르면 최종 금액이 어떻게 변하는지' 알고싶을 때, 이는 '사과 가격에 대한 지불 금액의 미분'으로 표현 가능하고
이를 구하는것이 'Back Propagation(역전파)'이다.
또한 같은 방법으로 이렇게 사과 금액에 대한 미분 뿐만 아닌, 개수에 대한 미분이나 소비세에 대한 미분등으로 각 요소의 영향도 알 수 있다.
그리고 중간까지 구한 미분에 대한 결과를 공유 할 수 있어서 다수의 미분을 효율적으로 계산 할 수 있다.
이처럼 계산 그래프의 이점은 '순전파와 역전파'를 활용해서 '각 변수의 미분을 효율적으로 계산'할 수 있는 것이다.
정리하자면 'Forward Propagation'은 '시작점부터 각 노드와 엣지의 상태에 따라 순서대로 계산되어 결과가 출력되는 계산'이며,
'Back Propagation'은 '반대의 방향으로 진행되는 계산'으로 '미분을 통해 전 단계가 지금 단계의 노드에 어떤 영향'을 미쳤는지 알 수 있다.
y = f(x)의 계산 그래프 x -> [f] -> y에서
역전파의 계산 순서 : E * dy/dx <- [f] <- E
신호 E에 노드의 국소적 미분(dy/dx)를 곱하고, 다음 노드로 전달.
- 합성 함수 : 여러 함수로 구성된 함수.
z = t^2(단, t=x+y) ~~~> z = t^2 = (x+y)^2
합성 함수의 미분은 합성 함수를 구성하는 각 함수의 미분의 곱으로 나타낼 수 있다.
- 연쇄 법칙 : 합성 함수의 원리를 이용해 z=t^2(단, t=x+y)일 때,
dz/dx = dz/dt * dt/dx, dz/dt = 2t이고, dt/dx = 1이므로
dz/dx = dz/dt * dt/dx = 2t * 1 = 2(x+y)
"""
import numpy as np
class MultiplyLayer:
    """Multiplication node of a computational graph.

    The forward pass caches both operands because the local gradients of
    z = x * y are dz/dx = y and dz/dy = x.  (Original demo: price of two
    100-won apples plus 10% sales tax.)
    """

    def __init__(self):
        # Operands are cached by forward() for use in backward().
        self.x = None
        self.y = None

    def forward(self, x, y):
        """Cache the operands and return their product x * y."""
        self.x, self.y = x, y
        return x * y

    def backward(self, delta_out):
        """Given the upstream gradient, return (d/dx, d/dy) = (delta*y, delta*x)."""
        return delta_out * self.y, delta_out * self.x
class AddLayer:
    """Addition node of a computational graph.

    Since d(x+y)/dx = d(x+y)/dy = 1, the upstream gradient is passed
    through unchanged to both operands.
    """

    def __init__(self):
        # Addition needs no cached state.
        pass

    def forward(self, x, y):
        """Return the sum x + y."""
        return x + y

    def backward(self, dout):
        """Route the upstream gradient to both inputs unchanged."""
        return dout, dout
if __name__ == '__main__':
    # Create a MultiplyLayer object.
    apple_layer = MultiplyLayer()
    # Forward propagation.
    apple = 100 # price of one apple (won)
    n = 2 # number of apples
    # Total price of two apples.
    apple_price = apple_layer.forward(apple, n) # forward pass
    print('사과 2개의 가격 =', apple_price)
    # prints 200
    # Create tax_layer as another MultiplyLayer object.
    tax_layer = MultiplyLayer()
    # With tax = 1.1, compute the final price including sales tax.
    tax = 1.1
    total_price = tax_layer.forward(apple_price, tax)
    print('세금이 포함된 사과 2개의 최종 가격 =', total_price)
    # prints 220.00000000000003
    # With f = a * n * t:
    #   if tax rises by 1, how much does the total rise?         -> df/dt
    #   if the apple count rises by 1, how much does it rise?    -> df/dn
    #   if the apple price rises by 1, how much does it rise?    -> df/da
    # Backward propagation ~> each backward() receives the derivative of
    # the final output with respect to that node's forward output.
    delta = 1.0 # the very first value propagated backwards
    dprice, dtax = tax_layer.backward(delta)
    print('dprice =', dprice)
    print('dtax =', dtax) # df/dt: total-price change per unit change of tax
    dapple, dn = apple_layer.backward(dprice)
    print('dapple =', dapple) # df/da: total-price change per unit change of apple price
    print('dn =', dn) # df/dn: total-price change per unit change of apple count
    # AddLayer test.
    add_layer = AddLayer()
    x = 100
    y = 200
    dout = 1.5
    f = add_layer.forward(x, y)
    print('f =', f) # f = x + y
    dx, dy = add_layer.backward(dout) # df/dx = 1, df/dy = 1
    print('dx =', dx) # dx * dout
    print('dy =', dy) # dy * dout
| true |
6e491bb45296e710792dd4b86985b7b9840c0556 | Python | tk-sheldo/codeJam | /2019/crypto.py | UTF-8 | 817 | 3.375 | 3 | [] | no_license |
def GCF(a, b):
    """Greatest common factor of a and b, via the Euclidean algorithm
    (iterative form; assumes positive integers, as the caller provides)."""
    if a < b:
        a, b = b, a
    while True:
        remainder = a % b
        if remainder == 0:
            return b
        a, b = b, remainder
t = int(input())
for case in range(t):
n, l = list(map(int, input().split(' ')))
code = list(map(int, input().split(' ')))
primes = ['X']
primes.append(GCF(code[0], code[1]))
primes[0] = code[0]/primes[1]
for i in range(len(code)-1):
primes.append(code[i+1]/primes[i+1])
primes = list(map(int, primes))
l = list(dict.fromkeys(primes))
l.sort()
#print(l)
d = {}
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
for i in range(len(l)):
d[l[i]] = letters[i]
#print(d)
broken = ''
for num in primes:
broken = broken + d[num]
print(f"Case #{case + 1}: {broken}")
| true |
95396114c3a490fe4f4b9dc5bb31c6957fb84666 | Python | henryfw/cs-330 | /cs330_image_convert.py | UTF-8 | 1,676 | 2.875 | 3 | [] | no_license | import cv2 as cv
import os
import pickle
# save images as an array of tuples: [( [1024,1024], 0|1 ), ... ]
def resizeImagesAsFile(inputFolder, saveFile, width=1024, height=1024):
    """Pickle every PNG/JPEG under <inputFolder>/0 and <inputFolder>/1 as a
    list of (grayscale image resized to width x height, int label) tuples.

    The subdirectory name ("0" or "1") is the binary class label.
    NOTE(review): only ".png"/".jpeg" suffixes match — ".jpg" files are
    skipped, as in the original; confirm that is intended.
    """
    data = []
    for label in ["0", "1"]:
        # os.scandir as a context manager closes the directory handle.
        with os.scandir(inputFolder + "/" + label) as entries:
            for entry in entries:
                # endswith accepts a tuple of suffixes — one call for both.
                if entry.name.lower().endswith((".png", ".jpeg")):
                    image = cv.imread(entry.path, cv.IMREAD_GRAYSCALE)
                    image_scaled = cv.resize(image, (width, height), interpolation=cv.INTER_AREA)
                    data.append((image_scaled, int(label)))
    if os.path.exists(saveFile):
        os.remove(saveFile)  # redundant with "wb" truncation; kept for parity
    print("saving {} items.".format(len(data)))
    # BUG FIX: the original passed an anonymous open(...) handle to
    # pickle.dump and never closed it; the context manager guarantees the
    # file is flushed and closed.
    with open(saveFile, "wb") as fh:
        pickle.dump(data, fh)
if __name__ == '__main__':
    #
    # resizeImagesAsFile('C:/Users/uprz2/Downloads/cs330/images/pneumonia/', "C:/Users/uprz2/Downloads/cs330/data/pneumonia-1024", 1024, 1024)
    # resizeImagesAsFile('C:/Users/uprz2/Downloads/cs330/images/tb1/', "C:/Users/uprz2/Downloads/cs330/data/tb1-1024", 1024, 1024)
    #
    # resizeImagesAsFile('C:/Users/uprz2/Downloads/cs330/images/pneumonia/', "C:/Users/uprz2/Downloads/cs330/data/pneumonia-512", 512, 512)
    # resizeImagesAsFile('C:/Users/uprz2/Downloads/cs330/images/tb1/', "C:/Users/uprz2/Downloads/cs330/data/tb1-512", 512, 512)
    #
    # Build the 256x256 grayscale pickles for both datasets (the 1024/512
    # variants above are kept commented out for reference).
    resizeImagesAsFile('C:/Users/uprz2/Downloads/cs330/images/pneumonia/', "C:/Users/uprz2/Downloads/cs330/data/pneumonia-256-grayscale", 256, 256)
    resizeImagesAsFile('C:/Users/uprz2/Downloads/cs330/images/tb1/', "C:/Users/uprz2/Downloads/cs330/data/tb1-256-grayscale", 256, 256)
6f030a097133ecb7c0277171ed58be597af7938f | Python | shenhaiyu0923/resful | /vova_project/vova_resful/sept/性能测试.py | GB18030 | 1,142 | 2.671875 | 3 | [] | no_license | from locust import HttpLocust, TaskSet, task
# HttpLocust: Locust user class whose simulated users issue HTTP requests.
# TaskSet: groups the HTTP actions one virtual user performs, comparable to
# an HTTP protocol script in LoadRunner or JMeter.
# task: decorator marking a method as a load-test task; decorated methods
# are picked up and executed by the spawned users.
# (NOTE: the original GB18030 comments were mojibake; paraphrased from context.)
class BestTest(TaskSet):
    # TaskSet subclass: bundles the HTTP actions one simulated user runs,
    # comparable to an HTTP script in LoadRunner/JMeter.  (The original
    # GB18030 comments were mojibake; paraphrased here from context.)
    @task  # mark index() as a load-test task picked up by Locust users
    def index(self):  # task methods take only self
        self.client.get('/')  # GET the site root, relative to --host
class BestTestIndexUser(HttpLocust):
    # HttpLocust subclass: every hatched virtual user of this type runs
    # the tasks of the TaskSet assigned below.
    task_set = BestTest  # the TaskSet each spawned user executes
#locust -f <this_file>.py --host=http://www.baidu.com
# then open the Locust web UI at http://localhost:8089
| true |
b6863fa906671e0236660e7858c397b0999c4070 | Python | palmergroup-tutorial/Python-force-field-parameterization-workflow | /IO/user_provided.py | UTF-8 | 5,693 | 2.578125 | 3 | [
"MIT"
] | permissive | import logging
import argparse
import numpy as np
import sys
import IO.check_type
class from_command_line():
    """Collect run configuration (job ID, core count, input file, data
    folders) either from explicitly passed arguments or from the command
    line, and set up the per-job logger.

    NOTE(review): __init__ is a @classmethod, so every value is stored on
    the *class* itself (cls.JOBID, cls.TOTAL_CORES, ...) — the class acts
    as a global configuration singleton rather than producing independent
    instances.
    """

    @classmethod
    def __init__(cls,jobID=None,
                     total_cores=None,
                     input_file=None,
                     mode=None,
                     ref_address=None,
                     prep_address=None):
        """Initialise the shared configuration.

        Either all of jobID/total_cores/input_file are passed, or none is
        (in which case argparse takes over); mixing the two styles aborts
        the run via sys.exit.
        """
        # Logging mode: "run" (terse INFO output) unless told otherwise.
        if (mode is None):
            cls.MODE = "run"
        else:
            cls.MODE = mode
        if (jobID is not None):
            cls.JOBID = str(jobID)
            # One log file per job, named "<jobID>.log".
            cls.logger = cls.Set_Run_Mode(cls.JOBID + ".log",cls.MODE)
        if (total_cores is not None):
            cls.TOTAL_CORES = total_cores
        if (input_file is not None):
            cls.INPUT = input_file
        # Reference-data / prepared-system folders fall back to defaults.
        if (ref_address is not None):
            cls.Ref_data = ref_address
        else:
            cls.Ref_data ="../ReferenceData"
        if (prep_address is not None):
            cls.prep_data = prep_address
        else:
            cls.prep_data = "../prepsystem"
        all_options = np.array([ total_cores ,jobID ,input_file ])
        # if None of total_cores ,jobID ,input_file assigned, then use the command line options
        if ( np.all(all_options == None ) ):
            cls.Take_Command_Line_Args()
            cls.set_global()
        # Mixing passed arguments with command-line parsing is ambiguous:
        if ( np.any(all_options != None )
             and np.any(all_options == None) ):
            sys.exit("ERROR: either assign all values for arguments "
                     "in the class constructors "
                     "or read all input from command line")
        # check the following mandatory attributes
        cls.check_necessary_attributes("JOBID")
        cls.check_necessary_attributes("TOTAL_CORES")
        cls.check_necessary_attributes("INPUT")
        cls.check_necessary_attributes("logger")
        cls.check_necessary_attributes("Ref_data")
        cls.check_necessary_attributes("prep_data")
        # check the type of user-provided input:
        cls.check_total_cores()
        return None

    @classmethod
    def finish_reading(cls):
        """Return the collected configuration as a tuple:
        (logger, total_cores, input_file, jobID, ref_data, prep_data)."""
        return cls.logger,cls.TOTAL_CORES, cls.INPUT, cls.JOBID,cls.Ref_data,cls.prep_data

    @classmethod
    def check_necessary_attributes(cls,attribute):
        """Abort the run if *attribute* was never set on the class."""
        if ( not hasattr(cls,attribute) ):
            sys.exit('global variable: "%s" not found in either command line or passed argument'%attribute)
        return None

    @classmethod
    def check_total_cores(cls):
        """Abort (after logging) unless TOTAL_CORES is an integer,
        as judged by the project helper IO.check_type.is_int."""
        if ( not IO.check_type.is_int(cls.TOTAL_CORES)):
            cls.logger.error("ERROR: varable: 'total_cores' must be an integer ! ")
            sys.exit("Check errors in log file ! ")
        return None

    @classmethod
    def Take_Command_Line_Args(cls):
        """Parse the command-line options into the cls.argument dict."""
        parser = argparse.ArgumentParser(description="This is a Python software package implementing a force-field parameters optimization workflow")
        parser.add_argument("-c", "--cores", type=int, required=True,help="Number of cores requested")
        parser.add_argument("-i", "--input", type=str, required=True,help="input file name")
        parser.add_argument("-j", "--job", type=str,
                            required=True,help="Specify a job ID that will be attached to a job folder and log file")
        parser.add_argument("-m", "--mode", type=str,
                            required=False,default="run",
                            help="(Optional) Choose 'run' or 'debug'. Default is 'run' ")
        parser.add_argument("-Ref", "--ReferenceData", type=str,
                            required=False,default="../ReferenceData",
                            help="(Optional) Provide the path to Reference data folder. Default path is '../ReferenceData'")
        parser.add_argument("-prep", "--prepsystem", type=str,
                            required=False,default="../prepsystem",
                            help="(Optional) Provide the path to prepsystem folder. Default path is '../prepsystem'")
        args = parser.parse_args()
        cls.argument = dict( args.__dict__.items() )
        return None

    @classmethod
    def set_global(cls):
        """Copy the parsed command-line values onto the class and open the logger."""
        cls.JOBID = cls.argument["job"]
        cls.TOTAL_CORES = cls.argument["cores"]
        cls.INPUT = cls.argument["input"]
        cls.logger = cls.Set_Run_Mode(cls.JOBID +".log",cls.MODE)
        cls.Ref_data = cls.argument["ReferenceData"]
        cls.prep_data = cls.argument["prepsystem"]
        return None

    @classmethod
    def Select_Run_Mode(cls,arg):
        """Map the mode string to the corresponding logging level."""
        mode = {
            "debug": logging.DEBUG,
            "run": logging.INFO
        }
        return mode[arg]

    @classmethod
    def Select_Formatter(cls,arg):
        """Map the mode string to a log-record format (verbose for debug)."""
        mode = {
            "debug": "%(asctime)s - %(name)s - %(levelname)s - %(message)s",
            "run": "%(message)s"
        }
        return mode[arg]

    @classmethod
    def Set_Run_Mode(cls,logname,mode):
        """Configure the root logger to write *logname* (truncating any
        previous file) with the mode's level/format, and return it."""
        logger = logging.getLogger()
        logger.setLevel(cls.Select_Run_Mode(mode))
        fh = logging.FileHandler(logname,mode="w")
        formatter = logging.Formatter(cls.Select_Formatter(mode))
        fh.setFormatter(formatter)
        logger.addHandler(fh)
        return logger
def keyword_exists(argument_str, keyword):
    """Locate *keyword* in *argument_str* (a string or list); return its
    index, or -1 when it does not occur."""
    try:
        return argument_str.index(keyword)
    except ValueError:
        # -1 signals that the keyword is absent.
        return -1
| true |
717b23c97a45a5e2aefc5020291d4a735b5d2d47 | Python | mikey-sb/python_logic_problems | /football_results/src/football_results.py | UTF-8 | 775 | 3.171875 | 3 | [] | no_license |
def get_result(final_score):
    """Describe one match outcome.

    Expects a dict with "home_score" and "away_score" keys and returns
    one of "Home win", "Away win" or "Draw".
    """
    home = final_score["home_score"]
    away = final_score["away_score"]
    if home > away:
        return "Home win"
    if home < away:
        return "Away win"
    if home == away:
        return "Draw"
def get_results(final_scores):
    """Map get_result over a list of final-score dicts, preserving order."""
    return [get_result(score) for score in final_scores]
# if score["home_score"] > score["away_score"]:
# result_list.append("Home win")
# if score["home_score"] < score["away_score"]:
# result_list.append("Away win")
# if score["home_score"] == score["away_score"]:
# result_list.append("Draw")
# return result_list | true |