blob_id
stringlengths
40
40
language
stringclasses
1 value
repo_name
stringlengths
5
133
path
stringlengths
2
333
src_encoding
stringclasses
30 values
length_bytes
int64
18
5.47M
score
float64
2.52
5.81
int_score
int64
3
5
detected_licenses
listlengths
0
67
license_type
stringclasses
2 values
text
stringlengths
12
5.47M
download_success
bool
1 class
eb2919dacdfe56bd0c59f5bcc7dec591aeb0f8af
Python
rmccampbell/PythonProjects
/music.py
UTF-8
4,102
3.078125
3
[]
no_license
#!/usr/bin/env python3
"""A tiny rhythm game: notes scroll right-to-left and the player presses
keys 1-5 to 'hit' each note while it crosses the target zone (x in [90, 120)).
Five mistakes lose the game; clearing the whole song wins.

NOTE(review): requires sound files data/sounds/{c,d,e,f,g,a,b}_note.wav
next to this script — confirm they exist before shipping.
"""
import pygame, sys, os
from pygame.locals import *

def load_sound(name):
    # Resolve the .wav relative to this file so the script works from any CWD.
    base = os.path.dirname(__file__)
    fullname = os.path.join(base, 'data', 'sounds', name + '.wav')
    return pygame.mixer.Sound(fullname)

class Note(pygame.sprite.Sprite):
    """One scrolling note.  Color encodes state: blue = pending,
    green = hit, red = missed."""

    def __init__(self, pitch):
        self.pitch = pitch
        # Look up the pre-loaded Sound for this pitch (filled in by main()).
        self.sound = sounds[pitch]
        # Vertical lane derived from pitch; horizontal start just off-screen right.
        self.y = 415 - (pitch * 50)
        self.x = 650
        self.speed = 4              # pixels moved left per frame
        self.color = Color(0, 0, 255)
        self.active = False         # True while inside the hittable zone
        self.hit = False
        self.missed = False
        super().__init__()

    def update(self, surface):
        """Advance one frame: move left, manage the hit window, draw."""
        self.x -= self.speed
        # Entering the target zone between the two vertical lines.
        if self.x < 120 and self.x >= 90 and not self.active:
            self.active = True
        # Leaving the zone without being hit counts as a miss.
        if self.x < 90 and self.active:
            self.active = False
            if not self.hit:
                self.miss()
        # Fully off-screen: remove from all sprite groups.
        if self.x < -10:
            self.kill()
        self.draw(surface)

    def draw(self, surface):
        pygame.draw.circle(surface, self.color, (self.x, self.y), 10)

    def hit_note(self):
        """Mark as hit, play the tone, turn green."""
        self.hit = True
        self.sound.play()
        self.color = Color(0, 255, 0)

    def miss(self):
        # Increments the module-level mistake counter shared with main().
        global mistakes
        self.missed = True
        self.color = Color(255, 0, 0)
        mistakes += 1

# pitch -> Sound; populated inside main() after pygame.init().
sounds = {}

# The song as a list of pitches (1-5); 0 is a rest (no note spawned).
song = [3, 3, 4, 5, 5, 4, 3, 2, 1, 1, 2, 3, 3, 2, 2, 0,
        3, 3, 4, 5, 5, 4, 3, 2, 1, 1, 2, 3, 2, 1, 1]

mistakes = 0

def main():
    """Run the game loop until win, loss, or quit."""

    def hit_note(pitch):
        # Reward the first active note of the right pitch; anything else
        # (wrong pitch, or no note in the zone) is a false hit.
        for note in notes:
            if note.active:
                if note.pitch == pitch:
                    note.hit_note()
                    return
        false_hit()

    def false_hit():
        global mistakes
        mistakes += 1

    pygame.init()
    screen = pygame.display.set_mode((640,480))
    #Load sounds
    sounds.update({
        1: load_sound('c_note'),
        2: load_sound('d_note'),
        3: load_sound('e_note'),
        4: load_sound('f_note'),
        5: load_sound('g_note'),
        6: load_sound('a_note'),
        7: load_sound('b_note'),
    })
    duration = 20      # frames between consecutive song positions
    position = 0       # index into song
    timer = 0
    notes = pygame.sprite.OrderedUpdates()
    clock = pygame.time.Clock()
    running = True
    while running:
        clock.tick(30)  # cap at 30 FPS
        #Handle events
        for event in pygame.event.get():
            if event.type == KEYDOWN:
                #Test for hit notes
                if event.key == K_1:
                    hit_note(1)
                if event.key == K_2:
                    hit_note(2)
                if event.key == K_3:
                    hit_note(3)
                if event.key == K_4:
                    hit_note(4)
                if event.key == K_5:
                    hit_note(5)
                # Escape or Alt+F4 quits.
                elif event.key == K_ESCAPE or \
                        event.key == K_F4 and event.mod & KMOD_ALT:
                    running = False
            elif event.type == QUIT:
                running = False
        #Track position and create new Notes
        if timer == 0:
            if position < len(song):
                pitch = song[position]
                if pitch > 0:
                    notes.add(Note(pitch))
                position += 1
                timer = duration
        timer -= 1
        screen.fill((255, 255, 255))
        #Draw background
        for y in range(115, 366, 50):
            pygame.draw.line(screen, (191, 191, 255), (0, y),
                             (screen.get_width(), y), 3)
        #Update and draw notes
        notes.update(screen)
        #Draw foreground
        pygame.draw.line(screen, (0, 0, 0), (90, 0), (90, screen.get_height()), 3)
        pygame.draw.line(screen, (0, 0, 0), (120, 0), (120, screen.get_height()), 3)
        pygame.display.flip()
        #Too many mistakes
        if mistakes >= 5:
            print('You lose')
            running = False
        #End of song
        if len(notes) == 0 and position >= len(song):
            print('You win!')
            running = False

if __name__ == '__main__':
    main()
    pygame.quit()
true
8a4f50ac473894390e2e4a484cd99cf0b2dce065
Python
aduston/simpledrawers
/drawers.py
UTF-8
2,021
2.609375
3
[]
no_license
"""Build and display a drawer cabinet as a single OCC compound shape.

Dimensions are in the same length unit as THICKNESS_0/THICKNESS_1
(presumably centimetres, given the 2.54 factors below — TODO confirm).
"""
from OCC import gp, TopLoc
from OCC.Display.SimpleGui import init_display
from OCC.TopoDS import TopoDS_Builder, TopoDS_Compound
from drawer import make_drawer
from drawer_box import make_drawer_box, BACK_INSET
from constants import THICKNESS_0, THICKNESS_1

def _move(shape, x, y, z):
    """Translate *shape* in place by the vector (x, y, z)."""
    tr = gp.gp_Trsf()
    tr.SetTranslation(gp.gp_Vec(x, y, z))
    loc = TopLoc.TopLoc_Location(tr)
    shape.Move(loc)

def make_drawers(dx, dy, dz, arrangement):
    """Return a compound: the outer box plus all drawers.

    dx, dy, dz   -- overall width, depth, height of the cabinet
    arrangement  -- list of drawer counts per level, top level first
                    (e.g. [3, 1, 1] = three drawers on top, then two
                    full-width levels below)
    """
    air_space = 0.05  # clearance around each drawer so it can slide
    # Vertical space left after the shelves/top/bottom panels are accounted for.
    available_z_space = dz - THICKNESS_0 * (len(arrangement) + 1)
    drawer_space_height = available_z_space / len(arrangement)
    drawer_depth = dy - BACK_INSET - THICKNESS_1
    # Shelf offsets measured from the bottom, one per gap between levels.
    offsets = []
    for i in range(len(arrangement) - 1):
        offsets.append(THICKNESS_0 + (i + 1) * (drawer_space_height + THICKNESS_0))
    drawer_box = make_drawer_box(dx, dy, dz, offsets)
    drawers = []
    for level, num_drawers in enumerate(arrangement):
        # Split the interior width evenly, leaving air_space between/around drawers.
        drawer_width = (dx - THICKNESS_0 * 2 - (num_drawers + 1) * air_space) / float(num_drawers)
        # level 0 is the topmost row, so z decreases as level grows.
        z_pos = dz - (level + 1) * (THICKNESS_0 + drawer_space_height) + air_space
        for drawer_index in range(num_drawers):
            drawer = make_drawer(
                drawer_width, drawer_depth, drawer_space_height - 2 * air_space)
            _move(drawer,
                  THICKNESS_0 + air_space + (air_space + drawer_width) * drawer_index,
                  0, z_pos)
            drawers.append(drawer)
    # Assemble everything into one compound so it displays/export as a unit.
    builder = TopoDS_Builder()
    compound = TopoDS_Compound()
    builder.MakeCompound(compound)
    builder.Add(compound, drawer_box)
    for drawer in drawers:
        builder.Add(compound, drawer)
    return compound

# Cabinet sized to hold US-letter paper (11" x 8.5", converted to cm) with 2
# units of slack plus wall thicknesses; height 24; layout 3 + 1 + 1 drawers.
paper_size = (11 * 2.54, 8.5 * 2.54)
drawers = make_drawers(
    paper_size[0] + 2 + THICKNESS_0 * 2,
    paper_size[1] + 2 + THICKNESS_0 * 2 + THICKNESS_1 + BACK_INSET,
    24, [3, 1, 1])
display, start_display, add_menu, add_function_to_menu = init_display()
display.DisplayShape(drawers, update=True)
start_display()
true
0bbd599e86178ec3aff32f1e9c33b3072c6c3674
Python
lleskow/lapie-code
/seconde/assets/imgll.py
UTF-8
1,474
3.328125
3
[]
no_license
"""Apply a user-supplied pixel function to an image, auto-detecting whether
the function works on grayscale values (one argument) or RGB channels
(three arguments), and whether it returns a scalar or a channel tuple.
"""
from PIL import Image
from os.path import splitext

def appliquer_fonction_vers_nb(f, image, mode_depart):
    """Apply *f* pixel-by-pixel and save a grayscale ("L") result.

    f receives one grayscale value when mode_depart == "L", otherwise the
    three R, G, B channel values; its (numeric) result becomes the new
    pixel.  The output is saved as "<name>new.png" next to the input.
    """
    im = Image.open(image)
    largeur, hauteur = im.size
    im2 = Image.new("L", im.size)
    for y in range(hauteur):  # iterate over rows
        for x in range(largeur):  # iterate over the columns of one row
            pixel = im.getpixel((x, y))
            if mode_depart == "L":
                im2.putpixel((x, y), f(pixel))
            else:
                im2.putpixel((x, y), int(f(pixel[0], pixel[1], pixel[2])))
    im2.save(splitext(image)[0] + "new.png")
    print("done")

def appliquer_fonction_vers_couleur(f, image):
    """Apply *f* (R, G, B -> channel triple) pixel-by-pixel and save an
    RGB result as "<name>new.png" next to the input."""
    im = Image.open(image)
    largeur, hauteur = im.size
    im2 = Image.new("RGB", im.size)
    for y in range(hauteur):  # iterate over rows
        for x in range(largeur):  # iterate over the columns of one row
            pixel = im.getpixel((x, y))
            im2.putpixel((x, y), tuple(map(int, f(pixel[0], pixel[1], pixel[2]))))
    im2.save(splitext(image)[0] + "new.png")
    print("done")

def appliquer_fonction(f, image):
    """Dispatch *image* through *f*, choosing the right conversion helper.

    f is probed once with sample values to discover its arity and return
    type.  Improvements over the original: the probe result is computed a
    single time instead of three, the bare ``except:`` (which swallowed
    even KeyboardInterrupt) is narrowed to TypeError — the exception a
    wrong arity actually raises — and the ``type(x) != type(1.0)`` checks
    are replaced by isinstance.
    """
    try:
        sample = f(1, 2, 3)
    except TypeError:
        # f does not accept three arguments: grayscale in, grayscale out.
        f(1)
        mode_depart = "L"
        mode_arrive = "L"
    else:
        mode_depart = "RGB"
        # Scalar result -> grayscale output; anything else (tuple) -> RGB.
        mode_arrive = "L" if isinstance(sample, (int, float)) else "RGB"
    if mode_arrive == "RGB":
        appliquer_fonction_vers_couleur(f, image)
    else:
        appliquer_fonction_vers_nb(f, image, mode_depart)
true
9b4badc87ebb241607ef726b00e27a5dabe96d60
Python
ktarrant/stockfinder
/stock/candlestick.py
UTF-8
2,689
2.9375
3
[]
no_license
from plotly.tools import FigureFactory as FF
import stock.indicators as indicators
import pandas as pd

class Candlestick(dict):
    """ Class for managing a candlestick stock figure.

    Builds the base figure from an OHLC DataFrame via plotly's
    FigureFactory, then layers on hover markers, 50/14-period moving
    averages, and (optionally) volume bars.
    """
    # Feature toggles shared by all instances.
    CUSTOM_MARKERS_ENABLED = False
    VOLUME_ENABLED = False

    def __init__(self, df, primaryKey="Close"):
        """Create the figure dict from *df* (needs Open/High/Low/Close
        columns indexed by date).  *primaryKey* selects the column used
        for markers and moving averages."""
        super(Candlestick, self).__init__(
            FF.create_candlestick(df.Open, df.High, df.Low, df.Close,
                                  dates=df.index))
        self.df = df
        if Candlestick.CUSTOM_MARKERS_ENABLED:
            # Suppress default hover text; the marker scatter replaces it.
            for subplot in self["data"]:
                subplot["hoverinfo"] = "none"
            self._addMarkers(primaryKey, df[primaryKey])
        self._addMovingAverage(df[primaryKey], 50)
        self._addMovingAverage(df[primaryKey], 14)
        if Candlestick.VOLUME_ENABLED:
            self._addVolumeBars(df["Volume"])
        # BUG FIX: the original did `self["layout"].update = {...}`,
        # assigning a dict to the bound method instead of calling it —
        # on a plain dict that raises AttributeError, and either way the
        # layout settings were never merged.  Call update() instead.
        self["layout"].update({
            "paper_bgcolor": 'rgba(0,0,0,0)',
            "plot_bgcolor": 'rgba(0,0,0,0)',
            "yaxis1": {"domain": [0, 1]},
            "yaxis2": {"domain": [0, 0.2], "anchor": "x1"},
        })

    def _addMarkers(self, name, series):
        """Add an invisible scatter whose hover text shows the
        day-over-day change of *series*.

        NOTE(review): the label pairs series.diff() with pct_change(),
        so the first number is the absolute change, not the raw value —
        confirm that is the intended display.
        """
        markerTexts = [
            "{0:.2f} ({1:+.2%})".format(value, perChange)
            for (value, perChange) in zip(series.diff(), series.pct_change())]
        labelScatter = {
            "x": series.index,
            "y": series.values,
            "name": name,
            "mode": 'markers',
            # Size/opacity 0: the points exist only to carry hover text.
            "marker": { "size": 0, "opacity": 0, "color": "#000000" },
            "type": 'scatter',
            "text": markerTexts,
            "showlegend": False,
            "hoverinfo": "all",
            "xaxis": "x1",
            "yaxis": "y1",
        }
        self["data"] += [ labelScatter ]

    def _addMovingAverage(self, series, n):
        """Overlay an n-period simple moving average of *series*."""
        window = series.rolling(window=n)
        ma = pd.Series(window.mean())
        maSubplot = {
            "x": ma.index,
            "y": ma.values,
            "name": 'MA_{}'.format(n),
            # BUG FIX: scatter `mode` must be 'lines' (plural); 'line' is
            # not a valid flag and fails plotly validation.
            "mode": 'lines',
            "marker": { "size": 1 },
            "type": 'scatter',
            "showlegend": False,
            "hoverinfo": "name+y",
            "xaxis": "x1",
            "yaxis": "y1",
        }
        self["data"] += [ maSubplot ]

    def _addVolumeBars(self, series):
        """Prepend a translucent volume bar trace on the secondary y-axis.

        NOTE(review): "showgrid"/"showticklabels" are axis properties,
        not bar-trace properties; modern plotly rejects them here —
        confirm against the plotly version in use.
        """
        sp = {
            "x": series.index,
            "y": series.values,
            "name": "Volume",
            "type": "bar",
            "opacity": 0.2,
            "showlegend": False,
            "showgrid": False,
            "showticklabels": False,
            "hoverinfo": "name+y",
            "xaxis": "x1",
            "yaxis": "y2",
        }
        # Prepend so volume renders underneath the candlesticks.
        self["data"] = [ sp ] + self["data"]
true
c9e57e35356dc751b18b73d1df2d759e6bef9ef3
Python
mlevesquedion/Scrapy
/HTMLparser.py
UTF-8
448
2.78125
3
[]
no_license
from bs4 import BeautifulSoup

class HTMLparser:
    """Thin wrapper around BeautifulSoup for querying one HTML document."""

    def __init__(self):
        # No document parsed yet; callers should check is_set() first.
        self.soup = None

    def is_set(self):
        """Return True once set_html() has parsed a document."""
        return self.soup is not None

    def set_html(self, html):
        """Parse *html* with the html5lib backend and keep the soup."""
        self.soup = BeautifulSoup(html, 'html5lib')

    def get_pattern_matches(self, pattern):
        """Return every element matching the CSS selector *pattern*."""
        return self.soup.select(pattern)

    def get_available_tags(self):
        """Return the document's distinct tag names, sorted."""
        distinct_names = {tag.name for tag in self.soup.findAll()}
        return sorted(distinct_names)
true
7d8443525f62296b18cb258e44b19d5d69c165b6
Python
israelcode/CG
/kp.py
UTF-8
6,343
2.65625
3
[]
no_license
#coding=utf-8
# Surface of revolution whose generatrix is a 2nd-degree 3D Bezier curve
# (course assignment, variant 13).  The user is prompted (in Russian) for
# two points defining the rotation axis and three Bezier control points.
import matplotlib as mpl
import numpy as np
# NOTE(review): scipy.misc.comb was removed in SciPy 1.0; modern SciPy
# provides it as scipy.special.comb — this import pins an old SciPy.
from scipy.misc import comb
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
import pylab
from mayavi import mlab
from matplotlib.ticker import MaxNLocator
from matplotlib import cm
from numpy.random import randn
from scipy import array, newaxis
try:
    # for Python2
    import Tkinter as tk
    import tkSimpleDialog as tksd
except:
    # for Python3
    import tkinter as tk
    import tkinter.simpledialog as tksd

global ar1x, ar1y, ar1z, ar2x, ar2y, ar2z
global bc1x, bc1y, bc1z, bc2x, bc2y, bc2z, bc3x, bc3y, bc3z

root = tk.Tk()
# Prompt for the two points of the rotation axis (UI strings are Russian
# and kept verbatim — they are runtime output).
ar1x = tksd.askinteger("Parametr (Int)", "Введите (х) первой точки оси вращения", parent=root, minvalue=0)
ar1y = tksd.askinteger("Parametr (Int)", "Введите (y) первой точки оси вращения", parent=root, minvalue=0)
ar1z = tksd.askinteger("Parametr (Int)", "Введите (z) первой точки оси вращения", parent=root, minvalue=0)
ar2x = tksd.askinteger("Parametr (Int)", "Введите (х) второй точки оси вращения", parent=root, minvalue=0)
ar2y = tksd.askinteger("Parametr (Int)", "Введите (y) второй точки оси вращения", parent=root, minvalue=0)
ar2z = tksd.askinteger("Parametr (Int)", "Введите (z) второй точки оси вращения", parent=root, minvalue=0)
# Prompt for the three Bezier control points.
bc1x=tksd.askinteger("Parametr (Int)", "Введите (х) первой точки кривой Безье", parent=root, minvalue=0)
bc1y=tksd.askinteger("Parametr (Int)", "Введите (y) первой точки кривой Безье", parent=root, minvalue=0)
bc1z=tksd.askinteger("Parametr (Int)", "Введите (z) первой точки кривой Безье", parent=root, minvalue=0)
bc2x=tksd.askinteger("Parametr (Int)", "Введите (x) второй точки кривой Безье", parent=root, minvalue=0)
bc2y=tksd.askinteger("Parametr (Int)", "Введите (y) второй точки кривой Безье", parent=root, minvalue=0)
bc2z=tksd.askinteger("Parametr (Int)", "Введите (z) второй точки кривой Безье", parent=root, minvalue=0)
bc3x=tksd.askinteger("Parametr (Int)", "Введите (x) третьей точки кривой Безье", parent=root, minvalue=0)
bc3y=tksd.askinteger("Parametr (Int)", "Введите (y) третьей точки кривой Безье", parent=root, minvalue=0)
bc3z=tksd.askinteger("Parametr (Int)", "Введите (z) третьей точки кривой Безье", parent=root, minvalue=0)

# Chekushkin M8O-304B (author tag, translated from Russian)
# Variant 13) Surface of revolution. Generatrix - 2nd-degree 3D Bezier curve

def bernstein_poly(i, n, t):
    """i-th Bernstein basis polynomial of degree n evaluated at t."""
    return comb(n, i) * (t**(n - i)) * (1 - t)**i

def bezier_curve(points, nTimes=1000):
    """Sample the Bezier curve defined by *points* at nTimes parameter
    values in [0, 1]; returns the (xvals, yvals, zvals) arrays."""
    nPoints = len(points)
    xPoints = np.array([p[0] for p in points])
    yPoints = np.array([p[1] for p in points])
    zPoints = np.array([p[2] for p in points])
    t = np.linspace(0.0, 1.0, nTimes)
    polynomial_array = np.array(
        [bernstein_poly(i, nPoints - 1, t) for i in range(0, nPoints)])
    xvals = np.dot(xPoints, polynomial_array)
    yvals = np.dot(yPoints, polynomial_array)
    zvals = np.dot(zPoints, polynomial_array)
    return xvals, yvals, zvals

from math import pi ,sin, cos

def R(theta, u):
    """Rodrigues rotation matrix for angle theta about axis u
    (u must be a unit vector for the matrix to be a pure rotation)."""
    return [[cos(theta) + u[0]**2 * (1-cos(theta)),
             u[0] * u[1] * (1-cos(theta)) - u[2] * sin(theta),
             u[0] * u[2] * (1 - cos(theta)) + u[1] * sin(theta)],
            [u[0] * u[1] * (1-cos(theta)) + u[2] * sin(theta),
             cos(theta) + u[1]**2 * (1-cos(theta)),
             u[1] * u[2] * (1 - cos(theta)) - u[0] * sin(theta)],
            [u[0] * u[2] * (1-cos(theta)) - u[1] * sin(theta),
             u[1] * u[2] * (1-cos(theta)) + u[0] * sin(theta),
             cos(theta) + u[2]**2 * (1-cos(theta))]]

def Rotate(pointToRotate, point1, point2, theta):
    """Rotate pointToRotate by theta radians about the axis through
    point1 and point2; coordinates are rounded to integers."""
    u= []
    squaredSum = 0
    for i,f in zip(point1, point2):
        u.append(f-i)
        squaredSum += (f-i) **2
    # NOTE(review): this divides by the *squared* axis length rather than
    # math.sqrt(squaredSum), so u is a unit vector only when the axis has
    # length 1.  R() assumes a unit axis — likely a normalization bug;
    # confirm before reusing this code.
    u = [i/squaredSum for i in u]
    r = R(theta, u)
    rotated = []
    for i in range(3):
        # round() snaps each rotated coordinate to the nearest integer.
        rotated.append(round(sum([r[j][i] * pointToRotate[j] for j in range(3)])))
    return rotated

if __name__ == "__main__":
    nPoints = 3
    points = [[bc1x,bc1y,bc1z],[bc2x,bc2y,bc2z],[bc3x,bc3y,bc3z]]
    xpoints = [p[0] for p in points]
    ypoints = [p[1] for p in points]
    zpoints = [p[2] for p in points]
    # Draw the original generatrix curve.
    xvals, yvals, zvals = bezier_curve(points, nTimes=1000)
    fig = plt.figure()
    # NOTE(review): fig.gca(projection='3d') is removed in matplotlib >= 3.6;
    # newer code uses fig.add_subplot(projection='3d').
    ax = fig.gca(projection='3d')
    ax.plot(xvals, yvals, zvals, label='bezier')
    #ax.plot(xpoints, ypoints, zpoints, "ro")
    # 1 1 2
    # 1 2 2
    # 0 0 0
    # 0 0 1
    p1=[ar1x,ar1y,ar1z]
    p2=[ar2x,ar2y,ar2z]
    radiane = 0
    angle = pi/12
    # Accumulate all sampled points to triangulate the surface later.
    xtvals=xvals
    ytvals=yvals
    ztvals=zvals
    # Sweep the control polygon around the axis in pi/24 steps and redraw
    # the rotated Bezier curve each time.
    while angle <= 2*pi:
        pp1 = Rotate(points[0], p1, p2, angle)
        pp2 = Rotate(points[1], p1, p2, angle)
        pp3 = Rotate(points[2], p1, p2, angle)
        npoints=[pp1,pp2,pp3]
        xnvals, ynvals, znvals = bezier_curve(npoints, nTimes=1000)
        xtvals = np.append( xtvals , xnvals )
        ytvals = np.append( ytvals , ynvals )
        ztvals = np.append( ztvals , znvals )
        ax.plot(xnvals, ynvals, znvals, label='bezier')
        print(angle)
        angle= angle + pi/24
    plt.gcf().canvas.set_window_title("Chekushkin")
    #plt.show()
    # Alternative mayavi-based surface rendering, kept disabled.
    #myavi-lib
    #####################################
    #pts = mlab.points3d(xtvals, ytvals, ztvals, ztvals)
    #mesh = mlab.pipeline.delaunay2d(pts)
    #pts.remove()
    #surf = mlab.pipeline.surface(mesh)
    #mlab.xlabel("x")
    #mlab.ylabel("y")
    #mlab.zlabel("z")
    #mlab.show()
    ######################################
    # Triangulated-surface view of all accumulated sample points.
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    surf = ax.plot_trisurf(xtvals, ytvals, ztvals, cmap=cm.jet, linewidth=0)
    fig.colorbar(surf)
    ax.xaxis.set_major_locator(MaxNLocator(5))
    ax.yaxis.set_major_locator(MaxNLocator(6))
    ax.zaxis.set_major_locator(MaxNLocator(5))
    fig.tight_layout()
    plt.gcf().canvas.set_window_title("Chekushkin")
    plt.show()
    pylab.show()
true
24d39bee0e36d2a144aad74cf86a201ca0b58dbb
Python
eangulee/python
/math/montecarlo.py
UTF-8
1,924
3.703125
4
[]
no_license
"""Monte Carlo estimates of pi and of simple definite integrals, with
symbolic (sympy) results for comparison.  (Original comments translated
from Chinese.)"""
import random
from sympy import *
import numpy as np
import matplotlib.pyplot as plt
import math

# Monte Carlo (original comment said "definite integral"; this function
# actually estimates pi by sampling the unit circle's bounding square).
def calpai():
    n = 1000000
    r = 1.0
    a, b = (0.0, 0.0)
    x_neg, x_pos = a - r, a + r
    y_neg, y_pos = b - r, b + r
    count = 0
    for i in range(0, n):
        x = random.uniform(x_neg, x_pos)
        y = random.uniform(y_neg, y_pos)
        if x*x + y*y <= 1.0:
            count += 1
    # hits/total is the circle-to-square area ratio, i.e. pi/4.
    print((count / float(n)) * 4)

# Monte Carlo definite integral of x^2 over [0, 1].
def integral():
    n = 10000000
    x_min, x_max = 0.0, 1.0
    y_min, y_max = 0.0, 1.0
    count = 0
    for i in range(0, n):
        x = random.uniform(x_min, x_max)
        y = random.uniform(y_min, y_max)
        # x*x > y means the point lies below the curve; the integral is the
        # ratio of the area under the curve to the unit square's area.
        if x*x > y:
            count += 1
    integral_value = count / float(n)
    print(integral_value)

# Compute the definite integral symbolically for comparison.
def integral1():
    x = symbols("x")
    f = x * x
    # integrate(function, (variable, lower_bound, upper_bound))
    v = integrate(f,(x,0.0,1.0))
    print(v)

# Symbolic integral of 1/x over [1, 2] (equals ln 2).
def integral_ln():
    a = 1
    b = 2
    x = symbols("x")
    # The derivative of ln(x) is 1/x.
    f = 1 / x
    v = integrate(f ,(x,1.0,2.0))
    print("integrate:",v)
    print("ln2 - ln1 = ",(math.log(2)))

# Monte Carlo estimate of ln 2 (area under 1/x over [1, 2]).
def montecarlo_ln():
    n = 1000000
    x_min, x_max = 1.0, 2.0
    y_min, y_max = 0.0, 1.0
    count = 0
    for i in range(0, n):
        x = random.uniform(x_min, x_max)
        y = random.uniform(y_min, y_max)
        # 1/x > y means the point lies below the curve; the integral is the
        # ratio of the area under the curve to the sampling rectangle's area.
        if 1 / x > y:
            count += 1
    integral_value = count / float(n)
    print(integral_value)

if __name__ == '__main__':
    # calpai()
    # integral()
    # integral1()
    integral_ln()
    montecarlo_ln()
true
d024eea1490b2cbd1a0cdc9e56f80dada5dfe14a
Python
Pareidollya/LAA-
/Boubble.py
UTF-8
266
3.25
3
[]
no_license
def bubble_sort(values):
    """Sort *values* in place with bubble sort and return it.

    After pass j the last j slots already hold their final values, so each
    inner sweep stops j positions short of the end.  A pass with no swaps
    means the list is sorted, so we stop early (behavior-preserving
    optimization over the original flat script).
    """
    n = len(values)
    for j in range(n):
        swapped = False
        for i in range(n - 1 - j):
            if values[i] > values[i + 1]:
                # Idiomatic tuple swap instead of a temp variable.
                values[i], values[i + 1] = values[i + 1], values[i]
                swapped = True
        if swapped == False:
            break
    return values


# Same demo data and output as the original script.
vetor = [5,1,4,2,8,3452,76,23,56,657,123,-12345,5,5,66,4,2,23,643,234,3,0]
bubble_sort(vetor)
print(vetor)
true
590ca20214a2e50da6e1eb7ab40701d7bdcac243
Python
Catcheryp/CTF
/web/WebSecurityAcademy/jwt/3_weak_signing_key.py
UTF-8
1,164
3.234375
3
[]
no_license
# Demo script for 'JWT Authentication Bypass via Weak Signing Key' video: https://youtu.be/ov9yT4WAuzI
import jwt

# Paste JWT token here
jwt_token = 'INSERT_TOKEN_HERE'
wordlist_file = '/usr/share/wordlists/rockyou.txt'


def attempt_fuzzing(secret_key, algorithm):
    """Return True when *secret_key* verifies the token's signature,
    printing the key and decoded payload on success."""
    try:
        decoded = jwt.decode(jwt_token, secret_key, algorithms=[algorithm])
    except jwt.InvalidSignatureError:
        return False
    print(f"Valid key found: {secret_key}")
    print(f"Decoded payload: {decoded}")
    return True


def fuzz_secret_key(wordlist):
    """Try every candidate key in *wordlist*; return the first one that
    validates the token, or None when the list is exhausted."""
    header = jwt.get_unverified_header(jwt_token)
    algorithm = header.get("alg")
    if not algorithm:
        # Cannot brute-force without knowing which algorithm to verify with.
        print("Algorithm not found in JWT header.")
        return None
    print(f"Algorithm: {algorithm}")
    with open(wordlist, "r") as candidates:
        for raw_line in candidates:
            candidate = raw_line.strip()
            if attempt_fuzzing(candidate, algorithm):
                return candidate
    return None


# Start fuzzing
found_key = fuzz_secret_key(wordlist_file)
if found_key:
    print(f"\nSecret key found: {found_key}")
else:
    print("No valid secret key found.")
true
965ae583b49a293f0ab96a2a985db13b3298db31
Python
TaoyuMei/Python_Practice_DataScience
/Plotting.py
UTF-8
8,381
3.765625
4
[]
no_license
import numpy as np import matplotlib.pyplot as plt import numpy.matlib from sklearn.cluster import KMeans ### Exercise 1. Plotting cell shapes diatoms_data = np.loadtxt("diatoms.txt") def plot_a_cell(cell, colour='b'): """input a numpy array of the 180 coordinates of the 90 landmark points in 'x1, y1, x2, y2...x90, y90' manner; generate a plot of landmark points and interpolating between subsequent landmark points; colour can also be specified, the default is blue; """ cell_x = [] cell_y = [] i = 0 for coor in cell: if i % 2 == 0: cell_x.append(coor) else: cell_y.append(coor) i += 1 plt.plot(cell_x, cell_y, c=colour) plt.axis('equal') print("begin to plot cell(s)") # plot a cell plt.title("the Plot of a Cell") plot_a_cell(diatoms_data[0, :], colour='b') plt.savefig("the Plot of a Cell.png", format="png") # plot all cells plt.cla() # remove existing plots to avoid overlap plt.title("the Plot of All Cells") for i in range(diatoms_data.shape[0]): co = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'][np.random.randint(8)] # randomly choose a colour plot_a_cell(diatoms_data[i, :], colour=co) plt.savefig("the Plot of All Cells.png", format="png") print("finish plotting cell(s)") ### Exercise 2. 
Visualizing variance in visual data # use the pca function defined in the assignment 3 def pca(data): """input a data matrix in which each column corresponds to a coordinate and each row corresponds to a data point; output the eigenvalues in a vector (numpy array) in descending order and a matrix where each column is an eigenvector of the corresponding eigenvalue """ # center the data data_mean = np.mean(data, 0) # 0 means calculate the means of each column data_mean_matrix = numpy.matlib.repmat(data_mean, data.shape[0], 1) data_centered = data - data_mean_matrix # construct the input matrix for svd data_centered = data_centered / np.sqrt(data.shape[0] - 1) # use singular value decomposition (u, S, PC) = np.linalg.svd(data_centered) # S is a vector of standard deviation rather than a diagonal matrix eigenvalues = S * S # compute the variances which are already in descending order PC = PC.T # make sure each column in the matrix is an eigenvector (the length ia already 1) return eigenvalues, PC print("begin PCA on the diatoms data") # compute the needed values for plotting variances, eigenvectors = pca(diatoms_data) m = np.mean(diatoms_data, 0) def plot_5_cells_givenPC(PC_index, variances, eigenvectors, m, col): """input a selected PC index (i.e. 1 or 2 or 3), the numpy array of variance and eigenvector matrix obtained from PCA, the mean of each column of the diatom data and a type of colour (e.g. 
"Greens", "Blues"); generate the plot of 5 cells corresponding to this PC """ # prepare the 3 variables theta_single = np.sqrt(variances[PC_index - 1]) theta = numpy.matlib.repmat(theta_single, 180, 1).T[0] e = eigenvectors[:, PC_index - 1] # generate the 5 cells cell1 = m - 2 * theta * e cell2 = m - theta * e cell3 = m cell4 = m + theta * e cell5 = m + 2 * theta * e cell_list = [cell1, cell2, cell3, cell4, cell5] col_index_list = [0.5, 0.6, 0.7, 0.8, 0.9] # plotting colours = plt.get_cmap(col) for cell, ind in zip(cell_list, col_index_list): plot_a_cell(cell, colours(ind)) print("begin plotting the 5 cells for each PC") # plot the 5 cells for each of the first 3 PCs plt.cla() plt.title("5 Cells for PC1") plot_5_cells_givenPC(1, variances, eigenvectors, m, "Reds") plt.savefig("5 Cells for PC1.png", format="png") plt.cla() plt.title("5 Cells for PC2") plot_5_cells_givenPC(2, variances, eigenvectors, m, "Blues") plt.savefig("5 Cells for PC2.png", format="png") plt.cla() plt.title("5 Cells for PC3") plot_5_cells_givenPC(3, variances, eigenvectors, m, "Greens") plt.savefig("5 Cells for PC3.png", format="png") print("diatoms data finished") ### Exercise 3. 
Critical thinking # use the mds function in the assignment 3 def mds(data, d): """input a data matrix with each column corresponding to a coordinate and each row corresponding to a data point, and the number of PCs to be selected; output an N * d data matrix containing the d coordinates of N data points projected on to the top d PCs """ if d > data.shape[1]: print("you specify too much PCs, the data have only", data.shape[1], "dimensions.") _, PC = pca(data) # extract the PCs PC_selected = PC[:, 0:d] # select first d PCs # center the data data_mean = np.mean(data, 0) # 0 means calculate the means of each column data_mean_matrix = numpy.matlib.repmat(data_mean, data.shape[0], 1) data_centered = data - data_mean_matrix data_projected = np.dot(PC_selected.T, data_centered.T).T # project the data on to the selected PCs return data_projected print("begin analyzing the PCA toy data") # load the PCA toy data set and perform PCA PCA_toy_data = np.loadtxt("pca_toydata.txt") PCA_toy_data_projected = mds(PCA_toy_data, 2) plt.cla() plt.title("the PCA toy data projected on to the top 2 PCs") plt.scatter(PCA_toy_data_projected[:, 0], PCA_toy_data_projected[:, 1]) plt.xlim(-1.75, 1.75) plt.ylim(-1.5, 1.5) plt.savefig("the PCA toy data projected on to the top 2 PCs.png", format="png") # remove the last 2 data set and perform PCA again PCA_toy_data_trim = PCA_toy_data[:-2, :] PCA_toy_data_trim_projected = mds(PCA_toy_data_trim, 2) plt.cla() plt.title("the PCA toy data projected on to the top 2 PCs \n(without the last 2 data points)") plt.scatter(PCA_toy_data_trim_projected[:, 0], PCA_toy_data_trim_projected[:, 1]) plt.xlim(-1.75, 1.75) plt.ylim(-1.5, 1.5) plt.savefig("the PCA toy data projected on to the top 2 PCs (without the last 2 data points).png", format="png") print("the PCA toy data finished") ### Exercise 4. 
Clustering II print("begin analyzing the pesticide data") # load the pesticide data dataTrain = np.loadtxt('IDSWeedCropTrain.csv', delimiter=',') # split the data into variables and labels XTrain = dataTrain[:, :-1] YTrain = dataTrain[:, -1] # project the data on the first 2 PCs XTrain_projected = mds(XTrain, 2) # collect the line index of weed points and crop points respectively weed_line = [] crop_line = [] for i in range(len(YTrain)): if YTrain[i] == 0: # weed weed_line.append(i) elif YTrain[i] == 1: # crop crop_line.append(i) # separate projected data points into 2 sets corresponding to the 2 classes weed_point = XTrain_projected[weed_line, :] crop_point = XTrain_projected[crop_line, :] # plot the projected data points. weed points in yellow, crop points in green plt.cla() plt.axis('auto') plt.xlabel("PC1") plt.ylabel("PC2") plt.scatter(weed_point[:, 0], weed_point[:, 1], color="yellow", label="weed") plt.scatter(crop_point[:, 0], crop_point[:, 1], color="green", label="crop") # perform k-means clustering startingPoint = np.vstack((XTrain[0, ], XTrain[1, ])) kmeans = KMeans(n_clusters=2, n_init=1, init=startingPoint, algorithm='full').fit(XTrain) # center the coordinates of the 2 cluster centres (because all data points are centred during PCA) XTrain_mean = np.mean(XTrain, 0) XTrain_mean_matrix = numpy.matlib.repmat(XTrain_mean, kmeans.cluster_centers_.shape[0], 1) cluster_centres_centered = kmeans.cluster_centers_ - XTrain_mean_matrix # project the 2 cluster centres on the first 2 PCs obtaining from the whole training set _, PCs = pca(XTrain) PCs = PCs[:, 0:2] cluster_centres_projected = np.dot(PCs.T, cluster_centres_centered.T).T print("the 2 class centres projected on the first 2 PCs are:\n", cluster_centres_projected) # plot the 2 class centres and finish plotting plt.scatter(cluster_centres_projected[:, 0], cluster_centres_projected[:, 1], color="red", label="cluster centre") plt.title("the Pesticide Data and Cluster Centres Projected on the First 2 PCs") 
plt.legend(loc='upper center') plt.savefig("the Pesticide Data and Cluster Centres Projected on the First 2 PCs.png", format="png") print("pesticide data finished")
true
1d083c13cf187413922ba01c4aa498a7994c08dd
Python
stfc/PSyclone
/src/psyclone/psyir/transformations/loop_swap_trans.py
UTF-8
8,460
2.578125
3
[ "BSD-3-Clause" ]
permissive
# ----------------------------------------------------------------------------- # BSD 3-Clause License # # Copyright (c) 2021-2022, Science and Technology Facilities Council. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # ----------------------------------------------------------------------------- # Authors R. W. Ford, A. R. Porter, S. Siso and N. Nobre, STFC Daresbury Lab # A. B. G. Chalk STFC Daresbury Lab # J. Henrichs, Bureau of Meteorology # Modified I. 
Kavcic, Met Office ''' This module provides the loop swap transformation.''' from psyclone.psyir.nodes import Call, CodeBlock, Reference from psyclone.psyir.transformations.loop_trans import LoopTrans from psyclone.psyir.transformations.transformation_error import \ TransformationError class LoopSwapTrans(LoopTrans): ''' Provides a loop-swap transformation, e.g.: .. code-block:: fortran DO j=1, m DO i=1, n becomes: .. code-block:: fortran DO i=1, n DO j=1, m This transform is used as follows: >>> from psyclone.parse.algorithm import parse >>> from psyclone.psyGen import PSyFactory >>> ast, invokeInfo = parse("shallow_alg.f90") >>> psy = PSyFactory("gocean1.0").create(invokeInfo) >>> schedule = psy.invokes.get('invoke_0').schedule >>> # Uncomment the following line to see a text view of the schedule >>> # print(schedule.view()) >>> >>> from psyclone.transformations import LoopSwapTrans >>> swap = LoopSwapTrans() >>> swap.apply(schedule.children[0]) >>> # Uncomment the following line to see a text view of the schedule >>> # print(schedule.view()) ''' excluded_node_types = (Call, CodeBlock) def __str__(self): return "Exchange the order of two nested loops: inner becomes " + \ "outer and vice versa" def validate(self, node, options=None): # pylint: disable=arguments-differ '''Checks if the given node contains a valid Fortran structure to allow swapping loops. This means the node must represent a loop, and it must have exactly one child that is also a loop. :param node_outer: a Loop node from an AST. :type node_outer: py:class:`psyclone.psyir.nodes.Loop` :param options: a dictionary with options for transformations. :type options: Optional[Dict[str, Any]] :raises TransformationError: if the supplied node does not \ allow a loop swap to be done. :raises TransformationError: if either the inner or outer loop \ has a symbol table. 
''' super().validate(node, options=options) node_outer = node if not node_outer.loop_body or not node_outer.loop_body.children: raise TransformationError( f"Error in LoopSwap transformation. Supplied node " f"'{node_outer}' must be the outer loop of a loop nest and " f"must have one inner loop, but this node does not have any " f"statements inside.") node_inner = node_outer.loop_body[0] # Check that the body of the outer loop is itself a Loop try: super().validate(node_inner, options=options) except TransformationError as err: raise TransformationError( f"Error in LoopSwap transformation. Supplied node " f"'{node_outer}' must be the outer loop of a loop nest but " f"the first inner statement is not a valid loop:\n" f"{err.value}.") from err if len(node_outer.loop_body.children) > 1: raise TransformationError( f"Error in LoopSwap transformation. Supplied node " f"'{node_outer}' must be the outer loop of a loop nest and " f"must have exactly one inner loop, but this node has " f"{len(node_outer.loop_body.children)} inner statements, the " f"first two being '{node_outer.loop_body[0]}' and " f"'{node_outer.loop_body[1]}'.") outer_sched = node_outer.loop_body if outer_sched.symbol_table and \ not outer_sched.symbol_table.is_empty(): raise TransformationError( "Error in LoopSwap transformation: The outer loop " "has a non-empty symbol table.") inner_sched = outer_sched[0].loop_body if inner_sched.symbol_table and \ not inner_sched.symbol_table.is_empty(): raise TransformationError( "Error in LoopSwap transformation: The inner loop " "has a non-empty symbol table.") for boundary in (node_outer.start_expr, node_outer.stop_expr, node_outer.step_expr): symbols = [ref.symbol for ref in boundary.walk(Reference)] if node_inner.variable in symbols: raise TransformationError( f"Error in LoopSwap transformation: The inner loop " f"iteration variable '{node_inner.variable.name}' is part " f"of the outer loop boundary expressions, so their order " f"can not be swapped.") for 
boundary in (node_inner.start_expr, node_inner.stop_expr, node_inner.step_expr): symbols = [ref.symbol for ref in boundary.walk(Reference)] if node_outer.variable in symbols: raise TransformationError( f"Error in LoopSwap transformation: The outer loop " f"iteration variable '{node_outer.variable.name}' is part " f"of the inner loop boundary expressions, so their order " f"can not be swapped.") def apply(self, node, options=None): # pylint: disable=arguments-differ '''The argument :py:obj:`outer` must be a loop which has exactly one inner loop. This transform then swaps the outer and inner loop. :param outer: the node representing the outer loop. :type outer: :py:class:`psyclone.psyir.nodes.Loop` :param options: a dictionary with options for transformations. :type options: Optional[Dict[str, Any]] :raises TransformationError: if the supplied node does not \ allow a loop swap to be done. ''' self.validate(node, options=options) outer = node inner = outer.loop_body[0] # Detach the inner code inner_loop_body = inner.loop_body.detach() # Swap the loops outer.replace_with(inner.detach()) inner.addchild(outer.loop_body.detach()) inner.loop_body.addchild(outer) # Insert again the inner code in the new inner loop outer.addchild(inner_loop_body) # For Sphinx AutoAPI documentation generation __all__ = ["LoopSwapTrans"]
true
96323a1c267e06833cef6a6e1b0426da18e50911
Python
mgracecar/python-packet-sniffer
/sniffer.py
UTF-8
40,897
3.3125
3
[]
no_license
''' Jesus Linares Brandon Deen Mariana Flores Geoff Graham Description: This Linux and Windows application processes packets in the local network and displays the supported protocol's header and data. Linux has support for link layer whereas Windows has support for network layer. The header is displayed in the same format(s) wireshark displays them. ''' import socket, sys, time, platform from struct import * # Constants for each header length. constEthHeaderLength = 14 constARPHeaderLength = 28 constIPHeaderLength = 20 constTCPHeaderLength = 20 constUDPHeaderLength = 8 constICMPHeaderLength = 8 # Lists of unpacked packets. allList = [] arpList = [] icmpList = [] tcpList = [] udpList = [] # Check the OS the application is running on. os = platform.system() linux = 'Linux' windows = 'Windows' def eth(packet, attKey, printKey): # Get Ethernet header using begin and end. begin = 0 end = begin + constEthHeaderLength ethHeader = packet[begin:end] # Unpack the header because it originally in hex. # The regular expression helps unpack the header. # ! signifies we are unpacking a network endian. # 6s signifies we are unpacking a string of size 6 bytes. # H signifies we are unpacking an integer of size 2 bytes. ethHeaderUnpacked = unpack('!6s6sH', ethHeader) # The first 6s is 6 bytes and contains the destination address. ethDestAddress = ethHeaderUnpacked[0] # The second 6s is 6 bytes and contains the source address. ethSourceAddress = ethHeaderUnpacked[1] # The first H is 2 bytes and contains the packet length. ethType = socket.ntohs(ethHeaderUnpacked[2]) # Properly unpack and format the destination address. ethDestAddress = "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x" % (ord(ethDestAddress[0]), ord(ethDestAddress[1]), ord(ethDestAddress[2]), ord(ethDestAddress[3]), ord(ethDestAddress[4]), ord(ethDestAddress[5])) # Properly unpack and format the source address. 
ethSourceAddress = "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x" % (ord(ethSourceAddress[0]), ord(ethSourceAddress[1]), ord(ethSourceAddress[2]), ord(ethSourceAddress[3]), ord(ethSourceAddress[4]), ord(ethSourceAddress[5])) # If the print key is 0, header information will be printed. # If the attKey is *, all attributes will be printed. # If the attKey is not *, the attribute the att key corresponds to will be printed. # If the print key is not 0, then do not print out the header information. # If the attKey is not *, the attribute the att key corresponds to will be returned. if printKey == 0: # Print Ethernet Header print('\n********************\n** Ethernet (MAC) **\n********************') if (attKey == 0) or (attKey == '*'): print('Destination Address: ' + str(ethDestAddress)) if (attKey == 1) or (attKey == '*'): print('Source Address: ' + str(ethSourceAddress)) if (attKey == 2) or (attKey == '*'): print('EtherType: ' + str(ethType)) else: if (attKey == 0): return str(ethDestAddress) if (attKey == 1): return str(ethSourceAddress) if (attKey == 2): return str(ethType) def arp(packet, attKey, printKey): # Get ARP header using begin and end. begin = constEthHeaderLength end = begin + constARPHeaderLength arpHeader = packet[begin:end] # Unpack the header because it originally in hex. # The regular expression helps unpack the header. # ! signifies we are unpacking a network endian. # H signifies we are unpacking an integer of size 2 bytes. # B signifies we are unpacking an integer of size 1 byte. # 6s signifies we are unpacking a string of size 6 bytes. # 4s signifies we are unpacking a string of size 4 bytes. arpHeaderUnpacked = unpack("!HHBBH6s4s6s4s", arpHeader) # The first H is 2 bytes and contains the hardware type. arpHardwareType = socket.ntohs(arpHeaderUnpacked[0]) # The second H is 2 bytes and contains the protocol type. arpProtocolType = socket.ntohs(arpHeaderUnpacked[1]) # The first B is 1 byte and contains the hardware address length. 
arpHardAddressLength = arpHeaderUnpacked[2] # The second B is 1 byte and contains the protocol address length. arpProtAddressLength = arpHeaderUnpacked[3] # The third H is 2 bytes and contains the operation. arpOperation = arpHeaderUnpacked[4] # The first 6s is 6 bytes and contains the sender hardware address. arpSenderHardAddress = arpHeaderUnpacked[5] # The first 4s is 4 bytes and contains the sender protocol address. arpSenderProtAddress = socket.inet_ntoa(arpHeaderUnpacked[6]) # The second 6s is 6 bytes and contains the target hardware address. arpTargetHardAddress = arpHeaderUnpacked[7] # The second 4s is 4 bytes and contains the target protocol address. arpTargetProtAddress = socket.inet_ntoa(arpHeaderUnpacked[8]) # Properly unpack and format the source MAC address. arpSenderHardAddress = "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x" % (ord(arpSenderHardAddress[0]), ord(arpSenderHardAddress[1]), ord(arpSenderHardAddress[2]), ord(arpSenderHardAddress[3]), ord(arpSenderHardAddress[4]), ord(arpSenderHardAddress[5])) # Properly unpack and format the destination MAC address. arpTargetHardAddress = "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x" % (ord(arpTargetHardAddress[0]), ord(arpTargetHardAddress[1]), ord(arpTargetHardAddress[2]), ord(arpTargetHardAddress[3]), ord(arpTargetHardAddress[4]), ord(arpTargetHardAddress[5])) # If the print key is 0, header information will be printed. # If the attKey is *, all attributes will be printed. # If the attKey is not *, the attribute the att key corresponds to will be printed. # If the print key is not 0, then do not print out the header information. # If the attKey is not *, the attribute the att key corresponds to will be returned. 
if printKey == 0: # Print ARP Header print('\n*******************\n******* ARP *******\n*******************') if (attKey == 0) or (attKey == '*'): print('Hardware Type: ' + str(arpHardwareType)) if (attKey == 1) or (attKey == '*'): print('Protocol Type: ' + str(arpProtocolType)) if (attKey == 2) or (attKey == '*'): print('Hardware Address Length: ' + str(arpHardAddressLength)) if (attKey == 3) or (attKey == '*'): print('Protocol Address Length: ' + str(arpProtAddressLength)) if (attKey == 4) or (attKey == '*'): print('Operation: ' + str(arpOperation)) if (attKey == 5) or (attKey == '*'): print('Sender Hardware Address: ' + str(arpSenderHardAddress)) if (attKey == 6) or (attKey == '*'): print('Sender Protocol Address: ' + str(arpSenderProtAddress)) if (attKey == 7) or (attKey == '*'): print('Target Hardware Address: ' + str(arpTargetHardAddress)) if (attKey == 8) or (attKey == '*'): print('Target Protocol Address: ' + str(arpTargetProtAddress)) else: if (attKey == 0): return str(arpHardwareType) if (attKey == 1): return str(arpProtocolType) if (attKey == 2): return str(arpHardAddressLength) if (attKey == 3): return str(arpProtAddressLength) if (attKey == 4): return str(arpOperation) if (attKey == 5): return str(arpSenderHardAddress) if (attKey == 6): return str(arpSenderProtAddress) if (attKey == 7): return str(arpTargetHardAddress) if (attKey == 8): return str(arpTargetProtAddress) def ip(packet, attKey, printKey): # Get IP header using begin and end. if os == linux: begin = constEthHeaderLength end = begin + constIPHeaderLength elif os == windows: begin = 0 end = begin + constIPHeaderLength ipHeader = packet[begin:end] # Unpack the header because it originally in hex. # The regular expression helps unpack the header. # ! signifies we are unpacking a network endian. # B signifies we are unpacking an integer of size 1 byte. # H signifies we are unpacking an integer of size 2 bytes. # 4s signifies we are unpacking a string of size 4 bytes. 
ipHeaderUnpacked = unpack('!BBHHHBBH4s4s' , ipHeader) # The first B is 1 byte and contains the version and header length. # Both are 4 bits each, split ipHeaderUnpacked[0] in "half". ipVersionAndHeaderLength = ipHeaderUnpacked[0] ipVersion = ipVersionAndHeaderLength >> 4 ipHeaderLength = ipVersionAndHeaderLength & 0xF # The second B is 1 byte and contains the service type and ECN. ipDSCPAndECN = ipHeaderUnpacked[1] ipDSCP = ipDSCPAndECN >> 2 ipECN = ipDSCPAndECN & 0x3 # The first H is 2 bytes and contains the total length. ipTotalLength = ipHeaderUnpacked[2] # The second H is 2 bytes and contains the total length. ipIdentification = ipHeaderUnpacked[3] # The third H is 2 bytes and contains the flags and fragment offset. # Flags is 3 bits and fragment offset is 13 bits. # Split ipHeaderUnpacked[4]. ipFlagsAndFragmentOffset = ipHeaderUnpacked[4] ipFlags = ipFlagsAndFragmentOffset >> 13 ipFragmentOffset = ipFlagsAndFragmentOffset & 0x1FFF # The third B is 1 byte and contains the time to live. ipTimeToLive = ipHeaderUnpacked[5] # Our fourth B is 1 byte and contains the protocol. ipProtocol = ipHeaderUnpacked[6] # The fourth H is 2 bytes and contains the header checksum. ipHeaderChecksum = ipHeaderUnpacked[7] # The first 4s is 4 bytes and contains the source address. ipSourceAddress = socket.inet_ntoa(ipHeaderUnpacked[8]); # The second 4s is 4 bytes and contains the dest address. ipDestAddress = socket.inet_ntoa(ipHeaderUnpacked[9]); # If the print key is 0, header information will be printed. # If the attKey is *, all attributes will be printed. # If the attKey is not *, the attribute the att key corresponds to will be printed. # If the print key is not 0, then do not print out the header information. # If the attKey is not *, the attribute the att key corresponds to will be returned. if printKey == 0: # Print IP Header # Some segments of the header are switched back to hex form because that # is the format wireshark has it. 
print('\n********************\n******** IP ********\n********************') if (attKey == 0) or (attKey == '*'): print('Version: ' + str(ipVersion)) if (attKey == 1) or (attKey == '*'): print('Header Length: ' + str(ipHeaderLength) + ' 32-bit words') if (attKey == 2) or (attKey == '*'): print('Differentiated Services Code Point: ' + format(ipDSCP, '#04X') + ' , ' + str(ipDSCP)) if (attKey == 3) or (attKey == '*'): print('Explicit Congestion Notification: ' + format(ipECN, '#04X') + ' , ' + str(ipECN)) if (attKey == 4) or (attKey == '*'): print('Total Length: ' + str(ipTotalLength) + ' bytes') if (attKey == 5) or (attKey == '*'): print('Identification: ' + format(ipIdentification, '#04X') + ' , ' + str(ipIdentification)) if (attKey == 6) or (attKey == '*'): print('Flags: ' + format(ipFlags, '#04X') + ' , ' + str(ipFlags)) if (attKey == 7) or (attKey == '*'): print('Fragment Offset: ' + str(ipFragmentOffset) + ' eight-byte blocks') if (attKey == 8) or (attKey == '*'): print('Time to Live: ' + str(ipTimeToLive) + ' seconds') if (attKey == 9) or (attKey == '*'): print('Protocol: ' + str(ipProtocol)) if (attKey == 10) or (attKey == '*'): print('Header Checksum: ' + format(ipHeaderChecksum, '#04X')) if (attKey == 11) or (attKey == '*'): print('Source Address: ' + str(ipSourceAddress)) if (attKey == 12) or (attKey == '*'): print('Destination Address: ' + str(ipDestAddress)) else: if (attKey == 0): return str(ipVersion) if (attKey == 1): return str(ipHeaderLength) if (attKey == 2): return format(ipDSCP, '#04X') if (attKey == 3): return format(ipECN, '#04X') if (attKey == 4): return str(ipTotalLength) if (attKey == 5): return format(ipIdentification, '#04X') if (attKey == 6): return format(ipFlags, '#04X') if (attKey == 7): return str(ipFragmentOffset) if (attKey == 8): return str(ipTimeToLive) if (attKey == 9): return str(ipProtocol) if (attKey == 10): return format(ipHeaderChecksum, '#04X') if (attKey == 11): return str(ipSourceAddress) if (attKey == 12): return 
str(ipDestAddress) def icmp(packet, attKey, printKey): # Get ICMP header using begin and end. if os == linux: begin = constEthHeaderLength + constIPHeaderLength end = begin + constICMPHeaderLength elif os == windows: begin = constIPHeaderLength end = begin + constICMPHeaderLength icmpHeader = packet[begin:end] # Unpack the header because it originally in hex. # The regular expression helps unpack the header. # ! signifies we are unpacking a network endian. # B signifies we are unpacking an integer of size 1 byte. # H signifies we are unpacking an integer of size 2 bytes. # L signifies we are unpacking a long of size 4 bytes. icmpHeaderUnpacked = unpack('!BBHL', icmpHeader) # The first B is 1 byte and contains the type. icmpType = icmpHeaderUnpacked[0] # The second B is 1 byte and contains the code. icmpCode = icmpHeaderUnpacked[1] # The first H is 2 bytes and contains the checksum. icmpChecksum = icmpHeaderUnpacked[2] # Check if the type is 1 or 8, if so, unpack the identifier and sequence number. if (icmpType == 0) or (icmpType == 8): # The first L is 4 bytes and contains the rest of the header. icmpIdentifier = icmpHeaderUnpacked[3] >> 16 icmpSeqNumber = icmpHeaderUnpacked[3] & 0xFFFF # If the print key is 0, header information will be printed. # If the attKey is *, all attributes will be printed. # If the attKey is not *, the attribute the att key corresponds to will be printed. # If the print key is not 0, then do not print out the header information. # If the attKey is not *, the attribute the att key corresponds to will be returned. if printKey == 0: if (icmpType == 0) or (icmpType == 8): # Print ICMP Header # Some segments of the header are switched back to hex form because that # is the format wireshark has it. 
print('\n********************\n******* ICMP *******\n********************') if (attKey == 0) or (attKey == '*'): print('Type: ' + str(icmpType)) if (attKey == 1) or (attKey == '*'): print('Code: ' + str(icmpCode)) if (attKey == 2) or (attKey == '*'): print('Checksum: ' + format(icmpChecksum, '#04X')) if (attKey == 3) or (attKey == '*'): print('Identifier: ' + str(icmpIdentifier)) if (attKey == 4) or (attKey == '*'): print('Sequence Number: ' + str(icmpSeqNumber)) else: print('\n********************\n******* ICMP *******\n********************') if (attKey == 0) or (attKey == '*'): print('Type: ' + str(icmpType)) if (attKey == 1) or (attKey == '*'): print('Code: ' + str(icmpCode)) if (attKey == 2) or (attKey == '*'): print('Checksum: ' + format(icmpChecksum, '#04X')) if (attKey == 3) or (attKey == '*'): print('Attribute not available.') if (attKey == 4) or (attKey == '*'): print('Attribute not available.') else: if (icmpType == 0) or (icmpType == 8): if (attKey == 0): return str(icmpType) if (attKey == 1): return str(icmpCode) if (attKey == 2): return format(icmpChecksum, '#04X') if (attKey == 3): return str(icmpIdentifier) if (attKey == 4): return str(icmpSeqNumber) else: if (attKey == 0): return str(icmpType) if (attKey == 1): return str(icmpCode) if (attKey == 2): return format(icmpChecksum, '#04X') if (attKey == 3): return 'Attribute not available.' if (attKey == 4): return 'Attribute not available.' def tcp(packet, attKey, printKey): # Get TCP header using begin and end. if os == linux: begin = constEthHeaderLength + constIPHeaderLength end = begin + constTCPHeaderLength elif os == windows: begin = constIPHeaderLength end = begin + constTCPHeaderLength tcpHeader = packet[begin:end] # Unpack the header because it originally in hex. # The regular expression helps unpack the header. # ! signifies we are unpacking a network endian. # H signifies we are unpacking an integer of size 2 bytes. # L signifies we are unpacking a long of size 4 bytes. 
# B signifies we are unpacking an integer of size 1 byte. tcpHeaderUnpacked = unpack('!HHLLBBHHH', tcpHeader) # The first H is 2 bytes and contains the source port. tcpSourcePort = tcpHeaderUnpacked[0] # The second H is 2 bytes and contains the destination port. tcpDestPort = tcpHeaderUnpacked[1] # The first L is 2 bytes and contains the sequence number. tcpSeqNumber = tcpHeaderUnpacked[2] # The second L is 4 bytes and contains the acknowledgement number. tcpAckNumber = tcpHeaderUnpacked[3] # The first B is 1 byte and contains the data offset, reserved bits, and NS flag. # Split tcpHeaderUnpacked[4] tcpDataOffsetAndReserved = tcpHeaderUnpacked[4] tcpDataOffset = tcpDataOffsetAndReserved >> 4 tcpReserved = (tcpDataOffsetAndReserved >> 1) & 0x7 tcpNSFlag = tcpDataOffsetAndReserved & 0x1 # The second B is 1 byte and contains the rest of the flags. # Split tcpHeaderUnpacked[5]. tcpRestOfFLags = tcpHeaderUnpacked[5] tcpCWRFlag = tcpRestOfFLags >> 7 tcpECEFlag = (tcpRestOfFLags >> 6) & 0x1 tcpURGFlag = (tcpRestOfFLags >> 5) & 0x1 tcpACKFlag = (tcpRestOfFLags >> 4) & 0x1 tcpPSHFlag = (tcpRestOfFLags >> 3) & 0x1 tcpRSTFlag = (tcpRestOfFLags >> 2) & 0x1 tcpSYNFlag = (tcpRestOfFLags >> 1) & 0x1 tcpFINFlag = tcpRestOfFLags & 0x1 # The third H is 2 bytes and contains the window size. tcpWindowSize = tcpHeaderUnpacked[6] # The fourth H is 2 byte and conntains the checksum. tcpChecksum = tcpHeaderUnpacked[7] # The fifth H is 2 bytes and constains the urgent pointer. tcpUrgentPointer = tcpHeaderUnpacked[8] # If the print key is 0, header information will be printed. # If the attKey is *, all attributes will be printed. # If the attKey is not *, the attribute the att key corresponds to will be printed. # If the print key is not 0, then do not print out the header information. # If the attKey is not *, the attribute the att key corresponds to will be returned. 
if printKey == 0: # Print TCP Header # Some segments of the header are switched back to hex form because that # is the format wireshark has it. print('\n*******************\n******* TCP *******\n*******************') if (attKey == 0) or (attKey == '*'): print('Source Port: ' + str(tcpSourcePort)) if (attKey == 1) or (attKey == '*'): print('Destination Port: ' + str(tcpDestPort)) if (attKey == 2) or (attKey == '*'): print('Sequence Number: ' + str(tcpSeqNumber)) if (attKey == 3) or (attKey == '*'): print('Acknowledgment Number: ' + str(tcpAckNumber)) if (attKey == 4) or (attKey == '*'): print('Data Offset: ' + str(tcpDataOffset) + ' 32-bit words') if (attKey == 5) or (attKey == '*'): print('Reserved: ' + format(tcpReserved, '03b') + '. .... ....') if (attKey == 6) or (attKey == '*'): print('NS Flag: ' + '...' + format(tcpNSFlag, '01b') + ' .... ....') if (attKey == 7) or (attKey == '*'): print('CWR Flag: ' + '.... ' + format(tcpCWRFlag, '01b') + '... ....') if (attKey == 8) or (attKey == '*'): print('ECE Flag: ' + '.... .' + format(tcpECEFlag, '01b') + '.. ....') if (attKey == 9) or (attKey == '*'): print('URG Flag: ' + '.... ..' + format(tcpURGFlag, '01b') + '. ....') if (attKey == 10) or (attKey == '*'): print('ACK Flag: ' + '.... ...' + format(tcpACKFlag, '01b') + ' ....') if (attKey == 11) or (attKey == '*'): print('PSH Flag: ' + '.... .... ' + format(tcpPSHFlag, '01b') + '...') if (attKey == 12) or (attKey == '*'): print('RST Flag: ' + '.... .... .' + format(tcpRSTFlag, '01b') + '..') if (attKey == 13) or (attKey == '*'): print('SYN Flag: ' + '.... .... ..' + format(tcpSYNFlag, '01b') + '.') if (attKey == 14) or (attKey == '*'): print('FIN Flag: ' + '.... .... ...' 
+ format(tcpFINFlag, '01b')) if (attKey == 15) or (attKey == '*'): print('Window Size: ' + str(tcpWindowSize) + ' bytes') if (attKey == 16) or (attKey == '*'): print('Urgent Pointer: ' + str(tcpUrgentPointer)) if (attKey == 17) or (attKey == '*'): print('Checksum: ' + format(tcpChecksum, '#04X')) else: if (attKey == 0): return str(tcpSourcePort) if (attKey == 1): return str(tcpDestPort) if (attKey == 2): return str(tcpSeqNumber) if (attKey == 3): return str(tcpAckNumber) if (attKey == 4): return str(tcpDataOffset) if (attKey == 5): return format(tcpReserved, '03b') if (attKey == 6): return format(tcpNSFlag, '01b') if (attKey == 7): return format(tcpCWRFlag, '01b') if (attKey == 8): return format(tcpECEFlag, '01b') if (attKey == 9): return format(tcpURGFlag, '01b') if (attKey == 10): return format(tcpACKFlag, '01b') if (attKey == 11): return format(tcpPSHFlag, '01b') if (attKey == 12): return format(tcpRSTFlag, '01b') if (attKey == 13): return format(tcpSYNFlag, '01b') if (attKey == 14): return format(tcpFINFlag, '01b') if (attKey == 15): return str(tcpWindowSize) if (attKey == 16): return str(tcpUrgentPointer) if (attKey == 17): return format(tcpChecksum, '#04X') def udp(packet, attKey, printKey): # Get UDP header using begin and end. if os == linux: begin = constEthHeaderLength + constIPHeaderLength end = begin + constUDPHeaderLength elif os == windows: begin = constIPHeaderLength end = begin + constUDPHeaderLength udpHeader = packet[begin:end] # Unpack the header because it originally in hex. # The regular expression helps unpack the header. # ! signifies we are unpacking a network endian. # H signifies we are unpacking an integer of size 2 bytes. udpHeaderUnpacked = unpack('!HHHH', udpHeader) # The first H is 2 bytes and contains the source port. udpSourcePort = udpHeaderUnpacked[0] # The second H is 2 bytes and contains the destination port. udpDestPort = udpHeaderUnpacked[1] # The third H is 2 bytes and contains the packet length. 
udpLength = udpHeaderUnpacked[2] # The fourth H is 2 bytes and contains the header checksum. udpChecksum = udpHeaderUnpacked[3] # If the print key is 0, header information will be printed. # If the attKey is *, all attributes will be printed. # If the attKey is not *, the attribute the att key corresponds to will be printed. # If the print key is not 0, then do not print out the header information. # If the attKey is not *, the attribute the att key corresponds to will be returned. if printKey == 0: # Print UDP Header print('\n*******************\n******* UDP *******\n*******************') if (attKey == 0) or (attKey == '*'): print('Source Port: ' + str(udpSourcePort)) if (attKey == 1) or (attKey == '*'): print('Destination Port: ' + str(udpDestPort)) if (attKey == 2) or (attKey == '*'): print('Length: ' + str(udpLength) + ' bytes') if (attKey == 3) or (attKey == '*'): print('Checksum: ' + format(udpChecksum, '#04X')) else: if (attKey == 0): return str(udpSourcePort) if (attKey == 1): return str(udpDestPort) if (attKey == 2): return str(udpLength) if (attKey == 3): return format(udpChecksum, '#04X') def unpackPacket(packet, sniffKey): # All attributes for each protocol will be displayed. attKey = '*' # Attributes will be printed. printKey = 0 # Protocol will be blank until a supported protocol is found. protocol = '' # Unpack the Ethernet (MAC) information. eth(packet, attKey, printKey) # If the OS is Linux, unpack ethernet. # If the OS is Windows, mimic unpacking ethernet if os == linux: # Find the packet's Ethernet protocol then return the attKey back to * and the print key back to 0. attKey = 2 printKey = 1 ethProtocol = eth(packet, attKey, printKey) ethProtocol = int(ethProtocol) attKey = '*' printKey = 0 elif os == windows: ethProtocol = 8 # Find if the Ethernet frame is ARP or IP. if ethProtocol == 1544: # Unpack the ARP information. arp(packet, attKey, printKey) protocol = 'arp' elif ethProtocol == 8: # Unpack the IP information. 
ip(packet, attKey, printKey) # Know the packet's IP protocol then return the attKey back to * and the print key back to 0. attKey = 9 printKey = 1 ipProtocol = ip(packet, attKey, printKey) ipProtocol = int(ipProtocol) attKey = '*' printKey = 0 # If the protocol is 1, meaning ICMP, then unpack the ICMP information. # If the protocol is 6, meaning TCP, then unpack the TCP information. # If the protocol is 17, meaning UDP, then unpack the UDP information. if ipProtocol == 1: icmp(packet, attKey, printKey) protocol = 'icmp' elif ipProtocol == 6: tcp(packet, attKey, printKey) protocol = 'tcp' elif ipProtocol == 17: udp(packet, attKey, printKey) protocol = 'udp' # Separator print('\n----------------------------------------') # If the sniff key is 0, save the packets accordingly. # If sniff key is not 0, do not save the packets. Unpacking is enough. if sniffKey == 0: if protocol == 'arp': allList.append(packet) arpList.append(packet) elif protocol == 'icmp': allList.append(packet) icmpList.append(packet) elif protocol == 'tcp': allList.append(packet) tcpList.append(packet) elif protocol == 'udp': allList.append(packet) udpList.append(packet) def linuxFilter(): while True: # Display filtering options. # Repeated if incorrect input. decision = raw_input('0: ARP\n1: ICMP\n2: TCP\n3: UDP\nCtrl+c to stop...\nSelection: ') # Check if decision is supported input. try: decision = int(decision) except ValueError: print('\nUnsupported input, try again...') continue # A sniff key of 1 means the application is not sniffing so packets must not be saved. sniffKey = 1 # Filter based on input, if input is not supported, notify user. # If no protocols of certain type were filtered, notify user. # If user chooses cancel option, break while loop. if decision == 0: # Find the length of the protocol's list. length = len(arpList) # If the length is not empty, unpack the packets in the list. # If length is empty, notify user and return the associated number of protocol being filtered. 
if length > 0: for i in range(length): packet = arpList[i] unpackPacket(packet, sniffKey) return decision else: print('\nNo protocols of this type were sniffed.') elif decision == 1: length = len(icmpList) if length > 0: for i in range(length): packet = icmpList[i] unpackPacket(packet, sniffKey) return decision else: print('\nNo protocols of this type were sniffed.') elif decision == 2: length = len(tcpList) if length > 0: for i in range(length): packet = tcpList[i] unpackPacket(packet, sniffKey) return decision else: print('\nNo protocols of this type were sniffed.') elif decision == 3: length = len(udpList) if length > 0: for i in range(length): packet = udpList[i] unpackPacket(packet, sniffKey) return decision else: print('\nNo protocols of this type were sniffed.') else: print('\nUnsupported input, try again...') def windowsFilter(): while True: # Display filtering options. # Repeated if incorrect input. decision = raw_input('0: ICMP\n1: TCP\n2: UDP\nCtrl+c to stop...\nSelection: ') # Check if decision is supported input. try: decision = int(decision) except ValueError: print('\nUnsupported input, try again...') continue # A sniff key of 1 means the application is not sniffing so packets must not be saved. sniffKey = 1 # Filter based on input, if input is not supported, notify user. # If no protocols of certain type were filtered, notify user. # If user chooses cancel option, break while loop. 
if decision == 0: length = len(icmpList) if length > 0: for i in range(length): packet = icmpList[i] unpackPacket(packet, sniffKey) return decision else: print('\nNo protocols of this type were sniffed.') elif decision == 1: length = len(tcpList) if length > 0: for i in range(length): packet = tcpList[i] unpackPacket(packet, sniffKey) return decision else: print('\nNo protocols of this type were sniffed.') elif decision == 2: length = len(udpList) if length > 0: for i in range(length): packet = udpList[i] unpackPacket(packet, sniffKey) return decision else: print('\nNo protocols of this type were sniffed.') else: print('\nUnsupported input, try again...') def linuxExtract(filtered): # Establish the prompts for each protocol's attributes. ethAttributes = '0: Destination Address\n1: Source Address\n2: EtherType' arpAttributes = '3: Hardware Type\n4: Protocol Type\n5: Hardware Address Length\n6: Protocol Address Length\n7: Operation\n8: Sender Hardware Address\n9: Sender Protocol Address\n10: Target Hardware Address\n11: Target Protocol Address' ipAttributes = '3: Version\n4: Header Length\n5: Differentiated Services Code Point\n6: Explicit Congestion Notification\n7: Total Length\n8: Identification\n9: Flags\n10: Fragment Offset\n11: Time to Live\n12: Protocol\n13: Header Checksum\n14: Source Address\n15: Destination Address' icmpAttributes = '16: Type\n17: Code\n18: Checksum\n19: Identifier (If available)\n20: Sequence Number (If available)' tcpAttributes = '16: Source Port\n17: Destination Port\n18: Sequence Number\n19: Acknowledgment Number\n20: Data Offset\n21: Reserved\n22: NS Flag:\n23: CWR Flag\n24: ECE Flag\n25: URG Flag\n26: ACK Flag\n27: PSH Flag\n28: RST Flag\n29: SYN Flag\n30: FIN Flag\n31: Window Size\n32: Urgent Pointer\n33: Checksum' udpAttributes = '16: Source Port\n17: Destination Port\n18: Length\n19: Checksum' # Attributes will be printed. printKey = 0 # Will keep looping until given correct input. 
while True: # Find the selected protocol by the user. if filtered == 0: # Display the approriate attributes from the protocol. print(ethAttributes) print(arpAttributes) decision = raw_input('Selection: ') # Check if attKey (decision) is supported input. try: attKey = int(decision) except ValueError: print('\nUnsupported input, try again...') continue # Check if attKey is within range. if (attKey < 0) or (attKey > 11): print('\nUnsupported input, try again...') continue # Find the length of the protocol's list. length = len(arpList) # The chosen attribute will be found by going through the protocol layers. # The att key will be calibrated (if needed), and specify which attribute to print. if attKey >= 3: for i in range(length): packet = arpList[i] arp(packet, attKey - 3, printKey) print('\n----------------------------------------') break elif attKey >= 0: for i in range(length): packet = arpList[i] eth(packet, attKey, printKey) print('\n----------------------------------------') break elif filtered == 1: print(ethAttributes) print(ipAttributes) print(icmpAttributes) decision = raw_input('Selection: ') try: attKey = int(decision) except ValueError: print('\nUnsupported input, try again...') continue if (attKey < 0) or (attKey > 20): print('\nUnsupported input, try again...') continue length = len(icmpList) if attKey >= 16: for i in range(length): packet = icmpList[i] icmp(packet, attKey - 16, printKey) print('\n----------------------------------------') break elif attKey >= 3: for i in range(length): packet = icmpList[i] ip(packet, attKey - 3, printKey) print('\n----------------------------------------') break elif attKey >= 0: for i in range(length): packet = icmpList[i] eth(packet, attKey, printKey) print('\n----------------------------------------') break elif filtered == 2: print(ethAttributes) print(ipAttributes) print(tcpAttributes) decision = raw_input('Selection: ') try: attKey = int(decision) except ValueError: print('\nUnsupported input, try again...') 
continue if (attKey < 0) or (attKey > 33): print('\nUnsupported input, try again...') continue length = len(tcpList) if attKey >= 16: for i in range(length): packet = tcpList[i] tcp(packet, attKey - 16, printKey) print('\n----------------------------------------') break elif attKey >= 3: for i in range(length): packet = tcpList[i] ip(packet, attKey - 3, printKey) print('\n----------------------------------------') break elif attKey >= 0: for i in range(length): packet = tcpList[i] eth(packet, attKey, printKey) print('\n----------------------------------------') break elif filtered == 3: print(ethAttributes) print(ipAttributes) print(udpAttributes) decision = raw_input('Selection: ') try: attKey = int(decision) except ValueError: print('\nUnsupported input, try again...') continue if (attKey < 0) or (attKey > 19): print('\nUnsupported input, try again...') continue length = len(udpList) if attKey >= 16: for i in range(length): packet = udpList[i] udp(packet, attKey - 16, printKey) print('\n----------------------------------------') break elif attKey >= 3: for i in range(length): packet = udpList[i] ip(packet, attKey - 3, printKey) print('\n----------------------------------------') break elif attKey >= 0: for i in range(length): packet = udpList[i] eth(packet, attKey, printKey) print('\n----------------------------------------') break def windowsExtract(filtered): # Establish the prompts for each protocol's attributes. 
ipAttributes = '0: Version\n1: Header Length\n2: Differentiated Services Code Point\n3: Explicit Congestion Notification\n4: Total Length\n5: Identification\n6: Flags\n7: Fragment Offset\n8: Time to Live\n9: Protocol\n10: Header Checksum\n11: Source Address\n12: Destination Address' icmpAttributes = '13: Type\n14: Code\n15: Checksum\n16: Identifier (If available)\n17: Sequence Number (If available)' tcpAttributes = '13: Source Port\n14: Destination Port\n15: Sequence Number\n16: Acknowledgment Number\n17: Data Offset\n18: Reserved\n19: NS Flag:\n20: CWR Flag\n21: ECE Flag\n22: URG Flag\n23: ACK Flag\n24: PSH Flag\n25: RST Flag\n26: SYN Flag\n27: FIN Flag\n28: Window Size\n29: Urgent Pointer\n30: Checksum' udpAttributes = '13: Source Port\n14: Destination Port\n15: Length\n16: Checksum' # Attributes will be printed. printKey = 0 # Will keep looping until given correct input. while True: if filtered == 0: print(ipAttributes) print(icmpAttributes) decision = raw_input('Selection: ') try: attKey = int(decision) except ValueError: print('\nUnsupported input, try again...') continue if (attKey < 0) or (attKey > 17): print('\nUnsupported input, try again...') continue length = len(icmpList) if attKey >= 13: for i in range(length): packet = icmpList[i] icmp(packet, attKey - 13, printKey) print('\n----------------------------------------') break elif attKey >= 0: for i in range(length): packet = icmpList[i] ip(packet, attKey, printKey) print('\n----------------------------------------') break elif filtered == 1: print(ipAttributes) print(tcpAttributes) decision = raw_input('Selection: ') try: attKey = int(decision) except ValueError: print('\nUnsupported input, try again...') continue if (attKey < 0) or (attKey > 30): print('\nUnsupported input, try again...') continue length = len(tcpList) if attKey >= 13: for i in range(length): packet = tcpList[i] tcp(packet, attKey - 13, printKey) print('\n----------------------------------------') break elif attKey >= 0: for i in 
range(length): packet = tcpList[i] ip(packet, attKey, printKey) print('\n----------------------------------------') break elif filtered == 2: print(ipAttributes) print(udpAttributes) decision = raw_input('Selection: ') try: attKey = int(decision) except ValueError: print('\nUnsupported input, try again...') continue if (attKey < 0) or (attKey > 16): print('\nUnsupported input, try again...') continue length = len(udpList) if attKey >= 13: for i in range(length): packet = udpList[i] udp(packet, attKey - 13, printKey) print('\n----------------------------------------') break elif attKey >= 0: for i in range(length): packet = udpList[i] ip(packet, attKey, printKey) print('\n----------------------------------------') break def startSniff(): try: while True: # Ask the user if they would like to begin the sniffer or not. decision = raw_input('Hello, would you like to sniff the network? Y/N: ') # Y runs the rest of the application. # N exits the application. if (decision == 'Y') or (decision == 'y'): print('Sniffing, press Ctrl+c to stop...') break elif (decision == 'N') or (decision == 'n'): close() else: print('\nUnsupported input...') except KeyboardInterrupt: print('\nApplication cancelled...') close() def startFilter(): try: while True: # Ask the user if they would like to filter the packets or not. decision = raw_input('Would you like to filter the sniffed packets by protocol? Y/N: ') # Y runs the rest of the application. # N exits the application. if (decision == 'Y') or (decision == 'y'): print('Select a protocol...') return 0 elif (decision == 'N') or (decision == 'n'): return 1 else: print('\nUnsupported input...') except KeyboardInterrupt: print('\nApplication cancelled...') close() def startExtract(): try: while True: # Ask the user if they would like to extract attributes or not. decision = raw_input('Would you like to extract a specific attribute? Y/N: ') # Y runs the rest of the application. # N exits the application. 
if (decision == 'y') or (decision == 'Y'): print('Select an attribute...') return 0 elif (decision == 'N') or (decision == 'n'): return 1 else: print('\nUnsupported input...') except KeyboardInterrupt: print('\nApplication cancelled...') close() def close(): # Exit the application. print('Goodbye.') time.sleep(60) sys.exit() def sniff(): # Ask the user to begin. startSniff() try: # A sniff key of 0 means the application is sniffing and packets must be saved. sniffKey = 0 # If Linux, set up the raw socket the Linux way. # If Windows, set up the raw socket the Windows way. # If not Linux or Windows, close the application. if os == linux: # Create the raw socket. sock = socket.socket(socket.AF_PACKET , socket.SOCK_RAW , socket.ntohs(0x0003)) # Sniff packets. Will loop until user presses Ctrl+c. while True: # Recieve the packets in the network. # Packet will be a tuple, use the first element in the tuple. packet = sock.recvfrom(65565) packet = packet[0] unpackPacket(packet, sniffKey) # Close the socket. sock.close() elif os == windows: # The public network interface. HOST = socket.gethostbyname(socket.gethostname()) # Create a raw socket and bind it to the public interface. sock = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_IP) sock.bind((HOST, 0)) # Include IP headers sock.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1) # Receive all packages. sock.ioctl(socket.SIO_RCVALL, socket.RCVALL_ON) # Sniff packets. Will loop until user presses Ctrl+c. while True: # Recieve the packets in the network. # Packet will be a tuple, use the first element in the tuple. packet = sock.recvfrom(65565) packet = packet[0] unpackPacket(packet, sniffKey) # Disable promiscuous mode. sock.ioctl(socket.SIO_RCVALL, socket.RCVALL_OFF) # Close the socket. sock.close() else: print('The OS you are running is not supported.') close() except socket.error, msg: print('Socket could not be created. 
\nError code: ' + str(msg[0]) + '\nMessage: ' + msg[1]) close() except KeyboardInterrupt: print "\nSniffing stopped." # Ask the user to filter by protocol, then ask to extract attributes. # If 0, filter. # If not 0, move on. if startFilter() == 0: try: # If Linux, filter Linux's supported protocols, then extract. # If Windows, filter Window's supported protocols, then extract. if os == linux: filtered = linuxFilter() if startExtract() == 0: linuxExtract(filtered) elif os == windows: filtered = windowsFilter() if startExtract() == 0: windowsExtract(filtered) except KeyboardInterrupt: print "\nFiltering and extracting stopped." close() def main(): sniff() if __name__ == "__main__": main()
true
a464871a4588d4d5583acf4e1744f74f7aeb10f6
Python
AnandD007/Amazing-Python-Scripts
/Applying Bitwise Operations/Applying Bitwise operations.py
UTF-8
898
3.21875
3
[ "MIT" ]
permissive
import cv2

# Demo script: load two images and display the result of the bitwise
# AND, OR and XOR operations applied pixel-wise to them.

src1 = input("Enter the path of the image 1\n")  # path for first image
src1 = cv2.imread(src1)

src2 = input("Enter the path of the image 2\n")  # path for second image
src2 = cv2.imread(src2)
# Resize the second image so both operands have the same dimensions,
# as required by the bitwise operations.
src2 = cv2.resize(src2, src1.shape[1::-1])

# Applying Bitwise AND operation
andop = cv2.bitwise_and(src1, src2, mask=None)
andop = cv2.resize(andop, (640, 640))
cv2.imshow('Bitwise AND', andop)

# Applying Bitwise OR operation
orop = cv2.bitwise_or(src1, src2, mask=None)
orop = cv2.resize(orop, (640, 640))
cv2.imshow('Bitwise OR', orop)

# Applying Bitwise XOR operation (the original comment here wrongly said OR)
xorop = cv2.bitwise_xor(src1, src2, mask=None)
xorop = cv2.resize(xorop, (640, 640))
cv2.imshow('Bitwise XOR', xorop)

cv2.waitKey(0)
cv2.destroyAllWindows()
true
31054b8c8a0f467e6e9e52cc70fdcbf25cfaf81d
Python
wpalafox/fundamentalsOfComp
/intro_interactivepro_python_p1/stopwatch.py
UTF-8
1,969
3.21875
3
[]
no_license
# template for "Stopwatch: The Game"
import simplegui

# Global state.
count = 0           # elapsed time in tenths of a second
stops = 0           # total number of times the watch was stopped
perfect_stops = 0   # stops that landed exactly on a whole second
is_stopped = True   # True while the watch is not running


# Helper function that converts time in tenths of seconds into the
# formatted string A:BC.D (minutes, zero-padded seconds, tenths).
# Renamed from `format` to avoid shadowing the builtin; integer
# arithmetic replaces the old float/str slicing, which emitted
# malformed strings (e.g. "0:00.") whenever the tenths digit was 0.
def format_time(t):
    minutes = t // 600
    seconds = (t // 10) % 60
    tenths = t % 10
    return '%d:%02d.%d' % (minutes, seconds, tenths)


# Event handlers for buttons; "Start", "Stop", "Reset".
def tick():
    # Timer handler: advance the clock by one tenth of a second.
    global count
    count += 1


def start():
    global is_stopped
    is_stopped = False
    timer.start()


def stop():
    global stops, perfect_stops, is_stopped
    if is_stopped:
        print("Watch already stopped")
        return
    timer.stop()
    is_stopped = True
    stops += 1
    # A "perfect" stop lands exactly on a whole second.
    if count % 10 == 0:
        perfect_stops += 1


def reset():
    global count, stops, perfect_stops, is_stopped
    timer.stop()
    # Fix: mark the watch as stopped so a Stop press right after Reset
    # is reported as "already stopped" instead of being scored.
    is_stopped = True
    count = 0
    stops = 0
    perfect_stops = 0


# Event handler for timer with 0.1 sec interval.  The original passed 10
# (milliseconds), which disagreed with the tenths-of-a-second bookkeeping
# used everywhere else in this template.
timer = simplegui.create_timer(100, tick)


# Draw handler: current time plus the perfect/total stop score.
def draw(canvas):
    canvas.draw_text(format_time(count), [10, 100], 60, "red")
    canvas.draw_text(str(perfect_stops), [300, 50], 40, "red")
    canvas.draw_text("/", [330, 50], 40, "red")
    canvas.draw_text(str(stops), [350, 50], 40, "red")


# Create frame and register event handlers.
frame = simplegui.create_frame("Stopwatch: mini project", 400, 200)
frame.set_draw_handler(draw)
frame.add_button('Start', start)
frame.add_button('Stop', stop)
frame.add_button('Reset', reset)

# Start frame.
frame.start()

# Please remember to review the grading rubric.
true
5d951b175b219350e3d3ab4e6f13393c9b6b945c
Python
Simplon-IA-Biarritz-1/the-movie-predictor-dehy
/entity/title_episode.py
UTF-8
569
3.21875
3
[]
no_license
class TitleEpisode:
    """One row of the IMDb ``title.episode`` dataset.

    IMDb's TSV exports use the two-character marker ``\\N`` for a missing
    value; such fields are stored as ``None``.  Season and episode numbers
    are converted to ``int`` when present.
    """

    # Marker IMDb uses for a missing value in its TSV exports.
    _MISSING = "\\N"

    def __init__(self, dict):
        # NOTE: the parameter name `dict` shadows the builtin but is kept
        # so existing keyword callers are not broken.
        self.tconst = dict['tconst']
        self.parentTconst = self._parse(dict['parentTconst'])
        self.seasonNumber = self._parse(dict['seasonNumber'], int)
        self.episodeNumber = self._parse(dict['episodeNumber'], int)

    @staticmethod
    def _parse(value, cast=str):
        """Return None for the IMDb missing marker, else `cast(value)`."""
        return None if value == TitleEpisode._MISSING else cast(value)

    @staticmethod
    def dict_to_object(list):
        """Map a list of row dicts to a list of TitleEpisode instances."""
        # NOTE: the parameter name `list` shadows the builtin but is kept
        # for backward compatibility with existing callers.
        return [TitleEpisode(item) for item in list]
true
8fcdee74637c10f217164f9133a921b9c9ca5066
Python
tesschung/TIL-garden
/TIL/01_python/fibo.py
UTF-8
280
3.328125
3
[]
no_license
def fibo_recursion(n):
    """Return the n-th Fibonacci number using naive recursion."""
    return n if n < 2 else fibo_recursion(n - 1) + fibo_recursion(n - 2)


def fibo_for(n):
    """Return the n-th Fibonacci number iteratively."""
    if n < 2:
        return n
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr


print('hello, world')
true
758610c79886c379331b5396b1bc0b27a00ecc40
Python
snucml/rl-tutorial
/main2_Agent.py
UTF-8
7,206
2.765625
3
[ "MIT" ]
permissive
import tensorflow as tf import numpy as np import matplotlib.pyplot as plt from replayBuffer import ReplayBuffer class Agent_DDPG(object): def __init__(self, action_size, state_size, action_limit,): self.memory_size = 10000 self.replayBuffer = ReplayBuffer(self.memory_size) self.sess = tf.Session() self.discount_factor = 0.9 self.action_variance = 3 self.critic_learning_rate = 0.001 self.actor_learning_rate = 0.002 self.batch_size = 32 self.action_size, self.state_size, self.action_limit = action_size, state_size, action_limit, self.input_state = tf.placeholder(tf.float32, [None, state_size], 's') self.input_state_ = tf.placeholder(tf.float32, [None, state_size], 's_') self.R = tf.placeholder(tf.float32, [None, 1], 'r') with tf.variable_scope('Actor'): self.a = self.build_actor_network(self.input_state, scope='eval', trainable=True) a_ = self.build_actor_network(self.input_state_, scope='tar', trainable=False) with tf.variable_scope('Critic'): q_eval = self.build_critic_network(self.input_state, self.a, scope='eval', trainable=True) q_target = self.build_critic_network(self.input_state_, a_, scope='target', trainable=False) self.actor_evaluation_params = tf.get_collection(key=tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval') self.actor_target_params = tf.get_collection(key=tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/tar') self.critic_evaluation_params = tf.get_collection(key=tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval') self.critic_target_params = tf.get_collection(key=tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/tar') self.replace = [tf.assign(t, (1 - 0.01 ) * t + 0.01 * e) for t, e in zip(self.actor_target_params + self.critic_target_params, self.actor_evaluation_params + self.critic_evaluation_params)] ''' dJ/dtheta = E[ dQ/dtheta ] ''' # Actor Loss 는 Q로부터 내려오는 값을 maximize 하면 된다(논문 참조) self.a_loss = tf.reduce_mean(q_eval) # maximize the q # Maximize Q 를 해야하므로 learning rate에 '-' 를 붙인다. 
self.atrain = tf.train.AdamOptimizer(-self.actor_learning_rate).minimize(tf.reduce_mean(q_eval), var_list=self.actor_evaluation_params) # self.c_train 을 호출할때 self.a 에 배치의 action을 넣게 된다. # Placeholder가 아닌 self.a 에 직접 값을 대입하는 것! # s a r s_ 를 이용해서 critic을 업데이트 하는데, 정석으로 구한 y가 트루 라벨, 뉴럴넷에 값을 넣고 나오는 것이 우리의 prediction이다. # True Label, y = r(s,u_t(s)) + gamma*Q(s_, u_t(s_)) q_true = self.R + self.discount_factor * q_target # Prediction, Q = q_eval # 우리가 mseLoss를 구하려면 q_eval을 구해야 하므로 self.input_state에 피딩을 해 주어야 함. # 또한 q_true 를 구하기 위해 self.R 과 q_target에 들어갈 self.input_state_ 도 피딩 해주어야 함. self.mseloss = tf.losses.mean_squared_error(labels=q_true, predictions=q_eval) # 이 부분은 오직 Critic net을 업데이트하기위한 Loss이다. 때문에 var_list를 Critic evaluation network로 지정해주어야한다. self.ctrain = tf.train.AdamOptimizer(self.critic_learning_rate).minimize(self.mseloss, var_list=self.critic_evaluation_params) # 네트워크를 만들고 항상 초기화를 해준다. self.sess.run(tf.global_variables_initializer()) self.actor_loss_history = [] self.critic_loss_history = [] def store_transition(self, s, a, r, s_): self.replayBuffer.add(s,a,r,s_) def choose_action(self, s): return np.clip(np.random.normal(self.sess.run(self.a, {self.input_state: s[np.newaxis, :]})[0] , self.action_variance), -2, 2) def learn(self): if self.replayBuffer.count() > self.batch_size: self.action_variance *= .9995 self.sess.run(self.replace) batch = self.replayBuffer.get_batch(self.batch_size) batch_s = np.asarray([x[0] for x in batch]) batch_a = np.asarray([x[1] for x in batch]) batch_r = np.asarray([[x[2]] for x in batch]) batch_s_ = np.asarray([x[3] for x in batch]) actor_loss, _ = self.sess.run([self.a_loss, self.atrain], {self.input_state: batch_s}) critic_loss, _ = self.sess.run([self.mseloss, self.ctrain], {self.input_state: batch_s, self.a: batch_a, self.R: batch_r, self.input_state_: batch_s_}) self.actor_loss_history.append(actor_loss) self.critic_loss_history.append(critic_loss) def build_actor_network(self, s, scope, trainable): actor_hidden_size = 
30 with tf.variable_scope(scope): hidden1 = tf.layers.dense(s, actor_hidden_size, activation=tf.nn.relu, name='l1', trainable=trainable) a = tf.layers.dense(hidden1, self.action_size, activation=tf.nn.tanh, name='a', trainable=trainable) return tf.multiply(a, self.action_limit, name='scaled_a') def build_critic_network(self, s, a, scope, trainable): with tf.variable_scope(scope): critic_hidden_size = 30 hidden1 = tf.layers.dense(s, critic_hidden_size, name='s1', trainable=trainable) \ + tf.layers.dense(a, critic_hidden_size, name='a1', trainable=trainable) \ + tf.get_variable('b1', [1, critic_hidden_size], trainable=trainable) hidden1 = tf.nn.relu(hidden1) return tf.layers.dense(hidden1, 1, trainable=trainable) def plot_loss(self): plt.title('history', fontsize=25) ms = 0.1 me = 1 line_width = 0.1 plt.ylabel('Loss') plt.xlabel('Training steps') actor_loss_mean = sum(self.actor_loss_history)/len(self.actor_loss_history) self.actor_loss_history /= actor_loss_mean critic_loss_mean = sum(self.critic_loss_history)/len(self.critic_loss_history) self.critic_loss_history /= critic_loss_mean plt.plot(np.arange(len(self.actor_loss_history)), self.actor_loss_history, '-p', color='b', markevery=me, label=r'actor loss', lw=line_width, markersize=ms) plt.plot(np.arange(len(self.critic_loss_history)), self.critic_loss_history, '--^', color='r', markevery=me, label=r'critic loss', lw=line_width, markersize=ms) plt.grid() ax = plt.subplot(111) box = ax.get_position() ax.set_position([box.x0, box.y0, box.width * 0.8, box.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.ylim(0, 10) plt.show() def plot_reward(self, reward_history): plt.plot(np.arange(len(reward_history)), reward_history) plt.ylabel('Reward') plt.xlabel('Episodes') plt.grid() plt.show()
true
c761f348a1f4b954abc0ce8ba6dc43760611fbf8
Python
DhirendraRaic/PYDhi
/Datatypes/list_operations.py
UTF-8
1,309
3.734375
4
[]
no_license
numbers=[1,2,3,4,5] extranumbers=[6,7,8] s='dhoni' # insertion opertions # insert(), append(), extend() # deletion operations # remove(), pop(), clear() # other operations # count, index, ..... numbers.insert(2,'raja') print(numbers) numbers.append(12) print(numbers) sports=['cricket','baseball'] numbers.append(sports) # insert the item at the last print(numbers) numbers.extend('raja') print(numbers) # deletion primenumbers=[2,3,5,5,5,7,11] print(primenumbers) primenumbers.pop(3) # delete last item from list, 3 index print(primenumbers) # primenumbers.remove(5) # 5 is an item print(primenumbers) primenumbers.clear() print(primenumbers) listx=[1,2,3,4] # memory address 7890123 listy=listx.copy() # listx listy print(listy) listz=listx # memomry address 7890456 print(listz) num=[9,7,6,3,1,9,9] print(num.count(9)) print(num.index(9,5,9)) # 9 - element - (list) # 1 - start eindex or 6 end index snames=['Tennis','Baseball','Cricket','Volleyball'] sscore=[198,34,67,12,989] snames.sort() sscore.sort() print(snames) print(sscore) #TypeError: '<' not supported between instances of 'str' and 'int' #stringvalues=[1,"one",2,3,4,'Two','Four',4.56] #stringvalues.sort() #print(stringvalues) sscore.reverse() print(sscore) sscore.sort(reverse=True) print(sscore)
true
49cb20870166beda9681980e9605cce871cfaea2
Python
u-ever/CursoEmVideoPython
/exercises/ex090_Dicionário_em_Python.py
UTF-8
290
3.5
4
[]
no_license
aluno = dict() aluno['nome'] = str(input('Nome: ')) aluno['media'] = int(input(f'Média de {aluno["nome"]}: ')) print('=-' * 30) if aluno['media'] >= 7: aluno['situação'] = 'Aprovado' else: aluno['situação'] = 'Recuperação' for k, v in aluno.items(): print(f' - {k}: {v}')
true
98729c8d09508b59a114c6b17f8330d4a0dfd983
Python
dmesq/tic-tac-toe-online
/tic-tac-toe-server/src/server/GameManager.py
UTF-8
7,529
3.109375
3
[]
no_license
import GameManagerMessages class GameManager: def __init__(self): self.clients = {} # addr: name self.addrs = {} # name: addr self.games = {} # name: (is_occupied, in_game, is_turn, opponent) # self.games: in_game | is_turn | player_state | # --------+----------+--------------------------------------| # False | False | not occupied (**) | # --------+----------+--------------------------------------| # False | True | deciding upon request to play (*) | # --------+----------+--------------------------------------| # True | False | requested another player for game or | # | is waiting for turn to play (*) | # --------+----------+--------------------------------------| # True | True | his turn to play (*) | # # (*) -> is_occupied = True # (**) -> is_occupied = False def register(self, name, addr): msg = GameManagerMessages.USER_REGISTER if name in self.addrs: return msg + " nok " + GameManagerMessages.INVALID_USERNAME elif addr in self.clients: return msg + " nok " + GameManagerMessages.USER_ALREADY_REGISTERED else: self.clients[addr] = name self.addrs[name] = addr self.games[name] = [False, None, False, False] return msg + " ok" def unregister(self, name, addr): msg = GameManagerMessages.USER_EXIT if (name not in self.addrs) or (addr not in self.clients): return msg + " nok " + GameManagerMessages.INVALID_USERNAME else: del self.clients[addr] del self.addrs[name] del self.games[name] return msg + " ok" def list(self, addr): msg = "LST" if addr not in self.clients: return msg + " " + GameManagerMessages.USER_NOT_REGISTERED # check if user is occupied name = self.clients[addr] if self.games[name][0]: return msg + " " + GameManagerMessages.USER_OCCUPIED for player, values in self.games.items(): if player != name: msg += " " + player + " " + str(values[0]) return msg def invite(self, invited, addr): ''' invited: string, name of the player that is being invited addr: string, address of the caller (inviting player) returns [msg, send_addr] msg: the msg to be sent send_addr: 
to whom the message should be sent. If error, to this caller If no error, to the invited player''' # check if user is registered if addr not in self.clients: return GameManagerMessages.USER_NOT_REGISTERED, addr # get user info inviting = self.clients[addr] inviting_state = self.games[inviting] # check if user is occupied if inviting_state[0]: return GameManagerMessages.USER_OCCUPIED, addr # check if user is not inviting himself if inviting == invited: return GameManagerMessages.INVALID_USERNAME, addr # check if player invited exists and is not occupied if invited not in self.addrs: return GameManagerMessages.OTHER_USER_NOT_REGISTERED, addr invited_state = self.games[invited] if invited_state[0]: return GameManagerMessages.OTHER_USER_OCCUPIED, addr # set inviting user to occupied and as requesting turn inviting_state[0] = inviting_state[1] = True inviting_state[2] = False inviting_state[3] = invited # set invited user to occupied and as needing to respond invited_state[0] = invited_state[2] = True invited_state[1] = False invited_state[3] = inviting #return send message to invited player return GameManagerMessages.USER_INVITE + " " + inviting, self.addrs[invited] def accept(self, other_player, accepted, addr): ''' other_player: string, name of the opponent that made the invite accepted: string, 'true' if accepts game request. 'false' otherwise. addr: string, address of the caller returns: [msg, send_addr] msg: msg to be sent send_addr: to whom the message should be sent. 
If error, to this caller If no error, to the other user (player parameter)''' # check if caller is registered if addr not in self.clients: return GameManagerMessages.USER_NOT_REGISTERED, addr caller = self.clients[addr] caller_state = self.games[caller] # check if caller is in position to accept a request if not(caller_state[0] and not caller_state[1] and caller_state[2]): return GameManagerMessages.USER_CANT_ACCEPT, addr # check if accepted has the correct format if accepted in ('True', 'False'): accepted_bool = True if accepted == 'True' else False else: return GameManagerMessages.INVALID_COMMAND, addr # check if refered player matches the player that made the initial invite if (caller_state[3] != other_player): return GameManagerMessages.REQUEST_PLAYER_MISMATCH, addr other_player_state = self.games[other_player] # put players in the correct state for game if user accepted if accepted_bool: caller_state[1] = True caller_state[2] = False other_player_state[2] = True # otherwise put players in begin state else: self.__default(caller_state) self.__default(other_player_state) # return message informing the inviter of the decision of his opponent return "ACP " + caller + " " + accepted, self.addrs[other_player] def play(self, other_player, row, col, is_finito, addr): ''' other_player: string, opponent row: string, the row of the play (1, 2 or 3) col: string, the column of the play (1, 2 or 3) is_finito: string, 'true' if play ends the game and 'false' if not returns [msg, send_to] ''' # check if caller is registered if addr not in self.clients: return GameManagerMessages.USER_NOT_REGISTERED, addr # check if accepted has the correct format if is_finito in ('True', 'False'): is_finito_bool = True if is_finito == 'True' else False else: return GameManagerMessages.INVALID_COMMAND, addr player = self.clients[addr] player_state = self.games[player] # check if player is in position to make a play if not(player_state[0] and player_state[1] and player_state[2]): return 
GameManagerMessages.USER_CANT_PLAY, addr # check if the other player is specified correctly if other_player != player_state[3]: return GameManagerMessages.REQUEST_PLAYER_MISMATCH, addr other_player_state = self.games[other_player] # check if play has the correct format try: row = int(row) col = int(col) except ValueError: return GameManagerMessages.INVALID_PLAY, addr # check if play is out of bounds if row > 3 or row < 1 or col > 3 or col < 1: return GameManagerMessages.INVALID_PLAY, addr # if game finished return players to default values if (is_finito_bool): self.__default(player_state) self.__default(other_player_state) # switch roles if not finished else: player_state[2] = False other_player_state[2] = True # return the appropiate message return "PLA " + player + " " + str(row) + " " + str(col) + " "\ + is_finito,\ self.addrs[other_player] def __default(self, state1): state1[0] = state1[1] = state1[2] = False state1[3] = None
true
0d8ea7200e16e1e81ed2839a43476b6cd516d686
Python
RohithYogi/Spoj-Solutions
/Spoj/CRDS - Cards.py
UTF-8
100
2.953125
3
[]
no_license
t=input() for i in range(0,t): l=input() s=(l*((3*l)+1))/2 s=s%1000007 print s
true
4c9c2ac8cd88798e6201c6d82942063b7d371b7c
Python
UWSEDS/homework-4-documentation-and-style-kychen37
/df_from_url.py
UTF-8
1,082
3.6875
4
[ "MIT" ]
permissive
"""Requests and extracts a dataframe from a given link, and checks the dataframe for columns, length, and column types.""" import io import requests import pandas as pd def read_link(some_link): """Extracts a dataframe from a provided link.""" urlData = requests.get(some_link).content df = pd.read_csv(io.StringIO(urlData.decode('utf-8'))) return df def test_create_dataframe(dataframe, col_list): """Checks df has right columns, >= 10 rows, values of the same column are the same type.""" count = 0 if set(dataframe.columns) == set(col_list): count += 1 if len(dataframe) >= 10: count += 1 types = [] for col in dataframe: types.append(type(dataframe.loc[0, col])) counter = 0 failed_count = 0 for i, col in enumerate(dataframe): for entry in dataframe[col]: if type(entry) == types[i]: counter += 1 else: failed_count += 1 if failed_count == 0: count += 1 if count == 3: print('True') else: return 'False'
true
ae66fe34cabb015318a3a1e367bc49f13920564c
Python
masterchoi2020/python_basic
/basic_1/53.quiz10.py
UTF-8
369
2.546875
3
[]
no_license
''' Quiz) 프로젝트 내에 나만의 시그니처를 남기는 모듈을 만드시오. 조건: 모듈 파일명은 byme.py로 작성 (모듈 사용 예제) import byme byme.sign() (출력 예제) 이 프로그램은 나도코딩에 의해 만들어졌습니다. 유튜브: http://youtube.com 이메일: nadocoding@gmail.com ''' # byme 모듈 사용 import byme byme.sign()
true
5b8f1f60d9a0b856c3d5170ecf30ab72c0104702
Python
sumitpurandare/My_git_repo
/Print.py
UTF-8
455
3.8125
4
[]
no_license
#print #Compiled by : Sumit Purandare print ("Hello World") print ("""Hello World hey awesome people out there Wanna some fun with Python """) print ("Mon " "Tue " "Wed " "Thu " "Fri " "Sat " "Sun ") print ("""\n jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec""") tabby_cat = "\t I'm tabbed in" fat_fish = "I am a fat fish" print(tabby_cat) print(fat_fish) #print ("\uxxxx")
true
a6de4743fb52aeede04bb96a00b6c173b8ee26f6
Python
DamoM73/Digital-Solutions-old
/IA2/query_database.py
UTF-8
430
2.84375
3
[]
no_license
import sqlite3 as sql def sql_query(db_file,command): # connect to database with sql.connect(db_file) as db: cursor = db.cursor() # run the command cursor.execute(command) return cursor.fetchall() # ----- MAIN PROGRAM ----- DB_FILE = "./IA2/spotify_v2.db" query = """ SELECT * FROM Songs """ results = sql_query(DB_FILE,query) for row in results: print(row)
true
48b7ea83046eb5b4a9e2a93ee7383daccba9c3cc
Python
EllieHachem/Some-projects
/Turtle-Library-day-16-start/main.py
UTF-8
840
3.15625
3
[]
no_license
import test from turtle import Turtle, Screen #class can be inside module that is why if we say import Turtle #we must type turtle.Turtle() to make this not like now easier #<turtle.Turtle object at 0x7fa7182ae910(this is memory) turtle module and class created object at memory location here 0x7fa7182ae910 easy #accing attrubute object or in class it is self or anything u want .attribute #here type everything you learn or in copy book but in concept paper try to write most important keywords and what u learned really to have space #to call method the same but () # we need to see documantion of this class to know what to do print(test.bro) timmy = Turtle() screen = Screen() screen.canvheight timmy.color("blue") timmy.forward(100) print(timmy.shape("turtle")) print(screen.canvheight) screen.exitonclick() #turtle here in aroow
true
0003e1c91e17f8e8e98e64a4a3343c2bf784ab37
Python
ankits0207/ACB_Assignment_1
/MT16121_problem3_1.py
UTF-8
1,389
3.578125
4
[]
no_license
# MT16121 # Ankit Sharma input_string = input("Enter the string ") input_pattern = input("Enter the pattern ") input_string_list = list(input_string) input_pattern_list = list(input_pattern) string_index_iterator = 0 temp_iterator = 0 temp_offset = 0 print_check = 0 offset_list = [] # Code to perform naive string matching if len(input_pattern_list) > len(input_string_list): print("Pattern length is greater than the string length !") else: while string_index_iterator != len(input_string_list): flag = 0 temp_iterator = string_index_iterator for pattern_index_iterator in range(0, len(input_pattern_list)): if pattern_index_iterator == 0: temp_offset = temp_iterator if input_string_list[temp_iterator] != input_pattern_list[pattern_index_iterator]: flag = 1 if flag == 1: break elif flag == 0 and pattern_index_iterator == len(input_pattern_list) - 1: offset_list.append(temp_offset) temp_iterator += 1 if temp_iterator == len(input_string_list): flag = 2 break if flag == 2: break string_index_iterator += 1 for val in offset_list: print_check = 1 print("Offset " + str(val)) if print_check == 0: print("No match found !")
true
68a69c9b83b62700404dff5b083534c402b39494
Python
Aasthaengg/IBMdataset
/Python_codes/p02954/s697767943.py
UTF-8
236
3.1875
3
[]
no_license
S = input() N = len(S) ans = [1]*N for i in range(N-1): if S[i] == S[i+1] == "R": ans[i+2] += ans[i] ans[i] = 0 for j in range(N-1,0,-1): if S[j] == S[j-1] == "L": ans[j-2] += ans[j] ans[j] = 0 print(*ans)
true
dad3bcd31db844464e296adfd4e8221e0abb5ce2
Python
trollhoehle/pythonlights
/gnome_test.py
UTF-8
211
2.53125
3
[ "MIT" ]
permissive
import pythonlights import math import time ctrl = pythonlights.LEDUtils() while True: for t in range(0,100): ctrl.set_gnome(max(5,int(255*math.cos(2.0*t/200*math.pi)**2))) ctrl.send() time.sleep(0.02)
true
dd957cd6e25269d13c5f747b0439979c38904f7e
Python
VictorTadema/How_to_think_project_Victor
/How to think/Week 3/Exercise 36.py
UTF-8
121
3.21875
3
[]
no_license
from typing import Any, Union tot = 0 for numbers in [2, 3, 4]: squares = numbers ** 2 tot += squares print(tot)
true
e3c2677b47d4c5ae2949145c5e5e4b9a0402d912
Python
lawrann/AI-for-stock-market-trending-analysis
/fyp_/Tweets/tweets_cleaner_remove_duplicate.py
UTF-8
11,828
2.640625
3
[]
no_license
# -*- coding: utf-8 -*- import json import csv import os import re,string from tqdm import tqdm import pandas as pd def unique(list1): # insert the list to the set list_set = set(list1) # convert the set to the list unique_list = (list(list_set)) return unique_list month = ['01','02','03','04','05','06','07','08','09','10','11','12',] year = ['14','15','16','17','18'] #%% #SP if not os.path.exists('clean3\spcleancsv'): os.makedirs('clean3\spcleancsv') if not os.path.exists('clean3\spcleanjson'): os.makedirs('clean3\spcleanjson') sp_count = 0 for yr in year: for mth in month: with open('clean2\spcleanjson\sp_tweets_removelinks_'+yr+mth+'.json') as json_file: data = json.load(json_file) new_data = [] tuple_list = [] del_list = [] for i in data: # keys: username, date, text tuple_list.append((i['username'],i['date'], i['text'])) list_len = len(tuple_list) for header in tqdm(range(len(tuple_list)), position=0, leave=True): runner = header + 1 while (runner<list_len and tuple_list[header][1] == tuple_list[runner][1]): string_len = int(len(tuple_list[runner][2])*0.6) if ((tuple_list[runner][2])[:string_len] in tuple_list[header][2]): del_list.append(runner) runner = runner + 1 tqdm.write("sp" + str(yr) + str(mth)) tqdm.write("# Tweets Before: " + str(len(tuple_list))) del_list = unique(del_list) tqdm.write("# Duplicate: " + str(len(del_list))) del_list.sort() del_list.reverse() for i in del_list: del tuple_list[i] tqdm.write("# Tweets After: " + str(len(tuple_list))) sp_count = sp_count + int(len(tuple_list)) with open('clean3\spcleancsv\sp_tweets_removeduplicates_'+yr+mth+'.csv', 'w', newline='', encoding='utf-8') as output_file: df = pd.DataFrame(tuple_list) df.to_csv(output_file, header=False, index=False, encoding='utf-8') with open('clean3\spcleanjson\sp_tweets_removeduplicates_'+yr+mth+'.json', 'w') as fp: json.dump(tuple_list, fp) print("Total tweets for SP " + str(sp_count)) #%% #CITI if not os.path.exists('clean3\citicleancsv'): 
os.makedirs('clean3\citicleancsv') if not os.path.exists('clean3\citicleanjson'): os.makedirs('clean3\citicleanjson') citi_count = 0 for yr in year: for mth in month: with open('clean2\citicleanjson\citi_tweets_removelinks_'+yr+mth+'.json') as json_file: data = json.load(json_file) new_data = [] tuple_list = [] del_list = [] for i in data: # keys: username, date, text tuple_list.append((i['username'],i['date'], i['text'])) list_len = len(tuple_list) for header in tqdm(range(len(tuple_list)), position=0, leave=True): runner = header + 1 while (runner<list_len and tuple_list[header][1] == tuple_list[runner][1]): string_len = int(len(tuple_list[runner][2])*0.6) if ((tuple_list[runner][2])[:string_len] in tuple_list[header][2]): del_list.append(runner) runner = runner + 1 tqdm.write("Citi" + str(yr) + str(mth)) tqdm.write("# Tweets Before: " + str(len(tuple_list))) del_list = unique(del_list) tqdm.write("# Duplicate: " + str(len(del_list))) del_list.sort() del_list.reverse() for i in del_list: del tuple_list[i] tqdm.write("# Tweets After: " + str(len(tuple_list))) citi_count = citi_count + int(len(tuple_list)) with open('clean3\citicleancsv\citi_tweets_removeduplicates_'+yr+mth+'.csv', 'w', newline='', encoding='utf-8') as output_file: df = pd.DataFrame(tuple_list) df.to_csv(output_file, header=False, index=False, encoding='utf-8') with open('clean3\citicleanjson\citi_tweets_removeduplicates_'+yr+mth+'.json', 'w') as fp: json.dump(tuple_list, fp) print("Total tweets for Citi " + str(citi_count)) #%% #SPY if not os.path.exists('clean3\spycleancsv'): os.makedirs('clean3\spycleancsv') if not os.path.exists('clean3\spycleanjson'): os.makedirs('clean3\spycleanjson') spy_count = 0 month = ['01','02','03','04','05','06','07','08','09','10','11','12',] year = ['18'] for yr in year: for mth in month: with open('clean2\spycleanjson\spy_tweets_removelinks_'+yr+mth+'.json') as json_file: data = json.load(json_file) new_data = [] tuple_list = [] del_list = [] for i in data: # keys: 
username, date, text tuple_list.append((i['username'],i['date'], i['text'])) list_len = len(tuple_list) for header in tqdm(range(len(tuple_list)), position=0, leave=True): runner = header + 1 while (runner<list_len and tuple_list[header][1] == tuple_list[runner][1]): string_len = int(len(tuple_list[runner][2])*0.6) if ((tuple_list[runner][2])[:string_len] in tuple_list[header][2]): del_list.append(runner) runner = runner + 1 tqdm.write("spy" + str(yr) + str(mth)) tqdm.write("# Tweets Before: " + str(len(tuple_list))) del_list = unique(del_list) tqdm.write("# Duplicate: " + str(len(del_list))) del_list.sort() del_list.reverse() for i in del_list: del tuple_list[i] tqdm.write("# Tweets After: " + str(len(tuple_list))) spy_count = spy_count + int(len(tuple_list)) with open('clean3\spycleancsv\spy_tweets_removeduplicates_'+yr+mth+'.csv', 'w', newline='', encoding='utf-8') as output_file: df = pd.DataFrame(tuple_list) df.to_csv(output_file, header=False, index=False, encoding='utf-8') with open('clean3\spycleanjson\spy_tweets_removeduplicates_'+yr+mth+'.json', 'w') as fp: json.dump(tuple_list, fp) print("Total tweets for SPY " + str(spy_count)) #%% #SPY2 if not os.path.exists('clean3\spycleancsv'): os.makedirs('clean3\spycleancsv') if not os.path.exists('clean3\spycleanjson'): os.makedirs('clean3\spycleanjson') spy_count = 0 month = ['01','02','03','04','05','06','07','08','09','10','11','12',] year = ['14','15','16','17','18'] for yr in year: for mth in month: with open('clean2\spycleanjson\spy2_tweets_removelinks_'+yr+mth+'.json') as json_file: data = json.load(json_file) new_data = [] tuple_list = [] del_list = [] for i in data: # keys: username, date, text tuple_list.append((i['username'],i['date'], i['text'])) list_len = len(tuple_list) for header in tqdm(range(len(tuple_list)), position=0, leave=True): runner = header + 1 while (runner<list_len and tuple_list[header][1] == tuple_list[runner][1]): string_len = int(len(tuple_list[runner][2])*0.6) if 
((tuple_list[runner][2])[:string_len] in tuple_list[header][2]): del_list.append(runner) runner = runner + 1 tqdm.write("spy" + str(yr) + str(mth)) tqdm.write("# Tweets Before: " + str(len(tuple_list))) del_list = unique(del_list) tqdm.write("# Duplicate: " + str(len(del_list))) del_list.sort() del_list.reverse() for i in del_list: del tuple_list[i] tqdm.write("# Tweets After: " + str(len(tuple_list))) spy_count = spy_count + int(len(tuple_list)) with open('clean3\spycleancsv\spy2_tweets_removeduplicates_'+yr+mth+'.csv', 'w', newline='', encoding='utf-8') as output_file: df = pd.DataFrame(tuple_list) df.to_csv(output_file, header=False, index=False, encoding='utf-8') with open('clean3\spycleanjson\spy2_tweets_removeduplicates_'+yr+mth+'.json', 'w') as fp: json.dump(tuple_list, fp) print("Total tweets for SPY " + str(spy_count)) #%% #ATVI if not os.path.exists(r'clean3\atvicleancsv'): os.makedirs(r'clean3\atvicleancsv') if not os.path.exists(r'clean3\atvicleanjson'): os.makedirs(r'clean3\atvicleanjson') spy_count = 0 month = ['01','02','03','04','05','06','07','08','09','10','11','12',] year = ['14','15','16','17','18'] for yr in year: for mth in month: with open(r'clean2\atvicleanjson\atvi_tweets_removelinks_'+yr+mth+'.json') as json_file: data = json.load(json_file) new_data = [] tuple_list = [] del_list = [] for i in data: # keys: username, date, text tuple_list.append((i['username'],i['date'], i['text'])) list_len = len(tuple_list) for header in tqdm(range(len(tuple_list)), position=0, leave=True): runner = header + 1 while (runner<list_len and tuple_list[header][1] == tuple_list[runner][1]): string_len = int(len(tuple_list[runner][2])*0.6) if ((tuple_list[runner][2])[:string_len] in tuple_list[header][2]): del_list.append(runner) runner = runner + 1 tqdm.write("atvi" + str(yr) + str(mth)) tqdm.write("# Tweets Before: " + str(len(tuple_list))) del_list = unique(del_list) tqdm.write("# Duplicate: " + str(len(del_list))) del_list.sort() del_list.reverse() for i in 
del_list: del tuple_list[i] tqdm.write("# Tweets After: " + str(len(tuple_list))) spy_count = spy_count + int(len(tuple_list)) with open(r'clean3\atvicleancsv\atvi_tweets_removeduplicates_'+yr+mth+'.csv', 'w', newline='', encoding='utf-8') as output_file: df = pd.DataFrame(tuple_list) df.to_csv(output_file, header=False, index=False, encoding='utf-8') with open(r'clean3\atvicleanjson\atvi_tweets_removeduplicates_'+yr+mth+'.json', 'w') as fp: json.dump(tuple_list, fp) print("Total tweets for ATVI " + str(spy_count)) #%% # count tweets import json import csv import os import re,string from tqdm import tqdm import pandas as pd month = ['01','02','03','04','05','06','07','08','09','10','11','12',] year = ['14','15','16','17','18'] total = 0 for yr in year: for mth in month: with open(r'C:\Users\Lawrann\Desktop\fyp2\Tweets\clean3\citicleanjson\citi_tweets_removeduplicates_'+yr+mth+'.json', 'r') as json_file: data = json.load(json_file) total = total + len(data) print("citi"+str(yr)+str(mth)+" "+str(len(data))) print("total: " + str(total)) for yr in year: for mth in month: with open(r'C:\Users\Lawrann\Desktop\fyp2\Tweets\clean3\spcleanjson\sp_tweets_removeduplicates_'+yr+mth+'.json', 'r') as json_file: data = json.load(json_file) total = total + len(data) print("sp"+str(yr)+str(mth)+" "+str(len(data))) print("total: " + str(total))
true
44d6bb3efe711025c225e9c9416561a3bed8ae9c
Python
danhtaihoang/protein-er
/19.11.0700_ACE_g1B/4plot_ct_di_top.py
UTF-8
2,534
2.703125
3
[]
no_license
#!/usr/bin/env python # coding: utf-8 import sys import numpy as np import matplotlib.pyplot as plt #get_ipython().run_line_magic('matplotlib', 'inline') #pfam = 'PF00504' #s = np.loadtxt('pfam_2_40k.txt',dtype='str') #pfam_list = s[:,0] pfam_list = ['PF00200'] #========================================================================================= def di_top(d,top): # find value of top biggest d1 = d.copy() np.fill_diagonal(d1, 0) #print(d1) a = d1.reshape((-1,)) #print(a) a = np.sort(a)[::-1] # descreasing sort #print(a) top_value = a[top] #print(top_value) # fill the top largest to be 1, other 0 top_pos = d1 > top_value #print(top_pos) d1[top_pos] = 1. d1[~top_pos] = 0. #print(d1) xy = np.argwhere(d1==1) return xy #========================================================================================= top_list = [40,60,80,100] for pfam in pfam_list: ct = np.loadtxt('../pfam_50_80pos/%s_ct.txt'%pfam) di = np.loadtxt('%s/di.dat'%pfam) nx,ny = 4,5 nfig = nx*ny fig, ax = plt.subplots(ny,nx,figsize=(nx*3.,ny*2.8)) for j,cutoff in enumerate([3,4,5,6,7]): ct_top = np.argwhere(ct < cutoff) for i,top in enumerate(top_list): xy_di = di_top(di,top) #ax[j,i].plot(ct_top[:,0],ct_top[:,1],'ko',markersize=2) #ax[j,i].plot(xy_di[:,0],xy_di[:,1],'ro',markersize=4) ax[j,i].plot(ct_top[:,0],ct_top[:,1],'co',markersize=5,mfc='none',label='contact map') ax[j,i].plot(xy_di[:,0],xy_di[:,1],'r*',markersize=6,label='direct information') for i in range(4): ax[0,i].set_title('top: %i'%top_list[i]) plt.tight_layout(h_pad=1, w_pad=1.5) plt.savefig('%s/ct_di_top.pdf'%pfam,format='pdf', dpi=50) plt.close() #------------------------------------------------------------------- # map plt.figure(figsize=(8,3.2)) plt.subplot2grid((1,2),(0,0)) plt.title('contact map') plt.imshow(ct,cmap='rainbow_r',origin='lower') plt.xlabel('i') plt.ylabel('j') plt.clim(0,10) plt.colorbar(fraction=0.045, pad=0.05) plt.subplot2grid((1,2),(0,1)) plt.title('direct info') 
plt.imshow(di,cmap='rainbow',origin='lower') plt.xlabel('i') plt.ylabel('j') plt.clim(0,0.01) plt.colorbar(fraction=0.045, pad=0.03) plt.tight_layout(h_pad=1, w_pad=1.5) plt.savefig('%s/ct_di.pdf'%pfam,format='pdf', dpi=100) plt.close()
true
9c7ee58d2858ab716474b08ab1c6a7940dbdb097
Python
lindafanglizhi/pytest_book1
/src/chapter3/test_fixture_data.py
UTF-8
753
3.421875
3
[]
no_license
# Author: lindafang # Date: 2020-05-07 14:25 # File: test_fixture_data.py import csv import pytest @pytest.fixture() def data(): test_data = {'name': 'linda', 'age': 18} return test_data def test_login(data): name = data['name'] age = data['age'] print("我的名字叫:{},今年{}。".format(name, age)) @pytest.fixture() def read_data(): with open('userinfo.csv') as f: row = csv.reader(f, delimiter=',') next(row) # 读取首行 users = [] for r in row: users.append(r) # 读取的字段均为str类型 return users def test_logins(read_data): name = read_data[0][0] age = read_data[0][1] print("我的名字叫:{},今年{}。".format(name, age))
true
d238058f8482b02aad744eb9363dd0e17765eb78
Python
nandini0727/MongoDB-Complex-Object-Creation
/Project2_department.py
UTF-8
2,207
3.203125
3
[]
no_license
## File to generate JSON with department as root from pymongo import MongoClient #Connect to Mongo client on port number 27017 mongo = MongoClient('localhost', 27017) #initialize mongodb collections to variables project_db = mongo.MongoProject.project department_db = mongo.MongoProject.department department_data = mongo.MongoProject.department_data employee_db = mongo.MongoProject.employee workson_db = mongo.MongoProject.workson #sort department collection based on department name department_db = department_db.find().sort("Dname") #method to return manager last name based on ssn value def getManagerLastName(ssn): data = employee_db.find_one({'Ssn':ssn}) return data['Lname'] #method to return manager first name based on ssn value def getManagerFirstName(ssn): data = employee_db.find_one({'Ssn':ssn}) return data['Name'] def main(): #Since department is root iterate through every record in department collection for data in department_db: #get manager last name based on Mgr_ssn value lastName = getManagerLastName(data['Mgr_ssn']) #get manager first name based on Mgr_ssn value firstName = getManagerFirstName(data['Mgr_ssn']) employeeList = [] #create employee nested object inside the main department object for empdata in employee_db.find({}): if(empdata['Dno'] == data['Dnumber']): empDict = {} empDict['EMP_LNAME'] = empdata['Lname'] empDict['EMP_FNAME'] = empdata['Name'] empDict['SALARY'] = empdata['Salary'] #add each employee dict value to employeeList employeeList.append(empDict) #insert the newly formed JSON to mongodb collection department_data for each department record try: department_data.insert_one({ 'DNAME' : data['Dname'], 'DNUMBER' : data['Dnumber'], 'MGR_LNAME' : lastName, 'MGR_FNAME' : firstName, 'EMPLOYEES' : employeeList }) except Exception as e: print(e) if __name__ == '__main__': main()
true
1487d39559d20532d1c6cf83f3ab7b7a4c391235
Python
jdcs/TheWxPythonTutorial
/FirstSteps/nominimizebox.py
UTF-8
293
2.53125
3
[]
no_license
#!/usr/bin/env python # -*- coding: utf-8 -*- # nominimizebox.py import wx app = wx.App() window = wx.Frame(None, style = wx.MAXIMIZE_BOX | wx.RESIZE_BORDER \ | wx.SYSTEM_MENU | wx.CAPTION | wx.CLOSE_BOX) window.Show(True) app.MainLoop() # if __name__ == '__main__':
true
c01a819dc4b72573b9cc6df641e298cfaad52c7d
Python
Literably/theano-rnn-1
/rnn/lstm.py
UTF-8
712
2.703125
3
[ "BSD-3-Clause" ]
permissive
"""Module that contains functionality for lstm RNNs.""" __author__ = 'Justin Bayer, bayer.justin@googlemail.com' import theano import theano.tensor as T sig = T.nnet.sigmoid def make_expr(lstminpt, state): lstminpt_squashed = sig(lstminpt) slicesize = lstminpt.shape[0] / 4 inpt = lstminpt_squashed[:slicesize] ingate = lstminpt_squashed[slicesize: 2 * slicesize] forgetgate = lstminpt_squashed[2 * slicesize:3 * slicesize] outgate = lstminpt_squashed[3 * slicesize:4 * slicesize] new_state = inpt * ingate + state * forgetgate output = sig(new_state) * outgate return [new_state, output] inpt = T.vector('lstm-input') state = T.vector('lstm-state') func = theano.function([inpt, state], make_expr(inpt, state))
true
a125d13851354e099c720a297854c8bb13a6a8ee
Python
Varun-Nirala/Python
/SomeProject/ReadReelStrips.py
UTF-8
435
2.84375
3
[]
no_license
import sys import xml.etree.ElementTree as ET file_data = open("abc.xml", "r") str_data = file_data.read() xml_data = ET.fromstring(str_data) reelstripdef_list = xml_data.findall("reelstripdef") for reelstripdef in reelstripdef_list: stop_list = reelstripdef.findall("stop") print(reelstripdef.get("name"), "NumberOfSymbol = ", len(stop_list)) for symbol in stop_list: print("\t", symbol.get("symbolname"))
true
a1880daa35bd1369b45213bed076c3e993532774
Python
dimartinot/digital_race_team2005
/src/classes/CarControl.py
UTF-8
4,201
2.984375
3
[]
no_license
#! /usr/bin/env python #-*- coding: utf-8 -*- import numpy as np import cv2 import rospy import std_msgs.msg import math steer_publisher = None speed_publisher = None laneWidth = 40 minVelocity = 10 maxVelocity = 50 preError = None kP = None kI = None kD = None t_kP = None t_kI = None t_kD = None carPos = None class CarControl(): def __init__(self): self.max_step = 30 self.step_turn = self.max_step +1 self.direction = None self.carPos = cv2.KeyPoint( 120, 300, _size=0 ) self.coefRight = 3 self.coefLeft = 2 self.steer_publisher = rospy.Publisher("/team2005/set_angle", std_msgs.msg.Float32, queue_size=10) self.speed_publisher = rospy.Publisher("/team2005/set_speed", std_msgs.msg.Float32, queue_size=10) self.stay_left = False # indicates if the car has to stay on the left of the road def set_stay_left(self): self.stay_left = True def unset_stay_left(self): self.stay_left = False def errorAngle(self, dst): (dst_x, dst_y) = (dst[0], dst[1]) (carPos_x, carPos_y) = self.carPos.pt if (dst_x == carPos_x): return 0 if (dst_y == carPos_y): return (-90 if dst_x < carPos_x else 90) pi = math.acos(-1.0) dx = dst_x - carPos_x dy = carPos_y - dst_y if (dx < 0): return - math.atan(-dx/dy)*180/pi return math.atan(dx/dy)*180/pi def driverCar(self, left, right, velocity): if (len(left) > 11 and len(right) > 11): i = len(left) - 11 error = preError while (left[i] == None and right[i] == None): i -= 1 if (i < 0): return if (left[i] != None and right[i] != None): #error = self.errorAngle((np.array(left[i]) + np.array(right[i])) / 2) # margin coefficients: if (self.stay_left): error = self.errorAngle(self.coefRight*np.array(left[i]) / 5 + self.coefLeft*np.array(right[i])/5) else: error = self.errorAngle(self.coefLeft*np.array(left[i]) / 5 + self.coefRight*np.array(right[i])/5) elif left[i] != None: error = self.errorAngle(np.array(left[i]) + np.array([laneWidth / 2, 0])) else: error = self.errorAngle(np.array(right[i]) - np.array([laneWidth / 2, 0])) if abs(error)>20: error = 0 
#print(error) self.steer_publisher.publish(std_msgs.msg.Float32(error)) self.speed_publisher.publish(std_msgs.msg.Float32(velocity)) def goStraight(self, velocity=10): self.steer_publisher.publish(std_msgs.msg.Float32(0)) self.speed_publisher.publish(std_msgs.msg.Float32(velocity)) def turnHere(self): """ Exécute la maneuvre nécessaire pour un virage. ENTREE: direction: "left" to trun left or "right to turn right" """ angle = 0 turning = self.step_turn < self.max_step if turning: if self.direction == "right": angle = 20 # to turn right elif self.direction == "left": angle = -20 # to turn left self.steer_publisher.publish(std_msgs.msg.Float32(angle)) self.step_turn+=1 return turning def obstacle(self, keypoint): height = 240 width = 320 center = [160,120] dangerZone = False angle = 0 distance = center[0] - keypoint.pt[0] if abs(distance) < 100: dangerZone = True if dangerZone == True: if distance > 0: print('Right') self.coefRight = 3.5 self.coefLeft = 1.5 elif distance < 0: print('Left') self.coefRight = 3.5 self.coefLeft = 1.5 #print('angle: {}'.format(angle)) #self.steer_publisher.publish(std_msgs.msg.Float32(angle))
true
cfc3bdfcdaff1af4b70516fc89540fdeb30a7a82
Python
sguna0100/pyGuvi
/perfectSquare.py
UTF-8
136
3.125
3
[]
no_license
import math n=int(input(int)) m=int(input(int)) o=m*n root=math.sqrt(o) if(int(root+0.5)**2==o): print("yes") else: print("no")
true
9835ebcfbc962d1a34a2a822811343d4eb6a27cb
Python
ligaydima/pygame_proj
/db_interaction.py
UTF-8
806
3.015625
3
[]
no_license
import sqlite3 import constants class Interactor: def __init__(self): self.connection = sqlite3.connect(constants.DB_NAME) self.cursor = self.connection.cursor() self.cursor.execute(f"""CREATE TABLE IF NOT EXISTS "{constants.TABLE_NAME}" ( "score" INTEGER, "date" DATE) """) self.connection.commit() def write(self, score): self.cursor.execute(f"""INSERT INTO {constants.TABLE_NAME} VALUES ({score}, DATE("now"))""") self.connection.commit() def get_top_5(self): res = sorted(self.cursor.execute(f"""SELECT * FROM {constants.TABLE_NAME}""").fetchall(), reverse=True) return res[:min(5, len(res))] def close(self): self.connection.close()
true
287fbee5a2fba77e3ec0984ea55ce8042574b63c
Python
Annisotropy/Python
/HW2/Task4.py
UTF-8
225
3.703125
4
[]
no_license
phrase = input('Введите предложение из нескольких слов, разделенных пробелами: ') words = phrase.split() print(words) for i, w in enumerate(words): print(i, w[0:10])
true
b53932bae1a5194efd4f835103c67cc99f5ebc40
Python
VegaGT/python-city
/outdoors/lake.py
UTF-8
556
4.1875
4
[]
no_license
def draw_lake(length, width): """ Print a lake of given parameters(length and width) :param length: integer defining the length of a lake :param width: integer defining the width of a lake :return: """ # Sanitizing parameters if length < 0: raise ValueError("Length is negative") if width < 0: raise ValueError("Width is negative") # Print the lake for _ in range(width): print('+', end='') for _ in range(length): print("~", end="") print('+') return
true
085fcbb47bac058bf62e0f98d56268ff4d646316
Python
ruanguoqing/basal-ganglia
/basalganglia/reinforce/util/torch_util.py
UTF-8
2,070
2.53125
3
[ "Unlicense" ]
permissive
import numpy as np import torch, torch.autograd as A from gym.spaces import Box, Discrete def torchify(y, type=Box): if type is Box: return torch.Tensor(y).type(torch.FloatTensor) elif type is Discrete: return torch.Tensor(y).type(torch.LongTensor) def make_indicator(y_tensor, n_dim=None): y_tensor = y_tensor.type(torch.LongTensor).view(-1, 1) n_dim = n_dim if n_dim is not None else int(torch.max(y_tensor)) + 1 y_one_hot = torch.zeros(y_tensor.size()[0], n_dim).scatter_(1, y_tensor, 1) return y_one_hot def hvp(y, x, v): grad = A.grad(y, x(), create_graph=True, retain_graph=True) flat_grad = make_flat_from(grad) grad_v = (flat_grad * v).sum() grad_grad = A.grad(grad_v, x(), retain_graph=True) flat_grad_grad = make_flat_from(grad_grad).data return flat_grad_grad + 0.1 * v def do_conjugate_gradient(f_Ax, b, n_iters=10, tolerance=1e-10): p = b.clone() r = b.clone() x = torch.zeros(b.size()) for i in range(n_iters): residue = (r * r).sum() Ap = f_Ax(p) alpha = residue / ((p * Ap).sum() + 1e-8) x += alpha * p r -= alpha * Ap new_residue = (r * r).sum() if new_residue < tolerance: break beta = new_residue / (residue + 1e-8) p = r + beta * p return x def make_flat_from(v): return torch.cat([g.contiguous().view(-1) for g in v]) def make_flat_from_model(model): return torch.cat([p.data.view(-1) for p in model.parameters()]) def set_model_from_flat(model, flat_params): prev_ind = 0 for param in model.parameters(): flat_size = int(np.prod(list(param.size()))) param.data.copy_(flat_params[prev_ind:prev_ind + flat_size].view(param.size())) prev_ind += flat_size def set_grad_from_flat(model, flat_grad): prev_ind = 0 for param in model.parameters(): flat_size = int(np.prod(list(param.size()))) param.grad.data.copy_(flat_grad[prev_ind:prev_ind + flat_size].view(param.size())) prev_ind += flat_size
true
6b78f05aa70323e2bd8714b5bb296cdf102aab9f
Python
AdamZhouSE/pythonHomework
/Code/CodeRecords/2579/60668/279235.py
UTF-8
865
2.578125
3
[]
no_license
def maxSideLength(t,mat=[]): m,n = len(mat),len(mat[0]) P = [[0]*(n+1) for _ in range(m+1)] for i in range(1,m+1): for j in range(1,n+1): P[i][j] = P[i-1][j] + P[i][j-1]-P[i-1][j-1] +mat[i-1][j-1] def getR(x1,y1,x2,y2): return P[x2][y2] - P[x1 - 1][y2] - P[x2][y1 - 1] + P[x1 - 1][y1 - 1] l, r, ans = 1, min(m, n), 0 while l <= r: mid = (l + r) // 2 find = any(getR(i, j, i + mid - 1, j + mid - 1) <= t for i in range(1, m - mid + 2) for j in range(1, n - mid + 2)) if find: ans = mid l = mid + 1 else: r = mid - 1 return ans if __name__=='__main__': list = [] for _ in range(int(input())): lis = [int(i) for i in input().split(',')] list.append(lis) n = int(input()) maxSideLength(n,list)
true
e48b06a835a23c67a28856eae8db6eeafeed29c8
Python
Zapunidi/NNMG
/Data/ProcessingCutData.py
UTF-8
1,015
2.640625
3
[]
no_license
from mido import MidiFile, MidiTrack, Message, tick2second, second2tick import os import numpy as np number_file = 0 delete_file = 0 for root, dirs, files in os.walk("CutData/Classic"): for file in files: if (os.path.splitext(file)[1] == ".mid" or os.path.splitext(file)[1] == ".midi"): number_file += 1 print("Current file: {} Delete files: {}".format(number_file, delete_file), end="\r") try: notes = np.asarray([False]*12) midi = MidiFile(os.path.join(root, file)) for track in midi.tracks[:]: newTrack = MidiTrack() for msg in track: if msg.type == "note_off" or msg.type == "note_on": notes[msg.note%12] = True if np.count_nonzero(notes) < 10: delete_file += 1 os.remove(os.path.join(root, file)) except: pass input("Complete!")
true
ecba6a609508e2595339a38bd4409bc50971f404
Python
vta/BLDSify-the-County
/cities/city.py
UTF-8
602
3
3
[]
no_license
""" City class is a base class for the cities. Particular city implementation need to define self.fields dictionary that maps the field names from a city dataset to BLDS standard - get_value() function can be used for complicated field aggregations """ class City: def __init__(self, name, fields): self.city_name = name self.fields = dict(fields) def get_field(self, field_name): if field_name in self.fields: return self.fields[field_name] else: return None def get_value(self, field_name, value, record): return value
true
a1c438d4dcdb773e99c5009eff2b57d7997210bd
Python
xiaojkql/Algorithm-Data-Structure
/Basic Data Structure/Tree/BinaryTree/binNode.py
UTF-8
1,531
3.53125
4
[]
no_license
# -*- coding: utf-8 -*- ''' Author: Qin Yuan E-mail: xiaojkql@163.com Time: 2019-02-17 21:48:12 ''' # 二叉搜素树的树节点 import random class BinNode: def __init__(self, data=None, parent=None, lc=None, rc=None, h=0): self._data = data self._parent = parent self._lc = lc self._rc = rc self._height = h # 返回该节点的后代数目 def size(self): s = 1 # 这里实现的方法隐藏了两个递归 if self._lc is not None: s += self._lc.size() if self._rc: s += self._rc.size() return s # 两种插入的方法,作为左孩子或者作为右孩子插入 def insertAsLc(self, data): self._lc = BinNode(data, self) def insertAsRc(self, data): self._rc = BinNode(data, self) # 该节点的直接后继 def succ(self): pass # 遍历 # 先序遍历 def preTraverse(self): # 三个版本:递归,迭代1,2 def recurse(): pass def iter1(): pass def iter2(): pass sele = random.randint(0, 3) if sele == 0: recurse() elif sele == 1: iter1() else: iter2() # 中序遍历 # simple test def main(): node1 = BinNode(12) node1.insertAsLc(32) node1.insertAsRc(666) print(node1._lc._data) print(node1._rc._data) print(node1.size()) if __name__ == "__main__": main()
true
6ec6a005824bb97117a6ac34a3fb293c0509e1f6
Python
Krisly/Scientific_Computing
/Assignment2/code/test_cmp_plt.py
UTF-8
1,932
2.609375
3
[]
no_license
import numpy as np import matplotlib.pyplot as plt xmin = -30 xmax = 30 ymin = -30 ymax = 30 xv, yv = np.meshgrid(np.arange(xmin,xmax,0.05), np.arange(ymin,ymax,0.05), sparse=False, indexing='ij') a = xv + 1j*yv C = np.zeros(a.shape,dtype=np.float64) A = np.matrix([[(88-7*np.sqrt(6))/360,(296-169*np.sqrt(6))/(1800),(-2+3*np.sqrt(6))/225], [(296+169*np.sqrt(6))/(1800),(88+7*np.sqrt(6))/360,(-2-3*np.sqrt(6))/225], [(16-np.sqrt(6))/36,(16+np.sqrt(6))/36,1/9]],dtype=np.float64) b = np.array([(16-np.sqrt(6))/36,(16+np.sqrt(6))/36,1/9]) #A = np.matrix([[0,0,0,0], # [1/2,0,0,0], # [0,1/2,0,0], # [0,0,1,0]]) #b = np.array([1/6,1/3,1/3,1/6]) #A = np.matrix([[0,0,0,0,0,0,0], # [1/5,0,0,0,0,0,0], # [3/40,9/40,0,0,0,0,0], # [44/45,-56/15,32/9,0,0,0,0], # [19372/6561,-25360/2187,64448/6561,-212/729,0,0,0], # [9017/3168,-355/33,46732/5247,49/176,-5103/18656,0,0], # [35/384,0,500/1113,125/192,-2187/6784,11/84,0]]) #print(A,A.shape,type(A)) #b = np.array([35/384,0,500/1113,125/192,-2187/6784,11/84,0]) #b = np.array([5179/57600,0,7571/16695,393/640,-92097/339200,187/2100,1/40]) def fill_array(C,A,b,a): nrow = A.shape[0] ncol = A.shape[1] for i in range(len(a)): for k in range(len(a)): z = a[i,k] I = np.eye(nrow,ncol) e = np.ones((nrow,1)) eiAinv = np.linalg.inv(I-z*A) c_tmp = 1 + z*b.T*eiAinv*e if np.absolute(c_tmp) > 1: C[k,i] = 1 else: C[k,i] = np.absolute(c_tmp) return C t = fill_array(C,A,b,a) #t[t<1]=0 #print(t) #print(t/np.amax(t)) #plt.scatter(a.real,a.imag, c = t,cmap='hsv') plt.imshow(t,cmap='jet',extent=[xmin, xmax, ymax, ymin],interpolation='bilinear') plt.colorbar() plt.plot((xmin, xmax), (0, 0), '--k') plt.plot((0, 0), (ymin, ymax), '--k') plt.savefig('./figs/abs_stab_radau_col.pdf') plt.show()
true
550ec38184e6125d634d5af817d59c815298a61b
Python
CiscoTestAutomation/genielibs
/pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/hsrp/configure.py
UTF-8
8,411
2.5625
3
[ "Apache-2.0" ]
permissive
"""Common configure functions for HSRP""" # Python import logging # Unicon from unicon.core.errors import SubCommandFailure log = logging.getLogger(__name__) def configure_interface_vlan_standby_ip(device, vlan_number, group, ip_address): """ Configures vlan interface standby group IP Example: standby 0 ip 10.1.0.3 Args: device ('obj'): Device object vlan_number ('int'): Vlan interface number (Range 1-4093) group ('int'): Group number (Range 0-255) ip_address ('str'): Virtual IP address Return: None Raise: SubCommandFailure """ log.info(f"Configuring vlan interface standby group on {device.name}") cmd = [ f"interface vlan {vlan_number}", f"standby {group} ip {ip_address}" ] try: device.configure(cmd) except SubCommandFailure as e: raise SubCommandFailure(f"Could not configure vlan interface standby group on {device.name}. Error:\n{e}") def unconfigure_interface_vlan_standby_ip(device, vlan_number, group, ip_address): """ Unconfigures vlan interface standby group IP Example: no standby 0 ip 10.1.0.3 Args: device ('obj'): Device object vlan_number ('int'): Vlan interface number (Range 1-4093) group ('int'): Group number (Range 0-255) ip_address ('str'): Virtual IP address Return: None Raise: SubCommandFailure """ log.info(f"Unconfiguring vlan interface standby group on {device.name}") cmd = [ f"interface vlan {vlan_number}", f"no standby {group} ip {ip_address}" ] try: device.configure(cmd) except SubCommandFailure as e: raise SubCommandFailure(f"Could not unconfigure vlan interface standby group on {device.name}. 
Error:\n{e}") def configure_interface_vlan_standby_timers(device, vlan_number, group, interval, hold_time): """ Configures vlan interface standby timers Example: standby 0 timers 1 4 Args: device ('obj'): Device object vlan_number ('int'): Vlan interface number (Range 1-4093) group ('int'): Group number (Range 0-255) interval ('int'): Hello interval in seconds (Rang 1-254) hold_time ('int'): Hold time in seconds (Range 2-255) Return: None Raise: SubCommandFailure """ log.info(f"Configuring vlan interface standby timers on {device.name}") cmd = [ f"interface vlan {vlan_number}", f"standby {group} timers {interval} {hold_time}" ] try: device.configure(cmd) except SubCommandFailure as e: raise SubCommandFailure(f"Could not configure vlan interface standby timers on {device.name}. Error:\n{e}") def unconfigure_interface_vlan_standby_timers(device, vlan_number, group): """ Unconfigures vlan interface standby timers Example: no standby 0 timers Args: device ('obj'): Device object vlan_number ('int'): Vlan interface number (Range 1-4093) group ('int'): Group number (Range 0-255) Return: None Raise: SubCommandFailure """ log.info(f"Unconfiguring vlan interface standby timers on {device.name}") cmd = [ f"interface vlan {vlan_number}", f"no standby {group} timers" ] try: device.configure(cmd) except SubCommandFailure as e: raise SubCommandFailure(f"Could not unconfigure vlan interface standby timers on {device.name}. 
Error:\n{e}") def configure_interface_vlan_standby_preempt(device, vlan_number, group): """ Configures vlan interface standby preempt Example: standby 0 preempt Args: device ('obj'): Device object vlan_number ('int'): Vlan interface number (Range 1-4093) group ('int'): Group number (Range 0-255) Return: None Raise: SubCommandFailure """ log.info(f"Configuring vlan interface standby preempt on {device.name}") cmd = [ f"interface vlan {vlan_number}", f"standby {group} preempt" ] try: device.configure(cmd) except SubCommandFailure as e: raise SubCommandFailure(f"Could not configure vlan interface standby preempt on {device.name}. Error:\n{e}") def unconfigure_interface_vlan_standby_preempt(device, vlan_number, group): """ Unconfigures vlan interface standby preempt Example: no standby 0 preempt Args: device ('obj'): Device object vlan_number ('int'): Vlan interface number (Range 1-4093) group ('int'): Group number (Range 0-255) Return: None Raise: SubCommandFailure """ log.info(f"Unconfiguring vlan interface standby preempt on {device.name}") cmd = [ f"interface vlan {vlan_number}", f"no standby {group} preempt" ] try: device.configure(cmd) except SubCommandFailure as e: raise SubCommandFailure(f"Could not unconfigure vlan interface standby preempt on {device.name}. Error:\n{e}") def unconfigure_hsrp_interface(device, interface, version, group): """ unonfigures vlan interface standby group IP Example: standby 0 ip 10.1.0.3 Args: device ('obj'): Device object interface ('int'): Vlan <vlan Id> (Range 1-4093) / physical interface group ('int'): Group number (Range 0-255) version('int'): Version number (Range 1-2) Return: None Raises: SubCommandFailure: Failed to unconfigure hsrp interface """ cmd = [] cmd.append(f"interface {interface}") cmd.append(f"no standby version {version}") cmd.append(f"no standby {group}") try: device.configure(cmd) except SubCommandFailure as e: raise SubCommandFailure( f"Could not unconfigure interface hsrp standby group on {device.name}. 
Error:\n{e}") def configure_vrrp_interface(device, interface, group_number, advertisement_interval, priority_level, address_family, ipv4_or_ipv6_address): """ Configures vlan interface standby group IP make sure this command is already enabled in device fhrp version vrrp v3 Args: device ('obj'): Device object interface ('str'): Vlan <vlan Id> (Range 1-4093) / physical interface group_number('int'): group number (Range 1-255) advertisement_interval('int'): Advertisement interval range <100-40950> Advertisement interval in milliseconds priority_level('int'): priority level range <1-254> address_family ('str'): address family ipv4 or ipv6 to use ipv4_or_ipv6_address ('str'): based on the address family please use the ipv4 or ipv6 address to configure Return: None Raises: SubCommandFailure: Failed to configure vrrp interface """ config_cmd = [ f"interface {interface}", f"vrrp {group_number} address-family {address_family}", f"timers advertise {advertisement_interval}", f"priority {priority_level}", f"address {ipv4_or_ipv6_address} primary" ] try: device.configure(config_cmd) except SubCommandFailure as e: raise SubCommandFailure( f"Could not configure interface vrrp standby group on {device.name}. Error:\n{e}") def unconfigure_vrrp_interface(device, interface, group_number, address_family): """ unconfigures vlan interface standby group IP make sure this command is already enabled in device fhrp version vrrp v3 Args: device ('obj'): Device object interface ('str'): Vlan <vlan Id> (Range 1-4093) / physical interface group_number('int'): group number (Range 1-255) address_family ('str'): address family ipv4 or ipv6 to use Return: None Raises: SubCommandFailure: Failed to unconfigure vrrp interface """ config_cmd = [ f"interface {interface}", f"no vrrp {group_number} address-family {address_family}" ] try: device.configure(config_cmd) except SubCommandFailure as e: raise SubCommandFailure( f"Could not configure interface vrrp standby group on {device.name}. Error:\n{e}")
true
63edcd51b5f24926e7ac96c17cbda36fb1563bb1
Python
Aasthaengg/IBMdataset
/Python_codes/p03721/s344598788.py
UTF-8
201
2.609375
3
[]
no_license
n, k = map(int, input().split()) ab = [list(map(int, input().split())) for _ in range(n)] #print(ab) ab.sort() #print(ab) for i in range(n): k = k-ab[i][1] if k <= 0: print(ab[i][0]) exit()
true
4267d37fe5b4714eec1e1437446a24cdecd2453f
Python
smileyoung1993/Mysite_django
/mysite/urls.py
UTF-8
2,032
2.515625
3
[]
no_license
"""mysite URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.0/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path import user.views as user_view import main.views as main_view import guestbook.views as guestbook_view import board.views as board_view urlpatterns = [ path('',main_view.index), # guestbook path('guestbook/', guestbook_view.index), path('guestbook/guest_lst',guestbook_view.guest_lst), path('guestbook/add',guestbook_view.add), path('guestbook/deleteform', guestbook_view.deleteform), path('guestbook/delete', guestbook_view.delete), # url에서 포스트방식으로 보내는 방식은 /를 붙이면 안된다. path('user/joinform/',user_view.joinform), path('user/join', user_view.join), path('user/joinsuccess/', user_view.joinsuccess), path('user/loginform/', user_view.loginform), path('user/login', user_view.login), path('user/logout',user_view.logout), # board path('board/',board_view.list), path('board/modify',board_view.modify), path('board/view',board_view.view), path('board/write',board_view.write), path('board/add',board_view.add), path('board/deleteform',board_view.deleteform), path('board/delete',board_view.delete), path('board/modify',board_view.modify), path('board/update',board_view.update), path('board/hit_update',board_view.hit_update), path('admin/', admin.site.urls), # view 이름이 site ]
true
e8c8f73fe98833489cf418d15d6edde53411e18b
Python
xbh/Home-Work-of-MLES
/exercise/U4_loop/day2.py
UTF-8
319
3.328125
3
[]
no_license
ip = int(input()) position = ip + 65 op = 65 if ip <= 122: while op < position: print(chr(op),"", end="") op = op + 1 else: print("") else: print("F word") for i in range(1,11): print(i) text = "WHAT THE FXXX" for i in range(len(text)): print(text[i])
true
0ddbe2d9a079189528b11e64e1bf5ec56a25a283
Python
NilsBergmann/AdventOfCode2017
/Day1/__main__.py
UTF-8
570
3.703125
4
[]
no_license
def solve1(number): sum = 0 for digit, nextDigit in zip(number, number[1:]): if digit==nextDigit: sum += int(digit) if number[0] == number[-1]: sum += int(number[0]) print("solve 1:", sum) def solve2(number): sum = 0 for index ,digit in enumerate(number): matchingIndex = int((index+(len(number)/2))%len(number)) if digit == number[matchingIndex]: sum += int(digit) print("solve 2:", sum) _input = open("Day1/Input.txt", "r") for line in _input: solve1(line) solve2(line)
true
729ea794f7560d0e7e7550923bac36c0b15c5076
Python
matt-rowlinson/NCAS_CVAO
/bin/change_in_seasonal.py
UTF-8
4,450
2.546875
3
[]
no_license
# -*- coding: utf-8 -*- """ Created on Wed Oct 9 11:33:36 2019 @author: ee11mr """ import numpy as np import matplotlib import matplotlib.pyplot as plt plt.switch_backend('agg') from datetime import datetime, timedelta from scipy.interpolate import InterpolatedUnivariateSpline from pandas import DataFrame from scipy import stats plt.style.use('seaborn-darkgrid') plt.rcParams['figure.figsize'] = (11, 7) ##------------------- Get time and dates for plotting ------------------------- def daterange(start_date, end_date): delta = timedelta(hours=1) while start_date < end_date: yield start_date start_date += delta ##------------------------------------------------------------------------------## def moving_average(a,n=3): ret = np.nancumsum(a, dtype=float) ret[n:] = ret[n:] - ret[:-n] return ret[n - 1:] / n ##------------------------ MAIN SCRIPT ---------------------------------------- ## columns are as follows: #0=ozone, 1=CO, 2=ch4, 3=ethane, 4=propane, 5=, 6=, 7=, # species = ['O3','CO'] spec = 'CO' ; r=1 for r,spec in enumerate(species[:1]): print(spec) time=[] start_date = datetime(2006, 10, 2, 16, 00) end_date = datetime(2019, 8, 27, 21, 00) for single_date in daterange(start_date, end_date): time.append(single_date) filepath = '/users/mjr583/scratch/cape_verde/' filen = filepath+'test_merge.txt' data = np.loadtxt(filen,skiprows=1) blank_fltr = np.where(data == -999.99) data[blank_fltr] = np.nan zero_fltr = np.where(data == 0.0) data[zero_fltr] = np.nan n=len(data) d = data[:,r][2168:107360] time=time[2168:107360] msk = np.where(np.isfinite(d)) df = d[msk] plt.plot(time[:n],d) plt.savefig(filepath+'plots/'+spec+'_timeseries.png') plt.close() years = ['2007','2008','2009','2010','2011','2012','2013','2014','2015',\ '2016','2017','2018'] months=['01','02','03','04','05','06','07','08','09','10','11','12'] all_monthly = np.zeros(((len(months)),len(d))) ; all_monthly[:]=np.nan for m,mon in enumerate(months): for n,i in enumerate(time): j = str(i) if mon in j: 
all_monthly[m,n]=d[n] else: pass ann_monthly = np.zeros((len(years),len(months),len(d))) ; ann_monthly[:]=np.nan for y, year in enumerate(years): for m,mon in enumerate(months): for n,i in enumerate(time): j = str(i) if year in j and mon in j: ann_monthly[y,m,n]=d[n] else: pass all_seas = np.nanmean(all_monthly, axis=1) ann_seas = np.nanmean(ann_monthly, axis=2) month_names=['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct',\ 'Nov','Dec'] colors = ['#f7fcf5','#e5f5e0','#c7e9c0','#a1d99b','#74c476','#41ab5d',\ '#7fcdbb','#41b6c4','#1d91c0',\ '#225ea8','#253494','#081d58'] for i in range(len(ann_monthly)): plt.plot(month_names,ann_seas[i], label=years[i], color=colors[i]) plt.plot(month_names,all_seas,label='2007-2018 mean',color='k',linewidth=3) plt.legend(ncol=2) plt.savefig(filepath+'plots/'+spec+'_seasonal.png') plt.close() all_seas = np.nanmean(all_monthly, axis=1) one_seas = np.nanmean(np.nanmean(ann_monthly[:3], axis=2),0) two_seas = np.nanmean(np.nanmean(ann_monthly[3:6], axis=2),0) thr_seas = np.nanmean(np.nanmean(ann_monthly[6:9], axis=2),0) fou_seas = np.nanmean(np.nanmean(ann_monthly[9:], axis=2),0) #colors=['#fee5d9','#fcae91','#fb6a4a','#cb181d'] periods=['2007-2009','2010-2012','2013-2015','2016-2018'] colors=['yellow','orange','red','darkred'] plt.plot(month_names,one_seas, label=periods[0], color=colors[0]) plt.plot(month_names,two_seas, label=periods[1], color=colors[1]) plt.plot(month_names,thr_seas, label=periods[2], color=colors[2]) plt.plot(month_names,fou_seas, label=periods[3], color=colors[3]) plt.plot(month_names,all_seas,label='2007-2018 mean',color='k',linewidth=3) plt.legend() plt.savefig(filepath+'plots/'+spec+'_period_seasonal.png') plt.close()
true
bc3c3fc50cd692f93573e812e0cf62dd6189dc92
Python
ponnuru171/Python-1
/online/reverse in arry.py
UTF-8
155
3.390625
3
[]
no_license
import numpy as np x=np.array([[1,2,3,4,5,6,7,8,9],[9,8,7,6,5,4,3,2,1]]) print(x) print("--------------------------------------------1") y=x[::-1] print(y)
true
9b168790796c395f6e3355c7c92c5ae574a651ad
Python
singultek/ModelAndLanguagesForBioInformatics
/Python/List/12.partition.py
UTF-8
701
4
4
[ "MIT" ]
permissive
def partition(pivot: int, in_list: list)-> (list,list): """ taken one list L of integers and one integer value N, divides the elements of L in two lists L1 and L2 such that all elements less or equal than N are in L1 and in L2 all the others. :param pivot: the pivot number :param in_list: the source list :return: two list that follow the statement (L1,L2) """ _out_list1 = [] _out_list2 = [] for element in in_list: if pivot >= element: _out_list1.append(element) else: _out_list2.append(element) return _out_list1,_out_list2 if __name__ == "__main__": l1,l2 = partition(5,[1,2,3,4,5,6,7,8,9,10]) print(f"{l1}\n{l2}")
true
c97b935e02589b1cdfe5d5610ad2554b8b0842e3
Python
Aleksandr-QAP/studySF
/17.4.3.py
UTF-8
336
3.1875
3
[]
no_license
def par_checker(string): stack = [] for s in string: if s == "(": stack.append(s) elif s == ")": if len(stack) > 0 and stack[-1] == "(": stack.pop() else: return False return len(stack) == 0 par_checker((5+6)*(7+8)/(4+3))
true
375be2c737113bd6a999e2fd235d2e6192e76e4d
Python
gerirosenberg/python-for-mapping
/intro_to_python/latlon.py
UTF-8
1,401
4.5
4
[]
no_license
# This program describes a longitude and latitude point. # Says what the program will help the user do, followed by an empty line print("This program provides information about a location based on its longitude and latitude.") print() # Uses the latitude to determine location details def latInfo() : lat = float(input("Please enter the latitude of the location: ")) if lat == 0.0: print('That location is on the equator.') elif 0.0 < lat <= 90.0: print('That location is north of the equator.') elif -90.0 <= lat < 0.0: print('That location is south of the equator.') # Restarts the function if the latitude is invalid else: print('That location does not have a valid latitude.') latInfo() # Uses the longitude to determine location details def longInfo() : long = float(input("Please enter the longitude of the location: ")) if long == 0.0: print('That location is on the prime meridian.') elif 0.0 < long <= 180.0: print('That location is east of the prime meridian.') elif -180.0 <= long < 0.0: print('That location is west of the prime meridian.') # Restarts the function if the longitude is invalid else: print('That location does not have a valid longitude.') longInfo() # Runs the latitude and longitude functions latInfo() longInfo()
true
7ad0c06cd21fa42d80078d40d3a393a2b54a0a9f
Python
shinmura0/dcase2020_task2_baseline
/keras_model.py
UTF-8
1,930
2.578125
3
[ "MIT" ]
permissive
""" @file keras_model.py @brief Script for keras model definition @author Toshiki Nakamura, Yuki Nikaido, and Yohei Kawaguchi (Hitachi Ltd.) Copyright (C) 2020 Hitachi, Ltd. All right reserved. """ ######################################################################## # import python-library ######################################################################## # from import import keras.models from keras.models import Model from keras.layers import Input, Dense, BatchNormalization, Activation ######################################################################## # keras model ######################################################################## def get_model(inputDim): """ define the keras model the model based on the simple dense auto encoder (128*128*128*128*8*128*128*128*128) """ inputLayer = Input(shape=(inputDim,)) h = Dense(128)(inputLayer) h = BatchNormalization()(h) h = Activation('relu')(h) h = Dense(128)(h) h = BatchNormalization()(h) h = Activation('relu')(h) h = Dense(128)(h) h = BatchNormalization()(h) h = Activation('relu')(h) h = Dense(128)(h) h = BatchNormalization()(h) h = Activation('relu')(h) h = Dense(8)(h) h = BatchNormalization()(h) h = Activation('relu')(h) h = Dense(128)(h) h = BatchNormalization()(h) h = Activation('relu')(h) h = Dense(128)(h) h = BatchNormalization()(h) h = Activation('relu')(h) h = Dense(128)(h) h = BatchNormalization()(h) h = Activation('relu')(h) h = Dense(128)(h) h = BatchNormalization()(h) h = Activation('relu')(h) h = Dense(inputDim)(h) return Model(inputs=inputLayer, outputs=h) ######################################################################### def load_model(file_path): return keras.models.load_model(file_path)
true
51863767014cc90100ec41114915b283bb013f69
Python
kongzichixiangjiao/GAFile
/小机智-工作相关/报表生成/Result/result.py
UTF-8
2,698
2.578125
3
[]
no_license
import sys import os import openpyxl import time import_dir = \ os.path.join(os.path.join(os.path.dirname( __file__), os.pardir), 'Root') sys.path.insert(0, import_dir) file_name = 'root' Root = __import__(file_name) print(Root.dates) print(Root.stores) import_dir1 = \ os.path.join(os.path.join(os.path.dirname( __file__), os.pardir), 'Tool') sys.path.insert(0, import_dir1) file_name1 = 'tool' Tool = __import__(file_name1) path = '../成都铁塔平台车辆日报表.xlsx' Tool.sameExcelCopySheet(path, '6.14', '6.15') wb = openpyxl.load_workbook(path) sheet = wb['6.15'] # localtime = time.localtime() # month = localtime.tm_mon # day = localtime.tm_mday # sheet = wb.create_sheet(str(month) + '.' + str(day).zfill(2)) targetStores = [] min_row = 3 for one_column_data in sheet.iter_rows(min_row=min_row): # 店名称 store = one_column_data[1].value targetStores.append(store) num = Root.stores.count(store) # 第一批投放量 a1 = one_column_data[3].value # 第二批投放量 a2 = one_column_data[4].value # 已出租数量 czNum = one_column_data[8].value # 滞留数量 zlNum = one_column_data[10].value # 第一批投放日期 a1_date = one_column_data[6].value # 第二批投放日期 a2_date = one_column_data[7].value lastDate = '' if a1_date is None: lastDate = str(a2_date) else: if a2_date is None: lastDate = str(a1_date) else: lastDate = str(a2_date) localtime = time.localtime() year = localtime.tm_year month = localtime.tm_mon day = localtime.tm_mday # 滞留天数 # if lastDate == 'None': # print("lastDate is None") # else: # if czNum != 0: # d = Tool.Caltime('2021.' + lastDate, '2021.' + # str(month).zfill(2) + '.' + str(day)) # print("已出租天数") # print(d.days) # print(type(print(d.days))) # sheet.cell(column=12, row=min_row, value=d.days) # else: # print(0) if isinstance(a1, int): if czNum != num: sheet.cell(column=9, row=min_row, value=num) sheet.cell(column=11, row=min_row, value=a1 + a2 - num) if store in Root.stores: print("") else: if isinstance(store, str): print("ERROR......." 
+ store) min_row += 1 isSave = True for item in Root.stores: if item in targetStores: print('') else: print("ERROR-------有新店铺需要手动新增") isSave = False break if isSave: print("保存成功") wb.save(path)
true
eeefb09d2081a27cf6ec2bd167cdfe535c859cb9
Python
moallafatmaocto/FSS-1000
/tests/data_preprocessing/test_episode_batch_generator.py
UTF-8
1,079
2.53125
3
[]
no_license
from unittest import TestCase import torch from src.data_preprocessing.batch_generator import get_autolabel_batch class TestBatchGenerator(TestCase): def test_get_autolabel_batch_returns_the_right_size_for_support_and_query_batch(self): # Given testname = '1.jpg' class_num = 1 sample_num_per_class = 5 batch_num_per_class = 1 support_dir = 'test_data/african_elephant/supp' test_dir = 'test_data/african_elephant/test' # When support_images_tensor, support_labels_tensor, query_images_tensor, query_labels_tensor = get_autolabel_batch( testname, class_num, sample_num_per_class, batch_num_per_class, support_dir, test_dir ) # Then self.assertEqual(torch.Size([5, 4, 224, 224]), support_images_tensor.size()) self.assertEqual(torch.Size([5, 1, 224, 224]), support_labels_tensor.size()) self.assertEqual(torch.Size([1, 4, 224, 224]), query_images_tensor.size()) self.assertEqual(torch.Size([1, 1, 224, 224]), query_labels_tensor.size())
true
72f84eca5292d4cf6a09e6a643a73e24da951e3e
Python
CyHsiung/Recommendation-system
/src/Utils/LR_helper.py
UTF-8
8,302
2.59375
3
[]
no_license
import numpy as np import pandas as pd import os.path as join from collections import OrderedDict # drop_pre_thr = 30 # When # of p-edge > drop_pre_thr, the user become candidate of drop # drop_user_rate = 0.3 # perform delete on these ratio of candidates (candidate / total usesr) # drop_pre_rate = 0.3 # the ratio for drop the edge def removed_edge(graph, df_pref, df_table, drop_pre_thr = 30, drop_user_rate = 0.3, drop_pre_rate = 0.3, graph_type = 'w'): # copy an object for not modify original graph # graph = G.copy() # not work if graph_type == 'w': next_node = 'next_pref' else: next_node = 'next_prod' user_num = graph.getCount()[0] np.random.seed(42) edge_num = [] for idx in range(user_num): pre_cnt = len(graph.NodeList["user_"+str(idx)][next_node]) if pre_cnt >= drop_pre_thr: edge_num.append((pre_cnt, idx)) edge_num = sorted(edge_num, reverse=True) # find the user fit critera removed_pre = OrderedDict() user_drop_count = min(len(edge_num), int(drop_user_rate * user_num)) for drop_idx in range(user_drop_count): cnt, idx = edge_num[drop_idx] user_name = "user_"+str(idx) pre_drop_count = int(drop_pre_rate * cnt) # random select a edge to remove length = len(graph.NodeList[user_name][next_node]) # bug fix # rand_idx = list(np.random.random_integers(0, length-1, pre_drop_count)) rand_idx = list(np.random.choice(length, pre_drop_count, replace = False)) # record and remove the edges for i in sorted(rand_idx, reverse = True): try: removed_edge_name = graph.NodeList[user_name][next_node][i] del graph.NodeList[user_name][next_node][i] if user_name not in removed_pre: removed_pre[user_name] = [removed_edge_name] else: removed_pre[user_name].append(removed_edge_name) except: print("removing edges unknown error (have not be solved)") print("length ", length, ", max of rand_idx = ", max(rand_idx), "length of pref", len(graph.NodeList[user_name][next_node])) IDCG = [] item_num = [] for user in removed_pre.keys(): item_set = set() for preference in removed_pre[user]: if 
graph_type == 'w': line = preference.split("_") item_set.add(line[1]) item_set.add(line[2]) else: try: item = df_table['new2old']['item_U'][preference] except: item = df_table['new2old']['item_D'][preference] item_set.add(str(item)) rating_list = [] for item in item_set: real_idx = find_item_index(item, df_table) real_user_name = find_user_name(user, df_table) rate = df_pref[(df_pref['user'] == real_user_name) & (df_pref['product'] == int(item))]['rating'].values[0] rating_list.append(float(rate)) # DCG for this item IDCG.append(DCG_calculator(sorted(rating_list, reverse = True))) item_num.append(len(rating_list)) # print("IDCG rateinglist :", sorted(rating_list, reverse = True)) return graph, removed_pre, IDCG, item_num def generate_data(user_feature, item_feature, graph, removed_pre, df_pref, df_table, graph_type): x_train, y_train, x_test, y_test, original_label = [], [], [], [], [] user_num = graph.getCount()[0] if graph_type == 'w': print("generating data (with preference)") next_node = 'next_pref' # Generating training data print("Generating training data !!!!") for idx in range(user_num): user_name = "user_"+str(idx) item_set = set() for pref in graph.NodeList[user_name][next_node]: line = pref.split("_") item_set.add(line[1]) item_set.add(line[2]) for item in item_set: real_idx = find_item_index(item, df_table) real_user_name = find_user_name(user_name, df_table) rate = df_pref[(df_pref['user'] == real_user_name) & (df_pref['product'] == int(item))]['rating'].values[0] x_train.append(np.concatenate([user_feature[idx, :].flatten(), item_feature[real_idx, :].flatten()])) y_train.append(float(rate)) # Generating testing data print("Generating testing data !!!!") for user_name in removed_pre.keys(): item_set = set() for pref in removed_pre[user_name]: line = pref.split("_") item_set.add(line[1]) item_set.add(line[2]) for item in item_set: real_idx = find_item_index(item, df_table) real_user_name = find_user_name(user_name, df_table) rate = 
df_pref[(df_pref['user'] == real_user_name) & (df_pref['product'] == int(item))]['rating'].values[0] x_test.append(np.concatenate([user_feature[idx, :].flatten(), item_feature[real_idx, :].flatten()])) y_test.append(float(rate)) else: print("generating data (w/o preference)") next_node = 'next_prod' # Generating training data print("Generating training data !!!!") user_num = graph.getCount()[0] # Still getting confuse for idx in range(user_num): user_name = "user_"+str(idx) item_set = set() # calculate the normalize things mean, var = centralize(user_name, df_pref, df_table) for pref in graph.NodeList[user_name][next_node]: try: item = df_table['new2old']['item_U'][pref] except: item = df_table['new2old']['item_D'][pref] item_set.add(str(item)) for item in item_set: real_idx = find_item_index(item, df_table) real_user_name = find_user_name(user_name, df_table) rate = df_pref[(df_pref['user'] == real_user_name) & (df_pref['product'] == int(item))]['rating'].values[0] # print("user_feature", user_feature,) x_train.append(np.concatenate([user_feature[idx, :].flatten(), item_feature[real_idx, :].flatten()])) y_train.append((float(rate) - mean)/var) # Generating testing data print("Generating testing data !!!!") for user_name in removed_pre.keys(): # calculate the normalize things mean, var = centralize(user_name, df_pref, df_table) item_set = set() for pref in removed_pre[user_name]: try: item = df_table['new2old']['item_U'][pref] except: item = df_table['new2old']['item_D'][pref] item_set.add(str(item)) for item in item_set: real_idx = find_item_index(item, df_table) real_user_name = find_user_name(user_name, df_table) rate = df_pref[(df_pref['user'] == real_user_name) & (df_pref['product'] == int(item))]['rating'].values[0] x_test.append(np.concatenate([user_feature[idx, :].flatten(), item_feature[real_idx, :].flatten()])) y_test.append((float(rate) - mean) / var) original_label.append(float(rate)) print(np.asarray(x_train).shape, np.asarray(y_train).shape, 
np.asarray(x_test).shape, np.asarray(y_test).shape) # print(removed_pre) return np.asarray(x_train), np.asarray(y_train), np.asarray(x_test), [np.asarray(y_test), np.asarray(original_label)] def centralize(user_name, df_pref, df_table): real_user_name = find_user_name(user_name, df_table) mean = df_pref[df_pref['user'] == real_user_name]['rating'].mean() variance = df_pref[df_pref['user'] == real_user_name]['rating'].var() return mean, variance def find_item_index(item, df_table): return int(df_table["old2new"]['item_U'][item].split("_")[-1]) # return int(list(df_table[df_table["orginal id"] == item]["new id"])[0].split("_")[-1]) def find_user_name(user_name, df_table): return df_table["new2old"]['user'][user_name] # return df_table[df_table["new id"] == user_name]['orginal id'].values[0] def DCG_calculator(rating_list): res = 0 for i, rate in enumerate(rating_list, 1): res += (2**rate - 1) / (np.log2(i+1)) return res def hin_generate_data(user_feature, item_feature, training_ratio, G, removed_pre, df_pref, df_table, graph_type, skin_num=2): total_x_train, total_x_test, val_x, total_y_train, total_y_test, val_y = [], [], [], [], [], [] for i in range(skin_num): x_train, y_train, x_test, y_test = generate_data(user_feature[i], item_feature[i], G, removed_pre, df_pref, df_table, graph_type) n = x_train.shape[0] train_num = int(training_ratio*n) total_x_train.append(x_train[:train_num, :]) total_x_test.append(x_test) val_x.append(x_train[train_num:, :]) total_y_train = list(y_train[:train_num]) val_y = list(y_train[train_num:]) total_y_test = list(y_test[0]) return total_x_train, total_y_train, val_x, val_y, total_x_test, total_y_test def manipulate(array): inf_idx = np.where(np.isinf(array)) fin_idx = np.where(np.isfinite(array)) max_ = np.max(array[fin_idx]) array[inf_idx] = max_ return array
true
5ba7414ecc840d6c8d3114c655f1f74aa112f52f
Python
tuestudy/janggi
/gui/main.py
UTF-8
1,049
2.546875
3
[]
no_license
# coding: utf-8 import argparse from tkinter import Tk from .janggi_board import JanggiBoard formations = ['left', 'right', 'inside', 'outside'] parser = argparse.ArgumentParser(description='조선장기') parser.add_argument( '-a', '--red-formation', choices=formations, type=str, default='inside', help='초 상차림') parser.add_argument( '-b', '--green-formation', choices=formations, type=str, default='inside', help='한 상차림') parser.add_argument( '-c', '--custom-formation', metavar='file path', type=argparse.FileType('r'), help='custom formation - specify file path') args = parser.parse_args() if args.custom_formation: f = args.custom_formation args.custom_formation = f.read() f.close() root = Tk() root.title(u'조선장기') root.bind('<Escape>', lambda e: root.quit()) root.wm_attributes("-topmost", 1) root.after(1000, root.wm_attributes, "-topmost", 0) board = JanggiBoard(root) board.init_gui(args.red_formation, args.green_formation, args.custom_formation) root.mainloop()
true
3f9b0da05d85ba867d1d5f8a1cc13bce1f745225
Python
Ansen/Python_Learn
/oppdemo3.py
UTF-8
170
2.78125
3
[]
no_license
from oppdemo import Bird class happyBird(Bird): def __init__(self, more_words): print 'We are happy birds.', more_words summer = happyBird('Happy,Happy!')
true
63df2f42189a13b676cb06d869bc0a17cd609c69
Python
Grosbin/python-2021
/combinaciones.py
UTF-8
368
3.734375
4
[]
no_license
a = int(input("Enter the first digits")) b = int(input("Enter the second digits")) c = int(input("Enter the third digits")) list = [] list.append(a) list.append(b) list.append(c) for i in range(0, len(list)): for j in range(0, len(list)): for k in range(0, len(list)): if i!=j & j!=k & k!=i: print(list[i], list[j], list[k])
true
251fbe6b026c2719a68901e31ce32c60144e4d5a
Python
marczuo/LatinTextID
/textmodel.py
UTF-8
5,766
3.3125
3
[]
no_license
#!/usr/bin/env python # encoding: utf-8 import subprocess import string import re import json # Helper functions class TextModel(object): """A model for storing text identification data.""" def __init__(self, name): """Keyword arguments: name -- Identifying name of the text model. """ self.name = name self.words = {} # Dictionary of word frequencies self.wordlengths = {} # Dictionary of word length frequencies self.stems = {} # Dictionary of stem form frequencies self.sentencelengths = {} # Dictionary of sentence lenght frequencies def __repr__(self): s = "text model name: " + self.name + "\n" s += "number of words: " + str(len(self.words)) + "\n" s += "number of word lengths: " + str(len(self.wordlengths)) + "\n" s += "number of sentence lengths: " + str(len(self.sentencelengths)) + "\n" s += "number of stems: " + str(len(self.stems)) + "\n" return s def addTextFromString(self, s): """Loads a raw text from s and populates frequency dictionaries.""" known_count = 0 subst_count = 0 unknown_count = 0 stemdict = {} # Preprocess the text s = self.cleanText(s) lines = s.split('\n') for l in lines: if len(l) == 0: continue # Ignore empty lines LoW = l.split() for w in LoW: if w == "": continue # Ignore empty words w = w.lower() # Populate word and word length frequency dictionaries if w not in self.words: self.words[w] = 1 else: self.words[w] += 1 if len(w) not in self.wordlengths: self.wordlengths[len(w)] = 1 else: self.wordlengths[len(w)] += 1 # Parse the word into stem form and cache it in stemdict if w in stemdict: (stem, subst) = stemdict[w] else: stemdict[w] = self.stem(w) # Make manual adjustments for "sum" and "edo". 
if stemdict[w][0] == 'edo': stemdict[w] = ('sum', True) (stem, subst) = stemdict[w] # Increment appropriate counter and add the stem form if it is a substantive if stem == "" or stem.isspace(): unknown_count += 1 elif subst: subst_count += 1 if stem not in self.stems: self.stems[stem] = 1 else: self.stems[stem] += 1 else: known_count += 1 # Populate sentence length frequency dictionary if len(LoW) not in self.sentencelengths: self.sentencelengths[len(LoW)] = 1 else: self.sentencelengths[len(LoW)] += 1 # Print final statistics print "%d words processed in total, %d substantive, %d non-substantive, %d unknown" % \ (known_count + subst_count + unknown_count, subst_count, known_count, unknown_count) def addTextFromFile(self, filename): """Loads raw text from a file.""" with open(filename) as fp: self.addTextFromString(fp.read().strip('\n')) def stem(self, wd): """Parses the stem form of a Latin word. Returns a tuple (stem, is_subst), where is_subst is True iff stem is a substantive. IMPORTANT: This will only work if we have a copy of Whitaker's Words executable of the appropriate architecture! (A copy of OS X executable is included with the Python file.)""" def _is_subst(line): """Tests whether a line of Whitaker's Words entry is a substantive. A substantive is defined as a Noun, a Verb (including a Participle), an Adjective, or an Adverb. Abbreviations are not considered substantives.""" return (line.find(" N ")!=-1 or line.find(" V ")!=-1 or line.find(" ADJ ")!=-1\ or line.find(" ADV ")!=-1) and not line.find(" abb. 
")!=-1 output = subprocess.check_output([r"./words/words", wd]) # Call Whitaker's Words, this works only on Unix-like OS out_lines = output.split('\n') best_level = 0 # 0=not found, 1=qualified, 2=unqualified for line in out_lines: if re.search(r"\[[A-Z]+\]", line): # Found a dict entry marker in the line (something like [XXXAX]) if line.split(',')[0].split(' ')[0].strip() == "": # Not found pass if line.find("lesser") != -1 or \ line.find("veryrare") != -1 or\ line.find("uncommon") != -1: # Found a qualified (i.e., not ideal) result best_level = 1 best = line.split(',')[0].split(' ')[0].strip() best_subst = _is_subst(line) else: # Found an unqualified result return (line.split(',')[0].split(' ')[0].strip(), _is_subst(line)) if best_level == 0: return ("", False) else: return (best, best_subst) def cleanText(self, s): """Preprocesses text into an appropriate form.""" def _sent_to_lines(sent): """Converts a string of text to one sentence each line.""" sent = re.sub(r"\n", " ", sent) return re.sub(r"(\.|\!|\?|--)+\s*", "\n", sent) def _strip_punct(sent): """Strips away all punctuations from the text.""" return sent.translate(string.maketrans("",""), string.punctuation) return _strip_punct(_sent_to_lines(s)).lower() def saveModelToFiles(self, filename=None): """Saves the model to file. Keyword arguments: filename -- File name to be saved to. If empty, defaults to model name. """ if not filename: filename = self.name + ".model" with open(filename, "w") as fp: fp.write(json.dumps(self.words) + '\n' + \ json.dumps(self.wordlengths) + '\n' + \ json.dumps(self.stems) + '\n' + \ json.dumps(self.sentencelengths)) # Dump all data using json def readModelFromFiles(self, filename=None): """Loads the model from file. Keyword arguments: filename -- File name to be loaded from. If empty, defaults to model name. 
""" if not filename: filename = self.name + ".model" with open(filename, "r") as fp: lines = fp.readlines() (words, wordlengths, stems, sentencelengths) = lines # Load json data from file self.words = json.loads(words) self.wordlengths = json.loads(wordlengths) self.stems = json.loads(stems) self.sentencelengths = json.loads(sentencelengths)
true
39f52223d78549fb40c0b8856d5cf8250dd73d7f
Python
taliav26/pupila
/viewer.py
UTF-8
5,515
2.59375
3
[]
no_license
import os import re from PySide2.QtCore import QObject, QStringListModel, QUrl, Slot, Signal, Property, QAbstractItemModel class Viewer(QObject): def __init__(self): QObject.__init__(self) self._selected_file = "" self._selected_file_folder = "" self._selected_file_siblings = QStringListModel(self) self._original_selected_file = "" self._supported_file_extensions = r'.png$|.jpg$' self._next_file = "" self._show_threshold_slider = False self._show_gamma_gain_slider = False self._show_log_gain_slider = False # self._selected_file def get_selected_file(self): return self._selected_file def set_selected_file(self, file): if isinstance(file, QUrl): self._selected_file = file.toLocalFile() elif isinstance(file, str): self._selected_file = file self.on_selected_file.emit(self._selected_file) self.set_show_threshold_slider(False) self.set_show_gamma_gain_slider(False) self.set_show_log_gain_slider(False) self._original_selected_file = self._selected_file self._detect_selected_file_siblings() self._detect_selected_file_next() # self._next_file def get_next_file(self): return self._next_file @Slot(str) def set_next_file(self, next_file): self._next_file = next_file self.on_next_file.emit() # self._original_selected_file def get_original_selected_file(self): return self._original_selected_file def set_original_selected_file(self, file): self._original_selected_file = file # self._show_threshold_slider def get_show_threshold_slider(self): return self._show_threshold_slider def set_show_threshold_slider(self, value): self._show_threshold_slider = value self.on_show_threshold_slider.emit() # self._show_gamma_gain_slider def get_show_gamma_gain_slider(self): return self._show_gamma_gain_slider def set_show_gamma_gain_slider(self, value): self._show_gamma_gain_slider = value self.on_show_gamma_gain_slider.emit() # self._show_log_gain_slider def get_show_log_gain_slider(self): return self._show_log_gain_slider def set_show_log_gain_slider(self, value): self._show_log_gain_slider = 
value self.on_show_log_gain_slider.emit() # self._selected_file_siblings def get_selected_file_siblings(self): return self._selected_file_siblings @Slot(list) def set_selected_file_siblings(self, files): self._selected_file_siblings.setStringList(files) self.on_selected_file_siblings.emit() # auxiliary methods @Slot(str) def set_temp_selected_file(self, temp_file): self._selected_file = temp_file self.on_selected_file.emit(temp_file) def _detect_selected_file_siblings(self): if self._selected_file_folder != os.path.dirname(self._selected_file): self._selected_file_folder = os.path.dirname(self._selected_file) temp_siblings = [] with os.scandir(self._selected_file_folder) as d: for entry in d: if entry.is_file() and re.search(self._supported_file_extensions, entry.name): # if entry.name != self._selected_file: file_path = os.path.abspath(os.path.join(self._selected_file_folder, entry.name)) temp_siblings.append(file_path) temp_siblings = sorted(temp_siblings) print(temp_siblings) self.set_selected_file_siblings(temp_siblings) def _detect_selected_file_next(self): # Detect next file file_siblings = self._selected_file_siblings.stringList() current_index = None for i, s in enumerate(file_siblings): if self._selected_file in s: current_index = i if current_index is not None and current_index < len(file_siblings) - 1: self.set_next_file(file_siblings[current_index + 1]) else: self.set_next_file("") # signals on_next_file = Signal() on_selected_file = Signal(str) on_selected_file_siblings = Signal() on_show_threshold_slider = Signal() on_show_gamma_gain_slider = Signal() on_show_log_gain_slider = Signal() # properties show_threshold_slider = Property(bool, get_show_threshold_slider, set_show_threshold_slider, notify=on_show_threshold_slider) show_gamma_gain_slider = Property(bool, get_show_gamma_gain_slider, set_show_gamma_gain_slider, notify=on_show_gamma_gain_slider) show_log_gain_slider = Property(bool, get_show_log_gain_slider, set_show_log_gain_slider, 
notify=on_show_log_gain_slider) next_file = Property(str, get_next_file, set_next_file, notify=on_next_file) selected_file = Property(QUrl, get_selected_file, set_selected_file, notify=on_selected_file) original_selected_file = Property(str, get_original_selected_file, set_original_selected_file, notify=on_selected_file) selected_file_siblings = Property(QAbstractItemModel, get_selected_file_siblings, set_selected_file_siblings, notify=on_selected_file_siblings)
true
cdb3d62093dbc3d5cfeae48dd4e3aa48801a1792
Python
hello-starry/MotionExplorer
/scripts/optimalcontrol/Reach.py
UTF-8
3,148
2.78125
3
[]
no_license
import numpy as np import scipy.linalg from scipy.linalg import expm, logm import matplotlib.pyplot as plt import mpl_toolkits.mplot3d.axes3d as p3 import sys from scipy.spatial.transform import Rotation def splitConfig(x): q = x[:6] dq = x[6:] return [q,dq] def mergeConfig(q, dq): x = np.zeros(len(q)+len(dq)) x[:len(q)] = q x[len(q):len(q)+len(dq)] = dq return x def se3Derivative(u): X1 = np.zeros([4,4]) X2 = np.zeros([4,4]) X3 = np.zeros([4,4]) X4 = np.zeros([4,4]) X5 = np.zeros([4,4]) X6 = np.zeros([4,4]) X1[0,3] = 1 ########################## X2[1,3] = 1 ######################### X3[2,3] = 1 ######################### X4[0,1] = -1 X4[1,0] = 1 ######################### X5[0,2] = 1 X5[2,0] = -1 ######################### X6[1,2] = -1 X6[2,1] = 1 dx = X1*u[0] + X2*u[1] + X3*u[2] + X4*u[3] + X5*u[4] + X6*u[5] return dx def SE3ToConfig(q_SE3): x = np.zeros(6) x[:3] = q_SE3[0:3,3] R = Rotation.from_dcm(q_SE3[:3,:3]) x[3:] = np.array(R.as_euler('zyx')) return x def configToSE3(q): X = np.zeros([4,4]) X[0,3] = q[0] X[1,3] = q[1] X[2,3] = q[2] X[3,3] = 1 R = Rotation.from_euler('zyx', [q[3], q[4], q[5]]) X[:3,:3] = R.as_dcm() #as_matrix in 1.4.0 return X def M(q): return np.eye(6) def G(q, dq): return np.dot(np.linalg.inv(M(q)), np.eye(6)) def H(q, dq): return np.zeros(6) ## H(x) + G(x)*u def EquationOfMotion(q, dq, u): ddq = H(q,dq) + np.dot(G(q,dq), u) return ddq def drawPath(x, color): plt.plot(x[:,0],x[:,1],x[:,2],'bo-', color=color, markersize=3) def integratePath(x, u, T, color='b'): dt = 0.1 t = 0 xpath = [] while t < T: [q0,dq0] = splitConfig(x) ddq0 = EquationOfMotion(q0, dq0, u) dq1 = dq0 + dt*ddq0 q0_SE3 = configToSE3(q0) dq0_SE3 = se3Derivative(dq0) q1_SE3 = np.dot(q0_SE3, expm(dt*dq0_SE3)) q1 = SE3ToConfig(q1_SE3) x = mergeConfig(q1, dq1) xpath.append(x) t = t+dt xpath = np.array(xpath) drawPath(xpath, color=color) fig = plt.figure(0) fig.patch.set_facecolor('white') ax = fig.gca(projection='3d') #Steer system from xI to xG xI = np.array([0, 0, 0, 0, 0, 0, 1, 
0, 0, 0, 0, 0]) umin = np.array([0, 0, 0, -0.5, -0.0, -0.0]) umax = np.array([0, 0, 0, +0.5, +0.0, +0.0]) du = umax - umin T = 3 xG = np.array([+3, +0.765, 0, 0.3, 0, 0, 1, 0, 0, 0, 0, 0]) plt.plot([xG[0]],[xG[1]],[xG[2]],'go', markersize=8) [q0,dq0] = splitConfig(xI) [q1,dq1] = splitConfig(xG) q0_SE3 = configToSE3(q0) q1_SE3 = configToSE3(q1) dq0_des = logm(np.dot(np.linalg.pinv(q0_SE3), q1_SE3)) # q1_SE3 = np.dot(q0_SE3, expm(dt*dq0_SE3)) # q1 = SE3ToConfig(q1_SE3) # ## (1) can we do the change in dq0 to dq1 (relative in Tq0X) (velocity change # ## possible) for k in range(0,len(umin)): ukmin = np.zeros(6) ukmin[k] = umin[k] ukmax = np.zeros(6) ukmax[k] = umax[k] integratePath(xI, ukmin, T, color='r') integratePath(xI, ukmax, T, color='r') # # for k in range(0,5): # # u = (np.multiply(du,np.random.rand(6,1).T) + umin).flatten() # # integratePath(xI, u, T) plt.show()
true
e6dcb47552c5e09cf51741fa81cff1fb79687a32
Python
emscb/DnFauc
/get_item.py
UTF-8
162
3
3
[]
no_license
# 아이템 상세 정보 검색 from src.Item import Item while 1: name = input('아이템 이름을 입력하세요 : ') f = Item(name) f.select()
true
c91ebc1ff7fe0a48b24a3730aa7ee803eff1ef0e
Python
jason-padilla/DSFundamentals
/Leetcode/Easy/234E-IsLLPali.py
UTF-8
2,081
4.34375
4
[]
no_license
''' Given the head of a singly linked list, return true if it is a palindrome. Input: head = [1,2,2,1] Input: head = [1,2] Output: true Output: true ''' # Definition for singly-linked list. # class ListNode: # def __init__(self, val=0, next=None): # self.val = val # self.next = next class Solution: def isPalindrome(self, head: Optional[ListNode]) -> bool: pal = [] while head: pal.append(head.val) head = head.next l, r = 0, len(pal)-1 while l < r: if pal[l] != pal[r]: return False return True #Summary: # Iterate through the LL and add the values to a list # Then use two pointers to iterate through the LL and compare the values # If any of the values don't equal than return False class Solution: def isPalindrome(self, head: Optional[ListNode]) -> bool: values = [] mid, end = head, head while end and end.next: values.append(mid.val) mid = mid.next end = end.next.next if not end: mid = mid elif not end.next: mid = mid.next while mid: if mid.val != values.pop(): return False mid = mid.next return True #Summary # Use two pointers to iterate through the LL and half the values to the list and find the mid point # Then move the mid point to compare the values from the list to all the LL values left class Solution: def isPalindrome(self, head: Optional[ListNode]) -> bool: mid, end = head, head #Find mid point while end and end.next: mid = mid.next end = end.next.next #Reverse right side of the LL prev = None while mid: temp = mid.next mid.next = prev prev = mid mid = temp #Compare left and right sides left, right = head, prev while right: if left.val != right.val: return False left, right = left.next, right.next return True #Summary # Use two pointers to find the mid point # Then reverse the right side # Compare the left and right side to see if the LL is a palindrome
true
2c560e7046c5773cb2eb70cf88b0ad4729c0d13f
Python
markku63/mooc-tira-s20
/Viikko_4/stacksort.py
UTF-8
3,369
3.546875
4
[]
no_license
from random import shuffle


def check(t):
    """Greedy heuristic for 2-stack sortability of the permutation *t*.

    Approach from https://faculty.math.illinois.edu/~west/regs/stacksort.html:
    place each element into one of two stacks (or pop to the output) by local
    rules, then verify the produced output is sorted.  The heuristic is not
    exact — ``check2`` is the brute-force reference it is tested against.
    """
    if len(t) < 2:
        # A list of at most one element is always sorted.
        return True
    stk1 = []
    stk2 = []
    result = []
    for i in t:
        while True:
            # Place into an empty stack when one is available.
            if len(stk1) == 0:
                stk1.append(i)
                break
            elif len(stk2) == 0:
                stk2.append(i)
                break
            # If both stack tops are smaller than the element, pop the
            # smaller top to the output and try again.
            elif i >= stk1[-1] and i >= stk2[-1]:
                if stk1[-1] < stk2[-1]:
                    result.append(stk1.pop())
                else:
                    result.append(stk2.pop())
            else:
                # If both tops are larger than the element, push onto the
                # stack whose top is smaller ...
                if i < stk1[-1] and i < stk2[-1]:
                    if stk1[-1] < stk2[-1]:
                        stk1.append(i)
                    else:
                        stk2.append(i)
                # ... otherwise push onto the stack whose top is larger.
                elif i < stk1[-1]:
                    stk1.append(i)
                else:
                    stk2.append(i)
                break
    # Drain both stacks into the output, smaller top first.
    while len(stk1) > 0 and len(stk2) > 0:
        if stk1[-1] < stk2[-1]:
            result.append(stk1.pop())
        else:
            result.append(stk2.pop())
    while len(stk1) > 0:
        result.append(stk1.pop())
    while len(stk2) > 0:
        result.append(stk2.pop())
    # Verify the produced order.
    for i in range(1, len(result)):
        if result[i - 1] > result[i]:
            return False
    return True


def check2(t):
    """Exhaustive reference: *t* (a permutation of 1..n) is 2-stack sortable
    iff some assignment of its elements to the two stacks lets them be popped
    out as 1, 2, ..., n in order."""
    n = len(t)
    # Enumerate every binary assignment string of length n.
    ways = ["0", "1"]
    while len(ways) < 2 ** n:
        temp = []
        for w in ways:
            temp.append(w + "0")
            temp.append(w + "1")
        ways = temp
    for w in ways:
        stack1 = []
        stack2 = []
        expected = 1  # renamed from ``next`` to avoid shadowing the builtin
        for i in range(n):
            if w[i] == "0":
                stack1.append(t[i])
            else:
                stack2.append(t[i])
            # Eagerly pop whenever a stack top matches the next value.
            while True:
                if len(stack1) > 0 and stack1[-1] == expected:
                    stack1.pop()
                    expected += 1
                elif len(stack2) > 0 and stack2[-1] == expected:
                    stack2.pop()
                    expected += 1
                else:
                    break
        if expected == n + 1:
            return True
    return False


if __name__ == "__main__":
    n = 3
    while True:
        # Search random permutations for a disagreement between the greedy
        # heuristic and the brute force.  (``perm`` was previously named
        # ``input``, shadowing the builtin.)
        perm = list(range(1, n + 1))
        shuffle(perm)
        if check(perm) != check2(perm):
            print(perm)
            break

#print(check([4,5,2,3,1]) == check2([4,5,2,3,1])) # True
#print(check([2,3,4,5,1]) == check2([2,3,4,5,1])) # False
#print(check([1,5,2,4,3]) == check2([1,5,2,4,3])) # True
#print(check([4, 2, 5, 1, 3]) == check2([4, 2, 5, 1, 3])) # True
#print(check([8, 10, 2, 7, 3, 1, 4, 5, 9, 6]) == check2([8, 10, 2, 7, 3, 1, 4, 5, 9, 6]))
true
b9a120c32b4c452834e77903b6dae837e0126147
Python
LucasJun/cosmetic_spider
/cosmetic_spider/pipelines.py
UTF-8
1,820
2.671875
3
[]
no_license
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
from scrapy.conf import settings
from datetime import datetime
import time


class CosmeticSpiderPipeline(object):
    """Persist scraped cosmetics items into MongoDB, upserting by item name.

    On start-up every stored stock counter is reset to zero (the running
    spider fills them back in) and entries that went too long without an
    update are purged.
    """

    def __init__(self):
        # Connection parameters come from the Scrapy settings module.
        host = settings['MONGODB_HOST']
        port = settings['MONGODB_PORT']
        dbname = settings['MONGODB_DBNAME']
        sheetname = settings['MONGODB_SHEETNAME']
        client = pymongo.MongoClient(host=host, port=port)
        db = client[dbname]
        # Collection holding the scraped items.
        self.sheet = db[sheetname]
        # Zero out stock counts pending refresh; drop stale entries.
        for item in self.sheet.find():
            name_filter = {'item_name': item['item_name']}
            self.sheet.update_one(filter=name_filter,
                                  update={'$set': {'item_count': 0}})
            if self.is_outdated(item['date']):
                self.sheet.delete_one(item)
        print('库存清零')

    def is_outdated(self, item_date):
        """Return True when *item_date* (``%Y-%m-%d %H:%M:%S``) is older than
        the staleness threshold (200000 seconds ≈ 2.3 days)."""
        now_secs = time.mktime(time.localtime())
        item_secs = time.mktime(time.strptime(item_date, "%Y-%m-%d %H:%M:%S"))
        return (now_secs - item_secs) >= 200000.0

    def process_item(self, item, spider):
        """Upsert *item* keyed on its name and return the Mongo update result."""
        data = dict(item)
        name_filter = {'item_name': data['item_name']}
        return self.sheet.update_one(filter=name_filter,
                                     update={'$set': data},
                                     upsert=True)
true
8e96cccbde1ceac78d484968eae6db74b76c992d
Python
j-lazo/Azteca_marron
/scripts/direccion/MD49_Control.py
UTF-8
2,568
2.734375
3
[]
no_license
import numpy as np
import skfuzzy as fuzz


def FuzzControl(error):
    """Fuzzy position controller.

    Maps a position error in the universe [-480, 480) to an integer motor
    speed command in [0, 255] using seven triangular membership sets and a
    mirrored rule base:

        1. error NB -> speed B        5. error S  -> speed NS
        2. error NM -> speed M        6. error M  -> speed NM
        3. error NS -> speed S        7. error B  -> speed NB
        4. error 0  -> speed 0

    i.e. position set ``i`` fires speed set ``6 - i``.
    """
    x_pos = np.arange(-480, 480, 1)
    x_speed = np.arange(0, 255, 1)

    # Triangle parameters, ordered from the most negative set (index 0,
    # "NB") to the most positive (index 6, "B"), for both universes.
    pos_triangles = [(-480, -360, -240), (-360, -240, -120), (-240, -120, 0),
                     (-120, 0, 120), (0, 120, 240), (120, 240, 360),
                     (240, 360, 480)]
    speed_triangles = [(0, 0, 43), (0, 43, 85), (43, 85, 128), (85, 128, 171),
                       (128, 171, 213), (171, 213, 255), (213, 255, 255)]
    pos_mfs = [fuzz.trimf(x_pos, list(abc)) for abc in pos_triangles]
    speed_mfs = [fuzz.trimf(x_speed, list(abc)) for abc in speed_triangles]

    # Fire each rule: clip the mirrored speed set by the degree to which the
    # error belongs to the corresponding position set.
    clipped = []
    for idx, pos_mf in enumerate(pos_mfs):
        degree = fuzz.interp_membership(x_pos, pos_mf, error)
        clipped.append(np.fmin(degree, speed_mfs[6 - idx]))

    # Aggregate all rule outputs with an element-wise maximum.
    aggregated = clipped[0]
    for piece in clipped[1:]:
        aggregated = np.fmax(aggregated, piece)

    # Defuzzify by centroid and round to an integer speed command.
    speed = fuzz.defuzz(x_speed, aggregated, 'centroid')
    return int(np.around(speed, decimals=0))
true
4f615e6fb28f243a271d348db43c0442ae9eb384
Python
frauHello/flashFloodsPrediction
/build/lib/Tree/draw_tree.py
UTF-8
6,823
2.65625
3
[]
no_license
""" Uses graphviz to make pretty drawings of an SRPT or SRRF. """ import sys, os from utilities.utils import argparser try: import path_test # @UnusedImport except: sys.path.append(os.path.abspath('./')) import pickle from textwrap import wrap from itertools import chain import re import pydot import argparse FORMATS = ['canon', 'cmap', 'cmapx', 'cmapx_np', 'dot', 'eps', 'fig', 'gd', 'gd2', 'gif', 'gv', 'imap', 'imap_np', 'ismap', 'jpe', 'jpeg', 'jpg', 'pdf', 'plain', 'plain-ext', 'png', 'ps', 'ps2', 'svg', 'svgz', 'tk', 'vml', 'vmlz', 'vrml', 'wbmp', 'xdot'] def main(): path=r"C:\Users\user\Desktop\Prediction_model\experiment\flood\run=0_samples=100_depth=5_distinctions=all_stat=logrank.pkl" outfile=r"C:\Users\user\Desktop\Prediction_model\experiment\drawing.png" FILE = open(path, 'rb') all_trees = pickle.load(FILE) FILE.close() # how to get the label for an SRPTTree node def get_label(node): # leaf nodes have probabilities separated by commas, we don't want # to wrap them because will split split the lines on the commas ourselves label = node.__str__() ''.join(label) is_leaf = len(node.children) == 0 if is_leaf: label="time steps: {0}, number of graphs: {1}".format(node.t,node.total) else: # label formatting name, body = label.split('[', 1) body = body.strip()[:-1] # remove trailing bracket parts = re.split(r',\s+(\w+)\s*=', body) body = parts[0].strip() for i in range(1, len(parts), 2): body = '%s\\n%s=%s' % (body, parts[i].strip(), parts[i + 1].strip()) label = '[%s]\\n%s' % (name.strip(), body) pass print(is_leaf) print(label) label = label.replace('\n', r'\n') return (label, not is_leaf) # how to get a label between parent and child in an SRPTTree def get_edge_label(parent, child): s = '' # first child is the 'Yes' branch, second child the 'No' branch if hasattr(parent, 'num_yes'): if child is parent.children[0]: # print repr(parent) s = 'Y (%s)' % (parent.num_yes) s += '\\nCounts[%s]' % (', '.join('%s=%s' % item for item in 
sortedItems(parent.split.num_yes_graphs))) elif child is parent.children[1]: s = 'N (%s)' % (parent.num_no) s += '\\nCounts[%s]' % (', '.join('%s=%s' % item for item in sortedItems(parent.split.num_no_graphs))) return s fname, ext = os.path.splitext(outfile) dot_outfile = '%s.dot' % (fname) outfile = '%s.%s' % (fname, ext[1:]) # create the graph graph = buildTree(path,"", has_children=lambda node: hasattr(node, 'children') and len(node.children) > 0, get_children=lambda node: node.children, get_label=get_label, get_edge_label=get_edge_label) graph.write(dot_outfile, format='dot') graph.write(outfile, format=format) print('Tree picture written to:',outfile) def sortedItems(adict): items = adict.items() items.sort() return items def countNumYesAndNo(node, is_yes=False): num_yes = 0 freq_yes = {} num_no = 0 freq_no = {} if len(node.children) > 0: # distinction node countNumYesAndNo(node.children[0], is_yes=True) num_yes += (node.children[0].num_yes + node.children[0].num_no) for label, count in chain(node.children[0].freq_yes.iteritems(), node.children[0].freq_no.iteritems()): freq_yes[label] = freq_yes.get(label, 0) + count countNumYesAndNo(node.children[1], is_yes=False) num_no += (node.children[1].num_yes + node.children[1].num_no) for label, count in chain(node.children[1].freq_yes.iteritems(), node.children[1].freq_no.iteritems()): freq_no[label] = freq_no.get(label, 0) + count else: # leaf node if is_yes: num_yes = node.count freq_yes.update(node.frequency) else: num_no = node.count freq_no.update(node.frequency) node.num_yes = num_yes node.freq_yes = freq_yes node.num_no = num_no node.freq_no = freq_no def buildTree(treename, root, has_children=lambda node: hasattr(node, 'children'), get_children=lambda node: node.children, get_label=str, get_edge_label=lambda: '', label_width=60): """Creates a dot graph file for use with visgraph's dot program. 
This works on many kinds of trees because of using has_children, get_children, and get_label which are user supplied functions for accessing information about nodes in the tree. has_children : should return True if the node has children, False otherwise get_children : should return a list of child nodes get_label : should return the label for the node, defaults to str() label_width : the number of columns to wrap the node labels to """ graph = pydot.Dot() buildGraph(graph, root, has_children, get_children, get_label, get_edge_label, label_width) return graph def buildGraph(graph, node, has_children, get_children, get_label, get_edge_label, label_width): # use the accessor function to get the label for the node node_label = get_label(node) # it can return a tuple where the first index is the label and # the second is a boolean indicating if wrapping should be done or not if type(node_label) is tuple: node_label, do_wrap = node_label else: do_wrap = True # quotes need to be escaped node_label = node_label.replace('"', '\\"') if do_wrap: # wrap returns a list, so join it into a single string, newlines must # be escaped also node_label = '\\n'.join(wrap(node_label, label_width)) else: # escape the newlines node_label = node_label.replace('\n', '\\n"') # create a new node and add it to the graph gnode = pydot.Node(name=id(node), label=node_label) graph.add_node(gnode) # use accessor to check for children if has_children(node): # use accessor to get a list of children for child in get_children(node): # recursively call build graph on the children cnode = buildGraph(graph, child, has_children, get_children, get_label, get_edge_label, label_width) # add the edge, using the accessor to get the edge label edge = pydot.Edge(src=gnode, dst=cnode, label=get_edge_label(node, child)) graph.add_edge(edge) return gnode if __name__ == '__main__': main()
true
058f74d9d2737566f97ba3fcde9853ecb6717433
Python
ucsb-seclab/dr_checker
/helper_scripts/pp_scripts/pp_jsons.py
UTF-8
8,996
2.625
3
[ "BSD-2-Clause" ]
permissive
import argparse
import multiprocessing
import os
import time
import json
import sys

# NOTE: the module was Python 2 (``print`` statements, tuple-unpacking
# parameters); it is ported to Python 3 below with call sites unchanged.


def log_info(*args):
    """Print *args* space-separated behind an info marker."""
    log_str = "[*] "
    for curr_a in args:
        log_str = log_str + " " + str(curr_a)
    print(log_str)


def log_error(*args):
    """Print *args* space-separated behind an error marker."""
    log_str = "[!] "
    for curr_a in args:
        log_str = log_str + " " + str(curr_a)
    print(log_str)


def log_warning(*args):
    """Print *args* space-separated behind a warning marker."""
    log_str = "[?] "
    for curr_a in args:
        log_str = log_str + " " + str(curr_a)
    print(log_str)


def log_success(*args):
    """Print *args* space-separated behind a success marker."""
    log_str = "[+] "
    for curr_a in args:
        log_str = log_str + " " + str(curr_a)
    print(log_str)


def setup_args():
    """Build the command-line parser for this script."""
    parser = argparse.ArgumentParser(description="Script that converts DR.CHECKER LLVM based jsons to jsons "
                                                 "containing source code info.")
    parser.add_argument('-d', action='store', dest='dr_jsons',
                        help='Destination directory where all the DR.CHECKER jsons should be read from.')
    parser.add_argument('-o', action='store', dest='output_jsons',
                        help='Destination directory where all the post-processed jsons should be stored.')
    parser.add_argument('-k', action='store', dest='kernel_src_dir', default=None,
                        help='Absolute path to the kernel sources.')
    parser.add_argument('-r', action='store', dest='original_src_dir', default=None,
                        help='Path to the original kernel sources, that should be replaced with alternate source dir.')
    parser.add_argument('-n', action='store', dest='new_src_dir', default=None,
                        help='Path to the new kernel sources, that should be used instead or original src dir.')
    return parser


def read_file_line(lineno, given_file, k_src, o_src, a_src):
    """Return line *lineno* (1-based, stripped) of *given_file*, or '' when
    the line or file is unavailable.

    :param lineno: line number to read from.
    :param given_file: file path in the original json.
    :param k_src: Absolute path to the kernel sources (joined onto relative paths).
    :param o_src: Original kernel source prefix to replace, or None.
    :param a_src: Replacement source prefix, or None.
    """
    if lineno <= 0:
        return ""
    if o_src is not None and a_src is not None:
        if os.path.isabs(given_file):
            given_file = given_file.replace(o_src, a_src)
    if not os.path.isabs(given_file):
        if k_src is not None:
            given_file = os.path.join(k_src, given_file)
    if os.path.exists(given_file) and os.path.isfile(given_file):
        # FIX: close the handle (the original leaked an open file per call).
        with open(given_file, "r") as fp:
            all_lines = fp.readlines()
        to_ret = ""
        if len(all_lines) >= lineno:
            to_ret = all_lines[lineno - 1].strip()
        return to_ret
    return ""


def process_instr(instr_obj, k_src, o_src, a_src):
    """Replace the LLVM instruction text in *instr_obj* with the matching
    source-code line, when it can be read from disk.

    The two key spellings ("lineno"/"instr_loc", "file"/"instr_file") cover
    the two json layouts DR.CHECKER emits.
    """
    line_no = -1
    if "lineno" in instr_obj:
        line_no = instr_obj["lineno"]
    if "instr_loc" in instr_obj:
        line_no = instr_obj["instr_loc"]
    instr_file = None
    if "file" in instr_obj:
        instr_file = instr_obj["file"]
    if "instr_file" in instr_obj:
        instr_file = instr_obj["instr_file"]
    target_instr = None
    # Header files are skipped: their line info is unreliable for instructions.
    if instr_file is not None and not instr_file.endswith(".h"):
        curr_l = read_file_line(line_no, instr_file, k_src, o_src, a_src)
        if curr_l:
            target_instr = curr_l
    if target_instr is not None:
        if "instr" in instr_obj:
            instr_obj["instr"] = target_instr


def process_warning(src_warnings_obj, k_src, o_src, a_src):
    """Rewrite one warning object in place: resolve its "at" line and every
    instruction of its trace to source code text."""
    curr_data = src_warnings_obj["warn_data"]
    curr_line = curr_data["at_line"]
    if "at_file" in curr_data:
        curr_file = curr_data["at_file"]
        if not curr_file.endswith(".h"):
            curr_cont = read_file_line(curr_line, curr_file, k_src, o_src, a_src)
            curr_data["at"] = curr_cont
    for curr_ins in curr_data["inst_trace"]:
        process_instr(curr_ins, k_src, o_src, a_src)


def process_context(context_obj, k_src, o_src, a_src):
    """Resolve every instruction of a calling-context list to source text."""
    for curr_in in context_obj:
        process_instr(curr_in, k_src, o_src, a_src)


def process_all_context(src_context_obj, k_src, o_src, a_src, fp):
    """Process one context object and stream it to *fp* as json."""
    process_context(src_context_obj["context"], k_src, o_src, a_src)
    fp.write("{\"num_warnings\": " + str(src_context_obj["num_warnings"]) + ",\n")
    fp.write("\"context\":" + json.dumps(src_context_obj["context"]) + ",\n")
    fp.write("\"warnings\":[")
    add_comma = False
    for curr_warning in src_context_obj["warnings"]:
        if add_comma:
            fp.write(",\n")
        process_warning(curr_warning, k_src, o_src, a_src)
        fp.write(json.dumps(curr_warning))
        add_comma = True
    fp.write("]}")


def process_json(task):
    """Process one (src_json, output_json, ker_src, orig_src, alter_src) task.

    FIX: Python 3 removed tuple-unpacking parameters
    (``def process_json((a, b, ...))``), so the task tuple is unpacked
    explicitly; the ``p.map(process_json, all_tasks)`` call site is unchanged.
    """
    src_json, output_json, ker_src, orig_src, alter_src = task
    with open(src_json, "r") as fp:
        fp_cont = fp.read()
    if len(fp_cont) > 0:
        json_obj = json.loads(fp_cont)
        if "all_contexts" in json_obj:
            with open(output_json, "w") as fp:
                all_contexts = json_obj["all_contexts"]
                fp.write("{\"all_contexts\":[\n")
                add_comma = False
                for curr_context in all_contexts:
                    if add_comma:
                        fp.write(",\n")
                    process_all_context(curr_context, ker_src, orig_src, alter_src, fp)
                    add_comma = True
                fp.write("]}")


def usage():
    """Report invalid usage and exit with a non-zero status."""
    log_error("Invalid Usage.")
    log_error("Run: python ", __file__, "--help", ", to know the correct usage.")
    sys.exit(-1)


def main():
    """Parse arguments, fan the json files out over a process pool."""
    arg_parser = setup_args()
    parsed_args = arg_parser.parse_args()
    # check usage
    if parsed_args.dr_jsons is None or parsed_args.output_jsons is None:
        usage()
    kernel_dir = None
    if parsed_args.kernel_src_dir is not None:
        kernel_dir = os.path.realpath(parsed_args.kernel_src_dir)
    original_src_dir = None
    if parsed_args.original_src_dir is not None:
        original_src_dir = os.path.realpath(parsed_args.original_src_dir)
    alternate_src_dir = None
    if parsed_args.new_src_dir is not None:
        alternate_src_dir = os.path.realpath(parsed_args.new_src_dir)
    log_info("Provided DR.CHECKER json dir:", parsed_args.dr_jsons)
    log_info("Provided Output dir:", parsed_args.output_jsons)
    log_info("Provided Kernel source dir:", kernel_dir)
    log_info("Provided original source dir:", original_src_dir)
    log_info("Provided alternate source dir:", alternate_src_dir)
    # create output directory
    os.system('mkdir -p ' + parsed_args.output_jsons)
    all_tasks = []
    for curr_json in os.listdir(parsed_args.dr_jsons):
        c_fp = os.path.join(parsed_args.dr_jsons, curr_json)
        if os.path.isfile(c_fp) and curr_json.endswith(".json"):
            output_p = os.path.join(parsed_args.output_jsons, curr_json)
            all_tasks.append((c_fp, output_p, kernel_dir, original_src_dir, alternate_src_dir))
    log_info("Processing all jsons:", len(all_tasks), " in multiprocessing mode")
    p = multiprocessing.Pool()
    st = time.time()
    p.map(process_json, all_tasks)
    et = time.time() - st
    log_info("Total time:", et, " seconds.")


if __name__ == "__main__":
    main()
true
bbc9c329224152d783b7a87141dbf7d5ce5e1698
Python
Huaguiyuan/chi
/sort.py
UTF-8
1,130
3.375
3
[]
no_license
def merge(list1, list2):
    """Takes two lists of bibtex dictionaries and merges list1 with list2
    without redundances.

    An entry of list2 is appended (to list1, which is mutated and returned)
    only when neither its 'ID' nor its 'title' is already known; entries
    missing either key are tolerated.
    """
    merged = list1
    seen_ids = [entry['ID'] for entry in list1]
    seen_titles = [entry['title'] for entry in list1]
    for entry in list2:
        known_id = 'ID' in entry and entry['ID'] in seen_ids
        known_title = 'title' in entry and entry['title'] in seen_titles
        if known_id or known_title:
            continue
        if 'ID' in entry:
            seen_ids.append(entry['ID'])
        if 'title' in entry:
            seen_titles.append(entry['title'])
        merged.append(entry)
    return merged


def exclude(list1, list2):
    """Takes two lists of bibtex dictionaries and removes entries from list1
    that are in list2 (matched by 'ID' or by 'title')."""
    banned_ids = [entry['ID'] for entry in list2]
    banned_titles = [entry['title'] for entry in list2]
    kept = []
    for entry in list1:
        if entry['ID'] in banned_ids or entry['title'] in banned_titles:
            continue
        kept.append(entry)
    return kept
true
24fe9ef7bf5854f7b54e56665504da70c735ecd8
Python
AmitBaanerjee/Data-Structures-Algo-Practise
/leetcode problems/NextGreaterElement.py
UTF-8
2,346
3.78125
4
[]
no_license
# 496. Next Greater Element I
# You are given two arrays (without duplicates) nums1 and nums2 where nums1's
# elements are a subset of nums2.  For each element x of nums1, find the first
# element greater than x to its right in nums2; output -1 when none exists.
#
# Example 1: nums1 = [4,1,2], nums2 = [1,3,4,2]  ->  [-1,3,-1]
# Example 2: nums1 = [2,4],   nums2 = [1,2,3,4]  ->  [3,-1]


class Solution(object):
    def nextGreaterElement(self, nums1, nums2):
        """
        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: List[int]

        Monotonic-stack solution: one pass over nums2 builds a map from each
        value to its next greater element, replacing the original O(n*m)
        rescans with O(n + m) time and O(m) space.  (Values are unique per
        the problem statement, so the map is well defined.)
        """
        next_greater = {}   # value -> first greater value to its right in nums2
        stack = []          # values still waiting for their next greater element
        for num in nums2:
            # Everything on the stack smaller than num has found its answer.
            while stack and stack[-1] < num:
                next_greater[stack.pop()] = num
            stack.append(num)
        # Values left on the stack have no greater element to their right.
        return [next_greater.get(num, -1) for num in nums1]
true
edd3fa4ddfee0eb810050f10116f54465e31a3ba
Python
150170410/innovativeproject-resttest
/src/indor/command_factory.py
UTF-8
798
2.8125
3
[ "MIT" ]
permissive
import re

from .indor_exceptions import SyntaxErrorClassNotExists
from .singleton import Singleton


class CommandFactory(object, metaclass=Singleton):
    """Singleton registry mapping command-class names to their types."""

    def __init__(self):
        # Maps registered class name -> class object.
        self.dict = {}

    def add_class(self, class_name, class_type):
        """Register *class_type* under *class_name*."""
        self.dict[class_name] = class_type

    def get_class(self, prefix, suffix, result_collector):
        """Instantiate the registered class named ``prefix + suffix.title()``.

        Raises SyntaxErrorClassNotExists when no such class is registered.
        """
        new_class_name = prefix + suffix.title()
        if new_class_name not in self.dict:
            raise SyntaxErrorClassNotExists(prefix, suffix, new_class_name)
        return self.dict[new_class_name](result_collector)

    def get_class_children(self, class_name):
        """Return pretty names of every registered class whose name extends
        *class_name* with at least one letter."""
        prog = re.compile(class_name + "[A-Za-z]+")
        # BUG FIX: dict.iteritems() is Python 2 only and crashes here — the
        # rest of the file uses Python 3 syntax (``metaclass=`` keyword).
        return [child_type.pretty_name
                for child_name, child_type in self.dict.items()
                if prog.match(child_name)]
true
c81256e21d0f3b51e9edcf123bcda9272449071b
Python
n-fallahinia/realtime-force-estimation
/test.py
UTF-8
2,900
2.609375
3
[ "Apache-2.0" ]
permissive
""" Script to evaluate the model using test data Make sure to run the "build_dataset.py" to creat the data folder Navid Fallahinia - 07/11/2020 BioRobotics Lab """ import argparse import os from packaging import version os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' from model.input_fn import * from model.evaluation import * from model.utils.utils import Params parser = argparse.ArgumentParser() parser.add_argument('--model_dir', default='./log/20200825-143805/', help="Experiment directory containing params.json") parser.add_argument('--data_dir', default='./data_single', help="Directory containing the dataset") parser.add_argument('--param_dir', default='./experiments', help="Experiment directory containing params.json") if __name__ == '__main__': # Set the random seed for the whole graph tf.random.set_seed(230) args = parser.parse_args() print("TensorFlow version: ", tf.__version__) assert version.parse(tf.__version__).release[0] >= 2, \ "This notebook requires TensorFlow 2.0 or above." # check if the data is available assert os.path.exists(args.data_dir), "No data file found at {}".format(args.data_dir) # Load the parameters from json file json_path = os.path.join(args.param_dir, 'params.json') assert os.path.isfile(json_path), "No json configuration file found at {}".format(json_path) params = Params(json_path) # check if the model directory is available assert os.path.exists(args.model_dir), "No model file found at {}".format(args.model_dir) model_path = os.path.join(args.model_dir, 'best_full_model_path') test_data_dir = os.path.join(args.data_dir, 'test') # Get the filenames from the train and dev sets test_filenames = [os.path.join(test_data_dir, f) for f in os.listdir(test_data_dir)] # Get the train images list images_list_test = glob.glob(test_filenames[0] + '/*.jpg') # Get the label forces force_list_test = load_force_txt(test_filenames[1]+ '/force.txt',len(images_list_test)) # Specify the sizes of the dataset we train on and evaluate on params.test_size = 
len(images_list_test) # Create the two iterators over the two datasets print('=================================================') print('[INFO] test data is built by {0} images'.format(len(images_list_test))) test_dataset = input_fn(False, images_list_test, force_list_test, params= params) # Open the saved model from log file the model print('=================================================') loaded_model = tf.saved_model.load(model_path) print('[INFO] Model loaded...') # Test the model print('=================================================') test_model = Evaluate(loaded_model, test_dataset) test_model.test(params) print('=================================================')
true
d161fc779aa65a77d8402cc91cbc8f32eacf9f16
Python
AndreyATGB/Facebook-Text-Extractor
/src/textExtract_all_onefile.py
UTF-8
3,017
3.40625
3
[]
no_license
import os, sys, time, random

"""
HTML Facebook log extractor
message starts at <div class="message_header"> with usr who sent it.
messages between <p> and </p>
user between <span class="user"> and </span>
"""


def findMsgs(filename, shuffle=False, debug=False):
    """Extract 'FirstName: message' lines from one Facebook HTML log and
    write them into a folder named after the file (optionally also a
    shuffled copy)."""
    t_start = time.time()

    # Read everything after the '</style><title>' marker into one string,
    # with the newlines stripped.
    raw = ''
    with open(filename, 'r', encoding="utf8") as f:
        reading = False
        for line in f:
            if line.startswith('</style><title>'):
                reading = True
            if reading:
                raw += line.replace('\n', '')

    # Each message fragment begins at a user span.
    fragments = raw.split('<span class="user">')
    if debug:
        with open(f'debug_(unknown).txt', 'w', encoding="utf8") as fw:
            for fragment in fragments:
                fw.write(fragment + "\n")

    # HTML entities left in message bodies, decoded in this exact order
    # (same order as the original chained replaces).
    replacements = (
        ("&#039;", "'"),
        ("&quot;", '"'),
        ("&gt;", '>'),
        ("&lt;", '<'),
        ("&amp;", '&'),
        ("&#123;", '{'),
        ("&#125;", '}'),
        ("&#064;", '@'),
    )

    collected = ''
    for fragment in fragments:
        # Skip the leading non-message fragment.
        if fragment.startswith('</style>'):
            continue
        # Sender name sits before the closing span; keep first name only.
        sender = fragment.split('</span>')[0]
        if sender == '':
            sender = 'Unknown'
        sender = sender.split(' ')[0]
        # Message body sits between <p> and </p>.
        body = fragment.split('<p>')[1].split('</p>')[0]
        for entity, char in replacements:
            body = body.replace(entity, char)
        # Ignore empty messages.
        if body != '':
            collected += sender + ": " + body + '\n'

    # Write results into a folder named after the source file.
    save_folder = filename.replace('.html', '')
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)
    if shuffle:
        shuffled_lines = collected.split('\n')
        random.shuffle(shuffled_lines)
        with open(f'{save_folder}/{save_folder}_shuffled.txt', 'w', encoding="utf8") as fw:
            fw.write('\n'.join(shuffled_lines))
    # Save regular one anyway.
    with open(f'{save_folder}/{save_folder}.txt', 'w', encoding="utf8") as fw:
        fw.write(collected)
    print(f"Extracted (unknown) in {time.time()-t_start:.1f}s.")


if __name__ == '__main__':
    print('This program extracts messages from all .html files in the same folder\nand creates one text file, everyone has their first name before their message.')
    shuffle = False
    if input('Do you want to shuffle messages (y/n)? ') == 'y':
        shuffle = True
    for filename in os.listdir():
        if filename.endswith('.html'):
            print(f"-----Extracting from (unknown)-----")
            findMsgs(filename, shuffle)
true
635d1981509fdd4d32ea35ab22a644085cb390bb
Python
mulligatawny/me408-hw5
/p1.py
UTF-8
883
3.09375
3
[]
no_license
# 1/4
import numpy as np
import matplotlib.pyplot as plt


def error(N):
    """Max-norm error of the N-mode trigonometric interpolant of
    f(x) = 1/(1 + 2*sin(x)^2) on [0, 2*pi].

    The interpolant S_N is assembled from the fft-shifted FFT of N
    equispaced samples and evaluated on a fine 10000-point grid.
    """
    x = np.linspace(0, 2*np.pi, 10000)
    # N equispaced samples with the duplicate endpoint dropped.
    x_N = np.linspace(0, 2*np.pi, N+1)[:-1]
    # f = np.abs(np.sin(x))**3
    f = 1/(1+2*np.sin(x)**2)
    # f_N = np.abs(np.sin(x_N))**3
    f_N = 1/(1+2*np.sin(x_N)**2)
    k = np.arange(-N/2, N/2)
    # Centered (shifted) discrete Fourier coefficients.
    Fk = np.fft.fftshift(np.fft.fft(f_N))/N
    S_N = np.zeros_like(x, dtype='float')
    for i in range(N):
        S_N = S_N + Fk[i]*np.exp(1j*k[i]*x)
    # BUG FIX: measure the maximum *absolute* deviation.  The previous
    # signed ``np.max(f - S_N)`` can underestimate the error or even go
    # negative, which would also break the log-log plot below.
    err = np.max(np.abs(f - np.real(S_N)))
    return err


N = np.array([4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048])
e = np.zeros_like(N, dtype='float')
for i in range(len(N)):
    e[i] = error(N[i])

plt.loglog(N, e, 'o-', color='salmon')
#plt.loglog(N, 1/N**3, '-', color='teal', label='slope 3')
plt.grid(which='both')
plt.xlabel('$N$')
plt.ylabel('Error')
plt.title('$1/(1+2sin(x)^{2})$')
plt.legend()
plt.show()
true
8af9c1b512308f6f9cb530b032331efbeef322ba
Python
mlz000/Algorithms
/leetcode/636.py
UTF-8
502
2.734375
3
[]
no_license
class Solution(object):
    def exclusiveTime(self, N, logs):
        """LeetCode 636: exclusive runtime of each of N functions.

        Each log entry is "fid:start|end:timestamp"; 'end' timestamps are
        inclusive, hence the ``+ 1`` adjustments.  A stack tracks the
        currently running function so time is charged to exactly one
        function at any tick.
        """
        totals = [0] * N
        call_stack = []     # function ids, innermost call on top
        prev_tick = 0       # timestamp up to which time has been charged
        for entry in logs:
            fid_str, event, tick_str = entry.split(':')
            fid = int(fid_str)
            tick = int(tick_str)
            if event == 'start':
                # Charge the elapsed interval to the function being paused.
                if call_stack:
                    totals[call_stack[-1]] += tick - prev_tick
                call_stack.append(fid)
                prev_tick = tick
            else:
                # 'end' at tick means the function ran through tick inclusive.
                totals[call_stack.pop()] += tick - prev_tick + 1
                prev_tick = tick + 1
        return totals
true
a64180e846ddc8bd9ee6d43c9f555afe49fd8c63
Python
Amel294/amel
/ml/day14/q1b.py
UTF-8
818
2.828125
3
[]
no_license
def ae(a):
    """Train a tiny MLP on the iris data set with ``max_iter=a``.

    Prints a banner for the known iteration counts, the fitted estimator, a
    sample prediction, the confusion matrix on the training data, and the
    learned weight matrices.  Returns None (called for its printed output).
    """
    from sklearn.datasets import load_iris
    from sklearn.neural_network import MLPClassifier

    iris = load_iris()
    if a == 50:
        print('When iter is 50')
    elif a == 500:
        # FIX: corrected the "Whaen" typos in the user-facing messages.
        print('When iter is 500')
    elif a == 1000:
        print('When iter is 1000')
    X = iris.data
    y = iris.target
    # One hidden layer of 2 tanh units, plain SGD.  Renamed from ``knn``:
    # this is an MLP, not k-nearest-neighbours.  ``(2,)`` makes the tuple
    # intent explicit — the original ``(2)`` was just the int 2, which
    # sklearn wraps into a one-layer list anyway.
    mlp = MLPClassifier(activation='tanh', hidden_layer_sizes=(2,),
                        solver='sgd', learning_rate_init=0.01,
                        max_iter=a, verbose=True)
    print(mlp)
    mlp.fit(X, y)
    print(mlp.predict([[3, 5, 4, 2]]))
    p = mlp.predict(X)
    from sklearn.metrics import confusion_matrix
    print(confusion_matrix(y, p))
    print(mlp.coefs_)


ae(50)
ae(500)
ae(1000)
true
82e02954a2aa9fff23f5b5148eb941cf76ba5085
Python
17637294588/x_xinxin
/users/serializer.py
UTF-8
7,751
2.8125
3
[]
no_license
from users import models
from rest_framework import serializers
from django.contrib.auth.hashers import check_password


# Define the serializers.
class userSerializer(serializers.ModelSerializer):
    '''
    Signup serializer: the input it receives is request.data.

    The incoming data carries a password2 field that must not be stored in
    the database; it exists only to confirm that it matches the first
    password, so a dedicated write-only field has to be declared for it.
    '''
    password2 = serializers.CharField(
        min_length=6,   # at least 6 characters
        max_length=20,  # at most 20 characters
        required=True,  # request.data must contain a password2 field
        write_only=True,  # accepted on input only, never serialized back out
        error_messages={
            'min_length': '太短了',
            'max_length': '太长了',
        }  # key = error code, value = message returned to the client
        # write_only: password2 only exists during deserialization (input);
        # it is not a database column, so serialization never returns it.
    )
    password = serializers.CharField(
        min_length=6,   # at least 6 characters
        max_length=20,  # at most 20 characters
        required=True,  # request.data must contain a password field
        write_only=True,  # the raw password is never echoed back to clients
        error_messages={
            'min_length': '太短了',
            'max_length': '太长了',
        }  # key = error code, value = message returned to the client
    )

    # Object-level validation across fields.
    def validate(self, attrs):
        # attrs == request.data as a whole
        if attrs['password'] != attrs['password2']:
            raise serializers.ValidationError('两次密码不一样')
        return attrs

    # POST submits data for storage: create -> insert, update -> modify.
    def create(self, validate):
        # validate == the attrs dict returned by validate() above.
        # password2 must be removed before writing to the database.
        del validate['password2']
        users = models.Users.objects.create_user(**validate)  # create the user
        return users

    '''
    Meta below drives deserialization; `fields` lists the accepted input
    fields — which is why password2 appears even though it is never stored.
    '''
    class Meta:
        fields = ('username', 'password', 'password2', 'email', 'mobile')
        model = models.Users  # target model/table


'''
Deserializing an incoming payload uses the `data=` argument:
    socialuser = serializer.socialuserSerializer(data=queryDict_socialuser,
                                                 context={'outer_key': users})
Serializing an existing object uses the `instance=` argument:
    data_socialuser = serializer.socialuserSerializer(instance=socialuser_data)
The serialized payload handed to the frontend is data_socialuser.data
'''


# Serializer for the user social table.
class socialuserSerializer(serializers.ModelSerializer):
    # The foreign key is declared read-only so serialized queries include it.
    # PrimaryKeyRelatedField: renders only the related object's primary key.
    user = serializers.PrimaryKeyRelatedField(read_only=True)

    class Meta:
        fields = "__all__"
        model = models.SocialUser  # target model/table
        related_name = 'user_info'  # NOTE(review): not a Meta option DRF recognizes — verify intent

    def create(self, validated_data):
        # The FK object must be passed in through the serializer context
        # (`user` is the model's foreign-key field).
        users = models.SocialUser.objects.create(user=self.context['outer_key'], **validated_data)  # create the record
        return users


# Serializer that validates the old password and stores a new one.
class ChangePasswordSerializer(serializers.Serializer):
    # Both fields are write-only: passwords are never serialized back out.
    old_password = serializers.CharField(
        min_length=8,
        max_length=20,
        required=True,  # the payload must contain this field
        write_only=True,  # accepted on input only, never returned
        error_messages={
            'min_length': '太短了',
            'max_length': '太长了',
        }
    )
    new_password = serializers.CharField(
        min_length=8,
        max_length=20,
        required=True,  # the payload must contain this field
        write_only=True,  # accepted on input only, never returned
        error_messages={
            'min_length': '太短了',
            'max_length': '太长了',
        }
    )

    def update(self, instance, validate):
        '''
        instance: the current user object passed in by the view.
        validate: the payload the view received from the frontend (Vue).
        '''
        # Pull both passwords out of the payload sent by the frontend.
        old_password = validate['old_password']
        new_password = validate['new_password']
        if check_password(old_password, instance.password):
            # The submitted old password matches the stored hash:
            # set and persist the new one.
            instance.set_password(new_password)
            instance.save()
        else:
            # Reject the request with an explicit error.
            raise serializers.ValidationError('旧密码输入错误')
        # On success hand the instance back to the view.
        return instance


# Password-recovery serializer.  Only `password` is touched, so a plain
# Serializer (not ModelSerializer) is sufficient.
class FindPasswordSerializer(serializers.Serializer):
    password = serializers.CharField(
        write_only = True,
        min_length = 8,
        max_length = 20,
        error_messages={
            'min_length': '太短了',
            'max_length': '太长了',
        }
    )

    def update(self, instance, validate):
        # instance: current user object; validate: payload received by the
        # view.  set_password hashes before storing.
        instance.set_password(validate['password'])
        instance.save()
        return instance  # hand the modified user back to the view


# City table serializer.
class CitySerializer(serializers.ModelSerializer):
    class Meta:
        model = models.City
        fields = ('city_id','name')


# Address table serializer.
class addressSerializer(serializers.ModelSerializer):
    # FK rendered read-only as the related primary key for queries.
    user = serializers.PrimaryKeyRelatedField(read_only=True)
    # Write-only integer FK ids intended for deserialization/persisting.
    province = serializers.IntegerField(label='省ID', write_only= True)
    city = serializers.IntegerField(label='市ID', write_only= True)
    town = serializers.IntegerField(label='区ID', write_only= True)
    # Output side: the DB stores ids, but queries should show names, so the
    # FK output type is re-declared as StringRelatedField.
    # NOTE(review): these re-assignments shadow the write_only IntegerFields
    # above — only the StringRelatedField declarations take effect on the
    # class.  create() reads the FK objects from the context instead, so
    # confirm the IntegerField declarations are actually needed.
    province = serializers.StringRelatedField(read_only=True)
    city = serializers.StringRelatedField(read_only=True)
    town = serializers.StringRelatedField(read_only=True)

    class Meta:
        fields = "__all__"
        model = models.Address  # target model/table

    def create(self, validated_data):
        # The FK objects are passed in through the serializer context.
        users = models.Address.objects.create(
            user=self.context['user'],
            province=self.context['province'],
            city=self.context['city'],
            town=self.context['town'],
            **validated_data
        )  # create the record
        return users
true
8d9fb3c98401a6601504c611b05a862a08dc2866
Python
ucandoitrohit/Python3
/Automation/Math_Module_Python/1.math_example.py
UTF-8
151
3.0625
3
[]
no_license
# Print results from a handful of math-module functions, one per line.
import math

for value in (
    math.pi,           # circle constant
    math.e,            # Euler's number
    math.degrees(1),   # one radian expressed in degrees
    math.acos(.5),     # arc-cosine of 0.5
    math.asin(.5),     # arc-sine of 0.5
    math.exp(2),       # e squared
    math.log(10, 10),  # log base 10 of 10
):
    print(value)
true
0202591ea0cc40ef34be9fa8c89430e7bd0110ed
Python
charliethomson/block_game_old
/include/parser.py
UTF-8
8,058
3.171875
3
[]
no_license
from os.path import exists as file_exists
from pyglet.window import key as keys

# General parser for my file format: ":NAME=VALUE;" entries, whitespace-free
# except for newlines/tabs, optionally translated into pyglet keybinds.


class ParserError(Exception):
    """Base error for this parser module."""
    pass


class IncorrectGivenDataError(ParserError):
    """Raised when user_input == "given" but no given_data was supplied."""
    pass


class ParserUnexpectedCharacterError(ParserError):
    """Raised when the character stream contains something unexpected."""
    pass


class UnknownKeycodeError(ParserError):
    """Raised when a keybind value is not a pyglet key name."""
    pass


def _fix_keybinds(vars: dict):
    """
    Makes the keys how the program will expect them, gets the values from
    pyglet for key codes
    P.S, this function could be like 5 lines but errors so it's 30
    """
    assert isinstance(
        vars, dict
    ), f"Error fixing keybind vars, type mismatch; {type(vars)} -> {dict}"
    new = {}
    for key in list(vars.keys()):
        ## Error Checking ##
        # Every binding name must be a digit-free string.
        if not isinstance(key, str):
            raise ParserError(
                f"Command incorrect - type mismatch; {type(key)} -> {str}"
            )
        for char in key:
            if char.isnumeric():
                raise ParserError(
                    f"Command incorrect - command {key} contains bad symbol {char}"
                )
        # The value must name an attribute of pyglet.window.key.
        if not hasattr(keys, vars[key]):
            raise UnknownKeycodeError(f"Keycode {key} not found")
        ## /Error Checking ##
        # Getting the keycode from pyglet.window.key, and adding it to the new dictionary
        new[key.upper()] = getattr(keys, vars[key])
    ## Error Checking ##
    # All pyglet key codes are ints; anything else means the lookup failed.
    for value in list(new.values()):
        if not isinstance(value, int):
            raise ParserError(
                f"Keybind translation failed, check ./saves/<save>/keybinds for errors: {value}"
            )
    ## /Error Checking ##
    return new


def _fix_types(vars: dict):
    """Convert the raw string values produced by parse() into Python types
    (None passthrough, negative numbers, floats, ints, lists, strings)."""
    # I don't know a situation in which this would happen, but checks ya know
    assert isinstance(
        vars, dict
    ), f"Unable to fix var types, `vars` incorrect type: {type(vars)}"
    new_vars = {}
    for key in list(vars.keys()):
        value = vars[key]
        # boolean / None — only reachable for defaults injected by
        # _check_missing_vars; parse() itself only produces strings.
        if value in (True, False, None):
            new_vars[key.upper()] = value
        elif "-" in value and _check_negative(value):
            # NOTE(review): _check_numeric is applied to the full string
            # including the minus sign, and "-5".isnumeric() is False, so
            # this branch always takes int(value) — a negative float like
            # "-5.5" raises ValueError here.  Confirm intended behaviour.
            if _check_numeric(value):
                new_vars[key.upper()] = float(value)
            else:
                new_vars[key.upper()] = int(value)
        # float
        elif "." in value and _check_numeric(value):
            new_vars[key.upper()] = float(value)
        # int
        elif value.isnumeric():
            new_vars[key.upper()] = int(value)
        # list / matrix, as deep as you want, keep the brackets even though
        elif "[" in value:
            new_vars[key.upper()] = _string_to_list(value)
        else:
            new_vars[key.upper()] = str(value)
    return new_vars


def _check_missing_vars(vars: dict, reqds: list, defaults: list):
    """Fill in any required variable missing from `vars` with its default,
    then run type fixing over the whole dict."""
    assert isinstance(
        vars, dict
    ), f"Unable to check variables, `vars` incorrect type: {type(vars)}"
    if reqds:
        # Pad defaults with None so it is at least as long as reqds.
        # NOTE(review): this appends to the caller's `defaults` list in
        # place — a surprising side effect for callers that reuse the list.
        if defaults is None:
            defaults = []
            [defaults.append(None) for _ in range(len(reqds))]
        elif len(defaults) < len(reqds):
            [defaults.append(None) for _ in range(len(reqds) - len(defaults))]
        for index in range(len(reqds)):
            current = reqds[index]
            if not current in list(vars.keys()):
                vars[current] = defaults[index]
    vars = _fix_types(vars)
    return vars


def _check_negative(string: str):
    """Return True when every '-'-separated piece of `string` is numeric
    (empty pieces, i.e. the sign positions, are skipped)."""
    for item in string.split("-"):
        if item == "":
            continue
        elif not item.isnumeric():
            return False
    return True


def _check_numeric(string: str):
    """Return True when every '.'-separated piece of `string` is numeric."""
    for item in (_.isnumeric() for _ in string.split(".")):
        if not item:
            return False
    return True


def _string_to_list(string: str) -> list:
    """Parse a bracketed literal such as "[1,[2,3]]" into a Python list."""
    o, c = string.count("["), string.count("]")
    assert o == c, f"open / close bracket count mismatch (open: {o}, close: {c})"
    # Adjacent "][" means a comma was dropped between two sub-lists.
    for index in range(len(string) - 1):
        current, next = string[index], string[index + 1]
        if current + next == "][":
            surrounding = string[index - 5 : index + 5]
            raise SyntaxError(f"Missing comma between two brackets {surrounding}")
    # NOTE(review): exec on parsed text executes arbitrary code — never feed
    # this untrusted input; ast.literal_eval would be the safe equivalent.
    x = []
    exec(f"x.append({string})")
    value = x[0]
    return value


def parse(
    user_input: str,
    reqds: list = None,
    defaults: list = None,
    given_data: str = None,
    keybinds: bool = False,
):
    """
    ARGS:
        `user_input` : String ; The file to be parsed, assumed to be
        formatted like: "./path/file", unix-style (one dot for current
        folder, two for one up, etc) ; if you don't have a file to read
        from, you can read from stdin, the `given_data` kwarg, or a string
        using (respectively) `stdin`, `given`, or using the string as the
        `user_input` arg
    KWARGS:
        `reqds` : list ; Required variable names, a list of strings
        `defaults` : list ; If missing a variable name, it will set it to
        the value at the same index as in reqds in this list, None if
        defaults is too short or if it is not provided ; These lists are
        reliant on indexing, get it right :)
        `given_data` : str ;
        `keybinds` : bool ; whether or not it's parsing a keybinds file,
        there's some special magic needed to do that
    """
    assert isinstance(
        user_input, str
    ), f"User input type mismatch: {type(user_input)} -> {str}"
    # stdin ; ask for the data from stdin :)
    if user_input == "stdin":
        data = input("Enter the data to be parsed: ")
    # given ; check that data was given, assign it if it is, raise an error otherwise
    elif user_input == "given":
        if not given_data:
            raise IncorrectGivenDataError(
                "Data cannot be parsed if it doesn't exist, no data given when expected"
            )
        data = given_data
    # directly given in a string ; the spec defines that it'll always (formatted correctly)
    # start with a colon and end with a semicolon, so we check for that, then assign the data
    elif user_input.startswith(":") and user_input.endswith(";"):
        data = user_input
    # here we assume it's a file path, so we check if the file exists then read the data
    else:
        assert file_exists(user_input), f"File {user_input} does not exist"
        with open(user_input, "r") as file_:
            data = file_.read()
    # State machine over single characters:
    # Variable name declaration, variable value declaration
    varndec, varvdec = False, False
    # Variable name, variable value
    varn, varv = "", ""
    # Variables
    vars = {}
    # This part parses the data
    for char in data:
        if char in ["\n", "\t"]:
            continue  # layout characters are ignored everywhere
        elif varndec:
            if char == "=":
                # name finished, value starts
                varndec = False
                varvdec = True
                continue
            elif char == ";":
                raise ParserUnexpectedCharacterError(
                    "Parser encountered unexpected character `;`, you may have forgotten an equals sign"
                )
            else:
                varn += char
                continue
        elif varvdec:
            if char != ";":
                varv += char
                continue
            else:
                # entry finished — store it and reset the accumulators
                varvdec = False
                vars[varn] = varv
                varn, varv = "", ""
                continue
        elif char == ":":
            varndec = True
            continue
        else:
            raise ParserUnexpectedCharacterError(
                f"Parser encountered unexpected character: {char}"
            )
    vars = _check_missing_vars(vars, reqds, defaults)
    if keybinds:
        vars = _fix_keybinds(vars)
    return vars
true
76eb9bcbd973dd86e74786c3874d6a85b0e8040d
Python
Dippimunch/Advent-2020
/day2.py
UTF-8
371
3.15625
3
[]
no_license
# Advent of Code 2020, day 2: count passwords whose required letter occurs
# within the stated min-max range.
import re

# Each line looks like "1-3 a: abcde" -> captured as (min, max, letter, password).
# FIX: the original left the file handle open; `with` closes it reliably.
with open(r'day2_data.txt', 'r') as data_file:
    passwords = re.findall(r'(\d+)-(\d+)\s(\w):\s(\w+)', data_file.read())

valid = 0
# Iterate the captured tuples directly instead of indexing by range(len(...)).
for low, high, letter, password in passwords:
    count = password.count(letter)
    # A password is valid when the occurrence count lies in the inclusive range.
    if int(low) <= count <= int(high):
        valid += 1

print('# of valid passwords: ', valid)
true
4c84db41fad63e32b4af1629c31c427bad494fa1
Python
Neuronys/DRL_ColaborationCompetition
/model.py
UTF-8
3,212
2.9375
3
[]
no_license
# I've reused the model code I've made for project 2
# This is inspired from:
# https://github.com/udacity/deep-reinforcement-learning/blob/55474449a112fa72323f484c4b7a498c8dc84be1/ddpg-bipedal/model.py
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F


def hidden_init(layer):
    """Return the (-1/sqrt(fan_in), 1/sqrt(fan_in)) uniform range used to
    initialise a linear layer's weights (DDPG paper convention)."""
    fan_in = layer.weight.data.size()[0]
    lim = 1. / np.sqrt(fan_in)
    return (-lim, lim)


class Actor(nn.Module):
    """Actor (Policy) Model: maps states to actions squashed into [-1, 1]."""

    def __init__(self, state_size, action_size, seed, fc1_units=512, fc2_units=384):
        """
        Params:
            state_size: dimension of each state
            action_size: dimension of each action
            seed: random seed for reproducible weight initialisation
            fc1_units / fc2_units: widths of the two hidden layers
        """
        super(Actor, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.fc1 = nn.Linear(state_size, fc1_units)
        # A batch-norm layer was tried after fc1 (suggested on the course
        # Slack) but did not improve convergence, so it was removed.
        self.fc2 = nn.Linear(fc1_units, fc2_units)
        self.fc3 = nn.Linear(fc2_units, action_size)
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialise all layer weights with DDPG-style uniform ranges."""
        self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
        self.fc3.weight.data.uniform_(-3e-3, 3e-3)

    def forward(self, state):
        """Build an actor (policy) network that maps states -> actions."""
        x = F.relu(self.fc1(state))
        x = F.relu(self.fc2(x))
        # BUG FIX: F.tanh is deprecated (warns, and was later removed from
        # torch.nn.functional); torch.tanh is the supported spelling.
        return torch.tanh(self.fc3(x))


class Critic(nn.Module):
    """Critic (Value) Model: maps (state, action) pairs to scalar Q-values."""

    def __init__(self, state_size, action_size, seed, fc1_units=512, fc2_units=384):
        """
        Params:
            state_size: dimension of each state
            action_size: dimension of each action
            seed: random seed for reproducible weight initialisation
            fc1_units / fc2_units: widths of the two hidden layers
        """
        super(Critic, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.fc1 = nn.Linear(state_size, fc1_units)
        # Batch norm tried and removed here too, same as in Actor.
        # The action enters after the first hidden layer, hence the
        # fc1_units + action_size input width.
        self.fc2 = nn.Linear(fc1_units + action_size, fc2_units)
        self.fc3 = nn.Linear(fc2_units, 1)
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialise all layer weights with DDPG-style uniform ranges."""
        self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
        self.fc3.weight.data.uniform_(-3e-3, 3e-3)

    def forward(self, state, action):
        """Build a critic (value) network that maps (state, action) pairs -> Q-values."""
        x = F.relu(self.fc1(state))
        # leaky_relu was evaluated here (slower in practice) — kept on relu.
        x = torch.cat((x, action), dim=1)
        x = F.relu(self.fc2(x))
        return self.fc3(x)
true
f337379a1fcb8aaf97f14260813bfde457c1a053
Python
audehamdi/mlproject
/tests/tool_test.py
UTF-8
428
2.71875
3
[]
no_license
# -*- coding: UTF-8 -*-

# Import from standard library
import os

import mlproject
import pandas as pd

# Import from our lib
from mlproject.tools import haversine
import pytest


def test_haversine():
    """haversine should return the known great-circle distance (km)
    between two fixed coordinate pairs near Paris."""
    # Insert your own coordinates from Google Maps to try other pairs,
    # e.g. (48.865070, 2.380009) and (51.527870, -0.143490).
    distance_km = haversine(48.865070, 2.380009, 48.235070, 2.393409)
    assert distance_km == 70.00789153218594
true
4af8bdd42c4c98d0909b105190f93c2b919302c1
Python
anandgarg91/CodeRozana
/Maclearn/angle_pmu/log_reg.py
UTF-8
1,298
2.71875
3
[]
no_license
# Logistic-regression classifier on PMU angle data: confusion matrix on a
# held-out split, k-fold cross-validated accuracy, and a scatter plot of
# predictions versus ground truth.
from pandas import read_csv
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from sklearn.model_selection import train_test_split
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np

names = ['v', 'i', 'label', 'v_dif', 'i_dif', 'v1_dif', 'i1_dif']
dataframe = read_csv('file2.csv', header=None, names=names)

# feature_cols = ['v']
feature_cols = ['v1_dif', 'i1_dif']
X = dataframe[feature_cols]
y = dataframe.label

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)

kfold = KFold(n_splits=100, random_state=0)
model = LogisticRegression(solver='liblinear')
# model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)

cnf_matrix = metrics.confusion_matrix(y_test, y_pred)
print(cnf_matrix)

results = cross_val_score(model, X, y, cv=kfold)
# BUG FIX: the original was `print("...") % (...)` — Python 2 syntax that in
# Python 3 applies `%` to print()'s return value (None) and raises a
# TypeError at runtime.  The formatting must happen inside the call.
print("Accuracy: %.3f%% (%.3f%%)" % (results.mean() * 100.0, results.std() * 100.0))

sns.set_style('dark')
plot = sns.scatterplot(y_test, y_pred)
plot.set(xlabel='Given', ylabel='Prediction')

# Reference y = x line for reading the scatter plot.
x_plot = np.linspace(0, 5, 100)
y_plot = x_plot
plt.plot(x_plot, y_plot, color='r')
plt.show()
true
9f4b0f35571b2d2e26a828f14b61ffce5ea1186c
Python
Apu66/Machine-Learning-Line-Follower
/learning/format.py
UTF-8
6,295
3.09375
3
[]
no_license
import time
import json
import pandas as pd

"""
Helper file which provides methods for formatting the raw sensor data
"""


def load(dataset):
    """Load a dataset (JSON) by name and return the parsed object.

    NOTE(review): the data directory is hard-coded to one developer's
    machine; consider making it configurable.
    """
    with open('/Users/samnockels/projects/dissertation/data/' + dataset + '.json', 'r') as f:
        data = json.load(f)
    return data


def reduce(data):
    """Eliminate consecutive runs of repeated frames, keeping the first of
    each run."""
    cache = []
    reduced = []
    for frame in data:
        if frame != cache:
            cache = frame
            reduced.append(frame)
    return reduced


def flatten(data):
    """Flatten frames so each block gets its own sample.

    From [[[block1, block2, block3], [left, right]], ...]
    into [[[block1], [left, right]], [[block2], [left, right]], ...]
    """
    flattened = []
    for frame in data:
        for block in frame[0]:
            flattened.append([[block], frame[1]])
    return flattened


def extract(data, independant=(), dependant=(), dependant_match_num_blocks=False):
    """Extract features from raw data.

    independant: tuple of feature-name strings to pull from each block;
        each entry builds a list of those features, e.g. 'xy' builds
        [[x, y], ...] while 'x' builds [[x], ...].
    dependant: tuple of motor-variable strings, e.g. 'l' builds
        [[left], ...] and 'lr' builds [[left, right], ...].
    dependant_match_num_blocks: when True the dependant lists are repeated
        once per block so they stay aligned with the per-block features.
    """
    # structure we will return
    variables = {"independant": {}, "dependant": {}}

    # build empty lists for each requested feature
    for variable in independant:
        variables['independant'][variable] = []
    for variable in dependant:
        variables['dependant'][variable] = []

    # iterate over the dataset, extracting all required features
    for sample in data:
        blocks = sample[0]
        motors = sample[1]

        # independant variables: one row per block
        for block in blocks:
            for variable in independant:
                if len(variable) == 1:
                    # e.g. 'x' builds [[x], [x], ...]
                    variables['independant'][variable].append([block[blockFeatureToIndex(variable)]])
                else:
                    # e.g. 'xy' builds [[x, y], [x, y], ...]
                    variables['independant'][variable].append([0] * len(variable))
                    for i, var in enumerate(variable):
                        variables['independant'][variable][-1][i] = block[blockFeatureToIndex(var)]

        if dependant_match_num_blocks:
            repeat = len(blocks)
        else:
            repeat = 1

        # dependant variables (motor values)
        for variable in dependant:
            if len(variable) == 1:
                # e.g. 'l' builds [[l], [l], ...]
                for i in range(repeat):
                    variables['dependant'][variable].append([motors[motorFeatureToIndex(variable)]])
            else:
                for i in range(repeat):
                    # e.g. 'lr' builds [[l, r], [l, r], ...]
                    for i in range(len(blocks)):
                        variables['dependant'][variable].append([0] * len(variable))
                        for i, var in enumerate(variable):
                            variables['dependant'][variable][-1][i] = motors[motorFeatureToIndex(var)]

    return variables


def blockFeatureToIndex(attr):
    """Map a block feature name to its index in the pixy get_blocks tuple."""
    if not attr in ['t', 's', 'x', 'y', 'w', 'h']:
        raise Exception('invalid attribute "' + str(attr) + '"')
    if attr == 't':
        return 0  # block type
    if attr == 's':
        return 1  # block signature (colour)
    if attr == 'x':
        return 2  # block x coord
    if attr == 'y':
        return 3  # block y coord
    if attr == 'w':
        return 4  # block w width
    if attr == 'h':
        return 5  # block h height
    # BUG FIX: removed unreachable 'l'/'r' branches — the validation above
    # rejects those values before they could ever be compared.


def motorFeatureToIndex(attr):
    """Map a motor name ('l'/'r') to its index in the motor pair."""
    if not attr in ['l', 'r']:
        raise Exception('invalid attribute "' + str(attr) + '"')
    if attr == 'l':
        return 0  # left motor value
    if attr == 'r':
        return 1  # right motor value


def weightedAvg(data, keep_motors=False):
    """Take the weighted-average x,y for every sample in the dataset.

    keep_motors=True keeps each sample paired with its motor values.
    Prints how long the pass took.
    """
    start_time = time.time()
    newDataset = []
    for frame in data:
        if keep_motors:
            newDataset.append([weightedAvgBlocks(frame[0]), frame[1]])
        else:
            newDataset.append(weightedAvgBlocks(frame[0]))
    print("Took " + str(time.time() - start_time) + " seconds to compute")
    return newDataset


def weightedAvgBlocks(blocks):
    """Return the area-weighted average [x, y] of a vector of pixy blocks.

    An empty block list yields [0.0, 0.0] (weight clamped to 1 to avoid
    division by zero).
    """
    totalX = 0
    totalY = 0
    totalWeight = 0
    for block in blocks:
        x = block[2]
        y = block[3]
        w = block[4]
        h = block[5]
        weight = w * h  # larger blocks count for more
        totalX += x * weight
        totalY += y * weight
        totalWeight += weight
    if totalWeight == 0:
        totalWeight = 1
    return [totalX / totalWeight, totalY / totalWeight]


def toDataFrame(dataset):
    """Flatten a raw dataset and return it as a pandas DataFrame with one
    row per block."""
    flattened = flatten(dataset)
    newData = []
    for sample in flattened:
        newSample = []
        newSample.append(sample[0][0][0])  # type
        newSample.append(sample[0][0][1])  # sig
        newSample.append(sample[0][0][2])  # x
        newSample.append(sample[0][0][3])  # y
        newSample.append(sample[0][0][4])  # w
        newSample.append(sample[0][0][5])  # h
        newSample.append(sample[1][0])  # left
        newSample.append(sample[1][1])  # right
        newData.append(newSample)
    df = pd.DataFrame(newData)
    df.columns = ["type", "sig", "x", "y", "w", "h", "left", "right"]
    return df


def toDataFrameWeighted(weighted_avg_dataset):
    """Return a weighted-average dataset (weightedAvg with keep_motors=True)
    as a pandas DataFrame."""
    newData = []
    for sample in weighted_avg_dataset:
        newSample = []
        newSample.append(sample[0][0])  # weighted x
        newSample.append(sample[0][1])  # weighted y
        newSample.append(sample[1][0])  # left
        newSample.append(sample[1][1])  # right
        newData.append(newSample)
    df = pd.DataFrame(newData)
    df.columns = ["x", "y", "left", "right"]
    return df
true
7f0f66410133136707d71e99d19bc7bc6c5702bd
Python
aladdinpersson/Machine-Learning-Collection
/ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py
UTF-8
5,524
3.296875
3
[ "MIT" ]
permissive
""" Example code of a simple RNN, GRU, LSTM on the MNIST dataset. Programmed by Aladdin Persson <aladdin.persson at hotmail dot com> * 2020-05-09 Initial coding * 2022-12-16 Updated with more detailed comments, docstrings to functions, and checked code still functions as intended. """ # Imports import torch import torch.nn.functional as F # Parameterless functions, like (some) activation functions import torchvision.datasets as datasets # Standard datasets import torchvision.transforms as transforms # Transformations we can perform on our dataset for augmentation from torch import optim # For optimizers like SGD, Adam, etc. from torch import nn # All neural network modules from torch.utils.data import ( DataLoader, ) # Gives easier dataset managment by creating mini batches etc. from tqdm import tqdm # For a nice progress bar! # Set device device = "cuda" if torch.cuda.is_available() else "cpu" # Hyperparameters input_size = 28 hidden_size = 256 num_layers = 2 num_classes = 10 sequence_length = 28 learning_rate = 0.005 batch_size = 64 num_epochs = 3 # Recurrent neural network (many-to-one) class RNN(nn.Module): def __init__(self, input_size, hidden_size, num_layers, num_classes): super(RNN, self).__init__() self.hidden_size = hidden_size self.num_layers = num_layers self.rnn = nn.RNN(input_size, hidden_size, num_layers, batch_first=True) self.fc = nn.Linear(hidden_size * sequence_length, num_classes) def forward(self, x): # Set initial hidden and cell states h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device) # Forward propagate LSTM out, _ = self.rnn(x, h0) out = out.reshape(out.shape[0], -1) # Decode the hidden state of the last time step out = self.fc(out) return out # Recurrent neural network with GRU (many-to-one) class RNN_GRU(nn.Module): def __init__(self, input_size, hidden_size, num_layers, num_classes): super(RNN_GRU, self).__init__() self.hidden_size = hidden_size self.num_layers = num_layers self.gru = nn.GRU(input_size, 
hidden_size, num_layers, batch_first=True) self.fc = nn.Linear(hidden_size * sequence_length, num_classes) def forward(self, x): # Set initial hidden and cell states h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device) # Forward propagate LSTM out, _ = self.gru(x, h0) out = out.reshape(out.shape[0], -1) # Decode the hidden state of the last time step out = self.fc(out) return out # Recurrent neural network with LSTM (many-to-one) class RNN_LSTM(nn.Module): def __init__(self, input_size, hidden_size, num_layers, num_classes): super(RNN_LSTM, self).__init__() self.hidden_size = hidden_size self.num_layers = num_layers self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True) self.fc = nn.Linear(hidden_size * sequence_length, num_classes) def forward(self, x): # Set initial hidden and cell states h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device) c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device) # Forward propagate LSTM out, _ = self.lstm( x, (h0, c0) ) # out: tensor of shape (batch_size, seq_length, hidden_size) out = out.reshape(out.shape[0], -1) # Decode the hidden state of the last time step out = self.fc(out) return out # Load Data train_dataset = datasets.MNIST( root="dataset/", train=True, transform=transforms.ToTensor(), download=True ) test_dataset = datasets.MNIST( root="dataset/", train=False, transform=transforms.ToTensor(), download=True ) train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True) test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True) # Initialize network (try out just using simple RNN, or GRU, and then compare with LSTM) model = RNN_LSTM(input_size, hidden_size, num_layers, num_classes).to(device) # Loss and optimizer criterion = nn.CrossEntropyLoss() optimizer = optim.Adam(model.parameters(), lr=learning_rate) # Train Network for epoch in range(num_epochs): for batch_idx, (data, targets) in 
enumerate(tqdm(train_loader)): # Get data to cuda if possible data = data.to(device=device).squeeze(1) targets = targets.to(device=device) # forward scores = model(data) loss = criterion(scores, targets) # backward optimizer.zero_grad() loss.backward() # gradient descent update step/adam step optimizer.step() # Check accuracy on training & test to see how good our model def check_accuracy(loader, model): num_correct = 0 num_samples = 0 # Set model to eval model.eval() with torch.no_grad(): for x, y in loader: x = x.to(device=device).squeeze(1) y = y.to(device=device) scores = model(x) _, predictions = scores.max(1) num_correct += (predictions == y).sum() num_samples += predictions.size(0) # Toggle model back to train model.train() return num_correct / num_samples print(f"Accuracy on training set: {check_accuracy(train_loader, model)*100:2f}") print(f"Accuracy on test set: {check_accuracy(test_loader, model)*100:.2f}")
true
78a64fd6315dfa7c80d27997473dc0f8e1594842
Python
SCaicedo99/Sorting-Algorithms
/insertionSort.py
UTF-8
414
4.3125
4
[]
no_license
# Insertion sort: sorts the given list in place in ascending order.
# (The original comment claimed "highest to lowest", which was wrong.)
# Worst-case running time is O(n^2).
def insertionSort(alist):
    """Sort `alist` in place in ascending order using insertion sort."""
    for index in range(1, len(alist)):
        currentvalue = alist[index]
        position = index
        # BUG FIX: the original only walked `position` left without shifting
        # elements, so it overwrote data instead of sorting (e.g. the second
        # pass turned [54, 26, ...] into [26, 26, ...]).  Each larger element
        # must be shifted one slot right to open a gap for currentvalue.
        while position > 0 and alist[position - 1] > currentvalue:
            alist[position] = alist[position - 1]
            position = position - 1
        alist[position] = currentvalue


alist = [54, 26, 93, 17, 77, 31, 44, 55, 20]
insertionSort(alist)
print(alist)
true
5045621bacc64bb1fcf97362fd208e2855d0109b
Python
AidenSmith09/Python
/Day17_File_and_exception/alice.py
UTF-8
766
3.421875
3
[]
no_license
# /bin/etc/env Python
# -*- coding: utf-8 -*-
# Counts the words in a set of text files, reporting missing files
# gracefully instead of crashing.

filename2 = "alice2.txt"
try:
    with open(filename2) as f_obj:
        contents2 = f_obj.read()
except FileNotFoundError:
    print("没有" + filename2 + "文件")


def count_words(filename):
    """Print and return the number of words in `filename`.

    Returns None (after printing a message) when the file does not exist.
    The return value is new but backward-compatible: callers previously
    always received None.
    """
    try:
        with open(filename) as file_obj:
            contents = file_obj.read()
    except FileNotFoundError:
        print("没有" + filename + "文件")
        # Replace the print above with `pass` if no message is wanted.
    else:
        words = contents.split()
        num_words = len(words)
        print("这个 " + filename + " 中有 " + str(num_words) + " 单词")
        return num_words


filename = 'alice.txt'
count_words(filename)

# FIX: the original reused the single name `filename` for both the list and
# the loop variable, which shadows the list mid-loop; give the list its own
# name.
filenames = ['alice.txt', 'siddhartha.txt', 'moby_dick.txt']
for filename in filenames:
    count_words(filename)
true
d02b6d551607e63e489c991105ce9ada9cfbee62
Python
yunsseong/Make_Python_Game
/Make_Python_Game/Dig_Mines_And_Save_Lives/Dig_Mines_And_Save_Lives.py
UTF-8
8,185
2.90625
3
[]
no_license
# Minesweeper game ("Dig Mines And Save Lives").
import random
import copy
import sys

global GAME_MODE
GAME_MODE = "DEBUG"

# Features that would be nice to add:
# - undo the last turn (UNDO)
# - login
# - resume a saved game

# Role: print the map to the terminal.
# In "NOMAL" mode only the maps passed in are printed; in "DEBUG" mode the
# player map and the solution map are printed side by side (this branch
# reads the module globals size_of_map / mine_map_show / mine_map_sol).
def print_map(*map_obj):
    if GAME_MODE == "NOMAL":
        for i in map_obj:
            for j in i:
                print(j, end = " ")
            print("")
    elif GAME_MODE == "DEBUG":
        for i in range(size_of_map):
            print(" ".join(map(str,mine_map_show[i])) + " " + " ".join(map(str, (mine_map_sol[i]))))

# Role: keep asking until the user types an integer.
# TODO: also needs to guard the case where several arguments are entered.
# NOTE(review): the recursive retry's value is not returned, so after a bad
# first attempt the caller receives None — TODO confirm and fix upstream.
def safe_int_input(print_str):
    input_tmp = input(print_str)
    if input_tmp.isdigit() == True:
        return int(input_tmp)
    else:
        print("Error : You should input digit number")
        safe_int_input(print_str)

# Role: set the map size and mine count as module globals; in DEBUG mode
# use the fixed defaults instead of prompting.
def set_map_property():
    global size_of_map, num_of_mine
    if GAME_MODE == "NOMAL":
        print_str = "Input size of map : "
        size_of_map = safe_int_input(print_str)
        print_str = "Input number of mines : "
        num_of_mine = safe_int_input(print_str)
    elif GAME_MODE == "DEBUG":
        size_of_map, num_of_mine = 10,10

# Build the boards: an empty player map, a random mine layout, and the
# solution map with neighbour-mine counts.  All stored in module globals.
def MINE_MAP_GEN():
    # Build an empty mine map filled with "#".
    set_map_property()
    map_horiz = []
    empty_mine_map = []
    for i in range(size_of_map):
        for j in range(size_of_map):
            map_horiz.append("#")
        empty_mine_map.append(map_horiz)
        map_horiz = []
    global mine_map_show, mine_planted_map
    mine_map_show = copy.deepcopy(empty_mine_map)
    mine_planted_map = copy.deepcopy(empty_mine_map)
    # Plant mines at random coordinates on the empty map.
    cord =[]
    range_sizeOfMap = range(size_of_map)
    # TODO: build the coordinate list with a list comprehension instead.
    for i in range_sizeOfMap:
        for j in range_sizeOfMap:
            cord.append([i, j])
    global ran_mine
    ran_mine = random.sample(cord, num_of_mine)
    for i in ran_mine:
        mine_planted_map[i[0]][i[1]] = "*"
    global mine_map_sol
    mine_map_sol = copy.deepcopy(mine_planted_map)
    # Build the solution map: each non-mine cell gets the count of mines
    # among its up-to-8 in-bounds neighbours.
    cnt = 0
    for i in range(size_of_map):
        for j in range(size_of_map):
            if mine_map_sol[i][j] != "*":
                for a in [-1, 0, 1]:
                    for b in [-1, 0, 1]:
                        if i+a >= 0 and j+b >= 0 and i+a < size_of_map and j+b < size_of_map and abs(a)+abs(b)!=0:
                            if mine_map_sol[i+a][j+b] == "*":
                                cnt +=1
                mine_map_sol[i][j]=cnt
                cnt=0

# One unified end-of-game routine: RES ("BAD"/"HAPPY"/"FORGIVE") selects
# the matching farewell message, then control returns to the main menu.
def game_end(RES):
    Bad_ment = "You dig the mine, We will remember your sacrifice"
    Happy_ment = "You saved many lives, Well done"
    Forgive_ment = "We still have Mines to dig... So come back ASAP"
    print_str = {"BAD" : Bad_ment, "HAPPY" : Happy_ment, "FORGIVE" : Forgive_ment}.get(RES)
    print(print_str)
    MAIN_MENU()

# TODO: when a 0 cell is chosen, reveal the surrounding zeros/numbers; also
# generate the map around the first click so the first pick is never a mine.
# NOTE(review): there is no "already revealed" check, so two adjacent zero
# cells keep re-adding each other to cord_zero — this recursion can run
# until the recursion limit. Needs a visited set.
def zero(y, x):
    cord_zero = []
    for a in [-1, 0, 1]:
        for b in [-1, 0, 1]:
            if y+a >= 0 and x+b >= 0 and y+a < size_of_map and x+b < size_of_map:
                if mine_map_sol[y+a][x+b] == 0:
                    cord_zero.append([y+a,x+b])
                    mine_map_show[y+a][x+b] = mine_map_sol[y+a][x+b]
                else:
                    mine_map_show[y+a][x+b] = mine_map_sol[y+a][x+b]
    for y,x in cord_zero:
        zero(y,x)

# Dig cell (y, x): a mine ends the game, a 0 flood-reveals via zero(),
# any other cell reveals its number; then the next command is requested.
def dig(y, x):
    if mine_map_sol[y][x] == "*":
        print_map(mine_map_sol)
        game_end("BAD")
    elif mine_map_sol[y][x] == 0:
        zero(y,x)
        print_map(mine_map_show)
        ask_command()
    else:
        mine_map_sol[y][x] == 0
        mine_map_show[y][x] = mine_map_sol[y][x]
        print_map(mine_map_show)
        ask_command()

# BUG: the flag function never runs — dig keeps being executed instead
# (see the func_list construction in ask_command below).
# NOTE(review): `mine_map_show[y][x] == "#"` in the elif branch is a
# comparison, not an assignment, so un-flagging a cell has no effect.
def flag(y, x):
    if mine_map_show[y][x].isdigit():
        print("Error : You can't raise flag here")
        ask_command()
    elif mine_map_show[y][x] == "^":
        mine_map_show[y][x] == "#"
    else:
        mine_map_show[y][x] = "^"
    print_map(mine_map_show)
    ask_command()

# Role: constrain input to the predefined command format; re-prompt on any
# mismatch of argument count or per-argument type ("STR"/"INT").
# NOTE(review): retries recurse without returning their result, and
# `int(input_tmp)` converts the whole list instead of input_tmp[i] —
# TODO confirm; this helper is currently unused by ask_command.
def safe_input_commend(num_params, print_str, params_type):
    input_tmp = input(print_str).split()
    if num_params == len(input_tmp):
        for i in range(num_params):
            if params_type[i] == "STR":
                if input_tmp[i].isalpha():
                    pass
                else:
                    print("Error : Your commend something wrong")
                    safe_input_commend(num_params, print_str, params_type)
            elif params_type[i] == "INT":
                if input_tmp[i].isdigit():
                    input_tmp[i] = int(input_tmp)
                else:
                    print("Error : Your commend something wrong")
                    safe_input_commend(num_params, print_str, params_type)
    else:
        print("Error : Something wrong in number of commend parameters")
        safe_input_commend(num_params, print_str, params_type)
    return input_tmp

# TODO: lowercase the input after reading; accept only natural numbers for
# the coordinate values.
# NOTE(review): `func_list = [dig(y,x), flag(y,x)]` CALLS both dig and flag
# while building the list (this is the flag bug noted above); the list holds
# their return values, and `act_func = func_list[1]` hard-codes index 1
# instead of using func_num.  If control ever returns here, act_func(y,x)
# would presumably fail since dig/flag have no return statement — confirm.
def ask_command():
    user_commend = input("Dig(D) Flag(F) End(E) : ").split()
    if user_commend[0] in ["d", "D", "dig", "f", "F", "flag"]:
        if len(user_commend) == 3 and user_commend[0] not in ["e", "E"]:
            com, x, y = user_commend[0], int(user_commend[1])-1, int(user_commend[2])-1
            func_list = [dig(y,x), flag(y,x)]
            func_num = {"d": 0,"D": 0, "dig" : 0, "flag" : 1, "f": 1,"F": 1}.get(com)
            #func_list[func_num](y, x)
            act_func = func_list[1]
            act_func(y,x)
            map_check()
        else:
            print("Error : You have to input x, y collectly")
            ask_command()
    elif user_commend[0] in ["e","E"]:
        game_end("FORGIVE")
    else:
        print("Error : There is no such option")
        ask_command()

# TODO: run this check automatically at the end of every turn.
# Win condition: every cell is revealed or flagged, the flag count equals
# the mine count, and every actual mine carries a flag.
def map_check():
    cnt_flag = 0
    cnt_hash = 0
    for i in mine_map_show:
        for j in i:
            if j == "^":
                cnt_flag+=1
            if j == "#":
                cnt_hash+=1
    if cnt_flag == num_of_mine and cnt_hash == 0:
        for i in ran_mine:
            if mine_map_show[i[0]][i[1]] != "^":
                ask_command()
        game_end("HAPPY")
    else:
        ask_command()

# Main menu loop: Play starts a new game, Setting loops back, Exit quits,
# and the hidden "DEBUG" option jumps straight into play.
# NOTE(review): `GAME_MODE = "DEBUG"` here binds a LOCAL name (no `global`
# statement), so the module-level GAME_MODE is not actually changed.
def MAIN_MENU():
    user_dicision = input("Play(P) Setting(S) Exit(E) : ")
    if user_dicision in ["P", "p"]:
        print("Welcome to mine clearing squad, Your mission is find Mine and Save lives. Good luck!")
        MINE_MAP_GEN()
        PLAY()
    elif user_dicision in ["S", "s"]:
        MAIN_MENU()
    elif user_dicision in ["E", "e"]:
        print("Okay, See you later Captine")
        sys.exit()
    elif user_dicision in ["DEBUG"]:
        GAME_MODE = "DEBUG"
        PLAY()
    else:
        print("Error : There is no such option")
        MAIN_MENU()

# Entry point: banner, then the main menu.
def START():
    print("Dig Mines And Save Lives")
    MAIN_MENU()

# One round: show the current board and ask for the next command.
def PLAY():
    print_map(mine_map_show)
    ask_command()

START()
true
29bf8382a08807f2f535c82184744eab7d9795e5
Python
SacredArrow/Secondary_structure_public
/Scripts/counter.py
UTF-8
197
3.28125
3
[]
no_license
# counts number of sequences in File
import sys

# Path of the file to scan is the first command-line argument.
file = sys.argv[1]
cnt = 0
# Use a context manager so the file handle is always closed
# (the original opened the file and never closed it).
with open(file, "r") as f:
    for line in f:
        if line[0] == "#":  # 3 lines with "#" per sequence
            cnt += 1
# Report the raw "#" line count and the implied number of sequences.
print(cnt, cnt / 3)
true
df6263b8f433ff07e06e4ff33862b339746bcdff
Python
sravanrao2/python_coding
/tictactoeFinal.py
UTF-8
4,563
2.9375
3
[]
no_license
# Tic-Tac-Toe built from a 3x3 grid of Tkinter buttons.  X always moves
# first; clicks alternate X/O, a win or tie pops a messagebox, and the
# whole board is disabled once somebody wins.
from tkinter import *
import tkinter.messagebox

ttt = Tk()
ttt.title("Tic Tac Toe Game")
# NOTE(review): plyrx/plyry start as StringVar objects but are rebound to
# the plain strings " X Wins!" / " O Wins!" on the first X move in btnClk;
# the StringVar values are never used as StringVars.
plyrx = StringVar()
plyry = StringVar()
bclk = True   # True -> the next click places an X; False -> places an O
count = 0     # number of marks placed so far (drives the tie check)


# Grey out all nine squares so no further moves can be made.
def disableBtn():
    button1.configure(state=DISABLED)
    button2.configure(state=DISABLED)
    button3.configure(state=DISABLED)
    button4.configure(state=DISABLED)
    button5.configure(state=DISABLED)
    button6.configure(state=DISABLED)
    button7.configure(state=DISABLED)
    button8.configure(state=DISABLED)
    button9.configure(state=DISABLED)


# Handle a click on square *btn*: place the current player's mark if the
# square is free, check for a win, then advance the move counter; a click
# on an occupied square just pops an info box.
def btnClk(btn):
    global bclk, count, plyrx, plyry
    if btn['text'] == " " and bclk == True:
        btn.configure(text="X")
        bclk = False
        plyrx = " X Wins!"
        plyry = " O Wins!"
        chkWin()
        count += 1
    elif btn['text'] == " " and bclk == False:
        btn.configure(text="O")
        bclk = True
        chkWin()
        count += 1
    else:
        tkinter.messagebox.showinfo("Tic-Tac-Toe", "Button already Choosed!")


# Check all eight lines (3 rows, 3 columns, 2 diagonals) for three equal
# 'X' or 'O' marks; on a win disable the board and announce the winner.
# When count == 8 the ninth mark was just placed with no line matching, so
# the game is declared a tie.
def chkWin():
    if (button1['text'] == button2['text'] and button1['text'] == button3['text'] and button1['text'] == 'X' or
            button4['text'] == button5['text'] and button4['text'] == button6['text'] and button4['text'] == 'X' or
            button7['text'] == button8['text'] and button7['text'] == button9['text'] and button7['text'] == 'X' or
            button1['text'] == button4['text'] and button1['text'] == button7['text'] and button1['text'] == 'X' or
            button2['text'] == button5['text'] and button2['text'] == button8['text'] and button2['text'] == 'X' or
            button3['text'] == button6['text'] and button3['text'] == button9['text'] and button3['text'] == 'X' or
            button1['text'] == button5['text'] and button1['text'] == button9['text'] and button1['text'] == 'X' or
            button7['text'] == button5['text'] and button7['text'] == button3['text'] and button7['text'] == 'X'
            ):
        disableBtn()
        tkinter.messagebox.showinfo("Tic-Tac-Toe Game", plyrx)

    elif (button1['text'] == button2['text'] and button1['text'] == button3['text'] and button1['text'] == 'O' or
            button4['text'] == button5['text'] and button4['text'] == button6['text'] and button4['text'] == 'O' or
            button7['text'] == button8['text'] and button7['text'] == button9['text'] and button7['text'] == 'O' or
            button1['text'] == button4['text'] and button1['text'] == button7['text'] and button1['text'] == 'O' or
            button2['text'] == button5['text'] and button2['text'] == button8['text'] and button2['text'] == 'O' or
            button3['text'] == button6['text'] and button3['text'] == button9['text'] and button3['text'] == 'O' or
            button1['text'] == button5['text'] and button1['text'] == button9['text'] and button1['text'] == 'O' or
            button7['text'] == button5['text'] and button7['text'] == button3['text'] and button7['text'] == 'O'
            ):
        disableBtn()
        tkinter.messagebox.showinfo("Tic-Tac-Toe Game", plyry)
    elif count == 8:
        tkinter.messagebox.showinfo("Tic-Tac-Toe Game", "Tie Game")


# NOTE(review): this StringVar is never used anywhere below.
buttons = StringVar()

# The nine board squares, laid out on a 3x3 grid; each passes itself to
# btnClk via its command lambda.
button1 = Button(ttt, text=' ', font='Times 10 bold', bg='white', fg='green', height=4, width=8,
                 command=lambda: btnClk(button1))
button1.grid(row=0, column=0)
button2 = Button(ttt, text=' ', font='Times 10 bold', bg='white', fg='green', height=4, width=8,
                 command=lambda: btnClk(button2))
button2.grid(row=0, column=1)
button3 = Button(ttt, text=' ',font='Times 10 bold', bg='white', fg='green', height=4, width=8,
                 command=lambda: btnClk(button3))
button3.grid(row=0, column=2)
button4 = Button(ttt, text=' ', font='Times 10 bold', bg='white', fg='green', height=4, width=8,
                 command=lambda: btnClk(button4))
button4.grid(row=1, column=0)
button5 = Button(ttt, text=' ', font='Times 10 bold', bg='white', fg='green', height=4, width=8,
                 command=lambda: btnClk(button5))
button5.grid(row=1, column=1)
button6 = Button(ttt, text=' ', font='Times 10 bold', bg='white', fg='green', height=4, width=8,
                 command=lambda: btnClk(button6))
button6.grid(row=1, column=2)
button7 = Button(ttt, text=' ', font='Times 10 bold', bg='white', fg='green', height=4, width=8,
                 command=lambda: btnClk(button7))
button7.grid(row=2, column=0)
button8 = Button(ttt, text=' ', font='Times 10 bold', bg='white', fg='green', height=4, width=8,
                 command=lambda: btnClk(button8))
button8.grid(row=2, column=1)
button9 = Button(ttt, text=' ', font='Times 10 bold', bg='white', fg='green', height=4, width=8,
                 command=lambda: btnClk(button9))
button9.grid(row=2, column=2)

ttt.mainloop()
true
12860f4dae95a35bcdc1acecc2668d63ecffe8bc
Python
lemaoliu/LeetCode_Python_Accepted
/121_Best_Time_to_Buy_and_Sell_Stock.py
UTF-8
449
3.359375
3
[]
no_license
# 2015-03-30 Runtime: 97 ms
class Solution:
    # @param prices, a list of integer
    # @return an integer
    def maxProfit(self, prices):
        """Best profit from one buy followed by one later sell.

        Single pass: track the cheapest price seen so far and the best
        gain achievable by selling at the current price.  An empty list
        yields 0; a strictly falling sequence also yields 0.
        """
        if not prices:
            return 0
        cheapest = 10**10   # lowest price observed so far
        best = -10**10      # best (price - cheapest) observed so far
        for p in prices:
            if p < cheapest:
                cheapest = p
            gain = p - cheapest
            if gain > best:
                best = gain
        return best
true
da1e0dfa801e2550370fb286bfe3ee69889df2c8
Python
santhoshbabu4546/GUVI-9
/Player/set13/del_string.py
UTF-8
104
2.984375
3
[]
no_license
# Read two whitespace-separated token lists from stdin; for each token on
# the second line, delete its FIRST occurrence from the first list, then
# print the remaining tokens joined by single spaces.
# NOTE(review): list.remove raises ValueError if a token from the second
# line is absent from the first — the input is assumed to guarantee it.
remaining = input().split()
to_delete = input().split()
for token in to_delete:
    remaining.remove(token)
print(' '.join(map(str, remaining)))
true
74cc92d001468450218d75667bf79ee7fd8781b7
Python
vlad988/Day1
/15.py
UTF-8
321
3.140625
3
[]
no_license
# Read an integer n and print the sum 1 + 2 + ... + n via Gauss's formula
# (note: the / 2 yields a float, matching the original output), then print
# an author/timestamp footer.
a = int(input('Введіть ціле число '))
total = (a * (a + 1)) / 2
print(total)

import datetime


def printTimeStamp(name):
    """Print the program author line followed by the current time."""
    author_line = 'Автор програми:Негоденко ' + name
    time_line = 'Час компіляції: ' + str(datetime.datetime.now())
    print(author_line)
    print(time_line)


printTimeStamp('Nehodenko and Neskoromny')
true