blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
ff332087983e813cef12d7a5b5119a7604b4f2d8
|
Python
|
sadhanaauti93/End_To_End-testing
|
/PutUsres.py
|
UTF-8
| 520
| 2.59375
| 3
|
[] |
no_license
|
import requests
import json
import jsonpath

# API endpoint of the user record to update.
url = "https://reqres.in/api/users/2"

# Read the request payload from a local JSON file.
# NOTE(review): hard-coded Windows path — make configurable if reused.
with open('F:\\CreateUser.Json', 'r') as file:
    request_json = json.loads(file.read())

# Make the PUT request.  Using the ``json=`` keyword serialises the payload
# as a JSON body; the original passed the dict positionally (``data=``),
# which form-encodes it instead of sending JSON.
response = requests.put(url, json=request_json)

# The API answers 200 OK on a successful update.
assert response.status_code == 200

# Parse the response and extract the server-side update timestamp.
response_json = json.loads(response.text)
updated_li = jsonpath.jsonpath(response_json, 'updatedAt')
print(updated_li)
| true
|
b658a383fce3b41638e13383741c86b5c3398118
|
Python
|
tnoumar/esp32-libraries
|
/esp32_si1145.py
|
UTF-8
| 2,553
| 2.546875
| 3
|
[
"MIT"
] |
permissive
|
"""
MicroPython driver for SI1145 light I2C sensor, low memory version :
https://github.com/neliogodoi/MicroPython-SI1145
Version: 0.3.0 @ 2018/04/02
"""
import time
from ustruct import unpack
class SI1145(object):
    """Low-memory driver for the SI1145 UV/IR/visible light sensor over I2C.

    Register and parameter addresses are kept as raw hex constants, matching
    the upstream driver this is based on.
    """

    def __init__(self, i2c=None, addr=0x60):
        if i2c is None:
            raise ValueError('An I2C object is required.')
        self._i2c = i2c
        self._addr = addr
        self._reset()
        self._load_calibration()

    def _read8(self, register):
        # Read one unsigned byte from *register*.
        return unpack('B', self._i2c.readfrom_mem(self._addr, register, 1))[0] & 0xFF

    def _read16(self, register, little_endian=True):
        # Read two bytes; assemble low-byte-first, swap if big-endian asked.
        result = unpack('BB', self._i2c.readfrom_mem(self._addr, register, 2))
        result = ((result[1] << 8) | (result[0] & 0xFF))
        if not little_endian:
            result = ((result << 8) & 0xFF00) + (result >> 8)
        return result

    def _write8(self, register, value):
        # Write one byte (masked to 8 bits) to *register*.
        value = value & 0xFF
        self._i2c.writeto_mem(self._addr, register, bytes([value]))

    def _reset(self):
        # Zero several control registers, then issue a reset command through
        # register 0x18 and re-write the start-up value 0x17 to register 0x07.
        self._write8(0x08, 0x00)
        self._write8(0x09, 0x00)
        self._write8(0x04, 0x00)
        self._write8(0x05, 0x00)
        self._write8(0x06, 0x00)
        self._write8(0x03, 0x00)
        self._write8(0x21, 0xFF)
        self._write8(0x18, 0x01)
        time.sleep(.01)
        self._write8(0x07, 0x17)
        time.sleep(.01)

    def _write_param(self, parameter, value):
        # Stage *value* in register 0x17, trigger a parameter-set command
        # (0xA0 | parameter) through 0x18, and read back 0x2E to confirm.
        self._write8(0x17, value)
        self._write8(0x18, parameter | 0xA0)
        return self._read8(0x2E)

    def _load_calibration(self):
        self._write8(0x13, 0x7B)
        self._write8(0x14, 0x6B)
        self._write8(0x15, 0x01)
        self._write8(0x16, 0x00)
        self._write_param(0x01, 0x80 | 0x40 | 0x20 | 0x10 | 0x01)
        self._write8(0x03, 0x01)
        self._write8(0x04, 0x01)
        # BUG FIX: the original called writeto_mem(0x60, 0x0F, b'0x03'),
        # which sent the four ASCII characters "0x03" to a hard-coded
        # address.  Route it through _write8 so the configured address and
        # the actual byte value 0x03 are written to register 0x0F.
        self._write8(0x0F, 0x03)
        self._write_param(0x07, 0x03)
        self._write_param(0x02, 0x01)
        self._write_param(0x0B, 0)
        self._write_param(0x0A, 0x70)
        self._write_param(0x0C, 0x20 | 0x04)
        self._write_param(0x0E, 0x00)
        self._write_param(0x1E, 0)
        self._write_param(0x1D, 0x70)
        self._write_param(0x1F, 0x20)
        self._write_param(0x11, 0)
        self._write_param(0x10, 0x70)
        self._write_param(0x12, 0x20)
        self._write8(0x08, 0xFF)
        self._write8(0x18, 0x0F)

    @property
    def read_uv(self):
        # UV reading from 0x2C/0x2D, scaled by 1/100 to a UV index.
        return self._read16(0x2C, little_endian=True) / 100

    @property
    def read_visible(self):
        # Visible ambient light reading from 0x22/0x23.
        return self._read16(0x22, little_endian=True)

    @property
    def read_ir(self):
        # IR ambient light reading from 0x24/0x25.
        return self._read16(0x24, little_endian=True)

    @property
    def read_prox(self):
        # Proximity reading from 0x26/0x27.
        return self._read16(0x26, little_endian=True)
| true
|
fe63fa2d776386903d17a3d4232859c2f03c5571
|
Python
|
7jdope8/Bitcoin_Bruters_Toolkit
|
/SEEDPRIV/Fullkit.py
|
UTF-8
| 1,184
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
# This program takes a word file of mnemomics and calculates a new priv key and address from each line of the .txt file
# Work in Progress ... do we even need the (uncompressed) public key, just check the address? quicker
# > python fullkit.py # defaults to seedlist.txt for input
# Printing to screen is time consuming - comment out prints if serious
from bitcoin import privtopub, pubtoaddr, sha256
# Inputs/outputs: read seeds line by line; append derived addresses to one
# file and address,privkey pairs to a second file for later lookup.
with open("seedlist.txt", "r") as ourfile, open("addresses.txt", "a+") as adds, open("addr_priv.txt", "a+") as addpriv: # Open our txt file to read from, file to write to, and 3rd file is only used if we hit a match and need to reference the priv key
    for line in ourfile:
        # NOTE(review): `line` keeps its trailing newline, so the private
        # key is the SHA-256 of "<seed>\n" — confirm this matches how the
        # seed list was generated before trusting the derived keys.
        YourSeed = line
        priv= sha256(YourSeed)
        print("PRIVATE key: " + priv)
        print (" *** never share your private key ***")
        pub = privtopub(priv)
        print("public key: " + pub)
        address = pubtoaddr(pub)
        print("address: " + address)
        adds.write(address + "\n")
        # CSV-style pair so a matching address can be traced to its key.
        addpriv.write(address+","+priv + "\n")
        print ("________________________________________________")
# NOTE(review): redundant — the `with` statement above already closed the
# file; close() on an already-closed file is a harmless no-op.
ourfile.close() # Close our txt file
| true
|
0700a5d0077add75fabaecf1d682b403577393ec
|
Python
|
dadinux/elia_teaching
|
/python/compito_info/Compito_201215/esercizio1.py
|
UTF-8
| 985
| 3.890625
| 4
|
[] |
no_license
|
#!/usr/bin/env python3
#
#
def ciclo_while_somma():
    """Print the cumulative sums 0, 0+1, 0+1+2, ... for counters 0 through 10.

    One running total is printed per iteration (0, 1, 3, 6, ..., 55).
    """
    running_total = 0
    # Iterate the counter from 0 up to and including 10, folding each value
    # into the running total and showing the intermediate result.
    for counter in range(11):
        running_total = running_total + counter
        print(str(running_total))

ciclo_while_somma()
| true
|
cbf724de29769efc7f00235430174aa81adaa756
|
Python
|
HybridRbt/RoboND-Perception-Ex2
|
/RANSAC.py
|
UTF-8
| 2,305
| 2.921875
| 3
|
[] |
no_license
|
# Import PCL module
import pcl
# Load Point Cloud file
#cloud = pcl.load_XYZRGB('tabletop.pcd')
def voxel_downsampling(pcl_data):
    """Downsample *pcl_data* with a VoxelGrid filter.

    A cubic voxel (leaf) of edge 0.01 is used on all three axes; the
    filtered, downsampled cloud is returned.
    """
    leaf = 0.01  # voxel edge length, identical for x, y and z
    grid = pcl_data.make_voxel_grid_filter()
    grid.set_leaf_size(leaf, leaf, leaf)
    return grid.filter()
def passthrough_filtering(pcl_data):
    """Keep only points whose z coordinate lies within [0.6, 1.1].

    That band retains the table surface and the objects standing on it.
    """
    z_min, z_max = 0.6, 1.1
    gate = pcl_data.make_passthrough_filter()
    gate.set_filter_field_name('z')
    gate.set_filter_limits(z_min, z_max)
    return gate.filter()
def plane_fitting(pcl_data):
    """RANSAC-fit a plane to *pcl_data* and return the inlier indices.

    Note on the threshold: lesson 3-15's quiz suggests 0.01, but with that
    value the front of the table stays visible until ~0.034, while larger
    values start cutting the bottom of the bowl — 0.035 keeps only the
    table, pending a definitive choice.
    """
    threshold = 0.035
    segmenter = pcl_data.make_segmenter()
    segmenter.set_model_type(pcl.SACMODEL_PLANE)
    segmenter.set_method_type(pcl.SAC_RANSAC)
    segmenter.set_distance_threshold(threshold)
    inlier_indices, _coefficients = segmenter.segment()
    return inlier_indices
def extract_inliers(inliers, pcl_data):
    """Return the sub-cloud made of exactly the given inlier indices."""
    return pcl_data.extract(inliers, negative=False)
def extract_outliers(inliers, pcl_data):
    """Return the sub-cloud with the given inlier indices removed."""
    return pcl_data.extract(inliers, negative=True)
| true
|
d9145e5868cc60975d24b7d226a8ebeba2926c70
|
Python
|
Mcvriez/small_projects
|
/fxcmSwaps/src/fxcmconnector.py
|
UTF-8
| 1,752
| 2.609375
| 3
|
[] |
no_license
|
import fxcmpy
class FXCMConnector:
    """Wrapper around an fxcmpy session used to harvest rollover (swap)
    rates for every instrument the account offers.

    Instruments are subscribed in batches of ``subscription_step`` because
    the API only reports offers for subscribed symbols.
    """

    def __init__(self, token, log_level='', log_file='', verbose=False):
        self.token = token
        self.log_level = log_level
        self.log_file = log_file
        self.connect = fxcmpy.fxcmpy(token, log_level=log_level, log_file=log_file)
        self.instruments = self.connect.get_instruments()
        self.subscription_step = 10  # batch size for (un)subscriptions
        self.verbose = verbose
        # symbol -> [buy rollover, sell rollover]
        self.swaps = {}

    def _reconnect(self):
        # Drop and recreate the session with the stored credentials.
        self.connect.close()
        if self.verbose: print('reconnecting..')
        self.connect = fxcmpy.fxcmpy(self.token, log_level=self.log_level, log_file=self.log_file)

    def _subscribe(self, inst_list):
        for i in inst_list:
            ret = self.connect.subscribe_instrument(i)
            if self.verbose: print(f'subscribing {i}: {ret}')

    def _unsubscribe(self, inst_list):
        for i in inst_list:
            ret = self.connect.unsubscribe_instrument(i)
            if self.verbose: print(f'unsubscribing {i}: {ret}')

    def _get_swap_update(self):
        # Record rollB/rollS for every offer not captured yet.
        if self.verbose: print('getting offers')
        offer = self.connect.get_offers(kind='list')
        if self.verbose: print(offer)
        for item in offer:
            if item['currency'] not in self.swaps:
                self.swaps[item['currency']] = [item['rollB'], item['rollS']]

    def get_all_swaps(self):
        """Loop until swaps are recorded for every instrument, rotating the
        subscribed batch and reconnecting between passes.

        NOTE(review): loop nesting reconstructed from context — confirm
        _reconnect() is meant to run on every pass rather than once after
        the loop.
        """
        while len(self.swaps) < len(self.instruments):
            self._get_swap_update()
            done = list(self.swaps.keys())
            # Drop the most recently captured batch and subscribe the next
            # slice of still-missing instruments.
            self._unsubscribe(done[-self.subscription_step:])
            remains = list(set(self.instruments) - set(done))
            self._subscribe(remains[:self.subscription_step])
            self._reconnect()
        self.connect.close()
| true
|
332a69c282238caa370409fbeb92641d060da2a7
|
Python
|
kmpatzke/theGoatProblem
|
/Manuell/door.py
|
UTF-8
| 373
| 3.4375
| 3
|
[] |
no_license
|
class door:
    """One door in the game: an identifying number and the prize behind it."""

    def __init__(self, number, price ):
        # Name-mangled attributes keep the fields private to this class.
        self.__number = number
        self.__price = price

    def getNumber(self):
        """Return the door's number."""
        return self.__number

    def getPrice(self):
        """Return the prize hidden behind this door."""
        return self.__price

    def openDoor(self):
        """Announce which door opened and reveal its prize."""
        print(f"Door #{self.__number} opened. It is ..... a {self.__price}.")
| true
|
d6d6bce9310f5e26b9240e9649af8ca0f3d62ad6
|
Python
|
karist7/Python-study
|
/1장/1장9번.py
|
UTF-8
| 274
| 3.609375
| 4
|
[] |
no_license
|
import turtle

# Draw five radius-100 circles with pen-up moves between them — three along
# y=0 and two offset below at y=-150 (an interlocking-rings layout).
t=turtle.Turtle()
t.shape("turtle")
# First circle, starting from (-90, 0).
t.up()
t.goto(-90,0)
t.down()
t.circle(100)
# Second circle, starting from (90, 0).
t.up()
t.goto(90,0)
t.down()
t.circle(100)
# Third circle, starting from (270, 0).
t.up()
t.goto(270,0)
t.down()
t.circle(100)
# Fourth circle, lower row, starting from (0, -150).
t.up()
t.goto(0,-150)
t.down()
t.circle(100)
# Fifth circle, lower row, starting from (200, -150).
t.up()
t.goto(200,-150)
t.down()
t.circle(100)
| true
|
4902215e80dbdfa2bfe9a49d3a2e3d291736f06e
|
Python
|
wickywaka/stereo
|
/cm3stereo/calib_rect/rectify_preview.py
|
UTF-8
| 1,362
| 2.71875
| 3
|
[] |
no_license
|
# This program undistorts and rectifies the two halves of a side-by-side
# stereo frame captured live from a Pi stereo camera, and previews the result.
import numpy
import cv2
import io
import picamera

# Precomputed per-camera undistortion / rectification lookup maps.
undistortion_map_left = numpy.load('maps/undistortion_map_left.npy')
rectification_map_left = numpy.load('maps/rectification_map_left.npy')
undistortion_map_right = numpy.load('maps/undistortion_map_right.npy')
rectification_map_right = numpy.load('maps/rectification_map_right.npy')

stream = io.BytesIO()
index = 0
with picamera.PiCamera(stereo_mode = 'side-by-side') as camera:
    camera.resolution = (1280,480)
    camera.vflip = True
    while True:
        print(index)
        camera.capture(stream, format = 'jpeg', use_video_port = True)
        # frombuffer replaces the deprecated numpy.fromstring (identical
        # result for a bytes buffer, without the deprecation warning).
        buff = numpy.frombuffer(stream.getvalue(), dtype = numpy.uint8)
        image = cv2.imdecode(buff, 1)
        # The left/right halves of the 1280x480 frame get remapped in place
        # with their own undistortion + rectification maps.
        image[:,0:640] = cv2.remap(image[:,0:640], undistortion_map_left, rectification_map_left, cv2.INTER_NEAREST)
        image[:,640:1280] = cv2.remap(image[:,640:1280], undistortion_map_right, rectification_map_right, cv2.INTER_NEAREST)
        cv2.imshow('rectified Stereo', image)
        cv2.waitKey(1)
        index = index + 1
        # Rewind AND truncate: the original only seeked, so a shorter JPEG
        # on the next capture would leave stale trailing bytes from the
        # previous frame in the buffer.
        stream.seek(0)
        stream.truncate()
| true
|
d209ab8760b935fec05f315e91221a14751f8afb
|
Python
|
konman2/Calcmass
|
/calcmass/mass.py
|
UTF-8
| 4,063
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
from calcmass.pt_data import masses
# Module-level parser state: `multiples` maps an element symbol to its total
# occurrence count across the formula being parsed.  `val` is never read at
# module level in this file — it appears to be leftover scratch.
val = ""
multiples = {}
def add_commas(orig):
    """Insert a comma before every uppercase letter and append a trailing one.

    e.g. "NaCl" -> "Na,Cl," — the commas mark element-symbol boundaries for
    the rest of the parser.
    """
    pieces = []
    # Look at each character together with its successor; a comma goes in
    # whenever the next character starts a new element symbol.
    for current, following in zip(orig, orig[1:]):
        pieces.append(current)
        if following.isupper():
            pieces.append(",")
    pieces.append(orig[-1])
    pieces.append(",")
    return "".join(pieces)
# returns the complete coeficcient given the starting number
def find_num(i, val):
    """Return the complete number starting at index *i* of *val*.

    If the character at *i* is a letter or a comma, that single character is
    returned unchanged instead.
    """
    if val[i].isalpha() or val[i] == ',':
        return val[i]
    # Scan forward while the characters still belong to the number.
    end = i
    while end < len(val) and not val[end].isalpha() and val[end] != ",":
        end += 1
    if end >= len(val):
        return val[i:]
    return val[i:end]
# distributes the parameter 'num' to all the numbers in the string
def distribute(val, num):
build = ""
sub = 1
for i in range(len(val)):
if not val[i].isalpha():
sub = float(find_num(i, val))
sub *= num
build += str(sub)
i += len(find_num(i, val)) - 1
elif i+1 >= len(val) or (val[i+1].isupper()):
sub = 1*num
build += val[i]
build += str(sub)
i += len(find_num(i, val))
sub = 1
else:
build += val[i]
return build
# distributes the number through parenthesis
def distrib_parenth(val):
    """Expand every parenthesised group in *val* in place.

    For each "(...)N" the trailing multiplier N is distributed over the
    group's element counts via distribute(); a group without a trailing
    digit is distributed with multiplier 1.
    """
    between = ""
    op = val.find('(')
    num = 1
    while(op != -1):
        close = val.find(')', op)
        between = val[op+1:close]
        # A non-letter character right after ')' is the group multiplier.
        # NOTE(review): only one character is read (val[close+1]), so a
        # multi-digit multiplier like "(OH)12" would be truncated — confirm
        # whether inputs can carry multi-digit group counts.
        if close+1 < len(val) and not val[close+1].isalpha() and val[close+1] != '':
            num = float(val[close+1])
            val = val[:op] + distribute(between, num) + val[close+2:]
        else:
            val = val[:op] + distribute(between, num) + val[close+1:]
        # Continue with the next group in the (already rewritten) string.
        op = val.find('(', op+1)
        num = 1
    return val
# adds comma and parenthesis markers so rest of the functions can work
def add_markers(val):
    """Normalise *val* for parsing: brackets become parens, parenthesised
    groups are expanded, and symbol-separating commas are inserted."""
    if "[" in val and "]" in val:
        # Square brackets are treated exactly like parentheses.
        val = val.replace("[", "(").replace("]", ")")
    val = distrib_parenth(val)
    # Only add commas when the string is not already comma-marked.
    return val if ',' in val else add_commas(val)
# adds coefficients and symbol to the multiples dictionary
def add(mult, symb):
    """Record *mult* occurrences of element *symb* in the global multiples map.

    Any trailing ",..." marker on *symb* is stripped before recording.
    """
    comma_at = symb.find(",")
    if comma_at >= 0:
        symb = symb[:comma_at]
    # Accumulate across repeated appearances of the same symbol.
    multiples[symb] = multiples.get(symb, 0) + mult
# returns a string with no coefficients and builds the multiples
# dictionary
def strip_coeff(val):
    """Remove all coefficients from the comma-marked string *val*, recording
    each symbol's count in the global ``multiples`` map via add().

    Returns the rebuilt string of bare ",”-terminated symbols ("" if empty).
    """
    build = ""
    symb = ""          # symbol currently being assembled
    coeff = False      # did this symbol carry an explicit coefficient?
    i = 0
    while i < len(val):
        if not val[i].isalpha() and val[i] != ',':
            # A digit run: record it as this symbol's coefficient.
            num = float(find_num(i, val))
            add(num, symb)
            coeff = True
        if val[i] == ',':
            # End of a symbol.
            # NOTE(review): the nested test below is always true here (we
            # just checked val[i] == ',') — it only appends the ',' to symb.
            if val[i].isalpha() or val[i] == ',':
                symb += val[i]
            # No explicit coefficient seen: the implicit count is 1.
            if not coeff:
                add(1,symb)
            build += symb
            symb = ""
            coeff = False
        elif val[i].isalpha():
            symb += val[i]
        # Advance past the whole token (digit run, or one character).
        i += len(find_num(i, val))
    if len(build) == 0:
        return ""
    return build
def calc_single_element(val):
    """Return the total mass contributed by element *val*.

    The atomic mass is read from the ``masses`` table (dropping the entry's
    last three characters before parsing) and scaled by the occurrence count
    recorded in ``multiples``, defaulting to a single occurrence.
    """
    entry = masses[val]
    atomic_mass = float(entry[:len(entry) - 3].strip())
    return atomic_mass * multiples.get(val, 1)
# returns a mass if all elements are real
# otherwise returns the name of the offending element
def calculate(comp):
    """Sum the masses of the comma-separated symbols in *comp*.

    Returns the total mass if every symbol exists in ``masses``; otherwise
    returns the first unknown symbol (a str) so the caller can report it.
    Each distinct symbol is counted once — per-symbol multiplicity comes
    from the global ``multiples`` map inside calc_single_element().
    """
    build = ""
    count = 0
    finished = []   # symbols already added, to avoid double counting
    for i in range(len(comp)):
        if comp[i] == ",":
            # Symbol boundary: settle the symbol accumulated so far.
            if build in masses and build not in finished:
                count += calc_single_element(build)
                finished.append(build)
            elif build not in masses:
                return build
            build = ""
        elif i == len(comp) - 1:
            # Last character with no trailing comma: settle immediately.
            build += comp[i]
            if build in masses and build not in finished:
                count += calc_single_element(build)
                finished.append(build)
            elif build not in masses:
                return build
        else:
            build += comp[i]
    return count
| true
|
ceeffc11545f3127b8eeb553846c23bec891d81d
|
Python
|
GGreenfield/maze_ai
|
/Cell.py
|
UTF-8
| 748
| 3.625
| 4
|
[] |
no_license
|
import numpy as np
class Cell:
    """A 1x1px non-wall space in the maze — a valid position for the solver."""

    # Each direction maps to the direction of the matching wall as seen
    # from the neighbouring cell.
    wall_dic = {"N": "S", "S": "N", "W": "E", "E": "W"}

    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.visited = False
        # All four walls start intact.
        self.walls = dict.fromkeys(("N", "S", "W", "E"), True)
        # Path-cost score used for ordering; starts at infinity.
        self.fScore = np.inf

    def __str__(self):
        return f"Cell: ({self.x},{self.y})"

    def __lt__(self, other):
        # Order cells by score so they can sit in a priority queue.
        return self.fScore < other.fScore

    def remove_wall(self, other, wall):
        """Remove the wall between this cell and *other* on both sides."""
        self.walls[wall] = False
        other.walls[Cell.wall_dic[wall]] = False
|
4300fb19708d69837bd418655c445baee02f0edd
|
Python
|
gva-jjoyce/gva_data
|
/tests/test_display.py
|
UTF-8
| 1,555
| 3.171875
| 3
|
[
"Apache-2.0"
] |
permissive
|
"""
Tests for paths to ensure the split and join methods
of paths return the expected values for various
stimulus.
"""
import datetime
import sys
import os
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from gva.data.formats import display
try:
from rich import traceback
traceback.install()
except ImportError: # pragma: no cover
pass
def test_to_html():
    """html_table should render the headers and cell values of a small dataset."""
    ds = [
        {'key': 1, 'value': 'one', 'plus1': 2},
        {'key': 2, 'value': 'two', 'plus1': 3},
        {'key': 3, 'value': 'three', 'plus1': 4},
        {'key': 4, 'value': 'four', 'plus1': 5}
    ]
    html = display.html_table(ds)
    # are the headers there
    # NOTE(review): the expected fragments use "<th>key<th>" with no closing
    # slash — presumably matching html_table's actual output; confirm this
    # is the renderer's intent and not a latent bug in both places.
    assert "<th>key<th>" in html
    assert "<th>value<th>" in html
    assert "<th>plus1<th>" in html
    # test for some of the values
    assert "<td>one<td>" in html
    assert "<td>1<td>" in html
    assert "<td>5<td>" in html
def test_to_ascii():
    """ascii_table should render headers and values padded with spaces."""
    ds = [
        {'key': 1, 'value': 'one', 'plus1': 2},
        {'key': 2, 'value': 'two', 'plus1': 3},
        {'key': 3, 'value': 'three', 'plus1': 4},
        {'key': 4, 'value': 'four', 'plus1': 5}
    ]
    axki = display.ascii_table(ds)
    print(axki)
    # are the headers there (surrounding spaces avoid substring false hits)
    assert " key " in axki
    assert " value " in axki
    assert " plus1 " in axki
    # test for some of the values
    assert " one " in axki
    assert " 1 " in axki
    assert " 5 " in axki
if __name__ == "__main__":
test_to_html()
test_to_ascii()
print('okay')
| true
|
8310f36219a5b9eff3962e5e00b3f22039d449d9
|
Python
|
franciscoalbear/proyecto_productos
|
/productos.py
|
UTF-8
| 3,348
| 3.09375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from Tkinter import *
import CRUD
def ventana_principal():
    """Main window of the grocery ('Abarrotes') app: one button per CRUD action."""
    root = Tk()
    root.title("Abarrotes")
    root.geometry("400x400")
    frame = Frame(root)
    frame.pack(padx = 30,pady = 30)
    label = Label(frame, text="Tienda de Abarrotes",fg = "blue",font=("Arial",18))
    label.pack(padx = 10,pady = 20)
    b1 = Button(frame,text="Agregar un artículo",command=ventana_agregar)
    b1.pack(padx = 10,pady = 10)
    # NOTE(review): the "Buscar", "Actualizar" and "Acerca de" buttons have
    # no command wired up yet — clicking them does nothing.
    b2 = Button(frame,text="Buscar una artículo")
    b2.pack(padx = 10,pady = 10)
    b3 = Button(frame,text="Eliminar un artículo",command=ventana_eliminar)
    b3.pack(padx = 10,pady = 10)
    b4 = Button(frame,text="Actualizar un artículo")
    b4.pack(padx = 10,pady = 10)
    b5 = Button(frame,text="Acerca de")
    b5.pack(padx = 10,pady = 10)
    b6 = Button(frame,text="Salir",command=root.destroy)
    b6.pack(padx = 10,pady = 10)
    root.mainloop()
def display_eliminarnombre():
    """Dialog: type a product name and delete it via CRUD.eliminar."""
    dn = Tk()
    dn.title("Eliminar por nombre")
    dn.geometry("300x100")
    label = Label(dn,text="Escriba el nombre del producto a eliminar").pack()
    # NOTE(review): textvariable expects a Tk variable object, not the int 1
    # — probably unintended, though the entry still works.
    enom = Entry(dn,textvariable = 1)
    enom.pack()
    bn = Button(dn,text="Eliminar",command=lambda:CRUD.eliminar(Entry.get(enom)))
    bn.pack()
    dn.mainloop()
def display_eliminarcodigo():
    """Dialog skeleton for deleting a product by code (not functional yet)."""
    dn = Tk()
    dn.title("Eliminar por codigo")
    dn.geometry("300x100")
    label = Label(dn,text="Escriba el código del producto a eliminar").pack()
    # NOTE(review): Entry(...).pack() returns None, so the widget reference
    # is lost and its value can never be read; the button also has no
    # command — this feature is unfinished.
    ecodigo = Entry(dn).pack()
    bc = Button(dn,text="Eliminar")
    bc.pack()
    dn.mainloop()
def display_eliminarid():
    """Dialog skeleton for deleting a product by id (not functional yet)."""
    dn = Tk()
    dn.title("Eliminar por id")
    dn.geometry("300x100")
    label = Label(dn,text="Escriba el id del producto a eliminar").pack()
    # NOTE(review): same issue as display_eliminarcodigo — pack() returns
    # None and the button has no command.
    eid = Entry(dn).pack()
    bi = Button(dn,text="Eliminar")
    bi.pack()
    dn.mainloop()
def display_message():
    """Popup meant to confirm that the last operation succeeded."""
    m = Tk()
    m.geometry("90x200")
    # NOTE(review): the label is created but never pack()ed, so the window
    # is shown empty.
    label = Label(m,text="Se ha realizado la operación con éxito")
    m.mainloop()
def ventana_agregar():
    """Modal window to register a new product (name, brand, cost, code)."""
    v2 = Tk()
    v2.title("Agregar productos")
    v2.geometry("400x400")
    v2.focus_set()
    v2.grab_set()
    f = Frame(v2)
    f.pack()
    label = Label(v2, text="Agregar productos",fg = "blue",font=("Arial",18))
    label.pack(padx = 10,pady = 20)
    lnombre = Label(v2,text="Nombre:").pack()
    enombre = Entry(v2,textvariable = 1)
    enombre.pack()
    lmarca = Label(v2,text="Marca:").pack()
    emarca = Entry(v2,textvariable = 2)
    emarca.pack()
    lcosto = Label(v2,text="Costo:").pack()
    ecosto = Entry(v2,textvariable = 3)
    ecosto.pack()
    lcodigo = Label(v2,text="Código:").pack()
    ecodigo = Entry(v2,textvariable = 4)
    ecodigo.pack()

    def _registrar():
        # Persist the product, then confirm to the user.
        # BUG FIX: the original wired `CRUD.registrar(...) or display_message`
        # — display_message was never *called*, so the confirmation window
        # never appeared.
        CRUD.registrar(enombre.get(), emarca.get(), ecosto.get(), ecodigo.get())
        display_message()

    b1v2 = Button(v2,text="Agregar",command=_registrar)
    b1v2.pack(padx=30,pady=0)
    b2v2 = Button(v2,text="Atrás",command=v2.destroy).pack(padx=30,pady=0)
    v2.mainloop()
def ventana_eliminar():
    """Window offering the three delete modes: by name, by code, by id."""
    v3 = Tk()
    v3.title("Eliminar productos")
    v3.geometry("400x400")
    v3.focus_set()
    v3.grab_set()
    f = Frame(v3).pack()
    label = Label(v3,text="Eliminar producto",fg="blue",font=("Arial",18)).pack(padx = 10,pady = 20)
    b1 = Button(v3,text="Por nombre",command=display_eliminarnombre)
    b1.pack(padx=10,pady=20)
    b2 = Button(v3,text="Por Código",command=display_eliminarcodigo)
    b2.pack(padx=10,pady=20)
    b3 = Button(v3,text="Por id",command=display_eliminarid)
    b3.pack(padx=10,pady=20)
    v3.mainloop()

# Launch the application.
ventana_principal()
| true
|
ec59d053b66b75451460b36f0ab5b34da4a1ab40
|
Python
|
CarlosGiovannyG/Curso_Python
|
/Modulos/modulos.py
|
UTF-8
| 1,304
| 4.09375
| 4
|
[] |
no_license
|
"""
MODULO: es un archivo con extención .py o .pyc (PYTHON COMPILADO), es un modulo que posee
su propio espácio de nombres osea que contiene su propio contexto; el
cual puede contener variables, funciones,clases o incluso otros modulos
PARA QUE SIRVEN?: sirven para organizar mejor el código y poder reutilizarlo mejor.
Lo anterior viene ligado a dos principios que son: LA MODULIZACIÓN Y REUTILIZACIÓN.
De esta manera podemos hacer el código mas mantenible y podemos reutilizar y
estará mas organizado
"""
"""Para poder hacer uso de un modulo primero lo debemos importar con la palabra reservada IMPORT
ademas al usarla debemos ombrar el archivo donde se encuentra y a traves de un punto
nombramos la funcion o clase que vamos a usar"""
import Funciones_matematicas
print(Funciones_matematicas.sumar(5, 6))
print(Funciones_matematicas.multiplicar(5, 6))
print(" ")
print(" ")
"""Para hacerlo mas simple usamos la palabra reservada FROM e indicamos
la ruta (carpeta.carpeta2.archivo IMPORT) donde se encuentra el archivo
que vamos a usar y luego la palabra IMPORT y luego la función o clase a usar"""
from curso.Modulos.Funciones_matematicas import sumar,multiplicar
print(sumar(5, 6))
print(multiplicar(5, 6))
print(" ")
print(" ")
| true
|
60d5bc8588bd494bb05f8bb58516a10c8c160e41
|
Python
|
Dressro/PythonStudy
|
/Python02/crawling/instargram/image.py
|
UTF-8
| 271
| 2.671875
| 3
|
[] |
no_license
|
# -*- coding:utf-8 -*-
from bs4 import BeautifulSoup
import requests

# Ask the user for a hashtag and fetch its Instagram explore page.
tag = input("search tag : ")
url = 'http://www.instagram.com/explore/tags/' + tag
resp = requests.get(url)
soup = BeautifulSoup(resp.text,'html.parser')
# BUG FIX: the original passed the SET {'class','KL4Bh'} as the attrs
# argument, which does not filter on the class attribute; an attrs DICT is
# required to match <div class="KL4Bh">.
# NOTE(review): 'KL4Bh' is an obfuscated, version-specific Instagram class
# name and the page is largely script-rendered — this selector is fragile
# and likely returns None on current pages.
print(soup.find('div', {'class': 'KL4Bh'}))
| true
|
f6cea77996c3e727fd796874c7e6e99a4b7c4445
|
Python
|
ReardenMetals/csc-manager
|
/ui/update_widget.py
|
UTF-8
| 1,384
| 3.0625
| 3
|
[] |
no_license
|
import tkinter
from tkinter import messagebox
from tkinter.ttk import Progressbar
from controller.update_controller import UpdateController
class UpdateWidget:
    """Tkinter panel with an entry for the last good coin id, an Update
    button and an indeterminate progress bar, wired to UpdateController."""

    def __init__(self, root):
        # Entry row for the last successfully processed coin id.
        last_coin_frame = tkinter.Frame(root, pady=15)
        tkinter.Label(last_coin_frame, text="Enter the last good coin id").pack()
        self.last_coin_entry = tkinter.Entry(last_coin_frame)
        self.last_coin_entry.pack()
        last_coin_frame.pack()
        # Update button.
        btn_frame = tkinter.Frame(root, pady=15)
        update_btn = tkinter.Button(btn_frame, text="Update", width=20, height=2)
        update_btn.config(command=self.on_update_clicked)
        update_btn.pack()
        btn_frame.pack()
        # Progress bar widget
        progress_frame = tkinter.Frame(root)
        self.progress = Progressbar(progress_frame, orient=tkinter.HORIZONTAL, length=100, mode='indeterminate')
        self.progress.pack()
        progress_frame.pack()
        self.update_controller = UpdateController(self, root)

    def on_update_clicked(self):
        """Start the update run for everything after the entered coin id."""
        last_good_coin = self.last_coin_entry.get()
        print("Update clicked: " + last_good_coin)
        # Fill the bar to show activity while the controller works.
        self.progress['value'] = 100
        self.update_controller.update(last_good_coin)

    def show_success(self):
        """Reset the progress bar and announce completion to the user."""
        self.progress['value'] = 0
        # NOTE(review): the message says "generated" though this widget
        # performs an update — confirm the intended wording before changing.
        messagebox.showinfo("Generate success", "Crypto successfully generated!")
|
7773c82b02c10be14848faa6780c17f16aba2c44
|
Python
|
neil444/nd
|
/weight.py
|
UTF-8
| 282
| 2.625
| 3
|
[
"MIT"
] |
permissive
|
import plotly.figure_factory as ff
import pandas as pd
import csv  # NOTE(review): unused import — candidate for removal

# Load the sample weight data from a hard-coded local path.
df = pd.read_csv("c:/Users/ADI/Downloads/Normal-Distribution-master (1)/Normal-Distribution-master/data.csv")
# Plot the smoothed weight distribution (density curve only, no histogram).
fig = ff.create_distplot([df["Weight(Pounds)"].tolist()], ["Weight"], show_hist=False)
fig.show()
| true
|
0061246a333c2205778381614fb1e041e9081200
|
Python
|
MauroVA98/DAPOS
|
/src/atmos/nrlmsise00/IndexFindr/solarflux_process/data_extract.py
|
UTF-8
| 1,525
| 3
| 3
|
[] |
no_license
|
import pandas as pd
import os
import datetime as dt
class DataImport(object):
    """Load the NRLMSISE-00 F10.7 solar-flux index file into a DataFrame.

    Each input line starts with a YYYYMMDD date (8 characters) followed by
    the flux value; rows with no reading become None.
    """

    def __init__(self, datafile: str = r'\\'.join(os.getcwd().split('\\')[:-4]) + '\\data\\nrlmsise00_data\\SolarFlux_Indices\\nlrmsise00_f107data.txt'):
        # NOTE(review): the default path is built with Windows backslash
        # separators relative to the current working directory — it only
        # resolves on Windows when run from the expected directory depth.
        self.__datafile = datafile
        self.__data = None  # raw lines at first, replaced by a DataFrame in __parse
        self.__import()
        self.__parse()

    def return_data(self):
        """Return the parsed DataFrame with 'date' and 'Solar Index' columns."""
        return self.__data

    def __import(self):
        # Read the whole file into a list of raw lines.
        with open(self.__datafile, 'r') as datafile:
            lines = datafile.readlines()
            self.__data = lines

    @staticmethod
    def __stringsplitter(string: str):
        # Strip spaces and the trailing newline from the flux field
        # (everything after the 8-character date prefix).
        result = ''
        for char in string[8:]:
            if char == " " or char == '\n':
                continue
            else:
                result += char
        return result

    def __parse(self):
        # Convert the raw lines into parallel date / flux-value lists.
        dates = []
        solar_index = []
        for row in self.__data:
            length = len(row)
            # First 8 characters are YYYYMMDD.
            date = dt.date(int(row[:8][0:4]),int(row[:8][4:6]),int(row[:8][6:8])).isoformat()
            dates.append(date)
            # A 9-character row (date + newline) or a ' .' marker means no
            # reading is available for that day.
            if length == 9 or ' .' in row:
                solar_index.append(None)
            else:
                solar_index.append(float(self.__stringsplitter(row)))
        data = {
            'date': dates,
            'Solar Index': solar_index
        }
        self.__data = pd.DataFrame.from_dict(data)

if __name__ == "__main__":
    # Quick manual check: load the default data file and expose the frame.
    DI = DataImport()
    a = DI.return_data()
| true
|
112f68e92dcd1d577b2b2fa0faea80cff5c6c6e5
|
Python
|
hmarmal/Ch.07_Graphics
|
/7.2_Picasso.py
|
UTF-8
| 3,078
| 3.390625
| 3
|
[] |
no_license
|
'''
PICASSO PROJECT
---------------
Your job is to make a cool picture.
You must use multiple colors.
You must have a coherent picture. No abstract art with random shapes.
You must use multiple types of graphic functions (e.g. circles, rectangles, lines, etc.)
Somewhere you must include a WHILE or FOR loop to create a repeating pattern.
Do not just redraw the same thing in the same location 10 times.
You can contain multiple drawing commands in a loop, so you can draw multiple train cars for example.
Please use comments and blank lines to make it easy to follow your program.
If you have 5 lines that draw a robot, group them together with blank lines above and below.
Then add a comment at the top telling the reader what you are drawing.
IN THE WINDOW TITLE PLEASE PUT YOUR NAME.
When you are finished Pull Request your file to your instructor.
'''
import arcade

arcade.open_window(600,600,"Malsawmthara Hmar")
arcade.set_background_color(arcade.color.WHITE_SMOKE)
arcade.start_render()

x_offset=0
y_offset=0
# Background grid: 30 vertical then 30 horizontal lines, 20px apart.
for i in range(30):
    x_offset+=20
    arcade.draw_line(x_offset,0,x_offset,600,arcade.color.GENERIC_VIRIDIAN,6) #draw the vertical lines
for i in range(30):
    y_offset+=20
    arcade.draw_line(0,y_offset,600,y_offset,arcade.color.GENERIC_VIRIDIAN,6) #draw the horizontal lines

# Green circle forming the backdrop of the scene.
arcade.draw_circle_filled(300,120,160,arcade.color.GO_GREEN,) #draw the circle

# White blocks with black outlines assembled into a structure in front of
# the circle (drawn back-to-front).
arcade.draw_rectangle_filled(300,10,110,20,arcade.color.WHITE)
arcade.draw_rectangle_outline(300,10,110,20,arcade.color.BLACK,5) #midle bottom block
arcade.draw_rectangle_filled(318,10,10,20,arcade.color.WHITE)
arcade.draw_rectangle_outline(318,10,10,20,arcade.color.BLACK,5) #little block on the right
arcade.draw_rectangle_filled(282,10,10,20,arcade.color.WHITE)
arcade.draw_rectangle_outline(282,10,10,20,arcade.color.BLACK,5) #little block on the left
arcade.draw_rectangle_filled(300,125,44,210,arcade.color.WHITE)
arcade.draw_rectangle_outline(300,125,44,210,arcade.color.BLACK,5) #biggest block behind
arcade.draw_rectangle_filled(300,45,90,50,arcade.color.WHITE)
arcade.draw_rectangle_outline(300,45,90,50,arcade.color.BLACK,5) #big middle block
arcade.draw_rectangle_filled(280,100,30,160,arcade.color.WHITE)
arcade.draw_rectangle_outline(280,100,30,160,arcade.color.BLACK,5) #left block at front
arcade.draw_rectangle_filled(320,100,30,160,arcade.color.WHITE)
arcade.draw_rectangle_outline(320,100,30,160,arcade.color.BLACK,5) #right block at front
arcade.draw_rectangle_filled(281,200,20,40,arcade.color.WHITE)
arcade.draw_rectangle_outline(281,200,20,40,arcade.color.BLACK,5) #left block at front
arcade.draw_rectangle_filled(319,200,20,40,arcade.color.WHITE)
arcade.draw_rectangle_outline(319,200,20,40,arcade.color.BLACK,5) #Right block at front
arcade.draw_rectangle_filled(300,235,28,10,arcade.color.WHITE)
arcade.draw_rectangle_outline(300,235,28,10,arcade.color.BLACK,5) #Top block

arcade.draw_line(300,240,300,280,arcade.color.BLACK,5) #top line
arcade.draw_text("$",227,360,arcade.color.GOLD,200) #dollar sign

arcade.finish_render()
arcade.run()
| true
|
b70b13dc2142b72a482c5a5bc80234950c3c5eda
|
Python
|
dhatuker/for-pkl
|
/db/NewsparserDatabaseHandler.py
|
UTF-8
| 4,854
| 2.546875
| 3
|
[] |
no_license
|
import configparser
import logging
import records
class NewsparserDatabaseHandler(object):
    """Thin wrapper around a `records` MySQL connection for the news parser.

    Holds the connection parameters, opens the connection at construction
    time, and exposes insert/select helpers for the news_* tables.
    """

    # Class-level defaults; _instance acts as a singleton slot.
    _instance = None
    _db = None
    _host = None
    _port = None
    _user = None
    _pass = None
    _dbname = None
    logger = None

    # NOTE(review): defined without @staticmethod — it still works when
    # called as NewsparserDatabaseHandler.getInstance(...) because the
    # arguments map positionally, but it cannot be called on an instance.
    def getInstance(_host, _port, _user, _pass, _dbname):
        return NewsparserDatabaseHandler(_host, _port, _user, _pass, _dbname)

    def __init__(self, _host, _port, _user, _pass, _dbname):
        self._host = _host
        self._port = _port
        self._user = _user
        self._pass = _pass
        self._dbname = _dbname
        self.logger = logging.getLogger()
        self.connect()
        NewsparserDatabaseHandler._instance = self

    def setLogger(self, logger):
        """Replace the default root logger with the caller's logger."""
        self.logger = logger

    def connect(self):
        """Open the MySQL connection and verify it with SELECT VERSION()."""
        # try:
        self.logger.debug('connecting to MySQL database...')
        # NOTE(review): the connection string embeds the raw password and is
        # logged at debug level — consider redacting before logging.
        conn_string = 'mysql://{}:{}/{}?user={}&password={}&charset=utf8mb4'. \
            format(self._host, self._port, self._dbname, self._user, self._pass)
        self.logger.debug(conn_string)
        self._db = records.Database(conn_string)
        rs = self._db.query('SELECT VERSION() as ver', fetchall=True)
        if len(rs) > 0:
            db_version = rs[0].ver
        # except sqlalchemy.exc.OperationalError as error:
        #     self.logger.info('Error: connection not established {}'.format(error))
        # NOTE(review): with the except block commented out, the next line
        # always runs and clears the singleton even on success (it is re-set
        # by __init__ immediately after connect() returns).  Also,
        # `db_version` is unbound here if the version query returned no rows.
        NewsparserDatabaseHandler._instance = None
        # else:
        self.logger.debug('connection established: {}'.format(db_version))

    @staticmethod
    def instantiate_from_configparser(cfg, logger):
        """Build a handler from a ConfigParser [Database] section."""
        if isinstance(cfg, configparser.ConfigParser):
            dbhandler = NewsparserDatabaseHandler.getInstance(cfg.get('Database', 'host'),
                                                              cfg.get('Database', 'port'),
                                                              cfg.get('Database', 'username'),
                                                              cfg.get('Database', 'password'),
                                                              cfg.get('Database', 'dbname'))
            dbhandler.setLogger(logger)
            return dbhandler
        else:
            raise Exception('cfg is not an instance of configparser')

    def insert_news(self, news_id, link, title, date, content):
        """Upsert one article row into news_content (REPLACE by key)."""
        sql = """REPLACE INTO news_content (news_id, link, title, date, content)
        VALUES (:news_id, :link, :title, :date, :content)"""
        rs = self._db.query(sql, news_id=news_id, link=link, title=title, date=date, content=content)
        return rs

    def get_source(self, input):
        """Return news_source rows whose link contains *input* (LIKE match)."""
        test = '%' + input + '%'
        sql = """SELECT * FROM news_source WHERE link LIKE :test"""
        rs = self._db.query(sql, test=test)
        return rs

    def get_article(self, input):
        """Return news_content rows whose date starts with *input* (prefix LIKE)."""
        time = input + '%'
        sql = """SELECT * FROM news_content WHERE date LIKE :time"""
        rs = self._db.query(sql, time=time)
        return rs

    def insert_prepro(self, date, news_num, news_word):
        """Upsert one preprocessing-result row into news_prepro."""
        sql = """REPLACE INTO news_prepro
        (date, news_num, news_word)
        VALUES (:date, :news_num, :news_word)"""
        rs = self._db.query(sql, date=date, news_num=news_num, news_word=news_word)
        return rs

    def get_prepro(self, input):
        """Return news_prepro rows whose date starts with *input*."""
        time = input + '%'
        sql = """SELECT * FROM news_prepro WHERE date LIKE :time"""
        rs = self._db.query(sql, time=time)
        return rs

    def insert_newstopic(self, document_no, dominant_topic, topic_perc, keywords, text, time):
        """Upsert one per-document topic assignment into news_topic."""
        sql = """REPLACE INTO news_topic
        (document_no, dominant_topic, topic_perc_contrib, keywords, text, date)
        VALUES (:document_no, :dominant_topic, :topic_perc, :keywords, :text, :time)"""
        rs = self._db.query(sql, document_no=document_no, dominant_topic=dominant_topic,
                            topic_perc=topic_perc, keywords=keywords, text=text, time=time)
        return rs

    def insert_newsdominant(self, dominant_topic, topic_keywords, num_doc, perc_doc, time):
        """Upsert one per-topic summary row into news_dominant_topic."""
        sql = """REPLACE INTO news_dominant_topic
        (dominant_topic, topic_keywords, num_doc, perc_doc, date)
        VALUES (:dominant_topic, :topic_keywords, :num_doc, :perc_doc, :time)"""
        rs = self._db.query(sql, dominant_topic=dominant_topic, topic_keywords=topic_keywords,
                            num_doc=num_doc, perc_doc=perc_doc, time=time)
        return rs

    def insert_newsrepre(self, topic_num, topik_perc, keyword, text, time):
        """Upsert one representative-document row into news_repre_doc."""
        sql = """REPLACE INTO news_repre_doc
        (Topic_Num, Topic_Perc_Contrib, Keywords, Text, date)
        VALUES (:topic_num, :topik_perc, :keyword, :text, :time)"""
        rs = self._db.query(sql, topic_num=topic_num, topik_perc=topik_perc,
                            keyword=keyword, text=text, time=time)
        return rs
| true
|
31fdda66962d9e3566ee666667ab724ccef2ef1d
|
Python
|
yasinshaw/leetcode
|
/src/n71.py
|
UTF-8
| 429
| 2.96875
| 3
|
[] |
no_license
|
#
# @lc app=leetcode.cn id=71 lang=python3
#
# [71] 简化路径
#
# @lc code=start
class Solution:
    def simplifyPath(self, path: str) -> str:
        """Collapse a Unix path: drop '', '.', resolve '..' against a stack."""
        stack = []
        for segment in path.split("/"):
            if segment in ("", "."):
                continue
            if segment == "..":
                if stack:
                    stack.pop()
                continue
            stack.append(segment)
        return "/" + "/".join(stack)
# @lc code=end
| true
|
d2bdfac1780205874bc6d30cace6983ee9b0f8ad
|
Python
|
zakf/cython_talk
|
/ex3.py
|
UTF-8
| 2,202
| 3.203125
| 3
|
[
"MIT"
] |
permissive
|
# File ex3.py
#
# Author: Zak Fallows (zakf@mit.edu)
# Copyright 2013
# Released for free use under the terms of the MIT License, see license.txt
#
# Demonstrates the speed of Cython.
import time
import ex3_u
import ex3_t
#============================= Interpreted Python =============================#
def inner_ipy(seed, factor):
    """Innermost benchmark kernel: (seed * factor) mod 278351."""
    return (seed * factor) % 278351
def middle_ipy(seed, n):
    """Middle benchmark loop: accumulate n kernel terms, wrapping the
    running sum and seed to keep the numbers bounded."""
    total = seed + 34
    for i in range(n):
        total += inner_ipy(seed, i)
        seed += 61
        if total > 94217452:
            total %= 621943
        if seed > 6129435:
            seed %= 84125
    return total
def outer_ipy(seed, n0, n1):
    """Outer benchmark loop: n0 middle passes of n1 iterations each."""
    total = 0
    for i in range(n0):
        per_pass_seed = (seed + i) % 6943
        total += middle_ipy(per_pass_seed, n1)
        if total > 7245103:
            total %= 22581
    return total
#================================= Test Speed =================================#
def print_results(mode, t0, t1, dt_base=None, result=None):
dt_curr = t1 - t0
print "%s:" % mode
if result:
print " Result = %s" % result
print " Time: %s seconds" % dt_curr
if dt_base != None:
print " Ratio: %.3f" % (dt_base / dt_curr)
print ''
return dt_curr
def run_tests():
    """Benchmark the same workload across the interpreted and Cython variants,
    reporting each variant's speedup relative to interpreted Python."""
    seed, n0, n1 = 73, 1000, 10000

    # Baseline: pure interpreted Python.
    t0 = time.time()
    r0 = outer_ipy(seed, n0, n1)
    t1 = time.time()
    dt_ipy = print_results("Interpreted Python", t0, t1, result=r0)

    # Compiled variants, all measured against the interpreted baseline.
    variants = (
        ("Untyped Cython", ex3_u.outer_u),
        ("Typed Cython, Python Functions", ex3_t.outer_tpy),
        ("Typed Cython, Python Functions II", ex3_t.outer_tpy2),
        ("Typed Cython, C Functions", ex3_t.outer_tc),
    )
    for label, fn in variants:
        t0 = time.time()
        r0 = fn(seed, n0, n1)
        t1 = time.time()
        print_results(label, t0, t1, dt_ipy, r0)

# Do the benchmarks on import, for convenience:
run_tests()
| true
|
016d350e0129d434b188f62701dcb35300f1ca32
|
Python
|
Dan-Teles/URI_JUDGE
|
/1873 - Pedra-papel-tesoura-lagarto-Spock.py
|
UTF-8
| 1,494
| 3.0625
| 3
|
[] |
no_license
|
# Rock-paper-scissors-lizard-Spock (URI 1873).
# The 20-branch elif chain is replaced with a (winner_move, loser_move)
# table: the first player ("rajesh") wins when his move beats the second
# player's; the reverse pair means "sheldon"; anything else is a draw.
WINS = {
    ('papel', 'pedra'), ('papel', 'spock'),
    ('pedra', 'lagarto'), ('pedra', 'tesoura'),
    ('tesoura', 'papel'), ('tesoura', 'lagarto'),
    ('lagarto', 'spock'), ('lagarto', 'papel'),
    ('spock', 'tesoura'), ('spock', 'pedra'),
}

n = int(input())
for _ in range(n):
    a, b = input().split()
    if (a, b) in WINS:
        print('rajesh')
    elif (b, a) in WINS:
        print('sheldon')
    else:
        print('empate')
| true
|
3968326009ad6d6735b646a47428b87628cda79b
|
Python
|
trinhgliedt/Algo_Practice
|
/2021_01_15_HackerRank_Roblox_assessment.py
|
UTF-8
| 2,388
| 2.984375
| 3
|
[] |
no_license
|
from collections import deque
import itertools
import collections
# https://www.hackerrank.com/challenges/climbing-the-leaderboard/problem
# NOTE(review): dead code -- this draft is immediately shadowed by the second
# `numPlayers` definition below.  It is also broken: the final loop recurses
# without using the result meaningfully and the function never returns.
# Kept for history only; do not call.
def numPlayers(k, scores):
    l = len(scores)
    if len(scores) > 0:
        lowest = scores[0]
    else:
        lowest = -1
    rank = []
    numOfPlayers = 0
    if len(rank) == l:  # NOTE(review): only true when scores is empty
        return numOfPlayers
    for i in range(1, len(scores)):
        if scores[i] < lowest:
            lowest = scores[i]
    # count number of lowest
    count = 0
    newScores = []
    for s in scores:
        if s == lowest:
            count += 1
        if s > lowest:
            newScores.append(s)
    for n in range(count):
        rank.append(len(rank)+1)
    countPlayers = 0
    for r in rank:
        if r >= k:
            countPlayers += 1
    # NOTE(review): recursive result is folded into `lowest` and then
    # discarded; there is no return statement on this path.
    for i in range(len(scores)):
        lowest = min(lowest, numPlayers(k, newScores))
def numPlayers(k, scores):
    """Count players whose competition rank is within the top k.

    Equal scores share a rank (standard competition ranking).  Fixes a
    shadowing bug: the loop variable was also named `k`, so the cutoff
    compared the current rank against the raw score instead of against
    the `k` parameter.
    """
    count = collections.Counter(scores)
    ans, cur_rank = 0, 1
    for score, freq in sorted(count.items(), reverse=True):
        if cur_rank > k:  # past the rank cutoff -- stop counting
            break
        ans += freq
        cur_rank += freq  # all `freq` tied players consume `freq` ranks
    return ans
# print(numPlayers(3, [100, 50, 50, 25]))
# print(numPlayers(4, [20, 40, 60, 80, 10]))
# https: // leetcode.com/problems/least-number-of-unique-integers-after-k-removals/
def deleteProducts(ids, m):
    """Minimum number of distinct ids left after removing m items.

    Greedy: delete the rarest ids first (LC 1481).  Replaces the original
    O(n^2) `ids.count(i)` comprehension with a single Counter pass.
    """
    remaining = len(set(ids))
    for freq in sorted(collections.Counter(ids).values()):
        if m - freq < 0:
            break  # cannot fully remove the next-rarest id
        remaining -= 1
        m -= freq
    return remaining
# https://leetcode.com/discuss/interview-question/221639/
def finMinDistance(h, w, n):
    """Brute force: try every placement of n sources on an h x w grid and
    return the minimum over placements of the worst-case BFS distance from
    the sources to the farthest cell.  Exponential in n; tiny grids only."""
    arr = []
    for i in range(h):
        for j in range(w):
            arr.append((i, j, 0))  # every cell as (row, col, seed distance 0)
    ans = float("inf")
    for points in itertools.combinations(arr, n):
        # multi-source BFS seeded with the chosen cells
        q = deque([])
        visited = set()
        # NOTE(review): the loop target shadows parameter `n`; harmless only
        # because `combinations` was already evaluated and `n` is not reused.
        for m, n, dist in points:
            q.append((m, n, dist))
            visited.add((m, n))
        distAns = 0
        distArr = []  # NOTE(review): never used
        while q:
            i, j, dist = q.popleft()
            distAns = max(dist, distAns)  # track farthest cell so far
            for x, y in ((i+1, j), (i-1, j), (i, j+1), (i, j-1)):
                if 0 <= x < h and 0 <= y < w and (x, y) not in visited:
                    q.append((x, y, dist+1))
                    visited.add((x, y))
        ans = min(distAns, ans)  # best placement seen so far
    return ans
| true
|
ba30e461d4ce515b9dae212c13b32d43816456f6
|
Python
|
alvas-education-foundation/anagha_iyengar
|
/coding_solutions/23rd may solution.py
|
UTF-8
| 282
| 3.359375
| 3
|
[] |
no_license
|
PROGRAM 1
/* WriteaCProgram toDisplayfirstNTriangularNumbers(WhereNisreadfrom the
Keyboard)*
#include<stdio.h>
voidtriangular_series(intn)
{
for(inti=1;i<=n;i++)
printf("%d",i*(i+1)/2);
}
intmain()
{
intn;
printf("Entervalueforn");
scanf("%d",&n);
triangular_series(n);
return0;
}
| true
|
a479130b9dec2472bceefe0a114f7d6d5430375e
|
Python
|
songzy12/LeetCode
|
/python/42.trapping-rain-water.py
|
UTF-8
| 692
| 3.265625
| 3
|
[] |
no_license
|
class Solution(object):
    def trap(self, height):
        """Two-pointer trapped-rain-water (LC 42).

        :type height: List[int]
        :rtype: int
        """
        if not height:
            return 0
        left, right = 0, len(height) - 1
        total = 0
        # water level is bounded by the shorter of the two outer walls
        level = min(height[left], height[right])
        while left < right:
            # advance the lower side inward
            if height[left] < height[right]:
                left += 1
            else:
                right -= 1
            floor = min(height[left], height[right])
            if floor > level:
                level = floor  # walls got taller: raise the safe level
            total += level - floor  # water column above the current floor
        return total
# Ad-hoc smoke test (Python 2 print statement -- this file targets Python 2).
height = [0,1,0,2,1,0,1,3,2,1,2,1]
print Solution().trap(height)
| true
|
0c71b2f7bd7161c0d2748139e99774010b579cea
|
Python
|
kruthikakalmali/Solving-Scene-Understanding-for-Autonomous-Navigation-in-Unstructured-Environments-FY-PROJECT-2021
|
/UTILITY/domain_adaptation/source/core/bdds.py
|
UTF-8
| 1,918
| 2.53125
| 3
|
[] |
no_license
|
from pathlib import Path
import pandas as pd
import argparse
from tqdm import tqdm
import shutil
import os
#print("here")
# CLI: two required positional paths -- dataset root and output root.
parser = argparse.ArgumentParser()
parser.add_argument("datadir", help="path to dataset")
parser.add_argument("savedir", help="path to save directory")
#print(parser.parse_args())
### test ###
#dd = Path('/raid/datasets/SemanticSegmentation/domain_adaptation/bdds/')
#lbl = '/raid/datasets/SemanticSegmentation/domain_adaptation/bdds/labels/train/0004a4c0-d4dff0ad_train_id.png'
#
############/home/cvit/rohit/autoneu/github/public-code/domain_adaptation/source/core
def getImg(lbl, dd):
    """Map a BDD label path to its matching image path.

    A label named `<stem>_train_id.png` corresponds to the image
    images/train/<stem>.jpg beneath the dataset root `dd`.
    """
    image_name = lbl.name.replace('_train_id.png', '.jpg')
    return dd / f'images/train/{image_name}'
def prepBDD(dd,sd):
    """Copy the stratified BDD subset (listed in csvs/stratified_bdds.csv)
    from dataset root `dd` into `sd`/BDD/{images,labels}.

    Assumes the CSV's first column holds absolute label paths rooted at the
    original server prefix `strp`, which is rewritten to point inside `dd`.
    """
    assert dd.exists() , f'dataset directory doesn\'t exist'
    d_strat = pd.read_csv('./domain_adaptation/source/core/csvs/stratified_bdds.csv',header=None)
    # the original machine's dataset prefix; swapped for the local root below
    strp = '/raid/datasets/SemanticSegmentation/domain_adaptation/bdds/'
    lbls = sd/'BDD/labels'
    imgs = sd/'BDD/images'
    #lbls.mkdir(exist_ok=True)
    #imgs.mkdir(exist_ok=True)
    if not os.path.exists(lbls):
        os.makedirs(lbls)
    if not os.path.exists(imgs):
        os.makedirs(imgs)
    for lbl in tqdm(list(d_strat[0])):
        #print("dd",dd)
        #if str(dd)[-1] != "/": dd = str(dd) + "/"
        # normalise the trailing slash so the prefix swap yields a valid path
        if str(dd)[-1] != "/": lbl = Path(lbl.replace(strp,str(dd)+"/"))
        else: lbl = Path(lbl.replace(strp,str(dd)))
        img = getImg(lbl,dd)
        #assert img.exists() and lbl.exists() , 'invalid files picked up'
        shutil.copy(lbl,lbls/f'{lbl.name}')
        shutil.copy(img,imgs/f'{img.name}')

if __name__ == "__main__":
    #print("here1")
    args = parser.parse_args()
    dd = Path(args.datadir)
    sd = Path(args.savedir)
    #print(f'collecting BDDS from {dd} into {sd}')
    prepBDD(dd,sd)
| true
|
375f977f3f6e6fdf57750b2ea96185024b1dd34c
|
Python
|
hanlin16/spider_service
|
/com/unif/pedily/ObtainPeDailyInfo.py
|
UTF-8
| 4,581
| 2.546875
| 3
|
[] |
no_license
|
# coding:utf-8
import re # 正则表达式
from bs4 import BeautifulSoup
from com.unif.util.LogUtil import LogUtil
logger = LogUtil.get_logger('ObtainPeDailyInfo')
class ObtainPeDailyInfo:
    """HTML extraction helpers for pedaily.cn article pages (BeautifulSoup based)."""

    def __init__(self):
        logger.info("初始化:ObtainPeDailyInfo")

    # Extract the article title; falls back to '无题' ("untitled").
    def find_title(self, data):
        soup = BeautifulSoup(data, 'html.parser', from_encoding='utf-8')
        title_info = soup.find_all('div', class_='main final-content')
        if title_info is None:
            return '无题'
        if len(title_info) == 0:
            return '无题'
        title = title_info[0].attrs['data-title']
        if title is None:
            return '无题'
        # NOTE(review): eval(repr(...)) on page-derived text is dangerous --
        # it only works because repr() re-quotes the string.  str.translate
        # would strip the same characters safely.
        result = eval(repr(title).replace('\\', ''))
        result = eval(repr(result).replace('/', ''))
        result = eval(repr(result).replace('*', ''))
        result = eval(repr(result).replace('?', ''))
        result = eval(repr(result).replace('>', ''))
        result = eval(repr(result).replace('<', ''))
        result = eval(repr(result).replace('|', ''))
        result = eval(repr(result).replace(',', ''))
        result = eval(repr(result).replace('"', ''))
        result = eval(repr(result).replace('.', ''))
        # collapse runs of whitespace into single spaces
        result = re.sub('\s+', ' ', result).strip()
        return result

    # Slice out the pagination-list markup between the first special <li>
    # and the last page-list <div>.
    def find_pages(self, data):
        begin = data.find(r'<li data-special')
        end = self.find_last(data, begin, r'<div class="page-list page">')
        context = data[begin:end]
        return context

    # Index of the last occurrence of `str` at/after position `begin`.
    def find_last(self, string, begin, str):
        last_position = begin
        while True:
            position = string.find(str, last_position + 1)
            if position == -1:
                return last_position
            last_position = position

    # Extract the article abstract/summary text.
    def find_subject(self, data):
        text = ''
        soup = BeautifulSoup(data, 'html.parser', from_encoding='utf-8')
        subject = soup.find_all('div', class_='subject')
        if len(subject) >= 1:
            text = str(subject[0].string)
        return text

    # Extract the article body markup (outer HTML of the first match).
    def find_context(self, data):
        text = ''
        soup = BeautifulSoup(data, 'html.parser', from_encoding='utf-8')
        content = soup.find_all('div', class_='news-content')
        if len(content) >= 1:
            content = str(content[0])
            text = str(content)
        return text

    # Publication time of the article, with ':00' seconds appended.
    def find_time(self, data):
        public_time = ''
        soup = BeautifulSoup(data, 'html.parser', from_encoding='utf-8')
        subject = soup.find_all('span', class_='date')
        if len(subject) >= 1:
            public_time = subject[0].string
        # NOTE(review): raises TypeError if the date span's .string is None.
        return public_time + ':00'

    # Map article URL -> thumbnail image URL from the listing markup.
    def find_page_info(self, html):
        result = {}
        soup = BeautifulSoup(html, 'html.parser', from_encoding='utf-8')
        list = soup.find_all('div', class_='img')  # NOTE(review): shadows builtin `list`
        length = len(list)  # number of matched nodes
        for i in range(length):
            content = list[i]
            img_soup = BeautifulSoup(str(content), 'html.parser')
            sub_img = img_soup.find_all('img')
            url_soup = BeautifulSoup(str(content), 'html.parser')
            sub_url = url_soup.find_all('a')
            if len(sub_url) == 0:
                break
            if sub_img is None:
                result[sub_url[0].attrs['href']] = ''
                continue
            if len(sub_img) == 0:
                result[sub_url[0].attrs['href']] = ''
                continue
            result[sub_url[0].attrs['href']] = sub_img[0].attrs['data-src']
        return result

    # Comma-joined tag names from the news-tag block ('' when absent).
    def find_tags(self, html):
        tag = ''
        soup = BeautifulSoup(html, 'html.parser', from_encoding='utf-8')
        list = soup.find_all('div', class_='news-tag')
        if list is None:
            return ''
        length = len(list)  # number of matched nodes
        if length == 0:
            return ''
        data = list[0]
        sub_soup = BeautifulSoup(str(data), 'html.parser')
        tags = sub_soup.find_all('a')
        if tags is None:
            return ''
        length = len(tags)
        if length == 0:
            return ''
        for i in range(0, length):
            if i == 0:
                if not tags[i].string is None:
                    tag = tags[i].string
            else:
                if not tags[i].string is None:
                    tag = tag + ',' + tags[i].string
        return tag
| true
|
37074f951b5adb2197a4818f0b505336ae465ad5
|
Python
|
rulkens/EosPython
|
/eos/server/tcp.py
|
UTF-8
| 1,998
| 2.640625
| 3
|
[] |
no_license
|
#!/usr/bin/python
# ===========================================================================
# default TCP socket server for communicating with the EOS from outside
# call this file directly or import main()
# ===========================================================================
import os
import logging
import json
import signal
import time
from eos.api.EOS_API import EOS_API
from tornado.ioloop import IOLoop
from tornado.tcpserver import TCPServer
# from tornaduv import UVLoop
# IOLoop.configure(UVLoop)
# Canonical error payload returned when a request has the wrong shape.
ERROR_API = {'error': 'API called with incorrect number of arguments'}

def handle_signal(sig, frame):
    # Stop the IOLoop from the loop's own thread (signal-safe shutdown).
    IOLoop.instance().add_callback(IOLoop.instance().stop)
class EchoServer(TCPServer):
    """Line-oriented JSON-over-TCP server (Python 2 / Tornado): one JSON
    request per newline-terminated line, one JSON response written back."""

    def handle_stream(self, stream, address):
        # Tornado calls this once per new connection.
        self._stream = stream
        self._read_line()

    def _read_line(self):
        # Wait for the next newline-terminated request.
        try:
            self._stream.read_until('\n', self._handle_read)
        except Exception, e:
            print "Error while reading stream : %s" % e

    def _handle_read(self, data):
        try:
            data = json.loads(data)
            self._stream.write(json.dumps(act_on(data)))
        except Exception, e:
            print "Error in reading json data", e
        # keep the connection open for the next request line
        self._read_line()
def act_on(data):
    """Execute the requested API action and return the lamp status afterwards.

    `data` must be a dict with 'action' and 'arguments' keys; any failure
    (missing key, API error) yields the canned ERROR_API payload.
    """
    # pipe straight to the API interface and return the result
    try:
        return EOS_API(data['action'], data['arguments'])
    except Exception:
        # was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the intended best-effort behaviour
        return ERROR_API
def main():
    """main application entry point"""
    logging.getLogger().setLevel(logging.DEBUG)
    # port is configurable through the EOS_TCP_PORT environment variable
    socket_port = int(os.getenv('EOS_TCP_PORT', 5154))
    logging.info('listening on port %s' % socket_port)
    # make Ctrl-C / SIGTERM stop the IOLoop cleanly via handle_signal
    signal.signal(signal.SIGINT, handle_signal)
    signal.signal(signal.SIGTERM, handle_signal)
    server = EchoServer()
    server.listen(socket_port)
    IOLoop.instance().start()  # blocks until a signal stops the loop
    IOLoop.instance().close()

if __name__ == "__main__":
    main()
| true
|
8b299142e7ead51cfab93f1bd0c76e0bc1852f83
|
Python
|
zzelman/i3-projects
|
/i3-projects
|
UTF-8
| 7,888
| 2.875
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
import subprocess
from pprint import pprint
import json
import time
import sys
"""Project Creation and Management for i3-wm.
Terminology
- project :: a collection of workspaces
- workspace :: a collection of applications on a single monitor
- move :: make an application go somewhere
- focus :: change what you are looking at
"""
version = '1.1.1'
def _ask_for_project_name() -> str:
    """Prompt the user for a project name via a zenity popup.

    The popup utility is zenity. See https://en.wikipedia.org/wiki/Zenity
    for more information.

    Returns:
        The entered project name with newlines stripped.
    """
    proc = subprocess.Popen(
        ['zenity', '--entry', '--title=i3', "--text=Project Name:"],
        stdout=subprocess.PIPE)
    stdout, _ = proc.communicate()
    return stdout.decode('utf-8').replace('\n', '').replace('\r', '')
def _get_all_workspaces() -> list:
    """Return every i3 workspace.

    Returns:
        The parsed JSON list from `i3-msg -t get_workspaces`.
    """
    proc = subprocess.Popen(['i3-msg', '-t', 'get_workspaces'],
                            stdout=subprocess.PIPE)
    stdout, _ = proc.communicate()
    return json.loads(stdout.decode('utf-8'))
def _get_current_project_name() -> str:
    """Return the project name of the currently focused workspace.

    Workspace names have the form "<project>:<number>"; everything before
    the first colon is the project.
    """
    workspaces = _get_all_workspaces()
    focused = next(ws for ws in workspaces if ws['focused'])
    return focused['name'].split(':')[0]
def focus_project(project_name: str) -> None:
    """Focuses a workspace on all xrandr outputs with the project name & output number.

    Because of how i3 works, "focus workspace" means "create if not existing".

    Args:
        project_name: The name of the project

    Returns:
        None
    """
    workspaces = _get_all_workspaces()

    # figure out the current workspace num
    current_project_name = _get_current_project_name()  # NOTE(review): unused
    current_workspace = next(workspace for workspace in workspaces
                             if workspace['focused'] == True)
    current_workspace_name = current_workspace['name']
    # names look like "<project>:<num>"; keep everything after the colon
    current_workspace_num = current_workspace_name[current_workspace_name.find(':') + 1:]

    # for each xrandr output
    unique_outputs = {workspace['output'] for workspace in workspaces}
    for output in unique_outputs:
        # find an already existing workspace for the output
        target_workspace = next(workspace for workspace in workspaces
                                if workspace['output'] == output)

        # move to that workspace
        workspace_name = target_workspace['name']
        subprocess.call(['i3-msg', 'workspace ', workspace_name])

        # create a new workspace with the project name
        workspace_num = workspace_name[workspace_name.find(':') + 1:]
        subprocess.call(['i3-msg', 'workspace ', project_name + ':' + workspace_num])

    # preserve the workspace number focus of before switching projects
    subprocess.call(['i3-msg', 'workspace ', project_name + ':' + current_workspace_num])
def focus_workspace(workspace_num: str) -> None:
    """Change the focus to the requested workspace number INSIDE of the project.

    Args:
        workspace_num: Requested workspace number to move to.

    Returns:
        None
    """
    project = _get_current_project_name()
    target = project + ':' + workspace_num
    subprocess.call(['i3-msg', 'workspace ', target])
def move_to_workspace(workspace_num: str) -> None:
    """Move the focused application to a workspace INSIDE the current project.

    Args:
        workspace_num: Requested workspace number to move to.

    Returns:
        None
    """
    project = _get_current_project_name()
    target = project + ':' + workspace_num
    subprocess.call(['i3-msg', 'move container to workspace ', target])
def move_to_project(project_name: str, workspace_num) -> None:
    """Move the focused application to a workspace IN ANOTHER project.

    Args:
        project_name: The name of the project to move the application to.
        workspace_num: The workspace number inside the other project.

    Returns:
        None
    """
    target = project_name + ':' + workspace_num
    subprocess.call(['i3-msg', 'move container to workspace ', target])
def show_help() -> None:
    """Show help text on stdout.
    """
    # NOTE: keep this text in sync with the commands handled in main().
    help_msg = """\
i3-projects [command] [args]

Description:
    This is a command line utility that interacts with i3-msg to make creation and management
    of projects easier in i3-wm.

Commands:
    focus_project [project_name]
        Change the focus on all xrandr outputs to be a workspace in the requested project.
        If project_name is not given, a gui popup will ask for it.

    focus_workspace [workspace_num]
        Change the focus to the requested workspace number in the current project.

    move_to_workspace [workspace_num]
        Move the focused application to the requested workspace number in the current project.

    move_to_project [workspace_num]
        Move the focused application to the requested workspace number on another project.
        A gui popup will ask the user for the name of the project.

    get_project_name
        Prints the current workspace's project name.

Example .i3/config:
    # focus or create a new project on all outputs
    bindsym $mod+p exec i3-projects focus_project

    # switch to workspace inside of current project
    bindsym $mod+1 exec i3-projects focus_workspace 1
    bindsym $mod+2 exec i3-projects focus_workspace 2
    bindsym $mod+3 exec i3-projects focus_workspace 3
    bindsym $mod+4 exec i3-projects focus_workspace 4
    bindsym $mod+5 exec i3-projects focus_workspace 5

    # move container to the current project's workspace
    bindsym $mod+Shift+1 exec i3-projects move_to_workspace 1
    bindsym $mod+Shift+2 exec i3-projects move_to_workspace 2
    bindsym $mod+Shift+3 exec i3-projects move_to_workspace 3
    bindsym $mod+Shift+4 exec i3-projects move_to_workspace 4
    bindsym $mod+Shift+5 exec i3-projects move_to_workspace 5

    # move container to another project's workspace
    bindsym $mod+Ctrl+1 exec i3-projects move_to_project 1
    bindsym $mod+Ctrl+2 exec i3-projects move_to_project 2
    bindsym $mod+Ctrl+3 exec i3-projects move_to_project 3
    bindsym $mod+Ctrl+4 exec i3-projects move_to_project 4
    bindsym $mod+Ctrl+5 exec i3-projects move_to_project 5

    # startup with a project called misc
    exec --no-startup-id i3-projects focus_project misc

Version:
    {}

Author:
    Kyle Avrett <kyle dot avrett at gmail dot com>""".format(version)
    print(help_msg)
def main() -> None:
    """Provide cli bindings to be used in .i3/config

    Returns:
        None
    """
    if len(sys.argv) == 1:
        show_help()
        return

    command = sys.argv[1]
    if command == '--version':
        print(version)
    elif command in ('-h', '--help'):
        show_help()
    elif command == 'focus_project':
        # optional positional argument; otherwise prompt with a popup
        if len(sys.argv) >= 3:
            project_name = sys.argv[2]
        else:
            project_name = _ask_for_project_name()
        if project_name:
            focus_project(project_name)
    elif command == 'focus_workspace':
        focus_workspace(sys.argv[2])
    elif command == 'move_to_workspace':
        move_to_workspace(sys.argv[2])
    elif command == 'move_to_project':
        move_to_project(_ask_for_project_name(), sys.argv[2])
    elif command == 'get_project_name':
        print(_get_current_project_name())

if __name__ == '__main__':
    main()
| true
|
5c6e350d335669944b4e4bed9f8ec116b5b2d2cd
|
Python
|
lguerdan/Al-Gore-Rhythm
|
/DynamicRogramming/dynamic-stairs.py
|
UTF-8
| 292
| 3.90625
| 4
|
[] |
no_license
|
memo = [-1] * 50  # memo[i] caches num_ways(i); -1 means "not computed yet"

def num_ways(stairs):
    """Number of ways to climb `stairs` steps taking 1, 2 or 3 at a time.

    Fix: the 0-stairs base case must be 1 (the empty climb), not 0 --
    otherwise num_ways(3) came out as 3 instead of the correct 4.
    """
    if stairs == 0:
        return 1
    elif stairs == 1:
        return 1
    elif stairs == 2:
        return 2
    if memo[stairs] == -1:
        # tribonacci-style recurrence over the last step taken (1, 2 or 3)
        memo[stairs] = num_ways(stairs - 3) + num_ways(stairs - 2) + num_ways(stairs - 1)
    return memo[stairs]
print num_ways(10)
| true
|
4540d0d17a339060d434dd3a014f1007d76375c7
|
Python
|
tribe01/Rosenthal
|
/plot.py
|
UTF-8
| 407
| 2.640625
| 3
|
[] |
no_license
|
import sys
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
# Render output_2D.csv (columns: x, y, value) as a temperature heat map.
data = np.genfromtxt('output_2D.csv', delimiter=',')
x = data[:,0]
y = data[:,1]
z = data[:,2]
# collapse to the unique grid coordinates, then rebuild the 2-D field
x=np.unique(x)
y=np.unique(y)
X,Y=np.meshgrid(x,y)
# assumes the CSV rows are ordered so z reshapes row-major onto (y, x) -- verify
Z=z.reshape(len(y),len(x))
HM=plt.pcolormesh(X,Y,Z)
# fixed colour scale so separate runs are visually comparable
HM.set_clim(vmin=1000, vmax=2000)
plt.title('Temperature Distribution (K)')
plt.colorbar()
plt.savefig("output.png")
plt.show()
| true
|
d7a46bfc822ee5a8e5f1665b4d8e22cb04d39e30
|
Python
|
coderZsq/coderZsq.practice.data
|
/study-notes/py-collection/02_turtle/04_凹.py
|
UTF-8
| 254
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
import turtle as t

# Trace the outline of a concave shape (the "凹" character): draw the
# notched top edge first, then close the figure with the sides and bottom.
t.forward(50)
t.right(90)
t.forward(50)
t.left(90)
t.forward(50)
t.left(90)
t.forward(50)
t.right(90)
t.forward(50)
t.right(90)
t.forward(100)
t.right(90)
t.forward(150)
t.right(90)
t.forward(100)
t.mainloop()  # keep the window open until the user closes it
| true
|
34a3d063afe840eb6d33429ef0eba3c04fa4338e
|
Python
|
lucasw/timer_test
|
/scripts/timer_test.py
|
UTF-8
| 1,086
| 2.640625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env python
# Copyright 2020 Lucas Walter
import rospy
from std_msgs.msg import Float32
class TimerTest:
    """ROS node that measures rospy.Timer accuracy.

    A single timer fires `cycles` times per nominal `period`; only every
    `cycles`-th callback publishes, so the published values show how well
    the subdivided timer reproduces the requested period.
    """

    def __init__(self):
        # absolute firing time and measured inter-publish period, as topics
        self.update_time_pub = rospy.Publisher("update_time", Float32, queue_size=10)
        self.update_dt_pub = rospy.Publisher("update_dt", Float32, queue_size=10)
        self.period = rospy.get_param("~period", 0.01)
        self.cycles = rospy.get_param("~cycles", 1)
        self.count = 0
        self.last_real = None  # timestamp of the previous published tick
        self.timer = rospy.Timer(rospy.Duration(self.period / self.cycles), self.timer_callback)

    def timer_callback(self, te):
        self.count += 1
        # only act on every `cycles`-th tick
        if self.count % self.cycles != 0:
            return
        self.update_time_pub.publish(Float32(te.current_real.to_sec()))
        if self.last_real is not None:
            msg = Float32()
            msg.data = (te.current_real - self.last_real).to_sec()
            self.update_dt_pub.publish(msg)
        self.last_real = te.current_real

if __name__ == '__main__':
    rospy.init_node("timer_test_py")
    timer_test = TimerTest()
    rospy.spin()
| true
|
227566fe30675886fe80f48e9820da82e9069220
|
Python
|
BenThienngern/WebbComscience
|
/problem10.py
|
UTF-8
| 627
| 3.625
| 4
|
[] |
no_license
|
# Euler Project problem 10,
# https://projecteuler.net/problem=10
# Find the sum of all the primes below two million.
# This is the find prime function I create in problem 7
def findPrime(start, end):
    """Return [2] followed by the primes p with start + 3 <= p < end.

    Keeps the original interface (including the odd `start + 3` offset and
    the unconditional leading 2) but replaces the O(n * pi(n)) trial-division
    loop with a sieve of Eratosthenes, which makes end = 2_000_000 tractable.
    """
    first = start + 3  # original code skipped everything below start + 3
    if end <= 2:
        return [2]
    # sieve[i] == 1 while i is still considered prime
    sieve = bytearray([1]) * end
    sieve[0:2] = b'\x00\x00'
    for p in range(2, int(end ** 0.5) + 1):
        if sieve[p]:
            # strike out every multiple of p starting at p*p
            sieve[p * p::p] = bytearray(len(range(p * p, end, p)))
    return [2] + [p for p in range(max(first, 3), end) if sieve[p]]
# This is too much for my CPU !!!!!
# (Project Euler 10: sum of all primes below two million -- the runtime is
# dominated by findPrime above.)
print(sum(findPrime(0, 2000000)))
| true
|
acf2b5841b8f9f2f44e5fe39fbf9492834fc3bc7
|
Python
|
audoreven/IntroToPython
|
/main.py
|
UTF-8
| 810
| 3.875
| 4
|
[] |
no_license
|
# This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
def print_hi(name):
    """Scratch function: greet `name` and demo comprehensions and str.split."""
    # Use a breakpoint in the code line below to debug your script.
    print(f'Hi, {name} ')  # Press Ctrl+F8 to toggle the breakpoint.

    one_to_four = [x for x in range(1, 5)]
    doubled = [2 * x for x in one_to_four]
    small = [x for x in doubled if x < 4]
    small.append(1)
    small.sort()
    print(one_to_four)
    print(doubled)
    print(small)

    sent = "Hello, my name is Audrey"
    words = sent.split()
    print(words)

# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    print_hi('PyCharm')

# See PyCharm help at https://www.jetbrains.com/help/pycharm/
| true
|
18a34fbd839ee51907f6d1176d9db1e77ee545f2
|
Python
|
djgroen/FabCovid19
|
/validation/validation_data_parser.py
|
UTF-8
| 2,479
| 2.640625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
import pandas as pd
import os
def get_region_names():
    """List the region directories available under ../config_files."""
    return os.listdir('../config_files')
def get_validation_names():
    """List the distinct region names found as raw_data/<region>_<i>.csv files."""
    files = os.listdir('raw_data')
    # drop the trailing "_<i>" part of each filename and deduplicate
    regions = {'_'.join(name.split('_')[:-1]) for name in files}
    return list(regions)
def update_validation_data(regions='all', force=False):
    """Compile per-region admissions CSVs from raw_data/<region>_<i>.csv parts.

    For every requested region that also has a config directory, the parts are
    summed column-wise into ../config_files/<region>/covid_data/admissions.csv.
    Existing outputs are kept unless force=True.
    """
    region_list = get_region_names()
    if regions == 'all':
        validation_region_list = get_validation_names()
    else:
        # accept either a single region name or a list of names
        if isinstance(regions, list):
            validation_region_list = regions
        else:
            validation_region_list = list([regions])
    for region in validation_region_list:
        if region not in region_list:
            print('Region {} not found in config files...'.format(region))
        else:
            print('Region {} found in config files...'.format(region))
            validation_path = '../config_files/' + region +'/covid_data/admissions.csv'
            if os.path.exists(validation_path) and not force:
                print('Validation data for {} already exists.'.format(region))
            else:
                print('Compiling validation data for {}...'.format(region))
                ii = 1
                data_path = 'raw_data/' + region + '_' + str(ii) + '.csv'
                # NOTE(review): the while condition checks the *previous*
                # iteration's path; the bare excepts below are what actually
                # end the scan when part ii is missing or unreadable.
                while (os.path.exists(data_path)):
                    data_path = 'raw_data/' + region + '_' + str(ii) + '.csv'
                    if ii == 1:
                        try:
                            df = pd.read_csv(data_path)
                        except:
                            print('Name format wrong')
                        df = df[['date', 'newAdmissions']]
                    else:
                        try:
                            ddf = pd.read_csv(data_path)
                        except:
                            break
                        # column-wise sum of this part into the running total
                        df['newAdmissions_t'] = ddf['newAdmissions']
                        df['newAdmissions_t'] = df['newAdmissions_t'].fillna(0)
                        df['newAdmissions'] += df['newAdmissions_t']
                        df = df[['date', 'newAdmissions']]
                    ii += 1
                # normalise dates to dd/mm/YYYY and the expected column name
                df['date'] = pd.to_datetime(df['date'])
                df['date'] = df['date'].dt.strftime('%d/%m/%Y')
                df = df.rename(columns={'newAdmissions': 'admissions'})
                df.to_csv(validation_path, index=False)

if __name__ == '__main__':
    update_validation_data(force=True)
    print('Done')
| true
|
e7de244ae9aa5ce7bda7d1f6407bf17c1f4a5967
|
Python
|
jongjunpark/TIL
|
/Public/problem/D1/2025.N줄덧셈.py
|
UTF-8
| 98
| 3.359375
| 3
|
[] |
no_license
|
inputs = int(input())
result = 0
for i in range(inputs):
result += (inputs - i)
print(result)
| true
|
1c2b63d68401c9b58439ae9227e43cbfda13922c
|
Python
|
davidrhmiller/zakim
|
/leads/highest_card_lead.py
|
UTF-8
| 667
| 3.078125
| 3
|
[] |
no_license
|
from cards import Card
from leads.lead_rule import LeadRule
class HighestCardLead(LeadRule):
    '''West always leads their highest card, suit is tie-breaker.'''

    # NOTE(review): takes `cls` but is not decorated with @classmethod, so as
    # written it behaves like an instance method -- confirm against LeadRule's
    # expected interface.
    def get_lead(cls, deal):
        # This implementation relies on the cards in West's hand being reverse
        # sorted by card id.
        card_ids = deal.west.card_ids
        best_pos = 0
        best_rank = -1
        for pos, card_id in enumerate(card_ids):
            rank = Card.from_id(card_id).rank()
            # Since this is strictly greater, and we encounter the higher valued
            # suits first, we get the tiebreaker we want.
            if rank > best_rank:
                best_pos = pos
                best_rank = rank
        # index into West's hand of the card to lead
        return best_pos
| true
|
7ed638ffb63ede1c00efb61f6ba4e6aee73cfc24
|
Python
|
bramgrooten/Hanabi
|
/unittests/test_hanabi_player.py
|
UTF-8
| 1,879
| 3
| 3
|
[] |
no_license
|
import unittest
from environment import HanabiPlayer, HanabiDeck, HanabiCard
from environment.utils.constants import Rank, Colors
class TestHanabiPlayer(unittest.TestCase):
    """Unit tests for HanabiPlayer: rendering, hinting, playing, discarding, drawing."""

    def setUp(self) -> None:
        # Fresh single-rank deck and a 6-card hand for every test.
        self.deck = HanabiDeck(ranks=[Rank.ONE])
        self.cards = self.deck.provide_hand(hand_size=6)

    def test_init(self):
        player = HanabiPlayer(player_id=0, cards=self.cards)
        player.render()

    def test_info_hidden(self):
        # Render the hand from an opponent's point of view.
        player = HanabiPlayer(player_id=0, cards=self.cards)
        player.render(can_see=False)

    def test_info_color(self):
        player = HanabiPlayer(player_id=0, cards=self.cards)
        player.inform_color(Colors.BLACK)
        player.render()

    def test_info_rank(self):
        player = HanabiPlayer(player_id=0, cards=self.cards)
        player.inform_rank(Rank.ONE.value)
        player.render()

    def test_play(self):
        # Playing removes the card from the hand and returns it.
        player = HanabiPlayer(player_id=0, cards=self.cards)
        card = player.play(3)
        player.render()
        self.assertEqual(4, len(player._hand), f"Player didn't properly play a card.")
        self.assertEqual(True, isinstance(card, HanabiCard), f"return value from play isn't a card.")

    def test_discard(self):
        # Discarding removes the card from the hand and returns it.
        player = HanabiPlayer(player_id=0, cards=self.cards)
        card = player.discard(3)
        player.render()
        self.assertEqual(4, len(player._hand), f"Player didn't properly discard a card.")
        self.assertEqual(True, isinstance(card, HanabiCard), f"return value from discard isn't a card.")

    def test_add_card(self):
        # NOTE(review): expects 6 after adding to a 5-card hand -- confirm
        # whether provide_hand/add_card semantics make this intentional.
        player = HanabiPlayer(player_id=0, cards=self.cards)
        player.add_card(HanabiCard(color=Colors.RED, rank=Rank.TWO))
        player.render()
        self.assertEqual(6, len(player._hand), f"Player didn't properly add a new card to his hand.")

if __name__ == '__main__':
    unittest.main()
| true
|
bcc3cbd889a1ed0d9be21014366362e441e72bdb
|
Python
|
voidabhi/flask
|
/SQLAlchemy/SQLAlchemy-Basic/SQLAlchemy-SQLITE.py
|
UTF-8
| 1,614
| 2.59375
| 3
|
[
"Apache-2.0"
] |
permissive
|
from flask import Flask, jsonify, g, request
from sqlite3 import dbapi2 as sqlite3
DATABASE = './db/test.db'
app = Flask(__name__)
def get_db():
    """Return the request-scoped SQLite connection, opening it lazily on flask.g."""
    conn = getattr(g, '_database', None)
    if conn is None:
        conn = g._database = sqlite3.connect(DATABASE)
        conn.row_factory = sqlite3.Row  # rows support name-based access
    return conn
@app.teardown_appcontext
def close_connection(exception):
    """Close the request-scoped DB connection when the app context tears down."""
    conn = getattr(g, '_database', None)
    if conn is not None:
        conn.close()
def query_db(query, args=(), one=False):
    """Run a query; return all rows, or the first row (or None) when one=True."""
    cursor = get_db().execute(query, args)
    rows = cursor.fetchall()
    cursor.close()
    if one:
        return rows[0] if rows else None
    return rows
def init_db():
    """(Re)create the schema by executing schema.sql against the app database."""
    with app.app_context():
        db = get_db()
        with app.open_resource('schema.sql', mode='r') as f:
            db.cursor().executescript(f.read())
        db.commit()
def add_student(name='test', age=10, sex='male'):
    """Insert a student row and return the result of the commit.

    Fixed: the original built the INSERT with %-string interpolation
    ("VALUES('%s', '%s', %d)"), which is vulnerable to SQL injection and
    breaks on names containing a quote. A parameterized query is used
    instead (the raw debug print of the interpolated SQL is dropped with it).
    """
    sql = "INSERT INTO students (name, sex, age) VALUES(?, ?, ?)"
    db = get_db()
    db.execute(sql, (name, sex, int(age)))
    res = db.commit()
    return res
def find_student(name=''):
    """Return the first student row whose name matches exactly.

    Fixed: the original interpolated `name` straight into the SQL string,
    allowing SQL injection; the query is now parameterized.
    NOTE(review): as before, this raises IndexError when no row matches —
    confirm callers expect that.
    """
    sql = "select * from students where name = ? limit 1"
    db = get_db()
    rv = db.execute(sql, (name,))
    res = rv.fetchall()
    rv.close()
    return res[0]
@app.route('/')
def users():
    # Smoke-test endpoint: returns {"hello": "world"}.
    return jsonify(hello='world')
# NOTE: uses a Python 2 print statement — this module is Python 2 only.
@app.route('/add',methods=['POST'])
def add_user():
    # Insert a student from the POSTed form fields; echoes the commit result
    # to stdout and returns an empty body.
    print add_student(name=request.form['name'], age=request.form['age'], sex=request.form['sex'])
    return ''
@app.route('/find_user')
def find_user_by_name():
    """Look up a student by the ?name= query parameter and return it as JSON."""
    query_name = request.args.get('name', '')
    row = find_student(query_name)
    return jsonify(name=row['name'], age=row['age'], sex=row['sex'])
if __name__ == '__main__' : app.run(debug=True)
| true
|
eb47348b0799aa0efbc69c40d90ff35e26c93d21
|
Python
|
acharp/IoT-kafka-spark
|
/consumer/server.py
|
UTF-8
| 4,543
| 2.71875
| 3
|
[] |
no_license
|
from datetime import datetime
import json
import statistics
from flask import Flask, request, Response
from kafka import KafkaConsumer, TopicPartition
# Metrics the API can compute and the sensor topics it knows about.
METRICS = ('count', 'min', 'max', 'average')
SENSORS = ('temperature', 'humidity', 'pressure')
# Format expected for the "from"/"to" fields of the request body.
TIMESTAMP_FORMAT = '%Y-%m-%d %H:%M:%S'
KAFKA_SOCKET = 'localhost:9092'
app = Flask(__name__)
def validate_request_body(body):
    """Validate the incoming request body.

    Returns (errors, operations, from_tstamp, to_tstamp). When validation
    fails, `operations` is empty and the timestamps are None.

    Bug fix: the timestamp-error path previously returned a 2-tuple
    (errors, {}) while the caller unpacks four values, which raised
    ValueError on every invalid timeframe. It now always returns a 4-tuple.
    """
    errors, from_tstamp, to_tstamp = validate_tstamp(body)
    if errors:
        return errors, {}, None, None
    errors, operations = filter_wrong_input(body)
    return errors, operations, from_tstamp, to_tstamp
def validate_tstamp(body):
    """Check the requested timeframe is present, parseable and ordered.

    Returns (errors, from_tstamp, to_tstamp); on failure the timestamps are
    empty strings.
    """
    if 'from' not in body or 'to' not in body:
        return ['Keys "from" and "to" are required'], '', ''
    try:
        start = datetime.strptime(body['from'], TIMESTAMP_FORMAT)
        end = datetime.strptime(body['to'], TIMESTAMP_FORMAT)
    except ValueError:
        return ['Incorrect input timestamp format, should be YYYY-MM-DD hh:mm:ss'], '', ''
    if start > end:
        return ['"from" has to be earlier than "to"'], '', ''
    return [], start, end
def filter_wrong_input(body):
    """
    Filter out wrong fields from the request body.
    Returns a list of errors and a dict mapping sensor name to the metrics
    to compute, e.g. {"temperature": ["count", "min"]}.
    """
    errors = []
    operations = {}
    # Keep only known sensors, and within them only known metrics.
    for field, requested in body.items():
        if field in ('from', 'to'):
            continue
        if field not in SENSORS:
            errors.append('Unexpected key {} in the request body'.format(field))
            continue
        for metric in requested:
            if metric in METRICS:
                operations.setdefault(field, []).append(metric)
            else:
                errors.append('Unexpected metric {} to compute for the sensor {}'
                              .format(metric, field))
    return errors, operations
def compute_operations(operations, from_tstamp, to_tstamp):
    """Compute the requested metrics per sensor from Kafka records.

    `operations` maps a sensor topic to the metric names to compute; the
    offsets bounding the requested timeframe are resolved with
    offsets_for_times and the raw values are then reduced.

    NOTE(review): offsets_for_times returns None for a partition with no
    message at/after the given timestamp, in which case `.offset` would
    raise AttributeError — confirm requests always fall inside the
    retained log.
    """
    result = {}
    # Map each metric name to the reduction applied to the raw values.
    metric_functions = {'average': statistics.mean, 'min': min, 'max': max, 'count': len}
    for sensor, metrics in operations.items():
        # Get kafka offsets matching timestamps requested
        consumer = KafkaConsumer(sensor, bootstrap_servers=KAFKA_SOCKET,
                                 value_deserializer=lambda x: int.from_bytes(x, byteorder='big'))
        partition = TopicPartition(sensor, 0)
        start_offset = consumer.offsets_for_times(
            {partition: int(round(from_tstamp.timestamp() * 1000))})[partition].offset
        end_offset = consumer.offsets_for_times(
            {partition: int(round(to_tstamp.timestamp() * 1000))})[partition].offset
        # Read values and compute metrics requested
        values = get_kafka_values(consumer, partition, start_offset, end_offset)
        result[sensor] = {metric: metric_functions[metric](values) for metric in metrics}
    return result
def get_kafka_values(consumer, partition, start_offset, end_offset):
    """Consume Kafka data between two offsets (end inclusive) and return
    the record values.

    NOTE(review): iterating `consumer` blocks waiting for new messages if
    the log ends before `end_offset` is passed — confirm whether a
    consumer timeout is configured elsewhere.
    """
    values = []
    consumer.seek(partition, start_offset)
    for msg in consumer:
        # Stop once we have moved past the closing offset.
        if msg.offset > end_offset:
            break
        else:
            values.append(msg.value)
    return values
def build_response(errors, result):
    """Build the HTTP response; 400 when there are only errors and no result."""
    payload = json.dumps({'result': result, 'errors': errors})
    status = 400 if errors and not result else 200
    return Response(payload, status=status, mimetype='application/json')
@app.route('/health', methods=['GET'])
def health_check():
    # Liveness probe endpoint.
    return {"status": "Healthy!"}
@app.route('/compute/metrics', methods=['POST'])
def compute_metrics():
    """Validate the posted timeframe/metric request and return the computed
    metrics (or the validation errors)."""
    # Reject empty / non-JSON bodies outright.
    if not request.json or request.json == {}:
        return Response('{"bad request": "body must be valid non empty json"}',
                        status=400, mimetype='application/json')
    # NOTE(review): this expects validate_request_body to return a 4-tuple on
    # every path, including validation failures — verify.
    errors, operations, from_tstamp, to_tstamp = validate_request_body(request.json)
    result = compute_operations(operations, from_tstamp, to_tstamp)
    return build_response(errors, result)
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
| true
|
2c05e6636a7a00b04a370ed216cbf6362089f48e
|
Python
|
Rinqt/stock
|
/machine_learning/regression_algorithms/decision_tree_model.py
|
UTF-8
| 1,186
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
from sklearn.tree import DecisionTreeRegressor
from regression_algorithms.regression_model import Model
class DecisionTreeModel(Model):
    """Decision-tree regressor wrapper around the project's Model base class."""

    def create_model(self):
        # Build the sklearn regressor from the hyper-parameter dict prepared
        # by Model.set_parameters, then fit and score it via fit_evaluate().
        # NOTE(review): `presort` was deprecated in scikit-learn 0.22 and
        # removed in 0.24 — confirm the pinned sklearn version still accepts it.
        self.MODEL = DecisionTreeRegressor(criterion=self.parameters['criterion'],
                                           splitter=self.parameters['splitter'],
                                           max_depth=self.parameters['maxDepth'],
                                           min_samples_split=self.parameters['minSamplesSplit'],
                                           min_samples_leaf=self.parameters['minSamplesLeaf'],
                                           max_features=self.parameters['maxFeatures'],
                                           max_leaf_nodes=self.parameters['maxLeafNodes'],
                                           min_impurity_decrease=self.parameters['minImpurityDecrease'],
                                           presort=self.parameters['preSort'])
        return self.fit_evaluate()
def train_model(params):
    # Convenience entry point: configure the model from `params`, split the
    # data, then build/fit/evaluate in one call.
    model = DecisionTreeModel()
    model.parameters = model.set_parameters(parameter_dict=params)
    model.split_data()
    return model.create_model()
| true
|
f65cdc9bfb0fae0b24bb0b20a1859bb6a152f3e1
|
Python
|
bartoszgorka/studia-wi-put-poznan
|
/semestr_6_metody_kompresji_danych/Exercise_1/Exercise_1.py
|
UTF-8
| 7,288
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
import numpy as np
import operator
import random
# Exercise 1 - Generate words and calculate average length.
def exercise_1(size):
    """Generate `size` uniformly random words and return their average length."""
    alphabet = list("qazxswedcvfrtgbnhyujmkilop ")
    total = sum(len(exercise_1_single_word(alphabet)) for _ in range(size))
    return total / size
# Single word - Example 1 generator.
def exercise_1_single_word(alphabet):
word = ""
while True:
char = np.random.choice(alphabet)
if char != " ":
word += char
else:
break
return word
# Read file's content
def read_file(name):
file = open(name, 'r')
return file.read()
# Calculate file details, return average characters length in file.
def file_parameters(filename):
content = read_file(filename)
total_length = 0
words = content.split(" ")
for word in words:
total_length += len(word)
return total_length / len(words)
# Exercise 2 - Calculate frequency of letters in text.
def exercise_2(filename):
content = read_file(filename)
letters = {}
counter = 0
for _, letter in enumerate(content):
cardinality = letters.get(letter, 0)
letters.update({letter: cardinality + 1})
counter += 1
for letter in letters:
letters.update({letter: letters.get(letter) / counter})
return letters
# Exercise 3 - Calculate first row average length.
def exercise_3(size, frequency):
keys = list(frequency.keys())
probability_list = list(frequency.values())
total_length = 0
for _ in range(size):
total_length += len(exercise_3_word_generator(keys, probability_list))
return total_length / size
# Words generator - Exercise 3.
def exercise_3_word_generator(alphabet, probability):
word = ""
while True:
char = np.random.choice(alphabet, p=probability)
if char != " ":
word += char
else:
break
return word
# Exercise 4 - Probability of letters in text.
def exercise_4(filename):
content = read_file(filename)
letters = {}
counter = 0
old_letter = ""
for _, letter in enumerate(content):
if old_letter != "":
dictionary_item = letters.get(old_letter, {})
cardinality = dictionary_item.get(letter, 0)
cardinality_total = dictionary_item.get("total", 0)
dictionary_item.update({letter: cardinality + 1})
dictionary_item.update({"total": cardinality_total + 1})
letters.update({old_letter: dictionary_item})
counter += 1
old_letter = letter
return letters, counter
# Modify dictionaries.
def modify_dictionaries(dictionary, top_key, key):
selected = dictionary.get(top_key, {})
value = selected.get(key, 0)
total = selected.get("total", 0)
selected.update({key: value + 1})
selected.update({"total": total + 1})
dictionary.update({top_key: selected})
return dictionary
# Exercise 5 - Analyze files, calculate file's statistics.
def exercise_5_analyze(filename, row):
content = read_file(filename)
dictionary = {}
letters = []
for _, letter in enumerate(content):
if len(letters) > row:
del(letters[0])
dictionary = modify_dictionaries(dictionary, ''.join(letters), letter)
letters.append(letter)
return dictionary
# Roulette wheel
def roulette_wheel(letters, probability):
value = random.random()
probability_sum = 0.0
selected = 0
for ind, val in enumerate(probability):
probability_sum += val
if probability_sum >= value:
selected = ind
break
return letters[selected]
# Exercise 5 - generator with use roulette wheel and statistics from file.
def exercise_5_generator(dictionary, row, length):
result = "probability"
letters = list(result)
for _ in range(len(letters) - row):
del(letters[0])
for i in range(length):
letters_to_random = list()
probability_to_random = list()
selected = dictionary.get(''.join(letters))
total = selected.get("total", 1)
for (key, value) in selected.items():
if key != "total":
letters_to_random.append(key)
probability_to_random.append(value / total)
if probability_to_random:
char = roulette_wheel(letters_to_random, probability_to_random)
else:
char = np.random.choice(list("qazxswedcvfrtgbnhyujmkilop "))
result += char
letters.append(char)
if len(letters) > row:
del(letters[0])
return result
# Main function
def main():
    """Run all exercises: file statistics, random word generation, letter and
    bigram frequencies, and the context-based text generator."""
    # File details
    print("File details:")
    files = ["norm_hamlet.txt", "norm_romeo_and_juliet.txt", "norm_wiki_sample.txt"]
    for filename in files:
        print("\tFile =", filename, "\tAverage length =", file_parameters(filename), "characters")
    # Exercise 1
    words_size = 2000
    print("\nExercise 1:\n\tWords =", words_size, "\tAverage length =", exercise_1(words_size), "characters")
    # Exercise 2
    frequency = {}
    print("\nExercise 2:")
    for filename in files:
        frequency = exercise_2(filename)
        print("\tFile =", filename, "\tLetters:", frequency)
    # Exercise 3 — uses the letter frequencies of the last file processed.
    print("\nExercise 3:\n\tWords = ", words_size, "\tAverage length =", exercise_3(words_size, frequency), "characters")
    # Exercise 4 — report bigrams starting with the two most frequent letters.
    sorted_x = sorted(frequency.items(), key=operator.itemgetter(1))
    exercise_4_first_letter = sorted_x.pop()[0]
    exercise_4_second_letter = sorted_x.pop()[0]
    print("\nExercise 4:")
    for filename in files:
        frequency_first, counter = exercise_4(filename)
        print("\tFile =", filename, "\tLetters:", frequency_first)
        print("\n\tSelected top used - starts with `" + exercise_4_first_letter + "` or `" + exercise_4_second_letter + "`")
        results = {exercise_4_first_letter: frequency_first.get(exercise_4_first_letter, {}), exercise_4_second_letter: frequency_first.get(exercise_4_second_letter, {})}
        for dictionary in results:
            for (key, value) in results.get(dictionary).items():
                if key != "total":
                    print("\t\t", dictionary + key, value / counter)
        print("\n\t------------------------------------------\n")
    # Exercise 5 — build context statistics, then generate 2M characters.
    print("\nExercise 5:")
    filename = files[len(files) - 1]
    for i in [1, 3, 5]:
        print("Analyze file", filename)
        statistics = exercise_5_analyze(filename, i)
        print("Generate content")
        content = exercise_5_generator(statistics, i, 2_000_000)
        last_char = ""
        words = 0
        total_length = 0
        # Average word length of the generated text (runs of spaces count once).
        for char in content:
            if char != " ":
                total_length += 1
            if char == " " and last_char != " ":
                words += 1
            last_char = char
        print("Row =", i, "\tAverage length:", total_length / words)
# Script entry point.
if __name__ == "__main__":
    main()
| true
|
9942ab25c56f208052f64a5d86e1d3d0bbd8f7b0
|
Python
|
patchav0/Search-and-Rescue-Algorithm-Design
|
/code/examples/harris-corner-detection.py
|
UTF-8
| 386
| 2.5625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Harris corner detection demo: marks detected corners of a chessboard in red.

Created on Wed Feb 17 10:56:19 2021

@author: Bryan Van Scoy
"""
import cv2 as cv
import numpy as np
filename = 'chessboard.png'
img = cv.imread(filename)
# cornerHarris requires a float32 single-channel image.
gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
gray = np.float32(gray)
# blockSize=2, Sobel aperture ksize=3, Harris free parameter k=0.04.
dst = cv.cornerHarris(gray,2,3,0.04)
# threshold
# Paint pixels whose response exceeds 1% of the maximum in red (BGR).
img[dst>0.01*dst.max()]=[0,0,255]
cv.imshow('dst',img)
cv.waitKey(0)
cv.destroyAllWindows()
| true
|
878d25ee5d784bcfc5650a1dce53a62abec1db16
|
Python
|
buuav/precision-landing
|
/video_streaming_with_flask_example/camera.py
|
UTF-8
| 2,308
| 2.671875
| 3
|
[
"MIT"
] |
permissive
|
import cv2
import numpy as np
def draw_keypoints(vis, keypoints, color = (0, 255, 255)):
    """Draw each keypoint on `vis` as a circle with radius equal to its size."""
    for keypoint in keypoints:
        cx, cy = keypoint.pt
        radius = keypoint.size
        cv2.circle(vis, (int(cx), int(cy)), int(radius), color)
class VideoCamera(object):
    """Webcam wrapper that detects blobs in each frame and yields annotated
    JPEG-encoded frames for an MJPEG stream."""

    def __init__(self):
        # Using OpenCV to capture from device 0. If you have trouble capturing
        # from a webcam, use a video file instead, e.g.:
        # self.video = cv2.VideoCapture('video.mp4')
        self.video = cv2.VideoCapture(0)

        params = cv2.SimpleBlobDetector_Params()
        # Change thresholds
        params.minThreshold = 10
        params.maxThreshold = 200
        # Filter by Area.
        params.filterByArea = True
        params.maxArea = 1500
        # Filter by Circularity
        params.filterByCircularity = True
        params.minCircularity = 0.1
        # Filter by Convexity
        params.filterByConvexity = True
        params.minConvexity = 0.9
        # Filter by Inertia
        params.filterByInertia = True
        params.minInertiaRatio = 0.01

        # The SimpleBlobDetector construction API changed in OpenCV 3.
        ver = (cv2.__version__).split('.')
        if int(ver[0]) < 3:
            self.detector = cv2.SimpleBlobDetector(params)
        else:
            self.detector = cv2.SimpleBlobDetector_create(params)
        self.font = cv2.FONT_HERSHEY_SIMPLEX

    def __del__(self):
        # Release the capture device when the camera object is collected.
        self.video.release()

    def get_frame(self):
        """Grab one frame, annotate detected blob keypoints and their
        coordinates, and return the frame as JPEG bytes.

        We are using Motion JPEG, but OpenCV defaults to capturing raw
        images, so the frame must be encoded to JPEG before streaming.
        """
        success, frame = self.video.read()
        keypoints = self.detector.detect(frame)
        im_with_keypoints = cv2.drawKeypoints(
            frame, keypoints, np.array([]), (0, 0, 255),
            cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        for i, kp in enumerate(keypoints):
            cv2.putText(im_with_keypoints,
                        '%4.4f,%4.4f' % (kp.pt[0], kp.pt[1]),
                        (20, 20 * (i + 1)), self.font, 0.5, (255, 255, 255), 1)
        ret, jpeg = cv2.imencode('.jpg', im_with_keypoints)
        # Fix: ndarray.tostring() is deprecated (and removed in NumPy 2.0);
        # tobytes() returns the identical byte string.
        return jpeg.tobytes()
| true
|
4dadfad11875564915ec033932a331b280a7ff04
|
Python
|
ko-taro/atcoder
|
/notebooks/202101/abc174_d.py
|
UTF-8
| 1,025
| 2.828125
| 3
|
[] |
no_license
|
# %%
# Idea: operate on the string so that no "WR" substrings remain.
# When R's run directly to the right of a W (like WRRRR), can handling them
# one at a time be inefficient?
# In which situations is WRR...R actually inefficient?
# WRWWRRWRR
# RRWWRRWRW
#
# WRWWRWRR
# RRWWRWRW
# RRWWRWRW
# WRRRRWWR -> 1
# WWRRRRWWR -> 2
# WWRRRRWWRR
# WWWRR
# WRRRRWWRR
# %%
# First (exploratory) attempt with a hard-coded input:
# N = int(input())
# C = input()
C = 'RWRWRWRR'
ret = 0
if C.count('WR') != 0:
    # Leading R's are already in place; strip them before analysing.
    C = C.lstrip('R')
    countr = C.count('R')
    firstr = C.find('R')
    firstr2 = firstr
    # Find the end of the first run of R's.
    for i in range(firstr, len(C)):
        if C[i] != 'R':
            firstr2 = i - 1
            break
    firstw_cnt = firstr
    firstr_cnt = firstr2 - firstr + 1
    remainr_cnt = countr - firstr_cnt
    if firstw_cnt <= remainr_cnt:
        ret += remainr_cnt
    else:
        ret += firstw_cnt if firstw_cnt <= countr else countr
print(ret)
# %%
# Final solution: the answer is the number of W's within the first
# count('R') characters (those must all become R).
N = int(input())
C = input()
countr = C.count('R')
print(C[0:countr].count('W'))
# %%
# Scratch cell: iterate a string backwards.
s = 'RRRWRWRW'
for i in range(len(s)-1, -1, -1):
    print(s[i])
| true
|
c2454dd3434dd5e00ed86444406253a3d6e31174
|
Python
|
joshcampbell/tic-tac-toe
|
/t3/board.py
|
UTF-8
| 1,434
| 3.40625
| 3
|
[] |
no_license
|
import itertools
class Board:
    """Wrapper over the game state exposing the board grid and position checks."""

    def __init__(self, game):
        self.game = game

    def get_size(self):
        """Side length of the square board, read from the game state."""
        return self.game.state["board"]["size"]

    def get_valid_indices(self):
        """1-based row/column indices as a range."""
        return range(1, self.get_size() + 1)

    def positions(self):
        """
        Enumerate all of the Board's positions as a list of lists
        (for JSON reasons)
        """
        return [[row, col]
                for row in self.get_valid_indices()
                for col in self.get_valid_indices()]

    def corners(self):
        """Lazily yield the four corner positions."""
        last_index = self.get_valid_indices()[-1]
        edge_values = [1, last_index]
        return filter(lambda pos: pos[0] in edge_values and pos[1] in edge_values,
                      self.positions())

    # FIXME DRY, possibly factor out class or module
    def is_valid_position(self, position):
        return self.is_position_well_formed(position) and self.is_within_bounds(position)

    # FIXME uncovered
    def validate_position(self, position):
        assert self.is_position_well_formed(position), "malformed position: %s" % position
        assert self.is_within_bounds(position), "%s not in %s" % (position, self.get_valid_indices())

    def is_within_bounds(self, position):
        return position in self.positions()

    def is_position_well_formed(self, position):
        return isinstance(position[0], int) and isinstance(position[1], int)
| true
|
86a09bd4df60c0de4b875404cc48f1eee8236fe8
|
Python
|
jgomezdans/modis_opendap
|
/parallel_leech.py
|
UTF-8
| 11,984
| 2.703125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
"""
SYNOPSIS
DESCRIPTION
A MODIS daily surface reflectance download tool. Uses the
recently made available OpenDAP server to download daily
MODIS reflectance data for a particular location. This
script is designed to only fetch a single pixel, but
annual time series of both TERRA and AQUA data. The
script works using threads, attempting to access the
server to simultaneously request different time periods.
As of writing, we don't know about the ethics of this ;-)
The data is put into an ASCII file in "UCL's BRDF" format.
The file stores the provenance of the observations, and does
some preliminary QA filtering using the state_1km band. The
crap observations are filtered out then...
EXAMPLES
./parallel_leech.py --lat 43.4130156 --lon -8.0694678 --output="caaveiro"
AUTHOR
Jose Gomez-Dans (UCL/NCEO)
j.gomez-dans@ucl.ac.uk
"""
from multiprocessing.dummy import Pool
import datetime
import sys
import numpy as np
import install_cas_client
from pydap.client import open_url
import optparse
def do_command_line ():
    """Parse command-line options.

    Returns (user, password, latitude, longitude, output_file, year_start,
    year_end, pool_size). Python 2 / optparse based.
    """
    parser = optparse.OptionParser(formatter=optparse.TitledHelpFormatter(), \
        usage=globals()['__doc__'])
    parser.add_option ('-l', '--lat', action='store', dest="latitude", \
        type=float, help='Latitude in decimal degrees' )
    parser.add_option ('-g', '--lon', action='store', dest="longitude", \
        type=float, help='Longitude in decimal degrees' )
    parser.add_option('-o', '--output', action="store", dest="output_file", \
        default="grabba", type=str, help="Output file and directory" )
    parser.add_option('-b', '--begin', action="store", dest="year_start", \
        default=2003, type=int, help="Start year" )
    parser.add_option('-e', '--end', action="store", dest="year_end", \
        type=int, default=2014, help="End year" )
    parser.add_option('-p', '--pool', action="store", dest="pool_size", \
        type=int, default=10, help="Number of simultaneous threads to use" )
    parser.add_option('-P', '--password', action="store", dest="password", \
        type=str, help="EarthData Login password" )
    parser.add_option('-U', '--user', action="store", dest="user", \
        type=str, help="EarthData Login username" )
    (options, args) = parser.parse_args()
    # NOTE(review): optparse always populates declared dests (with None when
    # absent), so these membership tests never fire; an `options.user is None`
    # check is probably what was intended — verify.
    if 'user' not in options.__dict__:
        parser.error("You need to provide a username! Sgrunt!")
    if 'password' not in options.__dict__:
        parser.error("You need to provide a password! Sgrunt!")
    return options.user, options.password, options.latitude, \
        options.longitude, options.output_file, \
        options.year_start, options.year_end, options.pool_size
def lonlat(lon, lat, scale_factor=2.):
    """Map a longitude/latitude pair to its MODIS tile and pixel location.

    The scale factor selects the product resolution: 1 for 1 km, 2 for
    500 m and 4 for 250 m data.

    Parameters
    ----------
    lon: float
        Longitude in decimal degrees (expected in [-180, 180]).
    lat: float
        Latitude in decimal degrees; positive north.
    scale_factor: float
        Resolution multiplier relative to the 1 km grid.

    Returns
    -------
    (h_tile, v_tile, line, sample): the horizontal and vertical MODIS tile
    numbers and the pixel row/column within that tile at the chosen
    resolution.
    """
    scale = scale_factor * 1.
    sphere_radius = 6371007.181      # authalic sphere radius used by MODIS
    upper_left_x = -20015109.354     # grid upper-left corner (metres)
    upper_left_y = 10007554.677
    cell = 926.62543305              # 1 km nominal cell size (metres)
    tile_extent = 1200 * cell        # one tile spans 1200 one-km cells
    # Project to the MODIS sinusoidal grid.
    x = np.deg2rad(lon) * sphere_radius * np.cos(np.deg2rad(lat))
    y = np.deg2rad(lat) * sphere_radius
    v_tile = int(-(y - upper_left_y) / tile_extent)
    h_tile = int((x - upper_left_x) / tile_extent)
    line = (upper_left_y - y - v_tile * tile_extent) / (cell / scale)
    sample = (x - upper_left_x - h_tile * tile_extent) / (cell / scale)
    return h_tile, v_tile, int(line), int(sample)
def grab_slave ( inp, leech_query=50):
    """
    This function downloads a particular band, for a given pixel
    and time period. Returns the band name and the array with all
    the data.

    Parameters
    -----------
    inp: iter
        An iterable with the location (full openDAP URL), the band name,
        the starting time bin, the time axis, and the sample and line.
    leech_query: int
        The OpenDAP server has issues servicing long time queries, so
        requests are chunked to at most this many time steps each.

    Returns
    --------
    The band name and a 1D array with the data (one year of data).
    """
    location, band, i0,time, sample, line = inp
    ds = open_url ( location )
    nsteps = len ( time )
    tbins = nsteps/leech_query + 1 # We get 100 bins at a go
    x = np.zeros ( nsteps )
    # Fetch the series in leech_query-sized windows; the last window is
    # clipped to the remaining number of steps.
    for tstep in xrange(tbins):
        the_end = min(leech_query,len(time) - (leech_query*(tstep)))
        x[tstep*leech_query:(tstep*leech_query + the_end)] = \
            ds[band][(i0+tstep*leech_query):(i0+tstep*leech_query+the_end), line, sample].squeeze()
    return ( band, x )
def grab_refl_data_parallel ( user, password, lon, lat, year, collection="006" ):
    """
    Build the list of (location, band, start-index, time-axis, sample, line)
    work items required to download one year of TERRA and AQUA reflectance
    for the given pixel. The output list feeds ``grab_slave``.
    """
    htile, vtile, line, sample = lonlat ( lon, lat, scale_factor=2. )
    # Next line is needed for 1km data, such as angles, QA...
    htile, vtile, line1k, sample1k = lonlat ( lon, lat, scale_factor=1. )
    # Half-km bands: the 7 surface-reflectance bands plus obs coverage and QC.
    bands_hk = [ "sur_refl_b%02d_1" % i for i in xrange(1,8) ]
    bands_hk += [ "obscov_500m_1", "QC_500m_1" ]
    # One-km bands: viewing/illumination angles and the state QA flags.
    bands_1k = [ "SolarZenith_1", "SolarAzimuth_1", \
        "SensorZenith_1", "SensorAzimuth_1" ]
    bands_1k += [ "state_1km_1" ]
    map_struct = []
    plat_list = [ "MOD09GA.%s" % collection, "MYD09GA.%s" % collection ]
    for isens, prod in enumerate( plat_list ):
        location = "http://%s:%s@opendap.cr.usgs.gov/opendap/" % ( user, password) + \
            "hyrax/%s/h%02dv%02d.ncml" % ( prod, htile, vtile )
        ds = open_url( location )
        time = ds['time'][:]
        # Time axis is days since 2000-01-01; locate the requested year.
        xs = (datetime.date ( year, 1, 1) - datetime.date ( 2000, 1, 1 )).days
        xt = (datetime.date ( year, 12, 31) - datetime.date ( 2000, 1, 1 )).days
        i0 = np.nonzero( time == xs )[0]
        it = np.nonzero( time == xt )[0]
        # Skip the product if the year is not covered by the dataset.
        if len(i0) == 0 or len(it) == 0:
            continue
        time = time[i0:(it+1)]
        for band in bands_hk:
            map_struct.append ( [ location, band, i0,time, sample, line] )
        for band in bands_1k:
            map_struct.append ( [ location, band, i0,time, sample1k, line1k] )
    return map_struct
def grab_refl_data ( user, password, lon, lat ):
    """This is the old and sequential way of doing things. We've gone
    all parallel now...

    NOTE(review): this version indexes ds[band][..., sample, line] while
    grab_slave uses [..., line, sample] — one of the two orderings is
    transposed; verify against the dataset dimension order.
    """
    htile, vtile, line, sample = lonlat ( lon, lat, scale_factor=2. )
    # Next line is needed for 1km data, such as angles, QA...
    htile, vtile, line1k, sample1k = lonlat ( lon, lat, scale_factor=1. )
    print "Getting tile h%02dv%02d..." % (htile, vtile)
    bands_hk = [ "sur_refl_b%02d_1" % i for i in xrange(1,8) ]
    bands_hk += [ "obscov_500m_1", "QC_500m_1" ]
    bands_1k = [ "SolarZenith_1", "SolarAzimuth_1", \
        "SensorZenith_1", "SensorAzimuth_1" ]
    bands_1k += [ "state_1km_1" ]
    # One dict per platform (TERRA, AQUA), keyed by band name.
    retrieved_data = [{}, {}]
    for isens, prod in enumerate( [ "MOD09GA.005", "MYD09GA.005"] ):
        print "Doing product %s" % prod
        ds = open_url("http://%s:%s@opendap.cr.usgs.gov/opendap/" % ( user, password) +\
            "hyrax/%s/h%02dv%02d.ncml" % ( prod, htile, vtile ) )
        print "\tGetting time..."
        sys.stdout.flush()
        time = ds['time'][:]
        retrieved_data[isens]['time'] = time
        n_tbins = len(time)/100 + 1 # We get 100 bins at a go
        for band in bands_hk:
            print "\tDoing %s "%band,
            sys.stdout.flush()
            retrieved_data[isens][band] = np.zeros_like(time)
            for tstep in xrange(n_tbins):
                print "*",
                sys.stdout.flush()
                retrieved_data[isens][band][tstep*100:(tstep+1)*100] = \
                    ds[band][tstep*100:(tstep+1)*100, sample, line].squeeze()
        for band in bands_1k:
            print "\tDoing %s "%band,
            sys.stdout.flush()
            retrieved_data[isens][band] = np.zeros_like(time)
            for tstep in xrange(n_tbins):
                print "*",
                sys.stdout.flush()
                retrieved_data[isens][band][tstep*100:(tstep+1)*100] = \
                    ds[band][tstep*100:(tstep+1)*100, sample1k, line1k].squeeze()
    return retrieved_data
def grab_data ( username, password, year,
                longitude, latitude, output_file, pool_size ):
    """Download one year of data for the pixel in parallel and write a
    QA-filtered "UCL BRDF" ASCII file named <output_file>_<year>.brdf.

    NOTE(review): the indices below assume 14 work items per platform
    (9 half-km + 5 one-km bands), so results[0..13] are TERRA and
    results[14..27] AQUA; doys[1]/doys[15] pick one representative time
    axis per platform — verify against grab_refl_data_parallel's ordering.
    """
    print "Downloading year %d..." % year
    the_data = grab_refl_data_parallel ( username, password,
                                         longitude, latitude, year )
    pool = Pool( pool_size )
    results = pool.map( grab_slave, the_data)
    pool.close()
    pool.join()
    # Now add the DoY to each dataset...
    doys = []
    for dataset in the_data:
        doys.append ( np.array ( [int((datetime.date(2000,1,1)+datetime.timedelta(days=x)).strftime("%j")) for x in dataset[3]] ) )
    Ntime_slots = len( doys[0] ) + len ( doys[15] ) # TERRA & AQUA
    # out = np.zeros( Ntime_slots, 7+4+1+1+1+1 ) # 7 bands, 4 angles,1 QA@1K, QA@HK, ObsCov, DoY
    # state_1km values considered clear/usable observations.
    QA_OK=np.array([8,72,136,200,1032,1288,2056,2120,2184,2248])
    qa_mod09 = np.logical_or.reduce([results[13][1]==x for x in QA_OK])
    qa_myd09 = np.logical_or.reduce([results[27][1]==x for x in QA_OK])
    output_fname = "%s_%04d.brdf" % ( output_file, year )
    print "\tSaving file to ->%s<-" % output_fname
    fp = open ( output_fname, 'w' )
    fp.write ("# DoY,Platform,SZA,SAA,VZA,VAA,B01,B02,B03,B04,B05,B06,B07,QA1K,QAHK,ObsCov\n" )
    # One line per day that passes QA; platform 1 = TERRA, 2 = AQUA.
    for doy in doys[1]:
        s = None
        passer = doys[1] == doy
        if passer.sum() == 1 and qa_mod09[passer]:
            s = "%d, %d, %10.4G, %10.4G, %10.4G, %10.4G, %10.4G, %10.4G, %10.4G, %10.4G, %10.4G, %10.4G, %10.4G, %10d, %10d, %10.4G" % \
                ( doy, 1, results[9][1][passer], results[10][1][passer], results[11][1][passer], results[12][1][passer],
                  results[0][1][passer],results[1][1][passer],results[2][1][passer],results[3][1][passer],\
                  results[4][1][passer],results[5][1][passer],results[6][1][passer], results[13][1][passer],
                  results[8][1][passer],results[7][1][passer] )
            fp.write ( "%s\n" % s )
        passer = doys[14] == doy
        if passer.sum() == 1 and qa_myd09[passer]:
            s = "%d, %d, %10.4G, %10.4G, %10.4G, %10.4G, %10.4G, %10.4G, %10.4G, %10.4G, %10.4G, %10.4G, %10.4G, %10d, %10d, %10.4G" % \
                ( doy, 2,results[23][1][passer], results[24][1][passer], results[25][1][passer], results[26][1][passer],
                  results[14][1][passer],results[15][1][passer],results[16][1][passer],results[17][1][passer],\
                  results[18][1][passer],results[19][1][passer],results[20][1][passer], results[27][1][passer],
                  results[22][1][passer],results[21][1][passer] )
            fp.write ( "%s\n" % s )
    fp.close()
if __name__ == "__main__":
    # Parse options once, then fetch each requested year sequentially
    # (each year's bands are downloaded by a thread pool).
    user, password, latitude, longitude, output_file,year_start, year_end, \
        pool_size = do_command_line ()
    for year in xrange ( year_start, year_end + 1):
        grab_data ( user, password, year, longitude, latitude, output_file, pool_size )
| true
|
5fe750a66c5bdc3ff41c6664d3b7313dc1424671
|
Python
|
jcockbain/ctci-solutions
|
/chapter-17/Q10_majority_element.py
|
UTF-8
| 714
| 3.46875
| 3
|
[] |
no_license
|
import unittest
def majority_element(arr):
    """Boyer-Moore majority vote: return the element occurring more than
    len(arr)/2 times, or -1 if no such element exists."""
    candidate = 0
    votes = 0
    for value in arr:
        if votes == 0:
            candidate = value
        votes += 1 if value == candidate else -1
    # Verification pass: the surviving candidate is a majority only if it
    # really accounts for more than half of the elements.
    occurrences = sum(1 for value in arr if value == candidate)
    return candidate if occurrences > len(arr) / 2 else -1
class Test(unittest.TestCase):
    """Unit tests for majority_element."""

    def test_majority_element(self):
        cases = [
            ([1, 2, 5, 9, 5, 9, 5, 5, 5], 5),
            ([2, 2, 2, 2, 2, 9, 5, 5, 5], 2),
            ([1, 2, 5, 9, 5, 9, 2, 3, 5], -1),
        ]
        for arr, expected in cases:
            self.assertEqual(expected, majority_element(arr))
| true
|
c35c1b8cf36cb5fce6f5684d8247a862a5ec2070
|
Python
|
phil-shin/Python-Practice-Projects
|
/SizeFilter.py
|
UTF-8
| 538
| 3.078125
| 3
|
[] |
no_license
|
#! python
# Sizefilter.py
# Walk a folder tree and print the absolute path of every large file.
import os, shutil
# Set working directory
cwd = os.path.join('c:', os.sep, 'Users', 'Phil', 'Documents', 'python')
# Loop to walk through folder tree
for folderName, subfolders, filenames in os.walk(cwd):
    for filename in filenames:
        # NOTE(review): folderName from os.walk is already rooted at cwd, so
        # the extra cwd argument is redundant (os.path.join discards earlier
        # components when a later one is absolute).
        absPath = os.path.join(cwd, folderName, filename)
        # Report files of 100 kB or more.
        if os.path.getsize(absPath) >= 100000: #Set size filter
            print(absPath)
| true
|
765a883100a39bcf26c16a54c3a27f5eed5ea85a
|
Python
|
MPIBGC-TEE/CompartmentalSystems
|
/prototypes/ABC/ModelRun.py
|
UTF-8
| 1,531
| 3.21875
| 3
|
[
"MIT"
] |
permissive
|
import unittest
from numpy import NaN
from abc import ABCMeta, abstractmethod
class ModelRun(metaclass=ABCMeta):
    """Abstract base class for model runs.

    Subclasses must override every @abstractmethod before they can be
    instantiated; concrete methods defined here are inherited automatically
    and may optionally be overloaded.
    """

    @abstractmethod
    def solve(self):
        # Placeholder result; concrete subclasses provide the real value.
        return NaN

    def do_some_thing(self):
        # Concrete method inherited by all subclasses.
        return 43
class ModelRunWithMissingMethods(ModelRun):
    # Deliberately does not implement solve(); instantiating it must raise
    # TypeError (exercised by the tests below).
    pass
class PWS_ModelRun(ModelRun):
    # Concrete run: overrides the abstract solve(), so it can be instantiated.
    def solve(self):
        return 42
class D_ModelRun(ModelRun):
    # Second concrete run with a different solve() result.
    def solve(self):
        return 24
class TestModelRun(unittest.TestCase):
    """Exercise the abstract-base-class contract and the concrete subclasses."""

    def test_init__(self):
        # The abstract class itself can not be instanciated.
        with self.assertRaises(TypeError):
            ModelRun()
        # A subclass missing any abstractmethod cannot be instantiated either.
        with self.assertRaises(TypeError):
            ModelRunWithMissingMethods()
        # A subclass implementing all abstract methods instantiates fine.
        PWS_ModelRun()

    def test_do_some_thing(self):
        first = PWS_ModelRun()
        self.assertEqual(first.solve(), 42)
        self.assertEqual(first.do_some_thing(), 43)
        second = D_ModelRun()
        self.assertEqual(second.solve(), 24)
        self.assertEqual(second.do_some_thing(), 43)
# Run the test suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
| true
|
199eeab6d51316b3766c2b1acad9dfed1afcc5d9
|
Python
|
tonydavidx/Python-Crash-Course
|
/Chapter10/10_9_Silent_cats_and_dogs.py
|
UTF-8
| 386
| 3.28125
| 3
|
[] |
no_license
|
def read_files(filenames):
    """Print the contents of the given file; missing files are silently skipped.

    The silence on FileNotFoundError is deliberate (this is the "silent cats
    and dogs" exercise).
    """
    try:
        with open(filenames) as handle:
            print(handle.read())
    except FileNotFoundError:
        pass
# Try each file; sdds.txt is intentionally missing to show the error is
# swallowed silently.
names = ['cats.txt','dogs.txt','sdds.txt']
for name in names:
    read_files(name)
| true
|
97ce30bdf496f17c4d9f98d2faed9d9123bc33e4
|
Python
|
andres0191/AirBnB_clone_v2
|
/models/place.py
|
UTF-8
| 2,243
| 2.8125
| 3
|
[] |
no_license
|
#!/usr/bin/python3
"""This is the place class"""
import models
from models.review import Review
from models.base_model import BaseModel, Base
from sqlalchemy import Column, Integer, Float, String, ForeignKey
from sqlalchemy.orm import relationship
from os import getenv
class Place(BaseModel, Base):
    """This is the class for Place

    Attributes:
        city_id: city id
        user_id: user id
        name: name input
        description: string of description
        number_rooms: number of rooms in int
        number_bathrooms: number of bathrooms in int
        max_guest: maximum guest in int
        price_by_night: price for a staying in int
        latitude: latitude in float
        longitude: longitude in float
        amenity_ids: list of Amenity ids
    """
    __tablename__ = "places"
    # Column definitions are used for DB storage; otherwise plain class
    # attributes provide defaults for file storage.
    if getenv('HBNB_TYPE_STORAGE') == 'db':
        city_id = Column(String(60), ForeignKey('cities.id'),
                         nullable=False)
        user_id = Column(String(60), ForeignKey('users.id'),
                         nullable=False)
        name = Column(String(128), nullable=False)
        description = Column(String(1024), nullable=True)
        number_rooms = Column(Integer, default=0, nullable=False)
        number_bathrooms = Column(Integer, default=0, nullable=False)
        max_guest = Column(Integer, default=0, nullable=False)
        price_by_night = Column(Integer, default=0, nullable=False)
        latitude = Column(Float, nullable=True)
        longitude = Column(Float, nullable=True)
        reviews = relationship("Review", cascade="delete", backref="place")
    else:
        city_id = ""
        user_id = ""
        name = ""
        description = ""
        number_rooms = 0
        number_bathrooms = 0
        max_guest = 0
        price_by_night = 0
        latitude = 0.0
        longitude = 0.0
        amenity_ids = []

    # NOTE(review): this @property is defined unconditionally at class body
    # level, so it rebinds the name `reviews` and shadows the SQLAlchemy
    # relationship assigned in the 'db' branch above — confirm whether it was
    # meant to exist only for file storage.
    @property
    def reviews(self):
        """
        Return the list of review instance with place.id
        """
        list_review = []
        for review in models.storage.all(Review).values():
            if review.place_id == self.id:
                list_review.append(review)
        return list_review
| true
|
ad8ceae9d608912ca4a10debd99bf6b306022314
|
Python
|
HassanSherwani/Model_Deployment
|
/Books_rest_api/app.py
|
UTF-8
| 1,193
| 2.9375
| 3
|
[] |
no_license
|
from flask import Flask,jsonify,make_response,abort
import json
from flask_restful import Api, Resource
# init app
app = Flask(__name__)

# Small in-memory sample dataset.
# BUG FIX: book 2 used the misspelled key "tietle", so its JSON output
# exposed the typo instead of a "title" field.
books = [{"id": 1, "title": "whatever1", }, {"id": 2, "title": "whatever2", }]

# External local data: list of book records loaded from disk at import time.
with open("books.json") as f:
    books_json = json.load(f)


# Route
@app.route('/')
def index():
    """Landing page describing the API."""
    return "Basic API for query of book information using titles and their index"


# api route: all in-memory books
@app.route('/api/v1/books', methods=["GET"])
def get_book():
    return jsonify({"books": books})


# api route taking the book id as a path parameter
@app.route('/api/v1/books/<int:id>', methods=["GET"])
def get_book_index(id):
    book = [book for book in books if book['id'] == id]
    return jsonify({"books": book})


# version 2 serves the JSON file loaded above
@app.route('/api/v2/books', methods=["GET"])
def get_book_json():
    return jsonify({"books": books_json})


# api route using the (exact) title as query
@app.route('/api/v2/books/<string:title>', methods=['GET'])
def get_book_title(title):
    book_json_img = [book for book in books_json if book["title"] == title]
    return jsonify({"books": book_json_img})


if __name__ == '__main__':
    app.run(debug=True)
| true
|
aaf417461414daab4c277f0ffa69106ba8a435d7
|
Python
|
Madrich-routes/routes_laboratory
|
/solvers/madrich/api_module/osrm_module.py
|
UTF-8
| 3,732
| 2.625
| 3
|
[] |
no_license
|
from itertools import chain
from typing import List, Union, Tuple
from urllib.parse import quote
import numpy as np
import requests
import ujson
from polyline import encode as polyline_encode
from solvers.madrich.utils import to_array
# Aliases used in the type hints below.
array = np.ndarray
Point = Tuple[float, float]  # a coordinate pair -- presumably (lat, lon); TODO confirm against callers
# Default OSRM routing server.
osrm_host = 'http://dimitrius.keenetic.link:5000'
# Empirical correction factors per transport mode (units unclear from here -- verify before reuse).
coefficient = {'speed_car': 7.5, 'speed_pedestrian': 1, 'distance_car': 1.06, 'distance_pedestrian': 0.8}
def _encode_src_dst(src, dst):
    """Polyline-encode src followed by dst and build the table-query params."""
    # OSRM's polyline format expects the two coordinates swapped.
    swapped = tuple((pt[1], pt[0]) for pt in chain(src, dst))
    polyline = polyline_encode(swapped)
    n_src, n_dst = len(src), len(dst)
    params = dict(
        sources=";".join(str(i) for i in range(n_src)),
        dests=";".join(str(i) for i in range(n_src, n_src + n_dst)),
        annotations=True, )
    return quote(polyline), params
def _encode_src(src):
    """Polyline-encode src for a sources-only table query."""
    swapped = tuple((pt[1], pt[0]) for pt in src)
    return quote(polyline_encode(swapped)), dict(annotations="duration")
def _turn_over(points):
pts = points.copy()
for i in range(len(pts)):
pts[i][0], pts[i][1] = points[i][1], points[i][0]
return pts
def get_matrix(points: Union[array, List[Point]], factor: Union[str, List[str]], host=osrm_host, dst=None,
               profile="driving") -> Union[array, List[array]]:
    """Request (asymmetric) cost matrices from the OSRM table service.

    :param points: source points
    :param factor: 'duration' / 'distance' annotation(s) for OSRM
    :param host: OSRM host
    :param dst: optional destination points (yields a sources x dests matrix)
    :param profile: routing profile, default 'driving'
    :return: one matrix when factor is a str, a list of matrices when it is a list
    """
    points = points if isinstance(points, array) else to_array(points)
    points = _turn_over(points)  # swap coordinate order for the polyline encoding
    if dst is not None:
        polyline, _ = _encode_src_dst(points, dst)
    else:
        polyline, _ = _encode_src(points)
    annotation = ','.join(factor) if isinstance(factor, list) else factor
    r = requests.get(f"{host}/table/v1/{profile}/polyline({polyline})?annotations={annotation}")
    assert r.status_code == 200, f'osrm bad request: {r.reason}'
    parsed_json = ujson.loads(r.content)
    if isinstance(factor, str):
        output = np.array(parsed_json[f'{factor}s'])
        # An all-zero matrix means the coordinates were passed in the wrong order.
        assert output.sum() != 0, 'координаты переверни, да?'
        return output
    else:
        output = [np.array(parsed_json[f'{fact}s']) for fact in factor]
        # BUG FIX: the original asserted any(m.sum() == 0), i.e. it *required*
        # a zero matrix; inverted here to match the single-factor branch.
        assert all(m.sum() != 0 for m in output), 'координаты переверни, да?'
        return output
def get_matrices(points: Union[array, List[Point]], factor: Union[str, List[str]], max_cost: int, split=15,
                 host=osrm_host, dst=None, profile="driving") -> Union[array, List[array]]:
    """Return the OSRM cost matrix replicated once per *split*-minute slot.

    :param points: points
    :param factor: 'duration' and/or 'distance'
    :param max_cost: horizon (seconds from start) the slots must cover
    :param split: slot length in minutes
    :param host: OSRM host
    :param dst: optional destination points
    :param profile: default 'driving'
    :return: a (slots, n, n) stack, or a list of such stacks when factor is a list
    """
    slot_seconds = split * 60
    n = len(points)
    depth = int(np.ceil(max_cost / slot_seconds))
    base = get_matrix(points, factor, host, dst, profile)
    if isinstance(base, list):
        stacks = []
        for matrix in base:
            stack = np.zeros(shape=(depth, n, n), dtype=np.int64)
            for level in range(depth):
                stack[level] = matrix.copy()
            stacks.append(stack)
        return stacks
    stack = np.zeros(shape=(depth, n, n), dtype=np.int64)
    for level in range(depth):
        stack[level] = base
    return stack
| true
|
133079b200289bcdde0fe01a1fd874e81c10d7f2
|
Python
|
francoislievens/ELEN0062-ML-Pass-Predictor
|
/Main_forest.py
|
UTF-8
| 1,284
| 2.578125
| 3
|
[] |
no_license
|
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from Dataset import Dataset
from Forest import Forest
import pickle
if __name__ == '__main__':
    # Entry point: load a pre-built dataset, train a random forest pass
    # predictor, then score it on the held-out test split.
    # Create the dataset structure:
    dataset = Dataset()
    # Import original training set
    #dataset.import_original_training(split_train=0.8, split_test=0.17, split_val=0.03)
    # Compute the pair form of the dataset
    #dataset.learning_set_builders()
    # Save in a file to speed up experiments
    #dataset.save_dataset()
    # Restore dataset from a file (the commented steps above are only needed
    # the first time, to build and serialize the dataset):
    dataset.restore_dataset()
    # Create the model
    model = Forest()
    # Set the dataset
    model.set_dataset(dataset)
    # Train the model
    model.train()
    # Serialize the model:
    model.save_model()
    # Restore
    #model.restore_model()
    # Predict on the pairwise test set; one score per (pass, candidate) pair.
    pred = model.rf.predict(dataset.pairs_test_x)
    # Reshape to one row per pass -- assumes 22 candidate receivers per pass
    # (TODO confirm against Dataset.learning_set_builders).
    pred = np.reshape(pred, (-1, 22))
    pred_idx = np.argmax(pred, axis=1)
    # Shift to 1-based ids, presumably to match original_test_y's labelling.
    pred_idx += 1
    score = np.zeros(pred_idx.shape[0])
    for i in range(0, len(pred_idx)):
        print('pred: {} - targets: {}'.format(pred_idx[i], dataset.original_test_y[i]))
        if pred_idx[i] == dataset.original_test_y[i]:
            score[i] = 1
    # Mean of the 0/1 hits = top-1 accuracy on the test split.
    print('final score = {}'.format(np.mean(score)))
| true
|
976f3bd8827029ca02273c1914ef58516af150fd
|
Python
|
hansimglueck/homeVisit
|
/hardware/ws.py
|
UTF-8
| 1,633
| 2.796875
| 3
|
[] |
no_license
|
import websocket
import thread
import time
import json
class Client(object):
    """Auto-reconnecting websocket client that registers a role with the game
    server and forwards every parsed message to a user callback.

    NOTE: Python 2 code (print statements, `thread` module).
    """
    def __init__(self, role, cb, name=None):
        # role: role string sent to the server on registration.
        # cb:   callable invoked with every decoded JSON message.
        # name: optional display name; defaults to "NN".
        print "Client:init"
        self.connected = False
        self.cb = cb
        self.role = role
        if name is None:
            self.name = "NN"
        else:
            self.name = name
        websocket.enableTrace(True)
        self.open_websocket()
    def on_message(self, ws, message):
        """Decode an incoming JSON message and hand it to the callback."""
        print "WS-message:"
        print message
        msg = json.loads(message)
        #print msg["type"]
        if msg["type"] == "registerConfirm": print "registered at the game"
        self.cb(msg)
        #if msg["type"] == "display":
        #print msg["data"]["text"]
        #self.cb(msg)
    def on_error(self, ws, error):
        print error
    def on_close(self, ws):
        # Reconnect forever: wait a second, then open a fresh socket.
        print "Socket closed"
        self.connected = False
        print "### closed ###"
        time.sleep(1)
        self.open_websocket()
    def on_open(self, ws):
        # Announce ourselves to the server as soon as the socket is up.
        print "Socket opened"
        self.connected = True
        self.ws.send(json.dumps({'type':"register", 'data':{'role': self.role, 'name':self.name}}))
    def open_websocket(self):
        """Open the websocket on a background thread (run_forever blocks)."""
        def run(* args):
            self.ws = websocket.WebSocketApp("ws://localhost:80",
                on_message = self.on_message,
                on_error = self.on_error,
                on_close = self.on_close)
            self.ws.on_open = self.on_open
            self.ws.run_forever()
            print "konnte keine verbindung aufbauen"
        thread.start_new_thread(run, ())
    def send(self, type, data=None, param=None):
        """Send a JSON message of the given type with optional payload."""
        print "send type:" + type
        #print "send type: " + type + " data: " + data + " param: " + param
        self.ws.send(json.dumps({'type':type, 'data':data, 'param':param}))
    def conn(self):
        # Connection status accessor.
        return self.connected
| true
|
81e96814a2b8806097e15e64431fec66f3f7ee94
|
Python
|
xtymichael/Leetcode_python
|
/Solutions/048_Rotate_Image.py
|
UTF-8
| 380
| 3.296875
| 3
|
[] |
no_license
|
class Solution:
    # @param {integer[][]} matrix
    # @return {void} Do not return anything, modify matrix in-place instead.
    def rotate(self, matrix):
        """Rotate the square matrix 90 degrees clockwise, in place."""
        # Snapshot the rows first; a plain `copy = matrix` would alias the
        # very rows we are about to overwrite.
        snapshot = [row[:] for row in matrix]
        n = len(matrix)
        for r in range(n):
            for c in range(n):
                # New (r, c) comes from old column r, walking rows bottom-up.
                matrix[r][c] = snapshot[n - 1 - c][r]
| true
|
62f7a8213b8a229979a0fbe2db9cecdb4d236f07
|
Python
|
iBurnApp/iBurn-Data
|
/scripts/archive/2013/scraper.py
|
UTF-8
| 5,247
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/python
# Scrapes data from the burningman website, and serializes it into json
import lxml.html
import lxml.html.soupparser
import urllib
import sys
import re
import urllib2
import json
import re
def _clean_string(str):
if str:
str = re.sub(r'^[\n\t\s]+', '', str)
str = re.sub(r'[\n\t\s]+$', '', str)
str = str.replace(" (dot) ", ".")
str = str.replace(" (at) ", "@")
str = re.sub(r"[\n\t\s]+\s+[\n\t\s]+", "\n\n", str)
if str.find("by ") == 0:
str = str[4:]
str = str.split(", ")
return str
def _parse_xml(xml):
    """Serialise each direct child of *xml* to a UTF-8 string (Python 2 / lxml).

    Children with no leading text are serialised as raw HTML, the rest as
    plain text; stray <br>/<p>/clear-div markup is then stripped.
    """
    parsed_data = []
    for p in xml.iterchildren():
        if p.text is None:
            data = lxml.html.tostring(p, encoding=unicode)
        else:
            data = lxml.html.tostring(p, method='text', encoding=unicode)
        # Clean up leftover markup from the HTML serialisation.
        data = re.sub(r"^<br>", "", data)\
            .replace("<p>", "\n")\
            .replace("<div style=\"clear:both\"></div>", "")
        data = data.encode("utf-8")
        parsed_data.append(data)
    return parsed_data
def _request(url, element):
    """Fetch *url*, extract JSON field *element*, and parse it as HTML (py2).

    The proxy wraps its JSON payload in one extra character on each side,
    hence the [1:-1] slice.  Inline <script> blocks are stripped before the
    soup parse.  Returns an lxml root element.
    """
    #print url
    opener = urllib2.build_opener()
    req = urllib2.Request(url)
    f = opener.open(req)
    # Drop the wrapper characters around the JSON body -- presumably
    # parentheses from a JSONP-style response; TODO confirm.
    data = f.read()[1:-1]
    data = json.loads(data)
    data = data[element]
    data = re.sub(r"<script[\w\s=\/\"\n{}>:,;'-\.#]*</script>", "", data, flags=re.MULTILINE)
    root = lxml.html.soupparser.fromstring(data)
    return root
class Honorarium(object):
    """Scraper for honorarium art installation listings (Python 2)."""
    # http://www.burningman.com/installations/art_honor.html
    PROXY_URL = "http://blog.burningman.com/ctrl/art/?job=getData&yy=2013&artType=H"
    def _parse_artist(self, artist):
        """Turn one <div class="artlisting"> element into a dict of fields."""
        ret = {}
        # First two parsed children are skipped -- presumably layout markup;
        # TODO confirm against the live page structure.
        parsed_data = _parse_xml(artist)[2:]
        ret["image_url"] = artist.xpath("//img")[0].get("src")
        ret["title"] = parsed_data[0]
        ret["artists"] = parsed_data[1].replace("by ", "")
        ret["artist_location"] = parsed_data[2]
        # Index 5 holds the description unless it is an "addthis" share widget.
        if "addthis" not in parsed_data[5]:
            ret["description"] = parsed_data[5]
        else:
            ret["description"] = ""
        i = 0
        while i < len(parsed_data):
            content = parsed_data[i]
            if content.find("URL:") == 0:
                # Value follows on the next fragment.
                i += 1
                ret["url"] = parsed_data[i]
            elif content.find("Contact:") == 0:
                i += 1
                ret["contact"] = parsed_data[i]
            elif "<div" not in content and "addthis" not in content and "</p>" not in content and "script" not in content:
                # Anything that is not markup/widget noise is appended to the description.
                ret["description"] += " " + content
            i += 1
        # Normalise every collected value (note: _clean_string returns lists).
        for key, value in ret.iteritems():
            ret[key] = _clean_string(value)
        return ret
    def get_data(self):
        """Fetch and parse every artist listing; returns a list of dicts."""
        root = _request(self.PROXY_URL, "artData")
        artists = root.xpath('//div[@class="artlisting"]')
        return [self._parse_artist(i) for i in artists]
class Camp(object):
    """Scraper for theme-camp listings, fetched one index letter at a time (py2)."""
    PROXY_URL = "http://blog.burningman.com/ctrl/themecamps/"
    ROOT_URL = "http://www.burningman.com"
    def get_index(self):
        # The site pages camps by first letter; '#' covers non-alphabetic names.
        return list("ABCDEFGHIJKLMNOPQRSTUVWXYZ#")
        #return list("W")
    def _parse_camps(self, camp):
        """Turn one <div class="camp"> element into a dict of fields (or None)."""
        c = camp
        #c = camp.getnext()
        #print c.text_content().encode("utf-8")
        parsed_data = []
        if c is None:
            return
        parsed_data = _parse_xml(c)
        ret = {}
        i = 0
        # While True, free-text fragments are accumulated into the description.
        parsing_desc = False
        while i < len(parsed_data):
            content = parsed_data[i]
            if i == 0:
                # First fragment is the camp name; description follows.
                ret["name"] = content
                parsing_desc = True
            elif content.find("Hometown:") == 0:
                parsing_desc = False
                # [10:] skips "Hometown:" plus one following character --
                # presumably a space; TODO confirm.
                ret["hometown"] = content[10:]
            elif content.find("URL:") == 0:
                parsing_desc = False
                i += 1
                ret["url"] = parsed_data[i]
            elif content.find("http://") == 0:
                parsing_desc = False
                ret["url"] = content
            elif content.find("Contact:") == 0:
                parsing_desc = False
                i += 1
                ret["contact"] = parsed_data[i]
            elif parsing_desc:
                if "description" not in ret:
                    ret["description"] = ""
                ret["description"] += content
            else:
                # Unrecognised fragment outside the description block.
                print "ERROR: %s %s" % (i, content)
            i += 1
        # Normalise every collected value (note: _clean_string returns lists).
        for key, value in ret.iteritems():
            ret[key] = _clean_string(value)
        return ret
    def get_data(self):
        """Fetch and parse every camp across all index letters."""
        results = []
        for index in self.get_index():
            root = _request(self.PROXY_URL+"?job=getData&yy=2013&ci=%s" % index, "campData")
            camps = root.xpath('//div[@class="camp"]')
            results.extend([self._parse_camps(i) for i in camps])
        return results
if __name__ == "__main__":
    # CLI entry point: pick the scraper by name and dump its results as JSON.
    if len(sys.argv) < 2 or sys.argv[1] not in ["camps", "honorarium"]:
        print "Usage: scraper.py <camps|honorarium>"
        sys.exit(0)
    if sys.argv[1] == "camps":
        Class = Camp
    elif sys.argv[1] == "honorarium":
        Class = Honorarium
    h = Class()
    data = h.get_data()
    # Pretty-printed, non-ASCII-safe JSON to stdout.
    print json.dumps(data, ensure_ascii=False, sort_keys=True, indent=4)
    #print json.dumps(data)
| true
|
df1cd93749c93540e57b857440ad93ea2a028d71
|
Python
|
l3shen/PredictingMolecularPropertiesCHAMPS
|
/visualization.py
|
UTF-8
| 1,444
| 2.96875
| 3
|
[] |
no_license
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
# Scatter the atom positions from `structures` in three 2-D projections
# (x-y, x-z, y-z), colour-coded by element, first M rows per element.
train = pd.read_csv("C:\\Users\\lawre\\Downloads\\train.csv (1)\\train.csv")
structures = pd.read_csv("C:\\Users\\lawre\\Downloads\\train.csv (1)\\structures.csv")
structures.head()
#print(structures.head())
#gotta try and plot this bad boy in 2d-2d
M = 8000
fig, ax = plt.subplots(1, 3, figsize=(20, 5))
colors = ["black", "gold", "blue", "darkred", "purple"]  # NOTE: assumes <= 5 element types
atoms = structures.atom.unique()
for n in range(len(atoms)):
    subset = structures.loc[structures.atom == atoms[n]]
    # BUG FIX: the x-z and y-z scatters sat *outside* this loop, so panels 2
    # and 3 only showed the last element type; all three panels now plot
    # every element.
    ax[0].scatter(subset.x.values[0:M], subset.y.values[0:M],
                  color=colors[n], s=2, alpha=0.5, label=atoms[n])
    ax[1].scatter(subset.x.values[0:M], subset.z.values[0:M],
                  color=colors[n], s=2, alpha=0.5, label=atoms[n])
    ax[2].scatter(subset.y.values[0:M], subset.z.values[0:M],
                  color=colors[n], s=2, alpha=0.5, label=atoms[n])
# Legends and axis labels once per panel, after all elements are drawn.
for panel, (h_axis, v_axis) in zip(ax, [("x", "y"), ("x", "z"), ("y", "z")]):
    panel.legend()
    panel.set_xlabel(h_axis)
    panel.set_ylabel(v_axis)
plt.show()
| true
|
d023edde504e76cf9a059b194cf15c4e7458cec7
|
Python
|
tonyallard/CEIScripts
|
/problem-analysis/ExtractEHCHistogram.py
|
UTF-8
| 1,592
| 3.109375
| 3
|
[] |
no_license
|
#! /usr/bin/python
#Author: Tony Allard
#Date: 30 March 2016
#Description: A Python script for extracting the EHC Guidance Histogram from log files.
#Extracts data to CSV file, including averages.
#NOTE: It omits problems with no solution from CSV file.
import sys
import os
import re
import collections
# Marker lines used by the planner's log format.
EHC_DELIM = "#; EHC Performance Histogram:"
DEPTH_DELIM = "#; "


def extractEHCDepths(logFile):
    """Parse the EHC performance histogram from an iterator of log lines.

    *logFile* must be a line iterator (an open file or iter(list)): lines
    inside the histogram section are consumed with next().  Returns a dict
    mapping search depth -> count; empty if no histogram is present.
    NOTE(review): an EOF in the middle of a histogram raises StopIteration.
    """
    histogram = {}
    for line in logFile:
        if EHC_DELIM in line:
            line = next(logFile)
            while DEPTH_DELIM in line:
                values = re.findall(r'\d+', line)
                if len(values) != 2:
                    # BUG FIX: the original formatted an undefined name
                    # (runTime) here, so the error path itself raised NameError.
                    raise RuntimeError("Error! Multiple values found: %s" % line)
                histogram[int(values[0])] = int(values[1])
                line = next(logFile)
    return histogram
def main(args):
    """Aggregate EHC histograms from every log file and write a CSV summary."""
    csvFile = open('ehc-data.csv', 'w')
    csvFile.write("Depth, Count\n")
    histogram = {}
    # Hard-coded log directory for this experiment batch.
    path = "/mnt/data/160404-Colin-TRH-logs/"
    avgDepth = 0
    probCount = 0
    for filename in os.listdir(path):
        f = open(path+filename, 'r')
        hist = extractEHCDepths(f)
        if len(hist) > 0:
            probCount += 1
        # Merge this file's histogram into the global one.
        for x in hist:
            if x in histogram:
                histogram[x] += hist[x]
            else:
                histogram[x] = hist[x]
    #Sort histogram
    histogram = collections.OrderedDict(sorted(histogram.items()))
    #Save histogram to file
    for x in histogram:
        csvFile.write("%i, %i\n"%(x, histogram[x]))
        # NOTE(review): this sums *counts*, not depths, so the figure printed
        # below is the average histogram mass per problem, not an average
        # depth -- confirm the intended metric.
        avgDepth += histogram[x]
    # NOTE(review): divides by zero when no file contained a histogram.
    avgDepth /= float(probCount)
    print ("%i problems evaluated. Average EHC Depth of %f."%(probCount, avgDepth))
    csvFile.write("%i, %f\n"%(probCount, avgDepth))
    csvFile.close()
#Run Main Function
if __name__ == "__main__":
    main(sys.argv)
| true
|
d0a8633e13db5b0a8baaa03a3178d7c26d7ac71d
|
Python
|
hujianli94/Python-code
|
/2.程序流程语句/循环语句/while循环/test.py
|
UTF-8
| 375
| 4.4375
| 4
|
[] |
no_license
|
#!/usr/bin/env python
#-*- coding:utf8 -*-
# Classic Chinese remainder puzzle: find x <= 100 with
# x % 3 == 2, x % 5 == 4 and x % 7 == 3 (the answer is 59 mod 105).
num = 100
print("今有一个数,在100以内,三三数之剩余2,五五数之剩余4,七七数之剩余3,请问这个数是什么?")
for count in range(num + 1):
    if count % 3 == 2 and count % 5 == 4 and count % 7 == 3:
        print("这个数是:" + str(count))
print("循环结束!!".center(100, "-"))
| true
|
fa0f7ff57a25816f8641233216e9e5da5956f177
|
Python
|
BearHeathen/MadRamblings
|
/static_class_methods.py
|
UTF-8
| 136
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
# Static and Class methods
class Person(object):
    """A minimal person record with a class-wide population counter."""

    population = 50  # shared across all instances

    def __init__(self, name, age):
        """Store the person's name and age on the instance."""
        self.name = name
        self.age = age
| true
|
a5eff38156577a1abc13f82974a2bf008b31247e
|
Python
|
andres-fm/ProblemSolving
|
/lcm_division_algorithm.py
|
UTF-8
| 893
| 4.0625
| 4
|
[] |
no_license
|
# computes the greatest common divisor of two numbers (Euclid's division algorithm)
def gcd(a, b):
    """Return gcd(a, b) for integers with min(a, b) > 0."""
    dividend = max(a, b)
    divisor = min(a, b)
    r = -1
    while r != 0:
        r = divmod(dividend, divisor)[1]
        dividend = divisor
        divisor = r
    return dividend


# computes the least common multiple from the identity gcd(a,b)*lcm(a,b) = a*b
# (faster than factoring the numbers into primes or any other method)
def lcm(a, b):
    """Return the least common multiple of a and b."""
    # BUG FIX: the original called the undefined name `mcd`, raising NameError.
    return (a * b) // gcd(a, b)
# recursive version to compute the lcm from the numbers of an iterable
def lcm_rec(iterable):
    """Recursively fold lcm over a non-empty indexable sequence."""
    head, tail = iterable[0], iterable[1:]
    if not tail:
        return head
    return lcm(head, lcm_rec(tail))
# iterative version to compute the lcm from the numbers of an iterable
def lcm_it(iterable):
    """Iteratively fold lcm over a non-empty indexable sequence."""
    running = iterable[0]
    for value in iterable[1:]:
        running = lcm(running, value)
    return running
#print(mcm_rec(range(1,10000)))
#print(mcm_it(range(1,10000)))
| true
|
73e9bc4b501cf83d2cfee58c7a7a5db6826cbc15
|
Python
|
bipinmsit/mycode
|
/scripts/python/bin/angle_from_coordinates.py
|
UTF-8
| 2,119
| 2.984375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
import pandas as pd
import os.path as path
import sys
import raster_processing.raster_translate as rt
import numpy as np
from numpy.linalg import norm
from subprocess import call
def help():
    """Print the command-line usage string."""
    usage = "Usage:- angle_from_coordinates.py <input-csv> <row-id-1> <row-id-2> \n"
    print(usage)
def main(argv=None):
    """Read two named sites from a CSV and print the local->global rotation angle.

    Expects the last three CLI arguments to be: csv path, site name 1, site name 2.
    Returns -1 after printing help, 1 on missing arguments, None on success.
    """
    if argv is None:
        argv = sys.argv
    # Positional payload is always taken from the tail of argv.
    data = argv[-3:]
    if (argv.count('-h') > 0) or (argv.count('--help')) > 0:
        help()
        return -1
    if len(argv) < 4:
        #When there are not enough arguments to use the command
        print("Use '-h' to view help",file=sys.stdout)
        print("ERROR 1. Not enough arguments",file=sys.stderr)
        return 1
    CSV_FILE = data[0]
    site_name_1 = data[1]
    site_name_2 = data[2]
    site_data = pd.read_csv(CSV_FILE)
    try:
        # NOTE(review): .values yields 1-element arrays per row here, so
        # .index(site_name_1) compares an array to a string -- verify this
        # actually matches rather than always raising.
        site_idx_1 = list(site_data[['site_name']].values).index(site_name_1)
        site_idx_2 = list(site_data[['site_name']].values).index(site_name_2)
    except Exception:
        print("Site not found!")
        sys.exit()
    # Reference coordinates of each site in the local and global frames.
    # NOTE(review): np.float is removed in modern NumPy; use float instead.
    p1_local = np.array(site_data[['ref_local_x', 'ref_local_y']].values[site_idx_1],
                        dtype=np.float)
    print("Local Coordinates 1 {}".format(p1_local))
    p1_global = np.array(site_data[['ref_global_x', 'ref_global_y']].values[site_idx_1],
                         dtype=np.float)
    print("Global Coordinates 1 {}".format(p1_global))
    p2_local = np.array(site_data[['ref_local_x', 'ref_local_y']].values[site_idx_2],
                        dtype=np.float)
    print("Local Coordinates 2 {}".format(p2_local))
    p2_global = np.array(site_data[['ref_global_x', 'ref_global_y']].values[site_idx_2],
                         dtype=np.float)
    print("Global Coordinates 2 {}".format(p2_global))
    # Angle between the site-to-site vectors in the two frames:
    # acos of the normalised dot product.
    dot_prod = np.dot(p1_local - p2_local, p1_global-p2_global)
    angle = np.arccos(dot_prod/(np.linalg.norm(p1_global - p2_global) * np.linalg.norm(p1_local - p2_local)))
    print("Angle of Rotation is {}".format(angle))
if __name__ == "__main__":
    sys.exit(main(sys.argv))
| true
|
eac335c569b6873c24514fc1451218fb21761eaa
|
Python
|
bslate/tess
|
/tess.py
|
UTF-8
| 11,109
| 3.109375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
"""Converts 2D polygon point geometry into triangles.
Usage:
tess.py [input] [output]
The input file should be formatted like this:
10.34 10.234
10 50
60 50
60 10
20 30
25 40
30 30
40 30
45 20
50 30
Each line represents X and Y data for a point. Empty lines signify a
new path. Therefore this sample file contains three paths.
If the output file name is not given it spills to standard output. The
output will be formatted like this:
# test_data.dat --> Standard Output: 3 paths, 12 triangles
8 3 9 f f t
3 8 6 f f f
7 6 8 f f t
6 7 2 f f f
9 2 7 f f t
2 9 3 f f t
4 3 6 f f t
3 4 0 f f t
4 0 5 f f t
0 5 1 f f t
2 1 5 t f f
6 2 5 f f t
Each line has the format: idx0, idx1, idx2, edge0, edge1, edge2
The indexes refer to points in the order they appear in the input file.
The edges are true/false values. edge0 tells you if the line between
idx0 and idx1 is a shape boundary. edge1 is for idx1 to idx2, edge2 is
for idx2 to idx1.
"""
from OpenGL.GLU import *
from OpenGL.GL import *
import sys
import traceback
class ZoteTess:
    """GLU-based tessellator that converts polygon paths into Triangle objects.

    NOTE: Python 2 code (print statements).
    """
    def __init__(self):
        # tess_style: the GL primitive mode of the strip currently being emitted.
        self.tess_style = 0
        # current_shape: vertex indices accumulated for the current primitive run.
        self.current_shape = []
    def triangulate(self, shape):
        """
        Converts a list of paths into a list of triangles.
        The input object should be a subclass of Shape. It has a
        'paths' member, which is a list of lists. Each sub-list is an
        individual path. The tessellation will determine if paths
        represent holes or disjoint shapes.
        """
        triangles = [] # store result
        self.current_shape = []
        #
        # Define several callback functions.
        #
        def cb_vert(v):
            # GLU hands back the integer index we registered in gluTessVertex.
            self.current_shape.append(v)
        def cb_begin(style):
            self.tess_style = style
        def cb_end():
            # Unroll the primitive run GLU just finished into plain triangles.
            if self.tess_style == GL_TRIANGLE_FAN:
                # Fan: every triangle shares the first vertex.
                c = self.current_shape.pop(0)
                p1 = self.current_shape.pop(0)
                while self.current_shape:
                    p2 = self.current_shape.pop(0)
                    triangles.append([c, p1, p2])
                    p1 = p2
            elif self.tess_style == GL_TRIANGLE_STRIP:
                # Strip: each new vertex forms a triangle with the previous two.
                p1 = self.current_shape.pop(0)
                p2 = self.current_shape.pop(0)
                while self.current_shape:
                    p3 = self.current_shape.pop(0)
                    triangles.append([p1, p2, p3])
                    p1 = p2
                    p2 = p3
            elif self.tess_style == GL_TRIANGLES:
                # each three points constitute a triangle, no sharing
                while self.current_shape:
                    p1 = self.current_shape.pop(0)
                    p2 = self.current_shape.pop(0)
                    p3 = self.current_shape.pop(0)
                    triangles.append([p1, p2, p3])
            else:
                print "Unknown tessellation style:", self.tess_style
            self.tess_style = None
            self.current_shape = []
        def cb_error(what):
            print "error:", what
        def cb_combine(c, v, weight):
            # Called when paths self-intersect; new synthesized vertices have no
            # index in the original point list, hence the warning.
            print "combine:", c, v, weight, "(this will probably cause problems)"
            return (c[0], c[1], c[2])
        tess = gluNewTess()
        gluTessCallback(tess, GLU_TESS_VERTEX, cb_vert)
        gluTessCallback(tess, GLU_TESS_BEGIN, cb_begin)
        gluTessCallback(tess, GLU_TESS_END, cb_end)
        gluTessCallback(tess, GLU_TESS_ERROR, cb_error)
        gluTessCallback(tess, GLU_TESS_COMBINE, cb_combine)
        # Feed every path as a contour; vertices are registered by running index.
        count = 0
        gluTessBeginPolygon(tess, None)
        for path in shape.paths:
            gluTessBeginContour(tess)
            for pt in path:
                gluTessVertex(tess, pt, count)
                count = count + 1
            gluTessEndContour(tess)
        gluTessEndPolygon(tess)
        # Wrap the raw index triples into Triangle objects, dropping degenerates.
        tuples = shape.make_bound_tuples()
        flat = shape.flattened_points()
        ret = []
        for t in triangles:
            perhaps = Triangle(t, tuples, flat)
            if not perhaps.degenerate:
                ret.append(perhaps)
        return ret
def is_edge(a, b, bounds):
    """Returns true if a and b are adjacent within a single path."""
    span = find_bound_tuple(a, b, bounds)
    if span is not None:
        return is_adjacent(a, b, span)
    else:
        return False


def is_adjacent(a, b, span):
    """True when indices a and b are neighbours inside *span* (paths are circular)."""
    ret = False
    lower = min(a, b)
    upper = max(a, b)
    diff = upper - lower
    # BUG FIX: the original compared ints with `is`, which tests object
    # identity and only happens to work inside CPython's small-int cache
    # (-5..256); indices >= 257 silently compared unequal.
    if diff == 1:
        ret = True
    elif lower == span[0] and upper == span[1]:
        # The first and last point of a path are adjacent (paths are closed).
        ret = True
    return ret


def find_bound_tuple(a, b, bounds):
    """If a and b are both included in a bounds tuple, return it.

    Otherwise return None.
    """
    def inside(num, spanish):
        return num >= spanish[0] and num <= spanish[1]
    for span in bounds:
        if inside(a, span) and inside(b, span):
            return span
    return None
def cross(a, b):
    """Z component of the 2-D cross product of vectors a and b."""
    return a[0] * b[1] - a[1] * b[0]


def vec(start, end):
    """Vector pointing from *start* to *end*."""
    dx = end[0] - start[0]
    dy = end[1] - start[1]
    return (dx, dy)
class Triangle(object):
    """One output triangle: three point indices plus per-edge boundary flags."""
    def __init__(self, tri, bounds, flattened_points):
        """Given three input indexes (e.g. 7, 3, 12) and a list of boundary
        tuples, create a triangle whose surface normal points the
        correct way and knows which edges are user-defined.
        Surface normal: the winding order of the points will on exit
        be defined such that it is in a right-handed coordinate
        system. Specifically: let vec1 run from p0 to p1, and vec2
        from p0 to p2. vec1 x vec2 is positive.
        Edges: Each edge of a triangle might be on the boundary of the
        shape, or it might be internal to the shape. An edge is True
        if it is on the boundary of the shape. We know it is on the
        boundary if the vertex indices are adjacent within a single
        path.
        Say we have an input paths list with indexes like this:
        [ [0, 1, 2, 3, 4], [5, 6, 7], [8, 9, 10] ]
        Use Shape's make_bound_tuples method to generate the list of
        tuples that looks like this: [ (0, 4), (5, 7), (8, 10) ], and
        pass that in as the 'bounds' parameter.
        Points 1 and 2 are adjacent (as are 2 and 1). Points 4 and 0
        are adjacent because lists are considered circular. Points 4
        and 5 are NOT adjacent because they come from different lists.
        With these path bounds and triangle verts at 6, 7, 9, the
        resulting triangle will have edges [True, False, False] since
        the first edge from 6 to 7 is a boundary, while the other two
        are not.
        """
        # degenerate: set True when the triangle has (near) zero area.
        self.degenerate = False
        self.points = [None] * 3 # because that's easy to understand, right?
        self.points[0] = tri[0]
        self.points[1] = tri[1]
        self.points[2] = tri[2]
        # edges[k] is the boundary flag for the edge points[k] -> points[(k+1)%3].
        self.edges = [None] * 3
        self.edges[0] = is_edge(tri[0], tri[1], bounds)
        self.edges[1] = is_edge(tri[1], tri[2], bounds)
        self.edges[2] = is_edge(tri[2], tri[0], bounds)
        # Ensure the winding order is correct. Cross product must be positive.
        # Swap things around if it is not.
        #
        # make vectors from 0 to 1 and 0 to 2
        v1 = vec(flattened_points[tri[0]], flattened_points[tri[1]])
        v2 = vec(flattened_points[tri[0]], flattened_points[tri[2]])
        c = cross(v1, v2)
        if abs(c) < 0.0001:
            # (Near-)zero area: mark degenerate so triangulate() can drop it.
            self.degenerate = True
        elif (c < 0):
            # Negative cross product means clockwise winding: swapping points 0
            # and 2 flips it; edges 0 and 1 swap with them, edge 2 (2->0) stays.
            # swap points 0 and 2
            tmpPt = self.points[0]
            self.points[0] = self.points[2]
            self.points[2] = tmpPt
            # swap edges 0 and 1
            tmpEdge = self.edges[0]
            self.edges[0] = self.edges[1]
            self.edges[1] = tmpEdge
    def __str__(self):
        # Serialise as "i0 i1 i2 e0 e1 e2" with t/f edge flags (output format).
        def tf(b):
            if b:
                return "t"
            else:
                return "f"
        return str(self.points[0]) + " " + str(self.points[1]) + " " + str(self.points[2]) + " " + tf(self.edges[0]) + " " + tf(self.edges[1]) + " " + tf(self.edges[2])
class Shape(object):
    """A polygon as a list of paths; each path is a list of (x, y, z) points."""
    def __init__(self):
        self.paths = []
    def print_paths(self):
        # Debug helper: dump every point as "x, y" (Python 2 print).
        for path in self.paths:
            for pt in path:
                print str(pt[0]) + ", " + str(pt[1])
    def make_bound_tuples(self):
        """Returns a list of tuples. Each has the lower and upper inclusive
        bounds of a path.
        Example input: [ [0, 1, 2, 3, 4], [5, 6, 7], [8, 9, 10] ]
        Example output: [ (0, 4), (5, 7), (8, 10) ]
        """
        ret = []
        low = 0
        for path in self.paths:
            high = low + len(path) - 1
            ret.append((low, high))
            low = high + 1
        return ret
    def flattened_points(self):
        # All points of all paths in order, so global indices line up with
        # the running count used during tessellation.
        ret = []
        for sublist in self.paths:
            for item in sublist:
                ret.append(item)
        return ret
class DiskFile(Shape):
    """Shape loaded from a text file: one "x y" point per line, blank line
    starts a new path (Python 2 code)."""
    def __init__(self, file_name):
        super(DiskFile, self).__init__()
        try:
            infile = open(file_name)
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt etc.
            print "Could not open file:", file_name
            sys.exit()
        path = []
        for line in infile:
            try:
                if len(line) < 2:
                    # Blank (or 1-char) line terminates the current path.
                    self.paths.append(path)
                    path = []
                else:
                    tokens = line.split()
                    # NOTE(review): `is 2` relies on CPython small-int caching;
                    # should be `== 2`.
                    if len(tokens) is 2:
                        x = float(tokens[0])
                        y = float(tokens[1])
                        # GLU wants 3-D vertices, so z is fixed at 0.
                        point = (x, y, 0)
                        path.append(point)
            except:
                print "Error reading line from", file_name
                print "Perhaps there's a syntax error?"
                sys.exit()
        # The last path is not followed by a blank line; append it explicitly.
        self.paths.append(path)
def print_triangles(label, triangles):
    """Debug helper: print a labelled listing of the triangles (Python 2)."""
    print label, "--", len(triangles), "triangles"
    print "-----------"
    print ""
    for t in triangles:
        print t
        print ""
def send_output(inputFile, outputFile, numPaths, triangles, output):
    """Write the tessellation header plus one line per triangle to *output*."""
    header = ("# " + inputFile + " --> " + outputFile + ": " +
              str(numPaths) + " paths, " + str(len(triangles)) + " triangles\n")
    output.write(header)
    for tri in triangles:
        output.write(str(tri) + "\n")
def usage():
    # Print the module docstring, which doubles as the CLI help text (py2).
    print __doc__
if __name__ == "__main__":
    # CLI entry: tess.py <input> [output]; no output file means stdout.
    # (Python 2 code: print statements, `except Exception, e` syntax.)
    if len(sys.argv) > 1:
        try:
            zt = ZoteTess()
            df = DiskFile(sys.argv[1])
            triangles = zt.triangulate(df) # list of Triangle objects
            if len(sys.argv) > 2:
                output = open(sys.argv[2], "w")
                fn = sys.argv[2]
            else:
                output = sys.stdout
                fn = "Standard Output"
            send_output(sys.argv[1], fn, len(df.paths), triangles, output)
        except Exception, e:
            # Any failure: dump the traceback but keep a friendly message.
            print(traceback.format_exc())
            print "Got exception while trying to read file", sys.argv[1]
            # usage()
    else:
        usage()
| true
|
09d90c03fecad72156716d87765a80ca9b30652b
|
Python
|
perolz/TIF155
|
/Problem set 2/Hopf Bifurcation.py
|
UTF-8
| 1,858
| 2.734375
| 3
|
[] |
no_license
|
import sympy as sy
import scipy
from scipy.integrate import odeint
import numpy as np
import matplotlib.pyplot as plt
# def model1(X,t):
# dxdt=-X[0]**3+4*X[0]-3*X[1]
# dydt=3*X[0]+2*X[1]**3+4*X[1]
# return [dxdt,dydt]
#
# ts = np.linspace(0, 12, 100)
# P0 = [1, 1]
# Ps = odeint(model1, P0, ts)
# plt.plot(Ps[:,0],Ps[:,1])
# plt.show()
#Linearisation gives
# Symbolic setup for the planar system x' = mu*x - 3*y + f, y' = 3*x + mu*y + g.
x,y=sy.symbols('x,y',function=True)
mu,t=sy.symbols('mu,t')
omega=sy.symbols('omega',complex=True)
# Linear part of the system at the origin (eigenvalues mu +/- 3i).
A=sy.Matrix([[mu,-3],[3,mu]])
yprim,xprim=sy.symbols('yprim,xprim',complex=True)
f,g=sy.symbols('f g',function=True)
# Nonlinear terms of the two equations.
f=x**3
g=2*y**3
# f=-x**2
# g=2*x**2
w=3
a=sy.symbols('a')
# Equation for 16*a -- appears to be the first Lyapunov coefficient formula
# for a Hopf bifurcation (Guckenheimer-Holmes style); TODO confirm source.
equation=sy.Eq(16*a,f.diff(x,x,x)+f.diff(x,y,y)+g.diff(x,x,y)+g.diff(y,y,y)+
               sy.Rational(1,w)*(f.diff(x,y)*(f.diff(x,x)+f.diff(y,y))-g.diff(x,y)*(g.diff(x,x)+g.diff(y,y))
                                 -f.diff(x,x)*g.diff(x,x)+f.diff(y,y)*g.diff(y,y))
               )
#sy.pprint(sy.solve(equation))
# Full ODE system for display/lambdification.
eq1=sy.Eq(sy.Derivative(x(t),t),mu*x-3*y-x**3)
eq2=sy.Eq(sy.Derivative(y(t),t),3*x+mu*y+2*y**3)
system=[eq1,eq2]
muValue=4
dxdt=sy.lambdify((mu,x,y),eq1.rhs)
dydt=sy.lambdify((mu,x,y),eq2.rhs)
newSystem=[z.subs(mu,muValue) for z in system]
newSystem=[z.subs('**','^') for z in newSystem]
print(newSystem)
# Phase portrait on a 100x100 grid.
ts = np.linspace(-1, 1, 100)
Y, X = np.mgrid[-3:3:100j, -3:3:100j]
# NOTE(review): these U, V do not match eq1/eq2 above (nor the commented pair
# below, which does) -- confirm which vector field the figure should show.
U=muValue*Y-X**2
V=-X+muValue*Y+2*X**2
# U = -X**3 +muValue * X-3 *Y
# V = 3*X + 2*Y**3 + muValue*Y
speed = np.sqrt(U*U + V*V)
fig = plt.figure(figsize=(7, 9))
# Varying density along a streamline
ax0 = fig.add_subplot(1,1,1)
strm = ax0.streamplot(X, Y, U, V, linewidth=2)
ax0.set_title(r'$\dot{x}=4*x+y-x^2 \quad \dot{y}=-x +4*y+2*x^2$')
plt.xlabel('x',fontweight='bold')
plt.ylabel('y',fontweight='bold')
plt.suptitle(r'Hopf bifucation with $\mu$=%d' %muValue)
plt.savefig('Images/Hopfeq2%d.png' %muValue)
plt.show()
| true
|
9c77804fd3508b2e5d4b9eb2c3446a7eb8707671
|
Python
|
littlesearch/python-study
|
/05_functional_programming/05_partial_function.py
|
UTF-8
| 673
| 4.125
| 4
|
[] |
no_license
|
#coding=utf-8
import functools
# Partial functions (Python 2 demo).
# Converting many binary strings means passing int(x, base=2) every time,
# which is tedious; one option is a small wrapper that defaults base to 2:
def int2(x, base=2):
    return int(x, base)
# functools.partial builds that partial function for us, so int2() does not
# have to be written by hand; the line below creates the new function int3:
int3 = functools.partial(int,base=2)
print int3("1111")
# In short, functools.partial freezes some of a function's arguments (i.e.
# gives them defaults) and returns a new function that is simpler to call.
| true
|
c9a0c57a60b905ce42f4434aa25488193bc0e1e4
|
Python
|
kingno21/auto_compile
|
/compile.py
|
UTF-8
| 1,395
| 2.625
| 3
|
[] |
no_license
|
#!/usr/bin/python
import os, glob, sys
import subprocess
import re
import read_json as rj
def find_class_name(contents):
    """Return (class name, contents) for the first line mentioning 'class'.

    The matching line is also rewritten with the word 'public' removed, so
    the temporary Java source compiles as a package-private class.
    """
    for idx, text in enumerate(contents):
        if "class" not in text:
            continue
        contents[idx] = text.replace('public', '')
        # The name is extracted from the *original* line text.
        return re.search(r"(?<=class )\w+", text).group(0), contents
def run_cmd(cmd, test=None):
    """Run *cmd*, feeding *test* to its stdin, and print its output (py2).

    NOTE: stderr is not piped, so `errs` below is always None per the
    subprocess API; the process's own stderr goes straight to the console.
    """
    proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    try:
        outs, errs = proc.communicate(test)
        print outs, errs
    except OSError:
        # Kill the child if communication with it fails.
        proc.kill()
def main():
    """Compile each *.java file in the cwd and run it against its test cases.

    The last CLI argument names the JSON test-case file; test inputs for the
    N-th java file live under the key "testN" (Python 2 code).
    """
    # NOTE(review): sys.argv always contains at least the script name, so this
    # guard can never fire; it was probably meant to be `< 2`.
    if len(sys.argv) < 1:
        print 'Use test case'
        return
    test_case = rj.get_case(sys.argv[-1])
    # NOTE(review): `file` shadows the Python 2 builtin of the same name.
    for index, file in enumerate(glob.glob("*.java")):
        if os.stat(file).st_size == 0:
            continue
        class_name = ""
        tmp_name = "tmp.java"
        test_tmp = "test{}".format(index + 1)
        # Rewrite the source with 'public' stripped so it compiles under the
        # throwaway file name tmp.java regardless of the original class name.
        with open(file, 'r') as f:
            class_name, contents = find_class_name(f.readlines())
        with open(tmp_name, 'w') as f1:
            f1.writelines(contents)
        run_cmd(["javac", tmp_name])
        print 'run: {}'.format(test_tmp)
        for test in test_case[test_tmp]:
            run_cmd(["java", class_name], test)
        # Clean up the temporary source and the generated class file.
        os.remove(tmp_name)
        os.remove(class_name + '.class')
if __name__ == '__main__':
    main()
| true
|
c59704fd17a63135d3e1d34f60c69cbf9a8b70ff
|
Python
|
pkhadka56/ip2host
|
/domain2servername.py
|
UTF-8
| 320
| 2.90625
| 3
|
[] |
no_license
|
#!/usr/bin/env python
"""Resolve the hostname/IP given on the command line and report which named
server group its address belongs to (Python 2 script)."""
import sys
import socket
# Known server groups and their IP addresses.
# NOTE(review): 'servername2' maps to a plain string, not a 1-tuple (the
# trailing comma is missing), so `ipaddr in ip` does SUBSTRING matching for
# that entry — e.g. '127.0.0.' would also match. Confirm intent.
host = {
    'servername1': ('127.0.0.1','127.0.0.2'),
    'servername2': ('127.0.0.3')
}
ipaddr = socket.gethostbyname(sys.argv[1])
# Python 2 only: dict.iteritems() was removed in Python 3 (use .items()).
for server,ip in host.iteritems():
    if ipaddr in ip:
        print sys.argv[1],"got ip address",ipaddr,".It is in",server,"server."
| true
|
66ed173072c0dfe4a693a9b4aa9d0ec0b28e0176
|
Python
|
Hank-Liao-Yu-Chih/document
|
/OpenCV讀者資源/讀者資源/程式實例/ch4/ch4_10.py
|
UTF-8
| 368
| 3.1875
| 3
|
[] |
no_license
|
# ch4_10.py
# Show how OpenCV channel order matters: merging in B,G,R reproduces the
# original image, merging in R,G,B swaps red and blue.
import cv2

img = cv2.imread('street.jpg')
b, g, r = cv2.split(img)

# Re-assemble the channels in their native B, G, R order.
bgr = cv2.merge([b, g, r])
cv2.imshow("B -> G -> R ", bgr)

# Swap red and blue to visualize a wrong channel order.
rgb = cv2.merge([r, g, b])
cv2.imshow("R -> G -> B ", rgb)

cv2.waitKey(0)
cv2.destroyAllWindows()
| true
|
141b5ce383d1c313bf1068a32cf822850ef97f24
|
Python
|
LeonLH/Study-note
|
/learn_python_the_hard_way/mystuff/test.py
|
UTF-8
| 1,614
| 3.5625
| 4
|
[] |
no_license
|
# from math import pi
# Flo = pi
# print "%03g %.9f %G %E" % (Flo, Flo, Flo, Flo)
# R = round(4.33333)
# print "R = %r" % R
# print "%r" % pi
##################################################
# #!/usr/bin/python
#
# # Open a file
# fo = open("foo.txt", "ra+")
# print "Name of the file: ", fo.name
#
# # Assuming file has following 5 lines
# # This is 1st line
# # This is 2nd line
# # This is 3rd line
# # This is 4th line
# # This is 5th line
#
# # line = fo.readline()
# # print "Read Line: %s" % (line)
#
# # Now truncate remaining file.
# fo.truncate(28)
#
# # Try to read file now
# line = fo.readline()
# print "Read Line: %s" % (line)
#
# # Close opend file
# fo.close()
#################################################################
# Test for `Will print print a \n? `
# Yes, It will, and If you don't want it act like this, you can put a
# `,` after print sentence.
# print "Hello world"
# print "I'm Leon. "
#################################################################
# Test for coercive transformation
# def add(a, b):
# return a + b
#
# a =
# print "%d + %d = %d" % (a, b, add(a, b))
#################################################################
# Test for formatted string
# print "The number is \x42"
# print "The number is \103"
#################################################################
# This is test for file.truncate()
# NOTE(review): 'r+w' is not a valid mode string in Python 3 (ValueError);
# Python 2's open() tolerated it. The intended mode is probably 'r+'.
txt = open("test.txt", 'r+w')
# txt.truncate(20);
print txt.readline()
line = txt.readline()
print line
# Seek to byte offset 1 and continue reading from there.
txt.seek(1)
line = txt.readline()
print line
# NOTE(review): the file handle is never closed.
#################################################################
| true
|
f555a04111b913245a610fa3731b6297cbc7b97c
|
Python
|
pm0n3s/Python
|
/python1/python/dictionaries.py
|
UTF-8
| 1,000
| 4.59375
| 5
|
[] |
no_license
|
'''Create a dictionary containing some information about yourself.
The keys should include name, age, country of birth, favorite language.'''
me = {
    "name": "Patrick",
    "age": 26,
    "country": "USA",
    "favorite language": "Python"
}
'''Write a function that will print something like the following as it executes:
My name is Anna
My age is 101
My country of birth is The United States
My favorite language is Python
There are two steps to this process, building a dictionary and then gathering all
the data from it. Write a function that can take in and print out any dictionary
keys and values.
Note: The majority of data we will manipulate as web developers will be hashed in
a dictionary using key-value pairs. Repeat this assignment a few times to really
get the hang of unpacking dictionaries, as it's a very common requirement of any
web application.'''
# Print one "My <key> is <value>" line per entry, in dict iteration order.
# Python 2 only: dict.iteritems() and the print statement were removed in
# Python 3 (use d.items() and print()).
def read_dict(d):
    for key, value in d.iteritems():
        print "My {} is {}".format(key, value)
read_dict(me)
| true
|
72c0dd4e317826af46a3ae2199198709695e9f81
|
Python
|
yuweiDu/divide_NinaPro_database_5
|
/ninaweb_sEMG_envelop_divide_by_subject.py
|
UTF-8
| 3,712
| 2.515625
| 3
|
[] |
no_license
|
# coding: utf-8
from __future__ import division, print_function
import numpy as np
import os
import pdb
import matplotlib.pyplot as plt
import get_max_min
import utilities
import get_envelop
PLOT_ENVELOP = False
def str_in_str(list_of_str, str):
results = []
for s in list_of_str:
results.append(s in str)
return np.any(results)
if __name__=='__main__':
nb_channels = 16
cut_len = 64
fs = 200 # 采样频率
ma_len = 10 # 求envelop里面mean average的长度,越长数据越平滑
data_dir = './raw_data'
save_dir = './processed_data/envelop/divide_by_subject'
mat_path = utilities.walk_through_dir(data_dir)
# 选取subject的id
nb_subjects = 10
nb_subjects_for_train = 7
subject_id = np.arange(nb_subjects)
np.random.shuffle(subject_id)
subject_id_for_train = []
for i in range(nb_subjects_for_train):
subject_id_for_train.append('S' + str(subject_id[i] + 1))
EMG_train = []
LABEL_train = []
EMG_test = []
LABEL_test = []
for path in mat_path:
emg, label = utilities.read_mat(path) # emg, label均是二维矩阵
for i in range(nb_channels):
emg[:, i] = get_envelop.envelop(emg[:, i], fs, ma_len)
if PLOT_ENVELOP:
plt.figure()
plt.plot(emg[:, 0])
plt.show()
emg_cut, label_cut = utilities.process_emg_according_to_label(emg, label, cut_len)
if 'E1' in path:
pass
elif 'E2' in path:
for i, _label in enumerate(label_cut):
if _label != 0:
label_cut[i] = label_cut[i] + 12
elif 'E3' in path:
for i, _label in enumerate(label_cut):
if _label != 0:
label_cut[i] = label_cut[i] + 12 + 17
if str_in_str(subject_id_for_train, path):
EMG_train.append(emg_cut)
LABEL_train.append(label_cut)
else:
EMG_test.append(emg_cut)
LABEL_test.append(label_cut)
EMG_train = np.concatenate(EMG_train)
LABEL_train = np.concatenate(LABEL_train)
EMG_test = np.concatenate(EMG_test)
LABEL_test = np.concatenate(LABEL_test)
# 减少rest即label=0的动作
def reduce_rest_movement(emg, label):
emg_rest = emg[label == 0]
label_rest = label[label == 0]
pick_random_amount = int(np.sum(label == 1))
pick_random_index = np.random.choice(len(label_rest), pick_random_amount)
emg_rest = emg_rest[pick_random_index]
label_rest = label_rest[pick_random_index]
emg = emg[label > 0]
label = label[label > 0]
emg = np.concatenate((emg, emg_rest))
label = np.concatenate((label, label_rest))
return emg, label
EMG_train, LABEL_train = reduce_rest_movement(EMG_train, LABEL_train)
EMG_test, LABEL_test = reduce_rest_movement(EMG_test, LABEL_test)
# 将EMG归一化到[0, 1]之间
max_value = np.max(EMG_train)
EMG_train = EMG_train / max_value
EMG_test = EMG_test / max_value
EMG_train[EMG_train > 1] = 1
EMG_train[EMG_train < 0] = 0
EMG_test[EMG_test > 1] = 1
EMG_test[EMG_test < 0] = 0
# 保存
np.save(os.path.join(save_dir, 'EMG_train.npy'), EMG_train)
np.save(os.path.join(save_dir, 'label_train.npy'), LABEL_train)
np.save(os.path.join(save_dir, 'EMG_test.npy'), EMG_test)
np.save(os.path.join(save_dir, 'label_test.npy'), LABEL_test)
print('EMG train shape: ', EMG_train.shape)
print('LABEL train shape: ', LABEL_train.shape)
print('EMG test shape: ', EMG_test.shape)
print('LABEL test shape: ', LABEL_test.shape)
| true
|
fdf29076fe9195b3b0c5614ed6fcd9b5b22bd0e8
|
Python
|
SachinPitale/Python
|
/ex5.py
|
UTF-8
| 417
| 3.5625
| 4
|
[] |
no_license
|
"""Exercise 5: string formatting with variables (ported to Python 3)."""
my_name = "Sachin Pitale"
my_age = "27"  # Not a lie
my_hight = 165
my_weight = 75
my_eys = "blue"
my_teeth = "white"
my_hair = "brwon"

print("Let's talk about %s." % my_name)
# BUG FIX: height and weight were swapped in the next two lines — weight
# (75) was reported as inches tall and height (165) as pounds heavy.
print("He's %d inches tall." % my_hight)
print("He's %d punds heavy." % my_weight)
print("That is not a too actully heavy")
print("He 's got %s eyes and %s hair" % (my_eys, my_hair))
print("He's teeth are usually %s depending on the coffee. " % my_teeth)
| true
|
02df0626d5f794cb48f0332e790acf32db795f5f
|
Python
|
jkeung/Hackerrank_Problems
|
/algorithms/warmup/simple_array_sum.py
|
UTF-8
| 158
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/python
# Python 2 HackerRank solution: read a count, then n space-separated
# integers on one line, and print their sum.
import sys
# n is consumed but the count itself is not otherwise used.
n = int(raw_input())
arr = map(int,raw_input().strip().split(' '))
# NOTE(review): equivalent to the builtin sum(arr); the explicit initial
# value 0 also covers the empty list.
summation = reduce(lambda x,y: x + y, arr, 0)
print summation
| true
|
4072292eaf248a68e578223878bf976481f49f6c
|
Python
|
jonathanmann/leetcode
|
/python2/plusOne.py
|
UTF-8
| 544
| 3.203125
| 3
|
[] |
no_license
|
class Solution:
    """LeetCode 66 "Plus One"."""

    def plusOne(self, digits):
        """Add one to a non-negative integer given as a list of decimal
        digits (most significant first) and return the digit list.

        Mutates `digits` in place except in the all-nines case, where a
        new list with a leading 1 is returned.

        FIXES vs. the original: removed a leftover Python 2 debug print
        (`print i, digit`) that both spammed stdout and made the method a
        SyntaxError under Python 3, and replaced the double-reverse
        carry bookkeeping with a single right-to-left scan.
        """
        # Walk from the least-significant digit; the first digit below 9
        # absorbs the carry and we are done.
        for i in range(len(digits) - 1, -1, -1):
            if digits[i] < 9:
                digits[i] += 1
                return digits
            digits[i] = 0  # 9 rolls over to 0 and carries left
        # Every digit was 9 (e.g. [9, 9] -> [1, 0, 0]): prepend the carry.
        return [1] + digits
# Quick manual check: 899 + 1 -> [9, 0, 0]. Note plusOne mutates `digits`.
# (Python 2 print statement.)
s = Solution()
digits = [8,9,9]
print s.plusOne(digits)
| true
|
6cf833d3444256c994707e3a72cffcb46dd9ddd5
|
Python
|
ilkerc/ObjDetector
|
/helpers/DiscOP.py
|
UTF-8
| 3,458
| 2.8125
| 3
|
[] |
no_license
|
import theano
import theano.tensor as T
import numpy as np
class DiscOP(theano.Op):
    """
    Theano Op that snaps each of the 6 affine-transform parameters in a
    (batch, 6) theta matrix to the nearest value on a per-parameter
    linspace grid defined by (mins, maxs, ranges).
    """
    __props__ = ("mins", "maxs", "ranges")
    # One float32 matrix in, one float32 matrix out.
    itypes = [theano.tensor.fmatrix]
    otypes = [theano.tensor.fmatrix]

    def __init__(self, mins, maxs, ranges):
        # Per-parameter grid bounds and number of discrete steps
        # (each a length-6 sequence, one entry per theta column).
        self.mins = mins
        self.maxs = maxs
        self.ranges = ranges
        super(DiscOP, self).__init__()

    def perform(self, node, inputs, output_storage):
        """Numpy implementation: discretize the incoming theta batch."""
        # Input & output storage settings
        x = inputs[0]
        out = output_storage[0]
        # Calculation
        new_theta = self.discrete_theta(x)
        # Output Setting
        out[0] = new_theta

    # TODO: Investigate Output Gradients,
    # TODO: If we decide to include ranges as learning parameters, hereby we need to define their gradients
    def grad(self, inputs, output_grads):
        # Straight-through estimator: the true gradient of the snapping
        # step is zero almost everywhere, so identity is passed through.
        return [output_grads[0]]

    def discrete_theta(self, theta):
        """Snap each column of a (batch, 6) theta to its nearest grid value;
        returns a float32 array (squeezed, so (6,) for batch size 1)."""
        theta = theta.reshape((-1, 6))
        batch_size = theta.shape[0]
        # Candidate grids, one per parameter, tiled per batch row:
        # shape (batch_size, ranges[i]).
        t_1 = np.tile(np.linspace(self.mins[0], self.maxs[0], self.ranges[0]), batch_size).reshape((batch_size, -1))
        t_2 = np.tile(np.linspace(self.mins[1], self.maxs[1], self.ranges[1]), batch_size).reshape((batch_size, -1))
        t_3 = np.tile(np.linspace(self.mins[2], self.maxs[2], self.ranges[2]), batch_size).reshape((batch_size, -1))
        t_4 = np.tile(np.linspace(self.mins[3], self.maxs[3], self.ranges[3]), batch_size).reshape((batch_size, -1))
        t_5 = np.tile(np.linspace(self.mins[4], self.maxs[4], self.ranges[4]), batch_size).reshape((batch_size, -1))
        t_6 = np.tile(np.linspace(self.mins[5], self.maxs[5], self.ranges[5]), batch_size).reshape((batch_size, -1))
        # Each original parameter column as a (1, batch_size) row vector.
        t_1_o = np.expand_dims(theta[:, 0], axis=0)
        t_2_o = np.expand_dims(theta[:, 1], axis=0)
        t_3_o = np.expand_dims(theta[:, 2], axis=0)
        t_4_o = np.expand_dims(theta[:, 3], axis=0)
        t_5_o = np.expand_dims(theta[:, 4], axis=0)
        t_6_o = np.expand_dims(theta[:, 5], axis=0)
        # Absolute distance of every batch value to every grid candidate.
        dist_t1 = abs(t_1_o.T - t_1)
        dist_t2 = abs(t_2_o.T - t_2)
        dist_t3 = abs(t_3_o.T - t_3)
        dist_t4 = abs(t_4_o.T - t_4)
        dist_t5 = abs(t_5_o.T - t_5)
        dist_t6 = abs(t_6_o.T - t_6)
        # Index of the nearest grid value per batch row.
        arg_min_t1 = np.argmin(abs(dist_t1), axis=1)
        arg_min_t2 = np.argmin(abs(dist_t2), axis=1)
        arg_min_t3 = np.argmin(abs(dist_t3), axis=1)
        arg_min_t4 = np.argmin(abs(dist_t4), axis=1)
        arg_min_t5 = np.argmin(abs(dist_t5), axis=1)
        arg_min_t6 = np.argmin(abs(dist_t6), axis=1)
        # Gather the snapped values (grid rows are identical, so row 0 works).
        new_t1 = t_1[0, arg_min_t1]
        new_t2 = t_2[0, arg_min_t2]
        new_t3 = t_3[0, arg_min_t3]
        new_t4 = t_4[0, arg_min_t4]
        new_t5 = t_5[0, arg_min_t5]
        new_t6 = t_6[0, arg_min_t6]
        new_theta = np.squeeze(np.dstack([new_t1, new_t2, new_t3, new_t4, new_t5, new_t6]))
        return new_theta.astype('float32')
# Manual smoke test (Python 2 print statements).
# NOTE(review): this driver appears out of date with the class above —
# DiscOP() is called without the required (mins, maxs, ranges) arguments
# (TypeError), and disc_operator(x, y) passes two inputs although itypes
# declares a single fmatrix. Confirm against an earlier DiscOP signature.
if __name__ == "__main__":
    theano.config.exception_verbosity = 'high'
    b_size = 2
    bins_choose = np.linspace(-1, 1, 5)
    t_size = b_size*6
    bins = np.tile(bins_choose, t_size).reshape((t_size, -1))
    disc_operator = DiscOP()
    x = theano.tensor.fmatrix()
    y = theano.tensor.fmatrix()
    f = theano.function([x, y], disc_operator(x, y), mode='DebugMode', allow_input_downcast=True)
    x1 = bins
    y1 = np.random.rand(b_size, 6)
    print y1
    print "\n\n\n"
    print f(x1, y1)
| true
|
e29ac7c302cef8aa8ffb8f8b72f57425fdc36629
|
Python
|
Satwik95/Coding-101
|
/Competative Concepts/DP/ladder.py
|
UTF-8
| 231
| 2.921875
| 3
|
[] |
no_license
|
from functools import lru_cache


@lru_cache(maxsize=None)
def ladder(n):
    """Number of ways to climb `n` steps taking 1, 2, or 3 at a time.

    FIX: the original memoized into a module-level list `dp` sized for one
    fixed n, so ladder(m) raised IndexError for m > n and stale entries
    leaked between uses. lru_cache memoizes safely for any n.
    """
    if n == 0:
        return 1
    return sum(ladder(n - step) for step in (1, 2, 3) if n - step >= 0)


n = 4
ladder(n)
| true
|
1247ca91aafe749dc4e3db6372f8329c77f99f4c
|
Python
|
Gry1005/PytorchLearning1
|
/src/CnnCuda.py
|
UTF-8
| 3,535
| 2.96875
| 3
|
[] |
no_license
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.utils.data as Data
import torchvision
import matplotlib.pyplot as plt
EPOCH = 1 # number of full passes over the training set
BATCH_SIZE = 50 # samples per mini-batch
LR = 0.001
DOWNLOAD_MNIST = False
# Load the MNIST training set; ToTensor scales pixels to [0, 1].
train_data = torchvision.datasets.MNIST(
    root='./mnist',
    train=True,
    transform=torchvision.transforms.ToTensor(), #(0,1)
    download=DOWNLOAD_MNIST
)
# Batched, shuffled loader over the training set.
train_loader = Data.DataLoader(dataset=train_data,batch_size=BATCH_SIZE,shuffle=True,num_workers=2)
# Load the test set (raw, untransformed tensors).
test_data = torchvision.datasets.MNIST(root='./mnist/',train=False)
#!!!cuda change here: move the first 2000 test samples to the GPU !!!
# NOTE(review): Variable/volatile is the deprecated pre-0.4 PyTorch API.
test_x=Variable(torch.unsqueeze(test_data.test_data,dim=1),volatile=True).type(torch.FloatTensor)[:2000].cuda()/255 # raw pixel values are 0-255
test_y=test_data.test_labels[:2000].cuda()
class CNN(nn.Module):
    """Two conv blocks (conv -> ReLU -> 2x2 max-pool) followed by a linear
    classifier mapping the flattened 32x7x7 features to 10 digit classes
    (28x28 single-channel MNIST input)."""

    def __init__(self):
        super(CNN, self).__init__()
        # Block 1: 1 input channel -> 16 feature maps; 28x28 -> 14x14.
        # padding = (kernel_size - 1) / 2 keeps H and W unchanged at stride 1.
        self.conv1 = nn.Sequential(
            nn.Conv2d(
                in_channels=1,
                out_channels=16,
                kernel_size=5,
                stride=1,
                padding=2,
            ),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),
        )
        # Block 2: 16 -> 32 feature maps; 14x14 -> 7x7.
        self.conv2 = nn.Sequential(
            nn.Conv2d(16, 32, 5, 1, 2),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        # Fully connected head over the flattened activations.
        self.out = nn.Linear(32 * 7 * 7, 10)

    def forward(self, x):
        features = self.conv2(self.conv1(x))
        # Flatten everything except the batch dimension for the linear layer.
        flat = features.view(features.size(0), -1)
        return self.out(flat)
cnn = CNN()
#!!!cuda change here: move the model's parameters to the GPU !!!
cnn.cuda()
# Training setup.
optimizer=torch.optim.Adam(cnn.parameters(),lr=LR)
loss_func=nn.CrossEntropyLoss() # loss for integer class targets
if __name__ == '__main__':
    for epoch in range(EPOCH):
        for step,(x,y) in enumerate(train_loader):
            #!!!cuda change here: move each mini-batch to the GPU !!!
            b_x=Variable(x).cuda()
            b_y=Variable(y).cuda()
            output=cnn(b_x)
            loss=loss_func(output,b_y)
            # Standard step: clear grads, backprop, update parameters.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Every 50 steps, report accuracy on the held-out 2000 samples.
            if step%50 == 0:
                test_output=cnn(test_x)
                #!!! cuda change here: argmax predictions stay on the GPU !!!
                pred_y=torch.max(test_output,1)[1].cuda().data
                accuracy = torch.sum(pred_y == test_y).type(torch.FloatTensor) / test_y.size(0)
                print('Epoch: ', epoch, '| train loss: %.4f' % loss.item(), '| test accuracy: %.2f' % accuracy)
| true
|
5195b2f0239997a23bfb3c414c94de424e153723
|
Python
|
atlasmao/Python-book-code
|
/book_14_Python_cook_book/chapter_05_文件与IO/code_08_创建临时文件和文件夹.py
|
UTF-8
| 559
| 3.265625
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Demonstrate tempfile: anonymous, directory, named, and low-level temp
artifacts."""
import os

from tempfile import TemporaryFile

# TemporaryFile cleans itself up when the `with` block exits.
with TemporaryFile('w+') as f:
    # Read/write to the file
    f.write('Hello World\n')
    f.write('Testing\n')

    # Seek back to beginning and read the data
    f.seek(0)
    data = f.read()
    print(data)

from tempfile import TemporaryDirectory

with TemporaryDirectory() as dirname:
    print(dirname)

from tempfile import NamedTemporaryFile

with NamedTemporaryFile('w') as f:
    print(f.name)

import tempfile

# FIX: mkstemp()/mkdtemp() do NOT clean up after themselves — the caller
# owns the open file descriptor, the file, and the directory. The original
# leaked all three on every run; close and remove them after printing.
fd, tmp_path = tempfile.mkstemp()
print((fd, tmp_path))
os.close(fd)
os.remove(tmp_path)

tmp_dir = tempfile.mkdtemp()
print(tmp_dir)
os.rmdir(tmp_dir)
| true
|
de7cb19955e00895cf2551a3f85abca7550aa202
|
Python
|
CaptainJackZhang/LeetCode-easy
|
/LeetCode-easy-Python/fizzBuzz.py
|
UTF-8
| 656
| 3.4375
| 3
|
[] |
no_license
|
#!usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author: CaptainJack
@datetime: 2018/9/27 13:05
@E-mail: zhangxianlei117@gmail.com
"""
class Solution(object):
    def fizzBuzz(self, n):
        """Return the FizzBuzz sequence for 1..n as strings.

        :type n: int
        :rtype: List[str]
        """
        words = []
        for num in range(1, n + 1):
            # Check 15 first: multiples of both 3 and 5.
            if num % 15 == 0:
                words.append('FizzBuzz')
            elif num % 5 == 0:
                words.append('Buzz')
            elif num % 3 == 0:
                words.append('Fizz')
            else:
                words.append(str(num))
        return words
# Manual smoke test (Python 2 print statement): n=1 -> ['1'].
if __name__ == '__main__':
    s = Solution()
    print s.fizzBuzz(1)
| true
|
10478fb0034a6e8e1d1c5b0b591f5be4d76fd022
|
Python
|
damax1995/Python101
|
/conditionals.py
|
UTF-8
| 750
| 4.5
| 4
|
[] |
no_license
|
# Practice script for Python conditionals; reads one age from stdin.
# Literal comparison: always true.
if 2 > 1:
    print("This is a True statement\n")
# if/else on two variables (prints the False branch here).
var1 = 1
var2 = 3
if var1 > var2:
    print("That is also True\n");
else:
    print("That was False!\n")
# if/elif/else on user input, converted from str to int.
value = input("How old are you?: ")
value = int(value)
if value < 10:
    print("You are under 10\n")
elif 10 <= value <= 30:
    print("You are between 10 and 30\n")
else:
    print("Damn, over 30\n")
# Boolean `or`: true because y > 15.
x = 10
y = 20
if x < 10 or y > 15:
    print("This statement was True!\n")
# Boolean `and`: false because y != 15.
x = 10
y = 10
if x == 10 and y==15:
    print("The statement was True!\n")
else:
    print("The statement was False!\n")
# Inequality operator.
x = 10
if x != 11:
    print("'x' is not equals to 11!\n")
# Membership test with `not in`.
myList = [1, 2, 3, 4]
x = 10
if x not in myList:
    print("'x' is not in myList\n")
| true
|
e8d25daf28c5daf0cf056f0d174d4ef5a8570ef2
|
Python
|
PerceptumNL/KhanLatest
|
/gae_mini_profiler/unformatter/__init__.py
|
UTF-8
| 5,018
| 3.453125
| 3
|
[] |
no_license
|
import sys
class UnformatStream(object):
    """Outputs tokens to a text stream, one per line, indented by nesting
    level. Python 2 only (uses the `unicode` builtin)."""
    def __init__(self, out=sys.stdout, indent=u'  '):
        self.out = out
        self.indent = indent
    def emit_token(self, token, level=0):
        # Fall back to repr() for byte strings that aren't valid unicode.
        try:
            t = unicode(token)
        except UnicodeDecodeError:
            t = repr(token)
        self.out.write((self.indent * level) + t)
        self.out.write(u'\n')
class UnformatObject(object):
    """Saves the emitted tokens as a nested json-like object in `self.value`.

    `stack` holds the chain of keys from the root to the insertion point;
    `lastlevel` is the nesting level of the previous token, used to pop the
    stack when the emitter dedents. Python 2 only (uses xrange).
    """
    def __init__(self):
        self.value = None
        self.stack = []
        self.lastlevel = 0
    def get_last_parent(self):
        # Walk the key stack (all but the last key) down from the root;
        # whenever a list is reached, descend into its most recent element.
        current = self.value
        for k in self.stack[:-1]:
            current = current[k]
            if type(current) == list:
                current = current[-1]
        return current
    def emit_token(self, token, level=0):
        # First token at level 0 becomes the root key.
        if level == 0 and self.value is None:
            self.value = {token: None}
            self.stack.append(token)
            return
        if token != ']': # lists don't have keys on the stack
            # Dedent: drop one stack entry per level we stepped back.
            for i in xrange(level, self.lastlevel):
                self.stack.pop()
            self.lastlevel = level
        # Closing tokens only adjust the stack; nothing to store.
        if token in ['>', ']']:
            return
        key = self.stack[-1]
        parent = self.get_last_parent()
        val = parent[key]
        if token == '<':
            # '<' turns the pending class name into a nested dict.
            if val == None: # special case at beginning where {'Class':None}
                parent[key] = {}
            elif type(val) == list:
                actualval = val[-1] # use the last list element as the class
                val[-1] = {actualval: {}}
                self.stack.append(actualval)
            else: # it's a class name
                parent[key] = {val: {}}
                self.stack.append(val)
        else:
            # '[' opens a new list; other tokens are plain values/keys.
            if token == '[':
                token = []
            if val == None:
                val = token
            elif type(val) == list:
                val.append(token)
            elif type(val) == dict:
                val[token] = None
                self.stack.append(token)
            else:
                raise Exception('invalid token %s', token)
            parent[key] = val
def unformat(text):
    """Parse `text` (a repr-style dump) into a nested json-like structure."""
    collector = UnformatObject()
    unformat_value(text, out=collector)
    return collector.value
def unformat_value(text, i=0, level=0, delim=None, out=UnformatStream()):
    """Consume one value starting at text[i] — a number, quoted string,
    list/tuple, or class dump — emitting tokens to `out`; return the index
    just past the consumed value.

    NOTE(review): the mutable default `out=UnformatStream()` is created
    once at import and shared by every call that omits `out`.
    """
    start = i
    if text == '':
        return i
    if text[i].isdigit():
        # number
        while text[i] not in [',', delim]:
            i += 1
        # SECURITY NOTE: eval() executes the slice as Python; only safe
        # because input is a repr()-style dump produced by trusted code.
        number = eval(text[start:i])
        out.emit_token(repr(number), level)
    elif text[i] in ["'", '"']:
        i = unformat_quoted(text, i, level, out=out)
    elif text[i] == '[':
        i = unformat_list(text, i, level, '[]', unformat_value, out=out)
    elif text[i] == '(':
        i = unformat_list(text, i, level, '()', unformat_value, out=out)
    else:
        i = unformat_class(text, i, level, delim, out=out)
    return i
def unformat_quoted(text, i, level, out=UnformatStream()):
    """Consume a quoted literal starting at text[i] (either quote style,
    honoring backslash escapes); emit it and return the index just past
    the closing quote."""
    start = i
    delim = text[start]
    i += 1
    while text[i] != delim:
        if text[i] == '\\': # escaped
            i += 1
        i += 1
    i += 1 # go past end of quoted section
    try:
        # SECURITY NOTE: eval() on the captured slice; trusted dumps only.
        quoted = eval(text[start:i])
    except ValueError:
        # this occurs when \x00lotsmorechars -> \x0...
        quoted = text[start:i]
    out.emit_token(quoted, level)
    return i
def unformat_class(text, i=0, level=0, delim=None, out=UnformatStream()):
    """Consume a class token: a bare name, optionally followed by a
    <attr=val, ...> body; return the index just past it."""
    # name
    start = i
    while text[i] not in ['<', ',', delim]:
        i += 1
    class_name = text[start:i]
    out.emit_token(class_name, level)
    if text[i] == '<':
        i = unformat_list(text, i, level, '<>', unformat_attrval, out=out)
    return i
def unformat_attrval(text, i, level, delim, out=UnformatStream()):
    """Consume one attr=value pair inside a <...> body; a leading '.' means
    the dump was truncated ('...') and is emitted as-is."""
    if text[i] == '.':
        out.emit_token('...', level)
        return i + 3
    # attr
    start = i
    while text[i] != '=':
        i += 1
    attr = text[start:i]
    out.emit_token(attr, level)
    i += 1 # unformat =
    # val (one level deeper than its attribute name)
    i = unformat_value(text, i, level + 1, delim, out=out)
    return i
def unformat_list(text, i, level, delim, elfn, out=UnformatStream()):
    """Consume a bracketed sequence. `delim` is the 2-char open/close pair
    (e.g. '[]', '()', '<>'); each element is parsed with `elfn`."""
    if len(delim) != 2:
        raise Exception
    out.emit_token(delim[0], level)
    i += 1 # unformat open bracket
    while text[i] != delim[1]:
        i = elfn(text, i, level + 1, delim[1], out=out)
        if text[i] == ',':
            i += 2  # skip the ", " separator
    i += 1 # unformat close bracket
    out.emit_token(delim[1], level)
    return i
def main():
    """Demo driver: pretty-print and object-parse each line of examples.txt,
    pausing between examples (Python 2: uses raw_input)."""
    from io import StringIO
    from pprint import pprint
    f = open('examples.txt', 'r')
    for line in f:
        # Render the line as an indented token stream.
        s = StringIO()
        unformat_value(line.strip(), out=UnformatStream(s))
        print(s.getvalue())
        s.close()
        # Re-parse the same line into a nested object.
        result = UnformatObject()
        unformat_value(line.strip(), out=result)
        pprint(result.value)
        raw_input('cont?')
    f.close()
if __name__ == '__main__':
    main()
| true
|
89ae37fc0c6b7856d81709a413da2f9186c780f8
|
Python
|
CreativePenguin/stuy-cs
|
/intro-comp-sci2/python/Classwork(5-15-2018).py
|
UTF-8
| 465
| 3.28125
| 3
|
[
"MIT"
] |
permissive
|
def makeDictFromCSV(s):
    """Parse newline-separated "key,value" text into a dict.

    Later duplicate keys overwrite earlier ones; any fields after the
    second on a row are ignored. Values stay strings.
    """
    mapping = {}
    for row in s.split('\n'):
        fields = row.split(',')
        mapping[fields[0]] = fields[1]
    return mapping
def tally(l):
    """Count occurrences of each (hashable) element of `l`; return a dict
    mapping element -> count."""
    counts = {}
    for item in l:
        counts[item] = counts.get(item, 0) + 1
    return counts
# A manual check of makeDictFromCSV, disabled by wrapping it in a bare
# string literal.
'''
print makeDictFromCSV("""a,3
b,4
c,6
d,10
f,9
a,99
b,0""")
'''
# Manual checks of tally (Python 2 print statements); counting is
# case-sensitive, so 'ONE', 'oNe', and 'one' tally separately.
print tally( [ 'a', 'b', 'c', 'a', 'fish', 'b', 'a'])
print tally( ['one','1','ONE','two','two','2',
              '2','oNe','3','3','ONe','uno','3'])
| true
|
b59c7cf6c09761c1fd673056ad18bd28fa76577b
|
Python
|
Caiel3/PIM
|
/pim/material/helpers/TxtControlador.py
|
UTF-8
| 1,020
| 2.734375
| 3
|
[] |
no_license
|
from django.conf import settings
import os.path as path
class Txt():
    """Appends timestamped log entries to MEDIA_ROOT/log/<hash>.txt.

    Constructing an instance immediately writes one entry bounded by the
    given start/end datetimes.
    """

    def __str__(self):
        # FIX: previously returned None, which makes str(obj)/print raise
        # TypeError; return the log's identifying hash instead.
        return self.hash

    def __unicode__(self):
        return self.hash

    def __init__(self, hash, texto, fecha_ini, fecha_fin):
        """
        hash: basename of the log file under MEDIA_ROOT/log/.
        texto: message to record.
        fecha_ini / fecha_fin: datetimes bounding the logged event.
        """
        self.hash = hash
        self.fecha_ini = fecha_ini.strftime('%d/%m/%Y %H:%M.%S')
        self.fecha_fin = fecha_fin.strftime('%d/%m/%Y %H:%M.%S')
        self.Escribir_log(texto)

    def Escribir_log(self, log_texto):
        """Append one formatted entry to this instance's log file."""
        try:
            text = 'inicio {}: {} Fin: {}'.format(self.fecha_ini, log_texto, self.fecha_fin)
            dire = settings.MEDIA_ROOT + "/log/" + self.hash + '.txt'
            if path.exists(dire):
                with open(dire, 'a', encoding='UTF-8') as log:
                    log.write(text + ' \n ')
            else:
                # FIX: use a context manager so the handle is closed even if
                # write() fails (the original leaked it on this branch).
                # NOTE(review): the first write stores the raw message, not
                # the timestamped `text` used on appends — confirm whether
                # that asymmetry is intentional.
                with open(dire, 'w', encoding='UTF-8') as log:
                    log.write(log_texto)
        except Exception as ex:
            # FIX: 'str' + Exception raised TypeError here, masking the
            # original error; format the exception into the message instead.
            print('Something went wrong: {}'.format(ex))
| true
|
c89b6fd8f32392b9bc9be87b16bd3c5ac741577b
|
Python
|
ynigoreyes/SudokuSolver
|
/src/io.py
|
UTF-8
| 738
| 3.421875
| 3
|
[] |
no_license
|
from os import path, listdir, mkdir
"""
Writes and Reads the Sudoku Board
"""
# Does this make a working directory no matter where it is?
DATA_DIR = "../data/"
def ensure_data_dir():
    """Create DATA_DIR if it does not already exist."""
    # Checks to see if there is a directory made
    data_dir = path.normpath(DATA_DIR)
    if not path.isdir(data_dir):
        # NOTE(review): mkdir fails if the parent is missing; makedirs
        # would be more robust for a nested DATA_DIR.
        mkdir(data_dir)
def write(board):
    """Serialize a 9x9 Sudoku board to DATA_DIR/saved-board<N>.txt as 81
    concatenated digits (row-major).

    Returns the path written, or "" if writing failed.
    """
    ensure_data_dir()
    f_path = ""
    try:
        # Number files sequentially by counting existing entries.
        # NOTE(review): this races with concurrent writers and re-uses
        # numbers after deletions — confirm acceptable for this app.
        name = DATA_DIR + 'saved-board' + str(len(listdir(DATA_DIR))) + ".txt"
        out = "".join(
            str(board.board[r][c].number) for r in range(9) for c in range(9)
        )
        # FIX: `with` guarantees the handle is closed even if write() fails
        # (the original leaked the handle on error), and no longer shadows
        # the `file` builtin.
        with open(name, 'w') as out_file:
            out_file.write(out)
        f_path = name
    except FileNotFoundError as e:
        print(e)
    return f_path
| true
|
3f4b3a37c6240e05fa8249ec716035c11e3170c1
|
Python
|
zzygyx9119/Whole_pipelines
|
/aft_pipelines_analysis/visualize_mut_heatmap.py
|
UTF-8
| 3,293
| 2.640625
| 3
|
[] |
no_license
|
import plotly
import plotly.graph_objs as go
import plotly.figure_factory as ff
import pandas as pd
import numpy as np
from scipy.spatial.distance import pdist, squareform
# get data: genes x samples mutation-count matrix.
proteins_df = pd.read_csv('/home/liaoth/project/brca_171208/output/mut_counts.csv',index_col=0)
# data_array = data.values
# labels = proteins_df.index.values
# Top dendrogram: cluster the columns (samples); move its traces to the
# secondary y-axis so the heatmap can occupy the primary one.
figure = ff.create_dendrogram(proteins_df.T.values, orientation='bottom')
for i in range(len(figure['data'])):
    figure['data'][i]['yaxis'] = 'y2'
# Create Side Dendrogram (clusters the rows/genes).
dendro_side = ff.create_dendrogram(proteins_df.values, orientation='right')
for i in range(len(dendro_side['data'])):
    dendro_side['data'][i]['xaxis'] = 'x2'
# Add Side Dendrogram Data to Figure
figure['data'].extend(dendro_side['data'])
# Reorder matrix rows/columns to match the dendrogram leaf order, then
# relabel the axes with the real gene/sample names.
dendro_leaves = dendro_side['layout']['yaxis']['ticktext']
x_order = list(map(int, dendro_leaves))
y_order = list(map(int, figure['layout']['xaxis']['ticktext']))
figure['layout']['xaxis']['ticktext'] = proteins_df.columns.values[y_order]
figure['layout']['yaxis']['ticktext'] = proteins_df.index.values[x_order]
figure['layout']['yaxis']['tickvals'] = dendro_side['layout']['yaxis']['tickvals']
# Two-tone red/white heatmap (effectively binary: mutated vs. not).
heatmap = go.Heatmap(y=proteins_df.iloc[x_order, y_order].index.values, z=proteins_df.iloc[x_order, y_order].values,
                     colorscale=[[0, '#FFFFFF'], [0.5, '#FFFFFF'],
                                 [0.5, '#bb2a34'], [1, '#bb2a34']],
                     )
# Align heatmap cells with the dendrogram tick positions.
heatmap['x'] = figure['layout']['xaxis']['tickvals']
heatmap['y'] = dendro_side['layout']['yaxis']['tickvals']
# Add Heatmap Data to Figure
figure['data'].append(heatmap)
figure['layout'].update({'width':2500, 'height':1500,
                         'showlegend':False, 'hovermode': 'closest',
                         'margin':{'b':120}
                         })
# Edit xaxis (heatmap columns occupy the right 85% of the canvas).
figure['layout']['xaxis'].update({'domain': [.15, 1],
                                  'mirror': False,
                                  'showgrid': False,
                                  'showline': False,
                                  'zeroline': False,
                                  'ticks':""})
# Edit xaxis2 (side dendrogram strip on the left).
figure['layout'].update({'xaxis2': {'domain': [0, .10],
                                    'mirror': False,
                                    'showgrid': False,
                                    'showline': False,
                                    'zeroline': False,
                                    'showticklabels': False,
                                    'ticks':""}})
# Edit yaxis (heatmap rows below the top dendrogram).
figure['layout']['yaxis'].update({'domain': [0, .85],
                                  'mirror': False,
                                  'showgrid': False,
                                  'showline': False,
                                  'zeroline': False,
                                  'showticklabels': False,
                                  'ticks': ""})
# Edit yaxis2 (top dendrogram strip).
figure['layout'].update({'yaxis2':{'domain':[.825, .975],
                                   'mirror': False,
                                   'showgrid': False,
                                   'showline': False,
                                   'zeroline': False,
                                   'showticklabels': False,
                                   'ticks':""}})
# Render to an offline HTML file and open it in the browser.
plotly.offline.plot(figure)
| true
|
d3506c5edaa24430233bfc16021b2dcb300e44ea
|
Python
|
UtopiaBeam/2110101-ComProg
|
/06/06_V.py
|
UTF-8
| 1,060
| 3.265625
| 3
|
[] |
no_license
|
# 06_V1
# Pick the n-th item (1-based, from stdin) out of a fixed list.
print(['x', 'k', 'tu', 'c', 'h'][int(input())-1])
# 06_V2 (Adv: zip + next())
# Read a semicolon-separated file (name given on stdin): field 0 is an id,
# fields 1-2 a name, remaining fields are scores. Sum the scores and take
# the first grade whose cutoff is met (A>=80, B>=70, C>=60, D>=50, else F).
ans = []
for ls in [l.strip().split(';') for l in open(input().strip())] :
    grd = next(gr for gr, sc in zip('ABCDF', [80, 70, 60, 50, 0]) if sum(map(float, ls[3:])) >= sc)
    ans.append([ls[0], '{} {}'.format(*ls[1:3]), grd])
print(ans)
# 06_V3 (Adv: zip + next())
# Same as V2 but skips blank lines, then answers lookup queries (match by
# id or name substring) until -1 is entered.
ans = []
for ls in [l.strip().split(';') for l in open(input().strip()) if l.strip()] :
    grd = next(gr for gr, sc in zip('ABCDF', [80, 70, 60, 50, 0]) if sum(map(float, ls[3:])) >= sc)
    ans.append([ls[0], '{} {}'.format(*ls[1:3]), grd])
while True :
    n = input().strip()
    if n == '-1' : break
    print(next((x for x in ans if n in x), 'Not Found'))
# 06_V4 (Adv: dict + lambda)
# Group values by key preserving first-seen key order (od records order),
# then report the key with the most values.
dc, od = {}, {}
for k, v in [l.strip().split() for l in open(input().strip())] :
    dc.setdefault(k, []).append(v)
    if k not in od : od[k] = len(od)
print(sorted([[k, dc[k]] for k in dc], key = lambda x : od[x[0]]))
print('The most favorite fruit is', max(dc.items(), key = lambda x : len(x[1]))[0])
| true
|
89afe282152d9e778cf2f9ec8edd4a5e195ad16c
|
Python
|
lllyee/AirlineCompany
|
/data_explore.py
|
UTF-8
| 398
| 2.75
| 3
|
[] |
no_license
|
# Profile the airline-customer CSV: per-column null count, max, and min,
# written to an Excel sheet with Chinese column headers.
import pandas as pd
datafile='/Users/yiliu/lllyeeData/air_data.csv'
resultfile='/Users/yiliu/lllyeeData/explore.xls'
data=pd.read_csv(datafile,encoding='utf-8')
# describe(include='all') profiles every column; .T puts columns on rows.
explore = data.describe(percentiles = [], include = 'all').T
# Nulls = total rows minus non-null count per column.
explore['null'] = len(data)-explore['count']
explore = explore[['null', 'max', 'min']]
# Header labels: null count / max / min (kept in Chinese for the report).
explore.columns = [u'空值数', u'最大值', u'最小值']
explore.to_excel(resultfile)
| true
|
74097dfedacc303d9f7924fa7ce2b1a409fdadab
|
Python
|
Stanford-PERTS/triton
|
/app/model/cycle.py
|
UTF-8
| 9,198
| 2.828125
| 3
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
"""
Cycle
===========
Cycles of users running a single survey together with their students.
# Note on dates
Cycles have user-set start and end dates to help them schedule their
activities. These are always the dates use for display.
The extended end date is derived from the whole set of a team's cycles in order
to calculate the most intuitive window to use when associating responses to
cycles. Before introducing this date, responses that fell between or after
the last cycle weren't captured in any Copilot data. This unnecessarily
penalizes users. With the extended end date used for participation queries,
all late participation is attributed to the previous cycle.
"""
import datetime
import logging
import util
from model import SqlModel, SqlField as Field
from .program import Program
import config
import mysql_connection
class Cycle(SqlModel):
    table = 'cycle'

    # MySQL schema declaration consumed by SqlModel.
    py_table_definition = {
        'table_name': table,
        'fields': [
            # name, type, length, unsigned, null, default, on_update
            Field('uid', 'varchar', 50, None, False, None, None),
            Field('short_uid', 'varchar', 50, None, False, None, None),
            Field('created', 'datetime', None, None, False, SqlModel.sql_current_timestamp, None),
            Field('modified', 'datetime', None, None, False, SqlModel.sql_current_timestamp, SqlModel.sql_current_timestamp),
            Field('team_id', 'varchar', 50, None, False, None, None),
            # 1-based position of this cycle within its team's sequence.
            Field('ordinal', 'tinyint', 4, True, False, 1, None),
            # User-set display window; extended_end_date is derived (see
            # module docstring) and used for participation queries.
            Field('start_date', 'date', None, None, True, SqlModel.sql_null, None),
            Field('end_date', 'date', None, None, True, SqlModel.sql_null, None),
            Field('extended_end_date', 'date', None, None, True, SqlModel.sql_null, None),
            Field('meeting_datetime', 'datetime', None, None, True, SqlModel.sql_null, None),
            Field('meeting_location', 'varchar', 200, None, True, None, None),
            Field('resolution_date', 'date', None, None, True, SqlModel.sql_null, None),
            # Represents _current_ participation, based on the active cycle.
            Field('students_completed', 'smallint', 5, True, True, SqlModel.sql_null, None),
        ],
        'primary_key': ['uid'],
        'indices': [
            {
                'name': 'team',
                'fields': ['team_id'],
            },
        ],
        'engine': 'InnoDB',
        'charset': 'utf8',
    }
@classmethod
def cycleless_start_date(klass, current_date=None):
# Returns the previous July 1
if current_date is None:
current_date = datetime.date.today()
current_year = current_date.year
current_month = current_date.month
july_month = 7
start_date_month = july_month
start_date_day = 1
if current_month < july_month:
start_date_year = current_year - 1
else:
start_date_year = current_year
return datetime.date(
start_date_year,
start_date_month,
start_date_day
)
@classmethod
def cycleless_end_date(klass, current_date=None):
# Returns the next June 30
if current_date is None:
current_date = datetime.date.today()
current_year = current_date.year
current_month = current_date.month
june_month = 6
end_date_month = june_month
end_date_day = 30
if current_month > june_month:
end_date_year = current_year + 1
else:
end_date_year = current_year
return datetime.date(
end_date_year,
end_date_month,
end_date_day
)
    @classmethod
    def create_for_team(klass, team):
        """Create the initial set of cycles for a new team.

        Cycleless programs get one year-long cycle; cycled programs get
        `program.min_cycles` empty cycles. Returns [] if the team's
        program can't be found.
        """
        program = Program.get_by_id(team.program_id)
        if not program:
            return []

        if not program.use_cycles:
            # https://github.com/PERTS/triton/issues/1632
            # The program is in cycleless mode. Generate a single cycle with
            # date range from previous July 1 through the next June 30.
            today = datetime.date.today()
            start_date = klass.cycleless_start_date(today)
            end_date = klass.cycleless_end_date(today)
            return [
                Cycle.create(
                    team_id=team.uid,
                    ordinal=1,
                    start_date=start_date,
                    end_date=end_date
                )
            ]

        # Cycled mode: undated placeholder cycles numbered from 1.
        return [Cycle.create(team_id=team.uid, ordinal=x + 1)
                for x in range(program.min_cycles or 0)]
    @classmethod
    def get_current_for_team(klass, team_id, today=None):
        """Returns current cycle or None.

        A cycle is "current" when `today` falls between its start date and
        either its end date or its (derived) extended end date — the
        extended window captures late participation (see module docstring).
        """
        if today is None:
            today = datetime.date.today()
        today_str = today.strftime(config.sql_datetime_format)
        query = '''
            SELECT *
            FROM `{table}`
            WHERE `team_id` = %s
              AND `start_date` <= %s
              AND (
                `end_date` >= %s OR
                `extended_end_date` >= %s
              )
            LIMIT 1
        '''.format(table=klass.table)
        params = (team_id, today_str, today_str, today_str)

        with mysql_connection.connect() as sql:
            row_dicts = sql.select_query(query, params)

        return klass.row_dict_to_obj(row_dicts[0]) if row_dicts else None
    @classmethod
    def query_by_teams(klass, team_ids):
        """Return all cycles belonging to any of the given team ids
        (parameterized IN query; no guaranteed ordering)."""
        query = '''
            SELECT *
            FROM `{table}`
            WHERE `team_id` IN ({interps})
        '''.format(
            table=klass.table,
            interps=', '.join(['%s'] * len(team_ids))
        )
        params = tuple(team_ids)

        with mysql_connection.connect() as sql:
            row_dicts = sql.select_query(query, params)

        return [klass.row_dict_to_obj(r) for r in row_dicts]
    @classmethod
    def reorder_and_extend(klass, team_cycles):
        """Order cycles within a team by date and adds extended_end_date.

        Cycles without dates are placed at the end ordered by ordinal.
        Raises if dates overlap, or if the cycles span multiple teams.

        Returns cycles, likely modified/mutated. N.B. non-pure!
        """
        if len(team_cycles) == 0:
            return []

        if not len(set(c.team_id for c in team_cycles)) == 1:
            raise Exception("Got cycles from multiple teams: {}"
                            .format(team_cycles))

        # Sort and apply ordinals: dated cycles by start date first, then
        # undated cycles by their existing ordinal.
        dated = [c for c in team_cycles if c.start_date]
        undated = [c for c in team_cycles if not c.start_date]
        ordered_dated = sorted(dated, key=lambda c: c.start_date)
        ordered_undated = sorted(undated, key=lambda c: c.ordinal)
        ordered = ordered_dated + ordered_undated
        for i, cycle in enumerate(ordered):
            # Re-number 1..N in the new order (mutates the cycle).
            new_ordinal = i + 1
            if cycle.ordinal != new_ordinal:
                cycle.ordinal = new_ordinal

            # Sanity-check all but the last for date overlap.
            if i == len(ordered) - 1:
                break
            next_cycle = ordered[i + 1]
            dates_set = bool(cycle.end_date and next_cycle.start_date)
            if dates_set and cycle.end_date >= next_cycle.start_date:
                raise Exception("Cycle dates overlap: {}, {}"
                                .format(cycle, next_cycle))

        # Derive extended_end_date across the freshly ordered set.
        ordered = klass.extend_dates(ordered)

        return ordered
    @classmethod
    def extend_dates(klass, team_cycles):
        """Set extended_end_date on each cycle so coverage is contiguous.

        Each dated cycle's extended_end_date reaches either the day
        before the next cycle starts, or the end of the program year
        for the last (or last dated) cycle. Mutates and returns
        team_cycles. Assumes a non-empty list sorted by start date, as
        produced by reorder_and_extend().
        """
        # When we need to figure out the end of the current program, do it
        # relative to the beginning of the first cycle.
        program_end_date = klass.cycleless_end_date(team_cycles[0].start_date)
        for i, cycle in enumerate(team_cycles):
            if not cycle.start_date or not cycle.end_date:
                # Cycles should come in order, and if dates aren't set on this
                # one then none of the later cycles have dates set either.
                # Don't add any extended dates to this or any later cycles.
                cycle.extended_end_date = None
                continue
            if i + 1 < len(team_cycles):
                # There's a next cycle. Attempt to extend the end date to the
                # that next cycle.
                next_cycle = team_cycles[i + 1]
                if next_cycle.start_date:
                    cycle.extended_end_date = (next_cycle.start_date -
                                               datetime.timedelta(days=1))
                else:
                    # The next cycle doesn't have dates defined; extend the
                    # end date to the latest possible day.
                    cycle.extended_end_date = program_end_date
            else:
                # This is the last cycle; extend the end date to latest
                # possible day.
                cycle.extended_end_date = program_end_date
        return team_cycles
| true
|
40d977b1fef3b49e7f9abd6ec3c794387984547c
|
Python
|
cgtyyldrm/PythonKamp
|
/workshop4.py
|
UTF-8
| 368
| 4.0625
| 4
|
[] |
no_license
|
#kullanıcı 3 sayı girsin bunlardan en büyüğünü versin
# Read three integers from the user and report the largest one.
sayi1 = int(input ("sayi 1:"))
sayi2 = int(input ("sayi 2:"))
sayi3 = int(input ("sayi 3:"))
# Use >= plus a final else so ties still produce output; the original
# strict > chain printed nothing when the maximum value was duplicated.
if sayi1 >= sayi2 and sayi1 >= sayi3:
    print ("en büyük sayi1")
elif sayi2 >= sayi1 and sayi2 >= sayi3:
    print ("en büyük sayi2")
else:
    # At this point sayi3 must be the (or a) largest value.
    print ("en büyük sayi 3. sayı olan", sayi3)
| true
|
4656e510e9945d8f137895283cae4cc517a3d38d
|
Python
|
mcuntz/jams_python
|
/jams/argsort.py
|
UTF-8
| 10,995
| 3.5
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
"""
argsort : argmax, argmin and argsort for array_like and Python iterables.
This module was written by Matthias Cuntz while at Department of
Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany, and continued while at Institut
National de Recherche pour l'Agriculture, l'Alimentation et
l'Environnement (INRAE), Nancy, France.
Copyright (c) 2014-2020 Matthias Cuntz - mc (at) macu (dot) de
Released under the MIT License; see LICENSE file for details.
* Written Dec 2014 by Matthias Cuntz (mc (at) macu (dot) de)
* Added argmin, argmax, Jul 2019, Matthias Cuntz
* Using numpy docstring format, extending examples from numpy docstrings, May 2020, Matthias Cuntz
.. moduleauthor:: Matthias Cuntz
The following functions are provided
.. autosummary::
argmax
argmin
argsort
"""
from __future__ import division, absolute_import, print_function
from warnings import warn, filterwarnings
# Re-enable display of DeprecationWarning (often suppressed by default) so
# the deprecation notices emitted by the wrappers below are actually shown.
filterwarnings("default", category=DeprecationWarning)
import numpy as np
# Public API of this module.
__all__ = ['argmax', 'argmin', 'argsort']
def argmax(a, *args, **kwargs):
    """
    Dispatching wrapper around numpy.ma.argmax, numpy.argmax, and a
    pure-Python fallback for generic iterables.

    Masked arrays are handled by numpy.ma.argmax(a, axis=None,
    fill_value=None, out=None), plain ndarrays by numpy.argmax(a, axis=None,
    out=None); extra positional and keyword arguments are forwarded to those
    routines unchanged. For any other Python iterable the index of its first
    maximum is returned and all extra arguments are ignored.

    Parameters
    ----------
    a : array_like
        input array, masked array, or Python iterable
    *args : optional
        arguments passed through to numpy.argmax or numpy.ma.argmax
    **kwargs : optional
        keyword arguments passed through to numpy.argmax or numpy.ma.argmax

    Returns
    -------
    index_array : ndarray, int
        Indices of the largest element(s) of `a`.
        a[np.unravel_index(argmax(a), a.shape)] is the maximum value of a.

    Examples
    --------
    >>> import numpy as np
    >>> print(argmax(np.array([0, 4, 6, 2, 1, 5, 3, 5])))
    2
    >>> print(argmax([0, 4, 6, 2, 1, 5, 3, 5]))
    2

    History
    -------
    Written, Matthias Cuntz, Jul 2019
    Modified, Matthias Cuntz, May 2020 - numpy docstring, more examples from numpy docs
    """
    warn('The function argmax is deprecated from JAMS. Use module pyjams.',
         category=DeprecationWarning)
    # Test MaskedArray before ndarray: the former subclasses the latter.
    if isinstance(a, np.ma.MaskedArray):
        return np.ma.argmax(a, *args, **kwargs)
    if isinstance(a, np.ndarray):
        return np.argmax(a, *args, **kwargs)
    return _argmax(a)
def argmin(a, *args, **kwargs):
    """
    Dispatching wrapper around numpy.ma.argmin, numpy.argmin, and a
    pure-Python fallback for generic iterables.

    Masked arrays are handled by numpy.ma.argmin(a, axis=None,
    fill_value=None, out=None), plain ndarrays by numpy.argmin(a, axis=None,
    out=None); extra positional and keyword arguments are forwarded to those
    routines unchanged. For any other Python iterable the index of its first
    minimum is returned and all extra arguments are ignored.

    Parameters
    ----------
    a : array_like
        input array, masked array, or Python iterable
    *args : optional
        arguments passed through to numpy.argmin or numpy.ma.argmin
    **kwargs : optional
        keyword arguments passed through to numpy.argmin or numpy.ma.argmin

    Returns
    -------
    index_array : ndarray, int
        Indices of the smallest element(s) of `a`.
        a[np.unravel_index(argmin(a), a.shape)] is the minimum value of a.

    Examples
    --------
    >>> import numpy as np
    >>> print(argmin(np.array([3, 0, 2, 0])))
    1
    >>> print(argmin([3, 0, 2, 0]))
    1

    History
    -------
    Written, Matthias Cuntz, Jul 2019
    Modified, Matthias Cuntz, May 2020 - numpy docstring, more examples from numpy docs
    """
    warn('The function argmin is deprecated from JAMS. Use module pyjams.',
         category=DeprecationWarning)
    # Test MaskedArray before ndarray: the former subclasses the latter.
    if isinstance(a, np.ma.MaskedArray):
        return np.ma.argmin(a, *args, **kwargs)
    if isinstance(a, np.ndarray):
        return np.argmin(a, *args, **kwargs)
    return _argmin(a)
def argsort(a, *args, **kwargs):
    """
    Dispatching wrapper around numpy.ma.argsort, numpy.argsort, and a
    pure-Python fallback for generic iterables.

    Masked arrays are handled by numpy.ma.argsort(a, axis=None,
    kind='quicksort', order=None, fill_value=None), plain ndarrays by
    numpy.argsort(a, axis=-1, kind='quicksort', order=None); for any other
    iterable sorted() is used with the input itself as the sort key. Extra
    positional and keyword arguments are forwarded in every case, except
    that `key` cannot be given for plain iterables.

    Parameters
    ----------
    a : array_like
        input array, masked array, or Python iterable
    *args : optional
        arguments of numpy.argsort, numpy.ma.argsort, or sorted
    **kwargs : optional
        keyword arguments of numpy.argsort, numpy.ma.argsort, or sorted
        (except the key argument)

    Returns
    -------
    index_array : ndarray, int
        Indices that sort `a` along the specified axis.
        If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.

    Examples
    --------
    >>> import numpy as np
    >>> a = np.array([0, 4, 6, 2, 1, 5, 3, 5])
    >>> print(a[argsort(a)])
    [0 1 2 3 4 5 5 6]
    >>> print(argsort([3, 1, 2]))
    [1, 2, 0]

    History
    -------
    Written, Matthias Cuntz, Dec 2014
    Modified, Matthias Cuntz, May 2020 - numpy docstring, more examples from numpy docs
    """
    warn('The function argsort is deprecated from JAMS. Use module pyjams.',
         category=DeprecationWarning)
    # Test MaskedArray before ndarray: the former subclasses the latter.
    if isinstance(a, np.ma.MaskedArray):
        return np.ma.argsort(a, *args, **kwargs)
    if isinstance(a, np.ndarray):
        return np.argsort(a, *args, **kwargs)
    # Generic Python iterable: args/kwargs are forwarded to sorted().
    return _argsort(a, *args, **kwargs)
# same as numpy.argmax but for python iterables
def _argmax(iterable):
return max(enumerate(iterable), key=lambda x: x[1])[0]
# same as numpy.argmin but for python iterables
def _argmin(iterable):
return min(enumerate(iterable), key=lambda x: x[1])[0]
# same as numpy.argsort but for python iterables
def _argsort(seq, *args, **kwargs):
if 'key' in kwargs:
raise KeyError('keyword key cannot be given to argsort.')
return sorted(range(len(seq)), *args, key=seq.__getitem__, **kwargs)
if __name__ == '__main__':
    # Self-test: run the examples embedded in this module's docstrings.
    import doctest
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
| true
|
c8f0868e20746c3a45303cf35cb82c295cd15ea6
|
Python
|
suscaria/python3
|
/leetcode/5_implement_strstr.py
|
UTF-8
| 2,719
| 4.4375
| 4
|
[] |
no_license
|
# Question:
# Implement strstr(). Returns the index of the first occurrence of needle in haystack, or –1
# if needle is not part of haystack.
# O(nm) runtime, O(1) space – Brute force:
# There are known efficient algorithms such as Rabin-Karp algorithm, KMP algorithm, or
# the Boyer-Moore algorithm. Since these algorithms are usually studied in an advanced
# algorithms class, it is sufficient to solve it using the most direct method in an interview –
# The brute force method.
# The brute force method is straightforward to implement. We scan the needle with the
# haystack from its first position and start matching all subsequent letters one by one. If one
# of the letters does not match, we start over again with the next position in the haystack.
# Assume that n = length of haystack and m = length of needle, then the runtime
# complexity is O(nm).
# Have you considered these scenarios?
# i. needle or haystack is empty. If needle is empty, always return 0. If haystack is
# empty, then there will always be no match (return –1) unless needle is also
# empty which 0 is returned.
# ii. needle’s length is greater than haystack’s length. Should always return –1.
# iii. needle is located at the end of haystack. For example, “aaaba” and “ba”. Catch
# possible off-by-one errors.
# iv. needle occur multiple times in haystack. For example, “mississippi” and
# “issi”. It should return index 2 as the first match of “issi”.
# v. Imagine two very long strings of equal lengths = n, haystack = “aaa…aa” and
# needle = “aaa…ab”. You should not do more than n character comparisons, or
# else your code will get Time Limit Exceeded in OJ.
class StrInStr(object):
    """Brute-force strstr: locate the first occurrence of a needle string
    inside a haystack string in O(n*m) time."""

    def __init__(self, needle: str, haystack: str) -> None:
        self.needle = needle
        self.haystack = haystack

    def is_present(self) -> int:
        """Return the index of the first occurrence of needle in haystack.

        Returns 0 for an empty needle (per the problem statement above)
        and -1 when there is no match.
        """
        n = len(self.haystack)
        m = len(self.needle)
        if m == 0:
            # An empty needle always matches at index 0.
            return 0
        # Only try start positions where the needle still fits entirely.
        # This fixes the IndexError the original i+j scan raised when the
        # needle ran past the end of the haystack (e.g. "abc" in "ab"),
        # and drops the leftover debug prints.
        for i in range(n - m + 1):
            if self.haystack[i:i + m] == self.needle:
                return i
        return -1
if __name__ == "__main__":
index1 = StrInStr("tin", "nitin").is_present()
print("index1:",index1)
assert index1 == 2
index2 = StrInStr("nit", "nitin").is_present()
print("index2:",index2)
assert index2 == 0
index3 = StrInStr("in", "nitin").is_present()
print("index3:",index3)
assert index3 == 3
| true
|
116f39477f16be3e25ee0c498ac43c85384cc7c5
|
Python
|
hamma95/patrol_robot
|
/src/shapes2.py
|
UTF-8
| 1,368
| 3.1875
| 3
|
[] |
no_license
|
#!/usr/bin/env python
from __future__ import print_function
import rospy
from smach import State,StateMachine
from time import sleep
class Drive(State):
    """SMACH state that simulates driving forward; always succeeds."""

    def __init__(self, distance):
        # Single possible outcome for this state.
        super(Drive, self).__init__(outcomes=['success'])
        self.distance = distance

    def execute(self, userdata):
        print("driving at {} m/s".format(self.distance))
        sleep(1)  # simulate the time the motion takes
        return 'success'
class Turn(State):
    """SMACH state that simulates turning in place; always succeeds."""

    def __init__(self, angle):
        # Single possible outcome for this state.
        super(Turn, self).__init__(outcomes=['success'])
        self.angle = angle

    def execute(self, userdata):
        print("turning at {} degrees".format(self.angle))
        sleep(1)  # simulate the time the rotation takes
        return 'success'
def polygon(sides):
    """Build a SMACH state machine that drives a closed polygon with the
    given number of sides, alternating Drive and Turn states."""
    machine = StateMachine(outcomes=['success'])
    with machine:
        # Sides 1..sides-1 each drive then turn toward the next side.
        for i in xrange(1, sides):
            StateMachine.add('side{0}'.format(i), Drive(1),
                             transitions={'success': 'turn{0}'.format(i)})
            StateMachine.add('turn{0}'.format(i), Turn(360 / sides),
                             transitions={'success': 'side{0}'.format(i + 1)})
        # The last side closes the polygon and ends the machine.
        StateMachine.add('side{0}'.format(sides), Drive(1),
                         transitions={'success': 'success'})
    return machine
if __name__=='__main__':
    triangle=polygon(3)
    square=polygon(4)
    shapes=StateMachine(outcomes=['success'])
    with shapes:
        # NOTE(review): these transitions form a cycle
        # (triangle -> square -> triangle), so this outer machine never
        # reaches its 'success' outcome and execute() runs indefinitely.
        StateMachine.add('triangle',triangle,transitions={'success':'square'})
        StateMachine.add('square',square,transitions={'success':'triangle'})
    shapes.execute()
| true
|
9673b2e6bc11427ae433abc018bb16b5085c3f21
|
Python
|
meiordac/Interview
|
/Code Fights!/distancesum.py
|
UTF-8
| 186
| 3.265625
| 3
|
[] |
no_license
|
def distancesum(n, xcor):
    """Print and return the sum of pairwise distances between the first
    `n` one-dimensional coordinates in `xcor`.

    Parameters
    ----------
    n : int
        number of leading coordinates of `xcor` to consider
    xcor : sequence of numbers
        coordinates on a line

    Returns
    -------
    number : sum over all pairs i < j of |xcor[i] - xcor[j]|
    """
    dist = 0
    for i in range(n):
        for j in range(i + 1, n):
            # abs() replaces the roundabout ((d)**2)**0.5 of the original.
            dist += abs(xcor[i] - xcor[j])
    # print() call form runs on Python 3 as well; the original
    # `print dist` statement was Python-2 only.
    print(dist)
    return dist

print(distancesum(3, [-3, 4, -3]))
| true
|
19682a07f6b970b9aece2ea595a0633590a3e6ee
|
Python
|
2020-A-Python-GR1/py-reina-gamboa-miguel-esteban
|
/03 - Pandas/d_lectura_csv.py
|
UTF-8
| 839
| 2.5625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 25 10:18:45 2020
@author: migue
"""
import pandas as pd
import os
path = "C:/Users/migue/OneDrive/Documentos/EPN/Sexto Semestre/Desarrollo web con python/Github/py-reina-gamboa-miguel-esteban/04 - Pandas/data/artwork_data.csv"
df1 = pd.read_csv(
path,
nrows = 10)
columnas = ['id', 'artist', 'title', 'medium', 'year', 'acquisitionYear', 'height', 'width', 'units']
df2 = pd.read_csv (
path,
nrows = 10,
usecols = columnas)
df3 = pd.read_csv (
path,
usecols = columnas,
index_col = 'id',
low_memory = False)
path_guardado = "C:/Users/migue/OneDrive/Documentos/EPN/Sexto Semestre/Desarrollo web con python/Github/py-reina-gamboa-miguel-esteban/04 - Pandas/data/artwork_data.pickle"
df3.to_pickle(path_guardado)
df4 = pd.read_pickle(path_guardado)
| true
|
faa04072f43a64fd93e1e977b809553343115938
|
Python
|
AbstractThinks/python-tutorial
|
/selenium/demo/demo.py
|
UTF-8
| 223
| 2.734375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
# Minimal Selenium demo: launch Firefox, wait briefly, then quit.
from selenium import webdriver
import time
# Requires a local Firefox install plus geckodriver on PATH.
browser = webdriver.Firefox()
# browser.get("http://www.baidu.com")
# Keep the window open for a moment so the launch is visible.
time.sleep(5)
print("Browser will be closed")
browser.quit()
print("Browser is close")
| true
|
9d298f6c20f5c834f12e3511dc05a38d564262d4
|
Python
|
mafei0728/python
|
/12.内置函数/18_filter_map_reduce.py
|
UTF-8
| 1,176
| 4.28125
| 4
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding:utf8 -*-
# author:mafei0728
"""
map(func, *iterables) --> map object
Make an iterator that computes the function using arguments from
each of the iterables. Stops when the shortest iterable is exhausted.
"""
### map
a=[1,2,3,4,5,6]
c=map(lambda x:x**2,a)
print(list(c))
"""
reduce(function, sequence[, initial]) -> value
Apply a function of two arguments cumulatively to the items of a sequence,
from left to right, so as to reduce the sequence to a single value.
For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates
((((1+2)+3)+4)+5). If initial is present, it is placed before the items
of the sequence in the calculation, and serves as a default when the
sequence is empty.
"""
from functools import reduce
### reduce
a=range(100)
print(reduce(lambda x,y:x+y,a))
"""
filter(function or None, iterable) --> filter object
Return an iterator yielding those items of iterable for which function(item)
is true. If function is None, return the items that are true.
"""
a={
"a":1111111111,
"b":222,
"c":33332222,
"d":333322,
}
### filter
c=filter(lambda x:a[x]==222,a)
print(next(c))
| true
|