hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f64b82838ee7021aed6a41f6268978be205f715f | 973 | py | Python | script.py | inforkgodara/python-network-scanner | 24d064fba907bbc662bb362c61c1e46a1e803129 | [
"MIT"
] | null | null | null | script.py | inforkgodara/python-network-scanner | 24d064fba907bbc662bb362c61c1e46a1e803129 | [
"MIT"
] | null | null | null | script.py | inforkgodara/python-network-scanner | 24d064fba907bbc662bb362c61c1e46a1e803129 | [
"MIT"
] | 1 | 2022-01-21T07:09:42.000Z | 2022-01-21T07:09:42.000Z | import socket
from datetime import datetime
# Author @inforkgodara
# Prompt for the base IPv4 address and the range of final octets to scan.
ip_address = input("IP Address: ")
splitted_ip_digits = ip_address.split('.')
dot = '.'
# Keep the network part, e.g. "192.168.1." — the final octet is appended
# per scanned host inside execute().
first_three_ip_digits = splitted_ip_digits[0] + dot + splitted_ip_digits[1] + dot + splitted_ip_digits[2] + dot
starting_number = int(input("Starting IP Number: "))
ending_number = int(input("Ending IP Number: "))
# +1 so the user-supplied ending number itself is included in range().
ending_number = ending_number + 1
start_time = datetime.now()
def scan(ip_address):
    """Probe TCP port 135 (MS-RPC) on ip_address.

    Returns 1 when the port accepts a connection (host considered live),
    0 otherwise.
    """
    # Context manager guarantees the socket is closed — the original code
    # leaked one socket per scanned address.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        # Per-socket timeout instead of mutating the process-wide default
        # via socket.setdefaulttimeout().
        sock.settimeout(1)
        result = sock.connect_ex((ip_address, 135))
    if result == 0:
        return 1
    else:
        return 0
def execute():
    """Scan every host in the configured range and report the live ones."""
    for last_octet in range(starting_number, ending_number):
        target = first_three_ip_digits + str(last_octet)
        if scan(target):
            print(target, "is live")
# Run the scan and measure the total elapsed wall-clock time.
execute()
end_time = datetime.now()
total_time = end_time - start_time
print("Scanning completed in: ", total_time) | 27.8 | 111 | 0.701953 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 118 | 0.121274 |
f64c3361d7ec09fcaf7f6ec8576af34f801bb4dc | 38 | py | Python | modules/2.79/bpy/types/CyclesMeshSettings.py | cmbasnett/fake-bpy-module | acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55 | [
"MIT"
] | null | null | null | modules/2.79/bpy/types/CyclesMeshSettings.py | cmbasnett/fake-bpy-module | acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55 | [
"MIT"
] | null | null | null | modules/2.79/bpy/types/CyclesMeshSettings.py | cmbasnett/fake-bpy-module | acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55 | [
"MIT"
] | null | null | null | class CyclesMeshSettings:
pass
| 6.333333 | 25 | 0.710526 | 35 | 0.921053 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f64d0d7507a0bf1361c152ed2a9e7c2c37431db2 | 1,528 | py | Python | uibuilder/backend/__init__.py | stonewell/pyuibuilder | 0ed21f7cc0eee9d808a90a6c072c099b27dfffad | [
"BSD-2-Clause"
] | 1 | 2020-04-21T02:11:12.000Z | 2020-04-21T02:11:12.000Z | uibuilder/backend/__init__.py | stonewell/pyuibuilder | 0ed21f7cc0eee9d808a90a6c072c099b27dfffad | [
"BSD-2-Clause"
] | null | null | null | uibuilder/backend/__init__.py | stonewell/pyuibuilder | 0ed21f7cc0eee9d808a90a6c072c099b27dfffad | [
"BSD-2-Clause"
] | null | null | null | #__init__.py backend
import os
import sys
import logging
from importlib import import_module
L = logging.getLogger('backend')
def create_widget(node):
    '''
    Create a widget for the given XML node.

    If the node carries an 'impl' attribute, try that implementation
    module first; when it is absent or fails, fall back to creating the
    widget from the node's tag via the backend widget manager.
    '''
    _widget = None
    if 'impl' in node.attrib:
        try:
            _widget = create_widget_from_impl(node.attrib['impl'])
        # A bare 'except:' would also swallow SystemExit/KeyboardInterrupt;
        # catch Exception so only genuine failures are logged and recovered.
        except Exception:
            L.exception('unable to create using impl class:{}'.format(node.attrib['impl']))
    if not _widget:
        _widget = create_widget_from_tag(node.tag)
    return _widget
def create_widget_from_impl(impl_cls):
    '''
    Build a widget via the module named by impl_cls.
    '''
    impl_module = __load_module_by_name(impl_cls)
    return impl_module.create_widget()
def create_widget_from_tag(tag):
    '''
    Ask the backend widget manager to build the widget for an XML tag.
    '''
    return __load_backend_widget_manager().create_widget(tag)
def run_app():
    '''
    Delegate run_app to the backend widget manager.
    '''
    __load_backend_widget_manager().run_app()
def stop_app():
    '''
    Delegate stop_app to the backend widget manager.
    '''
    __load_backend_widget_manager().stop_app()
def __load_module_by_name(m_name):
    '''
    Import (if not already imported) and return the module named m_name.

    m_name may be absolute or relative (leading '.'); relative names are
    imported against this package.
    '''
    if not m_name in sys.modules:
        if m_name.startswith('.'):
            import_module(m_name, __package__)
            # sys.modules keys are absolute names, so qualify the relative
            # name with this package before the lookup below.
            m_name = __package__ + m_name
        else:
            import_module(m_name)
    return sys.modules[m_name]
def __load_backend_widget_manager():
    '''
    Load the widget manager module of the predefined backend
    (the bundled .libui backend).
    '''
    return __load_module_by_name('.libui.widget_manager')
| 23.151515 | 91 | 0.665576 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 376 | 0.246073 |
f64d58a53350e87f2961f9114eb18d59516ae402 | 638 | py | Python | CondTools/Geometry/python/HGCalParametersWriter_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | CondTools/Geometry/python/HGCalParametersWriter_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | CondTools/Geometry/python/HGCalParametersWriter_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
from CondTools.Geometry.HGCalEEParametersWriter_cfi import *
from Configuration.ProcessModifiers.dd4hep_cff import dd4hep
dd4hep.toModify(HGCalEEParametersWriter,
fromDD4Hep = cms.bool(True)
)
HGCalHESiParametersWriter = HGCalEEParametersWriter.clone(
name = cms.string("HGCalHESiliconSensitive"),
nameW = cms.string("HGCalHEWafer"),
nameC = cms.string("HGCalHECell"),
)
HGCalHEScParametersWriter = HGCalEEParametersWriter.clone(
name = cms.string("HGCalHEScintillatorSensitive"),
nameW = cms.string("HGCalWafer"),
nameC = cms.string("HGCalCell"),
)
| 29 | 60 | 0.757053 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 105 | 0.164577 |
f64d7ae0c5f676ffe1417197ce2cd8024ae8d693 | 1,895 | py | Python | Transition_examples_NCL_to_PyNGL/streamlines/TRANS_streamline.py | 1271756664/-xESMF | f2341fe5a949050dc9e350fdc8c7d3e3d3d48222 | [
"MIT"
] | 54 | 2015-11-09T13:39:00.000Z | 2022-02-16T10:31:19.000Z | Transition_examples_NCL_to_PyNGL/streamlines/TRANS_streamline.py | wengensheng/PyEarthScience | 0c5b116a80604c5a892369b975df8b15b9b34717 | [
"MIT"
] | null | null | null | Transition_examples_NCL_to_PyNGL/streamlines/TRANS_streamline.py | wengensheng/PyEarthScience | 0c5b116a80604c5a892369b975df8b15b9b34717 | [
"MIT"
] | 25 | 2016-04-11T20:40:40.000Z | 2021-12-01T14:38:41.000Z | #
# File:
# TRANS_streamline.py
#
# Synopsis:
# Illustrates how to create a streamline plot
#
# Categories:
# streamline plot
#
# Author:
# Karin Meier-Fleischer, based on NCL example
#
# Date of initial publication:
# September 2018
#
# Description:
# This example shows how to create a streamline plot.
#
# Effects illustrated:
# o Read netCDF data
# o Drawing a streamline plot
#
# Output:
# A single visualization is produced.
#
# Notes: The data for this example can be downloaded from
# http://www.ncl.ucar.edu/Document/Manuals/NCL_User_Guide/Data/
#
'''
Transition Guide PyNGL Example: TRANS_streamline.py
- Read netCDF data
- Drawing a streamline plot
18-09-04 kmf
'''
from __future__ import print_function
import Ngl,Nio
#-- open a file and read variables
f = Nio.open_file("../read_data/rectilinear_grid_2D.nc", "r")
u = f.variables["u10"]
v = f.variables["v10"]
#-- first time step of the 10 m wind components
ua = f.variables["u10"][0,:,:]
va = f.variables["v10"][0,:,:]
lat = f.variables["lat"]
lon = f.variables["lon"]
nlon = len(lon)
nlat = len(lat)
#-- open a workstation (PNG output)
wks = Ngl.open_wks("png","plot_TRANS_streamline_py")
#-- resource settings; every 3rd grid point is passed to the plot below
stres = Ngl.Resources()
stres.nglFrame = False
stres.vfXArray = lon[::3]
stres.vfYArray = lat[::3]
stres.mpFillOn = True
stres.mpOceanFillColor = "Transparent"
stres.mpLandFillColor = "Gray90"
stres.mpInlandWaterFillColor = "Gray90"
#-- create the streamline plot over a map
plot = Ngl.streamline_map(wks,ua[::3,::3],va[::3,::3],stres)
#-- write variable long_name and units to the plot
txres = Ngl.Resources()
txres.txFontHeightF = 0.014
Ngl.text_ndc(wks,f.variables["u10"].attributes['long_name'],0.16,0.76,txres)
Ngl.text_ndc(wks,f.variables["u10"].attributes['units'], 0.95,0.76,txres)
#-- advance the frame
Ngl.frame(wks)
Ngl.end()
| 22.559524 | 76 | 0.658047 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,035 | 0.546174 |
f64d7d189c96dd02a499180601f2f832ec95df53 | 889 | py | Python | src/normalise.py | amystar101/fingerprint-image-enhancement | 45110796f47781317516d053ce876853a2fb682c | [
"Unlicense"
] | 1 | 2021-06-24T17:51:48.000Z | 2021-06-24T17:51:48.000Z | src/normalise.py | amystar101/fingerprint-image-enhancement | 45110796f47781317516d053ce876853a2fb682c | [
"Unlicense"
] | null | null | null | src/normalise.py | amystar101/fingerprint-image-enhancement | 45110796f47781317516d053ce876853a2fb682c | [
"Unlicense"
] | null | null | null | #function to normalise image
#setting new mean = 1, and new varriance = 1
import numpy as np
import math
def normalise(img, new_mean=1.0, new_variance=1.0):
    """Normalise an image toward a target mean and variance.

    Each pixel p is mapped to new_mean +/- sqrt(new_variance * (p - mean)^2 / variance),
    with '+' for pixels above the global mean and '-' otherwise.

    Parameters:
        img: numpy array of pixel intensities (any shape; the original
            implementation required 2-D).
        new_mean: target mean of the output.
        new_variance: target variance of the output.

    Returns:
        A new numpy array of the same shape; img is not modified.
    """
    print("Normalising the image")
    print("setting new mean = "+str(new_mean)+" and new varriance = "+str(new_variance))
    mean = np.mean(img)
    variance = np.var(img)
    # Vectorised replacement of the per-pixel double loop: one deviation
    # term, sign chosen by whether the pixel lies above the global mean.
    deviation = np.sqrt(new_variance * (img - mean) ** 2 / variance)
    new_img = np.where(img > mean, new_mean + deviation, new_mean - deviation)
    # checking mean and variance of normalised image
    mean = np.mean(new_img)
    variance = np.var(new_img)
    print("mean = ", mean, " variance = ", variance)
    return new_img
| 24.027027 | 96 | 0.600675 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 210 | 0.23622 |
f64de443ca7e5b34fb910ad26f914c99342b2c99 | 222 | py | Python | tests/photos.py | DennyWeinberg/photoprism | 39107489dec1fe1bbdbd86e2d57c2364c6d115a0 | [
"MIT"
] | 1 | 2022-02-26T15:43:53.000Z | 2022-02-26T15:43:53.000Z | tests/photos.py | DennyWeinberg/photoprism | 39107489dec1fe1bbdbd86e2d57c2364c6d115a0 | [
"MIT"
] | null | null | null | tests/photos.py | DennyWeinberg/photoprism | 39107489dec1fe1bbdbd86e2d57c2364c6d115a0 | [
"MIT"
] | null | null | null | import unittest
from photoprism import Client
class TestClass(unittest.TestCase):
def test_upload():
client = Client()
client.upload_photo('20210104_223259.jpg', b'TODO', album_names=['Test Album'])
| 22.2 | 87 | 0.707207 | 172 | 0.774775 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 0.18018 |
f64e55e11d9a142eb48cd1c9e5fe0bd127f8241b | 1,778 | py | Python | simulate.py | mikedigriz/Brave-Trigger | 4decb22b7f75ba4730d5c083af62c8e12386e0a3 | [
"Apache-2.0"
] | 1 | 2022-03-28T23:23:38.000Z | 2022-03-28T23:23:38.000Z | simulate.py | mikedigriz/Brave-Trigger | 4decb22b7f75ba4730d5c083af62c8e12386e0a3 | [
"Apache-2.0"
] | null | null | null | simulate.py | mikedigriz/Brave-Trigger | 4decb22b7f75ba4730d5c083af62c8e12386e0a3 | [
"Apache-2.0"
] | 1 | 2021-12-24T07:14:53.000Z | 2021-12-24T07:14:53.000Z | # Simulate user activity for Windows
# Can trigger Brave Ads
import random
from time import sleep
import pydirectinput
import os
# clear log function
def cls():
    """Clear the terminal, using the platform-appropriate shell command."""
    clear_command = 'cls' if os.name == 'nt' else 'clear'
    os.system(clear_command)
# main simulate function
def simulate():
    """Endlessly simulate user activity: random mouse moves, modifier-key
    presses, and a periodic click at a fixed position.

    Runs forever (no return). Coordinates are hard-coded for one specific
    screen layout — adjust the x/y ranges below for other displays.
    """
    while True:
        # You can change the x/y ranges to match your screen resolution.
        # defined screen resolution
        rand_x = random.randint(2567, 4460)
        rand_x2 = random.randint(2567, 4460)
        rand_y = random.randint(1337, 2986)
        rand_y2 = random.randint(1337, 2986)
        # random generator True/False
        choice = random.choice([True, False])
        choice2 = random.choice([True, False])
        choice3 = random.choice([True, False])
        # random move to xy
        pydirectinput.moveTo(rand_x2, rand_y2)
        if choice:
            # move from current pos
            pydirectinput.move(rand_x, rand_y)
        if choice2:
            print('shift')
            # press down shift key
            pydirectinput.keyDown('shift')
            # random sleep
            sleep(random.randint(1, 2))
            # release shift key
            pydirectinput.keyUp('shift')
        if choice3:
            print('ctrl')
            pydirectinput.keyDown('ctrl')
            sleep(random.randint(1, 2))
            pydirectinput.keyUp('ctrl')
        sleep(20)
        # move to defined xy, then click (notif window)
        pydirectinput.moveTo(2321, 1304)
        print('Click')
        # press left mouse button
        pydirectinput.click()
        print('Sleep')
        sleep(random.randint(25, 35))
        if choice and choice2 and choice3:
            print('Sleep 6m')
            sleep(350)
            cls()
# Run the activity simulator when executed as a script.
if __name__ == '__main__':
    simulate()
| 28.677419 | 59 | 0.571429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 463 | 0.260405 |
f6518ee8615255e8e5a356ab1a00d2f48a8a58b4 | 1,688 | py | Python | modules/module8/extra/manual_predict_demand.py | shourya01/power_data_analytics_tools | 7621eaebb5b3bd107238016b07e5ae71c891c4b1 | [
"MIT"
] | 1 | 2021-09-26T17:29:46.000Z | 2021-09-26T17:29:46.000Z | modules/module8/extra/manual_predict_demand.py | shourya01/power_data_analytics_tools | 7621eaebb5b3bd107238016b07e5ae71c891c4b1 | [
"MIT"
] | null | null | null | modules/module8/extra/manual_predict_demand.py | shourya01/power_data_analytics_tools | 7621eaebb5b3bd107238016b07e5ae71c891c4b1 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Columns excluded from the model: 'Year' plus the dropped counties.
counties_drop_list = ["Year", "Los Angeles County", "Merced County", "Riverside County", "San Diego County", "San Mateo County", "Santa Barbara County", "Santa Clara County", "Santa Cruz County"]
df = pd.read_csv("population_all.csv")
# Prepend a constant column "00" of ones (the bias/intercept feature).
df = pd.concat([pd.Series(1, index=df.index, name="00"), df], axis=1)
X_global = df.drop(columns=counties_drop_list[1:])
df_demand = pd.read_csv("demand_all.csv")
y_global = df_demand.drop(columns=counties_drop_list)
#for i in range(1, len(X.columns)):
#    X[i-1] = X[i-1]/np.max(X[i-1])
#print(X.head())
# NOTE(review): theta is integer-initialised, so in-place gradient updates
# in gradientDescent truncate to int — confirm whether float init was meant.
theta_global = np.array([0]*len(X_global.columns))
m_global = len(X_global)
def hypothesis(theta, X):
    """Element-wise product theta * X: each column of X scaled by its parameter.

    Summing the result across columns (as callers do) yields the linear
    prediction for each row.
    """
    return theta*X
def computeCost(X, y, theta, m):
    """Half mean-squared-error cost of the linear model (theta, X) against y."""
    predictions = np.sum(hypothesis(theta, X), axis=1)
    residuals = predictions.sub(y.squeeze())
    return sum(residuals ** 2) / (2 * m)
def gradientDescent(X, y, theta, m, alpha, i):
    """Run i iterations of batch gradient descent, mutating theta in place.

    Returns (J, j, theta): per-iteration cost history, final cost, and the
    (mutated) parameter array.
    NOTE(review): element-wise in-place updates on an integer-dtype theta
    truncate every step — pass a float array; verify at the call site.
    """
    J = [] #cost function in each iterations
    k = 0
    while k < i:
        # predictions use theta as it was at the top of this iteration
        y1 = hypothesis(theta, X)
        y1 = np.sum(y1, axis=1)
        for c in range(0, len(X.columns)):
            theta[c] = theta[c] - alpha*(sum((y1.sub(y.squeeze()))*X.iloc[:,c]))/m
            print(theta[c])
        j = computeCost(X, y, theta, m)
        J.append(j)
        k += 1
        print(J, j, theta)
    return J, j, theta
# Train, then plot actual demand (blue) and fitted values (black).
J, j, theta_global = gradientDescent(X_global, y_global, theta_global, m_global, 0.05, 10000)
y_hat = hypothesis(theta_global, X_global)
y_hat = np.sum(y_hat, axis=1)
plt.figure()
plt.scatter(x=list(range(0, m_global)), y=y_global, color='blue')
plt.figure()
plt.scatter(x=list(range(0, m_global)), y=y_hat, color='black')
plt.show()
| 28.610169 | 195 | 0.640403 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 328 | 0.194313 |
f65224814a8fb77dc67c3f651cede815e9b8f99b | 1,786 | py | Python | config.py | vsmelov/neural-music | 0cbe06080e2c257c323ffc93dc673bb1e0edf2c4 | [
"MIT"
] | 2 | 2020-03-06T19:36:17.000Z | 2022-03-09T07:29:08.000Z | config.py | vsmelov/neural-music | 0cbe06080e2c257c323ffc93dc673bb1e0edf2c4 | [
"MIT"
] | null | null | null | config.py | vsmelov/neural-music | 0cbe06080e2c257c323ffc93dc673bb1e0edf2c4 | [
"MIT"
] | null | null | null | # coding: utf-8
import os
base_dir = os.path.dirname(os.path.realpath(__file__))
music_dir = os.path.join(base_dir, 'music-3')
data_dir = os.path.join(base_dir, 'data-3')
weights_dir = os.path.join(data_dir, 'weights')
weights_file = os.path.join(weights_dir, 'weights')
if not os.path.exists(data_dir):
os.makedirs(data_dir)
if not os.path.exists(weights_dir):
os.makedirs(weights_dir)
# choice of rectangular, hanning, hamming, blackman, blackmanharris
window = 'hanning'
N = 2*512 # fft size, must be power of 2 and >= M
M = N-1 # window size
H = int(M / 2) # hop size
fs = 44100
NN = N / 2 + 1 # number of meaning-ful Fourier coefficients
# границы слышимости человека
min_freq = 20
max_freq = 20000
# привычные границы слышимости
min_freq = 20
max_freq = 8000
min_freq = 0
max_freq = 4000
# минимальные и максимальные номера коэффициентов Фурье
# соответствующие ограничениям на спектр
min_k = int(min_freq * N / fs)
max_k = int(max_freq * N / fs)
NP = max_k - min_k
print 'NP: {}'.format(NP)
zero_db = -80 # граница слышимости
mem_sec = 3
mem_n = int(mem_sec * fs / H)
gen_time = 5*60
sequence_length = int(gen_time * fs / H)
print 'sequence_length: {}'.format(sequence_length)
# DataSet Vectorization params
max_sentence_duration = 1.5 # seconds
max_sentence_len = int(fs * max_sentence_duration / H)
sentences_overlapping = 0.25
sentences_step = int(max_sentence_len * (1 - sentences_overlapping))
# сколько фреймов пропустим для анализа в начале каждой проверки,
# чтобы прогреть нейронку и дать ей
# угадать мелодию перед тем как делать предсказания
skip_first = 0
# Sin Model
sin_t = -80
minSineDur = 0.001
maxnSines = 200
freqDevOffset = 50
freqDevSlope = 0.001
Ns = N # size of fft used in synthesisNs = 512 # size of fft used in synthesis
| 25.15493 | 80 | 0.731243 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 984 | 0.477207 |
f653d187c14a15a2c7898dab067018d3da9f4ba1 | 1,184 | py | Python | tests/conftest.py | BookOps-CAT/ChangeSubject | bff86ad58685db169a99f9ad3b2df1179cffb5a3 | [
"MIT"
] | 1 | 2022-01-13T20:28:12.000Z | 2022-01-13T20:28:12.000Z | tests/conftest.py | BookOps-CAT/ChangeSubject | bff86ad58685db169a99f9ad3b2df1179cffb5a3 | [
"MIT"
] | null | null | null | tests/conftest.py | BookOps-CAT/ChangeSubject | bff86ad58685db169a99f9ad3b2df1179cffb5a3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import pytest
from pymarc import Field, Record
@pytest.fixture
def fake_subfields():
    """A flat pymarc subfield list: one $a, two $x, and a terminal $z."""
    return ["a", "subA", "x", "subX1", "x", "subX2", "z", "subZ."]
@pytest.fixture
def fake_subjects(fake_subfields):
    """Subject fields (600/650/653) that all share the fake subfield list."""
    return [
        Field(tag="600", indicators=["1", "0"], subfields=fake_subfields),
        Field(tag="650", indicators=[" ", "7"], subfields=fake_subfields),
        Field(tag="650", indicators=[" ", "0"], subfields=fake_subfields),
        Field(tag="653", indicators=[" ", " "], subfields=fake_subfields),
    ]
@pytest.fixture
def stub_bib():
    """A minimal MARC record: one LCSH 650, two FAST 650s, and a 907 field."""
    record = Record()
    record.add_field(
        Field(tag="650", indicators=[" ", "0"], subfields=["a", "LCSH sub A."])
    )
    record.add_field(
        Field(
            tag="650",
            indicators=[" ", "7"],
            subfields=["a", "Unwanted FAST.", "2", "fast"],
        )
    )
    record.add_field(
        Field(
            tag="650",
            indicators=[" ", "7"],
            subfields=["a", "neutral FAST.", "2", "fast"],
        )
    )
    record.add_field(
        Field(tag="907", indicators=[" ", " "], subfields=["a", ".b111111111"])
    )
    return record
| 24.666667 | 79 | 0.525338 | 0 | 0 | 0 | 0 | 1,101 | 0.929899 | 0 | 0 | 237 | 0.200169 |
f65532be9b4201eff4ea7369b593b54567404d32 | 8,132 | py | Python | src/lexer_rules2.py | ezielramos/cool-compiler-2021 | e5b2aa51e47296b21f8b1ea14d73b1c7e7ef0785 | [
"MIT"
] | null | null | null | src/lexer_rules2.py | ezielramos/cool-compiler-2021 | e5b2aa51e47296b21f8b1ea14d73b1c7e7ef0785 | [
"MIT"
] | null | null | null | src/lexer_rules2.py | ezielramos/cool-compiler-2021 | e5b2aa51e47296b21f8b1ea14d73b1c7e7ef0785 | [
"MIT"
] | null | null | null | from TOKEN import LexToken
class Lexer:
    """Hand-written scanner for a COOL-like language.

    The whole input is tokenized on construction: recognized tokens are
    appended to token_list as LexToken objects.  On the first lexical
    error a "LexicographicError" message is stored in self.result and
    self.my_bool is set to True.
    """
    def __init__(self,text):
        # True once a lexical error has been recorded.
        self.my_bool = False
        # First error message, or '' while no error has occurred.
        self.result = ''
        # Keyword (lower-case) and punctuation spellings -> token type names.
        self.names = {
            "case" : "CASE",
            "class" : "CLASS",
            "else" : "ELSE",
            "esac" : "ESAC",
            "fi" : "FI",
            "if" : "IF",
            "in" : "IN",
            "inherits" : "INHERITS",
            "isvoid" : "ISVOID",
            "let" : "LET",
            "loop" : "LOOP",
            "new" : "NEW",
            "of" : "OF",
            "pool" : "POOL",
            "then" : "THEN",
            "while" : "WHILE",
            "not" : "NOT",
            "true" : "TRUE",
            "false" : "FALSE",
            "(" : "LPAREN",
            ")" : "RPAREN",
            "{" : "LBRACE",
            "}" : "RBRACE",
            ":" : "TDOTS",
            "," : "COMMA",
            "." : "DOT",
            ";" : "SEMICOLON",
            "@" : "AT",
            "*" : "MULTIPLY",
            "/" : "DIVIDE",
            "+" : "PLUS",
            "-" : "MINUS",
            "~" : "INT_COMP",
            "<" : "LT",
            "=" : "EQ",
            "<=" : "LTEQ",
            "<-" : "ASSIGN",
            "=>" : "ARROW", }
        # Tokens produced so far, in source order.
        self.token_list = []
        # One- and two-character punctuation/operator spellings.
        self.Simple_tokens = ['(', ')', '{', '}', ':', ',','.',';','@','*','/','+','-','~','<','=','<=','<-','=>']
        # Characters that are always illegal outside strings and comments.
        self.error_tokens = ['!','$','%','^','?','[',']','#','&']
        self.ABC = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
        self.abc = [str.lower(item) for item in self.ABC]
        self._int = ['0','1','2','3','4','5','6','7','8','9']
        self.get_tokens(text)
    def error(self,line,column,value):
        """Record a generic ERROR "<value>" message at (line, column); first error wins."""
        message = f'({line}, {column}) - LexicographicError: ERROR "'
        message += value
        message +='"'
        if self.result =='':
            self.result = message
            self.my_bool = True
    def error_String_null(self,line,column):
        """Record a 'String contains null character' error (first error wins)."""
        if self.result=='':
            self.result = f'({line}, {column}) - LexicographicError: String contains null character'
            self.my_bool = True
    def error_Comment_EOF(self,line,column):
        """Record an 'EOF in comment' error (first error wins)."""
        if self.result=='':
            self.result = f"({line}, {column}) - LexicographicError: EOF in comment"
            self.my_bool = True
    def error_String_EOF(self,line,column):
        """Record an 'EOF in string constant' error (first error wins)."""
        if self.result=='':
            self.result = f'({line}, {column}) - LexicographicError: EOF in string constant'
            self.my_bool = True
    def error_String_New_Line(self,line,column):
        """Record an 'Unterminated string constant' error (first error wins)."""
        if self.result == '':
            self.result = f'({line}, {column}) - LexicographicError: Unterminated string constant'
            self.my_bool = True
    def get_tokens(self,text):
        """Scan the whole input string and fill self.token_list.

        Tracks the current line (Ln) and column (Col) for error reporting,
        skips whitespace and both comment styles ('--' and nested '(*...*)'),
        and recognizes STRING, INTEGER, punctuation/operators, keywords,
        TYPE identifiers (leading upper-case) and ID identifiers (leading
        lower-case).
        """
        i=-1
        n = len(text)
        Ln = 1
        Col = 1
        current1 = ''
        current2 = ''
        open_comments = 0
        while i < n - 1:
            i += 1
            # characters that are illegal anywhere outside strings/comments
            if text[i] in self.error_tokens:
                Col+=len(current1)
                self.error(Ln, Col, text[i])
                break
            if text[i] == '\t':
                Col+=1
                continue
            if text[i] == ' ':
                Col+=1
                continue
            if text[i] == '\n': #end line
                Col=1
                Ln+=1
                continue
            if text[i] == '-' and text[i + 1] == '-': #ignore comment
                while not text[i] == '\n': i+=1
                Col=1
                Ln+=1
                continue
            # (* ... *) comments may nest; open_comments tracks the depth
            if text[i] == '(' and text[i + 1] == '*': #ignore comment
                open_comments += 1
                while open_comments > 0:
                    i+=1
                    Col+=1
                    if i == len(text):
                        self.error_Comment_EOF(Ln,Col)
                        i=len(text) #end
                        break
                    if text[i] == '\n':
                        Ln+=1
                        Col=0
                    if text[i] == '(' and text[i + 1] == '*':
                        open_comments += 1
                    if text[i] == '*' and text[i + 1] == ')':
                        i+=1
                        open_comments -= 1
                continue
            # string literal: scan until the closing quote, tracking errors
            if text[i] == '"':
                i+=1
                length = 1
                if i==len(text):
                    Col+=length
                    self.error_String_EOF(Ln,Col)
                    break
                while not text[i] == '"':
                    if text[i] == '\n':
                        Col+=length
                        self.error_String_New_Line(Ln,Col)
                        i=len(text)
                        break
                    if text[i]=='\0':
                        Col+=length
                        self.error_String_null(Ln,Col)
                        i=len(text)
                        break
                    # backslash escape: the escaped character is kept verbatim
                    # unless it is one of b/t/n/f (those fall through below)
                    if text[i]=='\\':
                        if not text[i+1]=='b' and not text[i+1]=='t' and not text[i+1]=='n' and not text[i+1]=='f':
                            current1+=text[i+1]
                            length+=2
                            if text[i+1]=='\n':
                                Ln+=1
                                Col=0
                                length=1
                            i+=2
                            continue
                    current1 += text[i]
                    length+=1
                    i+=1
                    if i==len(text):
                        Col+=length
                        self.error_String_EOF(Ln,Col)
                        break
                self.token_list.append(LexToken('STRING',current1,Ln,Col))
                Col+=length + 1
                current1 = ''
                continue
            # accumulate the current lexeme and peek one character ahead
            current1 += text[i]
            if i + 1 < len(text): current2 = current1 + text[i + 1]
            else: current2 = current1
            _next = current2[-1] #text[i + 1]
            if current1[0] == '_':
                self.error(Ln,Col,current1[0])
                break
            # integer literal: consume the whole digit run
            # NOTE(review): a digit run ending exactly at end-of-input would
            # index one past text here — confirm inputs end with a terminator.
            if current1[0] in self._int:
                i+=1
                while text[i] in self._int:
                    current1 += text[i]
                    i+=1
                i-=1
                self.token_list.append(LexToken('INTEGER',int(current1), Ln,Col))
                Col+=len(current1)
                current1 = ''
                continue
            # two-character operators are tried before one-character ones
            if current2 in self.Simple_tokens:
                self.token_list.append(LexToken(self.names[current2],current2,Ln,Col))
                Col+=len(current2)
                i+=1
                current1 = ''
                continue
            if current1 in self.Simple_tokens:
                self.token_list.append(LexToken(self.names[current1],current1,Ln,Col))
                Col+=len(current1)
                current1 = ''
                continue
            # lexeme ends when the next character can no longer extend it
            if _next in self.Simple_tokens or _next == ' ' or _next == '\n' or _next == '\t' or i+1==len(text):
                lower = str.lower(current1)
                if self.names.__contains__(lower):
                    self.token_list.append(LexToken(self.names[lower],lower,Ln,Col))
                    Col+=len(current1)
                    current1 = ''
                    continue
                if current1[0] in self.ABC:
                    self.token_list.append(LexToken('TYPE',current1,Ln,Col))
                    Col+=len(current1)
                    current1 = ''
                    continue
                if current1[0] in self.abc:
                    self.token_list.append(LexToken('ID',current1,Ln,Col))
                    Col+=len(current1)
                    current1 = ''
| 33.327869 | 124 | 0.356739 | 8,102 | 0.996311 | 0 | 0 | 0 | 0 | 0 | 0 | 1,142 | 0.140433 |
f655f02af6c4b546b8ad4d6e4f667bc6a96d3c25 | 5,578 | py | Python | UCI/abalone/abalone.py | tqtifnypmb/ML | dfcc6f849b1d5ee2efd0faa0f585091fc7263a6c | [
"MIT"
] | null | null | null | UCI/abalone/abalone.py | tqtifnypmb/ML | dfcc6f849b1d5ee2efd0faa0f585091fc7263a6c | [
"MIT"
] | null | null | null | UCI/abalone/abalone.py | tqtifnypmb/ML | dfcc6f849b1d5ee2efd0faa0f585091fc7263a6c | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
from sklearn import feature_selection as fs
from sklearn import naive_bayes
from sklearn import model_selection
from sklearn import metrics
from sklearn import linear_model
from sklearn import svm
from imblearn.under_sampling import NeighbourhoodCleaningRule
from imblearn.over_sampling import SMOTE, RandomOverSampler
COLUMN_NAMES = ['sex', 'length', 'diameter', 'height',
'whole weight', 'shucked weight', 'viscera weight',
'shell weight', 'rings']
# feature selection
def cal_features_mutual_info(data):
    """Print the mutual information between each feature and the 'rings' target.

    Iterates COLUMN_NAMES and stops at 'rings'; assumes the feature columns
    appear in the same order as COLUMN_NAMES.
    """
    y = data['rings']
    features = data.loc[:, data.columns != 'rings']
    info = fs.mutual_info_regression(features, y)
    print('========== mutual info ==============')
    for idx, col in enumerate(COLUMN_NAMES):
        if col == 'rings':
            break
        name = COLUMN_NAMES[idx]
        print('{0} ==> {1}'.format(name, info[idx]))
def cal_feature_variance(data):
    """Print the per-column variance of the dataset (includes 'rings')."""
    vt = fs.VarianceThreshold()
    vt.fit_transform(data)
    print('======== variance ================')
    for idx, col in enumerate(COLUMN_NAMES):
        print('{0} ==> {1}'.format(col, vt.variances_[idx]))
def draw_class_hist(Y):
    """Show a histogram of the target values bucketed into 5-ring-wide bins."""
    hist_bins = list(range(1, 29, 5))
    Y.plot.hist(bins=hist_bins)
    plt.show()
# data loading / preprocessing
def preprocessing(data):
    """Encode the categorical 'sex' column in place as integer codes.

    Codes follow np.unique's sorted order of the distinct values.
    """
    codes = np.unique(data['sex'], return_inverse=True)[1]
    data['sex'] = codes
def load_data():
    """Read the abalone CSV, encode 'sex' in place, print a summary, return the frame."""
    data = pd.read_csv('../uci_data/abalone.data.txt', header=None, names=COLUMN_NAMES)
    preprocessing(data)
    print(data.describe())
    return data
def oversampling(X, Y):
    """Balance classes by random oversampling followed by SMOTE.

    NOTE(review): fit_sample is the old imbalanced-learn API (renamed
    fit_resample in later releases) — confirm the pinned version.
    """
    # some class has only one sample
    # to apply SMOTE we first oversample it randomly
    X_resampled, Y_resampled = RandomOverSampler().fit_sample(X, Y)
    X_resampled, Y_resampled = SMOTE().fit_sample(X_resampled, Y_resampled)
    return (X_resampled, Y_resampled)
def undersampling(X, Y):
    """Undersample the majority class with the neighbourhood cleaning rule."""
    rus = NeighbourhoodCleaningRule(ratio='majority')
    x_new, y_new = rus.fit_sample(X, Y)
    return (x_new, y_new)
# metrics
# 1. metrics for multi-class classification problem
def cal_metrics(y_test, y_pred, label):
    """Print accuracy, weighted precision and weighted recall, prefixed with label."""
    print('{0} acc: {1}'.format(label, metrics.accuracy_score(y_test, y_pred)))
    print('{0} precision: {1}'.format(label, metrics.precision_score(y_test, y_pred, average='weighted')))
    print('{0} recall: {1}'.format(label, metrics.recall_score(y_test, y_pred, average='weighted')))
# models
def gaussian_naive_bayes(x_train, y_train, x_test, y_test):
    """Train Gaussian naive Bayes and print its metrics on the test split."""
    clf = naive_bayes.GaussianNB()
    clf.fit(x_train, y_train)
    cal_metrics(y_test, clf.predict(x_test), 'gaussianNB')
def multinomial_naive_bayes(x_train, y_train, x_test, y_test):
    """Train multinomial naive Bayes and print its metrics on the test split."""
    clf = naive_bayes.MultinomialNB()
    clf.fit(x_train, y_train)
    cal_metrics(y_test, clf.predict(x_test), 'multinomialNB')
def logistics_regression(x_train, y_train, x_test, y_test):
    """Train multinomial logistic regression (SAG solver) and print test metrics."""
    clf = linear_model.LogisticRegression(solver='sag', multi_class='multinomial')
    clf.fit(x_train, y_train)
    cal_metrics(y_test, clf.predict(x_test), 'logisticsc regression')
def select_features_by_stat_info(data):
    """Print feature statistics, then return the manually chosen feature subset."""
    cal_features_mutual_info(data)
    cal_feature_variance(data)
    print('==================')
    # ignore features with low variance
    return['sex', 'length', 'whole weight',
           'shucked weight', 'viscera weight',
           'shell weight']
def select_feature_by_L1(data_train, data_test):
    """Select features via an L1-regularised linear SVC (weight threshold 0.5).

    Returns the transformed (train, test) feature matrices.
    """
    all_cols = ['sex', 'length', 'diameter', 'height',
                'whole weight', 'shucked weight', 'viscera weight',
                'shell weight']
    Y = data_train['rings']
    X = data_train[all_cols]
    X_test = data_test[all_cols]
    svc = svm.LinearSVC(penalty='l1', dual=False).fit(X, Y)
    model = fs.SelectFromModel(svc, threshold=0.5, prefit=True)
    return (model.transform(X), model.transform(X_test))
if __name__ == '__main__':
    data = load_data()
    # 80/20 positional train/test split
    split_point = math.floor(len(data) * 0.8)
    data_train = data[: split_point]
    data_test = data[split_point:]
    y_train = data_train['rings']
    y_test = data_test['rings']
    print('======== select features by stat info ========')
    selected_features = select_features_by_stat_info(data)
    x_train = data_train[selected_features]
    x_test = data_test[selected_features]
    gaussian_naive_bayes(x_train, y_train, x_test, y_test)
    logistics_regression(x_train, y_train, x_test, y_test)
    multinomial_naive_bayes(x_train, y_train, x_test, y_test)
    print('=========== select features by L1 =============')
    x_train, x_test = select_feature_by_L1(data_train, data_test)
    gaussian_naive_bayes(x_train, y_train, x_test, y_test)
    logistics_regression(x_train, y_train, x_test, y_test)
    multinomial_naive_bayes(x_train, y_train, x_test, y_test)
    print('============ under sampling ==============')
    # resampling below is applied to the L1-selected training features
    x_res, y_res = undersampling(x_train, y_train)
    gaussian_naive_bayes(x_res, y_res, x_test, y_test)
    logistics_regression(x_res, y_res, x_test, y_test)
    multinomial_naive_bayes(x_res, y_res, x_test, y_test)
    print('============ over sampling ==============')
    x_res, y_res = oversampling(x_train, y_train)
    gaussian_naive_bayes(x_res, y_res, x_test, y_test)
    logistics_regression(x_res, y_res, x_test, y_test)
    multinomial_naive_bayes(x_res, y_res, x_test, y_test)
#draw_class_hist(data['rings']) | 34.220859 | 87 | 0.674256 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,075 | 0.192721 |
f6566e16378a091190a0b2c504267d971df5e5d3 | 3,204 | py | Python | os_migrate/plugins/filter/stringfilter.py | jbadiapa/os-migrate | 19b591a672bc9e4af72e62dbd96be94a238a6dc2 | [
"Apache-2.0"
] | 35 | 2020-01-22T18:38:27.000Z | 2022-03-22T16:19:56.000Z | os_migrate/plugins/filter/stringfilter.py | jbadiapa/os-migrate | 19b591a672bc9e4af72e62dbd96be94a238a6dc2 | [
"Apache-2.0"
] | 292 | 2019-12-09T11:15:26.000Z | 2022-03-31T14:37:52.000Z | os_migrate/plugins/filter/stringfilter.py | jbadiapa/os-migrate | 19b591a672bc9e4af72e62dbd96be94a238a6dc2 | [
"Apache-2.0"
] | 32 | 2019-12-09T11:09:44.000Z | 2022-03-24T01:13:31.000Z | from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from pprint import pformat
import re
from ansible import errors
def stringfilter(items, queries, attribute=None):
    """Keep the members of `items` that match at least one of `queries`.

    When `attribute` is None, `items` must be a list of strings and each
    string is matched directly. Otherwise `items` is a list of (possibly
    nested) dicts and matching is done against the string found under the
    '.'-separated key path `attribute`.

    Each query is either a plain string (exact equality) or a dict of the
    form {'regex': <python regular expression>} (re.search match).

    The input list is left untouched; the returned list shares its
    elements with `items` (no deep copy).
    """
    key_path = attribute.split('.') if attribute is not None else None
    matched = []
    for item in items:
        if key_path is None:
            if not isinstance(item, str):
                raise errors.AnsibleFilterError(
                    f"stringfilter: list item is not string: {pformat(item)}")
            candidate = item
        else:
            candidate = _get_nested_value(item, key_path)
            if not isinstance(candidate, str):
                raise errors.AnsibleFilterError(
                    f"stringfilter: value under '{attribute}' in '{pformat(item)}' is not string: {pformat(candidate)}"
                )
        for query in queries:
            if isinstance(query, str):
                if candidate == query:
                    matched.append(item)
                    break
            elif isinstance(query, dict) and query.get('regex'):
                if re.search(query['regex'], candidate):
                    matched.append(item)
                    break
            else:
                raise errors.AnsibleFilterError(
                    f"stringfilter: unrecognized query: {pformat(query)}"
                )
    return matched
def _get_nested_value(dct, key_path):
    """Walk `dct` along `key_path` (a list of keys) and return the value.

    Raises AnsibleFilterError when an intermediate value is not a dict or
    when a key along the path is missing.
    """
    current = dct
    for key in key_path:
        if not isinstance(current, dict):
            raise errors.AnsibleFilterError(
                f"stringfilter: looking for key '{key}' "
                f"but list item is not dict: {pformat(current)}"
            )
        if key not in current:
            raise errors.AnsibleFilterError(
                f"stringfilter: key is '{key}' "
                f"but it was not found in dict: {pformat(current)}"
            )
        current = current[key]
    return current
class FilterModule(object):
    """Ansible filter-plugin entry point exposing the string filters."""

    def filters(self):
        """Return the mapping of filter names usable from playbooks."""
        return dict(stringfilter=stringfilter)
| 32.363636 | 116 | 0.604869 | 120 | 0.037453 | 0 | 0 | 0 | 0 | 0 | 0 | 1,514 | 0.472534 |
f65864777f5cc28180536badeeb5028094ebe5da | 1,082 | py | Python | src/qgis_ros/core/translators/wireless_msgs.py | acfrmarine/qgis_ros | 01dd107f963b87df063bf2f11b0a484f4323cae9 | [
"MIT"
] | 31 | 2018-09-11T17:50:17.000Z | 2021-09-13T11:48:47.000Z | src/qgis_ros/core/translators/wireless_msgs.py | acfrmarine/qgis_ros | 01dd107f963b87df063bf2f11b0a484f4323cae9 | [
"MIT"
] | 9 | 2018-09-11T00:45:23.000Z | 2022-01-07T13:19:20.000Z | src/qgis_ros/core/translators/wireless_msgs.py | acfrmarine/qgis_ros | 01dd107f963b87df063bf2f11b0a484f4323cae9 | [
"MIT"
] | 9 | 2018-10-12T12:31:38.000Z | 2021-06-05T02:52:44.000Z | from wireless_msgs.msg import Connection
from .translator import Translator, TableTranslatorMixin
class ConnectionTranslator(Translator, TableTranslatorMixin):
    """Translate wireless_msgs/Connection messages into geometry-less features."""

    messageType = Connection
    geomType = Translator.GeomTypes.NoGeometry

    @staticmethod
    def translate(msg):
        """Convert one Connection message into a single Feature dict."""
        # Some forks of wireless_msgs/Connection have a header.
        try:
            seq = msg.header.seq
            stamp = msg.header.stamp.to_sec()
        except AttributeError:
            seq = None
            stamp = None
        copied_fields = (
            'bitrate', 'txpower', 'link_quality_raw', 'link_quality',
            'signal_level', 'noise_level', 'essid', 'bssid', 'frequency',
        )
        properties = {name: getattr(msg, name) for name in copied_fields}
        properties['seq'] = seq
        properties['stamp'] = stamp
        return [{
            'type': 'Feature',
            'properties': properties
        }]
| 29.243243 | 63 | 0.543438 | 981 | 0.906654 | 0 | 0 | 837 | 0.773567 | 0 | 0 | 196 | 0.181146 |
f65898c28ffae1a8584de739a8a23ca7c104b359 | 671 | py | Python | Lab11/BacktrackingRecursive.py | alexnaiman/Fundamentals-Of-Programming---Lab-assignments | ef066e6036e20b9c686799f507f10e15e50e3285 | [
"MIT"
] | 4 | 2018-02-19T13:57:38.000Z | 2022-01-08T04:10:54.000Z | Lab11/BacktrackingRecursive.py | alexnaiman/Fundamentals-Of-Programming---Lab-assignments | ef066e6036e20b9c686799f507f10e15e50e3285 | [
"MIT"
] | null | null | null | Lab11/BacktrackingRecursive.py | alexnaiman/Fundamentals-Of-Programming---Lab-assignments | ef066e6036e20b9c686799f507f10e15e50e3285 | [
"MIT"
# The two candidate signs tried at every position of the expression.
l = ["+", "-"]


def backRec(x):
    """Recursively extend the sign prefix x, reporting every valid solution."""
    for sign in l:
        x.append(sign)
        if consistent(x):
            if solution(x):
                solutionFound(x)
            backRec(x)
        x.pop()
def consistent(s):
    """A prefix can still be extended while it holds fewer than n signs."""
    return n > len(s)
def solution(s):
    """True when s is a complete sign assignment whose signed sum is positive."""
    total = list2[0]
    if len(s) != n - 1:
        return False
    for sign, value in zip(s, list2[1:]):
        total = total - value if sign == "-" else total + value
    return total > 0
def solutionFound(s):
    # Report one valid sign assignment (called once per solution found).
    print(s)
# Read the operand count n, then the n operand values, from stdin.
n = int(input("Give number"))
list2 = []
for i in range(n):
    list2.append(int(input(str(i) + ":")))
# Explore every +/- assignment starting from the empty prefix.
backRec([])
| 16.775 | 43 | 0.4307 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 25 | 0.037258 |
f659ad3a0d5abc3c751802d36141241340ba30f7 | 8,098 | py | Python | RBM.py | alibell/binary_rbm | f9f30005b7866c0b1e2714350c22b81e33dac72c | [
"MIT"
] | null | null | null | RBM.py | alibell/binary_rbm | f9f30005b7866c0b1e2714350c22b81e33dac72c | [
"MIT"
] | null | null | null | RBM.py | alibell/binary_rbm | f9f30005b7866c0b1e2714350c22b81e33dac72c | [
"MIT"
] | null | null | null | import numpy as np
def sigmoid(X):
    """sigmoid
    Compute the element-wise logistic sigmoid 1 / (1 + exp(-X)).

    Parameters
    ----------
    X: numpy array

    Output:
    -------
    Numpy array of the same size of X
    """
    # The tanh form is mathematically identical to 1/(1+np.exp(-X)) but never
    # overflows for large |X| (np.exp(-X) overflows for very negative X).
    return 0.5 * (1.0 + np.tanh(0.5 * X))


class binary_RBM ():
    """Bernoulli-Bernoulli Restricted Boltzmann Machine trained with CD-1."""

    def __init__ (self, q, max_iter=300, batch_size=64, stop_criterion=1e-4, lr=0.01):
        """__init__
        Initialisation of the binary_RBM class

        Parameters
        ----------
        q: int, number of hidden features
        stop_criterion: float, amplitude of absence of loss changes to trigger early stopping
        batch_size: int, number of sample to process in each batch
        max_iter: int, maximum number of iteration during training
        lr: float, learning rate
        """
        if isinstance(batch_size, int):
            self.batch_size = batch_size
        else:
            # Fixed: the original message wrongly said "float" for this int parameter.
            raise ValueError("batch_size should be of type int")
        if isinstance(lr, float):
            self.lr = lr
        else:
            raise ValueError("lr should be of type float")
        if isinstance(stop_criterion, float):
            self.stop_criterion = stop_criterion
        else:
            raise ValueError("stop_criterion should be of type float")
        if isinstance(q, int):
            self.q = q
        else:
            raise ValueError("q should be of type int")
        if isinstance(max_iter, int):
            self.max_iter = max_iter
        else:
            raise ValueError("max_iter should be of type int")
        # a: visible bias, b: hidden bias, W: (p, q) weight matrix.
        self.coefs_ = {
            'a': None,
            'b': None,
            'W': None
        }
        self.fitted_ = False

    def _init_params(self, X):
        """_init_params
        Initialize the network parameters according to the dataset

        Parameters
        ----------
        X: size (n,p) with n the number of samples and p the number of features
        """
        # Initialising the coefs: zero biases, small random Gaussian weights.
        self.p_ = X.shape[1]
        self.coefs_["a"] = np.zeros((1, self.p_,))
        self.coefs_["b"] = np.zeros((1, self.q,))
        self.coefs_["W"] = (np.random.randn(self.p_ * self.q) * np.sqrt(1e-2)).reshape((self.p_, self.q))
        # Training-loss history (one entry per epoch).
        self.loss = []

    def _get_conditional_probability(self, variable, variable_value):
        """_get_conditional_probability
        Compute the conditional probability of hidden or visible units

        Parameters
        ----------
        variable: str, "hidden" or "visible" (the side being predicted)
        variable_value: array of the *other* side's values
        """
        if variable not in ("hidden", "visible"):
            raise ValueError("Variable should be valued 'hidden' or 'visible'")
        if variable == 'hidden':
            Z_ = variable_value.dot(self.coefs_["W"]) + self.coefs_["b"]
        else:
            Z_ = variable_value.dot(self.coefs_["W"].T) + self.coefs_["a"]
        return sigmoid(Z_)

    def get_hidden(self, X):
        """Get hidden variables
        Use Gibbs sampling to draw hidden units from visible units

        Parameters
        ----------
        X: size (n,p) with n the number of samples and p the number of features

        Output
        ------
        H: size (n,q) binary array of sampled hidden units
        """
        hidden_probs = self._get_conditional_probability("hidden", X)
        H = (np.random.rand(X.shape[0], self.q) <= hidden_probs).astype("int")
        return H

    def get_visible(self, H):
        """Get visible variables
        Use Gibbs sampling to draw visible units from hidden units

        Parameters
        ----------
        H: size (n,q) with n the number of samples and q the number of hidden variables

        Output
        ------
        X: size (n,p) binary array of sampled visible units
        """
        visible_probs = self._get_conditional_probability("visible", H)
        X = (np.random.rand(H.shape[0], self.p_) <= visible_probs).astype("int")
        return X

    def grad(self, X):
        """grad
        Compute the CD-1 gradient of the parameters

        Parameters
        ----------
        X: size (n,p) with n the number of samples and p the number of features

        Output
        ------
        Dict containing the gradients of W, a and b
        """
        # X_1: one-step Gibbs reconstruction of the visible units.
        H = self.get_hidden(X)
        X_1 = self.get_visible(H)
        # Hidden activation probabilities for data and reconstruction.
        h_prob = self._get_conditional_probability("hidden", X)
        h_1_prob = self._get_conditional_probability("hidden", X_1)
        # Positive-phase minus negative-phase statistics, averaged over the batch.
        W_grad = (1 / X.shape[0]) * (X.T.dot(h_prob) - X_1.T.dot(h_1_prob))
        a_grad = (X - X_1).mean(axis=0)
        b_grad = (h_prob - h_1_prob).mean(axis=0)
        return {
            "W": W_grad,
            "a": a_grad,
            "b": b_grad
        }

    def get_loss(self, X):
        """
        Compute the loss: mean squared error of the X reconstruction

        Parameters
        ----------
        X: size (n,p) with n the number of samples and p the number of features
        """
        h_prob = self._get_conditional_probability("hidden", X)
        x_prob = self._get_conditional_probability("visible", h_prob)
        return np.square(X - x_prob).mean()

    def fit(self, X, y=None):
        """fit
        Train the binary RBM

        Parameters
        ----------
        X: size (n,p) with n the number of samples and p the number of features
        y: Not expected, kept for standard fit api compatibility
        """
        self._init_params(X)
        n_samples = X.shape[0]
        n_batchs = (n_samples // self.batch_size) + int(n_samples % self.batch_size != 0)
        # Number of consecutive epochs without a significant loss decrease.
        n_iter_no_changes = 0
        for i in range(self.max_iter):
            # Shuffle a copy of X so the caller's array is left untouched.
            X_ = X.copy()
            np.random.shuffle(X_)
            for batch in range(n_batchs):
                X_batch_ = X_[batch * self.batch_size:(batch + 1) * self.batch_size]
                grads = self.grad(X_batch_)
                # Gradient *ascent* on the log-likelihood proxy.
                self.coefs_["W"] += self.lr * grads["W"]
                self.coefs_["a"] += self.lr * grads["a"]
                self.coefs_["b"] += self.lr * grads["b"]
            loss = self.get_loss(X)
            self.loss.append(loss)
            if len(self.loss) > 1:
                if self.loss[-2] - self.loss[-1] <= self.stop_criterion:
                    n_iter_no_changes = n_iter_no_changes + 1
                    if n_iter_no_changes >= 20:
                        # Early stopping. Fixed: break (instead of return) so the
                        # fitted_ flag below is still set.
                        break
                else:
                    n_iter_no_changes = 0
        # Mark the model as trained (also reached after early stopping).
        self.fitted_ = True

    def generate(self, n_sample=1, p=0.5, n_gibbs=10):
        """generate
        Generate data by Gibbs sampling from a random binomial initialisation

        Parameters:
        ----------
        p: float, binomial parameter of the random initialisation
        n_sample: int, size of the sample to generate
        n_gibbs: int, number of gibbs sampling steps to perform
        """
        if not isinstance(p, float):
            raise ValueError("P should be of type float")
        if not isinstance(n_gibbs, int):
            raise ValueError("n_gibbs should be of type int")
        if not isinstance(n_sample, int):
            raise ValueError("n_sample should be of type int")
        # Initialise the visible units at random, then alternate h|v and v|h.
        V = (np.random.rand(n_sample, self.p_) <= p) * 1
        for i in range(n_gibbs):
            H = self.get_hidden(V)
            V = self.get_visible(H)
        return V
f65cdec3341b6d687da7bd47d8ce74b25665b578 | 745 | py | Python | Day 07/Anagrams.py | sandeep-krishna/100DaysOfCode | af4594fb6933e4281d298fa921311ccc07295a7c | [
"MIT"
] | null | null | null | Day 07/Anagrams.py | sandeep-krishna/100DaysOfCode | af4594fb6933e4281d298fa921311ccc07295a7c | [
"MIT"
] | null | null | null | Day 07/Anagrams.py | sandeep-krishna/100DaysOfCode | af4594fb6933e4281d298fa921311ccc07295a7c | [
"MIT"
] | null | null | null | '''
Anagrams
Given two strings, a and b , that may or may not be of the same length, determine the minimum number of character deletions required to make a and b anagrams. Any characters can be deleted from either of the strings.
Input :
test cases,t
two strings a and b, for each test case
Output:
Desired O/p
Constraints :
string lengths<=10000
Note :
Anagram of a word is formed by rearranging the letters of the word.
For e.g. -> For the word RAM - MAR,ARM,AMR,RMA etc. are few anagrams.
SAMPLE INPUT
1
cde
abc
SAMPLE OUTPUT
4
'''
from collections import Counter
# For each test case read the two words and print the number of character
# deletions needed to make them anagrams: the sum over all letters of the
# absolute difference between the two letter-frequency tables.
for _ in range(int(input())):
    a = list(input())
    b = list(input())
    x = Counter(a)
    y = Counter(b)
    x.subtract(y)  # after this, x[c] = count_a(c) - count_b(c)
    print(sum(abs(i) for i in x.values()))
f65e0aca86dc9604b4e7e6ec844b23ecb9545677 | 2,031 | py | Python | bookstore/lib/green/__init__.py | Inveracity/python-grpc-betterproto-quartz | a94a0ea8429d93f3275389a66d0c41ca9b4b616c | [
"MIT"
] | null | null | null | bookstore/lib/green/__init__.py | Inveracity/python-grpc-betterproto-quartz | a94a0ea8429d93f3275389a66d0c41ca9b4b616c | [
"MIT"
] | null | null | null | bookstore/lib/green/__init__.py | Inveracity/python-grpc-betterproto-quartz | a94a0ea8429d93f3275389a66d0c41ca9b4b616c | [
"MIT"
] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: green.proto
# plugin: python-betterproto
from dataclasses import dataclass
from typing import Dict, List
import betterproto
from betterproto.grpc.grpclib_server import ServiceBase
import grpclib
# Generated from the GreenColors enum in green.proto -- do not edit by hand.
class GreenColors(betterproto.Enum):
    MOLDY = 0
    MODERN = 1
    PASTEL = 2
# Generated message wrapper: a protobuf map<int32, string> field (tag 1).
@dataclass(eq=False, repr=False)
class GreenHexadecimal(betterproto.Message):
    hexdict: Dict[int, str] = betterproto.map_field(
        1, betterproto.TYPE_INT32, betterproto.TYPE_STRING
    )

    def __post_init__(self) -> None:
        super().__post_init__()
# Generated empty request message for the Green RPC.
@dataclass(eq=False, repr=False)
class GreenRequest(betterproto.Message):
    """Green request"""

    pass

    def __post_init__(self) -> None:
        super().__post_init__()
# Generated response message: a repeated GreenHexadecimal field (tag 1).
@dataclass(eq=False, repr=False)
class GreenResponse(betterproto.Message):
    """Green response"""

    hexadecimal: List["GreenHexadecimal"] = betterproto.message_field(1)

    def __post_init__(self) -> None:
        super().__post_init__()
# Generated client stub: issues the unary-unary /green.Green/Green call.
class GreenStub(betterproto.ServiceStub):
    """Book recommendation call"""

    async def green(self) -> "GreenResponse":
        request = GreenRequest()

        return await self._unary_unary("/green.Green/Green", request, GreenResponse)
# Generated server base class: subclasses override green() with the real
# implementation; __rpc_green adapts it to the grpclib stream interface.
class GreenBase(ServiceBase):
    """Book recommendation call"""

    async def green(self) -> "GreenResponse":
        raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)

    async def __rpc_green(self, stream: grpclib.server.Stream) -> None:
        request = await stream.recv_message()

        request_kwargs = {}

        response = await self.green(**request_kwargs)
        await stream.send_message(response)

    def __mapping__(self) -> Dict[str, grpclib.const.Handler]:
        return {
            "/green.Green/Green": grpclib.const.Handler(
                self.__rpc_green,
                grpclib.const.Cardinality.UNARY_UNARY,
                GreenRequest,
                GreenResponse,
            ),
        }
| 25.074074 | 84 | 0.672575 | 1,648 | 0.811423 | 0 | 0 | 685 | 0.337272 | 511 | 0.2516 | 295 | 0.145249 |
f65e102078e64cef47d78af8c0bec25735c6165b | 2,874 | py | Python | install-git-config-upstream.py | omunroe-com/cobaltdepottools | a8c8b5948286ceaa647c8a3bd5f75ea6ffb263da | [
"BSD-3-Clause"
] | null | null | null | install-git-config-upstream.py | omunroe-com/cobaltdepottools | a8c8b5948286ceaa647c8a3bd5f75ea6ffb263da | [
"BSD-3-Clause"
] | null | null | null | install-git-config-upstream.py | omunroe-com/cobaltdepottools | a8c8b5948286ceaa647c8a3bd5f75ea6ffb263da | [
"BSD-3-Clause"
] | null | null | null | """For each repo in DEPS, git config an appropriate depot-tools.upstream.
This will allow git new-branch to set the correct tracking branch.
"""
import argparse
import hashlib
import json
import os
import sys
import textwrap
import gclient_utils
import git_common
def _GclientEntriesToString(entries):
  """Serialize the gclient entries deterministically (sorted keys) for hashing."""
  return json.dumps(entries, sort_keys=True)
def ConfigUpstream(repo_dir, url):
  """Determine the upstream branch for this repo, and run git config."""
  if not os.path.exists(repo_dir):
    sys.stderr.write('%s not found\n' % repo_dir)
    return False
  os.chdir(repo_dir)

  _, revision = gclient_utils.SplitUrlRevision(url)
  if 'remotes/origin' not in revision:
    # Ignore e.g. a pinned sha1, or other unusual remote.
    sys.stderr.write('Skipping %s with upstream %s\n' % (repo_dir, revision))
    return True
  upstream = revision

  # Compare against git's current upstream config, if any.
  existing = git_common.root()
  if existing:
    existing = existing.strip()
  if existing != upstream:
    sys.stdout.write(
        'In %s, setting %s to %s\n' %
        (repo_dir, 'depot-tools.upstream', upstream))
    git_common.set_config('depot-tools.upstream', upstream)
  return True
def Main(args):
  """For each repo in the gclient root, set the upstream config."""
  parser = argparse.ArgumentParser(
      formatter_class=argparse.RawDescriptionHelpFormatter,
      description=textwrap.dedent(__doc__))
  parser.add_argument('-f', '--force', required=False, action='store_true',
                      help='Force the script to run, ignoring cached results.')
  options = parser.parse_args(args)
  # We expect this to be run as a hook in the gclient root directory.
  root_dir, gclient_entries = gclient_utils.GetGClientRootAndEntries()
  # Compute a hash combined of the .gclient_entries and this script.
  # We should re-run if either changes.
  # NOTE(review): md5.update() is fed a str and the hash is later written with
  # mode 'wb' -- Python 2 semantics; under Python 3 both would need encode().
  md5 = hashlib.md5()
  md5.update(_GclientEntriesToString(gclient_entries))
  with open(__file__) as f:
    md5.update(f.read())
  current_hash = md5.hexdigest()
  already_processed_hash = None
  entries_hash_file = os.path.join(root_dir, '.git_config_entries_hash')
  if os.path.exists(entries_hash_file):
    with open(entries_hash_file) as f:
      already_processed_hash = f.readlines()[0]
  # Skip the (slow) per-repo git work when nothing changed since the last run.
  if current_hash == already_processed_hash and not options.force:
    return 0
  results = []
  for dirname in sorted(gclient_entries):
    abs_path = os.path.normpath(os.path.join(root_dir, dirname))
    results.append(ConfigUpstream(abs_path, gclient_entries[dirname]))
  if all(results):
    # Success. Write the new hash to the cached location.
    with open(entries_hash_file, 'wb') as f:
      f.write(current_hash)
    return 0
  else:
    return 1


if __name__ == '__main__':
  sys.exit(Main(sys.argv[1:]))
| 29.9375 | 79 | 0.720251 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 853 | 0.296799 |
f65e76516161e35a56048949db48a5fb3cb8e36c | 3,269 | py | Python | examples/visualise_labels.py | meyerjo/simple-waymo-open-dataset-reader | 11dc3ecca77b9c877d0c42c71da2f2c93eb0aca0 | [
"Apache-2.0"
] | null | null | null | examples/visualise_labels.py | meyerjo/simple-waymo-open-dataset-reader | 11dc3ecca77b9c877d0c42c71da2f2c93eb0aca0 | [
"Apache-2.0"
] | null | null | null | examples/visualise_labels.py | meyerjo/simple-waymo-open-dataset-reader | 11dc3ecca77b9c877d0c42c71da2f2c93eb0aca0 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2019, Grégoire Payen de La Garanderie, Durham University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import cv2
import io
import sys
from simple_waymo_open_dataset_reader import WaymoDataFileReader
from simple_waymo_open_dataset_reader import dataset_pb2
from simple_waymo_open_dataset_reader import utils
def display_labels_on_image(camera_calibration, camera, labels, camera_labels, display_time = -1):
    """Draw the 3D laser labels and 2D camera labels on one frame and show it.

    camera_calibration: calibration proto for the camera that produced `camera`.
    camera: image proto containing the JPEG-encoded frame.
    labels: laser (3D) labels to project onto the image.
    camera_labels: 2D labels annotated for this camera.
    display_time: milliseconds passed to cv2.waitKey (negative waits for a key).
    """
    # Get the image transformation matrix
    vehicle_to_image = utils.get_image_transform(camera_calibration)
    # Decode the JPEG image
    img = utils.decode_image(camera)
    # Draw all the groundtruth labels
    # NOTE(review): box_3d_to_2d and class_labels are accumulated but never
    # used afterwards -- candidates for removal.
    box_3d_to_2d = []
    class_labels = [l.type for l in labels]
    for label in labels:
        x1, y1, x2, y2 = utils.get_3d_boxes_to_2d(img, vehicle_to_image, label)
        box_3d_to_2d += [x1, y1, x2, y2]
        # Wireframe 3D box, then its 2D projection in green (0, 255, 0).
        utils.draw_3d_box(img, vehicle_to_image, label)
        utils.draw_3d_box(img, vehicle_to_image, label, draw_2d_bounding_box=True, colour=(0, 255, 0))
    for label in camera_labels:
        # 2D camera labels in magenta (255, 0, 255).
        utils.draw_2d_box(img, label, colour=(255, 0, 255))
    # Display the image
    cv2.imshow("Image", img)
    cv2.waitKey(display_time)
# Require exactly one argument: the .tfrecord data file to display.
if len(sys.argv) != 2:
    print("""Usage: python visualise_labels.py <datafile>
Display the groundtruth 3D bounding boxes on the front camera video stream.""")
    sys.exit(0)
# Open a .tfrecord
filename = sys.argv[1]
datafile = WaymoDataFileReader(filename)
# Generate a table of the offset of all frame records in the file.
table = datafile.get_record_table()
print("There are %d frames in this file." % len(table))
# Loop through the whole file
## and display 3D labels.
for frame in datafile:
    # Only the FRONT camera is visualised.
    camera_name = dataset_pb2.CameraName.FRONT
    camera_calibration = utils.get(frame.context.camera_calibrations, camera_name)
    camera = utils.get(frame.images, camera_name)
    camera_labels = utils.get(frame.camera_labels, camera_name)
    camera_labels = camera_labels.labels
    display_labels_on_image(
        camera_calibration, camera,
        frame.laser_labels, camera_labels, 10)
# Alternative: Displaying a single frame:
# # Jump to the frame 150
# datafile.seek(table[150])
#
# # Read and display this frame
# frame = datafile.read_record()
# display_labels_on_image(frame.context.camera_calibrations[0], frame.images[0], frame.laser_labels)
# Alternative: Displaying a 10 frames:
# # Jump to the frame 150
# datafile.seek(table[150])
#
# for _ in range(10):
# # Read and display this frame
# frame = datafile.read_record()
# display_labels_on_image(frame.context.camera_calibrations[0], frame.images[0], frame.laser_labels, 10)
| 34.052083 | 108 | 0.722239 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,657 | 0.506728 |
f65f255a9168cd924c215d076e0c41cb26abdeb2 | 1,833 | py | Python | utils.py | ProRansum/site-scanner | 39988350980c477996f6bb18b1db3cc097cc0893 | [
"MIT"
] | null | null | null | utils.py | ProRansum/site-scanner | 39988350980c477996f6bb18b1db3cc097cc0893 | [
"MIT"
] | null | null | null | utils.py | ProRansum/site-scanner | 39988350980c477996f6bb18b1db3cc097cc0893 | [
"MIT"
import os
import socket
import codecs
import urllib.request
import urllib3

from urllib.parse import urlparse
def __process__(command):
    """Execute *command* through the shell and return its stdout as a string.

    WARNING: the command runs through a shell; callers must never pass it
    untrusted input.
    """
    process = os.popen(command)
    try:
        return str(process.read())
    finally:
        # Fixed: the pipe was previously never closed (resource leak).
        process.close()
def create_dir(directory):
    """Create *directory* (including parents) when it does not exist yet."""
    if not os.path.exists(directory):
        # exist_ok closes the TOCTOU race between the check above and the
        # creation below (another process may create the directory first).
        os.makedirs(directory, exist_ok=True)
        print("[+] Directory created")
def write_file(filepath, data=''):
    """Write *data* (empty string when falsy) to *filepath*, truncating it.

    Uses a context manager so the file handle is closed even on error
    (the original leaked the handle if write() raised).
    """
    with open(filepath, 'w') as f:
        f.write('' if not data else data)
def get_domain_name(url):
    """Return (domain_name, normalised_url) for *url*.

    Prepends 'https://' when *url* has no scheme. Returns None (implicitly)
    when parsing fails.
    """
    print("[+] Resolving Domain Name")
    try:
        parsed = urlparse(url)
        if not parsed.scheme:
            print("[!] No protocol scheme not found, default to https.")
            url = 'https://%s' % url
            parsed = urlparse(url)
        domain_name = parsed.netloc
        return domain_name, url
    except Exception:
        # Fixed: narrowed from a bare except (which also swallowed
        # KeyboardInterrupt/SystemExit).
        print("[!] Failed to resolve Domain Name.")
def get_whois(domain_name):
    """Return raw `whois` output for *domain_name*, or None on failure.

    WARNING: domain_name is interpolated into a shell command; never pass
    untrusted input.
    """
    print("[+] Fetching WhoIs Data.")
    command = "whois %s" % domain_name
    try:
        return __process__(command)
    except Exception:
        # Fixed: narrowed from a bare except so Ctrl-C still interrupts.
        print("[!] Failed to get Whois Data.")
        return None
def get_ip_address(domain_name):
    """Resolve *domain_name* to an IPv4 address string, or None on failure."""
    print("[+] Fetching IP Address of Domain")
    try:
        ip_address = socket.gethostbyname(domain_name)
        return ip_address
    except socket.error:
        # Fixed: socket.error (== OSError) covers gaierror/herror without
        # swallowing KeyboardInterrupt like the previous bare except did.
        print("[!] Failed to resolve IP Address.")
def get_nmap(options, ip):
    """Run `nmap <options> <ip>` and return its stdout, or None on failure.

    WARNING: options/ip are interpolated into a shell command line; never
    pass untrusted input to this function.
    """
    print("[+] Retrieving Nmap Data.")
    command = "nmap %s %s" % (options, ip)
    try:
        return __process__(command)
    except Exception:
        # Fixed: narrowed from a bare except.
        print("[!] Failed to retrieve Nmap Data.")
def get_robots_txt(url):
    """Fetch <url>/robots.txt and return it as text, or None on failure.

    Fixes in this version: the original referenced the undefined name
    `urllib2` (the file imports urllib3), left `path` unbound when *url*
    had no trailing slash, and called .encode() on the bytes response.
    """
    print("[+] Fetching robots.txt.")
    path = url[:-1] if url.endswith('/') else url
    try:
        req = urllib.request.Request('%s/robots.txt' % path, data=None)
        with urllib.request.urlopen(req) as response:
            page = response.read()
        return page.decode('utf8', errors='replace')
    except Exception:
        print("[+] Failed to retrieve robots.txt.")
| 19.09375 | 63 | 0.675396 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 486 | 0.265139 |
f6606f4ec40d7bf3ddd3d04cdb7161f2cd3e479b | 8,504 | py | Python | finnhub/models/filing.py | gavinjay/finnhub-python | b5c409dafeda390d14a2b0618ae6f25ab8d76c5b | [
"Apache-2.0"
] | null | null | null | finnhub/models/filing.py | gavinjay/finnhub-python | b5c409dafeda390d14a2b0618ae6f25ab8d76c5b | [
"Apache-2.0"
] | null | null | null | finnhub/models/filing.py | gavinjay/finnhub-python | b5c409dafeda390d14a2b0618ae6f25ab8d76c5b | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Finnhub API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from finnhub.configuration import Configuration
class Filing(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps python attribute name -> declared OpenAPI type string.
    openapi_types = {
        'access_number': 'str',
        'symbol': 'str',
        'cik': 'str',
        'form': 'str',
        'filed_date': 'datetime',
        'accepted_date': 'datetime',
        'report_url': 'str',
        'filing_url': 'str'
    }

    # Maps python attribute name -> JSON key used in the API payload.
    attribute_map = {
        'access_number': 'accessNumber',
        'symbol': 'symbol',
        'cik': 'cik',
        'form': 'form',
        'filed_date': 'filedDate',
        'accepted_date': 'acceptedDate',
        'report_url': 'reportUrl',
        'filing_url': 'filingUrl'
    }

    def __init__(self, access_number=None, symbol=None, cik=None, form=None, filed_date=None, accepted_date=None, report_url=None, filing_url=None, local_vars_configuration=None):  # noqa: E501
        """Filing - a model defined in OpenAPI"""  # noqa: E501
        # A default client Configuration is used when none is supplied.
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._access_number = None
        self._symbol = None
        self._cik = None
        self._form = None
        self._filed_date = None
        self._accepted_date = None
        self._report_url = None
        self._filing_url = None
        self.discriminator = None

        # Every field is optional; only explicitly passed values are assigned.
        if access_number is not None:
            self.access_number = access_number
        if symbol is not None:
            self.symbol = symbol
        if cik is not None:
            self.cik = cik
        if form is not None:
            self.form = form
        if filed_date is not None:
            self.filed_date = filed_date
        if accepted_date is not None:
            self.accepted_date = accepted_date
        if report_url is not None:
            self.report_url = report_url
        if filing_url is not None:
            self.filing_url = filing_url

    @property
    def access_number(self):
        """Gets the access_number of this Filing. # noqa: E501

        Access number. # noqa: E501

        :return: The access_number of this Filing. # noqa: E501
        :rtype: str
        """
        return self._access_number

    @access_number.setter
    def access_number(self, access_number):
        """Sets the access_number of this Filing.

        Access number. # noqa: E501

        :param access_number: The access_number of this Filing. # noqa: E501
        :type: str
        """
        self._access_number = access_number

    @property
    def symbol(self):
        """Gets the symbol of this Filing. # noqa: E501

        Symbol. # noqa: E501

        :return: The symbol of this Filing. # noqa: E501
        :rtype: str
        """
        return self._symbol

    @symbol.setter
    def symbol(self, symbol):
        """Sets the symbol of this Filing.

        Symbol. # noqa: E501

        :param symbol: The symbol of this Filing. # noqa: E501
        :type: str
        """
        self._symbol = symbol

    @property
    def cik(self):
        """Gets the cik of this Filing. # noqa: E501

        CIK. # noqa: E501

        :return: The cik of this Filing. # noqa: E501
        :rtype: str
        """
        return self._cik

    @cik.setter
    def cik(self, cik):
        """Sets the cik of this Filing.

        CIK. # noqa: E501

        :param cik: The cik of this Filing. # noqa: E501
        :type: str
        """
        self._cik = cik

    @property
    def form(self):
        """Gets the form of this Filing. # noqa: E501

        Form type. # noqa: E501

        :return: The form of this Filing. # noqa: E501
        :rtype: str
        """
        return self._form

    @form.setter
    def form(self, form):
        """Sets the form of this Filing.

        Form type. # noqa: E501

        :param form: The form of this Filing. # noqa: E501
        :type: str
        """
        self._form = form

    @property
    def filed_date(self):
        """Gets the filed_date of this Filing. # noqa: E501

        Filed date <code>%Y-%m-%d %H:%M:%S</code>. # noqa: E501

        :return: The filed_date of this Filing. # noqa: E501
        :rtype: datetime
        """
        return self._filed_date

    @filed_date.setter
    def filed_date(self, filed_date):
        """Sets the filed_date of this Filing.

        Filed date <code>%Y-%m-%d %H:%M:%S</code>. # noqa: E501

        :param filed_date: The filed_date of this Filing. # noqa: E501
        :type: datetime
        """
        self._filed_date = filed_date

    @property
    def accepted_date(self):
        """Gets the accepted_date of this Filing. # noqa: E501

        Accepted date <code>%Y-%m-%d %H:%M:%S</code>. # noqa: E501

        :return: The accepted_date of this Filing. # noqa: E501
        :rtype: datetime
        """
        return self._accepted_date

    @accepted_date.setter
    def accepted_date(self, accepted_date):
        """Sets the accepted_date of this Filing.

        Accepted date <code>%Y-%m-%d %H:%M:%S</code>. # noqa: E501

        :param accepted_date: The accepted_date of this Filing. # noqa: E501
        :type: datetime
        """
        self._accepted_date = accepted_date

    @property
    def report_url(self):
        """Gets the report_url of this Filing. # noqa: E501

        Report's URL. # noqa: E501

        :return: The report_url of this Filing. # noqa: E501
        :rtype: str
        """
        return self._report_url

    @report_url.setter
    def report_url(self, report_url):
        """Sets the report_url of this Filing.

        Report's URL. # noqa: E501

        :param report_url: The report_url of this Filing. # noqa: E501
        :type: str
        """
        self._report_url = report_url

    @property
    def filing_url(self):
        """Gets the filing_url of this Filing. # noqa: E501

        Filing's URL. # noqa: E501

        :return: The filing_url of this Filing. # noqa: E501
        :rtype: str
        """
        return self._filing_url

    @filing_url.setter
    def filing_url(self, filing_url):
        """Sets the filing_url of this Filing.

        Filing's URL. # noqa: E501

        :param filing_url: The filing_url of this Filing. # noqa: E501
        :type: str
        """
        self._filing_url = filing_url

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Equality is by serialized field values, not identity.
        if not isinstance(other, Filing):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, Filing):
            return True

        return self.to_dict() != other.to_dict()
| 26.658307 | 193 | 0.563617 | 8,136 | 0.956726 | 0 | 0 | 4,320 | 0.507996 | 0 | 0 | 4,249 | 0.499647 |
f661ad9fa986a012f353988d72881833e9a5c889 | 1,876 | py | Python | year_2019/day_11_2019.py | TheTomcat/AdventOfCode | bb81a46f7064bd69e924a0b58e6c5e0cc450e102 | [
"MIT"
] | null | null | null | year_2019/day_11_2019.py | TheTomcat/AdventOfCode | bb81a46f7064bd69e924a0b58e6c5e0cc450e102 | [
"MIT"
] | null | null | null | year_2019/day_11_2019.py | TheTomcat/AdventOfCode | bb81a46f7064bd69e924a0b58e6c5e0cc450e102 | [
"MIT"
] | null | null | null | from collections import defaultdict
from typing import List, Any, Tuple
from util.helpers import solution_timer
from util.input_helper import read_entire_input
from util.console import console
from year_2019.intcode import IntCode, parse
# Puzzle input for 2019 day 11, loaded at import time (re-read under __main__).
data = read_entire_input(2019,11)
def run_robot(data:List[str], init=0):
    """Run the hull-painting robot and return {position: colour} for every
    panel it visited. `init` is the colour of the starting panel."""
    debug = False
    robot = IntCode(data, debug=debug)
    is_white = defaultdict(int)  # unpainted panels read as black (0)
    is_white[(0, 0)] = init
    position = (0, 0)
    direction = (0, 1)
    while not robot.halted:
        colour = robot.run([is_white[position]])
        turn = robot.run([])
        if debug:
            console.print(f"ROBO pos{position}-dir{direction} -> current colour: {is_white[position]} -> {colour}. Turning {turn}")
        is_white[position] = colour
        dx, dy = direction
        if turn == 0:
            # rotate left 90 degrees
            direction = (-dy, dx)
        elif turn == 1:
            # rotate right 90 degrees
            direction = (dy, -dx)
        position = (position[0] + direction[0], position[1] + direction[1])
        if debug:
            console.print(f"MOVE pos{position}-dir{direction} -> current colour: {is_white[position]}")
    return is_white
@solution_timer(2019,11,1)
def part_one(data: List[str]):
    """Number of panels painted at least once, starting on a black panel."""
    program = parse(data)
    painted = run_robot(program)
    return len(painted)
@solution_timer(2019,11,2)
def part_two(data: List[str]):
    """Render the registration identifier painted when starting on white."""
    program = parse(data)
    is_white = run_robot(program, 1)
    # Snapshot the coordinates first: indexing the defaultdict below inserts keys.
    xs = [p[0] for p in is_white]
    ys = [p[1] for p in is_white]
    rows = []
    for y in range(max(ys), min(ys) - 1, -1):
        row = ''.join(chr(9608) if is_white[(x, y)] else ' ' for x in range(min(xs), max(xs) + 1))
        rows.append(row)
    return '\n' + '\n'.join(rows)
if __name__ == "__main__":
    # Script entry point: solve both parts for 2019 day 11.
    data = read_entire_input(2019, 11)
    part_one(data)
    part_two(data)
f6630d45727b60cf6121d275d6c726d45711227d | 287 | py | Python | Life.py | lianwt115/python_pygame | 4d70846740e8905f63059de107a8d0831d9ffe0c | [
"MIT"
] | 1 | 2018-08-14T02:54:14.000Z | 2018-08-14T02:54:14.000Z | Life.py | lianwt115/python_pygame | 4d70846740e8905f63059de107a8d0831d9ffe0c | [
"MIT"
] | null | null | null | Life.py | lianwt115/python_pygame | 4d70846740e8905f63059de107a8d0831d9ffe0c | [
"MIT"
] | null | null | null | # 子弹
import pygame
class Life(pygame.sprite.Sprite):
    """A sprite placed at a fixed position that removes itself on first update."""

    def __init__(self, img, init_pos):
        super().__init__()
        self.image = img
        self.rect = self.image.get_rect()
        self.rect.topleft = init_pos

    def update(self):
        # Remove this sprite from all groups on the next update tick.
        self.kill()
| 17.9375 | 43 | 0.620209 | 265 | 0.910653 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.027491 |
f665d858f07e9b00c3c2dbdc919c6e8a395f2e99 | 6,588 | py | Python | course_selection/scrape_parse.py | PrincetonUSG/ReCal | a65782602b1a6d8932ff4cca1526cb9a3addacf6 | [
"MIT"
] | 1 | 2017-01-01T22:17:52.000Z | 2017-01-01T22:17:52.000Z | course_selection/scrape_parse.py | PrincetonUSG/recal | d748f6871e7f7bbbd7e9d66482de040c42c8c18b | [
"MIT"
] | 1 | 2022-02-20T19:43:33.000Z | 2022-02-20T19:43:33.000Z | course_selection/scrape_parse.py | PrincetonUSG/ReCal | a65782602b1a6d8932ff4cca1526cb9a3addacf6 | [
"MIT"
] | null | null | null | """
Scrapes OIT's Web Feeds to add courses and sections to database.
Procedure:
- Get list of departments (3-letter department codes)
- Run this: http://etcweb.princeton.edu/webfeeds/courseofferings/?term=current&subject=COS
- Parse it for courses, sections, and lecture times (as recurring events)
"""
from mobileapp import MobileApp
class ParseError(Exception):
    """Raised when an expected field is missing while parsing course data."""

    def __init__(self, value):
        # Keep the offending value for diagnostics.
        self.value = value

    def __str__(self):
        return repr(self.value)
# Scrape and parse one semester of Princeton course data via the MobileApp
# web feed. Returns a list of parsed course dicts (see parse_course below).
# All helpers are closures over TERM_CODE / CURRENT_SEMESTER.
def scrape_parse_semester(term_code):
TERM_CODE = term_code
# One-element list used as a mutable cell so nested functions can cache
# the semester dict without a `global`/`nonlocal` statement.
CURRENT_SEMESTER = ['']
def get_text(key, object):
return raise_if_none(object.get(key), "key " + key + " does not exist")
def get_current_semester(data):
""" get semester according to TERM_CODE
"""
#global CURRENT_SEMESTER
# Lazily build and cache the semester record from the first response seen.
if not CURRENT_SEMESTER[0]:
term = data['term'][0]
CURRENT_SEMESTER[0] = {
'start_date': get_text('start_date', term),
'end_date': get_text('end_date', term),
'term_code': str(TERM_CODE),
}
return CURRENT_SEMESTER[0]
# Fetch the 3-letter department codes for the term.
def get_department_list():
res = MobileApp().get_courses(term=term_code, subject='list')
try:
codes = [k['code'] for k in res['term'][0]['subjects']]
# Sanity probe: raises if fewer than two codes came back.
codes[0] and codes[1]
except:
raise Exception('failed to get all department codes')
return codes
def scrape_all():
""" scrape all events from Princeton's course webfeed
"""
#global course_count
#global section_count
departments = get_department_list()
courses = []
for department in departments:
print('Processing ' + department)
courses += scrape(department)
return courses
# goes through the listings for this department
def scrape(department):
""" Scrape all events listed under department
"""
data = MobileApp().get_courses(term=TERM_CODE, subject=department)
if data['term'][0].get('subjects') is None:
print('Empty MobileApp response')
return []
parsed_courses = []
try:
for subject in data['term'][0]['subjects']:
for course in subject['courses']:
x = parse_course(data, course, subject)
if x is not None:
parsed_courses.append(x)
except Exception as e:
# NOTE(review): a single bad course aborts the whole department and
# returns [] — earlier successes in parsed_courses are discarded.
print('Potential missing key')
return []
return parsed_courses
def none_to_empty(text):
if text is None:
return ''
else:
return text
def none_to_empty_list(x):
if x is None:
return []
else:
return x
def raise_if_none(text, error_message):
if text is None:
raise ParseError(error_message)
return text
# Parse it for courses, sections, and lecture times (as recurring events)
# If the course with this ID exists in the database, we update the course
# Otherwise, create new course with the information
def parse_course(data, course, subject):
""" create a course with basic information.
"""
try:
#global new_course_count
#global course_count
return {
"title": course['title'],
"guid": course['guid'],
"description": none_to_empty(course['detail']['description']),
"semester": get_current_semester(data),
"professors": [parse_prof(x) for x in course['instructors']],
"course_listings": parse_listings(course, subject),
"sections": [parse_section(x) for x in course['classes']]
}
except Exception as inst:
# print inst
raise inst
# Unreachable: the except above always re-raises.
return None
# may decide to make this function for just one prof/listing/section, then
# do a map
def parse_prof(prof):
return {
"full_name": prof['full_name']
}
# Build the list of listings: all cross-listings plus the primary listing.
def parse_listings(course, subject):
def parse_cross_listing(cross_listing):
return {
'dept': cross_listing['subject'],
'code': cross_listing['catalog_number'],
'is_primary': False
}
cross_listings = [parse_cross_listing(
x) for x in none_to_empty_list(course['crosslistings'])]
primary_listing = {
'dept': get_text('code', subject),
'code': course['catalog_number'],
'is_primary': True
}
return cross_listings + [primary_listing]
def parse_section(section):
def parse_meeting(meeting):
def get_days(meeting):
days = ""
for day in meeting['days']:
days += day + ' '
return days[:10]
def get_location(meeting):
location = ''
try:
building = meeting['building']['name']
room = meeting['room']
location = building + " " + room
except Exception as e:
raise e
finally:
# NOTE(review): `return` in finally swallows the re-raised
# exception, so a missing building/room yields '' silently.
return location
# the times are in the format:
# HH:MM AM/PM
return {
'start_time': '01:00 AM' if 'start_time' not in meeting else meeting['start_time'],
'end_time': '01:00 AM' if 'end_time' not in meeting else meeting['end_time'],
'days': get_days(meeting),
'location': get_location(meeting),
}
# NOTE: section.find('schedule') doesn't seem to be used
meetings = None
schedule = section['schedule']
if schedule is not None:
meetings = schedule['meetings']
# "M99" placeholder sections keep the UNKNOWN type.
typeName = "UNKNOWN"
if get_text('section', section) != "M99":
try:
typeName = get_text('type_name', section)
except:
print("error reading section")
test = {
'registrar_id': get_text('class_number', section),
'name': get_text('section', section),
'type': typeName[0:3].upper(),
'capacity': get_text('capacity', section),
'enrollment': get_text('enrollment', section),
'meetings': [parse_meeting(x) for x in none_to_empty_list(meetings)]
}
return test
return scrape_all()
| 32.453202 | 99 | 0.548118 | 142 | 0.021554 | 0 | 0 | 0 | 0 | 0 | 0 | 1,922 | 0.291743 |
f66643e629de4e532f98ff43ed8508b807166f50 | 34,590 | py | Python | wxRavenGUI/application/core/wxViewsManager.py | sLiinuX/wxRaven | a513a029fa1ff2059ee262c524b4b2b45111f1a6 | [
"MIT"
] | 11 | 2021-12-20T15:32:17.000Z | 2022-03-16T03:54:02.000Z | wxRavenGUI/application/core/wxViewsManager.py | sLiinuX/wxRaven | a513a029fa1ff2059ee262c524b4b2b45111f1a6 | [
"MIT"
] | 156 | 2021-12-31T21:01:31.000Z | 2022-03-20T21:57:31.000Z | wxRavenGUI/application/core/wxViewsManager.py | sLiinuX/wxRaven | a513a029fa1ff2059ee262c524b4b2b45111f1a6 | [
"MIT"
] | 3 | 2022-01-21T14:52:43.000Z | 2022-02-12T05:32:19.000Z | '''
Created on 13 déc. 2021
@author: slinux
'''
import inspect
from wxRavenGUI.view import wxRavenAddView
from wxRavenGUI.application.wxcustom.CustomDialog import wxRavenCustomDialog
import wx
import wx.aui
import logging
from .jobs import *
# Central registry of dockable views/areas for the main frame.
class ViewsManager(object):
'''
classdocs
'''
parentframe = None
# NOTE(review): these dicts are *class-level* and therefore shared across
# instances; __init__ rebinds self.dialogs but all_areas is only ever
# mutated in place — fine for a single-instance app, confirm otherwise.
dialogs = {}
all_areas = {}
#all_views = {}
#viewsChangeCallbacks=[]
force_mgr = False
def __init__(self, parentframe, forceinprincipalauimanager=False):
    """Create the view manager and hook the AUI pane/page events."""
    self.parentframe = parentframe
    self.force_mgr = forceinprincipalauimanager
    self.nViewDialog = None
    self.dialogs = {}
    self.logger = logging.getLogger('wxRaven')
    self.InitViewManager()
    # Keep menus/toolbars in sync whenever a pane or page changes.
    parentframe.Bind(wx.aui.EVT_AUINOTEBOOK_PAGE_CHANGED, self.OnAuiPaneActivated)
    parentframe.Bind(wx.aui.EVT_AUI_PANE_ACTIVATED, self.OnAuiPaneActivated)
    parentframe.Bind(wx.aui.EVT_AUI_PANE_CLOSE, self.OnAuiPaneClose)
parentframe.Bind( wx.aui.EVT_AUI_PANE_CLOSE, self.OnAuiPaneClose )
#parentframe.Bind( wx.aui.EVT_AUINOTEBOOK_PAGE_CLOSE, self.OnAuiPaneClose )
#parentframe.Bind( wx.aui.EVT_AUINOTEBOOK_PAGE_CLOSE, self.OnAuiNotebookPageClose )
#
#
# Event management (for GUI refresh)
#
#
#
def __refreshGUI_Job__(self, evt=None):
    """Queue an asynchronous GUI refresh job on the 'General' plugin."""
    general_plugin = self.parentframe.GetPlugin('General')
    refresh_job = Job_RefreshGUI(general_plugin, viewCallback=None, safeMode=True)
    self.parentframe.NewJob(refresh_job)
def OnAuiNotebookPageClose(self, evt):
    """Notebook page closed: run the page cleanup, then refresh the GUI."""
    self.logger.info("OnAuiNotebookPageClose in view man ")
    self.OnPageClose(evt)
    self.__refreshGUI_Job__()
def OnAuiPaneClose(self, evt):
    """AUI pane closed: schedule a GUI refresh."""
    self.logger.info("OnAuiPaneClose in view man ")
    self.__refreshGUI_Job__()
def OnAuiPaneActivated(self, evt):
    """AUI pane or notebook page activated: schedule a GUI refresh."""
    self.logger.info("OnAuiPaneActivated in view man : tweaked version in test")
    self.__refreshGUI_Job__()
# Close handler for notebook pages: unregister the view instance that backs
# the page and try the various close entry points the view may expose.
def OnPageClose( self, event ):
self.logger.info("OnPageClose")
objNotebook = event.GetEventObject()
index = event.GetSelection()
page = objNotebook.GetPage(index)
label = objNotebook.GetPageText(index)
self.logger.info("view instance to delete !")
_v = self.parentframe.Plugins.GetViewNameInstance(label)
if _v != None:
self.logger.info('instance found, close it')
self.parentframe.Plugins.DeleteViewInstance(label)
# NOTE(review): the `_closed=True` assignments below are commented out,
# so _closed stays False and Close(), OnClose(None) and safeClose(None)
# are ALL attempted in turn (each failure only logged). Looks deliberate
# (best-effort close) — confirm before "fixing".
_closed=False
try:
_v['instance'].Close()
#_closed=True
except Exception as e:
self.logger.error("_v['instance'].Close() " + str(e))
if not _closed:
try:
_v['instance'].OnClose(None)
#_closed=True
except Exception as e:
self.logger.error("_v['instance'].OnClose() " + str(e))
if not _closed:
try:
_v['instance'].safeClose(None)
#_closed=True
except Exception as e:
#pass
self.logger.error("_v['instance'].SafeClose() " + str(e))
#wx.CallAfter(self.parentframe.MenusAndTool.refreshViewsListMenu, ())
self.__refreshGUI_Job__()
event.Skip()
#SearchViewPanelInManager
#
#
# Init and creation of the view manager
#
#
#
def InitViewManager(self):
    """Register the standard docking areas and wire their close/theme setup."""
    frame = self.parentframe
    self.AddArea('main', frame.wxRavenMainBook)
    self.AddArea('mgr', frame.m_mgr)

    notebooks = (frame.wxRavenToolBook1, frame.wxRavenToolBook2,
                 frame.wxRavenToolBook3, frame.wxRavenMainBook)
    for book in notebooks:
        book.Bind(wx.aui.EVT_AUINOTEBOOK_PAGE_CLOSE, self.OnPageClose)
    for book in notebooks:
        frame.RessourcesProvider.ApplyThemeOnPanel(book)

    if not self.force_mgr:
        self.AddArea('toolbox1', frame.wxRavenToolBook1)
        self.AddArea('toolbox2', frame.wxRavenToolBook2)
        self.AddArea('toolbox3', frame.wxRavenToolBook3)
    else:
        # Everything is hosted in the main manager: drop the toolbox panes.
        for pane_name in ('toolbox1', 'toolbox2', 'toolbox3'):
            frame.m_mgr.GetPane(pane_name).DestroyOnClose(True)
        for pane_name in ('toolbox1', 'toolbox2', 'toolbox3'):
            frame.m_mgr.ClosePane(frame.m_mgr.GetPane(pane_name))

    if frame._isReady:
        self.__refreshGUI_Job__()
def AddArea(self, frameName, Obj):
    """Register a named docking area (notebook or AUI manager)."""
    self.all_areas[frameName] = Obj
    self.logger.info(f"Add Area {frameName}")
def GetAllAreas(self):
    """Return the mapping of area name -> area widget."""
    return self.all_areas
def GetArea(self, areaName):
    """Return the widget registered under *areaName*, or None if unknown."""
    if areaName in self.all_areas:
        return self.all_areas[areaName]
    return None
# Add a view panel to a named area ('main', 'mgr', 'toolbox*') or as a dialog.
def Add(self, obj, nameFrame, position="main", icon=None):
self.logger.info("Add")
# NOTE(review): the 'dialog' branch does not return, so after AddDialog the
# area lookup below fails and the "Invalid position" error is also logged.
if position=="dialog":
self.logger.info("FORCE Dialog !")
self.AddDialog(obj, nameFrame, position, icon)
if self.all_areas.__contains__(position):
targetPosition= self.all_areas[position]
if self.force_mgr:
position = "main"
self.logger.info("FORCE MAIN !")
else:
self.logger.info(f"position received = {position}")
# Dispatch on the (possibly overridden) position name.
if position == "main":
self.AddInMainbook(obj, nameFrame, icon=icon)
if position == "mgr":
self.AddInMainFrame(obj, nameFrame, icon=icon)
if position.__contains__("toolbox") or position.__contains__("Notebook Toolbox") :
#self.logger.info("Position !!!")
self.AddInNotebook(obj, nameFrame, targetPosition, icon=icon)
self.RaiseViewLog("New View ["+nameFrame+"] has been added in '"+position+"'", "info")
#self.AddView(nameFrame, obj, icon)
else:
self.RaiseViewLog("["+nameFrame+"] : Invalid position '"+position+"'", "error")
def SearchDialog(self, dname):
    """Return the registered dialog instance named *dname*, or None."""
    for name, instance in self.dialogs.items():
        if name == dname:
            return instance
    return None
def __registerDialog__(self, dname, dinst):
    """Remember an open dialog instance under its name."""
    self.dialogs[dname] = dinst
def __unregisterDialog__(self, dname):
    """Forget a dialog by overwriting its slot with None (the key is kept)."""
    self.dialogs[dname] = None
def AddDialog(self, _view, nameFrame="", position="dialog", icon=None):
    """Create, register and show a floating dialog for the view spec _view[0]."""
    self.logger.info(_view)
    spec = _view[0]
    if nameFrame == "":
        nameFrame = spec['name']
    if icon is None:
        icon = spec['icon']
    dialog = wxRavenCustomDialog(self.parentframe, spec, title=nameFrame, icon=icon)
    self.__registerDialog__(nameFrame, dialog)
    dialog.Show()
def ShowAllActiveViews(self):
    """Make every non-toolbar pane of the main AUI manager visible.

    Fix: dropped the unused caption/name locals read per pane.
    """
    # TODO(review): panes hosted in the toolbox notebooks are not covered here.
    for pane in self.parentframe.m_mgr.GetAllPanes():
        if not pane.IsToolbar():
            pane.Show()
    self.__refreshGUI_Job__()
def DestroyAllNonVisible(self):
    """Destroy every hidden non-toolbar pane in the main AUI manager.

    Hidden panes are collected first and closed afterwards, so the pane
    list is not mutated while it is being iterated.
    """
    # TODO(review): toolbox notebooks only partially covered (their pages
    # destroy the backing view on close).
    to_delete = []
    for pane in self.parentframe.m_mgr.GetAllPanes():
        if not pane.IsToolbar() and not pane.IsShown():
            self.logger.info(f"Hidden dialog found : {pane.caption} - {pane.name}")
            self.parentframe.Plugins.DeleteViewInstance(pane.name)
            pane.DestroyOnClose(True)
            to_delete.append(pane)
    for pane in to_delete:
        self.logger.info(f"removing {pane}")
        self.parentframe.m_mgr.ClosePane(pane)
        self.RaiseViewLog("[" + str(pane) + "] has been destroyed.", "info")
    self.__refreshGUI_Job__()
def UpdateGUIManager(self, evt=None):
    """Re-realize the main toolbar and force an AUI update + frame layout."""
    self.parentframe.m_mgr.GetPane("wxRavenToolBar").window.Realize()
    self.parentframe.m_mgr.Update()
    self.parentframe.Layout()
"""
def RegisterOnViewsChanged(self, callback):
if not self.viewsChangeCallbacks.__contains__(callback):
self.viewsChangeCallbacks.append(callback)
def UnregisterOnViewsChanged(self, callback):
if self.viewsChangeCallbacks.__contains__(callback):
self.viewsChangeCallbacks.remove(callback)
def SafeCallbackLoop(self, connexionName):
for c in self.networkChangeCallbacks:
try:
c(connexionName)
except Exception as e:
#self.logger.info(e)
self.RaiseViewError()
"""
def RaiseViewLog(self, message, type="error"):
    """Forward *message* to the main frame log, tagged with the caller frame."""
    try:
        caller = str(inspect.stack()[1][0])
        self.parentframe.Log(message, source=str(caller), type=type)
    except Exception as e:
        self.logger.error("RaiseViewError() " + str(e))
def HideParentInManager(self, instanceParent):
    """Hide the AUI pane whose window is *instanceParent*, then refresh."""
    for pane in self.parentframe.m_mgr.GetAllPanes():
        if not pane.IsToolbar() and pane.window == instanceParent:
            pane.Hide()
    self.__refreshGUI_Job__()
def ShowParentInManager(self, instanceParent):
    """Show the AUI pane whose window is *instanceParent*, then refresh."""
    for pane in self.parentframe.m_mgr.GetAllPanes():
        if not pane.IsToolbar() and pane.window == instanceParent:
            pane.Show(True)
    self.__refreshGUI_Job__()
def SearchViewInstance(self, viewname):
    """Look up a view record by name (then by viewid) across all plugins."""
    for plugin_name in self.parentframe.Plugins.plugins:
        plugin = self.parentframe.GetPlugin(plugin_name)
        record = plugin.GetViewAttrDetails(viewname, attr="name")
        if record is None:
            record = plugin.GetViewAttrDetails(viewname, attr="viewid")
        if record is not None:
            return record
    return None
def SearchViewPanelInManager(self, viewname):
    """Find the AUI pane whose name or caption equals *viewname*, else None."""
    for pane in self.parentframe.m_mgr.GetAllPanes():
        if viewname in (pane.name, pane.caption):
            return pane
    return None
def UpdateView(self, viewname):
    """Ask the named view instance (if any) to refresh itself.

    Fix: removed the unused `_visible` local the original assigned.
    """
    record = self.SearchViewInstance(viewname)
    if record is not None:
        record['instance'].UpdateView()
def isViewVisible(self, viewname):
    """Best-effort check whether a view's pane or instance is shown on screen."""
    target = self.SearchViewPanelInManager(viewname)
    if target is None:
        record = self.SearchViewInstance(viewname)
        if record is not None:
            target = record['instance']
    visible = False
    if target is not None:
        # The object may be an AuiPaneInfo (has .window) or a panel instance;
        # probe each API in turn and ignore whichever is missing.
        try:
            if target.window.IsShownOnScreen():
                visible = True
        except Exception:
            pass
        try:
            if target.IsShownOnScreen():
                visible = True
        except Exception:
            pass
        try:
            if target.IsShown():
                visible = True
        except Exception:
            pass
    return visible
# Hide the named view's pane; toolbox-hosted views also hide the hosting area.
def HideView(self, viewName, pluginname=""):
_v = None
_v = self.SearchViewInstance(viewName)
if _v != None:
if _v['position'] == 'mgr':
self.parentframe.m_mgr.GetPane(_v['name']).Hide()
elif _v['position'] == 'main':
# Main-notebook pages are not hidden here (would need RemovePage).
#RemovePage
pass
elif _v['position'] == 'toolbox1':
self.parentframe.m_mgr.GetPane(_v['name']).Hide()
_parent = self.GetArea(_v['position'])
self.HideParentInManager(_parent)
else:
# NOTE(review): identical to the 'toolbox1' branch above.
self.parentframe.m_mgr.GetPane(_v['name']).Hide()
_parent = self.GetArea(_v['position'])
self.HideParentInManager(_parent)
_parentArea = self.GetArea(_v['position'])
if _parentArea != None :
self.HideParentInManager(_parentArea)
#self.UpdateGUIManager()
self.__refreshGUI_Job__()
def RenameView(self):
    """Not implemented yet."""
    pass
#
#
#Best and cleanest way to call a view
#
# Locate a view by name (optionally scoped to one plugin) and bring it to the
# front; with createIfNull=True the view is instantiated if it does not exist.
# Returns the view record / spec, or None.
def OpenView(self, viewName, pluginname="", createIfNull=False):
_defaultArea = self.parentframe.GetPluginSetting("General", 'defaultviewarea')
_v = None
_isDialog = False
if pluginname == "":
_v = self.SearchViewInstance(viewName)
"""
for _p in self.parentframe.Plugins.plugins:
#self.logger.info(f"scanning {_p}")
_plugin = self.parentframe.GetPlugin(_p)
_v = _plugin.GetViewAttrDetails(viewName, attr="name")
if _v != None:
#self.logger.info("found!")
break
"""
else:
_plugin = self.parentframe.GetPlugin(pluginname)
_v = _plugin.GetViewAttrDetails(viewName, attr="name")
if _v == None:
_v = _plugin.GetViewAttrDetails(viewName, attr="viewid")
# Fall back to open dialogs when no docked view matched.
if _v== None:
_v = self.SearchDialog(viewName)
_isDialog = True
if _v != None :
#self.logger.info(_v)
if not _isDialog :
if self.force_mgr:
position = "main"
_v['position']= 'main'
self.logger.info("FORCE MAIN !")
# Dispatch on where the view lives: AUI manager, main notebook, toolbox.
if _v['position'] == 'mgr':
self.logger.info(f"{_v['position']} will be managed dynamically with Manager")
self.parentframe.m_mgr.GetPane(_v['name']).Show(True)
#self.UpdateGUIManager()
self.__refreshGUI_Job__()
elif _v['position'] == 'main':
self.logger.info(f"{_v['position']} will be managed dynamically with MainNotebook")
self.SetCurrentView_Notebook(viewName, self.parentframe.wxRavenMainBook)
elif _v['position'] == 'toolbox1':
self.logger.info(f"{_v['position']} will be managed dynamically with Toolbox")
_parent = self.GetArea(_v['position'])
#self.parentframe.m_mgr.GetPane("Toolbox1").Show(True)
self.SetCurrentView_Notebook(viewName, _parent)
self.ShowParentInManager(_parent)
else:
self.logger.info(f"{_v['position']} will be managed dynamically with getParent")
self.SetCurrentView_Notebook(viewName, _v['instance'].GetParent())
self.ShowParentInManager(_v['instance'].GetParent())
_parentArea = self.GetArea(_v['position'])
if _parentArea != None :
self.ShowParentInManager(_parentArea)
else:
# No existing instance: optionally create one from the plugin's view spec.
if createIfNull:
_viewObj = None
if pluginname == "":
for _p in self.parentframe.Plugins.plugins:
#self.logger.info(f"scanning {_p}")
_plugin = self.parentframe.GetPlugin(_p)
_viewObj = _plugin.SearchPluginView(viewName)
if _viewObj != None:
#self.logger.info("found!")
pluginname = _p
break
else:
_plugin = self.parentframe.GetPlugin(pluginname)
_viewObj = _plugin.SearchPluginView(viewName)
if _viewObj != None:
if _viewObj['position'] == 'dialog':
self.AddDialog((_viewObj,))
else:
_plugin = self.parentframe.GetPlugin(pluginname)
_plugin.LoadView(_viewObj, _defaultArea)
_v = _viewObj
"""
cp = self.parentframe.wxRavenMainBook.GetCurrentPage()
cpi = self.parentframe.wxRavenMainBook.GetPageIndex(cp)
cpText = self.parentframe.wxRavenMainBook.GetPageText( cpi)
self.logger.info(f"current mainbook page {cp} {cpText}")
for _x in range(0, self.parentframe.wxRavenMainBook.GetPageCount()-1):
_xname = self.parentframe.wxRavenMainBook.GetPageText(_x)
self.logger.info(f"current mainbook page {_x} {_xname}")
if _xname == viewName:
self.parentframe.wxRavenMainBook.SetSelection(_x)
self.logger.info(f"selecting {_x}")
"""
return _v
def SetCurrentView_Notebook(self, viewname, notebook):
    """Select the page whose tab text equals *viewname* in *notebook*.

    Fix: the original iterated over the MAIN book's page count while
    reading pages from *notebook*, which breaks for the toolbox notebooks
    (missed pages or out-of-range indices). Also dropped the unused
    current-page locals.
    """
    for index in range(notebook.GetPageCount()):
        if notebook.GetPageText(index) == viewname:
            notebook.SetSelection(index)
"""
Low Level Functions to place elements in Dialog
"""
def getFrameTitleAndName(self, obj, nameFrame):
    """Return (title, base name) for a view frame.

    The base name comes from obj.view_base_name when present and not None;
    otherwise it falls back to the frame name itself.
    """
    view_name = nameFrame
    base_name = view_name
    try:
        base_name = obj.view_base_name
    except Exception:
        pass
    if base_name is None:
        base_name = view_name
    return view_name, base_name
def AddInMainFrame(self, obj, nameFrame, icon=None):
    """Dock *obj* as a new bottom pane in the main AUI manager."""
    title, base = self.getFrameTitleAndName(obj, nameFrame)
    caption = title + " (" + base + ")"
    if icon is None:
        icon = self.parentframe.RessourcesProvider.GetImage('view_default_frame')
    pane_info = (wx.aui.AuiPaneInfo().Bottom().Icon(icon).Name(nameFrame)
                 .Caption(u"> " + caption).MaximizeButton(True).MinimizeButton(True)
                 .PinButton(True).Dock().Resizable().FloatingSize(wx.DefaultSize))
    self.parentframe.m_mgr.AddPane(obj, pane_info)
    self.parentframe.m_mgr.GetPane(nameFrame).Icon(icon)
    self.__refreshGUI_Job__()
def AddInMainbook(self, obj, nameFrame, icon=None):
    """Add *obj* as a page of the central main notebook."""
    self.AddInNotebook(obj, nameFrame, self.parentframe.wxRavenMainBook, icon=icon)
def AddInNotebook(self, obj, nameFrame, notebook, icon=None):
    """Add *obj* as a page of *notebook*, dispatching on the notebook type."""
    title, base = self.getFrameTitleAndName(obj, nameFrame)
    if icon is None:
        icon = self.parentframe.RessourcesProvider.GetImage('ravencoin')
    type_name = str(type(notebook))
    if "wx._aui.AuiNotebook" in type_name:
        notebook.AddPage(obj, title, bitmap=icon)
        self.logger.info("notebook.AddPage()")
    elif "RavenNotebookToolbox" in type_name:
        notebook.AddPage(obj, title, icon)
        self.logger.info("notebook.AddPage()")
    elif "wx._core.Notebook" in type_name:
        self.RaiseViewLog("Unable to addview '"+ nameFrame+"' not supported type target : " + str(type(notebook)) , "warning")
    else:
        self.RaiseViewLog("Unable to addview '"+ nameFrame+"' unknown type : " + str(type(notebook)) , "error")
    self.__refreshGUI_Job__()
def ShowAddViewDialog(self):
    """Open the 'add view' chooser dialog, seeded with the default area."""
    default_area = self.parentframe.GetPluginSetting("General", 'defaultviewarea')
    dialog = RavenAddViewDialog(self.parentframe, default_area)
    dialog.Show()
    dialog.Bind(wx.EVT_CLOSE, self.OnAddViewClose)
    self.nViewDialog = dialog
def OnAddViewClose(self, evt):
    """Destroy and forget the add-view dialog when it closes."""
    if self.nViewDialog is not None:
        self.nViewDialog.Destroy()
        self.nViewDialog = None
# Dialog listing every plugin view in a tree, letting the user open one into
# a chosen docking area.
class RavenAddViewDialog(wxRavenAddView):
parentframe = None
# NOTE(review): class-level mutable defaults; __init__ rebinds the instance
# copies before use, so sharing across instances is avoided in practice.
imagesListReference = {}
_selected_plugin = ""
_selected_view = {}
_target = "mgr"
def __init__(self, parentFrame, targetDefault="main"):
    """Build the dialog, set its icon, and populate the area list and view tree."""
    super().__init__(parentFrame)
    self.parentframe = parentFrame
    icon = wx.EmptyIcon()  # NOTE(review): deprecated alias of wx.Icon in wxPython 4
    icon.CopyFromBitmap(parentFrame.RessourcesProvider.GetImage('new_view'))
    self.SetIcon(icon)
    self.logger = logging.getLogger('wxRaven')
    self._selected_plugin = ""
    self._selected_view = {}
    self._target = targetDefault
    self.openButton.Enable(False)
    self.imagesListReference = {}
    self.SetupTreeView()
    self.FillAreaList()
    self.FillTree()
def SetupTreeView(self):
    """Create the tree's image list and its root 'Views' item."""
    size = (16, 16)
    image_list = wx.ImageList(size[0], size[1])
    folder_idx = image_list.Add(self.parentframe.RessourcesProvider.GetImage('packagefolder_obj'))
    folder_open_idx = image_list.Add(self.parentframe.RessourcesProvider.GetImage('packagefolder_obj'))
    view_idx = image_list.Add(self.parentframe.RessourcesProvider.GetImage('view_default_frame'))
    self.imagesListReference['folder'] = folder_idx
    self.imagesListReference['folderOpen'] = folder_open_idx
    self.imagesListReference['view'] = view_idx
    self.m_treeCtrl1.SetImageList(image_list)
    self.il = image_list
    self.root = self.m_treeCtrl1.AddRoot("Views")
    self.m_treeCtrl1.SetItemData(self.root, None)
    self.m_treeCtrl1.SetItemImage(self.root, self.imagesListReference['folder'], wx.TreeItemIcon_Normal)
    self.m_treeCtrl1.SetItemImage(self.root, self.imagesListReference['folderOpen'], wx.TreeItemIcon_Expanded)
    self.Bind(wx.EVT_TREE_SEL_CHANGED, self.OnSelChanged, self.m_treeCtrl1)
def FillAreaList(self):
    """List the available docking areas and pre-select the default target."""
    for area_name in self.parentframe.Views.GetAllAreas():
        self.m_choice1.Append(area_name)
    self.m_choice1.SetSelection(self.m_choice1.FindString(self._target))
    self.Bind(wx.EVT_CHOICE, self.EvtChoice, self.m_choice1)
def EvtChoice(self, event):
    """Remember the docking area the user picked in the choice control."""
    self._target = event.GetString()
def FillTree(self):
    """Add one folder item per plugin and one leaf per view it declares."""
    for plugin_name in self.parentframe.Plugins.plugins:
        plugin = self.parentframe.GetPlugin(plugin_name)
        folder = self.m_treeCtrl1.AppendItem(self.root, plugin_name)
        self.m_treeCtrl1.SetItemData(folder, None)
        self.m_treeCtrl1.SetItemImage(folder, self.imagesListReference['folder'], wx.TreeItemIcon_Normal)
        self.m_treeCtrl1.SetItemImage(folder, self.imagesListReference['folderOpen'], wx.TreeItemIcon_Expanded)
        for view in plugin.PLUGINS_VIEWS:
            view_name = view['name']
            # Register each view's icon once; later passes reuse the index.
            if view_name not in self.imagesListReference:
                self.imagesListReference[view_name] = self.il.Add(view['icon'])
            leaf = self.m_treeCtrl1.AppendItem(folder, view_name)
            self.m_treeCtrl1.SetItemData(leaf, view)
            self.m_treeCtrl1.SetItemImage(leaf, self.imagesListReference[view_name], wx.TreeItemIcon_Normal)
            self.m_treeCtrl1.SetItemImage(leaf, self.imagesListReference[view_name], wx.TreeItemIcon_Selected)
def OnSelChanged(self, event):
    """Enable the Open button only when a view leaf (not a folder) is selected."""
    self.item = event.GetItem()
    self.openButton.Enable(False)
    if self.item:
        view_data = self.m_treeCtrl1.GetItemData(self.item)
        if view_data is not None:
            self.openButton.Enable(True)
            # The parent tree item of a view leaf is its plugin folder.
            parent_item = self.m_treeCtrl1.GetItemParent(self.item)
            self._selected_plugin = self.m_treeCtrl1.GetItemText(parent_item)
            self._selected_view = view_data
    event.Skip()
def OnCancel(self, event):
    """Dismiss the dialog without opening anything."""
    self.Close(force=True)
# Open the selected view: dialogs are spawned via the Views manager, docked
# views are loaded by their plugin into the chosen target area.
def OnOpen(self, event):
#self.logger.info("OnOpen")
if self._selected_plugin != "" and self._selected_view != None:
if self._selected_view['position']=='dialog':
self._target = 'dialog'
df_class = self._selected_view['class']
df_name = self._selected_view['name']
df_icon = self._selected_view['icon']
self.logger.info(self._selected_view)
#self.parentframe.Views.AddDialog(self._selected_view, df_name, position="dialog", icon=df_icon)
wx.CallAfter(self.parentframe.Views.AddDialog, (self._selected_view,))
else:
self.parentframe.GetPlugin(self._selected_plugin).LoadView(self._selected_view, self._target)
wx.CallAfter(self.parentframe.Views.__refreshGUI_Job__, ())
self.Close(force=True)
| 31.733945 | 250 | 0.521798 | 34,194 | 0.988523 | 0 | 0 | 0 | 0 | 0 | 0 | 8,256 | 0.238675 |
f666df25a3a1f1644fff7b7708a3e6801a52f114 | 4,714 | py | Python | python/image-tools/adjust-exif-timestamp.py | bmaupin/graveyard | 71d52fe6589ce13dfe7433906d1aa50df48c9f94 | [
"MIT"
] | 1 | 2019-11-23T10:44:58.000Z | 2019-11-23T10:44:58.000Z | python/image-tools/adjust-exif-timestamp.py | bmaupin/graveyard | 71d52fe6589ce13dfe7433906d1aa50df48c9f94 | [
"MIT"
] | 8 | 2020-07-16T07:14:12.000Z | 2020-10-14T17:25:33.000Z | python/image-tools/adjust-exif-timestamp.py | bmaupin/graveyard | 71d52fe6589ce13dfe7433906d1aa50df48c9f94 | [
"MIT"
] | 1 | 2019-11-23T10:45:00.000Z | 2019-11-23T10:45:00.000Z | #!/usr/bin/env python
import datetime
import optparse
import os
import os.path
import struct
import sys
# sudo pip3 install piexif
import piexif
# Offset added to every EXIF timestamp found in the input file.
# Make this negative to subtract time, e.g.:
# -datetime.timedelta(hours=5, minutes=9)
#TIME_ADJUSTMENT = datetime.timedelta(hours=5, minutes=9)
#TIME_ADJUSTMENT = datetime.timedelta(days=1)
TIME_ADJUSTMENT = datetime.timedelta(days=788, seconds=13032)
def main():
    """Shift the EXIF DateTime tags of one JPEG file by TIME_ADJUSTMENT.

    Reads the three EXIF timestamps (DateTime, DateTimeDigitized,
    DateTimeOriginal), prints the old and adjusted values, asks for
    confirmation (unless -y was given) and writes the adjusted tags back,
    restoring the file's original mtime afterwards.
    """
    EXIF_TIME_FORMAT = '%Y:%m:%d %H:%M:%S'
    EXIF_UNSET = 'unset'
    infile_name = parse_options()
    # Get the mtime of the file so it can be restored after rewriting EXIF.
    infile_mtime_original = os.path.getmtime(infile_name)
    try:
        exif_data = piexif.load(infile_name)
    except struct.error:
        # File without parseable EXIF: start from an empty structure.
        exif_data = {'0th': {}, '1st': {}, 'Exif': {}}
    exif_dt_location = None
    if piexif.ImageIFD.DateTime in exif_data['0th']:
        exif_dt_location = '0th'
    elif piexif.ImageIFD.DateTime in exif_data['1st']:
        exif_dt_location = '1st'
    # DateTime is when the image was last changed
    exif_dt = EXIF_UNSET
    if exif_dt_location != None:
        # I've seen timestamp values that look like this: '    :  :     :  :  '
        try:
            exif_dt = datetime.datetime.strptime(exif_data[exif_dt_location][piexif.ImageIFD.DateTime].decode('utf8'), EXIF_TIME_FORMAT)
        except ValueError as e:
            sys.stderr.write('WARNING: Malformed DateTime\n')
            sys.stderr.write('\tValueError: {}\n'.format(e))
    else:
        # No DateTime tag anywhere: the new value will be written to '0th'.
        exif_dt_location = '0th'
    # DateTimeDigitized is when the image was stored digitally (may be different from DateTimeOriginal if image was scanned)
    exif_dtd = EXIF_UNSET
    if piexif.ExifIFD.DateTimeDigitized in exif_data['Exif']:
        try:
            exif_dtd = datetime.datetime.strptime(exif_data['Exif'][piexif.ExifIFD.DateTimeDigitized].decode('utf8'), EXIF_TIME_FORMAT)
        except ValueError as e:
            sys.stderr.write('WARNING: Malformed DateTimeDigitized\n')
            sys.stderr.write('\tValueError: {}\n'.format(e))
    # DateTimeOriginal is when the image was taken
    exif_dto = EXIF_UNSET
    if piexif.ExifIFD.DateTimeOriginal in exif_data['Exif']:
        try:
            exif_dto = datetime.datetime.strptime(exif_data['Exif'][piexif.ExifIFD.DateTimeOriginal].decode('utf8'), EXIF_TIME_FORMAT)
        except ValueError as e:
            sys.stderr.write('WARNING: Malformed DateTimeOriginal\n')
            sys.stderr.write('\tValueError: {}\n'.format(e))
    # If only the Exif DateTime isn't set, set it based on DateTimeOriginal
    if exif_dt == EXIF_UNSET and exif_dtd != EXIF_UNSET and exif_dto != EXIF_UNSET and exif_dtd == exif_dto:
        # NOTE(review): set_exif_timestamp() is not defined anywhere in this
        # file -- taking this branch raises NameError.  Verify the intended
        # helper.
        set_exif_timestamp(exif_dto)
        exif_dt = exif_dto
    print('Exif DateTime is {}'.format(exif_dt))
    print('Exif DateTimeDigitized is {}'.format(exif_dtd))
    print('Exif DateTimeOriginal is {}'.format(exif_dto))
    # NOTE(review): if any timestamp is still the string 'unset' (missing or
    # malformed tag), adding a timedelta below raises TypeError.
    new_exif_dt = exif_dt + TIME_ADJUSTMENT
    new_exif_dtd = exif_dtd + TIME_ADJUSTMENT
    new_exif_dto = exif_dto + TIME_ADJUSTMENT
    print('\nNew values:')
    print('Exif DateTime: {}'.format(new_exif_dt))
    print('Exif DateTimeDigitized: {}'.format(new_exif_dtd))
    print('Exif DateTimeOriginal: {}'.format(new_exif_dto))
    if parser.values.yes:
        response = 'y'
    else:
        response = input('\nProceed? (y/n) ')
    if response.lower() == 'y':
        exif_data[exif_dt_location][piexif.ImageIFD.DateTime] = new_exif_dt.strftime(EXIF_TIME_FORMAT).encode('utf8')
        exif_data['Exif'][piexif.ExifIFD.DateTimeDigitized] = new_exif_dtd.strftime(EXIF_TIME_FORMAT).encode('utf8')
        exif_data['Exif'][piexif.ExifIFD.DateTimeOriginal] = new_exif_dto.strftime(EXIF_TIME_FORMAT).encode('utf8')
        # Write the changes to the file
        piexif.insert(piexif.dump(exif_data), infile_name)
        # Set the atime and mtime of the file back to their original values
        os.utime(infile_name, (infile_mtime_original, infile_mtime_original))
def parse_options():
    """Parse the command line and return the input file path.

    Side effect: binds the module-level ``parser`` so that other code
    (``main``) can inspect the parsed option values via ``parser.values``.
    """
    global parser
    usage = ('Usage: %prog FILE [options]\n'
             'Where FILE = full path to jpeg file to adjust EXIF tags')
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('-y', '--yes', action='store_true', dest='yes',
                      default=False, help='Adjust files without asking for confirmation')
    _options, positional = parser.parse_args()
    # The single positional argument (the jpeg file) is mandatory.
    if not positional:
        parser.print_help()
        sys.exit('Error: FILE is required')
    return positional[0]
if __name__ == '__main__':
    # Run the EXIF adjustment only when executed as a script.
    main()
| 35.179104 | 136 | 0.663131 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,515 | 0.321383 |
f6686b5276ad867dc68aa933b114b3472854d95c | 99 | py | Python | change_funname.py | poojadhoble32/python-projects | 4c094dd0072d4213344e26476e4e99a4a12246e7 | [
"bzip2-1.0.6"
] | null | null | null | change_funname.py | poojadhoble32/python-projects | 4c094dd0072d4213344e26476e4e99a4a12246e7 | [
"bzip2-1.0.6"
] | null | null | null | change_funname.py | poojadhoble32/python-projects | 4c094dd0072d4213344e26476e4e99a4a12246e7 | [
"bzip2-1.0.6"
] | null | null | null | def old(name,age):
print(f"my name is {name} and age is {age}")
old as new
new("pooja",23)
| 19.8 | 49 | 0.606061 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 44 | 0.444444 |
f66a07a388a781ad9ca3ffd91259ea2672b30eef | 2,449 | py | Python | idgo_admin/views/sld_preview.py | neogeo-technologies/idgo | 23e028b0d7fb2daf54d7e2954e0cc4d7b9be4210 | [
"Apache-2.0"
] | null | null | null | idgo_admin/views/sld_preview.py | neogeo-technologies/idgo | 23e028b0d7fb2daf54d7e2954e0cc4d7b9be4210 | [
"Apache-2.0"
] | 2 | 2018-09-14T07:12:00.000Z | 2019-11-13T09:32:24.000Z | idgo_admin/views/sld_preview.py | neogeo-technologies/idgo | 23e028b0d7fb2daf54d7e2954e0cc4d7b9be4210 | [
"Apache-2.0"
] | 2 | 2019-03-25T08:27:43.000Z | 2019-10-07T15:25:30.000Z | # Copyright (c) 2017-2021 Neogeo-Technologies.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import redis
import urllib.parse
import uuid
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import Http404
from django.http import HttpResponse
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views import View
from idgo_admin import REDIS_HOST
from idgo_admin import REDIS_EXPIRATION
from idgo_admin import LOGIN_URL
from idgo_admin import HOST_INTERNAL
from idgo_admin import PORT_INTERNAL
# Module-wide Redis client used to stash SLD documents between requests.
strict_redis = redis.StrictRedis(REDIS_HOST)
@method_decorator([csrf_exempt, login_required(login_url=LOGIN_URL)], name='dispatch')
class SLDPreviewSetter(View):
    """Store a posted SLD document in Redis and return its preview URL."""

    def post(self, request, *args, **kwargs):
        """Save ``request.POST['sld']`` under a fresh UUID key (with the
        configured expiry) and answer 201 with a Content-Location header
        pointing at the matching SLDPreviewGetter URL."""
        sld = request.POST.get('sld')
        key = str(uuid.uuid4())
        strict_redis.set(key, sld)
        strict_redis.expire(key, REDIS_EXPIRATION)
        response = HttpResponse(status=201)
        location = request.build_absolute_uri(
            reverse('idgo_admin:sld_preview_getter', kwargs={'key': key}))
        # Ugly hack (original: "C'est moche"): when an internal host/port is
        # configured, rewrite the URL's netloc so internal services can reach
        # the preview endpoint.
        if HOST_INTERNAL and PORT_INTERNAL:
            netloc = '{host}:{port}'.format(
                host=HOST_INTERNAL, port=PORT_INTERNAL)
            parsed = urllib.parse.urlparse(location)
            replaced = parsed._replace(netloc=netloc)
            response['Content-Location'] = replaced.geturl()
        else:
            response['Content-Location'] = location
        return response
@method_decorator([csrf_exempt], name='dispatch')
class SLDPreviewGetter(View):
    """Serve a previously stored SLD document by its Redis key."""

    def get(self, request, key=None, *args, **kwargs):
        """Return the SLD stored under *key*; 404 when unknown or expired."""
        document = strict_redis.get(key)
        if document:
            return HttpResponse(
                document, status=200,
                content_type='application/vnd.ogc.sld+xml')
        raise Http404
| 31.805195 | 88 | 0.721111 | 1,084 | 0.44263 | 0 | 0 | 1,221 | 0.498571 | 0 | 0 | 756 | 0.308697 |
f66a1e25a8296b28e107195d808f86bbb9fe4c11 | 14,785 | py | Python | FM/deepFM.py | sameul-yuan/markdowns | fcb8b77bfe402ffabba42352f1ae686cf1a12ecd | [
"MIT"
] | null | null | null | FM/deepFM.py | sameul-yuan/markdowns | fcb8b77bfe402ffabba42352f1ae686cf1a12ecd | [
"MIT"
] | null | null | null | FM/deepFM.py | sameul-yuan/markdowns | fcb8b77bfe402ffabba42352f1ae686cf1a12ecd | [
"MIT"
] | null | null | null | import os
import sys
import numpy as np
import pandas as pd
import tensorflow as tf
from losses import focal_loss,weighted_binary_crossentropy
from utils import Dataset
class DeepFM(object):
    """DeepFM CTR model (TensorFlow 1.x graph style).

    Combines an FM part (first- and second-order feature interactions) with
    a DNN over the shared embeddings; the concatenated outputs feed a final
    sigmoid unit trained with a focal loss plus L2 regularization.
    """

    def __init__(self, params):
        # Hyper-parameters are passed in a flat dict; build() constructs the
        # whole TF graph immediately.
        self.feature_size = params['feature_size']
        self.field_size = params['field_size']
        self.embedding_size = params['embedding_size']
        self.deep_layers = params['deep_layers']
        self.l2_reg_coef = params['l2_reg']
        self.learning_rate = params['learning_rate']
        self.pos_ratio = params['pos_ratio']
        self.keep_prob_v = params['keep_prob']
        self.activate = tf.nn.relu
        self.weight = {}
        self.saver=None
        self.checkpoint_dir = params['checkpoint_dir']
        self.build()

    def build(self):
        """Construct placeholders, weights, forward pass, loss and train op.

        Shape legend used in the comments below:
        feature_size: N, field_size: F, embedding_size: K, batch_size: None
        """
        self.feat_index = tf.placeholder(tf.int32, shape=[None, None], name='feature_index')
        self.feat_value = tf.placeholder(tf.float32, shape=[None, None], name='feature_value')
        self.label = tf.placeholder(tf.float32, shape=[None,1], name='label')
        self.keep_prob = tf.placeholder(tf.float32, shape=[], name='keep_prob')  # scalar
        self.is_training= tf.placeholder(tf.bool, shape=[],name='is_training')
        # 1. ---------------------- define weights ----------------------
        # Weights of the first-order (linear) terms of the FM part.
        self.weight['first_order'] = tf.Variable(tf.random_normal([self.feature_size, 1], 0.0, 0.05),  # N * 1
                                                 name='first_order')
        # Weights between the one-hot input and the dense embedding layer,
        # i.e. the embedding matrix that also feeds the DNN.
        self.weight['embedding_weight'] = tf.Variable(tf.random_normal([self.feature_size, self.embedding_size], 0.0, 0.05),  # N*K
                                                      name='embedding_weight')
        # Weights/biases of the deep part; its input dimension is F*K.
        num_layer = len(self.deep_layers)
        input_size = self.field_size * self.embedding_size
        # glorot_normal = np.sqrt(2.0 / (input_size + self.deep_layers[0]))  # for sigmoid
        he_normal = np.sqrt(2.0 /input_size)  # for relu
        self.weight['layer_0'] = tf.Variable(np.random.normal(loc=0, scale=he_normal, size=(input_size, self.deep_layers[0])), dtype=np.float32)
        self.weight['bias_0'] = tf.Variable(np.random.normal(loc=0, scale=he_normal, size=(1, self.deep_layers[0])), dtype=np.float32)
        # Create weight and bias for every remaining layer of the deep network.
        for i in range(1, num_layer):
            he_normal = np.sqrt(2.0 / (self.deep_layers[i - 1]))
            self.weight['layer_' + str(i)] = tf.Variable(np.random.normal(loc=0, scale=he_normal, size=(self.deep_layers[i - 1], self.deep_layers[i])),
                                                         dtype=np.float32)
            self.weight['bias_' + str(i)] = tf.Variable(np.random.normal(loc=0, scale=he_normal, size=(1, self.deep_layers[i])),dtype=np.float32)
        # Final layer input = deep output + first-order output + second-order output.
        last_layer_size = self.deep_layers[-1] + self.field_size + self.embedding_size
        glorot_normal = np.sqrt(2.0 / (last_layer_size + 1))
        # Weight and bias of the last (scoring) layer.
        self.weight['last_layer'] = tf.Variable(np.random.normal(loc=0, scale=glorot_normal, size=(last_layer_size, 1)), dtype=np.float32)
        self.weight['last_bias'] = tf.Variable(tf.constant(0.0), dtype=np.float32)
        # 2. ---------------------- forward pass ----------------------
        # None*F*K
        self.embedding_index = tf.nn.embedding_lookup(self.weight['embedding_weight'],self.feat_index)
        # [None*F*K] .* [None*F*1] = None*F*K
        self.embedding_part = tf.multiply(self.embedding_index, tf.reshape(self.feat_value, [-1, self.field_size, 1]))
        # First-order FM features.
        # None*F*1
        self.embedding_first = tf.nn.embedding_lookup(self.weight['first_order'],
                                                      self.feat_index)
        # [None*F*1] .* [None*F*1] = None*F*1
        self.embedding_first = tf.multiply(self.embedding_first, tf.reshape(self.feat_value, [-1, self.field_size, 1]))
        # None*F
        self.first_order = tf.reduce_sum(self.embedding_first, 2)
        # Second-order features, None*K
        self.sum_second_order = tf.reduce_sum(self.embedding_part, 1)
        self.sum_second_order_square = tf.square(self.sum_second_order)
        self.square_second_order = tf.square(self.embedding_part)
        self.square_second_order_sum = tf.reduce_sum(self.square_second_order, 1)
        # 1/2*((a+b)^2 - a^2 - b^2) = ab
        # None*K
        self.second_order = 0.5 * tf.subtract(self.sum_second_order_square, self.square_second_order_sum)
        # FM part output, None*(F+K)
        self.fm_part = tf.concat([self.first_order, self.second_order], axis=1)
        # DNN part
        # None*(F*K)
        self.deep_embedding = tf.reshape(self.embedding_part, [-1, self.field_size * self.embedding_size])
        # Fully connected stack: dense -> batch-norm -> relu -> dropout.
        for i in range(0, len(self.deep_layers)):
            self.deep_embedding = tf.add(tf.matmul(self.deep_embedding, self.weight["layer_%d" % i]),
                                         self.weight["bias_%d" % i])
            # self.deep_embedding =tf.matmul(self.deep_embedding, self.weight["layer_%d" % i])
            self.bn_out = tf.layers.batch_normalization(self.deep_embedding, training=self.is_training)
            # self.bn_out = tf.layers.dropout(self.deep_embedding, rate=self.keep_prob,training=self.is_training)
            self.deep_embedding = self.activate(self.bn_out)
            self.deep_embedding = tf.layers.dropout(self.deep_embedding, rate =1.0-self.keep_prob, training= self.is_training)
        # Concatenate FM output with DNN output, None*(F+K+layer[-1])
        din_all = tf.concat([self.fm_part, self.deep_embedding], axis=1)
        # None*1
        self.out = tf.add(tf.matmul(din_all, self.weight['last_layer']), self.weight['last_bias'])
        # 3. ---------------------- loss ----------------------
        # None*1
        self.prob = tf.nn.sigmoid(self.out)
        # self.entropy_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels= self.label, logits= self.out))
        # self.entropy_loss = -tf.reduce_mean(
        #     self.label * tf.log(tf.clip_by_value(self.prob, 1e-10, 1.0))+ (1 - self.label)* tf.log(tf.clip_by_value(1-self.prob,1e-10,1.0)))
        self.entropy_loss = focal_loss(self.prob, self.label, alpha=0.5, gamma=2)
        # self.entropy_loss = weighted_binary_crossentropy(self.prob, self.label, pos_ratio=self.pos_ratio)
        # Regularization: sum(w^2)/2*l2_reg_coef
        self.reg_loss = tf.contrib.layers.l2_regularizer(self.l2_reg_coef)(self.weight["last_layer"])
        for i in range(len(self.deep_layers)):
            self.reg_loss += tf.contrib.layers.l2_regularizer(self.l2_reg_coef)(self.weight["layer_%d" % i])
        # tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(self.l2_reg_coef)(self.weight['layer_1']))
        # print(self.entropy_loss.shape.as_list(), self.reg_loss.shape.as_list())
        self.loss = self.entropy_loss + self.reg_loss
        self.global_step = tf.Variable(0, trainable=False, name='global_step')
        self.learning_rate = tf.train.exponential_decay(self.learning_rate, self.global_step,3000, 0.99,staircase=False)
        opt = tf.train.AdamOptimizer(self.learning_rate)
        # opt = tf.train.GradientDescentOptimizer(self.learning_rate)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        trainable_params = tf.trainable_variables()
        gradients = tf.gradients(self.loss, trainable_params)
        clip_gradients, _ = tf.clip_by_global_norm(gradients, 5)
        # Batch-norm moving statistics must be updated before each train step.
        with tf.control_dependencies(update_ops):
            # self.train_op = opt.minimize(self.loss, global_step = self.global_step)
            self.train_op = opt.apply_gradients(zip(clip_gradients, trainable_params), global_step=self.global_step)
        self.saver = tf.train.Saver(max_to_keep=3)

    def train(self, sess, feat_index, feat_value, label):
        """Run one optimizer step on a batch; returns the new global step."""
        _, step = sess.run([self.train_op, self.global_step], feed_dict={
            self.feat_index: feat_index,
            self.feat_value: feat_value,
            self.label: label,
            self.keep_prob: self.keep_prob_v,
            self.is_training:True})
        return step

    def predict(self, sess, feat_index, feat_value, batch_size=None):
        """Return a flat 1-D array of probabilities for the given features.

        With ``batch_size`` set, the input is fed in mini-batches through a
        non-shuffling Dataset and the results are concatenated.
        """
        if batch_size is None:
            prob = sess.run([self.prob], feed_dict={
                self.feat_index: feat_index,
                self.feat_value: feat_value,
                self.keep_prob: 1,
                self.is_training:False})[0]
        else:
            data =Dataset(feat_value, feat_index, [None]*len(feat_index), batch_size, shuffle=False)
            probs =[]
            for feat_index, feat_value, _ in data:
                prob = sess.run([self.prob], feed_dict={
                    self.feat_index: feat_index,
                    self.feat_value: feat_value,
                    self.keep_prob: 1,
                    self.is_training:False})[0]
                probs.append(prob.ravel())
            prob = np.concatenate(probs)
        return prob.ravel()

    def evaluate(self, sess, feat_index, feat_value, label, batch_size=None):
        """Return (total_loss, entropy_loss, reg_loss) on the given data.

        In batched mode the losses are running means over all mini-batches.
        """
        tloss, entloss,regloss = 0,0,0
        if batch_size is None:
            tloss, entloss,regloss = sess.run([self.loss, self.entropy_loss, self.reg_loss],feed_dict={
                self.feat_index: feat_index,
                self.feat_value: feat_value,
                self.label: label,
                self.keep_prob: 1,
                self.is_training:False})
        else:
            data = Dataset(feat_value,feat_index,label, batch_size, shuffle=False)
            for i, (feat_index, feat_value, label) in enumerate(data,1):
                _tloss, _entloss, _regloss = sess.run([self.loss, self.entropy_loss, self.reg_loss],feed_dict={
                    self.feat_index: feat_index,
                    self.feat_value: feat_value,
                    self.label: label,
                    self.keep_prob: 1,
                    self.is_training:False})
                # Incremental (streaming) mean over batches.
                tloss = tloss+ (_tloss-tloss)/i
                entloss = entloss + (_entloss-entloss)/i
                regloss = regloss + (_regloss-regloss)/i
        return tloss, entloss, regloss

    def save(self, sess, path, global_step):
        """Checkpoint the current graph variables (no-op if saver missing)."""
        if self.saver is not None:
            self.saver.save(sess, save_path=path, global_step= global_step)

    def restore(self, sess, path):
        """Restore the latest checkpoint found under *path*, if any."""
        model_file = tf.train.latest_checkpoint(path)
        if model_file is not None:
            print('restore model:', model_file)
            self.saver.restore(sess, save_path=model_file)
if __name__ == '__main__':
    BASE_PATH = os.path.dirname(os.path.abspath(__file__))
    params = {'feature_size': None,
              'field_size': None,
              'embedding_size': 4,
              'deep_layers': [32, 32, 32],
              'epoch': 200,
              'batch_size': 128,
              'learning_rate': 0.001,
              'l2_reg': 0.001,
              'keep_prob': 0.7,
              # Bug fix: DeepFM.__init__ reads params['pos_ratio'], so model
              # construction raised KeyError without it.  0.5 is neutral for
              # the (currently unused) weighted loss.
              'pos_ratio': 0.5,
              'checkpoint_dir': os.path.join(BASE_PATH, 'data/deepfm'),
              'training_model': True}
    # NOTE(review): train_data/test_data, Xi/Xv/Y, val_Xi/val_Xv/val_y and the
    # helpers top_ratio_hit_rate / calc_threshold_vs_depth are referenced
    # below but never defined or imported in this file -- the data-loading
    # code appears to be missing.  TODO: restore those definitions.
    with tf.Session() as sess:
        model = DeepFM(params)
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())  # global_step counter etc.
        sys.stdout.flush()
        if params['training_model']:
            # --------------- training ---------------
            for i in range(params['epoch']):
                print('epoch ={}'.format(i).center(50, '-'))
                for j, (xi, xv, y) in enumerate(train_data):
                    # Bug fix: DeepFM.train() returns only the global step;
                    # the old "loss, _, step = ..." unpack raised ValueError.
                    step = model.train(sess, xi, xv, y)
                    if j % 1000 == 0:
                        train_loss, train_entropy, train_reg = model.evaluate(sess, Xi, Xv, Y)
                        val_loss, val_entropy, val_reg = model.evaluate(sess, val_Xi, val_Xv, val_y)
                        print('---batch= %d--- \n train_loss=%f,\t train_entropy=%f,\t train_reg=%f \n val_loss=%f,\t val_entropy=%f,\t val_reg=%f' % (
                            j, train_loss, train_entropy, train_reg, val_loss, val_entropy, val_reg))
                if i % 10 == 0 or i == params['epoch'] - 1:
                    model.save(sess, model.checkpoint_dir, i)
                    # Bug fix: predict() returns a flat ndarray of
                    # probabilities; indexing prob[0] kept only one value.
                    prob = model.predict(sess, Xi, Xv)
                    hit_rate, top_k = top_ratio_hit_rate(np.array(Y).ravel(), np.asarray(prob).ravel(), top_ratio=0.001)
                    print('top-k={}, train-hit-rate={}'.format(top_k, hit_rate))
            # ----------------- test -----------------
            probs = []
            test_y = []
            for xi, xv, y in test_data:
                prob = model.predict(sess, xi, xv)  # flat ndarray
                probs.extend(prob.ravel().tolist())
                test_y.extend(y.tolist())
            hit_rate, top_k = top_ratio_hit_rate(np.array(test_y).ravel(), np.array(probs).ravel(), top_ratio=0.001)
            print('top-k={}, test-hit-rate={}'.format(top_k, hit_rate))
            calc_threshold_vs_depth(np.asarray(test_y).ravel(), np.asarray(probs).ravel())
        else:
            # Restore the latest checkpoint and score train + test splits.
            model.restore(sess, os.path.split(model.checkpoint_dir)[0])
            probs = []
            Y = []
            for xi, xv, y in train_data:
                prob = model.predict(sess, xi, xv)
                probs.extend(prob.ravel().tolist())
                Y.extend(y.tolist())
            hit_rate, top_k = top_ratio_hit_rate(np.array(Y).ravel(), np.array(probs).ravel(), top_ratio=0.001)
            print('train-top-k={}, train-hit-rate={}'.format(top_k, hit_rate))
            probs = []
            test_y = []
            for xi, xv, y in test_data:
                prob = model.predict(sess, xi, xv)
                probs.extend(prob.ravel().tolist())
                test_y.extend(y.tolist())
            hit_rate, top_k = top_ratio_hit_rate(np.array(test_y).ravel(), np.array(probs).ravel(), top_ratio=0.001)
            print('test-top-k={}, test-hit-rate={}'.format(top_k, hit_rate))
| 52.429078 | 162 | 0.573216 | 11,218 | 0.746722 | 0 | 0 | 0 | 0 | 0 | 0 | 3,164 | 0.21061 |
f66b8ff9a4185c06c771d36dfd22c91e6cab609f | 3,815 | py | Python | pfa.py | JonathanSilver/pyKT | 2bef7e926177725e3ddd79226895a67fa9b13cba | [
"MIT"
] | 1 | 2021-03-06T08:48:10.000Z | 2021-03-06T08:48:10.000Z | pfa.py | moduxiaohunhun/pyKT | 2bef7e926177725e3ddd79226895a67fa9b13cba | [
"MIT"
] | null | null | null | pfa.py | moduxiaohunhun/pyKT | 2bef7e926177725e3ddd79226895a67fa9b13cba | [
"MIT"
] | 3 | 2021-03-15T12:19:06.000Z | 2021-04-13T12:09:19.000Z | import numpy as np
from math import log
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score, mean_squared_error, mean_absolute_error, classification_report
from math import sqrt
import json
from pprint import pprint
import argparse
# Command-line interface: paths to the two input JSON files and the fold count.
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-p', '--problems', type=str, help='file path to problems.json')
parser.add_argument('-s', '--submissions', type=str, help='file path to user_submissions.json')
parser.add_argument('-k', type=int, default=1, help='k-fold cross validation')
args = parser.parse_args()
with open(args.problems, 'r') as file:
    problems = json.load(file)
# Lookup table: problem id -> list of skill/tag ids.
problem_id_2_tag_ids = {problem['id']: problem['tags'] for problem in problems}
with open(args.submissions, 'r') as file:
    user_submissions = json.load(file)
# Number of distinct skills = highest tag id (over tagged problems) + 1.
max_skill = max([max(problem['tags']) for problem in problems if len(problem['tags']) > 0]) + 1
print('max_skill:', max_skill)
def read_data(training, group, expand_tags=False):
    """Build PFA feature rows and labels from the global submission data.

    Each row has 3 slots per skill: an indicator plus log(success) and
    log(fail) counters (counters start at 1, so log(1) == 0).  With
    ``expand_tags`` one row is emitted per (submission, tag) pair instead of
    one row per submission.  Training uses every user group except *group*;
    evaluation uses only that group.
    """
    features, labels = [], []
    for user_data in user_submissions:
        held_out = (user_data['group'] == group)
        # Skip held-out users when training, and non-held-out when evaluating.
        if held_out == training:
            continue
        success_count = {}
        fail_count = {}
        for sub in user_data['submissions']:
            verdict = sub['verdict']
            tags = problem_id_2_tag_ids[sub['problem']]
            if expand_tags:
                for tag in tags:
                    s = success_count.get(tag, 1)
                    f = fail_count.get(tag, 1)
                    row = [0] * 3 * max_skill
                    row[tag * 3] = 1
                    row[tag * 3 + 1] = log(s)
                    row[tag * 3 + 2] = log(f)
                    features.append(row)
                    labels.append(1 if verdict == 1 else 0)
                    if verdict == 1:
                        success_count[tag] = s + 1
                    else:
                        fail_count[tag] = f + 1
            else:
                row = [0] * 3 * max_skill
                for tag in tags:
                    s = success_count.get(tag, 1)
                    f = fail_count.get(tag, 1)
                    row[tag * 3] = 1
                    row[tag * 3 + 1] = log(s)
                    row[tag * 3 + 2] = log(f)
                    if verdict == 1:
                        success_count[tag] = s + 1
                    else:
                        fail_count[tag] = f + 1
                features.append(row)
                labels.append(verdict)
    return features, labels
def train(group):
    """Fit a logistic-regression PFA model with *group* held out.

    Returns the held-out group's (roc_auc, rmse, mae) and prints them.
    """
    classifier = LogisticRegression()
    train_x, train_y = read_data(training=True, group=group, expand_tags=False)
    print('Fitting')
    classifier.fit(train_x, train_y)
    test_x, test_y = read_data(training=False, group=group, expand_tags=False)
    print('Predicting')
    # Probability of the positive class (verdict == 1).
    pred = classifier.predict_proba(test_x)[:, 1]
    metrics = (
        roc_auc_score(test_y, pred),
        sqrt(mean_squared_error(test_y, pred)),
        mean_absolute_error(test_y, pred),
    )
    print('ROC AUC: {}'.format(metrics[0]))
    print('RMSE: {}'.format(metrics[1]))
    print('MAE: {}'.format(metrics[2]))
    return metrics
def main():
    """Run k-fold cross validation and report mean/std for every metric."""
    folds = args.k
    scores = np.zeros((folds, 3))  # columns: auc, rmse, mae
    for fold in range(folds):
        print('group: %d' % fold)
        scores[fold] = train(fold)
        print('-' * 20)
    auc, rmse, mae = scores[:, 0], scores[:, 1], scores[:, 2]
    print('ROC AUC: {} (+/- {})'.format(auc.mean(), auc.std()))
    print('RMSE: {} (+/- {})'.format(rmse.mean(), rmse.std()))
    print('MAE: {} (+/- {})'.format(mae.mean(), mae.std()))
if __name__ == '__main__':
    # Entry point: run the k-fold evaluation defined above.
    main()
| 35.990566 | 106 | 0.527392 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 454 | 0.119004 |
f66f8d8c41fecc47a0097bf4d34448a0a8795388 | 207 | py | Python | apps/web/api/urls.py | rubmu/QuestBot | 34c42a25e8400060a80e1a234ca7bc7a265769ae | [
"MIT"
] | 16 | 2017-12-01T04:45:50.000Z | 2021-08-28T17:25:11.000Z | apps/web/api/urls.py | daniilzinevich/QuestBot | 927b9f35aa4a4c88bf3825b965387d358c98f413 | [
"MIT"
] | 2 | 2018-08-18T16:42:49.000Z | 2021-02-25T21:16:06.000Z | apps/web/api/urls.py | daniilzinevich/QuestBot | 927b9f35aa4a4c88bf3825b965387d358c98f413 | [
"MIT"
] | 13 | 2018-01-26T14:35:51.000Z | 2020-05-10T20:48:36.000Z | from django.urls import path
from .views import ProcessWebHookAPIView
# Single route: incoming webhook calls keyed by hook id are dispatched to the
# API view that processes them.
urlpatterns = [
    path(
        'webhook/<hook_id>/',
        ProcessWebHookAPIView.as_view(),
        name='hooks-handler'
    ),
]
| 17.25 | 40 | 0.642512 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.169082 |
f67092cd8f576ff8a96e8e2a3849a3ba743001f8 | 3,534 | py | Python | databand_airflow_monitor.py | databand-ai/databand_templates | e5cae45b3bb73fa7214f6f1d38d001f74752360c | [
"Apache-2.0"
] | 4 | 2021-03-15T15:54:01.000Z | 2022-02-21T20:35:25.000Z | databand_airflow_monitor.py | databand-ai/databand_templates | e5cae45b3bb73fa7214f6f1d38d001f74752360c | [
"Apache-2.0"
] | null | null | null | databand_airflow_monitor.py | databand-ai/databand_templates | e5cae45b3bb73fa7214f6f1d38d001f74752360c | [
"Apache-2.0"
] | 3 | 2021-03-30T12:41:50.000Z | 2022-01-03T07:44:35.000Z | import logging
import os
from datetime import timedelta
from airflow import settings
from airflow.hooks.base_hook import BaseHook
from airflow.models import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.utils.dates import days_ago
CHECK_INTERVAL = 10 # Sleep time (in seconds) between sync iterations
AUTO_RESTART_TIMEOUT = 30 * 60 # Restart after this number of seconds
# We're using FORCE_RESTART_TIMEOUT as backup mechanism for the case monitor is
# stuck for some reason. Normally it should auto-restart by itself after
# AUTO_RESTART_TIMEOUT, but in case it's not - we'd like to kill it.
FORCE_RESTART_TIMEOUT = timedelta(seconds=AUTO_RESTART_TIMEOUT + 5 * 60)
LOG_LEVEL = "WARN"
DATABAND_AIRFLOW_CONN_ID = "dbnd_config"
"""
Airflow-Monitor-as-DAG expects airflow connection with dbnd configuration
to be define:
connection id: dbnd_config
extra:
{
"core": {
"databand_url": "<dbnd webserver url>",
"databand_access_token": "<access token>"
},
"airflow_monitor": {
"syncer_name": "<airflow syncer name>"
}
}
"""
args = {
"owner": "Databand",
"start_date": days_ago(2),
}
logger = logging.getLogger(__name__)
class MonitorOperator(BashOperator):
    """BashOperator that injects dbnd configuration into its environment.

    Before the bash command runs, the dbnd settings stored in the
    ``dbnd_config`` Airflow connection are flattened into DBND__* environment
    variables and merged with a few monitor-specific overrides.
    """

    def pre_execute(self, context):
        """Build self.env from os.environ + connection config + overrides."""
        dbnd_conn_config = BaseHook.get_connection(DATABAND_AIRFLOW_CONN_ID)
        json_config = dbnd_conn_config.extra_dejson
        # {"core": {"databand_url": ...}} -> {"DBND__CORE__DATABAND_URL": ...}
        dbnd_config = self.to_env(
            self.flatten(json_config, parent_key="DBND", sep="__")
        )
        self.env = os.environ.copy()
        self.env.update(dbnd_config)
        # Fixed overrides wired to this Airflow instance's own settings.
        self.env.update(
            {
                "DBND__LOG__LEVEL": LOG_LEVEL,
                "DBND__AIRFLOW_MONITOR__SQL_ALCHEMY_CONN": settings.SQL_ALCHEMY_CONN,
                "DBND__AIRFLOW_MONITOR__LOCAL_DAG_FOLDER": settings.DAGS_FOLDER,
                "DBND__AIRFLOW_MONITOR__FETCHER": "db",
            }
        )

    def flatten(self, d, parent_key="", sep="_"):
        """
        Flatten a nested dict into env-variable style keys:
        { "core": { "conf1": "v1", "conf2": "v2" } } =>
        { "dbnd__core__conf1": "v1", "dbnd__core__conf2": "v2" }
        source: https://stackoverflow.com/a/6027615/15495440
        """
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, dict):
                # Recurse into nested sections, prefixing with the parent key.
                items.extend(self.flatten(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    def to_env(self, d):
        """
        Convert a dict to be env friendly - uppercase keys, stringify values.
        """
        return {k.upper(): str(v) for k, v in d.items()}
# Runs every minute; max_active_runs=1 keeps a single monitor alive at a time.
dag = DAG(
    dag_id="databand_airflow_monitor",
    default_args=args,
    schedule_interval="* * * * *",
    dagrun_timeout=None,
    tags=["project:airflow-monitor"],
    max_active_runs=1,
    catchup=False,
)

with dag:
    # show_env = BashOperator(task_id="env", bash_command="env")
    opts = " --interval %d " % CHECK_INTERVAL
    if AUTO_RESTART_TIMEOUT:
        opts += " --stop-after %d " % AUTO_RESTART_TIMEOUT
    # Single task: run the dbnd monitor process with the assembled options.
    run_monitor = MonitorOperator(
        task_id="monitor",
        task_concurrency=1,
        retries=10,
        bash_command="python3 -m dbnd airflow-monitor-v2 %s" % opts,
        retry_delay=timedelta(seconds=1),
        retry_exponential_backoff=False,
        max_retry_delay=timedelta(seconds=1),
        execution_timeout=FORCE_RESTART_TIMEOUT,
    )

if __name__ == "__main__":
    # Allow "python databand_airflow_monitor.py" to use Airflow's DAG CLI.
    dag.cli()
| 29.697479 | 85 | 0.649123 | 1,520 | 0.430108 | 0 | 0 | 0 | 0 | 0 | 0 | 1,367 | 0.386814 |
f673429c5f822b682d2ef47c28ccc96363542050 | 1,092 | py | Python | src/search/sort/prepare_data.py | lingeen/lingeen-Ying | b7d4771d556e2168bd3dfae15b39336046319a9c | [
"Apache-2.0"
] | null | null | null | src/search/sort/prepare_data.py | lingeen/lingeen-Ying | b7d4771d556e2168bd3dfae15b39336046319a9c | [
"Apache-2.0"
] | null | null | null | src/search/sort/prepare_data.py | lingeen/lingeen-Ying | b7d4771d556e2168bd3dfae15b39336046319a9c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# @Time : 2020/12/23 2:27 PM
# @Author : Kevin
import config
from utils.sentence_process import cut_sentence_by_character
from search.sort.word_to_sequence import Word2Sequence
import pickle
def prepare_dict_model():
    """Build a Word2Sequence vocabulary from the corpus and pickle it.

    Reads every line of ``config.sort_all_file_path``, splits each line into
    characters, fits the vocabulary and dumps the fitted model to
    ``config.sort_ws_model_path``.
    """
    ws = Word2Sequence()
    # Use context managers so the file handles are closed deterministically
    # (the original left both files open until garbage collection).
    with open(config.sort_all_file_path, "r") as corpus:
        for line in corpus:
            ws.fit(cut_sentence_by_character(line))
    ws.build_vocab()
    with open(config.sort_ws_model_path, "wb") as model_file:
        pickle.dump(ws, model_file)
def test_dict_model():
    """Smoke-test the pickled vocabulary by transforming a sample sentence."""
    sentence = "如何在linux下安装storm"
    # Bug fix: close the model file deterministically instead of relying on GC.
    with open(config.sort_ws_model_path, "rb") as model_file:
        ws = pickle.load(model_file)
    tokens = cut_sentence_by_character(sentence)
    sequence = ws.transform(tokens)
    print(tokens)
    print(sequence)
def make_data_file():
    """Write 96339 synthetic labels: "0" on every third line, "1" otherwise."""
    with open(config.sort_label_file_path, "w+") as out:
        out.writelines(
            "0\n" if index % 3 == 0 else "1\n" for index in range(96339)
        )
if __name__ == '__main__':
    # Only the label-file generation is active; the vocab steps are disabled.
    # prepare_dict_model()
    # test_dict_model()
    make_data_file()
f67473386f78c6d97077c35b82b59ca8dd569a05 | 681 | py | Python | equipment_piece.py | cookyt/mhw_optimizer | 05dc4483f9bb0b6f8c2d8e205239d58bbd4c274a | [
"MIT"
] | null | null | null | equipment_piece.py | cookyt/mhw_optimizer | 05dc4483f9bb0b6f8c2d8e205239d58bbd4c274a | [
"MIT"
] | null | null | null | equipment_piece.py | cookyt/mhw_optimizer | 05dc4483f9bb0b6f8c2d8e205239d58bbd4c274a | [
"MIT"
] | null | null | null | from enum import Enum
class BodyPart(Enum):
    """Equipment slots a piece can occupy; CHARM is its own pseudo-slot."""
    HEAD = 0
    BODY = 1
    ARMS = 2
    WAIST = 3
    LEGS = 4
    CHARM = 5
class EquipmentPiece:
    """Base class for equippable items: a named piece bound to one body part
    and carrying a collection of skills."""

    def __init__(self, name, body_part, skills):
        """Store the piece's display name, BodyPart slot and skill list."""
        self.skills = skills
        self.body_part = body_part
        self.name = name
class ArmourPiece(EquipmentPiece):
    """An armour item: an EquipmentPiece that also has a defence value and
    decoration slots."""

    def __init__(self, name, body_part, defence, skills, decoration_slots):
        super().__init__(name, body_part, skills)
        self.decoration_slots = decoration_slots
        self.defence = defence
class Charm(EquipmentPiece):
    """A charm: an EquipmentPiece that always occupies the CHARM slot."""

    def __init__(self, name, skills):
        super().__init__(name, BodyPart.CHARM, skills)
| 22.7 | 75 | 0.666667 | 647 | 0.950073 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f674e20787b0a96114591ae162f4d1c5ca96e86b | 11,984 | py | Python | tests/message_handler_test.py | FelixSchwarz/mailqueue-runner | 8e1a53ac4b363ced55636b1dda042c605386fc81 | [
"0BSD"
] | 3 | 2019-12-15T18:17:16.000Z | 2022-03-18T23:00:49.000Z | tests/message_handler_test.py | FelixSchwarz/mailqueue-runner | 8e1a53ac4b363ced55636b1dda042c605386fc81 | [
"0BSD"
] | 24 | 2018-10-12T21:13:00.000Z | 2020-11-12T23:03:31.000Z | tests/message_handler_test.py | FelixSchwarz/mailqueue-runner | 8e1a53ac4b363ced55636b1dda042c605386fc81 | [
"0BSD"
] | null | null | null | # -*- coding: utf-8 -*-
# SPDX-License-Identifier: MIT
from __future__ import absolute_import, print_function, unicode_literals
import os
import shutil
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
import uuid
from ddt import ddt as DataDrivenTestCase, data as ddt_data
from pythonic_testcase import *
from schwarz.fakefs_helpers import TempFS
from schwarz.log_utils import l_
from schwarz.puzzle_plugins import connect_signals, SignalRegistry
from testfixtures import LogCapture
from schwarz.mailqueue import (create_maildir_directories, lock_file,
DebugMailer, MessageHandler)
from schwarz.mailqueue.compat import IS_WINDOWS
from schwarz.mailqueue.maildir_utils import find_messages
from schwarz.mailqueue.message_utils import parse_message_envelope
from schwarz.mailqueue.plugins import MQAction, MQSignal
from schwarz.mailqueue.queue_runner import MaildirBackedMsg, MaildirBackend
from schwarz.mailqueue.testutils import (assert_did_log_message, info_logger,
inject_example_message, message as example_message)
@DataDrivenTestCase
class MessageHandlerTest(PythonicTestCase):
    def setUp(self):
        # Fresh maildir (with new/cur/tmp subdirs) on a temporary filesystem
        # for every test, so tests cannot interfere with each other.
        self.tempfs = TempFS.set_up(test=self)
        self.path_maildir = os.path.join(self.tempfs.root, 'mailqueue')
        create_maildir_directories(self.path_maildir)
@ddt_data(True, False)
def test_can_send_message(self, with_msg_id):
mailer = DebugMailer()
msg_header = b'X-Header: somevalue\n'
if with_msg_id:
msg_id = '%s@host.example' % uuid.uuid4()
msg_header += b'Message-ID: <%s>\n' % msg_id.encode('ascii')
msg_body = b'MsgBody\n'
msg_bytes = msg_header + b'\n' + msg_body
msg = inject_example_message(self.path_maildir,
sender = b'foo@site.example',
recipient = b'bar@site.example',
msg_bytes = msg_bytes,
)
assert_true(os.path.exists(msg.path))
with LogCapture() as lc:
mh = MessageHandler([mailer], info_logger(lc))
was_sent = mh.send_message(msg)
assert_trueish(was_sent)
expected_log_msg = '%s => %s' % ('foo@site.example', 'bar@site.example')
if with_msg_id:
expected_log_msg += ' <%s>' % msg_id
assert_did_log_message(lc, expected_msg=expected_log_msg)
assert_length(1, mailer.sent_mails)
sent_msg, = mailer.sent_mails
assert_equals('foo@site.example', sent_msg.from_addr)
assert_equals(('bar@site.example',), sent_msg.to_addrs)
assert_equals(msg_nl(msg_bytes), sent_msg.msg_fp.read())
assert_false(os.path.exists(msg.path))
# ensure there are no left-overs/tmp files
assert_length(0, self.list_all_files(self.path_maildir))
def test_can_handle_sending_failure(self):
mailer = DebugMailer(simulate_failed_sending=True)
msg = inject_example_message(self.path_maildir)
assert_true(os.path.exists(msg.path))
was_sent = MessageHandler([mailer]).send_message(msg)
assert_falseish(was_sent)
assert_true(os.path.exists(msg.path))
# no left-overs (e.g. in "tmp" folder) other than the initial message file
assert_length(1, self.list_all_files(self.path_maildir))
def test_can_handle_non_existent_file_in_send(self):
mailer = DebugMailer()
invalid_path = os.path.join(self.path_maildir, 'new', 'invalid')
msg_with_invalid_path = MaildirBackedMsg(invalid_path)
was_sent = MessageHandler([mailer]).send_message(msg_with_invalid_path)
assert_none(was_sent)
assert_length(0, mailer.sent_mails)
def test_can_handle_vanished_file_after_successful_send(self):
if IS_WINDOWS:
self.skipTest('unable to unlink open file on Windows')
msg = inject_example_message(self.path_maildir)
path_in_progress = msg.path.replace('new', 'cur')
def delete_on_send(*args):
os.unlink(path_in_progress)
return True
mailer = DebugMailer(send_callback=delete_on_send)
was_sent = MessageHandler([mailer]).send_message(msg)
assert_true(was_sent)
assert_length(1, mailer.sent_mails)
assert_length(0, self.list_all_files(self.path_maildir))
def test_can_handle_vanished_file_after_failed_send(self):
if IS_WINDOWS:
self.skipTest('unable to unlink open file on Windows')
msg = inject_example_message(self.path_maildir)
path_in_progress = msg.path.replace('new', 'cur')
def delete_on_send(*args):
os.unlink(path_in_progress)
return False
mailer = DebugMailer(send_callback=delete_on_send)
was_sent = MessageHandler([mailer]).send_message(msg)
assert_false(was_sent)
assert_length(0, mailer.sent_mails)
assert_length(0, self.list_all_files(self.path_maildir))
def test_can_handle_duplicate_file_in_cur_before_send(self):
msg = inject_example_message(self.path_maildir)
path_in_progress = msg.path.replace('new', 'cur')
# this can happen on Unix/Posix because Python does not provide an
# atomic "move without overwrite". Linux provides the "renameat2"
# system call (with RENAME_NOREPLACE flag) but Python does not expose
# that API.
shutil.copy(msg.path, path_in_progress)
mailer = DebugMailer()
was_sent = MessageHandler([mailer]).send_message(msg)
assert_none(was_sent)
assert_length(0, mailer.sent_mails)
assert_length(2, self.list_all_files(self.path_maildir))
def test_can_handle_duplicate_file_in_new_after_failed_send(self):
msg = inject_example_message(self.path_maildir)
path_in_progress = msg.path.replace('new', 'cur')
# again: can happen because Python provides not atomic "move without
# overwrite" on Linux (see also "renameat2" system call)
def duplicate_on_failed_send(*args):
shutil.copy(path_in_progress, msg.path)
return False
mailer = DebugMailer(send_callback=duplicate_on_failed_send)
was_sent = MessageHandler([mailer]).send_message(msg)
assert_false(was_sent)
assert_length(0, mailer.sent_mails)
assert_length(2, self.list_all_files(self.path_maildir))
def test_tries_to_lock_message_while_sending(self):
mailer = DebugMailer()
msg = inject_example_message(self.path_maildir)
locked_msg = lock_file(msg.path, timeout=0.1)
mh = MessageHandler([mailer])
was_sent = mh.send_message(msg)
assert_none(was_sent)
assert_length(1, self.msg_files(folder='new'))
assert_is_empty(mailer.sent_mails)
locked_msg.close()
was_sent = mh.send_message(msg)
assert_trueish(was_sent)
assert_is_empty(self.msg_files(folder='new'))
assert_length(1, mailer.sent_mails)
def test_can_enqueue_message_after_failed_sending(self):
mailer = DebugMailer(simulate_failed_sending=True)
maildir_fallback = MaildirBackend(self.path_maildir)
msg = example_message()
mh = MessageHandler([mailer, maildir_fallback])
was_sent = mh.send_message(msg, sender='foo@site.example', recipient='bar@site.example')
assert_trueish(was_sent)
assert_is_empty(mailer.sent_mails)
msg_path, = self.msg_files(folder='new')
with open(msg_path, 'rb') as msg_fp:
stored_msg = parse_message_envelope(msg_fp)
assert_equals('foo@site.example', stored_msg.from_addr)
assert_equals(('bar@site.example',), stored_msg.to_addrs)
def test_can_enqueue_message_with_multiple_recipients_after_failed_sending(self):
mailer = DebugMailer(simulate_failed_sending=True)
mh = MessageHandler([mailer, MaildirBackend(self.path_maildir)])
msg = example_message()
recipients = ('r1@foo.example', 'r2@bar.example')
mh.send_message(msg, sender='foo@site.example', recipients=recipients)
msg_path, = self.msg_files(folder='new')
with open(msg_path, 'rb') as msg_fp:
stored_msg = parse_message_envelope(msg_fp)
assert_equals(recipients, stored_msg.to_addrs)
@ddt_data(True, False)
def test_can_notify_plugin_after_delivery(self, delivery_successful):
if delivery_successful:
signal = MQSignal.delivery_successful
mailer = DebugMailer()
else:
signal = MQSignal.delivery_failed
mailer = DebugMailer(simulate_failed_sending=True)
registry = SignalRegistry()
plugin = MagicMock(return_value=None, spec={})
connect_signals({signal: plugin}, registry.namespace)
mh = MessageHandler([mailer], plugins=registry)
mh.send_message(example_message(), sender='foo@site.example', recipient='bar@site.example')
plugin.assert_called_once()
# would be able to simplify this with Python 3 only:
# call_kwargs = plugin.call_args.kwargs
call_kwargs = plugin.call_args[-1]
send_result = call_kwargs['send_result']
if delivery_successful:
assert_length(1, mailer.sent_mails)
assert_trueish(send_result)
else:
assert_length(0, mailer.sent_mails)
assert_falseish(send_result)
assert_false(send_result.queued)
assert_equals('debug', send_result.transport)
def test_plugin_can_discard_message_after_failed_delivery(self):
mailer = DebugMailer(simulate_failed_sending=True)
sender = 'foo@site.example'
recipient = 'bar@site.example'
def discard_message(event_sender, msg, send_result):
assert_falseish(send_result)
assert_none(send_result.discarded)
assert_equals(sender, msg.from_addr)
assert_equals({recipient}, set(msg.to_addrs))
return MQAction.DISCARD
registry = SignalRegistry()
connect_signals({MQSignal.delivery_failed: discard_message}, registry.namespace)
msg = example_message()
mh = MessageHandler([mailer], plugins=registry)
send_result = mh.send_message(msg, sender=sender, recipient=recipient)
assert_falseish(send_result)
assert_false(send_result.queued)
assert_true(send_result.discarded)
def test_plugin_can_access_number_of_failed_deliveries(self):
registry = SignalRegistry()
def discard_after_two_attempts(sender, msg, send_result):
return MQAction.DISCARD if (msg.retries > 1) else None
connect_signals({MQSignal.delivery_failed: discard_after_two_attempts}, registry.namespace)
msg = inject_example_message(self.path_maildir)
mailer = DebugMailer(simulate_failed_sending=True)
mh = MessageHandler([mailer], plugins=registry)
mh.send_message(msg)
assert_length(1, find_messages(self.path_maildir, log=l_(None)))
send_result = mh.send_message(msg)
assert_falseish(send_result)
assert_length(0, mailer.sent_mails)
assert_length(0, find_messages(self.path_maildir, log=l_(None)))
assert_true(send_result.discarded)
# --- internal helpers ----------------------------------------------------
def list_all_files(self, basedir):
files = []
for root_dir, dirnames, filenames in os.walk(basedir):
for filename in filenames:
path = os.path.join(root_dir, filename)
files.append(path)
return files
def msg_files(self, folder='new'):
path = os.path.join(self.path_maildir, folder)
files = []
for filename in os.listdir(path):
file_path = os.path.join(path, filename)
files.append(file_path)
return files
def msg_nl(msg_bytes):
    """Return ``msg_bytes`` with platform-native line endings (CRLF on Windows)."""
    if IS_WINDOWS:
        return msg_bytes.replace(b'\n', b'\r\n')
    return msg_bytes
| 41.467128 | 99 | 0.685831 | 10,778 | 0.899366 | 0 | 0 | 10,798 | 0.901035 | 0 | 0 | 1,285 | 0.107226 |
f674f2c2add88e217619ce81024522e2b7f8ad3d | 1,632 | py | Python | Python3/687.longest-univalue-path.py | 610yilingliu/leetcode | 30d071b3685c2131bd3462ba77c6c05114f3f227 | [
"MIT"
] | null | null | null | Python3/687.longest-univalue-path.py | 610yilingliu/leetcode | 30d071b3685c2131bd3462ba77c6c05114f3f227 | [
"MIT"
] | null | null | null | Python3/687.longest-univalue-path.py | 610yilingliu/leetcode | 30d071b3685c2131bd3462ba77c6c05114f3f227 | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=687 lang=python3
#
# [687] Longest Univalue Path
#
# @lc code=start
from collections import deque
def construct_tree(values):
    """Build a binary tree from a level-order value list.

    ``values`` lists node values breadth-first; ``None`` marks a missing
    child.  Returns the root ``TreeNode`` or ``None`` for an empty list.

    Bug fix: the original used truthiness (``if values[nums]``) to detect
    missing children, which silently dropped any node whose value is 0.
    Explicit ``is not None`` checks keep 0 as a valid node value.
    """
    if not values:
        return None
    root = TreeNode(values[0])
    queue = deque([root])
    leng = len(values)
    nums = 1
    while nums < leng:
        node = queue.popleft()
        if node:
            node.left = TreeNode(values[nums]) if values[nums] is not None else None
            queue.append(node.left)
            if nums + 1 < leng:
                node.right = TreeNode(values[nums + 1]) if values[nums + 1] is not None else None
                queue.append(node.right)
            nums += 1
        nums += 1
    return root
# Definition for a binary tree node.
class TreeNode:
    def __init__(self, x):
        # Value plus left/right child links (initially leaf).
        self.val = x
        self.left = None
        self.right = None
class Solution:
    """LeetCode 687: length (in edges) of the longest path whose nodes all
    share one value."""

    def longestUnivaluePath(self, root):
        """Return the longest univalue path length for the tree at ``root``."""
        self.mx = 0
        if root is not None:
            self.finder(root)
        return self.mx

    def finder(self, root):
        """Post-order helper.

        Returns the longest single-direction "arm" of equal values that
        starts at ``root`` and goes downwards; updates ``self.mx`` with the
        best path that bends through ``root`` (left arm + right arm).
        """
        if root is None:
            return 0
        left_arm = self.finder(root.left)
        right_arm = self.finder(root.right)
        l = r = 0
        # Only extend an arm through a child that carries the same value.
        if root.left is not None and root.left.val == root.val:
            l = left_arm + 1
        if root.right is not None and root.right.val == root.val:
            r = right_arm + 1
        self.mx = max(self.mx, l + r)
        return max(l, r)
if __name__ == '__main__':
    # Manual smoke test: build the sample tree and print the answer.
    tree = construct_tree([5,4,5,1,1,5])
    b = Solution()
    c = b.longestUnivaluePath(tree)
    print(c)
# @lc code=end
| 24.358209 | 85 | 0.525123 | 708 | 0.433824 | 0 | 0 | 0 | 0 | 0 | 0 | 168 | 0.102941 |
f6768e992a307af16822d4dff6a0e361ad829926 | 6,803 | py | Python | tests/reflinks_tests.py | xqt/pwb | 9a4fe27138f32952e533256195849d05855df0b0 | [
"MIT"
] | 1 | 2022-02-10T00:20:42.000Z | 2022-02-10T00:20:42.000Z | tests/reflinks_tests.py | euku/spbot | e6d505c8965b4e6730b3dc4505f92e35a3edb2e2 | [
"MIT"
] | 1 | 2021-12-08T16:29:41.000Z | 2021-12-08T16:29:41.000Z | tests/reflinks_tests.py | euku/spbot | e6d505c8965b4e6730b3dc4505f92e35a3edb2e2 | [
"MIT"
] | 2 | 2022-01-04T04:10:38.000Z | 2022-01-04T04:18:18.000Z | #!/usr/bin/python3
"""Tests for reflinks script."""
#
# (C) Pywikibot team, 2014-2022
#
# Distributed under the terms of the MIT license.
#
import unittest
from scripts.reflinks import ReferencesRobot, XmlDumpPageGenerator, main
from tests import join_xml_data_path
from tests.aspects import ScriptMainTestCase, TestCase
from tests.utils import empty_sites
class TestXMLPageGenerator(TestCase):
    """Test XML Page generator."""
    # Site coordinates used by the TestCase machinery.
    family = 'wikipedia'
    code = 'en'
    dry = True  # presumably avoids live site requests — verify in tests.aspects
    def test_non_bare_ref_urls(self):
        """Test pages without bare references are not processed."""
        gen = XmlDumpPageGenerator(
            filename=join_xml_data_path('article-pear-0.10.xml'),
            start='Pear',
            namespaces=[0, 1],
            site=self.site)
        pages = list(gen)
        self.assertIsEmpty(pages)
    def test_simple_bare_refs(self):
        """Test simple bare references with several namespaces options."""
        # None/empty means "no namespace filter"; ids may be ints or strings.
        namespace_variants = (None, [], [0, 1], ['0', '1'])
        filename = join_xml_data_path('dummy-reflinks.xml')
        for namespaces in namespace_variants:
            with self.subTest(namespaces=namespaces):
                gen = XmlDumpPageGenerator(filename=filename,
                                           start='Fake page',
                                           namespaces=namespaces,
                                           site=self.site)
                pages = list(gen)
                self.assertPageTitlesEqual(pages, ('Fake page',
                                                   'Talk:Fake page'),
                                           site=self.site)
    def test_namespace_names(self):
        """Test namespaces with namespace names."""
        gen = XmlDumpPageGenerator(
            filename=join_xml_data_path('dummy-reflinks.xml'),
            start='Fake page',
            namespaces=['Talk'],
            site=self.site)
        pages = list(gen)
        self.assertPageTitlesEqual(pages, ['Talk:Fake page'], site=self.site)
    def test_start_variants(self):
        """Test with several page title options."""
        start_variants = (
            None, # None
            'Fake', # prefix
            'Fake_page', # underscore
        )
        filename = join_xml_data_path('dummy-reflinks.xml')
        for start in start_variants:
            with self.subTest(start=start):
                gen = XmlDumpPageGenerator(filename=filename,
                                           start=start,
                                           namespaces=[0, 1],
                                           site=self.site)
                pages = list(gen)
                self.assertPageTitlesEqual(pages, ('Fake page',
                                                   'Talk:Fake page'),
                                           site=self.site)
class TestReferencesBotConstructor(ScriptMainTestCase):
    """
    Test reflinks with run() removed.
    These tests can't verify the order of the pages in the XML
    as the constructor is given a preloading generator.
    See APISite.preloadpages for details.
    """
    family = 'wikipedia'
    code = 'en'
    def setUp(self):
        """Set up the script by patching the bot class."""
        super().setUp()
        # Swap in dummy_constructor so main() only records its arguments
        # (on this class) instead of building/running the real bot.
        self._original_constructor = ReferencesRobot.__init__
        self._original_run = ReferencesRobot.run
        ReferencesRobot.__init__ = dummy_constructor
        ReferencesRobot.run = lambda self: None
    def tearDown(self):
        """Tear down the test by undoing the bot class patch."""
        ReferencesRobot.__init__ = self._original_constructor
        ReferencesRobot.run = self._original_run
        with empty_sites():
            super().tearDown()
    def test_xml_simple(self):
        """Test the generator without any narrowing."""
        main('-xml:' + join_xml_data_path('dummy-reflinks.xml'))
        gen = self.constructor_kwargs['generator']
        self.assertPageTitlesCountEqual(gen, ['Fake page', 'Talk:Fake page'],
                                        site=self.get_site())
    def test_xml_one_namespace(self):
        """Test the generator using one namespace id."""
        main('-xml:' + join_xml_data_path('dummy-reflinks.xml'),
             '-namespace:1')
        gen = self.constructor_kwargs['generator']
        pages = list(gen)
        self.assertPageTitlesEqual(pages, ['Talk:Fake page'],
                                   site=self.get_site())
    def test_xml_multiple_namespace_ids(self):
        """Test the generator using multiple separate namespaces parameters."""
        main('-xml:' + join_xml_data_path('dummy-reflinks.xml'),
             '-namespace:0', '-namespace:1', '-xmlstart:Fake page')
        gen = self.constructor_kwargs['generator']
        self.assertPageTitlesCountEqual(gen, ['Fake page', 'Talk:Fake page'],
                                        site=self.get_site())
    def test_xml_multiple_namespace_ids_2(self):
        """Test the generator using multiple namespaces in one parameter."""
        main('-xml:' + join_xml_data_path('dummy-reflinks.xml'),
             '-namespace:0,1', '-xmlstart:Fake page')
        gen = self.constructor_kwargs['generator']
        self.assertPageTitlesCountEqual(gen, ['Fake page', 'Talk:Fake page'],
                                        site=self.get_site())
    def test_xml_start_variants(self):
        """Test the generator using variants of start page."""
        start_variants = (
            '-xmlstart:Fake page', # title
            '-xmlstart:Fake_page', # underscore
            '-xmlstart:Fake', # prefix
        )
        filename = '-xml:' + join_xml_data_path('dummy-reflinks.xml')
        for start in start_variants:
            with self.subTest(xmlstart=start), empty_sites():
                main(filename, '-namespace:1', start)
                gen = self.constructor_kwargs['generator']
                pages = list(gen)
                self.assertPageTitlesEqual(pages, ['Talk:Fake page'],
                                           site=self.site)
    def test_xml_namespace_name(self):
        """Test the generator using a namespace name."""
        main('-xml:' + join_xml_data_path('dummy-reflinks.xml'),
             '-namespace:Talk', '-xmlstart:Fake page')
        gen = self.constructor_kwargs['generator']
        pages = list(gen)
        self.assertPageTitlesEqual(pages, ['Talk:Fake page'],
                                   site=self.get_site())
def dummy_constructor(self, *args, **kwargs):
    """A constructor faking the actual constructor."""
    # Record the call on the test class so individual tests can inspect
    # the generator (and any other arguments) main() passed to the bot.
    TestReferencesBotConstructor.constructor_args = args
    TestReferencesBotConstructor.constructor_kwargs = kwargs
if __name__ == '__main__': # pragma: no cover
    # Allow running this test module directly with ``python``.
    unittest.main()
| 38.435028 | 79 | 0.575922 | 6,149 | 0.903866 | 0 | 0 | 0 | 0 | 0 | 0 | 1,965 | 0.288843 |
f67b88939087fdc071c7e83e728a3ef5a850c797 | 319 | py | Python | sunny/publisher/ioloop.py | AnkitAggarwalPEC/HFT-Analytics-Luigi | b01a48d7384accaca1869a7728912019b73f7e9c | [
"MIT"
] | null | null | null | sunny/publisher/ioloop.py | AnkitAggarwalPEC/HFT-Analytics-Luigi | b01a48d7384accaca1869a7728912019b73f7e9c | [
"MIT"
] | null | null | null | sunny/publisher/ioloop.py | AnkitAggarwalPEC/HFT-Analytics-Luigi | b01a48d7384accaca1869a7728912019b73f7e9c | [
"MIT"
] | null | null | null |
import asyncio
class IOLoop(object):
    """Thin wrapper around an asyncio event loop with running/stopped flags.

    Bug fixes versus the original:
    * ``self.event_loop`` was assigned in the class body, where ``self``
      does not exist — a NameError at import time.  It now happens in
      ``__init__``.
    * ``_running`` and ``_stopped`` were never initialized, so ``start()``
      raised AttributeError; both now default to False.
    """

    def __init__(self):
        try:
            self.event_loop = asyncio.get_event_loop()
        except RuntimeError:
            # No current loop in this thread (e.g. newer Python versions):
            # create and install one.
            self.event_loop = asyncio.new_event_loop()
            asyncio.set_event_loop(self.event_loop)
        self._running = False
        self._stopped = False

    def start(self):
        """Re-arm a previously stopped loop; refuse to start twice."""
        if self._running:
            raise RuntimeError("IOLoop is already running")
        if self._stopped:
            # A stopped loop is simply re-armed; starting the underlying
            # asyncio loop itself is not implemented here.
            self._stopped = False
            return
| 13.291667 | 59 | 0.583072 | 295 | 0.924765 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.084639 |
f67c5cfd49d70bb094eaf858bc53e07e4d05c0a5 | 1,756 | py | Python | Data Science and Machine Learning/Machine-Learning-In-Python-THOROUGH/EXAMPLES/EDABIT/EARLIER/49_triangle_challenge.py | okara83/Becoming-a-Data-Scientist | f09a15f7f239b96b77a2f080c403b2f3e95c9650 | [
"MIT"
] | null | null | null | Data Science and Machine Learning/Machine-Learning-In-Python-THOROUGH/EXAMPLES/EDABIT/EARLIER/49_triangle_challenge.py | okara83/Becoming-a-Data-Scientist | f09a15f7f239b96b77a2f080c403b2f3e95c9650 | [
"MIT"
] | null | null | null | Data Science and Machine Learning/Machine-Learning-In-Python-THOROUGH/EXAMPLES/EDABIT/EARLIER/49_triangle_challenge.py | okara83/Becoming-a-Data-Scientist | f09a15f7f239b96b77a2f080c403b2f3e95c9650 | [
"MIT"
] | 2 | 2022-02-09T15:41:33.000Z | 2022-02-11T07:47:40.000Z | """
Triangle Challenge
Given the perimeter and the area of a triangle, devise a function that returns the length of the sides of all triangles that fit those specifications. The length of the sides must be integers. Sort results in ascending order.
triangle(perimeter, area) ➞ [[s1, s2, s3]]
Examples
triangle(12, 6) ➞ [[3, 4, 5]]
triangle(45, 97.42786) ➞ [[15, 15, 15]]
triangle(70, 210) ➞ [[17, 25, 28], [20, 21, 29]]
triangle(3, 0.43301) ➞ [[1, 1, 1]]
Notes
def triangle(p,a):
r=[]
s=p/2
for x in range(1,p//2+1):
for y in range(int(s-x+1),p//2+1):
z=p-x-y
if round((s*(s-x)*(s-y)*(s-z))**.5,5)==a:
new=sorted((x,y,z))
if new not in r:
r.append(new)
return sorted(r)
"""
def triangle(perimeter, area):
    """Return all integer-sided triangles matching ``perimeter`` and ``area``.

    Each result is a sorted ``[s1, s2, s3]`` list; results appear in the
    order they are first found (ascending shortest side for these inputs).
    Areas are compared after rounding to 3 decimals, matching the original
    tolerance for inputs such as ``triangle(3, 0.43301)``.
    """
    half = perimeter // 2
    s = perimeter / 2  # semi-perimeter for Heron's formula
    target = round(area, 3)
    found = []
    for a in range(1, half + 1):
        # Starting b at half + 1 - a keeps a + b > s, so every Heron factor
        # below is non-negative and the square root stays real.
        for b in range(half + 1 - a, half + 1):
            c = perimeter - a - b
            heron = (s * (s - a) * (s - b) * (s - c)) ** 0.5
            if round(heron, 3) == target:
                sides = sorted((a, b, c))
                # Deduplicate permutations of the same triangle.
                if sides not in found:
                    found.append(sides)
    return found
##(p/2>a) and (p/2>b) and(p/2>c) and
#triangle(3, 0.43301) #, [[1, 1, 1]])
#triangle(201, 49.99937) #, [[1, 100, 100]])
#triangle(98, 420) #, [[24, 37, 37], [25, 34, 39], [29, 29, 40]])
#triangle(70, 210) #, [[17, 25, 28], [20, 21, 29]])
#triangle(30, 30) #, [[5, 12, 13]])
triangle(1792, 55440) #, [[170, 761, 861], [291, 626, 875]])
#triangle(150, 420) #, [[26, 51, 73]])
#triangle(864, 23760) #, [[132, 366, 366], [135, 352, 377]]) | 35.12 | 225 | 0.490888 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,212 | 0.686297 |
f68047cd370902e0716ae55685d30c821200f866 | 4,232 | py | Python | src/my_resnet.py | fahim19dipu/Seed-classifier | 8acce2be9482cb8d8c11fa14433f9b4f8d4d92ba | [
"MIT"
] | null | null | null | src/my_resnet.py | fahim19dipu/Seed-classifier | 8acce2be9482cb8d8c11fa14433f9b4f8d4d92ba | [
"MIT"
] | null | null | null | src/my_resnet.py | fahim19dipu/Seed-classifier | 8acce2be9482cb8d8c11fa14433f9b4f8d4d92ba | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Jul 4 17:01:28 2021
@author: fahim
"""
from keras.models import Model
from keras.layers import Input, Add, Activation, ZeroPadding2D, BatchNormalization, Conv2D, AveragePooling2D, MaxPooling2D
from keras.initializers import glorot_uniform
def identity_block(X, f, filters, stage, block):
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
F1, F2, F3 = filters
X_shortcut = X
X = Conv2D(filters=F1, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2a', kernel_initializer=glorot_uniform(seed=0))(X)
X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
X = Activation('relu')(X)
X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same', name=conv_name_base + '2b', kernel_initializer=glorot_uniform(seed=0))(X)
X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
X = Activation('relu')(X)
X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2c', kernel_initializer=glorot_uniform(seed=0))(X)
X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)
X = Add()([X, X_shortcut])# SKIP Connection
X = Activation('relu')(X)
return X
def convolutional_block(X, f, filters, stage, block, s=2):
    """ResNet convolutional block: like identity_block, but the shortcut
    path has its own strided 1x1 convolution so the residual can change
    spatial size (stride ``s``) and channel count.
    """
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    F1, F2, F3 = filters
    X_shortcut = X
    X = Conv2D(filters=F1, kernel_size=(1, 1), strides=(s, s), padding='valid', name=conv_name_base + '2a', kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
    X = Activation('relu')(X)
    X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same', name=conv_name_base + '2b', kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
    X = Activation('relu')(X)
    X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2c', kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)
    # Projection shortcut: match the main path's shape before the addition.
    X_shortcut = Conv2D(filters=F3, kernel_size=(1, 1), strides=(s, s), padding='valid', name=conv_name_base + '1', kernel_initializer=glorot_uniform(seed=0))(X_shortcut)
    X_shortcut = BatchNormalization(axis=3, name=bn_name_base + '1')(X_shortcut)
    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)
    return X
def ResNet50(input_shape=(224, 224, 3)):
    """Build a ResNet-50 feature extractor (stages 1-5 + average pooling).

    Returns an uncompiled Keras Model; no classification head is attached.
    Fix: the original had a duplicated assignment ``X = X = ...`` at the
    start of stage 5 (harmless at runtime, but a typo).
    """
    X_input = Input(input_shape)
    X = ZeroPadding2D((3, 3))(X_input)
    # Stage 1: strided 7x7 convolution + max pooling.
    X = Conv2D(64, (7, 7), strides=(2, 2), name='conv1', kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name='bn_conv1')(X)
    X = Activation('relu')(X)
    X = MaxPooling2D((3, 3), strides=(2, 2))(X)
    # Stage 2: one convolutional block + two identity blocks.
    X = convolutional_block(X, f=3, filters=[64, 64, 256], stage=2, block='a', s=1)
    X = identity_block(X, 3, [64, 64, 256], stage=2, block='b')
    X = identity_block(X, 3, [64, 64, 256], stage=2, block='c')
    # Stage 3.
    X = convolutional_block(X, f=3, filters=[128, 128, 512], stage=3, block='a', s=2)
    X = identity_block(X, 3, [128, 128, 512], stage=3, block='b')
    X = identity_block(X, 3, [128, 128, 512], stage=3, block='c')
    X = identity_block(X, 3, [128, 128, 512], stage=3, block='d')
    # Stage 4.
    X = convolutional_block(X, f=3, filters=[256, 256, 1024], stage=4, block='a', s=2)
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='b')
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='c')
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='d')
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='e')
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='f')
    # Stage 5 (duplicated "X = X =" assignment removed).
    X = convolutional_block(X, f=3, filters=[512, 512, 2048], stage=5, block='a', s=2)
    X = identity_block(X, 3, [512, 512, 2048], stage=5, block='b')
    X = identity_block(X, 3, [512, 512, 2048], stage=5, block='c')
    X = AveragePooling2D(pool_size=(2, 2), padding='same')(X)
    model = Model(inputs=X_input, outputs=X, name='ResNet50')
    return model
9c8515789c228e61ad777a8d32e3b11f40b6960c | 808 | py | Python | integration-test/630-bus-routes-z12.py | slachiewicz/vector-datasource | fb37603ba264be05839992f40eba1b5ae8a9c290 | [
"MIT"
] | null | null | null | integration-test/630-bus-routes-z12.py | slachiewicz/vector-datasource | fb37603ba264be05839992f40eba1b5ae8a9c290 | [
"MIT"
] | 2 | 2021-03-26T00:41:17.000Z | 2021-12-13T20:49:38.000Z | integration-test/630-bus-routes-z12.py | slachiewicz/vector-datasource | fb37603ba264be05839992f40eba1b5ae8a9c290 | [
"MIT"
] | null | null | null | # block between mission & 6th and howard & 5th in SF.
# appears to have lots of buses.
# https://www.openstreetmap.org/way/88572932 -- Mission St
# https://www.openstreetmap.org/relation/3406710 -- 14X to Daly City
# https://www.openstreetmap.org/relation/3406709 -- 14X to Downtown
# https://www.openstreetmap.org/relation/3406708 -- 14R to Mission
# https://www.openstreetmap.org/relation/3000713 -- 14R to Downtown
# ... and many more bus route relations
z, x, y = (16, 10484, 25329)
# test that at least one is present in tiles up to z12
while z >= 12:
    assert_has_feature(
        z, x, y, 'roads',
        { 'is_bus_route': True })
    # Fix: use floor division so the parent-tile coordinates stay integers
    # on Python 3 ("/" would yield floats there; "//" is identical on Python 2).
    z, x, y = (z-1, x//2, y//2)
# but that none are present in the parent tile at z11
assert_no_matching_feature(
    z, x, y, 'roads',
    { 'is_bus_route': True })
| 35.130435 | 68 | 0.67698 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 599 | 0.741337 |
9c85502891b8c1bec06144b4cf625c033b9b3990 | 1,137 | py | Python | bioinf/cli.py | szymanskir/bioinf | e55adf696d0116845f26901decb93a771ffa1489 | [
"MIT"
] | 1 | 2021-01-19T18:18:21.000Z | 2021-01-19T18:18:21.000Z | bioinf/cli.py | szymanskir/bioinf | e55adf696d0116845f26901decb93a771ffa1489 | [
"MIT"
] | null | null | null | bioinf/cli.py | szymanskir/bioinf | e55adf696d0116845f26901decb93a771ffa1489 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Console script for bioinf."""
import sys
import click
from .sequence import Sequence
from .sequence_alignment import NeedlemanWunschSequenceAlignmentAlgorithm
from .utils import read_config, read_sequence
@click.group()
def main(args=None):
    """Console script for bioinf."""
    # Entry-point command group; subcommands (e.g. ``align``) attach
    # themselves via ``@main.command()``.
@main.command()
@click.option("-a", type=click.Path(exists=True), required=True)
@click.option("-b", type=click.Path(exists=True), required=True)
@click.option("-c", type=click.Path(exists=True), required=True)
@click.option("-o", type=click.Path())
def align(a: str, b: str, c: str, o: str):
    # Align the sequences in files ``a`` and ``b`` with Needleman-Wunsch,
    # using the scoring configuration in ``c``.  The result is written to
    # ``o`` when given, otherwise echoed to stdout.
    try:
        left_sequence: Sequence = read_sequence(a)
        right_sequence: Sequence = read_sequence(b)
        config = read_config(c)
        algorithm = NeedlemanWunschSequenceAlignmentAlgorithm(config)
        result = algorithm.align(left_sequence, right_sequence)
        if o:
            with open(o, "w") as f:
                f.write(str(result))
        else:
            click.echo(result)
    except Exception as e:
        # CLI boundary: report any failure as a plain message instead of a
        # traceback.  NOTE(review): this also exits with status 0 on error —
        # confirm that is intended.
        click.echo(str(e))
if __name__ == "__main__":
    # Delegate the process exit code to click's group entry point.
    sys.exit(main())  # pragma: no cover
| 29.153846 | 73 | 0.66139 | 0 | 0 | 0 | 0 | 828 | 0.728232 | 0 | 0 | 134 | 0.117854 |
9c85e485ac5021974344ec109d1e1572822b9624 | 703 | py | Python | package_one/tests/test_integer_adder.py | afaquejam/AwesomeApp | d86cc29b4047d9341e6e368530e922f28daad309 | [
"MIT"
] | 4 | 2018-07-28T01:18:55.000Z | 2019-04-04T09:32:19.000Z | package_one/tests/test_integer_adder.py | afaquejam/AwesomeApp | d86cc29b4047d9341e6e368530e922f28daad309 | [
"MIT"
] | null | null | null | package_one/tests/test_integer_adder.py | afaquejam/AwesomeApp | d86cc29b4047d9341e6e368530e922f28daad309 | [
"MIT"
] | null | null | null | import pytest
from package_one.module_one import IntegerAdder
@pytest.fixture
def adder():
    """Yield-style fixture: code before ``yield`` is set-up, code after
    runs as tear-down once the test finishes."""
    print("Test set-up!")
    yield IntegerAdder()
    print("Test tear-down")
def test_integer_adder(adder):
    """The ``adder`` fixture injects a fresh IntegerAdder instance."""
    assert adder.add(1, 2) == 3
"""
In case you'd like to declare a fixture that executes only once per module, then declare a fixture like this:
@pytest.fixture(scope="module")
"""
@pytest.mark.parametrize(
    "operand_one, operand_two, expected_result",
    [
        (1, 2, 3),
        (10, 20, 30),
        (-5, -10, -15)
    ]
)
def test_integer_adder_complex(
    adder, operand_one, operand_two, expected_result
):
    """Each parameter tuple above runs as its own test case."""
    assert adder.add(operand_one, operand_two) == expected_result
| 22.677419 | 109 | 0.671408 | 0 | 0 | 91 | 0.129445 | 422 | 0.600284 | 0 | 0 | 222 | 0.315789 |
9c8686e93160db726ab6ec2f606cf595263e00fa | 1,179 | py | Python | tests/local/test_json.py | abarisain/mopidy | 4026e16996280016ceadfe98bd48f574306e2cef | [
"Apache-2.0"
] | 2 | 2015-07-09T09:36:26.000Z | 2019-10-05T04:13:19.000Z | tests/local/test_json.py | abarisain/mopidy | 4026e16996280016ceadfe98bd48f574306e2cef | [
"Apache-2.0"
] | null | null | null | tests/local/test_json.py | abarisain/mopidy | 4026e16996280016ceadfe98bd48f574306e2cef | [
"Apache-2.0"
] | 1 | 2019-10-05T04:13:10.000Z | 2019-10-05T04:13:10.000Z | from __future__ import unicode_literals
import unittest
from mopidy.local import json
from mopidy.models import Ref
class BrowseCacheTest(unittest.TestCase):
    """Tests for the private _BrowseCache virtual-directory index."""
    def setUp(self):
        # Three track URIs spanning two nested virtual directories
        # (foo and foo/bar).
        self.uris = [b'local:track:foo/bar/song1',
                     b'local:track:foo/bar/song2',
                     b'local:track:foo/song3']
        self.cache = json._BrowseCache(self.uris)
    def test_lookup_root(self):
        """The root listing contains only the top-level directory."""
        expected = [Ref.directory(uri='local:directory:foo', name='foo')]
        self.assertEqual(expected, self.cache.lookup('local:directory'))
    def test_lookup_foo(self):
        """'foo' lists its subdirectory followed by its direct track."""
        expected = [Ref.directory(uri='local:directory:foo/bar', name='bar'),
                    Ref.track(uri=self.uris[2], name='song3')]
        self.assertEqual(expected, self.cache.lookup('local:directory:foo'))
    def test_lookup_foo_bar(self):
        """'foo/bar' lists its two tracks."""
        expected = [Ref.track(uri=self.uris[0], name='song1'),
                    Ref.track(uri=self.uris[1], name='song2')]
        self.assertEqual(
            expected, self.cache.lookup('local:directory:foo/bar'))
    def test_lookup_foo_baz(self):
        """Unknown directories yield an empty listing."""
        self.assertEqual([], self.cache.lookup('local:directory:foo/baz'))
| 35.727273 | 77 | 0.641221 | 1,058 | 0.897371 | 0 | 0 | 0 | 0 | 0 | 0 | 245 | 0.207803 |
9c86a8fde028ad53edc3558eed458f5bce3f030f | 2,214 | py | Python | test/test_oximachine.py | ltalirz/oximachinerunner | ca8092a8b247216cb98b7d308862dba184e27f1e | [
"MIT"
] | null | null | null | test/test_oximachine.py | ltalirz/oximachinerunner | ca8092a8b247216cb98b7d308862dba184e27f1e | [
"MIT"
] | null | null | null | test/test_oximachine.py | ltalirz/oximachinerunner | ca8092a8b247216cb98b7d308862dba184e27f1e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# pylint:disable=missing-module-docstring, missing-function-docstring
import os
from oximachinerunner import OximachineRunner
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
def test_oximachine():
    """Smoke-test oxidation-state predictions for the default and MOF models."""

    def _predict(runner, relative_path):
        # All fixture structures live relative to the repository root.
        return runner.run_oximachine(os.path.join(THIS_DIR, "..", relative_path))

    runner = OximachineRunner()

    output = _predict(runner, "oximachinerunner/assets/ACODAA.cif")
    assert len(output) == 5
    assert output["prediction"] == [2, 2]
    assert output["metal_indices"] == [0, 1]
    assert output["metal_symbols"] == ["Fe", "Fe"]

    assert _predict(runner, "examples/guvzee.cif")["prediction"] == [3] * 18
    assert _predict(runner, "examples/GUVZII_clean.cif")["prediction"] == [2] * 18
    assert _predict(runner, "examples/IDIWOH_clean.cif")["prediction"] == [4] * 4
    assert _predict(runner, "examples/IDIWIB_clean.cif")["prediction"] == [3] * 4

    # Repeat a subset of the structures with the MOF-specific model.
    mof_runner = OximachineRunner(modelname="mof")
    assert _predict(mof_runner, "examples/IDIWIB_clean.cif")["prediction"] == [3] * 4
    assert _predict(mof_runner, "examples/IDIWOH_clean.cif")["prediction"] == [4] * 4
    output = _predict(mof_runner, "oximachinerunner/assets/ACODAA.cif")
    assert len(output) == 5
    assert output["prediction"] == [2, 2]
    assert output["metal_indices"] == [0, 1]
    assert output["metal_symbols"] == ["Fe", "Fe"]
| 23.305263 | 87 | 0.531165 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 552 | 0.249322 |
9c875ae7c2afe703c37e9e06e4b3067ddcb1e099 | 11,643 | py | Python | src/OTLMOW/OTLModel/Datatypes/KlVerlichtingstoestelModelnaam.py | davidvlaminck/OTLClassPython | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | 2 | 2022-02-01T08:58:11.000Z | 2022-02-08T13:35:17.000Z | src/OTLMOW/OTLModel/Datatypes/KlVerlichtingstoestelModelnaam.py | davidvlaminck/OTLMOW | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | null | null | null | src/OTLMOW/OTLModel/Datatypes/KlVerlichtingstoestelModelnaam.py | davidvlaminck/OTLMOW | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | null | null | null | # coding=utf-8
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde
# Generated with OTLEnumerationCreator. To modify: extend, do not edit
class KlVerlichtingstoestelModelnaam(KeuzelijstField):
    """De modelnaam van het verlichtingstoestel."""
    naam = 'KlVerlichtingstoestelModelnaam'
    label = 'Verlichtingstoestel modelnaam'
    objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#KlVerlichtingstoestelModelnaam'
    definition = 'De modelnaam van het verlichtingstoestel.'
    codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlVerlichtingstoestelModelnaam'
    # Every option shares the same concept-URI prefix, with the key itself
    # as the final path segment.  The table lists (invulwaarde, label,
    # definitie); definitie is None for entries that do not define one.
    options = {
        code: KeuzelijstWaarde(
            invulwaarde=code,
            label=label,
            objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/'
                      'KlVerlichtingstoestelModelnaam/' + code,
            **({} if definitie is None else {'definitie': definitie})
        )
        for code, label, definitie in [
            ('ARC', 'ARC', None),
            ('Belgica', 'Belgica', None),
            ('Calypso', 'Calypso', None),
            ('Corus', 'Corus', None),
            ('DTN', 'DTN', None),
            ('Evolo', 'Evolo', None),
            ('Focal', 'Focal', None),
            ('GSM', 'GSM', None),
            ('GTMB', 'GTMB', None),
            ('GTNB', 'GTNB', None),
            ('GZM', 'GZM', None),
            ('Gema', 'Gema', None),
            ('HCI-TS', 'HCI-TS', None),
            ('Iridium', 'Iridium', None),
            ('MNF300', 'MNF300', None),
            ('MWF230', 'MWF230', None),
            ('MY11', 'MY11', None),
            ('Neos', 'Neos', None),
            ('Onyx', 'Onyx', None),
            ('RT3NB', 'RT3NB', None),
            ('RT3SB', 'RT3SB', None),
            ('RXN', 'RXN', None),
            ('RXS', 'RXS', None),
            ('Radial', 'Radial', None),
            ('SRS201', 'SRS201', None),
            ('Safir', 'Safir', None),
            ('Saturnus', 'Saturnus', None),
            ('Squalo', 'Squalo', None),
            ('Syntra', 'Syntra', None),
            ('VTP', 'VTP', None),
            ('Z18', 'Z18', None),
            ('Z2', 'Z2', None),
            ('Z21', 'Z21', None),
            ('ampera', 'Ampera', 'Ampera'),
            ('andere', 'andere', None),
            ('brugleuning', 'brugleuning', None),
            ('clear-field', 'ClearField', 'ClearField'),
            ('digi-street', 'DigiStreet', 'DigiStreet'),
            ('izylum', 'Izylum', 'Izylum'),
            ('luma', 'Luma', 'Luma'),
            ('lumi-street', 'LumiStreet', 'LumiStreet'),
            ('projector', 'projector', None),
            ('teceo', 'Teceo', 'Teceo'),
        ]
    }
| 76.098039 | 149 | 0.562913 | 11,411 | 0.980074 | 0 | 0 | 0 | 0 | 0 | 0 | 5,370 | 0.461221 |
9c878638bf1045523352e1ded796f50059e55f03 | 4,007 | py | Python | api/src/domain/object/user_interface/modal/Modal.py | SamuelJansen/Application | 6ab3202fb7de12782510f477a3e74d8800ea2927 | [
"MIT"
] | null | null | null | api/src/domain/object/user_interface/modal/Modal.py | SamuelJansen/Application | 6ab3202fb7de12782510f477a3e74d8800ea2927 | [
"MIT"
] | null | null | null | api/src/domain/object/user_interface/modal/Modal.py | SamuelJansen/Application | 6ab3202fb7de12782510f477a3e74d8800ea2927 | [
"MIT"
] | null | null | null | import UserInterface
import surfaceFunction, eventFunction
print('Modal library imported')
class Modal(UserInterface.UserInterface):
    """A UserInterface element that is displayed on top of the application.

    A Modal is created from a *father* widget but is re-attached to the
    application floor; the original father is kept as the modal's *tutor*
    (see getModalFatherAttributes).
    """

    def __init__(self, name, size, father,
        type = None,
        position = None,
        text = None,
        textPosition = None,
        fontSize = None,
        scale = None,
        padding = None,
        onLeftClick = None,
        onMenuResolve = None,
        onMessageResolve = None,
        noImage = True,
        surfaceColor = None,
        imagePath = None,
        audioPath = None
        ):
        # Re-parent to the application floor and remember the original
        # father as the tutor before initialising the base class.
        name, position, padding, originalPadding, keepFatherImage, father, tutor = \
            self.getModalFatherAttributes(name, position, padding, father)
        UserInterface.UserInterface.__init__(self, name, position, size, father,
            type = type,
            text = text,
            textPosition = textPosition,
            fontSize = fontSize,
            scale = scale,
            padding = padding,
            onLeftClick = onLeftClick,
            onMenuResolve = onMenuResolve,
            noImage = noImage,
            surfaceColor = surfaceColor,
            imagePath = imagePath,
            audioPath = audioPath  # BUG FIX: imagePath was passed here by mistake
            )
        # NOTE(review): onMessageResolve is accepted but never forwarded to
        # the base class -- confirm whether UserInterface should receive it.
        self.setModalTutorAttributes(originalPadding, keepFatherImage, tutor)

    def getModalFatherAttributes(self, name, position, padding, father):
        """Derive the modal's attributes from its original parent.

        Returns (name, position, padding, originalPadding, keepFatherImage,
        father, tutor): the modal's name is suffixed with the father's name,
        the father's image is kept only when no explicit position was given,
        and the returned father is the application floor while the original
        father becomes the tutor.
        """
        name += f'.{father.name}'
        if not position :
            # No explicit position: cover the father and keep its image.
            position = father.getAbsoluteOriginalPosition().copy()
            keepFatherImage = True
        else :
            keepFatherImage = False
        # Stash the father-relative padding; the original value is restored
        # later by setModalTutorAttributes.
        padding, originalPadding = surfaceFunction.stashPadding(padding, father)
        tutor = father
        father = father.application.getFloor()
        return name, position, padding, originalPadding, keepFatherImage, father, tutor

    def setModalTutorAttributes(self, originalPadding, keepFatherImage, tutor):
        """Restore the stashed padding and register the tutor on the handler."""
        self.padding = originalPadding
        self.handler.setTutor(tutor)
        if keepFatherImage :
            # Blit the tutor's image at the padded origin so the modal
            # visually replaces it.
            self.handler.addTutorImage(self.tutor, surfaceFunction.getPositionPadded([0, 0], self.padding))

    def getName(self):
        """Return the base name (without the appended father-name suffix)."""
        return self.name.split('.')[0]
| 48.865854 | 268 | 0.639132 | 3,913 | 0.976541 | 0 | 0 | 0 | 0 | 0 | 0 | 1,795 | 0.447966 |
9c879e5d1943efd11dd7e5215bb6e0131412a141 | 209 | py | Python | ospath/ospath_expanduser.py | dineshkumar2509/learning-python | e8af11ff0b396da4c3f2cfe21d14131bae4b2adb | [
"MIT"
] | 86 | 2015-06-13T16:53:55.000Z | 2022-03-24T20:56:42.000Z | ospath/ospath_expanduser.py | pei-zheng-yi/learning-python | 55e350dfe44cf04f7d4408e76e72d2f467bd42ce | [
"MIT"
] | 9 | 2015-05-27T07:52:44.000Z | 2022-03-29T21:52:40.000Z | ospath/ospath_expanduser.py | pei-zheng-yi/learning-python | 55e350dfe44cf04f7d4408e76e72d2f467bd42ce | [
"MIT"
] | 124 | 2015-12-10T01:17:18.000Z | 2021-11-08T04:03:38.000Z | #!/usr/bin/env python
# encoding: utf-8
"""Expand tilde in filenames.

Demonstrates os.path.expanduser: a bare "~" expands to the current
user's home directory, while "~name" expands to that account's home
directory (the path is returned unchanged if the account is unknown).

Note: written for Python 2 (print statement).
"""
import os.path
# The empty name produces the bare "~" (current user); the other two
# show per-account expansion.
for user in ['', 'dhellmann', 'postgres']:
    lookup = '~' + user
    print lookup, ':', os.path.expanduser(lookup)
| 17.416667 | 49 | 0.62201 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 100 | 0.478469 |
9c8a9179f77e608d06dcb7f9c2663a4c5550e626 | 5,103 | py | Python | tests/test_repo.py | OmegaDroid/git-hooks | d890621e890796d0ab800267493444000c75214e | [
"MIT"
] | 2 | 2016-01-08T17:57:59.000Z | 2016-08-10T00:54:35.000Z | tests/test_repo.py | OmegaDroid/git-hooks | d890621e890796d0ab800267493444000c75214e | [
"MIT"
] | 4 | 2016-01-09T13:50:37.000Z | 2016-08-25T10:50:40.000Z | tests/test_repo.py | wildfish/git-hooks | d890621e890796d0ab800267493444000c75214e | [
"MIT"
] | null | null | null | import string
from unittest2 import TestCase
import os
from hypothesis import given
from hypothesis.strategies import text, lists
from mock import patch, Mock
from githooks import repo
class FakeDiffObject(object):
    """Lightweight stand-in for a GitPython diff entry.

    Provides only the four attributes the code under test reads: the
    two paths plus the new-file and deleted-file flags.
    """

    def __init__(self, a_path, b_path, new, deleted):
        self.a_path, self.b_path = a_path, b_path
        self.new_file, self.deleted_file = new, deleted
class RepoGet(TestCase):
    """Tests for repo.get()."""

    @patch('githooks.repo.git')
    def test_result_is_repo_created_from_the_parent_of_script_directory(self, git_mock):
        # repo.get() should build the git.Repo from the current working
        # directory and let GitPython search parent directories for .git.
        git_mock.Repo = Mock(return_value='git repo')
        repo_obj = repo.get()
        self.assertEqual('git repo', repo_obj)
        git_mock.Repo.assert_called_once_with(
            os.getcwd(),
            search_parent_directories=True,
        )
class RepoRepoRoot(TestCase):
    """Tests for repo.repo_root()."""

    @patch('githooks.repo.get')
    def test_result_is_the_parent_directory_of_the_git_directory(self, get_mock):
        # The repo root is expected to be the parent of the repo's git_dir.
        git_dir = os.path.dirname(__file__)
        result = Mock()
        result.git_dir = git_dir
        get_mock.return_value = result
        self.assertEqual(os.path.dirname(git_dir), repo.repo_root())
class RepoUntrackedFiles(TestCase):
    """Tests for repo.untracked_files()."""

    @patch('githooks.repo.get')
    def test_result_is_untracked_files_from_the_repo_object(self, get_mock):
        git_dir = os.path.dirname(__file__)
        result = Mock()
        # GitPython reports untracked files relative to the repo root.
        result.untracked_files = ['untracked files']
        result.git_dir = git_dir
        get_mock.return_value = result
        files = repo.untracked_files()
        # The helper must convert them to absolute paths under the root.
        self.assertListEqual([os.path.join(repo.repo_root(), 'untracked files')], files)
class RepoModifiedFiles(TestCase):
    """Property-based tests for repo.modified_files()."""

    # Three independent lists of short ASCII file names: modified, added
    # and deleted entries for the fake diff below.
    @given(
        lists(text(min_size=1, max_size=10, alphabet=string.ascii_letters), max_size=10),
        lists(text(min_size=1, max_size=10, alphabet=string.ascii_letters), max_size=10),
        lists(text(min_size=1, max_size=10, alphabet=string.ascii_letters), max_size=10),
    )
    def test_result_is_the_absolute_paths_to_all_changed_but_not_new_or_deleted_files(self, mod, new, deleted):
        # Modified entries have both paths set; new/deleted entries are
        # flagged so modified_files() must filter them out.
        mod_diffs = [FakeDiffObject(f, f, False, False) for f in mod]
        new_diffs = [FakeDiffObject(None, f, True, False) for f in new]
        deleted_diffs = [FakeDiffObject(None, f, False, True) for f in deleted]
        with patch('githooks.repo.get') as get_mock:
            git_dir = os.path.dirname(__file__)
            result = Mock()
            result.head.commit.diff = Mock(return_value=mod_diffs + new_diffs + deleted_diffs)
            result.git_dir = git_dir
            get_mock.return_value = result
            files = repo.modified_files()
            # Only the modified entries survive, as absolute paths.
            self.assertEqual([os.path.join(repo.repo_root(), f) for f in mod], files)
            result.head.commit.diff.assert_called_once_with()
class RepoAddedFiles(TestCase):
    """Property-based tests for repo.added_files()."""

    # Three independent lists of short ASCII file names: modified, added
    # and deleted entries for the fake diff below.
    @given(
        lists(text(min_size=1, max_size=10, alphabet=string.ascii_letters), max_size=10),
        lists(text(min_size=1, max_size=10, alphabet=string.ascii_letters), max_size=10),
        lists(text(min_size=1, max_size=10, alphabet=string.ascii_letters), max_size=10),
    )
    def test_result_is_the_absolute_paths_to_all_new_but_not_modified_or_deleted_files(self, mod, new, deleted):
        # Only entries flagged new_file should be reported by added_files().
        mod_diffs = [FakeDiffObject(f, f, False, False) for f in mod]
        new_diffs = [FakeDiffObject(None, f, True, False) for f in new]
        deleted_diffs = [FakeDiffObject(None, f, False, True) for f in deleted]
        with patch('githooks.repo.get') as get_mock:
            git_dir = os.path.dirname(__file__)
            result = Mock()
            result.head.commit.diff = Mock(return_value=mod_diffs + new_diffs + deleted_diffs)
            result.git_dir = git_dir
            get_mock.return_value = result
            files = repo.added_files()
            # Only the new entries survive, as absolute paths.
            self.assertEqual([os.path.join(repo.repo_root(), f) for f in new], files)
            result.head.commit.diff.assert_called_once_with()
class RepoDeletedFiles(TestCase):
    """Property-based tests for repo.deleted_files()."""

    # Three independent lists of short ASCII file names: modified, added
    # and deleted entries for the fake diff below.
    @given(
        lists(text(min_size=1, max_size=10, alphabet=string.ascii_letters), max_size=10),
        lists(text(min_size=1, max_size=10, alphabet=string.ascii_letters), max_size=10),
        lists(text(min_size=1, max_size=10, alphabet=string.ascii_letters), max_size=10),
    )
    def test_result_is_the_absolute_paths_to_all_deleted_but_not_new_or_modified_files(self, mod, new, deleted):
        # Only entries flagged deleted_file should be reported.
        mod_diffs = [FakeDiffObject(f, f, False, False) for f in mod]
        new_diffs = [FakeDiffObject(None, f, True, False) for f in new]
        deleted_diffs = [FakeDiffObject(None, f, False, True) for f in deleted]
        with patch('githooks.repo.get') as get_mock:
            git_dir = os.path.dirname(__file__)
            result = Mock()
            result.head.commit.diff = Mock(return_value=mod_diffs + new_diffs + deleted_diffs)
            result.git_dir = git_dir
            get_mock.return_value = result
            files = repo.deleted_files()
            # Only the deleted entries survive, as absolute paths.
            self.assertEqual([os.path.join(repo.repo_root(), f) for f in deleted], files)
            result.head.commit.diff.assert_called_once_with()
| 36.978261 | 112 | 0.676073 | 4,895 | 0.95924 | 0 | 0 | 4,474 | 0.876739 | 0 | 0 | 168 | 0.032922 |
9c8b2b5dd22e630bf72cf5ab5e527547533efeed | 1,164 | py | Python | Python/Zoo/zoo.py | bill-neely/ITSE1311-1302-Spring2018 | db8cddb66d921a378bcbbd0423e9036ffc3b7a8a | [
"MIT"
] | null | null | null | Python/Zoo/zoo.py | bill-neely/ITSE1311-1302-Spring2018 | db8cddb66d921a378bcbbd0423e9036ffc3b7a8a | [
"MIT"
] | null | null | null | Python/Zoo/zoo.py | bill-neely/ITSE1311-1302-Spring2018 | db8cddb66d921a378bcbbd0423e9036ffc3b7a8a | [
"MIT"
] | null | null | null | class Zoo:
    def __init__(self, name, locations):
        """Start a zoo visit over *locations* (Location objects keyed by id)."""
        self.name = name
        # Flipped to False by exit(); presumably polled by the caller's
        # main loop -- confirm against the driver script.
        self.stillActive = True
        self.locations = locations
        # The visit always begins at the location with id/index 1.
        self.currentLocation = self.locations[1]
    def changeLocation(self, direction):
        """Move to the neighbouring location reached via *direction*.

        Looks the direction up in the current location's neighbors
        mapping; a missing direction raises KeyError.
        """
        neighborID = self.currentLocation.neighbors[direction]
        self.currentLocation = self.locations[neighborID]
    def exit(self):
        """End the visit, but only if the current location allows exiting."""
        if self.currentLocation.allowExit:
            self.stillActive = False
class Location:
    """One area of the zoo: an exhibit plus links to neighbouring areas."""

    def __init__(self, id, name, animal, neighbors, allowExit):
        self.id, self.name = id, name
        self.animal = animal
        # Mapping of direction -> id of the adjacent location.
        self.neighbors = neighbors
        # Whether the visitor may leave the zoo from this location.
        self.allowExit = allowExit
class Animal:
    """An animal described by its sound, diet and preferred shelter."""

    def __init__(self, name, soundMade, foodEaten, shelterType):
        self.name = name
        self.soundMade = soundMade
        self.foodEaten = foodEaten
        self.shelterType = shelterType

    def speak(self):
        """Describe the sound this animal makes."""
        return 'The {} sounds like: {}'.format(self.name, self.soundMade)

    def diet(self):
        """Describe what this animal eats."""
        return 'The {} eats {}'.format(self.name, self.foodEaten)

    def shelter(self):
        """Describe the shelter this animal prefers."""
        return 'The {} prefers: {}'.format(self.name, self.shelterType)
| 29.846154 | 69 | 0.628007 | 1,159 | 0.995704 | 0 | 0 | 0 | 0 | 0 | 0 | 54 | 0.046392 |
9c8c0ef5b1b94d70574d07bd18de4607864827eb | 1,056 | py | Python | src/hashing_.py | pawelmhm/recenseo.es | 026ac67cb6e146745aad1649c338ed89b83f0c63 | [
"MIT"
] | null | null | null | src/hashing_.py | pawelmhm/recenseo.es | 026ac67cb6e146745aad1649c338ed89b83f0c63 | [
"MIT"
] | 1 | 2021-03-19T22:06:54.000Z | 2021-03-19T22:06:54.000Z | src/hashing_.py | pawelmhm/recenseo.es | 026ac67cb6e146745aad1649c338ed89b83f0c63 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from hmac import HMAC
from hashlib import sha256
import random
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
def pbkd(password, salt):
    """Return the HMAC-SHA256 digest of *password* keyed with *salt*.

    password must be a string in ascii, for some reasons
    string of type unicode provokes the following error:
    "TypeError: character mapping must return integer, None or unicode"
    TODO: should we check type of string before it gets here?
    """
    return HMAC(str(password), salt, sha256).digest()


def randomSalt(num_bytes):
    """Return *num_bytes* random bytes suitable for use as a salt.

    SECURITY FIX: salts were previously drawn from the default Mersenne
    Twister PRNG, which is predictable.  SystemRandom reads from the
    operating system's CSPRNG (os.urandom) instead; stored hashes remain
    compatible because the salt is saved alongside the digest.
    """
    rng = random.SystemRandom()
    return "".join(chr(rng.randrange(256)) for _ in xrange(num_bytes))


def hash_password(plain_text):
    """Hash *plain_text* with a fresh 8-byte salt.

    Returns '<base64(salt)>,<base64(digest)>' so that check_password can
    recover the salt later.
    """
    salt = randomSalt(8)
    # BUG FIX: the old code called pbkd() 1000 times in a loop but never
    # fed one round's output into the next, so it produced exactly the
    # same digest as a single call (no key stretching, only wasted CPU).
    # A single call is behaviourally identical; real stretching would
    # also require changing check_password in lock-step.
    hashed_password = pbkd(plain_text, salt)
    return salt.encode("base64").strip() + "," + hashed_password.encode("base64").strip()


def check_password(saved_pass, plain_pass):
    """Return True when *plain_pass* matches the stored 'salt,hash' record."""
    salt, hashed_p = saved_pass.split(",")
    salt = salt.decode("base64")
    hashed_p = hashed_p.decode("base64")
    # Use a constant-time comparison when available (Python >= 2.7.7) so
    # digest bytes do not leak through timing differences.
    try:
        from hmac import compare_digest
    except ImportError:
        return hashed_p == pbkd(plain_pass, salt)
    return compare_digest(hashed_p, pbkd(plain_pass, salt))
9c8f281197fddfa1af410d31983e1c6f137a5fe2 | 2,312 | py | Python | neural-net/tensorflow/datasets.py | burntcustard/DeskBot-Zero | 5efb6bcca21eb66f88499b3671c8fda5f6517b11 | [
"MIT"
] | null | null | null | neural-net/tensorflow/datasets.py | burntcustard/DeskBot-Zero | 5efb6bcca21eb66f88499b3671c8fda5f6517b11 | [
"MIT"
] | null | null | null | neural-net/tensorflow/datasets.py | burntcustard/DeskBot-Zero | 5efb6bcca21eb66f88499b3671c8fda5f6517b11 | [
"MIT"
] | null | null | null |
# Useful tutorial on tensorflow.contrib.data:
# https://kratzert.github.io/2017/06/15/example-of-tensorflows-new-input-pipeline.html
import glob # Used to generate image filename list
import tensorflow as tf
def input_parser(image_path, label):
    """
    Read the image at *image_path* and prepare it for the network; the
    label is passed through unchanged (no one-hot encoding happens here).
    See: https://www.tensorflow.org/api_guides/python/image
    and: https://www.tensorflow.org/programmers_guide/datasets
    """
    image = tf.read_file(image_path)
    image = tf.image.decode_image(image)
    # decode_image yields an unknown static shape; pin it to the expected
    # 128x128 RGB size so downstream ops can infer shapes.
    image = tf.reshape(image, [128, 128, 3])
    #image = image[:, :, ::-1] # BGE -> RGB conversion if needed?
    image = tf.image.rgb_to_grayscale(image)
    #image = tf.image.resize_images(image, [32, 32])
    # convert_image_dtype scales integer pixel values into [0, 1] floats.
    image = tf.image.convert_image_dtype(image, tf.float32)
    return image, label
def create(training_validation_ratio=0.6, batch_size=4, image_glob="../img/*.png"):
    """Build training/evaluation datasets from labelled image files.

    Each file name is expected to contain '-d<int>-'; that integer (a
    distance) is used directly as the classification label.

    Args:
        training_validation_ratio: fraction of images used for training;
            the remainder is kept for evaluation.
        batch_size: batch size applied to both datasets.
        image_glob: glob pattern locating the labelled .png images
            (parameterised; defaults to the previously hard-coded ../img).

    Returns:
        (training_dataset, evaluation_dataset, num_classes).  Note the old
        docstring incorrectly described a single return value.

    Raises:
        ValueError: if no files match *image_glob*.
    """
    # Sort for a deterministic train/validation split; glob order is
    # filesystem-dependent.
    filenames = sorted(glob.glob(image_glob))
    if not filenames:
        # Previously this crashed later with an unhelpful max() error.
        raise ValueError("No images found matching %r" % image_glob)
    # Extract the distance label encoded in the file name after '-d'.
    imgLabels = [int(name.split("-d", 1)[1].split('-', 1)[0]) for name in filenames]
    # Label is currently just the distance integer.  Grouping (e.g. a
    # "distance between 5 and 10cm" class) would be done here if needed.
    num_classes = max(imgLabels)
    numTrainImgs = int(len(filenames) * training_validation_ratio)
    training_paths = tf.constant(filenames[:numTrainImgs])
    training_labels = tf.constant(imgLabels[:numTrainImgs])
    evaluation_paths = tf.constant(filenames[numTrainImgs:])
    evaluation_labels = tf.constant(imgLabels[numTrainImgs:])
    # Build the tf.data pipelines: parse each image, then batch.
    training_dataset = tf.data.Dataset.from_tensor_slices(
        (training_paths, training_labels)
    )
    training_dataset = training_dataset.map(input_parser)
    training_dataset = training_dataset.batch(batch_size)
    evaluation_dataset = tf.data.Dataset.from_tensor_slices(
        (evaluation_paths, evaluation_labels)
    )
    evaluation_dataset = evaluation_dataset.map(input_parser)
    evaluation_dataset = evaluation_dataset.batch(batch_size)
    return training_dataset, evaluation_dataset, num_classes
| 39.186441 | 86 | 0.719291 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 846 | 0.365917 |
9c90a31f9f59a4bbc673d9e0cd9b46fab6dc56ae | 2,999 | py | Python | uis/widget_formatter.py | AlberLC/qt-app | 96fe79293de7f570a505845ed4ecd73f9f55572d | [
"MIT"
] | null | null | null | uis/widget_formatter.py | AlberLC/qt-app | 96fe79293de7f570a505845ed4ecd73f9f55572d | [
"MIT"
] | null | null | null | uis/widget_formatter.py | AlberLC/qt-app | 96fe79293de7f570a505845ed4ecd73f9f55572d | [
"MIT"
] | null | null | null | import os
import subprocess
import pathlib
def reemplazar(string):
    """Rewrite one line of pyside2-uic output ('reemplazar' = 'replace').

    Widget references gain the 'self.w.' prefix, the generated 'Form'
    root is renamed to 'self.w.centralWidget', and double quotes become
    single quotes.  The substitution order matters and is preserved.
    """
    for old, new in (
        ('self.', 'self.w.'),
        ('Form"', 'self.w.centralWidget"'),
        ('Form.', 'self.w.centralWidget.'),
        ('Form)', 'self.w.centralWidget)'),
        ('"', "'"),
    ):
        string = string.replace(old, new)
    return string
# Interactive tool: take a Qt Designer .ui file, run pyside2-uic on it and
# reshape the generated code into a project-style widget class.
try:
    url_archivo = input('Archivo: ').strip().strip('"').strip("'")
    nombre, extension = os.path.splitext(os.path.basename(url_archivo))
    nombre_clase = nombre.title().replace('_', '')
    if extension == '.ui':
        # Generate the Python code from the .ui file.
        url_archivo_py = pathlib.Path(url_archivo).with_suffix('.py')
        # SECURITY/ROBUSTNESS FIX: use an argument list instead of an
        # f-string with shell=True, so paths containing spaces or shell
        # metacharacters cannot break (or inject into) the command.
        subprocess.run(['pyside2-uic', url_archivo, '-o', str(url_archivo_py)],
                       stdout=subprocess.PIPE)
    else:
        # BUG FIX: a non-.ui input used to crash later with NameError
        # (url_archivo_py was never assigned); fail with a clear message.
        raise ValueError(f'Expected a .ui file, got: {url_archivo}')
    # ---------- Reformat the generated .py file ----------
    lineas_resultado = []
    # Read the generated file line by line.
    with open(url_archivo_py) as file:
        lineas = file.readlines()
    i = 0
    # Skip the header comments at the top of the generated file.
    while 'from PySide2 import' not in lineas[i]:
        i += 1
    # Copy everything up to the generated class definition.
    while 'class' not in lineas[i]:
        lineas_resultado.append(reemplazar(lineas[i]))
        i += 1
    # Emit the replacement class header and constructor.
    lineas_resultado.append("\n")
    lineas_resultado.append(f"class {nombre_clase}:\n")
    lineas_resultado.append(f"    def __init__(self, window):\n")
    lineas_resultado.append(f"        self.w = window\n")
    lineas_resultado.append("\n")
    lineas_resultado.append(f"        self.setup_gui()\n")
    lineas_resultado.append("\n")
    lineas_resultado.append(f"    def setup_gui(self):\n")
    lineas_resultado.append(f"        self.w.centralWidget = QtWidgets.QWidget(self.w)\n")
    lineas_resultado.append(f"        self.w.centralWidget.setObjectName('centralWidget')\n")
    # Skip the next 3 lines (def, Form.set, Form.resize) and move on.
    i += 4
    # Copy until the first blank line.
    while lineas[i] != '\n':
        lineas_resultado.append(reemplazar(lineas[i]))
        i += 1
    # Attach the widget to the view.
    lineas_resultado.append('        self.w.setCentralWidget(self.w.centralWidget)\n')
    # Copy the blank line itself.
    lineas_resultado.append(reemplazar(lineas[i]))
    # Skip ahead to the setText() lines.
    while 'Form.' not in lineas[i]:
        i += 1
    # Skip the Form.setWindowTitle() line.
    i += 1
    # Strip the translate() wrapper from each setText() line.
    for linea in lineas[i:]:
        lineas_resultado.append(reemplazar(
            linea.replace('QtWidgets.QApplication.translate("Form", ', '').replace(', None, -1)', '')))
    lineas_resultado.append('    def connect_signals(self, controller):\n')
    lineas_resultado.append('        pass\n')
    # Overwrite the generated .py file with the reformatted code.
    with open(url_archivo_py, 'w', encoding='utf-8') as file:
        file.writelines(lineas_resultado)
except Exception as e:
    # Best-effort cleanup of the (possibly half-written) generated file.
    # BUG FIX: url_archivo_py may be unassigned if an early step failed,
    # and the file may not exist -- never let the handler itself crash.
    try:
        os.remove(url_archivo_py)
    except (NameError, OSError):
        pass
    print(f'{e.__class__.__name__}: {e}')
    input('Presione una tecla para continuar...')
| 35.282353 | 189 | 0.64955 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,293 | 0.431144 |
9c90c9812ecdd62e381c8bf6cedd2186659f9529 | 2,450 | py | Python | applications/HDF5Application/python_scripts/single_mesh_xdmf_output_process.py | AndreaVoltan/MyKratos7.0 | e977752722e8ef1b606f25618c4bf8fd04c434cc | [
"BSD-4-Clause"
] | 2 | 2020-04-30T19:13:08.000Z | 2021-04-14T19:40:47.000Z | applications/HDF5Application/python_scripts/single_mesh_xdmf_output_process.py | AndreaVoltan/MyKratos7.0 | e977752722e8ef1b606f25618c4bf8fd04c434cc | [
"BSD-4-Clause"
] | 1 | 2020-04-30T19:19:09.000Z | 2020-05-02T14:22:36.000Z | applications/HDF5Application/python_scripts/single_mesh_xdmf_output_process.py | AndreaVoltan/MyKratos7.0 | e977752722e8ef1b606f25618c4bf8fd04c434cc | [
"BSD-4-Clause"
] | 1 | 2020-06-12T08:51:24.000Z | 2020-06-12T08:51:24.000Z | import KratosMultiphysics as KM
import KratosMultiphysics.HDF5Application.temporal_output_process_factory as output_factory
import KratosMultiphysics.HDF5Application.file_utilities as file_utils
def Factory(settings, Model):
    """Return a process for writing simulation results for a single mesh to HDF5.

    It also creates the xdmf-file on the fly (when h5py is available).

    settings -- KM.Parameters wrapping the process configuration; its
                "Parameters" sub-block must contain "model_part_name" and
                may contain "file_settings".
    Model    -- the Kratos Model used to look the model part up.
    """
    if not isinstance(settings, KM.Parameters):
        raise Exception("expected input shall be a Parameters object, encapsulating a json string")
    params = settings["Parameters"]
    # setting default "file_settings": truncate on open and keep the .h5
    # files in a folder, unless the user configured these explicitly
    if not params.Has("file_settings"):
        file_params = KM.Parameters(r'''{
            "file_access_mode" : "truncate",
            "save_h5_files_in_folder" : true
        }''')
        params.AddValue("file_settings", file_params)
    else:
        if not params["file_settings"].Has("file_access_mode"):
            params["file_settings"].AddEmptyValue("file_access_mode").SetString("truncate")
        if not params["file_settings"].Has("save_h5_files_in_folder"):
            params["file_settings"].AddEmptyValue("save_h5_files_in_folder").SetBool(True)
    model_part_name = params["model_part_name"].GetString() # name of modelpart must be specified!
    #todo(msandre): collapse older partitioned scripts to their serial counterparts like this
    # Pick the partitioned helper when running with more than one process.
    if Model[model_part_name].GetCommunicator().TotalProcesses() > 1:
        factory_helper = output_factory.PartitionedTemporalOutputFactoryHelper()
    else:
        factory_helper = output_factory.TemporalOutputFactoryHelper()
    (temporal_output_process, _, list_of_results_output) = factory_helper.Execute(params, Model)
    for results_output in list_of_results_output:
        temporal_output_process.AddOutput(results_output)
    # Presumably removes stale .h5 files from earlier runs before the
    # first output -- see file_utils.DeleteOldH5Files for the details.
    temporal_output_process._initial_output.AddOutput(file_utils.DeleteOldH5Files())
    # in case the h5py-module is not installed (e.g. on clusters) we don't want it to crash the simulation!
    # => in such a case the xdmf can be created manually afterwards locally
    try:
        from KratosMultiphysics.HDF5Application.xdmf_io import XdmfOutput
        temporal_output_process.AddOutput(XdmfOutput()) # xdmf should be the last in the list
    except ImportError:
        warn_msg = "XDMF-Writing is not available,\nOnly HDF5 files are written"
        KM.Logger.PrintWarning("SingleMeshXdmfOutputProcess", warn_msg)
    return temporal_output_process
| 50 | 107 | 0.739592 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 993 | 0.405306 |
9c92c7ba102b28aaf3f497f452ff69ebb7754948 | 2,551 | py | Python | docs/rename_function.py | samaid/sdc | bcc377f430d58c2f167e09754c65abfedde263f6 | [
"BSD-2-Clause"
] | 1 | 2019-11-06T18:10:07.000Z | 2019-11-06T18:10:07.000Z | docs/rename_function.py | samaid/sdc | bcc377f430d58c2f167e09754c65abfedde263f6 | [
"BSD-2-Clause"
] | null | null | null | docs/rename_function.py | samaid/sdc | bcc377f430d58c2f167e09754c65abfedde263f6 | [
"BSD-2-Clause"
] | null | null | null | """
This script requires developers to add the following information:
1. add file and function name to srcfiles_srcfuncs
2. add file and directory name to srcdir_srcfiles
3. add expected display name for the function to display_names
"""
import os
import itertools
from shutil import copyfile
"""
Add the function names with the src file in this dictionary
If the file is already present, just add the func name in the respective values
Create new entry if the srcfile is not present
srcfiles_srcfuncs = { srcfile : [func1, func2..]}
srcfile : file containing the function that should be renamed
[func1, func2..] : list of function names that should be changed
"""
srcfiles_srcfuncs = {
"hpat_pandas_series_functions.py": ["hpat_pandas_series_append", "hpat_pandas_series_ne", "hpat_pandas_series_iloc"]
}
"""
Add the filenames and the parent directory in this dictionary
If the dir is already present in this list, just add the filename in the respective values
Create a new entry if the dir is not present in this dictionary
srcdir_srcfiles = { parentdir : [filename1, filename2..]}
parentdir : Parent directory for the file
[filename1, filename2 ..] : List of files that have the functions to be renamed
"""
srcdir_srcfiles = {
"hpat/datatypes": ["hpat_pandas_series_functions.py"],
"hpat/hiframes": ["aggregate.py", "boxing.py"]
}
# Add the function name that will replace the original name and should be displayed in documentation
# Always add new name at the ends. Do not change the order
display_names = ['append', 'ne', 'iloc']
cur_dir = os.getcwd()
# This is the dir where all the source files will be copied
src_copy_dir = os.path.join(cur_dir, "API_Doc")
if not os.path.exists(src_copy_dir):
os.mkdir(src_copy_dir)
# Copy all required srcfiles
for dir in srcdir_srcfiles:
file_list = srcdir_srcfiles[dir]
for f in file_list:
src_file = os.path.join(cur_dir, dir, f)
dst_file = os.path.join(cur_dir, "API_Doc", f)
copyfile(src_file, dst_file)
os.chdir(src_copy_dir)
# Change the function names in copied files
i = 0
for filename in srcfiles_srcfuncs:
func_list = srcfiles_srcfuncs[filename]
with open(filename, 'r') as fn:
content = fn.read()
for func in func_list:
content = content.replace(func, display_names[i])
i += 1
with open(filename, 'w') as fn:
fn.write(content)
| 34.013333 | 121 | 0.689142 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,642 | 0.643669 |
9c944fb70978ce339d17a2b5185083d51125ce9b | 1,442 | py | Python | src/entities/user.py | clayz/crazy-quiz-web | 7601809ad521d95ae251a026f171b9ec6939c55f | [
"Apache-2.0"
] | null | null | null | src/entities/user.py | clayz/crazy-quiz-web | 7601809ad521d95ae251a026f171b9ec6939c55f | [
"Apache-2.0"
] | null | null | null | src/entities/user.py | clayz/crazy-quiz-web | 7601809ad521d95ae251a026f171b9ec6939c55f | [
"Apache-2.0"
] | null | null | null | from google.appengine.ext import ndb
from google.appengine.ext.ndb import msgprop
from entities import BaseEntity
from constants import Gender, UserStatus, Device, APIStatus
from errors import DataError
class User(BaseEntity):
    """NDB datastore model for an application user account."""
    name = ndb.StringProperty()
    mail = ndb.StringProperty()
    gender = msgprop.EnumProperty(Gender)
    birthday = ndb.DateProperty()
    # Avatar image bytes, stored compressed to save datastore space.
    avatar = ndb.BlobProperty(compressed=True)
    # New accounts start INACTIVE until activated elsewhere.
    status = msgprop.EnumProperty(UserStatus, required=True, default=UserStatus.INACTIVE)
    device = msgprop.EnumProperty(Device, required=True)
    continue_got_count = ndb.IntegerProperty(required=True, default=0)  # daily bonus streak
    last_got_datetime = ndb.DateTimeProperty()  # daily bonus: last claim time
    push_token = ndb.StringProperty()
    update_date = ndb.DateTimeProperty(required=True, auto_now=True)

    @classmethod
    def get(cls, uuid):
        """Return the User entity keyed by *uuid*.

        :raises DataError: with APIStatus.DATA_NOT_FOUND when no entity exists.
        """
        user = cls.get_by_id(uuid)
        if user:
            return user
        else:
            raise DataError(APIStatus.DATA_NOT_FOUND, 'User not found, uuid: %s' % uuid)
class Currency(BaseEntity):
    """NDB model tracking a user's in-game currency balances."""
    gem = ndb.IntegerProperty(required=True, default=0)
    coin = ndb.IntegerProperty(required=True, default=0)
    # Lifetime amount spent.  NOTE(review): the unit (gems vs coins vs real
    # money) is not visible here — confirm with the code that updates it.
    total_spend = ndb.IntegerProperty(required=True, default=0)
    update_date = ndb.DateTimeProperty(required=True, auto_now=True)
class StartupHistory(BaseEntity):
    """NDB model recording one application-startup event (client version + IP)."""
    version = ndb.StringProperty(required=True)
    ip = ndb.StringProperty(required=True)
| 36.05 | 89 | 0.73301 | 1,230 | 0.852982 | 0 | 0 | 215 | 0.149098 | 0 | 0 | 52 | 0.036061 |
9c983e06b9771f99c2823cbf81e31f60d912dcdd | 3,327 | py | Python | paytm/models.py | Faisal-Manzer/django-paytm-checkout | bc796fca160f2664d85d0a09acf558532cc74442 | [
"MIT"
] | 10 | 2019-11-08T12:10:19.000Z | 2019-11-16T09:59:45.000Z | paytm/models.py | Faisal-Manzer/django-paytm-checkout | bc796fca160f2664d85d0a09acf558532cc74442 | [
"MIT"
] | 6 | 2019-11-09T09:16:46.000Z | 2022-02-10T11:30:55.000Z | paytm/models.py | Faisal-Manzer/django-paytm-checkout | bc796fca160f2664d85d0a09acf558532cc74442 | [
"MIT"
] | 1 | 2020-06-02T15:56:56.000Z | 2020-06-02T15:56:56.000Z | __all__ = ['Order', 'Item']
from django.db import models
from django.contrib.auth import get_user_model
from paytm import conf as paytm_conf
from paytm.helpers import sha256
class Item(models.Model):
    """A purchasable item: display name, price and an optional free-form tag."""
    price = models.FloatField()
    name = models.CharField(max_length=255)
    tag = models.CharField(max_length=255, null=True, blank=True)

    def __str__(self):
        return self.name
class Order(models.Model):
    """A Paytm payment order.

    Holds both the request payload sent to Paytm (order/customer/amount
    fields) and the transaction response fields Paytm posts back.  Field
    comments give Paytm's parameter name and declared max length.
    """

    class Channel:
        """Channel ids Paytm expects for web vs mobile-app checkouts."""
        WEB = paytm_conf.CHANNEL_WEBSITE
        APP = paytm_conf.CHANNEL_MOBILE_APP

        choices = (
            (WEB, 'Web'),
            (APP, 'App')
        )

    class Status:
        """Single-letter internal transaction states."""
        SUCCESS = 'S'
        FAILURE = 'F'
        UNKNOWN = 'U'
        PENDING = 'P'
        FRAUD = 'E'

        choices = (
            (SUCCESS, 'Success'),
            (FAILURE, 'Failure'),
            (PENDING, 'Pending'),
            (UNKNOWN, 'Unknown'),
            (FRAUD, 'Fraud')
        )

    user = models.ForeignKey(get_user_model(), null=True, blank=True, on_delete=models.DO_NOTHING)

    # ------------------------------------ Pay load sent to paytm --------------------------------
    # ORDER_ID* String(50) — generated in save() from the primary key.
    order_id = models.CharField(max_length=50)
    # CUST_ID* String(64)
    customer_id = models.CharField(max_length=64)
    # TXN_AMOUNT* String(10)
    amount = models.FloatField()
    real_amount = models.FloatField()  # amount aimed to capture
    # CHANNEL_ID* String(3)
    channel = models.CharField(max_length=3, choices=Channel.choices, default=Channel.WEB)
    # MOBILE_NO String(15)
    mobile = models.CharField(max_length=15, null=True, blank=True)
    # EMAIL String(50)
    email = models.EmailField(null=True, blank=True)
    # MERC_UNQ_REF String(50)
    notes = models.CharField(max_length=50, null=True, blank=True)

    # ---------------------------------- Response sent by paytm ---------------------------------
    # TXNID* String(64)
    txn_id = models.CharField(max_length=64, null=True, blank=True)
    # BANKTXNID* String
    bank_txn_id = models.TextField(null=True, blank=True)
    # STATUS* String(20)
    status = models.CharField(max_length=20, choices=Status.choices, default=Status.UNKNOWN)
    # CURRENCY* String(3)
    currency = models.CharField(max_length=3, null=True, blank=True)
    # RESPCODE* String(10)
    resp_code = models.CharField(max_length=10, null=True, blank=True)
    # RESPMSG* String
    resp_message = models.TextField(null=True, blank=True)
    # TXNDATE* DateTime
    transaction_date = models.DateTimeField(null=True, blank=True)
    # GATEWAYNAME String(15)
    gateway = models.CharField(max_length=15, null=True, blank=True)
    # BANKNAME* String
    bank = models.TextField(null=True, blank=True)
    # PAYMENTMODE* String(15)
    mode = models.CharField(max_length=15, null=True, blank=True)

    initiated = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    def save(self, *args, **kwargs):
        """Persist the order; on first save derive order_id from the new pk.

        A brand-new order is saved once with a blank order_id so the database
        assigns a primary key, then order_id is set to the first 50 chars of
        sha256(zero-padded pk) and the row is saved a second time (two DB
        writes for new orders is intentional here).
        """
        is_new_order = False
        if self.id is None:
            is_new_order = True
            self.order_id = ''
        super(Order, self).save(*args, **kwargs)

        if is_new_order:
            self.order_id = sha256(str(self.id).rjust(50, '0'))[:50]
            self.save()

    def __str__(self):
        return f'{self.order_id}'
| 28.681034 | 98 | 0.612564 | 3,146 | 0.945597 | 0 | 0 | 0 | 0 | 0 | 0 | 674 | 0.202585 |
9c98805df16923bcad50b539375b76c4c6448e60 | 125 | py | Python | tffm/__init__.py | FlorisHoogenboom/tffm | baf8086a8696a2b71bebdefe3a64d3b897599f72 | [
"MIT"
] | null | null | null | tffm/__init__.py | FlorisHoogenboom/tffm | baf8086a8696a2b71bebdefe3a64d3b897599f72 | [
"MIT"
] | null | null | null | tffm/__init__.py | FlorisHoogenboom/tffm | baf8086a8696a2b71bebdefe3a64d3b897599f72 | [
"MIT"
] | null | null | null | from .models import TFFMClassifier, TFFMRegressor, TFFMRankNet
__all__ = ['TFFMClassifier', 'TFFMRegressor', 'TFFMRankNet']
| 31.25 | 62 | 0.792 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 44 | 0.352 |
9c98b5419ef51d74e2d7e47ae36323002d369066 | 7,617 | py | Python | competitions/avito-demand-prediction/base_xgb_tune_mthread.py | gtesei/fast-furious | b974e6b71be92ad8892864794af57631291ebac1 | [
"MIT"
] | 19 | 2015-06-24T00:04:11.000Z | 2021-02-28T16:55:44.000Z | competitions/avito-demand-prediction/base_xgb_tune_mthread.py | gtesei/fast-furious | b974e6b71be92ad8892864794af57631291ebac1 | [
"MIT"
] | null | null | null | competitions/avito-demand-prediction/base_xgb_tune_mthread.py | gtesei/fast-furious | b974e6b71be92ad8892864794af57631291ebac1 | [
"MIT"
] | 4 | 2016-10-11T17:36:44.000Z | 2019-08-16T10:03:04.000Z | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
import re
from sklearn.feature_extraction import DictVectorizer
from sklearn.model_selection import train_test_split
import pandas as pd
from pandas.tseries.holiday import USFederalHolidayCalendar as calendar
import nltk
import xgboost as xgb
from extract_feat_base import *
from multiprocessing import Pool, Lock
import pandas as pd
import numpy as np
import os
import glob
global grid
### FUNC ########################################################################
def create_grid():
    """Build the hyper-parameter search grid for the XGBoost tuning run.

    Returns a DataFrame with one row per combination of the tuned
    parameters, plus placeholder columns (nround, rmse_cv_mean,
    rmse_cv_std) initialised to -1 that do_compute() fills in after each
    cross-validation run.
    """
    # MultiIndex.from_product enumerates the cartesian product in the same
    # order as the original nested loops (first level varies slowest), so
    # row indices stay compatible with previously written checkpoint CSVs.
    grid = pd.MultiIndex.from_product(
        [
            [0, 0.5, 1, 15, 50],   # min_child_weight
            [0.01, 0.005],         # eta
            [0.5, 0.7],            # colsample_bytree
            [6, 15],               # max_depth
            [0.5, 0.7],            # subsample
            [0.5, 1],              # lambda
        ],
        names=['min_child_weight', 'eta', 'colsample_bytree',
               'max_depth', 'subsample', 'lambda'],
    ).to_frame(index=False)
    # Placeholders for the CV results; -1 marks "not yet computed".
    # (The original built the frame from parallel lists and a dict literal
    # with a duplicated 'nround' key, and also constructed an unused
    # xgb_pars dict on every loop iteration; this yields the same columns.)
    grid['nround'] = -1
    grid['rmse_cv_mean'] = -1
    grid['rmse_cv_std'] = -1
    grid.index = range(len(grid))
    print("Grid:", str(grid.shape))
    print(grid.head())
    #grid.to_csv('base_grid_xgb_40perc.csv',index=False)
    return grid

grid = create_grid()
def do_compute(x):
    """Run one cross-validated XGBoost fit for grid row *x*.

    Reads the hyper-parameters from the module-level ``grid``, runs 4-fold
    ``xgb.cv`` on the module-level ``dtrain`` with early stopping, and writes
    the resulting CV metrics back into ``grid``.  Each worker also checkpoints
    its grid to a per-PID CSV, because Pool workers do not share the parent's
    memory — the ``__main__`` section merges these files afterwards.

    :param x: integer row index into ``grid``
    :return: mean CV RMSE of the best iteration
    """
    row = grid.iloc[x,:]
    eta = row['eta']
    min_child_weight = row['min_child_weight']
    colsample_bytree = row['colsample_bytree']
    max_depth = row['max_depth']
    subsample = row['subsample']
    _lambda = row['lambda']
    nround = row['nround']
    ####
    xgb_pars = {'min_child_weight': min_child_weight,
                'eta': eta,
                'colsample_bytree': colsample_bytree,
                'max_depth': int(max_depth),
                'subsample': subsample,
                'lambda': _lambda,
                'nthread': -1,
                'booster' : 'gbtree',
                'silent': 1,
                'eval_metric': 'rmse',
                'objective': 'reg:linear'}
    #print(xgb_pars)
    model = xgb.cv(xgb_pars, dtrain, 100000,nfold = 4, early_stopping_rounds=50,maximize=False, verbose_eval=10)
    # Best iteration count and its CV metrics (last row after early stopping).
    nround = model.shape[0]
    rmse_cv_mean = model['test-rmse-mean'][model.shape[0]-1]
    rmse_cv_std = model['test-rmse-std'][model.shape[0]-1]
    # Record the CV results for this grid row and checkpoint to a per-PID CSV.
    grid.loc[x,'rmse_cv_mean'] = rmse_cv_mean
    grid.loc[x,'rmse_cv_std'] = rmse_cv_std
    grid.loc[x,'nround'] = nround
    grid.to_csv('base_grid_xgb_40perc__'+str(os.getpid())+'.csv',index=False)
    return rmse_cv_mean
#################################################################################
### FEATURE ENG. ################################################################
# Column-handling plan for the Avito dataset, consumed by encode_dataset():
# REM = drop, CAT = categorical encode, LEN = text-length feature,
# NUM = numeric, DATE = date expansion.
meta = {'target': 'deal_probability',
        'test_id': 'item_id',
        'cols': {
            'item_id': 'REM',
            'user_id': 'CAT',
            'region': 'CAT',
            'city': 'CAT',
            'parent_category_name': 'CAT',
            'category_name': 'CAT',
            'param_1': 'CAT',
            'param_2': 'CAT',
            'param_3': 'CAT',
            'title': 'LEN',
            'description': 'LEN' ,
            'price': 'NUM',
            'item_seq_number': 'NUM',
            'activation_date': 'DATE',
            'user_type': 'CAT',
            'image': 'REM',
            'image_top_1': 'NUM'
        }}

train = pd.read_csv('data/train.csv')
test = pd.read_csv('data/test.csv')

print('--------------> Basic Feature Engineering ... ')
all_data , y_train = encode_dataset(train=train,test=test,meta=meta)
print(all_data.head())
print(">>>>>>> shape:",all_data.shape)

#for f in ['activation_date_is_holiday']:
#    all_data = all_data.drop(f,axis=1)

print(all_data.head())
print(">>>>>>> shape:",all_data.shape)
#################################################################################
### MODELING ####################################################################
print('--------------> Modeling ... ')
train_obs = len(y_train)
# NOTE(review): test_size=0.6 keeps only 40% of the training rows for the
# grid search — hence the "40perc" in the checkpoint file names.
Xtr, Xv, ytr, yv = train_test_split(all_data[:train_obs].values, y_train, test_size=0.6, random_state=1973)
dtrain = xgb.DMatrix(Xtr, label=ytr)
dvalid = xgb.DMatrix(Xv, label=yv)
dtest = xgb.DMatrix(all_data[train_obs:].values)
watchlist = [(dtrain, 'train'), (dvalid, 'valid')]
#Try different parameters! My favorite is random search :)
#################################################################################
if __name__ == '__main__':
    #print("grid created")
    #print(grid.head())
    # Fan the grid rows out over a pool of worker processes.  Each worker
    # mutates only its own copy of `grid` and checkpoints it to a per-PID
    # CSV, so the parent merges those files once the pool finishes.
    dataset = range(len(grid))
    agents = 4
    chunksize = int(len(grid)/agents)
    # Output the dataset
    #print ('Dataset: ' , str(dataset) , "chunksize:",str(chunksize))
    # Run the pool of `agents` workers with the computed chunksize until finished
    with Pool(processes=agents) as pool:
        result = pool.map(do_compute, dataset, chunksize)
    # Output the result
    print ('Result: ' + str(result) , "---type:",type(result))
    #grid.to_csv('base_grid_xgb_40perc.csv',index=False)
    # Merge the per-worker checkpoint CSVs back into a fresh grid: rows with
    # rmse_cv_mean >= 0 were actually computed by that worker.
    print(">>> merge ...")
    agrid = create_grid()
    listing = glob.glob('./base_grid_xgb_40perc__*')
    print(listing)
    for filename in listing:
        print(filename)
        gg = pd.read_csv(filename)
        gg = gg[gg.rmse_cv_mean >=0]
        print(gg.index)
        for i in (gg.index):
            row = gg.loc[i,:]
            rmse_cv_mean = row['rmse_cv_mean']
            rmse_cv_std = row['rmse_cv_std']
            nround = row['nround']
            agrid.loc[i,'rmse_cv_mean'] = rmse_cv_mean
            agrid.loc[i,'rmse_cv_std'] = rmse_cv_std
            agrid.loc[i,'nround'] = nround
    agrid.to_csv('base_grid_xgb_40perc.csv',index=False)
| 35.593458 | 112 | 0.495339 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,319 | 0.304451 |
9c98d51c1433c4a39642bc8ab3e23a7db335e475 | 1,456 | py | Python | tests/program_test.py | stanfortonski/Perlin-Noise-3D-Voxel-Generator | f06b0329c948848b22b5e3c04978e95ce71f5ab0 | [
"MIT"
] | 27 | 2020-05-15T23:24:17.000Z | 2021-11-22T01:23:27.000Z | tests/program_test.py | stanfortonski/Perlin-Noise-3D-Voxel-Generator | f06b0329c948848b22b5e3c04978e95ce71f5ab0 | [
"MIT"
] | null | null | null | tests/program_test.py | stanfortonski/Perlin-Noise-3D-Voxel-Generator | f06b0329c948848b22b5e3c04978e95ce71f5ab0 | [
"MIT"
] | null | null | null | import sys, unittest, glfw
sys.path.insert(0, '..')
from OpenGL.GL import *
from engine.base.shader import Shader
from engine.base.program import *
import helper
class ProgramTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.window = helper.initAndGetWindow()
@classmethod
def tearDownClass(cls):
glfw.terminate()
def testLinking(self):
try:
program = Program()
program.attachShader(Shader('resources/shaders/test_vert.vs', GL_VERTEX_SHADER))
program.attachShader(Shader('resources/shaders/test_frag.fs', GL_FRAGMENT_SHADER))
program.link()
self.assertEqual(program.getId(), 1)
except RuntimeError:
self.assertTrue(False)
def testLinked(self):
try:
program = getLinkedProgram('resources/shaders/test_vert.vs', 'resources/shaders/test_frag.fs')
self.assertEqual(program.getId(), 1)
except RuntimeError:
self.assertTrue(False)
def testErrorCompile(self):
try:
program = Program()
program.attachShader(Shader('resources/shaders/test_vert.vs', GL_VERTEX_SHADER))
program.attachShader(Shader('resources/shaders/error.fs', GL_FRAGMENT_SHADER))
program.link()
self.assertTrue(False)
except RuntimeError:
self.assertTrue(True)
if __name__ == '__main__':
unittest.main() | 32.355556 | 106 | 0.644231 | 1,245 | 0.855082 | 0 | 0 | 149 | 0.102335 | 0 | 0 | 202 | 0.138736 |
9c9a7452e9e547327aec4a9da8b0f126d46152b2 | 1,253 | py | Python | movie_night/genrecollector.py | MattDrouin/movie_night | 4445b5fc66e7f7a5b2829142101d05dcf87a3227 | [
"MIT"
] | null | null | null | movie_night/genrecollector.py | MattDrouin/movie_night | 4445b5fc66e7f7a5b2829142101d05dcf87a3227 | [
"MIT"
] | null | null | null | movie_night/genrecollector.py | MattDrouin/movie_night | 4445b5fc66e7f7a5b2829142101d05dcf87a3227 | [
"MIT"
] | null | null | null | import aiohttp
import asyncio
# Grab the movie genre in a slightly painful way
async def get_genre(movie_title, session):
    """Scrape the genre for *movie_title* from a Google knowledge panel.

    Returns the text between the first pair of '‧' separators, or
    "Unknown" when the page does not contain at least two of them.
    """
    query = "+".join(movie_title.split())
    async with session.get(f'https://www.google.com/search?q={query}') as resp:
        html = await resp.text()
    # The '‧' dot symbol appears on either side of the genre in a
    # google knowledge panel.
    segments = html.split("‧")
    return "Unknown" if len(segments) < 3 else segments[1].strip()
async def get_genres(movie_list):
    """Fetch the genre for every title in *movie_list* concurrently."""
    # Minimal request headers so the search endpoint serves plain HTML.
    headers = {
        'User-Agent': 'python-requests/2.25.1',
        'Accept-Encoding': 'gzip, deflate',
        'Accept': '*/*',
        'Connection': 'keep-alive'
    }
    async with aiohttp.ClientSession(headers=headers) as session:
        lookups = [get_genre(title, session) for title in movie_list]
        genres = await asyncio.gather(*lookups)
    return genres
| 26.104167 | 67 | 0.497207 | 0 | 0 | 0 | 0 | 0 | 0 | 959 | 0.765363 | 330 | 0.263368 |
9c9b489e028cbf9b38f6936d0fc0e18d8c7399d9 | 2,623 | py | Python | ML/WhatsApp/radar_chart.py | PepSalehi/algorithms | 1c20f57185e6324aa840ccff98e69764b4213131 | [
"MIT"
] | null | null | null | ML/WhatsApp/radar_chart.py | PepSalehi/algorithms | 1c20f57185e6324aa840ccff98e69764b4213131 | [
"MIT"
] | null | null | null | ML/WhatsApp/radar_chart.py | PepSalehi/algorithms | 1c20f57185e6324aa840ccff98e69764b4213131 | [
"MIT"
] | 1 | 2019-12-09T21:40:46.000Z | 2019-12-09T21:40:46.000Z | # core modules
from math import pi
# 3rd party modules
import matplotlib.pyplot as plt
import pandas as pd
# internal modules
import analysis
def main(path):
    """Parse the chat export at *path* and draw an hourly radar chart."""
    df = analysis.parse_file(path)
    df = prepare_df(df, grouping=(df['date'].dt.hour))
    print(df.reset_index().to_dict(orient='list'))
    # NOTE(review): the prepared df is immediately overwritten by this
    # hard-coded 24-hour sample, so the parsed data is only printed, never
    # plotted — looks like leftover debugging; confirm before removing.
    df = pd.DataFrame({'date': [209, 13, 1, 2, 1, 25, 809, 3571, 1952, 1448, 942, 1007, 1531, 1132, 981, 864, 975, 2502, 2786, 2717, 3985, 4991, 2872, 761]},
                      index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23])
    print(df)
    create_radar_chart(df,
                       # cat_names=['Monday',
                       #            'Tuesday',
                       #            'Wednesday',
                       #            'Thursday',
                       #            'Friday',
                       #            'Saturday',
                       #            'Sunday']
                       )
def prepare_df(df, grouping):
    """Count rows of *df* per *grouping* bucket.

    Returns a single-column frame named 'date' with a fresh RangeIndex.
    """
    counts = df['date'].groupby(grouping).count()
    return counts.to_frame().reset_index(drop=True)
def create_radar_chart(df, cat_names=None):
    """
    Draw a closed radar (spider) chart of the counts in *df* and show it.

    Parameters
    ----------
    df : pandas.DataFrame
        Has a column 'date'
    cat_names : list, optional
        Axis labels; must match the number of data points.  Defaults to the
        transposed column positions.
    """
    values = df['date'].tolist()
    df = df.T.reset_index(drop=True)
    df.insert(0, 'group', 'A')

    # number of variable
    categories = list(df)[1:]
    if cat_names is None:
        cat_names = categories
    assert len(cat_names) == len(categories)
    N = len(categories)

    # What will be the angle of each axis in the plot?
    # (we divide the plot / number of variable)
    angles = [n / float(N) * 2 * pi for n in range(N)]
    angles += angles[:1]

    # Initialise the spider plot
    ax = plt.subplot(111, polar=True)

    # Draw one axe per variable + add labels
    plt.xticks(angles[:-1], cat_names, color='grey', size=8)

    # Draw ylabels
    # ax.set_rlabel_position(0)
    # NOTE(review): ticks is currently unused — the yticks call below is
    # commented out, so matplotlib picks the radial ticks automatically.
    ticks = get_ticks(values)
    # plt.yticks(ticks, [str(tick) for tick in ticks], color="grey", size=7)
    # plt.ylim(0, 40)

    # We are going to plot the first line of the data frame.
    # But we need to repeat the first value to close the circular graph:
    values = df.loc[0].drop('group').values.flatten().tolist()
    values += values[:1]
    values  # no-op expression, likely leftover from a notebook cell

    # Plot data
    ax.plot(angles, values, linewidth=1, linestyle='solid')

    # Fill area
    ax.fill(angles, values, 'b', alpha=0.1)
    plt.show()
def get_ticks(values):
    """Return *values* sorted ascending (candidate y-axis tick positions)."""
    ordered = list(values)
    ordered.sort()
    return ordered
if __name__ == '__main__':
    # Parse CLI args (expects a `filename` argument) and render the chart.
    args = analysis.get_parser().parse_args()
    main(args.filename)
| 27.904255 | 157 | 0.559283 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 850 | 0.324056 |
9c9c644c1666b0d5fa37d62234243dadbc44f8cf | 1,894 | py | Python | LoggedSensor.py | majorpeter/puha-manager | fd51b560a7e1dad99204ae3c9c7369d17cbcfd32 | [
"MIT"
] | null | null | null | LoggedSensor.py | majorpeter/puha-manager | fd51b560a7e1dad99204ae3c9c7369d17cbcfd32 | [
"MIT"
] | 5 | 2018-07-29T19:55:27.000Z | 2018-09-08T17:39:48.000Z | LoggedSensor.py | majorpeter/puha-manager | fd51b560a7e1dad99204ae3c9c7369d17cbcfd32 | [
"MIT"
] | null | null | null | from datetime import datetime
from threading import Lock
from Database import Database
class LoggedSensor:
    """
    Common base class for sensors whose measurements are persisted to the
    database and kept in a bounded in-memory buffer for charting.
    """

    # Class-level registry of type ids already claimed by a sensor instance.
    registered_type_ids = []

    def __init__(self, type_id, max_measurements=200, holdoff_time=None):
        """
        :param type_id: unique numeric id identifying this sensor type
        :param max_measurements: max number of measurements kept in memory
        :param holdoff_time: optional timedelta; measurements arriving closer
            together than this are silently dropped
        """
        # BUG FIX: the original compared with `is` (identity against the list,
        # always False) and never appended to registered_type_ids, so duplicate
        # type ids were never actually detected.
        if type_id in LoggedSensor.registered_type_ids:
            raise BaseException('Type ID already exists: %d' % type_id)
        LoggedSensor.registered_type_ids.append(type_id)

        self.type_id = type_id
        self.max_measurements = max_measurements
        self.holdoff_time = holdoff_time
        self.lock = Lock()

        # Seed the in-memory buffer with the latest persisted measurements.
        with self.lock:
            self.data = Database.instance.fetch_latest_measurements(
                self.type_id, self.max_measurements)

    def add_measurement(self, measurement):
        """Buffer *measurement* (unless inside the hold-off window) and persist it."""
        now = datetime.now().timestamp()
        with self.lock:
            # Drop the sample if it arrived too soon after the previous one.
            if self.holdoff_time is not None and len(self.data) > 0:
                if now - self.data[-1]['time'] < self.holdoff_time.total_seconds():
                    return
            # Keep the in-memory buffer bounded.
            if len(self.data) > self.max_measurements:
                del self.data[0]
            self.data.append({'time': now, 'measurement': measurement})
            Database.instance.insert_measurement(now, self.type_id, measurement)

    def get_chart_data(self, from_timestamp=0):
        """Return (labels, values, last_timestamp) for samples newer than *from_timestamp*.

        Labels are local HH:MM:SS strings, values are 2-decimal strings;
        last_timestamp lets the caller poll incrementally.
        """
        label = []
        data = []
        last_timestamp = 0
        with self.lock:
            for item in self.data:
                if item['time'] > from_timestamp:
                    timestamp = datetime.fromtimestamp(item['time'])
                    label.append(timestamp.strftime('%H:%M:%S'))
                    data.append('%.2f' % item['measurement'])
            if len(self.data) > 0:
                last_timestamp = int(self.data[-1]['time'])
        return label, data, last_timestamp
| 32.101695 | 104 | 0.586589 | 1,803 | 0.951954 | 0 | 0 | 0 | 0 | 0 | 0 | 195 | 0.102957 |
9c9dc2b48466cf77d6827d16eafaafb0b544703d | 1,046 | py | Python | tests/algorithms/sets/test_cartesian.py | maurobaraldi/python-algorithms | 94fe5e46737d6ea2f7ff62b5b61b7b9cca3fbd14 | [
"MIT"
] | 2 | 2018-11-05T17:12:40.000Z | 2019-09-05T17:10:01.000Z | tests/algorithms/sets/test_cartesian.py | maurobaraldi/python-algorithms | 94fe5e46737d6ea2f7ff62b5b61b7b9cca3fbd14 | [
"MIT"
] | null | null | null | tests/algorithms/sets/test_cartesian.py | maurobaraldi/python-algorithms | 94fe5e46737d6ea2f7ff62b5b61b7b9cca3fbd14 | [
"MIT"
] | 1 | 2019-01-02T19:07:51.000Z | 2019-01-02T19:07:51.000Z | #!/usr/bin/env python
import unittest
from algorithms.sets.cartesian_product import cartesian
class TestFatorial(unittest.TestCase):
    """Unit tests for the cartesian-product helpers.

    NOTE(review): the class name looks like a typo ("Fatorial"), but renaming
    would change the public test identifier, so it is left as-is.
    """

    def setUp(self):
        # Two small input sets reused by every test case.
        self.set_a = [1, 2]
        self.set_b = [4, 5]

    def test_cartesian_product(self):
        self.assertEqual(cartesian.product(self.set_a, self.set_b), [[1, 4], [1, 5], [2, 4], [2, 5]])

    def test_cartesian_product_by_list_comprehension(self):
        self.assertEqual(cartesian.list_comprehension(self.set_a, self.set_b), [[1, 4], [1, 5], [2, 4], [2, 5]])

    def test_cartesian_product_recursive_two_sets(self):
        # product_n yields tuples lazily, so materialise before comparing.
        result = [i for i in cartesian.product_n(self.set_a, self.set_b)]
        self.assertEqual(result, [[1, 4], [1, 5], [2, 4], [2, 5]])

    def test_cartesian_product_recursive_three_sets(self):
        result = [i for i in cartesian.product_n(self.set_a, self.set_b, self.set_a)]
        self.assertEqual(result, [[1, 4, 1], [1, 4, 2], [1, 5, 1], [1, 5, 2], [2, 4, 1], [2, 4, 2], [2, 5, 1], [2, 5, 2]])


if __name__ == '__main__':
    unittest.main()
| 34.866667 | 122 | 0.630975 | 899 | 0.859465 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.029637 |
9c9e72363186eee33893fff1aebbb6657c6b8082 | 14,501 | py | Python | libs/sdc_etl_libs/aws_helpers/S3Data.py | darknegma/docker-airflow | 44e3d02d7ac43c8876145ae47acfbbbde67230df | [
"Apache-2.0"
] | null | null | null | libs/sdc_etl_libs/aws_helpers/S3Data.py | darknegma/docker-airflow | 44e3d02d7ac43c8876145ae47acfbbbde67230df | [
"Apache-2.0"
] | 3 | 2021-03-31T19:26:57.000Z | 2021-12-13T20:33:01.000Z | libs/sdc_etl_libs/aws_helpers/S3Data.py | darknegma/docker-airflow | 44e3d02d7ac43c8876145ae47acfbbbde67230df | [
"Apache-2.0"
] | null | null | null | import boto3
from botocore.exceptions import ClientError
import gzip
import io
import os
import csv
import re
class S3Data(object):
    def __init__(self, bucket_name_, prefix_, file_, df_schema_, compression_type_,
                 check_headers_, file_type_, access_key_=None, secret_key_=None,
                 region_='us-east-2', decode_='utf-8'):
        """
        Wraps a (possibly compressed) S3 object so it can be chunked into
        lists of dictionaries for loading into Snowflake.
        :param bucket_name_: S3 bucket
        :param prefix_: S3 prefix (i.e., "directory path" of bucket)
        :param file_: S3 file / obj
        :param df_schema_: Schema as defined by the json file for this object
        :param compression_type_: type of compression being used ("gz" or None)
        :param check_headers_: True or False to check for headers
        :param file_type_: S3 object file type. Currently only 'csv' and
            'tsv' are supported.
        :param access_key_: AWS Access Key for S3 bucket (if applicable)
        :param secret_key_: AWS Secret Key for S3 bucket (if applicable)
        :param region_: AWS region (Default = 'us-east-2')
        :param decode_: Character decoding of object
        :raises Exception: if file_type_ is not 'csv' or 'tsv'
        """
        # File connection properties
        self.file_name = file_
        self.df_schema = df_schema_
        self.region = region_
        self.decode = decode_
        self.bucket_name = bucket_name_
        self.prefix = prefix_
        self.file_type = file_type_
        self.compression_type = compression_type_
        self.access_key = access_key_
        self.secret_key = secret_key_

        # file processing properties
        self.check_headers = check_headers_
        self.current_row = 1  # 1-based pointer into the current file
        self.lines = None  # iterable of raw lines, populated by load_data()
        self.field_names = []  # for snowflake and dataframe processing
        self.field_names_file = []  # for column header check
        self.table = ''
        self.schema = ''
        self.file_row_count = 0

        # The delimiter is kept as bytes because lines are split before decoding.
        if self.file_type == "csv":
            self.delimiter = b','
        elif self.file_type == "tsv":
            self.delimiter = b'\t'
        else:
            raise Exception(f"File type: {self.file_type} not supported.")
def __read_data(self, s3_data_):
if self.compression_type == "gz":
file_handle = io.BytesIO(s3_data_)
self.lines = gzip.open(file_handle)
elif isinstance(self.compression_type,type(None)):
self.lines = s3_data_.decode('utf-8').split()
else:
raise Exception(f"Compression type: {self.compression_type} not supported.")
    def load_data(self,skip_schema=False):
        """
        Fetches the S3 object, decompresses/splits it into self.lines and
        (unless skip_schema is True) reloads the schema metadata.
        :param skip_schema: when True, skip re-parsing the schema json
        return: None
        """
        client = boto3.client('s3', region_name=self.region,
                              aws_access_key_id=self.access_key,
                              aws_secret_access_key=self.secret_key)

        obj = client.get_object(Bucket=self.bucket_name, Key=self.prefix + self.file_name)
        # Entire object body is read into memory before decompression.
        s3_data = obj['Body'].read()

        self.__read_data(s3_data)

        if skip_schema is False:
            self.__load_schema()
def __load_schema(self):
"""
Private Function
Loads the schema file into a dictionary
Retrieves table and schema values
return: None
"""
# retrieve some data from our json
# get our field names
self.field_names.clear()
for f in self.df_schema['fields']:
self.field_names.append(f['name'])
self.field_names_file.append(f['name_in_file'])
# retrieve what table and schema we are using from the json and pass into the dataframe
self.table = self.df_schema['data_sink']['table_name']
self.schema = self.df_schema['data_sink']['schema']
    def __get_file_size(self):
        """
        Private Function
        gets the row count of the file
        return: None sets the self.file_row_count value

        NOTE(review): self.current_row is never advanced inside this loop, so
        the header check actually runs for *every* line — any line whose first
        field matches a schema header name is excluded from the count.  The
        counter also starts at 1, making file_row_count = data lines + 1;
        confirm whether that off-by-one is intentional before changing it.
        """
        row_count = 1
        has_column_header = False
        for line in self.lines:
            newline = line.rstrip().lstrip().split(self.delimiter)
            if self.current_row == 1:  # check once for column header
                if self.check_headers:
                    if str(newline[0].decode(self.decode)).upper() in self.field_names_file:
                        has_column_header = True
            if has_column_header == False:
                row_count += 1
            else:
                # Header line: don't count it, and reset for the next line.
                has_column_header = False
        self.file_row_count = row_count
def get_file_record_count(self):
if self.file_row_count > 0:
return self.file_row_count
self.__get_file_size()
self.lines.close()
self.load_data()
self.current_row = 1
return self.file_row_count
    def get_records(self, row_to_start_: int, chunk_size_: int, file_id_=None):
        """
        Loads a set of data into a list of dictionary objects. Keeps track of the row number pointer
        :param row_to_start_: The row number to start processing. Note: header records do not count as a row
        :param chunk_size_: the number of records to process
        :param file_id_: Required parameter when adding the column FILE_ID to the data schema and using the ProcessLogger.
        return: List of Data Dictionaries
        """
        data = []
        has_column_header = False
        row_to_end = chunk_size_ + row_to_start_
        # The underlying iterator is forward-only: to rewind, re-download the
        # object.  NOTE(review): self.lines.close() assumes a gzip file
        # object; a plain list (compression_type None) has no close() — verify.
        if row_to_start_ >= self.current_row:
            pass
        else:
            self.lines.close()
            self.load_data()
            self.current_row = 1
        for line in self.lines:
            if self.current_row >= row_to_start_:
                data_dict = {}
                newline = line.rstrip().lstrip().split(self.delimiter)
                if self.current_row == 1:  # check once for column header
                    if self.check_headers:
                        if str(newline[0].decode(self.decode)).upper() in self.field_names_file:
                            has_column_header = True
                if not has_column_header:
                    # Map file columns positionally onto the schema names;
                    # FILE_ID is synthesised from file_id_ rather than read.
                    column_number = 0
                    for fields in self.field_names:
                        if fields == "FILE_ID":
                            if not isinstance(file_id_, type(None)):
                                data_dict[fields] = file_id_
                            else:
                                raise Exception("Missing file id field")
                        else:
                            data_dict[fields] = str(newline[column_number].decode(self.decode))
                            column_number += 1
                    if len(data_dict) > 0:
                        data.append(data_dict)
            # Header lines do not advance the logical row pointer.
            if not has_column_header:
                self.current_row += 1
            has_column_header = False
            if self.current_row >= row_to_end:
                break
        return data
@staticmethod
def iterate_on_s3_response(response_: dict, bucket_name_: str,
prefix_: str, files_: list, give_full_path_):
"""
Iterate over an S3 List Objects result and adds object file/object
names to list.
:param response_: Response from List Objects func.
:param bucket_name_: Name of S3 bucket that was searched.
:param prefix_: Prefix used to find files.
:param files_: List append S3 URLs to.
:return: None
"""
for item in response_["Contents"]:
if prefix_ in item["Key"]:
if give_full_path_:
files_.append("s3://" + bucket_name_ + "/" + item["Key"])
else:
files_.append(os.path.basename(item["Key"]))
    @staticmethod
    def get_file_list_s3(bucket_name_, prefix_, access_key_=None,
                         secret_key_=None, region_='us-east-2',
                         file_prefix_: str = None, file_suffix_: str = None,
                         file_regex_: str = None, give_full_path_=False):
        """
        Creates a list of items in an S3 bucket.
        :param bucket_name_: Name of S3 bucket to search
        :param prefix_: Prefix used to find files.
        :param access_key_: AWS Access Key for S3 bucket (if applicable)
        :param secret_key_: AWS Secret Key for S3 bucket (if applicable)
        :param region_: AWS region (Default = 'us-east-2')
        :param file_prefix_: If used, function will return files that start
            with this (case-sensitive). Can be used in tandem with file_suffix_
        :param file_suffix_: If used, function will return files that end
            with this (case-sensitive). Can be used in tandem with file_prefix_
        :param file_regex_: If used, will return all files that match this
            regex pattern. file_prefix_ & file_suffix_ will be ignored.
        :param give_full_path_: If False, only file name will be returned. If
            true, full path & file name will be returned.
        :return: List of S3 file/object names as strings

        NOTE(review): the prefix/suffix pattern below is matched with
        re.search and is not anchored (^/$), and special regex characters in
        file_prefix_/file_suffix_ are not escaped — so matches can occur
        anywhere in the name.  Confirm before tightening.
        """
        client = boto3.client('s3', region_name=region_,
                              aws_access_key_id=access_key_,
                              aws_secret_access_key=secret_key_)

        response = client.list_objects_v2(Bucket=bucket_name_, Prefix=prefix_)

        all_files = []
        if "Contents" in response:
            S3Data.iterate_on_s3_response(response, bucket_name_,
                                          prefix_, all_files, give_full_path_)

        # Page through truncated listings via the continuation token.
        while response["IsTruncated"]:
            print(response["NextContinuationToken"])
            response = client.list_objects_v2(
                Bucket=bucket_name_, Prefix=prefix_,
                ContinuationToken=response["NextContinuationToken"])
            S3Data.iterate_on_s3_response(response, bucket_name_,
                                          prefix_, all_files, give_full_path_)

        if file_regex_ or file_prefix_ or file_suffix_:
            pattern = file_regex_ if file_regex_ else \
                f"{file_prefix_ if file_prefix_ else ''}.*{file_suffix_ if file_suffix_ else ''}"
            files = [x for x in all_files if re.search(pattern, x)]
        else:
            files = all_files

        return files
    @staticmethod
    def s3_obj_to_dict(bucket_name_, prefix_, file_, file_type_='csv',
                       access_key_=None, secret_key_=None, region_='us-east-2',
                       decode_='utf-8'):
        """
        Converts an S3 object to a list of flattened dictionary records.
        :param bucket_name_: S3 bucket
        :param prefix_: S3 prefix (i.e., "directory path" of bucket)
        :param file_: S3 file / obj
        :param file_type_: S3 object file type (if applicable). Currently,
            this function only supports 'csv'. No other file types have been
            tested.
        :param access_key_: AWS Access Key for S3 bucket (if applicable)
        :param secret_key_: AWS Secret Key for S3 bucket (if applicable)
        :param region_: AWS region (Default = 'us-east-2')
        :param decode_: Character decoding of object
        :return: List of flattened dictionaries.
        """
        if not file_type_:
            raise Exception("Need to pass a file_type_.")

        client = boto3.client('s3', region_name=region_,
                              aws_access_key_id=access_key_,
                              aws_secret_access_key=secret_key_)

        obj = client.get_object(Bucket=bucket_name_, Key=prefix_ + file_)
        # NOTE(review): split() breaks on *any* whitespace, so quoted CSV
        # fields containing spaces would be split across rows — confirm the
        # expected inputs never contain embedded whitespace.
        lines = obj['Body'].read().decode(decode_).split()

        data = []
        try:
            if file_type_ == 'csv':
                reader = csv.DictReader(lines)
                for row in reader:
                    data.append(dict(row))
            else:
                raise Exception(f"{file_type_} not currently supported for S3 obj conversion.")
            return data

        except Exception as e:
            raise Exception(f"There was an issue converting {file_} to a "
                            f"list of flattened dictionaries. {e}")
    def upload_file(self,object_name_=None):
        """Upload self.file_name (a local file) to the configured S3 bucket.

        :param object_name_: S3 object name. If not specified then file_name is used
        :return: the object's https URL on success, else False
            (NOTE(review): mixed str/bool return type — callers must check)
        """
        # If S3 object_name was not specified, use file_name
        if object_name_ is None:
            object_name_ = self.prefix+"/"+self.file_name
        else:
            object_name_ = self.prefix+"/"+object_name_

        # Upload the file
        client = boto3.client('s3', region_name=self.region,
                              aws_access_key_id=self.access_key,
                              aws_secret_access_key=self.secret_key)
        try:
            # upload_file returns None; the binding is unused but kept as-is.
            response = client.upload_file(Filename=self.file_name, Bucket=self.bucket_name, Key=object_name_)
        except ClientError as e:
            print(f"There was an error {e} while uploading {self.file_name} to S3")
            return False
        target_path=f"https://{self.bucket_name}.s3.{self.region}.amazonaws.com/{object_name_}"
        return target_path
def save_to_s3(self, data_string_):
    """Persist ``data_string_`` as an object in the configured S3 bucket.

    The object key is ``<prefix>/<file_name>`` and the payload is encoded
    with the codec named by ``self.decode`` before upload.

    :param data_string_: text content to store.
    :return: public HTTPS URL of the stored object, or ``False`` when the
        upload raises a ``ClientError``.
    """
    payload = data_string_.encode(self.decode)
    object_key = self.prefix + "/" + self.file_name
    s3 = boto3.resource(
        's3',
        region_name=self.region,
        aws_access_key_id=self.access_key,
        aws_secret_access_key=self.secret_key,
    )
    try:
        s3.Bucket(self.bucket_name).put_object(Key=object_key, Body=payload)
    except ClientError as e:
        print(f"There was an error {e} while uploading {self.file_name} to S3")
        return False
    return f"https://{self.bucket_name}.s3.{self.region}.amazonaws.com/{object_key}"
| 40.058011 | 122 | 0.579064 | 14,388 | 0.992207 | 0 | 0 | 5,251 | 0.362113 | 0 | 0 | 5,754 | 0.3968 |
9c9e9f62b13ea8939d24495e7e75ae788410b53b | 5,520 | py | Python | tests/test_models.py | thmslmr/timebomb-client | a57fdbb8bfc0157d2c3d713496ab4819fb33f1fd | [
"MIT"
] | 1 | 2020-03-31T17:17:40.000Z | 2020-03-31T17:17:40.000Z | tests/test_models.py | thmslmr/timebomb-client | a57fdbb8bfc0157d2c3d713496ab4819fb33f1fd | [
"MIT"
] | 2 | 2020-03-31T17:18:38.000Z | 2020-03-31T17:21:13.000Z | tests/test_models.py | thmslmr/timebomb-client | a57fdbb8bfc0157d2c3d713496ab4819fb33f1fd | [
"MIT"
] | null | null | null | from datetime import datetime
import timebomb.models as models
def test_Notification():
notif = models.Notification("message")
assert notif.content == "message"
assert notif.read is False
assert str(notif) == "message"
def test_Player():
player = models.Player("name", "id")
assert player.name == "name"
assert player.id == "id"
assert player.team is None
assert player.hand is None
player = models.Player("name", "id", "team", ("A", "B"), "roomid")
assert player.name == "name"
assert player.id == "id"
assert player.team == "team"
assert player.hand == ("A", "B")
assert player.roomId == "roomid"
def test_Message():
now = datetime.now()
message = models.Message("player", "message")
assert message.player_name == "player"
assert message.content == "message"
assert message.timestamp and isinstance(message.timestamp, datetime)
assert str(message) == f"[{now:%H:%M}] player: message"
def test_Room():
player = models.Player("player", "player_id")
room = models.Room("room", "room_id", (player,))
assert room.name == "room" and room.id == "room_id"
assert len(room.players) == 1 and room.players[0] is player
assert room.cutter is None and room.winning_team is None and room.status == ""
assert isinstance(room.cards_found, dict) and isinstance(room.cards_left, dict)
assert not room.cards_found and not room.cards_left
def test_GameState():
state = models.GameState()
assert isinstance(state.messages, list) and not state.messages
assert state.room is None and state.me is None and state.notification is None
def test_GameState_new_message():
state = models.GameState()
assert isinstance(state.messages, list) and not state.messages
message = state.new_message({"player": "player", "message": "test_message"})
assert len(state.messages) == 1 and state.messages[0] is message
assert message.player_name == "player" and message.content == "test_message"
for i in range(99):
last = state.new_message(
{"player": f"player{i}", "message": f"test_message{i}"}
)
assert len(state.messages) == 100
assert state.messages[0] is message and state.messages[99] is last
assert last.player_name == "player98" and last.content == "test_message98"
last = state.new_message({"player": "player99", "message": "test_message99"})
assert len(state.messages) == 100
assert state.messages[0] is not message and state.messages[99] is last
assert (
state.messages[0].player_name == "player0"
and state.messages[0].content == "test_message0"
)
assert last.player_name == "player99" and last.content == "test_message99"
res = state.new_message({"message": "test_message100"})
assert res is None
assert state.messages[99] is last
def test_GameState_new_notification():
state = models.GameState()
assert state.notification is None
notif1 = state.new_notification({"message": "notif1"})
assert state.notification is notif1 and notif1.content == "notif1"
notif2 = state.new_notification({"message": "notif2"})
assert state.notification is notif2 and notif2.content == "notif2"
notif3 = state.new_notification({"unknown": "notif2"})
assert notif3 is None and state.notification is notif2
def test_GameState_update_room():
state = models.GameState()
assert state.room is None
players_data = [{"name": "player1", "id": "id1"}]
room_data = {"name": "roomname", "id": "roomid", "players": players_data}
room = state.update_room(room_data)
assert state.room is room and room.name == "roomname" and room.id == "roomid"
assert len(room.players) == 1
assert room.players[0].name == "player1" and room.players[0].id == "id1"
new_data = {"name": "newname", "cutter": {"name": "cutter", "id": "cutterid"}}
room = state.update_room(new_data)
assert state.room is room and room.name == "newname" and room.id == "roomid"
assert len(room.players) == 1
assert room.players[0].name == "player1" and room.players[0].id == "id1"
assert (
isinstance(room.cutter, models.Player)
and room.cutter.id == "cutterid"
and room.cutter.name == "cutter"
)
new_data = {
"players": [{"name": "player1", "id": "id1"}, {"name": "player2", "id": "id2"}]
}
room = state.update_room(new_data)
assert state.room is room and room.name == "newname" and room.id == "roomid"
assert len(room.players) == 2
def test_GameState_update_me():
state = models.GameState()
assert state.me is None
player = state.update_me({"name": "player1", "id": "id1"})
assert state.me is player and player.name == "player1" and player.id == "id1"
assert player.hand is None
player = state.update_me({"hand": ("A", "A", "B", "A")})
assert state.me is player and player.name == "player1" and player.id == "id1"
assert player.hand == ("A", "A", "B", "A")
def test_GameState_reset():
state = models.GameState()
assert isinstance(state.messages, list) and not state.messages
assert state.room is None and state.me is None and state.notification is None
state.messages = ["m1", "m2"]
state.room = "Room"
state.me = "Me"
state.notification = "Notification"
state.reset()
assert isinstance(state.messages, list) and not state.messages
assert state.room is None and state.me is None and state.notification is None
| 32.470588 | 87 | 0.661051 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 925 | 0.167572 |
9ca0a611b163641ccd9e67b37966344da7a5eac2 | 1,801 | py | Python | words_in_sentences/admin.py | FatliTalk/learnenglish | f0393346f2e696b2af542c05e5005d2495f00e37 | [
"MIT"
] | 1 | 2021-10-06T12:40:28.000Z | 2021-10-06T12:40:28.000Z | words_in_sentences/admin.py | FatliTalk/learnenglish | f0393346f2e696b2af542c05e5005d2495f00e37 | [
"MIT"
] | null | null | null | words_in_sentences/admin.py | FatliTalk/learnenglish | f0393346f2e696b2af542c05e5005d2495f00e37 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Tag, Sentence, Review
# Tag needs no custom options; register it with the default ModelAdmin.
admin.site.register(Tag)
class ReviewInline(admin.StackedInline):
    """Stacked inline editor for Review rows shown on the Sentence admin page."""
    model = Review
    # Do not render extra blank Review forms by default.
    extra = 0
    # Fields that may not be edited through the inline.
    readonly_fields = (
        'modified_time',
        'last_review_date',
    )
class SentenceAdmin(admin.ModelAdmin):
    """Admin configuration for Sentence, with inline Review editing."""
    # docs.djangoproject.com/en/3.2/ref/contrib/admin/#inlinemodeladmin-objects
    inlines = [ReviewInline]
    # Auto-fill the slug from the highlighted word while typing.
    prepopulated_fields = {'slug': ('highlight_word', )}
    # docs.djangoproject.com/en/3.2/ref/contrib/admin/#django.contrib.admin.ModelAdmin.save_model
    # books.agiliq.com/projects/django-admin-cookbook/en/latest/current_user.html
    def save_model(self, request, obj, form, change):
        # Stamp the sentence with the admin user performing the save.
        obj.author = request.user
        super().save_model(request, obj, form, change)
    fieldsets = (
        (None, {
            'fields': (
                'is_active',
                'english_sentence',
                'highlight_word',
                'slug',
                'chinese_translation',
                'original_source',
                'note',
                'tags',
                'publish_date',
            )
        }),
        # System-maintained fields, displayed but not editable (see readonly_fields).
        ('Sentence Readonly fields', {
            'classes': ('wide', 'extrapretty'),
            'fields': (
                'created_time',
                'modified_time',
                'version',
            ),
        }),
    )
    readonly_fields = (
        'created_time',
        'modified_time',
        'version',
    )
    list_display = (
        'english_sentence',
        'highlight_word',
    )
    list_filter = (
        'tags',
        'publish_date',
    )
    search_fields = (
        'english_sentence',
        'highlight_word',
        'chinese_translation',
    )
# Hook the customized admin above into the admin site.
admin.site.register(Sentence, SentenceAdmin)
| 23.38961 | 97 | 0.541921 | 1,646 | 0.913937 | 0 | 0 | 0 | 0 | 0 | 0 | 670 | 0.372016 |
9ca288ba9259937c8aeaac578a1dd9eb50715c34 | 1,930 | py | Python | scheduled_bots/scripts/add_ECO_evidence_code.py | turoger/scheduled-bots | 23fd30ccc242391151af3a1727f9fbf9dc95d433 | [
"MIT"
] | 6 | 2017-05-04T01:04:26.000Z | 2022-03-04T12:22:17.000Z | scheduled_bots/scripts/add_ECO_evidence_code.py | turoger/scheduled-bots | 23fd30ccc242391151af3a1727f9fbf9dc95d433 | [
"MIT"
] | 55 | 2017-03-14T21:16:44.000Z | 2022-03-02T12:39:14.000Z | scheduled_bots/scripts/add_ECO_evidence_code.py | turoger/scheduled-bots | 23fd30ccc242391151af3a1727f9fbf9dc95d433 | [
"MIT"
] | 13 | 2017-02-10T21:40:06.000Z | 2022-01-18T01:27:52.000Z | """
One off script to Map evidence codes between ECO and GO
https://github.com/evidenceontology/evidenceontology/blob/master/gaf-eco-mapping.txt
"""
import datetime
from wikidataintegrator import wdi_core, wdi_login
from scheduled_bots.local import WDPASS, WDUSER
# Log in to Wikidata with the bot credentials from scheduled_bots.local.
login = wdi_login.WDLogin(WDUSER, WDPASS)
# GO evidence-code abbreviation -> Wikidata item (QID) for that evidence code.
# Note IMR and IKR deliberately map to the same item (Q23190842).
go_evidence_codes = {'EXP': 'Q23173789', 'IDA': 'Q23174122', 'IPI': 'Q23174389', 'IMP': 'Q23174671', 'IGI': 'Q23174952',
                     'IEP': 'Q23175251', 'ISS': 'Q23175558', 'ISO': 'Q23190637', 'ISA': 'Q23190738', 'ISM': 'Q23190825',
                     'IGC': 'Q23190826', 'IBA': 'Q23190827', 'IBD': 'Q23190833', 'IKR': 'Q23190842', 'IRD': 'Q23190850',
                     'RCA': 'Q23190852', 'TAS': 'Q23190853', 'NAS': 'Q23190854', 'IC': 'Q23190856', 'ND': 'Q23190857',
                     'IEA': 'Q23190881', 'IMR': 'Q23190842'}
# GO evidence-code abbreviation -> equivalent ECO identifier (per the
# gaf-eco-mapping file referenced in the module docstring).
eco = {'EXP': 'ECO:0000269', 'IBA': 'ECO:0000318', 'IBD': 'ECO:0000319', 'IC': 'ECO:0000305', 'IDA': 'ECO:0000314',
       'IEA': 'ECO:0000501', 'IEP': 'ECO:0000270', 'IGC': 'ECO:0000317', 'IGI': 'ECO:0000316', 'IKR': 'ECO:0000320',
       'IMP': 'ECO:0000315', 'IMR': 'ECO:0000320', 'IPI': 'ECO:0000353', 'IRD': 'ECO:0000321', 'ISA': 'ECO:0000247',
       'ISM': 'ECO:0000255', 'ISO': 'ECO:0000266', 'ISS': 'ECO:0000250', 'NAS': 'ECO:0000303', 'ND': 'ECO:0000307',
       'RCA': 'ECO:0000245', 'TAS': 'ECO:0000304'}
# Shared reference attached to every statement: stated-in (P248) ECO plus
# today's date on P813.
reference = [wdi_core.WDItemID("Q28445410", "P248", is_reference=True),  # stated in ECO
             wdi_core.WDTime(datetime.datetime.now().strftime('+%Y-%m-%dT00:00:00Z'), 'P813', is_reference=True)]
# For each evidence-code item, write an equivalent-class URI (P1709) pointing
# at the OBO PURL of the corresponding ECO term.
for evidence_code, wdid in go_evidence_codes.items():
    data = [wdi_core.WDString('http://purl.obolibrary.org/obo/{}'.format(eco[evidence_code].replace(":", "_")), 'P1709',
                              references=[reference])]
    item = wdi_core.WDItemEngine(wd_item_id=wdid, data=data)
    item.write(login, edit_summary="add ECO equivalent class")
| 58.484848 | 120 | 0.615544 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,025 | 0.531088 |
9ca3e6cc572151c426dc39e7c0ad47e0d772a12a | 315 | py | Python | emenu/conftest.py | Ryszyy/emenu | 735e1e36580e26ba9b22609e461f06aab281aea7 | [
"MIT"
] | null | null | null | emenu/conftest.py | Ryszyy/emenu | 735e1e36580e26ba9b22609e461f06aab281aea7 | [
"MIT"
] | null | null | null | emenu/conftest.py | Ryszyy/emenu | 735e1e36580e26ba9b22609e461f06aab281aea7 | [
"MIT"
] | null | null | null | import pytest
from django.contrib.auth import get_user_model
from emenu.users.tests.factories import UserFactory
@pytest.fixture(autouse=True)
def media_storage(settings, tmpdir):
    # Point MEDIA_ROOT at a per-test temporary directory for every test (autouse).
    settings.MEDIA_ROOT = tmpdir.strpath
@pytest.fixture
def user() -> get_user_model():  # type: ignore
    """Return a user instance created by UserFactory."""
    return UserFactory()
| 21 | 51 | 0.774603 | 0 | 0 | 0 | 0 | 195 | 0.619048 | 0 | 0 | 14 | 0.044444 |
9ca412a5e4887117f87a070c4624fcb1569c592d | 6,322 | py | Python | pychemia/population/_population.py | quanshengwu/PyChemia | 98e9f7a1118b694dbda3ee75411ff8f8d7b9688b | [
"MIT"
] | 1 | 2021-03-26T12:34:45.000Z | 2021-03-26T12:34:45.000Z | pychemia/population/_population.py | quanshengwu/PyChemia | 98e9f7a1118b694dbda3ee75411ff8f8d7b9688b | [
"MIT"
] | null | null | null | pychemia/population/_population.py | quanshengwu/PyChemia | 98e9f7a1118b694dbda3ee75411ff8f8d7b9688b | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import json
import numpy as np
from builtins import str
from abc import ABCMeta, abstractmethod
from pychemia import HAS_PYMONGO
from pychemia.utils.computing import deep_unicode
if HAS_PYMONGO:
from pychemia.db import PyChemiaDB
class Population:
    # NOTE(review): `__metaclass__` is the Python 2 spelling; on Python 3 it
    # has no effect, so the @abstractmethod markers below are not enforced at
    # instantiation time. Confirm before changing, as fixing it would make
    # direct instantiation raise.
    __metaclass__ = ABCMeta
    """
    General class for all optimization algorithms that uses fixed and blocked
    Generations
    """

    def __init__(self, name, tag, use_mongo=True):
        # `name` may be a database name (str) or an already-open
        # PyChemiaDB-like object; the latter is reused as the backend.
        name = deep_unicode(name)
        self.tag = tag
        self.pcdb = None
        if isinstance(name, str):
            self.name = name
            if use_mongo:
                self.pcdb = PyChemiaDB(name)
        else:
            self.name = name.name
            if use_mongo:
                self.pcdb = name

    def __iter__(self):
        # Iterate over the raw MongoDB documents of the population.
        return self.pcdb.entries.find()

    def __len__(self):
        return len(self.members)

    def __str__(self):
        ret = ' Population Name: %s\n' % self.name
        ret += ' Tag: %s\n' % self.tag
        ret += ' Members: %s\n' % len(self)
        return ret

    def disable(self, entry_id):
        # Mark the entry inactive under this population's tag.
        self.pcdb.entries.update({'_id': entry_id}, {'$set': {'status.' + self.tag: False}})

    def enable(self, entry_id):
        # Mark the entry active under this population's tag.
        self.pcdb.entries.update({'_id': entry_id}, {'$set': {'status.' + self.tag: True}})

    def get_values(self, selection):
        """Return {entry_id: objective value} for every id in `selection`."""
        ret = {}
        for i in selection:
            ret[i] = self.value(i)
        return ret

    def update_properties(self, entry_id, new_properties):
        self.pcdb.update(entry_id, properties=new_properties)

    def set_in_properties(self, entry_id, field, value):
        # Set a single sub-field inside the entry's 'properties' document.
        return self.pcdb.entries.update_one({'_id': entry_id}, {'$set': {'properties.'+field: value}})

    def get_population_info(self):
        return self.pcdb.db.population_info.find_one({'tag': self.tag})

    def insert_entry(self, entry):
        # Guarantee the three standard sub-documents exist before insertion.
        if 'structure' not in entry:
            entry['structure']={}
        if 'properties' not in entry:
            entry['properties']={}
        if 'status' not in entry:
            entry['status']={}
        self.pcdb.entries.insert(entry)

    def get_structure(self, entry_id):
        return self.pcdb.get_structure(entry_id)

    def set_structure(self, entry_id, structure):
        return self.pcdb.update(entry_id, structure=structure)

    def get_entry(self, entry_id, projection=None, with_id=True):
        """
        Return an entry identified by 'entry_id'

        :param entry_id: A database identifier
        :param projection: Insert that projection into the query
        :param with_id: When False, strip the '_id' field from the result
        :return: The matching document, or None when not found
        """
        if projection is None:
            projection = {}
        if not with_id:
            projection['_id']=0
        entry = self.pcdb.entries.find_one({'_id': entry_id}, projection)
        return entry

    def ids_sorted(self, selection):
        # Sort the given ids by their objective value (ascending).
        values = np.array([self.value(i) for i in selection])
        sorted_indices = np.argsort(values)
        return np.array(selection)[sorted_indices]

    def load_json(self, filename):
        # Bulk-load entries from a JSON file produced by save_json().
        filep = open(filename, 'r')
        data = json.load(filep)
        for entry in data:
            self.pcdb.entries.insert(entry)

    def random_population(self, n):
        """
        Create N new random structures to the population

        :param n: (int) The number of new structures
        :return: (list) The identifiers for the new structures
        """
        return [self.add_random() for i in range(n)]

    def replace_failed(self):
        # Hook for subclasses; no-op by default.
        pass

    def save_info(self):
        # Upsert this population's metadata document (keyed by tag).
        data = self.pcdb.db.population_info.find_one({'_id': self.tag})
        if data is None:
            data = self.to_dict
            data['_id'] = self.tag
            self.pcdb.db.population_info.insert(data)
        else:
            self.pcdb.db.population_info.update({'_id': self.tag}, self.to_dict)

    def save_json(self, filename):
        # Dump every entry (without Mongo ids) to a pretty-printed JSON file.
        ret = []
        for entry_id in self.members:
            ret.append(self.get_entry(entry_id, with_id=False))
        filep = open(filename, 'w')
        json.dump(ret, filep, sort_keys=True, indent=4, separators=(',', ': '))

    def unlock_all(self, name=None):
        for i in self.members:
            self.pcdb.unlock(i, name=name)

    # --- Abstract interface that concrete populations must implement ---

    @abstractmethod
    def add_random(self):
        pass

    @abstractmethod
    def check_duplicates(self, ids):
        pass

    @abstractmethod
    def cross(self, ids):
        pass

    @abstractmethod
    def distance(self, entry_id, entry_jd):
        pass

    @abstractmethod
    def get_duplicates(self, ids):
        pass

    @abstractmethod
    def from_dict(self, population_dict):
        pass

    @abstractmethod
    def is_evaluated(self, entry_id):
        pass

    @abstractmethod
    def move_random(self, entry_id, factor=0.2, in_place=False, kind='move'):
        pass

    @abstractmethod
    def move(self, entry_id, entry_jd, factor=0.2, in_place=False):
        pass

    @abstractmethod
    def new_entry(self, data, active=True):
        pass

    @abstractmethod
    def recover(self):
        pass

    @abstractmethod
    def value(self, entry_id):
        pass

    @abstractmethod
    def str_entry(self, entry_id):
        pass

    @property
    def actives(self):
        # Ids flagged active under this population's tag.
        return [entry['_id'] for entry in self.pcdb.entries.find({'status.' + self.tag: True}, {'_id': 1})]

    @property
    def actives_evaluated(self):
        return [x for x in self.actives if self.is_evaluated(x)]

    @property
    def actives_no_evaluated(self):
        return [x for x in self.actives if not self.is_evaluated(x)]

    @property
    def evaluated(self):
        return [entry for entry in self.members if self.is_evaluated(entry)]

    @property
    def fraction_evaluated(self):
        # Fraction of active entries already evaluated.
        ret = np.sum([1 for i in self.actives if self.is_evaluated(i)])
        return float(ret) / len(self.actives)

    @property
    def members(self):
        # Every entry id stored in the collection.
        return [x['_id'] for x in self.pcdb.entries.find({}, {'_id': 1})]

    @property
    def to_dict(self):
        return {'name': self.name, 'tag': self.tag}

    @property
    def best_candidate(self):
        # Evaluated entry with the lowest objective value.
        return self.ids_sorted(self.evaluated)[0]

    def refine_progressive(self, entry_id):
        # Hook for subclasses; no-op by default.
        pass
| 27.72807 | 107 | 0.607403 | 6,044 | 0.956027 | 0 | 0 | 1,803 | 0.285195 | 0 | 0 | 811 | 0.128282 |
9ca437236746459609c5690cd68a04412ca8726f | 2,034 | py | Python | python/tests/test_nessie_cli.py | ryantse/nessie | c347d61ec7358fdc5759147cc6207143927246f3 | [
"Apache-2.0"
] | null | null | null | python/tests/test_nessie_cli.py | ryantse/nessie | c347d61ec7358fdc5759147cc6207143927246f3 | [
"Apache-2.0"
] | null | null | null | python/tests/test_nessie_cli.py | ryantse/nessie | c347d61ec7358fdc5759147cc6207143927246f3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `pynessie` package."""
import pytest
import requests_mock
import simplejson as json
from click.testing import CliRunner
from pynessie import __version__
from pynessie import cli
from pynessie.model import ReferenceSchema
def test_command_line_interface(requests_mock: requests_mock) -> None:
    """Exercise the CLI entry point: no-args usage, --help, --version, and
    list-references against a mocked Nessie REST endpoint."""
    runner = CliRunner()
    result = runner.invoke(cli.cli)
    assert result.exit_code == 0
    assert "Usage: cli" in result.output
    help_result = runner.invoke(cli.cli, ["--help"])
    assert help_result.exit_code == 0
    assert "Usage: cli" in help_result.output
    help_result = runner.invoke(cli.cli, ["--version"])
    assert help_result.exit_code == 0
    assert __version__ in help_result.output
    # Stub the trees endpoint so list-references works without a server.
    requests_mock.get(
        "http://localhost:19120/api/v1/trees",
        text=json.dumps([{"name": "main", "type": "BRANCH", "hash": "1234567890abcdef"}]),
    )
    help_result = runner.invoke(cli.cli, ["list-references"])
    assert help_result.exit_code == 0
    references = ReferenceSchema().loads(help_result.output, many=True)
    assert len(references) == 1
    assert references[0].name == "main"
    assert references[0].kind == "BRANCH"
    assert references[0].hash_ == "1234567890abcdef"


@pytest.mark.e2e
def test_command_line_interface_e2e() -> None:
    """Same CLI checks, but run end-to-end (no HTTP mocking)."""
    runner = CliRunner()
    result = runner.invoke(cli.cli)
    assert result.exit_code == 0
    assert "Usage: cli" in result.output
    help_result = runner.invoke(cli.cli, ["--help"])
    assert help_result.exit_code == 0
    assert "Usage: cli" in help_result.output
    help_result = runner.invoke(cli.cli, ["--version"])
    assert help_result.exit_code == 0
    assert __version__ in help_result.output
    help_result = runner.invoke(cli.cli, ["list-references"])
    assert help_result.exit_code == 0
    branches = ReferenceSchema().loads(help_result.output, many=True)
    assert len(branches) == 1
    assert branches[0].name == "main"
9ca439ec225fe16010ea11790d66c7004a6e42da | 129 | py | Python | dongtai_agent_python/context/__init__.py | luzhongyang/DongTai-agent-python-1 | f4e14afb136946809c5e84b7b163a8c32267a27a | [
"Apache-2.0"
] | 17 | 2021-11-13T11:57:10.000Z | 2022-03-26T12:45:30.000Z | dongtai_agent_python/context/__init__.py | luzhongyang/DongTai-agent-python-1 | f4e14afb136946809c5e84b7b163a8c32267a27a | [
"Apache-2.0"
] | 2 | 2021-11-08T07:43:38.000Z | 2021-12-09T02:23:46.000Z | dongtai_agent_python/context/__init__.py | luzhongyang/DongTai-agent-python-1 | f4e14afb136946809c5e84b7b163a8c32267a27a | [
"Apache-2.0"
] | 17 | 2021-11-02T08:21:57.000Z | 2022-02-19T13:24:36.000Z | from .request_context import RequestContext
from .tracker import ContextTracker
from .request import DjangoRequest, FlaskRequest
| 32.25 | 48 | 0.868217 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9ca92489a95d5d6f10768832e66cdc6a00655f55 | 8,183 | py | Python | iaso/api/org_unit_search.py | BLSQ/iaso-copy | 85fb17f408c15e8c2d730416d1312f58f8db39b7 | [
"MIT"
] | 29 | 2020-12-26T07:22:19.000Z | 2022-03-07T13:40:09.000Z | iaso/api/org_unit_search.py | BLSQ/iaso-copy | 85fb17f408c15e8c2d730416d1312f58f8db39b7 | [
"MIT"
] | 150 | 2020-11-09T15:03:27.000Z | 2022-03-07T15:36:07.000Z | iaso/api/org_unit_search.py | BLSQ/iaso | 95c8087c0182bdd576598eb8cd39c440e58e15d7 | [
"MIT"
] | 4 | 2020-11-09T10:38:13.000Z | 2021-10-04T09:42:47.000Z | import re
from django.db.models import Q, Count, Sum, Case, When, IntegerField, Value
from iaso.models import OrgUnit, Instance, DataSource
def build_org_units_queryset(queryset, params, profile):
    """Apply the request parameters to an OrgUnit queryset.

    Translates the supported query-string parameters (free-text search,
    type, source/version, geography, parent, links, groups, ...) into
    chained Django filters, then attaches the select/prefetch hints used
    downstream.

    :param queryset: base OrgUnit queryset to narrow down.
    :param params: dict-like of request parameters.
    :param profile: requesting user's profile; used by ``defaultVersion``
        and ``rootsForUser``. May be None when those params are absent.
    :return: the filtered queryset, de-duplicated with ``distinct()``.
    """
    validation_status = params.get("validation_status", OrgUnit.VALIDATION_VALID)
    has_instances = params.get("hasInstances", None)
    date_from = params.get("dateFrom", None)
    date_to = params.get("dateTo", None)
    search = params.get("search", None)
    org_unit_type_id = params.get("orgUnitTypeId", None)
    source_id = params.get("sourceId", None)
    with_shape = params.get("withShape", None)
    with_location = params.get("withLocation", None)
    geography = params.get("geography", None)
    parent_id = params.get("parent_id", None)
    source = params.get("source", None)
    group = params.get("group", None)
    version = params.get("version", None)
    default_version = params.get("defaultVersion", None)
    org_unit_parent_id = params.get("orgUnitParentId", None)
    linked_to = params.get("linkedTo", None)
    link_validated = params.get("linkValidated", True)
    link_source = params.get("linkSource", None)
    link_version = params.get("linkVersion", None)
    roots_for_user = params.get("rootsForUser", None)
    ignore_empty_names = params.get("ignoreEmptyNames", False)
    org_unit_type_category = params.get("orgUnitTypeCategory", None)
    if validation_status != "all":
        queryset = queryset.filter(validation_status=validation_status)
    if search:
        # "ids:..." / "refs:..." prefixes search by primary key / source ref;
        # anything else matches the name or the aliases array.
        if search.startswith("ids:"):
            s = search.replace("ids:", "")
            try:
                ids = re.findall("[A-Za-z0-9_-]+", s)
                queryset = queryset.filter(id__in=ids)
            except Exception:  # was a bare `except:`; don't swallow SystemExit/KeyboardInterrupt
                queryset = queryset.filter(id__in=[])
                print("Failed parsing ids in search", search)
        elif search.startswith("refs:"):
            s = search.replace("refs:", "")
            try:
                refs = re.findall("[A-Za-z0-9_-]+", s)
                queryset = queryset.filter(source_ref__in=refs)
            except Exception:  # was a bare `except:`; see above
                queryset = queryset.filter(source_ref__in=[])
                print("Failed parsing refs in search", search)
        else:
            queryset = queryset.filter(Q(name__icontains=search) | Q(aliases__contains=[search]))
    if group:
        queryset = queryset.filter(groups__in=group.split(","))
    if source:
        source = DataSource.objects.get(id=source)
        if source.default_version:
            queryset = queryset.filter(version=source.default_version)
        else:
            queryset = queryset.filter(version__data_source_id=source)
    if version:
        queryset = queryset.filter(version=version)
    if default_version == "true" and profile is not None:
        queryset = queryset.filter(version=profile.account.default_version)
    # Instance-creation date range: either bound may be given on its own.
    if date_from is not None and date_to is None:
        queryset = queryset.filter(instance__created_at__gte=date_from)
    if date_from is None and date_to is not None:
        queryset = queryset.filter(instance__created_at__lte=date_to)
    if date_from is not None and date_to is not None:
        queryset = queryset.filter(instance__created_at__range=[date_from, date_to])
    if has_instances is not None:
        # Only non-deleted instances with an actual file count as "instances".
        if has_instances == "true":
            ids_with_instances = (
                Instance.objects.filter(org_unit__isnull=False)
                .exclude(file="")
                .exclude(deleted=True)
                .values_list("org_unit_id", flat=True)
            )
            queryset = queryset.filter(id__in=ids_with_instances)
        if has_instances == "false":
            ids_with_instances = (
                Instance.objects.filter(org_unit__isnull=False)
                .exclude(file="")
                .exclude(deleted=True)
                .values_list("org_unit_id", flat=True)
            )
            queryset = queryset.exclude(id__in=ids_with_instances)
        if has_instances == "duplicates":
            ids_with_duplicate_instances = (
                Instance.objects.with_status()
                .filter(org_unit__isnull=False, status=Instance.STATUS_DUPLICATED)
                .exclude(file="")
                .exclude(deleted=True)
                .values_list("org_unit_id", flat=True)
            )
            queryset = queryset.filter(id__in=ids_with_duplicate_instances)
    if org_unit_type_id:
        queryset = queryset.filter(org_unit_type__id__in=org_unit_type_id.split(","))
    # Geography filters: point location vs. simplified shape.
    if geography == "location":
        queryset = queryset.filter(location__isnull=False)
    if geography == "shape":
        queryset = queryset.filter(simplified_geom__isnull=False)
    if geography == "none":
        queryset = queryset.filter(Q(location__isnull=True) & Q(simplified_geom__isnull=True))
    if geography == "any":
        queryset = queryset.filter(Q(location__isnull=False) | Q(simplified_geom__isnull=False))
    if with_shape == "true":
        queryset = queryset.filter(simplified_geom__isnull=False)
    if with_shape == "false":
        queryset = queryset.filter(simplified_geom__isnull=True)
    if with_location == "true":
        queryset = queryset.filter(Q(location__isnull=False))
    if with_location == "false":
        queryset = queryset.filter(Q(location__isnull=True))
    if parent_id:
        # "0" is the sentinel for "root org units" (no parent).
        if parent_id == "0":
            queryset = queryset.filter(parent__isnull=True)
        else:
            queryset = queryset.filter(parent__id=parent_id)
    if roots_for_user:
        if profile.org_units.all():
            queryset = queryset.filter(id__in=profile.org_units.all())
        else:
            queryset = queryset.filter(parent__isnull=True)
    if org_unit_parent_id:
        parent = OrgUnit.objects.get(id=org_unit_parent_id)
        queryset = queryset.hierarchy(parent)
    if linked_to:
        # Match org units linked (in either direction) to the given unit,
        # optionally restricted to validated links.
        is_destination = Q(destination_set__destination_id=linked_to)
        if link_validated != "all":
            is_destination &= Q(destination_set__validated=link_validated)
        is_source = Q(source_set__source_id=linked_to)
        if link_validated != "all":
            is_source &= Q(source_set__validated=link_validated)
        queryset = queryset.filter(is_source | is_destination)
    if link_source:
        queryset = queryset.filter(version__data_source_id=link_source)
    if link_version:
        queryset = queryset.filter(version_id=link_version)
    if source_id:
        queryset = queryset.filter(sub_source=source_id)
    if org_unit_type_category:
        queryset = queryset.filter(org_unit_type__category=org_unit_type_category.upper())
    if ignore_empty_names:
        queryset = queryset.filter(~Q(name=""))
    # Eager-load relations used by the serializers (ancestors up to 4 levels).
    queryset = queryset.select_related("version__data_source")
    queryset = queryset.select_related("org_unit_type")
    queryset = queryset.prefetch_related("groups")
    queryset = queryset.prefetch_related("parent")
    queryset = queryset.prefetch_related("parent__parent")
    queryset = queryset.prefetch_related("parent__parent__parent")
    queryset = queryset.prefetch_related("parent__parent__parent__parent")
    return queryset.distinct()
def annotate_query(queryset, count_instances, count_per_form, forms):
    """Optionally annotate an OrgUnit queryset with instance counts.

    :param queryset: OrgUnit queryset to annotate.
    :param count_instances: when truthy, add ``instances_count`` (total
        non-deleted, non-test-device instances with a file).
    :param count_per_form: when truthy, add one ``form_<id>_instances``
        annotation per form in ``forms``, with the same exclusions.
    :param forms: iterable of Form objects (used only with count_per_form).
    :return: the annotated queryset.
    """
    if count_instances:
        queryset = queryset.annotate(
            instances_count=Count(
                "instance",
                filter=(~Q(instance__file="") & ~Q(instance__device__test_device=True) & ~Q(instance__deleted=True)),
            )
        )
    if count_per_form:
        # Conditional Sum(Case(...)) per form: 1 per matching instance, else 0.
        annotations = {
            f"form_{frm.id}_instances": Sum(
                Case(
                    When(
                        Q(instance__form_id=frm.id)
                        & ~Q(instance__file="")
                        & ~Q(instance__device__test_device=True)
                        & ~Q(instance__deleted=True),
                        then=1,
                    ),
                    default=0,
                    output_field=IntegerField(),
                )
            )
            for frm in forms
        }
        queryset = queryset.annotate(**annotations)
    return queryset
| 37.365297 | 117 | 0.633142 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 735 | 0.08982 |
9cab8a834e9e682c0ee397c003bc6bb100756a66 | 7,551 | py | Python | deep_NLP.py | AI-tist/NlpDeeplearning | 6d8b5b9d244fc49df00858023118318a2d4125d4 | [
"MIT"
] | null | null | null | deep_NLP.py | AI-tist/NlpDeeplearning | 6d8b5b9d244fc49df00858023118318a2d4125d4 | [
"MIT"
] | null | null | null | deep_NLP.py | AI-tist/NlpDeeplearning | 6d8b5b9d244fc49df00858023118318a2d4125d4 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import xgboost as xgb
from tqdm import tqdm
from sklearn.svm import SVC
from keras.models import Sequential
from keras.layers.recurrent import LSTM, GRU
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.embeddings import Embedding
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
from sklearn import preprocessing, decomposition, model_selection, metrics, pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from keras.layers import GlobalMaxPooling1D, Conv1D, MaxPooling1D, Flatten, Bidirectional, SpatialDropout1D
from keras.preprocessing import sequence, text
from keras.callbacks import EarlyStopping
import nltk
nltk.download("stopwords")
from nltk import word_tokenize
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
# Load the datasets; train.csv must provide 'text' and 'author' columns
# (both are used below for feature extraction and labels).
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
sample = pd.read_csv('sample_submission.csv')
def multiclass_logloss(actual, predicted, eps=1e-15):
    """Multi-class version of the logarithmic loss metric.

    :param actual: 1-D array-like of integer class labels, or a 2-D one-hot
        (or probabilistic) target matrix.
    :param predicted: 2-D array-like of class probabilities, one row per sample.
    :param eps: probabilities are clipped to [eps, 1 - eps] so log() is finite.
    :return: mean negative log-likelihood over the samples.
    """
    # Accept plain lists as well as ndarrays (backward compatible).
    actual = np.asarray(actual)
    predicted = np.asarray(predicted)
    if actual.ndim == 1:
        # Convert integer labels to a one-hot matrix (vectorized; replaces
        # the original Python loop).
        one_hot = np.zeros((actual.shape[0], predicted.shape[1]))
        one_hot[np.arange(actual.shape[0]), actual] = 1
        actual = one_hot
    clip = np.clip(predicted, eps, 1 - eps)
    rows = actual.shape[0]
    return -np.sum(actual * np.log(clip)) / rows
# Label-encode the author names into integer class ids.
lbl_enc = preprocessing.LabelEncoder()
y = lbl_enc.fit_transform(train.author.values)

# Stratified 90/10 train/validation split.
xtrain, xvalid, ytrain, yvalid = train_test_split(train.text.values, y,
                                                  stratify=y,
                                                  random_state=42,
                                                  test_size=0.1, shuffle=True)

# Word-level TF-IDF with uni- to tri-grams, fitted on train + validation text
# (semi-supervised style: unlabeled validation text contributes vocabulary).
tfv = TfidfVectorizer(min_df=3, max_features=None,
                      strip_accents='unicode', analyzer='word', token_pattern=r'\w{1,}',
                      ngram_range=(1, 3), use_idf=1, smooth_idf=1, sublinear_tf=1,
                      stop_words='english')
tfv.fit(list(xtrain) + list(xvalid))
xtrain_tfv = tfv.transform(xtrain)
xvalid_tfv = tfv.transform(xvalid)

# Scorer wrapping multiclass_logloss (lower is better, needs class probabilities).
mll_scorer = metrics.make_scorer(
    multiclass_logloss, greater_is_better=False, needs_proba=True)

# Components prepared for later experiments.
svd = TruncatedSVD()
scl = preprocessing.StandardScaler()
lr_model = LogisticRegression()
nb_model = MultinomialNB()

# Grid-search the Naive Bayes smoothing parameter on the TF-IDF features.
clf = pipeline.Pipeline([('nb', nb_model)])
param_grid = {'nb__alpha': [0.001, 0.01, 0.1, 1, 10, 100]}
model = GridSearchCV(estimator=clf, param_grid=param_grid, scoring=mll_scorer,
                     verbose=10, n_jobs=-1, iid=True, refit=True, cv=2)
# We can use the full data here, but only xtrain is used.
model.fit(xtrain_tfv, ytrain)
print("Best score: %0.3f" % model.best_score_)
print("Best parameters set:")
best_parameters = model.best_estimator_.get_params()
for param_name in sorted(param_grid.keys()):
    print("\t%s: %r" % (param_name, best_parameters[param_name]))

# Load the GloVe vectors into a dict: word -> float32 vector (300-d file).
# Fix: the original printed every vector (debug leftover flooding stdout)
# and never closed the file on error; use a context manager instead.
embeddings_index = {}
with open('../input/data/glove.840B.300d.txt') as f:
    for line in tqdm(f):
        values = line.split()
        word = values[0]
        coefs = np.asarray(values[1:], dtype='float32')
        embeddings_index[word] = coefs
print('Found %s word vectors.' % len(embeddings_index))
def sent2vec(s):
    """Return a normalized (unit L2 norm) GloVe vector for a whole sentence.

    Tokenizes ``s``, drops stopwords and non-alphabetic tokens, sums the
    GloVe vectors of the remaining words, and L2-normalizes the sum.
    Returns a 300-d zero vector when no token has an embedding.

    :param s: the sentence (anything ``str()``-able).
    """
    # Fix: the original did str(s).lower().decode('utf-8'), but str has no
    # .decode() in Python 3 (this script uses print() and f-strings-era
    # libraries), so every call raised AttributeError.
    words = word_tokenize(str(s).lower())
    words = [w for w in words if w not in stop_words and w.isalpha()]
    M = []
    for w in words:
        vec = embeddings_index.get(w)  # skip words without an embedding
        if vec is not None:
            M.append(vec)
    M = np.array(M)
    v = M.sum(axis=0)
    if not isinstance(v, np.ndarray):
        # No embedded word at all: M.sum() is the scalar 0.0 -> return zeros.
        return np.zeros(300)
    return v / np.sqrt((v ** 2).sum())
# create sentence vectors using the above function for training and validation set
xtrain_glove = [sent2vec(x) for x in tqdm(xtrain)]
xvalid_glove = [sent2vec(x) for x in tqdm(xvalid)]
xtrain_glove = np.array(xtrain_glove)
xvalid_glove = np.array(xvalid_glove)
# Fitting a simple xgboost on glove features
clf = xgb.XGBClassifier(max_depth=7, n_estimators=200, colsample_bytree=0.8,
                        subsample=0.8, nthread=10, learning_rate=0.1, silent=False)
clf.fit(xtrain_glove, ytrain)
predictions = clf.predict_proba(xvalid_glove)
print("logloss: %0.3f " % multiclass_logloss(yvalid, predictions))
# scale the data before any neural net:
scl = preprocessing.StandardScaler()
xtrain_glove_scl = scl.fit_transform(xtrain_glove)
xvalid_glove_scl = scl.transform(xvalid_glove)
# we need to binarize the labels for the neural net
ytrain_enc = np_utils.to_categorical(ytrain)
yvalid_enc = np_utils.to_categorical(yvalid)
# using keras tokenizer here
token = text.Tokenizer(num_words=None)
max_len = 70  # sequences are padded/truncated to 70 tokens below
token.fit_on_texts(list(xtrain) + list(xvalid))
xtrain_seq = token.texts_to_sequences(xtrain)
xvalid_seq = token.texts_to_sequences(xvalid)
# zero pad the sequences
xtrain_pad = sequence.pad_sequences(xtrain_seq, maxlen=max_len)
xvalid_pad = sequence.pad_sequences(xvalid_seq, maxlen=max_len)
word_index = token.word_index
# create an embedding matrix for the words we have in the dataset
# Row i holds the GloVe vector of the token with tokenizer index i; rows for
# out-of-vocabulary tokens stay all-zero.
embedding_matrix = np.zeros((len(word_index) + 1, 300))
for word, i in tqdm(word_index.items()):
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        embedding_matrix[i] = embedding_vector
# GRU with glove embeddings and two dense layers
model = Sequential()
model.add(Embedding(len(word_index) + 1,
                    300,
                    weights=[embedding_matrix],
                    input_length=max_len,
                    trainable=False))  # keep the pretrained vectors frozen
model.add(SpatialDropout1D(0.3))
model.add(GRU(300, dropout=0.3, recurrent_dropout=0.3,
              return_sequences=True))
model.add(GRU(300, dropout=0.3, recurrent_dropout=0.3))
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.8))
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.8))
model.add(Dense(3))  # 3-way softmax head
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
# Fit the model with early stopping callback
# Stops after 3 consecutive epochs without val_loss improvement.
earlystop = EarlyStopping(
    monitor='val_loss', min_delta=0, patience=3, verbose=0, mode='auto')
model.fit(xtrain_pad, y=ytrain_enc, batch_size=512, epochs=100,
          verbose=1, validation_data=(xvalid_pad, yvalid_enc), callbacks=[earlystop])
| 33.411504 | 107 | 0.713283 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,717 | 0.227387 |
9cac0ad4d3324c47506f3d542aa657ca67da7b25 | 81 | py | Python | cbt/apps.py | belloshehu/multiple-choice-questions | abfb7ac8cc24bc3f9ee34e9505bc6c6944786ac0 | [
"MIT"
] | null | null | null | cbt/apps.py | belloshehu/multiple-choice-questions | abfb7ac8cc24bc3f9ee34e9505bc6c6944786ac0 | [
"MIT"
] | 2 | 2020-09-03T21:48:33.000Z | 2020-09-22T08:51:14.000Z | cbt/apps.py | belloshehu/multiple-choice-questions | abfb7ac8cc24bc3f9ee34e9505bc6c6944786ac0 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class CbtConfig(AppConfig):
    """Django application configuration for the ``cbt`` app."""
    name = 'cbt'  # dotted path of the application this config applies to
| 13.5 | 33 | 0.728395 | 44 | 0.54321 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 0.061728 |
9cac9b27985bf5baf54eb0058ab26de0a6a723b5 | 176 | py | Python | examples/python/src/authors/models.py | ShivamSarodia/sqlc | 194065e223a53d19219efb290a53b45fcd036a6b | [
"MIT"
] | 5,153 | 2019-08-19T19:24:06.000Z | 2022-03-31T22:26:53.000Z | examples/python/src/authors/models.py | ShivamSarodia/sqlc | 194065e223a53d19219efb290a53b45fcd036a6b | [
"MIT"
] | 871 | 2019-08-19T02:20:57.000Z | 2022-03-31T21:03:04.000Z | examples/python/src/authors/models.py | ShivamSarodia/sqlc | 194065e223a53d19219efb290a53b45fcd036a6b | [
"MIT"
] | 377 | 2019-09-04T07:27:09.000Z | 2022-03-31T21:54:45.000Z | # Code generated by sqlc. DO NOT EDIT.
import dataclasses
from typing import Optional
@dataclasses.dataclass()
class Author:
    """Row model for author query results (sqlc-generated; see file header)."""
    id: int
    name: str
    bio: Optional[str]  # may be None — Optional per the generated schema
| 16 | 38 | 0.715909 | 62 | 0.352273 | 0 | 0 | 87 | 0.494318 | 0 | 0 | 38 | 0.215909 |
9cacd9c2da031e3221ef68c08b555dad87d95e00 | 108 | py | Python | src/wagtail_site_inheritance/__init__.py | labd/wagtail-site-inheritance | 2d13de198a5b6354c49073dacb3964eef8e71647 | [
"MIT"
] | 5 | 2020-03-09T18:53:08.000Z | 2022-02-08T12:38:07.000Z | src/wagtail_site_inheritance/__init__.py | labd/wagtail-site-inheritance | 2d13de198a5b6354c49073dacb3964eef8e71647 | [
"MIT"
] | null | null | null | src/wagtail_site_inheritance/__init__.py | labd/wagtail-site-inheritance | 2d13de198a5b6354c49073dacb3964eef8e71647 | [
"MIT"
] | null | null | null | __version__ = "0.0.1"
default_app_config = "wagtail_site_inheritance.apps.WagtailSiteInheritanceAppConfig"
| 27 | 84 | 0.842593 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 70 | 0.648148 |
9cadce2a65716efbbbaef6fb365b1a19a14d1464 | 779 | py | Python | code/mod.py | aragilar/python-testing | 312496a9016975d99230744e262799999740720b | [
"CC-BY-4.0"
] | null | null | null | code/mod.py | aragilar/python-testing | 312496a9016975d99230744e262799999740720b | [
"CC-BY-4.0"
] | null | null | null | code/mod.py | aragilar/python-testing | 312496a9016975d99230744e262799999740720b | [
"CC-BY-4.0"
] | null | null | null | import os.path
import numpy as np
def sinc2d(x, y):
    """Separable 2-D unnormalized sinc: (sin(x)/x) * (sin(y)/y).

    The removable singularity at t == 0 is evaluated as 1.0 in each axis,
    so sinc2d(0.0, 0.0) == 1.0.
    """
    def _sinc(t):
        return 1.0 if t == 0.0 else np.sin(t) / t

    return _sinc(x) * _sinc(y)
def a(x):
    """Return x incremented by one."""
    return 1 + x


def b(x):
    """Return x doubled."""
    return x * 2


def c(x):
    """Return 2 * (x + 1): the composition b(a(x))."""
    return b(a(x))
def std(vals):
    # surely this is cheating...
    # NOTE(review): returns a constant regardless of input. This repository
    # is a testing tutorial, so this looks like a deliberately wrong stub
    # meant to be caught by tests — confirm before "fixing" it.
    return 1.0
def fib(n):
    """Return the n-th Fibonacci number (fib(0) == 0, fib(1) == 1).

    Iterative O(n) replacement for the naive double recursion, which was
    exponential in n and hit the recursion limit for negative inputs.
    For n < 0 this now returns 0 instead of recursing forever.
    """
    prev, curr = 0, 1
    for _ in range(n):
        prev, curr = curr, prev + curr
    return prev
def f(workdir=None):
    """Write "42" to ``yes.txt`` inside *workdir* unless ``no.txt`` exists.

    workdir defaults to the current working directory; any non-None value
    is coerced to str (so path-like objects work).
    """
    base = os.getcwd() if workdir is None else str(workdir)
    if not os.path.exists(os.path.join(base, "no.txt")):
        # Renamed the file handle: the original shadowed the function name.
        with open(os.path.join(base, "yes.txt"), "w") as out:
            out.write("42")
| 18.116279 | 62 | 0.499358 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 52 | 0.066752 |
9cade016723908e48e753e2ca24979b97411963e | 1,970 | py | Python | chooks/commands/add.py | thegedge/chooks | 1a5115d16f67d716206da456ee405a562a6df042 | [
"MIT"
] | null | null | null | chooks/commands/add.py | thegedge/chooks | 1a5115d16f67d716206da456ee405a562a6df042 | [
"MIT"
] | 1 | 2015-05-11T18:25:49.000Z | 2015-05-11T18:29:11.000Z | chooks/commands/add.py | thegedge/chooks | 1a5115d16f67d716206da456ee405a562a6df042 | [
"MIT"
] | null | null | null | """Adds a chook to a git repository.
Usage:
chooks add [--stdin | --argument] [--once | --filter=FILTER...] [--global]
[--fatal] [--hook=NAME...] [--name=NAME] [--disabled]
[--] <command> [<args>...]
Options:
--stdin Supply input files to this chook via stdin.
--filter=<filter> Only execute this chook for files who names match the
given filter.
--global Execute this chook for all repositories.
--hook=<name> Name of the git hooks this chook will be executed for
(if not specified, default to all git hooks).
--fatal Return a nonzero status to the git hook if this chook
returns a nonzero status.
--once If specified, this chook is only applied once for the git
hook. If not specified, this chook is applied against all
files echoed by 'git status' (excluding ignored/untracked)
--name=<name> Custom hook name (defaults to the command name).
--disabled Default the hook to a disabled state.
"""
from chooks import constants
from chooks import git
# TODO interactive mode?
def run(args):
    """Persist a chook described by docopt-style *args* via git config.

    Returns 0 when git.set_hook_values reports success, 1 otherwise.
    """
    command = args['<command>']
    full_cmd = '%s %s' % (command, ' '.join(args['<args>']))
    # Join multi-valued options only when present; otherwise keep the
    # falsy value (None / empty list) exactly as docopt produced it.
    raw_filters = args.get('--filter')
    filters = ','.join(raw_filters) if raw_filters else raw_filters
    # TODO validate hook names, making sure they're actually git hooks
    raw_hooks = args.get('--hook')
    hooks = ','.join(raw_hooks) if raw_hooks else raw_hooks
    values = {
        constants.KEY_COMMAND: full_cmd,
        constants.KEY_STDIN: args.get('--stdin'),
        constants.KEY_FILTERS: filters,
        constants.KEY_HOOKS: hooks,
        constants.KEY_FATAL: args.get('--fatal'),
        constants.KEY_DISABLED: args.get('--disabled'),
    }
    hook_name = args.get('--name') or command
    is_global = args.get('--global', False)
    return 0 if git.set_hook_values(hook_name, values, is_global=is_global) else 1
| 38.627451 | 79 | 0.601015 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,330 | 0.675127 |
9cade33f66ce27c884971c2cb87a60d28b5e287f | 1,095 | py | Python | dwave/inspector/utils.py | hotmess47/dwave-inspector | b958ab0c2c943c8d55d610b5903cffd4b88a2dff | [
"Apache-2.0"
] | 3 | 2020-02-27T18:55:39.000Z | 2021-09-16T14:37:46.000Z | dwave/inspector/utils.py | hotmess47/dwave-inspector | b958ab0c2c943c8d55d610b5903cffd4b88a2dff | [
"Apache-2.0"
] | 46 | 2020-02-25T10:12:45.000Z | 2022-02-23T22:03:52.000Z | dwave/inspector/utils.py | hotmess47/dwave-inspector | b958ab0c2c943c8d55d610b5903cffd4b88a2dff | [
"Apache-2.0"
] | 11 | 2020-02-27T17:14:26.000Z | 2022-03-11T10:14:36.000Z | # Copyright 2020 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import operator
# Names exported by ``from dwave.inspector.utils import *``.
__all__ = [
    'itemsgetter',
]
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
def itemsgetter(*items):
    """Variant of :func:`operator.itemgetter` that returns a callable that
    always returns a tuple, even when called with one argument. This is to make
    the result type consistent, regardless of input.
    """
    if len(items) != 1:
        # Zero or several items: operator.itemgetter already yields a tuple
        # (or raises TypeError for zero items, matching the old behavior).
        return operator.itemgetter(*items)
    (single,) = items

    def getter(obj):
        return (obj[single], )

    return getter
| 27.375 | 79 | 0.702283 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 792 | 0.723288 |
9caf4f6433bdc8722933884a9170248eea827c22 | 3,432 | py | Python | MakeConects.py | Gimba/AmberUtils | f10ccc474a4fa4ea3aa2d93f85b99e2bb8b5d3b0 | [
"MIT"
] | null | null | null | MakeConects.py | Gimba/AmberUtils | f10ccc474a4fa4ea3aa2d93f85b99e2bb8b5d3b0 | [
"MIT"
] | null | null | null | MakeConects.py | Gimba/AmberUtils | f10ccc474a4fa4ea3aa2d93f85b99e2bb8b5d3b0 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# Copyright (c) 2015 William Lees
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Read SSBOND directives from a PDB, and generate CONECT records at the end. This can be used to fix up a PDB file
# after residues and atom numbers have been modified.
__author__ = 'William Lees'
__docformat__ = "restructuredtext en"
import sys
import argparse
def main(argv):
parser = argparse.ArgumentParser(description='Read SSBOND directives from a PDB, and generate corresponding CONECT records')
parser.add_argument('infile', help='input file (PDB format)')
parser.add_argument('outfile', help='output file (PDB format)')
args = parser.parse_args()
ssbonds = []
atoms = {}
written = False
with open(args.infile, "r") as f, open(args.outfile, "w") as of:
for line in f:
line = line.strip()
if line[0:6] == "SSBOND":
res1 = line[15:22]
res2 = line[29:36]
ssbonds.append((res1, res2))
elif line[0:6] == "ATOM ":
res = line[21] + ' ' + line [22:27]
atom = line[12:16]
number = line[6:11]
if 'SG' in atom:
atoms[res] = number
elif line[0:6] == "CONECT":
continue
elif line[0:3] == "END":
if len(line) == 3 or line[3] == ' ':
for r1,r2 in ssbonds:
if r1 in atoms and r2 in atoms:
of.write("CONECT%s%s\n" % (atoms[r1], atoms[r2]))
of.write("CONECT%s%s\n" % (atoms[r2], atoms[r1]))
else:
print 'Warning: atoms corresponding to SSBOND(%s,%s) were not found.' % (r1, r2)
written = True
of.write(line + '\n')
if not written:
print 'Warning: END record was not found. CONECTS will be written at the end of the file.'
for r1, r2 in ssbonds:
if r1 in atoms and r2 in atoms:
of.write("CONECT%s%s\n" % (atoms[r1], atoms[r2]))
of.write("CONECT%s%s\n" % (atoms[r2], atoms[r1]))
else:
print 'Warning: atoms corresponding to SSBOND(%s,%s) were not found.' % (r1, r2)
if __name__ == "__main__":  # allow use both as a script and as an import
    main(sys.argv)
| 41.349398 | 128 | 0.592657 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,759 | 0.512529 |
9cb06ce3367f1f618bf6ef97ee8c9a3b4e22a068 | 481 | py | Python | supersalon/professionals/models.py | dogukantufekci/supersalon | 6ce552712d9d91fb493043030a09386544ed8b81 | [
"BSD-3-Clause"
] | null | null | null | supersalon/professionals/models.py | dogukantufekci/supersalon | 6ce552712d9d91fb493043030a09386544ed8b81 | [
"BSD-3-Clause"
] | null | null | null | supersalon/professionals/models.py | dogukantufekci/supersalon | 6ce552712d9d91fb493043030a09386544ed8b81 | [
"BSD-3-Clause"
] | null | null | null | from django.db import models
from django.utils.translation import ugettext_lazy as _
class Professional(models.Model):
    """Salon professional profile; a one-to-one extension of ``users.User``."""
    # Linked user; doubles as the primary key of this table.
    user = models.OneToOneField('users.User', primary_key=True, related_name='professional', verbose_name=_("User"))
    class Meta:
        # Alphabetical ordering by the linked user's first, then last name.
        ordering = ('user__first_name', 'user__last_name',)
        verbose_name = _("Professional")
        verbose_name_plural = _("Professionals")
    def __str__(self):
        """Render as the linked user's full name."""
        return self.user.get_full_name()
9cb27a0b9ec9facd3f25dae465a1a3f58269e3b3 | 1,882 | py | Python | test.py | imjacksonchen/weightTracker | 9dfdd9a943cfdc5148a5a15876df2f159b0623cf | [
"MIT"
] | null | null | null | test.py | imjacksonchen/weightTracker | 9dfdd9a943cfdc5148a5a15876df2f159b0623cf | [
"MIT"
] | null | null | null | test.py | imjacksonchen/weightTracker | 9dfdd9a943cfdc5148a5a15876df2f159b0623cf | [
"MIT"
] | null | null | null | import unittest
import datetime
from weightTrack import WeightNote
class TestWeightNote(unittest.TestCase):
    """Unit tests for WeightNote's getters and setters.

    The time-related tests are left disabled (as string literals) because
    comparing against datetime.now() is inherently flaky; a clock-mocking
    library (e.g. freezegun) would be needed to enable them.
    """
    ### Testing getter methods ###
    def test_shouldGetWeight(self):
        testWeight = WeightNote(100, "Ate breakfast")
        self.assertEqual(testWeight.getWeight(), 100, "Should be 100")
    # Note: Impossible to check time with current time; instead
    # use library called freezetime to mock a date and time
    """
    def test_shouldGetTime(self):
        testWeight = WeightNote(100, "Ate breakfast")
        self.assertEqual(testWeight.getTime(),
                         datetime.datetime.now(),
                         "Should be same time as creation")
    """
    def test_shouldGetNote(self):
        testWeight = WeightNote(100, "Ate breakfast")
        self.assertEqual(testWeight.getNote(),
                         "Ate breakfast",
                         "Should be 'Ate breakfast'")
    ### Testing setter methods ###
    def test_shouldSetWeight(self):
        testWeight = WeightNote(100, "Ate breakfast")
        testWeight.setWeight(150)
        # BUG FIX: the failure message previously read "Should be 100"
        # although the assertion checks for 150.
        self.assertEqual(testWeight.getWeight(), 150, "Should be 150")
    # Note: Impossible to check time with current time; instead
    # use library called freezetime to mock a date and time
    """
    def test_shouldSetTime(self):
        testWeight = WeightNote(100, "Ate breakfast")
        self.assertEqual(testWeight.getTime(),
                         datetime.datetime.now(),
                         "Should be same time as creation")
    """
    def test_shouldSetNote(self):
        testWeight = WeightNote(100, "Ate breakfast")
        testWeight.setNote("Ate lunch")
        self.assertEqual(testWeight.getNote(),
                         "Ate lunch",
                         "Should be 'Ate lunch'")
# Entry point: run the suite when executed directly (python test.py).
if __name__ == "__main__":
    unittest.main()
| 32.448276 | 70 | 0.600956 | 1,755 | 0.932519 | 0 | 0 | 0 | 0 | 0 | 0 | 1,011 | 0.537194 |
9cb339f88a19712fdd5c854216d9d0e7bc780ed9 | 22 | py | Python | networking_infoblox/neutron/cmd/eventlet/__init__.py | rav28/networking-infoblox | fce27ca1c2bafb9b0a90aa22388bbc2dbec68294 | [
"Apache-2.0"
] | null | null | null | networking_infoblox/neutron/cmd/eventlet/__init__.py | rav28/networking-infoblox | fce27ca1c2bafb9b0a90aa22388bbc2dbec68294 | [
"Apache-2.0"
] | 1 | 2021-11-20T19:39:58.000Z | 2021-11-20T19:39:58.000Z | networking_infoblox/neutron/cmd/eventlet/__init__.py | rav28/networking-infoblox | fce27ca1c2bafb9b0a90aa22388bbc2dbec68294 | [
"Apache-2.0"
] | 1 | 2021-11-20T19:37:55.000Z | 2021-11-20T19:37:55.000Z | __author__ = 'hhwang'
| 11 | 21 | 0.727273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.363636 |
9cb3f5cf51b8b20433b3b320d11d55ba5d9140d5 | 243 | py | Python | debauto/utils.py | flaviomilan/python-debauto-br | c776c9094b0fc380797360231d9d1574813b2852 | [
"MIT"
] | 5 | 2019-05-02T01:05:28.000Z | 2021-12-30T05:30:02.000Z | debauto/utils.py | flaviomilan/python-debauto-br | c776c9094b0fc380797360231d9d1574813b2852 | [
"MIT"
] | null | null | null | debauto/utils.py | flaviomilan/python-debauto-br | c776c9094b0fc380797360231d9d1574813b2852 | [
"MIT"
] | 2 | 2021-01-01T22:28:32.000Z | 2021-01-01T22:33:04.000Z | # -*- encoding: utf-8 -*-
import datetime
def formata_data(data):
    """Convert a date string from Brazilian 'DD/MM/YYYY' to 'YYYYMMDD'.

    Raises ValueError when *data* does not match the expected format.
    """
    parsed = datetime.datetime.strptime(data, '%d/%m/%Y')
    return parsed.date().strftime("%Y%m%d")
def formata_valor(valor):
    """Format *valor* with two decimal places and strip the decimal point.

    Example: 12.3 -> '1230', 0 -> '000'.
    """
    two_places = "%.2f" % valor
    return two_places.replace(".", "")
| 18.692308 | 62 | 0.613169 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 54 | 0.222222 |
9cb50604a1507becdac54fdbcfd2fefb61cc2c48 | 1,105 | py | Python | docker-images/pysyft-worker/worker-server.py | linamnt/PySyft | 4b60a86c003acbe1967d6c3d611df3d5f2d377ee | [
"Apache-2.0"
] | 3 | 2020-11-24T05:15:57.000Z | 2020-12-07T09:52:45.000Z | docker-images/pysyft-worker/worker-server.py | linamnt/PySyft | 4b60a86c003acbe1967d6c3d611df3d5f2d377ee | [
"Apache-2.0"
] | 2 | 2020-03-09T09:17:06.000Z | 2020-04-09T13:33:12.000Z | docker-images/pysyft-worker/worker-server.py | linamnt/PySyft | 4b60a86c003acbe1967d6c3d611df3d5f2d377ee | [
"Apache-2.0"
] | 1 | 2022-03-06T06:22:21.000Z | 2022-03-06T06:22:21.000Z | import argparse
import torch
import syft as sy
from syft import WebsocketServerWorker
def get_args():
    """Parse the command-line options for the websocket server worker."""
    parser = argparse.ArgumentParser(description="Run websocket server worker.")
    parser.add_argument(
        "--port", "-p", type=int, default=8777,
        help="port number of the websocket server worker, e.g. --port 8777",
    )
    parser.add_argument("--host", type=str, default="0.0.0.0", help="host for the connection")
    parser.add_argument(
        "--id", type=str, help="name (id) of the websocket server worker, e.g. --id alice"
    )
    parser.add_argument(
        "--verbose", "-v", action="store_true",
        help="if set, websocket server worker will be started in verbose mode",
    )
    return parser.parse_args()
if __name__ == "__main__":
    # Wrap torch with the syft hook; the worker below receives it via kwargs.
    hook = sy.TorchHook(torch)
    args = get_args()
    # Worker construction parameters, taken straight from the CLI options.
    kwargs = {
        "id": args.id,
        "host": args.host,
        "port": args.port,
        "hook": hook,
        "verbose": args.verbose,
    }
    server = WebsocketServerWorker(**kwargs)
    # NOTE(review): presumably blocks serving requests until the process is
    # stopped — confirm against the syft WebsocketServerWorker docs.
    server.start()
| 25.113636 | 94 | 0.59819 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 344 | 0.311312 |
9cb7bc1d4808741ef60b11b2ef895ca36d1c2b75 | 7,687 | py | Python | datageneration/dbmake_h264hits.py | utlive/VIDMAP | 60656d532ac497c1070f1c94c06807b2d57e2af4 | [
"Unlicense"
] | 1 | 2022-02-21T02:45:25.000Z | 2022-02-21T02:45:25.000Z | datageneration/dbmake_h264hits.py | utlive/VIDMAP | 60656d532ac497c1070f1c94c06807b2d57e2af4 | [
"Unlicense"
] | null | null | null | datageneration/dbmake_h264hits.py | utlive/VIDMAP | 60656d532ac497c1070f1c94c06807b2d57e2af4 | [
"Unlicense"
] | null | null | null | import skimage.io
import skvideo.io
import os
import h5py
from sklearn.externals import joblib
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import f1_score
import scipy.misc
import scipy.signal
import numpy as np
from sporco import util
import matplotlib.pyplot as plt
import pylab as py
import glob
from PIL import Image
import cv2
import sys
def get_postrainpatches(hdf5_im, hdf5_lab, hdf5_trainset, offsets, idx=0, traintest=0):
    """Thin alias: collect positive (distorted) patches via genericpospatcher."""
    return genericpospatcher(
        hdf5_im, hdf5_lab, hdf5_trainset, offsets, idx=idx, traintest=traintest)
def genericpospatcher(hdf5_im, hdf5_lab, hdf5_trainset, offsets, idx=0, traintest=0):
    """Harvest 'positive' (H.264-glitched) 3-frame 256x256 patches.

    Each scene is transcoded twice via ffmpeg: once with libx264 plus an
    injected bitstream-noise filter (the distorted copy) and once with
    mpeg2video (the reference copy). Random patch locations whose middle
    frame differs strongly between the two copies are written to
    hdf5_im[idx] with label 1; (y, x) corners go to offsets. Returns the
    next free index. traintest selects the train (0) / test (1) scene
    split and is recorded in hdf5_trainset. (Python 2 source.)
    """
    width = 256
    height = 256
    lst480p = np.array(glob.glob("/mnt/hd3/scenes/480p/*avi"))
    lst1080p = np.array(glob.glob("/mnt/hd3/scenes/1080p/*avi"))
    lst480p = np.sort(lst480p)
    lst1080p = np.sort(lst1080p)
    # Fixed scene split: first 575 of 480p / 215 of 1080p are training.
    if traintest == 0:
        lst = np.hstack((lst480p[:575], lst1080p[:215]))
    else:
        lst = np.hstack((lst480p[575:], lst1080p[215:]))
    # Two passes over the scene list -> up to 2 * 10 patches per scene.
    for repeates in [1, 2]:
        n_samples = len(lst)
        for fidx, fname in enumerate(lst):
            print fidx, n_samples, fname
            #vid = skvideo.io.vread(fname, as_grey=True).astype(np.float32)
            cmd = "ffmpeg -y -nostats -loglevel 0 -i %s -codec:v libx264 -g 50 -mpv_flags +strict_gop -bsf noise=2000000 -b:v 40000k /tmp/test_distorted.mp4" % (fname,)
            os.system(cmd)
            cmd = "ffmpeg -y -nostats -loglevel 0 -ec 0 -i /tmp/test_distorted.mp4 -vcodec rawvideo -pix_fmt yuv420p /tmp/test_distorted.avi"
            os.system(cmd)
            cmd = "ffmpeg -y -nostats -loglevel 0 -i %s -codec:v mpeg2video -b:v 40000k /tmp/test_pristine.mp4" % (fname,)
            os.system(cmd)
            cmd = "ffmpeg -y -nostats -loglevel 0 -ec 0 -i /tmp/test_pristine.mp4 -vcodec rawvideo -pix_fmt yuv420p /tmp/test_pristine.avi"
            os.system(cmd)
            vid_dis = skvideo.io.vread("/tmp/test_distorted.avi", as_grey=True).astype(np.float32)
            vid_pris = skvideo.io.vread("/tmp/test_pristine.avi", as_grey=True).astype(np.float32)
            os.remove("/tmp/test_distorted.mp4")
            os.remove("/tmp/test_pristine.mp4")
            os.remove("/tmp/test_distorted.avi")
            os.remove("/tmp/test_pristine.avi")
            T, H, W, C = vid_dis.shape
            adj_h = H - height
            adj_w = W - width
            # All valid patch corners, visited in a random order.
            iv, jv = np.meshgrid(np.arange(adj_h), np.arange(adj_w), sparse=False, indexing='ij')
            iv = iv.reshape(-1)
            jv = jv.reshape(-1)
            jdx = np.random.permutation(adj_h*adj_w)
            iv = iv[jdx]
            jv = jv[jdx]
            tv = np.arange(1, T-1)
            limit = 0
            for (y, x) in zip(iv, jv):
                np.random.shuffle(tv)
                t = tv[0]
                goodpatch = vid_pris[t-1:t+2, y:y+height, x:x+width, 0]
                badpatch = vid_dis[t-1:t+2, y:y+height, x:x+width, 0]
                # difference the magntudes, so we don't worry about phase shifts
                if badpatch.shape[0] == goodpatch.shape[0]:
                    diff = np.mean(np.abs(badpatch[1, 30:-30, 30:-30] - goodpatch[1, 30:-30, 30:-30])**2)
                    if diff < 50:
                        continue
                    # check that either the previous frame or next frame match exactly, except where the middle frame doesn't match
                    # this ensures that the difference measured is not because of frame mis-alignment
                    error1 = np.sum((goodpatch[0] - badpatch[0])**2)
                    error2 = np.sum((goodpatch[2] - badpatch[2])**2)
                    print error1, error2
                else:
                    continue
                # check for no variance img
                if np.std(badpatch[0, 30:-30, 30:-30]) < 10:
                    continue
                if np.std(badpatch[1, 30:-30, 30:-30]) < 10:
                    continue
                if np.std(badpatch[2, 30:-30, 30:-30]) < 10:
                    continue
                #goodpatch = goodpatch.astype(np.uint8)
                #badpatch = badpatch.astype(np.uint8)
                #badimg = badpatch[0].astype(np.uint8)
                #skimage.io.imsave("dump/patch_%d.png" % (idx,), badimg)
                #print diff
                #skimage.io.imsave("/tmp/test_%d.png" % (limit,), np.hstack((goodpatch.astype(np.uint8), badpatch.astype(np.uint8))))
                #preprocess = preprocess[:, 5:-5, 5:-5]
                hdf5_im[idx] = badpatch
                hdf5_lab[idx] = 1
                hdf5_trainset[idx] = traintest
                offsets[idx] = [y, x]
                #skimage.io.imsave("extract/%d.png" % (idx,), patch)
                limit += 1
                idx += 1
                if limit >= 10:
                    break
    return idx
def get_negtrainpatches(image_patches, labels, hdf5_traintest, offsets, idx=0, traintest=0):
    """Thin alias: collect negative (pristine) patches via genericnegpatcher."""
    return genericnegpatcher(
        image_patches, labels, hdf5_traintest, offsets, idx=idx, traintest=traintest)
def genericnegpatcher(hdf5_im, hdf5_lab, hdf5_traintest, offsets, idx=0, traintest=0):
    """Harvest 'negative' (clean H.264) 3-frame 256x256 patches.

    Each scene is encoded once with plain h264; up to 20 random patches per
    scene are written to hdf5_im[idx] with label 0. Returns the next free
    index. traintest selects the train (0) / test (1) scene split and is
    recorded in hdf5_traintest. (Python 2 source.)
    """
    width = 256
    height = 256
    #lst = glob.glob("/mnt/hd3/databases/video/film_pristine/480p/*/*mpg")
    lst480p = np.array(glob.glob("/mnt/hd3/scenes/480p/*avi"))
    lst1080p = np.array(glob.glob("/mnt/hd3/scenes/1080p/*avi"))
    lst480p = np.sort(lst480p)
    lst1080p = np.sort(lst1080p)
    # Same fixed scene split as genericpospatcher.
    if traintest == 0:
        lst = np.hstack((lst480p[:575], lst1080p[:215]))
    else:
        lst = np.hstack((lst480p[575:], lst1080p[215:]))
    n_samples = len(lst)
    for fidx, fname in enumerate(lst):
        print fidx, n_samples, fname
        #vid = skvideo.io.vread(fname, as_grey=True).astype(np.float32)
        cmd = "ffmpeg -y -nostats -loglevel 0 -i %s -codec:v h264 -b:v 40000k /tmp/test_pristine.mp4" % (fname,)
        os.system(cmd)
        vid_pris = skvideo.io.vread("/tmp/test_pristine.mp4", inputdict={'-ec': '0'}, as_grey=True).astype(np.float32)
        T, H, W, C = vid_pris.shape
        adj_h = H - height
        adj_w = W - width
        # All valid patch corners, visited in a random order.
        iv, jv = np.meshgrid(np.arange(adj_h), np.arange(adj_w), sparse=False, indexing='ij')
        iv = iv.reshape(-1)
        jv = jv.reshape(-1)
        jdx = np.random.permutation(adj_h*adj_w)
        iv = iv[jdx]
        jv = jv[jdx]
        tv = np.arange(1, T-1)
        limit = 0
        for (y, x) in zip(iv, jv):
            np.random.shuffle(tv)
            t = tv[0]
            goodpatch = vid_pris[t-1:t+2, y:y+height, x:x+width, 0]
            #print diff
            #skimage.io.imsave("/tmp/test_%d.png" % (limit,), np.hstack((goodpatch.astype(np.uint8), badpatch.astype(np.uint8))))
            hdf5_im[idx] = goodpatch
            hdf5_lab[idx] = 0
            hdf5_traintest[idx] = traintest
            offsets[idx] = [y, x]
            #skimage.io.imsave("extract/%d.png" % (idx,), patch)
            limit += 1
            idx += 1
            if limit >= 20:
                break
    return idx
# get the number of patches
# Fix the RNG so the sampled patch locations are reproducible across runs.
np.random.seed(12345)
n_total_images = 62417
patch_height = 256
patch_width = 256
n_channels = 3
# sf = single frame
# fd = frame diff
f = h5py.File('/mnt/hd2/hitsdataset_sf_h264_2.hdf5', mode='w')
image_patches = f.create_dataset('image_patches', (n_total_images, n_channels, patch_height, patch_width), dtype='float')
image_patches.dims[0].label = 'batch'
image_patches.dims[1].label = 'channel'
image_patches.dims[2].label = 'height'
image_patches.dims[3].label = 'width'
labels = f.create_dataset('labels', (n_total_images,), dtype='uint8')  # 1 = distorted, 0 = pristine
trainset = f.create_dataset('set', (n_total_images,), dtype='uint8')  # traintest flag per patch
offsets = f.create_dataset('offsets', (n_total_images, 2), dtype='int32')  # (y, x) patch corner
n_idx = 0
# Positive then negative patches, for the train (0) and test (1) splits.
n_idx = get_postrainpatches(image_patches, labels, trainset, offsets, n_idx, traintest=0)
n_idx = get_negtrainpatches(image_patches, labels, trainset, offsets, n_idx, traintest=0)
n_idx = get_postrainpatches(image_patches, labels, trainset, offsets, n_idx, traintest=1)
n_idx = get_negtrainpatches(image_patches, labels, trainset, offsets, n_idx, traintest=1)
print n_idx, n_total_images
#n_idx = get_negtestpatches(image_patches, labels, trainset, n_idx)
#n_idx = get_postestpatches(image_patches, labels, trainset, n_idx)
f.flush()
f.close()
| 34.013274 | 162 | 0.655392 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,241 | 0.291531 |
9cb7df62cf935affe51b3c5e32536d465e8754d7 | 1,126 | py | Python | mopidy_mqtt/utils.py | odiroot/mopidy-mq | 7cefe3cbc3e3d8f741d887b8370e92ab47c0d493 | [
"Apache-2.0"
] | null | null | null | mopidy_mqtt/utils.py | odiroot/mopidy-mq | 7cefe3cbc3e3d8f741d887b8370e92ab47c0d493 | [
"Apache-2.0"
] | null | null | null | mopidy_mqtt/utils.py | odiroot/mopidy-mq | 7cefe3cbc3e3d8f741d887b8370e92ab47c0d493 | [
"Apache-2.0"
] | null | null | null | UNKNOWN = u''
def describe_track(track):
    """
    Prepare a short human-readable Track description.

    track (mopidy.models.Track): Track to source song data from.
    Returns a 'title;artist;album' string; missing pieces fall back to
    the module-level UNKNOWN sentinel.
    """
    def _first_artist(artists):
        return next(iter(artists)).name

    title = track.name or UNKNOWN
    # Simple/regular case first (track-level artist), then album artist.
    if track.artists:
        artist = _first_artist(track.artists)
    elif track.album and track.album.artists:
        artist = _first_artist(track.album.artists)
    else:
        artist = UNKNOWN
    album = track.album.name if track.album and track.album.name else UNKNOWN
    return u';'.join([title, artist, album])
def describe_stream(raw_title):
    """
    Attempt to parse given stream title in very rudimentary way.

    Streams commonly announce titles as "Artist - Title"; when no dash is
    present, the whole string is treated as the title. Returns a
    'title;artist;album' string (album is always UNKNOWN for streams).
    """
    artist, separator, title = raw_title.partition('-')
    if separator:
        # BUG FIX: the old split('-')[1] dropped everything after a second
        # dash (e.g. "AC - DC - Back In Black"); partition keeps the rest.
        return u';'.join([title.strip(), artist.strip(), UNKNOWN])
    # Just assume we only have track title.
    return u';'.join([raw_title, UNKNOWN, UNKNOWN])
| 24.478261 | 72 | 0.617229 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 367 | 0.325933 |
9cb959cded414dc66d1ee592c294e6008ab6b58b | 11,162 | py | Python | SimModel_Python_API/simmodel_swig/Release/SimInternalLoad_Lights_Default.py | EnEff-BIM/EnEffBIM-Framework | 6328d39b498dc4065a60b5cc9370b8c2a9a1cddf | [
"MIT"
] | 3 | 2016-05-30T15:12:16.000Z | 2022-03-22T08:11:13.000Z | SimModel_Python_API/simmodel_swig/Release/SimInternalLoad_Lights_Default.py | EnEff-BIM/EnEffBIM-Framework | 6328d39b498dc4065a60b5cc9370b8c2a9a1cddf | [
"MIT"
] | 21 | 2016-06-13T11:33:45.000Z | 2017-05-23T09:46:52.000Z | SimModel_Python_API/simmodel_swig/Release/SimInternalLoad_Lights_Default.py | EnEff-BIM/EnEffBIM-Framework | 6328d39b498dc4065a60b5cc9370b8c2a9a1cddf | [
"MIT"
] | null | null | null | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.7
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
# SWIG boilerplate: locate and load the generated C extension module
# ``_SimInternalLoad_Lights_Default`` living next to this file (imp-based
# loader on Python >= 2.6, plain import otherwise).
from sys import version_info
if version_info >= (2, 6, 0):
    def swig_import_helper():
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_SimInternalLoad_Lights_Default', [dirname(__file__)])
        except ImportError:
            # Fall back to a normal sys.path import of the extension.
            import _SimInternalLoad_Lights_Default
            return _SimInternalLoad_Lights_Default
        if fp is not None:
            try:
                _mod = imp.load_module('_SimInternalLoad_Lights_Default', fp, pathname, description)
            finally:
                fp.close()
            return _mod
    _SimInternalLoad_Lights_Default = swig_import_helper()
    del swig_import_helper
else:
    import _SimInternalLoad_Lights_Default
del version_info
# SWIG proxy plumbing shared by all generated classes in this module.
try:
    _swig_property = property
except NameError:
    pass  # Python < 2.2 doesn't have 'property'.
# Route attribute writes through the generated __swig_setmethods__ table.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    if (name == "thisown"):
        return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        return method(self, value)
    if (not static):
        if _newclass:
            object.__setattr__(self, name, value)
        else:
            self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
# Non-static variant: unknown attribute names may be added to the instance.
def _swig_setattr(self, class_type, name, value):
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
# Route attribute reads through the generated __swig_getmethods__ table.
def _swig_getattr_nondynamic(self, class_type, name, static=1):
    if (name == "thisown"):
        return self.this.own()
    method = class_type.__swig_getmethods__.get(name, None)
    if method:
        return method(self)
    if (not static):
        return object.__getattr__(self, name)
    else:
        raise AttributeError(name)
def _swig_getattr(self, class_type, name):
    return _swig_getattr_nondynamic(self, class_type, name, 0)
# repr showing the underlying C pointer when the proxy is attached.
def _swig_repr(self):
    try:
        strthis = "proxy of " + self.this.__repr__()
    except:
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# Compatibility shims for very old Pythons: pre-new-style-class interpreters
# lack ``object``; _newclass records which world we are running in.
try:
    _object = object
    _newclass = 1
except AttributeError:
    class _object:
        pass
    _newclass = 0
# weakref may be missing on minimal builds; degrade to the identity function.
try:
    import weakref
    weakref_proxy = weakref.proxy
except:
    weakref_proxy = lambda x: x
import base
import SimInternalLoad_Equipment_Electric
class SimInternalLoad_Lights(SimInternalLoad_Equipment_Electric.SimInternalLoad):
    """SWIG proxy for the native SimInternalLoad_Lights class.

    Auto-generated wrapper: construction, destruction and every accessor
    below forward to the compiled ``_SimInternalLoad_Lights_Default``
    extension module.
    """
    # Merge the parent's setter table so _swig_setattr can dispatch writes
    # for inherited attributes as well.
    __swig_setmethods__ = {}
    for _s in [SimInternalLoad_Equipment_Electric.SimInternalLoad]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SimInternalLoad_Lights, name, value)
    # Same merge for the getter table used by _swig_getattr.
    __swig_getmethods__ = {}
    for _s in [SimInternalLoad_Equipment_Electric.SimInternalLoad]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, SimInternalLoad_Lights, name)
    __repr__ = _swig_repr
    # Field accessors: each forwards straight to the C layer.  Following the
    # usual SWIG convention they presumably act as getter without arguments
    # and setter with one -- confirm against the generated C++ interface.
    def SimInternalLoad_Name(self, *args):
        return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_SimInternalLoad_Name(self, *args)
    def SimInternalLoad_ZoneOrZoneListName(self, *args):
        return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_SimInternalLoad_ZoneOrZoneListName(self, *args)
    def SimInternalLoad_FracRadiant(self, *args):
        return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_SimInternalLoad_FracRadiant(self, *args)
    def SimInternalLoad_SchedName(self, *args):
        return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_SimInternalLoad_SchedName(self, *args)
    def SimInternalLoad_DesignLevelCalcMeth(self, *args):
        return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_SimInternalLoad_DesignLevelCalcMeth(self, *args)
    def SimInternalLoad_LightLevel(self, *args):
        return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_SimInternalLoad_LightLevel(self, *args)
    def SimInternalLoad_PowerPerZoneFloorArea(self, *args):
        return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_SimInternalLoad_PowerPerZoneFloorArea(self, *args)
    def SimInternalLoad_PowerPerPerson(self, *args):
        return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_SimInternalLoad_PowerPerPerson(self, *args)
    def SimInternalLoad_RtnAirFrac(self, *args):
        return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_SimInternalLoad_RtnAirFrac(self, *args)
    def SimInternalLoad_FracVisible(self, *args):
        return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_SimInternalLoad_FracVisible(self, *args)
    def SimInternalLoad_FracReplaceable(self, *args):
        return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_SimInternalLoad_FracReplaceable(self, *args)
    def SimInternalLoad_EndUseSubCat(self, *args):
        return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_SimInternalLoad_EndUseSubCat(self, *args)
    def SimInternalLoad_RtnAirFracCalcFromPlenTemp(self, *args):
        return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_SimInternalLoad_RtnAirFracCalcFromPlenTemp(self, *args)
    def SimInternalLoad_RtnAirFracFuncofPlenumTempCoef1(self, *args):
        return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_SimInternalLoad_RtnAirFracFuncofPlenumTempCoef1(self, *args)
    def SimInternalLoad_RtnAirFracFuncofPlenumTempCoef2(self, *args):
        return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_SimInternalLoad_RtnAirFracFuncofPlenumTempCoef2(self, *args)
    def __init__(self, *args):
        # Allocate the underlying C object; `self.this` may already be a
        # list of pointers (append) or unset (assign directly).
        this = _SimInternalLoad_Lights_Default.new_SimInternalLoad_Lights(*args)
        try:
            self.this.append(this)
        except:
            self.this = this
    def _clone(self, f=0, c=None):
        return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights__clone(self, f, c)
    __swig_destroy__ = _SimInternalLoad_Lights_Default.delete_SimInternalLoad_Lights
    __del__ = lambda self: None
# Register the proxy class with the extension module's type system.
SimInternalLoad_Lights_swigregister = _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_swigregister
SimInternalLoad_Lights_swigregister(SimInternalLoad_Lights)
class SimInternalLoad_Lights_Default(SimInternalLoad_Lights):
    """SWIG proxy for the 'Default' specialisation of SimInternalLoad_Lights.

    Inherits all field accessors; only construction/cloning/destruction
    differ, each forwarding to the compiled extension module.
    """
    # Merge the parent's setter/getter tables for attribute dispatch.
    __swig_setmethods__ = {}
    for _s in [SimInternalLoad_Lights]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SimInternalLoad_Lights_Default, name, value)
    __swig_getmethods__ = {}
    for _s in [SimInternalLoad_Lights]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, SimInternalLoad_Lights_Default, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        # Attach the newly allocated C++ object (append to an existing
        # pointer list, or set it as the first pointer).
        this = _SimInternalLoad_Lights_Default.new_SimInternalLoad_Lights_Default(*args)
        try:
            self.this.append(this)
        except:
            self.this = this
    def _clone(self, f=0, c=None):
        return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default__clone(self, f, c)
    __swig_destroy__ = _SimInternalLoad_Lights_Default.delete_SimInternalLoad_Lights_Default
    __del__ = lambda self: None
# Register the proxy with the extension's type table.
SimInternalLoad_Lights_Default_swigregister = _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_swigregister
SimInternalLoad_Lights_Default_swigregister(SimInternalLoad_Lights_Default)
class SimInternalLoad_Lights_Default_sequence(base.sequence_common):
    """SWIG proxy for a C++ sequence of SimInternalLoad_Lights_Default.

    Exposes STL-style container operations; every call forwards to the
    compiled ``_SimInternalLoad_Lights_Default`` extension module.
    """
    # Merge inherited setter/getter tables for attribute dispatch.
    __swig_setmethods__ = {}
    for _s in [base.sequence_common]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SimInternalLoad_Lights_Default_sequence, name, value)
    __swig_getmethods__ = {}
    for _s in [base.sequence_common]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, SimInternalLoad_Lights_Default_sequence, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        # Attach the freshly allocated native container to this proxy.
        this = _SimInternalLoad_Lights_Default.new_SimInternalLoad_Lights_Default_sequence(*args)
        try:
            self.this.append(this)
        except:
            self.this = this
    # STL-like container interface, all forwarded to the native sequence.
    def assign(self, n, x):
        return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_sequence_assign(self, n, x)
    def begin(self, *args):
        return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_sequence_begin(self, *args)
    def end(self, *args):
        return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_sequence_end(self, *args)
    def rbegin(self, *args):
        return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_sequence_rbegin(self, *args)
    def rend(self, *args):
        return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_sequence_rend(self, *args)
    def at(self, *args):
        return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_sequence_at(self, *args)
    def front(self, *args):
        return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_sequence_front(self, *args)
    def back(self, *args):
        return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_sequence_back(self, *args)
    def push_back(self, *args):
        return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_sequence_push_back(self, *args)
    def pop_back(self):
        return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_sequence_pop_back(self)
    def detach_back(self, pop=True):
        return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_sequence_detach_back(self, pop)
    def insert(self, *args):
        return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_sequence_insert(self, *args)
    def erase(self, *args):
        return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_sequence_erase(self, *args)
    def detach(self, position, r, erase=True):
        return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_sequence_detach(self, position, r, erase)
    def swap(self, x):
        return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_sequence_swap(self, x)
    __swig_destroy__ = _SimInternalLoad_Lights_Default.delete_SimInternalLoad_Lights_Default_sequence
    __del__ = lambda self: None
# Register the proxy with the extension's type table.
SimInternalLoad_Lights_Default_sequence_swigregister = _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_sequence_swigregister
SimInternalLoad_Lights_Default_sequence_swigregister(SimInternalLoad_Lights_Default_sequence)
# This file is compatible with both classic and new-style classes.
| 41.805243 | 139 | 0.760796 | 7,757 | 0.694947 | 0 | 0 | 0 | 0 | 0 | 0 | 592 | 0.053037 |
9cb99a5d200c09e89fd5f801960f8cb0d1f949ca | 1,925 | py | Python | ggpy/cruft/autocode/Test_CanonicalJSON.py | hobson/ggpy | 4e6e6e876c3a4294cd711647051da2d9c1836b60 | [
"MIT"
] | 1 | 2015-01-26T19:07:45.000Z | 2015-01-26T19:07:45.000Z | ggpy/cruft/autocode/Test_CanonicalJSON.py | hobson/ggpy | 4e6e6e876c3a4294cd711647051da2d9c1836b60 | [
"MIT"
] | null | null | null | ggpy/cruft/autocode/Test_CanonicalJSON.py | hobson/ggpy | 4e6e6e876c3a4294cd711647051da2d9c1836b60 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
""" generated source for module Test_CanonicalJSON """
# package: org.ggp.base.util.crypto
import junit.framework.TestCase
import org.ggp.base.util.crypto.CanonicalJSON.CanonicalizationStrategy
#
# * Unit tests for the CanonicalJSON class, which provides a
# * standard way for GGP systems to canonicalize JSON objects.
# * This is an important step in signing JSON objects.
# *
# * @author Sam
#
class Test_CanonicalJSON(TestCase):
    """Unit tests for CanonicalJSON canonicalization (Java -> Python autocode).

    NOTE(review): machine-translated cruft -- `CanonicalJSON` and
    `assertEquals` are used as free names without being imported or
    defined, so this module cannot run as-is; it documents the intended
    behaviour of the original Java test only.
    """
    def testSimpleCanonicalization(self):
        """Logically-equal JSON objects must share one canonical string."""
        theStrategy = CanonicalizationStrategy.SIMPLE
        # Key order and nesting order must not affect the canonical form.
        a = CanonicalJSON.getCanonicalForm("{1:2,2:3,3:{2:5,c:4,7:9,a:6}}", theStrategy)
        assertEquals(a, CanonicalJSON.getCanonicalForm("{2:3,3:{c:4,7:9,2:5,a:6},1:2}", theStrategy))
        assertEquals(a, CanonicalJSON.getCanonicalForm("{3:{c:4,7:9,2:5,a:6},2:3,1:2}", theStrategy))
        assertEquals(a, CanonicalJSON.getCanonicalForm("{3:{7:9,c:4,2:5,a:6},1:2,2:3}", theStrategy))
        assertEquals(a, CanonicalJSON.getCanonicalForm("{2:3,3:{c:4,7:9,a:6,2:5},1:2}", theStrategy))
        # Expected canonical form: double-quoted keys, sorted, no whitespace.
        assertEquals(a, "{\"1\":2,\"2\":3,\"3\":{\"2\":5,\"7\":9,\"a\":6,\"c\":4}}")
        # Quoting style (single/double/none) must also be normalised away.
        b = CanonicalJSON.getCanonicalForm("{'abc':3, \"def\":4, ghi:5}", theStrategy)
        assertEquals(b, CanonicalJSON.getCanonicalForm("{'def':4, abc:3, \"ghi\":5}", theStrategy))
        assertEquals(b, CanonicalJSON.getCanonicalForm("{\"def\":4, ghi:5, 'abc':3}", theStrategy))
        assertEquals(b, CanonicalJSON.getCanonicalForm("{abc:3, def:4, ghi:5}", theStrategy))
        assertEquals(b, CanonicalJSON.getCanonicalForm("{'abc':3, 'def':4, 'ghi':5}", theStrategy))
        assertEquals(b, CanonicalJSON.getCanonicalForm("{\"abc\":3, \"def\":4, \"ghi\":5}", theStrategy))
        assertEquals(b, "{\"abc\":3,\"def\":4,\"ghi\":5}")
9cbb4a2a9efa65411f4aa36bd270da45be788b3e | 3,217 | py | Python | python_app/tenhou-bot/mahjong/tile.py | 0xsuu/Project-Mahjong | e82edc67651ff93c8ec158b590cd728f28504be9 | [
"Apache-2.0"
] | 9 | 2018-06-08T00:09:08.000Z | 2021-11-17T11:05:11.000Z | python_app/tenhou-bot/mahjong/tile.py | 0xsuu/Project-Mahjong | e82edc67651ff93c8ec158b590cd728f28504be9 | [
"Apache-2.0"
] | 1 | 2020-04-25T12:43:26.000Z | 2020-04-25T12:43:26.000Z | python_app/tenhou-bot/mahjong/tile.py | 0xsuu/Project-Mahjong | e82edc67651ff93c8ec158b590cd728f28504be9 | [
"Apache-2.0"
] | 2 | 2019-05-30T07:18:45.000Z | 2019-11-05T09:15:13.000Z | # -*- coding: utf-8 -*-
class Tile(int):
    """One tile in the 136-tile encoding (four copies of each of 34 kinds)."""

    # 34 distinct tile names: sou 1-9, pin 1-9, man 1-9, four winds,
    # three dragons.  NOTE(review): this orders sou first while
    # TilesConverter treats ids 0-35 as man -- verify which is intended.
    TILES = ([str(rank) + suit for suit in ('s', 'p', 'm') for rank in range(1, 10)]
             + ['ew', 'sw', 'ww', 'nw', 'wd', 'gd', 'rd'])

    def as_data(self):
        """Return the human-readable name of this tile (e.g. '1s', 'rd')."""
        return Tile.TILES[self // 4]
class TilesConverter(object):
    """Helpers converting between 136-tile, 34-tile and string notations."""

    @staticmethod
    def to_one_line_string(tiles):
        """Render a 136-array as one-line notation, e.g. '123m123p123s3z'.

        Groups are emitted in man, pin, sou, honors order with digits
        sorted ascending inside each group.
        """
        tiles = sorted(tiles)

        def render(group, offset, suffix):
            # Empty groups contribute nothing (no bare suffix letter).
            if not group:
                return ''
            return ''.join(str((t - offset) // 4 + 1) for t in group) + suffix

        man = render([t for t in tiles if t < 36], 0, 'm')
        pin = render([t for t in tiles if 36 <= t < 72], 36, 'p')
        sou = render([t for t in tiles if 72 <= t < 108], 72, 's')
        honors = render([t for t in tiles if t >= 108], 108, 'z')
        return man + pin + sou + honors

    @staticmethod
    def to_34_array(tiles):
        """Collapse a 136-tile array into 34 per-kind counts."""
        counts = [0] * 34
        for t in tiles:
            counts[t // 4] += 1
        return counts

    @staticmethod
    def string_to_136_array(sou=None, pin=None, man=None, honors=None):
        """Build a 136-array from per-suit digit strings (test readability helper)."""
        result = []
        for text, offset in ((man, 0), (pin, 36), (sou, 72), (honors, 108)):
            if not text:
                continue
            # Each digit maps to the first of the four copies of that kind.
            result.extend(offset + (int(ch) - 1) * 4 for ch in text)
        return result

    @staticmethod
    def string_to_34_array(sou=None, pin=None, man=None, honors=None):
        """Build a 34-count array from per-suit digit strings."""
        as_136 = TilesConverter.string_to_136_array(sou, pin, man, honors)
        return TilesConverter.to_34_array(as_136)

    @staticmethod
    def find_34_tile_in_136_array(tile34, tiles):
        """Return the first 136-id in *tiles* matching kind *tile34*, else None.

        A 34-kind maps to four concrete ids (tile34*4 .. tile34*4+3).
        """
        if tile34 > 33:
            return None
        base = tile34 * 4
        for candidate in range(base, base + 4):
            if candidate in tiles:
                return candidate
        return None
| 27.973913 | 84 | 0.537146 | 3,187 | 0.990675 | 0 | 0 | 2,877 | 0.894311 | 0 | 0 | 940 | 0.292198 |
9cbdcc9bd41342ec729fa4605f8c43d39b4c42ad | 461 | py | Python | python/chrysophylax/buy_and_hold.py | dichodaemon/chrysophylax | 834214e7d021643084be910d2b99bd7d18497faa | [
"MIT"
] | null | null | null | python/chrysophylax/buy_and_hold.py | dichodaemon/chrysophylax | 834214e7d021643084be910d2b99bd7d18497faa | [
"MIT"
] | null | null | null | python/chrysophylax/buy_and_hold.py | dichodaemon/chrysophylax | 834214e7d021643084be910d2b99bd7d18497faa | [
"MIT"
] | null | null | null | import garm.indicators as gari
import ham.time_utils as hamt
import ohlcv
import luigi
import strategies as chs
from luigi.util import inherits
@inherits(chs.Strategy)
class BuyAndHold(chs.Strategy):
    """Buy-and-hold strategy task: signals come from garm's buy_and_hold_signals."""
    FN = gari.buy_and_hold_signals

    def requires(self):
        """Yield one OHLCV task per month between start_date and end_date."""
        for month in hamt.months(self.start_date, self.end_date):
            yield ohlcv.OHLCV(self.pair, self.exchange, month,
                              self.period, self.destination_path)
9cbe11d37a19426b463bd6c4dcaa771e00215206 | 4,866 | py | Python | py2api/output_trans.py | andeaseme/py2api | 6d7a051543e9a63dbc14c43d83de643a61022e9e | [
"Apache-2.0"
] | 2 | 2019-08-29T01:35:09.000Z | 2020-03-31T18:49:50.000Z | py2api/output_trans.py | andeaseme/py2api | 6d7a051543e9a63dbc14c43d83de643a61022e9e | [
"Apache-2.0"
] | 1 | 2019-11-14T00:05:54.000Z | 2019-11-14T00:05:54.000Z | py2api/output_trans.py | andeaseme/py2api | 6d7a051543e9a63dbc14c43d83de643a61022e9e | [
"Apache-2.0"
] | 4 | 2018-07-19T16:50:11.000Z | 2020-06-08T15:27:44.000Z | from py2api.constants import TRANS_NOT_FOUND, _OUTPUT_TRANS, _ATTR, _VALTYPE, _ELSE
class OutputTrans(object):
    """
    OutputTrans allows to flexibly define a callable object to convert the output of a function or method.
    For more information, see InputTrans
    """
    def __init__(self, trans_spec=None):
        """
        An output transformer builder.
        >>> from py2api.output_trans import OutputTrans
        >>> from py2api.constants import _ATTR, _VALTYPE, _ELSE, _OUTPUT_TRANS
        >>>
        >>> trans_spec = {
        ...     _OUTPUT_TRANS : {
        ...         'csv': lambda x: ",".join(map(str, x))
        ...     },
        ...     _ATTR: {
        ...         'this_attr': list,
        ...         'other_attr': str,
        ...         'yet_another_attr': {
        ...             _VALTYPE: {
        ...                 dict: lambda x: x
        ...             },
        ...             _ELSE: lambda x: {'result': x}
        ...         }
        ...     },
        ... }
        >>> output_trans = OutputTrans(trans_spec)
        >>>
        >>> output_trans([1,4,2,5], output_trans='csv')
        '1,4,2,5'
        >>> output_trans(tuple(['was', 'a', 'tuple']), attr='this_attr')
        ['was', 'a', 'tuple']
        >>> output_trans(tuple(['was', 'a', 'tuple']), attr='other_attr')
        "('was', 'a', 'tuple')"
        >>> output_trans({'a': 'dict'}, attr='yet_another_attr')
        {'a': 'dict'}
        >>> output_trans(['not', 'a', 'dict'], attr='yet_another_attr')
        {'result': ['not', 'a', 'dict']}
        >>>
        >>> # An example of type-based conversion, using pandas and numpy if present
        >>> try:
        ...     import pandas as pd
        ...     import numpy as np
        ...     trans_spec = {
        ...         _VALTYPE : {
        ...             pd.DataFrame: lambda x: {'result': x.to_dict(orient='records')},
        ...             np.ndarray: lambda x: {'result': x.tolist()}
        ...         }
        ...     }
        ...     output_trans = OutputTrans(trans_spec)
        ...     df = pd.DataFrame({'A': [1,2,3, 4], 'B': ['a', 'ab', 'abc', 'abcd']})
        ...     print(output_trans(df))
        ...     arr = np.array([[2,3,4], [1,2,3]])
        ...     print(output_trans(arr))
        ... except ImportError:
        ...     pass
        ...
        {'result': [{'A': 1, 'B': 'a'}, {'A': 2, 'B': 'ab'}, {'A': 3, 'B': 'abc'}, {'A': 4, 'B': 'abcd'}]}
        {'result': [[2, 3, 4], [1, 2, 3]]}
        """
        # Normalise the spec: None -> empty dict (identity transform for
        # everything); a bare callable -> catch-all under the _ELSE key.
        if trans_spec is None:
            trans_spec = {}
        elif callable(trans_spec):
            trans_spec = {_ELSE: trans_spec}
        self.trans_spec = trans_spec
    def search_trans_func(self, attr, val, trans_spec, output_trans=None):
        # Recursively resolve a transformation function for (attr, val).
        # Precedence: _OUTPUT_TRANS match, then _ATTR match, then the first
        # matching _VALTYPE entry, then the _ELSE catch-all.
        # NOTE(review): if trans_spec is neither callable nor a dict this
        # method falls off the end and returns None (not TRANS_NOT_FOUND),
        # which __call__ would then try to call -- confirm intended.
        trans_func = TRANS_NOT_FOUND  # fallback default (i.e. "found nothing")
        if callable(trans_spec):
            # A leaf: the spec itself is the transformation function.
            return trans_func
        if callable(trans_spec):
            return trans_spec
        elif isinstance(trans_spec, dict):
            if len(trans_spec) == 0:
                return TRANS_NOT_FOUND
            elif len(trans_spec) > 0:
                ############### search _OUTPUT_TRANS #######
                # Explicit output format requested by the caller wins first.
                if output_trans is not None:
                    _trans_spec = trans_spec.get(_OUTPUT_TRANS, {}).get(output_trans, {})
                    if _trans_spec:
                        trans_func = self.search_trans_func(
                            attr, val, trans_spec=_trans_spec, output_trans=output_trans)
                        if trans_func is not TRANS_NOT_FOUND:
                            return trans_func
                ############### search _ATTR ###############
                # Next, a spec keyed on the attribute (endpoint) name.
                _trans_spec = trans_spec.get(_ATTR, {}).get(attr, {})
                if _trans_spec:
                    trans_func = self.search_trans_func(attr, val, trans_spec=_trans_spec)
                    if trans_func is not TRANS_NOT_FOUND:
                        return trans_func
                ############### search _VALTYPE #############
                # Then dispatch on the runtime type of the value; first
                # isinstance match wins (dict order dependent).
                if _VALTYPE in trans_spec:
                    for _type, _type_trans_spec in list(trans_spec[_VALTYPE].items()):
                        if isinstance(val, _type):
                            return _type_trans_spec
                ############### _ELSE #######################
                # Finally the catch-all branch, or give up.
                if _ELSE in trans_spec:
                    return self.search_trans_func(attr, val, trans_spec[_ELSE])
                else:
                    return TRANS_NOT_FOUND
    def __call__(self, val, attr=None, output_trans=None):
        # Resolve a transform for this (attr, output_trans) combination and
        # apply it; if nothing matched, pass the value through unchanged.
        trans_func = self.search_trans_func(attr, val, trans_spec=self.trans_spec, output_trans=output_trans)
        if trans_func is not TRANS_NOT_FOUND:  # if there is...
            trans_val = trans_func(val)  # ... convert the val
        else:  # if there's not...
            trans_val = val  # ... just take the val as is
        return trans_val
9cbf34bd2e80e0ed154936e5a0e7dad3b9986416 | 1,778 | py | Python | imageApi.py | draJiang/Figma-To-Eagle | f003f3fff664a6f3fe64c19462339d66d1129c29 | [
"MIT"
] | null | null | null | imageApi.py | draJiang/Figma-To-Eagle | f003f3fff664a6f3fe64c19462339d66d1129c29 | [
"MIT"
] | null | null | null | imageApi.py | draJiang/Figma-To-Eagle | f003f3fff664a6f3fe64c19462339d66d1129c29 | [
"MIT"
] | null | null | null | import pytesseract
import os
import time
import requests
import json
from PIL import Image,ImageFont,ImageDraw
# Load the runtime configuration (expects at least a 'font' entry).
with open('config.json') as json_file:
    config = json.load(json_file)
# Default directory where generated/downloaded images are stored.
MAIN_PATH = './imageApi/image/'
# Font file used when rendering text onto an image.
FONT = config['font']
def strToImg(text, mainPath):
    """Render *text* onto a dark 800x400 PNG and save it.

    :param text: string to render (single line).
    :param mainPath: target directory; falls back to MAIN_PATH when
        empty or None.
    :return: dict with 'success' flag and 'imgLoaclPath' (sic, key kept
        for backward compatibility) of the saved file.
    """
    # `not mainPath` covers both '' and None (the old `== None` check).
    if not mainPath:
        mainPath = MAIN_PATH
    # Ensure the target directory exists, mirroring downloadImg();
    # previously im.save() crashed when the directory was missing.
    os.makedirs(mainPath, exist_ok=True)
    W, H = (800, 400)
    # Canvas: width, height and dark background colour.
    im = Image.new("RGB", (W, H), (26, 26, 26))
    dr = ImageDraw.Draw(im)
    # Font face and size.
    font = ImageFont.truetype(FONT, 44)
    # Measure the rendered text so it can be centred vertically.
    w, h = dr.textsize(text, font=font)
    # Text position (left-aligned, vertically centred) and colour.
    dr.text((20, (H - h) / 2), text, font=font, fill="#F3F3F3")
    # Millisecond timestamp keeps file names unique.
    path = mainPath + str(int(time.time() * 1000)) + '.png'
    im.save(path)
    return {'success': True, 'imgLoaclPath': path}
def getOCRCode(path):
    """Run OCR (simplified-Chinese model) on the image at *path*.

    :param path: local image path, str.
    :return: recognised text (also printed to stdout).
    """
    image = Image.open(path)
    text = pytesseract.image_to_string(image, lang='chi_sim')
    print(text)
    return text
def downloadImg(imgURL, mainPath):
    """Download the image at *imgURL* into *mainPath*.

    :param imgURL: image URL, str.
    :param mainPath: target directory; falls back to MAIN_PATH when empty
        or None (previously only '' was handled, unlike strToImg).
    :return: dict with 'success' (True iff HTTP 200) and 'imgLoaclPath'
        (sic, key kept for backward compatibility).
    """
    if not mainPath:
        mainPath = MAIN_PATH
    os.makedirs(mainPath, exist_ok=True)
    # Browser-like user agent: some hosts reject the default requests UA.
    headers = {
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36'}
    r = requests.get(url=imgURL, headers=headers)
    imgLoaclPath = mainPath + str(int(time.time() * 1000)) + '.png'
    # NOTE: the body is written even on non-200 responses (original
    # behaviour preserved); callers must check 'success'.
    with open(imgLoaclPath, 'wb') as f:
        f.write(r.content)
    success = r.status_code == 200
    return {'success': success, 'imgLoaclPath': imgLoaclPath}
9cbf5a147625facf6148e000945ed5b866447e87 | 804 | py | Python | level_11.py | ceafdc/PythonChallenge | fe4f64aabf1d6e672a06785538391dd16a03ad04 | [
"MIT"
] | 1 | 2018-01-20T17:58:10.000Z | 2018-01-20T17:58:10.000Z | level_11.py | ceafdc/PythonChallenge | fe4f64aabf1d6e672a06785538391dd16a03ad04 | [
"MIT"
] | null | null | null | level_11.py | ceafdc/PythonChallenge | fe4f64aabf1d6e672a06785538391dd16a03ad04 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# url: http://www.pythonchallenge.com/pc/return/5808.html
import requests
import io
import PIL.Image
# Fetch the puzzle image with HTTP basic auth.
url = 'http://www.pythonchallenge.com/pc/return/cave.jpg'
un = 'huge'
pw = 'file'
auth = un, pw
req = requests.get(url, auth=auth)
img_io = io.BytesIO(req.content)
img = PIL.Image.open(img_io)
pixels = img.load()
# The picture interleaves two half-resolution images on a checkerboard;
# split the odd and even diagonals into two separate images.
half = img.size[0] // 2, img.size[1] // 2
img1 = PIL.Image.new('RGB', half, 'black')
img2 = PIL.Image.new('RGB', half, 'black')
pixels1 = img1.load()
pixels2 = img2.load()
for i in range(img.size[0]):
    for j in range(img.size[1]):
        # (i + j) parity selects which of the two hidden images the
        # pixel belongs to; i//2, j//2 maps it to half resolution.
        if (i + j) % 2:
            pixels1[i // 2, j // 2] = pixels[i, j]
        else:
            pixels2[i // 2, j // 2] = pixels[i, j]
img1.show()
img2.show()
# next: http://www.pythonchallenge.com/pc/return/evil.html
| 21.157895 | 58 | 0.615672 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 224 | 0.278607 |
9cc0a99af59fe409930941e63a1459a695a01a4f | 2,258 | py | Python | scripts/spark.py | Sapphirine/Reducing_Manufacturing_Failures | ee6fd6dba345997431cae30380150e3513cab58e | [
"Apache-2.0"
] | 4 | 2019-04-02T21:20:19.000Z | 2021-09-01T06:08:47.000Z | scripts/spark.py | Sapphirine/Reducing_Manufacturing_Failures | ee6fd6dba345997431cae30380150e3513cab58e | [
"Apache-2.0"
] | null | null | null | scripts/spark.py | Sapphirine/Reducing_Manufacturing_Failures | ee6fd6dba345997431cae30380150e3513cab58e | [
"Apache-2.0"
] | 3 | 2019-08-02T12:13:04.000Z | 2020-03-24T12:16:33.000Z |
# coding: utf-8
# ### Open using Databricks Platform/Py-spark. It holds the code for developing the RandomForest Classifier on the chosen subset of important features.
# In[1]:
import os, sys
import pandas as pd
import numpy as np
from sklearn.metrics import matthews_corrcoef
import pyspark
from numpy import array
import numpy as np
import pandas as pd
from pyspark.ml import Pipeline
from pyspark.ml.classification import RandomForestClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.ml.feature import StringIndexer, VectorAssembler, VectorIndexer
import gc
from pyspark.sql.functions import col, count, sum
from sklearn.metrics import matthews_corrcoef
from pyspark.ml.feature import VectorAssembler
from pyspark.sql.functions import rand
# Load the Bosch numeric test CSV from DBFS (Databricks: `sqlContext` is
# provided by the notebook runtime).
REPLACE_YOUR_FILE = "/FileStore/tables/e9svdv4y1482386357547/test_numeric.csv"
df0 = sqlContext.read.format("csv").load(REPLACE_YOUR_FILE, header="true", inferSchema="true")
# Fill missing sensor readings with a sentinel, then drop remaining nulls.
df = df0.na.fill(99999)
df = df.na.drop()
df.printSchema()
# In[2]:
# Hand-picked subset of important features (chosen offline).
feature=['L3_S31_F3846','L1_S24_F1578','L3_S33_F3857','L1_S24_F1406','L3_S29_F3348','L3_S33_F3863',
'L3_S29_F3427','L3_S37_F3950','L0_S9_F170', 'L3_S29_F3321','L1_S24_F1346','L3_S32_F3850',
'L3_S30_F3514','L1_S24_F1366','L2_S26_F3036']
# Assemble the selected columns into a single 'features' vector column.
assembler = VectorAssembler(
    inputCols=feature,
    outputCol='features')
data = (assembler.transform(df).select("features", df.Response.astype('double')))
# 80/20 train/test split with a fixed seed for reproducibility.
(trainingData, testData) = data.randomSplit([0.8, 0.2], seed=451)
data.printSchema()
# In[3]:
cls = RandomForestClassifier(numTrees=60, seed=1111, maxDepth=15, labelCol="Response", featuresCol="features")
pipeline = Pipeline(stages=[cls])
# NOTE(review): `evaluator` is built but never used below; MCC is computed
# via sklearn instead.
evaluator = MulticlassClassificationEvaluator(
    labelCol="Response", predictionCol="prediction", metricName="accuracy")
trainingData=trainingData.na.drop()
trainingData.printSchema()
# In[4]:
gc.collect()
model = pipeline.fit(trainingData)
# In[5]:
# Making predictions on the held-out test split.
predicted = model.transform(testData)
# BUG FIX: the original referenced an undefined name `predictions`
# (NameError); the transformed DataFrame is `predicted`.
response = predicted.select("Response").rdd.map(lambda r: r[0]).collect()
# BUG FIX: int(probability_of_class_1) truncates to 0 for any p < 1.0;
# use the model's hard "prediction" column for the predicted label.
predictedValue = predicted.select("prediction").rdd.map(lambda r: int(r[0])).collect()
mcc = matthews_corrcoef(response, predictedValue)
print(mcc)
| 27.876543 | 152 | 0.767493 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 613 | 0.271479 |
9cc2352b704dd6477ce3defdff442d1b06e25a8f | 4,997 | py | Python | scripts/merge_map_blocks.py | ptrebert/reference-data | 7bca069b8995660252d4f601976f9f7abaaf063b | [
"MIT"
] | null | null | null | scripts/merge_map_blocks.py | ptrebert/reference-data | 7bca069b8995660252d4f601976f9f7abaaf063b | [
"MIT"
] | null | null | null | scripts/merge_map_blocks.py | ptrebert/reference-data | 7bca069b8995660252d4f601976f9f7abaaf063b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# coding=utf-8
import os as os
import sys as sys
import io as io
import traceback as trb
import argparse as argp
import gzip as gz
import operator as op
import functools as fnt
def parse_command_line():
    """Build the CLI parser and return the parsed argument namespace.

    :return: argparse.Namespace with target, query, output, switch, filter.
    """
    cli = argp.ArgumentParser()
    cli.add_argument('--target', '-t', type=str, dest='target')
    cli.add_argument('--query', '-q', type=str, dest='query')
    cli.add_argument('--output', '-o', type=str, dest='output')
    cli.add_argument('--switch', '-s', action='store_true', default=False, dest='switch',
                     help='Switch target and query in the output')
    cli.add_argument('--filter', '-f', type=int, dest='filter', default=0,
                     help='Skip blocks smaller than this size. Default: 0')
    return cli.parse_args()
def join_parts(switch, *args):
    """Join one merged map-block line from its nine fields.

    :param switch: bool -- when True, swap target and query in the output.
    :param args: tc, ts, te, tstr, bc, qc, qs, qe, qstr
    :return: tab-separated string of the nine fields in output order.
    """
    # Explicit type check: a past bug passed a tuple here, which is always
    # truthy and silently forced the switched ordering.
    assert isinstance(switch, bool), 'Received wrong type for switch: {}'.format(switch)
    if switch:
        # New target = original query region, then block ID, then the
        # new query = original target region.
        order = (5, 6, 7, 3, 4, 0, 1, 2, 8)
    else:
        # Target region, block ID, query region.
        order = (0, 1, 2, 3, 4, 5, 6, 7, 8)
    return '\t'.join(args[i] for i in order)
def main():
    """Merge per-line target/query block files into one gzipped map file.

    Reads both BED-like inputs in lockstep, validates that block IDs
    match, are unique and cover the same number of bases, then writes
    tab-separated merged blocks (buffered, flushed in ~100KB chunks) to
    the gzip output.  Iteration ends at EOF, when unpacking an empty
    line's split() raises ValueError.

    :return: None
    """
    args = parse_command_line()
    outbuffer = io.StringIO()
    bufsize = 0
    block_count = 0
    block_ids = set()
    build_block = fnt.partial(join_parts, *(args.switch, ))
    with open(args.target, 'r') as trgfile:
        with open(args.query, 'r') as qryfile:
            while 1:
                tb = trgfile.readline().strip()
                qb = qryfile.readline().strip()
                try:
                    tc, ts, te, tid = tb.split()
                    qc, qs, qe, qid = qb.split()
                    assert tid == qid,\
                        'Block mismatch for files {} and {}\nLines {} and {}'.format(os.path.basename(args.target),
                                                                                    os.path.basename(args.query),
                                                                                    tb, qb)
                    assert tid not in block_ids,\
                        'Block ID duplicate in files {} and {}\nLines {} and {}'.format(os.path.basename(args.target),
                                                                                       os.path.basename(args.query),
                                                                                       tb, qb)
                    # BUG FIX: block_ids was never populated, so the
                    # duplicate-ID assertion above could never fire.
                    block_ids.add(tid)
                    tl = int(te) - int(ts)
                    ql = int(qe) - int(qs)
                    assert tl == ql,\
                        'Coverage mismatch for files {} and {}\nLines {} and {}'.format(os.path.basename(args.target),
                                                                                       os.path.basename(args.query),
                                                                                       tb, qb)
                    if tl < args.filter:
                        # Block smaller than the requested minimum: skip.
                        continue
                    block_count += 1
                    # Query strand is encoded as the last '_'-separated
                    # token of the block ID.
                    qstrand = qid.split('_')[-1]
                    blockline = build_block(tc, ts, te, '+',
                                            str(block_count),
                                            qc, qs, qe, qstrand)
                    bufsize += len(blockline)
                    outbuffer.write(blockline + '\n')
                    if bufsize > 100000:
                        # Flush the in-memory buffer to the gzip file
                        # (append mode) and start a fresh buffer.
                        with gz.open(args.output, 'at') as outfile:
                            _ = outfile.write(outbuffer.getvalue())
                            outfile.flush()
                        outbuffer = io.StringIO()
                        bufsize = 0
                except ValueError:
                    # Raised by tuple-unpacking split() on an empty line
                    # once either file is exhausted -- normal termination.
                    break
    with gz.open(args.output, 'at') as outfile:
        _ = outfile.write(outbuffer.getvalue())
        # had a corrupted gzip once - not sure about the cause... I/O interrupted???
        outfile.flush()
    return
if __name__ == '__main__':
    # Run the merge; print a full traceback plus a short message on any
    # failure, and use the exit code to signal success (0) or error (1).
    try:
        main()
    except Exception as err:
        trb.print_exc(file=sys.stderr)
        sys.stderr.write('\nError: {}\n'.format(str(err)))
        sys.exit(1)
    else:
        sys.exit(0)
9cc4b83e0e4850879d9d17a7f64c101491943cb0 | 4,219 | py | Python | stockMarket/getData/models.py | seba-1511/stockMarket | cd571a89bca71f7c821d2b57328123e58e07347e | [
"MIT"
] | 10 | 2016-02-20T04:17:37.000Z | 2021-06-26T12:14:01.000Z | stockMarket/getData/models.py | seba-1511/stockMarket | cd571a89bca71f7c821d2b57328123e58e07347e | [
"MIT"
] | null | null | null | stockMarket/getData/models.py | seba-1511/stockMarket | cd571a89bca71f7c821d2b57328123e58e07347e | [
"MIT"
] | 4 | 2017-05-25T06:16:48.000Z | 2021-12-03T07:45:44.000Z | #-*- coding: utf-8 -*-
from django.db import models
# Create your models here.
class Feature(models.Model):
    """Abstract base: one trading day's technical-indicator values.

    day/month/year identify the date; every indicator column is a
    nullable/blankable float.  `__unicode__` marks this as Python 2-era
    Django code.  NOTE(review): `monneyFI` (money flow index) and
    `priceRelWarrent` look like misspellings, but renaming them would
    change the database schema -- left as-is.
    """
    day = models.SmallIntegerField()
    month = models.SmallIntegerField()
    year = models.SmallIntegerField()
    momentum = models.FloatField(
        null=True, blank=True)
    day5disparity = models.FloatField(
        null=True, blank=True)
    day10disparity = models.FloatField(
        null=True, blank=True)
    stochK = models.FloatField(
        null=True, blank=True)
    priceVolumeTrend = models.FloatField(
        null=True, blank=True)
    movAverageExp = models.FloatField(
        null=True, blank=True)
    paraSar = models.FloatField(
        null=True, blank=True)
    accDistrLine = models.FloatField(
        null=True, blank=True)
    avTrueRange = models.FloatField(
        null=True, blank=True)
    indicB = models.FloatField(
        null=True, blank=True)
    commChanIndex = models.FloatField(
        null=True, blank=True)
    chaikinMF = models.FloatField(
        null=True, blank=True)
    detrPriceOsc = models.FloatField(
        null=True, blank=True)
    easeMove = models.FloatField(
        null=True, blank=True)
    forceIndex = models.FloatField(
        null=True, blank=True)
    macd = models.FloatField(
        null=True, blank=True)
    monneyFI = models.FloatField(
        null=True, blank=True)
    negVolIndex = models.FloatField(
        null=True, blank=True)
    percVolOsc = models.FloatField(
        null=True, blank=True)
    priceRelWarrent = models.FloatField(
        null=True, blank=True)
    priceRelAsian = models.FloatField(
        null=True, blank=True)
    priceRelDiana = models.FloatField(
        null=True, blank=True)
    priceRelTenren = models.FloatField(
        null=True, blank=True)
    rateChange = models.FloatField(
        null=True, blank=True)
    relStrengthI = models.FloatField(
        null=True, blank=True)
    slope = models.FloatField(
        null=True, blank=True)
    stdDev = models.FloatField(
        null=True, blank=True)
    stochOsc = models.FloatField(
        null=True, blank=True)
    stochRSI = models.FloatField(
        null=True, blank=True)
    ultimateOsc = models.FloatField(
        null=True, blank=True)
    williamR = models.FloatField(
        null=True, blank=True)
    def __unicode__(self):
        # Display as d/m/y (Python 2 Django admin/string representation).
        return u'' + str(self.day) + '/' + str(self.month) + '/' + str(self.year)
    class Meta:
        # Abstract: no table of its own; concrete subclasses get tables.
        abstract = True
class W(models.Model):
    """Abstract base: one day's weather observation (all fields optional)."""
    temperature = models.SmallIntegerField(null=True, blank=True)
    humidity = models.SmallIntegerField(null=True, blank=True)
    windSpeed = models.SmallIntegerField(null=True, blank=True)
    pressure = models.SmallIntegerField(null=True, blank=True)
    day = models.SmallIntegerField()
    month = models.SmallIntegerField()
    year = models.SmallIntegerField()
    def __unicode__(self):
        # Display as d/m/y.
        return u'' + str(self.day) + '/' + str(self.month) + '/' + str(self.year)
    class Meta:
        # Abstract: concrete subclasses (Weather, dWeather, ...) get tables.
        abstract = True
class Stock(models.Model):
    """Abstract base: one day's OHLC(+adjusted) prices and volume.

    Prices use 7-digit decimals with 4 decimal places; volume allows up
    to 13 digits.
    """
    day = models.SmallIntegerField()
    month = models.SmallIntegerField()
    year = models.SmallIntegerField()
    open = models.DecimalField(
        null=True, blank=True, max_digits=7, decimal_places=4)
    close = models.DecimalField(
        null=True, blank=True, max_digits=7, decimal_places=4)
    low = models.DecimalField(
        null=True, blank=True, max_digits=7, decimal_places=4)
    high = models.DecimalField(
        null=True, blank=True, max_digits=7, decimal_places=4)
    adj = models.DecimalField(
        null=True, blank=True, max_digits=7, decimal_places=4)
    volume = models.DecimalField(
        null=True, blank=True, max_digits=13, decimal_places=4)
    class Meta:
        # Abstract: one concrete table per tracked symbol below.
        abstract = True
    def __unicode__(self):
        # Display as d/m/y.
        return u'' + str(self.day) + '/' + str(self.month) + '/' + str(self.year)
# Concrete tables: one Stock table per tracked symbol.
class TyroonStock(Stock):
    pass
class WarrentStock(Stock):
    pass
class IndianStock(Stock):
    pass
class TenRenStock(Stock):
    pass
class DianaStock(Stock):
    pass
# Weather tables; the d/dd prefixes presumably hold first/second
# differences of the base series -- confirm with the ETL code.
class Weather(W):
    pass
class dWeather(W):
    pass
class ddWeather(W):
    pass
# Feature tables; likewise, dFeature35 presumably stores differenced values.
class Feature35(Feature):
    pass
class dFeature35(Feature):
    pass
| 26.534591 | 81 | 0.651813 | 4,100 | 0.971794 | 0 | 0 | 0 | 0 | 0 | 0 | 75 | 0.017777 |