blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9aa6e5fb065c69b21645bff657e9fc0394c80395 | 38cc32a77077c1ebcea939a81969a6f9f4d37dfe | /lekce/lekce_11/json_load.py | 4980d23ece4459c57ff977200c2e2778f09f5cc5 | [] | no_license | kabezd/pyladies | 13ad60c39a6908cc4cd1401aa6c7d1c69546bc39 | c6b0a44f86880ea3deb244fd8ec84c4fb6bebccb | refs/heads/master | 2021-02-13T16:59:14.144039 | 2020-07-31T18:18:14 | 2020-07-31T18:18:14 | 244,714,800 | 0 | 0 | null | 2020-08-02T14:38:38 | 2020-03-03T18:45:26 | Python | UTF-8 | Python | false | false | 220 | py | import json
json_string = """
{
"name": "Anna",
"city": "Brno",
"language": ["czech", "english", "Python"],
"age": 26
}
"""
data = json.loads(json_string)
print(data)
print(data['city']) | [
"bezdekova.k@seznam.cz"
] | bezdekova.k@seznam.cz |
99c4e44f477c0bac23afd0bd0105f76f1483e7d6 | 8ae2a6f0e9a9f4ed651f13a63a4124a1c99b4dba | /xmudata/DIV2K2018.py | 733dd9bea788782457d040368847d253e9f6401b | [] | no_license | SrWYG/channelPruningXMU | 7515950210a32aa78bd04030e7bf2b214cc85583 | 2f03300cb909c751c73f21452721dddbce33f7fd | refs/heads/master | 2020-03-17T06:47:44.939130 | 2018-05-14T14:21:47 | 2018-05-14T14:21:47 | 133,369,763 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,353 | py | from xmudata.data import Data
import tensorlayer as tl
from xmuutil import utils
import os
from xmuutil.exception import LargeSizeException
class DIV2K2018(Data):
    """DIV2K-2018 super-resolution dataset wrapper.

    Pairs a low-resolution (LR) input directory with its high-resolution (HR)
    ground-truth directory and produces randomly cropped LR/HR patch pairs.
    """
    def __init__(self, train_truth_dir, train_data_dir, test_truth_dir = None, test_data_dir=None, image_size = 96, scale = 4, train_postfix_len = 3, test_postfix_len = -1, test_per=0.01):
        """
        train_truth_dir / train_data_dir -- HR ground-truth and LR input dirs.
        image_size -- crop size passed to utils.crop (whether this is the HR
            or the LR patch size is defined by utils.crop -- confirm there).
        scale -- super-resolution upscaling factor between LR and HR.
        *_postfix_len -- filename-suffix length used by utils.get_hrimg_list
            to map an LR filename onto its HR counterpart.
        test_per -- fraction held out for testing (handled by the Data base).
        """
        Data.__init__(self, train_truth_dir, train_data_dir,test_truth_dir,test_data_dir, train_postfix_len, test_postfix_len, test_per)
        self.image_size = image_size
        self.scale = scale
    def get_image_set(self, image_lr_list,input_dir,ground_truth_dir, postfix_len):
        """Load the LR images in *image_lr_list* and their HR counterparts and
        return (x_imgs, y_imgs) -- lists of cropped LR and HR patches.

        Pairs for which utils.crop raises LargeSizeException are skipped
        (the exception is printed and neither image is appended).
        """
        y_imgs = []
        x_imgs = []
        # use 10 threads to read files
        imgs_lr = tl.visualize.read_images(image_lr_list, input_dir)
        image_hr_list = utils.get_hrimg_list(image_lr_list, postfix_len)
        imgs_hr = tl.visualize.read_images(image_hr_list, ground_truth_dir)
        for i in range(len(imgs_lr)):
            #crop the image randomly
            try:
                x_img,y_img = utils.crop(imgs_lr[i], imgs_hr[i], self.image_size, self.image_size, self.scale, is_random=True)
            except LargeSizeException as e:
                print(e)
            else:
                # Only keep the pair when the crop succeeded for both images.
                y_imgs.append(y_img)
                x_imgs.append(x_img)
        return x_imgs, y_imgs
| [
"24320142202497@stu.xmu.edu.cn"
] | 24320142202497@stu.xmu.edu.cn |
0aacd4bb5f93f7cde57b903a1aec981ffb433ed5 | 92827cba7b89fce22f0ffce68fa8a9243127d482 | /chapter_8/first_mlp.py | 545dd387d4688c24a9682742998fb0da0218dd1c | [] | no_license | pm3310/deep_learning_with_python | c028fff8d6ead45dc5bd5849c474821563ee8235 | 76da1698742b083698803d19d1bcbb0843f7f840 | refs/heads/master | 2020-12-25T14:13:40.948359 | 2016-08-17T18:21:12 | 2016-08-17T18:21:12 | 65,931,347 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,231 | py | from keras.models import Sequential
from keras.layers import Dense
import numpy
# NOTE(review): sklearn.cross_validation and the y=/n_folds= keywords are the
# pre-0.18 scikit-learn API, and init=/nb_epoch= below are Keras 1.x -- this
# script assumes those old versions are installed; confirm the pins.
from sklearn.cross_validation import StratifiedKFold
# Fix the RNG seed so fold shuffling and weight init are reproducible.
seed = 7
numpy.random.seed(seed)
# load pima indians dataset
dataset = numpy.loadtxt("pima-indians-diabetes.csv", delimiter=",")
# split into input (X) and output (Y) variables
X = dataset[:, 0:8]
Y = dataset[:, 8]
# define 10-fold cross validation test harness
kfold = StratifiedKFold(y=Y, n_folds=10, shuffle=True, random_state=seed)
# Per-fold accuracy (in percent), collected for the final summary print.
cvscores = []
for i, (train, test) in enumerate(kfold):
    # create model: 8 inputs -> 12 -> 8 -> 1 sigmoid (binary classifier)
    model = Sequential()
    model.add(Dense(12, input_dim=8, init='uniform', activation='relu'))
    model.add(Dense(8, init='uniform', activation='relu'))
    model.add(Dense(1, init='uniform', activation='sigmoid'))
    # Compile model
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    # Fit the model on this fold's training split
    model.fit(X[train], Y[train], nb_epoch=150, batch_size=10, verbose=0)
    # evaluate the model on the held-out fold
    scores = model.evaluate(X[test], Y[test], verbose=0)
    print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
    cvscores.append(scores[1] * 100)
print("%.2f%% (+/- %.2f%%)" % (numpy.mean(cvscores), numpy.std(cvscores))) | [
"pavlos@workable.com"
] | pavlos@workable.com |
4cd67a0681d367c305de9840035756f49ffbf251 | c7eff37821123960716d818f2bbecf54b50a3e80 | /HeroloDjango/settings.py | acdaa15979c891a39900a9b476312697ca4ca547 | [] | no_license | PazBazak/HeroloDjango | abeaaf3ed6ec6dd7c8d2e4e197cd0ca7fb544958 | 7b08316337420914d132851a1e69b86c514e9a49 | refs/heads/dev | 2023-01-30T16:41:19.500980 | 2020-12-09T15:40:07 | 2020-12-09T15:40:07 | 319,662,166 | 0 | 0 | null | 2020-12-09T15:40:08 | 2020-12-08T14:23:49 | Python | UTF-8 | Python | false | false | 3,635 | py | """
Django settings for HeroloDjango project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
import sys
import django_heroku
from decouple import config
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
# python-decouple returns the raw string from the environment/.env file, and
# any non-empty string (including 'False') is truthy -- cast to a real bool.
DEBUG = config('DEBUG', default=False, cast=bool)
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'api.apps.ApiConfig',
'rest_framework',
'rest_framework.authtoken',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'HeroloDjango.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'HeroloDjango.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTH_USER_MODEL = 'api.CustomUser'
# rest framework
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication',
],
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated',
],
}
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# Activate django-heroku settings unless the management command is `test`.
# Guard the index access: sys.argv may have no argv[1] (e.g. under WSGI),
# which previously raised IndexError.
if len(sys.argv) < 2 or sys.argv[1] != 'test':
    django_heroku.settings(locals())
| [
"yolobazak1@gmail.com"
] | yolobazak1@gmail.com |
d3f8274bb3c977361e26de93b6ca096b5273f249 | f1a2a34628c67829ee321b1edb40b1f41875d293 | /detection_nbdev/metrics.py | 4dadc419659b0441f904eb9efd9bb1603124ef63 | [
"Apache-2.0"
] | permissive | Sports-AI-Coaching/detection-nbdev | bb09218fe8a418225fc714e6711082a6ea64ae70 | 331d36aa7ed65492a879d5012c1d6de866ec3519 | refs/heads/master | 2023-03-05T01:26:35.833951 | 2021-02-14T23:38:25 | 2021-02-14T23:38:25 | 338,741,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,576 | py | # AUTOGENERATED! DO NOT EDIT! File to edit: 02_metrics.ipynb (unless otherwise specified).
__all__ = ['bbox_iou', 'hungarian_loss']
# Cell
import torch
from scipy.optimize import linear_sum_assignment
# Cell
def bbox_iou(boxA, boxB):
    """Intersection-over-union of two axis-aligned boxes.

    Boxes are (x1, y1, x2, y2) with inclusive pixel coordinates, hence the
    ``+ 1`` terms when measuring widths and heights. Returns a float in
    [0, 1]; 0.0 when the boxes do not overlap.
    """
    # determine the (x, y)-coordinates of the intersection rectangle
    xA = max(boxA[0], boxB[0])
    yA = max(boxA[1], boxB[1])
    xB = min(boxA[2], boxB[2])
    yB = min(boxA[3], boxB[3])
    # compute the area of intersection rectangle; clamp at zero so disjoint
    # boxes contribute no (negative) overlap
    interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
    # compute the area of both the prediction and ground-truth rectangles
    boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
    boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
    # intersection area divided by the union (sum of areas minus overlap)
    iou = interArea / float(boxAArea + boxBArea - interArea)
    return iou

# Cell
def hungarian_loss(boxesA, boxesB, loss_func=bbox_iou, maximize=True):
    """Mean pairwise score of the optimal (Hungarian) box assignment.

    Builds an n x n score matrix (n = max set size) with
    ``loss_func(boxA, boxB)`` per pair, solves the assignment with
    scipy's linear_sum_assignment, and returns the mean of the assigned
    entries as a torch scalar.

    Missing pairings (unequal set sizes, or explicit ``None`` entries)
    score ``int(not maximize)`` -- the neutral worst value.

    Bug fix: the pairwise score now actually uses *loss_func*; previously
    the parameter was ignored and ``bbox_iou`` was always called.
    """
    n = max(len(boxesA), len(boxesB))
    cost_matrix = torch.zeros((n, n))
    for i, boxA in enumerate(boxesA):
        for j, boxB in enumerate(boxesB):
            if boxA is None or boxB is None:
                # Neutral value for an unmatched slot (0 when maximizing).
                cost_matrix[i, j] = int(not maximize)
            else:
                cost_matrix[i, j] = loss_func(boxA, boxB)
    row_ind, col_ind = linear_sum_assignment(cost_matrix, maximize=maximize)
    return cost_matrix[row_ind, col_ind].mean()
"atom@phi.asura"
] | atom@phi.asura |
45d56d1354e14bbecd02cb825af9747942a005f9 | 134502316380ec6e66a23b49fd168a5ce0413d29 | /Simulator.py | 10b5a14e7d6150d93ceb3875c69d21f4e8b3b27d | [] | no_license | helgejo/cozmo_rl | 83cd63e5d2880e29d5a0668f8a7e8a4e8575143e | 6e2c6a8a7362684c9bc8b1d7ed00601971a369c7 | refs/heads/master | 2021-01-15T17:15:30.256995 | 2017-08-10T12:48:37 | 2017-08-10T12:48:37 | 99,742,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 8 21:05:44 2017
@author: bjorland
"""
class Gym:
    """Skeleton of an OpenAI-Gym-style environment interface.

    The original version did not even parse (several methods had no body)
    and accessed undefined attributes/names; this repair keeps the intended
    interface and raises NotImplementedError for the unimplemented dynamics.
    """

    def __init__(self):
        self.x = 1
        # Placeholders for the usual Gym attributes; not yet populated.
        self.observation = None
        self.action_space = None
        self.observation_space = None

    def step(self, action):
        """Advance one timestep; meant to return (observation, reward, done, info)."""
        raise NotImplementedError("environment dynamics are not implemented yet")

    def get_new_state(self, action):
        """Compute the successor state for *action* (not implemented)."""
        raise NotImplementedError

    def get_reward(self, action):
        """Return the reward for taking *action* (not implemented)."""
        raise NotImplementedError

    def reset(self):
        """Reset the environment to an initial state (not implemented)."""
        raise NotImplementedError

    def make(self):
        """Construct/configure the environment (not implemented)."""
        raise NotImplementedError
"Helge.Johannessen-Bjorland@telenor.com"
] | Helge.Johannessen-Bjorland@telenor.com |
c05da3b7474a985ebb26bd86f561fd8df1dd8a8e | b5fd4552474f2b55d6c1d3d2a0e6974221cb022a | /build/turtlebot3_msgs/cmake/turtlebot3_msgs-genmsg-context.py | 228d0e782b34294a153d343b357471aac543ee3c | [] | no_license | bryceustc/ROS | ea042f96ce36653252c6c8fe2eb89c4ed5a8c61d | 073c0261f729ca52da1def63cba14618d41e7741 | refs/heads/master | 2021-03-21T00:04:43.528576 | 2020-03-14T09:34:24 | 2020-03-14T09:34:24 | 247,245,675 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | # generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/bryce/pid_control_ws/src/turtlebot3_msgs/msg/SensorState.msg;/home/bryce/pid_control_ws/src/turtlebot3_msgs/msg/VersionInfo.msg;/home/bryce/pid_control_ws/src/turtlebot3_msgs/msg/Sound.msg"
services_str = ""
pkg_name = "turtlebot3_msgs"
dependencies_str = "std_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "turtlebot3_msgs;/home/bryce/pid_control_ws/src/turtlebot3_msgs/msg;std_msgs;/opt/ros/kinetic/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| [
"bryce@mail.ustc.edu.cn"
] | bryce@mail.ustc.edu.cn |
bb31114011aaefceadd2ce293442dbf0b50b6cff | b24012b8e8b4e42600903a04e7740b9e28bc17ac | /utils/DetectClosestColor.py | e63fe8459632ab60fa24e7972866945a859e6d27 | [] | no_license | msjun23/Lane-Detection | 747e8443a22d0d7d909a2d803d0a601f9095488e | 02825a55b684969cda31e061119429d13234cf83 | refs/heads/master | 2023-08-01T06:54:57.708416 | 2021-09-20T13:56:37 | 2021-09-20T13:56:37 | 288,739,009 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,179 | py | import cv2
import numpy as np
from matplotlib import pyplot as plt
from numpy.core.fromnumeric import shape
delta_h = 30
delta_sv = 75
def _closest_color_mask(img, hsv_target, threshold):
    """Binary mask of pixels whose HSV distance to *hsv_target* is small.

    Per-pixel Euclidean distance in HSV space is min-max normalised to
    0..255, then pixels with normalised distance <= *threshold* are set
    in the returned mask. Shared by DetectYellow/DetectWhite (the two
    originals were copy-pasted except for the target colour and cutoff).

    NOTE(review): a perfectly flat image makes pixel_max == pixel_min and
    the normalisation divides by zero (numpy warning / NaNs) -- behaviour
    preserved from the original; confirm inputs can never be flat.
    """
    hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    dist_img = np.sum((hsv_img - hsv_target)**2, axis=2)**(1/2)
    pixel_min = np.min(dist_img)
    pixel_max = np.max(dist_img)
    # Pixel Normalization: 0~255
    dist_img = ((dist_img - pixel_min) / (pixel_max - pixel_min)) * 255
    return cv2.inRange(dist_img, np.array([0]), np.array([threshold]))

def DetectYellow(img):
    """Mask of pixels closest to pure yellow (HSV [30, 255, 255])."""
    return _closest_color_mask(img, np.array([30, 255, 255]), 40)

def DetectWhite(img):
    """Mask of pixels closest to pure white (HSV [0, 0, 255])."""
    return _closest_color_mask(img, np.array([0, 0, 255]), 20)

def DetectYellowWhite(img):
    """Combined mask: pixels close to yellow OR close to white."""
    yellow_mask = DetectYellow(img)
    white_mask = DetectWhite(img)
    return cv2.bitwise_or(yellow_mask, white_mask)
| [
"msjun23@gmail.com"
] | msjun23@gmail.com |
5571bc3f500861a21934a71a6330c94ce561521b | db8692575378366957b246a5b5938f67de936386 | /26feb/largest_number_possible.py | 2b2bed7f39dbc14461f5e1c5f9167e348adbb691 | [] | no_license | adarhp0/coding | 24f474f2a3ef4ef578e33509dc57514563929956 | 63db62efdcdeaae934066c337437afcf52a6e28c | refs/heads/master | 2021-07-10T20:04:54.375292 | 2021-04-03T15:11:06 | 2021-04-03T15:11:06 | 242,801,385 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | tes=int(input())
# For each test case read n (digit count) and s (digit sum) and greedily
# build the largest n-digit number whose digits sum to s.
# NOTE: `tes` (the number of test cases) is read from stdin just above.
for t in range(tes):
    n,s=map(int,input().split())
    # s > 9*n is impossible (each digit holds at most 9).
    # NOTE(review): when s == 0 this falls through and prints all zeros
    # (e.g. "00" for n=2) -- confirm that is intended rather than -1.
    if s/n>9 and s!=0:
        print(-1)
    else:
        # z[d] = how many times digit d appears in the answer.
        z={}
        for i in range(10):
            z[i]=0
        q=9
        # Greedy: spend the sum on as many 9s as possible, then 8s, ...
        while s>0:
            z[q]=int(s/q)
            s=s-z[q]*q
            q=q-1
        a=''
        su=0
        # Emit the chosen digits from largest to smallest.
        for i in range(9,0,-1):
            k=z[i]
            su=su+k
            for j in range(k):
                a=a+str(i)
        # Pad with trailing zeros up to n digits.
        b=n-su
        for i in range(b):
            a=a+str(0)
        print(a)
        #print(z)
| [
"adarshahp0@gmail.com"
] | adarshahp0@gmail.com |
202431c6183a6dcff01d28a468d59da31fa8c7b1 | cb9f5db2cdaa5c85a4c5950e34fa22d931da445e | /seed.py | d94c6e63d50668962053785917432aba4eb825c1 | [] | no_license | rmmistry/movie-ratings- | 248fdb36a7392cebc8cfc9686cae61a3b0c516c4 | 89050e4da2dc998ab99fca8537d8df75a650e845 | refs/heads/master | 2021-01-10T05:13:17.863638 | 2015-10-23T00:58:23 | 2015-10-23T00:58:23 | 44,561,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,133 | py | """Utility file to seed ratings database from MovieLens data in seed_data/"""
from model import User, Movie, Rating
# from model import Rating
# from model import Movie
from model import connect_to_db, db
from server import app
from datetime import datetime
def load_users():
"""Load users from u.user into database."""
print "Users"
# Delete all rows in table, so if we need to run this a second time,
# we won't be trying to add duplicate users
User.query.delete()
# Read u.user file and insert data
for row in open("seed_data/u.user"):
row = row.rstrip()
user_id, age, gender, occupation, zipcode = row.split("|")
user = User(user_id=user_id,
age=age,
zipcode=zipcode)
# We need to add to the session or it won't ever be stored
db.session.add(user)
# Once we're done, we should commit our work
db.session.commit()
def load_movies():
"""Load movies from u.item into database."""
print "Movies"
# Delete all rows in table, so if we need to run this a second time,
# we won't be trying to add duplicate users
Movie.query.delete()
# Read u.user file and insert data
for row in open("seed_data/u.item"):
row = row.rstrip()
row_splitted = row.split("|")
##throwing out rows with no release date or title is unknown
movie_id = row_splitted[0]
title = row_splitted[1]
released_at = row_splitted[2]
imdb_url = row_splitted[4]
## FIX LATER: optionally, rstrip('(') - why didn't it work?
title = title[:-7]
print title
if released_at != (''):
released_at_ob = datetime.strptime(released_at, '%d-%b-%Y')
else:
pass
movie = Movie(movie_id=movie_id,
title=title,
released_at=released_at_ob,
imdb_url=imdb_url)
# We need to add to the session or it won't ever be stored
db.session.add(movie)
# Once we're done, we should commit our work
db.session.commit()
def load_ratings():
"""Load ratings from u.data into database."""
print "Ratings"
# Delete all rows in table, so if we need to run this a second time,
# we won't be trying to add duplicate users
Rating.query.delete()
# Read u.user file and insert data
for row in open("seed_data/u.data"):
row = row.rstrip()
row_splitted=row.split()
user_id = row_splitted[0]
movie_id = row_splitted[1]
score = row_splitted[2]
rating = Rating(movie_id=movie_id,
user_id=user_id,
score=score)
# We need to add to the session or it won't ever be stored
db.session.add(rating)
# Once we're done, we should commit our work
db.session.commit()
# Run the seeders only when executed as a script (not on import).
if __name__ == "__main__":
    connect_to_db(app)
    # In case tables haven't been created, create them
    db.create_all()
    # Import different types of data. Users and movies are loaded before
    # ratings, which presumably reference both -- confirm FK constraints.
    load_users()
    load_movies()
    load_ratings()
| [
"info@hackbrightacademy.com"
] | info@hackbrightacademy.com |
e2f67ccafad355edc13a17d8c188f2dd33a654a9 | 268cb0493160b06b1f5d5b33bf0dd80fd58a94f9 | /spotify-recommendations/ml/get_feat_playlists_new_albums.py | 500cb47eb38407d330a0304bb35a6dd9f044aec2 | [] | no_license | restevesd/PROJECTS-DATA-SCIENTIST-TRAINING | 1d92c88db700df4e8a067ca02f5d53ec9a85671d | 0d3c315594aa6901079650283dc13b866fb98971 | refs/heads/main | 2023-08-18T09:43:34.489337 | 2021-10-02T10:21:15 | 2021-10-02T10:21:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,381 | py | import django
import os
import logging
from spotipy.oauth2 import SpotifyClientCredentials
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "spotify_recs.settings")
django.setup()
from spotify_app.models import Playlist
import spotipy
import time
logger = logging.getLogger('django')
logger.setLevel(logging.INFO)
def main():
client_credentials_manager = SpotifyClientCredentials(
client_id="fd2ae3d3b2d3407da3b02a97376827b5", client_secret="e84c4e18dd2f463aa0c649341d64dbfb"
)
sp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)
logger.info("Getting featured playlists...")
payload = sp.featured_playlists()["playlists"]
playlist_total = payload["items"]
while payload["next"]:
payload = sp.next(payload)
playlist_total.extend(payload["items"])
for playlist in playlist_total:
temp_obj = Playlist(
playlist_id=playlist["id"],
playlist_name=playlist["name"],
playlist_url=playlist["external_urls"]["spotify"],
playlist_num_tracks=playlist["tracks"]["total"],
playlist_featured=True,
playlist_owner=playlist["owner"]["display_name"].lower(),
date_created=time.time(),
playlist_img_src=playlist["images"][0]["url"],
)
temp_obj.save()
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | noreply@github.com |
086f450b7863c94d7a6224d270dac70c108e4121 | adc041494866c5fb9e7879c6f3dc28b992310fd2 | /Lists and Tuples/Tuples/list to tuple.py | 0b8c7617e1d8e907f4476dacaa73acca0070d703 | [] | no_license | REGENTMONK97/Python- | 8b53dd684b770d7d6d79b352efa3729731047329 | ab8987506dbbae2a9660852c85f21535954738c7 | refs/heads/master | 2020-03-20T20:56:37.708665 | 2018-06-18T08:01:38 | 2018-06-18T08:01:38 | 137,715,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90 | py | a = input("Enter all data separated by comma")
a = a.lstrip()
a1 = a.split(',')
# BUG FIX: the original called tuple(a1) and threw the result away, so the
# script produced no visible output; keep the tuple and print it.
result = tuple(a1)
print(result)
| [
"sairaghunath97@gmail.com"
] | sairaghunath97@gmail.com |
159f081af2707fdc632c02abb3f8cfda47e6c305 | 9a8a58abd169ad6dabd20cf806249faa9e89bf47 | /typogrify.py | 22c8f23dda4d7f462656dcc918bbc625aa73a0e0 | [] | no_license | curiousleo/kardiopraxis-flask | 583a25edb4e366b981b20691dc5e0821894e4b3b | d9a7a14b386c5d4415d6b650e503c3c104e5ebaf | refs/heads/master | 2021-01-19T03:13:09.666560 | 2015-05-21T19:58:08 | 2015-05-21T19:58:08 | 4,799,515 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,163 | py | # Copyright (c) 2007, Christian Metts
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the author nor the names of other
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# This version of typogrify is stripped of all Django-related functions.
'''
.. module:: typogrify
:synopsis: Prettify web typography.
'''
import re
def amp(text):
    '''Normalise loose ampersands and wrap them in ``<span class="amp">``
    so they can be styled with CSS. Only ampersands with whitespace (or a
    non-breaking space) on both sides are touched; entities, URLs and
    ampersands inside tag attributes are left alone.

    >>> amp('One & two')
    'One <span class="amp">&</span> two'

    Already-wrapped ampersands are not wrapped again:

    >>> amp('One <span class="amp">&</span> two')
    'One <span class="amp">&</span> two'

    Standalone amps that live inside attributes are ignored:

    >>> amp('<link href="xyz.html" title="One & Two">xyz</link>')
    '<link href="xyz.html" title="One & Two">xyz</link>'
    '''
    # Matches one complete HTML tag; see
    # http://haacked.com/archive/2004/10/25/usingregularexpressionstomatchhtml.aspx
    # It kinda sucks, but it keeps amps inside attribute values untouched.
    tag_pattern = '</?\w+((\s+\w+(\s*=\s*(?:".*?"|\'.*?\'|[^\'">\s]+))?)+\s*|\s*)/?>'
    # \xa0 is U+00A0 (non-breaking space); the source file originally held
    # the literal character -- the escape compiles to the same pattern.
    standalone_amp = re.compile(r'(\s|\xa0)(&|&|&\#38;)(\s|\xa0)')
    tag_aware = re.compile(r'(?P<prefix>(%s)?)(?P<text>([^<]*))(?P<suffix>(%s)?)' % (tag_pattern, tag_pattern))

    def _wrap_between_tags(match):
        # Only the text BETWEEN tags gets the amp treatment; the matched
        # tags themselves pass through untouched.
        before = match.group('prefix') or ''
        after = match.group('suffix') or ''
        between = standalone_amp.sub(r'''\1<span class="amp">&</span>\3''', match.group('text'))
        return before + between + after

    return tag_aware.sub(_wrap_between_tags, text)
amp.is_safe = True
def caps(text):
    """Wrap runs of capital letters (optionally mixed with digits, dumb
    apostrophes, or dotted like D.O.T.) in ``<span class="caps">`` so they
    can be styled with CSS.

    >>> caps('A message from KU')
    'A message from <span class="caps">KU</span>'
    >>> caps('A message from 2KU2 with digits')
    'A message from <span class="caps">2KU2</span> with digits'

    Uses the smartypants tokenizer so tags -- and the contents of
    pre/code/kbd/script/math elements -- are never modified.

    Raises Exception when the third-party smartypants library is missing.
    """
    try:
        import smartypants
    except ImportError:
        # BUG FIX: was the Python-2-only statement ``raise Exception, "..."``
        # followed by an unreachable ``return text``.
        raise Exception("The Python SmartyPants library isn't installed.")

    tokens = smartypants._tokenize(text)
    result = []
    in_skipped_tag = False

    cap_finder = re.compile(r'''(
                            (\b[A-Z\d]*        # Group 2: any number of caps and digits
                            [A-Z]\d*[A-Z]      # must contain at least two caps (digits may sit between)
                            [A-Z\d']*\b)       # any number of caps, digits or dumb apostrophes
                            | (\b[A-Z]+\.\s?   # OR Group 3: caps followed by a '.' and an optional space
                            (?:[A-Z]+\.\s?)+)  # ... repeated at least once more
                            (?:\s|\b|$))
                            ''', re.VERBOSE)

    def _cap_wrapper(matchobj):
        '''Wrap the match; dotted-cap strings must not swallow a trailing space.'''
        if matchobj.group(2):
            return '''<span class="caps">%s</span>''' % matchobj.group(2)
        else:
            if matchobj.group(3)[-1] == ' ':
                caps = matchobj.group(3)[:-1]
                tail = ' '
            else:
                caps = matchobj.group(3)
                tail = ''
            return '''<span class="caps">%s</span>%s''' % (caps, tail)

    # Text inside these elements must never be rewritten.
    tags_to_skip_regex = re.compile('<(/)?(?:pre|code|kbd|script|math)[^>]*>', re.IGNORECASE)

    for token in tokens:
        if token[0] == 'tag':
            # Pass tags through untouched, but track whether we just opened
            # (group(1) is None) or closed one of the skipped elements.
            result.append(token[1])
            close_match = tags_to_skip_regex.match(token[1])
            if close_match and close_match.group(1) is None:
                in_skipped_tag = True
            else:
                in_skipped_tag = False
        else:
            if in_skipped_tag:
                result.append(token[1])
            else:
                result.append(cap_finder.sub(_cap_wrapper, token[1]))

    return ''.join(result)
caps.is_safe = True
def initial_quotes(text):
    '''Wrap the opening quote of a block in ``class="dquo"`` (double) or
    ``class="quo"`` (single) so it can be hung with CSS. Works at the start
    of the string or after an opening p/h1-h6/li/dt/dd tag, optionally
    followed by opening inline tags (a, em, span, strong, b, i).

    >>> initial_quotes('"With primes"')
    '<span class="dquo">"</span>With primes"'
    >>> initial_quotes("'With single primes'")
    '<span class="quo">\\'</span>With single primes\\''
    >>> initial_quotes('<a href="#">"With primes and a link"</a>')
    '<a href="#"><span class="dquo">"</span>With primes and a link"</a>'
    '''
    # Group 7 captures a double quote, group 8 a single quote; group 1 is
    # everything (tags/whitespace) that precedes the quote and is kept as-is.
    opening_quote = re.compile(r'''((<(p|h[1-6]|li|dt|dd)[^>]*>|^)              # start with an opening p, h1-6, li, dd, dt or the start of the string
                              \s*                                               # optional white space!
                              (<(a|em|span|strong|i|b)[^>]*>\s*)*)              # optional opening inline tags, with more optional white space for each.
                              (("|“|&\#8220;)|('|‘|&\#8216;))                   # Find me a quote! (only need to find the left quotes and the primes)
                                                                                # double quotes are in group 7, singles in group 8
                              ''', re.VERBOSE)

    def _span_quote(match):
        if match.group(7):
            classname, quote = 'dquo', match.group(7)
        else:
            classname, quote = 'quo', match.group(8)
        return '%s<span class="%s">%s</span>' % (match.group(1), classname, quote)

    return opening_quote.sub(_span_quote, text)
initial_quotes.is_safe = True
def smartypants(text):
    """Curl straight quotes (and educate dashes/ellipses) by delegating to
    the third-party SmartyPants library.

    Raises Exception when the smartypants library is not installed.
    """
    try:
        # Alias the import so the module does not shadow this function's name.
        import smartypants as _smartypants
    except ImportError:
        # BUG FIX: was the Python-2-only statement ``raise Exception, "..."``
        # followed by an unreachable ``return text``.
        raise Exception("The Python smartypants library isn't installed.")
    return _smartypants.smartypants(text)
smartypants.is_safe = True
def typogrify(text):
    '''The all-in-one typography filter: runs amp, widont, smartypants,
    caps and initial_quotes over *text*, in that order, and returns the
    fully prettified result.
    '''
    # Order matters: amp/widont work on the raw markup, smartypants curls
    # the quotes, and caps/initial_quotes decorate the curled output.
    for transform in (amp, widont, smartypants, caps, initial_quotes):
        text = transform(text)
    return text
def widont(text):
    '''Prevent typographic "widows" by replacing the space between the last
    two words of a block with a non-breaking space. Works before a closing
    p/h1-h6/li/dt/dd tag (or the end of the string), and tolerates closing
    inline tags (a, em, span, strong, b, i) after the final word.

    Single-word content is left untouched:

    >>> widont('Test')
    'Test'
    >>> widont('<h1><a href="#"></a></h1>')
    '<h1><a href="#"></a></h1>'

    Unsupported blocks (div, pre, ...) are also left alone:

    >>> widont('<div>Divs get no love!</div>')
    '<div>Divs get no love!</div>'
    '''
    nbsp_pattern = re.compile(r'''((?:</?(?:a|em|span|strong|i|b)[^>]*>)|[^<>\s]) # must be proceeded by an approved inline opening or closing tag or a nontag/nonspace
                                    \s+                                           # the space to replace
                                    ([^<>\s]+                                     # must be flollowed by non-tag non-space characters
                                    \s*                                           # optional white space!
                                    (</(a|em|span|strong|i|b)>\s*)*               # optional closing inline tags with optional white space after each
                                    ((</(p|h[1-6]|li|dt|dd)>)|$))                 # end with a closing p, h1-6, li or the end of the string
                                    ''', re.VERBOSE)
    # '\xa0' is U+00A0 (non-breaking space); the source file originally held
    # the literal character in the replacement template.
    return nbsp_pattern.sub('\\1\xa0\\2', text)
widont.is_safe = True
def _test():
    """Run this module's doctest examples."""
    import doctest
    doctest.testmod()
# Running the file directly executes the embedded doctests.
if __name__ == '__main__':
    _test()
| [
"curiousleo@ymail.com"
] | curiousleo@ymail.com |
f36fc9dfdae9c007db292d27d4f13e847e448d2b | 93176908770ccfdfcd28849e0344ca8f08433d3e | /MealRunner/__init__.py | 148a82d5d9c8dc01e271a367ac3c8d26661c1c5f | [] | no_license | mamutahr/MealRunner | 96804943f090e0e6521468aaba7991b6c7414067 | 038dae2c744b7ec03a68456167fbac8db307bf73 | refs/heads/master | 2023-05-25T05:07:47.012726 | 2019-10-13T14:51:14 | 2019-10-13T14:51:14 | 214,579,514 | 0 | 0 | null | 2023-05-22T22:30:40 | 2019-10-12T04:49:57 | Python | UTF-8 | Python | false | false | 1,003 | py | """
MealRunner package initializer.
"""
import flask
# app is a single object used by all the code modules in this package
app = flask.Flask(__name__) # pylint: disable=invalid-name
# Read settings from config module (MealRunner/config.py)
app.config.from_object('MealRunner.config')
# Overlay settings read from file specified by environment variable. This is
# useful for using different on development and production machines.
# Reference: http://flask.pocoo.org/docs/config/
app.config.from_envvar('MEALRUNNER_SETTINGS', silent=True)
# Tell our app about views and model. This is dangerously close to a
# circular import, which is naughty, but Flask was designed that way.
# (Reference http://flask.pocoo.org/docs/patterns/packages/) We're
# going to tell pylint and pycodestyle to ignore this coding style violation.
import MealRunner.views # noqa: E402 pylint: disable=wrong-import-position
import MealRunner.model # noqa: E402 pylint: disable=wrong-import-position | [
"mamutahr@umich.edu"
] | mamutahr@umich.edu |
81a39a0d1720fe639ac2b59e7861b623c6118af5 | 2324dea2cb3003c8ab7e8fd80588d44973eb8c77 | /Euler_1_17a.py | 9a350a6d333f32263cf6731390cfab23de618e79 | [] | no_license | MikeOcc/MyProjectEulerFiles | 5f51bc516cb6584732dc67bb2f9c7fd9e6d51e56 | 4d066d52380aade215636953589bf56d6b88f745 | refs/heads/master | 2021-01-16T18:45:44.133229 | 2015-05-27T18:28:43 | 2015-05-27T18:28:43 | 5,876,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,176 | py | def p17():
def lowest_digit(n):
return n/10, n%10
def words(n):
if n > 999:
raise ValueError, "Number too big."
digits = [None, 'one', 'two', 'three', 'four', 'five', 'six', 'seven',
'eight', 'nine']
teens = ['ten', 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen',
'sixteen', 'seventeen', 'eighteen', 'nineteen']
tens = [None, None, 'twenty', 'thirty', 'forty', 'fifty', 'sixty',
'seventy', 'eighty', 'ninety']
n, o = lowest_digit(n)
n, t = lowest_digit(n)
n, h = lowest_digit(n)
result = []
if t == 1:
result.append(teens[o])
else:
if o:
result.append(digits[o])
if t:
result.append(tens[t])
if h:
if t or o:
result.append('and')
result.append('hundred')
result.append(digits[h])
#return ''.join(reversed(result))
return ''.join(result)
c = 0
for i in range(1,1000):
c += len(words(i))
c+=len('onethousand')
print c
p17()
| [
"mike.occhipinti@mlsassistant.com"
] | mike.occhipinti@mlsassistant.com |
e9b5253151a81e420a53c743d91c567e23d8895f | 295d62b0d1abe97f8a3d1500c44666040870d4b0 | /out/gt/real-reinforcement-learning/v2/actor_critic.py | d7a3a664a84c0034c5b89e1caae3832c9a2c3f56 | [] | no_license | adandamudi/hlast | 3c5e493e15ce9c64a5644ca1d3138e7c87629cbb | 94d3e2b12434c137399546a71da5ad063d32d201 | refs/heads/main | 2023-02-02T13:56:00.955051 | 2020-12-18T06:45:17 | 2020-12-18T06:45:17 | 311,791,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,324 | py | import argparse
import gym
import numpy as np
from itertools import count
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.autograd as autograd
from torch.autograd import Variable
import torchvision.transforms as T
parser = argparse.ArgumentParser(description='PyTorch actor-critic example')
parser.add_argument('--gamma', type=int, default=0.99, metavar='G', help='discount factor (default: 0.99)')
parser.add_argument('--seed', type=int, default=543, metavar='N', help='random seed (default: 1)')
parser.add_argument('--render', action='store_true', help='render the environment')
parser.add_argument('--log-interval', type=int, default=10, metavar='N', help='interval between training status logs (default: 10)')
args = parser.parse_args()
env = gym.make('CartPole-v0')
env.seed(args.seed)
torch.manual_seed(args.seed)
SavedAction = namedtuple('SavedAction', ['action', 'value'])
class Policy(nn.Module):
def __init__(self):
super(Policy, self).__init__()
self.affine1 = nn.Linear(4, 128)
self.action_head = nn.Linear(128, 2)
self.value_head = nn.Linear(128, 1)
self.saved_actions = []
self.rewards = []
def forward(self, x):
x = F.relu(self.affine1(x))
action_scores = self.action_head(x)
state_values = self.value_head(x)
return (F.softmax(action_scores), state_values)
model = Policy()
optimizer = optim.Adam(model.parameters(), lr=0.03)
def select_action(state):
state = torch.from_numpy(state).float().unsqueeze(0)
(probs, state_value) = model(Variable(state))
action = probs.multinomial()
model.saved_actions.append(SavedAction(action, state_value))
return action.data
def finish_episode():
R = 0
saved_actions = model.saved_actions
value_loss = 0
rewards = []
for r in model.rewards[::-1]:
R = r + args.gamma * R
rewards.insert(0, R)
rewards = torch.Tensor(rewards)
rewards = (rewards - rewards.mean()) / rewards.std()
for ((action, value), r) in zip(saved_actions, rewards):
action.reinforce(r - value.data.squeeze())
value_loss += F.smooth_l1_loss(value, Variable(torch.Tensor([r])))
optimizer.zero_grad()
final_nodes = [value_loss] + list(map(lambda p: p.action, saved_actions))
gradients = [torch.ones(1)] + [None] * len(saved_actions)
autograd.backward(final_nodes, gradients)
print('LOG STMT: Model weights = %s' % model.parameters())
optimizer.step()
del model.rewards[:]
del model.saved_actions[:]
running_reward = 10
for i_episode in count(1):
state = env.reset()
for t in range(10000):
action = select_action(state)
(state, reward, done, _) = env.step(action[0, 0])
if args.render:
env.render()
model.rewards.append(reward)
if done:
break
running_reward = running_reward * 0.99 + t * 0.01
finish_episode()
if i_episode % args.log_interval == 0:
print('Episode {}\tLast length: {:5d}\tAverage length: {:.2f}'.format(i_episode, t, running_reward))
if running_reward > 200:
print('Solved! Running reward is now {} and the last episode runs to {} time steps!'.format(running_reward, t))
break
| [
"gmatute@berkeley.edu"
] | gmatute@berkeley.edu |
988567162ac57924714584f1b7ec11851c83a23f | c86a7673c4258f78b9a8a20495b53f0a0ec965eb | /selenium_pyPOM/test_dou_pypom.py | 6901052233358dba2e82b0414ec0bbd059d80ede | [] | no_license | Brombult/Test_Automation | 5bb96a3dbedcf9201d0befb36c26a3d7a029e224 | 40f956f6e5cf48a8ba17e7837a316db70b9f981a | refs/heads/master | 2020-04-27T07:05:00.276716 | 2019-04-20T08:54:59 | 2019-04-20T08:54:59 | 174,126,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 960 | py | from selenium import webdriver
import pytest
from page import DouMainPage, SearchResultPage
base_url = 'https://dou.ua/'
COMPANY_NAME = 'DOU'
INVALID_NAME = '123efdvdfbgfdbfg'
driver = webdriver.Chrome()
@pytest.fixture(scope='module', autouse=True)
def setup_and_teardown():
driver.implicitly_wait(5)
driver.get('https://dou.ua/')
yield
driver.quit()
def test_search():
"""Searches for company name, then asserts that name is present in search results """
main_page = DouMainPage(driver, base_url).open()
main_page.search_for(COMPANY_NAME)
result_page = SearchResultPage(driver)
result_page.assert_result_found(COMPANY_NAME)
def test_invalid_search():
"""Searches for company that don't exists, that asserts that nothing was found"""
main_page = DouMainPage(driver, base_url).open()
main_page.search_for(INVALID_NAME)
result_page = SearchResultPage(driver)
result_page.assert_no_result_found()
| [
"rickrolled5997@gmail.com"
] | rickrolled5997@gmail.com |
1ad194458a4f64f614b9ac861a9e7623c7eaa041 | 29345337bf86edc938f3b5652702d551bfc3f11a | /python/src/main/python/pyalink/alink/tests/examples/from_docs/test_alsusersperitemrecommbatchop.py | 21104be85c65e675c3b2d8099853b1de16f0fc5b | [
"Apache-2.0"
] | permissive | vacaly/Alink | 32b71ac4572ae3509d343e3d1ff31a4da2321b6d | edb543ee05260a1dd314b11384d918fa1622d9c1 | refs/heads/master | 2023-07-21T03:29:07.612507 | 2023-07-12T12:41:31 | 2023-07-12T12:41:31 | 283,079,072 | 0 | 0 | Apache-2.0 | 2020-07-28T02:46:14 | 2020-07-28T02:46:13 | null | UTF-8 | Python | false | false | 920 | py | import unittest
from pyalink.alink import *
import numpy as np
import pandas as pd
class TestAlsUsersPerItemRecommBatchOp(unittest.TestCase):
def test_alsusersperitemrecommbatchop(self):
df_data = pd.DataFrame([
[1, 1, 0.6],
[2, 2, 0.8],
[2, 3, 0.6],
[4, 1, 0.6],
[4, 2, 0.3],
[4, 3, 0.4],
])
data = BatchOperator.fromDataframe(df_data, schemaStr='user bigint, item bigint, rating double')
als = AlsTrainBatchOp().setUserCol("user").setItemCol("item").setRateCol("rating") \
.setNumIter(10).setRank(10).setLambda(0.01)
model = als.linkFrom(data)
predictor = AlsUsersPerItemRecommBatchOp() \
.setItemCol("item").setRecommCol("rec").setK(1).setReservedCols(["item"])
predictor.linkFrom(model, data).print();
pass | [
"shaomeng.wang.w@gmail.com"
] | shaomeng.wang.w@gmail.com |
ab3b457e28589da1217469ef6bbcb0b189d0a23f | 69f05b01112c94491c0185b27738f3b4b5906ca2 | /libnd4j/include/graph/generated/nd4j/graph/UIOp.py | c6207f9624c6c34d361ebb6f6b298f65c089048b | [
"Apache-2.0",
"MIT",
"BSD-3-Clause"
] | permissive | saudet/deeplearning4j | 2b55becb2e8b0881d33b2dacd28489d4f23651b4 | 21254a4d5a3584a480fce71fd678e021edc1498f | refs/heads/master | 2020-05-31T23:47:31.786374 | 2019-06-04T11:33:52 | 2019-06-04T11:33:52 | 190,545,263 | 2 | 0 | Apache-2.0 | 2019-06-06T08:32:50 | 2019-06-06T08:32:49 | null | UTF-8 | Python | false | false | 3,752 | py | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: graph
import flatbuffers
class UIOp(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsUIOp(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = UIOp()
x.Init(buf, n + offset)
return x
# UIOp
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# UIOp
def Name(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# UIOp
def OpName(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# UIOp
def Inputs(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
a = self._tab.Vector(o)
return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return ""
# UIOp
def InputsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.VectorLen(o)
return 0
# UIOp
def Outputs(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
a = self._tab.Vector(o)
return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return ""
# UIOp
def OutputsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.VectorLen(o)
return 0
# UIOp
def ControlDeps(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
a = self._tab.Vector(o)
return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return ""
# UIOp
def ControlDepsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return self._tab.VectorLen(o)
return 0
# UIOp
def UiLabelExtra(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
def UIOpStart(builder): builder.StartObject(6)
def UIOpAddName(builder, name): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
def UIOpAddOpName(builder, opName): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(opName), 0)
def UIOpAddInputs(builder, inputs): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0)
def UIOpStartInputsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def UIOpAddOutputs(builder, outputs): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(outputs), 0)
def UIOpStartOutputsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def UIOpAddControlDeps(builder, controlDeps): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(controlDeps), 0)
def UIOpStartControlDepsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def UIOpAddUiLabelExtra(builder, uiLabelExtra): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(uiLabelExtra), 0)
def UIOpEnd(builder): return builder.EndObject()
| [
"noreply@github.com"
] | noreply@github.com |
c82b626196c32cb53a26ce7409d33e52aeb8817f | d82efe8ea61a9d391e1444af55bb35c1b95ae7b0 | /mainapp/__init__.py | f0be2b2fac1640b58f67d8e2a5d515d8f769813c | [] | no_license | xulongyuan203/leargit | ecbdb46b54d95d6c569ce5e3edb234bff1125e89 | 40b70ee4d2512d1e5827a9558483bc8c6b4ea761 | refs/heads/main | 2023-09-04T07:31:06.858491 | 2021-10-17T03:57:35 | 2021-10-17T03:57:35 | 346,919,815 | 0 | 0 | null | 2021-10-17T03:56:18 | 2021-03-12T03:12:12 | Python | UTF-8 | Python | false | false | 100 | py | from flask import Flask
import settings
app = Flask(__name__)
app.config.from_object(settings.Dev) | [
"email@example.com"
] | email@example.com |
037a54ca983b23a17ffe063f910d6ead4fb49b1f | a5479f34e87b046d12fdc020bc3652f8b4498484 | /scrapy2019/spiders/ASTRO2.py | 2de6840a0de131ca820124e40b3c76f8764fe578 | [] | no_license | pavankumar-k/Scrapy2019 | 4a8226c79455234c73d9ac33c72b57e5bdfc8d18 | 7e82f8ec0ac467712eaae137c1d42871959a2ef8 | refs/heads/master | 2020-05-04T20:32:12.728899 | 2019-04-04T07:10:47 | 2019-04-04T07:10:47 | 179,441,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,605 | py | # -*- coding: utf-8 -*-
import scrapy
import logging
class AstroSpider(scrapy.Spider):
name = 'ASTRO2'
url = 'https://www.redjournal.org'
#
start_urls = ['https://www.redjournal.org/issue/S0360-3016(16)X0010-7']
def parse(self, response):
lis = response.css('h2.title > a::attr(href)').extract()
logging.info('urlsLENGTH:'+str(len(lis)))
for l in lis:
yield response.follow(self.url+l,callback = self.product)
t = response.css('li.next > a::attr(href)').extract_first()
if t is not None:
yield response.follow(self.url+t,callback = self.parse)
def product(self,response):
title = response.css("h1.articleTitle::text").extract_first()
doi= ''.join(response.css("div.doi ::text").extract())
text = ''.join(response.css("div.body > div.content ::text").extract())
disc = ''.join(response.css("div.footnotes ::text").extract())
alis = response.css('div.author')
affli = ''.join(response.css('div.affiliation ::text').extract())
for a in alis:
auth = a.css('a.openAuthorLayer.layerTrigger ::text').extract_first()
aff = ';'.join(a.css('ul.affiliations ::text').extract())
if len(aff)==0:
aff = affli
if aff is None:
aff = ''
yield{
'url':response.url,
'author':auth,
'affli':aff,
'title':title,
'doi':doi,
'text':text,
'disc':disc}
| [
"noreply@github.com"
] | noreply@github.com |
945e1f553c53f149a5257df89cfd0aae397e9d11 | 9430f005c3de2a62962a1351f9d6d4a57264e2d4 | /composeTiendita/dockerTiendita/tiendita/tiendita/wsgi.py | 19a2123f71a6c0f34a69e8dc0f92b45e797ecb20 | [] | no_license | xl666/tiendita | 6555f5e480a73fa16e28ef5d38ee033a76b84af9 | b067fb45a81c56fe84a3ca7ca4176676ac7e098e | refs/heads/master | 2020-04-06T20:37:26.079522 | 2018-11-22T14:57:34 | 2018-11-22T14:57:34 | 157,777,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | """
WSGI config for tiendita project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tiendita.settings")
application = get_wsgi_application()
| [
"xavier120@hotmail.com"
] | xavier120@hotmail.com |
4331fac607aeb690fffb8d9b6f614658cc0f4fe0 | e678642002db4882cb4a6641aff40c1b7f1f4348 | /chapter3/recommender3.py | 37bfb16c88474af1551a41b35685f5439f20976d | [] | no_license | eks5115/DatamingGuideBook-Codes | 108260cc09656c59b046b6bb6b54ddbd0040e6da | 0048ea13e8e60afe61c791fd2f25d4ee87167c16 | refs/heads/master | 2020-03-24T23:14:39.112447 | 2018-08-01T08:21:19 | 2018-08-01T08:21:19 | 143,125,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,744 | py | import codecs
from math import sqrt
users2 = {"Amy": {"Taylor Swift": 4, "PSY": 3, "Whitney Houston": 4},
"Ben": {"Taylor Swift": 5, "PSY": 2},
"Clara": {"PSY": 3.5, "Whitney Houston": 4},
"Daisy": {"Taylor Swift": 5, "Whitney Houston": 3}}
users = {"Angelica": {"Blues Traveler": 3.5, "Broken Bells": 2.0,
"Norah Jones": 4.5, "Phoenix": 5.0,
"Slightly Stoopid": 1.5, "The Strokes": 2.5,
"Vampire Weekend": 2.0},
"Bill": {"Blues Traveler": 2.0, "Broken Bells": 3.5,
"Deadmau5": 4.0, "Phoenix": 2.0,
"Slightly Stoopid": 3.5, "Vampire Weekend": 3.0},
"Chan": {"Blues Traveler": 5.0, "Broken Bells": 1.0,
"Deadmau5": 1.0, "Norah Jones": 3.0,
"Phoenix": 5, "Slightly Stoopid": 1.0},
"Dan": {"Blues Traveler": 3.0, "Broken Bells": 4.0,
"Deadmau5": 4.5, "Phoenix": 3.0,
"Slightly Stoopid": 4.5, "The Strokes": 4.0,
"Vampire Weekend": 2.0},
"Hailey": {"Broken Bells": 4.0, "Deadmau5": 1.0,
"Norah Jones": 4.0, "The Strokes": 4.0,
"Vampire Weekend": 1.0},
"Jordyn": {"Broken Bells": 4.5, "Deadmau5": 4.0,
"Norah Jones": 5.0, "Phoenix": 5.0,
"Slightly Stoopid": 4.5, "The Strokes": 4.0,
"Vampire Weekend": 4.0},
"Sam": {"Blues Traveler": 5.0, "Broken Bells": 2.0,
"Norah Jones": 3.0, "Phoenix": 5.0,
"Slightly Stoopid": 4.0, "The Strokes": 5.0},
"Veronica": {"Blues Traveler": 3.0, "Norah Jones": 5.0,
"Phoenix": 4.0, "Slightly Stoopid": 2.5,
"The Strokes": 3.0}
}
class recommender:
def __init__(self, data, k=1, metric='pearson', n=5):
""" initialize recommender
currently, if data is dictionary the recommender is initialized
to it.
For all other data types of data, no initialization occurs
k is the k value for k nearest neighbor
metric is which distance formula to use
n is the maximum number of recommendations to make"""
self.k = k
self.n = n
self.username2id = {}
self.userid2name = {}
self.productid2name = {}
#
# The following two variables are used for Slope One
#
self.frequencies = {}
self.deviations = {}
# for some reason I want to save the name of the metric
self.metric = metric
if self.metric == 'pearson':
self.fn = self.pearson
#
# if data is dictionary set recommender data to it
#
if type(data).__name__ == 'dict':
self.data = data
def convertProductID2name(self, id):
"""Given product id number return product name"""
if id in self.productid2name:
return self.productid2name[id]
else:
return id
def userRatings(self, id, n):
"""Return n top ratings for user with id"""
print("Ratings for " + self.userid2name[id])
ratings = self.data[id]
print(len(ratings))
ratings = list(ratings.items())[:n]
ratings = [(self.convertProductID2name(k), v)
for (k, v) in ratings]
# finally sort and return
ratings.sort(key=lambda artistTuple: artistTuple[1],
reverse=True)
for rating in ratings:
print("%s\t%i" % (rating[0], rating[1]))
def showUserTopItems(self, user, n):
""" show top n items for user"""
items = list(self.data[user].items())
items.sort(key=lambda itemTuple: itemTuple[1], reverse=True)
for i in range(n):
print("%s\t%i" % (self.convertProductID2name(items[i][0]),
items[i][1]))
def loadMovieLens(self, path=''):
self.data = {}
#
# first load movie ratings
#
i = 0
#
# First load book ratings into self.data
#
# f = codecs.open(path + "u.data", 'r', 'utf8')
f = codecs.open(path + "u.data", 'r', 'ascii')
# f = open(path + "u.data")
for line in f:
i += 1
# separate line into fields
fields = line.split('\t')
user = fields[0]
movie = fields[1]
rating = int(fields[2].strip().strip('"'))
if user in self.data:
currentRatings = self.data[user]
else:
currentRatings = {}
currentRatings[movie] = rating
self.data[user] = currentRatings
f.close()
#
# Now load movie into self.productid2name
# the file u.item contains movie id, title, release date among
# other fields
#
# f = codecs.open(path + "u.item", 'r', 'utf8')
f = codecs.open(path + "u.item", 'r', 'iso8859-1', 'ignore')
# f = open(path + "u.item")
for line in f:
i += 1
# separate line into fields
fields = line.split('|')
mid = fields[0].strip()
title = fields[1].strip()
self.productid2name[mid] = title
f.close()
#
# Now load user info into both self.userid2name
# and self.username2id
#
# f = codecs.open(path + "u.user", 'r', 'utf8')
f = open(path + "u.user")
for line in f:
i += 1
fields = line.split('|')
userid = fields[0].strip('"')
self.userid2name[userid] = line
self.username2id[line] = userid
f.close()
print(i)
def loadBookDB(self, path=''):
"""loads the BX book dataset. Path is where the BX files are
located"""
self.data = {}
i = 0
#
# First load book ratings into self.data
#
f = codecs.open(path + "u.data", 'r', 'utf8')
for line in f:
i += 1
# separate line into fields
fields = line.split(';')
user = fields[0].strip('"')
book = fields[1].strip('"')
rating = int(fields[2].strip().strip('"'))
if rating > 5:
print("EXCEEDING ", rating)
if user in self.data:
currentRatings = self.data[user]
else:
currentRatings = {}
currentRatings[book] = rating
self.data[user] = currentRatings
f.close()
#
# Now load books into self.productid2name
# Books contains isbn, title, and author among other fields
#
f = codecs.open(path + "BX-Books.csv", 'r', 'utf8')
for line in f:
i += 1
# separate line into fields
fields = line.split(';')
isbn = fields[0].strip('"')
title = fields[1].strip('"')
author = fields[2].strip().strip('"')
title = title + ' by ' + author
self.productid2name[isbn] = title
f.close()
#
# Now load user info into both self.userid2name and
# self.username2id
#
f = codecs.open(path + "BX-Users.csv", 'r', 'utf8')
for line in f:
i += 1
# separate line into fields
fields = line.split(';')
userid = fields[0].strip('"')
location = fields[1].strip('"')
if len(fields) > 3:
age = fields[2].strip().strip('"')
else:
age = 'NULL'
if age != 'NULL':
value = location + ' (age: ' + age + ')'
else:
value = location
self.userid2name[userid] = value
self.username2id[location] = userid
f.close()
print(i)
def computeDeviations(self):
# for each person in the data:
# get their ratings
for ratings in self.data.values():
# for each item & rating in that set of ratings:
for (item, rating) in ratings.items():
self.frequencies.setdefault(item, {})
self.deviations.setdefault(item, {})
# for each item2 & rating2 in that set of ratings:
for (item2, rating2) in ratings.items():
if item != item2:
# add the difference between the ratings to our
# computation
self.frequencies[item].setdefault(item2, 0)
self.deviations[item].setdefault(item2, 0.0)
self.frequencies[item][item2] += 1
self.deviations[item][item2] += rating - rating2
for (item, ratings) in self.deviations.items():
for item2 in ratings:
ratings[item2] /= self.frequencies[item][item2]
def slopeOneRecommendations(self, userRatings):
recommendations = {}
frequencies = {}
# for every item and rating in the user's recommendations
for (userItem, userRating) in userRatings.items():
# for every item in our dataset that the user didn't rate
for (diffItem, diffRatings) in self.deviations.items():
if diffItem not in userRatings and \
userItem in self.deviations[diffItem]:
freq = self.frequencies[diffItem][userItem]
recommendations.setdefault(diffItem, 0.0)
frequencies.setdefault(diffItem, 0)
# add to the running sum representing the numerator
# of the formula
recommendations[diffItem] += (diffRatings[userItem] +
userRating) * freq
# keep a running sum of the frequency of diffitem
frequencies[diffItem] += freq
recommendations = [(self.convertProductID2name(k),
v / frequencies[k])
for (k, v) in recommendations.items()]
# finally sort and return
recommendations.sort(key=lambda artistTuple: artistTuple[1],
reverse=True)
# I am only going to return the first 50 recommendations
return recommendations[:50]
def pearson(self, rating1, rating2):
sum_xy = 0
sum_x = 0
sum_y = 0
sum_x2 = 0
sum_y2 = 0
n = 0
for key in rating1:
if key in rating2:
n += 1
x = rating1[key]
y = rating2[key]
sum_xy += x * y
sum_x += x
sum_y += y
sum_x2 += pow(x, 2)
sum_y2 += pow(y, 2)
if n == 0:
return 0
# now compute denominator
denominator = sqrt(sum_x2 - pow(sum_x, 2) / n) * \
sqrt(sum_y2 - pow(sum_y, 2) / n)
if denominator == 0:
return 0
else:
return (sum_xy - (sum_x * sum_y) / n) / denominator
def computeNearestNeighbor(self, username):
"""creates a sorted list of users based on their distance
to username"""
distances = []
for instance in self.data:
if instance != username:
distance = self.fn(self.data[username],
self.data[instance])
distances.append((instance, distance))
# sort based on distance -- closest first
distances.sort(key=lambda artistTuple: artistTuple[1],
reverse=True)
return distances
def recommend(self, user):
"""Give list of recommendations"""
recommendations = {}
# first get list of users ordered by nearness
nearest = self.computeNearestNeighbor(user)
#
# now get the ratings for the user
#
userRatings = self.data[user]
#
# determine the total distance
totalDistance = 0.0
for i in range(self.k):
totalDistance += nearest[i][1]
# now iterate through the k nearest neighbors
# accumulating their ratings
for i in range(self.k):
# compute slice of pie
weight = nearest[i][1] / totalDistance
# get the name of the person
name = nearest[i][0]
# get the ratings for this person
neighborRatings = self.data[name]
# get the name of the person
# now find bands neighbor rated that user didn't
for artist in neighborRatings:
if not artist in userRatings:
if artist not in recommendations:
recommendations[artist] = neighborRatings[artist] * \
weight
else:
recommendations[artist] = recommendations[artist] + \
neighborRatings[artist] * \
weight
# now make list from dictionary and only get the first n items
recommendations = list(recommendations.items())[:self.n]
recommendations = [(self.convertProductID2name(k), v)
for (k, v) in recommendations]
# finally sort and return
recommendations.sort(key=lambda artistTuple: artistTuple[1],
reverse=True)
return recommendations
| [
"eks5115@139.com"
] | eks5115@139.com |
61937d51bfde6945241775ba8c469abe3cd44364 | 19d9fe02d86e70d9106eb6e2f1f4ec4dc1999fc5 | /src/estimations/routes/estimations.py | 1e49cbca991954bc8975efaf41a3b2a8406cfb08 | [] | no_license | arnulfojr/scrum-estimations-api | 38f30dfcc31d8f0708e127b5187b8ff8b13862d1 | 5b9f645d99035982fd0affda42ec99485f0df46a | refs/heads/master | 2023-05-10T23:03:23.815010 | 2021-05-26T13:13:57 | 2021-05-26T13:13:57 | 174,892,482 | 0 | 0 | null | 2023-05-01T21:14:21 | 2019-03-10T23:12:51 | Python | UTF-8 | Python | false | false | 9,021 | py | from http import HTTPStatus
from typing import Tuple, Union
from cerberus import Validator
from flask import jsonify, make_response, request
from estimations import schemas
from users.models import User
from ..app import estimations_app
from ..exc import EmptyIdentifier, InvalidRequest, ValueNotFound
from ..models import (
Estimation,
Sequence,
Session,
Task,
Value,
)
@estimations_app.route('/sessions/<session_id>/tasks/<task_id>/estimations', methods=['GET'])
def get_estimations(session_id: str, task_id: str):
"""Get the tasks' estimations.
---
tags:
- Tasks
- Estimations
parameters:
- in: path
name: session_id
type: string
format: uuid
required: True
- in: path
name: task_id
type: string
required: True
definitions:
Estimations:
type: array
items:
$ref: '#/definitions/Estimation'
Estimation:
type: object
properties:
value:
$ref: '#/definitions/Value'
task:
$ref: '#/definitions/TaskWithoutSession'
user:
$ref: '#/definitions/UserWithoutOrganization'
created_at:
type: string
format: datetime
responses:
200:
description: Task estimations
schema:
$ref: '#/definitions/Estimations'
404:
description: The session or task were not found
schema:
$ref: '#/definitions/NotFound'
"""
session, task = get_or_fail(session_id, task_id)
payload = [estimation.dump(with_task=False) for estimation in task.estimations]
return make_response(jsonify(payload), HTTPStatus.OK)
@estimations_app.route('/sessions/<session_id>/tasks/<task_id>/estimations/', methods=['PUT'])
def estimate(session_id: str, task_id: str):
"""Estimate a task.
---
tags:
- Estimations
- Tasks
parameters:
- in: path
name: session_id
type: string
format: uuid
required: True
- in: path
name: task_id
type: string
required: True
- in: body
name: body
required: True
schema:
type: object
properties:
value:
type: object
description: 'Only one of the attributes is required.
If all given the first priority is the id, then the value and the name at the end.'
properties:
id:
type: string
format: uuid
description: Provide only one of these
name:
type: string
example: Coffee Break
description: Provide only one of these
value:
type: number
format: float
example: 2.0
description: Provide only one of these
user:
type: object
properties:
id:
type: string
format: uuid
responses:
200:
description: The estimation was updated
schema:
$ref: '#/definitions/Estimation'
201:
description: The estimation was created
schema:
$ref: '#/definitions/Estimation'
400:
description: Bad request input
schema:
$ref: '#/definitions/ValidationErrors'
404:
description: The session or task were not found
schema:
$ref: '#/definitions/NotFound'
"""
session, task = get_or_fail(session_id, task_id)
payload = request.get_json()
validator = Validator()
if not validator.validate(payload, schemas.CREATE_ESTIMATION):
return make_response(jsonify(validator.errors), HTTPStatus.BAD_REQUEST)
# FIXME: move the user to the authentication layer
user_id = payload['user']['id']
user = User.lookup(user_id)
if not user.belongs_to_organization(session.organization):
return make_response(jsonify({
'message': f'This user({user_id}) seems to not be part of the organization\'s session',
}), HTTPStatus.UNAUTHORIZED)
value_payload = payload['value']
sequence: Sequence = session.sequence
if 'id' in value_payload:
value = Value.lookup(value_payload['id'])
elif 'value' in value_payload:
value = sequence.get_value_for_numeric_value(value_payload['value'])
elif 'name' in value_payload:
value = sequence.get_value_for_value_name(value_payload['name'])
else:
value = None
if not value:
raise ValueNotFound('The Value given did not contain a value from the sequence')
# did the user already estimated?
estimation = Estimation.lookup(task, user)
if not estimation:
estimation = Estimation(value=value, user=user, task=task)
estimation.save(force_insert=True)
http_status_code = HTTPStatus.CREATED
else:
estimation.value = value
estimation.save()
http_status_code = HTTPStatus.OK
return make_response(
jsonify(estimation.dump()),
http_status_code,
)
@estimations_app.route('/sessions/<session_id>/tasks/<task_id>/summary', methods=['GET'])
def get_task_summary(session_id: str, task_id: str):
"""Get the summary of the task.
---
tags:
- Tasks
- Estimations
parameters:
- in: path
name: session_id
type: string
format: uuid
required: True
- in: path
name: task_id
type: string
required: True
definitions:
RuntimeSummary:
type: object
properties:
mean:
type: number
format: float
example: 2.5
description: The task's mean
everybody_estimated:
type: boolean
description: true if all the session members had estimated
consensus_met:
type: boolean
description: If everybody voted for the same value
closest_value:
$ref: '#/definitions/Value'
task:
$ref: '#/definitions/TaskWithoutSession'
has_non_numeric_estimations:
type: boolean
description: If somebody voted for a value that does not have a numeric representation
non_numeric_estimations:
type: array
items:
$ref: '#/definitions/Estimation'
responses:
200:
description: Get the task's summary
schema:
$ref: '#/definitions/RuntimeSummary'
404:
description: Task or session were not found
schema:
$ref: '#/definitions/NotFound'
"""
session, task = get_or_fail(session_id, task_id)
mean_estimation = task.mean_estimation
everybody_estimated = task.is_estimated_by_all_members
consensus_met = task.consensus_met and everybody_estimated
closest_value = session.sequence.closest_possible_value(mean_estimation)
non_numeric_estimations = [estimation.dump(with_task=False)
for estimation in task.non_numeric_estimations]
return make_response(jsonify({
'mean': float(mean_estimation),
'everybody_estimated': everybody_estimated,
'consensus_met': consensus_met,
'closest_value': closest_value.dump() if closest_value else 0,
'task': task.dump(with_session=False, with_estimations=True),
'has_non_numeric_estimations': task.has_non_numeric_estimations(),
'non_numeric_estimations': non_numeric_estimations,
}), HTTPStatus.OK)
def get_or_fail(session_id: Union[str, None], task_id: Union[str, None]) -> Tuple[Session, Task]:
    """Resolve a (session, task) pair from their raw identifiers.

    Raises EmptyIdentifier when either identifier is missing/empty, and
    InvalidRequest when the task cannot be resolved within the session.
    """
    for identifier, message in (
            (session_id, 'Please provide a session identifier'),
            (task_id, 'Please provide a valid task identifier')):
        if not identifier:
            raise EmptyIdentifier(message)
    session = Session.lookup(session_id)
    try:
        task = Task.lookup(task_id, session=session)
    except (TypeError, ValueError) as lookup_error:
        raise InvalidRequest('We could not infer the Task from the given input...') from lookup_error
    return session, task
| [
"arnulfojr94@gmail.com"
] | arnulfojr94@gmail.com |
77a600b8a161271244c70a072a2ad68e0c19c0f9 | 3712a929d1124f514ea7af1ac0d4a1de03bb6773 | /开班笔记/个人项目/weather/venv/Scripts/pip3-script.py | a6ac6cc88412f3e6968662a23c89959c23f69bbe | [] | no_license | jiyabing/learning | abd82aa3fd37310b4a98b11ea802c5b0e37b7ad9 | 6059006b0f86aee9a74cfc116d2284eb44173f41 | refs/heads/master | 2020-04-02T20:47:33.025331 | 2018-10-26T05:46:10 | 2018-10-26T05:46:10 | 154,779,387 | 0 | 0 | null | null | null | null | GB18030 | Python | false | false | 446 | py | #!E:\学习文件\python学习资料\开班笔记\个人项目\weather\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
# Setuptools-generated console-script wrapper: pin the pip 10.0.1
# distribution before resolving anything from it.
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the "-script.py(w)" / ".exe" suffix so pip sees its canonical name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        # Resolve and invoke the 'pip3' console entry point; its return
        # value becomes the process exit status.
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
    )
| [
"yabing_ji@163.com"
] | yabing_ji@163.com |
c7ebb944709b1acfb824b0d6ecea1065b462c1a3 | 79588ca7cc97b19940101b69e82e562921f9c254 | /Lab/bibtex.py | 5a3af0cccbebb0729d0b05df3dee0b43a53694fd | [] | no_license | Yifan-Liu-95/SoftwareTestingPandas | 1c40761da518ec5a639def28342b3c0af57b2adc | 3030fb6ffe8e26d5491fd0156e0c79c50be6cfde | refs/heads/main | 2023-05-01T11:16:49.142108 | 2020-12-16T09:57:06 | 2020-12-16T09:57:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | def extract_author(str):
    # Try "Surname, Given" form first: split on commas.
    commaSplit = str.split(',')
    if len(commaSplit) == 1:
        # No comma: treat as "Given ... Surname" (space-separated).
        spaceSplit = str.split(' ')
        if len(spaceSplit) == 1:
            # Single token (mononym): surname only, empty given names.
            result = (str,'')
        else:
            # Last token is the surname; everything before it is given names.
            result = (spaceSplit[len(spaceSplit)-1],' '.join(spaceSplit[0:(len(spaceSplit)-1)]))
    else:
        # "Surname, Given" — only the first two comma-separated segments are
        # used; any extra parts (e.g. ", Jr.") are silently dropped.
        result = (commaSplit[0], commaSplit[1].strip())
    return result
def extract_authors(str):
    """Split a BibTeX author field into (surname, given names) tuples.

    BibTeX separates multiple authors with the word "and" surrounded by
    spaces.  The original code split on the bare substring 'and', which
    corrupted any name containing it (e.g. "Anderson", "Alexander").
    Splitting on ' and ' fixes that while preserving the multi-author case.
    """
    andSplit = str.split(' and ')
    result = len(andSplit)*[None]
    for i in range(len(andSplit)):
        result[i] = extract_author(andSplit[i].strip())
    return result
"36402231+dannemarre@users.noreply.github.com"
] | 36402231+dannemarre@users.noreply.github.com |
4ad7c781ebb466cdf938c49be828e1efbcf43234 | e8da76ed3458cc3589c6e02a7088550c799651ac | /8lesson/area_circle.py | 7bec204476891e733b27455501d381c5c4ff08ab | [] | no_license | leilahassan/introduction-python-leila | 9a8ff8d2d3818c5a3beec15f3a6da9492480faa5 | fa86b12e7ff6d2804854c1a17af20dc7a28e4df7 | refs/heads/master | 2021-01-01T18:13:19.670649 | 2017-07-25T07:36:45 | 2017-07-25T07:36:45 | 98,277,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | import math
def area_circle(radius):
    """Return the area of a circle given its radius.

    Accepts a number or a numeric string.  Uses float() so fractional
    input such as "2.5" works; the original int() raised ValueError on
    "2.5" and silently truncated float radii.
    """
    radius = float(radius)
    area = math.pi * radius * radius
    return area
# Read the radius from the user and compute the area for the report below.
user_radius = input("What is the radius of the circle?:")
calculated_area = area_circle(user_radius)
print("The area of a circle with radius {} is {}".format(user_radius,calculated_area)) | [
"hassanleila333@gmail.com"
] | hassanleila333@gmail.com |
410d7498c362b982e00c1371ea8e80ffedc787f5 | 2ecfe0e10d10513917e4f2770e0a56075404c5d8 | /oldnumba/tests/test_exceptions.py | 80cbe4e56325c6d8248dd39bfb2723c2511aeeb1 | [
"BSD-2-Clause"
] | permissive | laserson/numba | 84ab7615ea0177b496a63e2a86319f0b12992cd2 | 35546517b27764a9120f6dfcd82eba7f4dd858cb | refs/heads/master | 2020-05-20T23:13:23.011971 | 2014-12-08T20:16:20 | 2014-12-08T20:16:20 | 16,754,385 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | """
>>> boom()
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: 'boom'
>>> boom2()
Traceback (most recent call last):
...
TypeError: 'object' object is not callable
>>> boom3()
Traceback (most recent call last):
...
TypeError: 'object' object is not callable
"""
import sys
import ctypes
from numba import *
import numpy as np
# Each function below is numba-compiled; the module docstring's doctests
# assert that Python exceptions raised inside jitted code propagate out
# with the expected type and message.
@autojit(backend='ast')
def boom():
    # int('boom') raises ValueError inside AST-jitted code.
    return int('boom')
@jit(int_())
def boom2():
    # Calling a plain object() raises TypeError.
    return object()('boom')
@jit(complex128())
def boom3():
    # Same TypeError, but through a complex128-returning signature.
    return object()('boom')
if __name__ == "__main__":
    # Run the module doctests via numba's test harness.
    import numba
    numba.testing.testmod()
| [
"markflorisson88@gmail.com"
] | markflorisson88@gmail.com |
8e75ad0ab217c0a6003d5478916d0b1f69fab144 | f455a3bbcf5fbdd7dd684e49b73cc99733da1559 | /datacenter/active_passcards_view.py | aeadd06bb7c30ef5ad574eae10f5089b790c6fb1 | [
"MIT"
] | permissive | KirillYabl/django-orm-watching-storage | c08541d3b7d68af988ce0ab94b9f61b90fe01da4 | f263e37178d027de173794070a4ae507a80fa258 | refs/heads/master | 2022-09-05T00:16:29.870889 | 2020-05-31T08:49:49 | 2020-05-31T08:49:49 | 268,242,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | from datacenter.models import Passcard
from datacenter.models import Visit
from django.shortcuts import render
def active_passcards_view(request):
    """Render the page listing all passcards that are currently active."""
    # Only the active passcards are needed; the unused query fetching every
    # passcard and the redundant trailing .all() were removed.
    active_passcards = Passcard.objects.filter(is_active=True)
    context = {
        "active_passcards": active_passcards,  # people with active passcards
    }
    return render(request, 'active_passcards.html', context)
| [
"kirill-yablunovskii@mail.ru"
] | kirill-yablunovskii@mail.ru |
14337940b0714b1dadc8e28c06a38350bf389f9d | d0cb58e1658d4b5b88bdc07e497dc8092707ae02 | /2020/09September/26PandasDataSeries02.py | fb8a0a14ecc6a675d6bea128b69cab3fbaa6b025 | [] | no_license | June-fu/python365 | 27f9b753d38ade549d59aa8f2d8bda0fb8b1e20c | 242033a4b644a7566fbfa4dba9b60f60aa31fe91 | refs/heads/master | 2021-07-02T21:42:28.454091 | 2021-05-04T15:08:44 | 2021-05-04T15:08:44 | 233,629,713 | 0 | 0 | null | 2020-01-13T15:52:58 | 2020-01-13T15:36:53 | null | UTF-8 | Python | false | false | 420 | py | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
'''
# @ Author: june-fu
# @ Create Time: 2020-11-20 23:22:57
# @ Modified by: june-fu
# @ Modified time: 2020-11-20 23:23:25
# @ Description:
# 2.Write a Pandas program to convert a Panda module Series to Python list and it's type
'''
import pandas as pd
import numpy as np
# Demo: build a pandas Series holding 0..9, print it, then convert it back
# to a builtin Python list.
values = np.arange(10)
s = pd.Series(values)
print(s)
list1 = s.tolist()
print(list1)
print(type(list1)) | [
"june@yeah.net"
] | june@yeah.net |
f6839df226465c2552f3a1069ffe64e78af771a0 | 57a9c836d8a933b5e6680dad4711676d0693a5e5 | /src/locationAPI.py | 38326db19bf750e2d312f6b0efb05c02a8afb94e | [] | no_license | mouseProgrammouse/neighborhood_map | c382588f661a0de31adc0efc3a4dcbab5cab3d9c | 1a8d0a0ca114955f1f10682071b5fabf4920de6a | refs/heads/master | 2020-03-21T00:40:18.616868 | 2018-06-27T13:24:55 | 2018-06-27T13:24:55 | 137,904,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,154 | py | import json
import os
from flask import Flask
from yelpapi import YelpAPI
from flask import request
from flask.json import jsonify
from flask_cors import CORS
# SECURITY(review): the Yelp API key is hard-coded in source — move it to an
# environment variable / secrets store and rotate the exposed key.
yelp_api = YelpAPI("7LgYxOlDAZm3sQI3jcyxuDnX2KCI1apQAMZkgB1qDlIpjYQgCr-yZ2q1Abeu7C5dE8kxCrPtjbTY_p29v2b2fosjP8evmheO4hDuEoHEkOheEqBBXX5t-Bp8ogYtW3Yx")
# Flask app with CORS enabled on all routes.
app = Flask(__name__)
CORS(app)
def read_json(filename):
    """Load and parse a JSON file located next to this module."""
    here = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(here, filename), 'r') as handle:
        return json.load(handle)
@app.route("/search")
def search_yelp():
term = request.args.get('term')
lat = request.args.get('lat')
lng = request.args.get('lng')
loc = request.args.get('loc')
radius = request.args.get('radius')
return jsonify(yelp_api.search_query(term=term, latitude=lat, longitude=lng, location=loc, radius=radius))
@app.route("/get_locations")
def get_locations():
return jsonify(read_json("backendLocations.json"))
@app.route("/get_details")
def get_details():
business_id = request.args.get('id')
return jsonify(yelp_api.business_query(id=business_id))
# Development server only — bind to localhost; use a WSGI server in production.
if __name__ == '__main__':
    app.run(host='127.0.0.1', port=5959)
| [
"mouse.programmouse@gmail.com"
] | mouse.programmouse@gmail.com |
16e1c2a3227a5d0baee604734564e9d99490428f | bc441bb06b8948288f110af63feda4e798f30225 | /monitor_sdk/model/notify/operation_log_with_meta_pb2.pyi | 4eb4b81a00e7c4fbc85a45c47c2b95b1eeda8653 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,093 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from monitor_sdk.model.notify.operation_log_pb2 import (
OperationLog as monitor_sdk___model___notify___operation_log_pb2___OperationLog,
)
from typing import (
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
# Aliases freezing the builtin types so the generated names below cannot
# shadow them (mypy-protobuf naming convention).
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
    # Python 2 only: buffer/unicode exist there.
    builtin___buffer = buffer
    builtin___unicode = unicode
# Generated stub for the OperationLogWithMeta protobuf message:
# system/topic strings plus a nested OperationLog payload.
class OperationLogWithMeta(google___protobuf___message___Message):
    DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
    system = ... # type: typing___Text
    topic = ... # type: typing___Text
    @property
    def data(self) -> monitor_sdk___model___notify___operation_log_pb2___OperationLog: ...
    def __init__(self,
        *,
        system : typing___Optional[typing___Text] = None,
        topic : typing___Optional[typing___Text] = None,
        data : typing___Optional[monitor_sdk___model___notify___operation_log_pb2___OperationLog] = None,
        ) -> None: ...
    if sys.version_info >= (3,):
        @classmethod
        def FromString(cls, s: builtin___bytes) -> OperationLogWithMeta: ...
    else:
        @classmethod
        def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> OperationLogWithMeta: ...
    def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def HasField(self, field_name: typing_extensions___Literal[u"data",b"data"]) -> builtin___bool: ...
    def ClearField(self, field_name: typing_extensions___Literal[u"data",b"data",u"system",b"system",u"topic",b"topic"]) -> None: ...
| [
"service@easyops.cn"
] | service@easyops.cn |
b54da0a37016d92c2647e2492dc40440cbd11337 | bdfe5001eaef25b767615b7fe5cb73c32d08427f | /api/allergen/admin.py | 715b2fcae16c8935e4d8f52aa18e71fe04e48f02 | [] | no_license | dducluzeaud/Allergen | 462afa5d47850d431112da9d9a2cd491711c0672 | 1073461e8100134d3fd7ad0d296c3f6b71823085 | refs/heads/develop | 2022-12-25T14:15:57.880666 | 2019-07-18T19:27:06 | 2019-07-18T19:27:06 | 139,955,843 | 0 | 0 | null | 2022-12-10T19:17:23 | 2018-07-06T08:26:07 | Python | UTF-8 | Python | false | false | 3,874 | py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from .models import (
Additive,
Allergen,
Category,
Ingredient,
Nutriment,
NutrimentComposeProduct,
Product,
Trace,
Vitamin,
VitaminComposeProduct,
Profile,
)
# Inline editors for Product's many-to-many relations, shown inside the
# Product admin page.  Plain M2M fields use the implicit .through model.
class CategoryInline(admin.TabularInline):
    model = Product.categories.through
    extra = 0
    verbose_name_plural = "Categories"
class AdditiveInline(admin.TabularInline):
    model = Product.additives.through
    extra = 0
    verbose_name_plural = "Additifs"
class VitaminInline(admin.TabularInline):
    # Explicit through model carrying the per-product vitamin data.
    model = VitaminComposeProduct
    extra = 0
    verbose_name_plural = "Vitamines"
class NutrimentInline(admin.TabularInline):
    # Explicit through model carrying the per-product nutriment data.
    model = NutrimentComposeProduct
    extra = 0
    verbose_name_plural = "Nutriments"
class IngredientInline(admin.TabularInline):
    model = Product.ingredients.through
    extra = 0
    verbose_name_plural = "Ingrédients"
class AllergenInline(admin.TabularInline):
    model = Product.allergens.through
    extra = 0
    verbose_name_plural = "Allergènes"
class TraceInline(admin.TabularInline):
    model = Product.traces.through
    extra = 0
    verbose_name_plural = "Traces"
class ProfileInline(admin.StackedInline):
    # One-to-one Profile edited inline on the User admin page.
    model = Profile
    can_delete = False
    verbose_name_plural = 'Profile'
    fk_name = 'user'
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
    # Product change list / form with all related data edited inline.
    list_display = ("product_name", "barcode", "nutrition_grade")
    fields = ("product_name", "image_url", "url_off", "barcode", "nutrition_grade")
    inlines = [
        CategoryInline,
        AdditiveInline,
        VitaminInline,
        NutrimentInline,
        IngredientInline,
        AllergenInline,
        TraceInline,
    ]
    search_fields = ("product_name", "barcode")
    list_filter = ["nutrition_grade"]
    def get_readonly_fields(self, request, obj=None):
        # Freeze every product field once the object exists; only creation
        # through the admin stays fully editable.
        if obj: # editing existing object
            return self.readonly_fields + (
                "product_name",
                "image_url",
                "url_off",
                "barcode",
                "nutrition_grade",
            )
        return self.readonly_fields
# Simple per-model admins: field layout and default ordering only.
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
    ordering = ("category_name",)
@admin.register(Additive)
class AdditiveAdmin(admin.ModelAdmin):
    fields = ("additive_name", "description", "risk", "max_permissible_dose")
    ordering = ("additive_name",)
    # if an additive already exist we don't want to modify it
    def get_readonly_fields(self, request, obj=None):
        if obj: # editing existing object
            return self.readonly_fields + ("additive_name",)
        return self.readonly_fields
@admin.register(Vitamin)
class VitaminAdmin(admin.ModelAdmin):
    # The male/female daily quantities are grouped on a single form row.
    fields = ("vitamin_name", "description", ("daily_quantity_m", "daily_quantity_f"))
    ordering = ("vitamin_name",)
@admin.register(Nutriment)
class NutrimentAdmin(admin.ModelAdmin):
    fields = ('nutriment_name', 'description', 'image', ('daily_quantity_m', 'daily_quantity_f'))
    ordering = ('nutriment_name',)
@admin.register(Ingredient)
class IngredientAdmin(admin.ModelAdmin):
    fields = ("ingredient_name",)
    ordering = ("ingredient_name",)
@admin.register(Allergen)
class AllergenAdmin(admin.ModelAdmin):
    fields = ("allergen_name",)
    ordering = ("allergen_name",)
@admin.register(Trace)
class TraceAdmin(admin.ModelAdmin):
    fields = ("name",)
    ordering = ("name",)
class CustomUserAdmin(UserAdmin):
    # Standard Django User admin extended with the inline Profile editor.
    inlines = (ProfileInline,)
    def get_inline_instances(self, request, obj=None):
        # Hide the Profile inline on the "add user" form (no object yet).
        if not obj:
            return list()
        return super(CustomUserAdmin, self).get_inline_instances(request, obj)
# Swap Django's default User admin for the profile-aware one.
admin.site.unregister(User)
admin.site.register(User, CustomUserAdmin)
| [
"david.ducluzeaud@live.fr"
] | david.ducluzeaud@live.fr |
6013abaf1ceca0fccaeea59d0bb1e9949aee08e7 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_337/ch8_2020_03_02_19_15_37_237019.py | 7d27944df20781e9fb1001e6829337fdf582639c | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 105 | py | def calcula_posicao(tempo):
posicao = posição inicial + velocidade*(instante t)
return posicao
| [
"you@example.com"
] | you@example.com |
2edd6447fadbc8f2d5d7f0f1999ee008bc0bec11 | 278e00c80a4ef641a8899a4977ce1e9378d8ffa1 | /tensorflow/mnist/tensorflow-mnist.py | c6c44d3c361df41913aba1342a91333112d9551e | [] | no_license | jjjhai/Python | 6e570c349822bf770376c3369f86e2dfc8ccfe98 | fee2b0b304071d50253b647aa6b4706bd6485fb5 | refs/heads/master | 2020-03-07T13:52:37.316600 | 2019-04-30T11:02:35 | 2019-04-30T11:02:35 | 127,512,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,202 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 25 20:44:01 2018
@author: Administrator
"""
import input_data
# Download/load the MNIST dataset with one-hot encoded labels.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
import tensorflow as tf
# TF1 graph-mode: placeholder for flattened 28x28 input images.
x = tf.placeholder("float", [None, 784])
W = tf.Variable(tf.zeros([784,10]))
b = tf.Variable(tf.zeros([10]))
# Softmax regression model: predicted class probabilities.
y = tf.nn.softmax(tf.matmul(x,W) + b)
# Cost function: cross-entropy
# (loss computation)
y_ = tf.placeholder("float", [None,10])
# Sum of ( actual value * log(predicted value) )
cross_entropy = -tf.reduce_sum(y_*tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# Training with stochastic gradient descent (mini-batches of 100)
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    print(sess.run([train_step, cross_entropy], feed_dict={x: batch_xs, y_: batch_ys}))
# argmax finds the index of the max value; the second argument is the axis
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
# cast converts the boolean tensor to floats;
# reduce_mean averages them into an accuracy score
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
sess.close()
| [
"1520997976@qq.com"
] | 1520997976@qq.com |
88328caa01f9345062f6c71ba6ec86c52c2b4132 | 96c884767f020cffd93e5572b9dfef65e4c29742 | /Práctica 7/p7.12a.cuenta.palabras.py | d68f8fe11652a6ba5ab4a7b235781e64f743074e | [] | no_license | a-valado/python | bff790c0119b882b363277f792fa5dbc43a00de2 | dfd52058bd5a19db983acd612ed7d4ad45856f78 | refs/heads/master | 2020-08-28T02:51:48.745396 | 2019-11-06T17:50:45 | 2019-11-06T17:50:45 | 217,566,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | #Albert Valado Pujol
#Práctica 7 - Ejercico 12a
#Escribir un programa que lea una frase,
#y pase ésta como parámetro a una función que debe contar el número de palabras que contiene.
#Debe imprimir el programa principal el resultado.
#Asumir que cada palabra está separada por un solo blanco:
print("Este programa cuenta el número de palabras de una frase.")
frase=input("Introduzca la frase.\n")
def cuentaPalabras(a):
    """Return the number of words in *a*, assuming single-space separators."""
    return len(a.split(" "))
print("La frase tiene", cuentaPalabras(frase)," palabras.")
input()
| [
"avalado@cifpfbmoll.eu"
] | avalado@cifpfbmoll.eu |
aef62d376e11b623b10924545474a7345a0d133b | d24bace66da749d30651401e6afa202588751970 | /strings/zeroMatrix.py | 67787f263dd8f2219e15580eda9a541133de8b51 | [] | no_license | roncrisostomo/CrackingTheCodingInterview | 16aa227e641b661b8cc6bbea34a8f7dd86749804 | bfe36cf48cd047edbe0fe3371444d341c04555ec | refs/heads/master | 2020-03-27T17:24:13.376234 | 2018-09-03T11:37:32 | 2018-09-03T11:37:32 | 146,848,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,682 | py | def zeroMatrix(m):
"""
m: a matrix
If element in m x n matrix is zero, sets entire row and column to zero
e.g. 1 2 3 1 2 0
4 5 0 -> 0 0 0
7 8 9 7 8 0
Modifies matrix m
Returns nothing
"""
# Approach: Iterate over all elements of matrix, add row and column indices
# of zero-value elements to lists, then iterate over each list, setting
# all values in the row/column index to zero--O(m*n)
rowsToZero = []
colsToZero = []
# Find zeroes in matrix
for rowNo in range(len(m)):
for colNo in range(len(m[rowNo])):
# If zero, add row and column indices to lists
if m[rowNo][colNo] == 0:
if rowNo not in rowsToZero:
rowsToZero.append(rowNo)
if colNo not in colsToZero:
colsToZero.append(colNo)
# Set elements in rows/columns with listed indices to zero
for rowNo in rowsToZero:
for colNo in range(len(m[rowNo])):
m[rowNo][colNo] = 0
for colNo in colsToZero:
for rowNo in range(len(m)):
# Catch cases when matrix is jagged, or rows have different lengths
if colNo < len(m[rowNo]):
m[rowNo][colNo] = 0
#m = [[1, 2, 3], [4, 5, 0], [7, 8, 9]]
# Ans: [[1, 2, 0], [0, 0, 0], [7, 8, 0]]
#m = [[0, 2, 3], [4, 5, 0], [7, 8, 9]]
# Ans: [[0, 0, 0], [0, 0, 0], [0, 8, 0]]
#m = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 0]]
# Ans: [[1, 2, 0, 4], [5, 6, 0, 8], [0, 0, 0]]
# Ad-hoc demo: zero out rows/cols containing a zero and print the result.
m = [[1, 0, 3, 4], [5, 0, 7, 8], [9, 10, 11, 12]]
# Ans: [[0, 0, 0, 0], [0, 0, 0, 0], [9, 0, 11, 12]]
zeroMatrix(m)
print(m)
| [
"rlcrisostomo06@gmail.com"
] | rlcrisostomo06@gmail.com |
4921da7ce88afd440d097cc404d7f4dd8e132cd1 | fdc75f73274ae55d90e8085100d95c7f3452c1ae | /test_igseq/test_colors.py | 03f1274ce616043875d9f9c328d5bb37debf185a | [] | no_license | ShawHahnLab/igseq | dc63b93eb11fecb8376fd5ffba31a7a6263e67d6 | 539868dab2041b7694c0d53e8e74cf1b5b033653 | refs/heads/dev | 2023-04-28T09:03:09.404590 | 2023-04-21T17:11:11 | 2023-04-21T17:11:11 | 415,071,017 | 1 | 0 | null | 2023-04-21T17:11:12 | 2021-10-08T17:20:30 | Python | UTF-8 | Python | false | false | 1,921 | py | from igseq import colors
from .util import TestBase
class TestColors(TestBase):
"""Tests for color-related helper functions."""
TRIOS = [
([255, 0, 0], "#ff0000"),
([0, 0, 0], "#000000"),
([0, 136, 0], "#008800"),
([255, 255, 255], "#ffffff"),
]
TEXTS = [
("#ff0000", [255, 0, 0]),
("#FF0000", [255, 0, 0]),
("FF0000", [255, 0, 0]),
("#008800", [0, 136, 0]),
("#f00", [255, 0, 0]),
("#F00", [255, 0, 0]),
("#080", [0, 136, 0]),
("f00", [255, 0, 0]),
]
SCALES = [
# Two colors averaged, no scaling
(([[255, 0, 0], [0, 0, 255]], 0), [127, 0, 127]),
# Two colors averaged, of 2 total, scales to black
(([[255, 0, 0], [0, 0, 255]], 2), [0, 0, 0]),
# one color of two, stays the same
(([[255, 0, 0]], 2), [255, 0, 0]),
# no colors = black by definition
(([], 0), [0, 0, 0]),
(([], 2), [0, 0, 0]),
# two colors of three, averaged + scaled
(([[255, 0, 0], [0, 0, 255]], 3), [91, 0, 91]),
]
def test_merge_colors(self):
"""Test blending colors together."""
for case in self.__class__.SCALES:
with self.subTest(case=case):
self.assertEqual(colors.merge_colors(case[0][0], case[0][1]), case[1])
def test_color_str_to_trio(self):
"""Test converting color text codes to integer trios."""
for case in self.__class__.TEXTS:
with self.subTest(case=case):
self.assertEqual(colors.color_str_to_trio(case[0]), case[1])
def test_color_trio_to_str(self):
"""Test converting integer trios to color text codes."""
for case in self.__class__.TRIOS:
with self.subTest(case=case):
self.assertEqual(colors.color_trio_to_str(case[0]), case[1])
| [
"ancon@upenn.edu"
] | ancon@upenn.edu |
597d15f9483743209611d978a2f889f859a6aa82 | 2a8a6327fb9a7ce8696aa15b197d5170661fb94f | /test/test_put_order_action_trigger_dates_request_type_order_actions.py | f00aa4180e526b79d4133bf03d89dcfe4c1837b8 | [] | no_license | moderndatainc/zuora-client | 8b88e05132ddf7e8c411a6d7dad8c0baabaa6dad | d50da49ce1b8465c76723496c2561a3b8ebdf07d | refs/heads/master | 2021-09-21T19:17:34.752404 | 2018-08-29T23:24:07 | 2018-08-29T23:24:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38,976 | py | # coding: utf-8
"""
Zuora API Reference
# Introduction Welcome to the reference for the Zuora REST API! <a href=\"http://en.wikipedia.org/wiki/REST_API\" target=\"_blank\">REST</a> is a web-service protocol that lends itself to rapid development by using everyday HTTP and JSON technology. The Zuora REST API provides a broad set of operations and resources that: * Enable Web Storefront integration from your website. * Support self-service subscriber sign-ups and account management. * Process revenue schedules through custom revenue rule models. * Enable manipulation of most objects in the Zuora Object Model. Want to share your opinion on how our API works for you? <a href=\"https://community.zuora.com/t5/Developers/API-Feedback-Form/gpm-p/21399\" target=\"_blank\">Tell us how you feel </a>about using our API and what we can do to make it better. ## Access to the API If you have a Zuora tenant, you can access the Zuora REST API via one of the following endpoints: | Tenant | Base URL for REST Endpoints | |-------------------------|-------------------------| |US Production | https://rest.zuora.com | |US API Sandbox | https://rest.apisandbox.zuora.com| |US Performance Test | https://rest.pt1.zuora.com | |EU Production | https://rest.eu.zuora.com | |EU Sandbox | https://rest.sandbox.eu.zuora.com | The Production endpoint provides access to your live user data. API Sandbox tenants are a good place to test code without affecting real-world data. If you would like Zuora to provision an API Sandbox tenant for you, contact your Zuora representative for assistance. **Note:** If you have a tenant in the Production Copy Environment, submit a request at <a href=\"http://support.zuora.com/\" target=\"_blank\">Zuora Global Support</a> to enable the Zuora REST API in your tenant and obtain the base URL for REST endpoints. 
If you do not have a Zuora tenant, go to <a href=\"https://www.zuora.com/resource/zuora-test-drive\" target=\"_blank\">https://www.zuora.com/resource/zuora-test-drive</a> and sign up for a Production Test Drive tenant. The tenant comes with seed data, including a sample product catalog. # API Changelog You can find the <a href=\"https://community.zuora.com/t5/Developers/API-Changelog/gpm-p/18092\" target=\"_blank\">Changelog</a> of the API Reference in the Zuora Community. # Authentication ## OAuth v2.0 Zuora recommends that you use OAuth v2.0 to authenticate to the Zuora REST API. Currently, OAuth is not available in every environment. See [Zuora Testing Environments](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/D_Zuora_Environments) for more information. Zuora recommends you to create a dedicated API user with API write access on a tenant when authenticating via OAuth, and then create an OAuth client for this user. See <a href=\"https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users/Create_an_API_User\" target=\"_blank\">Create an API User</a> for how to do this. By creating a dedicated API user, you can control permissions of the API user without affecting other non-API users. If a user is deactivated, all of the user's OAuth clients will be automatically deactivated. Authenticating via OAuth requires the following steps: 1. Create a Client 2. Generate a Token 3. Make Authenticated Requests ### Create a Client You must first [create an OAuth client](https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users#Create_an_OAuth_Client_for_a_User) in the Zuora UI. To do this, you must be an administrator of your Zuora tenant. This is a one-time operation. You will be provided with a Client ID and a Client Secret. Please note this information down, as it will be required for the next step. **Note:** The OAuth client will be owned by a Zuora user account. 
If you want to perform PUT, POST, or DELETE operations using the OAuth client, the owner of the OAuth client must have a Platform role that includes the \"API Write Access\" permission. ### Generate a Token After creating a client, you must make a call to obtain a bearer token using the [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) operation. This operation requires the following parameters: - `client_id` - the Client ID displayed when you created the OAuth client in the previous step - `client_secret` - the Client Secret displayed when you created the OAuth client in the previous step - `grant_type` - must be set to `client_credentials` **Note**: The Client ID and Client Secret mentioned above were displayed when you created the OAuth Client in the prior step. The [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) response specifies how long the bearer token is valid for. Call [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) again to generate a new bearer token. ### Make Authenticated Requests To authenticate subsequent API requests, you must provide a valid bearer token in an HTTP header: `Authorization: Bearer {bearer_token}` If you have [Zuora Multi-entity](https://www.zuora.com/developer/api-reference/#tag/Entities) enabled, you need to set an additional header to specify the ID of the entity that you want to access. You can use the `scope` field in the [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) response to determine whether you need to specify an entity ID. If the `scope` field contains more than one entity ID, you must specify the ID of the entity that you want to access. 
For example, if the `scope` field contains `entity.1a2b7a37-3e7d-4cb3-b0e2-883de9e766cc` and `entity.c92ed977-510c-4c48-9b51-8d5e848671e9`, specify one of the following headers: - `Zuora-Entity-Ids: 1a2b7a37-3e7d-4cb3-b0e2-883de9e766cc` - `Zuora-Entity-Ids: c92ed977-510c-4c48-9b51-8d5e848671e9` **Note**: For a limited period of time, Zuora will accept the `entityId` header as an alternative to the `Zuora-Entity-Ids` header. If you choose to set the `entityId` header, you must remove all \"-\" characters from the entity ID in the `scope` field. If the `scope` field contains a single entity ID, you do not need to specify an entity ID. ## Other Supported Authentication Schemes Zuora continues to support the following additional legacy means of authentication: * Use username and password. Include authentication with each request in the header: * `apiAccessKeyId` * `apiSecretAccessKey` Zuora recommends that you create an API user specifically for making API calls. See <a href=\"https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users/Create_an_API_User\" target=\"_blank\">Create an API User</a> for more information. * Use an authorization cookie. The cookie authorizes the user to make calls to the REST API for the duration specified in **Administration > Security Policies > Session timeout**. The cookie expiration time is reset with this duration after every call to the REST API. To obtain a cookie, call the [Connections](https://www.zuora.com/developer/api-reference/#tag/Connections) resource with the following API user information: * ID * Password * For CORS-enabled APIs only: Include a 'single-use' token in the request header, which re-authenticates the user with each request. See below for more details. ### Entity Id and Entity Name The `entityId` and `entityName` parameters are only used for [Zuora Multi-entity](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity \"Zuora Multi-entity\"). 
These are the legacy parameters that Zuora will only continue to support for a period of time. Zuora recommends you to use the `Zuora-Entity-Ids` parameter instead. The `entityId` and `entityName` parameters specify the Id and the [name of the entity](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity/B_Introduction_to_Entity_and_Entity_Hierarchy#Name_and_Display_Name \"Introduction to Entity and Entity Hierarchy\") that you want to access, respectively. Note that you must have permission to access the entity. You can specify either the `entityId` or `entityName` parameter in the authentication to access and view an entity. * If both `entityId` and `entityName` are specified in the authentication, an error occurs. * If neither `entityId` nor `entityName` is specified in the authentication, you will log in to the entity in which your user account is created. To get the entity Id and entity name, you can use the GET Entities REST call. For more information, see [API User Authentication](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity/A_Overview_of_Multi-entity#API_User_Authentication \"API User Authentication\"). ### Token Authentication for CORS-Enabled APIs The CORS mechanism enables REST API calls to Zuora to be made directly from your customer's browser, with all credit card and security information transmitted directly to Zuora. This minimizes your PCI compliance burden, allows you to implement advanced validation on your payment forms, and makes your payment forms look just like any other part of your website. For security reasons, instead of using cookies, an API request via CORS uses **tokens** for authentication. The token method of authentication is only designed for use with requests that must originate from your customer's browser; **it should not be considered a replacement to the existing cookie authentication** mechanism. 
See [Zuora CORS REST](https://knowledgecenter.zuora.com/DC_Developers/REST_API/A_REST_basics/G_CORS_REST \"Zuora CORS REST\") for details on how CORS works and how you can begin to implement customer calls to the Zuora REST APIs. See [HMAC Signatures](https://www.zuora.com/developer/api-reference/#operation/POSTHMACSignature \"HMAC Signatures\") for details on the HMAC method that returns the authentication token. # Requests and Responses ## Request IDs As a general rule, when asked to supply a \"key\" for an account or subscription (accountKey, account-key, subscriptionKey, subscription-key), you can provide either the actual ID or the number of the entity. ## HTTP Request Body Most of the parameters and data accompanying your requests will be contained in the body of the HTTP request. The Zuora REST API accepts JSON in the HTTP request body. No other data format (e.g., XML) is supported. ### Data Type ([Actions](https://www.zuora.com/developer/api-reference/#tag/Actions) and CRUD operations only) We recommend that you do not specify the decimal values with quotation marks, commas, and spaces. Use characters of `+-0-9.eE`, for example, `5`, `1.9`, `-8.469`, and `7.7e2`. Also, Zuora does not convert currencies for decimal values. ## Testing a Request Use a third party client, such as [curl](https://curl.haxx.se \"curl\"), [Postman](https://www.getpostman.com \"Postman\"), or [Advanced REST Client](https://advancedrestclient.com \"Advanced REST Client\"), to test the Zuora REST API. You can test the Zuora REST API from the Zuora API Sandbox or Production tenants. If connecting to Production, bear in mind that you are working with your live production data, not sample data or test data. ## Testing with Credit Cards Sooner or later it will probably be necessary to test some transactions that involve credit cards. 
For suggestions on how to handle this, see [Going Live With Your Payment Gateway](https://knowledgecenter.zuora.com/CB_Billing/M_Payment_Gateways/C_Managing_Payment_Gateways/B_Going_Live_Payment_Gateways#Testing_with_Credit_Cards \"C_Zuora_User_Guides/A_Billing_and_Payments/M_Payment_Gateways/C_Managing_Payment_Gateways/B_Going_Live_Payment_Gateways#Testing_with_Credit_Cards\" ). ## Concurrent Request Limits Zuora enforces tenant-level concurrent request limits. See <a href=\"https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Policies/Concurrent_Request_Limits\" target=\"_blank\">Concurrent Request Limits</a> for more information. ## Timeout Limit If a request does not complete within 120 seconds, the request times out and Zuora returns a Gateway Timeout error. ## Error Handling Responses and error codes are detailed in [Responses and errors](https://knowledgecenter.zuora.com/DC_Developers/REST_API/A_REST_basics/3_Responses_and_errors \"Responses and errors\"). # Pagination When retrieving information (using GET methods), the optional `pageSize` query parameter sets the maximum number of rows to return in a response. The maximum is `40`; larger values are treated as `40`. If this value is empty or invalid, `pageSize` typically defaults to `10`. The default value for the maximum number of rows retrieved can be overridden at the method level. If more rows are available, the response will include a `nextPage` element, which contains a URL for requesting the next page. If this value is not provided, no more rows are available. No \"previous page\" element is explicitly provided; to support backward paging, use the previous call. ## Array Size For data items that are not paginated, the REST API supports arrays of up to 300 rows. Thus, for instance, repeated pagination can retrieve thousands of customer accounts, but within any account an array of no more than 300 rate plans is returned. # API Versions The Zuora REST API are version controlled. 
Versioning ensures that Zuora REST API changes are backward compatible. Zuora uses a major and minor version nomenclature to manage changes. By specifying a version in a REST request, you can get expected responses regardless of future changes to the API. ## Major Version The major version number of the REST API appears in the REST URL. Currently, Zuora only supports the **v1** major version. For example, `POST https://rest.zuora.com/v1/subscriptions`. ## Minor Version Zuora uses minor versions for the REST API to control small changes. For example, a field in a REST method is deprecated and a new field is used to replace it. Some fields in the REST methods are supported as of minor versions. If a field is not noted with a minor version, this field is available for all minor versions. If a field is noted with a minor version, this field is in version control. You must specify the supported minor version in the request header to process without an error. If a field is in version control, it is either with a minimum minor version or a maximum minor version, or both of them. You can only use this field with the minor version between the minimum and the maximum minor versions. For example, the `invoiceCollect` field in the POST Subscription method is in version control and its maximum minor version is 189.0. You can only use this field with the minor version 189.0 or earlier. If you specify a version number in the request header that is not supported, Zuora will use the minimum minor version of the REST API. In our REST API documentation, if a field or feature requires a minor version number, we note that in the field description. You only need to specify the version number when you use the fields require a minor version. To specify the minor version, set the `zuora-version` parameter to the minor version number in the request header for the request call. For example, the `collect` field is in 196.0 minor version. 
If you want to use this field for the POST Subscription method, set the `zuora-version` parameter to `196.0` in the request header. The `zuora-version` parameter is case sensitive. For all the REST API fields, by default, if the minor version is not specified in the request header, Zuora will use the minimum minor version of the REST API to avoid breaking your integration. ### Minor Version History The supported minor versions are not serial. This section documents the changes made to each Zuora REST API minor version. The following table lists the supported versions and the fields that have a Zuora REST API minor version. | Fields | Minor Version | REST Methods | Description | |:--------|:--------|:--------|:--------| | invoiceCollect | 189.0 and earlier | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice and collects a payment for a subscription. 
| | collect | 196.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Collects an automatic payment for a subscription. | | invoice | 196.0 and 207.0| [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice for a subscription. 
| | invoiceTargetDate | 196.0 and earlier | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") |Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | invoiceTargetDate | 207.0 and earlier | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | targetDate | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") |Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. 
| | targetDate | 211.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | includeExisting DraftInvoiceItems | 196.0 and earlier| [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | Specifies whether to include draft invoice items in subscription previews. Specify it to be `true` (default) to include draft invoice items in the preview result. Specify it to be `false` to excludes draft invoice items in the preview result. | | includeExisting DraftDocItems | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | Specifies whether to include draft invoice items in subscription previews. 
Specify it to be `true` (default) to include draft invoice items in the preview result. Specify it to be `false` to excludes draft invoice items in the preview result. | | previewType | 196.0 and earlier| [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | The type of preview you will receive. The possible values are `InvoiceItem`(default), `ChargeMetrics`, and `InvoiceItemChargeMetrics`. | | previewType | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | The type of preview you will receive. The possible values are `LegalDoc`(default), `ChargeMetrics`, and `LegalDocChargeMetrics`. | | runBilling | 211.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice or credit memo for a subscription. 
**Note:** Credit memos are only available if you have the Invoice Settlement feature enabled. | | invoiceDate | 214.0 and earlier | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date that should appear on the invoice being generated, as `yyyy-mm-dd`. | | invoiceTargetDate | 214.0 and earlier | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date through which to calculate charges on this account if an invoice is generated, as `yyyy-mm-dd`. | | documentDate | 215.0 and later | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date that should appear on the invoice and credit memo being generated, as `yyyy-mm-dd`. | | targetDate | 215.0 and later | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date through which to calculate charges on this account if an invoice or a credit memo is generated, as `yyyy-mm-dd`. | | memoItemAmount | 223.0 and earlier | [Create credit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_CreditMemoFromPrpc \"Create credit memo from charge\"); [Create debit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_DebitMemoFromPrpc \"Create debit memo from charge\") | Amount of the memo item. | | amount | 224.0 and later | [Create credit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_CreditMemoFromPrpc \"Create credit memo from charge\"); [Create debit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_DebitMemoFromPrpc \"Create debit memo from charge\") | Amount of the memo item. 
| | subscriptionNumbers | 222.4 and earlier | [Create order](https://www.zuora.com/developer/api-reference/#operation/POST_Order \"Create order\") | Container for the subscription numbers of the subscriptions in an order. | | subscriptions | 223.0 and later | [Create order](https://www.zuora.com/developer/api-reference/#operation/POST_Order \"Create order\") | Container for the subscription numbers and statuses in an order. | #### Version 207.0 and Later The response structure of the [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") and [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") methods are changed. The following invoice related response fields are moved to the invoice container: * amount * amountWithoutTax * taxAmount * invoiceItems * targetDate * chargeMetrics # Zuora Object Model The following diagram presents a high-level view of the key Zuora objects. Click the image to open it in a new tab to resize it. <a href=\"https://www.zuora.com/wp-content/uploads/2017/01/ZuoraERD.jpeg\" target=\"_blank\"><img src=\"https://www.zuora.com/wp-content/uploads/2017/01/ZuoraERD.jpeg\" alt=\"Zuora Object Model Diagram\"></a> See the following articles for information about other parts of the Zuora business object model: * <a href=\"https://knowledgecenter.zuora.com/CB_Billing/Invoice_Settlement/D_Invoice_Settlement_Object_Model\" target=\"_blank\">Invoice Settlement Object Model</a> * <a href=\"https://knowledgecenter.zuora.com/BC_Subscription_Management/Orders/BA_Orders_Object_Model\" target=\"_blank\">Orders Object Model</a> You can use the [Describe object](https://www.zuora.com/developer/api-reference/#operation/GET_Describe) operation to list the fields of each Zuora object that is available in your tenant. When you call the operation, you must specify the API name of the Zuora object. 
The following table provides the API name of each Zuora object: | Object | API Name | |-----------------------------------------------|--------------------------------------------| | Account | `Account` | | Accounting Code | `AccountingCode` | | Accounting Period | `AccountingPeriod` | | Amendment | `Amendment` | | Application Group | `ApplicationGroup` | | Billing Run | <p>`BillingRun`</p><p>**Note:** The API name of this object is `BillingRun` in the [Describe object](https://www.zuora.com/developer/api-reference/#operation/GET_Describe) operation and Export ZOQL queries only. Otherwise, the API name of this object is `BillRun`.</p> | | Contact | `Contact` | | Contact Snapshot | `ContactSnapshot` | | Credit Balance Adjustment | `CreditBalanceAdjustment` | | Credit Memo | `CreditMemo` | | Credit Memo Application | `CreditMemoApplication` | | Credit Memo Application Item | `CreditMemoApplicationItem` | | Credit Memo Item | `CreditMemoItem` | | Credit Memo Part | `CreditMemoPart` | | Credit Memo Part Item | `CreditMemoPartItem` | | Credit Taxation Item | `CreditTaxationItem` | | Custom Exchange Rate | `FXCustomRate` | | Debit Memo | `DebitMemo` | | Debit Memo Item | `DebitMemoItem` | | Debit Taxation Item | `DebitTaxationItem` | | Discount Applied Metrics | `DiscountAppliedMetrics` | | Entity | `Tenant` | | Gateway Reconciliation Event | `PaymentGatewayReconciliationEventLog` | | Gateway Reconciliation Job | `PaymentReconciliationJob` | | Gateway Reconciliation Log | `PaymentReconciliationLog` | | Invoice | `Invoice` | | Invoice Adjustment | `InvoiceAdjustment` | | Invoice Item | `InvoiceItem` | | Invoice Item Adjustment | `InvoiceItemAdjustment` | | Invoice Payment | `InvoicePayment` | | Journal Entry | `JournalEntry` | | Journal Entry Item | `JournalEntryItem` | | Journal Run | `JournalRun` | | Order | `Order` | | Order Action | `OrderAction` | | Order ELP | `OrderElp` | | Order Item | `OrderItem` | | Order MRR | `OrderMrr` | | Order Quantity | `OrderQuantity` | | 
Order TCB | `OrderTcb` | | Order TCV | `OrderTcv` | | Payment | `Payment` | | Payment Application | `PaymentApplication` | | Payment Application Item | `PaymentApplicationItem` | | Payment Method | `PaymentMethod` | | Payment Method Snapshot | `PaymentMethodSnapshot` | | Payment Method Transaction Log | `PaymentMethodTransactionLog` | | Payment Method Update | `UpdaterDetail` | | Payment Part | `PaymentPart` | | Payment Part Item | `PaymentPartItem` | | Payment Run | `PaymentRun` | | Payment Transaction Log | `PaymentTransactionLog` | | Processed Usage | `ProcessedUsage` | | Product | `Product` | | Product Rate Plan | `ProductRatePlan` | | Product Rate Plan Charge | `ProductRatePlanCharge` | | Product Rate Plan Charge Tier | `ProductRatePlanChargeTier` | | Rate Plan | `RatePlan` | | Rate Plan Charge | `RatePlanCharge` | | Rate Plan Charge Tier | `RatePlanChargeTier` | | Refund | `Refund` | | Refund Application | `RefundApplication` | | Refund Application Item | `RefundApplicationItem` | | Refund Invoice Payment | `RefundInvoicePayment` | | Refund Part | `RefundPart` | | Refund Part Item | `RefundPartItem` | | Refund Transaction Log | `RefundTransactionLog` | | Revenue Charge Summary | `RevenueChargeSummary` | | Revenue Charge Summary Item | `RevenueChargeSummaryItem` | | Revenue Event | `RevenueEvent` | | Revenue Event Credit Memo Item | `RevenueEventCreditMemoItem` | | Revenue Event Debit Memo Item | `RevenueEventDebitMemoItem` | | Revenue Event Invoice Item | `RevenueEventInvoiceItem` | | Revenue Event Invoice Item Adjustment | `RevenueEventInvoiceItemAdjustment` | | Revenue Event Item | `RevenueEventItem` | | Revenue Event Item Credit Memo Item | `RevenueEventItemCreditMemoItem` | | Revenue Event Item Debit Memo Item | `RevenueEventItemDebitMemoItem` | | Revenue Event Item Invoice Item | `RevenueEventItemInvoiceItem` | | Revenue Event Item Invoice Item Adjustment | `RevenueEventItemInvoiceItemAdjustment` | | Revenue Event Type | `RevenueEventType` | | Revenue 
Schedule | `RevenueSchedule` | | Revenue Schedule Credit Memo Item | `RevenueScheduleCreditMemoItem` | | Revenue Schedule Debit Memo Item | `RevenueScheduleDebitMemoItem` | | Revenue Schedule Invoice Item | `RevenueScheduleInvoiceItem` | | Revenue Schedule Invoice Item Adjustment | `RevenueScheduleInvoiceItemAdjustment` | | Revenue Schedule Item | `RevenueScheduleItem` | | Revenue Schedule Item Credit Memo Item | `RevenueScheduleItemCreditMemoItem` | | Revenue Schedule Item Debit Memo Item | `RevenueScheduleItemDebitMemoItem` | | Revenue Schedule Item Invoice Item | `RevenueScheduleItemInvoiceItem` | | Revenue Schedule Item Invoice Item Adjustment | `RevenueScheduleItemInvoiceItemAdjustment` | | Subscription | `Subscription` | | Taxable Item Snapshot | `TaxableItemSnapshot` | | Taxation Item | `TaxationItem` | | Updater Batch | `UpdaterBatch` | | Usage | `Usage` | # noqa: E501
OpenAPI spec version: 2018-08-23
Contact: docs@zuora.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import zuora_client
from zuora_client.models.put_order_action_trigger_dates_request_type_order_actions import PUTOrderActionTriggerDatesRequestTypeOrderActions # noqa: E501
from zuora_client.rest import ApiException
class TestPUTOrderActionTriggerDatesRequestTypeOrderActions(unittest.TestCase):
    """Unit-test stubs for the PUTOrderActionTriggerDatesRequestTypeOrderActions model."""

    def setUp(self):
        # No fixtures are required for these generated stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testPUTOrderActionTriggerDatesRequestTypeOrderActions(self):
        """Smoke-test stub for the PUTOrderActionTriggerDatesRequestTypeOrderActions model."""
        # FIXME: construct the model with mandatory attributes and example values, e.g.
        # model = zuora_client.models.put_order_action_trigger_dates_request_type_order_actions.PUTOrderActionTriggerDatesRequestTypeOrderActions()  # noqa: E501
        pass
# Allow running this test module directly via the unittest CLI runner.
if __name__ == '__main__':
    unittest.main()
| [
"jairo.velasco@alertlogic.com"
] | jairo.velasco@alertlogic.com |
457d79392860d7287c6190a1cddadcbd53f1995a | b26406a338263ec6cb6d9391bb628cba0fa9e37b | /summary_ranges.py | 930ad9819e1bed87b1cafa1a49f2449508508f25 | [] | no_license | nhiggins13/leetcode | 096e27d9945439f6b26470a4712102f1c7f290d5 | f949a09c6a9251dc167fd807b412b86d5344977f | refs/heads/main | 2023-01-29T20:35:41.472210 | 2020-12-01T23:20:17 | 2020-12-01T23:20:17 | 302,689,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 759 | py | class Solution:
def summaryRanges(self, nums: List[int]) -> List[str]:
if not nums:
return
start = nums[0]
curr_index = 0
results = []
while (curr_index < len(nums) - 1):
curr = nums[curr_index]
nxt = nums[curr_index + 1]
if nxt != curr + 1:
if curr == start:
results.append(str(start))
start = nxt
else:
results.append("%d->%d" % (start, curr))
start = nxt
curr_index += 1
if start == nums[-1]:
results.append(str(start))
else:
results.append("%d->%d" % (start, nums[-1]))
return results | [
"n.higgins1313@gmail.com"
] | n.higgins1313@gmail.com |
d315787bb6b8a33384f02df4fd9358fc7f3ae68e | f359c953ef823cc44f7d87a3736c3e4fb1817c0b | /EDBRCommon/python/simulation/RunIIDR74X50ns/TTbar/TTaw.py | 71536ff1fd213b3a0b0ae79234018df0b109d56f | [] | no_license | jruizvar/ExoDiBosonResonancesRun2 | aa613200725cf6cd825d7bcbde60d2e39ba84e39 | b407ab36504d0e04e6bddba4e57856f9f8c0ec66 | refs/heads/Analysis76X | 2021-01-18T20:00:57.358494 | 2016-05-30T21:30:19 | 2016-05-30T21:30:19 | 23,619,682 | 1 | 1 | null | 2016-04-22T18:38:45 | 2014-09-03T12:41:07 | Python | UTF-8 | Python | false | false | 1,426 | py | import FWCore.ParameterSet.Config as cms
# Process the full input: -1 means no event limit.
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
# Primary and secondary input file lists; secFiles stays empty for this sample.
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
# LFNs of the TT (powheg+pythia8) MINIAODSIM files, RunIISpring15DR74 / Asympt50ns.
readFiles.extend( [
       '/store/mc/RunIISpring15DR74/TT_TuneCUETP8M1_13TeV-powheg-pythia8/MINIAODSIM/Asympt50ns_MCRUN2_74_V9A-v4/60000/FAB076ED-590F-E511-B784-0CC47A4DEEBA.root',
       '/store/mc/RunIISpring15DR74/TT_TuneCUETP8M1_13TeV-powheg-pythia8/MINIAODSIM/Asympt50ns_MCRUN2_74_V9A-v4/60000/FC007331-5E0F-E511-8D0C-0025904B1424.root',
       '/store/mc/RunIISpring15DR74/TT_TuneCUETP8M1_13TeV-powheg-pythia8/MINIAODSIM/Asympt50ns_MCRUN2_74_V9A-v4/60000/FC9BEF1E-540F-E511-8740-002590E39F36.root',
       '/store/mc/RunIISpring15DR74/TT_TuneCUETP8M1_13TeV-powheg-pythia8/MINIAODSIM/Asympt50ns_MCRUN2_74_V9A-v4/60000/FCD4075D-6A0F-E511-AA8B-00259073E410.root',
       '/store/mc/RunIISpring15DR74/TT_TuneCUETP8M1_13TeV-powheg-pythia8/MINIAODSIM/Asympt50ns_MCRUN2_74_V9A-v4/60000/FEC4769D-6E0F-E511-8A65-0025907277E8.root',
       '/store/mc/RunIISpring15DR74/TT_TuneCUETP8M1_13TeV-powheg-pythia8/MINIAODSIM/Asympt50ns_MCRUN2_74_V9A-v4/60000/FECA6F36-360F-E511-8BA1-0CC47A13D09C.root',
       '/store/mc/RunIISpring15DR74/TT_TuneCUETP8M1_13TeV-powheg-pythia8/MINIAODSIM/Asympt50ns_MCRUN2_74_V9A-v4/60000/FED5EE4E-C910-E511-91E8-AC853D9DAC41.root' ] );
| [
"jruizvar@cern.ch"
] | jruizvar@cern.ch |
068db76603c5e98e0f06eb9aa9ea1e6671dd2e65 | d0fb46fb2868089663a4af80ea27509b57f55fce | /puffy/views.py | 1b8a20418d8d38cfa26fdf400597c4908599f185 | [] | no_license | javad-hub/original-puffy | cb90e44d43b5fb391ce7e8686265f2cefddd3852 | caac094d2a60d984d7a2747792a35d2c613148f5 | refs/heads/main | 2023-04-02T19:52:36.364479 | 2021-04-13T12:57:55 | 2021-04-13T12:57:55 | 357,553,866 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | from django.shortcuts import render
from django.shortcuts import HttpResponse
def about(request):
    """Render the static "About" page for the puffy network site."""
    return render(request, 'About.html')
def home(request):
    """Render the static home page."""
    return render(request, 'Home.html')
| [
"fasanehsepasian@gmail.com"
] | fasanehsepasian@gmail.com |
8c739165a6e3e1ef886537f46b1820b695a7ead4 | 039bcc5f447bf636ff68fbfef9ba461c1aa6b7c9 | /lab_4/ex1.py | b1b3897bd49feeaf3b6c5330060b3159273ee1b0 | [
"MIT"
] | permissive | davve50/d0009e | 06439363feeeac95a52b00d9ccf5d40b0042808c | 3af55d7fc9dbdb085a27298961557a60f379b773 | refs/heads/main | 2023-02-22T00:00:31.703467 | 2021-01-22T09:13:21 | 2021-01-22T09:13:21 | 317,813,418 | 0 | 0 | null | 2021-01-22T09:13:22 | 2020-12-02T09:34:25 | Python | UTF-8 | Python | false | false | 2,352 | py | def main():
temp = []
book = []
com = []
while True:
ch = input("Phonebook> ")
com = ch.split()
if "add" in com:
add(com,book,temp)
elif "lookup" in com:
lookup(com,book)
elif "alias" in com:
alias(com,book)
elif "change" in com:
change(com,book)
elif "quit" in com:
print("Exiting program...")
break
elif "save" in com:
save(com,book)
elif "load" in com:
load(com,book)
else:
print("Enter a valid command!")
def add(com, book, temp):
    """Add a new [name, number] entry, rejecting duplicate names or numbers.

    com  -- tokenized command: ['add', name, number]
    book -- list of entries; each entry is [name, number, *aliases]
    temp -- unused scratch list, kept for interface compatibility

    Fixes two defects in the original: nothing was ever added when the book
    was empty (the append lived inside the `for entry in book` loop), and a
    duplicate entry was appended once per non-matching field.
    """
    for entry in book:
        if com[1] in entry:
            print("User already exists!")
            return
        if com[2] in entry:
            print("Number already exists!")
            return
    book.append([com[1], com[2]])
def lookup(com, book):
    """Print the phone number stored for the name/alias com[1].

    Fix: the original delegated the existence check to exist(), which
    returned after inspecting only the first entry, so any entry beyond
    the first could never be looked up.  The scan is now done inline.
    """
    for entry in book:
        if com[1] in entry:
            # Field 1 of an entry is always the number.
            print(entry[1])
            return
    print("User not found!")
def alias(com, book):
    """Add com[2] as an alias of the entry matching com[1].

    Messages mirror the original: a missing target prints "User not found!"
    (taking precedence), and an already-used alias prints
    "User already exists!".

    Fix: the original relied on the broken exist() helper, which only ever
    inspected the first entry of the book.
    """
    target = None
    alias_taken = False
    for entry in book:
        if target is None and com[1] in entry:
            target = entry
        if com[2] in entry:
            alias_taken = True
    if target is None:
        print("User not found!")
    elif alias_taken:
        print("User already exists!")
    else:
        target.append(com[2])
def change(com, book):
    """Replace the stored number of the entry matching com[1] with com[2].

    Fix: the original relied on the broken exist() helper (first entry only);
    the scan is now done inline over every entry's fields.
    """
    for entry in book:
        if com[1] in entry:
            # Field 1 of an entry is always the number.
            entry[1] = com[2]
            return
    print("User not found!")
def save(com, book):
    """Write the phonebook to the file named by com[1].

    Each entry becomes one line of semicolon-terminated fields,
    e.g. "bob;123;bobby;".
    """
    with open(com[1], "w") as out:
        for entry in book:
            out.write(";".join(entry) + ";\n")
    print("Saving...")
def load(com, book):
    """Replace the phonebook in place with entries read from file com[1].

    Expects the format produced by save(): semicolon-terminated fields,
    one entry per line.  The trailing element after the final ';' (the
    newline) is discarded.
    """
    book.clear()
    with open(com[1], "r") as src:
        for raw in src:
            fields = raw.split(";")
            book.append(fields[:-1])
    print("Loading...")
def exist(book, user):
    """Return True when *user* appears as any field of any entry.

    Fixes the original, which returned False after inspecting only the
    first field of the first entry (and None for an empty book).
    """
    for entry in book:
        if user in entry:
            return True
    return False
# Start the interactive REPL only when executed as a script (not on import).
if __name__ == "__main__":
    main()
"davarv-7@student.ltu.se"
] | davarv-7@student.ltu.se |
3e36c235549137785ce9d51fb36ee188d100364d | bbe7132e45134d015cd96f7ad10f7fd379b8112f | /Functions/TrainingDataCollection/CollectingData_Guided.py | 0ed542957cc49c9e5f73e051ff18ec186eacab3b | [] | no_license | lxb1989/DeepClawBenchmark | e8dc770eabe813f87298e3ce9482b1f256f5f331 | 3f955c9faf82c7c2f20ed8165ef6eb80d1db564d | refs/heads/master | 2020-08-25T07:08:50.035336 | 2019-10-22T09:35:19 | 2019-10-22T09:35:19 | 216,980,575 | 0 | 0 | null | 2019-10-23T06:06:27 | 2019-10-23T06:06:27 | null | UTF-8 | Python | false | false | 3,149 | py | import sys
sys.path.append('/home/yang/python-urx')
sys.path.append('/home/yang/Git/CobotBenchmark/Driver')
sys.path.append('/home/yang/Git/CobotBenchmark/Functions')
sys.path.append('/home/yang/Git/CobotBenchmark/ToolKit')
from fc_predictor import Predictor
from realsense_controller import RealsenseController
import cv2
from PIL import Image, ImageDraw
import time
import numpy as np
import ur_controller_urx as urc
import Calibration_2D as Cali
from datetime import datetime
from success_label import *
import random
from random import choice
import os
import heapq
# --- Experiment configuration ------------------------------------------------
# Robot workspace bounds used for the random drop pose below.
# NOTE(review): ordering appears to be [x_min, y_min, x_max, y_max] in the robot
# frame, inferred from the random.uniform() calls -- confirm.
WorkSpace = [-0.21,-0.37,0.21,-0.68]
rob = urc.ur5()
camera = RealsenseController()
hand_eye=Cali.calibration()
# Pixel crop [left, top, right, bottom] applied to the RGB frame.
crop_box = [340,160,980,700]
# Output root for per-trial images and the label log.
# NOTE(review): the name `str` shadows the builtin str() for the rest of the script.
str = '/media/yang/Linux/Data/OriginalData/2-Fingers_Guided'
G = Predictor('/home/yang/Git/CobotBenchmark/Functions/checkpoint/fc_cnn(new)/Toys') # /ur5
# Warm up the camera and move the robot to its home pose with the gripper closed.
color_image,depth_image,infrared_L,infrared_R = camera.getImage()
rob.homing()
rob.close_gripper()
# Write the CSV header for the trial log.
# NOTE(review): mode 'aw' raises ValueError on Python 3; this script appears to
# target Python 2, where the extra flag was tolerated by fopen().
with open(str+'/TrainingRecord', 'aw') as f:
    f.write('i,x,y,rz,success_label'+'\n')
# --- Data-collection loop: one grasp attempt per iteration -------------------
for i in range(999):
    # Capture the "before" frame and create this trial's output folder.
    color_image1,depth_image,infrared_L,infrared_R = camera.getImage()
    if not os.path.exists(str+"/"+"TrialCount_%01d"%i):
        os.makedirs(str+"/"+"TrialCount_%01d"%i)
    strnew=str+"/"+"TrialCount_%01d"%i
    cv2.imwrite(strnew +"/"+"image_01.jpg", color_image1)
    # Re-load through PIL and crop to the region the network expects.
    img = Image.open(strnew+'/image_01.jpg').crop(crop_box)
    img.save(strnew +"/"+"Croppedimage.jpg")
    rob.open_gripper()
    # Run the fully-convolutional grasp predictor on the cropped patch.
    image = np.array(img).reshape(1,(crop_box[3]-crop_box[1]),(crop_box[2]-crop_box[0]),3)
    y_value = G.eval(image,(crop_box[3]-crop_box[1]),(crop_box[2]-crop_box[0]))
    angle_patch,probability_patch = G.parse_eval(y_value)
    # Collect indices of patches scored above 0.5 by the network.
    probability_patch_sub = []
    for j in range(len(probability_patch)):
        if probability_patch[j] > 0.5:
            probability_patch_sub.append(j)
    if len(probability_patch_sub) == 0:
        # No confident patch: sample one of the 5 highest-scoring patches.
        # NOTE(review): random.sample() requires a sequence; on Python 3 `map`
        # returns an iterator, so this line would raise TypeError there.
        max_num_list = map(probability_patch.index, heapq.nlargest(5, probability_patch))
        b = random.sample(max_num_list,1)
        idx = b[0]
    else:
        idx = choice(probability_patch_sub)
    # Map the flat patch index back to image-pixel coordinates, then add the
    # crop offset.  The constants 238 and 111 presumably encode the network's
    # receptive field / stride -- TODO confirm against the Predictor code.
    y = (int(idx/y_value.shape[2]))*((crop_box[3]-crop_box[1]-238)/(y_value.shape[1]-1))+111+crop_box[1]
    x = (idx%y_value.shape[2])*((crop_box[2]-crop_box[0]-238)/(y_value.shape[2]-1))+111+crop_box[0]
    # Random wrist rotation: the centre of one of 18 bins over (-1.57, 1.57).
    rz = (-1.57 + (random.sample(range(18),1)[0]+0.5)*(1.57/9))
    print(x,y,rz)
    # Convert pixel coordinates to robot coordinates and execute the grasp.
    x,y = hand_eye.cam2rob(x,y)
    rob.move([x,y,0.25],rz)
    rob.grasping()
    rob.homing()
    time.sleep(5)
    # Random drop pose inside the workspace, 0.1 inset from each bound.
    x2=random.uniform(WorkSpace[0]+0.1,WorkSpace[2]-0.1)
    y2=random.uniform(WorkSpace[1]-0.1,WorkSpace[3]+0.1)
    rz2=random.uniform(-1.57,1.57)
    # Capture the "after" frame and label the grasp by comparing the frames.
    color_image2,depth_image,infrared_L,infrared_R = camera.getImage()
    cv2.imwrite(strnew +"/"+"image_02.jpg", color_image2)
    grasp_label = success_label(color_image1,color_image2)
    if grasp_label == 1:
        # Successful grasp: drop the object at the random pose, then reset.
        rob.move([x2, y2, 0.20],rz2)
        rob.move([x2, y2, 0.15],rz2)
        rob.open_gripper()
        rob.homing()
        rob.close_gripper()
    # Append this trial's grasp pose and outcome to the log.
    with open(str+'/TrainingRecord', 'aw') as f:
        f.write("%d,"%i+"%f,"%x+"%f,"%y+"%f,"%rz+"%d"%grasp_label+'\n')
"11510135@mail.sustech.edu.cn"
] | 11510135@mail.sustech.edu.cn |
ee00695458ca20ce01b296e151090b8079029e0b | d922f18fe53cf7738d6d536e5c9bfd8f3cb7e202 | /F20_3900_851_Group5/asgi.py | 4030e48797e6a9f32472657de2a2952339cf06e9 | [] | no_license | zhcsteven/public | 0b4f075c7f42715c1b892afabfeea5bbfdf834eb | 94358b7a256167a542c34f92f149dd93610ec1ec | refs/heads/master | 2023-01-01T03:46:48.557855 | 2020-10-23T19:59:07 | 2020-10-23T19:59:07 | 306,734,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | """
ASGI config for F20_3900_851_Group5 project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at this project's settings module before the app is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'F20_3900_851_Group5.settings')
# Module-level ASGI callable that ASGI servers (daphne/uvicorn) import.
application = get_asgi_application()
| [
"zhcsteven@github.com"
] | zhcsteven@github.com |
5494e226e3046e745556744498dec3d1c5ebf016 | b8b233a47ee590862f3e4e02aabd22c429c34de2 | /CLIvmtracker/host.py | 8804975187538b5e921fa23e1f40354c433bc7a6 | [] | no_license | gskeats/WakeHacks19 | 91ce7fa0e94cc8eb13492574b98706de5ef2e067 | 73fc0714df201dd0daea89167bf90596b978e82e | refs/heads/master | 2020-05-03T10:28:06.846924 | 2019-04-04T15:36:12 | 2019-04-04T15:36:12 | 178,579,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,960 | py | import subprocess
class host:
    """One known-hosts entry: address info, SSH key, and optional metadata."""

    def __init__(self, name, key_type_string, key_string, descrip=None):
        # `name` is either "<ip>" or "<domain>,<ip>" as stored in known_hosts.
        self.names = name
        if ',' in name:
            parts = name.split(',')
            self.domain_name = parts[0]
            self.ip_addr = parts[1]
        else:
            self.domain_name = None
            self.ip_addr = name
        self.key_type = key_type_string
        self.key = key_string
        self.description = descrip
        # Filled in the first time we actually connect to this host.
        self.username = None
class host_manager:
    """Maintains the list of known-hosts entries and the SSH actions on them.

    NOTE(review): file paths are hard-coded to one user's ~/.ssh directory —
    confirm whether they should be derived from the current user's home.
    """

    def __init__(self):
        self.host_list = []

    def load_host(self, *hosts):
        """Append every entry from the first positional iterable argument."""
        for item in hosts[0]:
            self.host_list.append(item)

    def delete_host(self, ip=None, domainName=None):
        """Remove the entry matching `ip` or `domainName`, if one exists."""
        entry = self.find_entry(ip, domainName)
        if entry is not None:
            self.host_list.remove(entry)
        return

    def find_entry(self, ip="", domainName=""):
        """Return the first entry whose domain name or IP matches, else None.

        Prompts interactively for an IP when called with no identifiers.
        NOTE(review): an entry whose domain_name is None matches a
        domainName=None lookup (e.g. via delete_host()) — confirm intent.
        """
        # Fixed: the original compared to "" with `is`, which relies on
        # string interning; use equality instead.
        if ip == "" and domainName == "":
            ip = input("ip address is: ")
        for entry in self.host_list:
            if entry.domain_name == domainName or entry.ip_addr == ip:
                return entry
        print("Entry not found, check that it does exist and you have entered its identifyng information correctly")
        return None

    def add_description(self, ip="", domainName=""):
        """Prompt for and attach a '#'-prefixed description to an entry."""
        entry = self.find_entry(ip, domainName)
        entry.description = "#" + input("Type the description for this entry here: ")
        return

    def write_known_hosts(self):
        """Rewrite the known_hosts file from the in-memory host list."""
        pipe = open("/Users/grahamskeats/.ssh/known_hosts", "w")
        for entry in self.host_list:
            pipe.write(entry.names + " ")
            pipe.write(entry.key_type + " ")
            pipe.write(entry.key)
            if entry.description is not None:
                pipe.write("\n")
                pipe.write(entry.description)
            if entry.username is not None:
                pipe.write(" **Username:" + entry.username + "**")
            pipe.write("\n")
        pipe.close()
        return

    def print_available(self):
        """Print every entry, including its description when present."""
        for entry in self.host_list:
            if entry.description is not None:
                print(entry.names + " " + entry.description)
            else:
                print(entry.names)

    def connect(self, host=None):
        """Prompt for a username and open an interactive SSH session."""
        host = self.find_entry(host)
        username = input("Username: ")
        host.username = username
        subprocess.call(['ssh', username + "@" + host.ip_addr])

    def readhostsfromfile(self):
        """Parse known_hosts (plus our '#' description lines) into host objects."""
        self.host_list = []
        known_hosts_full = subprocess.check_output(['cat', '/Users/grahamskeats/.ssh/known_hosts'])
        entry = []
        known_hosts_full = known_hosts_full.decode('utf-8')
        host_list = []
        line_split = known_hosts_full.split('\n')
        iter_splits = iter(line_split)
        for line in iter_splits:
            # Fixed: `line is ''` identity check replaced with equality.
            if line == '':
                continue
            if self.checkforcomment(line):
                # A comment line describes the entry parsed just before it.
                host_list[-1].description = line
                continue
            split_line = line.split()
            for split in split_line:
                entry.append(split)
                if '=' in split:
                    # Base64 keys end in '=' padding; treat that token as the
                    # end of one entry. NOTE(review): keys without padding
                    # would be mis-parsed — confirm.
                    new_host = host(entry[0], entry[1], entry[2])
                    entry = []
                    host_list.append(new_host)
        self.load_host(host_list)
        return host_list

    def checkforcomment(self, line):
        """Return True when the line is a '#' comment (our description lines)."""
        # Fixed: `line[0] is '#'` identity check replaced with equality.
        if line[0] == '#':
            return True
        else:
            return False

    def createnewconnection(self, ip=None, username=None):
        """SSH to a new machine (prompting as needed), then reload known_hosts."""
        if ip is None:
            ip = input("What is the ip address or domain name of the machine you would like to connect to: ")
        if username is None:
            username = input("What is your username: ")
        subprocess.call(['ssh', username + "@" + ip])
        self.readhostsfromfile()

    def checkduplicate(self, host):
        """Return True when an entry with the same IP already exists."""
        for entry in self.host_list:
            if host.ip_addr == entry.ip_addr:
                return True
        return False
"gnskeats@gmail.com"
] | gnskeats@gmail.com |
1966651437f609fc2be290d921ab2809c7cb13b0 | a73f9df0b7e797a55aa40ce29ca94e981c33543d | /zxcmovies/movie.py | 5b45156ef169449a910e18894d1e7d89a7edcd37 | [] | no_license | XinuxC/SpiderMan | c7884de144ad9fbdf9e786798e91bcfa71348eb7 | d2b193573b870881520bfed970d5c295eaf69e81 | refs/heads/master | 2021-09-04T02:56:45.838338 | 2018-01-15T01:01:18 | 2018-01-15T01:01:18 | 104,993,126 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,083 | py | # -*- coding: utf-8 -*-
import json
import os
import time
import csv
import requests
header = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
}
def spider_movie():
    """Yield douban movie records in which ACTOR directed or starred.

    Walks six pages (20 results each) of the douban search API and yields
    one dict per matching movie, with cast/director/genre lists joined
    by '/'.  Sleeps 2s between pages to be polite to the API.
    """
    ACTOR = '周星驰'
    # NOTE(review): the literal braces in q={周星驰} look like a missed
    # str.format call — confirm the intended query string.
    urls = ['http://api.douban.com/v2/movie/search?q={周星驰}&count=20&start=' + str(n) for n in range(0, 120, 20)]
    for url in urls:
        r = requests.get(url=url, headers=header)
        data = json.loads(r.text)
        for subject in data['subjects']:
            casts = [each.get('name') for each in subject.get('casts')]
            directors = [each.get('name') for each in subject.get('directors')]
            genres = subject.get('genres')
            if ACTOR in directors or ACTOR in casts:
                # Fixed: build a fresh dict per movie.  The original reused
                # one dict object across yields, so consumers holding
                # references saw earlier items overwritten.
                item = {
                    'directors': '/'.join(directors),
                    'casts': '/'.join(casts),
                    'genres': '/'.join(genres),
                    'movie_id': subject.get('id'),
                    'title': subject.get('title'),
                    'rate': subject.get('rating').get('average'),
                    'year': subject.get('year'),
                }
                yield item
        time.sleep(2)
movie_file = 'movies.csv'
# def write2file(movies):
# if os.path.exists('movies.json'):
# movie_ids = read_csv('movies.json')
#
# with open('movies.json','a',encoding='utf-8') as f:
# for movie in movies:
# if movie.get('movie_id') not in movie_ids:
# f.write(json.dumps(movie,ensure_ascii=False))
# f.write('\n')
# print("Write movie id:{} into file".format(movie.get('movie_id')))
# else:
# print("Movie id:{} already in file".format(movie.get('movie_id')))
def read_csv(movie_file):
    """Return the movie_id column of an existing CSV as a list of strings."""
    with open(movie_file) as csvfile:
        return [row['movie_id'] for row in csv.DictReader(csvfile)]
def write_csv(movies):
    """Append `movies` (iterable of dicts) to the CSV, skipping known ids.

    Fixes two defects in the original:
      * the header row was re-written on every call, so append mode produced
        duplicate header lines — now it is written only when the file is new;
      * the open() call used the literal 'movies.csv' instead of the
        module-level `movie_file` it checked with os.path.exists().
    """
    file_exists = os.path.exists(movie_file)
    # ids already on disk, so re-running the spider does not duplicate rows
    movie_ids = read_csv(movie_file) if file_exists else []
    with open(movie_file, 'a', newline='') as csvfile:
        MOVIES_FIELDS = ['title', 'rate', 'casts', 'genres',
                         'directors', 'movie_id', 'year',
                         ]
        writer = csv.DictWriter(csvfile, fieldnames=MOVIES_FIELDS)
        if not file_exists:
            writer.writeheader()
        for movie in movies:
            if movie.get('movie_id') not in movie_ids:
                writer.writerow(movie)
                print("Write movie id:{} into file".format(movie.get('movie_id')))
            else:
                print("Movie id:{} already in file".format(movie.get('movie_id')))
def main():
    # spider_movie() is a generator: results stream straight into the CSV.
    movies = spider_movie()
    write_csv(movies)
if __name__ == '__main__':
    main()
| [
"pishit2009@gmail.com"
] | pishit2009@gmail.com |
e174afa38ec2ea5f548eadf2273ad23fbf7cb7e9 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_105/324.py | 9ddec5819097ba9f1a61905d441b8271fd8d44f7 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,472 | py |
class Item(object):
    """DAG node: tracks its parents/children and finds its root ancestors."""

    def __init__(self, index=0):
        self.index = index
        self.parents = []
        self.childs = []

    def is_source(self):
        # "source" here means the node has more than one parent.
        return len(self.parents) > 1

    def is_dest(self):
        # "dest" here means the node has more than one child.
        return len(self.childs) > 1

    def get_dests(self):
        """Return root ancestors, one occurrence per distinct upward path."""
        if not self.parents:
            return [self]
        roots = []
        for parent in self.parents:
            roots.extend(parent.get_dests())
        return roots
# NOTE(review): this file is Python 2 (print statement, raw_input, xrange).
if __name__ == '__main__':
    T = int(raw_input())
    for test_index in xrange(1, T+1):
        N = int(raw_input())
        # items[0] is a dummy so nodes can be addressed 1..N directly.
        items = [Item(_) for _ in xrange(N+1)]
        for index in xrange(1, N+1):
            # Each input line: count Mi followed by Mi parent indices.
            nums = map(int, raw_input().split())
            Mi,Ii = nums[0], nums[1:]
            for ii in Ii:
                items[index].parents.append(items[ii])
                items[ii].childs.append(items[index])
        src_items = filter(lambda item: item.is_source(), items)
        dst_items = filter(lambda item: item.is_dest(), items)
        def check_item(item):
            # True when some root ancestor is reachable via two distinct
            # paths (it then appears more than once in get_dests()).
            dests = item.get_dests()
            for dest in set(dests):
                if dests.count(dest) > 1:
                    return True
            return False
        result = False
        for src_item in src_items:
            if check_item(src_item):
                result = True
                break
        print 'Case #%d: %s' % (test_index, 'Yes' if result else 'No')
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
e06d790514e028de8404d51db547b5b990b4f864 | 4a5d9f129d5129b34c55171c99f83f0893ae5c11 | /archives/migrations/0006_categorie_lien.py | 1d61623e1a6f57d121b4c3b2cf399d28cc058f6f | [
"MIT"
] | permissive | fromdanut/syndicat-riviere | ec097cf9bf9aec8829069a2a93d4750a36d87a39 | 0fd099524a2a79d0932dbf8b87f8232d470308ad | refs/heads/master | 2018-09-04T19:14:40.490656 | 2018-06-04T10:52:21 | 2018-06-04T10:52:21 | 103,665,673 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 526 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-15 06:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: re-adds the unique `lien` CharField on Categorie
    # (removed in 0005).  The 'default_link' default is only used to fill
    # existing rows during the migration (preserve_default=False).
    dependencies = [
        ('archives', '0005_remove_categorie_lien'),
    ]
    operations = [
        migrations.AddField(
            model_name='categorie',
            name='lien',
            field=models.CharField(default='default_link', max_length=30, unique=True),
            preserve_default=False,
        ),
    ]
| [
"remidelannoy@hotmail.com"
] | remidelannoy@hotmail.com |
c135d62f920dc56b65ff40f4fbe07eac168328ba | 5b6f2b0ff8828d247885204522a7fe4ad7136f7a | /test_arc4.py | fb3574f9ebf651e152ea1554a8cf92cf764e7598 | [
"MIT"
] | permissive | manicmaniac/arc4 | 5fdc292e3ac172a2e2817ff14b2d052604964cd5 | 6f0706a6f68cb84e419e8652d4196745268c9b3b | refs/heads/master | 2023-08-16T04:05:42.398404 | 2023-04-22T03:58:58 | 2023-04-22T03:58:58 | 149,815,580 | 28 | 5 | MIT | 2023-09-12T09:24:09 | 2018-09-21T20:40:18 | Python | UTF-8 | Python | false | false | 8,405 | py | try:
from setuptools.distutils.version import StrictVersion
except ImportError:
from distutils.version import StrictVersion
import doctest
import functools
import multiprocessing
import platform
import textwrap
import timeit
import unittest
import arc4
import setup
KEY = b'PYTHON3'
LOREM = b"""Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do \
eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim \
veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea \
commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit \
esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat \
cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est \
laborum."""
LOREM_ARC4 = b"""\xf0\xa8\x59\xec\xdf\x9d\xbd\x95\x52\x91\x66\x72\x50\x01\x0d\
\x3a\xac\x62\x10\xdc\x58\x0f\x49\x02\xd9\x45\x2a\xad\x3a\x2b\x79\xd5\x2b\x29\
\xe7\x16\xf1\x9c\x93\x58\xcd\xa9\x32\x87\xfc\x9f\x6e\x29\x14\x0a\x59\x12\x21\
\x89\x51\x49\xc7\x3f\x59\x78\x0b\x16\xb6\xb2\xc4\xc3\xc0\x61\xc4\xcd\xcf\x9e\
\xff\x34\x2c\xf2\x28\x14\xf8\xc9\x08\xf0\x1f\x2d\xfa\xe8\xbf\x77\xe0\xeb\xee\
\xa1\x51\xd4\xf3\x86\x66\x60\x1c\xb1\x3a\x14\x86\xf2\x6c\xe5\x47\xf8\xb5\x50\
\xad\xbc\x1c\x64\xeb\xbc\x52\x33\x60\x41\x58\x33\x6f\x58\x8c\xfd\x41\x1b\xb0\
\x05\xb3\xbc\x46\x37\xf3\xa4\x5e\x3e\x1f\x20\xe9\x00\x02\xcc\x31\x07\xe8\x65\
\xbb\x12\x97\x05\xcb\xfd\xba\x50\x9c\x59\x14\x49\xb4\x3c\x12\x2b\x47\x27\x5f\
\x30\x52\x57\xf4\xa2\x70\xc5\x7d\x4a\xf2\x92\x01\x5d\x02\x69\x1d\x74\xff\x43\
\xb1\x73\xb9\x28\xfe\x73\x62\x7f\xbd\xcd\xa1\x53\xa2\x1e\x28\x37\x19\xc4\x59\
\xbc\x81\x93\x79\x05\x13\x07\xc2\x43\xb3\xd1\x2a\x9d\xf7\x3c\xe7\x1e\x63\x4b\
\x70\xc7\xc2\xa6\x80\x31\xc7\xc5\x07\x64\x49\x40\x08\x7a\x4f\x4f\x90\x63\x88\
\x4d\x35\x8b\xd2\x48\xe1\xc2\xfc\xa2\xb5\x47\xca\xaf\x75\x36\x31\x22\xa8\x45\
\x5d\x0f\x03\xb7\xd5\x3b\xff\x47\xbc\x6f\xe0\xa3\x49\xfb\x63\xbe\xfc\xa7\x60\
\x59\x43\x50\x8e\x95\x76\x68\xda\xfa\xdb\x9b\x96\x9d\x1b\x6d\xac\x14\x2c\x12\
\x29\xfd\xf0\xaf\xc4\xba\x12\xdf\x83\xd9\xae\xcc\x19\x80\xfd\xc2\x36\x32\xf4\
\x01\x0b\x6d\xeb\x9e\xff\x74\x2e\xfe\x58\xc7\x91\xa9\x75\xf5\xa0\xc0\x5d\xb7\
\x5e\x6a\x71\x5a\x9c\xd3\x98\xca\x6c\xae\x80\xd6\x0d\xb9\x84\x63\x7f\xdf\x31\
\x1b\x5c\x4f\x07\x4c\x9b\x23\x24\x43\xce\x9e\x4d\x29\x5f\xb9\x3a\x57\x0f\x18\
\xf5\xa0\x5a\x94\x88\xfa\x55\x64\xca\x4f\x74\x9f\x71\x33\xa5\x6d\xd4\xd8\x5a\
\xdd\x51\x66\xad\xf5\x37\xad\x44\xe9\x20\xf2\x31\xd3\x9a\xef\x3e\x47\xd1\x20\
\x88\x2c\x21\x74\xed\xa3\x5c\x7c\xa7\x03\x42\x4d\x21\x50\xe2\x9b\x2b\x99\x88\
\x1e\xd4\x53\xda\x1c\xa2\xc7\x5b\xb5\x94\x5d\xc0"""
def raises_deprecation_warning(f):
    """Decorator: assert the wrapped test method emits a DeprecationWarning."""
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        with self.assertWarns(DeprecationWarning):
            return f(self, *args, **kwargs)
    return wrapper
def raises_deprecation_warning_if(condition):
    """Apply the warning-asserting decorator only when `condition` holds;
    otherwise return an identity decorator."""
    return raises_deprecation_warning if condition else (lambda f: f)
def expected_failure_if(condition):
    """Mark a test as an expected failure only when `condition` is true."""
    return unittest.expectedFailure if condition else (lambda f: f)
class TestARC4(unittest.TestCase):
    """Unit tests for the arc4 C-extension: module metadata, constructor
    argument handling, encrypt/decrypt round trips, and GIL behavior."""
    def test_arc4_module_has_doc(self):
        self.assertIsNotNone(arc4.__doc__)
    def test_arc4_version_is_strict_version(self):
        try:
            StrictVersion(arc4.__version__)
        except (AttributeError, ValueError) as e:
            self.fail(e)
    def test_arc4_version_is_equal_to_setup_version(self):
        self.assertEqual(arc4.__version__, setup.VERSION)
    def test_arc4_class_has_doc(self):
        self.assertIsNotNone(arc4.ARC4.__doc__)
    def test_init_with_zero_length_key_raises_error(self):
        with self.assertRaisesRegex(ValueError, r'^invalid key length: 0$'):
            arc4.ARC4(b'')
    def test_init_with_bytes_returns_instance(self):
        self.assertIsInstance(arc4.ARC4(b'spam'), arc4.ARC4)
    @raises_deprecation_warning
    def test_init_with_unicode_returns_instance(self):
        self.assertIsInstance(arc4.ARC4(u'スパム'), arc4.ARC4)
    @raises_deprecation_warning_if(platform.python_implementation() == 'PyPy')
    def test_init_with_bytearray_raises_type_error(self):
        with self.assertRaisesRegex(
                TypeError,
                r'argument 1 must be .*, not bytearray'):
            arc4.ARC4(bytearray([0x66, 0x6f, 0x6f]))
    @raises_deprecation_warning_if(platform.python_implementation() == 'PyPy')
    def test_init_with_memoryview_raises_type_error(self):
        pattern = r'^argument 1 must be .*, not memoryview$'
        with self.assertRaisesRegex(TypeError, pattern):
            arc4.ARC4(memoryview(b'spam'))
    @expected_failure_if(platform.python_implementation() == 'PyPy')
    def test_encrypt_has_doc(self):
        self.assertIsNotNone(arc4.ARC4.encrypt.__doc__)
    def test_encrypt_with_long_bytes_returns_encrypted_bytes(self):
        cipher = arc4.ARC4(KEY)
        self.assertEqual(LOREM_ARC4, cipher.encrypt(LOREM))
    def test_encrypt_multiple_times_returns_encrypted_bytes(self):
        # RC4 is a stream cipher: encrypting byte-by-byte must give the same
        # keystream output as encrypting the whole buffer at once.
        cipher = arc4.ARC4(KEY)
        encrypted = b''
        for c in LOREM:
            if isinstance(c, int):
                c = chr(c).encode('utf-8')
            encrypted += cipher.encrypt(c)
        self.assertEqual(LOREM_ARC4, encrypted)
    @raises_deprecation_warning
    def test_encrypt_with_unicode_returns_encrypted_bytes(self):
        cipher = arc4.ARC4(b'spam')
        self.assertEqual(b'Q\xcd\xb1!\xecg', cipher.encrypt(u'ハム'))
    def test_encrypt_with_bytearray_raises_type_error(self):
        cipher = arc4.ARC4(b'spam')
        with self.assertRaisesRegex(
                TypeError,
                r'^crypt\(\) argument 1 must be .*, not bytearray$'):
            cipher.encrypt(bytearray(b'ham'))
    def test_encrypt_with_memoryview_raises_type_error(self):
        cipher = arc4.ARC4(b'spam')
        with self.assertRaisesRegex(
                TypeError,
                r'^crypt\(\) argument 1 must be .*, not memoryview$'):
            cipher.encrypt(memoryview(b'ham'))
    def test_encrypt_with_list_raises_type_error(self):
        cipher = arc4.ARC4(b'spam')
        message = (r'^crypt\(\) argument 1 must be read-only bytes-like ' +
                   r'object, not list')
        with self.assertRaisesRegex(TypeError, message):
            cipher.encrypt([0x68, 0x61, 0x6d])
    @unittest.skip('takes long time and a bit flaky depends on environment')
    @unittest.skipIf(multiprocessing.cpu_count() <= 1, 'needs multiple cores')
    def test_encrypt_thread_performance(self):
        # Sanity check that encrypt() releases the GIL: the multi-threaded
        # run should finish faster than the equivalent single-threaded one.
        large_text = 'a' * 10 * 1024 * 1024
        number = 100
        cpu_count = multiprocessing.cpu_count()
        setup = textwrap.dedent("""\
            from arc4 import ARC4
            from threading import Thread
            def target():
                ARC4({key!r}).encrypt({text!r})
            """.format(key=KEY, text=large_text))
        # Create unused threads to make the similar conditions
        # between single and multiple threads.
        code = textwrap.dedent("""\
            threads = []
            for i in range({}):
                thread = Thread(target=target)
                threads.append(thread)
            for thread in threads:
                pass
            target()
            """.format(cpu_count))
        single_thread_elapsed_time = timeit.timeit(code, setup, number=number)
        code = textwrap.dedent("""\
            threads = []
            for i in range({}):
                thread = Thread(target=target)
                threads.append(thread)
                thread.start()
            for thread in threads:
                thread.join()
            """.format(cpu_count))
        multi_thread_elapsed_time = timeit.timeit(code, setup,
                                                  number=number // cpu_count)
        self.assertLess(multi_thread_elapsed_time, single_thread_elapsed_time)
    @expected_failure_if(platform.python_implementation() == 'PyPy')
    def test_decrypt_has_doc(self):
        self.assertIsNotNone(arc4.ARC4.decrypt.__doc__)
    def test_decrypt_with_long_bytes_returns_decrypted_bytes(self):
        cipher = arc4.ARC4(KEY)
        self.assertEqual(LOREM, cipher.decrypt(LOREM_ARC4))
def load_tests(loader, tests, ignore):
    """unittest hook: add the module's and README's doctests to the suite."""
    extra_suites = (doctest.DocTestSuite(arc4), doctest.DocFileSuite('README.rst'))
    for suite in extra_suites:
        tests.addTests(suite)
    return tests
| [
"rito.0305@gmail.com"
] | rito.0305@gmail.com |
de0ac5a347c783cdbbd4f0a4d1abc51e230dce90 | c3646f146bc5e2478f07da7ec004e1b783f48e99 | /Server_Final.py | 8e28c0e287d9d43cbd56432848154b1ccb4f8e9f | [] | no_license | comnet14/Chat_Program_Final | c6e0408227ce416c538b5802fbb3c8c0dc556ae4 | e12ebe3be5b09e67d1536bc07d978c419fd5b1f5 | refs/heads/master | 2020-06-05T06:55:40.587548 | 2015-06-07T16:22:01 | 2015-06-07T16:22:01 | 37,023,174 | 0 | 0 | null | null | null | null | UHC | Python | false | false | 2,841 | py | # socket 과 select 모듈 임포트
from socket import *
from select import *
import sys
from time import ctime
# 호스트, 포트와 버퍼 사이즈를 지정
HOST = ''
PORT = 56789
BUFSIZE = 1024
ADDR = (HOST, PORT)
# 소켓 객체생성
serverSocket = socket(AF_INET, SOCK_STREAM)
# 서버 정보를 바인딩
serverSocket.bind(ADDR)
# 요청을 기다림(listen)
serverSocket.listen(10)
connection_list = [serverSocket]
print('==============================================')
print('Start Server. Waitin connection to %s port....' % str(PORT))
print('==============================================')
# 무한 루프를 시작
while connection_list:
try:
print('Waiting Request...')
# select 로 요청을 받고, 10초마다 블럭킹을 해제하도록 함
read_socket, write_socket, error_socket = select(connection_list, [], [], 10)
for sock in read_socket:
# 새로운 접속
if sock == serverSocket:
clientSocket, addr_info = serverSocket.accept()
connection_list.append(clientSocket)
print('[!] [%s] Client (%s) has connected.' % (ctime(), addr_info[0]))
# 클라이언트로 응답을 돌려줌
for socket_in_list in connection_list:
if socket_in_list != serverSocket and socket_in_list != sock:
try:
socket_in_list.send('[%s] Client has connected to room' % ctime())
except Exception as e:
socket_in_list.close()
connection_list.remove(socket_in_list)
# 접속한 사용자(클라이언트)로부터 새로운 데이터 받음
else:
data = sock.recv(BUFSIZE)
if data:
print('[%s] Got data from Client...' % ctime())
for socket_in_list in connection_list:
if socket_in_list != serverSocket and socket_in_list != sock:
try:
socket_in_list.send('[%s] %s' % (ctime(), data))
print('[%s] Sending data to Client...' % ctime())
except Exception as e:
print(e.message)
socket_in_list.close()
connection_list.remove(socket_in_list)
continue
else:
connection_list.remove(sock)
sock.close()
print('[!][%s] Disconnected...' % ctime())
except KeyboardInterrupt:
# 종료하기
serverSocket.close()
sys.exit()
| [
"skdbsxir@naver.com"
] | skdbsxir@naver.com |
1c506a903e67d279b3fce475941803929704354b | 31312e5019ce3efd927216b737808131ce208265 | /PyPoll_Challenge.py | b3cd0b70409cc507bc5187ad17ce29801192f2c1 | [] | no_license | sabrinajc/Election_Analysis | 32605b9736425ed6bf00b84a499865d4ad940427 | 4f0f27733dedb3835c8596bd3f8b70a1cad82df1 | refs/heads/master | 2022-11-29T11:42:52.122278 | 2020-08-03T02:03:44 | 2020-08-03T02:03:44 | 283,359,748 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,415 | py | # -*- coding: UTF-8 -*-
"""PyPoll Homework Challenge Solution."""
# Add our dependencies.
import csv
import os
# Add a variable to load a file from a path.
file_to_load = os.path.join("Resources","election_results.csv")
# Add a variable to save the file to a path.
file_to_save = os.path.join("analysis", "election_analysis.txt")
# Initialize a total vote counter.
total_votes = 0
# Candidate Options and candidate votes.
candidate_options = []
candidate_votes = {}
# 1: Create a county list and county votes dictionary.
county_options = []
county_votes = {}
# Track the winning candidate, vote count and percentage
winning_candidate = ""
winning_count = 0
winning_percentage = 0
# 2: Track the largest county and county voter turnout.
winning_county = ""
winning_county_count = 0
winning_county_percentage = 0
# Read the csv and convert it into a list of dictionaries
with open(file_to_load) as election_data:
reader = csv.reader(election_data)
# Read the header
header = next(reader)
# For each row in the CSV file.
for row in reader:
# Add to the total vote count
total_votes = total_votes + 1
# Get the candidate name from each row.
candidate_name = row[2]
# 3: Extract the county name from each row.
county_name = row[1]
# If the candidate does not match any existing candidate add it to
# the candidate list
if candidate_name not in candidate_options:
# Add the candidate name to the candidate list.
candidate_options.append(candidate_name)
# And begin tracking that candidate's voter count.
candidate_votes[candidate_name] = 0
# Add a vote to that candidate's count
candidate_votes[candidate_name] += 1
# 4a: Write a decision statement that checks that the
# county does not match any existing county in the county list.
if county_name not in county_options:
# 4b: Add the existing county to the list of counties.
county_options.append(county_name)
# 4c: Begin tracking the county's vote count.
county_votes[county_name] = 0
# 5: Add a vote to that county's vote count.
county_votes[county_name] += 1
# Save the results to our text file.
with open(file_to_save, "w") as txt_file:
# Print the final vote count (to terminal)
election_results = (
f"\nElection Results\n"
f"-------------------------\n"
f"Total Votes: {total_votes:,}\n"
f"-------------------------\n\n"
f"County Votes:\n")
print(election_results, end="")
txt_file.write(election_results)
# 6a: Write a repetition statement to get the county from the county dictionary.
for county_name in county_votes:
# 6b: Retrieve the county vote count.
county_vote_count = county_votes[county_name]
# 6c: Calculate the percent of total votes for the county.
county_vote_percentage = float(county_vote_count) / float(total_votes) * 100
county_results = (f"{county_name}: {county_vote_percentage:.1f}% ({county_vote_count:,})\n")
# 6d: Print the county results to the terminal.
print(county_results)
# 6e: Save the county votes to a text file.
txt_file.write(county_results)
# 6f: Write a decision statement to determine the winning county and get its vote count.
if (county_vote_count > winning_county_count) and (county_vote_percentage > winning_county_percentage):
winning_county_count = county_vote_count
winning_county = county_name
winning_county_percentage = county_vote_percentage
# 7: Print the county with the largest turnout to the terminal.
winning_county_summary = (
f"\n-------------------------\n"
f"Largest County Turnout: {winning_county}\n"
f"-------------------------\n")
print(winning_county_summary)
# 8: Save the county with the largest turnout to a text file.
txt_file.write(winning_county_summary)
# Save the final candidate vote count to the text file.
for candidate_name in candidate_votes:
# Retrieve vote count and percentage
votes = candidate_votes.get(candidate_name)
vote_percentage = float(votes) / float(total_votes) * 100
candidate_results = (
f"{candidate_name}: {vote_percentage:.1f}% ({votes:,})\n")
# Print each candidate's voter count and percentage to the
# terminal.
print(candidate_results)
# Save the candidate results to our text file.
txt_file.write(candidate_results)
# Determine winning vote count, winning percentage, and candidate.
if (votes > winning_count) and (vote_percentage > winning_percentage):
winning_count = votes
winning_candidate = candidate_name
winning_percentage = vote_percentage
# Print the winning candidate (to terminal)
winning_candidate_summary = (
f"-------------------------\n"
f"Winner: {winning_candidate}\n"
f"Winning Vote Count: {winning_count:,}\n"
f"Winning Percentage: {winning_percentage:.1f}%\n"
f"-------------------------\n")
print(winning_candidate_summary)
# Save the winning candidate's name to the text file
txt_file.write(winning_candidate_summary)
| [
"hewang@hedeMacBook-Pro.local"
] | hewang@hedeMacBook-Pro.local |
8382bb673699816c9d1b5f3f844adeedc5044dfa | 0bdbeac4d853a3e53cfc8237546c784c0f6ddc44 | /app/modules/turtleGrapher/turtle_utils.py | 2d2e20115683455d293deb8328ee1134e76f308f | [
"Apache-2.0"
] | permissive | jamez-eh/backend | 5129e8400193d82f54d81e6008e183a97c752857 | bec1a7043c28be6e3ec12b8bc55ae1bb0d1b7699 | refs/heads/master | 2021-01-25T04:35:47.195251 | 2017-05-30T22:31:29 | 2017-05-30T22:31:29 | 93,452,777 | 0 | 0 | null | 2017-06-21T21:17:20 | 2017-06-05T22:30:17 | Python | UTF-8 | Python | false | false | 3,475 | py | import urlparse
import config # this is the config.py
from rdflib import Namespace, URIRef, Literal
def generate_hash(filename):
    """Return an order-independent SHA-1 hex digest of the file's lines.

    Lines are sorted before hashing so files with the same lines in a
    different order hash identically.
    """
    from hashlib import sha1
    # the 'b' isn't needed less you run this on Windows
    with open(filename, 'rb') as f:
        # repr() of the sorted line list is a deterministic, ASCII-only
        # serialization on both Python 2 and 3 (identical to str() on py2,
        # so py2 digests are unchanged); encoding it fixes the Python 3
        # TypeError the old code hit — sha1() requires bytes, not str.
        return sha1(repr(sorted(f.readlines())).encode('ascii')).hexdigest()
def slugify(value):
    """
    Adapted from Django's slugify.
    Normalizes the string to ASCII, strips characters outside
    [word chars, whitespace, '/', '.', ':', '-'], and collapses runs of
    whitespace/hyphens into single hyphens.  (Does NOT lowercase.)
    """
    import unicodedata
    import re
    # NOTE(review): `unicode` only exists on Python 2 — this function
    # fails with NameError on Python 3; confirm the supported interpreter.
    value = unicode(value)
    # Decompose accents, then drop anything that can't be ASCII-encoded.
    value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
    value = unicode(re.sub('[^\w\s\/\./:-]', '', value).strip())
    value = unicode(re.sub('[-\s]+', '-', value))
    return value
def generate_uri(uri, s=''):
    """
    Takes a string as one would define for .ttl files and returns a URI for rdflib.
    Args:
        uri (str): a string following .ttl convention for a URI
        ex. g:Identifier as shorthand for http://www.biointerchange.org/gfvo#Identifier
        s (str): optional suffix, slugified and appended when `uri` is a URIRef
    Returns:
        (rdflib.URIRef) with URI needed to add to rdflib.Graph
    """
    # Already a URIRef: append the slugified suffix and rewrap.
    if isinstance(uri, URIRef):
        return URIRef(str(uri) + slugify(s))
    # A raw URL string: wrap it directly.
    if type(uri) is str and 'http' in uri:
        return URIRef(uri)
    # Otherwise treat it as "prefix:postfix" shorthand.
    parts = uri.split(':')
    prefix = parts[0]
    postfix = slugify(parts[1])
    if prefix == '':
        # The bare ':name' form maps to the root namespace.
        return URIRef(config.namespaces['root'] + postfix)
    return URIRef(config.namespaces[prefix] + postfix)
def uri_to_basename(uri):
    '''
    This does the reverse of generate_uri(). Converts a rdflib.term.URIRef back to its base.
    ex. rdflib.term.URIRef(u'https://www.github.com/superphy#4eb02f5676bc808f86c0f014bbce15775adf06ba)
    gives 4eb02f5676bc808f86c0f014bbce15775adf06ba
    Args:
        uri(rdflib.term.URIRef): a URIRef object
    Returns:
        (str): just the basestring (ie. everything after the : in rdf syntax)
    '''
    s = str(uri)
    # Fix: the old code iterated namespace *keys* and used str.strip(),
    # which removes any characters from that set at BOTH ends of the
    # string and could mangle the basename.  Remove the matching
    # namespace URL prefix instead.
    for namespace in config.namespaces.values():
        if s.startswith(namespace):
            return s[len(namespace):]
    # Fallback: default to '/' splitting.
    # This will fail if a path-style uri is used (as before).
    return s.split('/')[-1]
def link_uris(graph, uri_towards_spfyid, uri_towards_marker):
    '''
    Links two vertices in both directions using the transitive :hasPart /
    :isFoundIn predicates, as required for inferencing/queries in Blazegraph.
    Blazegraph hangs with owl:SymmetricProperty, so :hasPart links everything
    :spfyId -> :Marker and :isFoundIn links everything :Marker -> :spfyId
    (both declared owl:TransitiveProperty in generate_graph()).  Queries must
    therefore know which direction they are traversing.
    '''
    has_part = generate_uri(':hasPart')
    is_found_in = generate_uri(':isFoundIn')
    # spfyId-side vertex -> marker-side vertex
    graph.add((uri_towards_spfyid, has_part, uri_towards_marker))
    # and the reverse direction
    graph.add((uri_towards_marker, is_found_in, uri_towards_spfyid))
    return graph
| [
"kevin.kent.le@gmail.com"
] | kevin.kent.le@gmail.com |
4c83cd3d6d774829a84cb651754cc866c1ea95be | f9bf72c344413343242ad8a9f6c4b0abc767b334 | /scripts/add_book.py | 485b3666e9c844f60461b395decc4a2b5f069199 | [
"MIT"
] | permissive | gskaggs/Refresh | 494957f2e809e6ceb41ce53a35fb1816c8778bd8 | f1d0a584b12d8c61db512742f457d8c585f8708e | refs/heads/main | 2023-02-27T09:43:23.991791 | 2021-01-01T07:57:36 | 2021-01-01T07:57:36 | 320,148,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,278 | py | #! /usr/bin/python3
from io_utils import load_data, save_data, json_path, multiline_input
from card_utils import note_card, quote_card, flash_card
def get_book_data():
    """Interactively collect a book's metadata and build its card list.

    Returns a dict with 'title' and 'cards' (thesis flash card, one note
    card per takeaway line, one quote card per quote line, and one flash
    card per named abstraction).
    """
    title = input('Title:\n')
    thesis = input('Thesis:\n')
    defns = []
    print('Abstractions')
    while True:
        name = input('Name:\n')
        # An empty name ends the abstraction-entry loop.
        if len(name) == 0:
            break
        defns.append((name, multiline_input('Definition:')))
    notes = multiline_input('Takeaways:')
    quotes = multiline_input('Quotes:')
    cards = []
    cards.append(flash_card('Thesis', thesis))
    # One card per non-empty line of the multiline inputs.
    for note in notes.split('\n'):
        if len(note) > 0:
            cards.append(note_card(note))
    for quote in quotes.split('\n'):
        if len(quote) > 0:
            cards.append(quote_card(quote))
    for defn in defns:
        cards.append(flash_card(*defn))
    data = {}
    data['title'] = title
    data['cards'] = cards
    return data
def add_book(data):
    """Prompt for a new book interactively and append it to `data`."""
    data.append(get_book_data())
if __name__ == '__main__':
    print('This script is archived and only included if someone wants to develop on it.')
    exit()
    # Unreachable below exit(): kept for reference after archival.
    user_data, book_data = load_data(json_path)
    add_book(book_data)
    save_data(user_data, book_data, json_path)
| [
"grant.skaggs@outlook.com"
] | grant.skaggs@outlook.com |
109a875760f5fc39260fd4abcf0b9b11c346051b | 7950c4faf15ec1dc217391d839ddc21efd174ede | /explore/2020/september/Evaluate_Division.1.py | 65d4246ab245ebe5ad135c0ae57a97572fd70b22 | [] | no_license | lixiang2017/leetcode | f462ecd269c7157aa4f5854f8c1da97ca5375e39 | f93380721b8383817fe2b0d728deca1321c9ef45 | refs/heads/master | 2023-08-25T02:56:58.918792 | 2023-08-22T16:43:36 | 2023-08-22T16:43:36 | 153,090,613 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 675 | py | '''
Floyd
You are here!
Your runtime beats 27.33 % of python submissions.
'''
class Solution(object):
def calcEquation(self, edges, weights, pairs):
graph = collections.defaultdict(lambda: collections.defaultdict(lambda: float('inf')))
for (i, j), weight in itertools.izip(edges, weights):
graph[i][i], graph[i][j], graph[j][i], graph[j][j] = 1., weight, 1. / weight, 1.
for mid in graph:
for i in graph[mid]:
for j in graph[mid]:
graph[i][j] = min(graph[i][j], graph[i][mid] * graph[mid][j])
return [graph[i][j] if graph[i][j] < float('inf') else -1. for i, j in pairs]
| [
"838255715@qq.com"
] | 838255715@qq.com |
7726a4a36d1e204ff71e6eba409b084dffa76440 | b0be938d9a8059709d1f7f411c561773e52648bd | /doclink/clients/requests_.py | aca1c0057270ef3c7589edc276ff4d60d077080d | [
"MIT"
] | permissive | Luoyufu/doclink | 83c10f485675e0c6ba4cf8c3a2be9ffc0512414d | 41e57f7e9891f47579cf9131000d87bb8cc09d05 | refs/heads/master | 2022-12-10T06:49:45.177895 | 2018-03-04T15:06:17 | 2018-03-04T15:06:17 | 118,544,658 | 4 | 0 | MIT | 2022-12-08T14:59:06 | 2018-01-23T02:18:12 | Python | UTF-8 | Python | false | false | 5,319 | py | # -*- coding: utf-8 -*-
import atexit
import os
import requests
from requests_toolbelt import MultipartEncoder
from six import string_types
class RequestsClient(object):
optional_args = (
'params', 'data', 'headers', 'cookies', 'files',
'auth', 'timeout', 'allow_redirects', 'proxies',
'hooks', 'stream', 'verify', 'cert', 'json',
'multipart')
    def __init__(self, session=None):
        """Wrap a requests.Session; create one if none is given.

        A session we create ourselves is registered to be closed at
        interpreter exit, since no caller owns it.
        """
        if session is None:
            session = requests.Session()
            atexit.register(session.close)
        self._session = session
    @classmethod
    def _prepare_optional_args(cls, sending_kwargs, request_meta):
        """Copy every optional arg present in `request_meta` into `sending_kwargs`.

        'auth', 'files' and 'multipart' are translated into the objects
        requests expects; all other args are passed through unchanged.
        """
        for arg in cls.optional_args:
            value = request_meta.get(arg)
            if value is not None:
                if arg == 'auth':
                    sending_kwargs['auth'] = cls._create_auth_arg(value)
                elif arg == 'files':
                    files_arg = cls._create_files_arg(value)
                    if files_arg:
                        sending_kwargs['files'] = files_arg
                elif arg == 'multipart':
                    multipart_arg = cls._create_multipart_arg(value)
                    if multipart_arg:
                        encoder = MultipartEncoder(multipart_arg)
                        sending_kwargs['data'] = encoder
                        # the multipart body carries its boundary in Content-Type
                        headers = sending_kwargs.setdefault('headers', {})
                        headers['Content-Type'] = encoder.content_type
                else:
                    sending_kwargs[arg] = value
@classmethod
def _get_sending_kwargs(cls, request_meta):
sending_kwargs = {}
sending_kwargs.update(
method=request_meta['method'],
url=request_meta.get_url(),
)
cls._prepare_optional_args(sending_kwargs, request_meta)
return sending_kwargs
@classmethod
def _create_files_arg(cls, files_meta):
"""Create files arg for requests.
Args:
files_meta (dict): Countain filed name and file_info mapping.
Returns:
A dict mapping field name to file_item for multipart/form-data
"""
def create_file_item(file_info):
"""Nested function to create a file item for files arg.
Args:
File_info: If it's a file_path str, open it as file_object.
Else, pass it to requests files arg.
Returns:
File instance or file_info tuple. For example:
open('report.xls', 'rb')
('report.xls', open('report.xls', 'rb'))
"""
if isinstance(file_info, string_types):
try:
return open(file_info, 'rb') # param is file_path
except (IOError, TypeError):
pass
return file_info
files_arg = {}
for field, file_infos in files_meta.items():
if isinstance(file_infos, list):
files_arg[field] = [create_file_item(file_info) for file_info in file_infos]
else:
files_arg[field] = create_file_item(file_infos)
return files_arg
@classmethod
def _create_auth_arg(cls, auth_meta):
if auth_meta['type'] == 'basic':
return requests.auth.HTTPBasicAuth(auth_meta['username'], auth_meta['password'])
else:
return requests.auth.HTTPDigestAuth(auth_meta['username'], auth_meta['password'])
@classmethod
def _create_multipart_arg(cls, multipart_meta):
"""Create a MultipartEncoder instance for multipart/form-data.
Requests_toolbelt will not try to guess file_name. To encode a file we need
to give file_name explicitly.
Args:
multipart_meta (dict): Map field name to multipart form-data value.
"""
def create_multipart_item(item_info):
"""Nested function to create a multipart item for files arg.
Args:
item_info: If it's a file_path str, open it as file_object as set file_name.
Else, pass it to requests_toolbelt MultipartEncoder.
Returns:
File instance or file_info tuple. For example:
('report.xls', open('report.xls', 'rb'))
"""
if isinstance(item_info, string_types):
try:
return (os.path.basename(item_info), open(item_info, 'rb')) # file_path
except (IOError, TypeError):
pass
try:
return (os.path.basename(item_info.name), item_info) # file_object
except AttributeError:
pass
return item_info
multipart_arg = {}
for field, item_infos in multipart_meta.items():
if isinstance(item_infos, list):
multipart_arg[field] = [create_multipart_item(item_info)
for item_info in item_infos]
else:
multipart_arg[field] = create_multipart_item(item_infos)
return multipart_arg
def request(self, request_meta):
sending_kwargs = self._get_sending_kwargs(request_meta)
return self._session.request(**sending_kwargs)
| [
"yufu_luo@163.com"
] | yufu_luo@163.com |
687a25694621f2e864b8c7dc5e552473ecff1887 | 241724e83f5c12ed9d7dd3b825dfe4e2b1b0f777 | /pde/pdes/base.py | 9cf1ac5b1fae6171c9a2c5dc4a00496021f9c523 | [
"MIT"
] | permissive | xuanxu/py-pde | d8be358ab76d4060b14afc74bc7d836591c6188e | de33d938aea8680eff872ae1b64569895662a248 | refs/heads/master | 2021-03-09T21:37:13.920717 | 2020-03-10T12:18:03 | 2020-03-10T12:18:03 | 246,382,909 | 0 | 0 | MIT | 2020-03-10T18:54:22 | 2020-03-10T18:54:22 | null | UTF-8 | Python | false | false | 11,150 | py | """
Base classes
.. codeauthor:: David Zwicker <david.zwicker@ds.mpg.de>
"""
from abc import ABCMeta, abstractmethod
import logging
from typing import Callable, Dict, Optional, TYPE_CHECKING # @UnusedImport
import numpy as np
from ..fields.base import FieldBase
from ..trackers.base import TrackerCollectionDataType
from ..tools.numba import jit
if TYPE_CHECKING:
from ..solvers.controller import TRangeType # @UnusedImport
class PDEBase(metaclass=ABCMeta):
    """ base class for solving partial differential equations """

    # None means subclasses must declare whether the rhs depends on time
    explicit_time_dependence: Optional[bool] = None

    def __init__(self, noise: float = 0):
        """
        Args:
            noise (float):
                Magnitude of the additive Gaussian white noise that is supported
                by default. If set to zero, a deterministic partial differential
                equation will be solved. If another noise structure is required
                the respective methods need to be overwritten.
        """
        self._logger = logging.getLogger(self.__class__.__name__)
        self.noise = noise

    @property
    def is_sde(self) -> bool:
        """ flag indicating whether this is a stochastic differential equation

        The :class:`BasePDF` class supports additive Gaussian white noise, whose
        magnitude is controlled by the `noise` property. In this case, `is_sde`
        is `True` if `self.noise != 0`.
        """
        # check for self.noise, in case __init__ is not called in a subclass
        return hasattr(self, 'noise') and self.noise != 0

    # subclasses must return the deterministic rhs of the PDE for `field` at time `t`
    @abstractmethod
    def evolution_rate(self, field: FieldBase, t: float = 0) \
        -> FieldBase: pass

    def _make_pde_rhs_numba(self, state: FieldBase) -> Callable:
        """ create a compiled function for evaluating the right hand side """
        # optional: subclasses may override this to enable the 'numba' backend
        raise NotImplementedError

    def make_pde_rhs(self, state: FieldBase, backend: str = 'auto') -> Callable:
        """ return a function for evaluating the right hand side of the PDE

        Args:
            state (:class:`~pde.fields.FieldBase`):
                An example for the state from which the grid and other
                information can be extracted
            backend (str): Determines how the function is created. Accepted
                values are 'python` and 'numba'. Alternatively, 'auto' lets the
                code decide for the most optimal backend.

        Returns:
            Function determining the right hand side of the PDE
        """
        if backend == 'auto':
            # prefer the compiled version; fall back to numpy when the
            # subclass did not implement _make_pde_rhs_numba
            try:
                result = self._make_pde_rhs_numba(state)
            except NotImplementedError:
                backend = 'numpy'
            else:
                result._backend = 'numba'  # type: ignore
                return result

        if backend == 'numba':
            result = self._make_pde_rhs_numba(state)
            result._backend = 'numba'  # type: ignore

        elif backend == 'numpy':
            state = state.copy()

            def evolution_rate_numpy(state_data, t: float):
                """ evaluate the rhs given only a state without the grid """
                state.data = state_data
                return self.evolution_rate(state, t).data

            result = evolution_rate_numpy
            result._backend = 'numpy'  # type: ignore

        else:
            raise ValueError(f'Unknown backend `{backend}`')

        return result

    def noise_realization(self, state: FieldBase, t: float = 0) -> FieldBase:
        """ returns a realization for the noise

        Args:
            state (:class:`~pde.fields.ScalarField`):
                The scalar field describing the concentration distribution
            t (float): The current time point

        Returns:
            :class:`~pde.fields.ScalarField`:
            Scalar field describing the evolution rate of the PDE
        """
        if self.noise:
            data = np.random.normal(scale=self.noise, size=state.data.shape)
            return state.copy(data=data, label='Noise realization')
        else:
            # deterministic case: return an all-zero field
            return state.copy(data=0, label='Noise realization')

    def _make_noise_realization_numba(self, state: FieldBase) -> Callable:
        """ return a function for evaluating the noise term of the PDE

        Args:
            state (:class:`~pde.fields.FieldBase`):
                An example for the state from which the grid and other
                information can be extracted

        Returns:
            Function determining the right hand side of the PDE
        """
        if self.noise:
            # capture plain Python values so numba can compile the closure
            noise_strength = float(self.noise)
            data_shape = state.data.shape

            @jit
            def noise_realization(state_data: np.ndarray, t: float):
                """ compiled helper function returning a noise realization """
                return noise_strength * np.random.randn(*data_shape)

        else:
            @jit
            def noise_realization(state_data: np.ndarray, t: float):
                """ compiled helper function returning a noise realization """
                return None

        return noise_realization  # type: ignore

    def _make_sde_rhs_numba(self, state: FieldBase) -> Callable:
        """ return a function for evaluating the noise term of the PDE

        Args:
            state (:class:`~pde.fields.FieldBase`):
                An example for the state from which the grid and other
                information can be extracted

        Returns:
            Function determining the right hand side of the PDE
        """
        evolution_rate = self._make_pde_rhs_numba(state)
        noise_realization = self._make_noise_realization_numba(state)

        @jit
        def sde_rhs(state_data: np.ndarray, t: float):
            """ compiled helper function returning a noise realization """
            return (evolution_rate(state_data, t),
                    noise_realization(state_data, t))

        return sde_rhs  # type: ignore

    def make_sde_rhs(self, state: FieldBase, backend: str = 'auto') \
            -> Callable:
        """ return a function for evaluating the right hand side of the SDE

        Args:
            state (:class:`~pde.fields.FieldBase`):
                An example for the state from which the grid and other
                information can be extracted
            backend (str): Determines how the function is created. Accepted
                values are 'python` and 'numba'. Alternatively, 'auto' lets the
                code decide for the most optimal backend.

        Returns:
            Function determining the deterministic part of the right hand side
            of the PDE together with a noise realization.
        """
        if backend == 'auto':
            try:
                sde_rhs = self._make_sde_rhs_numba(state)
            except NotImplementedError:
                backend = 'numpy'
            else:
                sde_rhs._backend = 'numba'  # type: ignore
                return sde_rhs

        if backend == 'numba':
            sde_rhs = self._make_sde_rhs_numba(state)
            sde_rhs._backend = 'numba'  # type: ignore

        elif backend == 'numpy':
            state = state.copy()

            def sde_rhs(state_data, t: float):
                """ evaluate the rhs given only a state without the grid """
                state.data = state_data
                return (self.evolution_rate(state, t).data,
                        self.noise_realization(state, t).data)

            sde_rhs._backend = 'numpy'  # type: ignore

        else:
            raise ValueError(f'Unknown backend `{backend}`')

        return sde_rhs

    def solve(self, state: FieldBase,
              t_range: "TRangeType",
              dt: float = None,
              tracker: TrackerCollectionDataType = ['progress', 'consistency'],
              method: str = 'auto',
              **kwargs):
        """ convenience method for solving the partial differential equation

        The method constructs a suitable solver
        (:class:`~pde.solvers.base.SolverBase`) and controller
        (:class:`~pde.controller.Controller`) to advance the state over the
        temporal range specified by `t_range`. To obtain full flexibility, it is
        advisable to construct these classes explicitly.

        Args:
            state (:class:`~pde.fields.base.FieldBase`):
                The initial state (which also defines the grid)
            t_range (float or tuple):
                Sets the time range for which the PDE is solved. If only a
                single value `t_end` is given, the time range is assumed to be
                `[0, t_end]`.
            dt (float):
                Time step of the chosen stepping scheme. If `None`, a default
                value based on the stepper will be chosen.
            tracker:
                Defines a tracker that process the state of the simulation at
                fixed time intervals. Multiple trackers can be specified as a
                list. The default value is ['progress', 'consistency'], which
                displays a progress bar and checks the state for consistency,
                aborting the simulation when not-a-number values appear.
            method (:class:`~pde.solvers.base.SolverBase` or str):
                Specifies a method for solving the differential equation. This
                can either be an instance of
                :class:`~pde.solvers.base.SolverBase` or a descriptive name
                like 'explicit' or 'scipy'. The valid names are given by
                :meth:`pde.solvers.base.SolverBase.registered_solvers`.
            **kwargs:
                Additional keyword arguments are forwarded to the solver class

        Returns:
            :class:`~pde.fields.base.FieldBase`:
            The state at the final time point.
        """
        from ..solvers.base import SolverBase

        if method == 'auto':
            # scipy chooses its own step size; an explicit step needs dt
            method = 'scipy' if dt is None else 'explicit'

        # create solver
        if callable(method):
            solver = method(pde=self, **kwargs)
            if not isinstance(solver, SolverBase):
                # NOTE(review): Logger.warn is a deprecated alias of warning
                self._logger.warn('Solver is not an instance of `SolverBase`. '
                                  'Specified wrong method?')
        else:
            solver = SolverBase.from_name(method, pde=self, **kwargs)

        # create controller
        from ..solvers import Controller
        controller = Controller(solver, t_range=t_range, tracker=tracker)

        # run the simulation
        return controller.run(state, dt)
| [
"david.zwicker@ds.mpg.de"
] | david.zwicker@ds.mpg.de |
5452fbedf431f88f306ddea67f5dd66c3d794a2a | c3a351afef234e7a4a69e348ab1dc90dd6455f1d | /supplies_control/migrations/0001_initial.py | 9a23f860bd46da6d8fd5fa0b86db036810603cec | [] | no_license | ProgrammerBaldy/bunkers_backend | bf9ca170c18c94da5b5daf0f94b45e1d0bd3085e | d9dcfa34f59bf2349e2880ede7aa9e5c5b9c8706 | refs/heads/main | 2023-03-10T07:00:12.555231 | 2021-03-02T14:14:13 | 2021-03-02T14:14:13 | 337,581,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,281 | py | # Generated by Django 3.1.6 on 2021-02-10 13:08
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the supply-control tables.
    # Do not hand-edit applied migrations; add a new migration instead.
    # NOTE(review): the *_subproducts/*_supplies join tables use raw
    # IntegerField ids rather than ForeignKey, so the database enforces no
    # referential integrity -- confirm this is intentional.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('measure_unit', models.CharField(max_length=20)),
            ],
        ),
        migrations.CreateModel(
            name='Product_subproducts',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('productid', models.IntegerField()),
                ('subproductid', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='Product_supplies',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('productid', models.IntegerField()),
                ('supplyid', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='Subproduct',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('measure_unit', models.CharField(max_length=20)),
            ],
        ),
        migrations.CreateModel(
            name='Subproduct_supplies',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('subproductid', models.IntegerField()),
                ('supplyid', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='Supply',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('measure_unit', models.CharField(max_length=20)),
            ],
        ),
    ]
| [
"tauanegri@corpstek.com.br"
] | tauanegri@corpstek.com.br |
604df648d3c490b01fd3ac766baf552625a98caf | 22a863f78432baf0cf28a924e3bc26555bef2e35 | /test/testagent/HAagent_info.py | f7b855cb018e9d264722bf7072595e215d75d4c4 | [] | no_license | Li-Shiang-Chi/atca-test-agent | be7294a1d9bcc30971abc29d8cb3dab080c417ea | 441e352a72e085b5988a794ff731694b56fa8a2f | refs/heads/master | 2021-01-23T00:40:07.206358 | 2017-07-10T11:36:00 | 2017-07-10T11:36:00 | 92,830,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,111 | py | #!/usr/bin/python
#-*- coding: utf-8 -*-
'''
@author: lsc
'''
import shell_server
import cmd_HAagent
import file
import json
import HAagent_terminal
def is_add_primary_success(parser):
"""
check is primary node add in cluster
:param parser is a dict get from base.configure
"""
is_exists = is_node_exists(parser["Cluster_name"], parser["PrimaryOS_name"], parser)
role = get_node_role(parser["PrimaryOS_name"], parser)
print "primary node is exists %s" % is_exists
print "primary role %s (expeceted 0)" % role
if is_exists and role == "primary": # if node exists and the role equals 0(primary)
return True
return False
def is_add_backup_success(parser):
"""
check is backup node add in cluster
:param parser is a dict get from base.configure
"""
is_exists = is_node_exists(parser["Cluster_name"], parser["BackupOS_name"], parser)
role = get_node_role(parser["BackupOS_name"], parser)
print "backup node is exists %s" % is_exists
print "backup role %s (expeceted 1)" % role
if is_exists and role == "backup": # if node exists and the role equals 1(backup)
return True
return False
def is_add_slave_success(parser):
"""
check is slave node add in cluster
:param parser is a dict get from base.configure
"""
is_exists = is_node_exists(parser["Cluster_name"], parser["SlaveOS_name"], parser)
role = get_node_role(parser["SlaveOS_name"], parser)
print "slave node is exists %s" % is_exists
print "slave role %s (expeceted 2)" % role
if is_exists and role == "slave": # if node exists and the role equals 2(slave)
return True
return False
def is_cluster_exist(cluster_name , parser):
"""
check is cluster in HAagent
:param cluster_name : cluster name
:param parser is a dict get from base.configure
"""
ssh = shell_server.get_ssh(parser["NFS_ip"],
parser["NFS_usr"] ,
parser["NFS_pwd"]) # get ssh object
#cmd = cmd_HAagent.overview_cmd()
#s_stdin, s_stdout, s_stderr = ssh.exec_command(cmd)
#overview = s_stdout.read()
cluster_file_content = file.get_file_content(parser["cluster_file_path"] , ssh)
print cluster_file_content
ssh.close()
if not cluster_file_content:
return False
if cluster_name in cluster_file_content:
return True
return False
def is_node_exists(cluster_name , node_name , parser):
"""
check is node in HAagent
:param cluster_name : cluster name
:param node_name : node name
:param parser : is a dict get from base.configure
"""
ssh = shell_server.get_ssh(parser["PrimaryOS_ip"],
parser["PrimaryOS_usr"],
parser["PrimaryOS_pwd"]) # get ssh object
cmd = cmd_HAagent.overview_cmd()
s_stdin, s_stdout, s_stderr = ssh.exec_command(cmd)
overview = s_stdout.read() # get overview in host terminal
ssh = shell_server.get_ssh(parser["NFS_ip"],
parser["NFS_usr"],
parser["NFS_pwd"])
cluster_file_content = file.get_remote_file_content(parser["cluster_file_path"] ,ssh) # get cluster file content in nfs
print overview
print cluster_file_content
ssh.close()
if node_name in overview and cluster_file_content:
return True
return False
def get_vm_infofail(node_name , vm_name , parser ,ssh=None):
    # public wrapper: look up the fail-type key of a VM's last failure
    return __get_vm_fail(node_name , vm_name, parser, ssh)
def __get_vm_fail(node_name ,vm_name , parser ,ssh=None):
    # read the cluster file, extract the VM's "last_fail" message and map it
    # back to its fail-type key via the message table
    cluster_file_content = file.get_file_content(parser["cluster_file_path"], ssh) # get cluster file content
    print cluster_file_content
    res = json.loads(cluster_file_content)["nodes"][node_name]["vms"][vm_name]["last_fail"] # get json information
    return __vm_fail_parse(res)
def __vm_fail_parse(fail):
    """Translate a VM last-fail message back into its fail-type key."""
    for entry in HAagent_terminal.Vm_lastfail_messages:
        # entry[0] is the fail type, entry[1] the human-readable message
        if entry[1] == fail:
            return entry[0]
    # implicitly returns None for unknown messages
def get_node_infofail(node_name , parser , ssh=None):
    # public wrapper: look up the fail-type key of a node's last failure
    return __get_node_fail(node_name, parser, ssh)
def __get_node_fail(node_name, parser, ssh):
    # read the cluster file, extract the node's "last_fail" message and map
    # it back to its fail-type key via the message table
    cluster_file_content = file.get_file_content(parser["cluster_file_path"], ssh)
    res = json.loads(cluster_file_content)["nodes"][node_name]["last_fail"]
    return __node_fail_parse(res)
def __node_fail_parse(fail):
    """Translate a node last-fail message back into its fail-type key."""
    fail_model = HAagent_terminal.Node_lastfail_messages
    # .items() works on both Python 2 and 3; .iteritems() was removed in Python 3
    for key, value in fail_model.items():
        if value == fail:
            return key
def get_node_role(name, parser):
    """
    read a node's role from the cluster file on the NFS host
    :param name: node name
    :param parser: a dict get from base.configure
    :return: "primary"/"backup"/"slave", "role not found" for an unknown
             code, or "Key not found" when the node is missing
    """
    ssh = shell_server.get_ssh(parser["NFS_ip"],
                               parser["NFS_usr"],
                               parser["NFS_pwd"])
    try:
        cluster_file_content = file.get_remote_file_content(parser["cluster_file_path"], ssh)  # get cluster file content in nfs
        try:
            res = json.loads(cluster_file_content)["nodes"][name]["role"]
        except KeyError:
            return "Key not found"
        return role_parse(res)
    finally:
        # bug fix: always release the connection; it used to leak when the
        # node was missing (KeyError path skipped ssh.close())
        ssh.close()
def role_parse(role):
    """Map a numeric role code to its name; unknown codes give "role not found"."""
    names = {0: "primary", 1: "backup", 2: "slave"}
    return names.get(role, "role not found")
if __name__ == '__main__':
    # ad-hoc manual check: read the cluster file from an NFS host
    parser = {}  # bug fix: `parser` was never initialized, so the lines below raised NameError
    parser["NFS_ip"] = "192.168.1.106"
    parser["NFS_usr"] = "testagent"
    parser["NFS_pwd"] = "root"
    ssh = shell_server.get_ssh(parser["NFS_ip"],
                               parser["NFS_usr"],
                               parser["NFS_pwd"])
    # NOTE(review): "cluster_file_path" is never set here, so this still
    # raises KeyError -- fill in the path before running this manual check.
    cluster_file_content = file.get_remote_file_content(parser["cluster_file_path"], ssh)  # get cluster file content in nfs
"lsc830621@gmail.com"
] | lsc830621@gmail.com |
4c1e07954b7f178a1cd06730975c36d9e49da4c4 | b747f3ce4a372acc6079f6aa5e08dc0b4e942a8b | /Candy_135.py | 716291e973b1e9a6f5a7a5e67e16f28a777a30cc | [] | no_license | yyang116/Leetcode-Journey | 6a65ae7615ba6eb91f6d6a4914619366da47264a | 0db77523d3f28a146646c877b7dbbe1c375666da | refs/heads/main | 2023-08-10T21:26:32.103999 | 2021-09-24T12:30:57 | 2021-09-24T12:30:57 | 407,548,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | class Solution:
def candy(self, ratings: List[int]) -> int:
l=len(ratings)
if(l<2): return l
candy=[1 for x in range(l)]
for i in range(l-1):
if(ratings[i]<ratings[i+1]):
candy[i+1]=candy[i]+1
for i in range(1,l):
if(ratings[l-i-1]>ratings[l-i] and candy[l-i-1]<=candy[l-i]):
candy[l-i-1]=candy[l-i]+1
return sum(candy)
| [
"noreply@github.com"
] | noreply@github.com |
8b42f06cdb8dea1438828f56a84250f25597c83e | 4698d0fae98b892a935e2b19ec01e60af077723c | /finalproject/bin/rst2html4.py | 322b3298dbe4ce5fcca08892b8ff7e31492a0c96 | [] | no_license | sujithksam92/SearchEngineProject | 8cee78349b8b118456f91f93b7e63587ae5a21d3 | 8182e12a64bc69403940127eeeaa353c405db127 | refs/heads/master | 2020-05-17T13:53:35.289657 | 2019-05-07T21:28:10 | 2019-05-07T21:28:10 | 183,740,225 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | #!/Users/sujithsam/Documents/Studies/Stevens/Sem-2/BIS-660-Web-Mining/Research_Engine_Project/finalproject/bin/python3.7
# $Id: rst2html4.py 7994 2016-12-10 17:41:45Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing (X)HTML.
The output conforms to XHTML 1.0 transitional
and almost to HTML 4.01 transitional (except for closing empty tags).
"""
try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except:
    # best effort: fall back to the default "C" locale when the user's
    # locale is unsupported (NOTE(review): bare except also hides other errors)
    pass

from docutils.core import publish_cmdline, default_description

description = ('Generates (X)HTML documents from standalone reStructuredText '
'sources. ' + default_description)

# hand control to the Docutils command-line publisher with the html4 writer
publish_cmdline(writer_name='html4', description=description)
"sujithksam92@live.com"
] | sujithksam92@live.com |
cbfdbf55f91029d39dfebc3f950e4dad9f0a19e1 | 0525edb3a79a46c73576b149c29ca83464c1e1fb | /finalproy/Scripts/pilfile.py | 05d9f79a39f3f9b93b300bd6a1a70847ffcdb237 | [] | no_license | alseb4991/proyprogweb1.0 | 88119407c111a82ede5b9fe23840a551c5a75ca6 | 5d98b7a34360c8a7fbb5135a4e809b6f9bfb2d82 | refs/heads/master | 2021-01-18T21:08:52.919164 | 2016-06-07T18:34:44 | 2016-06-07T18:34:44 | 50,890,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,700 | py | #!c:\users\alan\desktop\finalproy\scripts\python.exe
#
# The Python Imaging Library.
# $Id$
#
# a utility to identify image files
#
# this script identifies image files, extracting size and
# pixel mode information for known file formats. Note that
# you don't need the PIL C extension to use this module.
#
# History:
# 0.0 1995-09-01 fl Created
# 0.1 1996-05-18 fl Modified options, added debugging mode
# 0.2 1996-12-29 fl Added verify mode
# 0.3 1999-06-05 fl Don't mess up on class exceptions (1.5.2 and later)
# 0.4 2003-09-30 fl Expand wildcards on Windows; robustness tweaks
#
from __future__ import print_function
import getopt
import glob
import logging
import sys
from PIL import Image
# no file arguments: print usage and bail out
if len(sys.argv) == 1:
    print("PIL File 0.4/2003-09-30 -- identify image files")
    print("Usage: pilfile [option] files...")
    print("Options:")
    print("  -f  list supported file formats")
    print("  -i  show associated info and tile data")
    print("  -v  verify file headers")
    print("  -q  quiet, don't warn for unidentified/missing/broken files")
    sys.exit(1)

try:
    opt, args = getopt.getopt(sys.argv[1:], "fqivD")
except getopt.error as v:
    print(v)
    sys.exit(1)

verbose = quiet = verify = 0
logging_level = "WARNING"

for o, a in opt:
    if o == "-f":
        # -f short-circuits: list the registered formats and exit
        Image.init()
        id = sorted(Image.ID)  # NOTE: shadows the builtin `id` (historical code)
        print("Supported formats:")
        for i in id:
            print(i, end=' ')
        sys.exit(1)
    elif o == "-i":
        verbose = 1
    elif o == "-q":
        quiet = 1
    elif o == "-v":
        verify = 1
    elif o == "-D":
        logging_level = "DEBUG"

logging.basicConfig(level=logging_level)
def globfix(files):
    """Expand glob wildcards in *files* on Windows, where the shell does not
    expand them for us; on other platforms return the list unchanged."""
    if sys.platform != "win32":
        return files
    expanded = []
    for name in files:
        if glob.has_magic(name):
            expanded.extend(glob.glob(name))
        else:
            expanded.append(name)
    return expanded
# identify each file; `file` shadows the old builtin name (historical code)
for file in globfix(args):
    try:
        im = Image.open(file)
        print("%s:" % file, im.format, "%dx%d" % im.size, im.mode, end=' ')
        if verbose:
            print(im.info, im.tile, end=' ')
        print()
        if verify:
            try:
                im.verify()
            except:
                # deliberately broad: any failure during verify is reported,
                # never propagated, so remaining files are still processed
                if not quiet:
                    print("failed to verify image", end=' ')
                    print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
    except IOError as v:
        # unreadable/unidentified file
        if not quiet:
            print(file, "failed:", v)
    except:
        # anything else: dump a traceback but keep going with the next file
        import traceback
        if not quiet:
            print(file, "failed:", "unexpected error")
            traceback.print_exc(file=sys.stdout)
| [
"Alan Sebastian Fuentes Toscano"
] | Alan Sebastian Fuentes Toscano |
aaba80ff9c3f90ecc00e3675665a8f4debca40b3 | 5047a3f9be0ad269c4a8b9f2a61e537679620f29 | /answers/advent-7.py | fc14a9428d727690ce15438fe6bdaffde18e5a36 | [] | no_license | thedude42/adventofcode2020 | 2064b7d54e5fd7a299f361f9703679873998fd4a | 56472557698a0bf70234e2b6717fba093882ed8e | refs/heads/main | 2023-01-30T00:23:13.544006 | 2020-12-08T06:28:58 | 2020-12-08T06:28:58 | 318,718,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,115 | py | import sys
import re
from collections import namedtuple
from typing import List, Dict
class Bag():
    '''
    class to hold the 'contains' relationship for a particular bag type
    '''
    # one direct-containment rule: `count` copies of the bag called `name`
    BagRule = namedtuple('BagRule', ["count", "name"])
    # "light red bags ..." -> "light red"
    name_re = re.compile(r'(\w+ \w+) \w+')
    # "1 bright white bag." -> (1, "bright white"); alternative matches leaf bags
    rule_re = re.compile(r'\s*(\d+) (\w+ \w+) \w+\.?|(no other bags\.)$')

    def __init__(self, rule: str):
        self.rule_spec = rule

    @property
    def rule_spec(self):
        return self.__rule_spec

    @rule_spec.setter
    def rule_spec(self, rule: str):
        """Parse a rule line such as
        'light red bags contain 1 bright white bag, 2 muted yellow bags.'
        into the bag name and its list of BagRule(count, name) entries."""
        self.__rule_spec = rule
        self.__rules = []
        name_part, rules = self.__rule_spec.split(" contain ")
        m = Bag.name_re.match(name_part)
        self.name = m.group(1)
        rules = rules.split(", ")
        for rule in rules:
            m = Bag.rule_re.match(rule)
            if m.group(3) == "no other bags.":
                # leaf bag: contains nothing
                continue
            self.__rules.append(Bag.BagRule(int(m.group(1)), m.group(2)))

    @property
    def name(self):
        return self.__name

    @name.setter
    def name(self, name):
        self.__name = name

    @property
    def rules(self):
        return self.__rules

    def contains(self, bag_name) -> bool:
        """Return True if this bag directly contains *bag_name*."""
        for rule in self.__rules:
            if rule.name == bag_name:
                return True
        return False

    def __hash__(self):
        # bug fix: __hash__ must return an int; returning the name string
        # raised "TypeError: __hash__ method should return an integer"
        return hash(self.name)

    def __eq__(self, other_bag) -> bool:
        return other_bag.name == self.name

    def __repr__(self) -> str:
        return self.__name
def get_bags_containing(bag_list: List, containd_bag_name: str) -> List[Bag]:
    """Return every bag in *bag_list* that directly contains the named bag."""
    return [bag for bag in bag_list if bag.contains(containd_bag_name)]
def get_nested_sum(bag_index: Dict[str, Bag], bag_name: str, factor: int) -> int:
    """Total number of bags recursively nested inside *bag_name*, where
    *factor* is how many copies of the current bag exist."""
    return sum(
        rule.count * factor + get_nested_sum(bag_index, rule.name, rule.count * factor)
        for rule in bag_index[bag_name].rules
    )
def main():
    """Advent of Code 2020 day 7: count bags that can (transitively) contain
    a shiny gold bag, and count the bags nested inside one."""
    if len(sys.argv) != 2:
        print("Single input filename required, no more, no less.")
        sys.exit(1)
    bags = []
    with open(sys.argv[1], 'r') as infile:
        for rule_line in infile:
            bags.append(Bag(rule_line))
    # part 1: breadth-first expansion of "who contains X" starting from
    # direct containers of shiny gold; the set deduplicates names
    contains_shinygold = get_bags_containing(bags, "shiny gold")
    contains_shinygold_set = set([ bag.name for bag in contains_shinygold ])
    while len(contains_shinygold) != 0:
        next_bags = []
        for bag in contains_shinygold:
            next_bags += get_bags_containing(bags, bag.name)
        for bag in next_bags:
            contains_shinygold_set.add(bag.name)
        contains_shinygold = next_bags
    print("Found {} total bags that could contain shiny gold".format(len(contains_shinygold_set)))
    # part 2: recursive count of bags inside a single shiny gold bag
    bagname_index = { b.name:b for b in bags }
    contained_bags_sum = get_nested_sum(bagname_index, "shiny gold", 1)
    print("shiny gold bag has {} nested bags".format(contained_bags_sum))
contained_bags_sum = get_nested_sum(bagname_index, "shiny gold", 1)
print("shiny gold bag has {} nested bags".format(contained_bags_sum))
# script entry point
if __name__ == '__main__':
    main()
"sk8rdie42@gmail.com"
] | sk8rdie42@gmail.com |
4461735f7bd7281b7b767b8e879ce7b1b19e705a | 993ff86a0c3a5fbb4e9e0e93cdb7ff0b40124e00 | /wormpose/commands/predict_dataset.py | ccd7d3d8b336f34e609d7f0a86b5fc7dc9c25d64 | [
"CC-BY-4.0",
"BSD-3-Clause"
] | permissive | stjordanis/wormpose | e616a3cd3d4da7bf822697bf6578928e89a7a7d9 | 3e480fcf8eed720d9472b106e09c51c9853536c0 | refs/heads/master | 2022-11-25T03:27:15.834098 | 2020-07-14T08:33:54 | 2020-07-14T08:33:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,111 | py | #!/usr/bin/env python
"""
Predicts videos using a trained model
"""
import logging
import multiprocessing as mp
import os
import random
import shutil
import tempfile
from argparse import Namespace
from functools import partial
from typing import Tuple
import numpy as np
import tensorflow as tf
from wormpose.commands import _log_parameters
from wormpose.commands.utils.results_saver import ResultsSaver
from wormpose.commands.utils.time_sampling import resample_results
from wormpose.config import default_paths
from wormpose.config.default_paths import RESULTS_FILENAME, CONFIG_FILENAME
from wormpose.config.experiment_config import load_config, add_config_argument
from wormpose.dataset.features import Features
from wormpose.dataset.loader import get_dataset_name
from wormpose.dataset.loader import load_dataset
from wormpose.dataset.loaders.resizer import ResizeOptions
from wormpose.images.scoring import BaseScoringDataManager, ScoringDataManager, ResultsScoring
from wormpose.machine_learning.best_models_saver import BestModels
from wormpose.machine_learning.predict_data_generator import PredictDataGenerator
from wormpose.pose.centerline import skeletons_to_angles
from wormpose.pose.headtail_resolution import resolve_head_tail
from wormpose.pose.results_datatypes import (
ShuffledResults,
OriginalResults,
BaseResults,
)
logging.basicConfig()
logger = logging.getLogger(__name__)  # module-level logger used by the helpers below
logger.setLevel(logging.DEBUG)
# raise TensorFlow's own logger to INFO to reduce its console noise
tf.get_logger().setLevel(logging.INFO)
def _make_tf_dataset(data_generator, batch_size: int, image_shape):
    """Build a factory that, given a video name, wraps *data_generator* into a
    tf.data.Dataset yielding float32 batches shaped (batch, height, width, 1)."""
    output_shape = tf.TensorShape([batch_size, image_shape[0], image_shape[1], 1])

    def run(video_name):
        frames_gen = partial(data_generator.run, video_name=video_name)
        return tf.data.Dataset.from_generator(frames_gen, tf.float32, output_shape)

    return run
def _can_resolve_results(shuffled_results: ShuffledResults, score_threshold: float, video_name: str) -> bool:
scores = shuffled_results.scores
if np.all(np.isnan(scores)):
logger.error(f"Calculated scores are all invalid, stopping analysis for {video_name}")
return False
if np.max(scores) < score_threshold:
logger.error(
f"There is not one frame where the error metric is above the threshold {score_threshold} "
f"in the whole video {video_name}, stopping analysis. Maybe the model didn't train properly."
)
return False
return True
class _Predictor(object):
def __init__(self, results_scoring: ResultsScoring, keras_model):
self.keras_model = keras_model
self.results_scoring = results_scoring
def __call__(
self, num_frames: int, input_frames, scoring_data_manager: BaseScoringDataManager, features: Features,
) -> Tuple[OriginalResults, ShuffledResults]:
# run all frames through the neural network to get a result theta without head/tail decision
network_predictions = self.keras_model.predict(input_frames)[:num_frames]
logger.info(f"Predicted {len(network_predictions)} frames")
shuffled_results = ShuffledResults(random_theta=network_predictions)
original_results = OriginalResults(
theta=skeletons_to_angles(features.skeletons, theta_dims=network_predictions.shape[1]),
skeletons=features.skeletons,
scores=None,
)
# calculate image similarity for each frame, for the two solutions
self.results_scoring(results=shuffled_results, scoring_data_manager=scoring_data_manager)
avg_score = np.max(shuffled_results.scores, axis=1).mean()
logger.info(f"Calculated image similarity, average: {avg_score:.4f}")
resample_results(shuffled_results, features.timestamp)
resample_results(original_results, features.timestamp)
return original_results, shuffled_results
def _apply_resize_factor(results: BaseResults, resize_factor: float):
results.skeletons /= resize_factor
def _parse_arguments(dataset_path: str, kwargs: dict):
if kwargs.get("work_dir") is None:
kwargs["work_dir"] = default_paths.WORK_DIR
if kwargs.get("num_process") is None:
kwargs["num_process"] = os.cpu_count()
if kwargs.get("temp_dir") is None:
kwargs["temp_dir"] = tempfile.gettempdir()
if kwargs.get("batch_size") is None:
kwargs["batch_size"] = 512
if kwargs.get("score_threshold") is None:
kwargs["score_threshold"] = 0.7
if kwargs.get("video_names") is None:
kwargs["video_names"] = None
if kwargs.get("random_seed") is None:
kwargs["random_seed"] = None
kwargs["temp_dir"] = tempfile.mkdtemp(dir=kwargs["temp_dir"])
dataset_name = get_dataset_name(dataset_path)
kwargs["experiment_dir"] = os.path.join(kwargs["work_dir"], dataset_name)
if kwargs.get("model_path") is None:
default_models_dir = os.path.join(kwargs["experiment_dir"], default_paths.MODELS_DIRS)
kwargs["model_path"] = BestModels(default_models_dir).best_model_path
if kwargs.get("config") is None:
kwargs["config"] = os.path.join(kwargs["experiment_dir"], CONFIG_FILENAME)
_log_parameters(logger.info, {"dataset_path": dataset_path})
_log_parameters(logger.info, kwargs)
return Namespace(**kwargs)
def predict(dataset_path: str, **kwargs):
"""
Use a trained model to predict the centerlines of worm for videos in a dataset
:param dataset_path: Root path of the dataset containing videos of worm
"""
args = _parse_arguments(dataset_path, kwargs)
mp.set_start_method("spawn", force=True)
if args.random_seed is not None:
os.environ["TF_DETERMINISTIC_OPS"] = "1"
random.seed(args.random_seed)
np.random.seed(args.random_seed)
tf.random.set_seed(args.random_seed)
results_root_dir = os.path.join(args.experiment_dir, default_paths.RESULTS_DIR)
os.makedirs(results_root_dir, exist_ok=True)
config = load_config(args.config)
dataset = load_dataset(
dataset_loader=config.dataset_loader,
dataset_path=dataset_path,
selected_video_names=args.video_names,
resize_options=ResizeOptions(resize_factor=config.resize_factor),
)
keras_model = tf.keras.models.load_model(args.model_path, compile=False)
results_saver = ResultsSaver(
temp_dir=args.temp_dir, results_root_dir=results_root_dir, results_filename=RESULTS_FILENAME
)
tf_dataset_maker = _make_tf_dataset(
data_generator=PredictDataGenerator(
dataset=dataset,
num_process=args.num_process,
temp_dir=args.temp_dir,
image_shape=config.image_shape,
batch_size=args.batch_size,
),
batch_size=args.batch_size,
image_shape=config.image_shape,
)
results_scoring = ResultsScoring(
frame_preprocessing=dataset.frame_preprocessing,
num_process=args.num_process,
temp_dir=args.temp_dir,
image_shape=config.image_shape,
)
predictor = _Predictor(results_scoring=results_scoring, keras_model=keras_model)
for video_name in dataset.video_names:
logger.info(f'Processing video: "{video_name}"')
features = dataset.features_dataset[video_name]
template_indexes = features.labelled_indexes
if len(template_indexes) == 0:
logger.error(
f"Can't calculate image metric, there is no labelled frame in the video to use as a template, "
f"stopping analysis for {video_name}."
)
continue
original_results, shuffled_results = predictor(
input_frames=tf_dataset_maker(video_name),
num_frames=dataset.num_frames(video_name),
features=features,
scoring_data_manager=ScoringDataManager(
video_name=video_name, frames_dataset=dataset.frames_dataset, features=features,
),
)
results = {"original": original_results, "unaligned": shuffled_results}
if _can_resolve_results(shuffled_results, video_name=video_name, score_threshold=args.score_threshold,):
final_results = resolve_head_tail(
shuffled_results=shuffled_results,
original_results=original_results,
frame_rate=features.frame_rate,
score_threshold=args.score_threshold,
)
results["resolved"] = final_results
_apply_resize_factor(results["resolved"], config.resize_factor)
_apply_resize_factor(results["unaligned"], config.resize_factor)
results_saver.save(results=results, video_name=video_name)
# cleanup
shutil.rmtree(args.temp_dir)
def main():
import argparse
parser = argparse.ArgumentParser()
# model infos
parser.add_argument(
"--model_path", type=str, help="Load model from this path, or use best model from work_dir.",
)
parser.add_argument("--batch_size", type=int)
# inputs
parser.add_argument("dataset_path", type=str)
parser.add_argument(
"--video_names",
type=str,
nargs="+",
help="Only analyze a subset of videos. If not set, will analyze all videos in dataset_path.",
)
add_config_argument(parser)
parser.add_argument("--temp_dir", type=str, help="Where to store temporary intermediate results")
parser.add_argument("--work_dir", type=str, help="Root folder for all experiments")
# multiprocessing params
parser.add_argument("--num_process", type=int, help="How many worker processes")
# parameters of results processing
parser.add_argument(
"--score_threshold",
type=float,
help="Image metric score threshold : discard results scoring lower than this value."
" Fine tune this value using the script calibrate_dataset.py",
)
parser.add_argument("--random_seed", type=int, help="Optional random seed for deterministic results")
args = parser.parse_args()
predict(**vars(args))
if __name__ == "__main__":
main()
| [
"laetitia.hebert@oist.jp"
] | laetitia.hebert@oist.jp |
c158869c44fbf7dbcc2976dabe27d6ba3b090513 | 615593c9b15afe1219bed38efe3adc32b947b6d2 | /FaceRecognition-Webapp/DnnRecognizer.py | 5691d7d1394151210e780ce5475fbdd895446512 | [] | no_license | jbkarle/PyImageConf2018 | 7bc11f93f4a79195373eddd41deb21b9851bf597 | be7f080487dbdb99e6f52941cc26b41123882caa | refs/heads/master | 2021-09-22T06:57:34.313260 | 2018-09-06T05:18:29 | 2018-09-06T05:18:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,154 | py | # License Agreement
# 3-clause BSD License
#
# Copyright (C) 2018, Xperience.AI, all rights reserved.
#
# Third party copyrights are property of their respective owners.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the names of the copyright holders nor the names of the contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# This software is provided by the copyright holders and contributors "as is" and
# any express or implied warranties, including, but not limited to, the implied
# warranties of merchantability and fitness for a particular purpose are disclaimed.
# In no event shall copyright holders or contributors be liable for any direct,
# indirect, incidental, special, exemplary, or consequential damages
# (including, but not limited to, procurement of substitute goods or services;
# loss of use, data, or profits; or business interruption) however caused
# and on any theory of liability, whether in contract, strict liability,
# or tort (including negligence or otherwise) arising in any way out of
# the use of this software, even if advised of the possibility of such damage.
import cv2
import numpy
class DnnRecognizer():
def __init__(self, model_path='data/openface.nn4.small2.v1.t7',
model_mean = [0, 0, 0], model_in_size = (96, 96), model_scale = 1.0 / 255, conf_threshold = 0.6):
self.known_faces = dict()
self.model = cv2.dnn.readNetFromTorch(model_path)
self.mean = model_mean
self.scale = model_scale
self.in_size = model_in_size
self.confidence = conf_threshold
def enroll(self, imageBuffer, name):
vec = self._face2vec(imageBuffer)
self.known_faces[name] = vec
def recognize(self, imageBuffer):
vec = self._face2vec(imageBuffer)
best_match_name = 'unknown'
best_match_score = self.confidence
# NOTE: Replace iteritems() method to items() if you use Python3
for name, descriptor in self.known_faces.items():
score = vec.dot(descriptor.T)
if (score > best_match_score):
best_match_score = score
best_match_name = name
return best_match_name
def _face2vec(self, imageBuffer):
dataFromBuffer = numpy.frombuffer(imageBuffer, dtype=numpy.uint8)
image = cv2.imdecode(dataFromBuffer, cv2.IMREAD_COLOR)
blob = cv2.dnn.blobFromImage(image, self.scale, self.in_size, self.mean, False, False)
self.model.setInput(blob)
vec = self.model.forward()
return vec
| [
"spmallick@gmail.com"
] | spmallick@gmail.com |
4af89df75f31f967c2e9fffac40bba2655d4eac1 | 7c568ca8675ee507d231dc3ddc2c26db8af81d3f | /app/measurement/migrations/0039_auto_20210423_2203.py | e4770737f85b702259b6e9732f4c900815cf5bca | [
"MIT"
] | permissive | pnsn/squacapi | ccfb458c7230fc5b0a0be7921eb6db611d8c646a | 40d9608295daefc5e1cd83afd84ecb5b0518cc3d | refs/heads/main | 2023-04-30T22:10:51.651835 | 2023-04-28T17:01:06 | 2023-04-28T17:01:06 | 176,352,115 | 7 | 0 | MIT | 2023-04-28T17:01:07 | 2019-03-18T19:03:32 | Python | UTF-8 | Python | false | false | 3,213 | py | # Generated by Django 3.1.8 on 2021-04-23 22:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('measurement', '0038_auto_20210414_1911'),
]
operations = [
migrations.AddField(
model_name='archiveday',
name='p05',
field=models.FloatField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='archiveday',
name='p10',
field=models.FloatField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='archiveday',
name='p90',
field=models.FloatField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='archiveday',
name='p95',
field=models.FloatField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='archivehour',
name='p05',
field=models.FloatField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='archivehour',
name='p10',
field=models.FloatField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='archivehour',
name='p90',
field=models.FloatField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='archivehour',
name='p95',
field=models.FloatField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='archivemonth',
name='p05',
field=models.FloatField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='archivemonth',
name='p10',
field=models.FloatField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='archivemonth',
name='p90',
field=models.FloatField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='archivemonth',
name='p95',
field=models.FloatField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='archiveweek',
name='p05',
field=models.FloatField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='archiveweek',
name='p10',
field=models.FloatField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='archiveweek',
name='p90',
field=models.FloatField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='archiveweek',
name='p95',
field=models.FloatField(default=0),
preserve_default=False,
),
]
| [
"jontconnolly@gmail.com"
] | jontconnolly@gmail.com |
7c12aa31b26bf8faa5fa8916f6ea06e98186cba2 | f722cfc58ffb73fb336cdbea28eeb264b928b21d | /testwebsite/testwebsite/views.py | c6e387d1d51afd1e3dbc3192827a0606eb18a6fb | [] | no_license | dipeshanandparab/DjangoCertificationProject | 46b66626603b71831fcff9a9bdea9fb1ebfc1f76 | d046c87d63d5eb1205e2ca189083a6ccd2f58572 | refs/heads/master | 2020-07-26T09:25:43.890549 | 2019-09-15T14:13:49 | 2019-09-15T14:13:49 | 208,603,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 105 | py | from django.http import HttpResponse
def blogPage(request):
return HttpResponse("Blog Home Page") | [
"noreply@github.com"
] | noreply@github.com |
5e05b6b9b39006785cc1c655918b72ebd92fe4a6 | b7f856717e407c698453b3fddde74dc19cb81ebc | /solve/solver.py | abd766c05f81f527039bc8954dfadcd4ea596b74 | [] | no_license | grace0925/sudoku-solver | 22702915dbd290c1409d7d0840b93b51772e67f5 | 7e0a8e94b418dd5a1e3e259828e3b5f545da7369 | refs/heads/master | 2021-01-09T17:47:50.007917 | 2020-03-30T04:19:41 | 2020-03-30T04:19:41 | 242,395,747 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,499 | py | board = [
[2,0,3,8,0,0,0,0,0],
[0,9,0,4,0,0,3,6,0],
[5,0,0,0,6,3,0,2,0],
[6,0,7,0,0,0,0,9,8],
[0,0,5,9,0,0,0,1,6],
[4,0,0,0,7,0,2,0,3],
[0,0,0,3,0,4,0,0,0],
[0,0,0,0,2,0,0,0,0],
[0,0,8,0,0,0,0,0,0]
]
def pretty_print_board(board):
for i, row in enumerate(board):
if i % 3 == 0 and i != 0:
print("- - - - - - - - - - - - -")
for j, col in enumerate(row):
if j == 8:
print(str(board[i][j]) + " ", end="")
print("|")
elif j % 3 == 0:
print("| ", end="")
print(str(board[i][j]) + " ", end="")
else:
print(str(board[i][j]) + " ", end="")
def find_empty_square(board):
for i in range(len(board)):
for j in range(len(board[0])):
if board[i][j] == 0:
print(i, j)
return i, j
return None
# check if number if valid at certain position on the board
def is_valid(board, num, pos):
if not check_col(board, num, pos):
return False
elif not check_row(board, num, pos):
return False
elif not check_square(board, num, pos):
return False
return True
def check_square(board, num, pos):
square_pos = (pos[0]//3*3, pos[1]//3*3)
for i in range(square_pos[0], square_pos[0]+3):
for j in range(square_pos[1], square_pos[1]+3):
if (i,j) != pos and num == board[i][j]:
print("False => " + str((i, j)))
return False
return True
def check_row(board, num, pos):
for i in range(len(board[0])):
if num == board[pos[0]][i] and (pos[0],[i]) != pos:
print("False => " + str((pos[0],i)))
return False
return True
def check_col(board, num, pos):
for j in range(len(board)):
if num == board[j][pos[1]] and (j,pos[1]) != pos:
print("False => " + str((j, pos[1])))
return False
return True
def solve(board):
first_empty = find_empty_square(board)
# base case: finished filling the board
if not first_empty:
return True
else:
row, col = first_empty
for i in range(1,10):
print("for " + str(i))
if is_valid(board, i, (row,col)):
board[row][col] = i
if solve(board):
return True
board[row][col] = 0
return False
solve(board)
pretty_print_board(board) | [
"graceliu0925@gmail.com"
] | graceliu0925@gmail.com |
91bb39e87b153c78a084acbdc38998fcc5de7e04 | 5a01774b1815a3d9a5b02b26ca4d6ba9ecf41662 | /Module 2/Chapter03/django-myproject-03/quotes/models.py | 1659b30889e4e5de96390dfb7a8897a216d15bfe | [
"MIT"
] | permissive | PacktPublishing/Django-Web-Development-with-Python | bf08075ff0a85df41980cb5e272877e01177fd07 | 9f619f56553b5f0bca9b5ee2ae32953e142df1b2 | refs/heads/master | 2023-04-27T22:36:07.610076 | 2023-01-30T08:35:11 | 2023-01-30T08:35:11 | 66,646,080 | 39 | 41 | MIT | 2023-04-17T10:45:45 | 2016-08-26T12:30:45 | Python | UTF-8 | Python | false | false | 3,578 | py | # -*- coding: UTF-8 -*-
from __future__ import unicode_literals
import os
from PIL import Image
from django.db import models
from django.utils.timezone import now as timezone_now
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.urlresolvers import NoReverseMatch
from django.core.files.storage import default_storage as storage
from utils.models import UrlMixin
THUMBNAIL_SIZE = getattr(settings, "QUOTES_THUMBNAIL_SIZE", (50, 50))
def upload_to(instance, filename):
now = timezone_now()
filename_base, filename_ext = os.path.splitext(filename)
return "quotes/%s%s" % (
now.strftime("%Y/%m/%Y%m%d%H%M%S"),
filename_ext.lower(),
)
@python_2_unicode_compatible
class InspirationalQuote(UrlMixin):
author = models.CharField(_("Author"), max_length=200)
quote = models.TextField(_("Quote"))
picture = models.ImageField(_("Picture"), upload_to=upload_to, blank=True, null=True)
language = models.CharField(_("Language"), max_length=2, blank=True, choices=settings.LANGUAGES)
class Meta:
verbose_name = _("Inspirational Quote")
verbose_name_plural = _("Inspirational Quotes")
def __str__(self):
return self.quote
def get_url_path(self):
try:
return reverse("quote_detail", kwargs={"id": self.pk})
except NoReverseMatch:
return ""
def save(self, *args, **kwargs):
super(InspirationalQuote, self).save(*args, **kwargs)
# generate thumbnail picture version
self.create_thumbnail()
def create_thumbnail(self):
if not self.picture:
return ""
file_path = self.picture.name
filename_base, filename_ext = os.path.splitext(file_path)
thumbnail_file_path = "%s_thumbnail.jpg" % filename_base
if storage.exists(thumbnail_file_path):
# if thumbnail version exists, return its url path
return "exists"
try:
# resize the original image and return url path of the thumbnail version
f = storage.open(file_path, 'r')
image = Image.open(f)
width, height = image.size
if width > height:
delta = width - height
left = int(delta/2)
upper = 0
right = height + left
lower = height
else:
delta = height - width
left = 0
upper = int(delta/2)
right = width
lower = width + upper
image = image.crop((left, upper, right, lower))
image = image.resize(THUMBNAIL_SIZE, Image.ANTIALIAS)
f_mob = storage.open(thumbnail_file_path, "w")
image.save(f_mob, "JPEG")
f_mob.close()
return "success"
except:
return "error"
def get_thumbnail_picture_url(self):
if not self.picture:
return ""
file_path = self.picture.name
filename_base, filename_ext = os.path.splitext(file_path)
thumbnail_file_path = "%s_thumbnail.jpg" % filename_base
if storage.exists(thumbnail_file_path):
# if thumbnail version exists, return its url path
return storage.url(thumbnail_file_path)
# return original as a fallback
return self.picture.url
def title(self):
return self.quote | [
"bhavinsavalia@packtpub.com"
] | bhavinsavalia@packtpub.com |
f868eeff27e7b4b07996e98e3f5ae73e36692f8c | f19df7e4cf34af41b17fb636d4c77ab32f93f20f | /Game tutorial/rpg_game/core/Marker.py | 0d1bbe0ff377fa34804ba7afd2bb395390e0de79 | [] | no_license | Sequd/python | 2fcaa60ee0a535619b50ece0eb8bf9a43ad798e2 | 3a08ab1285b004f57266853384ed9ef4a91163d3 | refs/heads/master | 2021-07-10T02:19:40.922424 | 2021-04-04T15:05:15 | 2021-04-04T15:05:15 | 55,693,736 | 0 | 0 | null | 2018-05-11T14:39:59 | 2016-04-07T12:43:40 | Python | UTF-8 | Python | false | false | 1,051 | py | import pygame
import random
from Constants import *
class Marker:
def __init__(self, screen, x=250, y=250):
self.screen = screen
self.x = x
self.y = y
def update(self):
pass
def render(self):
surface = pygame.Surface((160, 120), pygame.SRCALPHA)
shift = 20
pygame.draw.ellipse(surface, (255, 155, 155), (10, 56, 60, 30))
pygame.draw.ellipse(surface, (0, 0, 0), (10, 56, 60, 30), 1)
pygame.draw.ellipse(surface, WHITE, (0 + shift, 40 + shift, 40, 20))
pygame.draw.polygon(surface, WHITE, [[0 + shift, 10 + shift],
[0 + shift, 50 + shift],
[40 + shift, 50 + shift],
[40 + shift, 10 + shift]])
pygame.draw.ellipse(surface, (255, 155, 155), (0 + shift, 0 + shift, 40, 20))
pygame.draw.ellipse(surface, (155, 155, 155), (0 + shift, 0 + shift, 40, 20), 1)
self.screen.blit(surface, (self.x, self.y))
| [
"e.korunov@gmail.com"
] | e.korunov@gmail.com |
10f058e9cf7828acf4110b55d6c7158a21525c0c | eeee463b73fe69972401e28ae856e551f4c30567 | /Digit Classification/custom_knn_implementation.py | ffccdf770bedcf5d20a3cd04a46c218c67b59b63 | [
"MIT"
] | permissive | pushprajsingh14/Digit-classification-knn | 828e2823809eb4795b9412bd00a2f315ddc2d7d7 | 65ae68c764159c8eb49358828e177df47e2b2ee5 | refs/heads/master | 2022-11-22T07:51:10.314289 | 2020-07-26T18:31:56 | 2020-07-26T18:31:56 | 282,705,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,844 | py | import numpy as np
from sklearn.model_selection import train_test_split
data = np.load('./datasets/mnist_train_small.npy')
x = data[:, 1:]
y = data[:, 0]
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.33, random_state=42)
class CustomKNN:
# constructor
def __init__(self, n_neighbours=5):
self.n_neighbours = n_neighbours
# training function
def fit(self, x, y):
self._x = (x - x.mean()) / x.std() # standardisation
self._y = y
# predict point
# given a single point, tell me which class it belongs to
def predict_point(self, point):
# storing the dis of given 'point' from each point in training data
list_dist = []
# these points are from my training data
for x_point, y_point in zip(self._x, self._y):
dist_point = ((point - x_point) ** 2).sum()
list_dist.append([dist_point, y_point])
# sorting the list according to the distance
sorted_dist = sorted(list_dist)
top_k = sorted_dist[:self.n_neighbours]
# taking the count
items, counts = np.unique(np.array(top_k)[:, 1], return_counts=True)
ans = items[np.argmax(counts)]
return ans
# predict
# give me answer for each number in the array
def predict(self, x):
results = []
x = (x - x.mean()) / x.std()
for point in x:
results.append(self.predict_point(point))
return np.array(results, dtype=int)
# score to measure my accuracy
def score(self, x, y):
return sum(self.predict(x) == y) / len(y)
model = CustomKNN()
model.fit(x_train, y_train)
print("accuracy is:")
print(model.score(x_test[:100], y_test[:100]) * 100, "%")
print("thankyou")
| [
"noreply@github.com"
] | noreply@github.com |
50587eb991551487cd46558996781fa84fa73bdb | 030437bc949054b0110a4979276ce19dd03c124e | /game_controller.py | 4a8d0ca92170380805d3623c46faa3366238047f | [] | no_license | dusanradivojevic/DeepSeaAdventures | 439ede65b705bcf2e8bece546023f8fbaf1ab990 | 7a6f6efa475469a83ec8c1e02c931a31cf079d36 | refs/heads/master | 2022-12-24T13:07:42.539056 | 2020-10-03T13:14:34 | 2020-10-03T13:14:34 | 253,089,595 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,793 | py | import time
import math
import pygame
from sounds import SoundPlayer
import game_data as gd
import threading
import level_data as levels
import npc
# Changes time lapsed from number of seconds to hours : minutes : seconds format
def time_convert(seconds):
min = (seconds // 60) % 60
sec = seconds % 60
hours = min // 60
return f'{hours} : {min} : {round(sec,2)}'
# Checks whether given array of fish types contains type the same as asked
def has_type(array, given_type):
if len(array) == 0:
return False
if array[0] == npc.NpcSprite:
return True
for tp in array:
if tp == given_type:
return True
return False
class TaskController:
def __init__(self, task):
self.task = task
self.fishes = []
self.score = 0
def task_update(self, eaten_fish, score):
if self.task.score_needed != 0:
self.score += score
if has_type(self.task.fish_types, type(eaten_fish)):
self.fishes.append(eaten_fish)
def get_text_surface(self, font):
rows = [
font.render('Tasks:', False, gd.white_color)
]
if self.task.score_needed != 0:
sc = self.score if self.score < self.task.score_needed else self.task.score_needed
text = f'{round(sc)}/{self.task.score_needed}' \
f' Score'
rows.append(font.render(text, False, gd.white_color)) # 0/500 Score
index = 0
for tp in self.task.fish_types:
num = self.number_of_eaten_fish_of_type(tp) if self.number_of_eaten_fish_of_type(tp) < \
self.task.fish_numbers[index] else self.task.fish_numbers[index]
text = f'{num}/{self.task.fish_numbers[index]}' \
f' {levels.get_name_of_type(tp)}'
rows.append(font.render(text, False, gd.white_color)) # 0/5 BlueFish
index += 1
return rows
def number_of_eaten_fish_of_type(self, tp):
if tp == npc.NpcSprite:
return len(self.fishes)
count = 0
for fish in self.fishes:
if type(fish) == tp:
count += 1
return count
def is_completed(self):
if self.score < self.task.score_needed:
return False
# for each type of fish needed checks how much of them player has eaten
checking_fish_index = 0
for tp in self.task.fish_types:
count = self.number_of_eaten_fish_of_type(tp)
if count < self.task.fish_numbers[checking_fish_index]:
return False
checking_fish_index += 1 # next number of eaten fish needed
return True
class GameController:
def __init__(self, list, player, generator):
self.level = 1
self.score = 0
self.fish_eaten = []
self.start_time = time.time()
self.end_time = time.time()
self.played_time = "0"
self.fishes = list
self.player = player
self.generator = generator
self.work = True
self.call_danger_fish()
self.task_controller = TaskController(levels.get_random_task(self.level))
def change_level(self):
if self.task_controller.is_completed():
self.level += 1
if self.level > gd.NUM_OF_LEVELS:
pygame.event.post(gd.GAME_WIN_EVENT)
return
self.generator.change_level()
self.task_controller = TaskController(levels.get_random_task(self.level))
self.player.change_level_image(self.level)
pygame.event.post(gd.LEVEL_CHANGED_EVENT)
def stop(self):
self.work = False
self.end_time = time.time()
time_lapsed = self.end_time - self.start_time
self.played_time = time_convert(time_lapsed)
def start(self):
for fish in self.fishes:
if self.player.size < ((100 - gd.FISH_SIZE_DIFFERENCE) / 100) * fish.size \
and fish.rect.left < self.player.rect.centerx < fish.rect.right\
and fish.rect.top + 0.2 * fish.rect.height < self.player.rect.centery < \
fish.rect.bottom - 0.2 * fish.rect.height:
self.game_over()
elif self.player.size > ((100 + gd.FISH_SIZE_DIFFERENCE) / 100) * fish.size\
and self.player.rect.left < fish.rect.centerx < self.player.rect.right\
and self.player.rect.top + 0.2 * self.player.rect.height < fish.rect.centery < \
self.player.rect.bottom - 0.2 * self.player.rect.height:
self.eat(fish)
def call_danger_fish(self):
if not self.work:
return
if round(time.time() - self.start_time) != 0:
self.generator.spawn_danger_fish()
threading.Timer(gd.DANGER_FISH_SPAWN_FREQUENCY, self.call_danger_fish).start()
def get_score(self):
return f'Score: {round(self.score)}'
def get_level(self):
return f'Level: {self.level}'
def get_text_surface(self, font):
return self.task_controller.get_text_surface(font)
def eat(self, fish):
SoundPlayer(gd.eating_sound_path, False).play()
fish.stop()
score_amount = (gd.SCORE_PERCENT / 100) * fish.size
self.score += score_amount
self.fish_eaten.append(fish)
self.task_controller.task_update(fish, score_amount)
self.change_level()
# self.player.size += (gd.SIZE_PERCENT / 100) * fish.size
def game_over(self):
# Show end screen
pygame.event.post(gd.GAME_OVER_EVENT) # Raises QUIT event (should be changed so it can be
# distinguished from button interruption)
| [
"r.dusan97@gmail.com"
] | r.dusan97@gmail.com |
da8ab0b242ac7d0fd2aff929e11c271cf5e4f784 | 94a9c4765e245ad1a12a52e28de919bdbe0cb3b8 | /HolaMundo_Channel/remove_unuseless.py | db84d9e3c99e9081f5970eda03edf0966160bb29 | [] | no_license | je-castelan/Algorithms_Python | d2b86e7300c8d9a877eb5b4bb507f901e8507967 | 9056ddebb0c73839430d54d19fcdc5588263cfd4 | refs/heads/master | 2023-02-26T00:45:30.812225 | 2021-01-21T06:16:10 | 2021-01-21T06:16:10 | 301,629,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | import functools
def removeUnuseless(arr):
# Remove null or zero elements. Note than filter require to be saved on list initializtion
# Challenge of Hola Mundo Channel on https://www.youtube.com/watch?v=MXmQM_Uehtk&t=584s
lista = list(filter(lambda x: x ,arr))
return lista
if __name__ == "__main__":
print(removeUnuseless([5,3,66,False,76,None,45,0,False,0])) | [
"ingkstr@gmail.com"
] | ingkstr@gmail.com |
fb5d205f628987e7f8e328d80f149b1bf79740ad | 52a42437c4f8f978e1d9da1074d487ce8123d228 | /path_sum_3.py | c6645a212c8b62dd26108303729a0e5d2240bacf | [] | no_license | iorzt/leetcode-algorithms | 22bc987d76c52817dd6fe94e0a8331b3729e36a1 | 7508201ac7fe057769100f168bb3e5e598137894 | refs/heads/master | 2021-05-25T12:09:10.172318 | 2020-03-26T02:31:13 | 2020-03-26T02:31:13 | 127,387,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,235 | py | """
You are given a binary tree in which each node contains an integer value.
Find the number of paths that sum to a given value.
The path does not need to start or end at the root or a leaf, but it must go downwards (traveling only from parent nodes to child nodes).
The tree has no more than 1,000 nodes and the values are in the range -1,000,000 to 1,000,000.
Example:
root = [10,5,-3,3,2,null,11,3,-2,null,1], sum = 8
10
/ \
5 -3
/ \ \
3 2 11
/ \ \
3 -2 1
Return 3. The paths that sum to 8 are:
1. 5 -> 3
2. 5 -> 2 -> 1
3. -3 -> 11
"""
class Solution:
def __init__(self):
self.result = 0
"""
cost: 1124ms >19.33
https://leetcode.com/submissions/detail/210791563/
"""
def pathSum(self, root: TreeNode, sum: int) -> int:
if not root:
return 0
self.helper(root, sum)
self.pathSum(root.left, sum)
self.pathSum(root.right, sum)
return self.result
def helper(self, root: TreeNode, sum: int):
if not root:
return
if sum - root.val == 0:
self.result += 1
self.helper(root.left, sum - root.val)
self.helper(root.right, sum - root.val)
| [
"gg.baibo@gmail.com"
] | gg.baibo@gmail.com |
6f536a3e58cddf6f23b1f280c0d75a694929f726 | 476a8b674a76b9925dac4498b2f64cf1dee08910 | /deepnet_training.py | 5de72a72d09c7cf3e90935fe5490c9ea9f6f64d5 | [
"MIT"
] | permissive | jacenfox/pytorch-framework | 24905de58352604e24f7ceb6f7a705cb5ad72a5b | 771642a6f40b1fa4f4c90ffa40e2cb90c3fd42d4 | refs/heads/master | 2020-04-30T02:08:35.579186 | 2019-06-27T17:25:05 | 2019-06-27T17:25:05 | 176,551,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,254 | py | import torch
import os
import sys
import numpy as np
import time
import glob
from utils import time_elapse_parser
import colorama
class TrainDeepNet(object):
    """Training/evaluation harness for a deep LDR-to-HDR network.

    Wraps a torch model that must expose ``loss(network_output, target)`` and
    a ``loss_dict`` attribute, and drives the train/test loops, Adam
    optimization, callback dispatch, and checkpointing.
    """
    def __init__(self, model_path, model, domain_adapt, learning_rate, callbacks, model_name='LDR2HDR', device='cuda'):
        """
        Args:
            model_path: Directory for checkpoints/logs (created if missing).
            model: torch module providing ``loss()`` and ``loss_dict``.
            domain_adapt: Whether domain adaptation is enabled (stored only).
            learning_rate: Adam learning rate.
            callbacks: Iterable of objects with ``.batch(...)`` / ``.epoch(...)`` hooks.
            model_name: Prefix used in checkpoint file names.
            device: torch device string, e.g. 'cuda' or 'cpu'.
        """
        self.model_path = model_path
        if not os.path.exists(model_path):
            os.makedirs(model_path)
            print('[Output Path] %s' % (model_path))
        self.callbacks = callbacks
        self.device = device
        self.model = model
        self.MODEL_NAME = model_name
        self.do_domain_adapt = domain_adapt
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=learning_rate, weight_decay=0, eps=1e-8)
        self.epoch = 0
        self.epoch_ckpt = 0  # epoch restored from a checkpoint, if any
        self._best_loss = None  # lowest training loss seen so far
    def map_data_to_device(self, data, is_training):
        '''
        Map dataloader data to the torch device (cpu, gpu).

        data: list or dict of tensors (dict values may also be strings, e.g.
            file names, which are passed through unchanged).
        is_training: when True, tensors in a dict get requires_grad=True.
        float64 tensors are downcast to float32.
        '''
        if type(data) is list:
            data = [d.to(self.device) for d in data]
        if type(data) is dict:
            for key in data.keys():
                try:
                    if type(data[key]) is torch.Tensor:
                        data[key] = data[key].to(self.device)
                        data[key].requires_grad = bool(is_training)
                        if data[key].dtype is torch.float64:
                            data[key] = data[key].type(torch.float32)
                    # non-tensor entries (e.g. filename strings) pass through
                except TypeError:
                    print('Type Error in processing: ', key, type(data[key]), data[key].shape, data[key].dtype)
                    raise TypeError
        return data
    def loop(self, train_loader, test_loader, max_epochs):
        """Run the full train/test loop from the checkpointed epoch to max_epochs."""
        _start_time = time.time()
        self.max_epochs = max_epochs
        for i in range(self.epoch_ckpt, max_epochs):
            _epoch_start_time = time.time()
            self.epoch = i
            self.train(train_loader)
            self.test(test_loader)
            print('Epoch [%03d/%03d] Epoch time [%s] Running time [%s]' % (self.epoch, self.max_epochs,
                                                                           time_elapse_parser(time.time() - _epoch_start_time),
                                                                           time_elapse_parser(time.time() - _start_time)))
            # Periodically remind the operator what is running and where output goes.
            if (i) % 5 == 0:
                print(colorama.Fore.GREEN + '[Runing CMD] %s' % ' '.join(sys.argv[0:]) + colorama.Style.RESET_ALL)
                print(colorama.Fore.GREEN + '[Output dir] %s' % self.model_path + colorama.Style.RESET_ALL)
                print(colorama.Back.CYAN + '='*50 + colorama.Style.RESET_ALL)
    def train(self, train_loader):
        """Train the model for one epoch and save a checkpoint at the end."""
        self.model.train()
        epoch = self.epoch
        total_step = len(train_loader)
        # Rolling timing/loss stats, reset after each progress printout.
        print_list = {'loss': [], 'GPU': [], 'Loading': [], 'CBs': []}
        _last_batch_end_time = time.time()
        for i, samples in enumerate(train_loader):
            print_list['Loading'].append(time.time() - _last_batch_end_time)
            _avg_gpu_time_start = time.time()
            # map data to device
            target = self.map_data_to_device(samples['target'], is_training=True)
            data = self.map_data_to_device(samples['data'], is_training=True)
            # forward
            network_output = self.model(data, epoch, self.max_epochs)
            loss = self.model.loss(network_output, target)
            print_list['loss'].append(loss.data.cpu().numpy())
            # backward + update
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            print_list['GPU'].append(time.time() - _avg_gpu_time_start)
            # callbacks
            _avg_callback_time_start = time.time()
            self.after_batch_callbacks(network_output, target, data, loss_dict=self.model.loss_dict, is_training=True)
            print_list['CBs'].append(time.time() - _avg_callback_time_start)
            # progress printout roughly 5 times per epoch
            _last_batch_end_time = time.time()
            if (i) % np.maximum(1, int(total_step/5)) == 0:
                print('    Step[%03d/%03d] Loss: [%.4f] Time(s): CPU[%.2f] GPU[%.1f] CBs[%.1f] per batch' %
                      (i, total_step, np.mean(print_list['loss']), np.mean(print_list['Loading']),
                       np.mean(print_list['GPU']), np.mean(print_list['CBs'])))
                print_list = {'loss': [], 'GPU': [], 'Loading': [], 'CBs': []}
        self.save_checkpoint(loss.data.cpu().numpy())
        self.after_epoch_callbacks(network_output, target, data, loss_dict=self.model.loss_dict, is_training=True)
        self.model.ibatch = 0
    def test(self, test_loader):
        """Evaluate the model on the test set (no gradients)."""
        self.model.eval()
        print('    Testing', end=" ")
        _test_time_start = time.time()
        with torch.no_grad():
            for i, samples in enumerate(test_loader):
                target = self.map_data_to_device(samples['target'], is_training=False)
                data = self.map_data_to_device(samples['data'], is_training=False)
                network_output = self.model(data, self.epoch, self.max_epochs)
                self.after_batch_callbacks(network_output, target, data, loss_dict=self.model.loss_dict, is_training=False)
            self.after_epoch_callbacks(network_output, target, data, loss_dict=self.model.loss_dict, is_training=False)
        print('time [%.2f]' % (time.time()-_test_time_start))
        self.model.ibatch = 0
    def after_batch_callbacks(self, network_output, target, data, loss_dict, is_training):
        """Invoke every registered callback's per-batch hook."""
        for callback_fun in self.callbacks:
            callback_fun.batch(self, network_output, target, data, loss_dict, is_training)
    def after_epoch_callbacks(self, network_output, target, data, loss_dict, is_training):
        """Invoke every registered callback's per-epoch hook."""
        for callback_fun in self.callbacks:
            callback_fun.epoch(self, network_output, target, data, loss_dict, is_training)
    def save_checkpoint(self, loss):
        """Save 'last', periodic, and best-loss checkpoints under <model_path>/log."""
        log_path = os.path.join(self.model_path, 'log')
        if not os.path.exists(log_path):
            os.makedirs(log_path)
        ckpt_data = {'epoch': self.epoch,
                     'state_dict': self.model.state_dict(),
                     'loss': loss,
                     }
        torch.save(ckpt_data, os.path.join(log_path, '.%s.model.last.ckpt' % (self.MODEL_NAME)))
        # Keep numbered snapshots every 50 epochs and near the end of training.
        if self.epoch % 50 == 0 or self.epoch >= (self.max_epochs-5):
            torch.save(ckpt_data, os.path.join(log_path, '.%s.model.%03d.ckpt' % (self.MODEL_NAME, self.epoch)))
        if (self._best_loss is None) or (self._best_loss > loss):
            self._best_loss = loss
            torch.save(ckpt_data, os.path.join(log_path, '.%s.model.best.ckpt' % (self.MODEL_NAME)))
    def load_checkpoint(self, model_path, epoch=-1):
        """Restore model weights from a checkpoint.

        Args:
            model_path: Root model directory (checkpoints live in its 'log' subdir).
            epoch: -1 or 'last' for the newest, 'best' for the lowest-loss
                checkpoint, or an integer epoch number.
        """
        if (epoch == -1) or (epoch == 'last'):
            filename = sorted(glob.glob(os.path.join(model_path, 'log', '.%s.model.last.ckpt' % (self.MODEL_NAME))))[-1]
        elif epoch == 'best':
            filename = sorted(glob.glob(os.path.join(model_path, 'log', '.%s.model.best.ckpt' % (self.MODEL_NAME))))[-1]
        else:
            # BUG FIX: original built the name as
            # "'.%s.model.%03d.ckpt' % (self.MODEL_NAME) % (epoch)", which
            # raises TypeError (first % applied with too few arguments).
            filename = os.path.join(model_path, 'log', '.%s.model.%03d.ckpt' % (self.MODEL_NAME, epoch))
        if os.path.exists(filename):
            print("Loading model from %s" % (filename))
        else:
            print("Cannot load model from %s" % (filename))
            return
        ckpt_data = torch.load(filename)
        self.model.load_state_dict(ckpt_data['state_dict'])
        self.epoch_ckpt = ckpt_data['epoch']
        self._best_loss = ckpt_data['loss'] if 'loss' in ckpt_data.keys() else None
    def resume_training(self):
        """Resume from the most recent ('last') checkpoint in this run's directory."""
        self.load_checkpoint(self.model_path, epoch=-1)
| [
"jacenfox@gmail.com"
] | jacenfox@gmail.com |
9d97806e7191bf98ae9da511e9ccb752b0504f37 | 005d4c68695bacbab7a90d2e55567c1b6eeb2881 | /tweet_EMR.py | 0e67a53e2e688c473bc60037e7bea7e8541cf24a | [] | no_license | nishuranjan/kinesis-tweet-streaming | 6c5f84e228ab0fb6cb05edd2f4a42a1df520432a | 9676ccd9f1ac82d3db1862f26f4811adfeb0d3fe | refs/heads/master | 2022-11-28T23:47:20.896421 | 2020-08-16T16:15:26 | 2020-08-16T16:15:26 | 277,126,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,094 | py | from __future__ import print_function
import sys
import json
import time
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
from pyspark.streaming.kinesis import KinesisUtils, InitialPositionInStream
if __name__ == "__main__":
    # Spark Streaming job: read tweets from a Kinesis stream and persist the
    # Arabic-language ones to timestamped text files under ./tweets/.
    applicationName = "PythonStreamingKinesis"
    streamName = "KinesisDemo"
    endpointUrl = "https://kinesis.us-east-1.amazonaws.com"
    regionName = "us-east-1"
    sc = SparkContext(appName=applicationName)
    # 5-second micro-batches.
    ssc = StreamingContext(sc, 5)
    print("appname is" + applicationName + streamName + endpointUrl + regionName)
    lines = KinesisUtils.createStream(ssc, applicationName, streamName, endpointUrl, regionName, InitialPositionInStream.LATEST, 2)
    def filter_tweets(x):
        """Return True only for tweets whose 'lang' field is Arabic ('ar')."""
        json_tweet = json.loads(x)
        # BUG FIX: dict.has_key() was removed in Python 3; use .get()/the
        # 'in' operator instead (also covers the missing-key case).
        return json_tweet.get('lang') == 'ar'
    # Write each batch's matching tweets to a single file named by wall-clock time.
    lines.foreachRDD(lambda rdd: rdd.filter(filter_tweets).coalesce(1).saveAsTextFile("./tweets/%f" % time.time()))
    ssc.start()
    ssc.awaitTermination()
| [
"noreply@github.com"
] | noreply@github.com |
af69173aac3e75f108dffd3657c7568959902aeb | 9a76228ac99d8a1123d290a836a3252c11060547 | /Utilities/Talon.py | 25a640280502e80dfcc002ea8865298ffd9d162c | [
"MIT"
] | permissive | rohith8272/OpenFlightSim | b8501c94483faf74f1884a547fbf4fd94a13e053 | 9638c996fdb78c85d71c0e6faa0e64eb48cc96fa | refs/heads/master | 2020-05-15T12:44:47.957707 | 2019-03-16T16:34:12 | 2019-03-16T16:34:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,972 | py | """
University of Minnesota
Aerospace Engineering and Mechanics - UAV Lab
Copyright 2019 Regents of the University of Minnesota
See: LICENSE.md for complete license details
Author: Louis Mueller, Chris Regan
"""
import numpy as np
import copy
import VspParse
import OpenFdm
#%% Constants
in2m = 0.0254
#%% Aircraft Definition
def LoadAircraftDef(load):
    """Build the oFdm aircraft definition dictionary for the Talon airframe.

    Args:
        load: dict with load['Aero']['vspPath'/'aircraftName'/'aeroName']
            pointing at the OpenVSP aero data to parse.

    Returns:
        oFdm: nested dict with 'Aero', 'MassProp', 'FCS', 'Act', 'Prop',
            'Sensor', and 'Gear' sections consumed by the simulation.
    """
    oFdm = {}
    #%% Aero Data
    # Parse VSP files and merge the data into a single set
    vspData = VspParse.ParseAll(load['Aero']['vspPath'], load['Aero']['aircraftName'], load['Aero']['aeroName'])
    # vspData['stabTab'] is NOT a copy, values are linked!!
    # Define some of the model specific conversions for VSP to oFdm
    convertDef = {}
    convertDef['Lunit'] = 'm' # Specify the units used in the VSP model.
    # Surface name conversions
    convertDef['Surf'] = {}
    convertDef['Surf']['names'] = vspData['Stab']['surfNames']
    convertDef['Surf']['Vsp'] = ['d' + s + '_rad' for s in convertDef['Surf']['names']]
    convertDef['Surf']['Vsp_Grp'] = ['ConGrp_' + str(d) for d in range(1, 1+len(convertDef['Surf']['Vsp']))]
    convertDef['Surf']['oFdm'] = ['d' + s + '_rad' for s in convertDef['Surf']['names']]
    # Convert the VSP Aero data to oFdm
    oFdm['Aero'] = OpenFdm.LoadVsp(vspData, convertDef)
    oFdm['Aero']['surfNames'] = vspData['Stab']['surfNames']
    #%% Mass Properties - FIXIT - Need external MassProperty Source
    # Mass Properties, mass has actual. Inertias are scaled from US25e test data.
    oFdm['MassProp'] = {}
    # Mass Properties
    oFdm['MassProp']['mass_kg'] = 3.188 # Measured with Battery and Sony a6000 camera
    cgX = 0.4645 # Wing 1/4 chord
    cgY = 0.0
    cgZ = 0.0
    oFdm['MassProp']['rCG_S_m'] = np.array([cgX, cgY, cgZ])
    # Inertias scaled by mass ratio from the US25e reference airframe (1.959 kg).
    Ixx = 0.07151 * (oFdm['MassProp']['mass_kg'] / 1.959)
    Iyy = 0.08636 * (oFdm['MassProp']['mass_kg'] / 1.959)
    Izz = 0.15364 * (oFdm['MassProp']['mass_kg'] / 1.959)
    Ixy = 0.0 * (oFdm['MassProp']['mass_kg'] / 1.959)
    Ixz = 0.0 * (oFdm['MassProp']['mass_kg'] / 1.959)
    Iyz = 0.0 * (oFdm['MassProp']['mass_kg'] / 1.959)
    oFdm['MassProp']['inertia_kgm2'] = np.array([[Ixx, Ixy, Ixz], [Ixy, Iyy, Iyz], [Ixz, Iyz, Izz]])
    #%% Flight Control System
    oFdm['FCS'] = {}
    # Pilot input scaling/sensitivity
    pilot = {}
    pilot['kRoll'] = 60.0 * np.pi / 180.0 # Normalized stick to cmdRoll
    pilot['kPitch'] = -30.0 * np.pi / 180.0 # Normalized stick to cmdPitch
    pilot['kYaw'] = -10.0 * np.pi / 180.0 # Normalized stick to cmdYaw
    pilot['kFlap'] = 20.0 * np.pi / 180.0 # Normalized stick to cmdFlap
    oFdm['FCS']['Pilot'] = pilot
    # Mixer: rows are surfaces, columns are [roll, pitch, yaw, flap] commands.
    mixer = {}
    mixer['surfNames'] = oFdm['Aero']['surfNames']
    mixer['inputs'] = ['cmdRoll_rps', 'cmdPitch_rps', 'cmdYaw_rps', 'cmdFlap_rad']
    mixer['surfEff'] = [
        [ 1.00000,-1.00000, 0.25000,-0.25000],
        [ 0.00000, 0.00000,-1.00000,-1.00000],
        [ 0.00000, 0.00000,-1.00000, 1.00000],
        [ 1.00000, 1.00000, 0.00000, 0.00000]
        ]
    # Pseudo-inverse maps commands to surface deflections; small entries zeroed.
    mixer['surfMix'] = np.linalg.pinv(mixer['surfEff'])
    mixer['surfMix'][abs(mixer['surfMix'] / mixer['surfMix'].max()) < 0.05] = 0
    oFdm['FCS']['Mixer'] = mixer
    #%% Actuator dynamic model, second-order with freeplay and limits
    act = {}
    for surf in oFdm['Aero']['surfNames']:
        act[surf] = {}
        act[surf]['bandwidth_hz'] = 4.0 # Guess
        act[surf]['bandwidth_rps'] = act[surf]['bandwidth_hz'] * 2*np.pi
        act[surf]['lag_nd'] = round(200.0 / act[surf]['bandwidth_hz']) # Based on 200 Hz Sim Frame
        act[surf]['delay_s'] = 0.020 # Guess
        act[surf]['freeplay_rad'] = 1.0 * np.pi/180.0 # Guess
        act[surf]['min'] = -30.0 * np.pi/180.0
        act[surf]['max'] = 30.0 * np.pi/180.0
    oFdm['Act'] = act
    #%% Create Propulsion data (motor and prop)
    prop = {}
    prop['nameMotor'] = 'Power25'
    prop['rMotor_S_m'] = np.array([43 * in2m, 0, 1 * in2m])
    prop['sMotor_deg'] = np.array([0, 0, 0])
    prop['nameProp'] = 'APC 12x6e'
    prop['rProp_S_m'] = prop['rMotor_S_m'] + np.array([2 * in2m, 0, 0])
    prop['sProp_deg'] = prop['sMotor_deg']
    prop['p_factor'] = 0.0
    prop['sense'] = 1.0
    oFdm['Prop'] = {}
    oFdm['Prop']['Main'] = prop
    #%% Create Sensor data
    # All error-model parameters below are zeroed (ideal sensors).
    oFdm['Sensor'] = {}
    ## IMU
    oFdm['Sensor']['Imu'] = {}
    # Accel Location and Orientation
    oFdm['Sensor']['Imu']['Accel'] = {}
    oFdm['Sensor']['Imu']['Accel']['r_S_m'] = [0,0,0]
    oFdm['Sensor']['Imu']['Accel']['s_deg'] = [0,0,0]
    # Accel Error Model Parameters (units are _mps2)
    oFdm['Sensor']['Imu']['Accel']['delay_s'] = [0,0,0]
    oFdm['Sensor']['Imu']['Accel']['lag'] = [0,0,0]
    oFdm['Sensor']['Imu']['Accel']['noiseVar'] = [0,0,0]
    oFdm['Sensor']['Imu']['Accel']['drift_ps'] = [0,0,0]
    oFdm['Sensor']['Imu']['Accel']['gain_nd'] = [1,1,1]
    oFdm['Sensor']['Imu']['Accel']['bias'] = [0,0,0]
    # Gyro Location and Orientation
    oFdm['Sensor']['Imu']['Gyro'] = {}
    oFdm['Sensor']['Imu']['Gyro']['r_S_m'] = oFdm['Sensor']['Imu']['Accel']['r_S_m']
    oFdm['Sensor']['Imu']['Gyro']['s_deg'] = oFdm['Sensor']['Imu']['Accel']['s_deg']
    # Gyro Error Model Parameters (units are _rps)
    oFdm['Sensor']['Imu']['Gyro']['delay_s'] = oFdm['Sensor']['Imu']['Accel']['delay_s']
    oFdm['Sensor']['Imu']['Gyro']['lag'] = oFdm['Sensor']['Imu']['Accel']['lag']
    oFdm['Sensor']['Imu']['Gyro']['noiseVar'] = [0,0,0]
    oFdm['Sensor']['Imu']['Gyro']['drift_ps'] = [0,0,0]
    oFdm['Sensor']['Imu']['Gyro']['gain_nd'] = [1,1,1]
    oFdm['Sensor']['Imu']['Gyro']['bias'] = [0,0,0]
    # Magnetometer Location and Orientation
    oFdm['Sensor']['Imu']['Mag'] = {}
    oFdm['Sensor']['Imu']['Mag']['r_S_m'] = oFdm['Sensor']['Imu']['Accel']['r_S_m']
    oFdm['Sensor']['Imu']['Mag']['s_deg'] = oFdm['Sensor']['Imu']['Accel']['s_deg']
    # Magnetometer Error Model Parameters (units are _nT)
    oFdm['Sensor']['Imu']['Mag']['delay_s'] = oFdm['Sensor']['Imu']['Accel']['delay_s']
    oFdm['Sensor']['Imu']['Mag']['lag'] = oFdm['Sensor']['Imu']['Accel']['lag']
    oFdm['Sensor']['Imu']['Mag']['noiseVar'] = [0,0,0]
    oFdm['Sensor']['Imu']['Mag']['drift_ps'] = [0,0,0]
    oFdm['Sensor']['Imu']['Mag']['gain_nd'] = [1,1,1]
    oFdm['Sensor']['Imu']['Mag']['bias'] = [0,0,0]
    ## GPS
    oFdm['Sensor']['Gps'] = {}
    # Gps Location
    oFdm['Sensor']['Gps']['r_S_m'] = [0,0,0] # FIXIT - Not currently used
    # GPS Position Error Model # NOTE units are radians, radians, meters for Lat and Long!!
    oFdm['Sensor']['Gps']['Pos'] = {}
    oFdm['Sensor']['Gps']['Pos']['delay_s'] = [0,0,0]
    oFdm['Sensor']['Gps']['Pos']['lag'] = [0,0,0]
    oFdm['Sensor']['Gps']['Pos']['noiseVar'] = [0,0,0]
    oFdm['Sensor']['Gps']['Pos']['drift_ps'] = [0,0,0]
    oFdm['Sensor']['Gps']['Pos']['gain_nd'] = [1,1,1]
    oFdm['Sensor']['Gps']['Pos']['bias'] = [0,0,0]
    # GPS Velocity Error Model
    oFdm['Sensor']['Gps']['Vel'] = {}
    oFdm['Sensor']['Gps']['Vel']['delay_s'] = [0,0,0]
    oFdm['Sensor']['Gps']['Vel']['lag'] = [0,0,0]
    oFdm['Sensor']['Gps']['Vel']['noiseVar'] = [0,0,0]
    oFdm['Sensor']['Gps']['Vel']['drift_ps'] = [0,0,0]
    oFdm['Sensor']['Gps']['Vel']['gain_nd'] = [1,1,1]
    oFdm['Sensor']['Gps']['Vel']['bias'] = [0,0,0]
    ## Airdata
    oFdm['Sensor']['Pitot'] = {}
    # Airdata Location and Orientation
    oFdm['Sensor']['Pitot']['r_S_m'] = [0,0,0]
    oFdm['Sensor']['Pitot']['s_deg'] = [0,0,0]
    # Airdata Error Model
    # Pitot Vector - [presStatic_Pa, presTip_Pa, temp_C]
    oFdm['Sensor']['Pitot']['delay_s'] = [0,0,0]
    oFdm['Sensor']['Pitot']['lag'] = [0,0,0]
    oFdm['Sensor']['Pitot']['noiseVar'] = [0,0,0]
    oFdm['Sensor']['Pitot']['drift_ps'] = [0,0,0]
    oFdm['Sensor']['Pitot']['gain_nd'] = [1,1,1]
    oFdm['Sensor']['Pitot']['bias'] = [0,0,0]
    #%% Create Gear data
    # Skid geometry (X forward from nose, Y right, H below the body frame).
    mainH = 6.5 * in2m
    mainY = 0.0 * in2m
    mainX = 8.0 * in2m
    tailX = 39.0 * in2m
    tailH = 5.5 * in2m
    tailY = 0.0 * in2m
    wingH = -1.5 * in2m
    wingY = 39.0 * in2m
    wingX = 25.0 * in2m
    cgX = oFdm['MassProp']['rCG_S_m'][0]
    # Static load split between main and tail skids from moment balance about the CG.
    massMain = oFdm['MassProp']['mass_kg'] * (tailX - cgX) / -(mainX - tailX)
    massTail = oFdm['MassProp']['mass_kg'] * (mainX - cgX) / -(tailX - cgX)
    massWing = massTail # Needs some mass to compute the spring parameters, but should be 0.0
    oFdm['Gear'] = {}
    # Belly skid
    oFdm['Gear']['Main'] = {}
    oFdm['Gear']['Main']['rGear_S_m'] = np.array([mainX, mainY, -mainH])
    oFdm['Gear']['Main']['FricStatic'] = 0.8
    oFdm['Gear']['Main']['FricDynamic'] = 0.5
    oFdm['Gear']['Main']['FricRoll'] = 0.25
    # Spring sized for a 5 Hz natural frequency, critically damped.
    wnDesire = 5.0 * 2*np.pi
    dRatio = 1.0
    oFdm['Gear']['Main']['kSpring_Npm'] = wnDesire * wnDesire * massMain
    oFdm['Gear']['Main']['dampSpring_Nspm'] = 2 * dRatio * wnDesire * massMain
    # Tail skid
    oFdm['Gear']['Tail'] = {}
    oFdm['Gear']['Tail']['rGear_S_m'] = np.array([tailX, tailY, -tailH])
    oFdm['Gear']['Tail']['FricStatic'] = 0.8
    oFdm['Gear']['Tail']['FricDynamic'] = 0.5
    oFdm['Gear']['Tail']['FricRoll'] = 0.25
    wnDesire = 5.0 * 2*np.pi
    dRatio = 1.0
    oFdm['Gear']['Tail']['kSpring_Npm'] = wnDesire * wnDesire * massTail
    oFdm['Gear']['Tail']['dampSpring_Nspm'] = 2 * dRatio * wnDesire * massTail
    # Wing skid - left
    oFdm['Gear']['WingL'] = {}
    oFdm['Gear']['WingL']['rGear_S_m'] = np.array([wingX, -wingY, -wingH])
    oFdm['Gear']['WingL']['FricStatic'] = 0.8
    oFdm['Gear']['WingL']['FricDynamic'] = 0.5
    oFdm['Gear']['WingL']['FricRoll'] = 0.25
    wnDesire = 5.0 * 2*np.pi
    dRatio = 1.0
    oFdm['Gear']['WingL']['kSpring_Npm'] = wnDesire * wnDesire * massWing
    oFdm['Gear']['WingL']['dampSpring_Nspm'] = 2 * dRatio * wnDesire * massWing
    # Wing skid - right (mirror of the left skid about the X-Z plane)
    oFdm['Gear']['WingR'] = copy.deepcopy(oFdm['Gear']['WingL'])
    oFdm['Gear']['WingR']['rGear_S_m'][1] = -oFdm['Gear']['WingL']['rGear_S_m'][1]
    #%% Return
    return (oFdm)
| [
"rega0051@umn.edu"
] | rega0051@umn.edu |
735451bc26fda37c9409923a75403a3041875b67 | 5e4fb830992c0180bf400e6993b1a491087c8c42 | /instance/config.py | 6aafcafca87812264fb088299b0d1c8c0e931bca | [] | no_license | Partck/SendITAPI | 3bbbf3c57c1611db08b1d311b27d15058ecb368c | 01cbe2e3c0e2538a35280c2ed3c06811cb0028d2 | refs/heads/develop | 2022-12-14T22:14:51.275474 | 2018-11-30T04:59:30 | 2018-11-30T04:59:30 | 156,836,938 | 0 | 1 | null | 2022-12-08T01:17:36 | 2018-11-09T09:05:36 | Python | UTF-8 | Python | false | false | 499 | py | import os
class Config(object):
    """Base Flask configuration shared by all environments."""
    # JWT signing key; read from the environment so it is never committed.
    SECRET_KEY = os.environ.get("JWT_SECRET_KEY")
class TestingConfig(Config):
    """Configuration used when running the test suite."""
    DEBUG = True
    # SECURITY: hard-coded database credentials committed to source control.
    # These should be rotated and loaded from the environment like
    # DevelopmentConfig does.
    url ="dbname = 'dfhkvc5kqeh9bi' host = 'ec2-54-235-156-60.compute-1.amazonaws.com' port = '5432' \
    user = 'ehiewszseuqzyg' password = 'aff8667735390b9eebe291e92f4ad1d75a255aeefc38b8382f368e7a2a0650bd'"
class DevelopmentConfig(Config):
    """Configuration used for local development."""
    DEBUG = True
    # Database connection string supplied via the URL environment variable.
    url = os.getenv("URL")
# Registry mapping environment names to their configuration classes.
config={"test":TestingConfig,
        "dev":DevelopmentConfig}
| [
"patgachuru@gmail.com"
] | patgachuru@gmail.com |
c102dd6c9dfdf2f28e8ea302f8887e73f5fa32c8 | 59fdbd2c22f8489c493dd71fe5fe18927ed7c040 | /Chapter2/Example8.py | 92afb01e005dcfb16422abc34da7ffe9dfb9cecc | [] | no_license | lzr2006/PythonStudy | 8d96fceb5fa13b5955071304bda342d8f19a9808 | e4db9852bfca95398cad51ff529af77e4f68c3c6 | refs/heads/master | 2022-12-21T20:24:55.960089 | 2020-10-09T11:20:33 | 2020-10-09T11:20:33 | 298,911,342 | 0 | 0 | null | 2020-09-26T22:49:08 | 2020-09-26T22:40:20 | null | UTF-8 | Python | false | false | 43 | py | #Chapter2/Example8.py
# Demonstrate integer (floor) division: 10 // 3 evaluates to 3.
a = 10
b = 3
print(a // b)
| [
"noreply@github.com"
] | noreply@github.com |
da1c27f4df3f3d42ec1025d9f87a1ffc36a10f25 | d61d05748a59a1a73bbf3c39dd2c1a52d649d6e3 | /chromium/content/test/gpu/gpu_tests/gpu_integration_test_unittest.py | 7abe56fc3e5829005d6262afc304c84092b965a5 | [
"BSD-3-Clause"
] | permissive | Csineneo/Vivaldi | 4eaad20fc0ff306ca60b400cd5fad930a9082087 | d92465f71fb8e4345e27bd889532339204b26f1e | refs/heads/master | 2022-11-23T17:11:50.714160 | 2019-05-25T11:45:11 | 2019-05-25T11:45:11 | 144,489,531 | 5 | 4 | BSD-3-Clause | 2022-11-04T05:55:33 | 2018-08-12T18:04:37 | null | UTF-8 | Python | false | false | 6,491 | py | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import shutil
import tempfile
import unittest
import mock
from telemetry.testing import browser_test_runner
from gpu_tests import path_util
from gpu_tests import gpu_integration_test
path_util.AddDirToPathIfNeeded(path_util.GetChromiumSrcDir(), 'tools', 'perf')
from chrome_telemetry_build import chromium_config
class GpuIntegrationTestUnittest(unittest.TestCase):
  """Meta-tests for the GPU integration test harness.

  Each test launches a small telemetry browser-test suite from unittest_data
  via browser_test_runner, then checks the recorded pass/fail/skip sets and
  the browser start/crash counters the suite wrote to a state JSON file.
  """
  def setUp(self):
    # Populated by _RunIntegrationTest from the suite's test-state JSON.
    self._test_state = {}
  def testSimpleIntegrationTest(self):
    """Sanity-check expected/unexpected pass, failure, flake and skip handling."""
    self._RunIntegrationTest(
        'simple_integration_unittest',
        ['unittest_data.integration_tests.SimpleTest.unexpected_error',
         'unittest_data.integration_tests.SimpleTest.unexpected_failure'],
        ['unittest_data.integration_tests.SimpleTest.expected_flaky',
         'unittest_data.integration_tests.SimpleTest.expected_failure'],
        ['unittest_data.integration_tests.SimpleTest.expected_skip'],
        [])
    # It might be nice to be more precise about the order of operations
    # with these browser restarts, but this is at least a start.
    self.assertEquals(self._test_state['num_browser_starts'], 6)
  # NOTE(review): method name has a stray 't' ("Testt"); harmless for test
  # discovery but worth renaming.
  def testIntegrationTesttWithBrowserFailure(self):
    """Browser fails to start twice; the harness must retry and recover."""
    self._RunIntegrationTest(
        'browser_start_failure_integration_unittest', [],
        ['unittest_data.integration_tests.BrowserStartFailureTest.restart'],
        [], [])
    self.assertEquals(self._test_state['num_browser_crashes'], 2)
    self.assertEquals(self._test_state['num_browser_starts'], 3)
  def testIntegrationTestWithBrowserCrashUponStart(self):
    """Browser crashes right after starting; the harness must restart it."""
    self._RunIntegrationTest(
        'browser_crash_after_start_integration_unittest', [],
        [('unittest_data.integration_tests.BrowserCrashAfterStartTest.restart')],
        [], [])
    self.assertEquals(self._test_state['num_browser_crashes'], 2)
    self.assertEquals(self._test_state['num_browser_starts'], 3)
  def testRetryLimit(self):
    """--retry-limit=N yields exactly N+1 attempts of a failing test."""
    self._RunIntegrationTest(
        'test_retry_limit',
        ['unittest_data.integration_tests.TestRetryLimit.unexpected_failure'],
        [],
        [],
        ['--retry-limit=2'])
    # The number of attempted runs is 1 + the retry limit.
    self.assertEquals(self._test_state['num_test_runs'], 3)
  def testRepeat(self):
    """--repeat=N runs a passing test N times."""
    self._RunIntegrationTest(
        'test_repeat',
        [],
        ['unittest_data.integration_tests.TestRepeat.success'],
        [],
        ['--repeat=3'])
    self.assertEquals(self._test_state['num_test_runs'], 3)
  def testAlsoRunDisabledTests(self):
    """--also-run-disabled-tests executes skipped and flaky-marked tests."""
    self._RunIntegrationTest(
        'test_also_run_disabled_tests',
        ['unittest_data.integration_tests.TestAlsoRunDisabledTests.skip',
         'unittest_data.integration_tests.TestAlsoRunDisabledTests.flaky'],
        # Tests that are expected to fail and do fail are treated as test passes
        [('unittest_data.integration_tests.'
          'TestAlsoRunDisabledTests.expected_failure')],
        [],
        ['--also-run-disabled-tests'])
    self.assertEquals(self._test_state['num_flaky_test_runs'], 4)
    self.assertEquals(self._test_state['num_test_runs'], 6)
  def testStartBrowser_Retries(self):
    """StartBrowser retries the configured number of times, stopping each time."""
    class TestException(Exception):
      pass
    def SetBrowserAndRaiseTestException():
      gpu_integration_test.GpuIntegrationTest.browser = (
          mock.MagicMock())
      raise TestException
    gpu_integration_test.GpuIntegrationTest.browser = None
    gpu_integration_test.GpuIntegrationTest.platform = None
    with mock.patch.object(
        gpu_integration_test.serially_executed_browser_test_case.\
        SeriallyExecutedBrowserTestCase,
        'StartBrowser',
        side_effect=SetBrowserAndRaiseTestException) as mock_start_browser:
      with mock.patch.object(
          gpu_integration_test.GpuIntegrationTest,
          'StopBrowser') as mock_stop_browser:
        with self.assertRaises(TestException):
          gpu_integration_test.GpuIntegrationTest.StartBrowser()
        self.assertEqual(mock_start_browser.call_count,
                         gpu_integration_test._START_BROWSER_RETRIES)
        self.assertEqual(mock_stop_browser.call_count,
                         gpu_integration_test._START_BROWSER_RETRIES)
  def _RunIntegrationTest(self, test_name, failures, successes, skips,
                          additional_args):
    """Run a unittest_data suite and assert its pass/fail/skip sets.

    Results and harness state are exchanged through JSON files in a temp dir.
    """
    config = chromium_config.ChromiumConfig(
        top_level_dir=path_util.GetGpuTestDir(),
        benchmark_dirs=[
            os.path.join(path_util.GetGpuTestDir(), 'unittest_data')])
    temp_dir = tempfile.mkdtemp()
    test_results_path = os.path.join(temp_dir, 'test_results.json')
    test_state_path = os.path.join(temp_dir, 'test_state.json')
    try:
      browser_test_runner.Run(
          config,
          [test_name,
           '--write-full-results-to=%s' % test_results_path,
           '--test-state-json-path=%s' % test_state_path] + additional_args)
      with open(test_results_path) as f:
        test_result = json.load(f)
      with open(test_state_path) as f:
        self._test_state = json.load(f)
      actual_successes, actual_failures, actual_skips = (
          self._ExtractTestResults(test_result))
      self.assertEquals(set(actual_failures), set(failures))
      self.assertEquals(set(actual_successes), set(successes))
      self.assertEquals(set(actual_skips), set(skips))
    finally:
      shutil.rmtree(temp_dir)
  def _ExtractTestResults(self, test_result):
    """Flatten the hierarchical JSON results into (successes, failures, skips).

    The results tree nests test-name components; leaves carry 'expected' and
    'actual' result strings (space-separated when a test was retried).
    """
    delimiter = test_result['path_delimiter']
    failures = []
    successes = []
    skips = []
    def _IsLeafNode(node):
      test_dict = node[1]
      return ('expected' in test_dict and
              isinstance(test_dict['expected'], basestring))
    node_queues = []
    for t in test_result['tests']:
      node_queues.append((t, test_result['tests'][t]))
    while node_queues:
      node = node_queues.pop()
      full_test_name, test_dict = node
      if _IsLeafNode(node):
        # A failure is a test whose every actual result is unexpected.
        if all(res not in test_dict['expected'].split() for res in
               test_dict['actual'].split()):
          failures.append(full_test_name)
        elif test_dict['expected'] == test_dict['actual'] == 'SKIP':
          skips.append(full_test_name)
        else:
          successes.append(full_test_name)
      else:
        # Interior node: queue children with their full dotted names.
        for k in test_dict:
          node_queues.append(
              ('%s%s%s' % (full_test_name, delimiter, k),
               test_dict[k]))
    return successes, failures, skips
| [
"csineneo@gmail.com"
] | csineneo@gmail.com |
e55983dca1feb96f5b4323bc57bdc755ed4a5c17 | ca97dafb6309f4ad0f0026ca8473d7db48120459 | /apps/utils/wangediter.py | 4d48e445b9f80519e83e1c54e7325c6b063a087d | [] | no_license | ITFengShuiMaster/-DjangoRestProject- | 6a1aa9bddefa25761c4c0ae16351bb2cb8d93a37 | 679cf527d850e3ee087dd7e80c976fd29341c291 | refs/heads/master | 2022-12-16T00:53:12.593745 | 2018-10-11T08:12:12 | 2018-10-11T08:12:12 | 152,553,011 | 0 | 0 | null | 2022-11-22T02:21:51 | 2018-10-11T07:54:49 | JavaScript | UTF-8 | Python | false | false | 1,192 | py | # -*- coding:utf-8 _*-
__author__ = 'luyue'
__date__ = '2018/5/28 14:05'
import os
from flask import Flask, request,Response
UPLOAD_FOLDER = '/TmageUploads'
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
#文件名合法性验证
def allowed_file(filename, allowed_extensions=None):
    """Return True if *filename* has a permitted extension.

    Args:
        filename: Name of the uploaded file.
        allowed_extensions: Optional set of permitted extensions (without the
            dot). Defaults to the module-level ALLOWED_EXTENSIONS, preserving
            the original single-argument behavior.
    """
    if allowed_extensions is None:
        allowed_extensions = ALLOWED_EXTENSIONS
    # rsplit from the right so "archive.tar.gz" is judged by its final part;
    # lower-case it so "PHOTO.PNG" is accepted like the Flask upload recipe does.
    return '.' in filename and \
           filename.rsplit('.', 1)[1].lower() in allowed_extensions
# Handle image uploads posted by the rich-text editor.
# BUG FIX: the route decorator was missing its '@' (so the view was never
# registered) and 'methods' was misspelled 'methdos'.
@app.route("/ImageUpdate", methods=["POST"])
def GetImage():
    # request.files is a MultiDict keyed by form-field name, not by index
    # (the original request.files[0] raises). Take the first uploaded file
    # regardless of the field name the editor used.
    file = next(iter(request.files.values()), None)
    if file is None:
        result = r"error|未成功获取文件,上传失败"
        res = Response(result)
        res.headers["ContentType"] = "text/html"
        res.headers["Charset"] = "utf-8"
        return res
    else:
        if file and allowed_file(file.filename):
            # NOTE(review): the client-supplied filename is used unsanitized;
            # consider werkzeug.utils.secure_filename before saving — TODO confirm.
            filename = file.filename
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            imgUrl = "http://localhost:5000" + UPLOAD_FOLDER + "/" + filename
            res = Response(imgUrl)
            res.headers["ContentType"] = "text/html"
            res.headers["Charset"] = "utf-8"
            return res
        # NOTE(review): a disallowed extension falls through and returns None
        # (HTTP 500 in Flask) — same as the original; consider an error response.
"wxhzq520@sina.com"
] | wxhzq520@sina.com |
9040725fac22694501f3f81751cda539d3ce2333 | 6c6790ff1f940d7bcb35d5b3adec4c8feb0adffd | /url_services/tests/recipes/mommy_recipes.py | c9be044997cbfcb92db83eb7e6907c01caebb811 | [] | no_license | alexrosa/django-urlshortener-api | 102ea5e16e32c877ed7d3f8469ee3174b0353c9a | eeccb96dccfb8da82052b3899b41bdce2be76662 | refs/heads/master | 2020-04-17T16:48:14.460993 | 2019-01-21T13:29:58 | 2019-01-21T13:29:58 | 166,756,420 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | from model_mommy.recipe import Recipe, seq
from url_services.models import UrlShortener
# model_mommy recipe: reusable UrlShortener fixture with sequential ids and
# fixed URL values for tests.
url_shortener = Recipe(UrlShortener,
                       url_shortener_id=seq(1),
                       absolute_url='www.smartbeans.com.br',
                       short_url='a3b4c5')
| [
"alexrosa@gmail.com"
] | alexrosa@gmail.com |
5923edf19d7db31a20d3a49ffcc3c7c9da07fe70 | 3fca5adf482809cf2825b4233362d54917841786 | /omnibus/replserver.py | 1bcf3d5c805b7e301f99bf048c1c6a934739d6e9 | [
"BSD-3-Clause"
] | permissive | wrmsr/omnibus | a5ad19cebd458893b0ae85b5c847761ad6c1ceed | 3c4ef5eb17b0fff8593fa6a2284337bf193c18d3 | refs/heads/master | 2023-01-19T11:50:39.002781 | 2020-03-12T01:53:53 | 2020-03-12T01:53:53 | 164,266,924 | 3 | 1 | BSD-3-Clause | 2022-12-26T20:58:22 | 2019-01-06T01:55:06 | Python | UTF-8 | Python | false | false | 11,234 | py | """
socat - UNIX-CONNECT:repl.sock
import sys, threading, pdb, functools
def _attach(repl):
frame = sys._current_frames()[threading.enumerate()[0].ident]
debugger = pdb.Pdb(
stdin=repl.conn.makefile('r'),
stdout=repl.conn.makefile('w'),
)
debugger.reset()
while frame:
frame.f_trace = debugger.trace_dispatch
debugger.botframe = frame
frame = frame.f_back
debugger.set_step()
frame.f_trace = debugger.trace_dispatch
"""
import ast
import codeop
import contextlib
import errno
import functools
import logging
import os
import socket as socket_
import sys
import threading
import traceback
import types
import typing as ta
import weakref
from . import check
log = logging.getLogger(__name__)
class DisconnectException(Exception):
    """Raised when the remote side of the console connection hangs up."""
class InteractiveSocketConsole:
    """code.InteractiveConsole but just different enough to not be worth subclassing.

    Runs a read-eval-print loop over a connected socket: prompts, incomplete
    multi-line input, exceptions and expression results are all written back to
    the peer. Each top-level expression result is also stored in the locals as
    ``_0``, ``_1``, ... so earlier results can be referenced later.
    """
    ENCODING = 'utf-8'
    def __init__(
            self,
            conn: socket_.socket,
            locals: ta.MutableMapping = None,
            filename: str = '<console>'
    ) -> None:
        super().__init__()
        if locals is None:
            # Mimic the default namespace of code.InteractiveConsole, plus a
            # back-reference to this console object.
            locals = {
                '__name__': '__console__',
                '__doc__': None,
                '__console__': self,
            }
        self._conn = conn
        self._locals = locals
        self._filename = filename
        self._compiler = codeop.CommandCompiler()
        # Pending input lines of a (possibly multi-line) statement.
        self._buffer: ta.List[str] = []
        # Number of completed statements; used to name result vars _0, _1, ...
        self._count = 0
        # Statement index whose result should be echoed; -1 means "none yet".
        self._write_count = -1
    def reset_buffer(self) -> None:
        """Discard any partially-entered multi-line statement."""
        self._buffer = []
    @property
    def conn(self) -> socket_.socket:
        return self._conn
    CPRT = 'Type "help", "copyright", "credits" or "license" for more information.'
    def interact(self, banner: str = None, exitmsg: str = None) -> None:
        """Run the REPL until the peer disconnects or sends EOF."""
        log.info(f'Console {id(self)} on thread {threading.current_thread().ident} interacting')
        try:
            ps1 = getattr(sys, 'ps1', '>>> ')
            ps2 = getattr(sys, 'ps2', '... ')
            if banner is None:
                self.write(
                    'Python %s on %s\n%s\n(%s)\n' %
                    (sys.version, sys.platform, self.CPRT, self.__class__.__name__))
            elif banner:
                self.write('%s\n' % (str(banner),))
            more = False
            while True:
                try:
                    try:
                        # ps2 while a multi-line statement is still incomplete.
                        line = self.raw_input(ps2 if more else ps1)
                    except EOFError:
                        self.write('\n')
                        break
                    else:
                        more = self.push_line(line)
                except KeyboardInterrupt:
                    self.write('\nKeyboardInterrupt\n')
                    self.reset_buffer()
                    more = False
            if exitmsg is None:
                self.write('now exiting %s...\n' % self.__class__.__name__)
            elif exitmsg != '':
                self.write('%s\n' % exitmsg)
        except DisconnectException:
            # Peer hung up; end the session quietly.
            pass
        except OSError as oe:
            # NOTE(review): only EBADF (socket already closed) is intended to be
            # ignored here, but other OSErrors are swallowed too — confirm.
            if oe.errno == errno.EBADF:
                pass
        finally:
            log.info(f'Console {id(self)} on thread {threading.current_thread().ident} finished')
    def push_line(self, line: str) -> bool:
        """Append a line of input; return True if more input is needed."""
        self._buffer.append(line)
        source = '\n'.join(self._buffer)
        more = self.run_source(source, self._filename)
        if not more:
            self.reset_buffer()
        return more
    def raw_input(self, prompt: str = '') -> str:
        """Write *prompt* and read one newline-terminated line from the socket."""
        self.write(prompt)
        buf = b''
        while True:
            # One byte at a time: simple, and fine for interactive typing rates.
            b = self._conn.recv(1)
            if not b:
                raise DisconnectException
            if b == b'\n':
                break
            buf += b
        return buf.decode(self.ENCODING)
    def write(self, data: str) -> None:
        """Encode *data* and send it to the peer."""
        self._conn.send(data.encode(self.ENCODING))
    def compile(
            self,
            source: ta.Union[str, ast.AST],
            filename: str = '<input>',
            symbol: str = 'single'
    ) -> ta.Optional[types.CodeType]:
        """Compile source text or an AST; None means the input is incomplete."""
        if isinstance(source, ast.AST):
            # CommandCompiler's underlying Compile instance accepts AST input.
            return self._compiler.compiler(source, filename, symbol)
        else:
            return self._compiler(source, filename, symbol)
    def run_source(
            self,
            source: ta.Union[str, ast.AST],
            filename: str = '<input>',
            symbol: str = 'single',
    ) -> bool:
        """Compile and run *source*; return True if more input is needed."""
        try:
            code = self.compile(source, filename, symbol)
        except (OverflowError, SyntaxError, ValueError):
            # Case 1 (incorrect)
            self.show_syntax_error(filename)
            return False
        if code is None:
            # Case 2 (incomplete)
            return True
        # Case 3 (complete)
        try:
            node = ast.parse(source)
        except (OverflowError, SyntaxError, ValueError):
            return True
        # If the statement ends with a bare expression, rewrite it into an
        # assignment to _<count> so the result can be echoed and reused.
        if isinstance(node, ast.Module) and node.body and isinstance(node.body[-1], ast.Expr):
            expr = node.body[-1]
            source = ast.Interactive(
                [
                    *node.body[:-1],
                    ast.Assign(
                        [ast.Name(
                            f'_{self._count}',
                            ast.Store(),
                            lineno=expr.lineno,
                            col_offset=expr.col_offset,
                        )],
                        expr.value,
                        lineno=expr.lineno,
                        col_offset=expr.col_offset,
                    )
                ],
            )
            ast.fix_missing_locations(source)
            self._write_count = self._count
            code = self.compile(source, filename, symbol)
        self.run_code(code)
        return False
    def run_code(self, code: types.CodeType) -> None:
        """Execute compiled code in the console namespace; echo any result."""
        try:
            exec(code, self._locals)
        except SystemExit:
            raise
        except Exception:
            self.show_traceback()
        else:
            # Echo the value only when run_source captured an expression result.
            if self._count == self._write_count:
                self.write(repr(self._locals[f'_{self._count}']))
                self.write('\n')
        self._count += 1
    def show_traceback(self) -> None:
        """Send the current exception's traceback to the peer (user frame only)."""
        sys.last_type, sys.last_value, last_tb = ei = sys.exc_info()
        sys.last_traceback = last_tb
        try:
            # tb_next skips the console's own exec() frame.
            lines = traceback.format_exception(ei[0], ei[1], last_tb.tb_next)
            self.write(''.join(lines))
        finally:
            # Break reference cycles through the traceback objects.
            last_tb = ei = None
    def show_syntax_error(self, filename: str = None) -> None:
        """Send the current syntax error to the peer, patching in *filename*."""
        type, value, tb = sys.exc_info()
        sys.last_type = type
        sys.last_value = value
        sys.last_traceback = tb
        if filename and type is SyntaxError:
            # Work hard to stuff the correct filename in the exception
            try:
                msg, (dummy_filename, lineno, offset, line) = value.args
            except ValueError:
                # Not the format we expect; leave it alone
                pass
            else:
                # Stuff in the right filename
                value = SyntaxError(msg, (filename, lineno, offset, line))
                sys.last_value = value
        lines = traceback.format_exception_only(type, value)
        self.write(''.join(lines))
class ReplServer:
    """Serves InteractiveSocketConsole sessions over a Unix domain socket.

    Each accepted connection gets its own daemon thread running a console.
    Usable as a context manager; exiting the context shuts the server down
    and waits (bounded by exit_timeout) for it to stop.
    """
    CONNECTION_THREAD_NAME = 'ReplServerConnection'
    def __init__(
            self,
            path: str,
            *,
            file_mode: int = None,
            poll_interval: float = 0.5,
            exit_timeout: float = 10.0,
    ) -> None:
        super().__init__()
        self._path = path
        # NOTE(review): file_mode is stored but never applied to the socket
        # file — confirm whether a chmod was intended.
        self._file_mode = file_mode
        # Accept timeout; bounds how long shutdown takes to be noticed.
        self._poll_interval = poll_interval
        self._exit_timeout = exit_timeout
        self._socket: socket_.socket = None
        self._is_running = False
        # Weak keys: finished connection threads drop out automatically.
        self._consoles_by_threads: ta.MutableMapping[threading.Thread, InteractiveSocketConsole] = weakref.WeakKeyDictionary()  # noqa
        self._is_shut_down = threading.Event()
        self._should_shutdown = False
    def __enter__(self):
        check.state(not self._is_running)
        check.state(not self._is_shut_down.is_set())
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        if not self._is_shut_down.is_set():
            self.shutdown(True, self._exit_timeout)
    def run(self) -> None:
        """Accept connections until shutdown() is called; blocks the caller."""
        check.state(not self._is_running)
        check.state(not self._is_shut_down.is_set())
        # Remove a stale socket file from a previous run, if any.
        if os.path.exists(self._path):
            os.unlink(self._path)
        self._socket = socket_.socket(socket_.AF_UNIX, socket_.SOCK_STREAM)
        self._socket.settimeout(self._poll_interval)
        self._socket.bind(self._path)
        with contextlib.closing(self._socket):
            self._socket.listen(1)
            log.info(f'Repl server listening on file {self._path}')
            self._is_running = True
            try:
                while not self._should_shutdown:
                    try:
                        conn, _ = self._socket.accept()
                    except socket_.timeout:
                        # Timeout just lets us re-check the shutdown flag.
                        continue
                    log.info(f'Got repl server connection on file {self._path}')
                    def run(conn):
                        with contextlib.closing(conn):
                            # Each session gets its own copy of module globals.
                            variables = globals().copy()
                            console = InteractiveSocketConsole(conn, variables)
                            variables['__console__'] = console
                            log.info(
                                f'Starting console {id(console)} repl server connection '
                                f'on file {self._path} '
                                f'on thread {threading.current_thread().ident}'
                            )
                            self._consoles_by_threads[threading.current_thread()] = console
                            console.interact()
                    thread = threading.Thread(
                        target=functools.partial(run, conn),
                        daemon=True,
                        name=self.CONNECTION_THREAD_NAME)
                    thread.start()
                # Shutdown requested: close live consoles, then join their threads.
                for thread, console in self._consoles_by_threads.items():
                    try:
                        console.conn.close()
                    except Exception:
                        log.exception('Error shutting down')
                for thread in self._consoles_by_threads.keys():
                    try:
                        thread.join(self._exit_timeout)
                    except Exception:
                        log.exception('Error shutting down')
                os.unlink(self._path)
            finally:
                self._is_shut_down.set()
                self._is_running = False
    def shutdown(self, block: bool = False, timeout: float = None) -> None:
        """Request shutdown; optionally wait (up to *timeout*) for completion."""
        self._should_shutdown = True
        if block:
            self._is_shut_down.wait(timeout=timeout)
def _main():
    """Serve a REPL on the unix socket ``repl.sock`` until shut down."""
    server = ReplServer('repl.sock')
    with server:
        server.run()


if __name__ == '__main__':
    _main()
| [
"timwilloney@gmail.com"
] | timwilloney@gmail.com |
266eadfc88787d8d8f35a8ad5c254af232377ec2 | 54cc98e87d96119571f0d8689d26c4d62d1f84dc | /mywebsite/settings.py | c9d0c7ef9c9819197f8c98ddc1d348acd336a876 | [] | no_license | Midnight1/django1 | 39ae231a5eccf8feae875c7a65c7d6c16de91ffc | a5f3e90a3b5c18b5339296e33a590e118ff0aa9a | refs/heads/master | 2021-01-10T20:00:56.007776 | 2015-09-17T14:43:07 | 2015-09-17T14:43:07 | 42,661,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,743 | py | """
Django settings for mywebsite project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from unipath import Path
# Project root (two levels above this file); used for template lookup below.
PROJECT_DIR = Path(__file__).ancestor(2)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'vp+r=_c^+o6j&rmqy!#t+5b&f(6nn7e81m-$d&988k06@+4j5j'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty is acceptable while DEBUG is True; list served hostnames otherwise.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
)
# Request/response middleware, applied in order (Django <= 1.9 setting name).
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'mywebsite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level templates live in <project root>/templates.
        'DIRS': [PROJECT_DIR.child('templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mywebsite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# Development default: a single SQLite file next to the project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| [
"hellghost_1@hotmail.com"
] | hellghost_1@hotmail.com |
2a3a9ed6867c40b4d06206b52ad915c58b2b2ee5 | afc39333dab6fb777e7790aa5b06dfd27fdad46d | /bbs/migrations/0005_comment.py | 47635bbb154f2d3b629b00cfe1209892662f4358 | [] | no_license | SAKUMAISAO130/python_bbs | 4709460290680987de289f88bd1b4b16ffb3dfbb | 14028a1f7ca216015089f2c0c549f91e11c21c25 | refs/heads/master | 2022-09-05T10:31:36.234827 | 2020-05-27T11:49:35 | 2020-05-27T11:49:35 | 266,765,206 | 0 | 0 | null | 2020-05-26T12:45:46 | 2020-05-25T11:46:50 | Python | UTF-8 | Python | false | false | 840 | py | # Generated by Django 2.1.5 on 2020-05-27 00:49
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the bbs ``Comment`` model (auto-generated by Django).

    NOTE: migration content is compared against model state by Django's
    autodetector; avoid hand-editing beyond comments.
    """

    dependencies = [
        ('bbs', '0004_auto_20200526_1302'),
    ]
    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('category', models.IntegerField(blank=True, default=0, null=True, verbose_name='')),
                ('user_name', models.CharField(max_length=200, null=True)),
                ('comment', models.TextField(blank=True, max_length=1000, null=True, verbose_name='')),
                ('image_path', models.TextField(blank=True, max_length=1000, null=True, verbose_name='')),
            ],
        ),
    ]
| [
"colorfullweb@gmail.com"
] | colorfullweb@gmail.com |
7278426f46e6ad4358202385753eb0788391aa02 | fca96796d913a57b60e5525d0392543c8e5cec24 | /node_modules/bcrypt/build/config.gypi | 6b7b118899d9539b36b8a36a79d2db03446290cd | [
"MIT"
] | permissive | Sathya4488/login-page-MEAN-stack | 817299bd9cafcbad46843a5bb347537c75165598 | 46fd4388c8a91096616cce8278245208d21a134e | refs/heads/master | 2020-05-04T09:57:04.665542 | 2019-04-24T13:13:57 | 2019-04-24T13:13:57 | 179,079,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,747 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"debug_nghttp2": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"force_dynamic_crt": 0,
"gas_version": "2.27",
"host_arch": "x64",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt63l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "63",
"llvm_version": 0,
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_experimental_http_parser": "false",
"node_install_npm": "true",
"node_module_version": 67,
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_report": "true",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_large_pages": "false",
"node_use_openssl": "true",
"node_use_pch": "false",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"shlib_suffix": "so.67",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": "true",
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"nodedir": "/home/sathya/.node-gyp/11.13.0",
"standalone_static_library": 1,
"fallback_to_build": "true",
"module": "/home/sathya/Desktop/work/projects/login-page-MEAN-stack/node_modules/bcrypt/lib/binding/bcrypt_lib.node",
"module_name": "bcrypt_lib",
"module_path": "/home/sathya/Desktop/work/projects/login-page-MEAN-stack/node_modules/bcrypt/lib/binding",
"napi_version": "4",
"node_abi_napi": "napi",
"cache_lock_stale": "60000",
"ham_it_up": "",
"legacy_bundling": "",
"sign_git_tag": "",
"user_agent": "npm/6.7.0 node/v11.13.0 linux x64",
"always_auth": "",
"bin_links": "true",
"key": "",
"allow_same_version": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"if_present": "",
"init_version": "1.0.0",
"user": "",
"prefer_online": "",
"noproxy": "",
"force": "",
"only": "",
"read_only": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"tag_version_prefix": "v",
"cache_max": "Infinity",
"timing": "",
"userconfig": "/home/sathya/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"preid": "",
"tmp": "/tmp",
"depth": "Infinity",
"package_lock_only": "",
"save_dev": "",
"usage": "",
"metrics_registry": "https://registry.npmjs.org/",
"otp": "",
"package_lock": "true",
"progress": "true",
"https_proxy": "",
"save_prod": "",
"audit": "true",
"cidr": "",
"onload_script": "",
"sso_type": "oauth",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"dry_run": "",
"prefix": "/usr",
"scope": "",
"browser": "",
"cache_lock_wait": "10000",
"ignore_prepublish": "",
"registry": "https://registry.npmjs.org/",
"save_optional": "",
"searchopts": "",
"versions": "",
"cache": "/home/sathya/.npm",
"send_metrics": "",
"global_style": "",
"ignore_scripts": "",
"version": "",
"local_address": "",
"viewer": "man",
"node_gyp": "/usr/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"audit_level": "low",
"prefer_offline": "",
"color": "true",
"sign_git_commit": "",
"fetch_retry_mintimeout": "10000",
"maxsockets": "50",
"offline": "",
"sso_poll_frequency": "500",
"umask": "0002",
"fetch_retry_maxtimeout": "60000",
"logs_max": "10",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"access": "",
"also": "",
"save": "true",
"unicode": "",
"long": "",
"production": "",
"searchlimit": "20",
"unsafe_perm": "true",
"update_notifier": "true",
"auth_type": "legacy",
"node_version": "11.13.0",
"tag": "latest",
"git_tag_version": "true",
"commit_hooks": "true",
"script_shell": "",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"save_exact": "",
"strict_ssl": "true",
"dev": "",
"globalconfig": "/usr/etc/npmrc",
"init_module": "/home/sathya/.npm-init.js",
"parseable": "",
"globalignorefile": "/usr/etc/npmignore",
"cache_lock_retries": "10",
"searchstaleness": "900",
"node_options": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"json": ""
}
}
| [
"sathyamsb91@gmail.com"
] | sathyamsb91@gmail.com |
78c967718b00bbe41546036f801047647978a636 | aeb8d8e1a25482b1f913aa0b29666d9c024a4bcf | /ccnuoj_webapi/src/__init__.py | b904aca48a97e2b007ee2d713e28bb444d58c55d | [] | no_license | OutOfCage/CCNUOJ | 8d46fe5d6072189f2ad38b27ec089c3ce9ee2cb2 | f2b07daab6132390ab6fc4d53b01eb528921c244 | refs/heads/master | 2020-04-01T13:08:05.406472 | 2018-10-15T17:19:59 | 2018-10-15T17:19:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | from .global_obj import *
from . import model
from . import authentication
from .authentication import init as authentication_init
from . import user
from . import judge_scheme
from . import problem
from . import judge_command
from . import judge_request
from . import submission
| [
"fybmain@gmail.com"
] | fybmain@gmail.com |
2b062e03f669e6aaead91edb14be24e5af00d892 | 0d76013f6e1ee69713690d6d6e65ce05a3c94de1 | /account/urls.py | e37b608b6be1c0e5f060818a1a26f890b42c089d | [] | no_license | rafiulgits/law | 8f8576980a47dc27ef744a9c32447e69630d3eca | 42e6e6ac79229b648e023b3ae9c3252919045453 | refs/heads/master | 2023-03-05T22:05:25.854131 | 2021-02-20T04:02:52 | 2021-02-20T04:02:52 | 177,262,688 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 733 | py | from account.views import auth, manage
from django.urls import path
from django.contrib.auth import views as resetviews
from rest_framework_simplejwt.views import TokenRefreshView
# URL routes for the account app; every route maps to a class-based view.
urlpatterns = [
    # Registration and login.
    path('signup/', auth.SignUp.as_view()),
    path('signin/', auth.SignIn.as_view()),
    # JWT access-token renewal (rest_framework_simplejwt).
    path('access-renew/', TokenRefreshView.as_view()),
    # Profile and account management.
    path('profile/', manage.Profile.as_view()),
    path('update/', auth.AccountUpdate.as_view()),
    path('password-change/', auth.PasswordChange.as_view()),
    # Email verification.
    path('verify/', auth.VerifyEmail.as_view()),
    # Password-reset flow: request -> verify -> reset.
    path('password-reset/request/', auth.PasswordResetRequest.as_view()),
    path('password-reset/verify/', auth.VerifyPasswordRequest.as_view()),
    path('password-reset/', auth.PasswordResetView.as_view()),
] | [
"avoidcloud@gmail.com"
] | avoidcloud@gmail.com |
61da68c0cc57b787ef11ad937a8ba1a75162869f | f3dc0703f6b57389b95c6b1447efd882e65f11bd | /mysite/vehicle_garage/migrations/0005_alter_boat_boat_hin.py | c1387f895a8fe33b5515cd28eee0bc67e73a06d7 | [] | no_license | aku006/bixly-interview-test | 4f4b1a84ca8c5511ccec82ee2f153ce7796667ca | 5e5b0767b62ddc31128ca3b0205ea34b707c83c6 | refs/heads/main | 2023-07-12T21:43:15.841469 | 2021-08-20T22:28:53 | 2021-08-20T22:28:53 | 396,119,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | # Generated by Django 3.2.6 on 2021-08-20 20:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: change ``Boat.boat_hin`` to CharField(max_length=12).

    Presumably sized for a Hull Identification Number, which is a
    12-character code — TODO confirm against the model definition.
    """

    dependencies = [
        ('vehicle_garage', '0004_alter_boat_boat_width'),
    ]
    operations = [
        migrations.AlterField(
            model_name='boat',
            name='boat_hin',
            field=models.CharField(max_length=12),
        ),
    ]
| [
"aku006@ucr.edu"
] | aku006@ucr.edu |
339c8b280925bb5e86c2c9103ccc84f62736179d | 6358146a22e015791c136f9b018847008ed69d1c | /data_Structure.py | bd583bcc7971bac6620eea307ee0541a3635f819 | [] | no_license | kjrendel/HighSchoolCamp | c3942004a6742562d9f3a2818847f1d98a840a93 | 1969cf4c012415ccc14a12710a2f8c4ef3e187c0 | refs/heads/master | 2020-06-03T14:22:56.760619 | 2019-06-14T19:20:55 | 2019-06-14T19:20:55 | 191,602,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 951 | py | """
title: data_Structure
author: Kaly
date: 2019-06-13 11:32
"""
#shakeball
# import random
#
#
# def shake_ball():
# inp = input("ask a question")
# ans = ("yes definitely","without a doubt",'very doubtful','ask again later',"Don't count on it","Cannot Predict now",\
# "as i see it, yes","No chance in H E double hockey sticks","Literally just no","Come back later I'm busy",\
# "I mean maybe", "I hope so")
# return ans[random.randint(0,len(ans))]
# print(shake_ball())
#number1
# numbers = [89, 41, 73, 90]
# total = 0
# for i in numbers:
# total += i
# print(total)
#number2
# x = list(range(0, 15, 5))
# print(x)
#number3
# x = list(range(100, 210, 10))
# print(x)
#number4
# x = list(range(80, 32, -8))
# print(x)
#number5
# for i in range(3):
# print('Alright')
# #countdown
# countdown_number = 10
# while countdown_number>0:
# print(countdown_number, end=" ")
# countdown_number-=1
| [
"kjrendel@gmail.com"
] | kjrendel@gmail.com |
970b980241353efbce82180668a684226303165e | 3a41f35d2ce62429627145f74d1558a3e5f5a2c2 | /microbots/envs/simple_particle.py | 1c42455b69462bdf7d441b7a597530bfd6053e35 | [] | no_license | UD-IDS-LAB/Microbot-Control | a90f560aa85c1ed06d2df94a66c514001c6fa2c2 | bd80f5b14476ccc6d1faaafac6be0492a60388fa | refs/heads/main | 2023-03-19T02:39:09.365863 | 2021-02-04T01:52:01 | 2021-02-04T01:52:01 | 331,729,583 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,447 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 11 15:29:37 2021
@author: Logan
"""
import gym
from gym import spaces
import numpy as np
import math
#material for plotting
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import matplotlib.patches as patches
class ParticlePlot:
    """Live matplotlib view of a single particle inside a square arena.

    Draws the arena boundary once, then on each render() plots the particle's
    position as a red circle plus an arrow from its previous position.
    """

    def __init__(self, title=None, window_size=(10, 10)):
        """
        :param title: figure title (or None).
        :param window_size: indexable [min, max] arena limit; callers pass
            stateBounds. (Changed from a mutable list default to a tuple.)
        """
        # Default to an array of None so render() can call .all() on the
        # elementwise comparison before the first state arrives.
        self.last_states = np.array([None, None, None, None])
        self.xlim = (window_size[0], window_size[1])
        self.ylim = (window_size[0], window_size[1])
        # Create the plot window.
        fig = plt.figure()
        fig.suptitle(title)
        self._draw_boundary(fig.gca())
        plt.show(block=False)
        fig.show()
        self.fig = fig

    def _draw_boundary(self, ax):
        """Draw the red rectangle marking the arena limits on *ax*.

        Shared by __init__ and reset(), which previously duplicated it.
        """
        rect = patches.Rectangle((self.xlim[0], self.ylim[0]),
                                 self.xlim[1] - self.xlim[0],
                                 self.ylim[1] - self.ylim[0],
                                 linewidth=1, edgecolor='r', facecolor='none')
        ax.add_patch(rect)

    def render(self, states):
        """Plot the particle at states[0:2]; arrow from the previous point."""
        # Red circle at the particle's current location.
        nextPos = plt.Circle((states[0], states[1]), 0.5, color='r')
        ax = plt.gcf().gca()
        ax.add_patch(nextPos)
        # Elementwise != on the object array; all-None means "first frame".
        if (self.last_states != None).all():  # noqa: E711
            dx = states[0] - self.last_states[0]
            dy = states[1] - self.last_states[1]
            plt.arrow(self.last_states[0], self.last_states[1],
                      dx, dy, width=0.05, shape='right')
        plt.pause(.05)
        self.last_states = states

    def reset(self):
        """Clear the figure and redraw the arena boundary."""
        self.last_states = np.array([None, None, None, None])
        plt.clf()
        self._draw_boundary(plt.gca())

    def close(self):
        """Block until the figure window is closed."""
        plt.show(block=True)
class SimpleParticle(gym.Env):
    """The simple particle model adapted to use opengym.

    A single particle moves at constant speed; each discrete action picks a
    cardinal direction which is rotated by the fixed offset angle ``phi``
    before being applied for one ``dwellTime``. The objective is to steer
    the particle to the origin.
    """
    metadata = {'render.modes': ['human']}

    def __init__(self, numParticles, phi, stateBounds, dwellTime, maxSteps):
        """
        :param numParticles: particle count (kept for compatibility; a
            single particle is simulated).
        :param phi: fixed rotation offset (radians) applied to every action.
        :param stateBounds: [pMin, pMax, vMin, vMax].
        :param dwellTime: how long each chosen action is applied.
        :param maxSteps: episode step limit.
        """
        super(SimpleParticle, self).__init__()
        self.visualization = None
        self.numParticles = numParticles
        self.phi = phi
        self.stateBounds = stateBounds  # [pMin, pMax, vMin, vMax]
        self.dwellTime = dwellTime
        self.maxSteps = maxSteps
        # Action space is 4 discrete values (right, up, left, down).
        self.action_space = spaces.Discrete(4)
        # Observation bounds: position in [pMin, pMax], velocity in [vMin, vMax].
        self.observation_space = spaces.Box(
            np.array([stateBounds[0], stateBounds[2]]),  # LB
            np.array([stateBounds[1], stateBounds[3]]),  # UB
            dtype=np.float32)

    def reset(self):
        """Reset to the fixed initial state and return the observation."""
        # Fixed start: position at 40% of (pMin, pMin), velocity (vMax, vMax).
        self.states = np.array([self.stateBounds[0], self.stateBounds[0],
                                self.stateBounds[3], self.stateBounds[3]])
        self.states[0:2] *= 0.4  # scale position to be 40% of max distance
        # Convert to 32-bit floats to play nice with pytorch tensors.
        self.states = self.states.astype('float32')
        self.currentStep = 0
        if self.visualization is not None:
            self.visualization.reset()
        # Returning the observation follows the gym API; previous callers
        # that ignored the (None) return value are unaffected.
        return self._next_observation()

    def _next_observation(self):
        """Return the current particle state [x, y, vx, vy]."""
        return self.states

    def _take_action(self, action):
        """Update the state for one dwellTime of the given action.

        Actions: 0=right, 1=up, 2=left, 3=down; the commanded direction is
        rotated by the particle's offset angle ``phi`` before integrating.
        """
        # The particle always travels at the maximum speed.
        vMax = self.stateBounds[3]
        v = np.array([[vMax], [0]])  # assume right (action == 0)
        if action == 1:  # up
            v = np.array([[0], [vMax]])
        if action == 2:  # left
            v = np.array([[-vMax], [0]])
        if action == 3:  # down
            v = np.array([[0], [-vMax]])
        # Rotate the command by the particle's fixed offset angle.
        c, s = np.cos(self.phi), np.sin(self.phi)
        R = np.array([[c, -s], [s, c]])
        v = np.matmul(R, v)
        # Integrate position over one dwell time.
        position = self.states[:2] + np.transpose(v * self.dwellTime)
        self.states = np.append(position, v)
        self.states = self.states.astype('float32')

    def step(self, action):
        """Advance one time step; returns (state, reward, done, new_state).

        NOTE(review): gym's API puts an info dict in the 4th slot; the new
        state is returned instead, kept for backward compatibility.
        """
        self.old_states = self.states
        self._take_action(action)
        self.currentStep += 1  # we just advanced by one dwellTime
        # Episode times out after maxSteps (goal check happens below).
        done = self.currentStep >= self.maxSteps
        # Reward is -1 * the minimum distance between the origin and the
        # particle as it moves between the initial and final states.
        x0, y0 = self.old_states[0], self.old_states[1]
        xf, yf = self.states[0], self.states[1]
        dx = xf - x0
        dy = yf - y0
        # First assume the closest approach is the final position.
        dist = np.linalg.norm(self.states[0:2])
        # If the segment crosses an axis we may have passed closer by.
        if xf * x0 < 0 or yf * y0 < 0:
            if dx == 0:  # moving only vertically
                dist = abs(x0)
            if dy == 0:  # moving only horizontally
                dist = abs(y0)
            if dx != 0 and dy != 0:  # moving at an angle
                m = dy / dx
                # Foot of the perpendicular from the origin to the line
                # through (x0, y0) with slope m.
                # BUGFIX: was (m*m*x0 + m*y0); the correct sign is -m*y0.
                x = (m * m * x0 - m * y0) / (m * m + 1)
                y = m * (x - x0) + y0
                # BUGFIX: math.min does not exist; use the builtin min().
                dist = min(dist, math.sqrt(x * x + y * y))
        # Reward the agent by -1 * dist from origin.
        reward = -dist
        # Penalize the agent by 1,000 if it exits the state bounds.
        if xf < self.stateBounds[0] or xf > self.stateBounds[1] \
                or yf < self.stateBounds[0] or yf > self.stateBounds[1]:
            reward -= 1000
        # BUGFIX: dist can become a plain Python float (no .astype());
        # np.float32() accepts both numpy scalars and floats.
        reward = np.float32(reward)
        # Finish if we are within 0.01 microns of the goal.
        done = done or (dist < 0.01)
        newState = self.states
        return self.states, reward, done, newState

    def _render_to_file(self, filename='data'):
        """Append "<time>,<state>" for the current step to <filename>.csv."""
        # BUGFIX: the original passed three arguments to file.write() and
        # multiplied dwellTime by the bound method self.step; write one
        # formatted line using currentStep, and close via `with`.
        with open(filename + '.csv', 'a+') as out:
            out.write('%s,%s\n' % (self.dwellTime * self.currentStep, self.states))

    def render(self, mode='live', title=None, **kwargs):
        """Render to the screen ('live') or append to a csv file ('file')."""
        if mode == 'file':
            self._render_to_file(kwargs.get('filename', 'data'))
        elif mode == 'live':
            if self.visualization is None:
                # Scale rendering to the state bounds instead of [-1, 1].
                self.visualization = ParticlePlot(title, window_size=self.stateBounds)
            self.visualization.render(self.states)

    def close(self):
        """Close the env and any open visualization window."""
        super().close()
        if self.visualization is not None:
            self.visualization.close()
| [
"Logiant@users.noreply.github.com"
] | Logiant@users.noreply.github.com |
ade3665c82f66c2282de4139af95b6d5e566ae0b | d98fcc71d992c71838eff77c93fba85d2a111e8e | /blog/migrations/0002_comment.py | 23dfc71ee25ce73e00d0323ae56562b2584b47d5 | [] | no_license | yeimermolina/blog | 2a9ae31da35db021abfac233baed93ee0fe7e8b1 | 900dc89b47aa118089a0aa994058b945e305b5c0 | refs/heads/master | 2021-05-07T22:14:38.033072 | 2017-11-01T21:22:56 | 2017-11-01T21:22:56 | 109,179,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 988 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the blog ``Comment`` model (auto-generated by Django).

    NOTE(review): ``ForeignKey`` without ``on_delete`` implies Django < 2.0;
    the argument becomes mandatory on upgrade.
    """

    dependencies = [
        ('blog', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
                ('name', models.CharField(max_length=80)),
                ('email', models.EmailField(max_length=254)),
                ('body', models.TextField()),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('active', models.BooleanField(default=True)),
                ('post', models.ForeignKey(related_name='comments', to='blog.Post')),
            ],
            options={
                'ordering': ('created',),
            },
        ),
    ]
| [
"yeimer.molina@gmail.com"
] | yeimer.molina@gmail.com |
d62f0bd20e58a2256dc029f276709c0b9900ded3 | 0007d76b3a0bb4f5427cba4a00ab1ba244ef7eb2 | /RSA_Attack_Padding.py | 3919c6d705e4a33ef706def804bfeff363046c37 | [] | no_license | hassansallam/RSA_Attack | b4dc4fd8c2d153ebd9c561c201ae03f0f94915d7 | 650a7f868176186fb5d8c24e9ed0401d960954ef | refs/heads/master | 2021-05-31T12:26:43.098089 | 2016-03-15T10:32:20 | 2016-03-15T10:32:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,885 | py | '''
THIS IS RSA ATTACK SYSTEM THAT WORKS IF THE PLAINTEXT IS PADDED
'''
import math
import binascii
# Entry point: run this to start the interactive attack (Python 2 syntax).
def run():
    '''Prompt for the RSA modulus N and launch the Fermat factoring attack.'''
    print'Welcome to RSA attack system'
    print'Please enter the number you want to factor'
    n = input("N: ")
    factor(n)
# Modular multiplicative inverse via the iterative extended Euclid algorithm.
def inverse(x, p):
    """Return y with x*y == 1 (mod p); y may be negative.

    Assumes gcd(x, p) == 1. Normalize with ``inverse(x, p) % p`` when a
    representative in [0, p) is required.
    """
    inv1 = 1
    inv2 = 0
    while p != 1:
        # BUGFIX: use floor division so the code also works on Python 3
        # (plain / would produce floats there; on Python 2 ints, / == //).
        inv1, inv2 = inv2, inv1 - inv2 * (x // p)
        x, p = p, x % p
    return inv2
# Integer square root (floor) via Newton's method.
def intsqrt(n):
    """Return the largest integer x with x*x <= n, for n >= 0."""
    # BUGFIX: n == 0 previously divided by zero (n // x with x == 0).
    if n < 2:
        return n
    x = n
    y = (x + n // x) // 2
    while y < x:
        x = y
        y = (x + n // x) // 2
    return x
# Factor n into two primes via Fermat's method, then run the full attack
# interactively: derive d from e, decrypt, and strip the padding bits.
def factor(n):
    '''Fermat factorization of n = p*q, followed by interactive RSA
    decryption and removal of the high padding bits (Python 2).'''
    a = intsqrt(n)
    b2 = a*a - n
    # NOTE(review): b is seeded with intsqrt(n) rather than intsqrt(b2);
    # the loop recomputes it each pass, but verify behaviour for
    # perfect-square inputs.
    b = intsqrt(n)
    count = 0
    # Search for a, b with a*a - n == b*b, giving n = (a+b)(a-b).
    while b*b != b2:
        a = a + 1
        b2 = a*a - n
        b = intsqrt(b2)
        count += 1
    p=a+b
    q=a-b
    assert n == p * q
    print'Factoraizing.........'
    print'p = ',p
    print'q = ',q
    # Euler's totient of n (named 'mode' in the original).
    mode = (p-1)*(q-1)
    print'(p-1)*(q-1)= ',mode
    e = input("Now enter e: ")
    print'Finding d .........'
    # Private exponent: d = e^-1 mod (p-1)(q-1).
    d = inverse(e,mode)
    print'd = ',d
    ct = input("Enter the cipher text: ")
    print'Decyption.........'
    p = pow(ct, d, n)
    print'The plain text: ',p
    # Below, the padded digits are removed to expose the real plain text.
    myp_binary = bin(p)
    print'The binary representation of plain text: ',myp_binary
    # Keep only the low 200 bits; the padding occupies the higher bits.
    myp_padd = p % pow(2,200)
    print'The plain text after removing the paddings: ',myp_padd
    myp_padd_bin = bin(myp_padd)
    print'The binary representation of real plain text: ',myp_padd_bin
    n = int(myp_padd_bin, 2)
    # NOTE(review): 'str' shadows the builtin; left unchanged here.
    str = binascii.unhexlify('%x' % n)
    print'The ASCII representation:',str
| [
"hassansalam@outlook.com.com"
] | hassansalam@outlook.com.com |
20b9be4665f4613ae1861465fec4bfef7a370263 | d1debd994fdcf65e55d278a83ca55600b9baf13e | /Observer-machine/observer_pattern/observable.py | 9c8467981cfa4dfc8607671db938d24b80529da8 | [] | no_license | JackyCafe/designPettern | 885497ce94ddd3f9439f572a34464ab07c93b377 | 98869087f35bfee102fa96e97062e95f580e8b2c | refs/heads/master | 2023-06-15T19:20:04.441605 | 2021-07-16T05:46:11 | 2021-07-16T05:46:11 | 381,317,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | from typing import List
class Observable:
    """Subject in the observer pattern.

    Keeps a list of subscribed observers and broadcasts itself to each of
    them whenever notify() is called.
    """

    observers: List

    def __init__(self):
        # Every instance starts with an empty subscriber list.
        self.observers = []

    def addObserver(self, observer):
        """Register *observer* to receive future notifications."""
        self.observers.append(observer)

    def removeObserver(self, observer):
        """Deregister *observer* (ValueError if it is not registered)."""
        self.observers.remove(observer)

    def notify(self):
        """Call ``update(self)`` on every registered observer, in order."""
        for watcher in self.observers:
            watcher.update(self)
"powerworker1234@gmail.com"
] | powerworker1234@gmail.com |
00f3aa777df65829459dc28711a6a5fa5e63b497 | 514440c0319b0b9bf1ee2a7b5fd128713c68384e | /whyattend/analysis.py | fc88ebc836e0eb0ddaa907e8fa5fe87cfd4d74f5 | [
"BSD-2-Clause"
] | permissive | chipsi007/World-of-Tanks-Attendance-Tracker | 3af9d68f9b4ea81b61ad35f5a20204c2b098de28 | 576b32586c402a843b88c49bf432a3e8cb3c62dd | refs/heads/master | 2020-03-21T17:02:46.227357 | 2016-06-19T10:57:39 | 2016-06-19T10:57:39 | 138,809,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,514 | py | import math
from collections import namedtuple, defaultdict
from . import replays
# Aggregate per-player statistics; every field is a mapping of player -> value.
PlayerPerformance = namedtuple('PlayerPerformance',
                               ['battle_count', 'avg_dmg', 'avg_kills', 'avg_spotted', 'survival_rate',
                                'avg_spot_damage', 'avg_pot_damage', 'win_rate', 'wn7', 'avg_decap',
                                'avg_tier'])
def player_performance(battles, players):
    """ Player statistics from replay files of given battles and players: damage done, spots, wn7, ...

    Returns a PlayerPerformance namedtuple whose fields are dicts keyed by
    player. Battles without usable replay data are skipped entirely.
    """
    # Per-player accumulators over all counted battles.
    battle_count = defaultdict(int)
    dmg = defaultdict(float)
    kills = defaultdict(int)
    survived = defaultdict(int)
    spotted = defaultdict(int)
    spot_damage = defaultdict(float)
    potential_damage = defaultdict(float)
    wins = defaultdict(int)
    decap = defaultdict(int)
    tier = defaultdict(int)
    for battle in battles:
        replay_data = battle.replay.unpickle()
        if not replay_data or not replay_data["second"]:
            continue
        # The client version decides where vehicle/player info lives.
        replay_version_tokens = replay_data['first']['clientVersionFromExe'].split(".")
        if len(replay_version_tokens) == 1:
            # legacy format
            replay_version_tokens = replay_data['first']['clientVersionFromExe'].split(",")
            replay_major_version = int(replay_version_tokens[1])
            if " " in replay_version_tokens[2]:
                # very strange version format ...
                replay_minor_version = int(replay_version_tokens[2].split()[0])
            else:
                replay_minor_version = int(replay_version_tokens[2])
            # From client 8.11 on the data is embedded in the replay itself.
            if replay_major_version > 8 or (replay_major_version == 8 and replay_minor_version >= 11):
                players_perf = replays.player_performance(replay_data['second'], replay_data['second'][0]['vehicles'],
                                                          replay_data['second'][0]['players'])
            else:
                if not replay_data or not 'pickle' in replay_data or not replay_data['pickle']:
                    continue
                if not isinstance(replay_data['pickle']['vehicles'], dict):
                    continue
                players_perf = replays.player_performance(replay_data['second'], replay_data['pickle']['vehicles'],
                                                          replay_data['pickle']['players'])
        else:
            if not replay_data or not 'pickle' in replay_data or not replay_data['pickle']:
                continue
            if not isinstance(replay_data['pickle']['vehicles'], dict):
                continue
            players_perf = replays.player_performance(replay_data['second'], replay_data['pickle']['vehicles'],
                                                      replay_data['pickle']['players'])
        for player in battle.get_players():
            if not player in players:
                continue
            if not str(player.wot_id) in players_perf:
                # Replay/Players mismatch (account sharing?), skip
                continue
            perf = players_perf[str(player.wot_id)]
            battle_count[player] += 1
            dmg[player] += perf['damageDealt']
            spot_damage[player] += perf['damageAssistedRadio']
            kills[player] += perf['kills']
            survived[player] += 1 if perf['survived'] else 0
            potential_damage[player] += perf['potentialDamageReceived']
            wins[player] += 1 if battle.victory else 0
            spotted[player] += perf['spotted']
            decap[player] += perf['droppedCapturePoints']
            tier[player] += perf['tank_info']['tier']
    # Per-battle averages (players with zero counted battles keep 0.0).
    avg_dmg = defaultdict(float)
    avg_kills = defaultdict(float)
    survival_rate = defaultdict(float)
    avg_spotted = defaultdict(float)
    avg_spot_damage = defaultdict(float)
    avg_pot_damage = defaultdict(float)
    win_rate = defaultdict(float)
    avg_decap = defaultdict(float)
    avg_tier = defaultdict(float)
    for p in players:
        if battle_count[p] > 0:
            bc = float(battle_count[p])
            avg_dmg[p] = dmg[p] / bc
            avg_kills[p] = kills[p] / bc
            survival_rate[p] = survived[p] / bc
            avg_spotted[p] = spotted[p] / bc
            avg_spot_damage[p] = spot_damage[p] / bc
            avg_pot_damage[p] = potential_damage[p] / bc
            win_rate[p] = wins[p] / bc
            avg_decap[p] = decap[p] / bc
            avg_tier[p] = tier[p] / bc
    # WN7 rating (community efficiency metric) from the averages above.
    wn7 = defaultdict(float)
    for p in players:
        if battle_count[p] == 0:
            continue
        tier = avg_tier[p]
        wn7[p] = (1240.0 - 1040.0 / ((min(6, tier)) ** 0.164)) * avg_kills[p] \
                 + avg_dmg[p] * 530.0 / (184.0 * math.exp(0.24 * tier) + 130.0) \
                 + avg_spotted[p] * 125.0 * min(tier, 3) / 3.0 \
                 + min(avg_decap[p], 2.2) * 100.0 \
                 + ((185 / (0.17 + math.exp((win_rate[p] * 100.0 - 35.0) * -0.134))) - 500.0) * 0.45 \
                 - ((5.0 - min(tier, 5)) * 125.0) / (
                     1.0 + math.exp(( tier - (battle_count[p] / 220.0) ** (3.0 / tier) ) * 1.5))
    result = PlayerPerformance(
        battle_count=battle_count,
        avg_dmg=avg_dmg,
        avg_kills=avg_kills,
        avg_spotted=avg_spotted,
        survival_rate=survival_rate,
        avg_spot_damage=avg_spot_damage,
        avg_pot_damage=avg_pot_damage,
        win_rate=win_rate,
        avg_decap=avg_decap,
        avg_tier=avg_tier,
        wn7=wn7
    )
return result | [
"daniel.diepold@gmail.com"
] | daniel.diepold@gmail.com |
41d4be9a41798bf7b2188c2827bb18a60b2d64a8 | 9a5b3d2ef1fa5c488a0eadb3bbf89df368b82589 | /hw12.py | 6ca06dd8bd8b0dd4b359cb704de3ed370ba4a6cd | [] | no_license | smax253/cs115 | bfb1ef82e10caaa36029cdde5451f47e0e53ef7b | 9c670618b274c9c6c883779d6604374c0ba96a4c | refs/heads/master | 2022-10-06T09:14:00.406567 | 2018-12-06T00:22:09 | 2018-12-06T00:22:09 | 149,790,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,694 | py | '''
Created on 11/24/18
@author: Max Shi
Pledge: I pledge my honor that I have abided by the Stevens Honor Code
CS115 - Hw 11 - Date class
'''
DAYS_IN_MONTH = (0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)


class Date(object):
    '''A user-defined data structure that stores and manipulates dates.'''

    def __init__(self, month, day, year):
        '''Constructor: record the calendar date as month/day/year fields.'''
        self.month = month
        self.day = day
        self.year = year

    def __str__(self):
        '''Return the date rendered as zero-padded MM/DD/YYYY.'''
        return '{:02d}/{:02d}/{:04d}'.format(self.month, self.day, self.year)

    def isLeapYear(self):
        '''Return True exactly when this date's year is a Gregorian leap year.'''
        year = self.year
        # Divisible by 4, except century years, which must be divisible by 400.
        return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)

    def copy(self):
        '''Return a brand-new Date holding the same month, day, and year.'''
        return Date(self.month, self.day, self.year)

    def equals(self, d2):
        '''Return True when self and d2 name the same calendar date,
        regardless of whether they are the same object in memory.'''
        return (self.year, self.month, self.day) == (d2.year, d2.month, d2.day)

    def tomorrow(self):
        '''Advance this object in place to the following calendar day.'''
        length = DAYS_IN_MONTH[self.month]
        if self.month == 2 and self.isLeapYear():
            length += 1                     # February gains a 29th day
        if self.day < length:
            self.day += 1
        elif self.month == 12:              # past Dec 31: roll into a new year
            self.year += 1
            self.month = 1
            self.day = 1
        else:                               # past month end: roll into next month
            self.month += 1
            self.day = 1

    def yesterday(self):
        '''Move this object in place back to the previous calendar day.'''
        if self.day != 1:
            self.day -= 1
        elif self.month == 3 and self.isLeapYear():
            self.month = 2                  # stepping back into leap-year February
            self.day = 29
        elif self.month == 1:               # before Jan 1: roll into prior year
            self.month = 12
            self.year -= 1
            self.day = DAYS_IN_MONTH[12]
        else:                               # first of month: go to prior month's end
            self.month -= 1
            self.day = DAYS_IN_MONTH[self.month]

    def addNDays(self, N):
        '''Advance the date by N days, printing every date visited.'''
        print(str(self))
        for _ in range(N):
            self.tomorrow()
            print(str(self))

    def subNDays(self, N):
        '''Move the date back by N days, printing every date visited.'''
        print(str(self))
        for _ in range(N):
            self.yesterday()
            print(str(self))

    def isAfter(self, d2):
        '''Return True when this date falls strictly after d2.'''
        return (self.year, self.month, self.day) > (d2.year, d2.month, d2.day)

    def isBefore(self, d2):
        '''Return True when this date falls strictly before d2.'''
        return (self.year, self.month, self.day) < (d2.year, d2.month, d2.day)

    def diff(self, d2):
        '''Return the signed number of days from d2 to self
        (negative when self precedes d2).'''
        steps = 0
        cursor = self.copy()
        while cursor.isBefore(d2):          # walk forward toward d2
            cursor.tomorrow()
            steps -= 1
        while cursor.isAfter(d2):           # walk backward toward d2
            cursor.yesterday()
            steps += 1
        return steps

    def dow(self):
        '''Return the English weekday name of this date, e.g. 'Sunday'.'''
        names = ('Sunday', 'Monday', 'Tuesday', 'Wednesday',
                 'Thursday', 'Friday', 'Saturday')
        anchor = Date(11, 25, 2018)         # a known Sunday used as reference
        return names[self.diff(anchor) % 7]
"smax253@gmail.com"
] | smax253@gmail.com |
def solution(n, lost, reserve):
    """Count students who can attend PE class after sharing spare uniforms.

    Every student starts with one uniform; students in `lost` lose theirs and
    students in `reserve` own one spare (a student may appear in both lists,
    in which case the counts cancel out).  A student holding more than one
    uniform may lend one to an adjacent student with none, trying the left
    neighbor before the right.  Returns how many of the n students end up
    with at least one uniform.
    """
    uniforms = [1] * (n + 1)              # index 0 unused; students are 1..n
    for student in lost:
        uniforms[student] -= 1
    for student in reserve:
        uniforms[student] += 1
    for student in range(1, n + 1):
        if uniforms[student] != 0:
            continue
        # Greedy: borrow from the left neighbor first, then the right.
        if student != 1 and uniforms[student - 1] > 1:
            uniforms[student - 1] -= 1
            uniforms[student] += 1
        elif student != n and uniforms[student + 1] > 1:
            uniforms[student + 1] -= 1
            uniforms[student] += 1
    return sum(min(count, 1) for count in uniforms[1:])
"shenhaichenhai@gmail.com"
] | shenhaichenhai@gmail.com |
c055ae9b12dbbcdd5f6e7c94fd52a313b3e50979 | ca58b06353c0a8c0e8a87dd2bcf0d06cf54aded8 | /server/resume/about/views.py | 88f253845fc5fc2a0dc8a1b42cf9faa181811ab7 | [] | no_license | whistlepark/ECE-140A-Resume | ff9e8a67525d1a748c561b6af441f29993a51267 | 158222e91ddedddcae2fc9520a23f8f54848be5d | refs/heads/main | 2023-03-18T23:12:17.005562 | 2021-03-14T22:25:23 | 2021-03-14T22:25:23 | 347,766,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | from django.shortcuts import render, HttpResponse
def about(request):
    """Render the static About page for the resume site."""
    context = {}
    return render(request, 'about.html', context)
| [
"arhanna@ucsd.edu"
] | arhanna@ucsd.edu |
c3a9591fd2ff4faec0717a1618d14ca7352353d0 | 5764b4996e64de37b762d738cd4b5d882294559c | /back_for_face/migrations/0003_auto_20210402_0419.py | f2fecf1a210b681c91cca45fd3d5618478ff6fe3 | [] | no_license | Akkutabusova/BackenForFace | 447c1bbbb79f9f5001401ef19223c1d58db91dab | a296b9bd189df56e60b75a2b08b5420c13889fe9 | refs/heads/master | 2023-04-23T06:08:00.082070 | 2021-05-08T15:23:14 | 2021-05-08T15:23:14 | 359,457,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | # Generated by Django 2.2.7 on 2021-04-01 22:19
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: replaces CurrentUser's `qr` field with
    # a nullable integer `door_id` field.

    # Must be applied after migration 0002 of the back_for_face app.
    dependencies = [
        ('back_for_face', '0002_auto_20210402_0412'),
    ]

    operations = [
        # Drop the old `qr` field from the CurrentUser model.
        migrations.RemoveField(
            model_name='currentuser',
            name='qr',
        ),
        # Add `door_id`; nullable so existing rows remain valid without a value.
        migrations.AddField(
            model_name='currentuser',
            name='door_id',
            field=models.IntegerField(null=True),
        ),
    ]
| [
"tabusova.a2000@gmail.com"
] | tabusova.a2000@gmail.com |
215f7d24162ef80e6174c706bfbe4ecdf7c0d938 | c4058241ee3fd2d34e06dc90f83b39b1725a9fa1 | /Tienda/celery.py | a16651412f72caf624942b487559ed2e0aaa672e | [] | no_license | aberlanga25/DjangoTienda | ef09ed5c5c3b2c64075318ca102b368b4a3b4bbc | 8fcf23d1ac1347ad92e8adc1bc5b2a69e7ecf8f6 | refs/heads/master | 2020-03-20T06:50:21.317419 | 2018-06-22T21:45:58 | 2018-06-22T21:45:58 | 137,262,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | import os
from celery import Celery
from django.conf import settings
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Tienda.settings')
app = Celery('tienda')
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
| [
"berlanga2512@gmail.com"
] | berlanga2512@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.