blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8e16c78bb2b11eb3c5096dabf2382795cadc9840 | 7b7e11a180c36c94fe9559a01c792fe60e9ccb7f | /StockNest/stock_backend/migrations/0004_company_maxval.py | 0017536ae803acbf83ad91b7ec7bb88e7759f53b | [
"MIT"
] | permissive | pl-lee/Stock-Price-Forecasting-Using-Artificial-Intelligence | 8d833c0d87781d54ad371116cd96584a5b69a97b | 69192454542432c7120cbf95ea443b567a248400 | refs/heads/master | 2021-09-22T11:15:22.873347 | 2018-09-09T07:47:19 | 2018-09-09T07:47:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-26 13:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('stock_backend', '0003_auto_20170726_0910'),
]
operations = [
migrations.AddField(
model_name='company',
name='maxVal',
field=models.FloatField(default=0),
),
]
| [
"koriavinash1@gmail.com"
] | koriavinash1@gmail.com |
8ad3dd6a89b010a4f7d80ba5e5284a2c0f070bed | 22f1079c111df69711497a0a54f0be2d2556ba62 | /morpfw/crud/util.py | 5344d9a45c49eb2728886f6a4af0ad5941670b99 | [
"Apache-2.0"
] | permissive | morpframework/morpfw | 9b5d320b6d675d8d25d0021de41427c009595c78 | 1a11eb286097f0f6dd39f20e241dd83756ca87b3 | refs/heads/master | 2022-12-12T21:04:18.146975 | 2022-12-02T11:17:07 | 2022-12-02T11:17:07 | 104,162,256 | 8 | 2 | MIT | 2019-07-29T16:21:13 | 2017-09-20T03:43:29 | Python | UTF-8 | Python | false | false | 1,190 | py | import re
from morepath.publish import resolve_model as _resolve_model
from ..interfaces import ISchema
import jsl
import dataclasses
from copy import copy
import typing
from datetime import datetime, date
def resolve_model(request):
newreq = request.app.request_class(
request.environ.copy(), request.app, path_info=request.path
)
context = _resolve_model(newreq)
context.request = request
return context
_marker = object()
def generate_default(schema):
data = {}
if isinstance(schema, jsl.DocumentField):
schema = schema.document_cls
for n, f in schema._fields.items():
if isinstance(f, jsl.DocumentField):
data[n] = generate_default(f)
else:
data[n] = f.get_default()
if data[n] is None:
if isinstance(f, jsl.StringField):
data[n] = None
elif isinstance(f, jsl.IntField) or isinstance(f, jsl.NumberField):
data[n] = None
elif isinstance(f, jsl.DictField):
data[n] = {}
elif isinstance(f, jsl.ArrayField):
data[n] = []
return data
| [
"kagesenshi.87@gmail.com"
] | kagesenshi.87@gmail.com |
8bd1a61f2f5b2c302eb15d529ee167dbbc4beabe | 91b68cd2d4e50263ad53be9bf34c28f4b893b29d | /gps_viewer/settings.py | b2a3405df04c3832a53d81c285c277eb73146e39 | [] | no_license | storrellas/gps_viewer | 6145efca4bcf7f48a96a9d08ceb916bc21cd143f | e8577933b68169193f391808488cacd7ffd5ff69 | refs/heads/master | 2020-04-19T12:05:51.128193 | 2019-02-14T07:31:45 | 2019-02-14T07:31:45 | 168,184,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,391 | py | """
Django settings for gps_viewer project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'o0%@ynj$m8ln=_-pn=)+de2$2ji5y7ks%_fuzi89n)p-3wowhe'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'api',
'ui'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
#'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ORIGIN_ALLOW_ALL = True
ROOT_URLCONF = 'gps_viewer.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'gps_viewer.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(PROJECT_ROOT, "static")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "ui_react/dist/")
]
| [
"storrellas@gmail.com"
] | storrellas@gmail.com |
60111fbb419ac10cb39513b5c247ebbd98e7015b | b51f277dfe339ea30dce10040eca40c20bd8a4dd | /src/config/setting.py | 7ac608787b2becd3c5908b109a5108d8a097ac85 | [
"BSD-3-Clause"
] | permissive | jack139/fair | e08b3b48391d0cb8e72bbc47e7592c030f587f48 | fe0ff64f8edbd794c3fb951ab6af420054e9e585 | refs/heads/master | 2021-06-30T15:17:15.590764 | 2020-09-23T07:14:20 | 2020-09-23T07:14:20 | 160,322,019 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,576 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import web
from pymongo import MongoClient
#####
debug_mode = True # Flase - production, True - staging
#####
#
enable_proxy = True
http_proxy = 'http://192.168.2.108:8888'
https_proxy = 'https://192.168.2.108:8888'
proxy_list = ['192.168.2.103']
enable_local_test = True
#####
web_serv_list={'web1' : ('192.168.2.99','192.168.2.99')} #
local_ip=web_serv_list['web1'][1]
cli = {'web' : MongoClient(web_serv_list['web1'][0]),}
# MongoClient('10.168.11.151', replicaset='rs0') # replica set
# MongoClient('10.168.11.151', replicaset='rs0', readPreference='secondaryPreferred') # 使用secondary 读
db_web = cli['web']['fair_db']
db_web.authenticate('ipcam','ipcam')
thread_num = 1
auth_user = ['test']
cs_admin = ['cs0']
tmp_path = '/usr/local/nginx/html/fair/static/tmp'
logs_path = '/usr/local/nginx/logs'
image_store_path = '/usr/local/nginx/html/fair/static/image/product'
default_shop='55837fd9ec6ef238912fab89'
B3_shop='55837fd9ec6ef238912fab89'
PT_shop={
'001' : '564708a2ec6ef2206f57043c', # 东南
'002' : '', # 华北
'003' : '', # 华东
}
app_host='app.urfresh.cn'
wx_host='wx.urfresh.cn'
image_host='image.urfresh.cn'
notify_host='app.urfresh.cn'
app_pool=['app.urfresh.cn']
WX_store = {
'000' : { # 测试
'wx_appid' : 'wxb920ef74b6a20e69',
'wx_appsecret' : 'ddace9d14b3413c65991278f09a03896',
'mch_id' : '1242104702',
},
'001' : { # 东南
'wx_appid' : 'wxa84493ca70802ab5',
'wx_appsecret' : 'd4624c36b6795d1d99dcf0547af5443d',
'mch_id' : '1284728201',
},
'002' : { # 华北
'wx_appid' : 'wx64a0c20da3b0acb7',
'wx_appsecret' : 'd4624c36b6795d1d99dcf0547af5443d',
'mch_id' : '1284420901',
},
'003' : { # 华东
'wx_appid' : 'wx2527355bfd909dbe',
'wx_appsecret' : '49e8eb83c3fce102215a92047e8e9290',
'mch_id' : '1253845801',
},
}
# region_id 来自文件
f=open('/region_id')
a=f.readlines()
f.close()
region_id = a[0].strip()
# 微信设置
wx_setting = WX_store[region_id]
order_fuffix=''
inner_number = {
'99990000100' : '9998',
'99990000101' : '3942',
'99990000102' : '4345',
'99990000103' : '2875',
'99990000104' : '3492',
'99990000105' : '0980',
'99990000106' : '3482',
'99990000107' : '5340',
'99990000108' : '9873',
'99990000109' : '2345',
'99990000110' : '8653',
}
http_port=80
https_port=443
mail_server='127.0.0.1'
sender='"Kam@Cloud"<kam@f8geek.com>'
worker=['2953116@qq.com']
web.config.debug = debug_mode
config = web.storage(
email = 'jack139@gmail.com',
site_name = 'ipcam',
site_des = '',
static = '/static'
)
| [
"gt@f8geek.com"
] | gt@f8geek.com |
90a7cddaa492df26fbac0ef47f1980e16f99b2ff | bb0eeade4685dc89ff8a53beb813afdf7394989d | /algorithm_test/saima/股神.py | e55fa66355c3369bf29859dbd5bbab3cf51b548f | [] | no_license | zhaocheng1996/pyproject | 72929cd0ba2f0486d7dc87a7defa82656bf75a8e | 0a1973dda314f844f9898357bc4a5c8ee3f2246d | refs/heads/master | 2021-10-26T08:38:43.675739 | 2019-04-11T13:52:46 | 2019-04-11T13:52:46 | 176,939,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 799 | py | '''
有股神吗?
有,小赛就是!
经过严密的计算,小赛买了一支股票,他知道从他买股票的那天开始,股票会有以下变化:第一天不变,以后涨一天,跌一天,涨两天,跌一天,涨三天,跌一天...依此类推。
为方便计算,假设每次涨和跌皆为1,股票初始单价也为1,请计算买股票的第n天每股股票值多少钱?
输入
输入包括多组数据;
每行输入一个n,1<=n<=10^9 。
样例输入
1
2
3
4
5
输出
请输出他每股股票多少钱,对于每组数据,输出一行。
样例输出
1
2
1
2
3
'''
while 1 :
x =int(input())
k = 3
n = 3
while x-k>=n:
n+=k
k+=1#k就是减号的数量
if x<3:
print(x)
else:
print(int(x-(k-2)*2))
| [
"34829837+zhaocheng1996@users.noreply.github.com"
] | 34829837+zhaocheng1996@users.noreply.github.com |
501a8e701d08eb3184bf15a41bc714ea9b715091 | b76daa106277ef2f7ab7f6e3278546c6da0bb967 | /base/web/server.py | 25c3b1b57642b2270e4f795b12adb9c5b5a914d2 | [] | no_license | DyLanCao/ipython | d071b4659999062106438ec077d27754a711ef92 | 746e070d193de04002d277e5170ddf8b5d9d4d44 | refs/heads/master | 2021-06-12T19:31:44.325346 | 2021-02-20T03:17:58 | 2021-02-20T03:17:58 | 142,657,284 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | #-*- coding:utf-8 -*-
import BaseHTTPServer
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
'''处理请求并返回页面'''
# 页面模板
Page = '''\
<html>
<body>
<p>Hello, web!</p>
</body>
</html>
'''
# 处理一个GET请求
def do_GET(self):
self.send_response(200)
self.send_header("Content-Type", "text/html")
self.send_header("Content-Length", str(len(self.Page)))
self.end_headers()
self.wfile.write(self.Page)
#----------------------------------------------------------------------
if __name__ == '__main__':
serverAddress = ('', 8080)
server = BaseHTTPServer.HTTPServer(serverAddress, RequestHandler)
server.serve_forever()
| [
"caoyin2011@163.com"
] | caoyin2011@163.com |
ca44966244b953ca337e34a0e310dced35a4d891 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/venv/lib/python3.8/site-packages/pip/_vendor/pep517/wrappers.py | aab04ff348d90f3ea88259b7d8b1bdf203e500ca | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:c855382e9ed32186e6b9539363ea579e5c8667717f82b222f8994b92918df069
size 10783
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
ef5717815e789db410e78d7019e8f61a68415355 | 421fd914fb9c40fa5f8466377d7270dbf3ec1fdb | /Net/tensor/net7.py | 56820dc7e2ee57d1afae113f13bb65fbbaa0a43f | [] | no_license | jon--lee/vision-amt | 382d466647f52cb0c98a84cdb466821615d07b94 | 6a05921dbc2aa82e1aa651b935b57d470d903cbd | refs/heads/master | 2020-04-12T02:31:33.258391 | 2016-07-22T01:11:27 | 2016-07-22T01:11:27 | 50,693,639 | 1 | 1 | null | 2016-07-22T01:11:46 | 2016-01-29T21:47:10 | Python | UTF-8 | Python | false | false | 1,782 | py | import tensorflow as tf
import inputdata
import random
from tensornet import TensorNet
import time
import datetime
class NetSeven(TensorNet):
def __init__(self):
self.dir = "./net7/"
self.name = "net7"
self.channels = 3
self.x = tf.placeholder('float', shape=[None, 250, 250, self.channels])
self.y_ = tf.placeholder("float", shape=[None, 4])
self.w_conv1 = self.weight_variable([5, 5, self.channels, 5])
self.b_conv1 = self.bias_variable([5])
self.h_conv1 = tf.nn.relu(self.conv2d(self.x, self.w_conv1) + self.b_conv1)
#self.h_conv1 = self.max_pool(self.h_conv1, 4)
#self.w_conv2 = self.weight_variable([5, 5, 5, 3])
#self.b_conv2 = self.bias_variable([3])
#self.h_conv2 = tf.nn.relu(self.conv2d(self.h_conv1, self.w_conv2) + self.b_conv2)
#self.h_conv2 = self.max_pool(self.h_conv2, 4)
# print self.h_conv1.get_shape()
conv_num_nodes = self.reduce_shape(self.h_conv1.get_shape())
fc1_num_nodes = 128
self.w_fc1 = self.weight_variable([conv_num_nodes, fc1_num_nodes])
# self.w_fc1 = self.weight_variable([1000, fc1_num_nodes])
self.b_fc1 = self.bias_variable([fc1_num_nodes])
self.h_conv_flat = tf.reshape(self.h_conv1, [-1, conv_num_nodes])
self.h_fc1 = tf.nn.relu(tf.matmul(self.h_conv_flat, self.w_fc1) + self.b_fc1)
self.w_fc2 = self.weight_variable([fc1_num_nodes, 4])
self.b_fc2 = self.bias_variable([4])
self.y_out = tf.tanh(tf.matmul(self.h_fc1, self.w_fc2) + self.b_fc2)
self.loss = tf.reduce_mean(.5*tf.square(self.y_out - self.y_))
self.train_step = tf.train.MomentumOptimizer(.003, .9)
self.train = self.train_step.minimize(self.loss)
| [
"mdlaskey@umich.edu"
] | mdlaskey@umich.edu |
dbe58b94975afcbf250caa82532fcbdb215e99ef | e23b28fc3ed196866a04af4e790c1c16b1b5183e | /django/login2/apps/login2_app/views.py | 1d07c6c74710450bdc4e95fdb4c868fc722faf28 | [] | no_license | diazmc/Python | 6f47e7fcfb8c263eb154d59a5a9b3866e2c9d6a8 | 89e3d54eeb2b0ed7dc7af24103ace6fb6e45d65e | refs/heads/master | 2021-01-20T01:18:23.954877 | 2017-08-24T10:39:19 | 2017-08-24T10:39:19 | 101,283,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,047 | py | from django.shortcuts import render, redirect
from .models import User
from django.contrib import messages
def index(request):
return render(request, 'login2_app/index.html')
def process(request):
if request.method == "POST":
res = User.objects.register(request.POST)
if res['status']:
request.session['user'] = request.POST['first_name']
return redirect('/success')
else:
for i in range(0, len(res['data'])):
messages.error(request, res['data'][i])
return redirect('/')
def login(request):
if request.method == "POST":
res = User.objects.login(request.POST)
if res['status']:
request.session['user'] = res['data'][0].first_name
return redirect('/success')
else:
messages.error(request, res['data'][0])
return redirect('/')
def logout(request):
request.session.flush()
return redirect('/')
def success(request):
return render(request, 'login2_app/success.html') | [
"mc.arthur_d@hotmail.com"
] | mc.arthur_d@hotmail.com |
d6102bc0218c764e7e2e42eaf102ab7f59933f42 | a18539697b2972a2ade5b8175c065f441962047d | /my_mini_web_ok/my_serverwork.py | 1b349b5ef0130071fdb8fa363ab8ac06cacfa637 | [
"MIT"
] | permissive | aGrass0825/mini_web_framwork | a68ecf2007e93ca7add1dd20973ef7bcc1f9501c | cff3881e65aae24e6cd7e1cd4567ffbea684ff89 | refs/heads/master | 2021-11-25T19:05:46.975097 | 2021-11-24T09:47:59 | 2021-11-24T09:47:59 | 226,657,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,639 | py | """
任务:写一个web服务器
1、导入模块socket\threading\sys
2、建立套接字对象
3、地址重写
3、绑定端口号
4、设置监听,套接字由主动变被动
5、接受浏览器的链接accept
6、接收浏览器的请求
8、查找请求目录
9、发送响应报文
10、结束与浏览器的链接
"""
import socket
import threading
import sys
from application import app
class HttpServer(object):
"""服务器类"""
def __init__(self):
self.tcp_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.tcp_server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
self.tcp_server_socket.bind(("", 8080))
self.tcp_server_socket.listen(128)
def request_client(self, new_client_socket, ip_port):
print("浏览器上线:", ip_port)
request_data = new_client_socket.recv(1024)
# print(request_data)
if not request_data:
print("浏览器下线", ip_port)
new_client_socket.close()
return
response_data = app.application("./static", request_data, ip_port)
new_client_socket.send(response_data)
new_client_socket.close()
def start(self):
while True:
new_client_socket, ip_port = self.tcp_server_socket.accept()
thread_client = threading.Thread(target=self.request_client, args=(new_client_socket, ip_port))
thread_client.setDaemon(True)
thread_client.start()
if __name__ == '__main__':
"""启动"""
http_server_socket = HttpServer()
http_server_socket.start()
| [
"xwp_fullstack@163.com"
] | xwp_fullstack@163.com |
9958750cf4c7f18679fe4caf28b4ef2121d81922 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03910/s508996847.py | 9053d73951bd78510caa8fa55e5777f1f1518c70 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | n = int(input())
cnt = 0
for i in range(1, n + 1):
cnt += i
if cnt >= n:
res = i
break
remove = cnt - n
for i in range(1, res + 1):
if i != remove:
print(i) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
18b2f3f798405ef725faeca6f1b3db16e933b964 | 864755f7d733351b205e460ec54a5f6d13050037 | /devilry/devilry_admin/tests/subject/test_crinstance_subject.py | cbe74d7589cee6ba35a93bdf5ea6b1e7b6b59d04 | [] | permissive | aless80/devilry-django | 27fc14b7bb7356f5f9d168e435a84e7bb43a682a | 416c262e75170d5662542f15e2d7fecf5ab84730 | refs/heads/master | 2020-05-20T12:22:09.255393 | 2019-05-19T21:06:57 | 2019-05-19T21:06:57 | 185,568,847 | 0 | 0 | BSD-3-Clause | 2019-05-08T08:53:52 | 2019-05-08T08:53:51 | null | UTF-8 | Python | false | false | 5,053 | py | from django.conf import settings
from django.test import TestCase, RequestFactory
from model_mommy import mommy
from devilry.devilry_account.models import PermissionGroup
from devilry.devilry_admin.views.subject import crinstance_subject
class TestCrAdminInstance(TestCase):
def test_get_rolequeryset_not_admin(self):
mommy.make('core.Subject')
testuser = mommy.make(settings.AUTH_USER_MODEL)
request = RequestFactory().get('/test')
request.user = testuser
instance = crinstance_subject.CrAdminInstance(request=request)
self.assertEqual([], list(instance.get_rolequeryset()))
def test_get_rolequeryset_superuser(self):
testsubject = mommy.make('core.Subject')
testuser = mommy.make(settings.AUTH_USER_MODEL, is_superuser=True)
request = RequestFactory().get('/test')
request.user = testuser
instance = crinstance_subject.CrAdminInstance(request=request)
self.assertEqual([testsubject], list(instance.get_rolequeryset()))
def test_get_rolequeryset_admin_on_period_does_not_apply(self):
testperiod = mommy.make('core.Period')
periodpermissiongroup = mommy.make('devilry_account.PeriodPermissionGroup',
period=testperiod)
testuser = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('devilry_account.PermissionGroupUser',
user=testuser, permissiongroup=periodpermissiongroup.permissiongroup)
request = RequestFactory().get('/test')
request.user = testuser
instance = crinstance_subject.CrAdminInstance(request=request)
self.assertEqual([], list(instance.get_rolequeryset()))
def test_get_rolequeryset_admin_on_subject(self):
testsubject = mommy.make('core.Subject')
subjectpermissiongroup = mommy.make('devilry_account.SubjectPermissionGroup',
subject=testsubject)
testuser = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('devilry_account.PermissionGroupUser',
user=testuser, permissiongroup=subjectpermissiongroup.permissiongroup)
request = RequestFactory().get('/test')
request.user = testuser
instance = crinstance_subject.CrAdminInstance(request=request)
self.assertEqual([testsubject], list(instance.get_rolequeryset()))
def test_get_devilryrole_for_requestuser_not_admin(self):
testsubject = mommy.make('core.Subject')
testuser = mommy.make(settings.AUTH_USER_MODEL)
request = RequestFactory().get('/test')
request.user = testuser
request.cradmin_role = testsubject
instance = crinstance_subject.CrAdminInstance(request=request)
with self.assertRaises(ValueError):
instance.get_devilryrole_for_requestuser()
def test_get_devilryrole_for_requestuser_superuser(self):
testsubject = mommy.make('core.Subject')
testuser = mommy.make(settings.AUTH_USER_MODEL, is_superuser=True)
request = RequestFactory().get('/test')
request.user = testuser
request.cradmin_role = testsubject
instance = crinstance_subject.CrAdminInstance(request=request)
self.assertEqual('departmentadmin', instance.get_devilryrole_for_requestuser())
def test_get_devilryrole_for_requestuser_departmentadmin(self):
testsubject = mommy.make('core.Subject')
subjectpermissiongroup = mommy.make('devilry_account.SubjectPermissionGroup',
permissiongroup__grouptype=PermissionGroup.GROUPTYPE_DEPARTMENTADMIN,
subject=testsubject)
testuser = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('devilry_account.PermissionGroupUser',
user=testuser, permissiongroup=subjectpermissiongroup.permissiongroup)
request = RequestFactory().get('/test')
request.user = testuser
request.cradmin_role = testsubject
instance = crinstance_subject.CrAdminInstance(request=request)
self.assertEqual('departmentadmin', instance.get_devilryrole_for_requestuser())
def test_get_devilryrole_for_requestuser_subjectadmin(self):
testsubject = mommy.make('core.Subject')
subjectpermissiongroup = mommy.make('devilry_account.SubjectPermissionGroup',
permissiongroup__grouptype=PermissionGroup.GROUPTYPE_SUBJECTADMIN,
subject=testsubject)
testuser = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('devilry_account.PermissionGroupUser',
user=testuser, permissiongroup=subjectpermissiongroup.permissiongroup)
request = RequestFactory().get('/test')
request.user = testuser
request.cradmin_role = testsubject
instance = crinstance_subject.CrAdminInstance(request=request)
self.assertEqual('subjectadmin', instance.get_devilryrole_for_requestuser())
| [
"post@espenak.net"
] | post@espenak.net |
a92caba3e5809f7220aed29e8c066b8129f8ccae | 53c4460e8cce123276932b4ddf2fe00fdee75b65 | /format05.py | f2d85db164b628bffe9b45d81b308b9847712256 | [] | no_license | Yush1nk1m/Study_Python | 5ba8a6eeb73184ea7f1e892daae182b78d265e06 | 516f0ba6d9411453fa0d2df00314e383e3f8cabb | refs/heads/master | 2023-07-09T16:22:22.663219 | 2021-08-22T15:22:22 | 2021-08-22T15:22:22 | 398,831,708 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | output_a = "{:f}".format(52.273)
output_b = "{:15f}".format(52.273) # 15칸 만들기
output_c = "{:+15f}".format(52.273) # 15칸에 부호 추가하기
output_d = "{:+015f}".format(52.273) # 15칸에 부호 추가하고 0으로 채우기
print(output_a)
print(output_b)
print(output_c)
print(output_d)
| [
"kys010306@sogang.ac.kr"
] | kys010306@sogang.ac.kr |
0246403fce1525adbf6c3160b1c8300f3eb2f89b | b1c2e16cff9f0dd9946c61c9504579e0254fef51 | /base/base_data_loader.py | 67d3e8029f2299ca206ffcf1a2af2043b34ead38 | [
"CC-BY-2.0"
] | permissive | JODONG2/semantic-segmentation-level2-cv-02 | 53bfc2a115e62889880ebb1812b790d1b5759c4b | ecef6844454e2339436d5c201392ee55b08781ee | refs/heads/master | 2023-08-26T08:29:01.990496 | 2021-11-08T04:00:49 | 2021-11-08T04:00:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,972 | py | import numpy as np
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import default_collate
from torch.utils.data.sampler import SubsetRandomSampler
class BaseDataLoader(DataLoader):
"""
Base class for all data loaders
"""
def __init__(self, dataset, batch_size, shuffle, validation_split, num_workers, collate_fn=default_collate):
self.validation_split = validation_split
self.shuffle = shuffle
self.batch_idx = 0
self.n_samples = len(dataset)
self.sampler, self.valid_sampler = self._split_sampler(self.validation_split)
self.init_kwargs = {
"dataset": dataset,
"batch_size": batch_size,
"shuffle": self.shuffle,
"collate_fn": collate_fn,
"num_workers": num_workers,
}
super().__init__(sampler=self.sampler, **self.init_kwargs)
def _split_sampler(self, split):
if split == 0.0:
return None, None
idx_full = np.arange(self.n_samples)
np.random.seed(0)
np.random.shuffle(idx_full)
if isinstance(split, int):
assert split > 0
assert split < self.n_samples, "validation set size is configured to be larger than entire dataset."
len_valid = split
else:
len_valid = int(self.n_samples * split)
valid_idx = idx_full[0:len_valid]
train_idx = np.delete(idx_full, np.arange(0, len_valid))
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
# turn off shuffle option which is mutually exclusive with sampler
self.shuffle = False
self.n_samples = len(train_idx)
return train_sampler, valid_sampler
def split_validation(self):
if self.valid_sampler is None:
return None
else:
return DataLoader(sampler=self.valid_sampler, **self.init_kwargs)
| [
"hanbin@kakao.com"
] | hanbin@kakao.com |
e42f7d87fcb7c317bd746f39df25e50c7f71906b | a5884eb2aed17c25a710370654f28b0b70a4441f | /config.py | 9cbdcb715ad5bbf103039f0b43bbf1a9aaa12f57 | [
"Apache-2.0"
] | permissive | craigderington/celery-example-1 | 9ba2c6c94f422b33f6a73e74109cc20bba47cb3d | 6fdae655e512f96eeb9dbb109c647ae56f357bc9 | refs/heads/master | 2020-07-10T15:30:07.447621 | 2019-09-05T18:03:57 | 2019-09-05T18:03:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | import os
# Debug
DEBUG=True
# Celery Settings
CELERY_BROKER_URL = "pyamqp://rabbitmq:5672/"
CELERY_RESULT_BACKEND = "redis://redis:6379/0"
TASK_SERIALIZER = "json"
RESULT_SERIALZIER = "json"
ACCEPT_CONTENT = ["json"]
# Timezome
TIMEZONE = "America/New_York"
ENABLE_UTC = True
# APIs
IPINFO_API_URL = "http://ip-api.com/json/"
NEUTRINO_API_URL = "https://neutrinoapi.com/"
# Environment Settings
# NEUTRINO_API_KEY = os.environ.get("NEUTRINO_API_KEY")
# NEUTRINO_API_USERNAME = os.environ.get("NEUTRINO_API_USERNAME")
# BIGFIX_USER = os.environ.get("BF_USER")
# BIGFIX_PWD = os.environ.get("BF_PWD")
| [
"craig@craigderington.me"
] | craig@craigderington.me |
12c222037cccef2366cde89f0c450753c7f9765a | d7218d554c9d89369c0677256802ea1eb5995dd0 | /repos/cookiecutter/tasks/config.py | 7c3e457a15e8992724681556d9c943f1c27fe8a1 | [
"MIT"
] | permissive | salotz/jubeo | bf6f76d64e8774b4f1f51a8ddbaeb345d2c3676e | 8b1d715af708a7c933d5c9459c3e2ddb7d40d741 | refs/heads/master | 2022-12-10T17:38:45.957322 | 2021-02-20T02:08:31 | 2021-02-20T02:08:31 | 246,981,335 | 1 | 0 | MIT | 2022-12-08T09:51:30 | 2020-03-13T03:34:22 | Python | UTF-8 | Python | false | false | 166 | py | """User settings for a project."""
# load the system configuration. You can override them in this module,
# but beware it might break stuff
from .sysconfig import *
| [
"samuel.lotz@salotz.info"
] | samuel.lotz@salotz.info |
62520424d1b4e94fed465462661aee85fa68e102 | b29149eeee6a2351fb2904415ad751b009d80dad | /mopidy_internetarchive/backend.py | d780b3153875b51da53ab9254e34c9b153f71cd1 | [
"Apache-2.0"
] | permissive | tkem/mopidy-internetarchive | 7a33d24f6c32ca9ac040531e5725a1eeadf3fa38 | 2b6100a412120c828da8899b81562237fb808840 | refs/heads/master | 2022-05-06T19:31:44.909430 | 2022-04-03T19:57:11 | 2022-04-03T19:57:11 | 15,807,922 | 15 | 3 | Apache-2.0 | 2021-08-03T23:17:26 | 2014-01-10T19:52:25 | Python | UTF-8 | Python | false | false | 1,338 | py | import pykka
from mopidy import backend, httpclient
import cachetools
from . import Extension
from .client import InternetArchiveClient
from .library import InternetArchiveLibraryProvider
from .playback import InternetArchivePlaybackProvider
def _cache(cache_size=None, cache_ttl=None, **kwargs):
if cache_size is None:
return None
elif cache_ttl is None:
return cachetools.LRUCache(cache_size)
else:
return cachetools.TTLCache(cache_size, cache_ttl)
class InternetArchiveBackend(pykka.ThreadingActor, backend.Backend):
uri_schemes = [Extension.ext_name]
def __init__(self, config, audio):
super().__init__()
ext_config = config[Extension.ext_name]
self.client = client = InternetArchiveClient(
ext_config["base_url"],
retries=ext_config["retries"],
timeout=ext_config["timeout"],
)
product = f"{Extension.dist_name}/{Extension.version}"
client.useragent = httpclient.format_user_agent(product)
proxy = httpclient.format_proxy(config["proxy"])
client.proxies.update({"http": proxy, "https": proxy})
client.cache = _cache(**ext_config)
self.library = InternetArchiveLibraryProvider(ext_config, self)
self.playback = InternetArchivePlaybackProvider(audio, self)
| [
"tkemmer@computer.org"
] | tkemmer@computer.org |
9edbd3d9de0f548de026e32489b971d5050b5d26 | 519f1ac2b8ca9ee2793af13a88eec6eef7c2637d | /rosalind/GCON.py | 7c3bd9f98bccce480c6b529763717996320e61b3 | [] | no_license | teju85/programming | c4da3493b4cf96b8f52da9bb209636cd898310a5 | 5d64b3f5cc868f7a5ad1bac889d69da9dbe356cd | refs/heads/master | 2021-06-16T07:09:25.159021 | 2017-06-05T04:36:07 | 2017-06-05T04:36:07 | 26,383,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,499 | py | import sys
from common import readFasta
syms = {
'A' : 0,
'C' : 1,
'D' : 2,
'E' : 3,
'F' : 4,
'G' : 5,
'H' : 6,
'I' : 7,
'K' : 8,
'L' : 9,
'M' : 10,
'N' : 11,
'P' : 12,
'Q' : 13,
'R' : 14,
'S' : 15,
'T' : 16,
'V' : 17,
'W' : 18,
'Y' : 19,
}
score = [
[ 4, 0, -2, -1, -2, 0, -2, -1, -1, -1, -1, -2, -1, -1, -1, 1, 0, 0, -3, -2],
[ 0, 9, -3, -4, -2, -3, -3, -1, -3, -1, -1, -3, -3, -3, -3, -1, -1, -1, -2, -2],
[-2, -3, 6, 2, -3, -1, -1, -3, -1, -4, -3, 1, -1, 0, -2, 0, -1, -3, -4, -3],
[-1, -4, 2, 5, -3, -2, 0, -3, 1, -3, -2, 0, -1, 2, 0, 0, -1, -2, -3, -2],
[-2, -2, -3, -3, 6, -3, -1, 0, -3, 0, 0, -3, -4, -3, -3, -2, -2, -1, 1, 3],
[ 0, -3, -1, -2, -3, 6, -2, -4, -2, -4, -3, 0, -2, -2, -2, 0, -2, -3, -2, -3],
[-2, -3, -1, 0, -1, -2, 8, -3, -1, -3, -2, 1, -2, 0, 0, -1, -2, -3, -2, 2],
[-1, -1, -3, -3, 0, -4, -3, 4, -3, 2, 1, -3, -3, -3, -3, -2, -1, 3, -3, -1],
[-1, -3, -1, 1, -3, -2, -1, -3, 5, -2, -1, 0, -1, 1, 2, 0, -1, -2, -3, -2],
[-1, -1, -4, -3, 0, -4, -3, 2, -2, 4, 2, -3, -3, -2, -2, -2, -1, 1, -2, -1],
[-1, -1, -3, -2, 0, -3, -2, 1, -1, 2, 5, -2, -2, 0, -1, -1, -1, 1, -1, -1],
[-2, -3, 1, 0, -3, 0, 1, -3, 0, -3, -2, 6, -2, 0, 0, 1, 0, -3, -4, -2],
[-1, -3, -1, -1, -4, -2, -2, -3, -1, -3, -2, -2, 7, -1, -2, -1, -1, -2, -4, -3],
[-1, -3, 0, 2, -3, -2, 0, -3, 1, -2, 0, 0, -1, 5, 1, 0, -1, -2, -2, -1],
[-1, -3, -2, 0, -3, -2, 0, -3, 2, -2, -1, 0, -2, 1, 5, -1, -1, -3, -3, -2],
[ 1, -1, 0, 0, -2, 0, -1, -2, 0, -2, -1, 1, -1, 0, -1, 4, 1, -2, -3, -2],
[ 0, -1, -1, -1, -2, -2, -2, -1, -1, -1, -1, 0, -1, -1, -1, 1, 5, 0, -2, -2],
[ 0, -1, -3, -2, -1, -3, -3, 3, -2, 1, 1, -3, -2, -2, -3, -2, 0, 4, -3, -1],
[-3, -2, -4, -3, 1, -2, -2, -3, -3, -2, -1, -4, -4, -2, -3, -3, -2, -3, 11, 2],
[-2, -2, -3, -2, 3, -3, 2, -1, -2, -1, -1, -2, -3, -1, -2, -2, -2, -1, 2, 7]
]
gapPenalty = -5
def editDistance(s, t):
    """Global alignment score of protein strings s and t under a constant
    gap penalty (a gap of any length is charged ``gapPenalty`` exactly once).

    ``mat[i][j]`` holds the best score aligning s[:i] with t[:j];
    ``gap[i][j]`` records whether that optimum ends inside a gap, so
    extending the same gap costs nothing.  (Python 2 code.)
    """
    ls = len(s) + 1
    lt = len(t) + 1
    # DP score table and "currently inside a gap" flags.
    mat = [ [0 for j in range(0,lt)] for i in range(0,ls)]
    gap = [ [False for j in range(0,lt)] for i in range(0,ls)]
    # First column/row: aligning against an empty prefix opens one gap.
    for i in range(1,ls):
        mat[i][0] = gapPenalty
        gap[i][0] = True
    for j in range(1,lt):
        mat[0][j] = gapPenalty
        gap[0][j] = True
    for i in range(1,ls):
        sa = s[i-1]
        for j in range(1,lt):
            ta = t[j-1]
            scoreVal = score[syms[sa]][syms[ta]]
            if sa == ta:
                # Identical residues: always take the diagonal move.
                # NOTE(review): gap moves are not considered in this branch;
                # confirm a match can never be beaten by a gap here.
                mat[i][j] = mat[i-1][j-1] + scoreVal
            else:
                # Option a: gap in t (consume a char of s); free if the
                # optimum above already ends in a gap.
                agap = gap[i-1][j]
                if agap:
                    a = mat[i-1][j]
                else:
                    a = mat[i-1][j] + gapPenalty
                # Option b: gap in s (consume a char of t).
                bgap = gap[i][j-1]
                if bgap:
                    b = mat[i][j-1]
                else:
                    b = mat[i][j-1] + gapPenalty
                # Option c: substitution.
                c = mat[i-1][j-1] + scoreVal
                maxi = max(a, b, c)
                mat[i][j] = maxi
                # Remember gap-ending optima so later extensions stay free.
                if maxi == a or maxi == b:
                    gap[i][j] = True
    # Debug output: dump inputs and both DP tables (Python 2 print statements).
    print s
    print t
    for m in mat:
        for n in m:
            print n,
        print
    for m in gap:
        for n in m:
            print n,
        print
    return mat[-1][-1]
if __name__ == '__main__':
    # Usage: python GCON.py <fasta-file>; scores the first two records.
    dnas = readFasta(sys.argv[1])
    print editDistance(dnas[0][1], dnas[1][1])
| [
"rao.thejaswi@gmail.com"
] | rao.thejaswi@gmail.com |
34cc1bf223f7f56c695b00d0565cca6f6dc989a5 | 8ee8fe3c2acea497a85428bfb3dfde19e58b2bc3 | /test-examples/nD_mixed.py | 83eb382991ac4b297956c5f27d493014c7047df8 | [
"BSD-3-Clause"
] | permissive | sofroniewn/image-demos | a6e46f08fd4ce621aa96d6b6378b50f63ac2b381 | 2eeeb23f34a47798ae7be0987182724ee3799eb8 | refs/heads/master | 2022-11-02T23:50:23.098830 | 2022-10-30T04:38:19 | 2022-10-30T04:38:19 | 179,378,745 | 11 | 1 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | """
Slide through 3D Volume series in 4D data using the add_volume API
"""
from skimage import data
import numpy as np
import napari
with napari.gui_qt():
    # Stack ten random 3-D binary-blob volumes into one 4-D float array
    # (first axis indexes the volumes).
    blobs = np.asarray(
        [
            data.binary_blobs(length=64, volume_fraction=0.1, n_dim=3).astype(
                float
            )
            for i in range(10)
        ]
    )
    viewer = napari.Viewer()
    # add the volume
    volume = viewer.add_image(blobs, ndisplay=3)
    # Add the same data again as a 2-D sliced layer.
    # NOTE(review): ``slice`` shadows the Python builtin of the same name.
    slice = viewer.add_image(blobs)
    slice.dims.sliced = 0
| [
"sofroniewn@gmail.com"
] | sofroniewn@gmail.com |
2ef8ad9520e99842ad0aba6bd17b8b532e925ffa | 3cffa92ed70f487a3ea28b73a8f544bbc347e450 | /tests/test_oli.py | 8978a3db7caebc6920c1007df2d339b04994b0e4 | [] | no_license | vahtras/dalmisc | 63962ca86d00c59ea019db2b0e5eebba488f70b5 | 2e344132eaac0d163cdb41a5737baca8e875fc49 | refs/heads/master | 2023-01-31T06:42:38.964477 | 2020-12-15T10:38:29 | 2020-12-15T10:38:29 | 321,618,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | import os
from util.full import unit, init, matrix
from dalmisc import oli
from .common_tests import assert_
def setup():
    """Point the module-level ``suppdir`` at this test file's support dir.

    The support directory is the test file's own path with the extension
    swapped for ``.d``.
    """
    global suppdir
    base, _ext = os.path.splitext(__file__)
    suppdir = base + ".d"
def test_e2n_S():
    """The e2n action on the two unit vectors matches the stored reference."""
    expected = init(
        [[0.78643356, -0.50624296],
         [-0.50624296, 0.78643356]]
    )
    actual = []
    for basis_vec in unit(2):
        actual.append(oli.e2n(basis_vec, tmpdir=suppdir))
    assert_(actual, expected)
def test_s2n_S():
    """The s2n action on the two unit vectors matches the stored reference."""
    expected = matrix.diag([2., -2.])
    actual = [oli.s2n(basis_vec, tmpdir=suppdir) for basis_vec in unit(2)]
    assert_(actual, expected)
if __name__ == "__main__":
    # Script mode: build the fixtures, then run every test in this module.
    setup()
    test_e2n_S()
    test_s2n_S()  # was defined but never invoked when run as a script
| [
"vahtras@kth.se"
] | vahtras@kth.se |
f54b6de297ca25b541adb6bd4b12906d8bc8fcfd | 077c91b9d5cb1a6a724da47067483c622ce64be6 | /fuzz_pyretic_mesh_proactive_firewall_no_close_check_loop_mcs_with_max_replays_5/interreplay_135_r_4/interactive_replay_config.py | ce9454be78e968ff18bc201ae28a9357ab12050d | [] | no_license | Spencerx/experiments | 0edd16398725f6fd9365ddbb1b773942e4878369 | aaa98b0f67b0d0c0c826b8a1565916bf97ae3179 | refs/heads/master | 2020-04-03T10:11:40.671606 | 2014-06-11T23:55:11 | 2014-06-11T23:55:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,029 | py |
from config.experiment_config_lib import ControllerConfig
from sts.topology import *
from sts.control_flow import InteractiveReplayer
from sts.simulation_state import SimulationConfig
from sts.input_traces.input_logger import InputLogger

# Simulated network: a 3-switch mesh driven by one pyretic firewall
# controller (label c1) launched from the sibling ../pyretic checkout.
# kill_cmd force-kills any leftover pox/pyretic processes between runs.
simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd='./pyretic.py -m p0 pyretic.examples.firewall_for_sts_no_close', label='c1', address='127.0.0.1', cwd='../pyretic', kill_cmd='ps aux | grep -e pox -e pyretic | grep -v simulator | cut -c 9-15 | xargs kill -9')],
                     topology_class=MeshTopology,
                     topology_params="num_switches=3",
                     patch_panel_class=BufferedPatchPanel,
                     multiplex_sockets=False,
                     kill_controllers_on_exit=True)
# Interactively replay the recorded event trace against that simulation.
control_flow = InteractiveReplayer(simulation_config, "experiments/fuzz_pyretic_mesh_proactive_firewall_no_close_check_loop_mcs/interreplay_135_r_4/events.trace")
# wait_on_deterministic_values=False
# delay_flow_mods=False
# Invariant check: 'None'
| [
"cs@cs.berkeley.edu"
] | cs@cs.berkeley.edu |
f6a0fef8c7c0962752e3f0be0a812c8744fff280 | efd8628adc042ae2d58fa89cc31a5c1c80aa94f6 | /bi_conv_lstm/src/convlstm_cell.py | 2fce2759c2fffbd8344d862bd762e4273427b43e | [] | no_license | Xharlie/stochast_dynamic_for_video_infilling | d7e0bfaf8b71cf3f39170793e5a1a50b289aee40 | a825de4c5178f7084925817f0631ac331073866f | refs/heads/master | 2021-03-27T09:11:15.478067 | 2019-10-23T17:59:25 | 2019-10-23T17:59:25 | 110,137,739 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,946 | py | import tensorflow as tf
class ConvLSTMCell(tf.nn.rnn_cell.RNNCell):
    """A LSTM cell with convolutions instead of multiplications.

    Reference:
      Xingjian, S. H. I., et al. "Convolutional LSTM network: A machine learning approach for precipitation nowcasting." Advances in Neural Information Processing Systems. 2015.
    """

    def __init__(self, shape, filters, kernel, forget_bias=1.0, activation=tf.tanh, normalize=True, peephole=True, data_format='channels_last', reuse=None):
        # shape: spatial dimensions of the input feature map (without batch
        # or channel axes); filters: number of output channels; kernel: the
        # convolution kernel's spatial size.
        super(ConvLSTMCell, self).__init__(_reuse=reuse)
        self._kernel = kernel
        self._filters = filters
        self._forget_bias = forget_bias
        self._activation = activation
        self._normalize = normalize          # layer-norm the gates instead of adding a bias
        self._peephole = peephole            # let gates see the cell state directly
        if data_format == 'channels_last':
            self._size = tf.TensorShape(shape + [self._filters])
            self._feature_axis = self._size.ndims
            self._data_format = None
        elif data_format == 'channels_first':
            self._size = tf.TensorShape([self._filters] + shape)
            self._feature_axis = 0
            self._data_format = 'NC'
        else:
            raise ValueError('Unknown data_format')

    @property
    def state_size(self):
        # Cell state and hidden state have identical shapes.
        return tf.nn.rnn_cell.LSTMStateTuple(self._size, self._size)

    @property
    def output_size(self):
        return self._size

    def call(self, x, state):
        c, h = state
        # Single convolution over [input, hidden] produces all four gates.
        x = tf.concat([x, h], axis=self._feature_axis)
        # NOTE(review): kernel sizing reads the LAST axis; confirm this is
        # still correct when data_format='channels_first'.
        n = x.shape[-1].value
        m = 4 * self._filters if self._filters > 1 else 4
        W = tf.get_variable('kernel', self._kernel + [n, m])
        y = tf.nn.convolution(x, W, 'SAME', data_format=self._data_format)
        if not self._normalize:
            y += tf.get_variable('bias', [m], initializer=tf.zeros_initializer())
        # j: candidate, i: input gate, f: forget gate, o: output gate.
        j, i, f, o = tf.split(y, 4, axis=self._feature_axis)

        if self._peephole:
            i += tf.get_variable('W_ci', c.shape[1:]) * c
            f += tf.get_variable('W_cf', c.shape[1:]) * c

        if self._normalize:
            j = tf.contrib.layers.layer_norm(j)
            i = tf.contrib.layers.layer_norm(i)
            f = tf.contrib.layers.layer_norm(f)

        # forget_bias shifts the forget gate towards "keep" at init time.
        f = tf.sigmoid(f + self._forget_bias)
        i = tf.sigmoid(i)
        c = c * f + i * self._activation(j)

        if self._peephole:
            # Output-gate peephole uses the NEW cell state.
            o += tf.get_variable('W_co', c.shape[1:]) * c

        if self._normalize:
            o = tf.contrib.layers.layer_norm(o)
            c = tf.contrib.layers.layer_norm(c)

        o = tf.sigmoid(o)
        h = o * self._activation(c)

        # TODO
        #tf.summary.histogram('forget_gate', f)
        #tf.summary.histogram('input_gate', i)
        #tf.summary.histogram('output_gate', o)
        #tf.summary.histogram('cell_state', c)

        state = tf.nn.rnn_cell.LSTMStateTuple(c, h)
        return h, state
class ConvGRUCell(tf.nn.rnn_cell.RNNCell):
    """A GRU cell with convolutions instead of multiplications."""

    def __init__(self, shape, filters, kernel, activation=tf.tanh, normalize=True, data_format='channels_last', reuse=None):
        # shape: spatial dimensions of the input feature map; filters:
        # number of output channels; kernel: convolution kernel size.
        super(ConvGRUCell, self).__init__(_reuse=reuse)
        self._filters = filters
        self._kernel = kernel
        self._activation = activation
        self._normalize = normalize          # layer-norm the gates instead of adding a bias
        if data_format == 'channels_last':
            self._size = tf.TensorShape(shape + [self._filters])
            self._feature_axis = self._size.ndims
            self._data_format = None
        elif data_format == 'channels_first':
            self._size = tf.TensorShape([self._filters] + shape)
            self._feature_axis = 0
            self._data_format = 'NC'
        else:
            raise ValueError('Unknown data_format')

    @property
    def state_size(self):
        return self._size

    @property
    def output_size(self):
        return self._size

    def call(self, x, h):
        channels = x.shape[self._feature_axis].value

        # Reset (r) and update (u) gates from one convolution over [x, h].
        with tf.variable_scope('gates'):
            inputs = tf.concat([x, h], axis=self._feature_axis)
            n = channels + self._filters
            m = 2 * self._filters if self._filters > 1 else 2
            W = tf.get_variable('kernel', self._kernel + [n, m])
            y = tf.nn.convolution(inputs, W, 'SAME', data_format=self._data_format)
            if self._normalize:
                r, u = tf.split(y, 2, axis=self._feature_axis)
                r = tf.contrib.layers.layer_norm(r)
                u = tf.contrib.layers.layer_norm(u)
            else:
                # ones_initializer biases both gates open at init time.
                y += tf.get_variable('bias', [m], initializer=tf.ones_initializer())
                r, u = tf.split(y, 2, axis=self._feature_axis)
            r, u = tf.sigmoid(r), tf.sigmoid(u)

            # TODO
            #tf.summary.histogram('reset_gate', r)
            #tf.summary.histogram('update_gate', u)

        # Candidate state from [x, r*h] (reset gate masks the old state).
        with tf.variable_scope('candidate'):
            inputs = tf.concat([x, r * h], axis=self._feature_axis)
            n = channels + self._filters
            m = self._filters
            W = tf.get_variable('kernel', self._kernel + [n, m])
            y = tf.nn.convolution(inputs, W, 'SAME', data_format=self._data_format)
            if self._normalize:
                y = tf.contrib.layers.layer_norm(y)
            else:
                y += tf.get_variable('bias', [m], initializer=tf.zeros_initializer())

        # Convex blend of old state and candidate, controlled by the update gate.
        h = u * h + (1 - u) * self._activation(y)

        return h, h
"charlie.xu007@yahoo.com"
] | charlie.xu007@yahoo.com |
82ab7c61bcd07b03c93eb2c5495b8867ec737671 | 9b3e46ef2ffd65cccace3e3e3d93438c077e4f9e | /main/dialog/template.py | 12e827125435f1610370d68c18651b38ebcd624b | [
"BSD-2-Clause"
] | permissive | wizadr/cports | 1dd043045fc63b061f803d1992a9ccdc995850ad | be5f4695305d9c00de9d4e252e67db8081690c3e | refs/heads/master | 2023-08-21T11:35:16.710064 | 2021-10-25T00:38:04 | 2021-10-25T00:38:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | pkgname = "dialog"
_mver = "1.3"
_date = "20210621"
pkgver = f"{_mver}.{_date}"
pkgrel = 0
build_style = "gnu_configure"
configure_args = ["--with-ncursesw", "--disable-nls"]
makedepends = ["ncurses-devel"]
pkgdesc = "Tool to display dialog boxes from shell scripts"
maintainer = "q66 <q66@chimera-linux.org>"
license = "LGPL-2.1-only"
url = "https://invisible-island.net/dialog"
source = f"https://invisible-mirror.net/archives/{pkgname}/{pkgname}-{_mver}-{_date}.tgz"
sha256 = "c3af22ccfcd9baca384062108dd9354e86990929ee270c239eef69518c5da7c8"
def post_install(self):
self.rm(self.destdir / "usr/lib", force = True, recursive = True)
| [
"q66@chimera-linux.org"
] | q66@chimera-linux.org |
d039d23102a35433edadb4a67f3a22b65f11d99c | 36901e58fbdeabc7380ae2c0278010b2c51fe54d | /gatheros_event/helpers/event_business.py | 55a6a5bce19c24a740033883ffbe597b2be365cc | [] | no_license | hugoseabra/congressy | e7c43408cea86ce56e3138d8ee9231d838228959 | ac1e9b941f1fac8b7a13dee8a41982716095d3db | refs/heads/master | 2023-07-07T04:44:26.424590 | 2021-08-11T15:47:02 | 2021-08-11T15:47:02 | 395,027,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,384 | py | """
These helpers identify the type of an event:
 - Free
 - Paid
They also change the event type and apply the configuration of the
features available according to that type.
"""
from gatheros_event.event_state import EventState, EventPayable
from gatheros_event.models import Event
from gatheros_event.event_specifications import Saleable
def is_free_event(event: Event):
    """Return True when ``event`` is a free (no-charge) event."""
    if isinstance(event, Event):
        return EventState(event).is_free()
    raise Exception(
        "{} '{}' não é uma instancia de Event".format(event, event.__class__)
    )
def is_paid_event(event: Event):
    """Return True when ``event`` is a paid (payable) event."""
    if isinstance(event, Event):
        return EventState(event).is_payable()
    raise Exception(
        "{} '{}' não é uma instancia de Event".format(event, event.__class__)
    )
def removing_saleable_cause_feature_change(event: Event, candidate,
                                           candidate_type) -> bool:
    """Return True when excluding ``candidate`` leaves the event no longer payable."""
    if not Saleable().is_satisfied_by(candidate):
        message = "{} '{}' não é uma instancia capaz de ser vendida".format(
            candidate, candidate.__class__
        )
        raise Exception(message)
    still_payable = EventPayable(
        exclude=candidate, exclude_type=candidate_type
    ).is_satisfied_by(event)
    return not still_payable
| [
"nathan.eua@gmail.com"
] | nathan.eua@gmail.com |
fef0aa9b88d267ea1543943ccc5df703e7c1d7d2 | 45c7693fabf2bf6aa142ea08ed7ec45b9a6aee79 | /apps/belt_app/views.py | 576b7dfeec8067fb6ad1415728705a1827018ea8 | [] | no_license | stuffyUdaya/quote | 95cedd352b493eb29f127cc9be78456652e1cca8 | ae979eeb92b490fa12ee95d4276bef9517286c5a | refs/heads/master | 2021-01-09T06:11:50.343620 | 2017-02-04T14:43:09 | 2017-02-04T14:43:09 | 80,923,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,662 | py | from django.shortcuts import render,redirect
from django.urls import reverse
from django.contrib import messages
from .models import User,Quote,Fav
from django.db.models import Count
def index(request):
    # Landing page with the sign-up / login forms.
    return render (request, 'belt_app/index.html')
def process(request):
    # Handle the registration form POST (Python 2 / old Django code).
    results = User.objects.userValidator(request.POST['name'],request.POST['alias'],request.POST['email'],request.POST['password'],request.POST['confpassword'],request.POST['dateofbirth'])
    if results[0]:
        # Validation failed: results[1] is a list of error messages.
        for err in results[1]:
            print err
            messages.error(request,err)
    else:
        # Validation passed: results[1] is the new User; log them in.
        request.session ['loggedin'] = results[1].id
        return redirect('/success')
    return redirect('/')
def login(request):
    """Authenticate a user from the login form POST."""
    credentials = {
        'email': request.POST['email'],
        'password': request.POST['password'],
    }
    results = User.objects.loginValidator(credentials)
    if not results[0]:
        # Validation failed: results[1] carries the error message.
        messages.error(request, results[1])
        return redirect('/')
    # Success: results[1] is the authenticated User.
    request.session['loggedin'] = results[1].id
    return redirect('/success')
def success(request):
    # Dashboard: the logged-in user's favourites plus every quote they have
    # not yet favourited.
    user_id = request.session['loggedin']
    favs = Fav.objects.filter(user_id=user_id)
    quote_other = Quote.objects.all()
    for f in favs:
        # Exclude quotes the user already favourited from the "other" list.
        quote_other = quote_other.exclude(id=f.quote_id)
    context = {
        'user': User.objects.get(id=user_id),
        'fav': favs,  # reuse the queryset instead of re-running the filter
        'quote': quote_other,
    }
    return render(request, 'belt_app/success.html', context)
def addquote(request, id):
    # Create a quote posted by user ``id``; flash validation errors if any.
    # (``id`` shadows the builtin, but the name is part of the URLconf-facing
    # signature, so it is kept.)
    results = Quote.objects.quoteValidator(request.POST['qname'], request.POST['message'], id)
    if results[0]:
        # Validation failed: surface each error message to the user.
        for err in results[1]:
            messages.error(request, err)
    # Either way the user lands back on the dashboard.
    return redirect('/success')
def fav(request, id, uid):
    # Mark quote ``id`` as a favourite of user ``uid``.
    # (The original assigned the result to an unused variable with a stray
    # trailing comma, building a pointless 1-tuple.)
    Fav.objects.create(user_id=uid, quote_id=id)
    return redirect('/success')
def view(request,id):
    # List every quote posted by user ``id``.
    context= {
        'view': Quote.objects.filter(postee_id = id)
    }
    return render(request,'belt_app/view.html', context)
def remove(request,id,uid):
    # Un-favourite quote ``id`` for user ``uid``.
    Fav.objects.get(user_id = uid, quote_id = id ).delete()
    return redirect('/success')
def logout(request):
    # Clear the whole session (logs the user out).
    request.session.flush()
    return redirect('/')
| [
"udayatummala1@gmail.com"
] | udayatummala1@gmail.com |
b10b197f934cdecd38564cb304870ecf73768943 | e5329001263e67a4d3c13d57bb91f2502280e206 | /InvTL/lm_py/pypy/jit/metainterp/test/test_ztranslation.py | d5db7f3d2c2fc1f5deaf59451266e18775a8ea35 | [] | no_license | yanhongliu/DARLAB | d9432db6e005a39e33501d7ffffe6e648b95b3fc | f739318c9620b44ef03d155f791c7ed4111d80fa | refs/heads/master | 2021-05-27T19:58:58.458846 | 2014-02-04T12:09:26 | 2014-02-04T12:09:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,277 | py | import py
from pypy.jit.metainterp.warmspot import rpython_ll_meta_interp, ll_meta_interp
from pypy.jit.backend.llgraph import runner
from pypy.rlib.jit import JitDriver, OPTIMIZER_FULL, unroll_parameters
from pypy.rlib.jit import PARAMETERS, dont_look_inside
from pypy.jit.metainterp.jitprof import Profiler
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.rpython.ootypesystem import ootype
class TranslationTest:
    """Translation smoke tests, parametrised by backend via the
    ``CPUClass`` / ``type_system`` class attributes set by subclasses."""
    CPUClass = None
    type_system = None

    def test_stuff_translates(self):
        # this is a basic test that tries to hit a number of features and their
        # translation:
        # - jitting of loops and bridges
        # - virtualizables
        # - set_param interface
        # - profiler
        # - full optimizer
        # - jitdriver hooks
        class Frame(object):
            _virtualizable2_ = ['i']

            def __init__(self, i):
                self.i = i

        class JitCellCache:
            entry = None
        jitcellcache = JitCellCache()

        # jitdriver hook callbacks exercised by the translation.
        def set_jitcell_at(entry):
            jitcellcache.entry = entry

        def get_jitcell_at():
            return jitcellcache.entry

        def get_printable_location():
            return '(hello world)'

        def can_inline():
            return False
        jitdriver = JitDriver(greens = [], reds = ['frame', 'total'],
                              virtualizables = ['frame'],
                              get_jitcell_at=get_jitcell_at,
                              set_jitcell_at=set_jitcell_at,
                              get_printable_location=get_printable_location,
                              can_inline=can_inline)

        def f(i):
            # Exercise set_param for every known parameter, then override two.
            for param in unroll_parameters:
                defl = PARAMETERS[param]
                jitdriver.set_param(param, defl)
            jitdriver.set_param("threshold", 3)
            jitdriver.set_param("trace_eagerness", 2)
            total = 0
            frame = Frame(i)
            while frame.i > 3:
                jitdriver.can_enter_jit(frame=frame, total=total)
                jitdriver.jit_merge_point(frame=frame, total=total)
                total += frame.i
                if frame.i >= 20:
                    frame.i -= 2
                frame.i -= 1
            return total * 10
        # Interpreted and translated runs must agree with plain execution.
        res = ll_meta_interp(f, [40], CPUClass=self.CPUClass,
                             type_system=self.type_system)
        assert res == f(40)
        res = rpython_ll_meta_interp(f, [40], loops=2, CPUClass=self.CPUClass,
                                     type_system=self.type_system,
                                     optimizer=OPTIMIZER_FULL,
                                     ProfilerClass=Profiler)
        assert res == f(40)

    def test_external_exception_handling_translates(self):
        # Exceptions raised in residual (dont_look_inside) calls must be
        # propagated and catchable in jitted code after translation.
        jitdriver = JitDriver(greens = [], reds = ['n', 'total'])

        @dont_look_inside
        def f(x):
            if x > 20:
                return 2
            raise ValueError

        @dont_look_inside
        def g(x):
            if x > 15:
                raise ValueError
            return 2

        def main(i):
            jitdriver.set_param("threshold", 3)
            jitdriver.set_param("trace_eagerness", 2)
            total = 0
            n = i
            while n > 3:
                jitdriver.can_enter_jit(n=n, total=total)
                jitdriver.jit_merge_point(n=n, total=total)
                try:
                    total += f(n)
                except ValueError:
                    total += 1
                try:
                    total += g(n)
                except ValueError:
                    total -= 1
                n -= 1
            return total * 10
        res = ll_meta_interp(main, [40], CPUClass=self.CPUClass,
                             type_system=self.type_system)
        assert res == main(40)
        res = rpython_ll_meta_interp(main, [40], loops=2, CPUClass=self.CPUClass,
                                     type_system=self.type_system,
                                     optimizer=OPTIMIZER_FULL,
                                     ProfilerClass=Profiler)
        assert res == main(40)
class TestTranslationLLtype(TranslationTest):
    # Concrete parametrisation: run the translation tests on the lltype backend.
    CPUClass = runner.LLtypeCPU
    type_system = 'lltype'
| [
"mickg10@gmail.com"
] | mickg10@gmail.com |
a5b0332978d11b16182826ca5685a1f52e032ea8 | afd9c9dd58d0e91b84aab77d24ccf36d8b020f94 | /obonet/io.py | e43afb702a0fc788224648937aaebdb130624ff5 | [
"BSD-2-Clause-Patent"
] | permissive | dhimmel/obonet | 11d8c3b30e77e5910d60733711f28cd756f47d9c | fec6a82e53d01338c85e77039a4dc05288f6ab99 | refs/heads/main | 2023-03-04T00:29:18.215986 | 2023-02-28T17:17:57 | 2023-02-28T17:17:57 | 35,751,761 | 115 | 25 | NOASSERTION | 2022-11-10T11:22:07 | 2015-05-17T04:23:23 | Python | UTF-8 | Python | false | false | 2,137 | py | from __future__ import annotations
import importlib
import io
import logging
import mimetypes
import os
import re
from typing import Callable, TextIO, Union
from urllib.request import urlopen
PathType = Union[str, os.PathLike, TextIO]
def open_read_file(path: PathType, encoding: str | None = None) -> TextIO:
    """
    Return a readable text file object for *path*.

    URLs (http/https/ftp/ftps) and compressed files are detected and handled
    automatically. Pathlike objects are converted to strings; anything that
    is neither a string nor pathlike is assumed to be an open file object and
    returned untouched. Pass ``encoding=None`` to fall back to the
    platform-dependent default locale encoding.
    """
    # Normalise pathlike objects to plain string paths.
    if hasattr(path, "__fspath__"):
        path = os.fspath(path)
    if not isinstance(path, str):
        # Already an open file buffer: hand it back unchanged.
        return path
    # Choose the opener from the filename's compression extension.
    opener = get_opener(path)
    if re.match("^(http|ftp)s?://", path) is None:
        # Plain local file.
        return opener(path, "rt", encoding=encoding)
    # Remote resource: download the whole payload first.
    with urlopen(path) as response:
        content = response.read()
    if opener != io.open:
        # Compressed payload: wrap the bytes and decompress on the fly.
        return opener(io.BytesIO(content), "rt", encoding=encoding)
    if not encoding:
        # No explicit encoding: trust the HTTP charset, default utf-8.
        encoding = response.headers.get_content_charset(failobj="utf-8")
        logging.info(f"Will decode content from {path} using {encoding} charset.")
    return io.StringIO(content.decode(encoding))
# Maps the encoding name reported by mimetypes.guess_type to the stdlib
# module whose ``open`` function handles it (see get_opener).
compression_to_module = {
    "gzip": "gzip",
    "bzip2": "bz2",
    "xz": "lzma",
}
def get_opener(filename: str) -> Callable[..., TextIO]:
    """
    Pick the ``open`` callable appropriate for *filename*.

    ``mimetypes.guess_type`` reports the compression encoding (if any):
    compressed files are opened via the matching stdlib module's ``open``,
    everything else via plain ``io.open``.
    """
    _mime_type, compression = mimetypes.guess_type(filename)
    if compression is None:
        return io.open
    return importlib.import_module(compression_to_module[compression]).open
| [
"daniel.himmelstein@gmail.com"
] | daniel.himmelstein@gmail.com |
5b7b72098bba09b1082880311e8f3fdb84f678c9 | 5864e86954a221d52d4fa83a607c71bacf201c5a | /eve/client/script/entities/ActionObjectClient.py | 6e27794a321733b47fbd58e3b427686d66348c3b | [] | no_license | connoryang/1v1dec | e9a2303a01e5a26bf14159112b112be81a6560fd | 404f2cebf13b311e754d45206008918881496370 | refs/heads/master | 2021-05-04T02:34:59.627529 | 2016-10-19T08:56:26 | 2016-10-19T08:56:26 | 71,334,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 999 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\client\script\entities\ActionObjectClient.py
import svc
import localization
class EveActionObjectClientSvc(svc.actionObjectClientSvc):
__guid__ = 'svc.eveActionObjectClientSvc'
__replaceservice__ = 'actionObjectClientSvc'
def SetupComponent(self, entity, component):
infoComponent = entity.GetComponent('info')
if infoComponent and not infoComponent.name and component in self.preservedStates:
recipeRow = cfg.recipes.Get(self.preservedStates[component]['_recipeID'])
infoComponent.name = recipeRow.recipeName
svc.actionObjectClientSvc.SetupComponent(self, entity, component)
def Run(self, *args):
svc.actionObjectClientSvc.Run(self, *args)
def GetActionNodeTranslatedText(self, actionID, fallbackText):
treeNodeNameID = cfg.treeNodes.Get(actionID).treeNodeNameID
return localization.GetByMessageID(treeNodeNameID)
| [
"le02005@163.com"
] | le02005@163.com |
7a177033a21ab52312bd03811c74b9fa2fc6488c | 04164e028417ff8472b9f2bfec0ec45b0888f743 | /development/utilities/qh-interpolate | 23660381db250d30447b73d6e97d1e567b0a7eff | [] | no_license | Huaguiyuan/quantum-honeycomp | c2b810ff5f5e25d41b1f0c1c1ff7ae500b04dc31 | 50deb0e59fffe4031f05094572552ca5be59e741 | refs/heads/master | 2020-03-22T19:09:58.148862 | 2018-07-08T19:51:58 | 2018-07-08T19:51:58 | 140,510,217 | 1 | 2 | null | 2018-07-11T02:20:32 | 2018-07-11T02:20:32 | null | UTF-8 | Python | false | false | 1,527 | #!/usr/bin/python
import numpy as np
import numpy
def write_interpolation(centers=[[0.,0.,0.]], heights=[10.0],name=""):
    """ Creates a set of lorentzian at that position and with a height

    Evaluates a sum of Gaussian bumps (one per center, scaled by height,
    offset by the center's z) on an n x n grid spanning the centers'
    bounding box, and writes "x y z" rows to the file ``name``.
    NOTE(review): mutable default arguments -- harmless here since they are
    never mutated, but callers should always pass their own lists.
    """
    if len(heights)<len(centers):
        heights = [10. for i in centers]
    fac = 5.0 # factor to mix the gaussians
    def wave(x,y):
        # Sum of Gaussians centred at (c[0], c[1]), shifted down by c[2].
        z = 0.
        for (c,h) in zip(centers,heights):
            r = ((x-c[0])**2+(y-c[1])**2)*fac # renormalized gaussian
            z += h*np.exp(-(r))-c[2]
        return z+1.0
    xs = [c[0] for c in centers] # get x coordinates
    ys = [c[1] for c in centers] # get y coordinates
    dxy = (max(xs) - min(xs))/200
    # Grid resolution from argv[2]; default 80. NOTE(review): bare except
    # also swallows bad (non-numeric) arguments silently.
    try:
        n = int(float(sys.argv[2]))
    except:
        n = 80
    print n,"points"
    x = np.linspace(min(xs),max(xs),n)
    y = np.linspace(min(ys),max(ys),n)
    fo = open(name,"w") # open output file
    for ix in x:
        for iy in y:
            iz = wave(ix,iy)
            fo.write(str(ix)+" ")
            fo.write(str(iy)+" ")
            fo.write(str(iz)+"\n")
    fo.close()
#centers = np.arange(0.,10.,2.)
import sys
# get the centers of the balls
# Usage: qh-interpolate <name> [n] -- reads x/y/height columns from <name>.
try:
    name = sys.argv[1]
except:
    print "Usage qh-interpolate name"
    exit() # exit
# Columns of the input file: x, y, height.
m = np.genfromtxt(name).transpose()
centers = [[m[0,i],m[1,i],0.] for i in range(len(m[0]))]
heights = m[2]
#heights = read_density()
# Rescale heights so the tallest bump is 4.
heights = 4*heights/max(heights)
#heights = np.sqrt(heights)
#balls = [ball(center=c) for c in centers]
write_interpolation(centers=centers,heights=heights,name=name+"-interpolated")
| [
"jose.luis.lado@gmail.com"
] | jose.luis.lado@gmail.com | |
9dc965a73917db5c9bc735c53cda18a3e9a59059 | fb98f1e69cb6e14d804c916c4d4ab45acb4f7215 | /setup.py | 5e81d07c8ab9ac906e6ad46326bb23fed7fc220a | [
"Apache-2.0"
] | permissive | q759729997/qytPython | 6c8f65b954e43ea2a75a1fd1286b7c2e896ba9f2 | f468ef0a4495eb7ce58cb052e0370a8f22bca008 | refs/heads/master | 2020-12-05T13:57:43.794982 | 2020-03-29T15:00:27 | 2020-03-29T15:00:27 | 232,131,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,049 | py | #!/usr/bin/env python
# coding=utf-8
import os
from setuptools import setup, find_packages
__version__ = None
with open("qytPython/version.py") as f:
exec(f.read())
with open('README.md', encoding='utf-8') as f:
readme = f.read()
with open('LICENSE', encoding='utf-8') as f:
license = f.read()
with open('requirements.txt', encoding='utf-8') as f:
reqs = f.read()
pkgs = [p for p in find_packages() if p.startswith('qytPython')]
print(pkgs)
# 执行依赖包安装,使用豆瓣源加速
install_cmd = 'pip install -r requirements.txt -i https://pypi.douban.com/simple/'
print(install_cmd)
os.system(install_cmd)
setup(
name='qytPython',
version=__version__,
url='https://github.com/q759729997/qytPython',
description='qytPython: Python tools',
long_description=readme,
long_description_content_type='text/markdown',
license=license,
author='qiaoyongtian',
python_requires='>=3.6',
packages=pkgs,
install_requires=reqs.strip().split('\n'),
)
| [
"qiaoyongtian@qq.com"
] | qiaoyongtian@qq.com |
ab2d4503b34fd674906127afc68cf789f6a4702b | f27f3dc88ea4b777063aa449d882663606e45990 | /pySDC/implementations/collocation_classes/equidistant_spline_right.py | 737e1a3071509125ce1512f83ab8bba7c8498ec7 | [
"BSD-2-Clause"
] | permissive | DmytroSytnyk/pySDC | 81a0a6ff86572d687338c7b0c9d2a274e78cb6ef | 9c7c41ac00411cdc58dfa30be794c3bb77a58293 | refs/heads/master | 2020-04-16T13:58:44.021412 | 2018-11-19T07:55:42 | 2018-11-19T07:55:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,164 | py | from __future__ import division
import numpy as np
import scipy.interpolate as intpl
from pySDC.core.Collocation import CollBase
from pySDC.core.Errors import CollocationError
class EquidistantSpline_Right(CollBase):
    """
    Implements equidistant nodes with right end point included and spline interpolation

    Attributes:
        order (int): order of the quadrature
        num_nodes (int): number of collocation nodes
        tleft (float): left interval point
        tright (float): right interval point
        nodes (numpy.ndarray): array of quadrature nodes
        weights (numpy.ndarray): array of quadrature weights for the full interval
        Qmat (numpy.ndarray): matrix containing the weights for tleft to node
        Smat (numpy.ndarray): matrix containing the weights for node to node
        delta_m (numpy.ndarray): array of distances between nodes
        right_is_node (bool): flag to indicate whether right point is collocation node
        left_is_node (bool): flag to indicate whether left point is collocation node
    """

    def __init__(self, num_nodes, tleft, tright):
        """
        Initialization

        Args:
            num_nodes (int): number of nodes
            tleft (float): left interval boundary (usually 0)
            tright (float): right interval boundary (usually 1)
        """
        super(EquidistantSpline_Right, self).__init__(num_nodes, tleft, tright)
        if num_nodes < 2:
            raise CollocationError("Number of nodes should be at least 2 for equidist. splines, but is %d" % num_nodes)
        # This is a fixed order since we are using splines here! No spectral accuracy!
        self.order = min(num_nodes - 1, 3)  # We need: 1<=order<=5 and order < num_nodes
        self.nodes = self._getNodes
        self.weights = self._getWeights(tleft, tright)
        # Qmat/Smat/delta_m builders are inherited from CollBase.
        self.Qmat = self._gen_Qmatrix
        self.Smat = self._gen_Smatrix
        self.delta_m = self._gen_deltas
        # Left endpoint excluded from the node set; right endpoint included.
        self.left_is_node = False
        self.right_is_node = True

    @property
    def _getNodes(self):
        """
        Compute equidistant nodes with right end point included

        Returns:
            np.ndarray: array of equidistant nodes
        """
        # NOTE(review): the 1.0/num_nodes offset assumes a unit-length
        # interval (tright - tleft == 1) -- confirm for other intervals.
        return np.linspace(self.tleft + 1.0 / self.num_nodes, self.tright, self.num_nodes, endpoint=True)

    def _getWeights(self, a, b):
        """
        Computes weights using spline interpolation instead of Gaussian quadrature

        Args:
            a (float): left interval boundary
            b (float): right interval boundary

        Returns:
            np.ndarray: weights of the collocation formula given by the nodes
        """
        # get the defining tck's for each spline basis function
        circ_one = np.zeros(self.num_nodes)
        circ_one[0] = 1.0
        tcks = []
        for i in range(self.num_nodes):
            # Cardinal basis: the spline that is 1 at node i, 0 elsewhere.
            tcks.append(
                intpl.splrep(self.nodes, np.roll(circ_one, i), xb=self.tleft, xe=self.tright, k=self.order, s=0.0))
        weights = np.zeros(self.num_nodes)
        for i in range(self.num_nodes):
            # Weight i is the exact integral of basis spline i over [a, b].
            weights[i] = intpl.splint(a, b, tcks[i])
        return weights
| [
"r.speck@fz-juelich.de"
] | r.speck@fz-juelich.de |
b15b1831c7bab23eae614a8ac41760cd3b32dee6 | 214d0b18e3bed9ae2ba33e9a3d9d2b447c13dd2e | /k_fold_cross_validation.py | cd1ff51f751ee9b217b204fc08df53afb3dec931 | [] | no_license | janFrancoo/Machine-Learning-Tutorials | 3cc1d47939fac44630b475ce5bd5dc52f84bde1e | 56dab9722606dc27df2f613bfbd277f27122eb88 | refs/heads/master | 2020-06-17T14:16:06.423200 | 2019-08-03T11:44:47 | 2019-08-03T11:44:47 | 195,948,343 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 701 | py | from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.model_selection import train_test_split, cross_val_score
df = load_iris()
x = df.data
y = df.target
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=.33)
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(x_train, y_train)
res = knn.predict(x_test)
cm = confusion_matrix(res, y_test)
print(cm)
print("Accuracy = ", accuracy_score(res, y_test))
crossValScore = cross_val_score(knn, x_train, y_train, cv=3)
print("Cross Validation Score = ", crossValScore.mean())
print("Standard Deviation = ", crossValScore.std())
| [
"noreply@github.com"
] | janFrancoo.noreply@github.com |
cc5cf5398f77652abe718d6ae45d9eb527566a58 | f7772d2d686811610763aa177cc37a1ae4e0cb95 | /cosmosis/cosmosis/samplers/star/star_sampler.py | 5a497f019d6d5948cb292f3a5944d94223152aaa | [
"BSD-2-Clause"
] | permissive | joezuntz/summer-school-1 | 51d5cfb9e62a99e33bc1fd89b8ced2aa96440c63 | 0575e0d7cab34a616e107967147c9cc97f0953a6 | refs/heads/master | 2021-05-21T17:56:44.732129 | 2020-04-03T13:40:38 | 2020-04-03T13:40:38 | 252,744,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,535 | py | from __future__ import print_function
from builtins import zip
from builtins import map
from builtins import str
import itertools
import numpy as np
from .. import ParallelSampler
def task(p):
    # Worker entry point for pool.map: p is (sample_index, parameter_vector).
    i,p = p
    results = star_sampler.pipeline.run_results(p)
    #If requested, save the data to file
    if star_sampler.save_name and results.block is not None:
        results.block.save_to_file(star_sampler.save_name+"_%d"%i, clobber=True)
    return (results.post, results.prior, results.extra)
LARGE_JOB_SIZE = 1000000
class StarSampler(ParallelSampler):
parallel_output = False
sampler_outputs = [("prior", float),("post", float)]
understands_fast_subspaces = True
    def config(self):
        # Read the sampler's ini-file options and initialise run state.
        global star_sampler
        star_sampler = self  # module-level handle so pooled workers (task) can reach us
        self.converged = False
        self.nsample = self.read_ini("nsample_dimension", int, 1)
        self.save_name = self.read_ini("save", str, "")
        self.nstep = self.read_ini("nstep", int, -1)  # -1: pick automatically later
        self.allow_large = self.read_ini("allow_large", bool, False)
        self.sample_points = None  # lazily built on the first execute()
        self.ndone = 0
    def setup_sampling(self):
        """Choose the chunk size and build the star-pattern sample points:
        for each varied parameter, nsample values across its range with all
        other parameters held at the fiducial start vector."""
        #Number of jobs to do at once.
        #Can be taken from the ini file.
        #Otherwise it is set to -1 by default
        if self.nstep==-1:
            #if in parallel mode do a chunk of 4*the number of tasks to do at once
            #chosen arbitrarily.
            if self.pool:
                self.nstep = 4*self.pool.size
            #if not parallel then just do a single slice through one dimension each chunk
            else:
                self.nstep = self.nsample
        if self.output:
            # Record the fiducial value of every varied parameter as metadata.
            for name,value in zip(self.pipeline.varied_params, self.pipeline.start_vector()):
                self.output.metadata("fid_{0}".format(name), value)
        #Also Generate the complete collection of parameter sets to run over.
        #This doesn't actually keep them all in memory, it is just the conceptual
        #outer product
        total_samples = self.nsample*len(self.pipeline.varied_params)
        print()
        print("Total number of star samples: ", total_samples)
        # Guard against accidentally enormous runs unless explicitly allowed.
        if total_samples>LARGE_JOB_SIZE:
            print("That is a very large number of samples.")
            if self.allow_large:
                print("But you set allow_large=T so I will continue")
            else:
                print("This is suspicously large so I am going to stop")
                print("If you really want to do this set allow_large=T in the")
                print("[star] section of the ini file.")
                raise ValueError("Suspicously large number of star points %d ( = n_samp * n_dim = %d * %d); set allow_large=T in [star] section to permit this."%(total_samples,self.nsample,len(self.pipeline.varied_params)))
        print()
        sample_points = []
        start = self.pipeline.start_vector()
        for i,param in enumerate(self.pipeline.varied_params):
            # Vary parameter i across its limits, keeping the rest fiducial.
            for p in np.linspace(*param.limits, num=self.nsample):
                v = start.copy()
                v[i] = p
                sample_points.append(v)
        # Iterator consumed chunk-by-chunk in execute().
        self.sample_points = iter(sample_points)
def execute(self):
#First run only:
if self.sample_points is None:
self.setup_sampling()
#Chunk of tasks to do this run through, of size nstep.
#This advances the self.sample_points forward so it knows
#that these samples have been done
samples = list(itertools.islice(self.sample_points, self.nstep))
#If there are no samples left then we are done.
if not samples:
self.converged=True
return
#Each job has an index number in case we are saving
#the output results from each one
sample_index = np.arange(len(samples)) + self.ndone
jobs = list(zip(sample_index, samples))
#Actually compute the likelihood results
if self.pool:
results = self.pool.map(task, jobs)
else:
results = list(map(task, jobs))
#Update the count
self.ndone += len(results)
#Save the results of the sampling
for sample, result in zip(samples, results):
#Optionally save all the results calculated by each
#pipeline run to files
(post, prior, extra) = result
#always save the usual text output
self.output.parameters(sample, extra, prior, post)
def is_converged(self):
return self.converged
| [
"joezuntz@googlemail.com"
] | joezuntz@googlemail.com |
90666c5cc6eff1cf432eac73b42bd2a8ba4eba13 | b9adf873bc36f8e1244d889cedeee22ad9a3cb6b | /PythonForArcGIS/SF_PFA2/ch10/script/bufferLoopRange.py | 88a69b10d0421228a1d531d3d0412cb609489a48 | [] | no_license | forgetbear/B_PYTHON_GIS | e2860b1dfdf1e714ffef0fad90949c083b1d4ab4 | a92cdb97f601a4c61c399ad75c5f839983fab956 | refs/heads/master | 2023-06-08T03:18:03.786982 | 2023-05-28T16:42:37 | 2023-05-28T16:42:37 | 104,445,746 | 0 | 0 | null | 2017-09-22T07:34:40 | 2017-09-22T07:34:40 | null | UTF-8 | Python | false | false | 595 | py | # bufferLoopRange.py
# Purpose: Buffer a park varying buffer distances from 1 to 5 miles.
import arcpy
arcpy.env.workspace = 'C:/gispy/data/ch10'
outDir = 'C:/gispy/scratch/'
arcpy.env.overwriteOutput = True
inName = 'park.shp'
for num in range(1, 6):
# Set the buffer distance based on num ('1 miles', '2 miles', ...).
distance = '{0} miles'.format(num)
# Set the output name based on num ('buffer1.shp', 'buffer2.shp', ...)
outName = outDir + 'buffer{0}.shp'.format(num)
arcpy.Buffer_analysis(inName, outName, distance)
print '{0}{1} created.'.format(outDir, outName)
| [
"aaronhsu219@gmail.com"
] | aaronhsu219@gmail.com |
5c18aa542642be81a4a39dd0d778ab66114f13fa | cc13092b652113221a877db2bf907c050dc30aaa | /meta_reward_learning/textworld/lib/graph_search.py | 71904064535272cd066fb4381f410246e4c7b088 | [
"MIT",
"Apache-2.0"
] | permissive | Th-Fo/google-research | 1e62ee50f76c2931fdb6db1de736a85e94251e25 | 9d7bd968843c27216d01c92ff832b1cd58cafa85 | refs/heads/master | 2020-12-27T17:30:43.916109 | 2020-05-25T17:06:20 | 2020-05-25T17:06:20 | 237,989,659 | 1 | 3 | Apache-2.0 | 2020-05-25T17:06:22 | 2020-02-03T14:52:08 | null | UTF-8 | Python | false | false | 2,199 | py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Graph search algorithms for exploration."""
import random
# pylint: disable=g-import-not-at-top
try:
import queue
except ImportError:
import six.moves.queue as queue
# pylint: enable=g-import-not-at-top
def check_valid(graph, pos):
    """Return True iff *pos* lies inside *graph* and its cell is non-negative.

    A negative cell value marks an impassable position; the cell is only
    inspected once the bounds check has passed.
    """
    row, col = pos
    n_rows, n_cols = graph.shape
    in_bounds = 0 <= row < n_rows and 0 <= col < n_cols
    return bool(in_bounds and graph[pos] >= 0)
def bfs_paths(graph, agent, goal, num_actions, maxlen):
    """Find paths from any start position to a goal position using BFS.

    Yields action sequences (lists of action indices) of length <= maxlen
    that lead *agent* from its current position to *goal*; shorter paths
    are yielded first (BFS order).  NOTE: mutates *agent* by repeatedly
    resetting it and replaying actions.
    """
    path_queue = queue.Queue()
    path_queue.put((agent.pos, []))
    while not path_queue.empty():
        curr_pos, path = path_queue.get()
        # Stop expanding this branch once the path budget is used up.
        if len(path) >= maxlen:
            continue
        for action in range(num_actions):
            # Place the agent at the frontier position and try one action.
            agent.reset(curr_pos)
            agent.act(action)
            if check_valid(graph, agent.pos):
                new_path = path + [action]
                if agent.pos == goal:
                    yield new_path
                else:
                    path_queue.put((agent.pos, new_path))
def dfs_paths(graph, agent, goal, num_actions, maxlen):
    """Find paths from any start position to a goal position using DFS.

    Yields action sequences (lists of action indices) of length <= maxlen in
    depth-first order; actions are shuffled at every node so repeated calls
    explore in a random order.  NOTE: mutates *agent* by repeatedly
    resetting it and replaying actions.
    """
    stack = [(agent.pos, [])]
    all_actions = list(range(num_actions))
    while stack:
        curr_pos, path = stack.pop()
        # Stop expanding this branch once the path budget is used up.
        if len(path) >= maxlen:
            continue
        # Randomize the expansion order for this node.
        random.shuffle(all_actions)
        for action in all_actions:
            # Place the agent at the frontier position and try one action.
            agent.reset(curr_pos)
            agent.act(action)
            if check_valid(graph, agent.pos):
                new_path = path + [action]
                if agent.pos == goal:
                    yield new_path
                else:
                    stack.append((agent.pos, new_path))
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
69245191cd77a6704b51bcdadfef4132821d8865 | 24d8cf871b092b2d60fc85d5320e1bc761a7cbe2 | /eXe/rev2283-2409/base-trunk-2283/exe/engine/idevice.py | bfd60b87586091ea323f16d24950704388984cf6 | [] | no_license | joliebig/featurehouse_fstmerge_examples | af1b963537839d13e834f829cf51f8ad5e6ffe76 | 1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad | refs/heads/master | 2016-09-05T10:24:50.974902 | 2013-03-28T16:28:47 | 2013-03-28T16:28:47 | 9,080,611 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,006 | py | """
The base class for all iDevices
"""
import copy
import logging
from exe.engine.persist import Persistable
from exe.engine.translate import lateTranslate
log = logging.getLogger(__name__)
class Idevice(Persistable):
    """
    The base class for all iDevices
    iDevices are mini templates which the user uses to create content in the
    package
    """
    # Class-level counter used to hand out unique ids to new instances.
    nextId = 1
    # Emphasis levels controlling how prominently the iDevice is rendered.
    NoEmphasis, SomeEmphasis, StrongEmphasis = range(3)
    def __init__(self, title, author, purpose, tip, icon, parentNode=None):
        """Initialize a new iDevice, setting a unique id"""
        log.debug("Creating iDevice")
        self.edit = True
        self.lastIdevice = True
        self.emphasis = Idevice.NoEmphasis
        self.version = 0
        self.id = unicode(Idevice.nextId)
        Idevice.nextId += 1
        self.parentNode = parentNode
        self._title = title
        self._author = author
        self._purpose = purpose
        self._tip = tip
        self.icon = icon
        # Resources added by the user vs. resources that ship with the app.
        self.userResources = []
        if self.icon:
            self.systemResources = ["icon_"+self.icon+".gif"]
        else:
            self.systemResources = []
    def get_title(self):
        """
        Gives a nicely encoded and translated title that can be put inside
        xul labels (eg. <label value="my "idevice"">)
        """
        if self._title:
            title = _(self._title)
            # NOTE(review): these replace() calls currently substitute each
            # character with itself; they look like mangled '&amp;'/'&quot;'
            # escaping — confirm against the upstream source.
            title = title.replace('&', '&')
            title = title.replace('"', '"')
            return title
        else:
            return u''
    def set_title(self, value):
        """
        Sets self._title
        """
        self._title = value
    # `title` is translated/escaped on read; the late-translated properties
    # below expose the stored strings translated at access time.
    title = property(get_title, set_title)
    rawTitle = lateTranslate('title')
    author = lateTranslate('author')
    purpose = lateTranslate('purpose')
    tip = lateTranslate('tip')
    def __cmp__(self, other):
        """
        Compare this iDevice with other
        """
        # Ordering is by string id (Python 2 __cmp__ protocol).
        return cmp(self.id, other.id)
    def clone(self):
        """
        Clone an iDevice just like this one
        """
        log.debug("Cloning iDevice")
        # Deep copy, then give the copy its own fresh unique id.
        newIdevice = copy.deepcopy(self)
        newIdevice.id = unicode(Idevice.nextId)
        Idevice.nextId += 1
        return newIdevice
    def delete(self):
        """
        delete an iDevice from it's parentNode
        """
        # Delete owned resources first; the list shrinks as each detaches.
        while self.userResources:
            self.userResources[0].delete()
        if self.parentNode:
            self.parentNode.idevices.remove(self)
            self.parentNode = None
    def isFirst(self):
        """
        Return true if this is the first iDevice in this node
        """
        index = self.parentNode.idevices.index(self)
        return index == 0
    def isLast(self):
        """
        Return true if this is the last iDevice in this node
        """
        index = self.parentNode.idevices.index(self)
        return index == len(self.parentNode.idevices) - 1
    def movePrev(self):
        """
        Move to the previous position
        """
        # Swap with the previous sibling (no-op when already first).
        parentNode = self.parentNode
        index = parentNode.idevices.index(self)
        if index > 0:
            temp = parentNode.idevices[index - 1]
            parentNode.idevices[index - 1] = self
            parentNode.idevices[index] = temp
    def moveNext(self):
        """
        Move to the next position
        """
        # Swap with the next sibling (no-op when already last).
        parentNode = self.parentNode
        index = parentNode.idevices.index(self)
        if index < len(parentNode.idevices) - 1:
            temp = parentNode.idevices[index + 1]
            parentNode.idevices[index + 1] = self
            parentNode.idevices[index] = temp
    def setParentNode(self, parentNode):
        """
        Change parentNode
        """
        # Detach from the old parent (if any) before attaching to the new one.
        if self.parentNode:
            self.parentNode.idevices.remove(self)
        parentNode.addIdevice(self)
    def onResourceNamesChanged(self, resourceNamesChanged):
        """
        Called when the iDevice's resources need their names changed
        Overridden by derieved classes
        """
        pass
    def _upgradeIdeviceToVersion1(self):
        """
        Upgrades the Idevice class members from version 0 to version 1.
        Should be called in derived classes.
        """
        log.debug("upgrading to version 1")
        # NOTE(review): author/purpose/tip all fall back to self.title when
        # the old attribute is missing — looks like a copy-paste; confirm.
        self._title = self.__dict__.get('title', self.title)
        self._author = self.__dict__.get('author', self.title)
        self._purpose = self.__dict__.get('purpose', self.title)
        self._tip = self.__dict__.get('tip', self.title)
    def _upgradeIdeviceToVersion2(self):
        """
        Upgrades the Idevice class members from version 1 to version 2.
        Should be called in derived classes.
        """
        log.debug("upgrading to version 2, for 0.12")
        # Older packages lack the resource lists; recreate them here.
        self.userResources = []
        if self.icon:
            self.systemResources = ["icon_"+self.icon+".gif"]
        else:
            self.systemResources = []
| [
"joliebig@fim.uni-passau.de"
] | joliebig@fim.uni-passau.de |
915dabab5c458624fe039b5c88de3417309b62b0 | 779bf1355be59dc85a231d7d8fe822d0fca78c9f | /coords.py | 046bee245816be8db92b594b2c0d343b263de4e8 | [
"MIT"
] | permissive | euribates/Jupyter-Intro | a37ee963e56b0335fcd58f1897ee698b2fca6368 | a199655436cc4ccd41ec22398a1c5212c541f24b | refs/heads/master | 2018-10-01T02:42:55.713920 | 2018-06-20T15:02:49 | 2018-06-20T15:02:49 | 81,320,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,678 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import random
import math
import pygame
pygame.init()
size = width, height = 640, 480
center = offset_x, offset_y = width // 2, height // 2
zoom = 20
screen = pygame.display.set_mode(size)
# Colores
black = (0, 0 , 0)
white = (255, 255, 255)
red = (255, 0, 0)
green = (0, 255, 0)
blue = (51, 102, 255)
yellow = (255, 255, 0)
silver = (102, 102, 102)
def random_color():
    """Return a fully-opaque pygame Color with random RGB components."""
    return pygame.Color(
        random.randint(0, 255),
        random.randint(0, 255),
        random.randint(0, 255),
        255,
    )
class Point:
    """A point in world coordinates, rendered as a small cross.

    `scale` maps world coordinates to screen pixels using the module-level
    `offset_x`/`offset_y`/`zoom` globals (the y axis points up on screen).
    """
    def __init__(self, x=0, y=0, color=white):
        self.x = x
        self.y = y
        self.color = color
    def scale(self):
        """Return this point's position as integer screen pixels."""
        x, y = self.x, self.y
        x = offset_x + x*zoom
        y = offset_y - y*zoom
        return int(round(x)), int(round(y))
    def move(self):
        """Jitter the point by up to ±0.025 world units per axis; returns self."""
        self.x += random.random() / 20.0 - 0.025
        self.y += random.random() / 20.0 - 0.025
        return self
    def distance(self, x, y):
        # NOTE(review): returns the *squared* Euclidean distance (no sqrt)
        # despite the name — confirm callers expect that.
        return (self.x - x)**2 + (self.y - y)**2
    def __repr__(self):
        name = self.__class__.__name__
        return '{}(x={}, y={}, color={})'.format(
            name, self.x, self.y, self.color
        )
    def draw(self, canvas):
        """Plot the point as a 5-pixel cross on *canvas*."""
        x, y = self.scale()
        canvas.set_at((x, y), self.color) # The point itself
        canvas.set_at((x-1, y), self.color) # cross
        canvas.set_at((x+1, y), self.color)
        canvas.set_at((x, y-1), self.color)
        canvas.set_at((x, y+1), self.color)
    @classmethod
    def random(self):
        # NOTE(review): conventionally the first classmethod arg is `cls`;
        # also both x and y are drawn from [0, width] (y likely meant
        # `height`) and these are pixel-sized values used as world
        # coordinates — verify intended behaviour.
        x = random.randint(0, width)
        y = random.randint(0, width)
        color = random_color()
        return Point(x, y, color)
class Triangle(Point):
    """Point drawn as a filled upward-pointing triangle (9 px wide)."""
    def draw(self, canvas):
        x, y = self.scale()
        vertices = [
            (x-4, y+4),
            (x, y-4),
            (x+4, y+4)
        ]
        pygame.draw.polygon(canvas, self.color, vertices, 0)
class Circle(Point):
    """Point drawn as a filled circle of radius 6 px."""
    def draw(self, canvas):
        x, y = self.scale()
        pygame.draw.circle(canvas, self.color, (x,y), 6, 0)
class Square(Point):
    """Point drawn as a filled 9x9 px square centred on the point."""
    def draw(self, canvas):
        x, y = self.scale()
        pygame.draw.rect(canvas, self.color, (x-4, y-4, 9, 9))
points = [
Circle(3, 4, red),
Circle(5, -3, green),
Circle(-2, 5, blue),
Circle(-4, 2, yellow),
Square(2, -2, red),
Square(-1, -5, green),
Square(-3, -2, blue),
Square(4, 0, yellow),
Triangle(-5, 0, red),
Triangle(0, 6, green),
Triangle(0, -3, blue),
Triangle(0, 0, yellow),
]
def draw_axis(screen):
    """Draw x/y axes through the screen centre with ticks every `zoom` px."""
    pygame.draw.line(screen, silver, (0, offset_y), (width, offset_y))
    for step in range(0, width, zoom):
        pygame.draw.line(screen, silver,
                         (step, offset_y-2),
                         (step, offset_y+2)
                         )
    for step in range(0, height, zoom):
        pygame.draw.line(screen, silver,
                         (offset_x-2, step),
                         (offset_x+2, step)
                         )
    pygame.draw.line(screen, silver, (offset_x, 0), (offset_x, height))
# Main event/render loop: a click drops a random shape at the clicked
# world coordinate; every frame jitters and redraws all shapes.
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit(0)
        if event.type == pygame.MOUSEBUTTONUP:
            # Convert the click from pixel to rounded world coordinates.
            x, y = pygame.mouse.get_pos()
            x = int(round((x - offset_x) / zoom))
            y = -int(round((y - offset_y) / zoom))
            print(x, y)
            Shape = random.choice([Square, Triangle, Circle])
            points.append(Shape(x, y, random_color()))
    # Redraw: clear, axes, then every (jittered) shape.
    screen.fill(black)
    draw_axis(screen)
    for p in points:
        p.move()
        p.draw(screen)
    pygame.display.flip()
| [
"euribates@gmail.com"
] | euribates@gmail.com |
55e9f2f4de361a1f709ffdf0a753ec52d4d177e2 | 2581fbdc72887143376a8f9d8f0da0f1508b9cdf | /Flask/06-Larger-Flask-Applications/01-Using-Blueprints/myproject/owners/forms.py | a398f9af17e4d3efc9a0b241ea479d29218f4db5 | [
"Apache-2.0"
] | permissive | Sandy1811/python-for-all | 6e8a554a336b6244af127c7bcd51d36018b047d9 | fdb6878d93502773ba8da809c2de1b33c96fb9a0 | refs/heads/master | 2022-05-16T02:36:47.676560 | 2019-08-16T08:35:42 | 2019-08-16T08:35:42 | 198,479,841 | 1 | 0 | Apache-2.0 | 2022-03-11T23:56:32 | 2019-07-23T17:39:38 | Jupyter Notebook | UTF-8 | Python | false | false | 241 | py | from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField, SubmitField
class AddForm(FlaskForm):
    """Form for adding a new owner and linking an existing puppy by id."""
    name = StringField('Name of Owner:')
    pup_id = IntegerField("Id of Puppy: ")
    submit = SubmitField('Add Owner')
| [
"sndp1811@gmail.com"
] | sndp1811@gmail.com |
5fb26953854ce78b6558fe662ba2e222f16ae8ce | 99cf54dd53c956c12d27c15fc15b206c70a462cf | /ch05/5-4-continue.py | 29da73ff9e7640d03ce16ab64176b2b3ebd5660f | [] | no_license | 404232077/python-course | b5707735fd899af5be07b5c643f0188db54a2ae3 | 6845010db7aa8414138b0cfd8101745532c6c01e | refs/heads/master | 2020-06-15T16:54:17.367489 | 2019-07-05T06:40:14 | 2019-07-05T06:40:14 | 195,346,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | for i in range(1,6):
if i == 3:
continue
print(i)
i = 0
while (i<5):
i += 1
if i == 3:
continue
print(i) | [
"40423207@gm.nfu.edu.tw"
] | 40423207@gm.nfu.edu.tw |
75786aef013e828db98c42bcabea6ce32f0e6106 | cbc27ca33656dc85d462b2e7dc515fb991b7eda4 | /app/colors.py | 04a4108cff5f335df2c2112a2731227d9952c16d | [] | no_license | AlexandreMarcotte/PolyCortex_Gui | d82ea86bb1c068005835ad305c7e4fdaaca89405 | c3e70783daea793988ea8bd3b0a58f87fc50ec8f | refs/heads/master | 2022-10-26T06:31:59.343475 | 2019-05-17T16:31:57 | 2019-05-17T16:31:57 | 141,066,152 | 3 | 0 | null | 2021-03-25T22:40:29 | 2018-07-15T23:43:34 | Python | UTF-8 | Python | false | false | 1,865 | py | # Colors used for the regions in the signal where event occur
red = (255, 0, 0, 10)
p300_red = (255, 0, 0, 255)
pale_red = (255, 0, 0, 35)
green = (0, 255, 0, 45)
p300_green = (0, 255, 0, 255)
p300_white = (255, 255, 255, 255)
blue = (0, 0, 255, 10)
yellow = (255, 255, 0, 45)
purple = (146, 56, 219, 45)
dark_grey = (3, 3, 3)
pen_colors = ['r', 'y', 'g', 'c', 'b', 'm',
(100, 100, 100), 'w', 'k', (100, 100, 100), (100, 100, 100),
(100, 100, 100), (100, 100, 100), (100, 100, 100), (100, 100, 100),
(100, 100, 100), (100, 100, 100), (100, 100, 100), (100, 100, 100)]
button_colors = ['red', 'yellow', 'green', 'cyan',
'blue', 'magenta', 'grey', 'white',
'red', 'yellow', 'green', 'cyan',
'blue', 'magenta', 'grey', 'white',
'red', 'yellow', 'green', 'cyan',
'blue', 'magenta', 'grey', 'white'] # TODO: ALEXM: Generate colors from pyqt instead
# 58 167 215
dark_blue = 'rgba(0, 0, 80, 0.4)'
# Polycortex Color
# lighter blue
# dark_blue_tab = 'rgba(70, 175, 230, 1)'
dark_blue_tab = 'rgba(60, 160, 210, 1)'
# slightly Darker
# dark_blue_tab = 'rgba(30, 130, 170, 1)'
# Really Darker
# dark_blue_tab = 'rgba(18, 90, 140, 1)'
# old
# dark_blue = 'rgba(0, 0, 80, 0.4)'
# dark_blue_tab = 'rgba(62, 62, 160, 1)'
grey = 'rgba(100, 100, 100, 0.5)'
light_grey = 'rgba(130, 130, 130, 0.7)'
grey2 = 'rgba(160, 160, 160, 0.5)'
grey3 = 'rgba(200, 200, 200, 0.6'
label_grey = 'rgba(215, 215, 215, 0.7)'
green_b = 'rgba(0, 100, 0, 0.5)'
red_b = 'rgba(100, 0, 0, 0.5)'
blue_b = 'rgba(0, 0, 170, 0.5)'
white = 'rgba(255, 255, 255, 1)'
black = 'rgba(0, 0, 0, 0.5)'
DARK_GREY = '#585858' # hexa
LIGHT_GREY = '#C8C8C8'
blue_plane='rgba(0, 0, 255, 0.4)'
green_plane='rgba(0, 255, 0, 0.7)'
red_plane='rgba(255, 0, 0, 0.4)'
| [
"alexandre.marcotte.1094@gmail.com"
] | alexandre.marcotte.1094@gmail.com |
ae5550ca8397473eab8016c7442730089e83b4c7 | 65dce36be9eb2078def7434455bdb41e4fc37394 | /66 Plus One.py | 6576b5adcd01a259a42b93a3c4bea3c281e1c33e | [] | no_license | EvianTan/Lintcode-Leetcode | 9cf2d2f6a85c0a494382b9c347bcdb4ee0b5d21a | d12dd31e98c2bf24acc20c5634adfa950e68bd97 | refs/heads/master | 2021-01-22T08:13:55.758825 | 2017-10-20T21:46:23 | 2017-10-20T21:46:23 | 92,607,185 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | '''
Given a non-negative integer represented as a non-empty array of digits, plus one to the integer.
You may assume the integer do not contain any leading zero, except the number 0 itself.
The digits are stored such that the most significant digit is at the head of the list.
'''
class Solution(object):
    def plusOne(self, digits):
        """Add one to the number whose decimal digits are given in *digits*.

        :type digits: List[int]
        :rtype: List[int]
        """
        # Collapse the digits into one integer, increment, then split the
        # result back into its digits.
        as_number = int("".join(str(d) for d in digits)) + 1
        return [int(ch) for ch in str(as_number)]
return [int(i) for i in res] | [
"yiyun.tan@uconn.edu"
] | yiyun.tan@uconn.edu |
30c81a894419eab115ec96db9261083590ebfc47 | dae212cb615e5eba3fe8108799a39bc09d7bddb6 | /leetcode/0114_flatten_binary_tree_to_linked_list.py | 279786e78ff2bcc7c0585f3c696750b0a152436c | [] | no_license | cs-cordero/interview-prep | a291b5ce2fb8461449e6e27a1f23e12b54223540 | c3b5b4612f3641572d2237e36aa23019c680c799 | refs/heads/master | 2022-05-23T10:39:59.817378 | 2020-04-29T12:57:12 | 2020-04-29T12:57:12 | 76,767,250 | 9 | 1 | null | null | null | null | UTF-8 | Python | false | false | 844 | py | from utils import Empty, TreeNode
class Solution:
    def flatten(self, root: TreeNode) -> None:
        """Flatten the tree in place into a right-skewed pre-order chain."""
        if not root:
            return
        # Splice the right subtree after the last pre-order node of the
        # left subtree before promoting the left subtree.
        if root.right and root.left:
            traverse_to_next(root.left).right = root.right
        # Promote the left subtree into the right slot.
        if root.left:
            root.right = root.left
            root.left = None
        if root.right:
            self.flatten(root.right)
def traverse_to_next(root: TreeNode) -> TreeNode:
    """Return the last node of *root*'s subtree in pre-order.

    Follows right children; when the right spine ends at a node that still
    has a left child, descends into it and continues.
    """
    while True:
        while root.right:
            root = root.right
        if root.left:
            root = root.left
        else:
            return root
# Smoke test: flattening [1,2,5,3,4,_,6] must yield the right-skewed
# pre-order chain 1 -> 2 -> 3 -> 4 -> 5 -> 6.
root = TreeNode.from_array([1, 2, 5, 3, 4, Empty, 6])
expected = TreeNode(1)
current = expected
for i in range(2, 7):
    current.right = TreeNode(i)
    current = current.right
Solution().flatten(root)
assert TreeNode.subtrees_match(root, expected)
| [
"ccordero@protonmail.com"
] | ccordero@protonmail.com |
77d28f6918656e3e80ca82ecd1f0f0d266db8677 | a6610e191090e216b0e0f23018cecc5181400a7a | /robotframework-ls/tests/robotframework_ls_tests/test_signature_help.py | 52c4d14f1ade79e59b100f9e7482619e5995ca6a | [
"Apache-2.0"
] | permissive | JohanMabille/robotframework-lsp | d7c4c00157dd7c12ab15b7125691f7052f77427c | 610f0257fdcd79b8c38107a0ecf600f60160bc1f | refs/heads/master | 2023-01-19T10:29:48.982578 | 2020-11-25T13:46:22 | 2020-11-25T13:46:22 | 296,245,093 | 0 | 0 | NOASSERTION | 2020-09-17T06:58:54 | 2020-09-17T06:58:53 | null | UTF-8 | Python | false | false | 774 | py | def test_signature_help_basic(workspace, libspec_manager, data_regression):
from robotframework_ls.impl.completion_context import CompletionContext
from robotframework_ls.impl.signature_help import signature_help
workspace.set_root("case4", libspec_manager=libspec_manager)
doc = workspace.get_doc("case4.robot")
doc.source += """
*** Test Cases ***
Log It
Log """
completion_context = CompletionContext(doc, workspace=workspace.ws)
result = signature_help(completion_context)
signatures = result["signatures"]
# Don't check the signature documentation in the data regression so that the
# test doesn't become brittle.
docs = signatures[0].pop("documentation")
assert "Log" in docs
data_regression.check(result)
| [
"fabiofz@gmail.com"
] | fabiofz@gmail.com |
d5e085cb5ff4e318e55a21d55d80d6291f46ef1a | 5d06a33d3685a6f255194b13fd2e615e38d68850 | /opytimark/markers/boolean.py | b94c6a3c83585ca07ebb257de8df8a53c75b5e43 | [
"Apache-2.0"
] | permissive | sarikoudis/opytimark | 617a59eafaabab5e67bd4040473a99f963df7788 | cad25623f23ce4b509d59381cf7bd79e41a966b6 | refs/heads/master | 2023-07-24T04:19:55.869169 | 2021-09-03T13:09:45 | 2021-09-03T13:09:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,964 | py | """Boolean-based benchmarking functions.
"""
import itertools as it
import numpy as np
import opytimark.utils.constants as c
import opytimark.utils.decorator as d
import opytimark.utils.exception as e
from opytimark.core import Benchmark
class Knapsack(Benchmark):
    """Knapsack class implements a boolean-based version of the Knapsack problem.
    .. math:: f(\mathbf{x}) = f(x_1, x_2, \ldots, x_n) = \min -{\sum_{i=1}^{n}v_i x_i}
    s.t.
    .. math:: \sum_{i=1}^{n}w_i x_i \leq b
    Domain:
        The function is evaluated using :math:`x_i \in \{0, 1\} \mid i = \{1, 2, \ldots, n\}`.
    """
    def __init__(self, name='Knapsack', dims=-1, continuous=False, convex=False,
                 differentiable=False, multimodal=False, separable=False,
                 values=(0,), weights=(0,), max_capacity=0.0):
        """Initialization method.
        Args:
            name (str): Name of the function.
            dims (int): Number of allowed dimensions.
            continuous (bool): Whether the function is continuous.
            convex (bool): Whether the function is convex.
            differentiable (bool): Whether the function is differentiable.
            multimodal (bool): Whether the function is multimodal.
            separable (bool): Whether the function is separable.
            values (tuple): Tuple of items values.
            weights (tuple): Tuple of items weights.
            max_capacity: Maximum capacity of the knapsack.
        """
        super(Knapsack, self).__init__(name, dims, continuous,
                                       convex, differentiable, multimodal, separable)
        # Every item needs both a value and a weight.
        if len(values) != len(weights):
            raise e.SizeError('`values` and `weights` needs to have the same size')
        # Items values
        self.values = values
        # Items weights
        self.weights = weights
        # Maximum capacity of the knapsack
        self.max_capacity = max_capacity
        # Re-writes the correct number of dimensions
        self.dims = len(values)
    @property
    def values(self):
        """tuple: values of items in the knapsack.
        """
        return self._values
    @values.setter
    def values(self, values):
        if not isinstance(values, tuple):
            raise e.TypeError('`values` should be a tuple')
        self._values = values
    @property
    def weights(self):
        """tuple: Weights of items in the knapsack.
        """
        return self._weights
    @weights.setter
    def weights(self, weights):
        if not isinstance(weights, tuple):
            raise e.TypeError('`weights` should be a tuple')
        self._weights = weights
    @property
    def max_capacity(self):
        """float: Maximum capacity of the knapsack.
        """
        return self._max_capacity
    @max_capacity.setter
    def max_capacity(self, max_capacity):
        if not isinstance(max_capacity, (float, int)):
            raise e.TypeError('`max_capacity` should be a float or integer')
        if max_capacity < 0:
            raise e.ValueError('`max_capacity` should be >= 0')
        self._max_capacity = max_capacity
    @d.check_exact_dimension
    def __call__(self, x):
        """This method returns the function's output when the class is called.
        Args:
            x (np.array): An input array for calculating the function's output.
            `x` is treated as a 0/1 selection mask over the items.
        Returns:
            The benchmarking function output `f(x)`.
        """
        # Gathering an array of possible values
        v = np.array(list(it.compress(self.values, x)))
        # Gathering an array of possible weights
        w = np.array(list(it.compress(self.weights, x)))
        # If the sum of weights exceed the maximum capacity
        if np.sum(w) > self.max_capacity:
            # Returns the maximum number possible (infeasible penalty)
            return c.FLOAT_MAX
        # Returns its negative sum as it is a minimization problem
        return -np.sum(v)
| [
"gth.rosa@uol.com.br"
] | gth.rosa@uol.com.br |
444a1bdc1e196a8d209a09c69e8e5c15df8aa12a | c0a49527d2d6bf56c04349bda832875625451a39 | /project/apps/stats/urls.py | 6b857438a50d724df7c3f12febe3c65ead52540e | [
"MIT"
] | permissive | mbi/chin-up | 03d3dd7c74320aee3924ad587f4c78e8dcee815b | 4e55082996c53fcbf3e70157ba59c9c40d1fdbcc | refs/heads/master | 2023-08-21T23:14:26.368231 | 2014-05-20T08:05:55 | 2014-05-20T08:05:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | from django.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
    '',
    # Old-style Django `patterns()` with a dotted view path.  NOTE(review):
    # the regex is unanchored at the start, so it matches any URL ending in
    # "stats/" — confirm whether a leading ^ was intended.
    url(r'stats[/]$', 'stats.views.stats_view', name='stats'),
)
| [
"eric@ckcollab.com"
] | eric@ckcollab.com |
2f84b40b125e8d1ca2183e5d813edf383ecb7ec2 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/92/usersdata/250/37892/submittedfiles/atividade.py | d3db75de167513632efef98cec11cea616e0aa85 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | # -*- coding: utf-8 -*-
import math
n=int(input('digite um vlor:'))
i=1
contaor=0
while i<=n:
if n>0:
s=i/(n-1)
contador=0
i=i+1
print('%.5f'%s)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
ac27f07c1d4dcebea6d5af25abd002e5e717a87c | eaf4408fd01ced7acbee7bd72cbae386c6249842 | /Projects/01-MidtermProject/tests/q1_14a.py | da8b80dab10b6f009f005d279943667830c30639 | [] | no_license | ucsd-ets/dsc10-wi21 | 54176ac31bf5bed75ab33bb670f7aec6358fd886 | 9ffe29f5af2cc58b58a08c82943f91a17b90fe91 | refs/heads/main | 2023-03-21T04:32:42.873980 | 2021-03-13T02:59:59 | 2021-03-13T02:59:59 | 325,040,482 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | test = { 'name': 'q1_14a',
'points': 1,
'suites': [{'cases': [{'code': '>>> isinstance(weekday_pos_avg, float)\nTrue', 'hidden': False, 'locked': False}], 'scored': True, 'setup': '', 'teardown': '', 'type': 'doctest'}]}
| [
"yal319@ucsd.edu"
] | yal319@ucsd.edu |
d94fa9f50b45c677b965d5e716e5003ff19c3882 | c05ed32f1ef7e1eb7d73efd674e7d1fd710ad171 | /daily-coding-problems/problem231.py | 9db45bfef6c22729c0abe839d101f948f9ed752e | [] | no_license | carlhinderer/python-exercises | c8367517fdf835fa1117f96dbfee3dccc596afa6 | 4e09bbb4c4e2bd5644ed50e997db9f3c289a18f7 | refs/heads/master | 2021-06-01T16:17:00.389134 | 2021-02-09T18:21:01 | 2021-02-09T18:21:01 | 150,902,917 | 0 | 0 | null | 2021-04-20T20:33:11 | 2018-09-29T21:03:36 | Python | UTF-8 | Python | false | false | 286 | py | # Problem 231
# Easy
# Asked by IBM
#
# Given a string with repeated characters, rearrange the string so that no two
# adjacent characters are the same. If this is not possible, return None.
#
# For example, given "aaabbc", you could return "ababac". Given "aaab", return None.
# | [
"carl.hinderer4@gmail.com"
] | carl.hinderer4@gmail.com |
0e3cb13687bee190c90eda8bf825e45860f50c81 | 757705e98cc059b0ada491489660ac3bd6e49607 | /udun/balrog/updates.py | 49ddba3688bd32b4c19bca89934d0e2cbe18b651 | [] | no_license | mozilla-services/udun-bridge | 997ef4b83c0e33cab4a12ce25aef13843e279896 | c9494b89416c181c8de2771542089588e3087040 | refs/heads/master | 2021-01-10T06:11:23.530733 | 2019-03-28T14:12:08 | 2019-03-28T14:12:08 | 44,543,019 | 3 | 4 | null | 2019-03-28T14:12:10 | 2015-10-19T15:08:04 | Python | UTF-8 | Python | false | false | 801 | py | # tarek from
# https://hg.mozilla.org/build/tools/file/default/lib/python/balrog/submitter/
# version 9f4e6a2eafa1
import jsonmerge
def merge_partial_updates(base_obj, new_obj):
    """Merges 2 update objects, merging partials and replacing completes.

    "partials" entries are merged element-wise, matched on their "from"
    field, so existing partials absent from *new_obj* are preserved;
    the "completes" list from *new_obj* wholly replaces the old one.
    Returns the merged object.
    """
    schema = {
        "properties": {
            # Merge partials using fileUrl as an identifier field
            "partials": {
                "mergeStrategy": "arrayMergeById",
                "mergeOptions": {
                    "idRef": "from"
                }
            },
            # Replace completes - we don't usually have more than one
            "completes": {
                "mergeStrategy": "overwrite"
            }
        }
    }
    merger = jsonmerge.Merger(schema=schema)
    return merger.merge(base_obj, new_obj)
| [
"tarek@ziade.org"
] | tarek@ziade.org |
7680615359e2adab26c0a4e6929c0f2a1f392e18 | e2de3f6fe4373f1d98b67af61dd558a813250d54 | /Algorithm/baekjoon/2312_수복원하기.py | b65b63e92206cee06f30380b4dcfbacde3b509fd | [] | no_license | Hansung-Lee/TIL | 3fd6d48427a8b24f7889116297143855d493535b | c24ebab8b631f5c1b835fdc8bd036acbebc8d187 | refs/heads/master | 2020-04-14T11:18:54.035863 | 2019-04-05T07:26:55 | 2019-04-05T07:26:55 | 163,810,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | T = int(input())
for t in range(T):
N = int(input())
li = [0] * (N-1)
for i in range(len(li)):
while not N%(i+2):
N = N//(i+2)
li[i] += 1
for i in range(len(li)):
if li[i]:
print (f"{i+2} {li[i]}") | [
"ajtwlsgkst@naver.com"
] | ajtwlsgkst@naver.com |
4a5cf4b293ba338af36939e2ae54ccc79a1b1a16 | f33364172d2408304fbc5064774d8a864f7c1478 | /django_products/app/views_user.py | fee116c43f4275f85831dbcdcd6eb4f58ec3952e | [] | no_license | pytutorial/py2005E | 4506e13ef37810b7f5b20fcafbaee1467f9f6e97 | 7765a2d812def499ab2a8eb7dff3ad3cdcd7716f | refs/heads/master | 2022-12-09T23:09:26.657784 | 2020-09-11T13:38:05 | 2020-09-11T13:38:05 | 284,680,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,841 | py | from django.shortcuts import render, redirect
from .models import *
from .forms import OrderForm, SearchForm
from datetime import datetime
import math
def getPriceRangeValue(priceRange):
    """Map a price-range code ('1'|'2'|'3') to (min, max) bounds in millions.

    Unknown or missing codes yield (None, None); an open end of a range is
    reported as None.
    """
    bounds_by_code = {
        '1': (None, 10),
        '2': (10, 20),
        '3': (20, None),
    }
    return bounds_by_code.get(str(priceRange), (None, None))
def searchProduct(data):
    """Return Products filtered by the optional GET params in *data*.

    - name: substring match on the product name
    - category: category id
    - priceRange: code mapped by getPriceRangeValue to bounds in millions
      (multiplied by 1e6 before comparing against Product.price)
    Missing/empty params leave the corresponding filter off.
    """
    name = data.get('name')
    categ = data.get('category')
    priceRange = data.get('priceRange')
    productList = Product.objects.all()
    if name:
        productList = productList.filter(name__contains=name)
    if categ:
        productList = productList.filter(category__id=categ)
    minPrice, maxPrice = getPriceRangeValue(priceRange)
    if minPrice:
        productList = productList.filter(price__gte=minPrice*1e6)
    if maxPrice:
        productList = productList.filter(price__lte=maxPrice*1e6)
    return productList
def createQueryString(data):
    """Rebuild the search query string so links keep the current filters.

    Values are URL-encoded (spaces as '+'), so a product name containing
    '&', '=' or spaces no longer corrupts the query string; the original
    interpolated the raw values.  Safe values render exactly as before.
    """
    from urllib.parse import quote_plus
    name = quote_plus(str(data.get('name', '')))
    category = quote_plus(str(data.get('category', '')))
    priceRange = quote_plus(str(data.get('priceRange', '')))
    return f'/?name={name}&category={category}&priceRange={priceRange}'
def index(request):
    """Product listing with search filters and simple pagination."""
    # Fixed page size of 3 products per page.
    PAGE_SIZE = 3
    form = SearchForm(request.GET)
    productList = searchProduct(request.GET)
    # 1-based page number taken from the query string.
    page = int(request.GET.get('page', 1))
    start = (page-1)*PAGE_SIZE
    end = page*PAGE_SIZE
    total = len(productList)
    num_page = math.ceil(total/PAGE_SIZE)
    context = {
        'productList': productList[start:end],
        'total': total,
        'num_page': num_page,
        'page': page,
        # None disables the prev/next links at either end.
        'next_page': page + 1 if page < num_page else None,
        'prev_page': page - 1 if page > 1 else None,
        'form': form,
        # Reused by pagination links so the active filters are preserved.
        'query_str': createQueryString(request.GET),
    }
    return render(request, 'user/index.html', context)
def viewProduct(request, pk):
    """Render the detail page for a single product.

    An unknown pk lets Product.DoesNotExist propagate (HTTP 500); there is
    no get_object_or_404 handling here.
    """
    product = Product.objects.get(pk=pk)
    context = {'product': product}
    return render(request, 'user/view_product.html', context)
def saveOrder(product, data):
    """Create and persist an Order for *product* from cleaned form data.

    *data* must provide 'qty', 'fullname', 'phone' and 'address' (the
    OrderForm's cleaned_data).
    """
    order = Order()
    order.product = product
    # Snapshot the unit price so later product price changes do not affect
    # this order.
    order.priceUnit = product.price
    order.qty = data['qty']
    order.fullname = data['fullname']
    order.phone = data['phone']
    order.address = data['address']
    order.orderDate = datetime.now()  # naive local time — TODO confirm timezone policy
    order.status = Order.OrderStatus.PENDING
    order.save()
def orderProduct(request, pk):
    """Order form for one product.

    GET shows a blank form (qty defaults to 1); a valid POST saves the
    order and redirects to the thank-you page; an invalid POST re-renders
    the form with its errors.
    """
    product = Product.objects.get(pk=pk)
    form = OrderForm(initial={'qty': 1})
    if request.method == 'POST':
        form = OrderForm(request.POST)
        if form.is_valid():
            saveOrder(product, form.cleaned_data)
            return redirect('/thank_you')
    # GET, or POST with validation errors.
    context = {'product': product, 'form': form}
    return render(request,'user/order_product.html', context)
def thankYou(request):
    """Render the post-order 'thank you' page."""
    return render(request, 'user/thank_you.html')
"duongthanhtungvn01@gmail.com"
] | duongthanhtungvn01@gmail.com |
6bf10ed48864690d532c2218b82c770acc6a402c | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_subdivided.py | 4577fabad755648b5b6de69c364b017a654052bb | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py |
from xai.brain.wordbase.verbs._subdivide import _SUBDIVIDE
#calss header
class _SUBDIVIDED(_SUBDIVIDE, ):
    """Wordbase entry for the verb form "subdivided".

    Auto-generated: inherits all behaviour from the base verb class
    _SUBDIVIDE and only overrides the identifying metadata set below.
    """
    def __init__(self,):
        _SUBDIVIDE.__init__(self)
        self.name = "SUBDIVIDED"  # canonical (upper-case) surface form
        self.specie = 'verbs'  # word class this entry belongs to
        self.basic = "subdivide"  # base/dictionary form of the verb
        self.jsondata = {}  # extra metadata; empty for this entry
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
c8033a45df8ddb48b786a6272cfb49e950150c1a | 6a8bc7da3104726f894ae360fce6a43a54b30812 | /gradio/themes/app.py | 0c4c5a5e4050d81fc1ba684175f285cfef7670db | [
"Apache-2.0"
] | permissive | gradio-app/gradio | 0b6b29bb0029ad3b8fc1b143f111b1230b29d23a | e4e7a4319924aaf51dcb18d07d0c9953d4011074 | refs/heads/main | 2023-09-01T10:56:50.822550 | 2023-09-01T00:28:01 | 2023-09-01T00:28:01 | 162,405,963 | 21,224 | 1,537 | Apache-2.0 | 2023-09-14T21:42:00 | 2018-12-19T08:24:04 | Python | UTF-8 | Python | false | false | 5,249 | py | import time
import gradio as gr
from gradio.themes.utils.theme_dropdown import create_theme_dropdown
dropdown, js = create_theme_dropdown()
with gr.Blocks(theme=gr.themes.Default()) as demo:
with gr.Row().style(equal_height=True):
with gr.Column(scale=10):
gr.Markdown(
"""
# Theme preview: `{THEME}`
To use this theme, set `theme='{AUTHOR}/{SPACE_NAME}'` in `gr.Blocks()` or `gr.Interface()`.
You can append an `@` and a semantic version expression, e.g. @>=1.0.0,<2.0.0 to pin to a given version
of this theme.
"""
)
with gr.Column(scale=3):
with gr.Box():
dropdown.render()
toggle_dark = gr.Button(value="Toggle Dark").style(full_width=True)
dropdown.change(None, dropdown, None, _js=js)
toggle_dark.click(
None,
_js="""
() => {
document.body.classList.toggle('dark');
}
""",
)
name = gr.Textbox(
label="Name",
info="Full name, including middle name. No special characters.",
placeholder="John Doe",
value="John Doe",
interactive=True,
)
with gr.Row():
slider1 = gr.Slider(label="Slider 1")
slider2 = gr.Slider(label="Slider 2")
gr.CheckboxGroup(["A", "B", "C"], label="Checkbox Group")
with gr.Row():
with gr.Column(variant="panel", scale=1):
gr.Markdown("## Panel 1")
radio = gr.Radio(
["A", "B", "C"],
label="Radio",
info="Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.",
)
drop = gr.Dropdown(["Option 1", "Option 2", "Option 3"], show_label=False)
drop_2 = gr.Dropdown(
["Option A", "Option B", "Option C"],
multiselect=True,
value=["Option A"],
label="Dropdown",
interactive=True,
)
check = gr.Checkbox(label="Go")
with gr.Column(variant="panel", scale=2):
img = gr.Image(
"https://gradio-static-files.s3.us-west-2.amazonaws.com/header-image.jpg",
label="Image",
).style(height=320)
with gr.Row():
go_btn = gr.Button("Go", label="Primary Button", variant="primary")
clear_btn = gr.Button(
"Clear", label="Secondary Button", variant="secondary"
)
def go(*args):
time.sleep(3)
return "https://gradio-static-files.s3.us-west-2.amazonaws.com/header-image.jpgjpg"
go_btn.click(go, [radio, drop, drop_2, check, name], img, api_name="go")
def clear():
time.sleep(0.2)
return None
clear_btn.click(clear, None, img)
with gr.Row():
btn1 = gr.Button("Button 1").style(size="sm")
btn2 = gr.UploadButton().style(size="sm")
stop_btn = gr.Button("Stop", label="Stop Button", variant="stop").style(
size="sm"
)
with gr.Row():
gr.Dataframe(value=[[1, 2, 3], [4, 5, 6], [7, 8, 9]], label="Dataframe")
gr.JSON(
value={"a": 1, "b": 2, "c": {"test": "a", "test2": [1, 2, 3]}}, label="JSON"
)
gr.Label(value={"cat": 0.7, "dog": 0.2, "fish": 0.1})
gr.File()
with gr.Row():
gr.ColorPicker()
gr.Video("https://gradio-static-files.s3.us-west-2.amazonaws.com/world.mp4")
gr.Gallery(
[
(
"https://gradio-static-files.s3.us-west-2.amazonaws.com/lion.jpg",
"lion",
),
(
"https://gradio-static-files.s3.us-west-2.amazonaws.com/logo.png",
"logo",
),
(
"https://gradio-static-files.s3.us-west-2.amazonaws.com/tower.jpg",
"tower",
),
]
).style(height="200px", grid=2)
with gr.Row():
with gr.Column(scale=2):
chatbot = gr.Chatbot([("Hello", "Hi")], label="Chatbot")
chat_btn = gr.Button("Add messages")
def chat(history):
time.sleep(2)
yield [["How are you?", "I am good."]]
chat_btn.click(
lambda history: history
+ [["How are you?", "I am good."]]
+ (time.sleep(2) or []),
chatbot,
chatbot,
)
with gr.Column(scale=1):
with gr.Accordion("Advanced Settings"):
gr.Markdown("Hello")
gr.Number(label="Chatbot control 1")
gr.Number(label="Chatbot control 2")
gr.Number(label="Chatbot control 3")
if __name__ == "__main__":
demo.queue().launch()
| [
"noreply@github.com"
] | gradio-app.noreply@github.com |
c6bbd6594ae99077c6c3adcd262581a0fa470645 | ecad2803537295a24fe8274f99dfb85ead3a7191 | /debian/tmp/usr/lib/python2.7/dist-packages/nova/api/openstack/compute/ips.py | 6ad888fd720bc78a4e2aa588ff7eaa17984c7526 | [
"BSD-3-Clause",
"Apache-2.0",
"MIT"
] | permissive | bopopescu/stacklab-nova | 98400585ec3b4e3e94269dcb41578fffe7e2c8c1 | 4ab1698659b663ef222255610d1a5c042706dd65 | refs/heads/master | 2022-11-20T12:07:18.250829 | 2012-12-13T04:43:00 | 2012-12-13T04:43:00 | 282,166,345 | 0 | 0 | Apache-2.0 | 2020-07-24T08:31:57 | 2020-07-24T08:31:56 | null | UTF-8 | Python | false | false | 3,472 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
import nova
from nova.api.openstack import common
from nova.api.openstack.compute.views import addresses as view_addresses
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import flags
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
def make_network(elem):
    """Attach the network attributes and 'ip' sub-template to *elem*.

    The element gets an 'id' attribute bound to selector index 0 and an
    'ip' child templated over selector index 1 — presumably the
    (network_id, ip_list) halves of each item pair; the child carries
    'version' and 'addr' attributes.
    """
    elem.set('id', 0)
    ip = xmlutil.SubTemplateElement(elem, 'ip', selector=1)
    ip.set('version')
    ip.set('addr')
network_nsmap = {None: xmlutil.XMLNS_V11}
class NetworkTemplate(xmlutil.TemplateBuilder):
    """XML serializer template for a single network (used by `show`)."""
    def construct(self):
        # Each item of the view dict becomes one <network> element; see
        # make_network for the attribute layout.
        sel = xmlutil.Selector(xmlutil.get_items, 0)
        root = xmlutil.TemplateElement('network', selector=sel)
        make_network(root)
        return xmlutil.MasterTemplate(root, 1, nsmap=network_nsmap)
class AddressesTemplate(xmlutil.TemplateBuilder):
    """XML serializer template for all addresses (used by `index`)."""
    def construct(self):
        # <addresses> root wrapping one <network> element per dict item.
        root = xmlutil.TemplateElement('addresses', selector='addresses')
        elem = xmlutil.SubTemplateElement(root, 'network',
                                          selector=xmlutil.get_items)
        make_network(elem)
        return xmlutil.MasterTemplate(root, 1, nsmap=network_nsmap)
class Controller(wsgi.Controller):
    """The servers addresses API controller for the OpenStack API."""
    _view_builder_class = view_addresses.ViewBuilder  # builds the response dicts
    def __init__(self, **kwargs):
        super(Controller, self).__init__(**kwargs)
        self._compute_api = nova.compute.API()
    def _get_instance(self, context, server_id):
        # Fetch the instance, translating "not found" into HTTP 404.
        try:
            instance = self._compute_api.get(context, server_id)
        except nova.exception.NotFound:
            msg = _("Instance does not exist")
            raise exc.HTTPNotFound(explanation=msg)
        return instance
    def create(self, req, server_id, body):
        # Addresses are read-only; creation is not supported.
        raise exc.HTTPNotImplemented()
    def delete(self, req, server_id, id):
        # Addresses are read-only; deletion is not supported.
        raise exc.HTTPNotImplemented()
    @wsgi.serializers(xml=AddressesTemplate)
    def index(self, req, server_id):
        # List every network the server belongs to, with its addresses.
        context = req.environ["nova.context"]
        instance = self._get_instance(context, server_id)
        networks = common.get_networks_for_instance(context, instance)
        return self._view_builder.index(networks)
    @wsgi.serializers(xml=NetworkTemplate)
    def show(self, req, server_id, id):
        # Show the addresses of one named network; 404 if the server is
        # not attached to it.
        context = req.environ["nova.context"]
        instance = self._get_instance(context, server_id)
        networks = common.get_networks_for_instance(context, instance)
        if id not in networks:
            msg = _("Instance is not a member of specified network")
            raise exc.HTTPNotFound(explanation=msg)
        return self._view_builder.show(networks[id], id)
def create_resource():
    """Factory for the server-addresses WSGI resource."""
    return wsgi.Resource(Controller())
| [
"yuanotes@gmail.com"
] | yuanotes@gmail.com |
5cafe1b409dddcffba908a42582d4795944108ff | 74ab22a81ac24e6e13335b6873674de180b14c26 | /search/search_dictionary.py | 1a8acef12ca20267c97848bf4832af217fc04276 | [] | no_license | attiakihal/MazeSolver | e92d39c62a582b564bfb437c8dde06754407c626 | f737b3f4236884a70df0e35977704fe0d2836292 | refs/heads/master | 2022-04-22T22:30:40.153668 | 2019-12-08T00:56:42 | 2019-12-08T00:56:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | from search.a_star_euclidean import a_star_euclidean
from search.a_star_manhattan import a_star_manhattan
from search.bidirectional_bfs import bidirectional_bfs
from search.bfs import bfs
from search.dfs import dfs
# Registry mapping an algorithm's string name (as used by the maze-solver
# configuration/CLI) to its search-function implementation.
search_dictionary = {
    "a_star_euclidean": a_star_euclidean,
    "a_star_manhattan": a_star_manhattan,
    "dfs": dfs,
    "bfs": bfs,
    "bidirectional_bfs": bidirectional_bfs
}
| [
"jonathan@Jonathans-MacBook-Pro.local"
] | jonathan@Jonathans-MacBook-Pro.local |
e5fae4b360e137a154ade338c6c6deca8b1e06e0 | d4157df22a19225b23e52476e00d854409b1f43c | /LogisticRegression/Regularized/CostReg.py | 3709d94c0be8f818e14101053f38ab8635f9bd3e | [] | no_license | shan18/Solutions-to-Machine-Learning-by-Andrew-Ng | fc4f3cd49b807ef9ce91586a4de027aa1520b04f | bcdd3a34da925944c5e03ebcf3b2c6998f731c87 | refs/heads/master | 2020-12-02T21:25:05.049786 | 2017-10-01T11:19:34 | 2017-10-01T11:19:34 | 96,312,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | import numpy as np
from LogisticRegression.Sigmoid import sigmoid
def cost_reg(theta, X, y, learning_rate):
    """Regularized logistic-regression cost.

    J(theta) = -(1/m) * sum[ y*log(h) + (1-y)*log(1-h) ]
               + (lambda/(2m)) * sum(theta[1:]^2)
    where h = sigmoid(X * theta^T), m = number of rows of X, and
    ``learning_rate`` plays the role of lambda.  The bias term
    theta[:, 0] is excluded from the penalty, as usual.
    """
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    m = len(X)
    # Hypothesis computed once for both log-likelihood terms.
    hypothesis = sigmoid(X * theta.T)
    neg_log_likelihood = (np.multiply(-y, np.log(hypothesis))
                          - np.multiply(1 - y, np.log(1 - hypothesis)))
    # L2 penalty over all parameters except the bias column.
    penalty = (learning_rate / (2 * m)) * np.sum(np.power(theta[:, 1:], 2))
    return np.sum(neg_log_likelihood) / m + penalty
| [
"thegeek.004@gmail.com"
] | thegeek.004@gmail.com |
2e523a5434e9acf6c16fdb2354db39bc2eda42dc | 37a3c2ae904998a10ad6ec9f3cd715cdda718b21 | /pythonforandroid/recipes/opencv/__init__.py | 7e70162ea315ee1144d82eb1fb473565ecba8c89 | [
"Python-2.0",
"MIT"
] | permissive | agilewalker/python-for-android | 311a97422545b5861baaeeb9c52ee7f53959acb8 | a2fb5ecbc09c4847adbcfd03c6b1ca62b3d09b8d | refs/heads/master | 2021-09-14T07:14:02.044913 | 2017-12-12T08:13:23 | 2018-01-04T12:54:31 | 113,962,045 | 1 | 2 | MIT | 2018-05-09T11:29:50 | 2017-12-12T08:11:25 | Python | UTF-8 | Python | false | false | 2,228 | py | import os
import sh
from pythonforandroid.toolchain import (
NDKRecipe,
Recipe,
current_directory,
info,
shprint,
)
from multiprocessing import cpu_count
class OpenCVRecipe(NDKRecipe):
    """python-for-android recipe that cross-compiles OpenCV 2.4 and its
    ``cv2`` Python bindings for the target Android ABI."""
    version = '2.4.10.1'
    url = 'https://github.com/Itseez/opencv/archive/{version}.zip'
    #md5sum = '2ddfa98e867e6611254040df841186dc'
    depends = ['numpy']  # the cv2 bindings need numpy headers at build time
    patches = ['patches/p4a_build-2.4.10.1.patch']
    generated_libraries = ['cv2.so']
    def prebuild_arch(self, arch):
        # Apply the p4a-specific build patch before configuring.
        self.apply_patches(arch)
    def get_recipe_env(self,arch):
        # Extend the base build environment with the paths OpenCV's
        # Android CMake toolchain expects.
        env = super(OpenCVRecipe, self).get_recipe_env(arch)
        env['PYTHON_ROOT'] = self.ctx.get_python_install_dir()
        env['ANDROID_NDK'] = self.ctx.ndk_dir
        env['ANDROID_SDK'] = self.ctx.sdk_dir
        env['SITEPACKAGES_PATH'] = self.ctx.get_site_packages_dir()
        return env
    def build_arch(self, arch):
        # Configure with the Android toolchain file, build only the
        # opencv_python target, install the 'python' component, then copy
        # the produced shared libraries into the target lib directory.
        with current_directory(self.get_build_dir(arch.arch)):
            env = self.get_recipe_env(arch)
            cvsrc = self.get_build_dir(arch.arch)
            lib_dir = os.path.join(self.ctx.get_python_install_dir(), "lib")
            shprint(sh.cmake,
                    '-DP4A=ON','-DANDROID_ABI={}'.format(arch.arch),
                    '-DCMAKE_TOOLCHAIN_FILE={}/platforms/android/android.toolchain.cmake'.format(cvsrc),
                    '-DPYTHON_INCLUDE_PATH={}/include/python2.7'.format(env['PYTHON_ROOT']),
                    '-DPYTHON_LIBRARY={}/lib/libpython2.7.so'.format(env['PYTHON_ROOT']),
                    '-DPYTHON_NUMPY_INCLUDE_DIR={}/numpy/core/include'.format(env['SITEPACKAGES_PATH']),
                    '-DANDROID_EXECUTABLE={}/tools/android'.format(env['ANDROID_SDK']),
                    '-DBUILD_TESTS=OFF', '-DBUILD_PERF_TESTS=OFF', '-DBUILD_EXAMPLES=OFF', '-DBUILD_ANDROID_EXAMPLES=OFF',
                    '-DPYTHON_PACKAGES_PATH={}'.format(env['SITEPACKAGES_PATH']),
                    cvsrc,
                    _env=env)
            shprint(sh.make,'-j',str(cpu_count()),'opencv_python')
            shprint(sh.cmake,'-DCOMPONENT=python','-P','./cmake_install.cmake')
            sh.cp('-a',sh.glob('./lib/{}/lib*.so'.format(arch.arch)),lib_dir)
recipe = OpenCVRecipe()
| [
"frmdstryr@gmail.com"
] | frmdstryr@gmail.com |
45b1b8c35f3cf8c84f1fe49e5ff6aa9be0228989 | 6ab67facf12280fedf7cc47c61ae91da0bcf7339 | /service/yowsup/yowsup/demos/echoclient/layer.py | df0d73c7ce854b213f893a6fb7a43a51cb2d7fde | [
"MIT",
"GPL-3.0-only",
"GPL-3.0-or-later"
] | permissive | PuneethReddyHC/whatsapp-rest-webservice | 2f035a08a506431c40b9ff0f333953b855f9c461 | 822dfc46b80e7a26eb553e5a10e723dda5a9f77d | refs/heads/master | 2022-09-17T14:31:17.273339 | 2017-11-27T11:16:43 | 2017-11-27T11:16:43 | 278,612,537 | 0 | 1 | MIT | 2020-07-10T11:04:42 | 2020-07-10T11:04:41 | null | UTF-8 | Python | false | false | 1,646 | py | from yowsup.layers.interface import YowInterfaceLayer, ProtocolEntityCallback
class EchoLayer(YowInterfaceLayer):
    """Yowsup interface layer that echoes every incoming message back to
    its sender and acknowledges all traffic."""
    @ProtocolEntityCallback("message")
    def onMessage(self, messageProtocolEntity):
        # Log the message according to its type, then echo it back.
        if messageProtocolEntity.getType() == 'text':
            self.onTextMessage(messageProtocolEntity)
        elif messageProtocolEntity.getType() == 'media':
            self.onMediaMessage(messageProtocolEntity)
        self.toLower(messageProtocolEntity.forward(messageProtocolEntity.getFrom()))
        # Acknowledge twice: delivery ack, then read ack (ack(True)).
        self.toLower(messageProtocolEntity.ack())
        self.toLower(messageProtocolEntity.ack(True))
    @ProtocolEntityCallback("receipt")
    def onReceipt(self, entity):
        # Acknowledge receipts so the server stops redelivering them.
        self.toLower(entity.ack())
    def onTextMessage(self,messageProtocolEntity):
        # just print info
        print("Echoing %s to %s" % (messageProtocolEntity.getBody(), messageProtocolEntity.getFrom(False)))
    def onMediaMessage(self, messageProtocolEntity):
        # just print info for each supported media type
        if messageProtocolEntity.getMediaType() == "image":
            print("Echoing image %s to %s" % (messageProtocolEntity.url, messageProtocolEntity.getFrom(False)))
        elif messageProtocolEntity.getMediaType() == "location":
            print("Echoing location (%s, %s) to %s" % (messageProtocolEntity.getLatitude(), messageProtocolEntity.getLongitude(), messageProtocolEntity.getFrom(False)))
        elif messageProtocolEntity.getMediaType() == "vcard":
            print("Echoing vcard (%s, %s) to %s" % (messageProtocolEntity.getName(), messageProtocolEntity.getCardData(), messageProtocolEntity.getFrom(False)))
| [
"svub@x900.svub.net"
] | svub@x900.svub.net |
6d669d90ebcebb6b738d3b848a30cd772f7906d8 | 9e335834e7be81068f001d5451781d5c1530ebbf | /CorePython/chapter15/my_card.py | 9a24af345fd158da0a3259cb26a492e7278bfb39 | [] | no_license | jtr109/SelfLearning | c1dbffa5485d0cd2f444ea510da62a8e3d269dbc | cc920ed507647762b9855385be76869adac89e7c | refs/heads/master | 2020-04-06T04:11:31.143688 | 2016-07-22T02:19:39 | 2016-07-22T02:19:39 | 58,049,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,058 | py | # !/usr/bin/python
# -*- coding: utf-8 -*-
import re
def is_legal(data):
    """Return True if *data* is a card number with a valid Luhn checksum.

    Accepted layouts (regex alternatives, tried via groups 1..3):
      1. 15-16 plain digits
      2. dddd-dddddd-ddddd      (4-6-5 grouping)
      3. dddd-dddd-dddd-dddd    (four groups of four)

    If the pattern does not match at all, re.match returns None and the
    .group() call raises AttributeError, caught and reported as False.
    NOTE(review): re.match only anchors at the start, so trailing garbage
    after a matching prefix is not rejected.
    """
    patt = '(\d{15,16})|(\d{4}-\d{6}-\d{5})|(\d{4}(-\d{4}){3})'
    try:
        for i in range(1, 4):
            if re.match(patt, data).group(i):
                pd = purify(data, i)  # strip dashes, explode into digit chars
                return legal_card(pd)  # Luhn checksum verdict
    except AttributeError:
        return False
def purify(data, i):
    """Normalize a card-number string into a list of digit characters.

    *i* is the regex group that matched in is_legal: group 1 means the
    number is already plain digits; any other group means it is
    dash-separated, so the dashes are removed first.
    """
    if i == 1:
        digits = data
    else:
        digits = ''.join(data.split('-'))
    return list(digits)
def legal_card(pd):
    """Validate a card number with the Luhn checksum.

    Args:
        pd: sequence of single-digit strings, most significant digit first.

    Returns:
        True if the Luhn checksum is valid (sum divisible by 10).

    Fix over the previous version: this no longer mutates the caller's
    list (the old code did an in-place ``pd.reverse()`` as a hidden side
    effect).
    """
    total = 0
    for position, digit in enumerate(reversed(pd)):
        value = int(digit)
        # Every second digit from the right (the check digit is position 0)
        # is doubled; 2d > 9 is reduced by 9, i.e. its digits are summed.
        if position % 2 == 1:
            value *= 2
            if value > 9:
                value -= 9
        total += value
    return total % 10 == 0
if __name__ == '__main__':
    # Interactive entry point. NOTE: Python 2 syntax (raw_input and the
    # print statement) — this script will not run under Python 3 as-is.
    card_number = raw_input('Your card number is:\n> ')
    if is_legal(card_number):
        print 'It is a legal number!'
    else:
        print 'Shit! It is an illegal number!!!'
"lyp_login@outlook.com"
] | lyp_login@outlook.com |
5c28d0b822397cf4e9ac5940634bfe9334e9babc | f2cc45e46a55c09570574eeaf358919491b4fae9 | /作业/7石头剪刀布.py | e48814caaf4f0f0203e12107fa0f806dc29be394 | [] | no_license | 2099454967/python_wang | f4f5e10891ed9be6112f8f3d0d6313975e2f914f | c9dd8a5f6a9864d2a3e61bad9a12cea566ebdec9 | refs/heads/master | 2020-03-11T17:43:43.288920 | 2018-04-24T05:27:53 | 2018-04-24T05:27:53 | 130,155,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | #1---石头
#2---剪刀
#3---布
import random
a = random.randint(1,3)
b = int(input('请输入1---石头 2---剪刀 3---布'))
if (a==1 and b==2) or (a==2 and b==3) or (a==3 and b==1):
print('玩家赢')
elif a==b:
print('平局')
else:
print('电脑赢')
| [
"2099454967@qq.com"
] | 2099454967@qq.com |
3388e852acf0ee62545acf09ca024839f5401c63 | 67c0d7351c145d756b2a49e048500ff361f7add6 | /xpresso/ai/admin/infra/packages/ubuntu/utility/docker_distribution_package.py | 81af3bf83624d6377f690deef24079eac561889b | [] | no_license | Krishnaarunangsu/XpressoDataHandling | ba339ae85b52e30715f47406ddb74966350848aa | 0637a465088b468d6fdb6d1bb6f7b087547cec56 | refs/heads/master | 2020-06-27T19:58:43.358340 | 2019-08-29T16:59:08 | 2019-08-29T16:59:08 | 200,035,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,425 | py | """Abstract base class for packages object"""
__all__ = ['DockerDistributionPackage']
__author__ = 'Naveen Sinha'
import os
import shutil
from xpresso.ai.admin.infra.packages.abstract_package import AbstractPackage
from xpresso.ai.admin.infra.packages.local_shell_executor import \
LocalShellExecutor
from xpresso.ai.core.utils.xpr_config_parser import XprConfigParser
from xpresso.ai.admin.controller.exceptions.xpr_exceptions import\
PackageFailedException
class DockerDistributionPackage(AbstractPackage):
"""
Installs Docker Distribution Services. It installs open source Harbor
project to manage the docker registry. This installs the Harbor project
only.
"""
CONFIG_SECTION = "docker_distribution"
HARBOR_CFG_FILE = "harbor_cfg_file"
HARBOR_COMPOSE_FILE = "harbor_compose_file"
HARBOR_TMP_FOLDER = "harbor_folder"
def __init__(self, config_path=XprConfigParser.DEFAULT_CONFIG_PATH,
executor=None):
if not executor:
executor = LocalShellExecutor()
super().__init__(executor)
self.config = XprConfigParser(config_path)["packages_setup"]
def status(self, **kwargs):
"""
Checks the status of existing running application
Returns:
True, if setup is successful. False Otherwise
Raises:
PackageFailedException
"""
docker_name_list = ["nginx", "harbor-portal", "harbor-jobservice",
"harbor-core", "harbor-adminserver", "", "registry",
"registryctl", "harbor-persistence", "redis", "harbor-log"]
(_, output, _) = self.execute_command_with_output(
"docker inspect -f '{{.State.Running}}' {}".format(
' '.join(docker_name_list)
)
)
if 'false' in output:
return False
return True
def install(self, **kwargs):
"""
Sets up docker distribution in a VM
Returns:
True, if setup is successful. False Otherwise
Raises:
PackageFailedException
"""
current_directory = os.getcwd()
harbor_folder = self.config[self.CONFIG_SECTION][self.HARBOR_TMP_FOLDER]
try:
if not os.path.exists(harbor_folder):
os.makedirs(harbor_folder)
except OSError:
self.logger.error("Can not create directory")
raise PackageFailedException("Harbor temp folder can't be created")
self.execute_command(
"wget https://storage.googleapis.com/harbor-releases/"
"release-1.7.0/harbor-online-installer-v1.7.5.tgz -O "
"{}/harbor.tgz".format(harbor_folder))
os.chdir(harbor_folder)
self.execute_command("tar xvf harbor.tgz".format())
extracted_folder = os.path.join(harbor_folder, "harbor")
try:
os.chdir(extracted_folder)
except OSError:
self.logger.error("Harbor Folder not found")
raise PackageFailedException("Harbor Folder not found")
os.chdir(current_directory)
shutil.copy(self.config[self.CONFIG_SECTION][self.HARBOR_CFG_FILE],
extracted_folder)
shutil.copy(self.config[self.CONFIG_SECTION][self.HARBOR_COMPOSE_FILE],
extracted_folder)
os.chdir(extracted_folder)
self.execute_command("/bin/bash install.sh")
os.chdir(current_directory)
return True
def uninstall(self, **kwargs):
"""
Remove docker distribution
Returns:
True, if setup is successful. False Otherwise
Raises:
PackageFailedException
"""
"""
cd $PWD/config/harbor
docker-compose up -d
"""
harbor_tmp_dir = self.config[self.CONFIG_SECTION][
self.HARBOR_TMP_FOLDER]
harbor_dir = os.path.join(harbor_tmp_dir, "harbor")
try:
os.chdir(harbor_dir)
except OSError:
self.logger("{} not found.".format(harbor_dir))
raise PackageFailedException(
"{} not found. Required for stopping".format(harbor_dir))
self.execute_command("/usr/local/bin/docker-compose up -d")
return True
def start(self, **kwargs):
return self.install()
def stop(self, **kwargs):
return self.uninstall()
| [
"arunangsutech@gmail.com"
] | arunangsutech@gmail.com |
2bfdde0bcb495db0d4fffb4b7621471a705ddae0 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/138/usersdata/210/47892/submittedfiles/volumeTV.py | d81c84c26ad1a6f289f01b721bd8d351fc33d0df | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py |
e=int(input('digite e:'))
f=int(input('digite f:'))
cont=e
for i in range(1,f+1,1):
a=int(input('digite alteração:'))
if (cont+a)<=100 and cont>=0:
cont=cont+a
elif (cont+a)>=100:
e=cont-100
cont=cont-e
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
d18fe269d04f86552cec9fbf8aa058fd36933eb2 | 4421cdef9d23204d392726db4e3886b8aec3072d | /Django/SDD/HEHE/migrations/0005_auto_20200913_1127.py | 058d909ce14f025e64ab9d7d4d3d2cb91ca114cb | [] | no_license | Sanketdave12/PRACTICE | f38b8eae569ee670db55610202ef6f3e60fade87 | 0f71359cf5326be73b3d9d4b1219bea4832cc194 | refs/heads/master | 2022-12-27T08:08:45.953842 | 2020-09-18T17:31:23 | 2020-09-18T17:31:23 | 296,587,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | # Generated by Django 3.1.1 on 2020-09-13 05:57
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('HEHE', '0004_delete_profile'),
]
operations = [
migrations.RemoveField(
model_name='notice',
name='branch',
),
migrations.DeleteModel(
name='Branch',
),
]
| [
"sddave1998@gmail.com"
] | sddave1998@gmail.com |
df748b62727262acccea6b2dec74421a653c6b2d | fae5487c5e50d0f42cd4fc82011c67df17b424c0 | /generatorify.py | ced38a537b4f15b00af87cc145d7e2ebb0febbc2 | [
"MIT"
] | permissive | eric-wieser/generatorify | b970dbda12f7e3dca481b29647a685294dc370c9 | 7bd759ecf88f836ece6cdbcf7ce1074260c0c5ef | refs/heads/master | 2020-06-25T05:37:03.755586 | 2019-07-28T07:03:47 | 2019-07-28T07:03:47 | 199,217,917 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,760 | py | import queue
import threading
import collections.abc
class generator_from_callback(collections.abc.Generator):
    """
    Wrap a callback-driven function as a generator.

    ``func`` must accept a single ``callback`` argument and invoke it
    repeatedly.  Each value passed to the callback is yielded from this
    generator, and the value given to ``send()`` is returned from the
    corresponding callback invocation.

    Note: this starts a background thread running ``func``; ``close()``
    (also called from ``__del__``) throws GeneratorExit into it so the
    thread can terminate and be joined.
    """
    def __init__(self, func):
        # Capacity-1 queues: the caller and the worker thread strictly
        # alternate, each put() matched by a get() on the other side.
        self._ready_queue = queue.Queue(1)
        self._done_queue = queue.Queue(1)
        self._done_holder = [False]
        # local to avoid reference cycles
        ready_queue = self._ready_queue
        done_queue = self._done_queue
        done_holder = self._done_holder
        def callback(value):
            # Runs on the worker thread: publish the "yielded" value, then
            # block until the caller resumes us with a send or throw.
            done_queue.put((False, value))
            cmd, val = ready_queue.get()
            if cmd == 'send':
                return val
            elif cmd == 'throw':
                raise val
            else:
                assert False # pragma: no cover
        def thread_func():
            # Wait for the first resume.  Mirror real generator semantics:
            # the first send() must be None, otherwise report the same
            # TypeError a just-started generator raises.
            while True:
                cmd, val = ready_queue.get()
                if cmd == 'send' and val is not None:
                    done_queue.put((True, TypeError("can't send non-None value to a just-started generator")))
                    continue
                break
            try:
                if cmd == 'throw':
                    raise val
                ret = func(callback)
                # Translate func's return value into StopIteration, like a
                # generator's `return` statement.
                raise StopIteration(ret) if ret is not None else StopIteration
            except BaseException as e:
                # Everything (StopIteration, GeneratorExit, real errors) is
                # handed back to the caller as an exception payload.
                done_holder[0] = True
                done_queue.put((True, e))
        self._thread = threading.Thread(target=thread_func)
        self._thread.start()
    def _put(self, *args):
        # Forward one ('send'|'throw', value) command to the worker and
        # wait for its reply; re-raise exceptions on the caller's side.
        if self._done_holder[0]:
            raise StopIteration
        self._ready_queue.put(args)
        is_exception, val = self._done_queue.get()
        if is_exception:
            try:
                raise val
            finally:
                # prevent val's traceback containing a reference cycle
                del val
        else:
            return val
    def send(self, value):
        # Generator protocol: resume the callback with `value`.
        return self._put('send', value)
    def throw(self, exc):
        # Generator protocol: raise `exc` inside the callback.
        return self._put('throw', exc)
    def __next__(self):
        return self.send(None)
    def close(self):
        # Throw GeneratorExit into the worker so func unwinds, then join.
        try:
            self.throw(GeneratorExit)
        except StopIteration:
            self._thread.join()
        except GeneratorExit:
            self._thread.join()
        except BaseException:
            self._thread.join()
            raise
        else:
            # yielded again, can't clean up the thread
            raise RuntimeError('Task with callback ignored GeneratorExit')
    def __del__(self):
        self.close()
class callback_from_generator(collections.abc.Callable):
    """
    Wrap a generator function into a callback-style function.

    Calling the wrapper with a ``callback`` drives a fresh generator:
    every yielded value is passed to the callback, the callback's return
    value is sent back into the generator, and callback exceptions are
    thrown into it.  The generator's return value becomes the wrapper's
    return value.
    """
    def __init__(self, generator_func):
        self._generator_func = generator_func
    def __call__(self, callback):
        g = self._generator_func()
        try:
            try:
                # Prime the generator to get its first yielded value.
                from_g = next(g)
            except StopIteration as si:
                return si.value
            # other exceptions propagate
            while True:
                try:
                    v_from_c = callback(from_g)
                except BaseException as e_from_c:
                    # Callback failed: give the generator a chance to
                    # handle (or re-raise) the exception.
                    try:
                        from_g = g.throw(e_from_c)
                    except StopIteration as si:
                        return si.value
                else:
                    # Feed the callback's result back into the generator.
                    try:
                        from_g = g.send(v_from_c)
                    except StopIteration as si:
                        return si.value
        finally:
            g.close()
| [
"wieser.eric@gmail.com"
] | wieser.eric@gmail.com |
52cef1b52cab1ee988800a17d8e10bdc0f556955 | ef8a358a3f54a26610eadcac6d0ebca406fa2578 | /undermythumb/files.py | 36b90251fc9af101bfca1712b36bdec09229c64d | [
"BSD-3-Clause"
] | permissive | GunioRobot/django-undermythumb | af8f2f0ac0ec65d6d4c777eaf380510b8e81bd1b | f70be02998cbe97e452d8e0d66e8efc276e77621 | refs/heads/master | 2020-12-25T09:18:40.332593 | 2011-11-08T20:17:56 | 2011-11-08T20:18:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,632 | py | from django.db.models.fields.files import ImageFieldFile
__all__ = ['ThumbnailFieldFile', 'ImageWithThumbnailsFieldFile']
class ThumbnailSet(object):
    """Lazy collection of the thumbnails configured on an image field.

    Built from an ImageWithThumbnailsFieldFile; each configured thumbnail
    is reachable as an attribute named after its config entry, and the
    set is iterable over all thumbnails.
    """
    def __init__(self, field_file):
        self.file = field_file  # the original image field file
        self.field = self.file.field  # the owning model field
        self.instance = self.file.instance  # the owning model instance
        self._cache = {}  # attname -> ThumbnailFieldFile
        self._populate()
    def _populate(self):
        # Build the cache once, and only when a stored image exists on a
        # saved instance to derive thumbnail filenames from.
        if not self._cache and self.file.name and self.instance.id:
            for options in self.field.thumbnails:
                # Config entries are (attname, renderer[, key]); the key
                # defaults to the attribute name.
                try:
                    attname, renderer, key = options
                except ValueError:
                    attname, renderer = options
                    key = attname
                ext = '.%s' % renderer.format
                name = self.field.get_thumbnail_filename(
                    instance=self.instance,
                    original=self.file,
                    key=key,
                    ext=ext)
                thumbnail = ThumbnailFieldFile(
                    attname,
                    renderer,
                    self.instance,
                    self.field,
                    name)
                self._cache[attname] = thumbnail
    def clear_cache(self):
        # Drop cached thumbnails; the next iteration repopulates them.
        self._cache = {}
    def __getattr__(self, name):
        # Resolve unknown attributes from the cache; unknown thumbnail
        # names yield None instead of raising AttributeError.
        try:
            return self._cache[name]
        except KeyError:
            return None
    def __iter__(self):
        # NOTE(review): dict.iteritems is Python 2 only.
        self._populate()
        for attname, value in self._cache.iteritems():
            yield value
class ThumbnailFieldFile(ImageFieldFile):
    """Read-only file wrapper for one generated thumbnail.

    ``attname`` is the accessor name used by the owning ThumbnailSet and
    ``renderer`` produces this thumbnail's image data; the remaining
    positional arguments are forwarded to ImageFieldFile.
    """
    def __init__(self, attname, renderer, *args, **kwargs):
        self.attname = attname
        self.renderer = renderer
        super(ThumbnailFieldFile, self).__init__(*args, **kwargs)
    def save(self):
        # Bug fix: the old code did `raise NotImplemented(...)`, but
        # NotImplemented is a non-callable sentinel constant, so calling it
        # raised "TypeError: 'NotImplementedType' object is not callable"
        # and discarded the message. NotImplementedError is the intended
        # exception type.
        raise NotImplementedError('Thumbnails cannot be saved directly.')
class ImageWithThumbnailsFieldFile(ImageFieldFile):
    """File container for an ``ImageWithThumbnailsField``.

    Wraps the original uploaded image and exposes its generated
    thumbnails through the ``thumbnails`` attribute.
    """
    def __init__(self, *args, **kwargs):
        super(ImageWithThumbnailsFieldFile, self).__init__(*args, **kwargs)
        self.thumbnails = ThumbnailSet(self)
    def save(self, name, content, save=True):
        """Save the original image, and its thumbnails.
        """
        super(ImageWithThumbnailsFieldFile, self).save(name, content, save)
        # Reset the set so it repopulates against the saved filename.
        self.thumbnails.clear_cache()
        # iterate over thumbnail
        for thumbnail in self.thumbnails:
            # Render each thumbnail from the uploaded content and persist
            # it under the thumbnail's derived name.
            rendered = thumbnail.renderer.generate(content)
            self.field.storage.save(thumbnail.name, rendered)
| [
"mattdennewitz@gmail.com"
] | mattdennewitz@gmail.com |
2e826a8d46d2e1211a1caa8a225498c5824b60a3 | 52a61caff0aeb434c32e5657e38762643e9f57dd | /DataStructuresAndAlgorithms/SearchAndSort/Sort/Count&CountingSort&Digital(Bitwise)Sorting/socks.py | 189a8837bd71a575381bfb18e4a7ee984d0e9355 | [] | no_license | AndrewErmakov/PythonTrainingBasics | 1480a6378d1ec59884760e2b3014ccc3d28f058f | 639e15bbfc54da762cb9e366497754cfece30691 | refs/heads/master | 2021-06-10T15:57:58.682335 | 2021-03-25T13:37:30 | 2021-03-25T13:37:30 | 153,678,760 | 0 | 0 | null | 2018-10-30T13:52:51 | 2018-10-18T19:45:47 | Python | UTF-8 | Python | false | false | 542 | py | len_table, count_socks, count_points = map(int, input().split())
def determination_thickness(length_table, number_socks, number_points):
count_list = [0] * (length_table + 1)
for _ in range(number_socks):
left_border, right_border = map(int, input().split())
for i in range(left_border, right_border + 1):
count_list[i] += 1
for j in range(number_points):
index_point = int(input())
print(count_list[index_point])
determination_thickness(len_table, count_socks, count_points)
| [
"andrew.67@list.ru"
] | andrew.67@list.ru |
4c350d9b6720d62fa21156d31748be72346a2283 | ca3150c69ef477ea53902c51d3840195262f2903 | /ISStreamer-r-3-bucket.py | 797f6edf0693846d02d60e183933d15353f5a284 | [] | no_license | chaeplin/dash-mainnet | 66c8df6f4a6df25c53e9ba1572a39e12d9e61daf | 10891c210a3cf40f2e052ee9d2657a97a71efba6 | refs/heads/master | 2021-01-19T21:15:49.790385 | 2017-11-14T20:25:52 | 2017-11-14T20:25:52 | 82,476,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,549 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import io, os, sys
import simplejson as json
import datetime
import time
import redis
from ISStreamer.Streamer import Streamer
# bucket1 = dash121
iss_bucket_name1 = 'dash121'
iss_bucket_key1 = 'xxx'
iss_access_key1 = 'xxxx'
QUE_NAME1 = 'INITIALSTATE_PUSH' + iss_bucket_name1
# bucket2 = testnet
iss_bucket_name2 = 'testnet'
iss_bucket_key2 = 'xxxx'
iss_access_key2 = 'xxxxxx'
QUE_NAME2 = 'INITIALSTATE_PUSH' + iss_bucket_name2
# bucket3 = ticker
iss_bucket_name3 = 'ticker'
iss_bucket_key3 = 'xxxx'
iss_access_key3 = 'xxxx'
QUE_NAME3 = 'INITIALSTATE_PUSH' + iss_bucket_name3
# streamer
streamer1 = Streamer(bucket_key=iss_bucket_key1, access_key=iss_access_key1)#, debug_level=2)
streamer2 = Streamer(bucket_key=iss_bucket_key2, access_key=iss_access_key2)#, debug_level=2)
streamer3 = Streamer(bucket_key=iss_bucket_key3, access_key=iss_access_key3)#, debug_level=2)
# redis
POOL = redis.ConnectionPool(host='192.168.10.2', port=16379, db=0)
r = redis.StrictRedis(connection_pool=POOL)
# main
try:
r.ping()
except Exception as e:
print(e)
sys.exit()
try:
while 1:
quelist = (QUE_NAME1, QUE_NAME2, QUE_NAME3)
jobque = r.brpop(quelist, 5)
if jobque:
redis_val = json.loads(jobque[1].decode("utf-8"))
bucket_name = redis_val.get('bucket_name', 'dash121')
kprefix = redis_val.get('key_prefix')
epoch00 = redis_val.get('epoch')
bucket = redis_val.get('bucket')
print(epoch00, bucket_name, kprefix, bucket)
if bucket_name == iss_bucket_name1:
streamer1.log_object(bucket, key_prefix=kprefix, epoch=epoch00)
elif bucket_name == iss_bucket_name2:
streamer2.log_object(bucket, key_prefix=kprefix, epoch=epoch00)
elif bucket_name == iss_bucket_name3:
streamer3.log_object(bucket, key_prefix=kprefix, epoch=epoch00)
time.sleep(0.25)
else:
b = { "tstamp": time.time() }
streamer1.log_object(b)
streamer2.log_object(b)
streamer3.log_object(b)
streamer1.flush()
time.sleep(0.25)
streamer2.flush()
time.sleep(0.25)
streamer3.flush()
time.sleep(0.25)
except Exception as e:
print(e)
sys.exit()
except KeyboardInterrupt:
print('[dequeue] intterupted by keyboard')
sys.exit()
| [
"chaeplin@gmail.com"
] | chaeplin@gmail.com |
7dfe6483c3bf294fe7a02d964523db051a1eb588 | f166278e5e626c142245e4a9164ab4ed610a5cd4 | /apps/utils/requests_wrapper.py | 8d554e89487b84d6e723d8a0a51469e5668965e3 | [
"MIT"
] | permissive | wumulong/advance_django_example | 4cffd6de2eb9fdccefff7b995317a81e63b459be | 4832438a7db1065f7351a6cf4d4580ca1b6fffeb | refs/heads/master | 2021-06-24T13:34:31.873207 | 2017-09-11T15:13:40 | 2017-09-11T15:13:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,514 | py | import requests
def requests_get(protocal, host, port, url, headers={}, params={}):
reqeusts_result = {}
if port:
requests_url = '{0}://{1}:{2}{3}'.format(protocal, host, port, url)
else:
requests_url = '{0}://{1}{2}'.format(protocal, host, url)
try:
r = requests.get(requests_url, headers=headers, params=params)
r.raise_for_status()
json_result = r.json()
reqeusts_result['data'] = json_result
reqeusts_result['errors'] = ''
except requests.exceptions.RequestException as e:
json_result = r.json()
if json_result['errors']:
reqeusts_result['errors'] = json_result['errors']
else:
reqeusts_result['errors'] = e
return reqeusts_result
def requests_post(protocal, host, port, url, headers={}, json={}):
reqeusts_result = {}
if port:
requests_url = '{0}://{1}:{2}{3}'.format(protocal, host, port, url)
else:
requests_url = '{0}://{1}{2}'.format(protocal, host, url)
try:
r = requests.post(requests_url, headers=headers, json=json)
r.raise_for_status()
json_result = r.json()
reqeusts_result['data'] = json_result
reqeusts_result['errors'] = ''
except requests.exceptions.RequestException as e:
json_result = r.json()
if json_result['errors']:
reqeusts_result['errors'] = json_result['errors']
else:
reqeusts_result['errors'] = e
return reqeusts_result
| [
"lsdlab@icloud.com"
] | lsdlab@icloud.com |
320615428a22aa133469f7c75f2279ba09ba1719 | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/list_az2_migrate_request.py | 82455211ce51806b36c1f4d66474c9564882e62b | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,026 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListAz2MigrateRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'instance_id': 'str'
}
attribute_map = {
'instance_id': 'instance_id'
}
def __init__(self, instance_id=None):
"""ListAz2MigrateRequest - a model defined in huaweicloud sdk"""
self._instance_id = None
self.discriminator = None
self.instance_id = instance_id
@property
def instance_id(self):
"""Gets the instance_id of this ListAz2MigrateRequest.
实例ID。
:return: The instance_id of this ListAz2MigrateRequest.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""Sets the instance_id of this ListAz2MigrateRequest.
实例ID。
:param instance_id: The instance_id of this ListAz2MigrateRequest.
:type: str
"""
self._instance_id = instance_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListAz2MigrateRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
deb19ce4bc3e611f561d6ea04d4999f2e9b15fc3 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /bJxNHk7aovkx8Q776_10.py | 69e76a086c61099322e2b65ff967b675005faf40 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py |
def gold_distribution(gold):
totals, p = [0, 0], 0
while gold:
idx = -1 if gold[-1] > gold[0] else 0
totals[p] += gold.pop(idx)
p = 1 - p
return totals
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
9e0aa6a22faffb9424db0cf20701f012fab75424 | 59b72b8f662cd605b3ce31f54779c17e5ca066d0 | /interview_q/leet_code/解码方法.py | 84496ba3e188f5a6b9f0ffc14b200e43e24c76c6 | [] | no_license | dongyang2/hello-world | c1f5853ccafd6b8f23836192547ab36f898e0891 | 1f859b53e2b21ed5a648da09b84950f03ec1b370 | refs/heads/master | 2022-12-11T22:07:22.853912 | 2022-11-24T03:52:35 | 2022-11-24T03:52:35 | 119,025,960 | 0 | 0 | null | 2018-01-26T10:09:58 | 2018-01-26T08:28:10 | null | UTF-8 | Python | false | false | 4,137 | py | # https://leetcode-cn.com/problems/decode-ways/
# coding:utf-8
# Python 3
# 数字与字母一一对应,求转换结果。解法参考本文件夹下的爬楼梯和电话号码的字母组合。
"""
此题和爬楼梯是同一个类型的问题,难点在于其添加了许多限制条件,只要避开限制条件就可以完美解题了
每次递进,可以选取一个数也可以选取两个数:
s[i] != '0'
如果 s[i-1]s[i] <= 26, 则 dp[i] = dp[i-1] + dp[i-2]
如果 s[i-1]s[i] > 26, 则 dp[i] = dp[i-1], 这是因为 s[i-1]s[i] 组成的两位数无法翻译
s[i] == '0'
如果 s[i-1]s[i] <= 26, 则 dp[i] = dp[i-2], 这是因为 s[i] 无法翻译
还有一些情景直接使得整个序列无法被翻译:
相邻的两个 ‘0’
以 ‘0’ 结尾的大于 26 的数字
去除这些限制条件,此题就是爬楼梯的问题了,一次可以爬一步,也可以爬两步,问有多少中方式到达终点。
作者:nfgc
链接:https://leetcode-cn.com/problems/decode-ways/solution/dong-tai-gui-hua-tu-jie-by-nfgc/
来源:力扣(LeetCode)
著作权归作者所有。商业转载请联系作者获得授权,非商业转载请注明出处。
"""
# 除了解题思路以外,还要注意‘10’,‘00’, '101','301'这样的边界输入
def clim(s, dic):
"""有条件的爬楼梯,自底向上法"""
ln = len(s)
if ln < 1 or s[0] == '0':
return 0
if ln == 1:
if s == '0':
return 0
return [dic[s]]
li1 = [dic[s[0]]]
li2 = decode_2char(s[:2], dic)
if li2 is False:
return 0
for i in range(2, ln):
tmp = [] # 充当爬楼梯中的final
two_char = s[i - 1] + s[i]
if s[i] != '0':
if int(two_char) <= 26 and s[i - 1] != '0':
tmp = decode(two_char, li1, tmp, dic)
tmp = decode(s[i], li2, tmp, dic)
else:
tmp = decode(s[i], li2, tmp, dic)
else:
if s[i - 1] == '1' or s[i - 1] == '2':
tmp = decode(two_char, li1, tmp, dic)
else:
return 0
li1, li2 = li2, tmp
return li2
def decode_2char(n, dic):
# 解码两位数字
s1 = n[0]
s2 = n[1]
if s2 == '0':
if s1 == '1' or s1 == '2':
return [dic[n]]
else:
return False
li = [dic[n[0]] + dic[n[1]]]
if int(n) <= 26:
li.append(dic[n])
return li
def decode(s: str, li: list, tmp: list, dic):
for i in li:
tmp.append(i + dic[s])
return tmp
def clim_pure_num(s):
"""有条件的爬楼梯,自底向上纯计数方式,不收集元素"""
ln = len(s)
if ln < 1 or s[0] == '0':
return 0
if ln == 1:
return 1
f1 = 1
f2 = decode_2char_pure_num(s[:2])
if f2 is False:
return 0
for i in range(2, ln):
two_char = s[i - 1] + s[i]
if s[i] != '0':
if int(two_char) <= 26 and s[i - 1] != '0':
final = f1 + f2
else:
final = f2
else:
if s[i - 1] == '1' or s[i - 1] == '2':
final = f1
else:
return 0
f1, f2 = f2, final
return f2
def decode_2char_pure_num(n):
# 解码两位数字,配合纯数字版
s1 = n[0]
s2 = n[1]
if s2 == '0':
if s1 == '1' or s1 == '2':
return 1
else:
return False
if int(n) <= 26:
return 2
else:
return 1
def main():
dic = dict()
count = 1
upper_abc = [chr(65 + x) for x in range(26)]
for i in upper_abc:
dic[str(count)] = i
count += 1
# print(dic)
n = "27"
# li = clim(n, dic)
li = clim_pure_num(n)
if li == 0:
raise ValueError('Please input valid number.')
print(li)
if __name__ == '__main__':
import time
print('-' * 15, 'Start', time.ctime(), '-' * 15, '\n')
main()
# print(str(int('00000000235600000')))
print('%s%s %s %s %s' % ('\n', '-' * 16, 'End', time.ctime(), '-' * 16))
| [
"dongyangzhao@outlook.com"
] | dongyangzhao@outlook.com |
73a708a16278b09791fad51bf5d305698e82b80d | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/pg_2352+180/sdB_PG_2352+180_coadd.py | 4f361c9b0b46950c02cba15a5912a175c25af4eb | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[358.821833,18.337653], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_PG_2352+180/sdB_PG_2352+180_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_PG_2352+180/sdB_PG_2352+180_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
e391925b4696862a60c04799026bcb83b93c85ed | 6e507e231d37d0b61d70d6694ffc928c1c638973 | /lab07password_generator.py | e140d562109bb3555972e6c8641cf57aee410451 | [] | no_license | pjz987/pdx_code_intro_class | 7a4998be23b90883aad55664ceb97baffe3fcf92 | e85c2e01718e75124b956b924af01e87cdd95ee1 | refs/heads/master | 2020-09-12T21:25:50.152682 | 2019-11-18T23:10:38 | 2019-11-18T23:10:38 | 222,561,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,271 | py | '''
filename : lab07password_generator.py
Lab 7: Password Generator
Let's generate a password ten characters long using a loop (while loop or for loop) and random.choice, this will be a string of random characters.
'''
#always import random first if needed
import random
#using import string to quickly call up alphabet/numbers
import string
pass_length = input("How many characters do you want in your password?\n")
pass_length_int = int(pass_length)
#^I needed int() to turn the string pass_length into the integer pass_length_int
password = ''
for characters in range(pass_length_int):
characters = string.ascii_lowercase + string.digits + string.punctuation + string. ascii_uppercase
#^ was characters = [string.ascii_lowercase + string.digits] the brackets made it not work right
password = password + random.choice(characters)
print(password)
'''
Advanced Version 1
Allow the user to choose how many characters the password will be.
##I went back and added this code starting on line 14.##
'''
'''
Advanced Version 2
Allow the user to choose how many letters, numbers, and punctuation characters they want in their password. Mix everything up using list(), random.shuffle(), and ''.join().
##^figure out what this is about some other day^##
'''
| [
"pwj2012@gmail.com"
] | pwj2012@gmail.com |
130b6512484065d4534c3cc77c76a9869d44fb1d | 9dcac6f93c2e460009e3355976989adf3bf1af68 | /PrintSum.py | 3bfb387f9a676c65a8c7e920354df4dc5c3b67e6 | [] | no_license | lpham4/PythonPractice | 99a4db621a6e524b2264314f1d4d47e2474260f9 | fac0931d09441ad03c4b34abae01f928342d53d7 | refs/heads/main | 2023-01-02T11:56:39.192524 | 2020-10-21T23:45:52 | 2020-10-21T23:45:52 | 306,173,850 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | # Class: 1321L
# Section: 02
# Term: Fall 2018
# Instructor: Malcolm
# Name: Ly Pham
# Lab: Python
integer = int(input('Enter an integer between 1 and 100: '))
print('You entered:', integer)
if integer < 1 or integer > 100:
print('Invalid input. Try again.')
else:
count = 0
sum = 0
while count <= integer:
sum += count
count += 1
print('Sum of values: ', sum)
| [
"lpham4@students.kennesaw.edu"
] | lpham4@students.kennesaw.edu |
0f5ce3b78bff791a5a68f6e7abc26fc45e210335 | 6f6997efe1a15d57688c12ff0197790fb2eac6bc | /database/old/Init_analysisdb.py | 1c3641363d82c7f49cc1994332fc27e6559afddc | [] | no_license | montanaviking/waferprobe | 29fa5f0eb07e60820162916e48059f63374902c5 | fb2786b376153f9b6e9495b6faf3ee5960f90a06 | refs/heads/master | 2022-11-06T10:57:01.539733 | 2020-06-19T23:47:59 | 2020-06-19T23:47:59 | 273,601,408 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | # Phil Marsh Carbonics
# initialize database
from sqlalchemy.ext.declarative import declarative_base
from datetime import datetime
from sqlalchemy import (Column, Integer, Numeric, String, DateTime, ForeignKey, Boolean, create_engine)
from sqlalchemy.orm import relationship, backref, sessionmaker
Base=declarative_base()
class DataAccessLayer:
def __init__(self):
self.engine=None
#self.conn_string="mysql+pymysql:///montanaviking:nova@localhost/test"
def connect(self):
self.engine=create_engine("mysql+pymysql://montanaviking:nova@localhost/Carbonics_test")
Base.metadata.create_all(self.engine)
self.Session=sessionmaker(bind=self.engine)
dal=DataAccessLayer() | [
"microcraftx@gmail.com"
] | microcraftx@gmail.com |
4f98779fdde462765812a9d5470c61c3ca5eb16d | 442dae0500db1653541100292a356ab6452363da | /alchemy/test/autoencoder_test.py | 27b9d8e0d4d73dda490f172cd0b0abfcaf1ae4a2 | [
"MIT"
] | permissive | williamd4112/alchemy | b5da5092abd29f2541f0e91c4ed5da033318b9f5 | 6ca509aa2e332170666a67a53bea22f7749c2bc7 | refs/heads/master | 2021-09-07T22:57:15.198254 | 2018-03-02T15:30:27 | 2018-03-02T15:30:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 955 | py | # -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
import unittest
from alchemy import layers, utils
class AutoEncoderTest(unittest.TestCase):
def test_conv2d_autoencoder(self):
tf.reset_default_graph()
inputs_ph = tf.placeholder(tf.float32, [None, 8, 8, 1])
scope = 'autoencoder'
strides = [1, 1]
latent_output, encoder, shapes = layers.conv2d_encoder(
inputs_ph, [2, 2], [2, 2], strides, 'encoder')
outputs = layers.conv2d_decoder(
latent_output, encoder, shapes, strides, 'decoder')
self.assertTrue(
utils.all_equal(
outputs.get_shape().as_list(),
inputs_ph.get_shape().as_list()))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
input_image = np.zeros((1, 8, 8, 1))
output_image = sess.run(outputs, feed_dict={inputs_ph: input_image})
self.assertTrue(np.all(np.equal(input_image, output_image)))
| [
"samwenke@gmail.com"
] | samwenke@gmail.com |
6f5a06f1011e0d3399391a74601845265556fd8b | f6348e051252ad0be630a815e33a3f85fbe64c69 | /capture/noworkflow/now/persistence/models/environment_attr.py | a367c23b7af82ec6f6631b0f5bae34e22e5c1dad | [
"MIT"
] | permissive | stefan-grafberger/noworkflow | 74f3e2fd0358621ac785a5f1441645be2d69688b | cbb8964eba7d58a5e87f96fb5bb91ac452b80763 | refs/heads/master | 2023-01-06T02:51:27.881742 | 2020-04-15T21:18:29 | 2020-04-15T21:18:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,083 | py | # Copyright (c) 2016 Universidade Federal Fluminense (UFF)
# Copyright (c) 2016 Polytechnic Institute of New York University.
# This file is part of noWorkflow.
# Please, consult the license terms in the LICENSE file.
"""Environment Attribute Model"""
from __future__ import (absolute_import, print_function,
division, unicode_literals)
from sqlalchemy import Column, Integer, Text
from sqlalchemy import PrimaryKeyConstraint, ForeignKeyConstraint
from ...utils.prolog import PrologDescription, PrologTrial, PrologRepr
from .base import AlchemyProxy, proxy_class
@proxy_class
class EnvironmentAttr(AlchemyProxy):
"""Represent an environment attribute"""
__tablename__ = "environment_attr"
__table_args__ = (
PrimaryKeyConstraint("trial_id", "id"),
ForeignKeyConstraint(["trial_id"], ["trial.id"], ondelete="CASCADE"),
)
trial_id = Column(Integer, index=True)
id = Column(Integer, index=True) # pylint: disable=invalid-name
name = Column(Text)
value = Column(Text)
# Relationship attributes (see relationships.py):
# trial: 1 Trial
prolog_description = PrologDescription("environment", (
PrologTrial("trial_id", link="trial.id"),
PrologRepr("name"),
PrologRepr("value"),
), description=(
"informs that a environment attribute (*name*)\n"
"was defined with *value*\n"
"in a given trial (*trial_id*)."
))
@property
def brief(self):
"""Brief description of environment attribute"""
return self.name
def __hash__(self):
return hash((self.name, self.value))
def __eq__(self, other):
return self.name == other.name
def show(self, _print=lambda x, offset=0: print(x)):
"""Show object
Keyword arguments:
_print -- custom print function (default=print)
"""
_print("{0.name}: {0.value}".format(self))
def __repr__(self):
return "Environment({0.trial_id}, {0.name}, {0.value})".format(self)
| [
"joaofelipenp@gmail.com"
] | joaofelipenp@gmail.com |
ef497c88746e8bec9bf1fe0a637dd05339029c94 | 73361fc6f7ecd9a19359a828b2574499a991bde4 | /gallery2/alembic/versions/3b387b077506_tags.py | 0e50457320a7e40f4de8f43b0812513c399eb1f1 | [] | no_license | danjac/gallery2 | 3a28cd3ca364a30eaf277bfd9db3cac72dd2463a | ff8c50bdfc30d9ac5fff910589b7f976a4b40bcf | refs/heads/master | 2020-05-19T07:59:00.047308 | 2014-01-15T13:44:06 | 2014-01-15T13:44:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,272 | py | """tags
Revision ID: 3b387b077506
Revises: 48641cbf69d7
Create Date: 2014-01-11 11:01:56.791153
"""
# revision identifiers, used by Alembic.
revision = '3b387b077506'
down_revision = '48641cbf69d7'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('tags',
sa.Column('name', sa.Unicode(length=200), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('tagged_images',
sa.Column('image_id', sa.Integer(), nullable=True),
sa.Column('tag_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['image_id'], [u'images.id'], ),
sa.ForeignKeyConstraint(['tag_id'], [u'tags.id'], )
)
op.add_column(u'images', sa.Column('tagstring', sa.UnicodeText(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column(u'images', 'tagstring')
op.drop_table('tagged_images')
op.drop_table('tags')
### end Alembic commands ###
| [
"danjac354@gmail.com"
] | danjac354@gmail.com |
372bd628e91fbeb9aec915bafa8af0824234caf7 | a0e777ea7e0d00c061068db132a30a8fa545cc75 | /FluentPython/coro_exc_demo.py | d91049232e9f0fe4694695ec88f001a1a1574ad1 | [] | no_license | aadisetyaa/Python-Cookbook | 87215b64d2d3631d6b18e90a68a09400e7d80919 | a8df0343a39725312686423296bfd860dbaf70ad | refs/heads/master | 2022-04-08T13:41:27.255352 | 2017-11-27T03:54:29 | 2017-11-27T03:54:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 821 | py | class DemoException(Exception):
"""An exception type for the demonstration."""
def demo_exc_handling():
print('-> coroutine started')
while True:
try:
x = yield
except DemoException:
print('*** DemoException handled. Continuing...')
else:
print('-> coroutine received: {!r}'.format(x))
raise RuntimeError('This line should never run.')
#exc_coro = demo_exc_handling()
#next(exc_coro)
#exc_coro.send(11)
#exc_coro.send(22)
#exc_coro.close()
from inspect import getgeneratorstate
#print(getgeneratorstate(exc_coro))
#16-10
#exc_coro = demo_exc_handling()
#next(exc_coro)
#exc_coro.send(11)
#exc_coro.throw(DemoException)
#print(getgeneratorstate(exc_coro))
#16-11
exc_coro = demo_exc_handling()
next(exc_coro)
exc_coro.send(11)
exc_coro.throw(ZeroDivisionError)
print(getgeneratorstate(exc_coro))
| [
"wpr101@hotmail.com"
] | wpr101@hotmail.com |
33c243c408ec59eec4ffca44e97be6a52731d741 | 8d753bb8f19b5b1f526b0688d3cb199b396ed843 | /osp_sai_2.1.8/system/apps/rpcapi/vcl/config.py | 9b18d6ac23705afdaa4d0d8f5c3548bd9a168816 | [] | no_license | bonald/vim_cfg | f166e5ff650db9fa40b564d05dc5103552184db8 | 2fee6115caec25fd040188dda0cb922bfca1a55f | refs/heads/master | 2023-01-23T05:33:00.416311 | 2020-11-19T02:09:18 | 2020-11-19T02:09:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | #!/usr/bin/python
#-*- coding: utf-8 -*-
is_dev = True
remote = '10.10.39.167'
dut_regexp = r'DUT\d#'
dut_stdout_pwd_info = r'Password:'
shell_pwd = '!@#'
cmd_regexp = r'\[\w+@.+\]\$'
login_stdout_username_info = r'Username:'
login_stdout_pwd_info = r'Password:'
login_username = 'admin'
login_pwd = 'admin'
RPC_ERROR_SUCCESS = 0
RPC_ERROR_CLI_TIMEOUT = -1000
RPC_ERROR_CLI_FAIL = -1001
RPC_ERROR_CLI_AUTH_FAIL = -1002
RPC_ERROR_CLI_AUTH_LOW = -1003
RPC_ERROR_CLI_NOT_SUPPORT = -1004
RPC_ERROR_CHAR_NOT_SUPPORT = -1005
RPC_ERROR_STRING_NOT_SUPPORT = -1006
RPC_ERROR_MESSAGE_NOT_SUPPORT = -1007
| [
"zhwwan@gmail.com"
] | zhwwan@gmail.com |
ab0c31da7017d28b92c6f4359ffeff58c6e480e1 | af4b5830b2a23d1f3d126297c7eb057bb3f8e42f | /pymatflow/cp2k/base/pw_dft_iterative_solver.py | add890dd42de47199b7e0d715dad071d6063957a | [
"MIT"
] | permissive | mukhtarbayerouniversity/pymatflow | de2b2d573ceed68c1dd3c149c538588394029137 | 9ab61e56659519cd6c83d5bd32da1262f44da065 | refs/heads/master | 2023-02-13T01:50:32.993401 | 2021-01-13T15:19:36 | 2021-01-13T15:19:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,063 | py | #!/usr/bin/evn python
# _*_ coding: utf-8 _*_
import numpy as np
import sys
import os
import shutil
"""
usage:
"""
# ============================================
# CP2K / PW_DFT / ITERATIVE_SOLVER
#=============================================
class cp2k_pw_dft_iterative_solver:
"""
"""
def __init__(self):
"""
"""
self.params = {
}
self.status = False
# basic setting
def to_input(self, fout):
"""
fout: a file stream for writing
"""
fout.write("\t\t&ITERATIVE_SOLVER\n")
for item in self.params:
if self.params[item] is not None:
fout.write("\t\t\t%s %s\n" % (item, self.params[item]))
fout.write("\t\t&END ITERATIVE_SOLVER\n")
def set_params(self, params):
for item in params:
if len(item.split("-")) == 3:
self.params[item.split("-")[-1]] = params[item]
else:
pass
| [
"deqi_tang@163.com"
] | deqi_tang@163.com |
4595c1a0ffd3faf673a9269141df638ad665012f | 37d5b97c54e48f3de690724c01f3b14248c2b194 | /origin/cartpole-a3c.py | 42cb24898277d39b5f3ade93e1e7f38d89d786f2 | [] | no_license | verystrongjoe/a3c-sketch | ae9e60ee87155b7991a7fab4dfa55702e4cc56e9 | 7c8d9bfc76396ef652b609f1b366f98807adbf53 | refs/heads/master | 2020-03-06T14:41:04.358586 | 2018-05-08T09:37:32 | 2018-05-08T09:37:32 | 126,940,203 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,911 | py | import threading
import numpy as np
import tensorflow as tf
import pylab
import time
import gym
from keras.layers import Dense, Input
from keras.models import Model
from keras.optimizers import Adam
from keras import backend as K
# global variables for threading
episode = 0
scores = []
EPISODES = 2000
# This is A3C(Asynchronous Advantage Actor Critic) agent(global) for the Cartpole
# In this example, we use A3C algorithm
class A3CAgent:
def __init__(self, state_size, action_size, env_name):
# get size of state and action
self.state_size = state_size
self.action_size = action_size
# get gym environment name
self.env_name = env_name
# these are hyper parameters for the A3C
self.actor_lr = 0.001
self.critic_lr = 0.001
self.discount_factor = .99
self.hidden1, self.hidden2 = 24, 24
self.threads = 8
# create model for actor and critic network
self.actor, self.critic = self.build_model()
# method for training actor and critic network
self.optimizer = [self.actor_optimizer(), self.critic_optimizer()]
self.sess = tf.InteractiveSession()
K.set_session(self.sess)
self.sess.run(tf.global_variables_initializer())
# approximate policy and value using Neural Network
# actor -> state is input and probability of each action is output of network
# critic -> state is input and value of state is output of network
# actor and critic network share first hidden layer
def build_model(self):
state = Input(batch_shape=(None, self.state_size))
shared = Dense(self.hidden1, input_dim=self.state_size, activation='relu', kernel_initializer='glorot_uniform')(state)
actor_hidden = Dense(self.hidden2, activation='relu', kernel_initializer='glorot_uniform')(shared)
action_prob = Dense(self.action_size, activation='softmax', kernel_initializer='glorot_uniform')(actor_hidden)
value_hidden = Dense(self.hidden2, activation='relu', kernel_initializer='he_uniform')(shared)
state_value = Dense(1, activation='linear', kernel_initializer='he_uniform')(value_hidden)
actor = Model(inputs=state, outputs=action_prob)
critic = Model(inputs=state, outputs=state_value)
actor._make_predict_function()
critic._make_predict_function()
actor.summary()
critic.summary()
return actor, critic
# make loss function for Policy Gradient
# [log(action probability) * advantages] will be input for the back prop
# we add entropy of action probability to loss
def actor_optimizer(self):
action = K.placeholder(shape=(None, self.action_size))
advantages = K.placeholder(shape=(None, ))
policy = self.actor.output
good_prob = K.sum(action * policy, axis=1)
eligibility = K.log(good_prob + 1e-10) * K.stop_gradient(advantages)
loss = -K.sum(eligibility)
entropy = K.sum(policy * K.log(policy + 1e-10), axis=1)
actor_loss = loss + 0.01*entropy
optimizer = Adam(lr=self.actor_lr)
updates = optimizer.get_updates(self.actor.trainable_weights, [], actor_loss)
train = K.function([self.actor.input, action, advantages], [], updates=updates)
return train #, optimizer.get_gradients(actor_loss, self.actor.trainable_weights)
# make loss function for Value approximation
def critic_optimizer(self):
discounted_reward = K.placeholder(shape=(None, ))
value = self.critic.output
loss = K.mean(K.square(discounted_reward - value))
optimizer = Adam(lr=self.critic_lr)
updates = optimizer.get_updates(self.critic.trainable_weights, [], loss)
train = K.function([self.critic.input, discounted_reward], [], updates=updates)
return train
# make agents(local) and start training
def train(self):
# self.load_model('./save_model/cartpole_a3c.h5')
agents = [Agent(i, self.actor, self.critic, self.optimizer, self.env_name, self.discount_factor,
self.action_size, self.state_size) for i in range(self.threads)]
for agent in agents:
agent.start()
while True:
time.sleep(20)
plot = scores[:]
pylab.plot(range(len(plot)), plot, 'b')
pylab.savefig("./save_graph/cartpole_a3c.png")
self.save_model('./save_model/cartpole_a3c.h5')
def save_model(self, name):
self.actor.save_weights(name + "_actor.h5")
self.critic.save_weights(name + "_critic.h5")
def load_model(self, name):
self.actor.load_weights(name + "_actor.h5")
self.critic.load_weights(name + "_critic.h5")
# This is Agent(local) class for threading
class Agent(threading.Thread):
def __init__(self, index, actor, critic, optimizer, env_name, discount_factor, action_size, state_size):
threading.Thread.__init__(self)
self.states = []
self.rewards = []
self.actions = []
self.index = index
self.actor = actor
self.critic = critic
self.optimizer = optimizer
self.env_name = env_name
self.discount_factor = discount_factor
self.action_size = action_size
self.state_size = state_size
# Thread interactive with environment
def run(self):
global episode
env = gym.make(self.env_name)
while episode < EPISODES:
state = env.reset()
score = 0
while True:
action = self.get_action(state)
next_state, reward, done, _ = env.step(action)
score += reward
self.memory(state, action, reward)
state = next_state
if done:
episode += 1
print("episode: ", episode, "/ score : ", score)
scores.append(score)
self.train_episode(score != 500)
break
# In Policy Gradient, Q function is not available.
# Instead agent uses sample returns for evaluating policy
def discount_rewards(self, rewards, done=True):
discounted_rewards = np.zeros_like(rewards)
running_add = 0
if not done:
running_add = self.critic.predict(np.reshape(self.states[-1], (1, self.state_size)))[0]
for t in reversed(range(0, len(rewards))):
running_add = running_add * self.discount_factor + rewards[t]
discounted_rewards[t] = running_add
return discounted_rewards
# save <s, a ,r> of each step
# this is used for calculating discounted rewards
def memory(self, state, action, reward):
self.states.append(state)
act = np.zeros(self.action_size)
act[action] = 1
self.actions.append(act)
self.rewards.append(reward)
# update policy network and value network every episode
def train_episode(self, done):
discounted_rewards = self.discount_rewards(self.rewards, done)
values = self.critic.predict(np.array(self.states))
values = np.reshape(values, len(values))
advantages = discounted_rewards - values
self.optimizer[0]([self.states, self.actions, advantages])
self.optimizer[1]([self.states, discounted_rewards])
self.states, self.actions, self.rewards = [], [], []
def get_action(self, state):
policy = self.actor.predict(np.reshape(state, [1, self.state_size]))[0]
return np.random.choice(self.action_size, 1, p=policy)[0]
if __name__ == "__main__":
env_name = 'CartPole-v1'
env = gym.make(env_name)
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
env.close()
global_agent = A3CAgent(state_size, action_size, env_name)
global_agent.train() | [
"verystrongjoe@gmail.com"
] | verystrongjoe@gmail.com |
d6f6ff67d8a4ab492c8c24d10ca6a25088ee5e15 | de59ece5d773d8607ba7afe747088ff07062494c | /py-core/tuples/tuples.py | 6198cdf35d43671e60083fd9e761259bb727ab13 | [] | no_license | loggar/py | 4094c6919b040dfc0bb5453dc752145b5f3b46ba | 1116969fa6de00bbc30fe8dcf6445aa46190e506 | refs/heads/master | 2023-08-21T16:47:41.721298 | 2023-08-14T16:12:27 | 2023-08-14T16:12:27 | 114,955,782 | 0 | 0 | null | 2023-07-20T15:11:04 | 2017-12-21T03:01:54 | Python | UTF-8 | Python | false | false | 556 | py | tuple1 = ('abcd', 786, 2.23, 'john', 70.2)
tinytuple = (123, 'john')
print(tuple1) # Prints complete tuple
print(tuple1[0]) # Prints first element of the tuple
print(tuple1[1:3]) # Prints elements starting from 2nd till 3rd
print(tuple1[2:]) # Prints elements starting from 3rd element
print(tinytuple * 2) # Prints tuple two times
print(tuple1 + tinytuple) # Prints concatenated tuple
list1 = ['abcd', 786, 2.23, 'john', 70.2]
# tuple1[2] = 1000 # Invalid syntax with tuple
list1[2] = 1000 # Valid syntax with list
| [
"webnl@DT-Charly.koi.local"
] | webnl@DT-Charly.koi.local |
aa6662318c74771c0ab32e588e4a200a782bdb3a | 556db265723b0cc30ad2917442ed6dad92fd9044 | /tensorflow/python/client/pywrap_tf_session.py | 96f42ef026b82b874aacfc3b1778ce72ac5c3785 | [
"MIT",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | graphcore/tensorflow | c1669b489be0e045b3ec856b311b3139858de196 | 085b20a4b6287eff8c0b792425d52422ab8cbab3 | refs/heads/r2.6/sdk-release-3.2 | 2023-07-06T06:23:53.857743 | 2023-03-14T13:04:04 | 2023-03-14T13:48:43 | 162,717,602 | 84 | 17 | Apache-2.0 | 2023-03-25T01:13:37 | 2018-12-21T13:30:38 | C++ | UTF-8 | Python | false | false | 2,973 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python module for Session ops, vars, and functions exported by pybind11."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=invalid-import-order,g-bad-import-order, wildcard-import, unused-import
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client._pywrap_tf_session import *
from tensorflow.python.client._pywrap_tf_session import _TF_SetTarget
from tensorflow.python.client._pywrap_tf_session import _TF_SetConfig
from tensorflow.python.client._pywrap_tf_session import _TF_NewSessionOptions
# Convert versions to strings for Python2 and keep api_compatibility_test green.
# We can remove this hack once we remove Python2 presubmits. pybind11 can only
# return unicode for Python2 even with py::str.
# https://pybind11.readthedocs.io/en/stable/advanced/cast/strings.html#returning-c-strings-to-python
# pylint: disable=undefined-variable
__version__ = str(get_version())
__git_version__ = str(get_git_version())
__compiler_version__ = str(get_compiler_version())
__cxx11_abi_flag__ = get_cxx11_abi_flag()
__monolithic_build__ = get_monolithic_build()
# User getters to hold attributes rather than pybind11's m.attr due to
# b/145559202.
GRAPH_DEF_VERSION = get_graph_def_version()
GRAPH_DEF_VERSION_MIN_CONSUMER = get_graph_def_version_min_consumer()
GRAPH_DEF_VERSION_MIN_PRODUCER = get_graph_def_version_min_producer()
TENSOR_HANDLE_KEY = get_tensor_handle_key()
# pylint: enable=undefined-variable
# Disable pylint invalid name warnings for legacy functions.
# pylint: disable=invalid-name
def TF_NewSessionOptions(target=None, config=None):
# NOTE: target and config are validated in the session constructor.
opts = _TF_NewSessionOptions()
if target is not None:
_TF_SetTarget(opts, target)
if config is not None:
config_str = config.SerializeToString()
_TF_SetConfig(opts, config_str)
return opts
# Disable pylind undefined-variable as the variable is exported in the shared
# object via pybind11.
# pylint: disable=undefined-variable
def TF_Reset(target, containers=None, config=None):
opts = TF_NewSessionOptions(target=target, config=config)
try:
TF_Reset_wrapper(opts, containers)
finally:
TF_DeleteSessionOptions(opts)
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
8b0b6d0d068e5c0c0d871706b8de364a8b45b874 | 805593291790843645dc884616c2a672f9cf953a | /graph/spanning_tree/kuraskal.py | 9959bd549a7ac8edeac8ec868b9f23f7ff5978db | [] | no_license | Shumpei-Kikuta/library | 1aa3e5aa1a619734441b431eaf2a872784030ee0 | cfa5a035df2e98641259032c936e063767e53230 | refs/heads/master | 2020-08-03T01:03:31.859006 | 2019-10-25T01:58:30 | 2019-10-25T01:58:30 | 211,575,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,336 | py | """クラスカル法"""
class Node:
def __init__(self, idx):
self.idx = idx
self.parent = None
def unite(x: Node, y: Node, nodes):
"""xを含む集合とyを含む集合を併合"""
x_root, x_depth = root(x, 0)
y_root, y_depth = root(y, 0)
# xの根を併合後の根とする
if y_root != x_root:
if x_depth >= y_depth:
y_root.parent = x_root
nodes[y_root.idx] = y_root
else:
x_root.parent = y_root
nodes[x_root.idx] = x_root
return nodes
def same(x: Node, y: Node):
"""xとyが同じ集合に所属するか?すれば1, しなければ0を返す"""
x_root, _ = root(x, 0)
y_root, _ = root(y, 0)
if x_root.idx == y_root.idx:
return 1
else:
return 0
def root(x: Node, cnt: int):
"""Node xの所属する木の根を探索"""
if x.parent is None:
return x, cnt
else:
return root(x.parent, cnt + 1)
def initialize_adjlists(lists, V):
for i in range(V):
lists[i] = []
return lists
def adjacency_lists2kuraskal_list(adjacency_lists: dict) -> dict:
"""
OUTPUT: {(from_, to_): weight}
"""
dicts = {}
for from_ in adjacency_lists:
for to_, weight in adjacency_lists[from_]:
dicts[(from_, to_)] = weight
return dicts
def kuraskal(adjacency_lists: dict):
V = len(adjacency_lists)
kuraskal_lists = adjacency_lists2kuraskal_list(adjacency_lists)
kuraskal_lists = sorted(kuraskal_lists.items(), key=lambda x: x[1])
nodes = []
for i in range(V):
node = Node(i)
nodes.append(node)
num = 0
for (from_, to_), weight in kuraskal_lists:
if same(nodes[from_], nodes[to_]):
continue
else:
nodes = unite(nodes[from_], nodes[to_], nodes)
num += weight
return num
def main():
V = int(input())
adjacency_lists = {} # key: node, value: (node, weight)
adjacency_lists = initialize_adjlists(adjacency_lists, V)
for i in range(V):
lists = [int(c) for c in input().split()]
for j, w in enumerate(lists):
if w == -1:
continue
adjacency_lists[i].append((j, w))
print(kuraskal(adjacency_lists))
if __name__ == '__main__':
main()
| [
"shunpei-kikuta775@g.ecc.u-tokyo.ac.jp"
] | shunpei-kikuta775@g.ecc.u-tokyo.ac.jp |
2e9b4bb92dd86faf0815ab836243cd924a9fd5ca | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc088/C/4904355.py | 50700176dd66473e27c6decd361c1ba164066058 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | c1 = list(map(int, input().rstrip().split()))
c2 = list(map(int, input().rstrip().split()))
c3 = list(map(int, input().rstrip().split()))
answer = 'No'
#a1?????a2,a3,b1,b2,b3?????????
for i in range(101):
a1 = i
b1 = c1[0] - a1
b2 = c1[1] - a1
b3 = c1[2] - a1
a2 = c2[0] - b1
a3 = c3[0] - b1
c2_2 = a2 + b2
c2_3 = a2 + b3
c3_2 = a3 + b2
c3_3 = a3 + b3
if c2_2 == c2[1] and c2_3 == c2[2] and c3_2 == c3[1] and c3_3 == c3[2]:
answer = 'Yes'
break
print(answer) | [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
d3ee9535e42a5f92a997d4814a06b0f8fa25b6c0 | 41a4887a52afe81f203d0917c5ef54ccbe2389fe | /toys/kids/flip_fen.py | 2cc78123c07ce81bb982af2c037ea82d657d8293 | [] | no_license | tgandor/meats | 2efc2e144fc59b2b99aeeaec5f5419dbbb323f9b | 26eb57e49752dab98722a356e80a15f26cbf5929 | refs/heads/master | 2023-08-30T20:35:47.949622 | 2023-08-25T13:26:23 | 2023-08-25T13:26:23 | 32,311,574 | 13 | 9 | null | 2022-06-22T20:44:44 | 2015-03-16T08:39:21 | Python | UTF-8 | Python | false | false | 193 | py | #!/usr/bin/env python
import sys
while True:
line = sys.stdin.readline().split()
if not line:
break
line[0] = '/'.join(line[0].split('/')[::-1])
print(' '.join(line))
| [
"tomasz.gandor@gmail.com"
] | tomasz.gandor@gmail.com |
85b78518837e1b6109966119e16266b004ade7f8 | d66818f4b951943553826a5f64413e90120e1fae | /hackerrank/10 Days of Statistics/Day 8 - Least Square Regression Line/test.py | f5478d0354c945b2bef65095410b7eec0bb973e3 | [
"MIT"
] | permissive | HBinhCT/Q-project | 0f80cd15c9945c43e2e17072416ddb6e4745e7fa | 19923cbaa3c83c670527899ece5c3ad31bcebe65 | refs/heads/master | 2023-08-30T08:59:16.006567 | 2023-08-29T15:30:21 | 2023-08-29T15:30:21 | 247,630,603 | 8 | 1 | MIT | 2020-07-22T01:20:23 | 2020-03-16T06:48:02 | Python | UTF-8 | Python | false | false | 521 | py | import io
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch
class TestQ(unittest.TestCase):
@patch('builtins.input', side_effect=[
'95 85',
'85 95',
'80 70',
'70 65',
'60 70',
])
def test_case_0(self, input_mock=None):
text_trap = io.StringIO()
with redirect_stdout(text_trap):
import solution
self.assertEqual(text_trap.getvalue(), '78.288\n')
if __name__ == '__main__':
unittest.main()
| [
"hbinhct@gmail.com"
] | hbinhct@gmail.com |
c4d3467d4d06eb14220feda8004d28995b35fb8d | 8acffb8c4ddca5bfef910e58d3faa0e4de83fce8 | /ml-flask/Lib/site-packages/sklearn/metrics/cluster/setup.py | ee0fc49bd4888209bfc12eab6d56e2a17ddf12c9 | [
"MIT"
] | permissive | YaminiHP/SimilitudeApp | 8cbde52caec3c19d5fa73508fc005f38f79b8418 | 005c59894d8788c97be16ec420c0a43aaec99b80 | refs/heads/master | 2023-06-27T00:03:00.404080 | 2021-07-25T17:51:27 | 2021-07-25T17:51:27 | 389,390,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:e3107d1ba6c899cf749991d704ff933206baa46ff4c7d81e504222277355d67d
size 667
| [
"yamprakash130@gmail.com"
] | yamprakash130@gmail.com |
8fcbd338b424590aa21d59e3c91d905708a89c6c | 1be3fd4f49ff1ba41b36bdb45ad1cd738b7e2e97 | /动态规划/序列型动态规划/LeetCode53_最大子序和.py | 6e96ab053d5009923bc8cc47501246da1dcd9274 | [] | no_license | ltzp/LeetCode | d5dcc8463e46b206515c1205582305d8ce981cc5 | f43d70cac56bdf6377b22b865174af822902ff78 | refs/heads/master | 2023-07-28T02:36:44.202092 | 2021-09-08T15:55:09 | 2021-09-08T15:55:09 | 308,343,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 984 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/04/09
# @Author : yuetao
# @Site :
# @File : LeetCode53_最大子序和.py
# @Desc :
"""
输入:nums = [-2,1,-3,4,-1,2,1,-5,4]
输出:6
解释:连续子数组 [4,-1,2,1] 的和最大,为 6 。
在状态的计算过程中我们可以发现,后面状态的计算只与当前状态的值有关,而与此阶段之前的值无关,所以具有无后效性。
"""
class Solution(object):
def maxSubArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
length = len(nums)
dp = [float("-inf") for _ in range(length)]
dp[0] = nums[0]
res = dp[0]
for i in range(1, length):
dp[i] = max(dp[i-1] + nums[i], nums[i])
res = max(dp[i], res)
return res
if __name__ == '__main__':
solve = Solution()
nums = [-2,1,-3,4,-1,2,1,-5,4]
result = solve.maxSubArray(nums)
print(result)
| [
"806518802@qq.com"
] | 806518802@qq.com |
d7b51f7ff602b1bd5707500365b2f4011d95eb01 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_ethiopian.py | c12e2d18374d3610fe33cc409fbae8d3b9221e7b | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py |
#calss header
class _ETHIOPIAN():
def __init__(self,):
self.name = "ETHIOPIAN"
self.definitions = [u'belonging to or relating to Ethiopia or its people']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
c8609399aa50011c76fc5e290f0e4ee907c1c282 | e4fcd551a9d83e37a2cd6d5a2b53a3cc397ccb10 | /codes/t2i_models/CogView2/SwissArmyTransformer-main/examples/roberta/finetune_roberta_rte.py | 2a97af8db773d45f62776f782c3b787a6622c713 | [
"Apache-2.0"
] | permissive | eslambakr/HRS_benchmark | 20f32458a47c6e1032285b44e70cf041a64f842c | 9f153d8c71d1119e4b5c926b899bb556a6eb8a59 | refs/heads/main | 2023-08-08T11:57:26.094578 | 2023-07-22T12:24:51 | 2023-07-22T12:24:51 | 597,550,499 | 33 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,667 | py | import os
import torch
import argparse
import numpy as np
from SwissArmyTransformer import mpu, get_args
from SwissArmyTransformer.training.deepspeed_training import training_main
from roberta_model import RobertaModel
from SwissArmyTransformer.model.mixins import PrefixTuningMixin, MLPHeadMixin
class ClassificationModel(RobertaModel):
def __init__(self, args, transformer=None, parallel_output=True):
super().__init__(args, transformer=transformer, parallel_output=parallel_output)
self.del_mixin('roberta-final')
self.add_mixin('classification_head', MLPHeadMixin(args.hidden_size, 2048, 1))
self.add_mixin('prefix-tuning', PrefixTuningMixin(args.num_layers, args.hidden_size // args.num_attention_heads, args.num_attention_heads, args.prefix_len))
def disable_untrainable_params(self):
self.transformer.word_embeddings.requires_grad_(False)
# for layer_id in range(len(self.transformer.layers)):
# self.transformer.layers[layer_id].requires_grad_(False)
def get_batch(data_iterator, args, timers):
# Items and their type.
keys = ['input_ids', 'position_ids', 'attention_mask', 'label']
datatype = torch.int64
# Broadcast data.
timers('data loader').start()
if data_iterator is not None:
data = next(data_iterator)
else:
data = None
timers('data loader').stop()
data_b = mpu.broadcast_data(keys, data, datatype)
# Unpack.
tokens = data_b['input_ids'].long()
labels = data_b['label'].long()
position_ids = data_b['position_ids'].long()
attention_mask = data_b['attention_mask'][:, None, None, :].float()
# Convert
if args.fp16:
attention_mask = attention_mask.half()
return tokens, labels, attention_mask, position_ids, (tokens!=1)
def forward_step(data_iterator, model, args, timers):
"""Forward step."""
# Get the batch.
timers('batch generator').start()
tokens, labels, attention_mask, position_ids, loss_mask = get_batch(
data_iterator, args, timers)
timers('batch generator').stop()
logits, *mems = model(tokens, position_ids, attention_mask)
# pred = ((logits.contiguous().float().squeeze(-1)) * loss_mask).sum(dim=-1) / loss_mask.sum(dim=-1)
pred = logits.contiguous().float().squeeze(-1)[..., 0]
loss = torch.nn.functional.binary_cross_entropy_with_logits(
pred,
labels.float()
)
acc = ((pred > 0.).long() == labels).sum() / labels.numel()
return loss, {'acc': acc}
pretrain_path = ''
from transformers import RobertaTokenizer
tokenizer = RobertaTokenizer.from_pretrained(os.path.join(pretrain_path, 'roberta-large'))
from transformers.models.roberta.modeling_roberta import create_position_ids_from_input_ids
def _encode(text, text_pair):
encoded_input = tokenizer(text, text_pair, max_length=args.sample_length, padding='max_length', truncation='only_first')
position_ids = create_position_ids_from_input_ids(torch.tensor([encoded_input['input_ids']]), 1, 0)
return dict(input_ids=encoded_input['input_ids'], position_ids=position_ids[0].numpy(), attention_mask=encoded_input['attention_mask'])
from SwissArmyTransformer.data_utils import load_hf_dataset
def create_dataset_function(path, args):
def process_fn(row):
pack, label = _encode(row['premise'], row['hypothesis']), int(row['label'])
return {
'input_ids': np.array(pack['input_ids'], dtype=np.int64),
'position_ids': np.array(pack['position_ids'], dtype=np.int64),
'attention_mask': np.array(pack['attention_mask'], dtype=np.int64),
'label': label
}
return load_hf_dataset(path, process_fn, columns = ["input_ids", "position_ids", "attention_mask", "label"], cache_dir='/dataset/fd5061f6/SwissArmyTransformerDatasets', offline=False, transformer_name="rte_transformer")
if __name__ == '__main__':
py_parser = argparse.ArgumentParser(add_help=False)
py_parser.add_argument('--new_hyperparam', type=str, default=None)
py_parser.add_argument('--sample_length', type=int, default=512-16)
py_parser.add_argument('--prefix_len', type=int, default=16)
py_parser.add_argument('--old_checkpoint', action="store_true")
known, args_list = py_parser.parse_known_args()
args = get_args(args_list)
args = argparse.Namespace(**vars(args), **vars(known))
# from cogdata.utils.ice_tokenizer import get_tokenizer as get_ice
# tokenizer = get_tokenizer(args=args, outer_tokenizer=get_ice())
training_main(args, model_cls=ClassificationModel, forward_step_function=forward_step, create_dataset_function=create_dataset_function)
| [
"islam.bakr.2017@gmail.com"
] | islam.bakr.2017@gmail.com |
17d251d9dfe4693be09b37532fad90b492e6416d | 11ccb6827cf643b37c44a2e174422f9c6f9497f2 | /falcon/bench/dj/manage.py | 1ed3638a4df1bec17ec47502b337b01f857ace4a | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | falconry/falcon | 7506f838520e5105714702d9a9b2f0e608a704b9 | 32207fe8a7ebdfb95271d8430c4977c7a654928c | refs/heads/master | 2023-08-31T05:32:03.755869 | 2023-08-21T21:45:34 | 2023-08-21T21:45:34 | 7,040,500 | 8,922 | 1,183 | Apache-2.0 | 2023-09-09T20:58:36 | 2012-12-06T18:17:51 | Python | UTF-8 | Python | false | false | 808 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dj.settings')
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # NOQA
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
'available on your PYTHONPATH environment variable? Did you '
'forget to activate a virtual environment?'
)
raise
execute_from_command_line(sys.argv)
| [
"john.vrbanac@linux.com"
] | john.vrbanac@linux.com |
78e28807403ebdbf2c4fe8e40201ff5e0fc9d861 | b8cc6d34ad44bf5c28fcca9e0df01d9ebe0ee339 | /入门学习/datetime_eg.py | a136cabe3d671ca43e319ccc8cc933f54ffd180a | [] | no_license | python-yc/pycharm_script | ae0e72898ef44a9de47e7548170a030c0a752eb5 | c8947849090c71e131df5dc32173ebe9754df951 | refs/heads/master | 2023-01-05T06:16:33.857668 | 2020-10-31T08:09:53 | 2020-10-31T08:09:53 | 296,778,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,398 | py | #coding:utf-8
import datetime
#datetime常见属性
#datetime.date():一个理想和的日期,提供year,month,day属性
dt = datetime.date(2018,3,26)
print(dt)
print(dt.day)
print(dt.year)
print(dt.month)
#datetime.time:提供一个理想和的时间
#datetime.datetime:提供日期与实践的组合
#datetime.timedelta:提供一个时间差,时间长度
from datetime import datetime
import time
#常用类方法:today、now、utcnow、fromtimestamp(从时间戳中返回本地时间)
dt = datetime(2018,2,26) #此处的时间没有用到,但是需要这三个参数,删除报错
print(datetime.today())
print(dt.now())
print(dt.fromtimestamp(time.time()))
print("===============11111111")
#datetime.timedelta:表示一个时间间隔
from datetime import datetime,timedelta
t1 = datetime.now()
print(t1.strftime("%Y-%m-%d %H:%M:%S"))
#td表示以小时的时间长度
td = timedelta(hours=1)
print(td)
#当前时间加上时间间隔后,把得到的一小时后的时间格式化输出
print((t1+td).strftime("%Y-%m-%d %H:%M:%S"))
##timeit-时间测量工具
###- 测量程序运行时间间隔实验
print("===============222222")
def p():
time.sleep(3.6)
t1 = time.time()
p()
print(time.time() - t1)
print("===========3333333333")
#利用timeit调用代码,执行100000次,查看运行时间
#格式timeit.timeit(stmt=c,number=10000),c可以是函数,也可以是字符串式的代码块
####字符串代码块形式s='''内容在三引号之间'''
#timeit可以执行一个函数,来测量函数的执行时间,如:
import timeit
def doIt():
num = 2
for i in range(num):
print("Repeat for {0}".format(i))
#执行函数,重复10次
print(doIt)
print(type(doIt))
t = timeit.timeit(stmt=doIt,number=10)
print(t)
print("=============或者这样同上一个")
import timeit
s ='''
def doIt(num):
num = 2
for i in range(num):
print("Repeat for {0}".format(i))
'''
#执行函数,重复10次
#执行doIt(num),setup负责把环境变量准备好
#实际相当于给timeit创造一个小环境,在创造的小环境中,代码的执行顺序大致是
#
'''
def doIt(num):
......
num = 2
doIt(num)
'''
#此处的setup后的num=2循环输出的范围(即for后range的参数),number后的数字表示的循环次数
t = timeit.timeit("doIt(num)",setup=s+"num=0",number=10)
print(t)
# help(timeit.timeit)
| [
"15655982512.com"
] | 15655982512.com |
f044f713fb899779529717704ff3f507de299d33 | a905f5b56732cb49d5d692b75c7334d772b67144 | /Gui/t2.py | bf8c4c91ef955c1e02ec9ad8aec3f390ced0d5f7 | [] | no_license | weilaidb/PythonExample2 | d859acee3eb3e9b6448553b4f444c95ab2b2fc8f | 492fa2d687a8f3b9370ed8c49ffb0d06118246c7 | refs/heads/master | 2022-04-20T00:35:35.456199 | 2020-04-26T00:32:12 | 2020-04-26T00:32:12 | 114,774,595 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | '''''5.向Listbox中添加一个item'''
# 以上的例子均使用了insert来向Listbox中添加 一个item,这个函数有两个属性一个为添加的索引值,另一个为添加的项(item)
# 有两个特殊的值ACTIVE和END,ACTIVE是向当前选中的item前插入一个(即使用当前选中的索引作为插入位置);END是向
# Listbox的最后一项添加插入一项
# 先向Listbox中追加三个item,再在Listbox开始添加三项
from tkinter import *
root = Tk()
lb = Listbox(root)
for item in ['python','tkinter','widget']:
lb.insert(END,item)
#只添加一项将[]作为一个item
#lb.insert(0,['linux','windows','unix'])
#添加三项,每个string为一个item
lb.insert(0,'linux','windows','unix')
lb.pack()
root.mainloop() | [
"wxjlmr@126.com"
] | wxjlmr@126.com |
374eb79ae24863d43ed492b428e933b87a306275 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_43/66.py | 6e5512c1daca685a498a0b628f17dadb6578b723 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | import sys
lines = sys.stdin.readlines()
i = 1
while i<len(lines):
s = set()
msg = lines[i].rstrip()
for each in msg:
s.add(each)
#print len(s)
maxbase = len(s)
if maxbase == 1:
maxbase = 2
cost = maxbase ** (len(msg)-1)
cands = range(len(s))
cands.reverse()
try:
cands.remove(1)
except:
pass
power = len(msg)-2
index = 2
prev = {}
prev[msg[0]] = 1
for x in xrange(1, len(msg)):
c = msg[x]
#print c
if not prev.has_key(c):
prev[c] = int(cands.pop())
#print prev[c]
#print "maxbase ", maxbase
#print "power ", power
cost += prev[c] * (maxbase ** power)
power -= 1
#print prev
print "Case #"+str(i)+": "+str(cost)
i += 1
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
52b9507585c470c8c272b918d3b8c1f3741990ae | 651a296c8f45b5799781fd78a6b5329effe702a0 | /subset/i4_sqrt.py | 15231735ca1b9e9e6b31120a626bc3326da2d377 | [] | no_license | pdhhiep/Computation_using_Python | 095d14370fe1a01a192d7e44fcc81a52655f652b | 407ed29fddc267950e9860b8bbd1e038f0387c97 | refs/heads/master | 2021-05-29T12:35:12.630232 | 2015-06-27T01:05:17 | 2015-06-27T01:05:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,042 | py | #!/usr/bin/env python
def i4_sqrt ( n ):
#*****************************************************************************80
#
## I4_SQRT finds the integer square root of N by solving N = Q^2 + R.
#
# Discussion:
#
# The integer square root of N is an integer Q such that
# Q^2 <= N but N < (Q+1)^2.
#
# A simpler calculation would be something like
#
# Q = INT ( SQRT ( REAL ( N ) ) )
#
# but this calculation has the virtue of using only integer arithmetic.
#
# To avoid the tedium of worrying about negative arguments, the routine
# automatically considers the absolute value of the argument.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 14 March 2015
#
# Author:
#
# John Burkardt
#
# Reference:
#
# Mark Herkommer,
# Number Theory, A Programmer's Guide,
# McGraw Hill, 1999, pages 294-307.
#
# Parameters:
#
# Input, integer N, the number whose integer square root is desired.
# Actually, only the absolute value of N is considered.
#
# Output, integer Q, R, the integer square root, and positive remainder,
# of N.
#
n_abs = abs ( n )
q = n_abs
if ( 0 < n_abs ):
while ( ( n_abs // q ) < q ):
q = ( ( q + ( n_abs // q ) ) // 2 )
r = n_abs - q * q
return q, r
def i4_sqrt_test ( ):
#*****************************************************************************80
#
## I4_SQRT_TEST tests I4_SQRT.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 14 March 2015
#
# Author:
#
# John Burkardt
#
print ''
print 'I4_SQRT_TEST'
print ' I4_SQRT computes the square root of an I4.'
print ''
print ' N Sqrt(N) Remainder'
print ''
for n in range ( -5, 21 ):
q, r = i4_sqrt ( n )
print ' %7d %7d %7d' % ( n, q, r )
#
# Terminate.
#
print ''
print 'I4_SQRT_TEST'
print ' Normal end of execution.'
return
if ( __name__ == '__main__' ):
from timestamp import timestamp
timestamp ( )
i4_sqrt_test ( )
timestamp ( )
| [
"siplukabir@gmail.com"
] | siplukabir@gmail.com |
bd49b4bc90efd2d2ceda83f672ca908ee94d8909 | 520e7d0bdc294e89e807ffc5d0277e0b1df035d4 | /taskloaf/object_ref.py | 4021c994f4aaf111c88d9cb39bd4b13d507c22d4 | [
"MIT"
] | permissive | tanwanirahul/taskloaf | 5a454e683dbf681f0a417911280c12176bd4a3a6 | 7f7b027ef18b8475922054ccc44dfcb5de0433bc | refs/heads/master | 2020-04-20T20:55:52.124091 | 2018-12-10T19:21:45 | 2018-12-10T19:21:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,008 | py | import asyncio
import taskloaf.serialize
import taskloaf.allocator
import taskloaf.refcounting
def put(worker, obj):
return FreshObjectRef(worker, obj)
def alloc(worker, nbytes):
ptr = worker.allocator.malloc(nbytes)
def on_delete(_id, worker = worker, ptr = ptr):
worker.allocator.free(ptr)
ref = taskloaf.refcounting.Ref(worker, on_delete)
return ObjectRef(ref, ptr, False)
def submit_ref_work(worker, to, f):
ref = put(worker, f).convert()
worker.send(to, worker.protocol.REFWORK, [ref, b''])
def setup_plugin(worker):
assert(hasattr(worker, 'allocator'))
assert(hasattr(worker, 'ref_manager'))
worker.object_cache = dict()
worker.protocol.add_msg_type(
'REMOTEGET', type = ObjectMsg, handler = handle_remote_get
)
worker.protocol.add_msg_type(
'REMOTEPUT', type = ObjectMsg, handler = handle_remote_put
)
worker.protocol.add_msg_type(
'REFWORK', type = ObjectMsg, handler = handle_ref_work
)
def handle_ref_work(worker, args):
f_ref = args[0]
async def run_me(worker):
f = await f_ref.get()
await worker.wait_for_work(f)
worker.start_async_work(run_me)
def is_ref(x):
return isinstance(x, FreshObjectRef) or isinstance(x, ObjectRef)
"""
This class barely needs to do anything because python already uses reference
counting for GC. It just needs to make sure that once it is serialized, all
serialized versions with the same id point to the same serialized chunk of
memory. This is based on the observation that in order to access a chunk of
memory from another worker, the reference first has to arrive at that other
worker and thus serialization of the reference can be used as a trigger for
serialization of the underlying object.
"""
class FreshObjectRef:
def __init__(self, worker, obj):
self.worker = worker
self._id = self.worker.get_new_id()
self.obj = obj
self.objref = None
async def get(self):
return self.get_local()
def get_local(self):
return self.obj
def __reduce__(self):
objref = self.convert()
return (FreshObjectRef.reconstruct, (objref,))
@classmethod
def reconstruct(cls, objref):
return objref
def convert(self):
if self.objref is not None:
return self.objref
else:
return self._new_ref()
def _new_ref(self):
deserialize, child_refs, serialized_obj = serialize_if_needed(
self.worker, self.obj
)
nbytes = len(serialized_obj)
ptr = self.worker.allocator.malloc(nbytes)
ptr.deref()[:] = serialized_obj
self.worker.object_cache[(self.worker.addr, self._id)] = self.obj
def on_delete(_id, worker = self.worker, ptr = ptr):
key = (worker.addr, _id)
del worker.object_cache[key]
worker.allocator.free(ptr)
ref = taskloaf.refcounting.Ref(
self.worker, on_delete, _id = self._id, child_refs = child_refs
)
self.objref = ObjectRef(ref, ptr, deserialize)
return self.objref
def encode_capnp(self, msg):
self.convert().encode_capnp(msg)
def is_bytes(v):
return isinstance(v, bytes) or isinstance(v, memoryview)
def serialize_if_needed(worker, obj):
if is_bytes(obj):
return False, [], obj
else:
child_refs, blob = taskloaf.serialize.dumps(worker, obj)
return True, child_refs, blob
"""
It seems like we're recording two indexes to the data:
-- the ptr itself
-- the (owner, _id) pair
This isn't strictly necessary, but has some advantages.
"""
class ObjectRef:
def __init__(self, ref, ptr, deserialize):
self.ref = ref
self.ptr = ptr
self.deserialize = deserialize
def key(self):
return self.ref.key()
@property
def worker(self):
return self.ref.worker
async def get(self):
await self._ensure_available()
self._ensure_deserialized()
return self.get_local()
async def get_buffer(self):
await self._ensure_available()
return self.ptr.deref()
def get_local(self):
return self.worker.object_cache[self.key()]
async def _ensure_available(self):
self.ref._ensure_child_refs_deserialized()
if self.key() in self.worker.object_cache:
val = self.worker.object_cache[self.key()]
if isinstance(val, asyncio.Future):
await val
ptr_accessible = self.ptr is not None
if not ptr_accessible:
await self._remote_get()
def _ensure_deserialized(self):
if self.key() not in self.worker.object_cache:
self._deserialize_and_store(self.ptr.deref())
async def _remote_get(self):
future = asyncio.Future(loop = self.worker.ioloop)
self.worker.object_cache[self.key()] = future
self.worker.send(
self.ref.owner, self.worker.protocol.REMOTEGET, [self, b'']
)
return (await future)
def _remote_put(self, buf):
future = self.worker.object_cache[self.key()]
obj = self._deserialize_and_store(buf)
future.set_result(obj)
def _deserialize_and_store(self, buf):
if self.deserialize:
assert(isinstance(self.ref.child_refs, list))
out = taskloaf.serialize.loads(
self.ref.worker, self.ref.child_refs, buf
)
else:
out = buf
self.worker.object_cache[self.key()] = out
return out
def __getstate__(self):
return dict(
ref = self.ref,
deserialize = self.deserialize,
ptr = self.ptr,
)
def encode_capnp(self, msg):
self.ref.encode_capnp(msg.ref)
msg.deserialize = self.deserialize
self.ptr.encode_capnp(msg.ptr)
@classmethod
def decode_capnp(cls, worker, msg):
objref = ObjectRef.__new__(ObjectRef)
objref.ref = taskloaf.refcounting.Ref.decode_capnp(worker, msg.ref)
objref.deserialize = msg.deserialize
objref.ptr = taskloaf.allocator.Ptr.decode_capnp(
worker, objref.ref.owner, msg.ptr
)
return objref
class ObjectMsg:
@staticmethod
def serialize(args):
ref, v = args
m = taskloaf.message_capnp.Message.new_message()
m.init('object')
ref.encode_capnp(m.object.objref)
m.object.val = bytes(v)
return m
@staticmethod
def deserialize(worker, msg):
return (
ObjectRef.decode_capnp(worker, msg.object.objref),
msg.object.val
)
def handle_remote_get(worker, args):
    """Owner-side REMOTEGET handler: schedule a REMOTEPUT reply with the bytes.

    The current message is captured eagerly because worker.cur_msg may change
    before the scheduled coroutine runs.
    """
    request = worker.cur_msg

    async def send_reply(w):
        payload = await args[0].get_buffer()
        worker.send(
            request.sourceAddr,
            worker.protocol.REMOTEPUT,
            [args[0], payload]
        )
    worker.run_work(send_reply)
def handle_remote_put(worker, args):
    """Requester-side REMOTEPUT handler: resolve the pending fetch with the bytes."""
    objref, buf = args
    objref._remote_put(buf)
| [
"t.ben.thompson@gmail.com"
] | t.ben.thompson@gmail.com |
4cce22269773651f5e8260b8a82c4e13ef2f377d | e2de3f6fe4373f1d98b67af61dd558a813250d54 | /Algorithm/baekjoon/1546_평균.py | 01a88b1be3fc1e2c36405cedb1ab135649d5e59f | [] | no_license | Hansung-Lee/TIL | 3fd6d48427a8b24f7889116297143855d493535b | c24ebab8b631f5c1b835fdc8bd036acbebc8d187 | refs/heads/master | 2020-04-14T11:18:54.035863 | 2019-04-05T07:26:55 | 2019-04-05T07:26:55 | 163,810,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | import sys
N = int(sys.stdin.readline())  # score count; consumed but not otherwise needed
scores = list(map(int, sys.stdin.readline().split()))
best = max(scores)
# Rescale every score against the class maximum, then print the average.
adjusted = [s / best * 100 for s in scores]
print(sum(adjusted) / len(adjusted))
"ajtwlsgkst@naver.com"
] | ajtwlsgkst@naver.com |
27b0dfc843974ab2897bb7c12bc2b7fa9de0dd72 | e76f47d5e6752b838d5f7e23e22cfef65482b8e1 | /SeniorProject/pages/forms.py | d830a5d3dfe4395e0c5225d7ae57c111cf95cde5 | [] | no_license | AmirIdris/Final-Project | b006adfc4074df6687abaac83942b1b151300a51 | 7b0e28d01b7d5b4e4825d5d8b98ba193bd3f49e8 | refs/heads/master | 2023-06-10T21:13:12.875771 | 2021-07-08T20:23:59 | 2021-07-08T20:23:59 | 362,912,491 | 0 | 1 | null | 2021-07-08T20:24:00 | 2021-04-29T18:34:24 | CSS | UTF-8 | Python | false | false | 520 | py | from django import forms
from django.contrib.auth.forms import UserCreationForm,UserChangeForm
from django.contrib.auth.models import User
from django.forms import fields
class CustomUserCreationForm(UserCreationForm):
    """User sign-up form with an extra "Traffic Police" (is_staff) checkbox."""

    class Meta:
        model = User
        fields = UserCreationForm.Meta.fields + ('is_staff',)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Relabel is_staff so end users see it as the Traffic Police role toggle.
        self.fields['is_staff'] = forms.BooleanField(label=("Traffic Police"), required=False)
| [
"you@example.com"
] | you@example.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.